在Linux驱动程序中,可以使用等待队列来实现阻塞进程的唤醒。等待队列的头部定义如下,其核心是一个双向链表。
/* Kernel intrusive doubly-linked list node: next/prev pointers embedded
 * directly in the containing object. */
struct list_head {
struct list_head *next, *prev;
};
/* Head of a wait queue: a spinlock protecting the list of waiting entries. */
struct __wait_queue_head {
spinlock_t lock; /* protects task_list */
struct list_head task_list; /* list of wait_queue_t entries */
};
typedef struct __wait_queue_head wait_queue_head_t;
Linux提供了如下关于等待队列的操作:
1 init_waitqueue_head(&my_queue):进行等待队列头的初始化(内核内部由__init_waitqueue_head实现)。
2 DECLARE_WAITQUEUE: 定义等待队列元素。代码如下
/* Static initializer for a wait-queue entry: records the owning task and
 * the default wake callback; the list node starts unlinked.
 * (A stray extra '}' after the initializer in the transcription was a
 * syntax error and has been removed.) */
#define __WAITQUEUE_INITIALIZER(name, tsk) { \
	.private = tsk, \
	.func = default_wake_function, \
	.task_list = { NULL, NULL } }

/* Declare and initialize a wait-queue entry `name` for task `tsk`. */
#define DECLARE_WAITQUEUE(name, tsk) \
	wait_queue_t name = __WAITQUEUE_INITIALIZER(name, tsk)
其实就是两个宏定义,这个两个宏定义扩展开来其实就是
wait_queue_t name = {
.private = tsk, // private 代表当前进程指针
.func = default_wake_function, // 唤醒时执行的回调函数
.task_list = { NULL, NULL } // 等待队列链表节点
};
那么回到代码中的调用DECLARE_WAITQUEUE(wait, current)就是定义了一个等待队列元素wait。等待队列的private就等于当前进程指针。
3 add_wait_queue/remove_wait_queue:添加移除等待队列
4 等待事件
wait_event(queue,condition)
wait_event_interruptible(queue,condition)
wait_event_timeout(queue,condition,timeout)
wait_event_interruptible_timeout(queue,condition,timeout)
等待以第1个参数queue作为等待队列头部的队列被唤醒,而且第2个参数condition必须满足,否则继续阻塞。wait_event和wait_event_interruptible的区别在于后者可以被信号打断。加上timeout后的宏意味着阻塞等待有超时限制,在第3个参数timeout到达时,不论condition是否满足,均返回。
来看下代码的实现,首先是wait_event。先判断条件,如果条件满足,则立即退出,否则进入__wait_event
/* Sleep uninterruptibly until `condition` becomes true.  Fast path: if the
 * condition already holds, return without ever touching the wait queue. */
#define wait_event(wq, condition) \
do { \
if (condition) \
break; \
__wait_event(wq, condition); \
} while (0)
/* Slow path of wait_event(): declare a wait entry for the current task,
 * then loop — enqueue and mark TASK_UNINTERRUPTIBLE, re-test the
 * condition, and schedule away until it holds.  finish_wait() dequeues
 * the entry and restores TASK_RUNNING. */
#define __wait_event(wq, condition) \
do { \
DEFINE_WAIT(__wait); \
\
for (;;) { \
prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
if (condition) \
break; \
schedule(); \
} \
finish_wait(&wq, &__wait); \
} while (0)
(1) DEFINE_WAIT(__wait) 声明一个属于当前进程的等待队列元素
/* DEFINE_WAIT: wait entry for the current task whose wake callback
 * (autoremove_wake_function) also removes the entry from the queue. */
#define DEFINE_WAIT(name) DEFINE_WAIT_FUNC(name, autoremove_wake_function)
#define DEFINE_WAIT_FUNC(name, function) \
wait_queue_t name = { \
.private = current, \
.func = function, \
.task_list = LIST_HEAD_INIT((name).task_list), \
}
(2) 进入一个无限循环。prepare_to_wait中将声明的等待队列元素加入到等待队列中去,并设置任务状态为TASK_UNINTERRUPTIBLE。如果condition满足,则退出循环;如果不满足则进行一次任务调度
(3) 满足条件退出循环后调用finish_wait从等待任务队列里面删除并设置进程状态为TASK_RUNNING
wait_event_timeout的代码也一样,不过是加了个schedule_timeout,如果超时则不管条件是否满足都直接退出
/* Timed variant: schedule_timeout() both sleeps and counts down `ret`
 * (in jiffies); when it reaches zero the loop exits regardless of
 * `condition`, letting the caller distinguish timeout from success. */
#define __wait_event_timeout(wq, condition, ret) \
do { \
DEFINE_WAIT(__wait); \
\
for (;;) { \
prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE); \
if (condition) \
break; \
ret = schedule_timeout(ret); \
if (!ret) \
break; \
} \
finish_wait(&wq, &__wait); \
} while (0)
5 wake_up(wait_queue_head_t *queue):wake_up会唤醒以queue作为等待队列头部的队列中的进程。代码如下,遍历队列,依次执行每个元素的唤醒回调函数
/* Walk the wait queue and invoke each entry's wake callback (set to
 * default_wake_function -> try_to_wake_up when the entry was created).
 * Stops early once nr_exclusive entries flagged WQ_FLAG_EXCLUSIVE have
 * been successfully woken. */
static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
int nr_exclusive, int wake_flags, void *key)
{
wait_queue_t *curr, *next;
/* _safe iteration: the callback may unlink curr from the list */
list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
unsigned flags = curr->flags;
if (curr->func(curr, mode, wake_flags, key) &&
(flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
break;
}
}
回调函数在生成队列wait的时候赋值为default_wake_function, 里面执行的就是try_to_wake_up。
6 sleep_on在等待队列上睡眠。将当前进程置成TASK_UNINTERRUPTIBLE,然后将当前进程加入到等待队列中去,待睡眠时间超时后,再从等待队列中删除。代码如下
/*
 * Core of the sleep_on() family: enqueue the current task on q, sleep in
 * the given state for at most `timeout` jiffies, then dequeue and return
 * the remaining jiffies.
 * Fix: the transcription dropped the return type ("static long") —
 * implicit int is invalid and the function returns a long.
 * Note the deliberately asymmetric locking, matching the kernel source:
 * irqsave around the enqueue, a plain unlock before sleeping, and
 * irq-lock / irqrestore around the dequeue.
 */
static long
sleep_on_common(wait_queue_head_t *q, int state, long timeout)
{
	unsigned long flags;
	wait_queue_t wait;

	init_waitqueue_entry(&wait, current);
	/* go non-runnable BEFORE queueing so a wake-up between enqueue and
	 * schedule_timeout() is not lost */
	__set_current_state(state);

	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, &wait);
	spin_unlock(&q->lock);
	timeout = schedule_timeout(timeout);
	spin_lock_irq(&q->lock);
	__remove_wait_queue(q, &wait);
	spin_unlock_irqrestore(&q->lock, flags);

	return timeout;
}
现在将前面的globalmem改成阻塞型的,代码如下:
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/wait.h>
#include <linux/sched/signal.h>

/* Size of the global memory buffer (4 KiB). */
#define GLOBALMEM_SIZE 0x1000
/* ioctl command: clear the whole buffer. */
#define MEM_CLEAR 0x1
/* Default major number; overridable via the module parameter below. */
#define GLOBALMEM_MAJOR 230

static int globalmem_major = GLOBALMEM_MAJOR;
module_param(globalmem_major, int, S_IRUGO);

/* Per-device state for the blocking globalmem driver. */
struct globalmem_dev {
	struct cdev cdev;                  /* char device structure */
	unsigned char mem[GLOBALMEM_SIZE]; /* data buffer */
	struct mutex mutex;                /* protects mem and current_len */
	unsigned int current_len;          /* valid bytes currently held in mem */
	wait_queue_head_t r_wait;          /* readers block here when empty */
	wait_queue_head_t w_wait;          /* writers block here when full */
};

struct globalmem_dev *globalmem_devp;

/* open(): stash the device pointer for the other file operations. */
static int globalmem_open(struct inode *inode, struct file *filp)
{
	filp->private_data = globalmem_devp;
	return 0; /* fix: transcription had bare "return ;" — success is 0 */
}

/* release(): nothing to tear down. */
static int globalmem_release(struct inode *inode, struct file *filp)
{
	return 0;
}

/* ioctl: MEM_CLEAR zeroes the buffer; any other command is -EINVAL. */
static long globalmem_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct globalmem_dev *dev = filp->private_data;

	switch (cmd) {
	case MEM_CLEAR:
		mutex_lock(&dev->mutex);
		memset(dev->mem, 0, GLOBALMEM_SIZE); /* fix: restored stripped 0 */
		dev->current_len = 0; /* fix: a cleared buffer holds no data */
		printk(KERN_INFO "globalmem is set to zero\n");
		mutex_unlock(&dev->mutex);
		break; /* fix: missing break fell into default -> -EINVAL on success */
	default:
		return -EINVAL;
	}
	return 0;
}

/* Blocking read: sleep on r_wait until data is available, copy it to
 * user space, compact the buffer, and wake writers blocked on w_wait.
 * Returns bytes read, -EAGAIN (O_NONBLOCK, empty), -ERESTARTSYS
 * (interrupted by a signal) or -EFAULT (bad user buffer). */
static ssize_t globalmem_read_queue(struct file *filp, char __user *buf,
				    size_t size, loff_t *ppos)
{
	int ret;
	struct globalmem_dev *dev = filp->private_data;
	DECLARE_WAITQUEUE(wait, current);

	mutex_lock(&dev->mutex);
	add_wait_queue(&dev->r_wait, &wait);

	while (dev->current_len == 0) { /* fix: restored stripped 0 */
		if (filp->f_flags & O_NONBLOCK) {
			ret = -EAGAIN;
			goto out;
		}
		__set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&dev->mutex); /* drop the lock before sleeping */

		schedule();
		if (signal_pending(current)) { /* woken by a signal, not by data */
			ret = -ERESTARTSYS;
			goto out2;
		}
		mutex_lock(&dev->mutex);
	}

	if (size > dev->current_len)
		size = dev->current_len;

	if (copy_to_user(buf, dev->mem, size)) {
		printk("copy_to_user_fail\n");
		ret = -EFAULT;
		goto out;
	} else {
		/* shift the remaining bytes to the front of the buffer */
		memcpy(dev->mem, dev->mem + size, dev->current_len - size);
		dev->current_len -= size;
		wake_up_interruptible(&dev->w_wait); /* room for writers now */
		ret = size;
	}
out:
	mutex_unlock(&dev->mutex);
out2:
	/* fix: this task was added to r_wait, so it must be removed from
	 * r_wait — removing from w_wait (as transcribed) corrupts both lists */
	remove_wait_queue(&dev->r_wait, &wait);
	set_current_state(TASK_RUNNING);
	return ret;
}

/* Blocking write: sleep on w_wait while the buffer is full, then copy
 * user data in after the valid region and wake readers blocked on
 * r_wait.  Returns bytes written, -EAGAIN (O_NONBLOCK, full),
 * -ERESTARTSYS (signal) or -EFAULT (bad user buffer). */
static ssize_t globalmem_write_queue(struct file *filp, const char __user *buf,
				     size_t size, loff_t *ppos)
{
	int rc;
	struct globalmem_dev *dev = filp->private_data;
	DECLARE_WAITQUEUE(wq_entry, current);

	mutex_lock(&dev->mutex);
	add_wait_queue(&dev->w_wait, &wq_entry);

	/* Wait until there is free space in the buffer. */
	while (dev->current_len == GLOBALMEM_SIZE) {
		if (filp->f_flags & O_NONBLOCK) {
			rc = -EAGAIN;
			goto out;
		}
		__set_current_state(TASK_INTERRUPTIBLE);
		mutex_unlock(&dev->mutex); /* release before sleeping */

		schedule();
		if (signal_pending(current)) { /* interrupted, not space */
			rc = -ERESTARTSYS;
			goto out2;
		}
		mutex_lock(&dev->mutex);
	}

	/* Clamp the request to the space actually free. */
	if (size > GLOBALMEM_SIZE - dev->current_len)
		size = GLOBALMEM_SIZE - dev->current_len;

	if (copy_from_user(dev->mem + dev->current_len, buf, size)) {
		rc = -EFAULT;
		goto out;
	}
	dev->current_len += size;
	wake_up_interruptible(&dev->r_wait); /* data available for readers */
	rc = size;

out:
	mutex_unlock(&dev->mutex);
out2:
	remove_wait_queue(&dev->w_wait, &wq_entry);
	set_current_state(TASK_RUNNING);
	return rc;
}

/* llseek: orig == 0 seeks from the start, orig == 1 seeks from the
 * current position; the result must stay inside [0, GLOBALMEM_SIZE].
 * Fixes: restored the stripped 0/1 literals and the braces around the
 * first bounds check (as transcribed, "break" ran unconditionally);
 * added a default arm so unknown origins do not silently return 0. */
static loff_t globalmem_llseek(struct file *filp, loff_t offset, int orig)
{
	loff_t ret = 0;

	switch (orig) {
	case 0: /* SEEK_SET */
		if (offset < 0) {
			ret = -EFAULT;
			break;
		}
		if ((unsigned int)offset > GLOBALMEM_SIZE) {
			ret = -EFAULT;
			break;
		}
		filp->f_pos = (unsigned int)offset;
		ret = filp->f_pos;
		break;
	case 1: /* SEEK_CUR */
		if ((filp->f_pos + offset) > GLOBALMEM_SIZE) {
			ret = -EFAULT;
			break;
		}
		if ((filp->f_pos + offset) < 0) {
			ret = -EFAULT;
			break;
		}
		filp->f_pos += offset;
		ret = filp->f_pos;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
} static const struct file_operations globalmem_fops={
.owner=THIS_MODULE,
.llseek=globalmem_llseek,
.read=globalmem_read_queue,
.write=globalmem_write_queue,
.unlocked_ioctl=globalmem_ioctl,
.open=globalmem_open,
.release=globalmem_release,
}; static void globalmem_setup_dev(struct globalmem_dev *dev,int index)
{
int err,devno=MKDEV(globalmem_major,index);
cdev_init(&dev->cdev,&globalmem_fops);
dev->cdev.owner=THIS_MODULE;
err=cdev_add(&dev->cdev,devno,);
if(err)
printk(KERN_NOTICE "Error %d adding globalmem%d",err,index);
}

/* Module init: reserve a char device region, allocate the device state,
 * initialize its lock and wait queues, then register the cdev.
 * Fixes: restored stripped 0/1 literals; kzalloc failure now returns
 * -ENOMEM (was -EFAULT); wait queues are initialized BEFORE cdev_add
 * makes the device visible, so an early open/read cannot touch
 * uninitialized queue heads. */
static int __init globalmem_init(void)
{
	int ret;
	dev_t devno = MKDEV(globalmem_major, 0);

	printk("devno=%d\n", devno);
	if (globalmem_major)
		ret = register_chrdev_region(devno, 1, "globalmem_tmp");
	else {
		ret = alloc_chrdev_region(&devno, 0, 1, "globalmem_tmp");
		globalmem_major = MAJOR(devno);
	}
	if (ret < 0)
		return ret;

	globalmem_devp = kzalloc(sizeof(struct globalmem_dev), GFP_KERNEL);
	if (!globalmem_devp) {
		ret = -ENOMEM;
		goto fail_malloc;
	}

	mutex_init(&globalmem_devp->mutex);
	init_waitqueue_head(&globalmem_devp->r_wait);
	init_waitqueue_head(&globalmem_devp->w_wait);

	globalmem_setup_dev(globalmem_devp, 0);
	printk("globalmem init success\n");
	return 0;

fail_malloc:
	unregister_chrdev_region(devno, 1);
	return ret;
}
module_init(globalmem_init);

/* Module exit: unregister the cdev, free the device state, and release
 * the reserved device-number region. */
static void __exit globalmem_exit(void)
{
	cdev_del(&globalmem_devp->cdev);
	kfree(globalmem_devp);
	/* fix: restored stripped minor (0) and count (1) arguments */
	unregister_chrdev_region(MKDEV(globalmem_major, 0), 1);
	printk("global_mem exited\n");
}
module_exit(globalmem_exit);

MODULE_AUTHOR("zhf");
MODULE_LICENSE("GPL");
代码主要做了如下几点改动:
1 在globalmem_dev中增加读和写的队列r_wait以及w_wait
2 在globalmem_init中调用init_waitqueue_head初始化写和读队列
3 在globalmem_read_queue中将当前进程加入读的等待队列,只有当读队列完成copy_to_user的操作后,才唤醒写队列的进程
4 在globalmem_write_queue中将当前进程加入写的等待队列,只有当写队列完成copy_from_user的操作后,才唤醒读队列的进程