
Learning Linux: Wait Queues


While studying Linux driver programming I got to blocking I/O, and the wait queue machinery behind it turned out to be quite interesting, so I am pulling it apart here. Some of this code is genuinely elegant; it gave me a feel for the beauty of Linux, and for the difference between an artist and an ordinary programmer.

What follows is a brief analysis of a few wait queue questions, essentially my own summary. Learning the kernel alongside the drivers is quite enjoyable.
1. Definition of a wait queue. It consists of two parts: the wait queue head and the wait queue node.
struct __wait_queue_head {
	spinlock_t lock;		/* spinlock protecting the list */
	struct list_head task_list;	/* head of the list of waiters */
};
typedef struct __wait_queue_head wait_queue_head_t;

struct __wait_queue {
	unsigned int flags;
#define WQ_FLAG_EXCLUSIVE	0x01
	void *private;			/* normally the waiting task_struct */
	wait_queue_func_t func;		/* callback invoked by wake_up() */
	struct list_head task_list;	/* node linked into the head's list */
};
/* the main operation on a wait queue head is initialization */
#define DECLARE_WAIT_QUEUE_HEAD(name) \
	wait_queue_head_t name = __WAIT_QUEUE_HEAD_INITIALIZER(name)

/* simply initializes the two fields */
#define __WAIT_QUEUE_HEAD_INITIALIZER(name) {				\
	.lock		= __SPIN_LOCK_UNLOCKED(name.lock),		\
	.task_list	= { &(name).task_list, &(name).task_list } }

#define init_waitqueue_head(q)				\
	do {						\
		static struct lock_class_key __key;	\
		__init_waitqueue_head((q), &__key);	\
	} while (0)
void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *key)
{
	spin_lock_init(&q->lock);
	lockdep_set_class(&q->lock, key);
	INIT_LIST_HEAD(&q->task_list);
}
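Both initialization paths show up in drivers. A minimal sketch (my_static_wq and struct my_dev are made-up names for illustration, not from the article):

DECLARE_WAIT_QUEUE_HEAD(my_static_wq);	/* static: defined and initialized in one step */

struct my_dev {				/* hypothetical device structure */
	wait_queue_head_t wq;
	int data_ready;
};

static void my_dev_setup(struct my_dev *dev)
{
	/* dynamic: for a queue embedded in another object */
	init_waitqueue_head(&dev->wq);
	dev->data_ready = 0;
}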
As the definitions above show, the wait queue head is essentially simple: little more than a list head plus the spinlock that protects it. A wait queue node mainly holds a function pointer with its argument (private, normally the waiting task) and the list linkage.
In driver code the functions we mainly use are wait_event(), wait_event_interruptible() and wait_event_killable(), together with wake_up() and wake_up_interruptible() on the wake-up side.
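Putting these together, a typical blocking read in a character driver looks roughly like the sketch below (it reuses the hypothetical struct my_dev from above; the copy-out step is elided):

static ssize_t my_read(struct file *filp, char __user *buf,
		       size_t count, loff_t *ppos)
{
	struct my_dev *dev = filp->private_data;

	/* sleep until data arrives; a signal can abort the wait */
	if (wait_event_interruptible(dev->wq, dev->data_ready))
		return -ERESTARTSYS;	/* woken by a signal, not by the condition */

	dev->data_ready = 0;
	/* ... copy the data to user space with copy_to_user() ... */
	return count;
}

/* the producer side (an interrupt handler, say) sets the condition
 * first, then wakes the sleepers */
static void my_dev_data_arrived(struct my_dev *dev)
{
	dev->data_ready = 1;
	wake_up_interruptible(&dev->wq);
}

The order on the producer side matters: set the condition before calling wake_up_interruptible(), otherwise the reader can be woken, see a false condition, and go back to sleep.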
Internally, the basic flow is:
#define wait_event(wq, condition)					\
do {									\
	if (condition)		/* already satisfied: break out directly */ \
		break;							\
	__wait_event(wq, condition);	/* otherwise enter the wait queue */ \
} while (0)
#define __wait_event(wq, condition)					\
do {									\
	/* define a new wait queue node on the stack */			\
	DEFINE_WAIT(__wait);						\
									\
	for (;;) {	/* the loop that may block */			\
		/* queue the node and change the task's run state */	\
		prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
		if (condition)	/* condition met: leave the loop */	\
			break;						\
		/*							\
		 * Give up the CPU and schedule another process; the	\
		 * task is now asleep inside schedule() and makes no	\
		 * progress until wake_up() makes it runnable again,	\
		 * at which point schedule() returns and the loop	\
		 * continues. A wake-up alone is not enough to leave	\
		 * the loop: if the condition is still false, the task	\
		 * simply re-queues itself and gives up the CPU again.	\
		 * Only when the condition holds and wake_up() has	\
		 * been called does the for loop terminate.		\
		 */							\
		schedule();						\
	}								\
	/* done waiting: clean up */					\
	finish_wait(&wq, &__wait);					\
} while (0)
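The ordering inside this loop is what makes it race-free: prepare_to_wait() queues the node and changes the task state before the condition is re-checked, so a wake-up that fires in the gap is not lost. A rough interleaving, as comments:

/*  waiter                                  waker
 *  prepare_to_wait(&wq, &__wait, ...);
 *    -> node queued, state set to sleep
 *                                          condition = 1;
 *                                          wake_up(&wq);
 *                                            -> waiter put back to TASK_RUNNING
 *  if (condition)     // sees the condition and breaks out without sleeping,
 *          break;     // or ...
 *  schedule();        // ... returns promptly, since the task is runnable again
 */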
#define DEFINE_WAIT(name)	DEFINE_WAIT_FUNC(name, autoremove_wake_function)

#define DEFINE_WAIT_FUNC(name, function)				\
	wait_queue_t name = {						\
		.private	= current,				\
		.func		= function,				\
		.task_list	= LIST_HEAD_INIT((name).task_list),	\
	}
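Note the default callback chosen by DEFINE_WAIT(). When wake_up() reaches the node it calls autoremove_wake_function(), which in kernels of this generation is essentially:

int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	/* wake the task stored in wait->private ... */
	int ret = default_wake_function(wait, mode, sync, key);

	/* ... and if it really was woken, unlink the node, so the
	 * stack-allocated wait_queue_t is not touched again after the
	 * waiter returns from schedule() */
	if (ret)
		list_del_init(&wait->task_list);
	return ret;
}

This is also why finish_wait() below usually finds the node already unlinked and only needs the "careful" emptiness check.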
void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
{
	unsigned long flags;

	/* mark the waiter as non-exclusive */
	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	/*
	 * Only add the node if it is not already linked; this keeps a
	 * node that loops through prepare_to_wait() several times from
	 * being queued twice, which would be wasteful.
	 */
	if (list_empty(&wait->task_list))
		__add_wait_queue(q, wait);
	/* change the state of the current process */
	set_current_state(state);
	spin_unlock_irqrestore(&q->lock, flags);
}
#define set_current_state(state_value)		\
	set_mb(current->state, (state_value))
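set_mb() stores the new state and then issues a memory barrier, so the state change is visible to other CPUs before the condition is re-read. Its definition is architecture-specific; a generic form of that era is roughly:

#define set_mb(var, value)	do { var = value; smp_mb(); } while (0)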
static inline void __add_wait_queue(wait_queue_head_t *head, wait_queue_t *new)
{
	/* just link the node in at the head of the list */
	list_add(&new->task_list, &head->task_list);
}
/* the non-sleeping variant: it only links the node into the queue;
 * the caller is responsible for setting the task state itself */
void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	wait->flags &= ~WQ_FLAG_EXCLUSIVE;
	spin_lock_irqsave(&q->lock, flags);
	__add_wait_queue(q, wait);
	spin_unlock_irqrestore(&q->lock, flags);
}
void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
{
	unsigned long flags;

	/* set the current process back to TASK_RUNNING so it can run again */
	__set_current_state(TASK_RUNNING);
	/*
	 * We can check for list emptiness outside the lock
	 * IFF:
	 *  - we use the "careful" check that verifies both
	 *    the next and prev pointers, so that there cannot
	 *    be any half-pending updates in progress on other
	 *    CPUs that we haven't seen yet (and that might
	 *    still change the stack area.
	 * and
	 *  - all other users take the lock (ie we can only
	 *    have _one_ other CPU that looks at or modifies
	 *    the list).
	 */
	/* unlink the node; this effectively releases it */
	if (!list_empty_careful(&wait->task_list)) {
		spin_lock_irqsave(&q->lock, flags);
		list_del_init(&wait->task_list);
		spin_unlock_irqrestore(&q->lock, flags);
	}
}
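The wake-up side never appears in the listing above, so for completeness: in kernels of this generation wake_up() boils down to walking task_list under the lock and invoking each node's func callback, roughly:

static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
			     int nr_exclusive, int wake_flags, void *key)
{
	wait_queue_t *curr, *next;

	list_for_each_entry_safe(curr, next, &q->task_list, task_list) {
		unsigned flags = curr->flags;

		/* invoke the node's callback (autoremove_wake_function by
		 * default); stop early once enough exclusive waiters woke */
		if (curr->func(curr, mode, wake_flags, key) &&
		    (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
			break;
	}
}

void __wake_up(wait_queue_head_t *q, unsigned int mode,
	       int nr_exclusive, void *key)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	__wake_up_common(q, mode, nr_exclusive, 0, key);
	spin_unlock_irqrestore(&q->lock, flags);
}

#define wake_up(x)			__wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_interruptible(x)	__wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)

Finally, schedule() itself, from the same kernel generation. The parts that matter for wait queues are the signal_pending_state() check and the deactivate_task() call, which is what actually takes the sleeping task off the runqueue: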
asmlinkage void __sched schedule(void)
{
	struct task_struct *prev, *next;
	unsigned long *switch_count;
	struct rq *rq;
	int cpu;

need_resched:
	preempt_disable();
	cpu = smp_processor_id();
	rq = cpu_rq(cpu);
	rcu_note_context_switch(cpu);
	prev = rq->curr;

	release_kernel_lock(prev);
need_resched_nonpreemptible:

	schedule_debug(prev);

	if (sched_feat(HRTICK))
		hrtick_clear(rq);

	raw_spin_lock_irq(&rq->lock);

	switch_count = &prev->nivcsw;
	/*
	 * prev->state is non-zero for a task that called schedule() after
	 * prepare_to_wait() (TASK_RUNNING is 0), so this branch is where a
	 * wait-queue sleeper is taken off the runqueue.
	 */
	if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
		/* a pending signal cancels an interruptible sleep:
		 * the task stays runnable instead of being dequeued */
		if (unlikely(signal_pending_state(prev->state, prev))) {
			prev->state = TASK_RUNNING;
		} else {
			/*
			 * If a worker is going to sleep, notify and
			 * ask workqueue whether it wants to wake up a
			 * task to maintain concurrency. If so, wake
			 * up the task.
			 */
			if (prev->flags & PF_WQ_WORKER) {
				struct task_struct *to_wakeup;

				to_wakeup = wq_worker_sleeping(prev, cpu);
				if (to_wakeup)
					try_to_wake_up_local(to_wakeup);
			}
			/* off the runqueue: the task will not run again
			 * until wake_up() reactivates it */
			deactivate_task(rq, prev, DEQUEUE_SLEEP);
		}
		switch_count = &prev->nvcsw;
	}

	pre_schedule(rq, prev);

	if (unlikely(!rq->nr_running))
		idle_balance(cpu, rq);

	put_prev_task(rq, prev);
	next = pick_next_task(rq);
	clear_tsk_need_resched(prev);
	rq->skip_clock_update = 0;

	if (likely(prev != next)) {
		sched_info_switch(prev, next);
		perf_event_task_sched_out(prev, next);

		rq->nr_switches++;
		rq->curr = next;
		++*switch_count;

		context_switch(rq, prev, next); /* unlocks the rq */
		/*
		 * The context switch has flipped the stack from under us
		 * and restored the local variables which were saved when
		 * this task called schedule() in the past. prev == current
		 * is still correct, but it can be moved to another cpu/rq.
		 */
		cpu = smp_processor_id();
		rq = cpu_rq(cpu);
	} else
		raw_spin_unlock_irq(&rq->lock);

	post_schedule(rq);

	if (unlikely(reacquire_kernel_lock(prev)))
		goto need_resched_nonpreemptible;

	preempt_enable_no_resched();
	if (need_resched())
		goto need_resched;
}
As the functions and macros above show, wait_event() does most of the work: defining the wait queue node, adding it to the queue, changing the run state of the current process, waiting for the condition to be satisfied, and breaking out of the wait. The remaining task before returning falls to finish_wait(): set the current process back to TASK_RUNNING and unlink the node from the list, releasing what was used.
The other functions, wait_event_interruptible() and wait_event_killable(), perform much the same steps and differ only in the task state they set beforehand. wait_event_interruptible() (TASK_INTERRUPTIBLE) does not wake only when the condition is met: any signal can also end the wait, in which case it returns -ERESTARTSYS. wait_event_killable() (TASK_KILLABLE) can likewise be interrupted, but only by fatal signals, whereas wait_event() returns only once the condition is satisfied.
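For comparison, the interruptible variant of the same era differs only in the task state and the added signal check; roughly:

#define wait_event_interruptible(wq, condition)				\
({									\
	int __ret = 0;							\
	if (!(condition))						\
		__wait_event_interruptible(wq, condition, __ret);	\
	__ret;								\
})

#define __wait_event_interruptible(wq, condition, ret)			\
do {									\
	DEFINE_WAIT(__wait);						\
	for (;;) {							\
		/* TASK_INTERRUPTIBLE instead of TASK_UNINTERRUPTIBLE */ \
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);	\
		if (condition)						\
			break;						\
		if (!signal_pending(current)) {				\
			schedule();					\
			continue;					\
		}							\
		/* a signal ended the wait before the condition held */	\
		ret = -ERESTARTSYS;					\
		break;							\
	}								\
	finish_wait(&wq, &__wait);					\
} while (0)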
