Quick Notes on Linux Softirqs

I've been reading through the softirq code recently and haven't finished working through it yet, so for now these are just rough notes to fill in later~

Excerpts from interrupt.h

/* SoftIRQ primitives.  */
#define local_bh_disable() \
		do { add_preempt_count(SOFTIRQ_OFFSET); barrier(); } while (0)
#define __local_bh_enable() \
		do { barrier(); sub_preempt_count(SOFTIRQ_OFFSET); } while (0)

extern void local_bh_enable(void);

/* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
   frequency threaded job scheduling. For almost all the purposes
   tasklets are more than enough. F.e. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
	HI_SOFTIRQ=0,
	TIMER_SOFTIRQ,
	NET_TX_SOFTIRQ,
	NET_RX_SOFTIRQ,
	SCSI_SOFTIRQ,
	TASKLET_SOFTIRQ
};

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
	void	(*action)(struct softirq_action *);
	void	*data;
};

asmlinkage void do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action*), void *data);
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { local_softirq_pending() |= 1UL << (nr); } while (0)
extern void FASTCALL(raise_softirq_irqoff(unsigned int nr));
extern void FASTCALL(raise_softirq(unsigned int nr));


/* Tasklets --- multithreaded analogue of BHs.

   Main feature differing them of generic softirqs: tasklet
   is running only on one CPU simultaneously.

   Main feature differing them of BHs: different tasklets
   may be run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its excecution is still not
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from tasklet itself), it is rescheduled for later.
   * Tasklet is strictly serialized wrt itself, but not
     wrt another tasklets. If client needs some intertask synchronization,
     he makes it with spinlocks.
 */

struct tasklet_struct
{
	struct tasklet_struct *next;
	unsigned long state;
	atomic_t count;
	void (*func)(unsigned long);
	unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }


enum
{
	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
	smp_mb__before_clear_bit(); 
	clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void FASTCALL(__tasklet_schedule(struct tasklet_struct *t));

static inline void tasklet_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_schedule(t);
}

extern void FASTCALL(__tasklet_hi_schedule(struct tasklet_struct *t));

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
		__tasklet_hi_schedule(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
	atomic_inc(&t->count);
	smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
	tasklet_disable_nosync(t);
	tasklet_unlock_wait(t);
	smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
	smp_mb__before_atomic_dec();
	atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
			 void (*func)(unsigned long), unsigned long data);
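
To make the tasklet API above concrete, here is a minimal driver-style usage sketch. All the names (my_tasklet, my_tasklet_fn, my_top_half, my_cleanup) are hypothetical and not part of the kernel source quoted here; the sketch simply exercises DECLARE_TASKLET, tasklet_schedule and tasklet_kill as declared above.

/* Hypothetical usage sketch of the tasklet API; all names are made up. */
#include <linux/interrupt.h>
#include <linux/kernel.h>

/* Bottom half: runs later in softirq context with hardware IRQs enabled. */
static void my_tasklet_fn(unsigned long data)
{
	printk(KERN_DEBUG "my_tasklet ran, data=%lu\n", data);
}

/* Statically declared and enabled (count starts at 0). */
static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

/*
 * Typically called from the driver's hardware interrupt handler: defer
 * the heavy work and return quickly. Scheduling an already-scheduled
 * tasklet is a no-op until it has actually started running.
 */
static void my_top_half(void)
{
	tasklet_schedule(&my_tasklet);
}

/* On teardown, wait until the tasklet can no longer run. */
static void my_cleanup(void)
{
	tasklet_kill(&my_tasklet);
}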

softirq.c

/*
 *    linux/kernel/softirq.c
 *
 *    Copyright (C) 1992 Linus Torvalds
 *
 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
 */

#include <linux/module.h>
#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/kthread.h>
#include <linux/rcupdate.h>

#include <asm/irq.h>
/*
   - No shared variables, all the data are CPU local.
   - If a softirq needs serialization, let it serialize itself
     by its own spinlocks.
   - Even if softirq is serialized, only local cpu is marked for
     execution. Hence, we get something sort of weak cpu binding.
     Though it is still not clear, will it result in better locality
     or will not.

   Examples:
   - NET RX softirq. It is multithreaded and does not require
     any global serialization.
   - NET TX softirq. It kicks software netdevice queues, hence
     it is logically serialized per device, but this serialization
     is invisible to common code.
   - Tasklets: serialized wrt itself.
 */

#ifndef __ARCH_IRQ_STAT
irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
EXPORT_SYMBOL(irq_stat);
#endif

/*
 * Array of softirq handlers, indexed by softirq number.
 */
static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp;

static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);

/*
 * we cannot loop indefinitely here to avoid userspace starvation,
 * but we also don't want to introduce a worst case 1/HZ latency
 * to the pending events, so lets the scheduler to balance
 * the softirq load for us.
 */
/*
 * Wake up this CPU's ksoftirqd thread. There is one ksoftirqd per CPU,
 * so we look up the one belonging to the current CPU.
 */
static inline void wakeup_softirqd(void)
{
    /* Interrupts are disabled: no need to stop preemption */
    struct task_struct *tsk = __get_cpu_var(ksoftirqd);

    if (tsk && tsk->state != TASK_RUNNING)
        wake_up_process(tsk);
}

/*
 * We restart softirq processing MAX_SOFTIRQ_RESTART times,
 * and we fall back to softirqd after that.
 *
 * This number has been established via experimentation.
 * The two things to balance is latency against fairness -
 * we want to handle softirqs as soon as possible, but they
 * should not be able to lock up the box.
 */
/* Maximum number of restart passes before handing off to ksoftirqd */
#define MAX_SOFTIRQ_RESTART 10

/*
 * Core softirq processing loop.
 */
asmlinkage void __do_softirq(void)
{
    struct softirq_action *h;
    __u32 pending;
    int max_restart = MAX_SOFTIRQ_RESTART;
    int cpu;
    /*
     * pending records which softirq numbers currently need processing.
     */
    pending = local_softirq_pending();

    local_bh_disable();
    cpu = smp_processor_id();
restart:
    /* Reset the pending bitmask before enabling irqs */
    local_softirq_pending() = 0; /* clear the recorded pending bits */

    local_irq_enable();

    h = softirq_vec;

    /*
     * Each bit of pending marks whether the corresponding softirq has
     * been raised; walk the mask bit by bit and run each handler.
     */
    do {
        if (pending & 1) { /* this softirq was raised */
            h->action(h); /* run the corresponding handler */
            rcu_bh_qsctr_inc(cpu);
        }
        h++;
        pending >>= 1; /* move on to the next bit */
    } while (pending);

    local_irq_disable();

    /* If new softirqs were raised in the meantime, run the whole loop again */
    pending = local_softirq_pending();
    if (pending && --max_restart)
        goto restart;

    /* Still pending after MAX_SOFTIRQ_RESTART passes: wake ksoftirqd rather than staying here forever */
    if (pending)
        wakeup_softirqd();

    __local_bh_enable();
}
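
To see the bit-scan above in isolation, here is a small stand-alone sketch (plain user-space C with made-up names, not kernel code) that dispatches handlers the same way __do_softirq walks the pending mask, lowest bit first:

/* Stand-alone illustration of the pending-bitmask dispatch loop. */
#include <stdio.h>

#define NR_DEMO_SOFTIRQS 6

static void (*vec[NR_DEMO_SOFTIRQS])(int nr);

static void demo_handler(int nr)
{
    printf("running softirq %d\n", nr);
}

static void run_pending(unsigned int pending)
{
    int nr = 0;

    /* Same shape as the kernel loop: test bit 0, call the handler,
       shift right, repeat until no bits remain. Lower-numbered
       softirqs (e.g. HI_SOFTIRQ = 0) therefore run first. */
    while (pending) {
        if (pending & 1)
            vec[nr](nr);
        pending >>= 1;
        nr++;
    }
}

int main(void)
{
    int i;

    for (i = 0; i < NR_DEMO_SOFTIRQS; i++)
        vec[i] = demo_handler;

    /* Bits 1 and 3 set: as if TIMER_SOFTIRQ and NET_RX_SOFTIRQ were pending. */
    run_pending((1u << 1) | (1u << 3));
    return 0;
}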

#ifndef __ARCH_HAS_DO_SOFTIRQ
/*
 * Entry point for softirq processing.
 */
asmlinkage void do_softirq(void)
{
    __u32 pending;
    unsigned long flags;

    /* Already in interrupt (hardirq or softirq) context: nothing to do, just return */
    if (in_interrupt())
        return;

    local_irq_save(flags);
    /* If any softirqs are pending, process them now */
    pending = local_softirq_pending();

    if (pending)
        __do_softirq();

    local_irq_restore(flags);
}

EXPORT_SYMBOL(do_softirq);

#endif

/*
 * Re-enable bottom-half (softirq) processing on the local CPU.
 */
void local_bh_enable(void)
{
    WARN_ON(irqs_disabled());
    /*
     * Keep preemption disabled until we are done with
     * softirq processing:
      */
     sub_preempt_count(SOFTIRQ_OFFSET - 1);

    if (unlikely(!in_interrupt() && local_softirq_pending()))
        do_softirq();

    dec_preempt_count();
    preempt_check_resched();
}
EXPORT_SYMBOL(local_bh_enable);
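
A typical use of the local_bh_disable()/local_bh_enable() pair shown in interrupt.h above is to keep the local CPU's softirqs (and hence tasklets) from running while process context touches data they also use. A minimal sketch with hypothetical names:

/* Hypothetical sketch: exclude local bottom halves around shared data. */
#include <linux/interrupt.h>

static unsigned long my_counter; /* also updated from a tasklet/softirq */

static void my_update_from_process_context(void)
{
    local_bh_disable();  /* softirqs will not run on this CPU now */
    my_counter++;        /* safe against the local bottom half */
    local_bh_enable();   /* may immediately run pending softirqs */
}

Note that this only excludes bottom halves on the local CPU; on SMP, data shared with handlers running on other CPUs still needs its own spinlock.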

#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
# define invoke_softirq()    __do_softirq()
#else
# define invoke_softirq()    do_softirq()
#endif

/*
 * Exit an interrupt context. Process softirqs if needed and possible:
 */
void irq_exit(void)
{
    account_system_vtime(current);
    sub_preempt_count(IRQ_EXIT_OFFSET);
    if (!in_interrupt() && local_softirq_pending())
        invoke_softirq();
    preempt_enable_no_resched();
}

/*
 * This function must run with irqs disabled!
 */
/*
 * Raise (mark pending) the given softirq; interrupts must already be disabled.
 */
inline fastcall void raise_softirq_irqoff(unsigned int nr)
{
    __raise_softirq_irqoff(nr);

    /*
     * If we're in an interrupt or softirq, we're done
     * (this also catches softirq-disabled code). We will
     * actually run the softirq once we return from
     * the irq or softirq.
     *
     * Otherwise we wake up ksoftirqd to make sure we
     * schedule the softirq soon.
     */
    if (!in_interrupt())
        wakeup_softirqd();
}

EXPORT_SYMBOL(raise_softirq_irqoff);

/*
 * Raise the given softirq, saving and restoring the interrupt state around it.
 */
void fastcall raise_softirq(unsigned int nr)
{
    unsigned long flags;

    local_irq_save(flags);
    raise_softirq_irqoff(nr);
    local_irq_restore(flags);
}

/*
 * Register a softirq: install its action handler and its argument.
 */
void open_softirq(int nr, void (*action)(struct softirq_action*), void *data)
{
    softirq_vec[nr].data = data;
    softirq_vec[nr].action = action;
}

EXPORT_SYMBOL(open_softirq);
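
Putting open_softirq() and raise_softirq() together, registering and triggering a softirq looks roughly like the sketch below. MY_SOFTIRQ is an assumed extra slot in the softirq enum and all the my_* names are hypothetical; as the comment in interrupt.h says, real code should normally use tasklets instead of allocating a new softirq.

/* Hypothetical open_softirq()/raise_softirq() usage sketch. */
#include <linux/interrupt.h>
#include <linux/kernel.h>

static void my_softirq_action(struct softirq_action *h)
{
    /* h->data is the third argument that was passed to open_softirq(). */
    printk(KERN_DEBUG "my softirq ran, data=%p\n", h->data);
}

static void my_setup(void)
{
    /* Install the handler into softirq_vec[MY_SOFTIRQ]. */
    open_softirq(MY_SOFTIRQ, my_softirq_action, NULL);
}

static void my_trigger(void)
{
    /* Mark the softirq pending on this CPU; it will run at the next
       irq_exit(), local_bh_enable(), or from ksoftirqd. */
    raise_softirq(MY_SOFTIRQ);
}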


/* Tasklets */

/*
 * Per-CPU tasklet list. This is not the standard Linux list_head but a
 * hand-rolled singly linked list: new elements are pushed onto the head,
 * and this pointer always points at the head of the list.
 */
struct tasklet_head
{
    struct tasklet_struct *list;
};

/* Some compilers disobey section attribute on statics when not
   initialized -- RR */
/*
 * Tasklets occupy two softirq priorities (HI_SOFTIRQ and TASKLET_SOFTIRQ),
 * each backed by its own per-CPU tasklet list.
 */

static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };

/*
 * Add a tasklet to the per-CPU tasklet list, pushing it onto the head.
 */
void fastcall __tasklet_schedule(struct tasklet_struct *t)
{
    unsigned long flags;

    local_irq_save(flags);
    t->next = __get_cpu_var(tasklet_vec).list;
    __get_cpu_var(tasklet_vec).list = t;
    raise_softirq_irqoff(TASKLET_SOFTIRQ); /* raise the softirq after queueing */
    local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_schedule);

void fastcall __tasklet_hi_schedule(struct tasklet_struct *t)
{
    unsigned long flags;

    local_irq_save(flags);
    t->next = __get_cpu_var(tasklet_hi_vec).list;
    __get_cpu_var(tasklet_hi_vec).list = t;
    raise_softirq_irqoff(HI_SOFTIRQ);
    local_irq_restore(flags);
}

EXPORT_SYMBOL(__tasklet_hi_schedule);

/*
 * Walk the per-CPU tasklet list and run every tasklet on it.
 */
static void tasklet_action(struct softirq_action *a)
{
    struct tasklet_struct *list;

    local_irq_disable();
    list = __get_cpu_var(tasklet_vec).list;
    __get_cpu_var(tasklet_vec).list = NULL;
    local_irq_enable();

    while (list) {
        struct tasklet_struct *t = list;

        list = list->next;

        if (tasklet_trylock(t)) {
            if (!atomic_read(&t->count)) {
                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                    BUG();
                t->func(t->data); /* run the tasklet */
                tasklet_unlock(t);
                continue;
            }
            tasklet_unlock(t);
        }

        local_irq_disable();
        t->next = __get_cpu_var(tasklet_vec).list;
        __get_cpu_var(tasklet_vec).list = t;
        __raise_softirq_irqoff(TASKLET_SOFTIRQ);
        local_irq_enable();
    }
}

static void tasklet_hi_action(struct softirq_action *a)
{
    struct tasklet_struct *list;

    local_irq_disable();
    list = __get_cpu_var(tasklet_hi_vec).list;
    __get_cpu_var(tasklet_hi_vec).list = NULL;
    local_irq_enable();

    while (list) {
        struct tasklet_struct *t = list;

        list = list->next;

        if (tasklet_trylock(t)) {
            if (!atomic_read(&t->count)) {
                if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
                    BUG();
                t->func(t->data);
                tasklet_unlock(t);
                continue;
            }
            tasklet_unlock(t);
        }

        local_irq_disable();
        t->next = __get_cpu_var(tasklet_hi_vec).list;
        __get_cpu_var(tasklet_hi_vec).list = t;
        __raise_softirq_irqoff(HI_SOFTIRQ);
        local_irq_enable();
    }
}


/* Initialize a tasklet with its handler function and argument */
void tasklet_init(struct tasklet_struct *t,
          void (*func)(unsigned long), unsigned long data)
{
    t->next = NULL;
    t->state = 0;
    atomic_set(&t->count, 0);
    t->func = func;
    t->data = data;
}

EXPORT_SYMBOL(tasklet_init);

void tasklet_kill(struct tasklet_struct *t)
{
    if (in_interrupt())
        printk("Attempt to kill tasklet from interrupt\n");

    while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
        do
            yield();
        while (test_bit(TASKLET_STATE_SCHED, &t->state));
    }
    tasklet_unlock_wait(t);
    clear_bit(TASKLET_STATE_SCHED, &t->state);
}

EXPORT_SYMBOL(tasklet_kill);

/*
 * At softirq initialization time, register the tasklet handlers.
 */
void __init softirq_init(void)
{
    open_softirq(TASKLET_SOFTIRQ, tasklet_action, NULL);
    open_softirq(HI_SOFTIRQ, tasklet_hi_action, NULL);
}

static int ksoftirqd(void * __bind_cpu)
{
    set_user_nice(current, 19);
    current->flags |= PF_NOFREEZE;

    set_current_state(TASK_INTERRUPTIBLE);

    while (!kthread_should_stop()) {
        if (!local_softirq_pending())
            schedule();

        __set_current_state(TASK_RUNNING);

        while (local_softirq_pending()) {
            /* Preempt disable stops cpu going offline.
               If already offline, we'll be on wrong CPU:
               don't process */
            preempt_disable();
            if (cpu_is_offline((long)__bind_cpu))
                goto wait_to_die;
            do_softirq();
            preempt_enable();
            cond_resched();
        }

        set_current_state(TASK_INTERRUPTIBLE);
    }
    __set_current_state(TASK_RUNNING);
    return 0;

wait_to_die:
    preempt_enable();
    /* Wait for kthread_stop */
    set_current_state(TASK_INTERRUPTIBLE);
    while (!kthread_should_stop()) {
        schedule();
        set_current_state(TASK_INTERRUPTIBLE);
    }
    __set_current_state(TASK_RUNNING);
    return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
/*
 * tasklet_kill_immediate is called to remove a tasklet which can already be
 * scheduled for execution on @cpu.
 *
 * Unlike tasklet_kill, this function removes the tasklet
 * _immediately_, even if the tasklet is in TASKLET_STATE_SCHED state.
 *
 * When this function is called, @cpu must be in the CPU_DEAD state.
 */
void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu)
{
    struct tasklet_struct **i;

    BUG_ON(cpu_online(cpu));
    BUG_ON(test_bit(TASKLET_STATE_RUN, &t->state));

    if (!test_bit(TASKLET_STATE_SCHED, &t->state))
        return;

    /* CPU is dead, so no lock needed. */
    for (i = &per_cpu(tasklet_vec, cpu).list; *i; i = &(*i)->next) {
        if (*i == t) {
            *i = t->next;
            return;
        }
    }
    BUG();
}

static void takeover_tasklets(unsigned int cpu)
{
    struct tasklet_struct **i;

    /* CPU is dead, so no lock needed. */
    local_irq_disable();

    /* Find end, append list for that CPU. */
    for (i = &__get_cpu_var(tasklet_vec).list; *i; i = &(*i)->next);
    *i = per_cpu(tasklet_vec, cpu).list;
    per_cpu(tasklet_vec, cpu).list = NULL;
    raise_softirq_irqoff(TASKLET_SOFTIRQ);

    for (i = &__get_cpu_var(tasklet_hi_vec).list; *i; i = &(*i)->next);
    *i = per_cpu(tasklet_hi_vec, cpu).list;
    per_cpu(tasklet_hi_vec, cpu).list = NULL;
    raise_softirq_irqoff(HI_SOFTIRQ);

    local_irq_enable();
}
#endif /* CONFIG_HOTPLUG_CPU */

static int __devinit cpu_callback(struct notifier_block *nfb,
                  unsigned long action,
                  void *hcpu)
{
    int hotcpu = (unsigned long)hcpu;
    struct task_struct *p;

    switch (action) {
    case CPU_UP_PREPARE:
        BUG_ON(per_cpu(tasklet_vec, hotcpu).list);
        BUG_ON(per_cpu(tasklet_hi_vec, hotcpu).list);
        p = kthread_create(ksoftirqd, hcpu, "ksoftirqd/%d", hotcpu);
        if (IS_ERR(p)) {
            printk("ksoftirqd for %i failed\n", hotcpu);
            return NOTIFY_BAD;
        }
        kthread_bind(p, hotcpu);
          per_cpu(ksoftirqd, hotcpu) = p;
         break;
    case CPU_ONLINE:
        wake_up_process(per_cpu(ksoftirqd, hotcpu));
        break;
#ifdef CONFIG_HOTPLUG_CPU
    case CPU_UP_CANCELED:
        /* Unbind so it can run.  Fall thru. */
        kthread_bind(per_cpu(ksoftirqd, hotcpu), smp_processor_id());
    case CPU_DEAD:
        p = per_cpu(ksoftirqd, hotcpu);
        per_cpu(ksoftirqd, hotcpu) = NULL;
        kthread_stop(p);
        takeover_tasklets(hotcpu);
        break;
#endif /* CONFIG_HOTPLUG_CPU */
     }
    return NOTIFY_OK;
}

static struct notifier_block __devinitdata cpu_nfb = {
    .notifier_call = cpu_callback
};

__init int spawn_ksoftirqd(void)
{
    void *cpu = (void *)(long)smp_processor_id();
    cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
    cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
    register_cpu_notifier(&cpu_nfb);
    return 0;
}

 
