softnet_data is a per-cpu variable; each CPU owns one instance.
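It is defined in net/core/dev.c with DEFINE_PER_CPU_ALIGNED, and code that is already pinned to the target CPU (softirq context, or irqs/preemption disabled) reaches its own instance through the usual per-cpu accessors. A minimal sketch; note that the accessor spelling (__get_cpu_var vs. this_cpu_ptr) varies with kernel version:

/* definition in net/core/dev.c */
DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
EXPORT_PER_CPU_SYMBOL(softnet_data);

/* typical access from net_rx_action() and friends, which run in
 * NET_RX_SOFTIRQ context and therefore cannot migrate to another CPU */
struct softnet_data *sd = &__get_cpu_var(softnet_data);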
/*
* Incoming packets are placed on per-cpu queues
*/
struct softnet_data {
	struct Qdisc		*output_queue;
	struct Qdisc		**output_queue_tailp;
	/*
	 * napi->poll_list entries are hung on this list; NAPI drivers and
	 * non-NAPI drivers alike end up here.
	 * For a NAPI driver, the driver's own napi_struct is added to this
	 * poll_list via napi_schedule().
	 * For a non-NAPI driver, softnet_data->backlog is added to this
	 * poll_list via napi_schedule() instead.
	 * The point is that NAPI and non-NAPI devices both funnel into the
	 * same net_rx_action() processing flow; see the sketch after the
	 * struct.
	 */
	struct list_head	poll_list;
	struct sk_buff		*completion_queue;
	/*
	 * process_queue takes over input_pkt_queue inside process_backlog(),
	 * presumably to shrink the locked region; see the process_backlog()
	 * sketch after net_dev_init() below.
	 */
	struct sk_buff_head	process_queue;

	/* stats */
	unsigned int		processed;
	unsigned int		time_squeeze;
	unsigned int		cpu_collision;
	unsigned int		received_rps;

#ifdef CONFIG_RPS
	struct softnet_data	*rps_ipi_list;

	/* Elements below can be accessed between CPUs for RPS */
	struct call_single_data	csd ____cacheline_aligned_in_smp;
	struct softnet_data	*rps_ipi_next;
	unsigned int		cpu;
	unsigned int		input_queue_head;
	unsigned int		input_queue_tail;
#endif
	unsigned		dropped;
	/*
	 * Legacy (non-NAPI) path: netif_rx() queues the skb onto
	 * input_pkt_queue.
	 * The legacy path uses backlog as its napi_struct and hangs it on
	 * softnet_data->poll_list.
	 * backlog's poll function is process_backlog(); it is initialized
	 * in net_dev_init(), shown below.
	 */
	struct sk_buff_head	input_pkt_queue;
	struct napi_struct	backlog;
};
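How both kinds of driver land on poll_list is easiest to see at the bottom of the scheduling path: napi_schedule() eventually reaches ____napi_schedule(), which queues the napi_struct on the current CPU's poll_list and raises NET_RX_SOFTIRQ, and the non-NAPI netif_rx() path arrives at the same helper with &sd->backlog. A sketch from kernels of this era, with enqueue_to_backlog() trimmed to its happy path (queue-length limits and RPS handling elided):

/* Common tail of napi_schedule(): hang the napi on this CPU's poll_list
 * and raise the softirq whose handler is net_rx_action(). Called with
 * irqs disabled. */
static inline void ____napi_schedule(struct softnet_data *sd,
				     struct napi_struct *napi)
{
	list_add_tail(&napi->poll_list, &sd->poll_list);
	__raise_softirq_irqoff(NET_RX_SOFTIRQ);
}

/* Non-NAPI enqueue path used by netif_rx(), trimmed down here */
static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
			      unsigned int *qtail)
{
	struct softnet_data *sd = &per_cpu(softnet_data, cpu);
	....
	/* first packet while the queue was empty: schedule the backlog
	 * napi exactly as a NAPI driver would schedule its own napi */
	if (!skb_queue_len(&sd->input_pkt_queue) &&
	    !__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state))
		____napi_schedule(sd, &sd->backlog);
	__skb_queue_tail(&sd->input_pkt_queue, skb);
	....
}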
static int __init net_dev_init(void)
{
	....
	for_each_possible_cpu(i) {
		struct softnet_data *sd = &per_cpu(softnet_data, i);
		....
		sd->backlog.poll = process_backlog;
		sd->backlog.weight = weight_p;	/* tunable via /proc/sys/net/core/dev_weight */
		sd->backlog.gro_list = NULL;
		sd->backlog.gro_count = 0;
	}
	....
}
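The process_queue takeover mentioned in the struct comments happens inside process_backlog() itself: skbs are delivered out of process_queue with irqs enabled around __netif_receive_skb(), while input_pkt_queue, which netif_rx() may fill from hard-irq context, is only touched for a brief splice. A trimmed sketch from kernels of this era (RPS IPI handling and per-queue counters elided):

static int process_backlog(struct napi_struct *napi, int quota)
{
	int work = 0;
	struct softnet_data *sd = container_of(napi, struct softnet_data,
					       backlog);
	....
	napi->weight = weight_p;
	local_irq_disable();
	while (work < quota) {
		struct sk_buff *skb;
		unsigned int qlen;

		/* drain the private process_queue; irqs are re-enabled
		 * around the actual protocol processing */
		while ((skb = __skb_dequeue(&sd->process_queue))) {
			local_irq_enable();
			__netif_receive_skb(skb);
			local_irq_disable();
			if (++work >= quota) {
				local_irq_enable();
				return work;
			}
		}

		/* take over everything netif_rx() queued meanwhile: one
		 * O(1) splice under the lock instead of per-skb dequeues */
		rps_lock(sd);
		qlen = skb_queue_len(&sd->input_pkt_queue);
		if (qlen)
			skb_queue_splice_tail_init(&sd->input_pkt_queue,
						   &sd->process_queue);
		if (qlen < quota - work) {
			/* little or nothing left: complete the backlog
			 * napi (we own it, so a plain write suffices) and
			 * arrange to fall out of the loop */
			list_del(&napi->poll_list);
			napi->state = 0;
			quota = work + qlen;
		}
		rps_unlock(sd);
	}
	local_irq_enable();
	return work;
}

Because netif_rx() can run in hard-irq context on the same CPU, input_pkt_queue is only safe to touch with irqs disabled; splicing it wholesale onto process_queue keeps that irq-off window constant-time regardless of how many packets are pending, which is the lock-scope reduction the struct comment refers to.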