Linux 接收网络报文流程

2240阅读 0评论2015-01-23 jonas_mao
分类:LINUX

linux version:2.6.32
基于de4x5 驱动的PCI总线描述。所涉及的函数仅包含了报文接收的关键部分。

这篇文章是为了记录一下这几天看linux 网络报文接收过程,作为一个总结,也是一个记忆点。

一 de4x5初始化:

点击(此处)折叠或打开

  1. static struct pci_driver de4x5_pci_driver = {
  2.         .name = "de4x5",
  3.         .id_table = de4x5_pci_tbl,
  4.         .probe = de4x5_pci_probe, //主要关注这个函数,是整个初始化的主体入口,探测总线上的设备并进行初始化
  5.     .remove = __devexit_p (de4x5_pci_remove),
  6. };

点击(此处)折叠或打开

  1. static int __init de4x5_module_init (void)
  2. {
  3. int err = 0;
  4. #ifdef CONFIG_PCI
  5. err = pci_register_driver(&de4x5_pci_driver); //使用静态初始化的pci driver进行注册。
  6. #endif
  7. #ifdef CONFIG_EISA
  8. err |= eisa_driver_register (&de4x5_eisa_driver);
  9. #endif

  10. return err;
  11. }
接下来是de4x5_pci_driver这个结构中的probe探测成员,这里主要是驱动探测是否是自己负责的设备,如果是则负责做初始化。

点击(此处)折叠或打开

  1. static int __devinit de4x5_pci_probe (struct pci_dev *pdev,
  2.                  const struct pci_device_id *ent)
  3. {
  4.     ......

  5.     /* Ok, the device seems to be for us. 探测到属于自己负责的设备 */
  6.     if ((error = pci_enable_device (pdev)))
  7.         return error;
  8.     /* 为这个设备申请net_device空间并做一些初始化,这里其实还申请了dev的private空间,所以下面的代码中可以直接访问。看了代码还有tx 队列的申请,rx ring是在其它地方申请的 */
  9.     if (!(dev = alloc_etherdev (sizeof (struct de4x5_private)))) {
  10.         error = -ENOMEM;
  11.         goto disable_dev;
  12.     }
  13.     
  14.     /* 根据上面申请的地址做偏移得到dev的private空间地址 然后对其进行一些基本的初始化 */
  15.     lp = netdev_priv(dev);
  16.     lp->bus = PCI;
  17.     lp->bus_num = 0;

  18.     /* Search for an SROM on this bus */
  19.     if (lp->bus_num != pb) {
  20.      lp->bus_num = pb;
  21.      srom_search(dev, pdev);
  22.     }

  23.     /* Get the chip configuration revision register */
  24.     lp->cfrv = pdev->revision;

  25.     /* Set the device number information */
  26.     lp->device = dev_num;
  27.     lp->bus_num = pb;

  28.     /* Set the chipset information */
  29.     if (is_DC2114x) {
  30.      device = ((lp->cfrv & CFRV_RN) < DC2114x_BRK ? DC21142 : DC21143);
  31.     }
  32.     lp->chipset = device;

  33.     /* Get the board I/O address (64 bits on sparc64) */
  34.     iobase = pci_resource_start(pdev, 0);

  35.     /* Fetch the IRQ to be used */
  36.     irq = pdev->irq;
  37.     if ((irq == 0) || (irq == 0xff) || ((int)irq == -1)) {
  38.         error = -ENODEV;
  39.         goto free_dev;
  40.     }

  41.     ......

  42.     dev->irq = irq;
  43.     /* 对设备做进一步初始化 接下面代码 */
  44.     if ((error = de4x5_hw_init(dev, iobase, &pdev->dev))) {
  45.         goto release;
  46.     }

  47.     return 0;
  48.     ......
  49. }
de4x5_hw_init这个函数是重要的一个初始化,主要是针对dev private结构的一些操作。

点击(此处)折叠或打开

  1. static int __devinit
  2. de4x5_hw_init(struct net_device *dev, u_long iobase, struct device *gendev)
  3. {
  4.     char name[DE4X5_NAME_LENGTH + 1];
  5.     struct de4x5_private *lp = netdev_priv(dev);
  6.     struct pci_dev *pdev = NULL;
  7.     int i, status=0;

  8.     dev_set_drvdata(gendev, dev);

  9.     /* Ensure we're not sleeping */
  10.     if (lp->bus == EISA) {
  11.     outb(WAKEUP, PCI_CFPM);
  12.     } else {
  13.     pdev = to_pci_dev (gendev);
  14.     pci_write_config_byte(pdev, PCI_CFDA_PSM, WAKEUP);
  15.     }
  16.     mdelay(10);

  17.     RESET_DE4X5;

  18.     if ((inl(DE4X5_STS) & (STS_TS | STS_RS)) != 0) {
  19.     return -ENXIO; /* Hardware could not reset */
  20.     }

  21.     /*
  22.     ** Now find out what kind of DC21040/DC21041/DC21140 board we have.
  23.     */
  24.     lp->useSROM = false;
  25.     if (lp->bus == PCI) {
  26.     PCI_signature(name, lp);
  27.     } else {
  28.     EISA_signature(name, gendev);
  29.     }

  30.     if (*name == '\0') { /* Not found a board signature */
  31.     return -ENXIO;
  32.     }

  33.     dev->base_addr = iobase;
  34.     printk ("%s: %s at 0x%04lx", dev_name(gendev), name, iobase);
  35.     
  36.     /* 根据chipset做一些地址的操作检查 */
  37.     status = get_hw_addr(dev);
  38.     printk(", h/w address %pM\n", dev->dev_addr);

  39.     if (status != 0) {
  40.     printk(" which has an Ethernet PROM CRC error.\n");/* CRC 错误 */
  41.     return -ENXIO;
  42.     } else {
  43.     skb_queue_head_init(&lp->cache.queue);/* 初始化缓存skb队列 jymao */
  44.     lp->cache.gepc = GEP_INIT;
  45.     lp->asBit = GEP_SLNK;
  46.     lp->asPolarity = GEP_SLNK;
  47.     lp->asBitValid = ~0;
  48.     lp->timeout = -1;
  49.     lp->gendev = gendev;
  50.     spin_lock_init(&lp->lock);
  51.     init_timer(&lp->timer);
  52.     lp->timer.function = (void (*)(unsigned long))de4x5_ast;
  53.     lp->timer.data = (unsigned long)dev;
  54.     de4x5_parse_params(dev);

  55.     ......
  56.     lp->fdx = lp->params.fdx;
  57.     sprintf(lp->adapter_name,"%s (%s)", name, dev_name(gendev));
  58.     
  59.     /* 计算DMA环形缓冲区的desc控制结构大小 分为RX+TX */
  60.     lp->dma_size = (NUM_RX_DESC + NUM_TX_DESC) * sizeof(struct de4x5_desc);

  61.     /* 申请连续的dev desc 并初始化dma_rings为申请的起始地址 */
  62.     lp->rx_ring = dma_alloc_coherent(gendev, lp->dma_size,
  63.                      &lp->dma_rings, GFP_ATOMIC);
  64.     if (lp->rx_ring == NULL) {
  65.      return -ENOMEM;
  66.     }

  67.     /* 通过dma_size可以得出rx_ring加上rx的size就可得到tx_ring的地址 */
  68.     lp->tx_ring = lp->rx_ring + NUM_RX_DESC;

  69.     /*
  70.     ** Set up the RX descriptor ring (Intels)
  71.     ** Allocate contiguous receive buffers, long word aligned (Alphas)
  72.     */
  73. #if !defined(__alpha__) && !defined(__powerpc__) && !defined(CONFIG_SPARC) && !defined(DE4X5_DO_MEMCPY)
  74.     for (i=0; i<NUM_RX_DESC; i++) {
  75.      lp->rx_ring[i].status = 0; /* 有报文接收到的时候会变化,后续代码会有使用 */
  76.      lp->rx_ring[i].des1 = cpu_to_le32(RX_BUFF_SZ);
  77.      lp->rx_ring[i].buf = 0;
  78.      lp->rx_ring[i].next = 0;
  79.      lp->rx_skb[i] = (struct sk_buff *) 1; /* Dummy entry 后续申请初始环形缓存的时候会用到,即de4x5_open中 */
  80.     }

  81. #else
  82.     ......
  83. #endif
  84.     /* 省略了一些初始化*/
  85.     ......
  86.     lp->state = CLOSED;

  87.     /* The DE4X5-specific entries in the device structure. */
  88.     SET_NETDEV_DEV(dev, gendev);
  89.     dev->netdev_ops = &de4x5_netdev_ops;/* 这里包含了所有dev的操作集 这个静态结构下面会给出 */
  90.     dev->mem_start = 0;

  91.     /* Fill in the generic fields of the device structure.向内核通知该设备 */
  92.     if ((status = register_netdev (dev))) {
  93.      dma_free_coherent (gendev, lp->dma_size,
  94.              lp->rx_ring, lp->dma_rings);
  95.      return status;
  96.     }

  97.     /* Let the adapter sleep to save power */
  98.     yawn(dev, SLEEP);

  99.     return status;
  100. }
de4x5_netdev_ops静态结构,包含了dev的操作集:

点击(此处)折叠或打开

  1. static const struct net_device_ops de4x5_netdev_ops = {
  2.     .ndo_open        = de4x5_open, //netdev的start 函数
  3.     .ndo_stop        = de4x5_close,
  4.     .ndo_start_xmit    = de4x5_queue_pkt,
  5.     .ndo_get_stats    = de4x5_get_stats,
  6.     .ndo_set_multicast_list = set_multicast_list,
  7.     .ndo_do_ioctl    = de4x5_ioctl,
  8.     .ndo_change_mtu    = eth_change_mtu,
  9.     .ndo_set_mac_address= eth_mac_addr,
  10.     .ndo_validate_addr    = eth_validate_addr,
  11. };
通过注册了驱动初始化了探测到的设备,并向内核注册,现在进行 dev 的open,即上面结构中的.ndo_open成员。

点击(此处)折叠或打开

  1. static int
  2. de4x5_open(struct net_device *dev)
  3. {
  4.     struct de4x5_private *lp = netdev_priv(dev);
  5.     u_long iobase = dev->base_addr;
  6.     int i, status = 0;
  7.     s32 omr;

  8.     /* 这里申请了DMA环形缓冲区,用来让网卡把硬件接收到的信息填充进去 这个内存是和CPU共享的 */
  9.     /* Allocate the RX buffers 大小是一个以太网报文的MTU+二层报头+pad 以32对齐*/
  10.     /* 这里的数量是和desc的控制结构数量一致的,即一一对应 */
  11.     for (i=0; i<lp->rxRingSize; i++) {
  12.     if (de4x5_alloc_rx_buff(dev, i, 0) == NULL) {
  13.      de4x5_free_rx_buffs(dev);
  14.      return -EAGAIN;
  15.     }
  16.     }

  17.     /*
  18.     ** Wake up the adapter
  19.     */
  20.     yawn(dev, WAKEUP);

  21.     /*
  22.     ** Re-initialize the DE4X5...
  23.     */
  24.     status = de4x5_init(dev);
  25.     spin_lock_init(&lp->lock);
  26.     lp->state = OPEN;
  27.     de4x5_dbg_open(dev);

  28.     /* 中断的注册 register the interrupt handlers (IRQF_SHARED means multiple devices share the interrupt )*/
  29.     /* IRQF_DISABLED means high quick interrupt which need prohibit other interrupts when it is handling interrupt*/
  30.     if (request_irq(dev->irq, de4x5_interrupt, IRQF_SHARED,
  31.          lp->adapter_name, dev)) {
  32.     printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
  33.     if (request_irq(dev->irq, de4x5_interrupt, IRQF_DISABLED | IRQF_SHARED,
  34.              lp->adapter_name, dev)) {
  35.      printk("\n Cannot get IRQ- reconfigure your hardware.\n");
  36.      disable_ast(dev);
  37.      de4x5_free_rx_buffs(dev);
  38.      de4x5_free_tx_buffs(dev);
  39.      yawn(dev, SLEEP);
  40.      lp->state = CLOSED;
  41.      return -EAGAIN;
  42.     } else {
  43.      printk("\n Succeeded, but you should reconfigure your hardware to avoid this.\n");
  44.      printk("WARNING: there may be IRQ related problems in heavily loaded systems.\n");
  45.     }
  46.     }

  47.     lp->interrupt = UNMASK_INTERRUPTS;
  48.     dev->trans_start = jiffies;

  49.     START_DE4X5;

  50.     de4x5_setup_intr(dev);

  51.     ......

  52.     return status;
  53. }

以上是驱动和设备的初始化,上面代码包含了主要的一些初始化工作:
1. 总线上驱动注册初始化
2. 驱动探测设备注册初始化
3. 开启设备,初始化接收缓存

下面的步骤是网卡从网络上接收到了数据报文填充到设备的接收缓存中,然后触发了一个注册好的硬中断。

二:驱动中断处理过程


点击(此处)折叠或打开

  1. /* 在dev open中注册的中断处理函数 */
  2. static irqreturn_t
  3. de4x5_interrupt(int irq, void *dev_id)
  4. {
  5.     struct net_device *dev = dev_id;
  6.     struct de4x5_private *lp;
  7.     s32 imr, omr, sts, limit;
  8.     u_long iobase;
  9.     unsigned int handled = 0;

  10.     lp = netdev_priv(dev);
  11.     spin_lock(&lp->lock);
  12.     iobase = dev->base_addr;

  13.     DISABLE_IRQs; /* Ensure non re-entrancy */
  14.     /* private 中的中断位置1 这里应该是类似中断嵌套的 ?*/
  15.     if (test_and_set_bit(MASK_INTERRUPTS, (void*) &lp->interrupt))
  16.     printk("%s: Re-entering the interrupt handler.\n", dev->name);

  17.     synchronize_irq(dev->irq);

  18.     for (limit=0; limit<8; limit++) {
  19.     sts = inl(DE4X5_STS); /* Read IRQ status */
  20.     outl(sts, DE4X5_STS); /* Reset the board interrupts */

  21.     if (!(sts & lp->irq_mask)) break;/* All done */
  22.     handled = 1;
  23.     
  24.     /* 主要关注这里的接收 当收到 RX 中断 */
  25.     if (sts & (STS_RI | STS_RU)) /* Rx interrupt (packet[s] arrived) */
  26.      de4x5_rx(dev);

  27.     if (sts & (STS_TI | STS_TU)) /* Tx interrupt (packet sent) */
  28.      de4x5_tx(dev);

  29.     if (sts & STS_LNF) { /* TP Link has failed */
  30.      lp->irq_mask &= ~IMR_LFM;
  31.     }

  32.     if (sts & STS_UNF) { /* Transmit underrun */
  33.      de4x5_txur(dev);
  34.     }

  35.     ......
  36.     lp->interrupt = UNMASK_INTERRUPTS;
  37.     ENABLE_IRQs;
  38.     spin_unlock(&lp->lock);

  39.     return IRQ_RETVAL(handled);
  40. }
RX 中断接收函数主要是进行rx ring环形缓冲区的遍历,找到接收的skb,并放入cpu的报文等待处理队列

点击(此处)折叠或打开

  1. static int
  2. de4x5_rx(struct net_device *dev)
  3. {
  4.     struct de4x5_private *lp = netdev_priv(dev);
  5.     u_long iobase = dev->base_addr;
  6.     int entry;
  7.     s32 status;
  8.     
  9.     /* 遍历所有rx ring 中entry的status 大于等于0的,其实是遍历所有初始化的,因为初始化的时候都是0 */
  10.     for (entry=lp->rx_new; (s32)le32_to_cpu(lp->rx_ring[entry].status)>=0;
  11.      entry=lp->rx_new) {
  12.     status = (s32)le32_to_cpu(lp->rx_ring[entry].status);

  13.     if (lp->rx_ovf) {
  14.      if (inl(DE4X5_MFC) & MFC_FOCM) {
  15.         de4x5_rx_ovfc(dev);
  16.         break;
  17.      }
  18.     }
  19.     
  20.     /* 记录第一个作为old */
  21.     if (status & RD_FS) { /* Remember the start of frame */
  22.      lp->rx_old = entry;
  23.     }

  24.     if (status & RD_LS) { /* Valid frame status */
  25.      if (lp->tx_enable) lp->linkOK++;
  26.      /* 记录一些错误统计 */
  27.      if (status & RD_ES) {     /* There was an error. */
  28.         lp->stats.rx_errors++; /* Update the error stats. */
  29.         if (status & (RD_RF | RD_TL)) lp->stats.rx_frame_errors++;
  30.         if (status & RD_CE) lp->stats.rx_crc_errors++;
  31.         if (status & RD_OF) lp->stats.rx_fifo_errors++;
  32.         if (status & RD_TL) lp->stats.rx_length_errors++;
  33.         if (status & RD_RF) lp->pktStats.rx_runt_frames++;
  34.         if (status & RD_CS) lp->pktStats.rx_collision++;
  35.         if (status & RD_DB) lp->pktStats.rx_dribble++;
  36.         if (status & RD_OF) lp->pktStats.rx_overflow++;
  37.      } else { /* A valid frame received */
  38.         /* 正确的报文接收到了,计算长度 */
  39.         struct sk_buff *skb;
  40.         short pkt_len = (short)(le32_to_cpu(lp->rx_ring[entry].status)
  41.                      >> 16) - 4;

  42.         /* 如果rx_ring 中entry的status check 合法 则说明接收到了包,而且已经缓存到物理地址了,现在直接申请skb来放入cpu缓存队列
  43.          * 这里申请的skb是用来替换已经接收到的报文的,即此时status大于0则说明这个entry下的rx_skb[entry]是保存了网卡接收的skb
  44.          * 我们需要接收它,然后重新申请一个空的skb来替换它,以供下一次网卡接收 
  45.          */
  46.         if ((skb = de4x5_alloc_rx_buff(dev, entry, pkt_len)) == NULL) {
  47.          printk("%s: Insufficient memory; nuking packet.\n",
  48.              dev->name);
  49.          lp->stats.rx_dropped++;
  50.         } else {
  51.          de4x5_dbg_rx(skb, pkt_len);

  52.          /* Push up the protocol stack */
  53.          skb->protocol=eth_type_trans(skb,dev);
  54.          de4x5_local_stats(dev, skb->data, pkt_len);
  55.          /* 准备放入cpu 报文等待处理队列 */
  56.          netif_rx(skb);

  57.          /* Update stats */
  58.          lp->stats.rx_packets++;
  59.           lp->stats.rx_bytes += pkt_len;
  60.         }
  61.      }

  62.     ......
  63.     /*
  64.     ** Update entry information 计算下一个
  65.     */
  66.     lp->rx_new = (++lp->rx_new) % lp->rxRingSize;
  67.     }

  68.     return 0;
  69. }
netif_rx主要是把接收到的skb放入特定的CPU等待处理队列,并把backlog放入poll_list中以便后续轮询。这里便是NAPI机制的关键。其实de4x5驱动并没有注册自己的NAPI实例,即它本身不支持NAPI机制,走的是传统(非NAPI)的backlog路径。

点击(此处)折叠或打开

  1. int netif_rx(struct sk_buff *skb)
  2. {
  3.     struct softnet_data *queue;
  4.     unsigned long flags;

  5.     /* if netpoll wants it, pretend we never saw it 这里的netpoll是一种在内核没有初始化好时候的一种简单的网络处理机制具体没研究 */
  6.     if (netpoll_rx(skb))
  7.         return NET_RX_DROP;

  8.     if (!skb->tstamp.tv64)
  9.         net_timestamp(skb);

  10.     /*
  11.      * The code is rearranged so that the path is the most
  12.      * short when CPU is congested, but is still operating.
  13.      */
  14.     local_irq_save(flags);/* save the current irq system status flags, and disable local irq */
  15.     /* 得到特定cpu的接收队列 由于没有使用NAPI,所以直接使用softnet的队列 */
  16.     queue = &__get_cpu_var(softnet_data);

  17.     __get_cpu_var(netdev_rx_stat).total++;
  18.     if (queue->input_pkt_queue.qlen <= netdev_max_backlog) {//1000
  19.         /* 第一次进来的时候qlen为0 即会调用下面napi_schedule */
  20.         if (queue->input_pkt_queue.qlen) {
  21. enqueue:
  22.             __skb_queue_tail(&queue->input_pkt_queue, skb);//加入到接收队列
  23.             local_irq_restore(flags);/* 退出中断上下文 */
  24.             return NET_RX_SUCCESS;
  25.         }
  26.         /* 这里起初qlen是为0的,然后通过schedule引发软中断,并且把backlog加入到poll_list中,并触发软中断 */
  27.         napi_schedule(&queue->backlog);
  28.         goto enqueue;
  29.     }

  30.     __get_cpu_var(netdev_rx_stat).dropped++;
  31.     local_irq_restore(flags);

  32.     kfree_skb(skb);
  33.     return NET_RX_DROP;
  34. }
上述步骤主要是把rx_ring中网卡收到的报文(skb)挂入CPU的等待处理队列中(注意只是链入skb指针,并没有拷贝报文数据),然后触发软中断,交给下半部处理。

三:设备软中断处理

net_dev_init是一个内核通用dev模块的初始化过程。由于de4x5没有使用NAPI机制,所以很多直接沿用这个初始化中的处理。

点击(此处)折叠或打开

  1. static int __init net_dev_init(void)
  2. {
  3.     int i, rc = -ENOMEM;

  4.     BUG_ON(!dev_boot_phase);

  5.    ......

  6.     /*
  7.      *    Initialise the packet receive queues.针对每个CPU进行接收队列和backlog的初始化
  8.      */

  9.     for_each_possible_cpu(i) {
  10.         struct softnet_data *queue;

  11.         queue = &per_cpu(softnet_data, i);
  12.         skb_queue_head_init(&queue->input_pkt_queue);
  13.         queue->completion_queue = NULL;
  14.         INIT_LIST_HEAD(&queue->poll_list);

  15.         queue->backlog.poll = process_backlog;//这里没有NAPI的直接掉用这个
  16.         queue->backlog.weight = weight_p;
  17.         queue->backlog.gro_list = NULL;
  18.         queue->backlog.gro_count = 0;
  19.     }

  20.     dev_boot_phase = 0;

  21.    ......

  22.     open_softirq(NET_TX_SOFTIRQ, net_tx_action);/* 注册相应的软中断 */
  23.     open_softirq(NET_RX_SOFTIRQ, net_rx_action);/* RX softirq 注册 */

  24.    ......
  25. out:
  26.     return rc;
  27. }
接着第二步的过程,现在应该是来到了net_rx_action接收软中断的处理程序;

点击(此处)折叠或打开

  1. static void net_rx_action(struct softirq_action *h)
  2. {
  3.     struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
  4.     unsigned long time_limit = jiffies + 2;
  5.     int budget = netdev_budget;
  6.     void *have;

  7.     local_irq_disable();

  8.     while (!list_empty(list)) { //这里不会引起死循环,因为napi_complete会删除已完成的链表 process_backlog中也有调用
  9.         struct napi_struct *n;
  10.         int work, weight;

  11.         /* If softirq window is exhuasted then punt.
  12.          * Allow this to run for 2 jiffies since which will allow
  13.          * an average latency of 1.5/HZ. 这里是两个时间片,即防止在软中断中耗时过长,以前好像是一个时间片
  14.          */
  15.         if (unlikely(budget <= 0 || time_after(jiffies, time_limit)))//份额用完或者是时间到期则会退出
  16.             goto softnet_break;

  17.         local_irq_enable();

  18.         /* Even though interrupts have been re-enabled, this
  19.          * access is safe because interrupts can only add new
  20.          * entries to the tail of this list, and only ->poll()
  21.          * calls can remove this head entry from the list.
  22.          */
  23.         /* 获取poll_list上的实例 napi 即前面加入的*/
  24.         n = list_entry(list->next, struct napi_struct, poll_list);

  25.         have = netpoll_poll_lock(n);

  26.         weight = n->weight;

  27.         /* This NAPI_STATE_SCHED test is for avoiding a race
  28.          * with netpoll's poll_napi(). Only the entity which
  29.          * obtains the lock and sees NAPI_STATE_SCHED set will
  30.          * actually make the ->poll() call. Therefore we avoid
  31.          * accidently calling ->poll() when NAPI is not scheduled.
  32.          */
  33.         work = 0;
  34.         /* 这里就是napi_add的时候会注册的poll轮询函数,当然de4x5是process_backlog */
  35.         if (test_bit(NAPI_STATE_SCHED, &n->state)) {
  36.             work = n->poll(n, weight);
  37.             trace_napi_poll(n);
  38.         }

  39.         WARN_ON_ONCE(work > weight);

  40.         budget -= work;

  41.         local_irq_disable();

  42.         /* Drivers must not modify the NAPI state if they
  43.          * consume the entire weight. In such cases this code
  44.          * still "owns" the NAPI instance and therefore can
  45.          * move the instance around on the list at-will.
  46.          */
  47.         /*达到了权重,处理的最大数 */
  48.         if (unlikely(work == weight)) {
  49.             /* state被clear */
  50.             if (unlikely(napi_disable_pending(n))) {
  51.                 local_irq_enable();
  52.                 napi_complete(n);
  53.                 local_irq_disable();
  54.             } else
  55.                 list_move_tail(&n->poll_list, list);//说明还有报文,放入队尾
  56.         }

  57.         netpoll_poll_unlock(have);
  58.     }
  59. out:
  60.     local_irq_enable();

  61. #ifdef CONFIG_NET_DMA
  62.     /*
  63.      * There may not be any more sk_buffs coming right now, so push
  64.      * any pending DMA copies to hardware
  65.      */
  66.     dma_issue_pending_all();
  67. #endif

  68.     return;

  69. softnet_break:
  70.     __get_cpu_var(netdev_rx_stat).time_squeeze++;
  71.     __raise_softirq_irqoff(NET_RX_SOFTIRQ);
  72.     goto out;
  73. }
napi的poll处理函数,de4x5的为process_backlog。这里如果是NAPI机制的可能是其他的轮询函数,如8139cp  的cp_rx_poll ,可以对比看看。

点击(此处)折叠或打开

  1. static int process_backlog(struct napi_struct *napi, int quota)
  2. {
  3.     int work = 0;
  4.     struct softnet_data *queue = &__get_cpu_var(softnet_data);//取得队列
  5.     unsigned long start_time = jiffies;

  6.     napi->weight = weight_p;
  7.     do {
  8.         struct sk_buff *skb;

  9.         local_irq_disable();
  10.         skb = __skb_dequeue(&queue->input_pkt_queue);//skb出队列,进行处理
  11.         if (!skb) {//如果队列的报文已经没有了,此时会设置napi从poll_list中删除
  12.             __napi_complete(napi);
  13.             local_irq_enable();
  14.             break;
  15.         }
  16.         local_irq_enable();

  17.         netif_receive_skb(skb); //这里即要达到上层的一个处理,根据每个协议注册的处理函数进行适配。
  18.     } while (++work < quota && jiffies == start_time);

  19.     return work;
  20. }

后续的netif_receive_skb就不列了,到达上层的处理已经看得蛮多的了,这次主要是弄清楚底层的一个接收过程。
下面附上NAPI 和非 NAPI的一个区别图:(来源网络)






下次可以再贴代码讲一下。



上一篇:AIX kernel extension 开发中的timer
下一篇:关于copy_from_user函数