From: Steffen Klassert
Date: Tue, 15 Apr 2008 10:11:36 +0200
Subject: [PATCH] 3c59x: add ethtool ringparam support

Make the Rx and Tx descriptor ring sizes configurable at runtime via
the ethtool get_ringparam/set_ringparam operations. The ring memory is
now allocated in vortex_open() and freed in vortex_close() instead of
once at probe time; the old fixed TX_RING_SIZE and RX_RING_SIZE become
the default sizes, bounded by the new TX_MAX_RING_SIZE and
RX_MAX_RING_SIZE limits.

---
 drivers/net/3c59x.c |  225 ++++++++++++++++++++++++++++++++-------------------
 1 files changed, 142 insertions(+), 83 deletions(-)

diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 6f8e7d4..22ab77c 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -35,8 +35,10 @@
 /* A few values that may be tweaked. */

 /* Keep the ring sizes a power of two for efficiency. */
-#define TX_RING_SIZE	16
-#define RX_RING_SIZE	32
+#define TX_MAX_RING_SIZE	512
+#define RX_MAX_RING_SIZE	1024
+#define TX_RING_SIZE	16
+#define RX_RING_SIZE	32
 #define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer.*/

 /* "Knobs" that adjust features and parameters. */
@@ -596,10 +598,11 @@ struct vortex_private {
 	dma_addr_t rx_ring_dma;
 	dma_addr_t tx_ring_dma;
 	/* The addresses of transmit- and receive-in-place skbuffs. */
-	struct sk_buff* rx_skbuff[RX_RING_SIZE];
-	struct sk_buff* tx_skbuff[TX_RING_SIZE];
+	struct sk_buff* rx_skbuff[RX_MAX_RING_SIZE];
+	struct sk_buff* tx_skbuff[TX_MAX_RING_SIZE];
 	unsigned int cur_rx, cur_tx;		/* The next free ring entry */
 	unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
+	unsigned int rx_pending, tx_pending;	/* The actual size of the ring */
 	struct net_device_stats stats;		/* Generic stats */
 	struct vortex_extra_stats xstats;	/* NIC-specific extra stats */
 	struct sk_buff *tx_skb;			/* Packet being eaten by bus master ctrl. */
@@ -803,8 +806,9 @@ static void poll_vortex(struct net_device *dev)
 static int vortex_suspend(struct pci_dev *pdev, pm_message_t state)
 {
 	struct net_device *dev = pci_get_drvdata(pdev);
+	struct vortex_private *vp = netdev_priv(dev);

-	if (dev && dev->priv) {
+	if (dev && vp) {
 		if (netif_running(dev)) {
 			netif_device_detach(dev);
 			vortex_down(dev, 1);
@@ -952,6 +956,63 @@ static int __init vortex_eisa_init(void)
 	return vortex_cards_found - orig_cards_found + eisa_found;
 }

+static int vortex_alloc_ring(struct net_device *dev)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+	/* Makes sure rings are at least 16 byte aligned. */
+	vp->rx_ring = pci_alloc_consistent(VORTEX_PCI(vp), sizeof(struct boom_rx_desc) * vp->rx_pending
+					   + sizeof(struct boom_tx_desc) * vp->tx_pending,
+					   &vp->rx_ring_dma);
+
+	if (!vp->rx_ring)
+		return -ENOMEM;
+
+	vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + vp->rx_pending);
+	vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * vp->rx_pending;
+
+	return 0;
+}
+
+static void vortex_free_ring(struct net_device *dev)
+{
+	int i;
+	struct vortex_private *vp = netdev_priv(dev);
+
+	if (vp->full_bus_master_rx) {	/* Free Boomerang bus master Rx buffers. */
+		for (i = 0; i < vp->rx_pending; i++)
+			if (vp->rx_skbuff[i]) {
+				pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr), PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+				dev_kfree_skb(vp->rx_skbuff[i]);
+				vp->rx_skbuff[i] = NULL;
+			}
+	}
+	if (vp->full_bus_master_tx) {	/* Free Boomerang bus master Tx buffers. */
+		for (i = 0; i < vp->tx_pending; i++) {
+			if (vp->tx_skbuff[i]) {
+				struct sk_buff *skb = vp->tx_skbuff[i];
+#if DO_ZEROCOPY
+				int k;
+
+				for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
+					pci_unmap_single(VORTEX_PCI(vp),
+							 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
+							 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
+							 PCI_DMA_TODEVICE);
+#else
+				pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
+#endif
+				dev_kfree_skb(skb);
+				vp->tx_skbuff[i] = NULL;
+			}
+		}
+	}
+
+	if (vp->rx_ring)
+		pci_free_consistent(VORTEX_PCI(vp),
+				    sizeof(struct boom_rx_desc) * vp->rx_pending + sizeof(struct boom_tx_desc) * vp->tx_pending,
+				    vp->rx_ring, vp->rx_ring_dma);
+}
+
 /* returns count (>= 0), or negative on error */
 static int __devinit vortex_init_one(struct pci_dev *pdev,
 				     const struct pci_device_id *ent)
@@ -1083,6 +1144,8 @@ static int __devinit vortex_probe1(struct device *gendev,
 	vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
 	vp->io_size = vci->io_size;
 	vp->card_idx = card_idx;
+	vp->tx_pending = TX_RING_SIZE;
+	vp->rx_pending = RX_RING_SIZE;

 	/* module list only for Compaq device */
 	if (gendev == NULL) {
@@ -1126,17 +1189,6 @@ static int __devinit vortex_probe1(struct device *gendev,
 	vp->mii.phy_id_mask = 0x1f;
 	vp->mii.reg_num_mask = 0x1f;

-	/* Makes sure rings are at least 16 byte aligned. */
-	vp->rx_ring = pci_alloc_consistent(pdev, sizeof(struct boom_rx_desc) * RX_RING_SIZE
-					   + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
-					   &vp->rx_ring_dma);
-	retval = -ENOMEM;
-	if (!vp->rx_ring)
-		goto free_region;
-
-	vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
-	vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
-
 	/* if we are a PCI driver, we store info in pdev->driver_data
 	 * instead of a module list */
 	if (pdev)
@@ -1213,7 +1265,7 @@ static int __devinit vortex_probe1(struct device *gendev,
 	if (!is_valid_ether_addr(dev->dev_addr)) {
 		retval = -EINVAL;
 		printk(KERN_ERR "*** EEPROM MAC address is invalid.\n");
-		goto free_ring;	/* With every pack */
+		goto free_region;	/* With every pack */
 	}
 	EL3WINDOW(2);
 	for (i = 0; i < 6; i++)
@@ -1241,7 +1293,7 @@ static int __devinit vortex_probe1(struct device *gendev,
 		vp->cb_fn_base = pci_iomap(pdev, 2, 0);
 		if (!vp->cb_fn_base) {
 			retval = -ENOMEM;
-			goto free_ring;
+			goto free_region;
 		}

 		if (print_info) {
@@ -1409,12 +1461,6 @@ static int __devinit vortex_probe1(struct device *gendev,
 	if (retval == 0)
 		return 0;

-free_ring:
-	pci_free_consistent(pdev,
-		sizeof(struct boom_rx_desc) * RX_RING_SIZE
-			+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
-		vp->rx_ring,
-		vp->rx_ring_dma);
 free_region:
 	if (vp->must_free_region)
 		release_region(dev->base_addr, vci->io_size);
@@ -1636,9 +1682,9 @@ vortex_up(struct net_device *dev)
 		if (vp->drv_flags & IS_BOOMERANG)
 			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
 		/* Clear the Rx, Tx rings. */
-		for (i = 0; i < RX_RING_SIZE; i++)	/* AKPM: this is done in vortex_open, too */
+		for (i = 0; i < vp->rx_pending; i++)	/* AKPM: this is done in vortex_open, too */
 			vp->rx_ring[i].status = 0;
-		for (i = 0; i < TX_RING_SIZE; i++)
+		for (i = 0; i < vp->tx_pending; i++)
 			vp->tx_skbuff[i] = NULL;
 		iowrite32(0, ioaddr + DownListPtr);
 	}
@@ -1678,17 +1724,22 @@ vortex_open(struct net_device *dev)
 	int i;
 	int retval;

+	if ((retval = vortex_alloc_ring(dev))) {
+		printk(KERN_ERR "%s: Could not alloc memory for ring.\n", dev->name);
+		goto err;
+	}
+
 	/* Use the now-standard shared IRQ implementation. */
 	if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
 				&boomerang_interrupt : &vortex_interrupt, IRQF_SHARED, dev->name, dev))) {
 		printk(KERN_ERR "%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
-		goto err;
+		goto err_free_ring;
 	}

 	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
 		if (vortex_debug > 2)
 			printk(KERN_DEBUG "%s: Filling in the Rx ring.\n", dev->name);
-		for (i = 0; i < RX_RING_SIZE; i++) {
+		for (i = 0; i < vp->rx_pending; i++) {
 			struct sk_buff *skb;
 			vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
 			vp->rx_ring[i].status = 0;	/* Clear complete bit. */
@@ -1701,7 +1752,7 @@ vortex_open(struct net_device *dev)
 			skb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
 			vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
 		}
-		if (i != RX_RING_SIZE) {
+		if (i != vp->rx_pending) {
 			int j;
 			printk(KERN_EMERG "%s: no memory for rx ring\n", dev->name);
 			for (j = 0; j < i; j++) {
@@ -1723,6 +1774,9 @@ vortex_open(struct net_device *dev)

 err_free_irq:
 	free_irq(dev->irq, dev);
+err_free_ring:
+	pci_free_consistent(VORTEX_PCI(vp), sizeof(struct boom_rx_desc) * vp->rx_pending + sizeof(struct boom_tx_desc) * vp->tx_pending,
+			    vp->rx_ring, vp->rx_ring_dma);
 err:
 	if (vortex_debug > 1)
 		printk(KERN_ERR "%s: vortex_open() fails: returning %d\n", dev->name, retval);
@@ -1879,9 +1933,9 @@ static void vortex_tx_timeout(struct net_device *dev)
 	if (vp->full_bus_master_tx) {
 		printk(KERN_DEBUG "%s: Resetting the Tx ring pointer.\n", dev->name);
 		if (vp->cur_tx - vp->dirty_tx > 0 && ioread32(ioaddr + DownListPtr) == 0)
-			iowrite32(vp->tx_ring_dma + (vp->dirty_tx % TX_RING_SIZE) * sizeof(struct boom_tx_desc),
+			iowrite32(vp->tx_ring_dma + (vp->dirty_tx % vp->tx_pending) * sizeof(struct boom_tx_desc),
				 ioaddr + DownListPtr);
-		if (vp->cur_tx - vp->dirty_tx < TX_RING_SIZE)
+		if (vp->cur_tx - vp->dirty_tx < vp->tx_pending)
 			netif_wake_queue (dev);
 		if (vp->drv_flags & IS_BOOMERANG)
 			iowrite8(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold);
@@ -2070,8 +2124,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct vortex_private *vp = netdev_priv(dev);
 	void __iomem *ioaddr = vp->ioaddr;
 	/* Calculate the next Tx descriptor entry. */
-	int entry = vp->cur_tx % TX_RING_SIZE;
-	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
+	int entry = vp->cur_tx % vp->tx_pending;
+	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % vp->tx_pending];
 	unsigned long flags;

 	if (vortex_debug > 6) {
@@ -2080,7 +2134,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 			   dev->name, vp->cur_tx);
 	}

-	if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
+	if (vp->cur_tx - vp->dirty_tx >= vp->tx_pending) {
 		if (vortex_debug > 0)
 			printk(KERN_WARNING "%s: BUG! Tx Ring full, refusing to send buffer.\n",
 				   dev->name);
@@ -2138,7 +2192,7 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}

 	vp->cur_tx++;
-	if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
+	if (vp->cur_tx - vp->dirty_tx > vp->tx_pending - 1) {
 		netif_stop_queue (dev);
 	} else {					/* Clear previous interrupt enable. */
 #if defined(tx_interrupt_mitigation)
@@ -2322,7 +2376,7 @@ boomerang_interrupt(int irq, void *dev_id)

 			iowrite16(AckIntr | DownComplete, ioaddr + EL3_CMD);

 			while (vp->cur_tx - dirty_tx > 0) {
-				int entry = dirty_tx % TX_RING_SIZE;
+				int entry = dirty_tx % vp->tx_pending;
 #if 1	/* AKPM: the latter is faster, but cyclone-only */
 				if (ioread32(ioaddr + DownListPtr) ==
 					vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
@@ -2354,7 +2408,7 @@ boomerang_interrupt(int irq, void *dev_id)
 				dirty_tx++;
 			}
 			vp->dirty_tx = dirty_tx;
-			if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
+			if (vp->cur_tx - dirty_tx <= vp->tx_pending - 1) {
 				if (vortex_debug > 6)
 					printk(KERN_DEBUG "boomerang_interrupt: wake queue\n");
 				netif_wake_queue (dev);
@@ -2467,10 +2521,10 @@ static int
 boomerang_rx(struct net_device *dev)
 {
 	struct vortex_private *vp = netdev_priv(dev);
-	int entry = vp->cur_rx % RX_RING_SIZE;
+	int entry = vp->cur_rx % vp->rx_pending;
 	void __iomem *ioaddr = vp->ioaddr;
 	int rx_status;
-	int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
+	int rx_work_limit = vp->dirty_rx + vp->rx_pending - vp->cur_rx;

 	if (vortex_debug > 5)
 		printk(KERN_DEBUG "boomerang_rx(): status %4.4x\n", ioread16(ioaddr+EL3_STATUS));
@@ -2531,12 +2585,12 @@ boomerang_rx(struct net_device *dev)
 			dev->last_rx = jiffies;
 			vp->stats.rx_packets++;
 		}
-		entry = (++vp->cur_rx) % RX_RING_SIZE;
+		entry = (++vp->cur_rx) % vp->rx_pending;
 	}
 	/* Refill the Rx ring buffers. */
 	for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
 		struct sk_buff *skb;
-		entry = vp->dirty_rx % RX_RING_SIZE;
+		entry = vp->dirty_rx % vp->rx_pending;
 		if (vp->rx_skbuff[entry] == NULL) {
 			skb = dev_alloc_skb(PKT_BUF_SZ);
 			if (skb == NULL) {
@@ -2545,7 +2599,7 @@ boomerang_rx(struct net_device *dev)
 					printk(KERN_WARNING "%s: memory shortage\n", dev->name);
 					last_jif = jiffies;
 				}
-				if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)
+				if ((vp->cur_rx - vp->dirty_rx) == vp->rx_pending)
 					mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
 				break;			/* Bad news! */
 			}
@@ -2571,11 +2625,11 @@ rx_oom_timer(unsigned long arg)
 	struct vortex_private *vp = netdev_priv(dev);

 	spin_lock_irq(&vp->lock);
-	if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE)	/* This test is redundant, but makes me feel good */
+	if ((vp->cur_rx - vp->dirty_rx) == vp->rx_pending)	/* This test is redundant, but makes me feel good */
 		boomerang_rx(dev);
 	if (vortex_debug > 1) {
 		printk(KERN_DEBUG "%s: rx_oom_timer %s\n", dev->name,
-			((vp->cur_rx - vp->dirty_rx) != RX_RING_SIZE) ? "succeeded" : "retrying");
+			((vp->cur_rx - vp->dirty_rx) != vp->rx_pending) ? "succeeded" : "retrying");
 	}
 	spin_unlock_irq(&vp->lock);
 }
@@ -2625,7 +2679,6 @@ vortex_close(struct net_device *dev)
 {
 	struct vortex_private *vp = netdev_priv(dev);
 	void __iomem *ioaddr = vp->ioaddr;
-	int i;

 	if (netif_device_present(dev))
 		vortex_down(dev, 1);
@@ -2649,35 +2702,7 @@ vortex_close(struct net_device *dev)

 	free_irq(dev->irq, dev);

-	if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
-		for (i = 0; i < RX_RING_SIZE; i++)
-			if (vp->rx_skbuff[i]) {
-				pci_unmap_single( VORTEX_PCI(vp), le32_to_cpu(vp->rx_ring[i].addr),
-									PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
-				dev_kfree_skb(vp->rx_skbuff[i]);
-				vp->rx_skbuff[i] = NULL;
-			}
-	}
-	if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
-		for (i = 0; i < TX_RING_SIZE; i++) {
-			if (vp->tx_skbuff[i]) {
-				struct sk_buff *skb = vp->tx_skbuff[i];
-#if DO_ZEROCOPY
-				int k;
-
-				for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
-					pci_unmap_single(VORTEX_PCI(vp),
-							 le32_to_cpu(vp->tx_ring[i].frag[k].addr),
-							 le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
-							 PCI_DMA_TODEVICE);
-#else
-				pci_unmap_single(VORTEX_PCI(vp), le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
-#endif
-				dev_kfree_skb(skb);
-				vp->tx_skbuff[i] = NULL;
-			}
-		}
-	}
+	vortex_free_ring(dev);

 	return 0;
 }
@@ -2695,13 +2720,13 @@ dump_tx_ring(struct net_device *dev)

 			printk(KERN_ERR "  Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
 				   vp->full_bus_master_tx,
-				   vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
-				   vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
+				   vp->dirty_tx, vp->dirty_tx % vp->tx_pending,
+				   vp->cur_tx, vp->cur_tx % vp->tx_pending);
 			printk(KERN_ERR "  Transmit list %8.8x vs. %p.\n",
 				   ioread32(ioaddr + DownListPtr),
-				   &vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
+				   &vp->tx_ring[vp->dirty_tx % vp->tx_pending]);
 			issue_and_wait(dev, DownStall);
-			for (i = 0; i < TX_RING_SIZE; i++) {
+			for (i = 0; i < vp->tx_pending; i++) {
 				printk(KERN_ERR "  %d: @%p  length %8.8x status %8.8x\n", i,
 				   &vp->tx_ring[i],
 #if DO_ZEROCOPY
@@ -2874,6 +2899,43 @@ static void vortex_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 	}
 }

+static void vortex_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+
+	ering->rx_max_pending = RX_MAX_RING_SIZE;
+	ering->rx_mini_max_pending = 0;
+	ering->rx_jumbo_max_pending = 0;
+	ering->tx_max_pending = TX_MAX_RING_SIZE;
+
+	ering->rx_pending = vp->rx_pending;
+	ering->rx_mini_pending = 0;
+	ering->rx_jumbo_pending = 0;
+	ering->tx_pending = vp->tx_pending;
+}
+
+static int vortex_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
+{
+	struct vortex_private *vp = netdev_priv(dev);
+	int err = 0;
+
+	if (!ering->rx_pending || ering->rx_pending > RX_MAX_RING_SIZE ||
+	    !ering->tx_pending || ering->tx_pending > TX_MAX_RING_SIZE ||
+	    ering->rx_mini_pending || ering->rx_jumbo_pending)
+		return -EINVAL;
+
+	if (netif_running(dev))
+		vortex_close(dev);
+
+	vp->rx_pending = ering->rx_pending;
+	vp->tx_pending = ering->tx_pending;
+
+	if (netif_running(dev))
+		err = vortex_open(dev);
+
+	return err;
+}
+
 static void vortex_get_drvinfo(struct net_device *dev,
 					struct ethtool_drvinfo *info)
 {
@@ -2902,6 +2964,8 @@ static const struct ethtool_ops vortex_ethtool_ops = {
 	.set_settings           = vortex_set_settings,
 	.get_link               = ethtool_op_get_link,
 	.nway_reset             = vortex_nway_reset,
+	.get_ringparam          = vortex_get_ringparam,
+	.set_ringparam          = vortex_set_ringparam,
 };

 #ifdef CONFIG_PCI
@@ -3161,11 +3226,6 @@ static void __devexit vortex_remove_one(struct pci_dev *pdev)

 	pci_iounmap(VORTEX_PCI(vp), vp->ioaddr);

-	pci_free_consistent(pdev,
-		sizeof(struct boom_rx_desc) * RX_RING_SIZE
-			+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
-		vp->rx_ring,
-		vp->rx_ring_dma);
 	if (vp->must_free_region)
 		release_region(dev->base_addr, vp->io_size);
 	free_netdev(dev);
--
1.5.3
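
For quick testing of the new interface: the rings can be queried and resized
with "ethtool -g eth0" and "ethtool -G eth0 rx 64 tx 32", or directly through
the SIOCETHTOOL ioctl that ethtool wraps. Below is a minimal userspace sketch
of that ioctl path; the interface name "eth0" and the requested sizes are
arbitrary test values within the new limits, not part of the patch.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/types.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ringparam ering;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&ering;

	/* Query current and maximum ring sizes (vortex_get_ringparam). */
	memset(&ering, 0, sizeof(ering));
	ering.cmd = ETHTOOL_GRINGPARAM;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("ETHTOOL_GRINGPARAM");
		close(fd);
		return 1;
	}
	printf("rx %u/%u, tx %u/%u\n", ering.rx_pending, ering.rx_max_pending,
	       ering.tx_pending, ering.tx_max_pending);

	/* Resize the rings (vortex_set_ringparam). The driver closes and
	 * reopens the device, so the link will bounce briefly. */
	ering.cmd = ETHTOOL_SRINGPARAM;
	ering.rx_pending = 64;
	ering.tx_pending = 32;
	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
		perror("ETHTOOL_SRINGPARAM");

	close(fd);
	return 0;
}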