Patch against forcedeth_v47.c, attached to bug 5632 (-66 / +109 lines):

--- forcedeth_v47.c
+++ forcedeth_v47.c
@@ -348,6 +348,8 @@
 #define NV_TX2_VALID		(1<<31)
 #define NV_TX2_TSO		(1<<28)
 #define NV_TX2_TSO_SHIFT	14
+#define NV_TX2_TSO_MAX_SHIFT	14
+#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)
 #define NV_TX2_CHECKSUM_L3	(1<<27)
 #define NV_TX2_CHECKSUM_L4	(1<<26)
 
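The two added macros cap how much data one descriptor may carry: with NV_TX2_TSO_MAX_SHIFT = 14, NV_TX2_TSO_MAX_SIZE is 16 KiB, which lines up with NV_TX2_TSO_SHIFT: the TSO mss field starts at bit 14, so the length field below it can describe at most (1<<14) bytes. Anything larger must be spread over several descriptors. Below is a minimal standalone sketch of the split arithmetic the patch later uses in nv_start_xmit (plain userspace C, not driver code; entries_for is an illustrative name, not a driver function):

	#include <stdio.h>

	#define NV_TX2_TSO_MAX_SHIFT	14
	#define NV_TX2_TSO_MAX_SIZE	(1<<NV_TX2_TSO_MAX_SHIFT)

	/* Descriptors needed for a buffer of 'size' bytes: one per full
	 * 16 KiB chunk, plus one more if a remainder is left over. */
	static unsigned int entries_for(unsigned int size)
	{
		return (size >> NV_TX2_TSO_MAX_SHIFT) +
		       ((size & (NV_TX2_TSO_MAX_SIZE - 1)) ? 1 : 0);
	}

	int main(void)
	{
		printf("%u\n", entries_for(1500));	/* 1 */
		printf("%u\n", entries_for(16384));	/* 1 */
		printf("%u\n", entries_for(16385));	/* 2 */
		printf("%u\n", entries_for(65536));	/* 4: one 64 KiB TSO skb */
		return 0;
	}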
@@ -407,15 +409,15 @@
 #define NV_WATCHDOG_TIMEO	(5*HZ)
 
 #define RX_RING		128
-#define TX_RING		64
+#define TX_RING		256
 /* 
  * If your nic mysteriously hangs then try to reduce the limits
  * to 1/0: It might be required to set NV_TX_LASTPACKET in the
  * last valid ring entry. But this would be impossible to
  * implement - probably a disassembly error.
  */
-#define TX_LIMIT_STOP	63
-#define TX_LIMIT_START	62
+#define TX_LIMIT_STOP	255
+#define TX_LIMIT_START	254
 
 /* rx/tx mac addr + type + vlan + align + slack*/
 #define NV_RX_HEADERS		(64)
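The ring grows from 64 to 256 entries because a single TSO transmit can now occupy many descriptors (the linear part plus every fragment, each split into 16 KiB chunks), and the watermarks keep their old relation to the ring size: TX_LIMIT_STOP = TX_RING - 1, TX_LIMIT_START = TX_RING - 2. A standalone model of the queue-stop test that nv_start_xmit performs against these limits (must_stop is an illustrative name; next_tx/nic_tx are the driver's free-running producer/consumer counters):

	#include <stdio.h>

	#define TX_LIMIT_STOP	255

	/* next_tx - nic_tx is the number of descriptors in flight;
	 * refuse the packet if its 'entries' descriptors will not fit. */
	static int must_stop(unsigned int next_tx, unsigned int nic_tx,
			     unsigned int entries)
	{
		return (next_tx - nic_tx + entries - 1) > TX_LIMIT_STOP;
	}

	int main(void)
	{
		printf("%d\n", must_stop(1000, 1000, 4));	/* 0: ring empty */
		printf("%d\n", must_stop(1253, 1000, 4));	/* 1: would overrun */
		return 0;
	}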
@@ -534,6 +536,7 @@
 	unsigned int next_tx, nic_tx;
 	struct sk_buff *tx_skbuff[TX_RING];
 	dma_addr_t tx_dma[TX_RING];
+	unsigned int tx_dma_len[TX_RING];
 	u32 tx_flags;
 };
 
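tx_dma_len[] records the exact length passed to pci_map_single()/pci_map_page() for each ring entry. The old code recovered the unmap length from the skb itself (frag->size, or skb->len - skb->data_len); once a buffer is split into variable-sized chunks that recovery is no longer possible, and the PCI DMA API requires the unmap size to match the map size. A runnable sketch of how the fill loop populates the array (userspace model, not driver code):

	#include <stdio.h>

	#define NV_TX2_TSO_MAX_SIZE	(1<<14)
	#define TX_RING			256

	/* Split a 40000-byte linear buffer into ring entries and record
	 * each chunk's length, as np->tx_dma_len[] now does. */
	int main(void)
	{
		unsigned int tx_dma_len[TX_RING] = {0};
		unsigned int size = 40000, offset = 0, nr = 0;

		do {
			unsigned int bcnt = (size > NV_TX2_TSO_MAX_SIZE) ?
					    NV_TX2_TSO_MAX_SIZE : size;
			tx_dma_len[nr] = bcnt;	/* the unmap must reuse this */
			printf("entry %u: offset %u, len %u\n", nr, offset, bcnt);
			offset += bcnt;
			size -= bcnt;
			nr = (nr + 1) % TX_RING;
		} while (size);	/* prints lengths 16384, 16384, 7232 */
		return 0;
	}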
@@ -934,6 +937,7 @@
 	        else
 			np->tx_ring.ex[i].FlagLen = 0;
 		np->tx_skbuff[i] = NULL;
+		np->tx_dma[i] = 0;
 	}
 }
 
@@ -944,30 +948,27 @@
 	return nv_alloc_rx(dev);
 }
 
-static void nv_release_txskb(struct net_device *dev, unsigned int skbnr)
+static int nv_release_txskb(struct net_device *dev, unsigned int skbnr)
 {
 	struct fe_priv *np = netdev_priv(dev);
-	struct sk_buff *skb = np->tx_skbuff[skbnr];
-	unsigned int j, entry, fragments;
 
-	dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d, skb %p\n",
-		dev->name, skbnr, np->tx_skbuff[skbnr]);
+	dprintk(KERN_INFO "%s: nv_release_txskb for skbnr %d\n",
+		dev->name, skbnr);
 
-	entry = skbnr;
-	if ((fragments = skb_shinfo(skb)->nr_frags) != 0) {
-		for (j = fragments; j >= 1; j--) {
-			skb_frag_t *frag = &skb_shinfo(skb)->frags[j-1];
-			pci_unmap_page(np->pci_dev, np->tx_dma[entry],
-				       frag->size,
-				       PCI_DMA_TODEVICE);
-			entry = (entry - 1) % TX_RING;
-		}
+	if (np->tx_dma[skbnr]) {
+		pci_unmap_page(np->pci_dev, np->tx_dma[skbnr],
+			       np->tx_dma_len[skbnr],
+			       PCI_DMA_TODEVICE);
+		np->tx_dma[skbnr] = 0;
+	}
+
+	if (np->tx_skbuff[skbnr]) {
+		dev_kfree_skb_irq(np->tx_skbuff[skbnr]);
+		np->tx_skbuff[skbnr] = NULL;
+		return 1;
+	} else {
+		return 0;
 	}
-	pci_unmap_single(np->pci_dev, np->tx_dma[entry],
-			 skb->len - skb->data_len,
-			 PCI_DMA_TODEVICE);
-	dev_kfree_skb_irq(skb);
-	np->tx_skbuff[skbnr] = NULL;
 }
 
 static void nv_drain_tx(struct net_device *dev)
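nv_release_txskb() changes from a per-packet walk (the skb plus its fragments, stepped backwards through the ring) to a per-entry operation: unmap if an address is recorded, free an skb only if this entry owns one, and report via the return value whether a packet was actually freed, so that nv_drain_tx() in the next hunk can count drops. A standalone model of that contract (release_entry is an illustrative name; malloc/free stand in for skb allocation and dev_kfree_skb_irq):

	#include <stdio.h>
	#include <stdlib.h>

	#define TX_RING 4

	static void *tx_skbuff[TX_RING];
	static unsigned long tx_dma[TX_RING];

	/* Per-entry release: safe to call on every entry; returns 1 only
	 * on the entry that held the packet. */
	static int release_entry(unsigned int nr)
	{
		if (tx_dma[nr]) {
			/* pci_unmap_page(..., tx_dma_len[nr], ...) here */
			tx_dma[nr] = 0;
		}
		if (tx_skbuff[nr]) {
			free(tx_skbuff[nr]);
			tx_skbuff[nr] = NULL;
			return 1;
		}
		return 0;
	}

	int main(void)
	{
		unsigned int i, dropped = 0;

		/* a 3-chunk packet: DMA on entries 0-2, skb owned by entry 2 */
		tx_dma[0] = tx_dma[1] = tx_dma[2] = 0x1000;
		tx_skbuff[2] = malloc(1);

		for (i = 0; i < TX_RING; i++)	/* nv_drain_tx's loop */
			dropped += release_entry(i);
		printf("dropped %u packet(s)\n", dropped);	/* 1 */
		return 0;
	}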
@@ -980,10 +981,8 @@
 			np->tx_ring.orig[i].FlagLen = 0;
 		else
 			np->tx_ring.ex[i].FlagLen = 0;
-		if (np->tx_skbuff[i]) {
-			nv_release_txskb(dev, i);
+		if (nv_release_txskb(dev, i))
 			np->stats.tx_dropped++;
-		}
 	}
 }
 
988
Lines 1020-1083 Link Here
1020
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1019
static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
1021
{
1020
{
1022
	struct fe_priv *np = netdev_priv(dev);
1021
	struct fe_priv *np = netdev_priv(dev);
1022
	u32 tx_flags = 0;
1023
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
1023
	u32 tx_flags_extra = (np->desc_ver == DESC_VER_1 ? NV_TX_LASTPACKET : NV_TX2_LASTPACKET);
1024
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
1024
	unsigned int fragments = skb_shinfo(skb)->nr_frags;
1025
	unsigned int nr = (np->next_tx + fragments) % TX_RING;
1025
	unsigned int nr = (np->next_tx - 1) % TX_RING;
1026
	unsigned int start_nr = np->next_tx % TX_RING;
1026
	unsigned int i;
1027
	unsigned int i;
1028
	u32 offset = 0;
1029
	u32 bcnt;
1030
	u32 size = skb->len-skb->data_len;
1031
	u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1032
1033
	/* add fragments to entries count */
1034
	for (i = 0; i < fragments; i++) {
1035
		entries += (skb_shinfo(skb)->frags[i].size >> NV_TX2_TSO_MAX_SHIFT) + 
1036
			   ((skb_shinfo(skb)->frags[i].size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
1037
	}
1038
1039
	printk(KERN_INFO "forcedeth: num of entries %d\n", entries);
1027
1040
1028
	spin_lock_irq(&np->lock);
1041
	spin_lock_irq(&np->lock);
1029
1042
1030
	if ((np->next_tx - np->nic_tx + fragments) > TX_LIMIT_STOP) {
1043
	if ((np->next_tx - np->nic_tx + entries - 1) > TX_LIMIT_STOP) {
1031
		spin_unlock_irq(&np->lock);
1044
		spin_unlock_irq(&np->lock);
1032
		netif_stop_queue(dev);
1045
		netif_stop_queue(dev);
1033
		return NETDEV_TX_BUSY;
1046
		return NETDEV_TX_BUSY;
1034
	}
1047
	}
1035
1048
1036
	np->tx_skbuff[nr] = skb;
1049
	do {
1037
	
1050
		bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1038
	if (fragments) {
1051
		nr = (nr + 1) % TX_RING;
1039
		dprintk(KERN_DEBUG "%s: nv_start_xmit: buffer contains %d fragments\n", dev->name, fragments);
1040
		/* setup descriptors in reverse order */
1041
		for (i = fragments; i >= 1; i--) {
1042
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
1043
			np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset, frag->size,
1044
							PCI_DMA_TODEVICE);
1045
1052
1046
			if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1053
		np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data + offset, bcnt,
1047
				np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
1054
						PCI_DMA_TODEVICE);
1048
				np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (frag->size-1) | np->tx_flags | tx_flags_extra);
1055
		np->tx_dma_len[nr] = bcnt;
1049
			} else {
1050
				np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
1051
				np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
1052
				np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (frag->size-1) | np->tx_flags | tx_flags_extra);
1053
			}
1054
			
1055
			nr = (nr - 1) % TX_RING;
1056
1056
1057
			if (np->desc_ver == DESC_VER_1)
1057
		if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1058
				tx_flags_extra &= ~NV_TX_LASTPACKET;
1058
			np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
1059
			else
1059
			np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
1060
				tx_flags_extra &= ~NV_TX2_LASTPACKET;		
1060
		} else {
1061
			np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
1062
			np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
1063
			np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
1064
		}
1065
		tx_flags = np->tx_flags;
1066
		offset += bcnt;
1067
		size -= bcnt;
1068
		printk(KERN_INFO "FlagLen Begin %x\n", np->tx_ring.ex[nr].FlagLen);
1069
	} while(size);
1070
1071
	if (fragments) {
1072
		printk(KERN_INFO "%s: nv_start_xmit: buffer contains %d fragments\n", dev->name, fragments);
1073
		for (i = 0; i < fragments; i++) {
1074
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1075
			u32 size = frag->size;
1076
			offset = 0;
1077
1078
			do {
1079
				bcnt = (size > NV_TX2_TSO_MAX_SIZE) ? NV_TX2_TSO_MAX_SIZE : size;
1080
				nr = (nr + 1) % TX_RING;
1081
			
1082
				np->tx_dma[nr] = pci_map_page(np->pci_dev, frag->page, frag->page_offset+offset, bcnt,
1083
							      PCI_DMA_TODEVICE);
1084
				np->tx_dma_len[nr] = bcnt;
1085
1086
				if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1087
					np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
1088
					np->tx_ring.orig[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
1089
				} else {
1090
					np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
1091
					np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
1092
					np->tx_ring.ex[nr].FlagLen = cpu_to_le32((bcnt-1) | tx_flags);
1093
					printk(KERN_INFO "FlagLen %d %x\n", i, np->tx_ring.ex[nr].FlagLen);
1094
				}
1095
				offset += bcnt;
1096
				size -= bcnt;
1097
			} while (size);
1061
		}
1098
		}
1062
	}
1099
	}
1063
1100
1101
	/* set last fragment flag  */
1102
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1103
		np->tx_ring.orig[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
1104
	} else {
1105
		np->tx_ring.ex[nr].FlagLen |= cpu_to_le32(tx_flags_extra);
1106
	}
1107
	printk(KERN_INFO "FlagLen Last %x\n", np->tx_ring.ex[nr].FlagLen);
1108
1109
	np->tx_skbuff[nr] = skb;
1110
1064
#ifdef NETIF_F_TSO
1111
#ifdef NETIF_F_TSO
1065
	if (skb_shinfo(skb)->tso_size)
1112
	if (skb_shinfo(skb)->tso_size) {
1066
		tx_flags_extra |= NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
1113
		tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->tso_size << NV_TX2_TSO_SHIFT);
1114
	}
1067
	else
1115
	else
1068
#endif
1116
#endif
1069
	tx_flags_extra |= (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
1117
	tx_flags_extra = (skb->ip_summed == CHECKSUM_HW ? (NV_TX2_CHECKSUM_L3|NV_TX2_CHECKSUM_L4) : 0);
1070
1118
1071
	np->tx_dma[nr] = pci_map_single(np->pci_dev, skb->data, skb->len-skb->data_len,
1119
	/* set tx flags */
1072
					PCI_DMA_TODEVICE);
1073
	
1074
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1120
	if (np->desc_ver == DESC_VER_1 || np->desc_ver == DESC_VER_2) {
1075
		np->tx_ring.orig[nr].PacketBuffer = cpu_to_le32(np->tx_dma[nr]);
1121
		np->tx_ring.orig[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
1076
		np->tx_ring.orig[nr].FlagLen = cpu_to_le32( (skb->len-skb->data_len-1) | np->tx_flags | tx_flags_extra);
1077
	} else {
1122
	} else {
1078
		np->tx_ring.ex[nr].PacketBufferHigh = cpu_to_le64(np->tx_dma[nr]) >> 32;
1123
		np->tx_ring.ex[start_nr].FlagLen |= cpu_to_le32(tx_flags | tx_flags_extra);
1079
		np->tx_ring.ex[nr].PacketBufferLow = cpu_to_le64(np->tx_dma[nr]) & 0x0FFFFFFFF;
1124
		printk(KERN_INFO "FlagLen First %x\n", np->tx_ring.ex[start_nr].FlagLen);
1080
		np->tx_ring.ex[nr].FlagLen = cpu_to_le32( (skb->len-skb->data_len-1) | np->tx_flags | tx_flags_extra);
1081
	}	
1125
	}	
1082
1126
1083
	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission. tx_flags_extra: %x\n",
1127
	dprintk(KERN_DEBUG "%s: nv_start_xmit: packet packet %d queued for transmission. tx_flags_extra: %x\n",
Lines 1092-1098 Link Here
1092
		dprintk("\n");
1136
		dprintk("\n");
1093
	}
1137
	}
1094
1138
1095
	np->next_tx += 1 + fragments;
1139
	np->next_tx += entries;
1096
1140
1097
	dev->trans_start = jiffies;
1141
	dev->trans_start = jiffies;
1098
	spin_unlock_irq(&np->lock);
1142
	spin_unlock_irq(&np->lock);
@@ -1139,7 +1183,6 @@
 					np->stats.tx_packets++;
 					np->stats.tx_bytes += skb->len;
 				}
-				nv_release_txskb(dev, i);
 			}
 		} else {
 			if (Flags & NV_TX2_LASTPACKET) {
@@ -1155,9 +1198,9 @@
 					np->stats.tx_packets++;
 					np->stats.tx_bytes += skb->len;
 				}
-				nv_release_txskb(dev, i);
 			}
 		}
+		nv_release_txskb(dev, i);
 		np->nic_tx++;
 	}
 	if (np->next_tx - np->nic_tx < TX_LIMIT_START)
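With the call moved after the if/else, nv_tx_done now runs nv_release_txskb() on every completed entry rather than only on LASTPACKET descriptors; intermediate chunk entries merely unmap their DMA region and return 0, while the statistics are still updated only when a LASTPACKET flag is seen. The hunk ends at the restart check, which together with the stop test in nv_start_xmit gives the queue a small hysteresis band (in the stock driver this line is followed by netif_wake_queue()). A standalone model of the two thresholds:

	#include <stdio.h>

	#define TX_LIMIT_STOP	255
	#define TX_LIMIT_START	254

	/* 'in_flight' models next_tx - nic_tx; the stop test shown is the
	 * nv_start_xmit check for a 1-entry packet. */
	int main(void)
	{
		unsigned int in_flight;

		for (in_flight = 253; in_flight <= 256; in_flight++)
			printf("in flight %u: stop=%d wake=%d\n", in_flight,
			       in_flight > TX_LIMIT_STOP,
			       in_flight < TX_LIMIT_START);
		return 0;
	}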
