drivers: net: use skb_headlen()

Replace occurrences of (skb->len - skb->data_len) with skb_headlen(skb).

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Author:    Eric Dumazet <eric.dumazet@gmail.com>
Date:      2010-04-14 15:59:40 -07:00
Committer: David S. Miller <davem@davemloft.net>
parent     b4bf665c57
commit     e743d31312
17 changed files with 33 additions and 33 deletions
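
For reference, skb_headlen() reports the length of the skb's linear (head) data, i.e. the total length minus the bytes held in paged fragments. It is essentially the open-coded expression being replaced, as the helper reads in include/linux/skbuff.h:

    /* Linear (head) length of the skb: total length minus the bytes
     * stored in paged fragments -- the same value the drivers below
     * were computing by hand. */
    static inline unsigned int skb_headlen(const struct sk_buff *skb)
    {
            return skb->len - skb->data_len;
    }

The substitution is value-for-value, so no driver behaviour changes.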


@@ -1131,7 +1131,7 @@ DPRINTK("doing direct send\n"); /* @@@ well, this doesn't work anyway */
     if (i == -1)
         put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
             skb->data,
-            skb->len - skb->data_len);
+            skb_headlen(skb));
     else
         put_dma(tx->index,eni_dev->dma,&j,(unsigned long)
             skb_shinfo(skb)->frags[i].page + skb_shinfo(skb)->frags[i].page_offset,


@@ -2664,8 +2664,8 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
 #ifdef USE_SCATTERGATHER
     tpd->iovec[slot].addr = pci_map_single(he_dev->pci_dev, skb->data,
-        skb->len - skb->data_len, PCI_DMA_TODEVICE);
-    tpd->iovec[slot].len = skb->len - skb->data_len;
+        skb_headlen(skb), PCI_DMA_TODEVICE);
+    tpd->iovec[slot].len = skb_headlen(skb);
     ++slot;
     for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {


@@ -2129,8 +2129,8 @@ boomerang_start_xmit(struct sk_buff *skb, struct net_device *dev)
     int i;
     vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data,
-        skb->len-skb->data_len, PCI_DMA_TODEVICE));
-    vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len-skb->data_len);
+        skb_headlen(skb), PCI_DMA_TODEVICE));
+    vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb_headlen(skb));
     for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];


@@ -1679,7 +1679,7 @@ static void atl1e_tx_map(struct atl1e_adapter *adapter,
 {
     struct atl1e_tpd_desc *use_tpd = NULL;
     struct atl1e_tx_buffer *tx_buffer = NULL;
-    u16 buf_len = skb->len - skb->data_len;
+    u16 buf_len = skb_headlen(skb);
     u16 map_len = 0;
     u16 mapped_len = 0;
     u16 hdr_len = 0;


@@ -2347,7 +2347,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
 {
     struct atl1_adapter *adapter = netdev_priv(netdev);
     struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
-    int len = skb->len;
+    int len;
     int tso;
     int count = 1;
     int ret_val;
@@ -2359,7 +2359,7 @@ static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
     unsigned int f;
     unsigned int proto_hdr_len;
-    len -= skb->data_len;
+    len = skb_headlen(skb);
     if (unlikely(skb->len <= 0)) {
         dev_kfree_skb_any(skb);


@@ -432,7 +432,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
     map_head = txq->head;
     if (skb->len > skb->data_len) {
-        int len = skb->len - skb->data_len;
+        int len = skb_headlen(skb);
         busaddr = pci_map_single(pdev, skb->data, len,
                 PCI_DMA_TODEVICE);
         if (pci_dma_mapping_error(pdev, busaddr))
@@ -1098,7 +1098,7 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
         cur_index = txq->tail;
         wrb = queue_tail_node(txq);
         unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
-            sent_skb->len > sent_skb->data_len));
+            skb_headlen(sent_skb)));
         unmap_skb_hdr = false;
         num_wrbs++;


@@ -1123,7 +1123,7 @@ static inline unsigned int compute_large_page_tx_descs(struct sk_buff *skb)
     if (PAGE_SIZE > SGE_TX_DESC_MAX_PLEN) {
         unsigned int nfrags = skb_shinfo(skb)->nr_frags;
-        unsigned int i, len = skb->len - skb->data_len;
+        unsigned int i, len = skb_headlen(skb);
         while (len > SGE_TX_DESC_MAX_PLEN) {
             count++;
             len -= SGE_TX_DESC_MAX_PLEN;
@@ -1219,10 +1219,10 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
     ce = &q->centries[pidx];
     mapping = pci_map_single(adapter->pdev, skb->data,
-        skb->len - skb->data_len, PCI_DMA_TODEVICE);
+        skb_headlen(skb), PCI_DMA_TODEVICE);
     desc_mapping = mapping;
-    desc_len = skb->len - skb->data_len;
+    desc_len = skb_headlen(skb);
     flags = F_CMD_DATAVALID | F_CMD_SOP |
         V_CMD_EOP(nfrags == 0 && desc_len <= SGE_TX_DESC_MAX_PLEN) |
@@ -1258,7 +1258,7 @@ static inline void write_tx_descs(struct adapter *adapter, struct sk_buff *skb,
     ce->skb = NULL;
     dma_unmap_addr_set(ce, dma_addr, mapping);
-    dma_unmap_len_set(ce, dma_len, skb->len - skb->data_len);
+    dma_unmap_len_set(ce, dma_len, skb_headlen(skb));
     for (i = 0; nfrags--; i++) {
         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];


@@ -2929,7 +2929,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
     unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
     unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
     unsigned int tx_flags = 0;
-    unsigned int len = skb->len - skb->data_len;
+    unsigned int len = skb_headlen(skb);
     unsigned int nr_frags;
     unsigned int mss;
     int count = 0;
@@ -2980,7 +2980,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
             dev_kfree_skb_any(skb);
             return NETDEV_TX_OK;
         }
-        len = skb->len - skb->data_len;
+        len = skb_headlen(skb);
         break;
     default:
         /* do nothing */


@@ -4132,7 +4132,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
     unsigned int max_per_txd = E1000_MAX_PER_TXD;
     unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
     unsigned int tx_flags = 0;
-    unsigned int len = skb->len - skb->data_len;
+    unsigned int len = skb_headlen(skb);
     unsigned int nr_frags;
     unsigned int mss;
     int count = 0;
@@ -4182,7 +4182,7 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
             dev_kfree_skb_any(skb);
             return NETDEV_TX_OK;
         }
-        len = skb->len - skb->data_len;
+        len = skb_headlen(skb);
     }
 }


@@ -1618,7 +1618,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
 {
     struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
     u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
-    int skb_data_size = skb->len - skb->data_len;
+    int skb_data_size = skb_headlen(skb);
     int headersize;
     /* Packet is TCP with TSO enabled */
@@ -1629,7 +1629,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
      */
     headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);
-    skb_data_size = skb->len - skb->data_len;
+    skb_data_size = skb_headlen(skb);
     if (skb_data_size >= headersize) {
         /* copy immediate data */
@@ -1651,7 +1651,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
 static void write_swqe2_nonTSO(struct sk_buff *skb,
                struct ehea_swqe *swqe, u32 lkey)
 {
-    int skb_data_size = skb->len - skb->data_len;
+    int skb_data_size = skb_headlen(skb);
     u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
     struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
@@ -2108,8 +2108,8 @@ static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
     } else {
         /* first copy data from the skb->data buffer ... */
         skb_copy_from_linear_data(skb, imm_data,
-                      skb->len - skb->data_len);
-        imm_data += skb->len - skb->data_len;
+                      skb_headlen(skb));
+        imm_data += skb_headlen(skb);
         /* ... then copy data from the fragments */
         for (i = 0; i < nfrags; i++) {


@@ -2148,7 +2148,7 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
     unsigned int i;
     u32 offset = 0;
     u32 bcnt;
-    u32 size = skb->len-skb->data_len;
+    u32 size = skb_headlen(skb);
     u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
     u32 empty_slots;
     struct ring_desc* put_tx;
@@ -2269,7 +2269,7 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
     unsigned int i;
     u32 offset = 0;
     u32 bcnt;
-    u32 size = skb->len-skb->data_len;
+    u32 size = skb_headlen(skb);
     u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
     u32 empty_slots;
     struct ring_desc_ex* put_tx;


@@ -604,7 +604,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
      * packets not getting split correctly
      */
     if (staterr & IXGBE_RXD_STAT_LB) {
-        u32 header_fixup_len = skb->len - skb->data_len;
+        u32 header_fixup_len = skb_headlen(skb);
         if (header_fixup_len < 14)
             skb_push(skb, header_fixup_len);
     }


@@ -4684,7 +4684,7 @@ static void send_packet(struct sk_buff *skb, struct net_device *dev)
     int frag;
     skb_frag_t *this_frag;
-    dma_buf->len = skb->len - skb->data_len;
+    dma_buf->len = skb_headlen(skb);
     dma_buf->dma = pci_map_single(
         hw_priv->pdev, skb->data, dma_buf->len,


@@ -2757,7 +2757,7 @@ again:
     }
     /* map the skb for DMA */
-    len = skb->len - skb->data_len;
+    len = skb_headlen(skb);
     idx = tx->req & tx->mask;
     tx->info[idx].skb = skb;
     bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE);


@@ -2400,7 +2400,7 @@ static struct sk_buff *s2io_txdl_getskb(struct fifo_info *fifo_data,
         return NULL;
     }
     pci_unmap_single(nic->pdev, (dma_addr_t)txds->Buffer_Pointer,
-             skb->len - skb->data_len, PCI_DMA_TODEVICE);
+             skb_headlen(skb), PCI_DMA_TODEVICE);
     frg_cnt = skb_shinfo(skb)->nr_frags;
     if (frg_cnt) {
         txds++;
@@ -4202,7 +4202,7 @@ static netdev_tx_t s2io_xmit(struct sk_buff *skb, struct net_device *dev)
         txdp->Control_2 |= TXD_VLAN_TAG(vlan_tag);
     }
-    frg_len = skb->len - skb->data_len;
+    frg_len = skb_headlen(skb);
     if (offload_type == SKB_GSO_UDP) {
         int ufo_size;


@@ -1508,7 +1508,7 @@ bdx_tx_map_skb(struct bdx_priv *priv, struct sk_buff *skb,
     int nr_frags = skb_shinfo(skb)->nr_frags;
     int i;
-    db->wptr->len = skb->len - skb->data_len;
+    db->wptr->len = skb_headlen(skb);
     db->wptr->addr.dma = pci_map_single(priv->pdev, skb->data,
                     db->wptr->len, PCI_DMA_TODEVICE);
     pbl->len = CPU_CHIP_SWAP32(db->wptr->len);


@@ -704,8 +704,8 @@ static int tsi108_send_packet(struct sk_buff * skb, struct net_device *dev)
     if (i == 0) {
         data->txring[tx].buf0 = dma_map_single(NULL, skb->data,
-                skb->len - skb->data_len, DMA_TO_DEVICE);
-        data->txring[tx].len = skb->len - skb->data_len;
+                skb_headlen(skb), DMA_TO_DEVICE);
+        data->txring[tx].len = skb_headlen(skb);
         misc |= TSI108_TX_SOF;
     } else {
         skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];