drivers:net: Remove dma_alloc_coherent OOM messages

These error messages are redundant: on allocation failure,
warn_alloc_failed already logs the failure and emits a
dump_stack on OOM.

Remove the unnecessary additional error logging.

Around these deletions:

o Alignment neatening.
o Remove unnecessary casts of dma_alloc_coherent.
o Hoist assignments out of if conditions.

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
Joe Perches 2013-03-14 13:07:21 +00:00 committed by David S. Miller
parent 68c45a2da3
commit d0320f7500
42 changed files with 121 additions and 277 deletions

View File

@ -1464,14 +1464,10 @@ static int greth_of_probe(struct platform_device *ofdev)
} }
/* Allocate TX descriptor ring in coherent memory */ /* Allocate TX descriptor ring in coherent memory */
greth->tx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev, greth->tx_bd_base = dma_alloc_coherent(greth->dev, 1024,
1024,
&greth->tx_bd_base_phys, &greth->tx_bd_base_phys,
GFP_KERNEL); GFP_KERNEL);
if (!greth->tx_bd_base) { if (!greth->tx_bd_base) {
if (netif_msg_probe(greth))
dev_err(&dev->dev, "could not allocate descriptor memory.\n");
err = -ENOMEM; err = -ENOMEM;
goto error3; goto error3;
} }
@ -1479,14 +1475,10 @@ static int greth_of_probe(struct platform_device *ofdev)
memset(greth->tx_bd_base, 0, 1024); memset(greth->tx_bd_base, 0, 1024);
/* Allocate RX descriptor ring in coherent memory */ /* Allocate RX descriptor ring in coherent memory */
greth->rx_bd_base = (struct greth_bd *) dma_alloc_coherent(greth->dev, greth->rx_bd_base = dma_alloc_coherent(greth->dev, 1024,
1024,
&greth->rx_bd_base_phys, &greth->rx_bd_base_phys,
GFP_KERNEL); GFP_KERNEL);
if (!greth->rx_bd_base) { if (!greth->rx_bd_base) {
if (netif_msg_probe(greth))
dev_err(greth->dev, "could not allocate descriptor memory.\n");
err = -ENOMEM; err = -ENOMEM;
goto error4; goto error4;
} }

View File

@ -1373,10 +1373,9 @@ static int sparc_lance_probe_one(struct platform_device *op,
dma_alloc_coherent(&op->dev, dma_alloc_coherent(&op->dev,
sizeof(struct lance_init_block), sizeof(struct lance_init_block),
&lp->init_block_dvma, GFP_ATOMIC); &lp->init_block_dvma, GFP_ATOMIC);
if (!lp->init_block_mem) { if (!lp->init_block_mem)
printk(KERN_ERR "SunLance: Cannot allocate consistent DMA memory.\n");
goto fail; goto fail;
}
lp->pio_buffer = 0; lp->pio_buffer = 0;
lp->init_ring = lance_init_ring_dvma; lp->init_ring = lance_init_ring_dvma;
lp->rx = lance_rx_dvma; lp->rx = lance_rx_dvma;

View File

@ -388,18 +388,14 @@ static int mace_open(struct net_device *dev)
mp->tx_ring = dma_alloc_coherent(mp->device, mp->tx_ring = dma_alloc_coherent(mp->device,
N_TX_RING * MACE_BUFF_SIZE, N_TX_RING * MACE_BUFF_SIZE,
&mp->tx_ring_phys, GFP_KERNEL); &mp->tx_ring_phys, GFP_KERNEL);
if (mp->tx_ring == NULL) { if (mp->tx_ring == NULL)
printk(KERN_ERR "%s: unable to allocate DMA tx buffers\n", dev->name);
goto out1; goto out1;
}
mp->rx_ring = dma_alloc_coherent(mp->device, mp->rx_ring = dma_alloc_coherent(mp->device,
N_RX_RING * MACE_BUFF_SIZE, N_RX_RING * MACE_BUFF_SIZE,
&mp->rx_ring_phys, GFP_KERNEL); &mp->rx_ring_phys, GFP_KERNEL);
if (mp->rx_ring == NULL) { if (mp->rx_ring == NULL)
printk(KERN_ERR "%s: unable to allocate DMA rx buffers\n", dev->name);
goto out2; goto out2;
}
mace_dma_off(dev); mace_dma_off(dev);

View File

@ -864,7 +864,6 @@ static int bcm_enet_open(struct net_device *dev)
size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
if (!p) { if (!p) {
dev_err(kdev, "cannot allocate rx ring %u\n", size);
ret = -ENOMEM; ret = -ENOMEM;
goto out_freeirq_tx; goto out_freeirq_tx;
} }
@ -877,7 +876,6 @@ static int bcm_enet_open(struct net_device *dev)
size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
if (!p) { if (!p) {
dev_err(kdev, "cannot allocate tx ring\n");
ret = -ENOMEM; ret = -ENOMEM;
goto out_free_rx_ring; goto out_free_rx_ring;
} }

View File

@ -47,19 +47,16 @@ static int at91ether_start(struct net_device *dev)
int i; int i;
lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev, lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
MAX_RX_DESCR * sizeof(struct macb_dma_desc), (MAX_RX_DESCR *
sizeof(struct macb_dma_desc)),
&lp->rx_ring_dma, GFP_KERNEL); &lp->rx_ring_dma, GFP_KERNEL);
if (!lp->rx_ring) { if (!lp->rx_ring)
netdev_err(dev, "unable to alloc rx ring DMA buffer\n");
return -ENOMEM; return -ENOMEM;
}
lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev, lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
MAX_RX_DESCR * MAX_RBUFF_SZ, MAX_RX_DESCR * MAX_RBUFF_SZ,
&lp->rx_buffers_dma, GFP_KERNEL); &lp->rx_buffers_dma, GFP_KERNEL);
if (!lp->rx_buffers) { if (!lp->rx_buffers) {
netdev_err(dev, "unable to alloc rx data DMA buffer\n");
dma_free_coherent(&lp->pdev->dev, dma_free_coherent(&lp->pdev->dev,
MAX_RX_DESCR * sizeof(struct macb_dma_desc), MAX_RX_DESCR * sizeof(struct macb_dma_desc),
lp->rx_ring, lp->rx_ring_dma); lp->rx_ring, lp->rx_ring_dma);

View File

@ -236,17 +236,14 @@ static int xircom_probe(struct pci_dev *pdev, const struct pci_device_id *id)
private->rx_buffer = dma_alloc_coherent(d, 8192, private->rx_buffer = dma_alloc_coherent(d, 8192,
&private->rx_dma_handle, &private->rx_dma_handle,
GFP_KERNEL); GFP_KERNEL);
if (private->rx_buffer == NULL) { if (private->rx_buffer == NULL)
pr_err("%s: no memory for rx buffer\n", __func__);
goto rx_buf_fail; goto rx_buf_fail;
}
private->tx_buffer = dma_alloc_coherent(d, 8192, private->tx_buffer = dma_alloc_coherent(d, 8192,
&private->tx_dma_handle, &private->tx_dma_handle,
GFP_KERNEL); GFP_KERNEL);
if (private->tx_buffer == NULL) { if (private->tx_buffer == NULL)
pr_err("%s: no memory for tx buffer\n", __func__);
goto tx_buf_fail; goto tx_buf_fail;
}
SET_NETDEV_DEV(dev, &pdev->dev); SET_NETDEV_DEV(dev, &pdev->dev);

View File

@ -2667,10 +2667,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
cmd.size = sizeof(struct be_cmd_req_set_mac_list); cmd.size = sizeof(struct be_cmd_req_set_mac_list);
cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
&cmd.dma, GFP_KERNEL); &cmd.dma, GFP_KERNEL);
if (!cmd.va) { if (!cmd.va)
dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
return -ENOMEM; return -ENOMEM;
}
spin_lock_bh(&adapter->mcc_lock); spin_lock_bh(&adapter->mcc_lock);

View File

@ -719,10 +719,8 @@ be_test_ddr_dma(struct be_adapter *adapter)
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
&ddrdma_cmd.dma, GFP_KERNEL); &ddrdma_cmd.dma, GFP_KERNEL);
if (!ddrdma_cmd.va) { if (!ddrdma_cmd.va)
dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
return -ENOMEM; return -ENOMEM;
}
for (i = 0; i < 2; i++) { for (i = 0; i < 2; i++) {
ret = be_cmd_ddr_dma_test(adapter, pattern[i], ret = be_cmd_ddr_dma_test(adapter, pattern[i],
@ -845,11 +843,8 @@ be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
&eeprom_cmd.dma, GFP_KERNEL); &eeprom_cmd.dma, GFP_KERNEL);
if (!eeprom_cmd.va) { if (!eeprom_cmd.va)
dev_err(&adapter->pdev->dev,
"Memory allocation failure. Could not read eeprom\n");
return -ENOMEM; return -ENOMEM;
}
status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd); status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd);

View File

@ -3467,8 +3467,6 @@ static int lancer_fw_download(struct be_adapter *adapter,
&flash_cmd.dma, GFP_KERNEL); &flash_cmd.dma, GFP_KERNEL);
if (!flash_cmd.va) { if (!flash_cmd.va) {
status = -ENOMEM; status = -ENOMEM;
dev_err(&adapter->pdev->dev,
"Memory allocation failure while flashing\n");
goto lancer_fw_exit; goto lancer_fw_exit;
} }
@ -3570,8 +3568,6 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
&flash_cmd.dma, GFP_KERNEL); &flash_cmd.dma, GFP_KERNEL);
if (!flash_cmd.va) { if (!flash_cmd.va) {
status = -ENOMEM; status = -ENOMEM;
dev_err(&adapter->pdev->dev,
"Memory allocation failure while flashing\n");
goto be_fw_exit; goto be_fw_exit;
} }

View File

@ -1595,10 +1595,8 @@ static int fec_enet_init(struct net_device *ndev)
/* Allocate memory for buffer descriptors. */ /* Allocate memory for buffer descriptors. */
cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma,
GFP_KERNEL); GFP_KERNEL);
if (!cbd_base) { if (!cbd_base)
printk("FEC: allocate descriptor memory failed?\n");
return -ENOMEM; return -ENOMEM;
}
spin_lock_init(&fep->hw_lock); spin_lock_init(&fep->hw_lock);

View File

@ -245,14 +245,13 @@ static int gfar_alloc_skb_resources(struct net_device *ndev)
/* Allocate memory for the buffer descriptors */ /* Allocate memory for the buffer descriptors */
vaddr = dma_alloc_coherent(dev, vaddr = dma_alloc_coherent(dev,
sizeof(struct txbd8) * priv->total_tx_ring_size + (priv->total_tx_ring_size *
sizeof(struct rxbd8) * priv->total_rx_ring_size, sizeof(struct txbd8)) +
(priv->total_rx_ring_size *
sizeof(struct rxbd8)),
&addr, GFP_KERNEL); &addr, GFP_KERNEL);
if (!vaddr) { if (!vaddr)
netif_err(priv, ifup, ndev,
"Could not allocate buffer descriptors!\n");
return -ENOMEM; return -ENOMEM;
}
for (i = 0; i < priv->num_tx_queues; i++) { for (i = 0; i < priv->num_tx_queues; i++) {
tx_queue = priv->tx_queue[i]; tx_queue = priv->tx_queue[i];

View File

@ -637,13 +637,9 @@ static int mal_probe(struct platform_device *ofdev)
bd_size = sizeof(struct mal_descriptor) * bd_size = sizeof(struct mal_descriptor) *
(NUM_TX_BUFF * mal->num_tx_chans + (NUM_TX_BUFF * mal->num_tx_chans +
NUM_RX_BUFF * mal->num_rx_chans); NUM_RX_BUFF * mal->num_rx_chans);
mal->bd_virt = mal->bd_virt = dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
dma_alloc_coherent(&ofdev->dev, bd_size, &mal->bd_dma,
GFP_KERNEL); GFP_KERNEL);
if (mal->bd_virt == NULL) { if (mal->bd_virt == NULL) {
printk(KERN_ERR
"mal%d: out of memory allocating RX/TX descriptors!\n",
index);
err = -ENOMEM; err = -ENOMEM;
goto fail_unmap; goto fail_unmap;
} }

View File

@ -558,9 +558,7 @@ static int ibmveth_open(struct net_device *netdev)
adapter->rx_queue.queue_addr = adapter->rx_queue.queue_addr =
dma_alloc_coherent(dev, adapter->rx_queue.queue_len, dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
&adapter->rx_queue.queue_dma, GFP_KERNEL); &adapter->rx_queue.queue_dma, GFP_KERNEL);
if (!adapter->rx_queue.queue_addr) { if (!adapter->rx_queue.queue_addr) {
netdev_err(netdev, "unable to allocate rx queue pages\n");
rc = -ENOMEM; rc = -ENOMEM;
goto err_out; goto err_out;
} }

View File

@ -1516,8 +1516,6 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
if (!txdr->desc) { if (!txdr->desc) {
setup_tx_desc_die: setup_tx_desc_die:
vfree(txdr->buffer_info); vfree(txdr->buffer_info);
e_err(probe, "Unable to allocate memory for the Tx descriptor "
"ring\n");
return -ENOMEM; return -ENOMEM;
} }
@ -1707,10 +1705,7 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma, rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
GFP_KERNEL); GFP_KERNEL);
if (!rxdr->desc) { if (!rxdr->desc) {
e_err(probe, "Unable to allocate memory for the Rx descriptor "
"ring\n");
setup_rx_desc_die: setup_rx_desc_die:
vfree(rxdr->buffer_info); vfree(rxdr->buffer_info);
return -ENOMEM; return -ENOMEM;
@ -1729,8 +1724,6 @@ setup_rx_desc_die:
if (!rxdr->desc) { if (!rxdr->desc) {
dma_free_coherent(&pdev->dev, rxdr->size, olddesc, dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
olddma); olddma);
e_err(probe, "Unable to allocate memory for the Rx "
"descriptor ring\n");
goto setup_rx_desc_die; goto setup_rx_desc_die;
} }

View File

@ -720,8 +720,6 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
GFP_KERNEL); GFP_KERNEL);
if (!txdr->desc) { if (!txdr->desc) {
vfree(txdr->buffer_info); vfree(txdr->buffer_info);
netif_err(adapter, probe, adapter->netdev,
"Unable to allocate transmit descriptor memory\n");
return -ENOMEM; return -ENOMEM;
} }
memset(txdr->desc, 0, txdr->size); memset(txdr->desc, 0, txdr->size);
@ -807,8 +805,6 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
if (!rxdr->desc) { if (!rxdr->desc) {
vfree(rxdr->buffer_info); vfree(rxdr->buffer_info);
netif_err(adapter, probe, adapter->netdev,
"Unable to allocate receive descriptors\n");
return -ENOMEM; return -ENOMEM;
} }
memset(rxdr->desc, 0, rxdr->size); memset(rxdr->desc, 0, rxdr->size);

View File

@ -2423,9 +2423,6 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
&rx_ring->dma, GFP_KERNEL); &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) { if (!rx_ring->desc) {
hw_dbg(&adapter->hw,
"Unable to allocate memory for "
"the receive descriptor ring\n");
vfree(rx_ring->rx_buffer_info); vfree(rx_ring->rx_buffer_info);
rx_ring->rx_buffer_info = NULL; rx_ring->rx_buffer_info = NULL;
goto alloc_failed; goto alloc_failed;

View File

@ -1969,13 +1969,8 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
rxq->descs = dma_alloc_coherent(pp->dev->dev.parent, rxq->descs = dma_alloc_coherent(pp->dev->dev.parent,
rxq->size * MVNETA_DESC_ALIGNED_SIZE, rxq->size * MVNETA_DESC_ALIGNED_SIZE,
&rxq->descs_phys, GFP_KERNEL); &rxq->descs_phys, GFP_KERNEL);
if (rxq->descs == NULL) { if (rxq->descs == NULL)
netdev_err(pp->dev,
"rxq=%d: Can't allocate %d bytes for %d RX descr\n",
rxq->id, rxq->size * MVNETA_DESC_ALIGNED_SIZE,
rxq->size);
return -ENOMEM; return -ENOMEM;
}
BUG_ON(rxq->descs != BUG_ON(rxq->descs !=
PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE));
@ -2029,13 +2024,8 @@ static int mvneta_txq_init(struct mvneta_port *pp,
txq->descs = dma_alloc_coherent(pp->dev->dev.parent, txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
txq->size * MVNETA_DESC_ALIGNED_SIZE, txq->size * MVNETA_DESC_ALIGNED_SIZE,
&txq->descs_phys, GFP_KERNEL); &txq->descs_phys, GFP_KERNEL);
if (txq->descs == NULL) { if (txq->descs == NULL)
netdev_err(pp->dev,
"txQ=%d: Can't allocate %d bytes for %d TX descr\n",
txq->id, txq->size * MVNETA_DESC_ALIGNED_SIZE,
txq->size);
return -ENOMEM; return -ENOMEM;
}
/* Make sure descriptor address is cache line size aligned */ /* Make sure descriptor address is cache line size aligned */
BUG_ON(txq->descs != BUG_ON(txq->descs !=

View File

@ -1024,11 +1024,9 @@ static int rxq_init(struct net_device *dev)
pep->rx_desc_area_size = size; pep->rx_desc_area_size = size;
pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, pep->p_rx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
&pep->rx_desc_dma, GFP_KERNEL); &pep->rx_desc_dma, GFP_KERNEL);
if (!pep->p_rx_desc_area) { if (!pep->p_rx_desc_area)
printk(KERN_ERR "%s: Cannot alloc RX ring (size %d bytes)\n",
dev->name, size);
goto out; goto out;
}
memset((void *)pep->p_rx_desc_area, 0, size); memset((void *)pep->p_rx_desc_area, 0, size);
/* initialize the next_desc_ptr links in the Rx descriptors ring */ /* initialize the next_desc_ptr links in the Rx descriptors ring */
p_rx_desc = pep->p_rx_desc_area; p_rx_desc = pep->p_rx_desc_area;
@ -1087,11 +1085,8 @@ static int txq_init(struct net_device *dev)
pep->tx_desc_area_size = size; pep->tx_desc_area_size = size;
pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size, pep->p_tx_desc_area = dma_alloc_coherent(pep->dev->dev.parent, size,
&pep->tx_desc_dma, GFP_KERNEL); &pep->tx_desc_dma, GFP_KERNEL);
if (!pep->p_tx_desc_area) { if (!pep->p_tx_desc_area)
printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
dev->name, size);
goto out; goto out;
}
memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size); memset((void *)pep->p_tx_desc_area, 0, pep->tx_desc_area_size);
/* Initialize the next_desc_ptr links in the Tx descriptors ring */ /* Initialize the next_desc_ptr links in the Tx descriptors ring */
p_tx_desc = pep->p_tx_desc_area; p_tx_desc = pep->p_tx_desc_area;

View File

@ -1837,11 +1837,9 @@ int mlx4_cmd_init(struct mlx4_dev *dev)
priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE, priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
&priv->mfunc.vhcr_dma, &priv->mfunc.vhcr_dma,
GFP_KERNEL); GFP_KERNEL);
if (!priv->mfunc.vhcr) { if (!priv->mfunc.vhcr)
mlx4_err(dev, "Couldn't allocate VHCR.\n");
goto err_hcr; goto err_hcr;
} }
}
priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev, priv->cmd.pool = pci_pool_create("mlx4_cmd", dev->pdev,
MLX4_MAILBOX_SIZE, MLX4_MAILBOX_SIZE,

View File

@ -175,13 +175,13 @@ static int sonic_probe1(struct net_device *dev)
/* Allocate the entire chunk of memory for the descriptors. /* Allocate the entire chunk of memory for the descriptors.
Note that this cannot cross a 64K boundary. */ Note that this cannot cross a 64K boundary. */
if ((lp->descriptors = dma_alloc_coherent(lp->device, lp->descriptors = dma_alloc_coherent(lp->device,
SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), SIZEOF_SONIC_DESC *
&lp->descriptors_laddr, GFP_KERNEL)) == NULL) { SONIC_BUS_SCALE(lp->dma_bitmode),
printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", &lp->descriptors_laddr,
dev_name(lp->device)); GFP_KERNEL);
if (lp->descriptors == NULL)
goto out; goto out;
}
/* Now set up the pointers to point to the appropriate places */ /* Now set up the pointers to point to the appropriate places */
lp->cda = lp->descriptors; lp->cda = lp->descriptors;

View File

@ -202,13 +202,13 @@ static int macsonic_init(struct net_device *dev)
/* Allocate the entire chunk of memory for the descriptors. /* Allocate the entire chunk of memory for the descriptors.
Note that this cannot cross a 64K boundary. */ Note that this cannot cross a 64K boundary. */
if ((lp->descriptors = dma_alloc_coherent(lp->device, lp->descriptors = dma_alloc_coherent(lp->device,
SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), SIZEOF_SONIC_DESC *
&lp->descriptors_laddr, GFP_KERNEL)) == NULL) { SONIC_BUS_SCALE(lp->dma_bitmode),
printk(KERN_ERR "%s: couldn't alloc DMA memory for descriptors.\n", &lp->descriptors_laddr,
dev_name(lp->device)); GFP_KERNEL);
if (lp->descriptors == NULL)
return -ENOMEM; return -ENOMEM;
}
/* Now set up the pointers to point to the appropriate places */ /* Now set up the pointers to point to the appropriate places */
lp->cda = lp->descriptors; lp->cda = lp->descriptors;

View File

@ -197,14 +197,12 @@ static int __init sonic_probe1(struct net_device *dev)
* We also allocate extra space for a pointer to allow freeing * We also allocate extra space for a pointer to allow freeing
* this structure later on (in xtsonic_cleanup_module()). * this structure later on (in xtsonic_cleanup_module()).
*/ */
lp->descriptors = lp->descriptors = dma_alloc_coherent(lp->device,
dma_alloc_coherent(lp->device, SIZEOF_SONIC_DESC *
SIZEOF_SONIC_DESC * SONIC_BUS_SCALE(lp->dma_bitmode), SONIC_BUS_SCALE(lp->dma_bitmode),
&lp->descriptors_laddr, GFP_KERNEL); &lp->descriptors_laddr,
GFP_KERNEL);
if (lp->descriptors == NULL) { if (lp->descriptors == NULL) {
printk(KERN_ERR "%s: couldn't alloc DMA memory for "
" descriptors.\n", dev_name(lp->device));
err = -ENOMEM; err = -ENOMEM;
goto out; goto out;
} }

View File

@ -287,21 +287,14 @@ static int w90p910_init_desc(struct net_device *dev)
ether = netdev_priv(dev); ether = netdev_priv(dev);
pdev = ether->pdev; pdev = ether->pdev;
ether->tdesc = (struct tran_pdesc *) ether->tdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
dma_alloc_coherent(&pdev->dev, sizeof(struct tran_pdesc),
&ether->tdesc_phys, GFP_KERNEL); &ether->tdesc_phys, GFP_KERNEL);
if (!ether->tdesc)
if (!ether->tdesc) {
dev_err(&pdev->dev, "Failed to allocate memory for tx desc\n");
return -ENOMEM; return -ENOMEM;
}
ether->rdesc = (struct recv_pdesc *) ether->rdesc = dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
dma_alloc_coherent(&pdev->dev, sizeof(struct recv_pdesc),
&ether->rdesc_phys, GFP_KERNEL); &ether->rdesc_phys, GFP_KERNEL);
if (!ether->rdesc) { if (!ether->rdesc) {
dev_err(&pdev->dev, "Failed to allocate memory for rx desc\n");
dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc), dma_free_coherent(&pdev->dev, sizeof(struct tran_pdesc),
ether->tdesc, ether->tdesc_phys); ether->tdesc, ether->tdesc_phys);
return -ENOMEM; return -ENOMEM;

View File

@ -1409,9 +1409,7 @@ static int lpc_eth_drv_probe(struct platform_device *pdev)
dma_alloc_coherent(&pldat->pdev->dev, dma_alloc_coherent(&pldat->pdev->dev,
pldat->dma_buff_size, &dma_handle, pldat->dma_buff_size, &dma_handle,
GFP_KERNEL); GFP_KERNEL);
if (pldat->dma_buff_base_v == NULL) { if (pldat->dma_buff_base_v == NULL) {
dev_err(&pdev->dev, "error getting DMA region.\n");
ret = -ENOMEM; ret = -ENOMEM;
goto err_out_free_irq; goto err_out_free_irq;
} }

View File

@ -1471,10 +1471,9 @@ pch_gbe_alloc_rx_buffers_pool(struct pch_gbe_adapter *adapter,
rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size, rx_ring->rx_buff_pool = dma_alloc_coherent(&pdev->dev, size,
&rx_ring->rx_buff_pool_logic, &rx_ring->rx_buff_pool_logic,
GFP_KERNEL); GFP_KERNEL);
if (!rx_ring->rx_buff_pool) { if (!rx_ring->rx_buff_pool)
pr_err("Unable to allocate memory for the receive pool buffer\n");
return -ENOMEM; return -ENOMEM;
}
memset(rx_ring->rx_buff_pool, 0, size); memset(rx_ring->rx_buff_pool, 0, size);
rx_ring->rx_buff_pool_size = size; rx_ring->rx_buff_pool_size = size;
for (i = 0; i < rx_ring->count; i++) { for (i = 0; i < rx_ring->count; i++) {
@ -1777,7 +1776,6 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
&tx_ring->dma, GFP_KERNEL); &tx_ring->dma, GFP_KERNEL);
if (!tx_ring->desc) { if (!tx_ring->desc) {
vfree(tx_ring->buffer_info); vfree(tx_ring->buffer_info);
pr_err("Unable to allocate memory for the transmit descriptor ring\n");
return -ENOMEM; return -ENOMEM;
} }
memset(tx_ring->desc, 0, tx_ring->size); memset(tx_ring->desc, 0, tx_ring->size);
@ -1821,9 +1819,7 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
&rx_ring->dma, GFP_KERNEL); &rx_ring->dma, GFP_KERNEL);
if (!rx_ring->desc) { if (!rx_ring->desc) {
pr_err("Unable to allocate memory for the receive descriptor ring\n");
vfree(rx_ring->buffer_info); vfree(rx_ring->buffer_info);
return -ENOMEM; return -ENOMEM;
} }

View File

@ -532,20 +532,15 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32), ptr = (__le32 *)dma_alloc_coherent(&pdev->dev, sizeof(u32),
&tx_ring->hw_cons_phys_addr, &tx_ring->hw_cons_phys_addr,
GFP_KERNEL); GFP_KERNEL);
if (ptr == NULL)
if (ptr == NULL) {
dev_err(&pdev->dev, "failed to allocate tx consumer\n");
return -ENOMEM; return -ENOMEM;
}
tx_ring->hw_consumer = ptr; tx_ring->hw_consumer = ptr;
/* cmd desc ring */ /* cmd desc ring */
addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring), addr = dma_alloc_coherent(&pdev->dev, TX_DESC_RINGSIZE(tx_ring),
&tx_ring->phys_addr, &tx_ring->phys_addr,
GFP_KERNEL); GFP_KERNEL);
if (addr == NULL) { if (addr == NULL) {
dev_err(&pdev->dev,
"failed to allocate tx desc ring\n");
err = -ENOMEM; err = -ENOMEM;
goto err_out_free; goto err_out_free;
} }
@ -559,8 +554,6 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
RCV_DESC_RINGSIZE(rds_ring), RCV_DESC_RINGSIZE(rds_ring),
&rds_ring->phys_addr, GFP_KERNEL); &rds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) { if (addr == NULL) {
dev_err(&pdev->dev,
"failed to allocate rds ring [%d]\n", ring);
err = -ENOMEM; err = -ENOMEM;
goto err_out_free; goto err_out_free;
} }
@ -575,8 +568,6 @@ int qlcnic_alloc_hw_resources(struct qlcnic_adapter *adapter)
STATUS_DESC_RINGSIZE(sds_ring), STATUS_DESC_RINGSIZE(sds_ring),
&sds_ring->phys_addr, GFP_KERNEL); &sds_ring->phys_addr, GFP_KERNEL);
if (addr == NULL) { if (addr == NULL) {
dev_err(&pdev->dev,
"failed to allocate sds ring [%d]\n", ring);
err = -ENOMEM; err = -ENOMEM;
goto err_out_free; goto err_out_free;
} }
@ -950,10 +941,9 @@ int qlcnic_get_port_stats(struct qlcnic_adapter *adapter, const u8 func,
stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
&stats_dma_t, GFP_KERNEL); &stats_dma_t, GFP_KERNEL);
if (!stats_addr) { if (!stats_addr)
dev_err(&adapter->pdev->dev, "Unable to allocate memory\n");
return -ENOMEM; return -ENOMEM;
}
memset(stats_addr, 0, stats_size); memset(stats_addr, 0, stats_size);
arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12; arg1 = func | QLCNIC_STATS_VERSION << 8 | QLCNIC_STATS_PORT << 12;
@ -1004,11 +994,9 @@ int qlcnic_get_mac_stats(struct qlcnic_adapter *adapter,
stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size, stats_addr = dma_alloc_coherent(&adapter->pdev->dev, stats_size,
&stats_dma_t, GFP_KERNEL); &stats_dma_t, GFP_KERNEL);
if (!stats_addr) { if (!stats_addr)
dev_err(&adapter->pdev->dev,
"%s: Unable to allocate memory.\n", __func__);
return -ENOMEM; return -ENOMEM;
}
memset(stats_addr, 0, stats_size); memset(stats_addr, 0, stats_size);
qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS); qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_MAC_STATS);
cmd.req.arg[1] = stats_size << 16; cmd.req.arg[1] = stats_size << 16;

View File

@ -810,11 +810,8 @@ static int __qlcnic_fw_cmd_get_minidump_temp(struct qlcnic_adapter *adapter,
tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size, tmp_addr = dma_alloc_coherent(&adapter->pdev->dev, temp_size,
&tmp_addr_t, GFP_KERNEL); &tmp_addr_t, GFP_KERNEL);
if (!tmp_addr) { if (!tmp_addr)
dev_err(&adapter->pdev->dev,
"Can't get memory for FW dump template\n");
return -ENOMEM; return -ENOMEM;
}
if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) { if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_TEMP_HDR)) {
err = -ENOMEM; err = -ENOMEM;

View File

@ -909,10 +909,7 @@ static int sh_eth_ring_init(struct net_device *ndev)
rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring; rx_ringsize = sizeof(struct sh_eth_rxdesc) * mdp->num_rx_ring;
mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma, mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
GFP_KERNEL); GFP_KERNEL);
if (!mdp->rx_ring) { if (!mdp->rx_ring) {
dev_err(&ndev->dev, "Cannot allocate Rx Ring (size %d bytes)\n",
rx_ringsize);
ret = -ENOMEM; ret = -ENOMEM;
goto desc_ring_free; goto desc_ring_free;
} }
@ -924,8 +921,6 @@ static int sh_eth_ring_init(struct net_device *ndev)
mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma, mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
GFP_KERNEL); GFP_KERNEL);
if (!mdp->tx_ring) { if (!mdp->tx_ring) {
dev_err(&ndev->dev, "Cannot allocate Tx Ring (size %d bytes)\n",
tx_ringsize);
ret = -ENOMEM; ret = -ENOMEM;
goto desc_ring_free; goto desc_ring_free;
} }

View File

@ -534,25 +534,17 @@ static void init_dma_desc_rings(struct net_device *dev)
GFP_KERNEL); GFP_KERNEL);
priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *), priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
GFP_KERNEL); GFP_KERNEL);
priv->dma_rx = priv->dma_rx = dma_alloc_coherent(priv->device,
(struct dma_desc *)dma_alloc_coherent(priv->device, rxsize * sizeof(struct dma_desc),
rxsize * &priv->dma_rx_phy, GFP_KERNEL);
sizeof(struct dma_desc),
&priv->dma_rx_phy,
GFP_KERNEL);
priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *), priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
GFP_KERNEL); GFP_KERNEL);
priv->dma_tx = priv->dma_tx = dma_alloc_coherent(priv->device,
(struct dma_desc *)dma_alloc_coherent(priv->device, txsize * sizeof(struct dma_desc),
txsize * &priv->dma_tx_phy, GFP_KERNEL);
sizeof(struct dma_desc),
&priv->dma_tx_phy,
GFP_KERNEL);
if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL)) { if ((priv->dma_rx == NULL) || (priv->dma_tx == NULL))
pr_err("%s:ERROR allocating the DMA Tx/Rx desc\n", __func__);
return; return;
}
DBG(probe, INFO, "stmmac (%s) DMA desc: virt addr (Rx %p, " DBG(probe, INFO, "stmmac (%s) DMA desc: virt addr (Rx %p, "
"Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n", "Tx %p)\n\tDMA phy addr (Rx 0x%08x, Tx 0x%08x)\n",

View File

@ -1169,10 +1169,8 @@ static int bigmac_ether_init(struct platform_device *op,
bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev, bp->bmac_block = dma_alloc_coherent(&bp->bigmac_op->dev,
PAGE_SIZE, PAGE_SIZE,
&bp->bblock_dvma, GFP_ATOMIC); &bp->bblock_dvma, GFP_ATOMIC);
if (bp->bmac_block == NULL || bp->bblock_dvma == 0) { if (bp->bmac_block == NULL || bp->bblock_dvma == 0)
printk(KERN_ERR "BIGMAC: Cannot allocate consistent DMA.\n");
goto fail_and_cleanup; goto fail_and_cleanup;
}
/* Get the board revision of this BigMAC. */ /* Get the board revision of this BigMAC. */
bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node, bp->board_rev = of_getintprop_default(bp->bigmac_op->dev.of_node,

View File

@ -2752,10 +2752,8 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
&hp->hblock_dvma, &hp->hblock_dvma,
GFP_ATOMIC); GFP_ATOMIC);
err = -ENOMEM; err = -ENOMEM;
if (!hp->happy_block) { if (!hp->happy_block)
printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n");
goto err_out_iounmap; goto err_out_iounmap;
}
/* Force check of the link first time we are brought up. */ /* Force check of the link first time we are brought up. */
hp->linkcheck = 0; hp->linkcheck = 0;
@ -3068,14 +3066,11 @@ static int happy_meal_pci_probe(struct pci_dev *pdev,
hp->happy_bursts = DMA_BURSTBITS; hp->happy_bursts = DMA_BURSTBITS;
#endif #endif
hp->happy_block = (struct hmeal_init_block *) hp->happy_block = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &hp->hblock_dvma, GFP_KERNEL); &hp->hblock_dvma, GFP_KERNEL);
err = -ENODEV; err = -ENODEV;
if (!hp->happy_block) { if (!hp->happy_block)
printk(KERN_ERR "happymeal(PCI): Cannot get hme init block.\n");
goto err_out_iounmap; goto err_out_iounmap;
}
hp->linkcheck = 0; hp->linkcheck = 0;
hp->timer_state = asleep; hp->timer_state = asleep;

View File

@ -1310,10 +1310,7 @@ static int tsi108_open(struct net_device *dev)
data->rxring = dma_alloc_coherent(NULL, rxring_size, data->rxring = dma_alloc_coherent(NULL, rxring_size,
&data->rxdma, GFP_KERNEL); &data->rxdma, GFP_KERNEL);
if (!data->rxring) { if (!data->rxring) {
printk(KERN_DEBUG
"TSI108_ETH: failed to allocate memory for rxring!\n");
return -ENOMEM; return -ENOMEM;
} else { } else {
memset(data->rxring, 0, rxring_size); memset(data->rxring, 0, rxring_size);
@ -1321,10 +1318,7 @@ static int tsi108_open(struct net_device *dev)
data->txring = dma_alloc_coherent(NULL, txring_size, data->txring = dma_alloc_coherent(NULL, txring_size,
&data->txdma, GFP_KERNEL); &data->txdma, GFP_KERNEL);
if (!data->txring) { if (!data->txring) {
printk(KERN_DEBUG
"TSI108_ETH: failed to allocate memory for txring!\n");
pci_free_consistent(0, rxring_size, data->rxring, data->rxdma); pci_free_consistent(0, rxring_size, data->rxring, data->rxdma);
return -ENOMEM; return -ENOMEM;
} else { } else {

View File

@ -246,19 +246,14 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent, lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->tx_bd_v) * TX_BD_NUM, sizeof(*lp->tx_bd_v) * TX_BD_NUM,
&lp->tx_bd_p, GFP_KERNEL); &lp->tx_bd_p, GFP_KERNEL);
if (!lp->tx_bd_v) { if (!lp->tx_bd_v)
dev_err(&ndev->dev,
"unable to allocate DMA TX buffer descriptors");
goto out; goto out;
}
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * RX_BD_NUM, sizeof(*lp->rx_bd_v) * RX_BD_NUM,
&lp->rx_bd_p, GFP_KERNEL); &lp->rx_bd_p, GFP_KERNEL);
if (!lp->rx_bd_v) { if (!lp->rx_bd_v)
dev_err(&ndev->dev,
"unable to allocate DMA RX buffer descriptors");
goto out; goto out;
}
memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM); memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
for (i = 0; i < TX_BD_NUM; i++) { for (i = 0; i < TX_BD_NUM; i++) {

View File

@ -205,21 +205,15 @@ static int axienet_dma_bd_init(struct net_device *ndev)
sizeof(*lp->tx_bd_v) * TX_BD_NUM, sizeof(*lp->tx_bd_v) * TX_BD_NUM,
&lp->tx_bd_p, &lp->tx_bd_p,
GFP_KERNEL); GFP_KERNEL);
if (!lp->tx_bd_v) { if (!lp->tx_bd_v)
dev_err(&ndev->dev, "unable to allocate DMA Tx buffer "
"descriptors");
goto out; goto out;
}
lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent, lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
sizeof(*lp->rx_bd_v) * RX_BD_NUM, sizeof(*lp->rx_bd_v) * RX_BD_NUM,
&lp->rx_bd_p, &lp->rx_bd_p,
GFP_KERNEL); GFP_KERNEL);
if (!lp->rx_bd_v) { if (!lp->rx_bd_v)
dev_err(&ndev->dev, "unable to allocate DMA Rx buffer "
"descriptors");
goto out; goto out;
}
memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM); memset(lp->tx_bd_v, 0, sizeof(*lp->tx_bd_v) * TX_BD_NUM);
for (i = 0; i < TX_BD_NUM; i++) { for (i = 0; i < TX_BD_NUM; i++) {

View File

@ -1071,11 +1071,9 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size, bp->kmalloced = top_v = dma_alloc_coherent(bp->bus_dev, alloc_size,
&bp->kmalloced_dma, &bp->kmalloced_dma,
GFP_ATOMIC); GFP_ATOMIC);
if (top_v == NULL) { if (top_v == NULL)
printk("%s: Could not allocate memory for host buffers "
"and structures!\n", print_name);
return DFX_K_FAILURE; return DFX_K_FAILURE;
}
memset(top_v, 0, alloc_size); /* zero out memory before continuing */ memset(top_v, 0, alloc_size); /* zero out memory before continuing */
top_p = bp->kmalloced_dma; /* get physical address of buffer */ top_p = bp->kmalloced_dma; /* get physical address of buffer */

View File

@ -389,7 +389,8 @@ static int bfin_sir_startup(struct bfin_sir_port *port, struct net_device *dev)
set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev); set_dma_callback(port->rx_dma_channel, bfin_sir_dma_rx_int, dev);
set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev); set_dma_callback(port->tx_dma_channel, bfin_sir_dma_tx_int, dev);
port->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA); port->rx_dma_buf.buf = dma_alloc_coherent(NULL, PAGE_SIZE,
&dma_handle, GFP_DMA);
port->rx_dma_buf.head = 0; port->rx_dma_buf.head = 0;
port->rx_dma_buf.tail = 0; port->rx_dma_buf.tail = 0;
port->rx_dma_nrows = 0; port->rx_dma_nrows = 0;

View File

@ -564,20 +564,14 @@ static int smsc_ircc_open(unsigned int fir_base, unsigned int sir_base, u8 dma,
self->rx_buff.head = self->rx_buff.head =
dma_alloc_coherent(NULL, self->rx_buff.truesize, dma_alloc_coherent(NULL, self->rx_buff.truesize,
&self->rx_buff_dma, GFP_KERNEL); &self->rx_buff_dma, GFP_KERNEL);
if (self->rx_buff.head == NULL) { if (self->rx_buff.head == NULL)
IRDA_ERROR("%s, Can't allocate memory for receive buffer!\n",
driver_name);
goto err_out2; goto err_out2;
}
self->tx_buff.head = self->tx_buff.head =
dma_alloc_coherent(NULL, self->tx_buff.truesize, dma_alloc_coherent(NULL, self->tx_buff.truesize,
&self->tx_buff_dma, GFP_KERNEL); &self->tx_buff_dma, GFP_KERNEL);
if (self->tx_buff.head == NULL) { if (self->tx_buff.head == NULL)
IRDA_ERROR("%s, Can't allocate memory for transmit buffer!\n",
driver_name);
goto err_out3; goto err_out3;
}
memset(self->rx_buff.head, 0, self->rx_buff.truesize); memset(self->rx_buff.head, 0, self->rx_buff.truesize);
memset(self->tx_buff.head, 0, self->tx_buff.truesize); memset(self->tx_buff.head, 0, self->tx_buff.truesize);

View File

@ -83,8 +83,6 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
*/ */
vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL); vring->va = dma_alloc_coherent(dev, sz, &vring->pa, GFP_KERNEL);
if (!vring->va) { if (!vring->va) {
wil_err(wil, "vring_alloc [%d] failed to alloc DMA mem\n",
vring->size);
kfree(vring->ctx); kfree(vring->ctx);
vring->ctx = NULL; vring->ctx = NULL;
return -ENOMEM; return -ENOMEM;

View File

@ -335,11 +335,8 @@ static int alloc_ringmemory(struct b43legacy_dmaring *ring)
B43legacy_DMA_RINGMEMSIZE, B43legacy_DMA_RINGMEMSIZE,
&(ring->dmabase), &(ring->dmabase),
GFP_KERNEL); GFP_KERNEL);
if (!ring->descbase) { if (!ring->descbase)
b43legacyerr(ring->dev->wl, "DMA ringmemory allocation"
" failed\n");
return -ENOMEM; return -ENOMEM;
}
memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE); memset(ring->descbase, 0, B43legacy_DMA_RINGMEMSIZE);
return 0; return 0;

View File

@ -2379,10 +2379,8 @@ il3945_hw_set_hw_params(struct il_priv *il)
il->_3945.shared_virt = il->_3945.shared_virt =
dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared), dma_alloc_coherent(&il->pci_dev->dev, sizeof(struct il3945_shared),
&il->_3945.shared_phys, GFP_KERNEL); &il->_3945.shared_phys, GFP_KERNEL);
if (!il->_3945.shared_virt) { if (!il->_3945.shared_virt)
IL_ERR("failed to allocate pci memory\n");
return -ENOMEM; return -ENOMEM;
}
il->hw_params.bcast_id = IL3945_BROADCAST_ID; il->hw_params.bcast_id = IL3945_BROADCAST_ID;

View File

@ -2941,10 +2941,9 @@ il_tx_queue_alloc(struct il_priv *il, struct il_tx_queue *txq, u32 id)
* shared with device */ * shared with device */
txq->tfds = txq->tfds =
dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL); dma_alloc_coherent(dev, tfd_sz, &txq->q.dma_addr, GFP_KERNEL);
if (!txq->tfds) { if (!txq->tfds)
IL_ERR("Fail to alloc TFDs\n");
goto error; goto error;
}
txq->q.id = id; txq->q.id = id;
return 0; return 0;

View File

@ -501,10 +501,8 @@ static int iwl_pcie_txq_alloc(struct iwl_trans *trans,
* shared with device */ * shared with device */
txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
&txq->q.dma_addr, GFP_KERNEL); &txq->q.dma_addr, GFP_KERNEL);
if (!txq->tfds) { if (!txq->tfds)
IWL_ERR(trans, "dma_alloc_coherent(%zd) failed\n", tfd_sz);
goto error; goto error;
}
BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs)); BUILD_BUG_ON(IWL_HCMD_SCRATCHBUF_SIZE != sizeof(*txq->scratchbufs));
BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) != BUILD_BUG_ON(offsetof(struct iwl_pcie_txq_scratch_buf, scratch) !=