e1000e: use GFP_KERNEL allocations at init time

In process context, where sleeping is allowed, favor GFP_KERNEL
allocations over GFP_ATOMIC ones.

-v2: fixed checkpatch.pl warnings

CC: Eric Dumazet <eric.dumazet@gmail.com>
CC: Ben Greear <greearb@candelatech.com>
CC: Bruce Allan <bruce.w.allan@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Tested-by: Aaron Brown <aaron.f.brown@intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Jeff Kirsher 2011-07-12 16:10:12 +00:00 committed by David S. Miller
parent a3d72d5d01
commit c2fed9965c
2 changed files with 22 additions and 17 deletions
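The change follows a common pattern: the Rx buffer-refill helpers gain a gfp_t
parameter, the init/ifup path (process context, sleeping allowed) passes
GFP_KERNEL, and the NAPI receive-clean path (softirq context, must not sleep)
keeps passing GFP_ATOMIC. A minimal sketch of that pattern, not the driver's
actual code (the example_* names and the 1522-byte length are hypothetical):

/*
 * Sketch only: the refill helper takes a gfp_t so each caller can pick
 * allocation behaviour suited to its context.
 */
#include <linux/gfp.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void example_refill_rx(struct net_device *netdev, int count, gfp_t gfp)
{
        while (count--) {
                /* may sleep with GFP_KERNEL; never sleeps with GFP_ATOMIC */
                struct sk_buff *skb =
                        __netdev_alloc_skb_ip_align(netdev, 1522, gfp);

                if (!skb)
                        break;  /* better luck on the next refill pass */

                /* a real driver would DMA-map the buffer and post it to the
                 * Rx ring; the sketch just frees it again */
                dev_kfree_skb_any(skb);
        }
}

/* ifup/init path: process context, sleeping allowed */
static void example_open_refill(struct net_device *netdev)
{
        example_refill_rx(netdev, 256, GFP_KERNEL);
}

/* NAPI poll path: softirq context, must not sleep */
static void example_poll_refill(struct net_device *netdev, int cleaned)
{
        example_refill_rx(netdev, cleaned, GFP_ATOMIC);
}

Hard-coding GFP_ATOMIC inside the helper forces the weakest allocation mode on
every caller; plumbing the flag through lets the ring be filled more reliably
at bring-up while the hot path stays non-sleeping, which is what the hunks
below do for the three e1000e alloc_rx_buf implementations and their call
sites.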

e1000e/e1000.h

@@ -334,7 +334,7 @@ struct e1000_adapter {
                           int *work_done, int work_to_do)
                                              ____cacheline_aligned_in_smp;
         void (*alloc_rx_buf) (struct e1000_adapter *adapter,
-                              int cleaned_count);
+                              int cleaned_count, gfp_t gfp);
         struct e1000_ring *rx_ring;
         u32 rx_int_delay;

e1000e/netdev.c

@@ -523,7 +523,7 @@ static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
  * @adapter: address of board private structure
  **/
 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
-                                   int cleaned_count)
+                                   int cleaned_count, gfp_t gfp)
 {
         struct net_device *netdev = adapter->netdev;
         struct pci_dev *pdev = adapter->pdev;
@@ -544,7 +544,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
                         goto map_skb;
                 }
-                skb = netdev_alloc_skb_ip_align(netdev, bufsz);
+                skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
                 if (!skb) {
                         /* Better luck next round */
                         adapter->alloc_rx_buff_failed++;
@@ -589,7 +589,7 @@ map_skb:
  * @adapter: address of board private structure
  **/
 static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
-                                      int cleaned_count)
+                                      int cleaned_count, gfp_t gfp)
 {
         struct net_device *netdev = adapter->netdev;
         struct pci_dev *pdev = adapter->pdev;
@@ -615,7 +615,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                                 continue;
                         }
                         if (!ps_page->page) {
-                                ps_page->page = alloc_page(GFP_ATOMIC);
+                                ps_page->page = alloc_page(gfp);
                                 if (!ps_page->page) {
                                         adapter->alloc_rx_buff_failed++;
                                         goto no_buffers;
@@ -641,8 +641,9 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                                         cpu_to_le64(ps_page->dma);
                         }
-                skb = netdev_alloc_skb_ip_align(netdev,
-                                                adapter->rx_ps_bsize0);
+                skb = __netdev_alloc_skb_ip_align(netdev,
+                                                  adapter->rx_ps_bsize0,
+                                                  gfp);
                 if (!skb) {
                         adapter->alloc_rx_buff_failed++;
@@ -692,7 +693,7 @@ no_buffers:
  **/
 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
-                                         int cleaned_count)
+                                         int cleaned_count, gfp_t gfp)
 {
         struct net_device *netdev = adapter->netdev;
         struct pci_dev *pdev = adapter->pdev;
@@ -713,7 +714,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
                         goto check_page;
                 }
-                skb = netdev_alloc_skb_ip_align(netdev, bufsz);
+                skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
                 if (unlikely(!skb)) {
                         /* Better luck next round */
                         adapter->alloc_rx_buff_failed++;
@@ -724,7 +725,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
 check_page:
                 /* allocate a new page if necessary */
                 if (!buffer_info->page) {
-                        buffer_info->page = alloc_page(GFP_ATOMIC);
+                        buffer_info->page = alloc_page(gfp);
                         if (unlikely(!buffer_info->page)) {
                                 adapter->alloc_rx_buff_failed++;
                                 break;
@@ -888,7 +889,8 @@ next_desc:
                 /* return some buffers to hardware, one at a time is too slow */
                 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
-                        adapter->alloc_rx_buf(adapter, cleaned_count);
+                        adapter->alloc_rx_buf(adapter, cleaned_count,
+                                              GFP_ATOMIC);
                         cleaned_count = 0;
                 }
@@ -900,7 +902,7 @@ next_desc:
         cleaned_count = e1000_desc_unused(rx_ring);
         if (cleaned_count)
-                adapter->alloc_rx_buf(adapter, cleaned_count);
+                adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
         adapter->total_rx_bytes += total_rx_bytes;
         adapter->total_rx_packets += total_rx_packets;
@@ -1230,7 +1232,8 @@ next_desc:
                 /* return some buffers to hardware, one at a time is too slow */
                 if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
-                        adapter->alloc_rx_buf(adapter, cleaned_count);
+                        adapter->alloc_rx_buf(adapter, cleaned_count,
+                                              GFP_ATOMIC);
                         cleaned_count = 0;
                 }
@@ -1244,7 +1247,7 @@ next_desc:
         cleaned_count = e1000_desc_unused(rx_ring);
         if (cleaned_count)
-                adapter->alloc_rx_buf(adapter, cleaned_count);
+                adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
         adapter->total_rx_bytes += total_rx_bytes;
         adapter->total_rx_packets += total_rx_packets;
@@ -1411,7 +1414,8 @@ next_desc:
                 /* return some buffers to hardware, one at a time is too slow */
                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
-                        adapter->alloc_rx_buf(adapter, cleaned_count);
+                        adapter->alloc_rx_buf(adapter, cleaned_count,
+                                              GFP_ATOMIC);
                         cleaned_count = 0;
                 }
@@ -1423,7 +1427,7 @@ next_desc:
         cleaned_count = e1000_desc_unused(rx_ring);
         if (cleaned_count)
-                adapter->alloc_rx_buf(adapter, cleaned_count);
+                adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
         adapter->total_rx_bytes += total_rx_bytes;
         adapter->total_rx_packets += total_rx_packets;
@@ -3105,7 +3109,8 @@ static void e1000_configure(struct e1000_adapter *adapter)
         e1000_configure_tx(adapter);
         e1000_setup_rctl(adapter);
         e1000_configure_rx(adapter);
-        adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring));
+        adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring),
+                              GFP_KERNEL);
 }
 /**