bnx2: Use kmalloc_size_roundup() to match ksize() usage
Round up allocations with kmalloc_size_roundup() so that build_skb()'s use of ksize() is always accurate and no special handling of the memory is needed by KASAN, UBSAN_BOUNDS, nor FORTIFY_SOURCE.

Cc: Rasesh Mody <rmody@marvell.com>
Cc: GR-Linux-NIC-Dev@marvell.com
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Jakub Kicinski <kuba@kernel.org>
Cc: Paolo Abeni <pabeni@redhat.com>
Cc: netdev@vger.kernel.org
Signed-off-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20221022021004.gonna.489-kees@kernel.org
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
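For context, the pattern applied by this change is to round the requested size up front, so the size the driver records is the size kmalloc() actually provides and therefore matches what ksize()/build_skb() later report. A minimal sketch of that pattern, not taken from the driver (buf_len and pad are illustrative placeholders):

	size_t want  = SKB_DATA_ALIGN(buf_len + pad) + NET_SKB_PAD +
		       SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	/* Round up to the slab bucket kmalloc() will really use (e.g. 1984 -> 2048),
	 * so the tracked size equals ksize(data) and there is no hidden slack for
	 * KASAN/UBSAN_BOUNDS/FORTIFY_SOURCE to special-case.
	 */
	size_t alloc = kmalloc_size_roundup(want);
	u8 *data = kmalloc(alloc, GFP_KERNEL);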
parent 6459838af0
commit d6dd508080
@@ -5415,8 +5415,9 @@ bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
 
 	bp->rx_buf_use_size = rx_size;
 	/* hw alignment + build_skb() overhead*/
-	bp->rx_buf_size = SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
-		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+	bp->rx_buf_size = kmalloc_size_roundup(
+		SKB_DATA_ALIGN(bp->rx_buf_use_size + BNX2_RX_ALIGN) +
+		NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
 	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
 	bp->rx_ring_size = size;
 	bp->rx_max_ring = bnx2_find_max_ring(size, BNX2_MAX_RX_RINGS);