Merge branch 'net-ipa-small-collected-improvements'
Alex Elder says:

====================
net: ipa: small collected improvements

This series contains a somewhat unrelated set of changes, some of them
inspired by recent work posted for back-port. For the most part they are
meant to improve the code without changing its functionality. Each
basically stands on its own.
====================

Link: https://lore.kernel.org/r/20211124202511.862588-1-elder@linaro.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 342e5f9fc7
@@ -339,10 +339,10 @@ static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
  * completion to be signaled. Returns true if the command completes
  * or false if it times out.
  */
-static bool
-gsi_command(struct gsi *gsi, u32 reg, u32 val, struct completion *completion)
+static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
 {
 	unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
+	struct completion *completion = &gsi->completion;
 
 	reinit_completion(completion);
 
@@ -366,8 +366,6 @@ gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
 static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
 				 enum gsi_evt_cmd_opcode opcode)
 {
-	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
-	struct completion *completion = &evt_ring->completion;
 	struct device *dev = gsi->dev;
 	bool timeout;
 	u32 val;
@@ -378,7 +376,7 @@ static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
 	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
 	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);
 
-	timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
+	timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val);
 
 	gsi_irq_ev_ctrl_disable(gsi);
 
@@ -478,7 +476,6 @@ static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
 static void
 gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
 {
-	struct completion *completion = &channel->completion;
 	u32 channel_id = gsi_channel_id(channel);
 	struct gsi *gsi = channel->gsi;
 	struct device *dev = gsi->dev;
@@ -490,7 +487,7 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
 
 	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
 	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
-	timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);
+	timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val);
 
 	gsi_irq_ch_ctrl_disable(gsi);
 
@@ -1074,13 +1071,10 @@ static void gsi_isr_chan_ctrl(struct gsi *gsi)
 
 	while (channel_mask) {
 		u32 channel_id = __ffs(channel_mask);
-		struct gsi_channel *channel;
 
 		channel_mask ^= BIT(channel_id);
 
-		channel = &gsi->channel[channel_id];
-
-		complete(&channel->completion);
+		complete(&gsi->completion);
 	}
 }
 
@@ -1094,13 +1088,10 @@ static void gsi_isr_evt_ctrl(struct gsi *gsi)
 
 	while (event_mask) {
 		u32 evt_ring_id = __ffs(event_mask);
-		struct gsi_evt_ring *evt_ring;
 
 		event_mask ^= BIT(evt_ring_id);
 
-		evt_ring = &gsi->evt_ring[evt_ring_id];
-
-		complete(&evt_ring->completion);
+		complete(&gsi->completion);
 	}
 }
 
@@ -1110,7 +1101,7 @@ gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
 {
 	if (code == GSI_OUT_OF_RESOURCES) {
 		dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
-		complete(&gsi->channel[channel_id].completion);
+		complete(&gsi->completion);
 		return;
 	}
 
@@ -1127,7 +1118,7 @@ gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
 		struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
 		u32 channel_id = gsi_channel_id(evt_ring->channel);
 
-		complete(&evt_ring->completion);
+		complete(&gsi->completion);
 		dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
 			channel_id);
 		return;
@@ -1651,7 +1642,6 @@ static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
 static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
 			       enum gsi_generic_cmd_opcode opcode)
 {
-	struct completion *completion = &gsi->completion;
 	bool timeout;
 	u32 val;
 
@@ -1675,7 +1665,7 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
 	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
 	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
 
-	timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
+	timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);
 
 	/* Disable the GP_INT1 IRQ type again */
 	iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
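The hunks above (presumably drivers/net/ipa/gsi.c) all point the command paths at the single completion embedded in struct gsi instead of a per-channel or per-event-ring one. A minimal sketch of that pattern follows, with hypothetical demo_* names; only the <linux/completion.h> and <linux/jiffies.h> calls are real kernel APIs, and the sketch assumes commands are serialized (as they are under gsi->mutex in the driver) so one completion per device suffices.

/* Sketch only, not part of the patch: issue a command, wait on the one
 * shared completion, and signal it from the IRQ handler.
 */
#include <linux/completion.h>
#include <linux/jiffies.h>

#define DEMO_CMD_TIMEOUT	50	/* milliseconds; placeholder value */

struct demo_dev {
	struct completion completion;	/* signals command completion */
	/* ... registers, channel state, etc. ... */
};

/* Returns true if the command completes, false if it times out */
static bool demo_command(struct demo_dev *dd)
{
	unsigned long timeout = msecs_to_jiffies(DEMO_CMD_TIMEOUT);
	struct completion *completion = &dd->completion;

	reinit_completion(completion);

	/* ... write the command register here ... */

	return !!wait_for_completion_timeout(completion, timeout);
}

/* Called from the interrupt handler when the command-done IRQ fires */
static void demo_isr_cmd_done(struct demo_dev *dd)
{
	complete(&dd->completion);
}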
@@ -1975,18 +1965,6 @@ static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
 	gsi_evt_ring_id_free(gsi, evt_ring_id);
 }
 
-/* Init function for event rings; there is no gsi_evt_ring_exit() */
-static void gsi_evt_ring_init(struct gsi *gsi)
-{
-	u32 evt_ring_id = 0;
-
-	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
-	gsi->ieob_enabled_bitmap = 0;
-	do
-		init_completion(&gsi->evt_ring[evt_ring_id].completion);
-	while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
-}
-
 static bool gsi_channel_data_valid(struct gsi *gsi,
 				   const struct ipa_gsi_endpoint_data *data)
 {
@@ -2069,7 +2047,6 @@ static int gsi_channel_init_one(struct gsi *gsi,
 	channel->tlv_count = data->channel.tlv_count;
 	channel->tre_count = tre_count;
 	channel->event_count = data->channel.event_count;
-	init_completion(&channel->completion);
 
 	ret = gsi_channel_evt_ring_init(channel);
 	if (ret)
@@ -2129,7 +2106,8 @@ static int gsi_channel_init(struct gsi *gsi, u32 count,
 	/* IPA v4.2 requires the AP to allocate channels for the modem */
 	modem_alloc = gsi->version == IPA_VERSION_4_2;
 
-	gsi_evt_ring_init(gsi);		/* No matching exit required */
+	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
+	gsi->ieob_enabled_bitmap = 0;
 
 	/* The endpoint data array is indexed by endpoint name */
 	for (i = 0; i < count; i++) {
@@ -114,8 +114,6 @@ struct gsi_channel {
 	u16 tre_count;
 	u16 event_count;
 
-	struct completion completion;	/* signals channel command completion */
-
 	struct gsi_ring tre_ring;
 	u32 evt_ring_id;
 
@@ -141,28 +139,27 @@ enum gsi_evt_ring_state {
 
 struct gsi_evt_ring {
 	struct gsi_channel *channel;
-	struct completion completion;	/* signals event ring state changes */
 	struct gsi_ring ring;
 };
 
 struct gsi {
 	struct device *dev;		/* Same as IPA device */
 	enum ipa_version version;
-	struct net_device dummy_dev;	/* needed for NAPI */
 	void __iomem *virt_raw;		/* I/O mapped address range */
 	void __iomem *virt;		/* Adjusted for most registers */
 	u32 irq;
 	u32 channel_count;
 	u32 evt_ring_count;
-	struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
-	struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
 	u32 event_bitmap;		/* allocated event rings */
 	u32 modem_channel_bitmap;	/* modem channels to allocate */
 	u32 type_enabled_bitmap;	/* GSI IRQ types enabled */
 	u32 ieob_enabled_bitmap;	/* IEOB IRQ enabled (event rings) */
-	struct completion completion;	/* for global EE commands */
 	int result;			/* Negative errno (generic commands) */
+	struct completion completion;	/* Signals GSI command completion */
 	struct mutex mutex;		/* protects commands, programming */
+	struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
+	struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
+	struct net_device dummy_dev;	/* needed for NAPI */
 };
 
 /**
@@ -860,7 +860,7 @@ static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
 }
 
 static void
-ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
+ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
 {
 	u32 endpoint_id = endpoint->endpoint_id;
 	u32 offset;
@@ -874,6 +874,19 @@ ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
 	iowrite32(val, endpoint->ipa->reg_virt + offset);
 }
 
+/* Assumes HOL_BLOCK is in disabled state */
+static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
+					       u32 microseconds)
+{
+	ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
+	ipa_endpoint_init_hol_block_en(endpoint, true);
+}
+
+static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
+{
+	ipa_endpoint_init_hol_block_en(endpoint, false);
+}
+
 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
 {
 	u32 i;
@@ -884,9 +897,8 @@ void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
 		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
 			continue;
 
-		ipa_endpoint_init_hol_block_enable(endpoint, false);
-		ipa_endpoint_init_hol_block_timer(endpoint, 0);
-		ipa_endpoint_init_hol_block_enable(endpoint, true);
+		ipa_endpoint_init_hol_block_disable(endpoint);
+		ipa_endpoint_init_hol_block_enable(endpoint, 0);
 	}
 }
 
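After this rework, ipa_endpoint_init_hol_block_en() is the raw register toggle, while the enable/disable pair wraps it: enable() programs the timer first and, per its comment, assumes HOL_BLOCK is currently disabled. A hypothetical caller inside the same file might look like the sketch below; example_set_hol_block() is not in the patch, only the two helpers it calls are.

/* Sketch of a caller using the reworked helpers (hypothetical) */
static void example_set_hol_block(struct ipa_endpoint *endpoint, bool drop,
				  u32 microseconds)
{
	if (drop)
		ipa_endpoint_init_hol_block_enable(endpoint, microseconds);
	else
		ipa_endpoint_init_hol_block_disable(endpoint);
}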
@@ -1141,18 +1153,19 @@ static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
 {
 	struct sk_buff *skb;
 
-	skb = __dev_alloc_skb(len, GFP_ATOMIC);
-	if (skb) {
-		skb_put(skb, len);
-		memcpy(skb->data, data, len);
-		skb->truesize += extra;
-	}
+	if (!endpoint->netdev)
+		return;
 
-	/* Now receive it, or drop it if there's no netdev */
-	if (endpoint->netdev)
-		ipa_modem_skb_rx(endpoint->netdev, skb);
-	else if (skb)
-		dev_kfree_skb_any(skb);
+	skb = __dev_alloc_skb(len, GFP_ATOMIC);
+	if (!skb)
+		return;
+
+	/* Copy the data into the socket buffer and receive it */
+	skb_put(skb, len);
+	memcpy(skb->data, data, len);
+	skb->truesize += extra;
+
+	ipa_modem_skb_rx(endpoint->netdev, skb);
 }
 
 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
@@ -1530,6 +1543,8 @@ static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
 	ipa_endpoint_init_hdr_metadata_mask(endpoint);
 	ipa_endpoint_init_mode(endpoint);
 	ipa_endpoint_init_aggr(endpoint);
+	if (!endpoint->toward_ipa)
+		ipa_endpoint_init_hol_block_disable(endpoint);
 	ipa_endpoint_init_deaggr(endpoint);
 	ipa_endpoint_init_rsrc_grp(endpoint);
 	ipa_endpoint_init_seq(endpoint);
@@ -29,6 +29,7 @@
 #include "ipa_mem.h"
 #include "ipa_table.h"
 #include "ipa_modem.h"
+#include "ipa_smp2p.h"
 #include "ipa_uc.h"
 #include "ipa_interrupt.h"
 #include "gsi_trans.h"
@@ -733,7 +734,7 @@ static int ipa_probe(struct platform_device *pdev)
 	if (ret)
 		goto err_endpoint_exit;
 
-	ret = ipa_modem_init(ipa, modem_init);
+	ret = ipa_smp2p_init(ipa, modem_init);
 	if (ret)
 		goto err_table_exit;
 
@@ -775,7 +776,7 @@ err_deconfig:
 	ipa_deconfig(ipa);
 err_power_put:
 	pm_runtime_put_noidle(dev);
-	ipa_modem_exit(ipa);
+	ipa_smp2p_exit(ipa);
 err_table_exit:
 	ipa_table_exit(ipa);
 err_endpoint_exit:
@@ -821,7 +822,7 @@ static int ipa_remove(struct platform_device *pdev)
 	ipa_deconfig(ipa);
 out_power_put:
 	pm_runtime_put_noidle(dev);
-	ipa_modem_exit(ipa);
+	ipa_smp2p_exit(ipa);
 	ipa_table_exit(ipa);
 	ipa_endpoint_exit(ipa);
 	gsi_exit(&ipa->gsi);
@@ -442,16 +442,6 @@ static int ipa_modem_notify(struct notifier_block *nb, unsigned long action,
 	return NOTIFY_OK;
 }
 
-int ipa_modem_init(struct ipa *ipa, bool modem_init)
-{
-	return ipa_smp2p_init(ipa, modem_init);
-}
-
-void ipa_modem_exit(struct ipa *ipa)
-{
-	ipa_smp2p_exit(ipa);
-}
-
 int ipa_modem_config(struct ipa *ipa)
 {
 	void *notifier;
@@ -18,9 +18,6 @@ void ipa_modem_skb_rx(struct net_device *netdev, struct sk_buff *skb);
 void ipa_modem_suspend(struct net_device *netdev);
 void ipa_modem_resume(struct net_device *netdev);
 
-int ipa_modem_init(struct ipa *ipa, bool modem_init);
-void ipa_modem_exit(struct ipa *ipa);
-
 int ipa_modem_config(struct ipa *ipa);
 void ipa_modem_deconfig(struct ipa *ipa);
 
@@ -419,21 +419,26 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
 	const struct ipa_mem *mem = ipa_mem_find(ipa, mem_id);
 	dma_addr_t hash_addr;
 	dma_addr_t addr;
+	u32 zero_offset;
 	u16 hash_count;
+	u32 zero_size;
 	u16 hash_size;
 	u16 count;
 	u16 size;
 
-	/* The number of filtering endpoints determines number of entries
-	 * in the filter table.  The hashed and non-hashed filter table
-	 * will have the same number of entries.  The size of the route
-	 * table region determines the number of entries it has.
-	 */
+	/* Compute the number of table entries to initialize */
 	if (filter) {
-		/* Include one extra "slot" to hold the filter map itself */
+		/* The number of filtering endpoints determines number of
+		 * entries in the filter table; we also add one more "slot"
+		 * to hold the bitmap itself.  The size of the hashed filter
+		 * table is either the same as the non-hashed one, or zero.
+		 */
 		count = 1 + hweight32(ipa->filter_map);
 		hash_count = hash_mem->size ? count : 0;
 	} else {
+		/* The size of a route table region determines the number
+		 * of entries it has.
+		 */
 		count = mem->size / sizeof(__le64);
 		hash_count = hash_mem->size / sizeof(__le64);
 	}
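To make the entry counting concrete, here is a small userspace sketch with made-up numbers (a filter map with 5 bits set and a 120-byte route region); __builtin_popcount() stands in for the kernel's hweight32(), and none of these values come from a real IPA memory map.

/* Illustration only: filter entries = 1 bitmap "slot" + popcount(map),
 * route entries = region size / entry size.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t filter_map = 0x4d2;	/* 5 bits set (made up) */
	uint16_t filter_count = 1 + __builtin_popcount(filter_map);
	uint16_t route_count = 120 / sizeof(uint64_t);	/* __le64-sized entries */

	/* 1 + 5 = 6 filter entries; 120 / 8 = 15 route entries */
	printf("filter entries: %u, route entries: %u\n",
	       (unsigned)filter_count, (unsigned)route_count);
	return 0;
}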
@@ -445,13 +450,42 @@ static void ipa_table_init_add(struct gsi_trans *trans, bool filter,
 
 	ipa_cmd_table_init_add(trans, opcode, size, mem->offset, addr,
 			       hash_size, hash_mem->offset, hash_addr);
+	if (!filter)
+		return;
+
+	/* Zero the unused space in the filter table */
+	zero_offset = mem->offset + size;
+	zero_size = mem->size - size;
+	ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size,
+				   ipa->zero_addr, true);
+	if (!hash_size)
+		return;
+
+	/* Zero the unused space in the hashed filter table */
+	zero_offset = hash_mem->offset + hash_size;
+	zero_size = hash_mem->size - hash_size;
+	ipa_cmd_dma_shared_mem_add(trans, zero_offset, zero_size,
+				   ipa->zero_addr, true);
 }
 
 int ipa_table_setup(struct ipa *ipa)
 {
 	struct gsi_trans *trans;
 
-	trans = ipa_cmd_trans_alloc(ipa, 4);
+	/* We will need at most 8 TREs:
+	 * - IPv4:
+	 *     - One for route table initialization (non-hashed and hashed)
+	 *     - One for filter table initialization (non-hashed and hashed)
+	 *     - One to zero unused entries in the non-hashed filter table
+	 *     - One to zero unused entries in the hashed filter table
+	 * - IPv6:
+	 *     - One for route table initialization (non-hashed and hashed)
+	 *     - One for filter table initialization (non-hashed and hashed)
+	 *     - One to zero unused entries in the non-hashed filter table
+	 *     - One to zero unused entries in the hashed filter table
+	 * All platforms support at least 8 TREs in a transaction.
+	 */
+	trans = ipa_cmd_trans_alloc(ipa, 8);
 	if (!trans) {
 		dev_err(&ipa->pdev->dev, "no transaction for table setup\n");
 		return -EBUSY;
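The TRE budget follows directly from the hunk above: per IP version the worst case is one route-table init, one filter-table init, and two zeroing commands, so 4 x 2 = 8 TREs. The zeroing arithmetic can be sanity-checked with a userspace sketch using made-up region numbers (a 128-byte filter region at offset 0x400 holding 6 used entries); the values are purely illustrative.

/* Illustration only: zero everything in the region past the used entries */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t offset = 0x400;			/* region start (made up) */
	uint32_t region_size = 128;			/* region size in bytes (made up) */
	uint16_t count = 6;				/* used entries */
	uint16_t size = count * sizeof(uint64_t);	/* 48 bytes of used entries */

	uint32_t zero_offset = offset + size;		/* 0x430 */
	uint32_t zero_size = region_size - size;	/* 80 bytes to zero */

	printf("zero %u bytes starting at 0x%x\n",
	       (unsigned)zero_size, (unsigned)zero_offset);
	return 0;
}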