Merge branch 'crypto-caam-unembed-net_dev'
Breno Leitao says:

====================
crypto: caam: Unembed net_dev

This series un-embeds the net_device struct from other structs, so that a
flexible array can be added to net_device. It also enables COMPILE_TEST for
FSL_CAAM, as is done for other config options that depend on ARCH_LAYERSCAPE.
====================

Link: https://patch.msgid.link/20240702185557.3699991-1-leitao@debian.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 7829b376f2
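For context, here is a minimal sketch of the conversion pattern the series applies: a per-CPU private struct stops embedding a struct net_device and instead holds a pointer that is allocated with alloc_netdev_dummy() and released with free_netdev(). The names my_pcpu_priv, my_setup and my_teardown are illustrative only and do not appear in the driver.

#include <linux/netdevice.h>

/*
 * Before the series the netdev was embedded, which bakes the size of
 * struct net_device into this struct; afterwards only a pointer is kept,
 * so net_device itself is free to grow a flexible array member.
 */
struct my_pcpu_priv {
	struct napi_struct napi;
	struct net_device *net_dev;	/* was: struct net_device net_dev; */
};

static int my_setup(struct my_pcpu_priv *p, struct device *parent)
{
	/* Dummy netdev: exists only so NAPI has something to hang off. */
	p->net_dev = alloc_netdev_dummy(0);
	if (!p->net_dev)
		return -ENOMEM;

	/* Keep dev_err()/dev_dbg() output tied to the parent device. */
	p->net_dev->dev = *parent;
	return 0;
}

static void my_teardown(struct my_pcpu_priv *p)
{
	netif_napi_del(&p->napi);
	free_netdev(p->net_dev);	/* explicit free replaces the embedded object's lifetime */
}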
@@ -10,7 +10,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC
 
 config CRYPTO_DEV_FSL_CAAM
 	tristate "Freescale CAAM-Multicore platform driver backend"
-	depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE
+	depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST
 	select SOC_BUS
 	select CRYPTO_DEV_FSL_CAAM_COMMON
 	imply FSL_MC_BUS
@@ -4990,11 +4990,23 @@ err_dma_map:
 	return err;
 }
 
+static void free_dpaa2_pcpu_netdev(struct dpaa2_caam_priv *priv, const cpumask_t *cpus)
+{
+	struct dpaa2_caam_priv_per_cpu *ppriv;
+	int i;
+
+	for_each_cpu(i, cpus) {
+		ppriv = per_cpu_ptr(priv->ppriv, i);
+		free_netdev(ppriv->net_dev);
+	}
+}
+
 static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
 {
 	struct device *dev = &ls_dev->dev;
 	struct dpaa2_caam_priv *priv;
 	struct dpaa2_caam_priv_per_cpu *ppriv;
+	cpumask_t clean_mask;
 	int err, cpu;
 	u8 i;
 
@@ -5073,6 +5085,7 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
 		}
 	}
 
+	cpumask_clear(&clean_mask);
 	i = 0;
 	for_each_online_cpu(cpu) {
 		u8 j;
@@ -5096,15 +5109,23 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev)
 			priv->rx_queue_attr[j].fqid,
 			priv->tx_queue_attr[j].fqid);
 
-		ppriv->net_dev.dev = *dev;
-		INIT_LIST_HEAD(&ppriv->net_dev.napi_list);
-		netif_napi_add_tx_weight(&ppriv->net_dev, &ppriv->napi,
+		ppriv->net_dev = alloc_netdev_dummy(0);
+		if (!ppriv->net_dev) {
+			err = -ENOMEM;
+			goto err_alloc_netdev;
+		}
+		cpumask_set_cpu(cpu, &clean_mask);
+		ppriv->net_dev->dev = *dev;
+
+		netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi,
					 dpaa2_dpseci_poll,
					 DPAA2_CAAM_NAPI_WEIGHT);
 	}
 
 	return 0;
 
+err_alloc_netdev:
+	free_dpaa2_pcpu_netdev(priv, &clean_mask);
 err_get_rx_queue:
 	dpaa2_dpseci_congestion_free(priv);
 err_get_vers:
@@ -5153,6 +5174,7 @@ static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv)
 		ppriv = per_cpu_ptr(priv->ppriv, i);
 		napi_disable(&ppriv->napi);
 		netif_napi_del(&ppriv->napi);
+		free_netdev(ppriv->net_dev);
 	}
 
 	return 0;
@@ -81,7 +81,7 @@ struct dpaa2_caam_priv {
  */
 struct dpaa2_caam_priv_per_cpu {
 	struct napi_struct napi;
-	struct net_device net_dev;
+	struct net_device *net_dev;
 	int req_fqid;
 	int rsp_fqid;
 	int prio;
@@ -80,6 +80,7 @@ static void build_deinstantiation_desc(u32 *desc, int handle)
 	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
 }
 
+#ifdef CONFIG_OF
 static const struct of_device_id imx8m_machine_match[] = {
 	{ .compatible = "fsl,imx8mm", },
 	{ .compatible = "fsl,imx8mn", },
@@ -88,6 +89,7 @@ static const struct of_device_id imx8m_machine_match[] = {
 	{ .compatible = "fsl,imx8ulp", },
 	{ }
 };
+#endif
 
 /*
  * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
@@ -57,7 +57,7 @@ struct caam_napi {
  */
 struct caam_qi_pcpu_priv {
 	struct caam_napi caam_napi;
-	struct net_device net_dev;
+	struct net_device *net_dev;
 	struct qman_fq *rsp_fq;
 } ____cacheline_aligned;
 
@@ -144,7 +144,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq,
 {
 	const struct qm_fd *fd;
 	struct caam_drv_req *drv_req;
-	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev);
 	struct caam_drv_private *priv = dev_get_drvdata(qidev);
 
 	fd = &msg->ern.fd;
@@ -530,6 +530,7 @@ static void caam_qi_shutdown(void *data)
 
 		if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i)))
 			dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i);
+		free_netdev(per_cpu(pcpu_qipriv.net_dev, i));
 	}
 
 	qman_delete_cgr_safe(&priv->cgr);
@@ -573,7 +574,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
 	struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
 	struct caam_drv_req *drv_req;
 	const struct qm_fd *fd;
-	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev);
+	struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev);
 	struct caam_drv_private *priv = dev_get_drvdata(qidev);
 	u32 status;
 
@@ -718,12 +719,24 @@ static void free_rsp_fqs(void)
 		kfree(per_cpu(pcpu_qipriv.rsp_fq, i));
 }
 
+static void free_caam_qi_pcpu_netdev(const cpumask_t *cpus)
+{
+	struct caam_qi_pcpu_priv *priv;
+	int i;
+
+	for_each_cpu(i, cpus) {
+		priv = per_cpu_ptr(&pcpu_qipriv, i);
+		free_netdev(priv->net_dev);
+	}
+}
+
 int caam_qi_init(struct platform_device *caam_pdev)
 {
 	int err, i;
 	struct device *ctrldev = &caam_pdev->dev, *qidev;
 	struct caam_drv_private *ctrlpriv;
 	const cpumask_t *cpus = qman_affine_cpus();
+	cpumask_t clean_mask;
 
 	ctrlpriv = dev_get_drvdata(ctrldev);
 	qidev = ctrldev;
@@ -743,6 +756,8 @@ int caam_qi_init(struct platform_device *caam_pdev)
 		return err;
 	}
 
+	cpumask_clear(&clean_mask);
+
 	/*
 	 * Enable the NAPI contexts on each of the core which has an affine
 	 * portal.
@@ -751,10 +766,16 @@ int caam_qi_init(struct platform_device *caam_pdev)
 		struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i);
 		struct caam_napi *caam_napi = &priv->caam_napi;
 		struct napi_struct *irqtask = &caam_napi->irqtask;
-		struct net_device *net_dev = &priv->net_dev;
+		struct net_device *net_dev;
 
+		net_dev = alloc_netdev_dummy(0);
+		if (!net_dev) {
+			err = -ENOMEM;
+			goto fail;
+		}
+		cpumask_set_cpu(i, &clean_mask);
+		priv->net_dev = net_dev;
 		net_dev->dev = *qidev;
-		INIT_LIST_HEAD(&net_dev->napi_list);
 
 		netif_napi_add_tx_weight(net_dev, irqtask, caam_qi_poll,
					 CAAM_NAPI_WEIGHT);
@@ -766,16 +787,22 @@ int caam_qi_init(struct platform_device *caam_pdev)
				     dma_get_cache_alignment(), 0, NULL);
 	if (!qi_cache) {
 		dev_err(qidev, "Can't allocate CAAM cache\n");
-		free_rsp_fqs();
-		return -ENOMEM;
+		err = -ENOMEM;
+		goto fail2;
 	}
 
 	caam_debugfs_qi_init(ctrlpriv);
 
 	err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv);
 	if (err)
-		return err;
+		goto fail2;
 
 	dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n");
 	return 0;
+
+fail2:
+	free_rsp_fqs();
+fail:
+	free_caam_qi_pcpu_netdev(&clean_mask);
+	return err;
 }
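As an aside on the error handling above, here is a minimal sketch of the clean_mask pattern that both caam_qi_init() and dpaa2_dpseci_setup() now follow: a cpumask records each CPU whose dummy netdev was successfully allocated, so the failure path frees exactly those and nothing else. The my_* names below are hypothetical and not taken from the driver.

#include <linux/cpumask.h>
#include <linux/netdevice.h>
#include <linux/percpu.h>

struct my_pcpu_priv {
	struct net_device *net_dev;
};

static DEFINE_PER_CPU(struct my_pcpu_priv, my_pcpu);

/* Free only the per-CPU netdevs recorded in @cpus. */
static void my_free_pcpu_netdev(const cpumask_t *cpus)
{
	int i;

	for_each_cpu(i, cpus) {
		struct my_pcpu_priv *p = per_cpu_ptr(&my_pcpu, i);

		free_netdev(p->net_dev);
	}
}

static int my_init(void)
{
	cpumask_t clean_mask;
	int cpu;

	cpumask_clear(&clean_mask);
	for_each_online_cpu(cpu) {
		struct my_pcpu_priv *p = per_cpu_ptr(&my_pcpu, cpu);

		p->net_dev = alloc_netdev_dummy(0);
		if (!p->net_dev) {
			/* Unwind only the CPUs marked in clean_mask so far. */
			my_free_pcpu_netdev(&clean_mask);
			return -ENOMEM;
		}
		cpumask_set_cpu(cpu, &clean_mask);
	}
	return 0;
}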