Merge branch 'fsl-qbman-in_interrupt-cleanup'
Sebastian Andrzej Siewior says:

====================
fsl/qbman: in_interrupt() cleanup

This is the in_interrupt() cleanup for the FSL DPAA framework and its two
users. The `napi' parameter has been renamed to `sched_napi'; the other
parts are the same as in the previous post [0].

[0] https://lkml.kernel.org/r/20201027225454.3492351-1-bigeasy@linutronix.de
====================

Link: https://lore.kernel.org/r/20201101232257.3028508-1-bigeasy@linutronix.de
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 45e9fbf023
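To make the shape of the change concrete before the diff: previously each DQRR callback guessed its execution context with in_irq()/in_serving_softirq(), which gives the wrong answer on PREEMPT_RT, where the portal ISR runs in a thread. After this series, the caller that already knows the context passes a `sched_napi' flag down instead: portal_isr() calls __poll_portal_fast(p, QMAN_POLL_LIMIT, true), while the qman_p_poll_dqrr() polling path passes false. The following userspace C sketch models that calling convention only; every name in it (napi_schedule_cb, poll_portal) is a stand-in for illustration, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for a DQRR callback: after the cleanup it no longer guesses
 * its context, it is told via sched_napi whether to defer to NAPI. */
static int napi_schedule_cb(bool sched_napi)
{
	if (sched_napi) {
		/* caller ran us from the portal interrupt:
		 * mask the IRQ source and hand off to NAPI */
		printf("IRQ path: mask DQRI, schedule NAPI\n");
		return 1;	/* stop dequeue processing */
	}
	/* caller is already polling (NAPI/softirq): keep consuming */
	return 0;
}

/* Stand-in for __poll_portal_fast(): just forwards the flag. */
static void poll_portal(bool sched_napi)
{
	if (napi_schedule_cb(sched_napi))
		return;
	printf("polling path: consume dequeue entries inline\n");
}

int main(void)
{
	poll_portal(true);	/* as the hard-IRQ handler would */
	poll_portal(false);	/* as qman_p_poll_dqrr() does */
	return 0;
}

The point of the design is that the context decision is made exactly once, at the outermost caller, instead of being re-derived (possibly wrongly) inside every callback.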
--- a/drivers/crypto/caam/qi.c
+++ b/drivers/crypto/caam/qi.c
@@ -545,14 +545,10 @@ static void cgr_cb(struct qman_portal *qm, struct qman_cgr *cgr, int congested)
 	}
 }
 
-static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
+static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np,
+				 bool sched_napi)
 {
-	/*
-	 * In case of threaded ISR, for RT kernels in_irq() does not return
-	 * appropriate value, so use in_serving_softirq to distinguish between
-	 * softirq and irq contexts.
-	 */
-	if (unlikely(in_irq() || !in_serving_softirq())) {
+	if (sched_napi) {
 		/* Disable QMan IRQ source and invoke NAPI */
 		qman_p_irqsource_remove(p, QM_PIRQ_DQRI);
 		np->p = p;
@@ -564,7 +560,8 @@ static int caam_qi_napi_schedule(struct qman_portal *p, struct caam_napi *np)
 
 static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
 						    struct qman_fq *rsp_fq,
-						    const struct qm_dqrr_entry *dqrr)
+						    const struct qm_dqrr_entry *dqrr,
+						    bool sched_napi)
 {
 	struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi);
 	struct caam_drv_req *drv_req;
@@ -573,7 +570,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p,
 	struct caam_drv_private *priv = dev_get_drvdata(qidev);
 	u32 status;
 
-	if (caam_qi_napi_schedule(p, caam_napi))
+	if (caam_qi_napi_schedule(p, caam_napi, sched_napi))
 		return qman_cb_dqrr_stop;
 
 	fd = &dqrr->fd;
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2300,9 +2300,9 @@ static void dpaa_tx_conf(struct net_device *net_dev,
 }
 
 static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
-					 struct qman_portal *portal)
+					 struct qman_portal *portal, bool sched_napi)
 {
-	if (unlikely(in_irq() || !in_serving_softirq())) {
+	if (sched_napi) {
 		/* Disable QMan IRQ and invoke NAPI */
 		qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
 
@@ -2316,7 +2316,8 @@ static inline int dpaa_eth_napi_schedule(struct dpaa_percpu_priv *percpu_priv,
 
 static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
 					      struct qman_fq *fq,
-					      const struct qm_dqrr_entry *dq)
+					      const struct qm_dqrr_entry *dq,
+					      bool sched_napi)
 {
 	struct dpaa_fq *dpaa_fq = container_of(fq, struct dpaa_fq, fq_base);
 	struct dpaa_percpu_priv *percpu_priv;
@@ -2332,7 +2333,7 @@ static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
 
 	percpu_priv = this_cpu_ptr(priv->percpu_priv);
 
-	if (dpaa_eth_napi_schedule(percpu_priv, portal))
+	if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
 		return qman_cb_dqrr_stop;
 
 	dpaa_eth_refill_bpools(priv);
@@ -2343,7 +2344,8 @@ static enum qman_cb_dqrr_result rx_error_dqrr(struct qman_portal *portal,
 
 static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
 						struct qman_fq *fq,
-						const struct qm_dqrr_entry *dq)
+						const struct qm_dqrr_entry *dq,
+						bool sched_napi)
 {
 	struct skb_shared_hwtstamps *shhwtstamps;
 	struct rtnl_link_stats64 *percpu_stats;
@@ -2375,7 +2377,7 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
 	percpu_priv = this_cpu_ptr(priv->percpu_priv);
 	percpu_stats = &percpu_priv->stats;
 
-	if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal)))
+	if (unlikely(dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi)))
 		return qman_cb_dqrr_stop;
 
 	/* Make sure we didn't run out of buffers */
@@ -2460,7 +2462,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
 
 static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
 						struct qman_fq *fq,
-						const struct qm_dqrr_entry *dq)
+						const struct qm_dqrr_entry *dq,
+						bool sched_napi)
 {
 	struct dpaa_percpu_priv *percpu_priv;
 	struct net_device *net_dev;
@@ -2471,7 +2474,7 @@ static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
 
 	percpu_priv = this_cpu_ptr(priv->percpu_priv);
 
-	if (dpaa_eth_napi_schedule(percpu_priv, portal))
+	if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
 		return qman_cb_dqrr_stop;
 
 	dpaa_tx_error(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
@@ -2481,7 +2484,8 @@ static enum qman_cb_dqrr_result conf_error_dqrr(struct qman_portal *portal,
 
 static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
 					       struct qman_fq *fq,
-					       const struct qm_dqrr_entry *dq)
+					       const struct qm_dqrr_entry *dq,
+					       bool sched_napi)
 {
 	struct dpaa_percpu_priv *percpu_priv;
 	struct net_device *net_dev;
@@ -2495,7 +2499,7 @@ static enum qman_cb_dqrr_result conf_dflt_dqrr(struct qman_portal *portal,
 
 	percpu_priv = this_cpu_ptr(priv->percpu_priv);
 
-	if (dpaa_eth_napi_schedule(percpu_priv, portal))
+	if (dpaa_eth_napi_schedule(percpu_priv, portal, sched_napi))
 		return qman_cb_dqrr_stop;
 
 	dpaa_tx_conf(net_dev, priv, percpu_priv, &dq->fd, fq->fqid);
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1159,7 +1159,7 @@ static u32 fq_to_tag(struct qman_fq *fq)
 
 static u32 __poll_portal_slow(struct qman_portal *p, u32 is);
 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
-					      unsigned int poll_limit);
+					      unsigned int poll_limit, bool sched_napi);
 static void qm_congestion_task(struct work_struct *work);
 static void qm_mr_process_task(struct work_struct *work);
 
@@ -1174,7 +1174,7 @@ static irqreturn_t portal_isr(int irq, void *ptr)
 
 	/* DQRR-handling if it's interrupt-driven */
 	if (is & QM_PIRQ_DQRI) {
-		__poll_portal_fast(p, QMAN_POLL_LIMIT);
+		__poll_portal_fast(p, QMAN_POLL_LIMIT, true);
 		clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
 	}
 	/* Handling of anything else that's interrupt-driven */
@@ -1602,7 +1602,7 @@ static noinline void clear_vdqcr(struct qman_portal *p, struct qman_fq *fq)
  * user callbacks to call into any QMan API.
  */
 static inline unsigned int __poll_portal_fast(struct qman_portal *p,
-					      unsigned int poll_limit)
+					      unsigned int poll_limit, bool sched_napi)
 {
 	const struct qm_dqrr_entry *dq;
 	struct qman_fq *fq;
@@ -1636,7 +1636,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
 			 * and we don't want multiple if()s in the critical
 			 * path (SDQCR).
 			 */
-			res = fq->cb.dqrr(p, fq, dq);
+			res = fq->cb.dqrr(p, fq, dq, sched_napi);
 			if (res == qman_cb_dqrr_stop)
 				break;
 			/* Check for VDQCR completion */
@@ -1646,7 +1646,7 @@ static inline unsigned int __poll_portal_fast(struct qman_portal *p,
 			/* SDQCR: context_b points to the FQ */
 			fq = tag_to_fq(be32_to_cpu(dq->context_b));
 			/* Now let the callback do its stuff */
-			res = fq->cb.dqrr(p, fq, dq);
+			res = fq->cb.dqrr(p, fq, dq, sched_napi);
 			/*
 			 * The callback can request that we exit without
 			 * consuming this entry nor advancing;
@@ -1753,7 +1753,7 @@ EXPORT_SYMBOL(qman_start_using_portal);
 
 int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
 {
-	return __poll_portal_fast(p, limit);
+	return __poll_portal_fast(p, limit, false);
 }
 EXPORT_SYMBOL(qman_p_poll_dqrr);
 
--- a/drivers/soc/fsl/qbman/qman_test_api.c
+++ b/drivers/soc/fsl/qbman/qman_test_api.c
@@ -45,7 +45,8 @@
 
 static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *,
 					struct qman_fq *,
-					const struct qm_dqrr_entry *);
+					const struct qm_dqrr_entry *,
+					bool sched_napi);
 static void cb_ern(struct qman_portal *, struct qman_fq *,
 		   const union qm_mr_entry *);
 static void cb_fqs(struct qman_portal *, struct qman_fq *,
@@ -208,7 +209,8 @@ failed:
 
 static enum qman_cb_dqrr_result cb_dqrr(struct qman_portal *p,
 					struct qman_fq *fq,
-					const struct qm_dqrr_entry *dq)
+					const struct qm_dqrr_entry *dq,
+					bool sched_napi)
 {
 	if (WARN_ON(fd_neq(&fd_dq, &dq->fd))) {
 		pr_err("BADNESS: dequeued frame doesn't match;\n");
--- a/drivers/soc/fsl/qbman/qman_test_stash.c
+++ b/drivers/soc/fsl/qbman/qman_test_stash.c
@@ -275,7 +275,8 @@ static inline int process_frame_data(struct hp_handler *handler,
 
 static enum qman_cb_dqrr_result normal_dqrr(struct qman_portal *portal,
 					    struct qman_fq *fq,
-					    const struct qm_dqrr_entry *dqrr)
+					    const struct qm_dqrr_entry *dqrr,
+					    bool sched_napi)
 {
 	struct hp_handler *handler = (struct hp_handler *)fq;
 
@@ -293,7 +294,8 @@ skip:
 
 static enum qman_cb_dqrr_result special_dqrr(struct qman_portal *portal,
 					     struct qman_fq *fq,
-					     const struct qm_dqrr_entry *dqrr)
+					     const struct qm_dqrr_entry *dqrr,
+					     bool sched_napi)
 {
 	struct hp_handler *handler = (struct hp_handler *)fq;
 
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -689,7 +689,8 @@ enum qman_cb_dqrr_result {
 };
 typedef enum qman_cb_dqrr_result (*qman_cb_dqrr)(struct qman_portal *qm,
 					struct qman_fq *fq,
-					const struct qm_dqrr_entry *dqrr);
+					const struct qm_dqrr_entry *dqrr,
+					bool sched_napi);
 
 /*
  * This callback type is used when handling ERNs, FQRNs and FQRLs via MR. They
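For reference, a hedged sketch of what a driver-side callback looks like against the new qman_cb_dqrr typedef, modeled loosely on the dpaa_eth callbacks above. struct my_rx_priv and my_rx_dqrr are invented names; the QMan calls (qman_p_irqsource_remove(), QM_PIRQ_DQRI, the qman_cb_dqrr_* return codes) are the real API from include/soc/fsl/qman.h.

/* Invented per-cpu private data; real drivers hang their NAPI context
 * off their own structures (e.g. struct dpaa_percpu_priv). */
struct my_rx_priv {
	struct napi_struct napi;
};

static DEFINE_PER_CPU(struct my_rx_priv, my_rx_priv);

static enum qman_cb_dqrr_result my_rx_dqrr(struct qman_portal *portal,
					   struct qman_fq *fq,
					   const struct qm_dqrr_entry *dq,
					   bool sched_napi)
{
	struct my_rx_priv *priv = this_cpu_ptr(&my_rx_priv);

	if (sched_napi) {
		/* Invoked from the portal IRQ: mask the dequeue interrupt
		 * source, hand off to NAPI, and tell QMan to stop feeding
		 * us entries until NAPI re-enables the source. */
		qman_p_irqsource_remove(portal, QM_PIRQ_DQRI);
		napi_schedule(&priv->napi);
		return qman_cb_dqrr_stop;
	}

	/* Invoked from qman_p_poll_dqrr() (sched_napi == false):
	 * process dq->fd inline and consume the entry. */
	return qman_cb_dqrr_consume;
}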