Merge tag 'usb-serial-6.7-rc3' of https://git.kernel.org/pub/scm/linux/kernel/git/johan/usb-serial into usb-linus

Johan writes:

USB-serial fixes for 6.7-rc3

Here are a couple of modem device entry fixes and some new modem device
ids.

All have been in linux-next with no reported issues.
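
For context, new device ids for the option driver are normally plain additions to its USB id table. The sketch below is purely illustrative: the vendor/product values are hypothetical placeholders, not the actual Luat or Fibocom ids added by the commits listed below.

    /*
     * Illustrative sketch of an option-driver id-table entry.
     * EXAMPLE_VENDOR_ID / EXAMPLE_PRODUCT_ID are hypothetical placeholders.
     */
    #include <linux/module.h>
    #include <linux/usb.h>

    #define EXAMPLE_VENDOR_ID	0x1234
    #define EXAMPLE_PRODUCT_ID	0x5678

    static const struct usb_device_id option_ids[] = {
    	/* Match only vendor-specific (0xff) interfaces so the driver does
    	 * not claim e.g. ADB or network interfaces on the same modem. */
    	{ USB_DEVICE_INTERFACE_CLASS(EXAMPLE_VENDOR_ID,
    				     EXAMPLE_PRODUCT_ID, 0xff) },
    	{ }	/* terminating entry */
    };
    MODULE_DEVICE_TABLE(usb, option_ids);

Entries can also mark interfaces that should be left to other drivers (the option driver has an RSVD() helper for this), which is the kind of adjustment the ZTE MF290 fix below makes.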

* tag 'usb-serial-6.7-rc3' of https://git.kernel.org/pub/scm/linux/kernel/git/johan/usb-serial: (329 commits)
  USB: serial: option: add Luat Air72*U series products
  USB: serial: option: add Fibocom L7xx modules
  USB: serial: option: fix FM101R-GL defines
  USB: serial: option: don't claim interface 4 for ZTE MF290
  Linux 6.7-rc2
  prctl: Disable prctl(PR_SET_MDWE) on parisc
  parisc/power: Fix power soft-off when running on qemu
  parisc: Replace strlcpy() with strscpy()
  NFSD: Fix checksum mismatches in the duplicate reply cache
  NFSD: Fix "start of NFS reply" pointer passed to nfsd_cache_update()
  NFSD: Update nfsd_cache_append() to use xdr_stream
  nfsd: fix file memleak on client_opens_release
  dm-crypt: start allocating with MAX_ORDER
  dm-verity: don't use blocking calls from tasklets
  dm-bufio: fix no-sleep mode
  dm-delay: avoid duplicate logic
  dm-delay: fix bugs introduced by kthread mode
  dm-delay: fix a race between delay_presuspend and delay_bio
  drm/amdgpu/gmc9: disable AGP aperture
  drm/amdgpu/gmc10: disable AGP aperture
  ...
Merged by Greg Kroah-Hartman on 2023-11-24 16:30:38 +00:00 (commit cb9a830e87)
251 changed files with 4125 additions and 3383 deletions


@ -36,6 +36,7 @@ properties:
- qcom,sm8350-ufshc
- qcom,sm8450-ufshc
- qcom,sm8550-ufshc
- qcom,sm8650-ufshc
- const: qcom,ufshc
- const: jedec,ufs-2.0
@ -122,6 +123,7 @@ allOf:
- qcom,sm8350-ufshc
- qcom,sm8450-ufshc
- qcom,sm8550-ufshc
- qcom,sm8650-ufshc
then:
properties:
clocks:


@ -8950,7 +8950,6 @@ S: Maintained
F: scripts/get_maintainer.pl
GFS2 FILE SYSTEM
M: Bob Peterson <rpeterso@redhat.com>
M: Andreas Gruenbacher <agruenba@redhat.com>
L: gfs2@lists.linux.dev
S: Supported
@ -21769,7 +21768,9 @@ F: Documentation/devicetree/bindings/counter/ti-eqep.yaml
F: drivers/counter/ti-eqep.c
TI ETHERNET SWITCH DRIVER (CPSW)
R: Grygorii Strashko <grygorii.strashko@ti.com>
R: Siddharth Vadapalli <s-vadapalli@ti.com>
R: Ravi Gunasekaran <r-gunasekaran@ti.com>
R: Roger Quadros <rogerq@kernel.org>
L: linux-omap@vger.kernel.org
L: netdev@vger.kernel.org
S: Maintained
@ -21793,6 +21794,15 @@ F: Documentation/devicetree/bindings/media/i2c/ti,ds90*
F: drivers/media/i2c/ds90*
F: include/media/i2c/ds90*
TI ICSSG ETHERNET DRIVER (ICSSG)
R: MD Danish Anwar <danishanwar@ti.com>
R: Roger Quadros <rogerq@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
S: Maintained
F: Documentation/devicetree/bindings/net/ti,icss*.yaml
F: drivers/net/ethernet/ti/icssg/*
TI J721E CSI2RX DRIVER
M: Jai Luthra <j-luthra@ti.com>
L: linux-media@vger.kernel.org
@ -23692,6 +23702,20 @@ F: arch/x86/kernel/dumpstack.c
F: arch/x86/kernel/stacktrace.c
F: arch/x86/kernel/unwind_*.c
X86 TRUST DOMAIN EXTENSIONS (TDX)
M: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
R: Dave Hansen <dave.hansen@linux.intel.com>
L: x86@kernel.org
L: linux-coco@lists.linux.dev
S: Supported
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/tdx
F: arch/x86/boot/compressed/tdx*
F: arch/x86/coco/tdx/
F: arch/x86/include/asm/shared/tdx.h
F: arch/x86/include/asm/tdx.h
F: arch/x86/virt/vmx/tdx/
F: drivers/virt/coco/tdx-guest
X86 VDSO
M: Andy Lutomirski <luto@kernel.org>
L: linux-kernel@vger.kernel.org
@ -23872,8 +23896,7 @@ T: git git://git.kernel.org/pub/scm/fs/xfs/xfs-linux.git
P: Documentation/filesystems/xfs-maintainer-entry-profile.rst
F: Documentation/ABI/testing/sysfs-fs-xfs
F: Documentation/admin-guide/xfs.rst
F: Documentation/filesystems/xfs-delayed-logging-design.rst
F: Documentation/filesystems/xfs-self-describing-metadata.rst
F: Documentation/filesystems/xfs-*
F: fs/xfs/
F: include/uapi/linux/dqblk_xfs.h
F: include/uapi/linux/fsmap.h


@ -2,7 +2,7 @@
VERSION = 6
PATCHLEVEL = 7
SUBLEVEL = 0
EXTRAVERSION = -rc1
EXTRAVERSION = -rc2
NAME = Hurr durr I'ma ninja sloth
# *DOCUMENTATION*


@ -140,11 +140,11 @@ config ARCH_MMAP_RND_COMPAT_BITS_MIN
default 8
config ARCH_MMAP_RND_BITS_MAX
default 24 if 64BIT
default 17
default 18 if 64BIT
default 13
config ARCH_MMAP_RND_COMPAT_BITS_MAX
default 17
default 13
# unless you want to implement ACPI on PA-RISC ... ;-)
config PM


@ -349,15 +349,7 @@ struct pt_regs; /* forward declaration... */
#define ELF_HWCAP 0
/* Masks for stack and mmap randomization */
#define BRK_RND_MASK (is_32bit_task() ? 0x07ffUL : 0x3ffffUL)
#define MMAP_RND_MASK (is_32bit_task() ? 0x1fffUL : 0x3ffffUL)
#define STACK_RND_MASK MMAP_RND_MASK
struct mm_struct;
extern unsigned long arch_randomize_brk(struct mm_struct *);
#define arch_randomize_brk arch_randomize_brk
#define STACK_RND_MASK 0x7ff /* 8MB of VA */
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
struct linux_binprm;


@ -47,6 +47,8 @@
#ifndef __ASSEMBLY__
struct rlimit;
unsigned long mmap_upper_limit(struct rlimit *rlim_stack);
unsigned long calc_max_stack_size(unsigned long stack_max);
/*


@ -383,7 +383,7 @@ show_cpuinfo (struct seq_file *m, void *v)
char cpu_name[60], *p;
/* strip PA path from CPU name to not confuse lscpu */
strlcpy(cpu_name, per_cpu(cpu_data, 0).dev->name, sizeof(cpu_name));
strscpy(cpu_name, per_cpu(cpu_data, 0).dev->name, sizeof(cpu_name));
p = strrchr(cpu_name, '[');
if (p)
*(--p) = 0;


@ -77,7 +77,7 @@ unsigned long calc_max_stack_size(unsigned long stack_max)
* indicating that "current" should be used instead of a passed-in
* value from the exec bprm as done with arch_pick_mmap_layout().
*/
static unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
unsigned long mmap_upper_limit(struct rlimit *rlim_stack)
{
unsigned long stack_base;


@ -16,6 +16,9 @@
#include <asm/x86_init.h>
#include <asm/cpufeature.h>
#include <asm/irq_vectors.h>
#include <asm/xen/hypervisor.h>
#include <xen/xen.h>
#ifdef CONFIG_ACPI_APEI
# include <asm/pgtable_types.h>
@ -127,6 +130,17 @@ static inline void arch_acpi_set_proc_cap_bits(u32 *cap)
if (!cpu_has(c, X86_FEATURE_MWAIT) ||
boot_option_idle_override == IDLE_NOMWAIT)
*cap &= ~(ACPI_PROC_CAP_C_C1_FFH | ACPI_PROC_CAP_C_C2C3_FFH);
if (xen_initial_domain()) {
/*
* When Linux is running as Xen dom0, the hypervisor is the
* entity in charge of the processor power management, and so
* Xen needs to check that the OS capabilities reported in the
* processor capabilities buffer match what the hypervisor
* driver supports.
*/
xen_sanitize_proc_cap_bits(cap);
}
}
static inline bool acpi_has_cpu_in_madt(void)


@ -100,4 +100,13 @@ static inline void leave_lazy(enum xen_lazy_mode mode)
enum xen_lazy_mode xen_get_lazy_mode(void);
#if defined(CONFIG_XEN_DOM0) && defined(CONFIG_ACPI)
void xen_sanitize_proc_cap_bits(uint32_t *buf);
#else
static inline void xen_sanitize_proc_cap_bits(uint32_t *buf)
{
BUG();
}
#endif
#endif /* _ASM_X86_XEN_HYPERVISOR_H */


@ -63,6 +63,7 @@ int acpi_fix_pin2_polarity __initdata;
#ifdef CONFIG_X86_LOCAL_APIC
static u64 acpi_lapic_addr __initdata = APIC_DEFAULT_PHYS_BASE;
static bool has_lapic_cpus __initdata;
static bool acpi_support_online_capable;
#endif
@ -232,6 +233,14 @@ acpi_parse_x2apic(union acpi_subtable_headers *header, const unsigned long end)
if (!acpi_is_processor_usable(processor->lapic_flags))
return 0;
/*
* According to https://uefi.org/specs/ACPI/6.5/05_ACPI_Software_Programming_Model.html#processor-local-x2apic-structure
* when MADT provides both valid LAPIC and x2APIC entries, the APIC ID
* in x2APIC must be equal or greater than 0xff.
*/
if (has_lapic_cpus && apic_id < 0xff)
return 0;
/*
* We need to register disabled CPU as well to permit
* counting disabled CPUs. This allows us to size
@ -1114,10 +1123,7 @@ static int __init early_acpi_parse_madt_lapic_addr_ovr(void)
static int __init acpi_parse_madt_lapic_entries(void)
{
int count;
int x2count = 0;
int ret;
struct acpi_subtable_proc madt_proc[2];
int count, x2count = 0;
if (!boot_cpu_has(X86_FEATURE_APIC))
return -ENODEV;
@ -1126,21 +1132,11 @@ static int __init acpi_parse_madt_lapic_entries(void)
acpi_parse_sapic, MAX_LOCAL_APIC);
if (!count) {
memset(madt_proc, 0, sizeof(madt_proc));
madt_proc[0].id = ACPI_MADT_TYPE_LOCAL_APIC;
madt_proc[0].handler = acpi_parse_lapic;
madt_proc[1].id = ACPI_MADT_TYPE_LOCAL_X2APIC;
madt_proc[1].handler = acpi_parse_x2apic;
ret = acpi_table_parse_entries_array(ACPI_SIG_MADT,
sizeof(struct acpi_table_madt),
madt_proc, ARRAY_SIZE(madt_proc), MAX_LOCAL_APIC);
if (ret < 0) {
pr_err("Error parsing LAPIC/X2APIC entries\n");
return ret;
}
count = madt_proc[0].count;
x2count = madt_proc[1].count;
count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_APIC,
acpi_parse_lapic, MAX_LOCAL_APIC);
has_lapic_cpus = count > 0;
x2count = acpi_table_parse_madt(ACPI_MADT_TYPE_LOCAL_X2APIC,
acpi_parse_x2apic, MAX_LOCAL_APIC);
}
if (!count && !x2count) {
pr_err("No LAPIC entries present\n");


@ -175,9 +175,6 @@ int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
frame = get_sigframe(ksig, regs, sizeof(struct rt_sigframe), &fp);
uc_flags = frame_uc_flags(regs);
if (setup_signal_shadow_stack(ksig))
return -EFAULT;
if (!user_access_begin(frame, sizeof(*frame)))
return -EFAULT;
@ -198,6 +195,9 @@ int x64_setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
return -EFAULT;
}
if (setup_signal_shadow_stack(ksig))
return -EFAULT;
/* Set up registers for signal handler */
regs->di = ksig->sig;
/* In case the signal handler was declared without prototypes */


@ -2858,11 +2858,8 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
};
struct request *rq;
if (unlikely(bio_queue_enter(bio)))
return NULL;
if (blk_mq_attempt_bio_merge(q, bio, nsegs))
goto queue_exit;
return NULL;
rq_qos_throttle(q, bio);
@ -2878,35 +2875,23 @@ static struct request *blk_mq_get_new_requests(struct request_queue *q,
rq_qos_cleanup(q, bio);
if (bio->bi_opf & REQ_NOWAIT)
bio_wouldblock_error(bio);
queue_exit:
blk_queue_exit(q);
return NULL;
}
static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
struct blk_plug *plug, struct bio **bio, unsigned int nsegs)
/* return true if this @rq can be used for @bio */
static bool blk_mq_can_use_cached_rq(struct request *rq, struct blk_plug *plug,
struct bio *bio)
{
struct request *rq;
enum hctx_type type, hctx_type;
enum hctx_type type = blk_mq_get_hctx_type(bio->bi_opf);
enum hctx_type hctx_type = rq->mq_hctx->type;
if (!plug)
return NULL;
rq = rq_list_peek(&plug->cached_rq);
if (!rq || rq->q != q)
return NULL;
WARN_ON_ONCE(rq_list_peek(&plug->cached_rq) != rq);
if (blk_mq_attempt_bio_merge(q, *bio, nsegs)) {
*bio = NULL;
return NULL;
}
type = blk_mq_get_hctx_type((*bio)->bi_opf);
hctx_type = rq->mq_hctx->type;
if (type != hctx_type &&
!(type == HCTX_TYPE_READ && hctx_type == HCTX_TYPE_DEFAULT))
return NULL;
if (op_is_flush(rq->cmd_flags) != op_is_flush((*bio)->bi_opf))
return NULL;
return false;
if (op_is_flush(rq->cmd_flags) != op_is_flush(bio->bi_opf))
return false;
/*
* If any qos ->throttle() end up blocking, we will have flushed the
@ -2914,12 +2899,12 @@ static inline struct request *blk_mq_get_cached_request(struct request_queue *q,
* before we throttle.
*/
plug->cached_rq = rq_list_next(rq);
rq_qos_throttle(q, *bio);
rq_qos_throttle(rq->q, bio);
blk_mq_rq_time_init(rq, 0);
rq->cmd_flags = (*bio)->bi_opf;
rq->cmd_flags = bio->bi_opf;
INIT_LIST_HEAD(&rq->queuelist);
return rq;
return true;
}
static void bio_set_ioprio(struct bio *bio)
@ -2949,7 +2934,7 @@ void blk_mq_submit_bio(struct bio *bio)
struct blk_plug *plug = blk_mq_plug(bio);
const int is_sync = op_is_sync(bio->bi_opf);
struct blk_mq_hw_ctx *hctx;
struct request *rq;
struct request *rq = NULL;
unsigned int nr_segs = 1;
blk_status_t ret;
@ -2960,20 +2945,36 @@ void blk_mq_submit_bio(struct bio *bio)
return;
}
if (!bio_integrity_prep(bio))
return;
bio_set_ioprio(bio);
rq = blk_mq_get_cached_request(q, plug, &bio, nr_segs);
if (!rq) {
if (!bio)
if (plug) {
rq = rq_list_peek(&plug->cached_rq);
if (rq && rq->q != q)
rq = NULL;
}
if (rq) {
if (!bio_integrity_prep(bio))
return;
if (blk_mq_attempt_bio_merge(q, bio, nr_segs))
return;
if (blk_mq_can_use_cached_rq(rq, plug, bio))
goto done;
percpu_ref_get(&q->q_usage_counter);
} else {
if (unlikely(bio_queue_enter(bio)))
return;
if (!bio_integrity_prep(bio))
goto fail;
}
rq = blk_mq_get_new_requests(q, plug, bio, nr_segs);
if (unlikely(!rq))
if (unlikely(!rq)) {
fail:
blk_queue_exit(q);
return;
}
done:
trace_block_getrq(bio);
rq_qos_track(q, rq, bio);


@ -250,9 +250,6 @@ int ivpu_rpm_get_if_active(struct ivpu_device *vdev)
{
int ret;
ivpu_dbg(vdev, RPM, "rpm_get_if_active count %d\n",
atomic_read(&vdev->drm.dev->power.usage_count));
ret = pm_runtime_get_if_active(vdev->drm.dev, false);
drm_WARN_ON(&vdev->drm, ret < 0);


@ -131,7 +131,7 @@ config RASPBERRYPI_FIRMWARE
config FW_CFG_SYSFS
tristate "QEMU fw_cfg device support in sysfs"
depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || SPARC || X86)
depends on SYSFS && (ARM || ARM64 || PARISC || PPC_PMAC || RISCV || SPARC || X86)
depends on HAS_IOPORT_MAP
default n
help


@ -211,7 +211,7 @@ static void fw_cfg_io_cleanup(void)
/* arch-specific ctrl & data register offsets are not available in ACPI, DT */
#if !(defined(FW_CFG_CTRL_OFF) && defined(FW_CFG_DATA_OFF))
# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64))
# if (defined(CONFIG_ARM) || defined(CONFIG_ARM64) || defined(CONFIG_RISCV))
# define FW_CFG_CTRL_OFF 0x08
# define FW_CFG_DATA_OFF 0x00
# define FW_CFG_DMA_OFF 0x10


@ -248,6 +248,7 @@ extern int amdgpu_umsch_mm;
extern int amdgpu_seamless;
extern int amdgpu_user_partt_mode;
extern int amdgpu_agp;
#define AMDGPU_VM_MAX_NUM_CTX 4096
#define AMDGPU_SG_THRESHOLD (256*1024*1024)


@ -207,7 +207,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
}
for (i = 0; i < p->nchunks; i++) {
struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
struct drm_amdgpu_cs_chunk __user *chunk_ptr = NULL;
struct drm_amdgpu_cs_chunk user_chunk;
uint32_t __user *cdata;


@ -207,6 +207,7 @@ int amdgpu_user_partt_mode = AMDGPU_AUTO_COMPUTE_PARTITION_MODE;
int amdgpu_umsch_mm;
int amdgpu_seamless = -1; /* auto */
uint amdgpu_debug_mask;
int amdgpu_agp = -1; /* auto */
static void amdgpu_drv_delayed_reset_work_handler(struct work_struct *work);
@ -961,6 +962,15 @@ module_param_named(seamless, amdgpu_seamless, int, 0444);
MODULE_PARM_DESC(debug_mask, "debug options for amdgpu, disabled by default");
module_param_named(debug_mask, amdgpu_debug_mask, uint, 0444);
/**
* DOC: agp (int)
* Enable the AGP aperture. This provides an aperture in the GPU's internal
* address space for direct access to system memory. Note that these accesses
* are non-snooped, so they are only used for access to uncached memory.
*/
MODULE_PARM_DESC(agp, "AGP (-1 = auto (default), 0 = disable, 1 = enable)");
module_param_named(agp, amdgpu_agp, int, 0444);
/* These devices are not supported by amdgpu.
* They are supported by the mach64, r128, radeon drivers
*/
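
The agp module parameter documented in this hunk is consulted by the per-generation GMC code when laying out the GPU's internal address space. A simplified sketch of that gating, condensed from the gmc_v9/gmc_v10/gmc_v11 hunks further below (not the verbatim driver code), looks like this:

    /*
     * Simplified sketch: how the new amdgpu_agp parameter gates the AGP
     * aperture setup. Condensed from the gmc_v9_0/gmc_v10_0/gmc_v11_0
     * vram_gtt_location changes later in this diff.
     */
    static void example_vram_gtt_location(struct amdgpu_device *adev,
    				      struct amdgpu_gmc *mc, u64 base)
    {
    	amdgpu_gmc_set_agp_default(adev, mc);	/* default: no AGP aperture */
    	amdgpu_gmc_vram_location(adev, mc, base);
    	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);

    	/* Only place the AGP aperture when explicitly requested (agp=1)
    	 * and not running as an SR-IOV virtual function. */
    	if (!amdgpu_sriov_vf(adev) && amdgpu_agp == 1)
    		amdgpu_gmc_agp_location(adev, mc);
    }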


@ -1473,6 +1473,11 @@ int psp_xgmi_get_topology_info(struct psp_context *psp,
topology->nodes[i].num_links = (requires_reflection && topology->nodes[i].num_links) ?
topology->nodes[i].num_links : node_num_links;
}
/* populate the connected port num info if supported and available */
if (ta_port_num_support && topology->nodes[i].num_links) {
memcpy(topology->nodes[i].port_num, link_extend_info_output->nodes[i].port_num,
sizeof(struct xgmi_connected_port_num) * TA_XGMI__MAX_PORT_NUM);
}
/* reflect the topology information for bi-directionality */
if (requires_reflection && topology->nodes[i].num_hops)


@ -150,6 +150,7 @@ struct psp_xgmi_node_info {
uint8_t is_sharing_enabled;
enum ta_xgmi_assigned_sdma_engine sdma_engine;
uint8_t num_links;
struct xgmi_connected_port_num port_num[TA_XGMI__MAX_PORT_NUM];
};
struct psp_xgmi_topology_info {


@ -1188,7 +1188,7 @@ static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev,
}
if (block_obj->hw_ops->query_ras_error_count)
block_obj->hw_ops->query_ras_error_count(adev, &err_data);
block_obj->hw_ops->query_ras_error_count(adev, err_data);
if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) ||
(info->head.block == AMDGPU_RAS_BLOCK__GFX) ||


@ -398,6 +398,7 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
* amdgpu_uvd_entity_init - init entity
*
* @adev: amdgpu_device pointer
* @ring: amdgpu_ring pointer to check
*
* Initialize the entity used for handle management in the kernel driver.
*/


@ -230,6 +230,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
* amdgpu_vce_entity_init - init entity
*
* @adev: amdgpu_device pointer
* @ring: amdgpu_ring pointer to check
*
* Initialize the entity used for handle management in the kernel driver.
*/


@ -675,7 +675,7 @@ static void gmc_v10_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_set_agp_default(adev, mc);
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
if (!amdgpu_sriov_vf(adev))
if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
amdgpu_gmc_agp_location(adev, mc);
/* base offset of vram pages */


@ -640,8 +640,9 @@ static void gmc_v11_0_vram_gtt_location(struct amdgpu_device *adev,
amdgpu_gmc_set_agp_default(adev, mc);
amdgpu_gmc_vram_location(adev, &adev->gmc, base);
amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_HIGH);
if (!amdgpu_sriov_vf(adev) ||
(amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0)))
if (!amdgpu_sriov_vf(adev) &&
(amdgpu_ip_version(adev, GC_HWIP, 0) < IP_VERSION(11, 5, 0)) &&
(amdgpu_agp == 1))
amdgpu_gmc_agp_location(adev, mc);
/* base offset of vram pages */


@ -1630,7 +1630,7 @@ static void gmc_v9_0_vram_gtt_location(struct amdgpu_device *adev,
} else {
amdgpu_gmc_vram_location(adev, mc, base);
amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
if (!amdgpu_sriov_vf(adev))
if (!amdgpu_sriov_vf(adev) && (amdgpu_agp == 1))
amdgpu_gmc_agp_location(adev, mc);
}
/* base offset of vram pages */
@ -2170,8 +2170,6 @@ static int gmc_v9_0_sw_fini(void *handle)
if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3))
amdgpu_gmc_sysfs_fini(adev);
adev->gmc.num_mem_partitions = 0;
kfree(adev->gmc.mem_partitions);
amdgpu_gmc_ras_fini(adev);
amdgpu_gem_force_release(adev);
@ -2185,6 +2183,9 @@ static int gmc_v9_0_sw_fini(void *handle)
amdgpu_bo_free_kernel(&adev->gmc.pdb0_bo, NULL, &adev->gmc.ptr_pdb0);
amdgpu_bo_fini(adev);
adev->gmc.num_mem_partitions = 0;
kfree(adev->gmc.mem_partitions);
return 0;
}


@ -130,6 +130,9 @@ static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
uint64_t value;
int i;
if (amdgpu_sriov_vf(adev))
return;
inst_mask = adev->aid_mask;
for_each_inst(i, inst_mask) {
/* Program the AGP BAR */
@ -139,9 +142,6 @@ static void mmhub_v1_8_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, i, regMC_VM_AGP_TOP,
adev->gmc.agp_end >> 24);
if (amdgpu_sriov_vf(adev))
return;
/* Program the system aperture low logical page number. */
WREG32_SOC15(MMHUB, i, regMC_VM_SYSTEM_APERTURE_LOW_ADDR,
min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);


@ -2079,7 +2079,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
struct dmub_srv_create_params create_params;
struct dmub_srv_region_params region_params;
struct dmub_srv_region_info region_info;
struct dmub_srv_fb_params fb_params;
struct dmub_srv_memory_params memory_params;
struct dmub_srv_fb_info *fb_info;
struct dmub_srv *dmub_srv;
const struct dmcub_firmware_header_v1_0 *hdr;
@ -2182,6 +2182,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
adev->dm.dmub_fw->data +
le32_to_cpu(hdr->header.ucode_array_offset_bytes) +
PSP_HEADER_BYTES;
region_params.is_mailbox_in_inbox = false;
status = dmub_srv_calc_region_info(dmub_srv, &region_params,
&region_info);
@ -2205,10 +2206,10 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
return r;
/* Rebase the regions on the framebuffer address. */
memset(&fb_params, 0, sizeof(fb_params));
fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr;
fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr;
fb_params.region_info = &region_info;
memset(&memory_params, 0, sizeof(memory_params));
memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
memory_params.region_info = &region_info;
adev->dm.dmub_fb_info =
kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
@ -2220,7 +2221,7 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
return -ENOMEM;
}
status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info);
status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
if (status != DMUB_STATUS_OK) {
DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
return -EINVAL;
@ -7481,6 +7482,9 @@ static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap,
int i;
int result = -EIO;
if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported)
return result;
cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL);
if (!cmd.payloads)
@ -9603,14 +9607,14 @@ static bool should_reset_plane(struct drm_atomic_state *state,
struct drm_plane *other;
struct drm_plane_state *old_other_state, *new_other_state;
struct drm_crtc_state *new_crtc_state;
struct amdgpu_device *adev = drm_to_adev(plane->dev);
int i;
/*
* TODO: Remove this hack once the checks below are sufficient
* enough to determine when we need to reset all the planes on
* the stream.
* TODO: Remove this hack for all asics once it proves that the
* fast updates works fine on DCN3.2+.
*/
if (state->allow_modeset)
if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
return true;
/* Exit early if we know that we're adding or removing the plane. */


@ -536,11 +536,8 @@ bool dm_helpers_dp_read_dpcd(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
drm_dbg_dp(aconnector->base.dev,
"Failed to find connector for link!\n");
if (!aconnector)
return false;
}
return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data,
size) == size;


@ -1604,31 +1604,31 @@ enum dc_status dm_dp_mst_is_port_support_mode(
unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0;
unsigned int max_compressed_bw_in_kbps = 0;
struct dc_dsc_bw_range bw_range = {0};
struct drm_dp_mst_topology_mgr *mst_mgr;
uint16_t full_pbn = aconnector->mst_output_port->full_pbn;
/*
* check if the mode could be supported if DSC pass-through is supported
* AND check if there is enough bandwidth available to support the mode
* with DSC enabled.
* Consider the case with the depth of the mst topology tree is equal or less than 2
* A. When dsc bitstream can be transmitted along the entire path
* 1. dsc is possible between source and branch/leaf device (common dsc params is possible), AND
* 2. dsc passthrough supported at MST branch, or
* 3. dsc decoding supported at leaf MST device
* Use maximum dsc compression as bw constraint
* B. When dsc bitstream cannot be transmitted along the entire path
* Use native bw as bw constraint
*/
if (is_dsc_common_config_possible(stream, &bw_range) &&
aconnector->mst_output_port->passthrough_aux) {
mst_mgr = aconnector->mst_output_port->mgr;
mutex_lock(&mst_mgr->lock);
(aconnector->mst_output_port->passthrough_aux ||
aconnector->dsc_aux == &aconnector->mst_output_port->aux)) {
cur_link_settings = stream->link->verified_link_cap;
upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link,
&cur_link_settings
);
down_link_bw_in_kbps = kbps_from_pbn(aconnector->mst_output_port->full_pbn);
&cur_link_settings);
down_link_bw_in_kbps = kbps_from_pbn(full_pbn);
/* pick the bottleneck */
end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps,
down_link_bw_in_kbps);
mutex_unlock(&mst_mgr->lock);
/*
* use the maximum dsc compression bandwidth as the required
* bandwidth for the mode
@ -1643,8 +1643,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
/* check if mode could be supported within full_pbn */
bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3;
pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp, false);
if (pbn > aconnector->mst_output_port->full_pbn)
if (pbn > full_pbn)
return DC_FAIL_BANDWIDTH_VALIDATE;
}


@ -820,22 +820,22 @@ static void dcn35_set_idle_state(struct clk_mgr *clk_mgr_base, bool allow_idle)
if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
val |= DMUB_IPS1_ALLOW_MASK;
val |= DMUB_IPS2_ALLOW_MASK;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
val = val & ~DMUB_IPS1_ALLOW_MASK;
val = val & ~DMUB_IPS2_ALLOW_MASK;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
val |= DMUB_IPS1_ALLOW_MASK;
val = val & ~DMUB_IPS2_ALLOW_MASK;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
val |= DMUB_IPS1_ALLOW_MASK;
val |= DMUB_IPS2_ALLOW_MASK;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
val = val & ~DMUB_IPS1_ALLOW_MASK;
val |= DMUB_IPS2_ALLOW_MASK;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
val = val & ~DMUB_IPS1_ALLOW_MASK;
val = val & ~DMUB_IPS2_ALLOW_MASK;
}
if (!allow_idle) {
val = val & ~DMUB_IPS1_ALLOW_MASK;
val = val & ~DMUB_IPS2_ALLOW_MASK;
val |= DMUB_IPS1_ALLOW_MASK;
val |= DMUB_IPS2_ALLOW_MASK;
}
dcn35_smu_write_ips_scratch(clk_mgr, val);


@ -3178,7 +3178,7 @@ static bool update_planes_and_stream_state(struct dc *dc,
struct pipe_ctx *otg_master = resource_get_otg_master_for_stream(&context->res_ctx,
context->streams[i]);
if (otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
if (otg_master && otg_master->stream->test_pattern.type != DP_TEST_PATTERN_VIDEO_MODE)
resource_build_test_pattern_params(&context->res_ctx, otg_master);
}
}
@ -4934,8 +4934,8 @@ bool dc_dmub_is_ips_idle_state(struct dc *dc)
if (dc->hwss.get_idle_state)
idle_state = dc->hwss.get_idle_state(dc);
if ((idle_state & DMUB_IPS1_ALLOW_MASK) ||
(idle_state & DMUB_IPS2_ALLOW_MASK))
if (!(idle_state & DMUB_IPS1_ALLOW_MASK) ||
!(idle_state & DMUB_IPS2_ALLOW_MASK))
return true;
return false;


@ -5190,6 +5190,9 @@ bool dc_resource_acquire_secondary_pipe_for_mpc_odm_legacy(
sec_next = sec_pipe->next_odm_pipe;
sec_prev = sec_pipe->prev_odm_pipe;
if (pri_pipe == NULL)
return false;
*sec_pipe = *pri_pipe;
sec_pipe->top_pipe = sec_top;


@ -1202,11 +1202,11 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
allow_state = dc->hwss.get_idle_state(dc);
dc->hwss.set_idle_state(dc, false);
if (allow_state & DMUB_IPS2_ALLOW_MASK) {
if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) {
// Wait for evaluation time
udelay(dc->debug.ips2_eval_delay_us);
commit_state = dc->hwss.get_idle_state(dc);
if (commit_state & DMUB_IPS2_COMMIT_MASK) {
if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) {
// Tell PMFW to exit low power state
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
@ -1216,7 +1216,7 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
for (i = 0; i < max_num_polls; ++i) {
commit_state = dc->hwss.get_idle_state(dc);
if (!(commit_state & DMUB_IPS2_COMMIT_MASK))
if (commit_state & DMUB_IPS2_COMMIT_MASK)
break;
udelay(1);
@ -1235,10 +1235,10 @@ void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
}
dc_dmub_srv_notify_idle(dc, false);
if (allow_state & DMUB_IPS1_ALLOW_MASK) {
if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) {
for (i = 0; i < max_num_polls; ++i) {
commit_state = dc->hwss.get_idle_state(dc);
if (!(commit_state & DMUB_IPS1_COMMIT_MASK))
if (commit_state & DMUB_IPS1_COMMIT_MASK)
break;
udelay(1);


@ -177,6 +177,7 @@ struct dc_panel_patch {
unsigned int disable_fams;
unsigned int skip_avmute;
unsigned int mst_start_top_delay;
unsigned int remove_sink_ext_caps;
};
struct dc_edid_caps {


@ -261,12 +261,6 @@ static void enc35_stream_encoder_enable(
/* invalid mode ! */
ASSERT_CRITICAL(false);
}
REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 1);
REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 1);
} else {
REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 0);
REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 0);
}
}
@ -436,6 +430,8 @@ static void enc35_disable_fifo(struct stream_encoder *enc)
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 0);
REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 0);
REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 0);
}
static void enc35_enable_fifo(struct stream_encoder *enc)
@ -443,6 +439,8 @@ static void enc35_enable_fifo(struct stream_encoder *enc)
struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc);
REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7);
REG_UPDATE(DIG_FE_CLK_CNTL, DIG_FE_CLK_EN, 1);
REG_UPDATE(DIG_FE_EN_CNTL, DIG_FE_ENABLE, 1);
enc35_reset_fifo(enc, true);
enc35_reset_fifo(enc, false);


@ -1088,6 +1088,9 @@ static bool detect_link_and_local_sink(struct dc_link *link,
if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
link->ctx->dc->debug.hdmi20_disable = true;
if (sink->edid_caps.panel_patch.remove_sink_ext_caps)
link->dpcd_sink_ext_caps.raw = 0;
if (dc_is_hdmi_signal(link->connector_signal))
read_scdc_caps(link->ddc, link->local_sink);


@ -195,6 +195,7 @@ struct dmub_srv_region_params {
uint32_t vbios_size;
const uint8_t *fw_inst_const;
const uint8_t *fw_bss_data;
bool is_mailbox_in_inbox;
};
/**
@ -214,20 +215,25 @@ struct dmub_srv_region_params {
*/
struct dmub_srv_region_info {
uint32_t fb_size;
uint32_t inbox_size;
uint8_t num_regions;
struct dmub_region regions[DMUB_WINDOW_TOTAL];
};
/**
* struct dmub_srv_fb_params - parameters used for driver fb setup
* struct dmub_srv_memory_params - parameters used for driver fb setup
* @region_info: region info calculated by dmub service
* @cpu_addr: base cpu address for the framebuffer
* @gpu_addr: base gpu virtual address for the framebuffer
* @cpu_fb_addr: base cpu address for the framebuffer
* @cpu_inbox_addr: base cpu address for the gart
* @gpu_fb_addr: base gpu virtual address for the framebuffer
* @gpu_inbox_addr: base gpu virtual address for the gart
*/
struct dmub_srv_fb_params {
struct dmub_srv_memory_params {
const struct dmub_srv_region_info *region_info;
void *cpu_addr;
uint64_t gpu_addr;
void *cpu_fb_addr;
void *cpu_inbox_addr;
uint64_t gpu_fb_addr;
uint64_t gpu_inbox_addr;
};
/**
@ -563,8 +569,8 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
* DMUB_STATUS_OK - success
* DMUB_STATUS_INVALID - unspecified error
*/
enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
const struct dmub_srv_fb_params *params,
enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
const struct dmub_srv_memory_params *params,
struct dmub_srv_fb_info *out);
/**


@ -434,7 +434,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
uint32_t previous_top = 0;
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
@ -459,8 +459,15 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
bios->base = dmub_align(stack->top, 256);
bios->top = bios->base + params->vbios_size;
if (params->is_mailbox_in_inbox) {
mail->base = 0;
mail->top = mail->base + DMUB_MAILBOX_SIZE;
previous_top = bios->top;
} else {
mail->base = dmub_align(bios->top, 256);
mail->top = mail->base + DMUB_MAILBOX_SIZE;
previous_top = mail->top;
}
fw_info = dmub_get_fw_meta_info(params);
@ -479,7 +486,7 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
dmub->fw_version = fw_info->fw_version;
}
trace_buff->base = dmub_align(mail->top, 256);
trace_buff->base = dmub_align(previous_top, 256);
trace_buff->top = trace_buff->base + dmub_align(trace_buffer_size, 64);
fw_state->base = dmub_align(trace_buff->top, 256);
@ -490,11 +497,14 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
out->fb_size = dmub_align(scratch_mem->top, 4096);
if (params->is_mailbox_in_inbox)
out->inbox_size = dmub_align(mail->top, 4096);
return DMUB_STATUS_OK;
}
enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
const struct dmub_srv_fb_params *params,
enum dmub_status dmub_srv_calc_mem_info(struct dmub_srv *dmub,
const struct dmub_srv_memory_params *params,
struct dmub_srv_fb_info *out)
{
uint8_t *cpu_base;
@ -509,8 +519,8 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
if (params->region_info->num_regions != DMUB_NUM_WINDOWS)
return DMUB_STATUS_INVALID;
cpu_base = (uint8_t *)params->cpu_addr;
gpu_base = params->gpu_addr;
cpu_base = (uint8_t *)params->cpu_fb_addr;
gpu_base = params->gpu_fb_addr;
for (i = 0; i < DMUB_NUM_WINDOWS; ++i) {
const struct dmub_region *reg =
@ -518,6 +528,12 @@ enum dmub_status dmub_srv_calc_fb_info(struct dmub_srv *dmub,
out->fb[i].cpu_addr = cpu_base + reg->base;
out->fb[i].gpu_addr = gpu_base + reg->base;
if (i == DMUB_WINDOW_4_MAILBOX && params->cpu_inbox_addr != 0) {
out->fb[i].cpu_addr = (uint8_t *)params->cpu_inbox_addr + reg->base;
out->fb[i].gpu_addr = params->gpu_inbox_addr + reg->base;
}
out->fb[i].size = reg->top - reg->base;
}
@ -707,10 +723,17 @@ enum dmub_status dmub_srv_sync_inbox1(struct dmub_srv *dmub)
return DMUB_STATUS_INVALID;
if (dmub->hw_funcs.get_inbox1_rptr && dmub->hw_funcs.get_inbox1_wptr) {
dmub->inbox1_rb.rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
dmub->inbox1_rb.wrpt = dmub->hw_funcs.get_inbox1_wptr(dmub);
uint32_t rptr = dmub->hw_funcs.get_inbox1_rptr(dmub);
uint32_t wptr = dmub->hw_funcs.get_inbox1_wptr(dmub);
if (rptr > dmub->inbox1_rb.capacity || wptr > dmub->inbox1_rb.capacity) {
return DMUB_STATUS_HW_FAILURE;
} else {
dmub->inbox1_rb.rptr = rptr;
dmub->inbox1_rb.wrpt = wptr;
dmub->inbox1_last_wptr = dmub->inbox1_rb.wrpt;
}
}
return DMUB_STATUS_OK;
}
@ -743,6 +766,11 @@ enum dmub_status dmub_srv_cmd_queue(struct dmub_srv *dmub,
if (!dmub->hw_init)
return DMUB_STATUS_INVALID;
if (dmub->inbox1_rb.rptr > dmub->inbox1_rb.capacity ||
dmub->inbox1_rb.wrpt > dmub->inbox1_rb.capacity) {
return DMUB_STATUS_HW_FAILURE;
}
if (dmub_rb_push_front(&dmub->inbox1_rb, cmd))
return DMUB_STATUS_OK;


@ -123,7 +123,7 @@ typedef enum {
VOLTAGE_GUARDBAND_COUNT
} GFX_GUARDBAND_e;
#define SMU_METRICS_TABLE_VERSION 0x8
#define SMU_METRICS_TABLE_VERSION 0x9
typedef struct __attribute__((packed, aligned(4))) {
uint32_t AccumulationCounter;
@ -211,6 +211,14 @@ typedef struct __attribute__((packed, aligned(4))) {
//XGMI Data transfer size
uint64_t XgmiReadDataSizeAcc[8];//in KByte
uint64_t XgmiWriteDataSizeAcc[8];//in KByte
//PCIE BW Data and error count
uint32_t PcieBandwidth[4];
uint32_t PCIeL0ToRecoveryCountAcc; // The Pcie counter itself is accumulated
uint32_t PCIenReplayAAcc; // The Pcie counter itself is accumulated
uint32_t PCIenReplayARolloverCountAcc; // The Pcie counter itself is accumulated
uint32_t PCIeNAKSentCountAcc; // The Pcie counter itself is accumulated
uint32_t PCIeNAKReceivedCountAcc; // The Pcie counter itself is accumulated
} MetricsTable_t;
#define SMU_VF_METRICS_TABLE_VERSION 0x3


@ -1454,7 +1454,7 @@ static int smu_v13_0_6_register_irq_handler(struct smu_context *smu)
static int smu_v13_0_6_notify_unload(struct smu_context *smu)
{
if (smu->smc_fw_version <= 0x553500)
if (amdgpu_in_reset(smu->adev))
return 0;
dev_dbg(smu->adev->dev, "Notify PMFW about driver unload");
@ -2095,6 +2095,14 @@ static ssize_t smu_v13_0_6_get_gpu_metrics(struct smu_context *smu, void **table
smu_v13_0_6_get_current_pcie_link_speed(smu);
gpu_metrics->pcie_bandwidth_acc =
SMUQ10_ROUND(metrics->PcieBandwidthAcc[0]);
gpu_metrics->pcie_bandwidth_inst =
SMUQ10_ROUND(metrics->PcieBandwidth[0]);
gpu_metrics->pcie_l0_to_recov_count_acc =
metrics->PCIeL0ToRecoveryCountAcc;
gpu_metrics->pcie_replay_count_acc =
metrics->PCIenReplayAAcc;
gpu_metrics->pcie_replay_rover_count_acc =
metrics->PCIenReplayARolloverCountAcc;
}
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();


@ -5,7 +5,7 @@ termcolor==2.3.0
certifi==2023.7.22
charset-normalizer==3.2.0
idna==3.4
pip==23.2.1
pip==23.3
python-gitlab==3.15.0
requests==2.31.0
requests-toolbelt==1.0.0
@ -13,5 +13,5 @@ ruamel.yaml==0.17.32
ruamel.yaml.clib==0.2.7
setuptools==68.0.0
tenacity==8.2.3
urllib3==2.0.4
urllib3==2.0.7
wheel==0.41.1


@ -336,6 +336,12 @@ static const struct dmi_system_id orientation_data[] = {
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "IdeaPad Duet 3 10IGL5"),
},
.driver_data = (void *)&lcd1200x1920_rightside_up,
}, { /* Lenovo Legion Go 8APU1 */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "Legion Go 8APU1"),
},
.driver_data = (void *)&lcd1600x2560_leftside_up,
}, { /* Lenovo Yoga Book X90F / X90L */
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),


@ -14,7 +14,7 @@ struct nvkm_event {
int index_nr;
spinlock_t refs_lock;
spinlock_t list_lock;
rwlock_t list_lock;
int *refs;
struct list_head ntfy;
@ -38,7 +38,7 @@ nvkm_event_init(const struct nvkm_event_func *func, struct nvkm_subdev *subdev,
int types_nr, int index_nr, struct nvkm_event *event)
{
spin_lock_init(&event->refs_lock);
spin_lock_init(&event->list_lock);
rwlock_init(&event->list_lock);
return __nvkm_event_init(func, subdev, types_nr, index_nr, event);
}


@ -726,6 +726,11 @@ nouveau_display_create(struct drm_device *dev)
if (nouveau_modeset != 2) {
ret = nvif_disp_ctor(&drm->client.device, "kmsDisp", 0, &disp->disp);
/* no display hw */
if (ret == -ENODEV) {
ret = 0;
goto disp_create_err;
}
if (!ret && (disp->disp.outp_mask || drm->vbios.dcb.entries)) {
nouveau_display_create_properties(dev);


@ -81,17 +81,17 @@ nvkm_event_ntfy_state(struct nvkm_event_ntfy *ntfy)
static void
nvkm_event_ntfy_remove(struct nvkm_event_ntfy *ntfy)
{
spin_lock_irq(&ntfy->event->list_lock);
write_lock_irq(&ntfy->event->list_lock);
list_del_init(&ntfy->head);
spin_unlock_irq(&ntfy->event->list_lock);
write_unlock_irq(&ntfy->event->list_lock);
}
static void
nvkm_event_ntfy_insert(struct nvkm_event_ntfy *ntfy)
{
spin_lock_irq(&ntfy->event->list_lock);
write_lock_irq(&ntfy->event->list_lock);
list_add_tail(&ntfy->head, &ntfy->event->ntfy);
spin_unlock_irq(&ntfy->event->list_lock);
write_unlock_irq(&ntfy->event->list_lock);
}
static void
@ -176,7 +176,7 @@ nvkm_event_ntfy(struct nvkm_event *event, int id, u32 bits)
return;
nvkm_trace(event->subdev, "event: ntfy %08x on %d\n", bits, id);
spin_lock_irqsave(&event->list_lock, flags);
read_lock_irqsave(&event->list_lock, flags);
list_for_each_entry_safe(ntfy, ntmp, &event->ntfy, head) {
if (ntfy->id == id && ntfy->bits & bits) {
@ -185,7 +185,7 @@ nvkm_event_ntfy(struct nvkm_event *event, int id, u32 bits)
}
}
spin_unlock_irqrestore(&event->list_lock, flags);
read_unlock_irqrestore(&event->list_lock, flags);
}
void


@ -689,8 +689,8 @@ r535_gsp_rpc_get(struct nvkm_gsp *gsp, u32 fn, u32 argc)
struct nvfw_gsp_rpc *rpc;
rpc = r535_gsp_cmdq_get(gsp, ALIGN(sizeof(*rpc) + argc, sizeof(u64)));
if (!rpc)
return NULL;
if (IS_ERR(rpc))
return ERR_CAST(rpc);
rpc->header_version = 0x03000000;
rpc->signature = ('C' << 24) | ('P' << 16) | ('R' << 8) | 'V';
@ -1159,7 +1159,7 @@ static void
r535_gsp_acpi_mux_id(acpi_handle handle, u32 id, MUX_METHOD_DATA_ELEMENT *mode,
MUX_METHOD_DATA_ELEMENT *part)
{
acpi_handle iter = NULL, handle_mux;
acpi_handle iter = NULL, handle_mux = NULL;
acpi_status status;
unsigned long long value;


@ -63,7 +63,7 @@ static int dw_reg_read(void *context, unsigned int reg, unsigned int *val)
{
struct dw_i2c_dev *dev = context;
*val = readl_relaxed(dev->base + reg);
*val = readl(dev->base + reg);
return 0;
}
@ -72,7 +72,7 @@ static int dw_reg_write(void *context, unsigned int reg, unsigned int val)
{
struct dw_i2c_dev *dev = context;
writel_relaxed(val, dev->base + reg);
writel(val, dev->base + reg);
return 0;
}
@ -81,7 +81,7 @@ static int dw_reg_read_swab(void *context, unsigned int reg, unsigned int *val)
{
struct dw_i2c_dev *dev = context;
*val = swab32(readl_relaxed(dev->base + reg));
*val = swab32(readl(dev->base + reg));
return 0;
}
@ -90,7 +90,7 @@ static int dw_reg_write_swab(void *context, unsigned int reg, unsigned int val)
{
struct dw_i2c_dev *dev = context;
writel_relaxed(swab32(val), dev->base + reg);
writel(swab32(val), dev->base + reg);
return 0;
}
@ -99,8 +99,8 @@ static int dw_reg_read_word(void *context, unsigned int reg, unsigned int *val)
{
struct dw_i2c_dev *dev = context;
*val = readw_relaxed(dev->base + reg) |
(readw_relaxed(dev->base + reg + 2) << 16);
*val = readw(dev->base + reg) |
(readw(dev->base + reg + 2) << 16);
return 0;
}
@ -109,8 +109,8 @@ static int dw_reg_write_word(void *context, unsigned int reg, unsigned int val)
{
struct dw_i2c_dev *dev = context;
writew_relaxed(val, dev->base + reg);
writew_relaxed(val >> 16, dev->base + reg + 2);
writew(val, dev->base + reg);
writew(val >> 16, dev->base + reg + 2);
return 0;
}


@ -771,7 +771,7 @@ static int ocores_i2c_resume(struct device *dev)
return ocores_init(dev, i2c);
}
static DEFINE_SIMPLE_DEV_PM_OPS(ocores_i2c_pm,
static DEFINE_NOIRQ_DEV_PM_OPS(ocores_i2c_pm,
ocores_i2c_suspend, ocores_i2c_resume);
static struct platform_driver ocores_i2c_driver = {


@ -265,6 +265,9 @@ struct pxa_i2c {
u32 hs_mask;
struct i2c_bus_recovery_info recovery;
struct pinctrl *pinctrl;
struct pinctrl_state *pinctrl_default;
struct pinctrl_state *pinctrl_recovery;
};
#define _IBMR(i2c) ((i2c)->reg_ibmr)
@ -1299,12 +1302,13 @@ static void i2c_pxa_prepare_recovery(struct i2c_adapter *adap)
*/
gpiod_set_value(i2c->recovery.scl_gpiod, ibmr & IBMR_SCLS);
gpiod_set_value(i2c->recovery.sda_gpiod, ibmr & IBMR_SDAS);
WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery));
}
static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap)
{
struct pxa_i2c *i2c = adap->algo_data;
struct i2c_bus_recovery_info *bri = adap->bus_recovery_info;
u32 isr;
/*
@ -1318,7 +1322,7 @@ static void i2c_pxa_unprepare_recovery(struct i2c_adapter *adap)
i2c_pxa_do_reset(i2c);
}
WARN_ON(pinctrl_select_state(bri->pinctrl, bri->pins_default));
WARN_ON(pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default));
dev_dbg(&i2c->adap.dev, "recovery: IBMR 0x%08x ISR 0x%08x\n",
readl(_IBMR(i2c)), readl(_ISR(i2c)));
@ -1340,20 +1344,76 @@ static int i2c_pxa_init_recovery(struct pxa_i2c *i2c)
if (IS_ENABLED(CONFIG_I2C_PXA_SLAVE))
return 0;
bri->pinctrl = devm_pinctrl_get(dev);
if (PTR_ERR(bri->pinctrl) == -ENODEV) {
bri->pinctrl = NULL;
i2c->pinctrl = devm_pinctrl_get(dev);
if (PTR_ERR(i2c->pinctrl) == -ENODEV)
i2c->pinctrl = NULL;
if (IS_ERR(i2c->pinctrl))
return PTR_ERR(i2c->pinctrl);
if (!i2c->pinctrl)
return 0;
i2c->pinctrl_default = pinctrl_lookup_state(i2c->pinctrl,
PINCTRL_STATE_DEFAULT);
i2c->pinctrl_recovery = pinctrl_lookup_state(i2c->pinctrl, "recovery");
if (IS_ERR(i2c->pinctrl_default) || IS_ERR(i2c->pinctrl_recovery)) {
dev_info(dev, "missing pinmux recovery information: %ld %ld\n",
PTR_ERR(i2c->pinctrl_default),
PTR_ERR(i2c->pinctrl_recovery));
return 0;
}
/*
* Claiming GPIOs can influence the pinmux state, and may glitch the
* I2C bus. Do this carefully.
*/
bri->scl_gpiod = devm_gpiod_get(dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
if (bri->scl_gpiod == ERR_PTR(-EPROBE_DEFER))
return -EPROBE_DEFER;
if (IS_ERR(bri->scl_gpiod)) {
dev_info(dev, "missing scl gpio recovery information: %pe\n",
bri->scl_gpiod);
return 0;
}
/*
* We have SCL. Pull SCL low and wait a bit so that SDA glitches
* have no effect.
*/
gpiod_direction_output(bri->scl_gpiod, 0);
udelay(10);
bri->sda_gpiod = devm_gpiod_get(dev, "sda", GPIOD_OUT_HIGH_OPEN_DRAIN);
/* Wait a bit in case of a SDA glitch, and then release SCL. */
udelay(10);
gpiod_direction_output(bri->scl_gpiod, 1);
if (bri->sda_gpiod == ERR_PTR(-EPROBE_DEFER))
return -EPROBE_DEFER;
if (IS_ERR(bri->sda_gpiod)) {
dev_info(dev, "missing sda gpio recovery information: %pe\n",
bri->sda_gpiod);
return 0;
}
if (IS_ERR(bri->pinctrl))
return PTR_ERR(bri->pinctrl);
bri->prepare_recovery = i2c_pxa_prepare_recovery;
bri->unprepare_recovery = i2c_pxa_unprepare_recovery;
bri->recover_bus = i2c_generic_scl_recovery;
i2c->adap.bus_recovery_info = bri;
return 0;
/*
* Claiming GPIOs can change the pinmux state, which confuses the
* pinctrl since pinctrl's idea of the current setting is unaffected
* by the pinmux change caused by claiming the GPIO. Work around that
* by switching pinctrl to the GPIO state here. We do it this way to
* avoid glitching the I2C bus.
*/
pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_recovery);
return pinctrl_select_state(i2c->pinctrl, i2c->pinctrl_default);
}
static int i2c_pxa_probe(struct platform_device *dev)


@ -2379,12 +2379,12 @@ retry_baser:
break;
}
if (!shr)
gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
its_write_baser(its, baser, val);
tmp = baser->val;
if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE)
tmp &= ~GITS_BASER_SHAREABILITY_MASK;
if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
/*
* Shareability didn't stick. Just use
@ -2394,10 +2394,9 @@ retry_baser:
* non-cacheable as well.
*/
shr = tmp & GITS_BASER_SHAREABILITY_MASK;
if (!shr) {
if (!shr)
cache = GITS_BASER_nC;
gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
}
goto retry_baser;
}
@ -2609,6 +2608,11 @@ static int its_alloc_tables(struct its_node *its)
/* erratum 24313: ignore memory access type */
cache = GITS_BASER_nCnB;
if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) {
cache = GITS_BASER_nC;
shr = 0;
}
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
struct its_baser *baser = its->tables + i;
u64 val = its_read_baser(its, baser);


@ -254,7 +254,7 @@ enum evict_result {
typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);
static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context)
static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
{
unsigned long tested = 0;
struct list_head *h = lru->cursor;
@ -295,6 +295,7 @@ static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *con
h = h->next;
if (!no_sleep)
cond_resched();
}
@ -382,7 +383,10 @@ struct dm_buffer {
*/
struct buffer_tree {
union {
struct rw_semaphore lock;
rwlock_t spinlock;
} u;
struct rb_root root;
} ____cacheline_aligned_in_smp;
@ -393,9 +397,12 @@ struct dm_buffer_cache {
* on the locks.
*/
unsigned int num_locks;
bool no_sleep;
struct buffer_tree trees[];
};
static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
{
return dm_hash_locks_index(block, num_locks);
@ -403,22 +410,34 @@ static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
{
down_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
else
down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}
static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
{
up_read(&bc->trees[cache_index(block, bc->num_locks)].lock);
if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
else
up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}
static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
{
down_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
else
down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}
static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
{
up_write(&bc->trees[cache_index(block, bc->num_locks)].lock);
if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
else
up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
}
/*
@ -442,18 +461,32 @@ static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool
static void __lh_lock(struct lock_history *lh, unsigned int index)
{
if (lh->write)
down_write(&lh->cache->trees[index].lock);
if (lh->write) {
if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
write_lock_bh(&lh->cache->trees[index].u.spinlock);
else
down_read(&lh->cache->trees[index].lock);
down_write(&lh->cache->trees[index].u.lock);
} else {
if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
read_lock_bh(&lh->cache->trees[index].u.spinlock);
else
down_read(&lh->cache->trees[index].u.lock);
}
}
static void __lh_unlock(struct lock_history *lh, unsigned int index)
{
if (lh->write)
up_write(&lh->cache->trees[index].lock);
if (lh->write) {
if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
write_unlock_bh(&lh->cache->trees[index].u.spinlock);
else
up_read(&lh->cache->trees[index].lock);
up_write(&lh->cache->trees[index].u.lock);
} else {
if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
read_unlock_bh(&lh->cache->trees[index].u.spinlock);
else
up_read(&lh->cache->trees[index].u.lock);
}
}
/*
@ -502,14 +535,18 @@ static struct dm_buffer *list_to_buffer(struct list_head *l)
return le_to_buffer(le);
}
static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks)
static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
{
unsigned int i;
bc->num_locks = num_locks;
bc->no_sleep = no_sleep;
for (i = 0; i < bc->num_locks; i++) {
init_rwsem(&bc->trees[i].lock);
if (no_sleep)
rwlock_init(&bc->trees[i].u.spinlock);
else
init_rwsem(&bc->trees[i].u.lock);
bc->trees[i].root = RB_ROOT;
}
@ -648,7 +685,7 @@ static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode
struct lru_entry *le;
struct dm_buffer *b;
le = lru_evict(&bc->lru[list_mode], __evict_pred, &w);
le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
if (!le)
return NULL;
@ -702,7 +739,7 @@ static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_
struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
while (true) {
le = lru_evict(&bc->lru[old_mode], __evict_pred, &w);
le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
if (!le)
break;
@ -915,10 +952,11 @@ static void cache_remove_range(struct dm_buffer_cache *bc,
{
unsigned int i;
BUG_ON(bc->no_sleep);
for (i = 0; i < bc->num_locks; i++) {
down_write(&bc->trees[i].lock);
down_write(&bc->trees[i].u.lock);
__remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
up_write(&bc->trees[i].lock);
up_write(&bc->trees[i].u.lock);
}
}
@ -979,8 +1017,6 @@ struct dm_bufio_client {
struct dm_buffer_cache cache; /* must be last member */
};
static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
/*----------------------------------------------------------------*/
#define dm_bufio_in_request() (!!current->bio_list)
@ -1871,6 +1907,7 @@ static void *new_read(struct dm_bufio_client *c, sector_t block,
if (need_submit)
submit_io(b, REQ_OP_READ, read_endio);
if (nf != NF_GET) /* we already tested this condition above */
wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
if (b->read_error) {
@ -2421,7 +2458,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
r = -ENOMEM;
goto bad_client;
}
cache_init(&c->cache, num_locks);
cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
c->bdev = bdev;
c->block_size = block_size;


@ -1673,7 +1673,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned int size)
unsigned int nr_iovecs = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
gfp_t gfp_mask = GFP_NOWAIT | __GFP_HIGHMEM;
unsigned int remaining_size;
unsigned int order = MAX_ORDER - 1;
unsigned int order = MAX_ORDER;
retry:
if (unlikely(gfp_mask & __GFP_DIRECT_RECLAIM))


@ -33,7 +33,7 @@ struct delay_c {
struct work_struct flush_expired_bios;
struct list_head delayed_bios;
struct task_struct *worker;
atomic_t may_delay;
bool may_delay;
struct delay_class read;
struct delay_class write;
@ -73,39 +73,6 @@ static inline bool delay_is_fast(struct delay_c *dc)
return !!dc->worker;
}
static void flush_delayed_bios_fast(struct delay_c *dc, bool flush_all)
{
struct dm_delay_info *delayed, *next;
mutex_lock(&delayed_bios_lock);
list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
if (flush_all || time_after_eq(jiffies, delayed->expires)) {
struct bio *bio = dm_bio_from_per_bio_data(delayed,
sizeof(struct dm_delay_info));
list_del(&delayed->list);
dm_submit_bio_remap(bio, NULL);
delayed->class->ops--;
}
}
mutex_unlock(&delayed_bios_lock);
}
static int flush_worker_fn(void *data)
{
struct delay_c *dc = data;
while (1) {
flush_delayed_bios_fast(dc, false);
if (unlikely(list_empty(&dc->delayed_bios))) {
set_current_state(TASK_INTERRUPTIBLE);
schedule();
} else
cond_resched();
}
return 0;
}
static void flush_bios(struct bio *bio)
{
struct bio *n;
@ -118,36 +85,61 @@ static void flush_bios(struct bio *bio)
}
}
static struct bio *flush_delayed_bios(struct delay_c *dc, bool flush_all)
static void flush_delayed_bios(struct delay_c *dc, bool flush_all)
{
struct dm_delay_info *delayed, *next;
struct bio_list flush_bio_list;
unsigned long next_expires = 0;
unsigned long start_timer = 0;
struct bio_list flush_bios = { };
bool start_timer = false;
bio_list_init(&flush_bio_list);
mutex_lock(&delayed_bios_lock);
list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
cond_resched();
if (flush_all || time_after_eq(jiffies, delayed->expires)) {
struct bio *bio = dm_bio_from_per_bio_data(delayed,
sizeof(struct dm_delay_info));
list_del(&delayed->list);
bio_list_add(&flush_bios, bio);
bio_list_add(&flush_bio_list, bio);
delayed->class->ops--;
continue;
}
if (!delay_is_fast(dc)) {
if (!start_timer) {
start_timer = 1;
start_timer = true;
next_expires = delayed->expires;
} else
} else {
next_expires = min(next_expires, delayed->expires);
}
}
}
mutex_unlock(&delayed_bios_lock);
if (start_timer)
queue_timeout(dc, next_expires);
return bio_list_get(&flush_bios);
flush_bios(bio_list_get(&flush_bio_list));
}
static int flush_worker_fn(void *data)
{
struct delay_c *dc = data;
while (!kthread_should_stop()) {
flush_delayed_bios(dc, false);
mutex_lock(&delayed_bios_lock);
if (unlikely(list_empty(&dc->delayed_bios))) {
set_current_state(TASK_INTERRUPTIBLE);
mutex_unlock(&delayed_bios_lock);
schedule();
} else {
mutex_unlock(&delayed_bios_lock);
cond_resched();
}
}
return 0;
}
static void flush_expired_bios(struct work_struct *work)
@ -155,10 +147,7 @@ static void flush_expired_bios(struct work_struct *work)
struct delay_c *dc;
dc = container_of(work, struct delay_c, flush_expired_bios);
if (delay_is_fast(dc))
flush_delayed_bios_fast(dc, false);
else
flush_bios(flush_delayed_bios(dc, false));
flush_delayed_bios(dc, false);
}
static void delay_dtr(struct dm_target *ti)
@ -177,7 +166,6 @@ static void delay_dtr(struct dm_target *ti)
if (dc->worker)
kthread_stop(dc->worker);
if (!delay_is_fast(dc))
mutex_destroy(&dc->timer_lock);
kfree(dc);
@ -236,7 +224,8 @@ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
ti->private = dc;
INIT_LIST_HEAD(&dc->delayed_bios);
atomic_set(&dc->may_delay, 1);
mutex_init(&dc->timer_lock);
dc->may_delay = true;
dc->argc = argc;
ret = delay_class_ctr(ti, &dc->read, argv);
@ -282,12 +271,12 @@ out:
"dm-delay-flush-worker");
if (IS_ERR(dc->worker)) {
ret = PTR_ERR(dc->worker);
dc->worker = NULL;
goto bad;
}
} else {
timer_setup(&dc->delay_timer, handle_delayed_timer, 0);
INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
mutex_init(&dc->timer_lock);
dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
if (!dc->kdelayd_wq) {
ret = -EINVAL;
@ -312,7 +301,7 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
struct dm_delay_info *delayed;
unsigned long expires = 0;
if (!c->delay || !atomic_read(&dc->may_delay))
if (!c->delay)
return DM_MAPIO_REMAPPED;
delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));
@ -321,6 +310,10 @@ static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);
mutex_lock(&delayed_bios_lock);
if (unlikely(!dc->may_delay)) {
mutex_unlock(&delayed_bios_lock);
return DM_MAPIO_REMAPPED;
}
c->ops++;
list_add_tail(&delayed->list, &dc->delayed_bios);
mutex_unlock(&delayed_bios_lock);
@ -337,21 +330,20 @@ static void delay_presuspend(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
atomic_set(&dc->may_delay, 0);
mutex_lock(&delayed_bios_lock);
dc->may_delay = false;
mutex_unlock(&delayed_bios_lock);
if (delay_is_fast(dc))
flush_delayed_bios_fast(dc, true);
else {
if (!delay_is_fast(dc))
del_timer_sync(&dc->delay_timer);
flush_bios(flush_delayed_bios(dc, true));
}
flush_delayed_bios(dc, true);
}
static void delay_resume(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
atomic_set(&dc->may_delay, 1);
dc->may_delay = true;
}
static int delay_map(struct dm_target *ti, struct bio *bio)

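The dm-delay hunks above make the worker thread and the submission path agree on one lock (delayed_bios_lock) before deciding whether to sleep or to queue, which is what closes the presuspend race. Below is a minimal sketch of that sleep/wake handshake, not the driver code itself: the structure is reduced to what the pattern needs, and the wake_up_process() call on the submit side is assumed rather than visible in the hunks here.

#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/sched.h>

/* Reduced stand-in for the dm-delay context; illustrative only. */
struct delay_sketch {
	struct list_head delayed_bios;
	struct task_struct *worker;
};

static DEFINE_MUTEX(delayed_bios_lock);

/* Worker: check emptiness and set TASK_INTERRUPTIBLE under the lock, so an
 * entry queued concurrently is either seen here or wakes us out of schedule().
 */
static int flush_worker_sketch(void *data)
{
	struct delay_sketch *dc = data;

	while (!kthread_should_stop()) {
		mutex_lock(&delayed_bios_lock);
		if (list_empty(&dc->delayed_bios)) {
			set_current_state(TASK_INTERRUPTIBLE);
			mutex_unlock(&delayed_bios_lock);
			schedule();	/* woken by queueing or kthread_stop() */
		} else {
			mutex_unlock(&delayed_bios_lock);
			/* flush the expired entries here */
			cond_resched();
		}
	}
	return 0;
}

/* Submit side: add under the same lock, then kick the worker. */
static void queue_entry_sketch(struct delay_sketch *dc, struct list_head *entry)
{
	mutex_lock(&delayed_bios_lock);
	list_add_tail(entry, &dc->delayed_bios);
	mutex_unlock(&delayed_bios_lock);
	wake_up_process(dc->worker);
}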

@ -185,7 +185,7 @@ static int fec_is_erasure(struct dm_verity *v, struct dm_verity_io *io,
{
if (unlikely(verity_hash(v, verity_io_hash_req(v, io),
data, 1 << v->data_dev_block_bits,
verity_io_real_digest(v, io))))
verity_io_real_digest(v, io), true)))
return 0;
return memcmp(verity_io_real_digest(v, io), want_digest,
@ -386,7 +386,7 @@ static int fec_decode_rsb(struct dm_verity *v, struct dm_verity_io *io,
/* Always re-validate the corrected block against the expected hash */
r = verity_hash(v, verity_io_hash_req(v, io), fio->output,
1 << v->data_dev_block_bits,
verity_io_real_digest(v, io));
verity_io_real_digest(v, io), true);
if (unlikely(r < 0))
return r;


@ -135,19 +135,20 @@ static int verity_hash_update(struct dm_verity *v, struct ahash_request *req,
* Wrapper for crypto_ahash_init, which handles verity salting.
*/
static int verity_hash_init(struct dm_verity *v, struct ahash_request *req,
struct crypto_wait *wait)
struct crypto_wait *wait, bool may_sleep)
{
int r;
ahash_request_set_tfm(req, v->tfm);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP |
CRYPTO_TFM_REQ_MAY_BACKLOG,
ahash_request_set_callback(req,
may_sleep ? CRYPTO_TFM_REQ_MAY_SLEEP | CRYPTO_TFM_REQ_MAY_BACKLOG : 0,
crypto_req_done, (void *)wait);
crypto_init_wait(wait);
r = crypto_wait_req(crypto_ahash_init(req), wait);
if (unlikely(r < 0)) {
if (r != -ENOMEM)
DMERR("crypto_ahash_init failed: %d", r);
return r;
}
@ -179,12 +180,12 @@ out:
}
int verity_hash(struct dm_verity *v, struct ahash_request *req,
const u8 *data, size_t len, u8 *digest)
const u8 *data, size_t len, u8 *digest, bool may_sleep)
{
int r;
struct crypto_wait wait;
r = verity_hash_init(v, req, &wait);
r = verity_hash_init(v, req, &wait, may_sleep);
if (unlikely(r < 0))
goto out;
@ -322,7 +323,7 @@ static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
r = verity_hash(v, verity_io_hash_req(v, io),
data, 1 << v->hash_dev_block_bits,
verity_io_real_digest(v, io));
verity_io_real_digest(v, io), !io->in_tasklet);
if (unlikely(r < 0))
goto release_ret_r;
@ -556,7 +557,7 @@ static int verity_verify_io(struct dm_verity_io *io)
continue;
}
r = verity_hash_init(v, req, &wait);
r = verity_hash_init(v, req, &wait, !io->in_tasklet);
if (unlikely(r < 0))
return r;
@ -652,7 +653,7 @@ static void verity_tasklet(unsigned long data)
io->in_tasklet = true;
err = verity_verify_io(io);
if (err == -EAGAIN) {
if (err == -EAGAIN || err == -ENOMEM) {
/* fallback to retrying with work-queue */
INIT_WORK(&io->work, verity_work);
queue_work(io->v->verify_wq, &io->work);
@ -1033,7 +1034,7 @@ static int verity_alloc_zero_digest(struct dm_verity *v)
goto out;
r = verity_hash(v, req, zero_data, 1 << v->data_dev_block_bits,
v->zero_digest);
v->zero_digest, true);
out:
kfree(req);


@ -128,7 +128,7 @@ extern int verity_for_bv_block(struct dm_verity *v, struct dm_verity_io *io,
u8 *data, size_t len));
extern int verity_hash(struct dm_verity *v, struct ahash_request *req,
const u8 *data, size_t len, u8 *digest);
const u8 *data, size_t len, u8 *digest, bool may_sleep);
extern int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
sector_t block, u8 *digest, bool *is_zero);


@ -1500,6 +1500,10 @@ done:
static void bond_setup_by_slave(struct net_device *bond_dev,
struct net_device *slave_dev)
{
bool was_up = !!(bond_dev->flags & IFF_UP);
dev_close(bond_dev);
bond_dev->header_ops = slave_dev->header_ops;
bond_dev->type = slave_dev->type;
@ -1514,6 +1518,8 @@ static void bond_setup_by_slave(struct net_device *bond_dev,
bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP);
}
if (was_up)
dev_open(bond_dev, NULL);
}
/* On bonding slaves other than the currently active slave, suppress


@ -146,7 +146,7 @@ irqreturn_t pdsc_adminq_isr(int irq, void *data)
}
queue_work(pdsc->wq, &qcq->work);
pds_core_intr_mask(&pdsc->intr_ctrl[irq], PDS_CORE_INTR_MASK_CLEAR);
pds_core_intr_mask(&pdsc->intr_ctrl[qcq->intx], PDS_CORE_INTR_MASK_CLEAR);
return IRQ_HANDLED;
}


@ -15,7 +15,7 @@
#define PDSC_DRV_DESCRIPTION "AMD/Pensando Core Driver"
#define PDSC_WATCHDOG_SECS 5
#define PDSC_QUEUE_NAME_MAX_SZ 32
#define PDSC_QUEUE_NAME_MAX_SZ 16
#define PDSC_ADMINQ_MIN_LENGTH 16 /* must be a power of two */
#define PDSC_NOTIFYQ_LENGTH 64 /* must be a power of two */
#define PDSC_TEARDOWN_RECOVERY false


@ -261,10 +261,14 @@ static int pdsc_identify(struct pdsc *pdsc)
struct pds_core_drv_identity drv = {};
size_t sz;
int err;
int n;
drv.drv_type = cpu_to_le32(PDS_DRIVER_LINUX);
snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
/* Catching the return quiets a Wformat-truncation complaint */
n = snprintf(drv.driver_ver_str, sizeof(drv.driver_ver_str),
"%s %s", PDS_CORE_DRV_NAME, utsname()->release);
if (n > sizeof(drv.driver_ver_str))
dev_dbg(pdsc->dev, "release name truncated, don't care\n");
/* Next let's get some info about the device
* We use the devcmd_lock at this level in order to


@ -104,7 +104,7 @@ int pdsc_dl_info_get(struct devlink *dl, struct devlink_info_req *req,
struct pds_core_fw_list_info fw_list;
struct pdsc *pdsc = devlink_priv(dl);
union pds_core_dev_comp comp;
char buf[16];
char buf[32];
int listlen;
int err;
int i;


@ -6889,7 +6889,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
desc_idx, *post_ptr);
drop_it_no_recycle:
/* Other statistics kept track of by card. */
tp->rx_dropped++;
tnapi->rx_dropped++;
goto next_pkt;
}
@ -7918,8 +7918,10 @@ static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
segs = skb_gso_segment(skb, tp->dev->features &
~(NETIF_F_TSO | NETIF_F_TSO6));
if (IS_ERR(segs) || !segs)
if (IS_ERR(segs) || !segs) {
tnapi->tx_dropped++;
goto tg3_tso_bug_end;
}
skb_list_walk_safe(segs, seg, next) {
skb_mark_not_on_list(seg);
@ -8190,7 +8192,7 @@ dma_error:
drop:
dev_kfree_skb_any(skb);
drop_nofree:
tp->tx_dropped++;
tnapi->tx_dropped++;
return NETDEV_TX_OK;
}
@ -9405,7 +9407,7 @@ static void __tg3_set_rx_mode(struct net_device *);
/* tp->lock is held. */
static int tg3_halt(struct tg3 *tp, int kind, bool silent)
{
int err;
int err, i;
tg3_stop_fw(tp);
@ -9426,6 +9428,13 @@ static int tg3_halt(struct tg3 *tp, int kind, bool silent)
/* And make sure the next sample is new data */
memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
for (i = 0; i < TG3_IRQ_MAX_VECS; ++i) {
struct tg3_napi *tnapi = &tp->napi[i];
tnapi->rx_dropped = 0;
tnapi->tx_dropped = 0;
}
}
return err;
@ -11975,6 +11984,9 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
{
struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
struct tg3_hw_stats *hw_stats = tp->hw_stats;
unsigned long rx_dropped;
unsigned long tx_dropped;
int i;
stats->rx_packets = old_stats->rx_packets +
get_stat64(&hw_stats->rx_ucast_packets) +
@ -12021,8 +12033,26 @@ static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
stats->rx_missed_errors = old_stats->rx_missed_errors +
get_stat64(&hw_stats->rx_discards);
stats->rx_dropped = tp->rx_dropped;
stats->tx_dropped = tp->tx_dropped;
/* Aggregate per-queue counters. The per-queue counters are updated
* by a single writer, race-free. The result computed by this loop
* might not be 100% accurate (counters can be updated in the middle of
* the loop) but the next tg3_get_nstats() will recompute the current
* value so it is acceptable.
*
* Note that these counters wrap around at 4G on 32bit machines.
*/
rx_dropped = (unsigned long)(old_stats->rx_dropped);
tx_dropped = (unsigned long)(old_stats->tx_dropped);
for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
rx_dropped += tnapi->rx_dropped;
tx_dropped += tnapi->tx_dropped;
}
stats->rx_dropped = rx_dropped;
stats->tx_dropped = tx_dropped;
}
static int tg3_get_regs_len(struct net_device *dev)


@ -3018,6 +3018,7 @@ struct tg3_napi {
u16 *rx_rcb_prod_idx;
struct tg3_rx_prodring_set prodring;
struct tg3_rx_buffer_desc *rx_rcb;
unsigned long rx_dropped;
u32 tx_prod ____cacheline_aligned;
u32 tx_cons;
@ -3026,6 +3027,7 @@ struct tg3_napi {
u32 prodmbox;
struct tg3_tx_buffer_desc *tx_ring;
struct tg3_tx_ring_info *tx_buffers;
unsigned long tx_dropped;
dma_addr_t status_mapping;
dma_addr_t rx_rcb_mapping;
@ -3220,8 +3222,6 @@ struct tg3 {
/* begin "everything else" cacheline(s) section */
unsigned long rx_dropped;
unsigned long tx_dropped;
struct rtnl_link_stats64 net_stats_prev;
struct tg3_ethtool_stats estats_prev;

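The tg3 hunks above move rx_dropped/tx_dropped from struct tg3 into struct tg3_napi so that each counter has a single writer (the NAPI handler of its queue), and the stats path just sums the per-queue values on top of the net_stats_prev snapshot, tolerating a momentarily stale total. A condensed sketch of that single-writer / lock-free-reader split, with generic names rather than the driver's:

/* Per-queue counters: incremented only from the queue's own NAPI context,
 * summed without locking by the stats reader.  The sum may lag by a few
 * packets and is corrected by the next read; on 32-bit kernels the
 * unsigned long counters wrap at 4G, as the hunk above notes.
 */
struct queue_drops_sketch {
	unsigned long rx_dropped;	/* single writer: this queue's NAPI */
	unsigned long tx_dropped;
};

static void note_rx_drop(struct queue_drops_sketch *q)
{
	q->rx_dropped++;		/* no lock: exactly one writer */
}

/* Caller seeds *rx_total / *tx_total with the snapshot taken at the last
 * counter reset, mirroring old_stats in the hunk above.
 */
static void sum_drops(const struct queue_drops_sketch *qs, int nqueues,
		      unsigned long *rx_total, unsigned long *tx_total)
{
	int i;

	for (i = 0; i < nqueues; i++) {
		*rx_total += qs[i].rx_dropped;
		*tx_total += qs[i].tx_dropped;
	}
}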

@ -432,8 +432,8 @@ static const struct gmac_max_framelen gmac_maxlens[] = {
.val = CONFIG0_MAXLEN_1536,
},
{
.max_l3_len = 1542,
.val = CONFIG0_MAXLEN_1542,
.max_l3_len = 1548,
.val = CONFIG0_MAXLEN_1548,
},
{
.max_l3_len = 9212,
@ -1145,6 +1145,7 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
dma_addr_t mapping;
unsigned short mtu;
void *buffer;
int ret;
mtu = ETH_HLEN;
mtu += netdev->mtu;
@ -1159,9 +1160,30 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb,
word3 |= mtu;
}
if (skb->ip_summed != CHECKSUM_NONE) {
if (skb->len >= ETH_FRAME_LEN) {
/* Hardware offloaded checksumming isn't working on frames
* bigger than 1514 bytes. A hypothesis about this is that the
* checksum buffer is only 1518 bytes, so when the frames get
* bigger they get truncated, or the last few bytes get
* overwritten by the FCS.
*
* Just use software checksumming and bypass on bigger frames.
*/
if (skb->ip_summed == CHECKSUM_PARTIAL) {
ret = skb_checksum_help(skb);
if (ret)
return ret;
}
word1 |= TSS_BYPASS_BIT;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
int tcp = 0;
/* We do not switch off the checksumming on non TCP/UDP
* frames: as is shown from tests, the checksumming engine
* is smart enough to see that a frame is not actually TCP
* or UDP and then just pass it through without any changes
* to the frame.
*/
if (skb->protocol == htons(ETH_P_IP)) {
word1 |= TSS_IP_CHKSUM_BIT;
tcp = ip_hdr(skb)->protocol == IPPROTO_TCP;
@ -1978,15 +2000,6 @@ static int gmac_change_mtu(struct net_device *netdev, int new_mtu)
return 0;
}
static netdev_features_t gmac_fix_features(struct net_device *netdev,
netdev_features_t features)
{
if (netdev->mtu + ETH_HLEN + VLAN_HLEN > MTU_SIZE_BIT_MASK)
features &= ~GMAC_OFFLOAD_FEATURES;
return features;
}
static int gmac_set_features(struct net_device *netdev,
netdev_features_t features)
{
@ -2212,7 +2225,6 @@ static const struct net_device_ops gmac_351x_ops = {
.ndo_set_mac_address = gmac_set_mac_address,
.ndo_get_stats64 = gmac_get_stats64,
.ndo_change_mtu = gmac_change_mtu,
.ndo_fix_features = gmac_fix_features,
.ndo_set_features = gmac_set_features,
};
@ -2464,11 +2476,12 @@ static int gemini_ethernet_port_probe(struct platform_device *pdev)
netdev->hw_features = GMAC_OFFLOAD_FEATURES;
netdev->features |= GMAC_OFFLOAD_FEATURES | NETIF_F_GRO;
/* We can handle jumbo frames up to 10236 bytes so, let's accept
* payloads of 10236 bytes minus VLAN and ethernet header
/* We can receive jumbo frames up to 10236 bytes but only
* transmit 2047 bytes so, let's accept payloads of 2047
* bytes minus VLAN and ethernet header
*/
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = 10236 - VLAN_ETH_HLEN;
netdev->max_mtu = MTU_SIZE_BIT_MASK - VLAN_ETH_HLEN;
port->freeq_refill = 0;
netif_napi_add(netdev, &port->napi, gmac_napi_poll);


@ -502,7 +502,7 @@ union gmac_txdesc_3 {
#define SOF_BIT 0x80000000
#define EOF_BIT 0x40000000
#define EOFIE_BIT BIT(29)
#define MTU_SIZE_BIT_MASK 0x1fff
#define MTU_SIZE_BIT_MASK 0x7ff /* Max MTU 2047 bytes */
/* GMAC Tx Descriptor */
struct gmac_txdesc {
@ -787,7 +787,7 @@ union gmac_config0 {
#define CONFIG0_MAXLEN_1536 0
#define CONFIG0_MAXLEN_1518 1
#define CONFIG0_MAXLEN_1522 2
#define CONFIG0_MAXLEN_1542 3
#define CONFIG0_MAXLEN_1548 3
#define CONFIG0_MAXLEN_9k 4 /* 9212 */
#define CONFIG0_MAXLEN_10k 5 /* 10236 */
#define CONFIG0_MAXLEN_1518__6 6


@ -254,10 +254,13 @@ static int gve_napi_poll(struct napi_struct *napi, int budget)
if (block->tx) {
if (block->tx->q_num < priv->tx_cfg.num_queues)
reschedule |= gve_tx_poll(block, budget);
else
else if (budget)
reschedule |= gve_xdp_poll(block, budget);
}
if (!budget)
return 0;
if (block->rx) {
work_done = gve_rx_poll(block, budget);
reschedule |= work_done == budget;
@ -298,6 +301,9 @@ static int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
if (block->tx)
reschedule |= gve_tx_poll_dqo(block, /*do_clean=*/true);
if (!budget)
return 0;
if (block->rx) {
work_done = gve_rx_poll_dqo(block, budget);
reschedule |= work_done == budget;


@ -1007,10 +1007,6 @@ int gve_rx_poll(struct gve_notify_block *block, int budget)
feat = block->napi.dev->features;
/* If budget is 0, do all the work */
if (budget == 0)
budget = INT_MAX;
if (budget > 0)
work_done = gve_clean_rx_done(rx, budget, feat);


@ -925,10 +925,6 @@ bool gve_xdp_poll(struct gve_notify_block *block, int budget)
bool repoll;
u32 to_do;
/* If budget is 0, do all the work */
if (budget == 0)
budget = INT_MAX;
/* Find out how much work there is to be done */
nic_done = gve_tx_load_event_counter(priv, tx);
to_do = min_t(u32, (nic_done - tx->done), budget);

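The three gve hunks above drop the old "budget == 0 means unlimited work" special case in favour of the usual NAPI contract: a zero budget (netpoll) allows TX completion cleanup only, no RX processing, and the poll callback reports zero. A bare-bones poll callback following that contract is sketched below; everything named example_* is a placeholder for a driver's own paths, only napi_complete_done() and container_of() are real APIs.

#include <linux/netdevice.h>

/* Placeholder ring type and helpers standing in for a driver's own
 * TX/RX cleanup and interrupt re-enable paths.
 */
struct example_ring {
	struct napi_struct napi;
};

static void example_clean_tx(struct example_ring *ring) { }
static int example_clean_rx(struct example_ring *ring, int budget) { return 0; }
static void example_enable_irq(struct example_ring *ring) { }

static int example_napi_poll(struct napi_struct *napi, int budget)
{
	struct example_ring *ring = container_of(napi, struct example_ring, napi);
	int work_done;

	example_clean_tx(ring);		/* TX completions are always allowed */

	if (!budget)			/* budget 0 (netpoll): skip RX, report 0 */
		return 0;

	work_done = example_clean_rx(ring, budget);
	if (work_done < budget && napi_complete_done(napi, work_done))
		example_enable_irq(ring);

	return work_done;
}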

@ -503,11 +503,14 @@ static void hns3_get_coal_info(struct hns3_enet_tqp_vector *tqp_vector,
}
sprintf(result[j++], "%d", i);
sprintf(result[j++], "%s", dim_state_str[dim->state]);
sprintf(result[j++], "%s", dim->state < ARRAY_SIZE(dim_state_str) ?
dim_state_str[dim->state] : "unknown");
sprintf(result[j++], "%u", dim->profile_ix);
sprintf(result[j++], "%s", dim_cqe_mode_str[dim->mode]);
sprintf(result[j++], "%s", dim->mode < ARRAY_SIZE(dim_cqe_mode_str) ?
dim_cqe_mode_str[dim->mode] : "unknown");
sprintf(result[j++], "%s",
dim_tune_stat_str[dim->tune_state]);
dim->tune_state < ARRAY_SIZE(dim_tune_stat_str) ?
dim_tune_stat_str[dim->tune_state] : "unknown");
sprintf(result[j++], "%u", dim->steps_left);
sprintf(result[j++], "%u", dim->steps_right);
sprintf(result[j++], "%u", dim->tired);


@ -5139,7 +5139,7 @@ static int hns3_init_mac_addr(struct net_device *netdev)
struct hns3_nic_priv *priv = netdev_priv(netdev);
char format_mac_addr[HNAE3_FORMAT_MAC_ADDR_LEN];
struct hnae3_handle *h = priv->ae_handle;
u8 mac_addr_temp[ETH_ALEN];
u8 mac_addr_temp[ETH_ALEN] = {0};
int ret = 0;
if (h->ae_algo->ops->get_mac_addr)


@ -61,6 +61,7 @@ static void hclge_sync_fd_table(struct hclge_dev *hdev);
static void hclge_update_fec_stats(struct hclge_dev *hdev);
static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret,
int wait_cnt);
static int hclge_update_port_info(struct hclge_dev *hdev);
static struct hnae3_ae_algo ae_algo;
@ -3041,6 +3042,9 @@ static void hclge_update_link_status(struct hclge_dev *hdev)
if (state != hdev->hw.mac.link) {
hdev->hw.mac.link = state;
if (state == HCLGE_LINK_STATUS_UP)
hclge_update_port_info(hdev);
client->ops->link_status_change(handle, state);
hclge_config_mac_tnl_int(hdev, state);
if (rclient && rclient->ops->link_status_change)
@ -10025,8 +10029,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
struct hclge_vport_vlan_cfg *vlan, *tmp;
struct hclge_dev *hdev = vport->back;
mutex_lock(&hdev->vport_lock);
list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) {
if (vlan->vlan_id == vlan_id) {
if (is_write_tbl && vlan->hd_tbl_status)
@ -10041,8 +10043,6 @@ static void hclge_rm_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id,
break;
}
}
mutex_unlock(&hdev->vport_lock);
}
void hclge_rm_vport_all_vlan_table(struct hclge_vport *vport, bool is_del_list)
@ -10451,11 +10451,16 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
* handle mailbox. Just record the vlan id, and remove it after
* reset finished.
*/
mutex_lock(&hdev->vport_lock);
if ((test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) ||
test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) && is_kill) {
set_bit(vlan_id, vport->vlan_del_fail_bmap);
mutex_unlock(&hdev->vport_lock);
return -EBUSY;
} else if (!is_kill && test_bit(vlan_id, vport->vlan_del_fail_bmap)) {
clear_bit(vlan_id, vport->vlan_del_fail_bmap);
}
mutex_unlock(&hdev->vport_lock);
/* when port base vlan enabled, we use port base vlan as the vlan
* filter entry. In this case, we don't update vlan filter table
@ -10470,17 +10475,22 @@ int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto,
}
if (!ret) {
if (!is_kill)
if (!is_kill) {
hclge_add_vport_vlan_table(vport, vlan_id,
writen_to_tbl);
else if (is_kill && vlan_id != 0)
} else if (is_kill && vlan_id != 0) {
mutex_lock(&hdev->vport_lock);
hclge_rm_vport_vlan_table(vport, vlan_id, false);
mutex_unlock(&hdev->vport_lock);
}
} else if (is_kill) {
/* when remove hw vlan filter failed, record the vlan id,
* and try to remove it from hw later, to be consistence
* with stack
*/
mutex_lock(&hdev->vport_lock);
set_bit(vlan_id, vport->vlan_del_fail_bmap);
mutex_unlock(&hdev->vport_lock);
}
hclge_set_vport_vlan_fltr_change(vport);
@ -10520,6 +10530,7 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
int i, ret, sync_cnt = 0;
u16 vlan_id;
mutex_lock(&hdev->vport_lock);
/* start from vport 1 for PF is always alive */
for (i = 0; i < hdev->num_alloc_vport; i++) {
struct hclge_vport *vport = &hdev->vport[i];
@ -10530,21 +10541,26 @@ static void hclge_sync_vlan_filter(struct hclge_dev *hdev)
ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
vport->vport_id, vlan_id,
true);
if (ret && ret != -EINVAL)
if (ret && ret != -EINVAL) {
mutex_unlock(&hdev->vport_lock);
return;
}
clear_bit(vlan_id, vport->vlan_del_fail_bmap);
hclge_rm_vport_vlan_table(vport, vlan_id, false);
hclge_set_vport_vlan_fltr_change(vport);
sync_cnt++;
if (sync_cnt >= HCLGE_MAX_SYNC_COUNT)
if (sync_cnt >= HCLGE_MAX_SYNC_COUNT) {
mutex_unlock(&hdev->vport_lock);
return;
}
vlan_id = find_first_bit(vport->vlan_del_fail_bmap,
VLAN_N_VID);
}
}
mutex_unlock(&hdev->vport_lock);
hclge_sync_vlan_fltr_state(hdev);
}
@ -11651,6 +11667,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_msi_irq_uninit;
if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) {
clear_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
if (hnae3_dev_phy_imp_supported(hdev))
ret = hclge_update_tp_port_info(hdev);
else


@ -1206,6 +1206,8 @@ static int hclgevf_set_vlan_filter(struct hnae3_handle *handle,
test_bit(HCLGEVF_STATE_RST_FAIL, &hdev->state)) && is_kill) {
set_bit(vlan_id, hdev->vlan_del_fail_bmap);
return -EBUSY;
} else if (!is_kill && test_bit(vlan_id, hdev->vlan_del_fail_bmap)) {
clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
}
hclgevf_build_send_msg(&send_msg, HCLGE_MBX_SET_VLAN,
@ -1233,20 +1235,25 @@ static void hclgevf_sync_vlan_filter(struct hclgevf_dev *hdev)
int ret, sync_cnt = 0;
u16 vlan_id;
if (bitmap_empty(hdev->vlan_del_fail_bmap, VLAN_N_VID))
return;
rtnl_lock();
vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
while (vlan_id != VLAN_N_VID) {
ret = hclgevf_set_vlan_filter(handle, htons(ETH_P_8021Q),
vlan_id, true);
if (ret)
return;
break;
clear_bit(vlan_id, hdev->vlan_del_fail_bmap);
sync_cnt++;
if (sync_cnt >= HCLGEVF_MAX_SYNC_COUNT)
return;
break;
vlan_id = find_first_bit(hdev->vlan_del_fail_bmap, VLAN_N_VID);
}
rtnl_unlock();
}
static int hclgevf_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable)
@ -1974,8 +1981,18 @@ static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev,
return HCLGEVF_VECTOR0_EVENT_OTHER;
}
static void hclgevf_reset_timer(struct timer_list *t)
{
struct hclgevf_dev *hdev = from_timer(hdev, t, reset_timer);
hclgevf_clear_event_cause(hdev, HCLGEVF_VECTOR0_EVENT_RST);
hclgevf_reset_task_schedule(hdev);
}
static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
{
#define HCLGEVF_RESET_DELAY 5
enum hclgevf_evt_cause event_cause;
struct hclgevf_dev *hdev = data;
u32 clearval;
@ -1987,7 +2004,8 @@ static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data)
switch (event_cause) {
case HCLGEVF_VECTOR0_EVENT_RST:
hclgevf_reset_task_schedule(hdev);
mod_timer(&hdev->reset_timer,
jiffies + msecs_to_jiffies(HCLGEVF_RESET_DELAY));
break;
case HCLGEVF_VECTOR0_EVENT_MBX:
hclgevf_mbx_handler(hdev);
@ -2930,6 +2948,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
HCLGEVF_DRIVER_NAME);
hclgevf_task_schedule(hdev, round_jiffies_relative(HZ));
timer_setup(&hdev->reset_timer, hclgevf_reset_timer, 0);
return 0;


@ -219,6 +219,7 @@ struct hclgevf_dev {
enum hnae3_reset_type reset_level;
unsigned long reset_pending;
enum hnae3_reset_type reset_type;
struct timer_list reset_timer;
#define HCLGEVF_RESET_REQUESTED 0
#define HCLGEVF_RESET_PENDING 1


@ -63,6 +63,9 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1,
i++;
}
/* ensure additional_info will be seen after received_resp */
smp_rmb();
if (i >= HCLGEVF_MAX_TRY_TIMES) {
dev_err(&hdev->pdev->dev,
"VF could not get mbx(%u,%u) resp(=%d) from PF in %d tries\n",
@ -178,6 +181,10 @@ static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev,
resp->resp_status = hclgevf_resp_to_errno(resp_status);
memcpy(resp->additional_info, req->msg.resp_data,
HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8));
/* ensure additional_info will be seen before setting received_resp */
smp_wmb();
if (match_id) {
/* If match_id is not zero, it means PF support match_id.
* if the match_id is right, VF get the right response, or

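The two barriers added in the mailbox hunks above form a standard publish/consume pair: the handler fills additional_info, issues smp_wmb(), and only then marks the response as received, while the polling side that observed received_resp issues smp_rmb() before it reads additional_info. A generic sketch of that pairing, with made-up variable names rather than the hclgevf structures:

#include <linux/compiler.h>
#include <linux/errno.h>
#include <asm/barrier.h>

/* Illustrative flag/data pair; not the driver's layout. */
static int mbx_payload;
static int mbx_ready;

static void publish_sketch(int value)
{
	mbx_payload = value;	/* write the data ...             */
	smp_wmb();		/* ... before publishing the flag */
	WRITE_ONCE(mbx_ready, 1);
}

static int consume_sketch(int *value)
{
	if (!READ_ONCE(mbx_ready))
		return -EAGAIN;
	smp_rmb();		/* flag observed: order the data read after it */
	*value = mbx_payload;
	return 0;
}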

@ -1479,14 +1479,14 @@ ice_post_dwnld_pkg_actions(struct ice_hw *hw)
}
/**
* ice_download_pkg
* ice_download_pkg_with_sig_seg
* @hw: pointer to the hardware structure
* @pkg_hdr: pointer to package header
*
* Handles the download of a complete package.
*/
static enum ice_ddp_state
ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
ice_download_pkg_with_sig_seg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
{
enum ice_aq_err aq_err = hw->adminq.sq_last_status;
enum ice_ddp_state state = ICE_DDP_PKG_ERR;
@ -1519,6 +1519,103 @@ ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
state = ice_post_dwnld_pkg_actions(hw);
ice_release_global_cfg_lock(hw);
return state;
}
/**
* ice_dwnld_cfg_bufs
* @hw: pointer to the hardware structure
* @bufs: pointer to an array of buffers
* @count: the number of buffers in the array
*
* Obtains global config lock and downloads the package configuration buffers
* to the firmware.
*/
static enum ice_ddp_state
ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
{
enum ice_ddp_state state;
struct ice_buf_hdr *bh;
int status;
if (!bufs || !count)
return ICE_DDP_PKG_ERR;
/* If the first buffer's first section has its metadata bit set
* then there are no buffers to be downloaded, and the operation is
* considered a success.
*/
bh = (struct ice_buf_hdr *)bufs;
if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
return ICE_DDP_PKG_SUCCESS;
status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
if (status) {
if (status == -EALREADY)
return ICE_DDP_PKG_ALREADY_LOADED;
return ice_map_aq_err_to_ddp_state(hw->adminq.sq_last_status);
}
state = ice_dwnld_cfg_bufs_no_lock(hw, bufs, 0, count, true);
if (!state)
state = ice_post_dwnld_pkg_actions(hw);
ice_release_global_cfg_lock(hw);
return state;
}
/**
* ice_download_pkg_without_sig_seg
* @hw: pointer to the hardware structure
* @ice_seg: pointer to the segment of the package to be downloaded
*
* Handles the download of a complete package without signature segment.
*/
static enum ice_ddp_state
ice_download_pkg_without_sig_seg(struct ice_hw *hw, struct ice_seg *ice_seg)
{
struct ice_buf_table *ice_buf_tbl;
ice_debug(hw, ICE_DBG_PKG, "Segment format version: %d.%d.%d.%d\n",
ice_seg->hdr.seg_format_ver.major,
ice_seg->hdr.seg_format_ver.minor,
ice_seg->hdr.seg_format_ver.update,
ice_seg->hdr.seg_format_ver.draft);
ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
le32_to_cpu(ice_seg->hdr.seg_type),
le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_id);
ice_buf_tbl = ice_find_buf_table(ice_seg);
ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
le32_to_cpu(ice_buf_tbl->buf_count));
return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
le32_to_cpu(ice_buf_tbl->buf_count));
}
/**
* ice_download_pkg
* @hw: pointer to the hardware structure
* @pkg_hdr: pointer to package header
* @ice_seg: pointer to the segment of the package to be downloaded
*
* Handles the download of a complete package.
*/
static enum ice_ddp_state
ice_download_pkg(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr,
struct ice_seg *ice_seg)
{
enum ice_ddp_state state;
if (hw->pkg_has_signing_seg)
state = ice_download_pkg_with_sig_seg(hw, pkg_hdr);
else
state = ice_download_pkg_without_sig_seg(hw, ice_seg);
ice_post_pkg_dwnld_vlan_mode_cfg(hw);
return state;
@ -2083,7 +2180,7 @@ enum ice_ddp_state ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
/* initialize package hints and then download package */
ice_init_pkg_hints(hw, seg);
state = ice_download_pkg(hw, pkg);
state = ice_download_pkg(hw, pkg, seg);
if (state == ICE_DDP_PKG_ALREADY_LOADED) {
ice_debug(hw, ICE_DBG_INIT,
"package previously loaded - no work.\n");


@ -815,12 +815,6 @@ ice_dpll_input_prio_set(const struct dpll_pin *pin, void *pin_priv,
struct ice_pf *pf = d->pf;
int ret;
if (prio > ICE_DPLL_PRIO_MAX) {
NL_SET_ERR_MSG_FMT(extack, "prio out of supported range 0-%d",
ICE_DPLL_PRIO_MAX);
return -EINVAL;
}
mutex_lock(&pf->dplls.lock);
ret = ice_dpll_hw_input_prio_set(pf, d, p, prio, extack);
mutex_unlock(&pf->dplls.lock);
@ -1756,6 +1750,7 @@ ice_dpll_init_dpll(struct ice_pf *pf, struct ice_dpll *d, bool cgu,
}
d->pf = pf;
if (cgu) {
ice_dpll_update_state(pf, d, true);
ret = dpll_device_register(d->dpll, type, &ice_dpll_ops, d);
if (ret) {
dpll_device_put(d->dpll);
@ -1796,8 +1791,6 @@ static int ice_dpll_init_worker(struct ice_pf *pf)
struct ice_dplls *d = &pf->dplls;
struct kthread_worker *kworker;
ice_dpll_update_state(pf, &d->eec, true);
ice_dpll_update_state(pf, &d->pps, true);
kthread_init_delayed_work(&d->work, ice_dpll_periodic_work);
kworker = kthread_create_worker(0, "ice-dplls-%s",
dev_name(ice_pf_to_dev(pf)));
@ -1830,6 +1823,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
int num_pins, i, ret = -EINVAL;
struct ice_hw *hw = &pf->hw;
struct ice_dpll_pin *pins;
unsigned long caps;
u8 freq_supp_num;
bool input;
@ -1849,6 +1843,7 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
}
for (i = 0; i < num_pins; i++) {
caps = 0;
pins[i].idx = i;
pins[i].prop.board_label = ice_cgu_get_pin_name(hw, i, input);
pins[i].prop.type = ice_cgu_get_pin_type(hw, i, input);
@ -1861,8 +1856,8 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
&dp->input_prio[i]);
if (ret)
return ret;
pins[i].prop.capabilities |=
DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE;
caps |= (DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE |
DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE);
pins[i].prop.phase_range.min =
pf->dplls.input_phase_adj_max;
pins[i].prop.phase_range.max =
@ -1872,9 +1867,11 @@ ice_dpll_init_info_direct_pins(struct ice_pf *pf,
pf->dplls.output_phase_adj_max;
pins[i].prop.phase_range.max =
-pf->dplls.output_phase_adj_max;
ret = ice_cgu_get_output_pin_state_caps(hw, i, &caps);
if (ret)
return ret;
}
pins[i].prop.capabilities |=
DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
pins[i].prop.capabilities = caps;
ret = ice_dpll_pin_state_update(pf, &pins[i], pin_type, NULL);
if (ret)
return ret;


@ -6,7 +6,6 @@
#include "ice.h"
#define ICE_DPLL_PRIO_MAX 0xF
#define ICE_DPLL_RCLK_NUM_MAX 4
/** ice_dpll_pin - store info about pins


@ -3961,3 +3961,57 @@ int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num)
return ret;
}
/**
* ice_cgu_get_output_pin_state_caps - get output pin state capabilities
* @hw: pointer to the hw struct
* @pin_id: id of a pin
* @caps: capabilities to modify
*
* Return:
* * 0 - success, state capabilities were modified
* * negative - failure, capabilities were not modified
*/
int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
unsigned long *caps)
{
bool can_change = true;
switch (hw->device_id) {
case ICE_DEV_ID_E810C_SFP:
if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3)
can_change = false;
break;
case ICE_DEV_ID_E810C_QSFP:
if (pin_id == ZL_OUT2 || pin_id == ZL_OUT3 || pin_id == ZL_OUT4)
can_change = false;
break;
case ICE_DEV_ID_E823L_10G_BASE_T:
case ICE_DEV_ID_E823L_1GBE:
case ICE_DEV_ID_E823L_BACKPLANE:
case ICE_DEV_ID_E823L_QSFP:
case ICE_DEV_ID_E823L_SFP:
case ICE_DEV_ID_E823C_10G_BASE_T:
case ICE_DEV_ID_E823C_BACKPLANE:
case ICE_DEV_ID_E823C_QSFP:
case ICE_DEV_ID_E823C_SFP:
case ICE_DEV_ID_E823C_SGMII:
if (hw->cgu_part_number ==
ICE_AQC_GET_LINK_TOPO_NODE_NR_ZL30632_80032 &&
pin_id == ZL_OUT2)
can_change = false;
else if (hw->cgu_part_number ==
ICE_AQC_GET_LINK_TOPO_NODE_NR_SI5383_5384 &&
pin_id == SI_OUT1)
can_change = false;
break;
default:
return -EINVAL;
}
if (can_change)
*caps |= DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
else
*caps &= ~DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE;
return 0;
}


@ -282,6 +282,8 @@ int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx,
int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num);
void ice_ptp_init_phy_model(struct ice_hw *hw);
int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id,
unsigned long *caps);
#define PFTSYN_SEM_BYTES 4


@ -4790,16 +4790,19 @@ static void mvneta_ethtool_get_strings(struct net_device *netdev, u32 sset,
u8 *data)
{
if (sset == ETH_SS_STATS) {
struct mvneta_port *pp = netdev_priv(netdev);
int i;
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
memcpy(data + i * ETH_GSTRING_LEN,
mvneta_statistics[i].name, ETH_GSTRING_LEN);
if (!pp->bm_priv) {
data += ETH_GSTRING_LEN * ARRAY_SIZE(mvneta_statistics);
page_pool_ethtool_stats_get_strings(data);
}
}
}
static void
mvneta_ethtool_update_pcpu_stats(struct mvneta_port *pp,
@ -4915,8 +4918,10 @@ static void mvneta_ethtool_pp_stats(struct mvneta_port *pp, u64 *data)
struct page_pool_stats stats = {};
int i;
for (i = 0; i < rxq_number; i++)
for (i = 0; i < rxq_number; i++) {
if (pp->rxqs[i].page_pool)
page_pool_get_stats(pp->rxqs[i].page_pool, &stats);
}
page_pool_ethtool_stats_get(data, &stats);
}
@ -4932,14 +4937,21 @@ static void mvneta_ethtool_get_stats(struct net_device *dev,
for (i = 0; i < ARRAY_SIZE(mvneta_statistics); i++)
*data++ = pp->ethtool_stats[i];
if (!pp->bm_priv)
mvneta_ethtool_pp_stats(pp, data);
}
static int mvneta_ethtool_get_sset_count(struct net_device *dev, int sset)
{
if (sset == ETH_SS_STATS)
return ARRAY_SIZE(mvneta_statistics) +
page_pool_ethtool_stats_get_count();
if (sset == ETH_SS_STATS) {
int count = ARRAY_SIZE(mvneta_statistics);
struct mvneta_port *pp = netdev_priv(dev);
if (!pp->bm_priv)
count += page_pool_ethtool_stats_get_count();
return count;
}
return -EOPNOTSUPP;
}


@ -177,6 +177,8 @@ static void mlx5e_ptpsq_mark_ts_cqes_undelivered(struct mlx5e_ptpsq *ptpsq,
static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
struct mlx5_cqe64 *cqe,
u8 *md_buff,
u8 *md_buff_sz,
int budget)
{
struct mlx5e_ptp_port_ts_cqe_list *pending_cqe_list = ptpsq->ts_cqe_pending_list;
@ -211,19 +213,24 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
out:
napi_consume_skb(skb, budget);
mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist, metadata_id);
md_buff[*md_buff_sz++] = metadata_id;
if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
}
static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int napi_budget)
{
struct mlx5e_ptpsq *ptpsq = container_of(cq, struct mlx5e_ptpsq, ts_cq);
struct mlx5_cqwq *cqwq = &cq->wq;
int budget = min(napi_budget, MLX5E_TX_CQ_POLL_BUDGET);
u8 metadata_buff[MLX5E_TX_CQ_POLL_BUDGET];
u8 metadata_buff_sz = 0;
struct mlx5_cqwq *cqwq;
struct mlx5_cqe64 *cqe;
int work_done = 0;
cqwq = &cq->wq;
if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &ptpsq->txqsq.state)))
return false;
@ -234,7 +241,8 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
do {
mlx5_cqwq_pop(cqwq);
mlx5e_ptp_handle_ts_cqe(ptpsq, cqe, budget);
mlx5e_ptp_handle_ts_cqe(ptpsq, cqe,
metadata_buff, &metadata_buff_sz, napi_budget);
} while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(cqwq)));
mlx5_cqwq_update_db_record(cqwq);
@ -242,6 +250,10 @@ static bool mlx5e_ptp_poll_ts_cq(struct mlx5e_cq *cq, int budget)
/* ensure cq space is freed before enabling more cqes */
wmb();
while (metadata_buff_sz > 0)
mlx5e_ptp_metadata_fifo_push(&ptpsq->metadata_freelist,
metadata_buff[--metadata_buff_sz]);
mlx5e_txqsq_wake(&ptpsq->txqsq);
return work_done == budget;


@ -492,11 +492,11 @@ static int mlx5e_rx_reporter_dump(struct devlink_health_reporter *reporter,
void mlx5e_reporter_rx_timeout(struct mlx5e_rq *rq)
{
char icosq_str[MLX5E_REPORTER_PER_Q_MAX_LEN] = {};
char err_str[MLX5E_REPORTER_PER_Q_MAX_LEN];
struct mlx5e_icosq *icosq = rq->icosq;
struct mlx5e_priv *priv = rq->priv;
struct mlx5e_err_ctx err_ctx = {};
char icosq_str[32] = {};
err_ctx.ctx = rq;
err_ctx.recover = mlx5e_rx_reporter_timeout_recover;


@ -300,9 +300,6 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
if (err)
goto destroy_neigh_entry;
e->encap_size = ipv4_encap_size;
e->encap_header = encap_header;
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event
@ -322,6 +319,8 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
goto destroy_neigh_entry;
}
e->encap_size = ipv4_encap_size;
e->encap_header = encap_header;
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
mlx5e_route_lookup_ipv4_put(&attr);
@ -404,16 +403,12 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
if (err)
goto free_encap;
e->encap_size = ipv4_encap_size;
kfree(e->encap_header);
e->encap_header = encap_header;
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event
* and not used before that.
*/
goto release_neigh;
goto free_encap;
}
memset(&reformat_params, 0, sizeof(reformat_params));
@ -427,6 +422,10 @@ int mlx5e_tc_tun_update_header_ipv4(struct mlx5e_priv *priv,
goto free_encap;
}
e->encap_size = ipv4_encap_size;
kfree(e->encap_header);
e->encap_header = encap_header;
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
mlx5e_route_lookup_ipv4_put(&attr);
@ -568,9 +567,6 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
if (err)
goto destroy_neigh_entry;
e->encap_size = ipv6_encap_size;
e->encap_header = encap_header;
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event
@ -590,6 +586,8 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
goto destroy_neigh_entry;
}
e->encap_size = ipv6_encap_size;
e->encap_header = encap_header;
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
mlx5e_route_lookup_ipv6_put(&attr);
@ -671,16 +669,12 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
if (err)
goto free_encap;
e->encap_size = ipv6_encap_size;
kfree(e->encap_header);
e->encap_header = encap_header;
if (!(nud_state & NUD_VALID)) {
neigh_event_send(attr.n, NULL);
/* the encap entry will be made valid on neigh update event
* and not used before that.
*/
goto release_neigh;
goto free_encap;
}
memset(&reformat_params, 0, sizeof(reformat_params));
@ -694,6 +688,10 @@ int mlx5e_tc_tun_update_header_ipv6(struct mlx5e_priv *priv,
goto free_encap;
}
e->encap_size = ipv6_encap_size;
kfree(e->encap_header);
e->encap_header = encap_header;
e->flags |= MLX5_ENCAP_ENTRY_VALID;
mlx5e_rep_queue_neigh_stats_work(netdev_priv(attr.out_dev));
mlx5e_route_lookup_ipv6_put(&attr);

View File

@ -43,12 +43,17 @@ void mlx5e_ethtool_get_drvinfo(struct mlx5e_priv *priv,
struct ethtool_drvinfo *drvinfo)
{
struct mlx5_core_dev *mdev = priv->mdev;
int count;
strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
if (count == sizeof(drvinfo->fw_version))
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)",
fw_rev_maj(mdev), fw_rev_min(mdev), fw_rev_sub(mdev),
mdev->board_id);
"%d.%d.%04d", fw_rev_maj(mdev),
fw_rev_min(mdev), fw_rev_sub(mdev));
strscpy(drvinfo->bus_info, dev_name(mdev->device),
sizeof(drvinfo->bus_info));
}


@ -71,13 +71,17 @@ static void mlx5e_rep_get_drvinfo(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
int count;
strscpy(drvinfo->driver, mlx5e_rep_driver_name,
sizeof(drvinfo->driver));
count = snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)", fw_rev_maj(mdev),
fw_rev_min(mdev), fw_rev_sub(mdev), mdev->board_id);
if (count == sizeof(drvinfo->fw_version))
snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
"%d.%d.%04d (%.16s)",
fw_rev_maj(mdev), fw_rev_min(mdev),
fw_rev_sub(mdev), mdev->board_id);
"%d.%d.%04d", fw_rev_maj(mdev),
fw_rev_min(mdev), fw_rev_sub(mdev));
}
static const struct counter_desc sw_rep_stats_desc[] = {

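Both drvinfo hunks above lean on the same property of snprintf(): it returns the length the fully formatted string would have needed, so a return value that reaches the buffer size means the output was truncated and the call can be redone with a shorter format. A small self-contained illustration (plain userspace C, made-up version numbers and board id):

#include <stdio.h>

int main(void)
{
	char fw_version[16];
	int n;

	/* Try the verbose form first; the return value says whether the
	 * 16-byte buffer could hold it.
	 */
	n = snprintf(fw_version, sizeof(fw_version), "%d.%d.%04d (%.16s)",
		     22, 36, 1010, "MT_0000000000000");
	if (n >= (int)sizeof(fw_version))
		snprintf(fw_version, sizeof(fw_version), "%d.%d.%04d",
			 22, 36, 1010);

	printf("%s\n", fw_version);	/* prints "22.36.1010" */
	return 0;
}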

@ -3147,7 +3147,7 @@ static struct mlx5_fields fields[] = {
OFFLOAD(DIPV6_31_0, 32, U32_MAX, ip6.daddr.s6_addr32[3], 0,
dst_ipv4_dst_ipv6.ipv6_layout.ipv6[12]),
OFFLOAD(IPV6_HOPLIMIT, 8, U8_MAX, ip6.hop_limit, 0, ttl_hoplimit),
OFFLOAD(IP_DSCP, 16, 0xc00f, ip6, 0, ip_dscp),
OFFLOAD(IP_DSCP, 16, 0x0fc0, ip6, 0, ip_dscp),
OFFLOAD(TCP_SPORT, 16, U16_MAX, tcp.source, 0, tcp_sport),
OFFLOAD(TCP_DPORT, 16, U16_MAX, tcp.dest, 0, tcp_dport),
@ -3158,21 +3158,31 @@ static struct mlx5_fields fields[] = {
OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport),
};
static unsigned long mask_to_le(unsigned long mask, int size)
static u32 mask_field_get(void *mask, struct mlx5_fields *f)
{
__be32 mask_be32;
__be16 mask_be16;
if (size == 32) {
mask_be32 = (__force __be32)(mask);
mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32));
} else if (size == 16) {
mask_be32 = (__force __be32)(mask);
mask_be16 = *(__be16 *)&mask_be32;
mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16));
switch (f->field_bsize) {
case 32:
return be32_to_cpu(*(__be32 *)mask) & f->field_mask;
case 16:
return be16_to_cpu(*(__be16 *)mask) & (u16)f->field_mask;
default:
return *(u8 *)mask & (u8)f->field_mask;
}
}
return mask;
static void mask_field_clear(void *mask, struct mlx5_fields *f)
{
switch (f->field_bsize) {
case 32:
*(__be32 *)mask &= ~cpu_to_be32(f->field_mask);
break;
case 16:
*(__be16 *)mask &= ~cpu_to_be16((u16)f->field_mask);
break;
default:
*(u8 *)mask &= ~(u8)f->field_mask;
break;
}
}
static int offload_pedit_fields(struct mlx5e_priv *priv,
@ -3184,11 +3194,12 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
struct pedit_headers *set_masks, *add_masks, *set_vals, *add_vals;
struct pedit_headers_action *hdrs = parse_attr->hdrs;
void *headers_c, *headers_v, *action, *vals_p;
u32 *s_masks_p, *a_masks_p, s_mask, a_mask;
struct mlx5e_tc_mod_hdr_acts *mod_acts;
unsigned long mask, field_mask;
void *s_masks_p, *a_masks_p;
int i, first, last, next_z;
struct mlx5_fields *f;
unsigned long mask;
u32 s_mask, a_mask;
u8 cmd;
mod_acts = &parse_attr->mod_hdr_acts;
@ -3204,15 +3215,11 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
bool skip;
f = &fields[i];
/* avoid seeing bits set from previous iterations */
s_mask = 0;
a_mask = 0;
s_masks_p = (void *)set_masks + f->offset;
a_masks_p = (void *)add_masks + f->offset;
s_mask = *s_masks_p & f->field_mask;
a_mask = *a_masks_p & f->field_mask;
s_mask = mask_field_get(s_masks_p, f);
a_mask = mask_field_get(a_masks_p, f);
if (!s_mask && !a_mask) /* nothing to offload here */
continue;
@ -3239,22 +3246,20 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
match_mask, f->field_bsize))
skip = true;
/* clear to denote we consumed this field */
*s_masks_p &= ~f->field_mask;
mask_field_clear(s_masks_p, f);
} else {
cmd = MLX5_ACTION_TYPE_ADD;
mask = a_mask;
vals_p = (void *)add_vals + f->offset;
/* add 0 is no change */
if ((*(u32 *)vals_p & f->field_mask) == 0)
if (!mask_field_get(vals_p, f))
skip = true;
/* clear to denote we consumed this field */
*a_masks_p &= ~f->field_mask;
mask_field_clear(a_masks_p, f);
}
if (skip)
continue;
mask = mask_to_le(mask, f->field_bsize);
first = find_first_bit(&mask, f->field_bsize);
next_z = find_next_zero_bit(&mask, f->field_bsize, first);
last = find_last_bit(&mask, f->field_bsize);
@ -3281,10 +3286,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv,
MLX5_SET(set_action_in, action, field, f->field);
if (cmd == MLX5_ACTION_TYPE_SET) {
unsigned long field_mask = f->field_mask;
int start;
field_mask = mask_to_le(f->field_mask, f->field_bsize);
/* if field is bit sized it can start not from first bit */
start = find_first_bit(&field_mask, f->field_bsize);


@ -399,9 +399,9 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb,
u8 metadata_index = be32_to_cpu(eseg->flow_table_metadata);
mlx5e_skb_cb_hwtstamp_init(skb);
mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
mlx5e_ptp_metadata_map_put(&sq->ptpsq->metadata_map, skb,
metadata_index);
mlx5e_ptpsq_track_metadata(sq->ptpsq, metadata_index);
if (!netif_tx_queue_stopped(sq->txq) &&
mlx5e_ptpsq_metadata_freelist_empty(sq->ptpsq)) {
netif_tx_stop_queue(sq->txq);
@ -494,10 +494,10 @@ mlx5e_sq_xmit_wqe(struct mlx5e_txqsq *sq, struct sk_buff *skb,
err_drop:
stats->dropped++;
dev_kfree_skb_any(skb);
if (unlikely(sq->ptpsq && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
mlx5e_ptp_metadata_fifo_push(&sq->ptpsq->metadata_freelist,
be32_to_cpu(eseg->flow_table_metadata));
dev_kfree_skb_any(skb);
mlx5e_tx_flush(sq);
}


@ -885,11 +885,14 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_irq *irq;
int cpu;
irq = xa_load(&table->comp_irqs, vecidx);
if (!irq)
return;
cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq));
cpumask_clear_cpu(cpu, &table->used_cpus);
xa_erase(&table->comp_irqs, vecidx);
mlx5_irq_affinity_irq_release(dev, irq);
}
@ -897,16 +900,26 @@ static void comp_irq_release_sf(struct mlx5_core_dev *dev, u16 vecidx)
static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx)
{
struct mlx5_eq_table *table = dev->priv.eq_table;
struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
struct irq_affinity_desc af_desc = {};
struct mlx5_irq *irq;
irq = mlx5_irq_affinity_irq_request_auto(dev, &table->used_cpus, vecidx);
if (IS_ERR(irq)) {
/* In case SF irq pool does not exist, fallback to the PF irqs*/
if (PTR_ERR(irq) == -ENOENT)
if (!mlx5_irq_pool_is_sf_pool(pool))
return comp_irq_request_pci(dev, vecidx);
af_desc.is_managed = 1;
cpumask_copy(&af_desc.mask, cpu_online_mask);
cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus);
irq = mlx5_irq_affinity_request(pool, &af_desc);
if (IS_ERR(irq))
return PTR_ERR(irq);
}
cpumask_or(&table->used_cpus, &table->used_cpus, mlx5_irq_get_affinity_mask(irq));
mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
return xa_err(xa_store(&table->comp_irqs, vecidx, irq, GFP_KERNEL));
}


@ -984,7 +984,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
if (rep->vport == MLX5_VPORT_UPLINK && on_esw->offloads.ft_ipsec_tx_pol) {
if (rep->vport == MLX5_VPORT_UPLINK &&
on_esw == from_esw && on_esw->offloads.ft_ipsec_tx_pol) {
dest.ft = on_esw->offloads.ft_ipsec_tx_pol;
flow_act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL;
dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;


@ -168,45 +168,3 @@ void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *i
if (pool->irqs_per_cpu)
cpu_put(pool, cpu);
}
/**
* mlx5_irq_affinity_irq_request_auto - request one IRQ for mlx5 device.
* @dev: mlx5 device that is requesting the IRQ.
* @used_cpus: cpumask of bounded cpus by the device
* @vecidx: vector index to request an IRQ for.
*
* Each IRQ is bounded to at most 1 CPU.
* This function is requesting an IRQ according to the default assignment.
* The default assignment policy is:
* - request the least loaded IRQ which is not bound to any
* CPU of the previous IRQs requested.
*
* On success, this function updates used_cpus mask and returns an irq pointer.
* In case of an error, an appropriate error pointer is returned.
*/
struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev,
struct cpumask *used_cpus, u16 vecidx)
{
struct mlx5_irq_pool *pool = mlx5_irq_pool_get(dev);
struct irq_affinity_desc af_desc = {};
struct mlx5_irq *irq;
if (!mlx5_irq_pool_is_sf_pool(pool))
return ERR_PTR(-ENOENT);
af_desc.is_managed = 1;
cpumask_copy(&af_desc.mask, cpu_online_mask);
cpumask_andnot(&af_desc.mask, &af_desc.mask, used_cpus);
irq = mlx5_irq_affinity_request(pool, &af_desc);
if (IS_ERR(irq))
return irq;
cpumask_or(used_cpus, used_cpus, mlx5_irq_get_affinity_mask(irq));
mlx5_core_dbg(pool->dev, "IRQ %u mapped to cpu %*pbl, %u EQs on this irq\n",
pci_irq_vector(dev->pdev, mlx5_irq_get_index(irq)),
cpumask_pr_args(mlx5_irq_get_affinity_mask(irq)),
mlx5_irq_read_locked(irq) / MLX5_EQ_REFS_PER_IRQ);
return irq;
}


@ -384,7 +384,12 @@ static int mlx5_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
static int mlx5_ptp_adjphase(struct ptp_clock_info *ptp, s32 delta)
{
return mlx5_ptp_adjtime(ptp, delta);
struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info);
struct mlx5_core_dev *mdev;
mdev = container_of(clock, struct mlx5_core_dev, clock);
return mlx5_ptp_adjtime_real_time(mdev, delta);
}
static int mlx5_ptp_freq_adj_real_time(struct mlx5_core_dev *mdev, long scaled_ppm)


@ -28,7 +28,7 @@
struct mlx5_irq {
struct atomic_notifier_head nh;
cpumask_var_t mask;
char name[MLX5_MAX_IRQ_NAME];
char name[MLX5_MAX_IRQ_FORMATTED_NAME];
struct mlx5_irq_pool *pool;
int refcount;
struct msi_map map;
@ -292,8 +292,8 @@ struct mlx5_irq *mlx5_irq_alloc(struct mlx5_irq_pool *pool, int i,
else
irq_sf_set_name(pool, name, i);
ATOMIC_INIT_NOTIFIER_HEAD(&irq->nh);
snprintf(irq->name, MLX5_MAX_IRQ_NAME,
"%s@pci:%s", name, pci_name(dev->pdev));
snprintf(irq->name, MLX5_MAX_IRQ_FORMATTED_NAME,
MLX5_IRQ_NAME_FORMAT_STR, name, pci_name(dev->pdev));
err = request_irq(irq->map.virq, irq_int_handler, 0, irq->name,
&irq->nh);
if (err) {


@ -7,6 +7,9 @@
#include <linux/mlx5/driver.h>
#define MLX5_MAX_IRQ_NAME (32)
#define MLX5_IRQ_NAME_FORMAT_STR ("%s@pci:%s")
#define MLX5_MAX_IRQ_FORMATTED_NAME \
(MLX5_MAX_IRQ_NAME + sizeof(MLX5_IRQ_NAME_FORMAT_STR))
/* max irq_index is 2047, so four chars */
#define MLX5_MAX_IRQ_IDX_CHARS (4)
#define MLX5_EQ_REFS_PER_IRQ (2)


@ -57,7 +57,8 @@ static const char *dr_action_id_to_str(enum mlx5dr_action_type action_id)
static bool mlx5dr_action_supp_fwd_fdb_multi_ft(struct mlx5_core_dev *dev)
{
return (MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) ||
return (MLX5_CAP_GEN(dev, steering_format_version) < MLX5_STEERING_FORMAT_CONNECTX_6DX ||
MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table_limit_regc) ||
MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_any_table));
}


@ -52,7 +52,6 @@ struct dr_qp_init_attr {
u32 cqn;
u32 pdn;
u32 max_send_wr;
u32 max_send_sge;
struct mlx5_uars_page *uar;
u8 isolate_vl_tc:1;
};
@ -247,37 +246,6 @@ static int dr_poll_cq(struct mlx5dr_cq *dr_cq, int ne)
return err == CQ_POLL_ERR ? err : npolled;
}
static int dr_qp_get_args_update_send_wqe_size(struct dr_qp_init_attr *attr)
{
return roundup_pow_of_two(sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_flow_update_ctrl_seg) +
sizeof(struct mlx5_wqe_header_modify_argument_update_seg));
}
/* We calculate for specific RC QP with the required functionality */
static int dr_qp_calc_rc_send_wqe(struct dr_qp_init_attr *attr)
{
int update_arg_size;
int inl_size = 0;
int tot_size;
int size;
update_arg_size = dr_qp_get_args_update_send_wqe_size(attr);
size = sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_raddr_seg);
inl_size = size + ALIGN(sizeof(struct mlx5_wqe_inline_seg) +
DR_STE_SIZE, 16);
size += attr->max_send_sge * sizeof(struct mlx5_wqe_data_seg);
size = max(size, update_arg_size);
tot_size = max(size, inl_size);
return ALIGN(tot_size, MLX5_SEND_WQE_BB);
}
static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
struct dr_qp_init_attr *attr)
{
@ -285,7 +253,6 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
u32 temp_qpc[MLX5_ST_SZ_DW(qpc)] = {};
struct mlx5_wq_param wqp;
struct mlx5dr_qp *dr_qp;
int wqe_size;
int inlen;
void *qpc;
void *in;
@ -365,15 +332,6 @@ static struct mlx5dr_qp *dr_create_rc_qp(struct mlx5_core_dev *mdev,
if (err)
goto err_in;
dr_qp->uar = attr->uar;
wqe_size = dr_qp_calc_rc_send_wqe(attr);
dr_qp->max_inline_data = min(wqe_size -
(sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_raddr_seg) +
sizeof(struct mlx5_wqe_inline_seg)),
(2 * MLX5_SEND_WQE_BB -
(sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_raddr_seg) +
sizeof(struct mlx5_wqe_inline_seg))));
return dr_qp;
@ -437,48 +395,8 @@ dr_rdma_handle_flow_access_arg_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
MLX5_SEND_WQE_DS;
}
static int dr_set_data_inl_seg(struct mlx5dr_qp *dr_qp,
struct dr_data_seg *data_seg, void *wqe)
{
int inline_header_size = sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_raddr_seg) +
sizeof(struct mlx5_wqe_inline_seg);
struct mlx5_wqe_inline_seg *seg;
int left_space;
int inl = 0;
void *addr;
int len;
int idx;
seg = wqe;
wqe += sizeof(*seg);
addr = (void *)(unsigned long)(data_seg->addr);
len = data_seg->length;
inl += len;
left_space = MLX5_SEND_WQE_BB - inline_header_size;
if (likely(len > left_space)) {
memcpy(wqe, addr, left_space);
len -= left_space;
addr += left_space;
idx = (dr_qp->sq.pc + 1) & (dr_qp->sq.wqe_cnt - 1);
wqe = mlx5_wq_cyc_get_wqe(&dr_qp->wq.sq, idx);
}
memcpy(wqe, addr, len);
if (likely(inl)) {
seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
return DIV_ROUND_UP(inl + sizeof(seg->byte_count),
MLX5_SEND_WQE_DS);
} else {
return 0;
}
}
static void
dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp,
struct mlx5_wqe_ctrl_seg *wq_ctrl,
dr_rdma_handle_icm_write_segments(struct mlx5_wqe_ctrl_seg *wq_ctrl,
u64 remote_addr,
u32 rkey,
struct dr_data_seg *data_seg,
@ -494,17 +412,15 @@ dr_rdma_handle_icm_write_segments(struct mlx5dr_qp *dr_qp,
wq_raddr->reserved = 0;
wq_dseg = (void *)(wq_raddr + 1);
/* WQE ctrl segment + WQE remote addr segment */
*size = (sizeof(*wq_ctrl) + sizeof(*wq_raddr)) / MLX5_SEND_WQE_DS;
if (data_seg->send_flags & IB_SEND_INLINE) {
*size += dr_set_data_inl_seg(dr_qp, data_seg, wq_dseg);
} else {
wq_dseg->byte_count = cpu_to_be32(data_seg->length);
wq_dseg->lkey = cpu_to_be32(data_seg->lkey);
wq_dseg->addr = cpu_to_be64(data_seg->addr);
*size += sizeof(*wq_dseg) / MLX5_SEND_WQE_DS; /* WQE data segment */
}
*size = (sizeof(*wq_ctrl) + /* WQE ctrl segment */
sizeof(*wq_dseg) + /* WQE data segment */
sizeof(*wq_raddr)) / /* WQE remote addr segment */
MLX5_SEND_WQE_DS;
}
static void dr_set_ctrl_seg(struct mlx5_wqe_ctrl_seg *wq_ctrl,
@ -535,7 +451,7 @@ static void dr_rdma_segments(struct mlx5dr_qp *dr_qp, u64 remote_addr,
switch (opcode) {
case MLX5_OPCODE_RDMA_READ:
case MLX5_OPCODE_RDMA_WRITE:
dr_rdma_handle_icm_write_segments(dr_qp, wq_ctrl, remote_addr,
dr_rdma_handle_icm_write_segments(wq_ctrl, remote_addr,
rkey, data_seg, &size);
break;
case MLX5_OPCODE_FLOW_TBL_ACCESS:
@ -656,7 +572,7 @@ static void dr_fill_write_args_segs(struct mlx5dr_send_ring *send_ring,
if (send_ring->pending_wqe % send_ring->signal_th == 0)
send_info->write.send_flags |= IB_SEND_SIGNALED;
else
send_info->write.send_flags &= ~IB_SEND_SIGNALED;
send_info->write.send_flags = 0;
}
static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
@ -680,13 +596,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
}
send_ring->pending_wqe++;
if (!send_info->write.lkey)
send_info->write.send_flags |= IB_SEND_INLINE;
if (send_ring->pending_wqe % send_ring->signal_th == 0)
send_info->write.send_flags |= IB_SEND_SIGNALED;
else
send_info->write.send_flags &= ~IB_SEND_SIGNALED;
send_ring->pending_wqe++;
send_info->read.length = send_info->write.length;
@ -696,9 +608,9 @@ static void dr_fill_write_icm_segs(struct mlx5dr_domain *dmn,
send_info->read.lkey = send_ring->sync_mr->mkey;
if (send_ring->pending_wqe % send_ring->signal_th == 0)
send_info->read.send_flags |= IB_SEND_SIGNALED;
send_info->read.send_flags = IB_SEND_SIGNALED;
else
send_info->read.send_flags &= ~IB_SEND_SIGNALED;
send_info->read.send_flags = 0;
}
static void dr_fill_data_segs(struct mlx5dr_domain *dmn,
@ -1345,7 +1257,6 @@ int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn)
dmn->send_ring->cq->qp = dmn->send_ring->qp;
dmn->info.max_send_wr = QUEUE_SIZE;
init_attr.max_send_sge = 1;
dmn->info.max_inline_size = min(dmn->send_ring->qp->max_inline_data,
DR_STE_SIZE);


@ -624,6 +624,7 @@ struct rtl8169_private {
unsigned supports_gmii:1;
unsigned aspm_manageable:1;
unsigned dash_enabled:1;
dma_addr_t counters_phys_addr;
struct rtl8169_counters *counters;
struct rtl8169_tc_offsets tc_offset;
@ -1253,14 +1254,26 @@ static bool r8168ep_check_dash(struct rtl8169_private *tp)
return r8168ep_ocp_read(tp, 0x128) & BIT(0);
}
static enum rtl_dash_type rtl_check_dash(struct rtl8169_private *tp)
static bool rtl_dash_is_enabled(struct rtl8169_private *tp)
{
switch (tp->dash_type) {
case RTL_DASH_DP:
return r8168dp_check_dash(tp);
case RTL_DASH_EP:
return r8168ep_check_dash(tp);
default:
return false;
}
}
static enum rtl_dash_type rtl_get_dash_type(struct rtl8169_private *tp)
{
switch (tp->mac_version) {
case RTL_GIGA_MAC_VER_28:
case RTL_GIGA_MAC_VER_31:
return r8168dp_check_dash(tp) ? RTL_DASH_DP : RTL_DASH_NONE;
return RTL_DASH_DP;
case RTL_GIGA_MAC_VER_51 ... RTL_GIGA_MAC_VER_53:
return r8168ep_check_dash(tp) ? RTL_DASH_EP : RTL_DASH_NONE;
return RTL_DASH_EP;
default:
return RTL_DASH_NONE;
}
@ -1453,7 +1466,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
device_set_wakeup_enable(tp_to_dev(tp), wolopts);
if (tp->dash_type == RTL_DASH_NONE) {
if (!tp->dash_enabled) {
rtl_set_d3_pll_down(tp, !wolopts);
tp->dev->wol_enabled = wolopts ? 1 : 0;
}
@ -2512,7 +2525,7 @@ static void rtl_wol_enable_rx(struct rtl8169_private *tp)
static void rtl_prepare_power_down(struct rtl8169_private *tp)
{
if (tp->dash_type != RTL_DASH_NONE)
if (tp->dash_enabled)
return;
if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
@ -4648,10 +4661,16 @@ static void rtl8169_down(struct rtl8169_private *tp)
rtl8169_cleanup(tp);
rtl_disable_exit_l1(tp);
rtl_prepare_power_down(tp);
if (tp->dash_type != RTL_DASH_NONE)
rtl8168_driver_stop(tp);
}
static void rtl8169_up(struct rtl8169_private *tp)
{
if (tp->dash_type != RTL_DASH_NONE)
rtl8168_driver_start(tp);
pci_set_master(tp->pci_dev);
phy_init_hw(tp->phydev);
phy_resume(tp->phydev);
@ -4869,7 +4888,7 @@ static int rtl8169_runtime_idle(struct device *device)
{
struct rtl8169_private *tp = dev_get_drvdata(device);
if (tp->dash_type != RTL_DASH_NONE)
if (tp->dash_enabled)
return -EBUSY;
if (!netif_running(tp->dev) || !netif_carrier_ok(tp->dev))
@ -4895,8 +4914,7 @@ static void rtl_shutdown(struct pci_dev *pdev)
/* Restore original MAC address */
rtl_rar_set(tp, tp->dev->perm_addr);
if (system_state == SYSTEM_POWER_OFF &&
tp->dash_type == RTL_DASH_NONE) {
if (system_state == SYSTEM_POWER_OFF && !tp->dash_enabled) {
pci_wake_from_d3(pdev, tp->saved_wolopts);
pci_set_power_state(pdev, PCI_D3hot);
}
@ -5254,7 +5272,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1);
tp->aspm_manageable = !rc;
tp->dash_type = rtl_check_dash(tp);
tp->dash_type = rtl_get_dash_type(tp);
tp->dash_enabled = rtl_dash_is_enabled(tp);
tp->cp_cmd = RTL_R16(tp, CPlusCmd) & CPCMD_MASK;
@ -5325,7 +5344,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
/* configure chip for default features */
rtl8169_set_features(dev, dev->features);
if (tp->dash_type == RTL_DASH_NONE) {
if (!tp->dash_enabled) {
rtl_set_d3_pll_down(tp, true);
} else {
rtl_set_d3_pll_down(tp, false);
@ -5365,7 +5384,8 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
"ok" : "ko");
if (tp->dash_type != RTL_DASH_NONE) {
netdev_info(dev, "DASH enabled\n");
netdev_info(dev, "DASH %s\n",
tp->dash_enabled ? "enabled" : "disabled");
rtl8168_driver_start(tp);
}


@ -5293,6 +5293,7 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
if (netif_msg_rx_status(priv)) {
void *rx_head;
@ -5328,10 +5329,10 @@ static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
len = 0;
}
read_again:
if (count >= limit)
break;
read_again:
buf1_len = 0;
buf2_len = 0;
entry = next_entry;


@ -2063,7 +2063,7 @@ static int prueth_probe(struct platform_device *pdev)
&prueth->shram);
if (ret) {
dev_err(dev, "unable to get PRUSS SHRD RAM2: %d\n", ret);
pruss_put(prueth->pruss);
goto put_pruss;
}
prueth->sram_pool = of_gen_pool_get(np, "sram", 0);
@ -2105,10 +2105,7 @@ static int prueth_probe(struct platform_device *pdev)
prueth->iep1 = icss_iep_get_idx(np, 1);
if (IS_ERR(prueth->iep1)) {
ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), "iep1 get failed\n");
icss_iep_put(prueth->iep0);
prueth->iep0 = NULL;
prueth->iep1 = NULL;
goto free_pool;
goto put_iep0;
}
if (prueth->pdata.quirk_10m_link_issue) {
@ -2205,6 +2202,12 @@ netdev_exit:
exit_iep:
if (prueth->pdata.quirk_10m_link_issue)
icss_iep_exit_fw(prueth->iep1);
icss_iep_put(prueth->iep1);
put_iep0:
icss_iep_put(prueth->iep0);
prueth->iep0 = NULL;
prueth->iep1 = NULL;
free_pool:
gen_pool_free(prueth->sram_pool,
@ -2212,6 +2215,8 @@ free_pool:
put_mem:
pruss_release_mem_region(prueth->pruss, &prueth->shram);
put_pruss:
pruss_put(prueth->pruss);
put_cores:

Some files were not shown because too many files have changed in this diff.