Merge branch 'linus' into x86/mm, to pick up a bugfix

Signed-off-by: Ingo Molnar <mingo@kernel.org>

commit 74c8ce958d
@@ -1142,16 +1142,17 @@ used by the kernel.

  pids.max

-	A read-write single value file which exists on non-root cgroups. The
-	default is "max".
+	A read-write single value file which exists on non-root
+	cgroups. The default is "max".

-	Hard limit of number of processes.
+	Hard limit of number of processes.

  pids.current

-	A read-only single value file which exists on all cgroups.
+	A read-only single value file which exists on all cgroups.

-	The number of processes currently in the cgroup and its descendants.
+	The number of processes currently in the cgroup and its
+	descendants.

 	Organisational operations are not blocked by cgroup policies, so it is
 	possible to have pids.current > pids.max. This can be done by either
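For illustration only (not part of this patch), a minimal user-space sketch
of the pids interface described above; the cgroup mount point and group name
are assumptions:

#include <stdio.h>

int main(void)
{
	const char *base = "/sys/fs/cgroup/mygroup";	/* assumed path */
	char path[256], buf[64];
	FILE *f;

	/* Cap the subtree at 64 processes; writing "max" lifts the cap. */
	snprintf(path, sizeof(path), "%s/pids.max", base);
	f = fopen(path, "w");
	if (f) {
		fprintf(f, "64\n");
		fclose(f);
	}

	/* pids.current counts this cgroup and all of its descendants,
	 * and may legitimately exceed pids.max, as noted above. */
	snprintf(path, sizeof(path), "%s/pids.current", base);
	f = fopen(path, "r");
	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("pids.current = %s", buf);
		fclose(f);
	}
	return 0;
}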
@@ -71,6 +71,9 @@
                       For Axon it can be absent, though my current driver
                       doesn't handle phy-address yet so for now, keep
                       0x00ffffff in it.
+- phy-handle         : Used to describe configurations where an external PHY
+                       is used. Please refer to:
+                       Documentation/devicetree/bindings/net/ethernet.txt
 - rx-fifo-size-gige  : 1 cell, Rx fifo size in bytes for 1000 Mb/sec
                       operations (if absent the value is the same as
                       rx-fifo-size). For Axon, either absent or 2048.
@@ -81,8 +84,22 @@
                       offload, phandle of the TAH device node.
 - tah-channel        : 1 cell, optional. If appropriate, channel used on the
                       TAH engine.
+- fixed-link         : Fixed-link subnode describing a link to a non-MDIO
+                       managed entity. See
+                       Documentation/devicetree/bindings/net/fixed-link.txt
+                       for details.
+- mdio subnode       : When the EMAC has a phy connected to its local
+                       mdio, which is supported by the kernel's network
+                       PHY library in drivers/net/phy, there must be a device
+                       tree subnode with the following required properties:
+                       - #address-cells: Must be <1>.
+                       - #size-cells: Must be <0>.

-Example:
+For PHY definitions: Please refer to
+Documentation/devicetree/bindings/net/phy.txt and
+Documentation/devicetree/bindings/net/ethernet.txt
+
+Examples:

 	EMAC0: ethernet@40000800 {
 		device_type = "network";
@@ -104,6 +121,48 @@
 		zmii-channel = <0>;
 	};

+	EMAC1: ethernet@ef600c00 {
+		device_type = "network";
+		compatible = "ibm,emac-apm821xx", "ibm,emac4sync";
+		interrupt-parent = <&EMAC1>;
+		interrupts = <0 1>;
+		#interrupt-cells = <1>;
+		#address-cells = <0>;
+		#size-cells = <0>;
+		interrupt-map = <0 &UIC2 0x10 IRQ_TYPE_LEVEL_HIGH /* Status */
+				 1 &UIC2 0x14 IRQ_TYPE_LEVEL_HIGH /* Wake */>;
+		reg = <0xef600c00 0x000000c4>;
+		local-mac-address = [000000000000]; /* Filled in by U-Boot */
+		mal-device = <&MAL0>;
+		mal-tx-channel = <0>;
+		mal-rx-channel = <0>;
+		cell-index = <0>;
+		max-frame-size = <9000>;
+		rx-fifo-size = <16384>;
+		tx-fifo-size = <2048>;
+		fifo-entry-size = <10>;
+		phy-mode = "rgmii";
+		phy-handle = <&phy0>;
+		phy-map = <0x00000000>;
+		rgmii-device = <&RGMII0>;
+		rgmii-channel = <0>;
+		tah-device = <&TAH0>;
+		tah-channel = <0>;
+		has-inverted-stacr-oc;
+		has-new-stacr-staopc;
+
+		mdio {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			phy0: ethernet-phy@0 {
+				compatible = "ethernet-phy-ieee802.3-c22";
+				reg = <0>;
+			};
+		};
+	};
+
+
 ii) McMAL node

 Required properties:
@@ -145,4 +204,3 @@
 - revision           : as provided by the RGMII new version register if
                       available.
                       For Axon: 0x0000012a
-
@@ -1006,7 +1006,8 @@ accept_redirects - BOOLEAN
 		FALSE (router)

 forwarding - BOOLEAN
-	Enable IP forwarding on this interface.
+	Enable IP forwarding on this interface. This controls whether packets
+	received _on_ this interface can be forwarded.

 mc_forwarding - BOOLEAN
 	Do multicast routing. The kernel needs to be compiled with CONFIG_MROUTE
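As a hedged aside (not part of the patch), this per-interface knob is exposed
at /proc/sys/net/ipv4/conf/<interface>/forwarding; the name "eth0" below is
only an illustrative assumption:

#include <stdio.h>

int main(void)
{
	char buf[8];
	FILE *f = fopen("/proc/sys/net/ipv4/conf/eth0/forwarding", "r");

	if (f) {
		/* "1" means packets received on eth0 may be forwarded. */
		if (fgets(buf, sizeof(buf), f))
			printf("eth0 forwarding = %s", buf);
		fclose(f);
	}
	return 0;
}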
@@ -1073,6 +1073,10 @@ config SYSVIPC_COMPAT
 	def_bool y
 	depends on COMPAT && SYSVIPC

+config KEYS_COMPAT
+	def_bool y
+	depends on COMPAT && KEYS
+
 endmenu

 menu "Power management options"
@@ -251,7 +251,7 @@ static inline bool system_supports_fpsimd(void)
 static inline bool system_uses_ttbr0_pan(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) &&
-		!cpus_have_cap(ARM64_HAS_PAN);
+		!cpus_have_const_cap(ARM64_HAS_PAN);
 }

 #endif /* __ASSEMBLY__ */
@@ -30,7 +30,7 @@ int arm_cpuidle_init(unsigned int cpu)
 }

 /**
- * cpu_suspend() - function to enter a low-power idle state
+ * arm_cpuidle_suspend() - function to enter a low-power idle state
  * @arg: argument to pass to CPU suspend operations
  *
  * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU
@@ -372,12 +372,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
 	return 0;
 }

-int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
-				       unsigned long val, void *data)
-{
-	return NOTIFY_DONE;
-}
-
 static void __kprobes kprobe_handler(struct pt_regs *regs)
 {
 	struct kprobe *p, *cur_kprobe;
@@ -162,7 +162,7 @@ void __init kasan_init(void)
 	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

 	vmemmap_populate(kimg_shadow_start, kimg_shadow_end,
-			 pfn_to_nid(virt_to_pfn(_text)));
+			 pfn_to_nid(virt_to_pfn(lm_alias(_text))));

 	/*
 	 * vmemmap_populate() has populated the shadow region that covers the
@@ -68,6 +68,7 @@ SECTIONS
   }

 #ifdef CONFIG_PPC64_BOOT_WRAPPER
+  . = ALIGN(256);
   .got :
   {
     __toc_start = .;
@@ -52,7 +52,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm)
 {
 	u32 *key = crypto_tfm_ctx(tfm);

-	*key = 0;
+	*key = ~0;

 	return 0;
 }
@@ -51,6 +51,10 @@
 #define PPC_BIT(bit)		(1UL << PPC_BITLSHIFT(bit))
 #define PPC_BITMASK(bs, be)	((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs))

+/* Put a PPC bit into a "normal" bit position */
+#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
+	((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))
+
 #include <asm/barrier.h>

 /* Macro for generating the ***_bits() functions */
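PPC numbers register bits from the most-significant end (IBM bit 0 is the MSB
of a 64-bit register), so PPC_BITEXTRACT shifts an IBM-numbered bit down into
a conventional position. A standalone sketch, assuming PPC_BITLSHIFT(be) is
(63 - (be)) as in the kernel's 64-bit definition:

#include <stdint.h>
#include <stdio.h>

#define PPC_BITLSHIFT(be)	(63 - (be))	/* assumption stated above */
#define PPC_BIT(bit)		(1UL << PPC_BITLSHIFT(bit))
#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \
	((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit))

/* From the P9 definitions below: SRR1 bits 45/44/43/36 fold into a code. */
#define P9_SRR1_MC_IFETCH(srr1)	( \
	PPC_BITEXTRACT(srr1, 45, 0) | \
	PPC_BITEXTRACT(srr1, 44, 1) | \
	PPC_BITEXTRACT(srr1, 43, 2) | \
	PPC_BITEXTRACT(srr1, 36, 3) )

int main(void)
{
	/* Set IBM bits 43 and 45: code is 0b0101 = 5 (TLB multihit). */
	uint64_t srr1 = PPC_BIT(43) | PPC_BIT(45);

	printf("ifetch code = %lu\n", (unsigned long)P9_SRR1_MC_IFETCH(srr1));
	return 0;
}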
@@ -66,6 +66,55 @@

 #define P8_DSISR_MC_SLB_ERRORS		(P7_DSISR_MC_SLB_ERRORS | \
 					 P8_DSISR_MC_ERAT_MULTIHIT_SEC)
+
+/*
+ * Machine Check bits on power9
+ */
+#define P9_SRR1_MC_LOADSTORE(srr1)	(((srr1) >> PPC_BITLSHIFT(42)) & 1)
+
+#define P9_SRR1_MC_IFETCH(srr1)	(	\
+	PPC_BITEXTRACT(srr1, 45, 0) |	\
+	PPC_BITEXTRACT(srr1, 44, 1) |	\
+	PPC_BITEXTRACT(srr1, 43, 2) |	\
+	PPC_BITEXTRACT(srr1, 36, 3) )
+
+/* 0 is reserved */
+#define P9_SRR1_MC_IFETCH_UE				1
+#define P9_SRR1_MC_IFETCH_SLB_PARITY			2
+#define P9_SRR1_MC_IFETCH_SLB_MULTIHIT			3
+#define P9_SRR1_MC_IFETCH_ERAT_MULTIHIT			4
+#define P9_SRR1_MC_IFETCH_TLB_MULTIHIT			5
+#define P9_SRR1_MC_IFETCH_UE_TLB_RELOAD			6
+/* 7 is reserved */
+#define P9_SRR1_MC_IFETCH_LINK_TIMEOUT			8
+#define P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT	9
+/* 10 ? */
+#define P9_SRR1_MC_IFETCH_RA				11
+#define P9_SRR1_MC_IFETCH_RA_TABLEWALK			12
+#define P9_SRR1_MC_IFETCH_RA_ASYNC_STORE		13
+#define P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT	14
+#define P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN		15
+
+/* DSISR bits for machine check (On Power9) */
+#define P9_DSISR_MC_UE				(PPC_BIT(48))
+#define P9_DSISR_MC_UE_TABLEWALK		(PPC_BIT(49))
+#define P9_DSISR_MC_LINK_LOAD_TIMEOUT		(PPC_BIT(50))
+#define P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT	(PPC_BIT(51))
+#define P9_DSISR_MC_ERAT_MULTIHIT		(PPC_BIT(52))
+#define P9_DSISR_MC_TLB_MULTIHIT_MFTLB		(PPC_BIT(53))
+#define P9_DSISR_MC_USER_TLBIE			(PPC_BIT(54))
+#define P9_DSISR_MC_SLB_PARITY_MFSLB		(PPC_BIT(55))
+#define P9_DSISR_MC_SLB_MULTIHIT_MFSLB		(PPC_BIT(56))
+#define P9_DSISR_MC_RA_LOAD			(PPC_BIT(57))
+#define P9_DSISR_MC_RA_TABLEWALK		(PPC_BIT(58))
+#define P9_DSISR_MC_RA_TABLEWALK_FOREIGN	(PPC_BIT(59))
+#define P9_DSISR_MC_RA_FOREIGN			(PPC_BIT(60))
+
+/* SLB error bits */
+#define P9_DSISR_MC_SLB_ERRORS		(P9_DSISR_MC_ERAT_MULTIHIT | \
+					 P9_DSISR_MC_SLB_PARITY_MFSLB | \
+					 P9_DSISR_MC_SLB_MULTIHIT_MFSLB)

 enum MCE_Version {
 	MCE_V1 = 1,
 };
@@ -93,6 +142,9 @@ enum MCE_ErrorType {
 	MCE_ERROR_TYPE_SLB = 2,
 	MCE_ERROR_TYPE_ERAT = 3,
 	MCE_ERROR_TYPE_TLB = 4,
+	MCE_ERROR_TYPE_USER = 5,
+	MCE_ERROR_TYPE_RA = 6,
+	MCE_ERROR_TYPE_LINK = 7,
 };

 enum MCE_UeErrorType {
@@ -121,6 +173,32 @@ enum MCE_TlbErrorType {
 	MCE_TLB_ERROR_MULTIHIT = 2,
 };

+enum MCE_UserErrorType {
+	MCE_USER_ERROR_INDETERMINATE = 0,
+	MCE_USER_ERROR_TLBIE = 1,
+};
+
+enum MCE_RaErrorType {
+	MCE_RA_ERROR_INDETERMINATE = 0,
+	MCE_RA_ERROR_IFETCH = 1,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 3,
+	MCE_RA_ERROR_LOAD = 4,
+	MCE_RA_ERROR_STORE = 5,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 6,
+	MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 7,
+	MCE_RA_ERROR_LOAD_STORE_FOREIGN = 8,
+};
+
+enum MCE_LinkErrorType {
+	MCE_LINK_ERROR_INDETERMINATE = 0,
+	MCE_LINK_ERROR_IFETCH_TIMEOUT = 1,
+	MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT = 2,
+	MCE_LINK_ERROR_LOAD_TIMEOUT = 3,
+	MCE_LINK_ERROR_STORE_TIMEOUT = 4,
+	MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT = 5,
+};
+
 struct machine_check_event {
 	enum MCE_Version	version:8;	/* 0x00 */
 	uint8_t			in_use;		/* 0x01 */
@@ -166,6 +244,30 @@ struct machine_check_event {
 			uint64_t	effective_address;
 			uint8_t		reserved_2[16];
 		} tlb_error;
+
+		struct {
+			enum MCE_UserErrorType user_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} user_error;
+
+		struct {
+			enum MCE_RaErrorType ra_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} ra_error;
+
+		struct {
+			enum MCE_LinkErrorType link_error_type:8;
+			uint8_t		effective_address_provided;
+			uint8_t		reserved_1[6];
+			uint64_t	effective_address;
+			uint8_t		reserved_2[16];
+		} link_error;
 	} u;
 };
@@ -176,8 +278,12 @@ struct mce_error_info {
 		enum MCE_SlbErrorType slb_error_type:8;
 		enum MCE_EratErrorType erat_error_type:8;
 		enum MCE_TlbErrorType tlb_error_type:8;
+		enum MCE_UserErrorType user_error_type:8;
+		enum MCE_RaErrorType ra_error_type:8;
+		enum MCE_LinkErrorType link_error_type:8;
 	} u;
-	uint8_t		reserved[2];
+	enum MCE_Severity	severity:8;
+	enum MCE_Initiator	initiator:8;
 };

 #define MAX_MC_EVT 100
@@ -77,6 +77,7 @@ extern void __flush_tlb_power8(unsigned int action);
 extern void __flush_tlb_power9(unsigned int action);
 extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
 extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
+extern long __machine_check_early_realmode_p9(struct pt_regs *regs);
 #endif /* CONFIG_PPC64 */
 #if defined(CONFIG_E500)
 extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec);
@@ -540,6 +541,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_setup		= __setup_cpu_power9,
 		.cpu_restore		= __restore_cpu_power9,
 		.flush_tlb		= __flush_tlb_power9,
+		.machine_check_early	= __machine_check_early_realmode_p9,
 		.platform		= "power9",
 	},
 	{	/* Power9 */
@@ -559,6 +561,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.cpu_setup		= __setup_cpu_power9,
 		.cpu_restore		= __restore_cpu_power9,
 		.flush_tlb		= __flush_tlb_power9,
+		.machine_check_early	= __machine_check_early_realmode_p9,
 		.platform		= "power9",
 	},
 	{	/* Cell Broadband Engine */
@@ -58,6 +58,15 @@ static void mce_set_error_info(struct machine_check_event *mce,
 	case MCE_ERROR_TYPE_TLB:
 		mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type;
 		break;
+	case MCE_ERROR_TYPE_USER:
+		mce->u.user_error.user_error_type = mce_err->u.user_error_type;
+		break;
+	case MCE_ERROR_TYPE_RA:
+		mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type;
+		break;
+	case MCE_ERROR_TYPE_LINK:
+		mce->u.link_error.link_error_type = mce_err->u.link_error_type;
+		break;
 	case MCE_ERROR_TYPE_UNKNOWN:
 	default:
 		break;
@@ -90,13 +99,14 @@ void save_mce_event(struct pt_regs *regs, long handled,
 	mce->gpr3 = regs->gpr[3];
 	mce->in_use = 1;

-	mce->initiator = MCE_INITIATOR_CPU;
 	/* Mark it recovered if we have handled it and MSR(RI=1). */
 	if (handled && (regs->msr & MSR_RI))
 		mce->disposition = MCE_DISPOSITION_RECOVERED;
 	else
 		mce->disposition = MCE_DISPOSITION_NOT_RECOVERED;
-	mce->severity = MCE_SEV_ERROR_SYNC;
+
+	mce->initiator = mce_err->initiator;
+	mce->severity = mce_err->severity;

 	/*
 	 * Populate the mce error_type and type-specific error_type.
@@ -115,6 +125,15 @@ void save_mce_event(struct pt_regs *regs, long handled,
 	} else if (mce->error_type == MCE_ERROR_TYPE_ERAT) {
 		mce->u.erat_error.effective_address_provided = true;
 		mce->u.erat_error.effective_address = addr;
+	} else if (mce->error_type == MCE_ERROR_TYPE_USER) {
+		mce->u.user_error.effective_address_provided = true;
+		mce->u.user_error.effective_address = addr;
+	} else if (mce->error_type == MCE_ERROR_TYPE_RA) {
+		mce->u.ra_error.effective_address_provided = true;
+		mce->u.ra_error.effective_address = addr;
+	} else if (mce->error_type == MCE_ERROR_TYPE_LINK) {
+		mce->u.link_error.effective_address_provided = true;
+		mce->u.link_error.effective_address = addr;
 	} else if (mce->error_type == MCE_ERROR_TYPE_UE) {
 		mce->u.ue_error.effective_address_provided = true;
 		mce->u.ue_error.effective_address = addr;
@@ -239,6 +258,29 @@ void machine_check_print_event_info(struct machine_check_event *evt)
 		"Parity",
 		"Multihit",
 	};
+	static const char *mc_user_types[] = {
+		"Indeterminate",
+		"tlbie(l) invalid",
+	};
+	static const char *mc_ra_types[] = {
+		"Indeterminate",
+		"Instruction fetch (bad)",
+		"Page table walk ifetch (bad)",
+		"Page table walk ifetch (foreign)",
+		"Load (bad)",
+		"Store (bad)",
+		"Page table walk Load/Store (bad)",
+		"Page table walk Load/Store (foreign)",
+		"Load/Store (foreign)",
+	};
+	static const char *mc_link_types[] = {
+		"Indeterminate",
+		"Instruction fetch (timeout)",
+		"Page table walk ifetch (timeout)",
+		"Load (timeout)",
+		"Store (timeout)",
+		"Page table walk Load/Store (timeout)",
+	};

 	/* Print things out */
 	if (evt->version != MCE_V1) {
@@ -315,6 +357,36 @@ void machine_check_print_event_info(struct machine_check_event *evt)
 			printk("%s    Effective address: %016llx\n",
 			       level, evt->u.tlb_error.effective_address);
 		break;
+	case MCE_ERROR_TYPE_USER:
+		subtype = evt->u.user_error.user_error_type <
+			ARRAY_SIZE(mc_user_types) ?
+			mc_user_types[evt->u.user_error.user_error_type]
+			: "Unknown";
+		printk("%s  Error type: User [%s]\n", level, subtype);
+		if (evt->u.user_error.effective_address_provided)
+			printk("%s    Effective address: %016llx\n",
+			       level, evt->u.user_error.effective_address);
+		break;
+	case MCE_ERROR_TYPE_RA:
+		subtype = evt->u.ra_error.ra_error_type <
+			ARRAY_SIZE(mc_ra_types) ?
+			mc_ra_types[evt->u.ra_error.ra_error_type]
+			: "Unknown";
+		printk("%s  Error type: Real address [%s]\n", level, subtype);
+		if (evt->u.ra_error.effective_address_provided)
+			printk("%s    Effective address: %016llx\n",
+			       level, evt->u.ra_error.effective_address);
+		break;
+	case MCE_ERROR_TYPE_LINK:
+		subtype = evt->u.link_error.link_error_type <
+			ARRAY_SIZE(mc_link_types) ?
+			mc_link_types[evt->u.link_error.link_error_type]
+			: "Unknown";
+		printk("%s  Error type: Link [%s]\n", level, subtype);
+		if (evt->u.link_error.effective_address_provided)
+			printk("%s    Effective address: %016llx\n",
+			       level, evt->u.link_error.effective_address);
+		break;
 	default:
 	case MCE_ERROR_TYPE_UNKNOWN:
 		printk("%s  Error type: Unknown\n", level);
@@ -341,6 +413,18 @@ uint64_t get_mce_fault_addr(struct machine_check_event *evt)
 		if (evt->u.tlb_error.effective_address_provided)
 			return evt->u.tlb_error.effective_address;
 		break;
+	case MCE_ERROR_TYPE_USER:
+		if (evt->u.user_error.effective_address_provided)
+			return evt->u.user_error.effective_address;
+		break;
+	case MCE_ERROR_TYPE_RA:
+		if (evt->u.ra_error.effective_address_provided)
+			return evt->u.ra_error.effective_address;
+		break;
+	case MCE_ERROR_TYPE_LINK:
+		if (evt->u.link_error.effective_address_provided)
+			return evt->u.link_error.effective_address;
+		break;
 	default:
 	case MCE_ERROR_TYPE_UNKNOWN:
 		break;
@@ -116,6 +116,51 @@ static void flush_and_reload_slb(void)
 }
 #endif

+static void flush_erat(void)
+{
+	asm volatile(PPC_INVALIDATE_ERAT : : :"memory");
+}
+
+#define MCE_FLUSH_SLB 1
+#define MCE_FLUSH_TLB 2
+#define MCE_FLUSH_ERAT 3
+
+static int mce_flush(int what)
+{
+#ifdef CONFIG_PPC_STD_MMU_64
+	if (what == MCE_FLUSH_SLB) {
+		flush_and_reload_slb();
+		return 1;
+	}
+#endif
+	if (what == MCE_FLUSH_ERAT) {
+		flush_erat();
+		return 1;
+	}
+	if (what == MCE_FLUSH_TLB) {
+		if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
+			cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static int mce_handle_flush_derrors(uint64_t dsisr, uint64_t slb, uint64_t tlb, uint64_t erat)
+{
+	if ((dsisr & slb) && mce_flush(MCE_FLUSH_SLB))
+		dsisr &= ~slb;
+	if ((dsisr & erat) && mce_flush(MCE_FLUSH_ERAT))
+		dsisr &= ~erat;
+	if ((dsisr & tlb) && mce_flush(MCE_FLUSH_TLB))
+		dsisr &= ~tlb;
+	/* Any other errors we don't understand? */
+	if (dsisr)
+		return 0;
+	return 1;
+}
+
 static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
 {
 	long handled = 1;
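The helper above reports "handled" only when every raised DSISR bit belonged
to an error class it knew how to flush. A standalone sketch of that pattern
(stubbed flush, and bit values that are illustrative rather than the real P9
layout):

#include <stdint.h>
#include <stdio.h>

#define SLB_BITS	0x3ull	/* assumed positions, demo only */
#define TLB_BITS	0x4ull
#define ERAT_BITS	0x8ull

static int mce_flush(const char *what)
{
	printf("flushing %s\n", what);
	return 1;	/* flush always succeeds in this model */
}

static int handle_flush_derrors(uint64_t dsisr)
{
	if ((dsisr & SLB_BITS) && mce_flush("slb"))
		dsisr &= ~SLB_BITS;
	if ((dsisr & ERAT_BITS) && mce_flush("erat"))
		dsisr &= ~ERAT_BITS;
	if ((dsisr & TLB_BITS) && mce_flush("tlb"))
		dsisr &= ~TLB_BITS;
	return dsisr ? 0 : 1;	/* leftover bits: unknown error */
}

int main(void)
{
	printf("slb+tlb -> handled=%d\n", handle_flush_derrors(0x5));
	printf("unknown -> handled=%d\n", handle_flush_derrors(0x10));
	return 0;
}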
@@ -281,6 +326,9 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs)
 	long handled = 1;
 	struct mce_error_info mce_error_info = { 0 };

+	mce_error_info.severity = MCE_SEV_ERROR_SYNC;
+	mce_error_info.initiator = MCE_INITIATOR_CPU;
+
 	srr1 = regs->msr;
 	nip = regs->nip;
@@ -352,6 +400,9 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
 	long handled = 1;
 	struct mce_error_info mce_error_info = { 0 };

+	mce_error_info.severity = MCE_SEV_ERROR_SYNC;
+	mce_error_info.initiator = MCE_INITIATOR_CPU;
+
 	srr1 = regs->msr;
 	nip = regs->nip;
@@ -372,3 +423,189 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs)
 	save_mce_event(regs, handled, &mce_error_info, nip, addr);
 	return handled;
 }
+
+static int mce_handle_derror_p9(struct pt_regs *regs)
+{
+	uint64_t dsisr = regs->dsisr;
+
+	return mce_handle_flush_derrors(dsisr,
+			P9_DSISR_MC_SLB_PARITY_MFSLB |
+			P9_DSISR_MC_SLB_MULTIHIT_MFSLB,
+
+			P9_DSISR_MC_TLB_MULTIHIT_MFTLB,
+
+			P9_DSISR_MC_ERAT_MULTIHIT);
+}
+
+static int mce_handle_ierror_p9(struct pt_regs *regs)
+{
+	uint64_t srr1 = regs->msr;
+
+	switch (P9_SRR1_MC_IFETCH(srr1)) {
+	case P9_SRR1_MC_IFETCH_SLB_PARITY:
+	case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
+		return mce_flush(MCE_FLUSH_SLB);
+	case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
+		return mce_flush(MCE_FLUSH_TLB);
+	case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
+		return mce_flush(MCE_FLUSH_ERAT);
+	default:
+		return 0;
+	}
+}
+
+static void mce_get_derror_p9(struct pt_regs *regs,
+		struct mce_error_info *mce_err, uint64_t *addr)
+{
+	uint64_t dsisr = regs->dsisr;
+
+	mce_err->severity = MCE_SEV_ERROR_SYNC;
+	mce_err->initiator = MCE_INITIATOR_CPU;
+
+	if (dsisr & P9_DSISR_MC_USER_TLBIE)
+		*addr = regs->nip;
+	else
+		*addr = regs->dar;
+
+	if (dsisr & P9_DSISR_MC_UE) {
+		mce_err->error_type = MCE_ERROR_TYPE_UE;
+		mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE;
+	} else if (dsisr & P9_DSISR_MC_UE_TABLEWALK) {
+		mce_err->error_type = MCE_ERROR_TYPE_UE;
+		mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
+	} else if (dsisr & P9_DSISR_MC_LINK_LOAD_TIMEOUT) {
+		mce_err->error_type = MCE_ERROR_TYPE_LINK;
+		mce_err->u.link_error_type = MCE_LINK_ERROR_LOAD_TIMEOUT;
+	} else if (dsisr & P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT) {
+		mce_err->error_type = MCE_ERROR_TYPE_LINK;
+		mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT;
+	} else if (dsisr & P9_DSISR_MC_ERAT_MULTIHIT) {
+		mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+		mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+	} else if (dsisr & P9_DSISR_MC_TLB_MULTIHIT_MFTLB) {
+		mce_err->error_type = MCE_ERROR_TYPE_TLB;
+		mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
+	} else if (dsisr & P9_DSISR_MC_USER_TLBIE) {
+		mce_err->error_type = MCE_ERROR_TYPE_USER;
+		mce_err->u.user_error_type = MCE_USER_ERROR_TLBIE;
+	} else if (dsisr & P9_DSISR_MC_SLB_PARITY_MFSLB) {
+		mce_err->error_type = MCE_ERROR_TYPE_SLB;
+		mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
+	} else if (dsisr & P9_DSISR_MC_SLB_MULTIHIT_MFSLB) {
+		mce_err->error_type = MCE_ERROR_TYPE_SLB;
+		mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
+	} else if (dsisr & P9_DSISR_MC_RA_LOAD) {
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD;
+	} else if (dsisr & P9_DSISR_MC_RA_TABLEWALK) {
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE;
+	} else if (dsisr & P9_DSISR_MC_RA_TABLEWALK_FOREIGN) {
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN;
+	} else if (dsisr & P9_DSISR_MC_RA_FOREIGN) {
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD_STORE_FOREIGN;
+	}
+}
+
+static void mce_get_ierror_p9(struct pt_regs *regs,
+		struct mce_error_info *mce_err, uint64_t *addr)
+{
+	uint64_t srr1 = regs->msr;
+
+	switch (P9_SRR1_MC_IFETCH(srr1)) {
+	case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
+	case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
+		mce_err->severity = MCE_SEV_FATAL;
+		break;
+	default:
+		mce_err->severity = MCE_SEV_ERROR_SYNC;
+		break;
+	}
+
+	mce_err->initiator = MCE_INITIATOR_CPU;
+
+	*addr = regs->nip;
+
+	switch (P9_SRR1_MC_IFETCH(srr1)) {
+	case P9_SRR1_MC_IFETCH_UE:
+		mce_err->error_type = MCE_ERROR_TYPE_UE;
+		mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH;
+		break;
+	case P9_SRR1_MC_IFETCH_SLB_PARITY:
+		mce_err->error_type = MCE_ERROR_TYPE_SLB;
+		mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY;
+		break;
+	case P9_SRR1_MC_IFETCH_SLB_MULTIHIT:
+		mce_err->error_type = MCE_ERROR_TYPE_SLB;
+		mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT;
+		break;
+	case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT:
+		mce_err->error_type = MCE_ERROR_TYPE_ERAT;
+		mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT;
+		break;
+	case P9_SRR1_MC_IFETCH_TLB_MULTIHIT:
+		mce_err->error_type = MCE_ERROR_TYPE_TLB;
+		mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT;
+		break;
+	case P9_SRR1_MC_IFETCH_UE_TLB_RELOAD:
+		mce_err->error_type = MCE_ERROR_TYPE_UE;
+		mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH;
+		break;
+	case P9_SRR1_MC_IFETCH_LINK_TIMEOUT:
+		mce_err->error_type = MCE_ERROR_TYPE_LINK;
+		mce_err->u.link_error_type = MCE_LINK_ERROR_IFETCH_TIMEOUT;
+		break;
+	case P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT:
+		mce_err->error_type = MCE_ERROR_TYPE_LINK;
+		mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT;
+		break;
+	case P9_SRR1_MC_IFETCH_RA:
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_IFETCH;
+		break;
+	case P9_SRR1_MC_IFETCH_RA_TABLEWALK:
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH;
+		break;
+	case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE:
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_STORE;
+		break;
+	case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT:
+		mce_err->error_type = MCE_ERROR_TYPE_LINK;
+		mce_err->u.link_error_type = MCE_LINK_ERROR_STORE_TIMEOUT;
+		break;
+	case P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN:
+		mce_err->error_type = MCE_ERROR_TYPE_RA;
+		mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN;
+		break;
+	default:
+		break;
+	}
+}
+
+long __machine_check_early_realmode_p9(struct pt_regs *regs)
+{
+	uint64_t nip, addr;
+	long handled;
+	struct mce_error_info mce_error_info = { 0 };
+
+	nip = regs->nip;
+
+	if (P9_SRR1_MC_LOADSTORE(regs->msr)) {
+		handled = mce_handle_derror_p9(regs);
+		mce_get_derror_p9(regs, &mce_error_info, &addr);
+	} else {
+		handled = mce_handle_ierror_p9(regs);
+		mce_get_ierror_p9(regs, &mce_error_info, &addr);
+	}
+
+	/* Handle UE error. */
+	if (mce_error_info.error_type == MCE_ERROR_TYPE_UE)
+		handled = mce_handle_ue_error(regs);
+
+	save_mce_event(regs, handled, &mce_error_info, nip, addr);
+	return handled;
+}
@@ -188,6 +188,8 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
 		sdsync = POWER7P_MMCRA_SDAR_VALID;
 	else if (ppmu->flags & PPMU_ALT_SIPR)
 		sdsync = POWER6_MMCRA_SDSYNC;
+	else if (ppmu->flags & PPMU_NO_SIAR)
+		sdsync = MMCRA_SAMPLE_ENABLE;
 	else
 		sdsync = MMCRA_SDSYNC;
@@ -65,12 +65,41 @@ static bool is_event_valid(u64 event)
 	return !(event & ~valid_mask);
 }

-static u64 mmcra_sdar_mode(u64 event)
+static inline bool is_event_marked(u64 event)
 {
-	if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1))
-		return p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
+	if (event & EVENT_IS_MARKED)
+		return true;
+
+	return false;
+}

-	return MMCRA_SDAR_MODE_TLB;
+static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
+{
+	/*
+	 * MMCRA[SDAR_MODE] specifies how the SDAR should be updated in
+	 * continuous sampling mode.
+	 *
+	 * In case of Power8:
+	 * MMCRA[SDAR_MODE] will be programmed as "0b01" for continuous
+	 * sampling mode and will be un-changed when setting MMCRA[63]
+	 * (Marked events).
+	 *
+	 * In case of Power9:
+	 * Marked event: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'),
+	 *               or if the group already has any marked events.
+	 * Non-Marked events (for DD1):
+	 *               MMCRA[SDAR_MODE] will be set to 0b01.
+	 * For the rest:
+	 *               MMCRA[SDAR_MODE] will be set from the event code.
+	 */
+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
+			*mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
+		else if (!cpu_has_feature(CPU_FTR_POWER9_DD1))
+			*mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT;
+		else if (cpu_has_feature(CPU_FTR_POWER9_DD1))
+			*mmcra |= MMCRA_SDAR_MODE_TLB;
+	} else
+		*mmcra |= MMCRA_SDAR_MODE_TLB;
 }

 static u64 thresh_cmp_val(u64 value)
@@ -180,7 +209,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
 		value |= CNST_L1_QUAL_VAL(cache);
 	}

-	if (event & EVENT_IS_MARKED) {
+	if (is_event_marked(event)) {
 		mask  |= CNST_SAMPLE_MASK;
 		value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
 	}
@@ -276,7 +305,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
 		}

 		/* In continuous sampling mode, update SDAR on TLB miss */
-		mmcra |= mmcra_sdar_mode(event[i]);
+		mmcra_sdar_mode(event[i], &mmcra);

 		if (event[i] & EVENT_IS_L1) {
 			cache = event[i] >> EVENT_CACHE_SEL_SHIFT;
@@ -285,7 +314,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev,
 			mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT;
 		}

-		if (event[i] & EVENT_IS_MARKED) {
+		if (is_event_marked(event[i])) {
 			mmcra |= MMCRA_SAMPLE_ENABLE;

 			val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
@@ -246,6 +246,7 @@
 #define MMCRA_THR_CMP_SHIFT		32
 #define MMCRA_SDAR_MODE_SHIFT		42
 #define MMCRA_SDAR_MODE_TLB		(1ull << MMCRA_SDAR_MODE_SHIFT)
+#define MMCRA_SDAR_MODE_NO_UPDATES	~(0x3ull << MMCRA_SDAR_MODE_SHIFT)
 #define MMCRA_IFM_SHIFT			30

 /* MMCR1 Threshold Compare bit constant for power9 */
@@ -395,7 +395,6 @@ static int opal_recover_mce(struct pt_regs *regs,
 					struct machine_check_event *evt)
 {
 	int recovered = 0;
-	uint64_t ea = get_mce_fault_addr(evt);

 	if (!(regs->msr & MSR_RI)) {
 		/* If MSR_RI isn't set, we cannot recover */
@@ -404,26 +403,18 @@ static int opal_recover_mce(struct pt_regs *regs,
 	} else if (evt->disposition == MCE_DISPOSITION_RECOVERED) {
 		/* Platform corrected itself */
 		recovered = 1;
-	} else if (ea && !is_kernel_addr(ea)) {
+	} else if (evt->severity == MCE_SEV_FATAL) {
+		/* Fatal machine check */
+		pr_err("Machine check interrupt is fatal\n");
+		recovered = 0;
+	} else if ((evt->severity == MCE_SEV_ERROR_SYNC) &&
+	    (user_mode(regs) && !is_global_init(current))) {
 		/*
-		 * Faulting address is not in kernel text. We should be fine.
-		 * We need to find which process uses this address.
 		 * For now, kill the task if we have received exception when
 		 * in userspace.
 		 *
 		 * TODO: Queue up this address for hwpoisoning later.
 		 */
-		if (user_mode(regs) && !is_global_init(current)) {
-			_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
-			recovered = 1;
-		} else
-			recovered = 0;
-	} else if (user_mode(regs) && !is_global_init(current) &&
-		   evt->severity == MCE_SEV_ERROR_SYNC) {
-		/*
-		 * If we have received a synchronous error when in userspace
-		 * kill the task.
-		 */
 		_exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip);
 		recovered = 1;
 	}
@@ -1775,17 +1775,20 @@ static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev)
 }

 static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
-				   struct pci_bus *bus)
+				   struct pci_bus *bus,
+				   bool add_to_group)
 {
 	struct pci_dev *dev;

 	list_for_each_entry(dev, &bus->devices, bus_list) {
 		set_iommu_table_base(&dev->dev, pe->table_group.tables[0]);
 		set_dma_offset(&dev->dev, pe->tce_bypass_base);
-		iommu_add_device(&dev->dev);
+		if (add_to_group)
+			iommu_add_device(&dev->dev);

 		if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
-			pnv_ioda_setup_bus_dma(pe, dev->subordinate);
+			pnv_ioda_setup_bus_dma(pe, dev->subordinate,
+					add_to_group);
 	}
 }
@@ -2191,7 +2194,7 @@ found:
 		set_iommu_table_base(&pe->pdev->dev, tbl);
 		iommu_add_device(&pe->pdev->dev);
 	} else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
-		pnv_ioda_setup_bus_dma(pe, pe->pbus);
+		pnv_ioda_setup_bus_dma(pe, pe->pbus, true);

 	return;
 fail:
@@ -2426,6 +2429,8 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group)

 	pnv_pci_ioda2_set_bypass(pe, false);
 	pnv_pci_ioda2_unset_window(&pe->table_group, 0);
+	if (pe->pbus)
+		pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
 	pnv_ioda2_table_free(tbl);
 }
@@ -2435,6 +2440,8 @@ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group)
 						table_group);

 	pnv_pci_ioda2_setup_default_config(pe);
+	if (pe->pbus)
+		pnv_ioda_setup_bus_dma(pe, pe->pbus, false);
 }

 static struct iommu_table_group_ops pnv_pci_ioda2_ops = {
@@ -2624,6 +2631,9 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
 	level_shift = entries_shift + 3;
 	level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);

+	if ((level_shift - 3) * levels + page_shift >= 60)
+		return -EINVAL;
+
 	/* Allocate TCE table */
 	addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
 			levels, tce_table_size, &offset, &total_allocated);
@@ -2728,7 +2738,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
 	if (pe->flags & PNV_IODA_PE_DEV)
 		iommu_add_device(&pe->pdev->dev);
 	else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
-		pnv_ioda_setup_bus_dma(pe, pe->pbus);
+		pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
 }

 #ifdef CONFIG_PCI_MSI
@@ -376,10 +376,14 @@ static void punt_bios_to_rescuer(struct bio_set *bs)
 	bio_list_init(&punt);
 	bio_list_init(&nopunt);

-	while ((bio = bio_list_pop(current->bio_list)))
+	while ((bio = bio_list_pop(&current->bio_list[0])))
 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+	current->bio_list[0] = nopunt;

-	*current->bio_list = nopunt;
+	bio_list_init(&nopunt);
+	while ((bio = bio_list_pop(&current->bio_list[1])))
+		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+	current->bio_list[1] = nopunt;

 	spin_lock(&bs->rescue_lock);
 	bio_list_merge(&bs->rescue_list, &punt);
@@ -466,7 +470,9 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 	 * we retry with the original gfp_flags.
 	 */

-	if (current->bio_list && !bio_list_empty(current->bio_list))
+	if (current->bio_list &&
+	    (!bio_list_empty(&current->bio_list[0]) ||
+	     !bio_list_empty(&current->bio_list[1])))
 		gfp_mask &= ~__GFP_DIRECT_RECLAIM;

 	p = mempool_alloc(bs->bio_pool, gfp_mask);
@@ -1973,7 +1973,14 @@ end_io:
  */
 blk_qc_t generic_make_request(struct bio *bio)
 {
-	struct bio_list bio_list_on_stack;
+	/*
+	 * bio_list_on_stack[0] contains bios submitted by the current
+	 * make_request_fn.
+	 * bio_list_on_stack[1] contains bios that were submitted before
+	 * the current make_request_fn, but that haven't been processed
+	 * yet.
+	 */
+	struct bio_list bio_list_on_stack[2];
 	blk_qc_t ret = BLK_QC_T_NONE;

 	if (!generic_make_request_checks(bio))
@@ -1990,7 +1997,7 @@ blk_qc_t generic_make_request(struct bio *bio)
 	 * should be added at the tail
 	 */
 	if (current->bio_list) {
-		bio_list_add(current->bio_list, bio);
+		bio_list_add(&current->bio_list[0], bio);
 		goto out;
 	}
@@ -2009,18 +2016,17 @@ blk_qc_t generic_make_request(struct bio *bio)
 	 * bio_list, and call into ->make_request() again.
 	 */
 	BUG_ON(bio->bi_next);
-	bio_list_init(&bio_list_on_stack);
-	current->bio_list = &bio_list_on_stack;
+	bio_list_init(&bio_list_on_stack[0]);
+	current->bio_list = bio_list_on_stack;
 	do {
 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);

 		if (likely(blk_queue_enter(q, false) == 0)) {
-			struct bio_list hold;
 			struct bio_list lower, same;

 			/* Create a fresh bio_list for all subordinate requests */
-			hold = bio_list_on_stack;
-			bio_list_init(&bio_list_on_stack);
+			bio_list_on_stack[1] = bio_list_on_stack[0];
+			bio_list_init(&bio_list_on_stack[0]);
 			ret = q->make_request_fn(q, bio);

 			blk_queue_exit(q);
@@ -2030,19 +2036,19 @@ blk_qc_t generic_make_request(struct bio *bio)
 			 */
 			bio_list_init(&lower);
 			bio_list_init(&same);
-			while ((bio = bio_list_pop(&bio_list_on_stack)) != NULL)
+			while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
 				if (q == bdev_get_queue(bio->bi_bdev))
 					bio_list_add(&same, bio);
 				else
 					bio_list_add(&lower, bio);
 			/* now assemble so we handle the lowest level first */
-			bio_list_merge(&bio_list_on_stack, &lower);
-			bio_list_merge(&bio_list_on_stack, &same);
-			bio_list_merge(&bio_list_on_stack, &hold);
+			bio_list_merge(&bio_list_on_stack[0], &lower);
+			bio_list_merge(&bio_list_on_stack[0], &same);
+			bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
 		} else {
 			bio_io_error(bio);
 		}
-		bio = bio_list_pop(current->bio_list);
+		bio = bio_list_pop(&bio_list_on_stack[0]);
 	} while (bio);
 	current->bio_list = NULL; /* deactivate */
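The two-list scheme in the hunks above flattens what would otherwise be
recursion through stacked block drivers into a loop, always draining a
child's bios before older siblings. A toy model of that drain order (plain C,
not kernel code; "bios" are just ints and the FIFO stands in for
struct bio_list):

#include <stdio.h>

#define MAXQ 64

struct list { int q[MAXQ]; int head, tail; };

static void init(struct list *l)         { l->head = l->tail = 0; }
static void add(struct list *l, int bio) { l->q[l->tail++] = bio; }
static int  pop(struct list *l)
{
	return l->head == l->tail ? -1 : l->q[l->head++];
}

static void merge(struct list *dst, struct list *src)
{
	int bio;

	while ((bio = pop(src)) != -1)
		add(dst, bio);
}

/* Stand-in make_request_fn: bio N > 0 submits one child bio N-1 for a
 * "lower" device instead of recursing into it directly. */
static void make_request(struct list *submitted, int bio)
{
	printf("issue bio %d\n", bio);
	if (bio > 0)
		add(submitted, bio - 1);
}

int main(void)
{
	struct list on_stack[2];
	int bio = 3;

	init(&on_stack[0]);
	do {
		/* [1] holds bios queued before this call; [0] starts
		 * fresh so it collects only this call's children. */
		on_stack[1] = on_stack[0];
		init(&on_stack[0]);
		make_request(&on_stack[0], bio);

		/* Children stay in front; older bios go to the back,
		 * so the lowest level is always drained first. */
		merge(&on_stack[0], &on_stack[1]);

		bio = pop(&on_stack[0]);
	} while (bio != -1);
	return 0;
}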
@@ -295,6 +295,9 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
 	for (i = 0; i < set->nr_hw_queues; i++) {
 		struct blk_mq_tags *tags = set->tags[i];

+		if (!tags)
+			continue;
+
 		for (j = 0; j < tags->nr_tags; j++) {
 			if (!tags->static_rqs[j])
 				continue;
@@ -1434,7 +1434,8 @@ static blk_qc_t request_to_qc_t(struct blk_mq_hw_ctx *hctx, struct request *rq)
 	return blk_tag_to_qc_t(rq->internal_tag, hctx->queue_num, true);
 }

-static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
+static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie,
+				      bool may_sleep)
 {
 	struct request_queue *q = rq->q;
 	struct blk_mq_queue_data bd = {
@@ -1475,7 +1476,7 @@ static void blk_mq_try_issue_directly(struct request *rq, blk_qc_t *cookie)
 	}

 insert:
-	blk_mq_sched_insert_request(rq, false, true, true, false);
+	blk_mq_sched_insert_request(rq, false, true, false, may_sleep);
 }

 /*
@@ -1569,11 +1570,11 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)

 		if (!(data.hctx->flags & BLK_MQ_F_BLOCKING)) {
 			rcu_read_lock();
-			blk_mq_try_issue_directly(old_rq, &cookie);
+			blk_mq_try_issue_directly(old_rq, &cookie, false);
 			rcu_read_unlock();
 		} else {
 			srcu_idx = srcu_read_lock(&data.hctx->queue_rq_srcu);
-			blk_mq_try_issue_directly(old_rq, &cookie);
+			blk_mq_try_issue_directly(old_rq, &cookie, true);
 			srcu_read_unlock(&data.hctx->queue_rq_srcu, srcu_idx);
 		}
 		goto done;
@@ -266,7 +266,7 @@ unlock:
 	return err;
 }

-int af_alg_accept(struct sock *sk, struct socket *newsock)
+int af_alg_accept(struct sock *sk, struct socket *newsock, bool kern)
 {
 	struct alg_sock *ask = alg_sk(sk);
 	const struct af_alg_type *type;
@@ -281,7 +281,7 @@ int af_alg_accept(struct sock *sk, struct socket *newsock)
 	if (!type)
 		goto unlock;

-	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, 0);
+	sk2 = sk_alloc(sock_net(sk), PF_ALG, GFP_KERNEL, &alg_proto, kern);
 	err = -ENOMEM;
 	if (!sk2)
 		goto unlock;
@@ -323,9 +323,10 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(af_alg_accept);

-static int alg_accept(struct socket *sock, struct socket *newsock, int flags)
+static int alg_accept(struct socket *sock, struct socket *newsock, int flags,
+		      bool kern)
 {
-	return af_alg_accept(sock->sk, newsock);
+	return af_alg_accept(sock->sk, newsock, kern);
 }

 static const struct proto_ops alg_proto_ops = {
@@ -239,7 +239,8 @@ unlock:
 	return err ?: len;
 }

-static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
+static int hash_accept(struct socket *sock, struct socket *newsock, int flags,
+		       bool kern)
 {
 	struct sock *sk = sock->sk;
 	struct alg_sock *ask = alg_sk(sk);
@@ -260,7 +261,7 @@ static int hash_accept(struct socket *sock, struct socket *newsock, int flags)
 	if (err)
 		return err;

-	err = af_alg_accept(ask->parent, newsock);
+	err = af_alg_accept(ask->parent, newsock, kern);
 	if (err)
 		return err;
@@ -378,7 +379,7 @@ static int hash_recvmsg_nokey(struct socket *sock, struct msghdr *msg,
 }

 static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
-			     int flags)
+			     int flags, bool kern)
 {
 	int err;
@@ -386,7 +387,7 @@ static int hash_accept_nokey(struct socket *sock, struct socket *newsock,
 	if (err)
 		return err;

-	return hash_accept(sock, newsock, flags);
+	return hash_accept(sock, newsock, flags, kern);
 }

 static struct proto_ops algif_hash_ops_nokey = {
@@ -177,7 +177,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
 	case AHCI_LS1043A:
 		if (!qpriv->ecc_addr)
 			return -EINVAL;
-		writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr);
+		writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
+		       qpriv->ecc_addr);
 		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
 		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
 		if (qpriv->is_dmacoherent)
@@ -194,7 +195,8 @@ static int ahci_qoriq_phy_init(struct ahci_host_priv *hpriv)
 	case AHCI_LS1046A:
 		if (!qpriv->ecc_addr)
 			return -EINVAL;
-		writel(ECC_DIS_ARMV8_CH2, qpriv->ecc_addr);
+		writel(readl(qpriv->ecc_addr) | ECC_DIS_ARMV8_CH2,
+		       qpriv->ecc_addr);
 		writel(AHCI_PORT_PHY_1_CFG, reg_base + PORT_PHY1);
 		writel(AHCI_PORT_TRANS_CFG, reg_base + PORT_TRANS);
 		if (qpriv->is_dmacoherent)
@@ -1482,7 +1482,6 @@ unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc)
 		break;

 	default:
-		WARN_ON_ONCE(1);
 		return AC_ERR_SYSTEM;
 	}
@@ -224,7 +224,6 @@ static DECLARE_TRANSPORT_CLASS(ata_port_class,

 static void ata_tport_release(struct device *dev)
 {
-	put_device(dev->parent);
 }

 /**
@@ -284,7 +283,7 @@ int ata_tport_add(struct device *parent,
 	device_initialize(dev);
 	dev->type = &ata_port_type;

-	dev->parent = get_device(parent);
+	dev->parent = parent;
 	dev->release = ata_tport_release;
 	dev_set_name(dev, "ata%d", ap->print_id);
 	transport_setup_device(dev);
@@ -348,7 +347,6 @@ static DECLARE_TRANSPORT_CLASS(ata_link_class,

 static void ata_tlink_release(struct device *dev)
 {
-	put_device(dev->parent);
 }

 /**
@@ -410,7 +408,7 @@ int ata_tlink_add(struct ata_link *link)
 	int error;

 	device_initialize(dev);
-	dev->parent = get_device(&ap->tdev);
+	dev->parent = &ap->tdev;
 	dev->release = ata_tlink_release;
 	if (ata_is_host_link(link))
 		dev_set_name(dev, "link%d", ap->print_id);
@@ -589,7 +587,6 @@ static DECLARE_TRANSPORT_CLASS(ata_dev_class,

 static void ata_tdev_release(struct device *dev)
 {
-	put_device(dev->parent);
 }

 /**
@@ -662,7 +659,7 @@ static int ata_tdev_add(struct ata_device *ata_dev)
 	int error;

 	device_initialize(dev);
-	dev->parent = get_device(&link->tdev);
+	dev->parent = &link->tdev;
 	dev->release = ata_tdev_release;
 	if (ata_is_host_link(link))
 		dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno);
@@ -397,9 +397,8 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
 				irq, err);
 			return err;
 		}
-		omap_rng_write(priv, RNG_INTMASK_REG, RNG_SHUTDOWN_OFLO_MASK);

-		priv->clk = of_clk_get(pdev->dev.of_node, 0);
+		priv->clk = devm_clk_get(&pdev->dev, NULL);
 		if (IS_ERR(priv->clk) && PTR_ERR(priv->clk) == -EPROBE_DEFER)
 			return -EPROBE_DEFER;
 		if (!IS_ERR(priv->clk)) {
@@ -408,6 +407,19 @@ static int of_get_omap_rng_device_details(struct omap_rng_dev *priv,
 			dev_err(&pdev->dev, "unable to enable the clk, "
 					    "err = %d\n", err);
 		}
+
+		/*
+		 * On OMAP4, enabling the shutdown_oflo interrupt is
+		 * done in the interrupt mask register. There is no
+		 * such register on EIP76, and it's enabled by the
+		 * same bit in the control register
+		 */
+		if (priv->pdata->regs[RNG_INTMASK_REG])
+			omap_rng_write(priv, RNG_INTMASK_REG,
+				       RNG_SHUTDOWN_OFLO_MASK);
+		else
+			omap_rng_write(priv, RNG_CONTROL_REG,
+				       RNG_SHUTDOWN_OFLO_MASK);
 	}
 	return 0;
 }
@@ -270,7 +270,7 @@ static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
 	scatterwalk_done(&walk, out, 0);
 }

-static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+static void s5p_sg_done(struct s5p_aes_dev *dev)
 {
 	if (dev->sg_dst_cpy) {
 		dev_dbg(dev->dev,
@@ -281,8 +281,11 @@ static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
 	}
 	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
 	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
+}

-	/* holding a lock outside */
+/* Calls the completion. Cannot be called with dev->lock held. */
+static void s5p_aes_complete(struct s5p_aes_dev *dev, int err)
+{
 	dev->req->base.complete(&dev->req->base, err);
 	dev->busy = false;
 }
@@ -368,51 +371,44 @@ exit:
 }

 /*
- * Returns true if new transmitting (output) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_outdata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ *  - 0 if there is no more data,
+ *  - 1 if new transmitting (output) data is ready and its address+length
+ *    have to be written to device (by calling s5p_set_dma_outdata()).
 */
-static bool s5p_aes_tx(struct s5p_aes_dev *dev)
+static int s5p_aes_tx(struct s5p_aes_dev *dev)
 {
-	int err = 0;
-	bool ret = false;
+	int ret = 0;

 	s5p_unset_outdata(dev);

 	if (!sg_is_last(dev->sg_dst)) {
-		err = s5p_set_outdata(dev, sg_next(dev->sg_dst));
-		if (err)
-			s5p_aes_complete(dev, err);
-		else
-			ret = true;
-	} else {
-		s5p_aes_complete(dev, err);
-
-		dev->busy = true;
-		tasklet_schedule(&dev->tasklet);
+		ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
+		if (!ret)
+			ret = 1;
 	}

 	return ret;
 }

 /*
- * Returns true if new receiving (input) data is ready and its
- * address+length have to be written to device (by calling
- * s5p_set_dma_indata()). False otherwise.
+ * Returns -ERRNO on error (mapping of new data failed).
+ * On success returns:
+ *  - 0 if there is no more data,
+ *  - 1 if new receiving (input) data is ready and its address+length
+ *    have to be written to device (by calling s5p_set_dma_indata()).
 */
-static bool s5p_aes_rx(struct s5p_aes_dev *dev)
+static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
 {
-	int err;
-	bool ret = false;
+	int ret = 0;

 	s5p_unset_indata(dev);

 	if (!sg_is_last(dev->sg_src)) {
-		err = s5p_set_indata(dev, sg_next(dev->sg_src));
-		if (err)
-			s5p_aes_complete(dev, err);
-		else
-			ret = true;
+		ret = s5p_set_indata(dev, sg_next(dev->sg_src));
+		if (!ret)
+			ret = 1;
 	}

 	return ret;
@@ -422,33 +418,73 @@ static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
 {
 	struct platform_device *pdev = dev_id;
 	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
-	bool set_dma_tx = false;
-	bool set_dma_rx = false;
+	int err_dma_tx = 0;
+	int err_dma_rx = 0;
+	bool tx_end = false;
 	unsigned long flags;
 	uint32_t status;
+	int err;

 	spin_lock_irqsave(&dev->lock, flags);

+	/*
+	 * Handle rx or tx interrupt. If there is still data (scatterlist did not
+	 * reach end), then map next scatterlist entry.
+	 * In case of such mapping error, s5p_aes_complete() should be called.
+	 *
+	 * If there is no more data in tx scatter list, call s5p_aes_complete()
+	 * and schedule new tasklet.
+	 */
 	status = SSS_READ(dev, FCINTSTAT);
 	if (status & SSS_FCINTSTAT_BRDMAINT)
-		set_dma_rx = s5p_aes_rx(dev);
-	if (status & SSS_FCINTSTAT_BTDMAINT)
-		set_dma_tx = s5p_aes_tx(dev);
+		err_dma_rx = s5p_aes_rx(dev);
+
+	if (status & SSS_FCINTSTAT_BTDMAINT) {
+		if (sg_is_last(dev->sg_dst))
+			tx_end = true;
+		err_dma_tx = s5p_aes_tx(dev);
+	}

 	SSS_WRITE(dev, FCINTPEND, status);

-	/*
-	 * Writing length of DMA block (either receiving or transmitting)
-	 * will start the operation immediately, so this should be done
-	 * at the end (even after clearing pending interrupts to not miss the
-	 * interrupt).
-	 */
-	if (set_dma_tx)
-		s5p_set_dma_outdata(dev, dev->sg_dst);
-	if (set_dma_rx)
-		s5p_set_dma_indata(dev, dev->sg_src);
+	if (err_dma_rx < 0) {
+		err = err_dma_rx;
+		goto error;
+	}
+	if (err_dma_tx < 0) {
+		err = err_dma_tx;
+		goto error;
+	}
+
+	if (tx_end) {
+		s5p_sg_done(dev);
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+
+		s5p_aes_complete(dev, 0);
+		dev->busy = true;
+		tasklet_schedule(&dev->tasklet);
+	} else {
+		/*
+		 * Writing length of DMA block (either receiving or
+		 * transmitting) will start the operation immediately, so this
+		 * should be done at the end (even after clearing pending
+		 * interrupts to not miss the interrupt).
+		 */
+		if (err_dma_tx == 1)
+			s5p_set_dma_outdata(dev, dev->sg_dst);
+		if (err_dma_rx == 1)
+			s5p_set_dma_indata(dev, dev->sg_src);
+
+		spin_unlock_irqrestore(&dev->lock, flags);
+	}

 	return IRQ_HANDLED;

+error:
+	s5p_sg_done(dev);
+	spin_unlock_irqrestore(&dev->lock, flags);
+	s5p_aes_complete(dev, err);
+
+	return IRQ_HANDLED;
 }
@@ -597,8 +633,9 @@ outdata_error:
 	s5p_unset_indata(dev);

 indata_error:
-	s5p_aes_complete(dev, err);
+	s5p_sg_done(dev);
 	spin_unlock_irqrestore(&dev->lock, flags);
+	s5p_aes_complete(dev, err);
 }

 static void s5p_tasklet_cb(unsigned long data)
@@ -805,8 +842,9 @@ static int s5p_aes_probe(struct platform_device *pdev)
 		dev_warn(dev, "feed control interrupt is not available.\n");
 		goto err_irq;
 	}
-	err = devm_request_irq(dev, pdata->irq_fc, s5p_aes_interrupt,
-			       IRQF_SHARED, pdev->name, pdev);
+	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
+					s5p_aes_interrupt, IRQF_ONESHOT,
+					pdev->name, pdev);
 	if (err < 0) {
 		dev_warn(dev, "feed control interrupt is not available.\n");
 		goto err_irq;
@@ -2317,6 +2317,9 @@ static int gigaset_probe(struct usb_interface *interface,
 		return -ENODEV;
 	}

+	if (hostif->desc.bNumEndpoints < 1)
+		return -ENODEV;
+
 	dev_info(&udev->dev,
 		 "%s: Device matched (Vendor: 0x%x, Product: 0x%x)\n",
 		 __func__, le16_to_cpu(udev->descriptor.idVendor),
@@ -392,6 +392,7 @@ static struct macio_dev * macio_add_one_device(struct macio_chip *chip,
 	 * To get all the fields, copy all archdata
 	 */
 	dev->ofdev.dev.archdata = chip->lbus.pdev->dev.archdata;
+	dev->ofdev.dev.dma_ops = chip->lbus.pdev->dev.dma_ops;
 #endif /* CONFIG_PCI */

 #ifdef DEBUG
@@ -989,26 +989,29 @@ static void flush_current_bio_list(struct blk_plug_cb *cb, bool from_schedule)
 	struct dm_offload *o = container_of(cb, struct dm_offload, cb);
 	struct bio_list list;
 	struct bio *bio;
+	int i;

 	INIT_LIST_HEAD(&o->cb.list);

 	if (unlikely(!current->bio_list))
 		return;

-	list = *current->bio_list;
-	bio_list_init(current->bio_list);
-
-	while ((bio = bio_list_pop(&list))) {
-		struct bio_set *bs = bio->bi_pool;
-		if (unlikely(!bs) || bs == fs_bio_set) {
-			bio_list_add(current->bio_list, bio);
-			continue;
+	for (i = 0; i < 2; i++) {
+		list = current->bio_list[i];
+		bio_list_init(&current->bio_list[i]);
+
+		while ((bio = bio_list_pop(&list))) {
+			struct bio_set *bs = bio->bi_pool;
+			if (unlikely(!bs) || bs == fs_bio_set) {
+				bio_list_add(&current->bio_list[i], bio);
+				continue;
+			}
+
+			spin_lock(&bs->rescue_lock);
+			bio_list_add(&bs->rescue_list, bio);
+			queue_work(bs->rescue_workqueue, &bs->rescue_work);
+			spin_unlock(&bs->rescue_lock);
 		}
-
-		spin_lock(&bs->rescue_lock);
-		bio_list_add(&bs->rescue_list, bio);
-		queue_work(bs->rescue_workqueue, &bs->rescue_work);
-		spin_unlock(&bs->rescue_lock);
 	}
 }
@@ -777,7 +777,6 @@ static int gather_all_resync_info(struct mddev *mddev, int total_slots)
 		bm_lockres->flags |= DLM_LKF_NOQUEUE;
 		ret = dlm_lock_sync(bm_lockres, DLM_LOCK_PW);
 		if (ret == -EAGAIN) {
-			memset(bm_lockres->lksb.sb_lvbptr, '\0', LVB_SIZE);
 			s = read_resync_info(mddev, bm_lockres);
 			if (s) {
 				pr_info("%s:%d Resync[%llu..%llu] in progress on %d\n",
@@ -974,6 +973,7 @@ static int leave(struct mddev *mddev)
 	lockres_free(cinfo->bitmap_lockres);
 	unlock_all_bitmaps(mddev);
 	dlm_release_lockspace(cinfo->lockspace, 2);
+	kfree(cinfo);
 	return 0;
 }
@@ -440,14 +440,6 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
 }
 EXPORT_SYMBOL(md_flush_request);

-void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
-{
-	struct mddev *mddev = cb->data;
-	md_wakeup_thread(mddev->thread);
-	kfree(cb);
-}
-EXPORT_SYMBOL(md_unplug);
-
 static inline struct mddev *mddev_get(struct mddev *mddev)
 {
 	atomic_inc(&mddev->active);
@ -1887,7 +1879,7 @@ super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
|
||||
}
|
||||
sb = page_address(rdev->sb_page);
|
||||
sb->data_size = cpu_to_le64(num_sectors);
|
||||
sb->super_offset = rdev->sb_start;
|
||||
sb->super_offset = cpu_to_le64(rdev->sb_start);
|
||||
sb->sb_csum = calc_sb_1_csum(sb);
|
||||
do {
|
||||
md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
|
||||
@ -2295,7 +2287,7 @@ static bool does_sb_need_changing(struct mddev *mddev)
|
||||
/* Check if any mddev parameters have changed */
|
||||
if ((mddev->dev_sectors != le64_to_cpu(sb->size)) ||
|
||||
(mddev->reshape_position != le64_to_cpu(sb->reshape_position)) ||
|
||||
(mddev->layout != le64_to_cpu(sb->layout)) ||
|
||||
(mddev->layout != le32_to_cpu(sb->layout)) ||
|
||||
(mddev->raid_disks != le32_to_cpu(sb->raid_disks)) ||
|
||||
(mddev->chunk_sectors != le32_to_cpu(sb->chunksize)))
|
||||
return true;
|
||||
@ -6458,11 +6450,10 @@ static int set_array_info(struct mddev *mddev, mdu_array_info_t *info)
|
||||
mddev->layout = info->layout;
|
||||
mddev->chunk_sectors = info->chunk_size >> 9;
|
||||
|
||||
mddev->max_disks = MD_SB_DISKS;
|
||||
|
||||
if (mddev->persistent) {
|
||||
mddev->flags = 0;
|
||||
mddev->sb_flags = 0;
|
||||
mddev->max_disks = MD_SB_DISKS;
|
||||
mddev->flags = 0;
|
||||
mddev->sb_flags = 0;
|
||||
}
|
||||
set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
|
||||
|
||||
@ -6533,8 +6524,12 @@ static int update_size(struct mddev *mddev, sector_t num_sectors)
|
||||
return -ENOSPC;
|
||||
}
|
||||
rv = mddev->pers->resize(mddev, num_sectors);
|
||||
if (!rv)
|
||||
revalidate_disk(mddev->gendisk);
|
||||
if (!rv) {
|
||||
if (mddev->queue) {
|
||||
set_capacity(mddev->gendisk, mddev->array_sectors);
|
||||
revalidate_disk(mddev->gendisk);
|
||||
}
|
||||
}
|
||||
return rv;
|
||||
}
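
[Editor's note] The update_size() hunk moves the gendisk update behind a check for mddev->queue: arrays driven by dm-raid have no request queue or gendisk of their own, so touching the disk there would dereference nothing. A hedged sketch of the guard as a standalone helper (my_update_size is illustrative):

    /* Sketch: only touch the gendisk when this mddev actually owns a
     * queue (dm-raid stacked arrays have no queue/gendisk). */
    static int my_update_size(struct mddev *mddev, sector_t num_sectors)
    {
        int rv = mddev->pers->resize(mddev, num_sectors);

        if (!rv && mddev->queue) {
            set_capacity(mddev->gendisk, mddev->array_sectors);
            revalidate_disk(mddev->gendisk);
        }
        return rv;
    }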
@ -676,16 +676,10 @@ extern void mddev_resume(struct mddev *mddev);
extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
                                   struct mddev *mddev);

extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule);
extern void md_reload_sb(struct mddev *mddev, int raid_disk);
extern void md_update_sb(struct mddev *mddev, int force);
extern void md_kick_rdev_from_array(struct md_rdev * rdev);
struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr);
static inline int mddev_check_plugged(struct mddev *mddev)
{
    return !!blk_check_plugged(md_unplug, mddev,
                               sizeof(struct blk_plug_cb));
}

static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
{
@ -1027,7 +1027,7 @@ static int get_unqueued_pending(struct r1conf *conf)
static void freeze_array(struct r1conf *conf, int extra)
{
    /* Stop sync I/O and normal I/O and wait for everything to
     * go quite.
     * go quiet.
     * This is called in two situations:
     * 1) management command handlers (reshape, remove disk, quiesce).
     * 2) one normal I/O request failed.

@ -1587,9 +1587,30 @@ static void raid1_make_request(struct mddev *mddev, struct bio *bio)
            split = bio;
        }

        if (bio_data_dir(split) == READ)
        if (bio_data_dir(split) == READ) {
            raid1_read_request(mddev, split);
        else

            /*
             * If a bio is split, the first part of the bio will
             * pass the barrier but the bio is queued in
             * current->bio_list (see generic_make_request). If
             * there is a raise_barrier() called here, the second
             * part of the bio can't pass the barrier. But since the
             * first part isn't dispatched to the underlying disks yet,
             * the barrier is never released, hence raise_barrier
             * will always wait. We have a deadlock.
             * Note, this only happens in the read path. For the write
             * path, the first part of the bio is dispatched in a
             * schedule() call (because of the blk plug) or offloaded
             * to raid1d.
             * Quitting from the function immediately can change
             * the bio order queued in bio_list and avoid the deadlock.
             */
            if (split != bio) {
                generic_make_request(bio);
                break;
            }
        } else
            raid1_write_request(mddev, split);
    } while (split != bio);
}
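
[Editor's note] The fix above relies on a property of generic_make_request(): a bio submitted from inside a make_request function is queued and only runs after the current call returns. Resubmitting the un-split remainder and returning therefore reorders current->bio_list so the head of the split gets dispatched (and the barrier dropped) first. A minimal runnable model of "dispatch the head, requeue the tail, return" (plain C, illustrative types):

    struct io { long start, len; };

    /* Dispatch at most 'chunk' units, push the untouched remainder back
     * on the submission queue and return, rather than looping while the
     * barrier is still held. requeue() models generic_make_request(). */
    static void make_request(struct io r, long chunk,
                             void (*dispatch)(struct io),
                             void (*requeue)(struct io))
    {
        if (r.len > chunk) {
            struct io head = { r.start, chunk };
            struct io tail = { r.start + chunk, r.len - chunk };

            dispatch(head);
            requeue(tail);    /* runs after we return, barrier released */
            return;
        }
        dispatch(r);
    }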
@ -3246,8 +3267,6 @@ static int raid1_resize(struct mddev *mddev, sector_t sectors)
        return ret;
    }
    md_set_array_sectors(mddev, newsize);
    set_capacity(mddev->gendisk, mddev->array_sectors);
    revalidate_disk(mddev->gendisk);
    if (sectors > mddev->dev_sectors &&
        mddev->recovery_cp > mddev->dev_sectors) {
        mddev->recovery_cp = mddev->dev_sectors;

@ -974,7 +974,8 @@ static void wait_barrier(struct r10conf *conf)
                    !conf->barrier ||
                    (atomic_read(&conf->nr_pending) &&
                     current->bio_list &&
                     !bio_list_empty(current->bio_list)),
                     (!bio_list_empty(&current->bio_list[0]) ||
                      !bio_list_empty(&current->bio_list[1]))),
                    conf->resync_lock);
        conf->nr_waiting--;
    if (!conf->nr_waiting)

@ -1477,11 +1478,24 @@ retry_write:
        mbio->bi_bdev = (void*)rdev;

        atomic_inc(&r10_bio->remaining);

        cb = blk_check_plugged(raid10_unplug, mddev,
                               sizeof(*plug));
        if (cb)
            plug = container_of(cb, struct raid10_plug_cb,
                                cb);
        else
            plug = NULL;
        spin_lock_irqsave(&conf->device_lock, flags);
        bio_list_add(&conf->pending_bio_list, mbio);
        conf->pending_count++;
        if (plug) {
            bio_list_add(&plug->pending, mbio);
            plug->pending_cnt++;
        } else {
            bio_list_add(&conf->pending_bio_list, mbio);
            conf->pending_count++;
        }
        spin_unlock_irqrestore(&conf->device_lock, flags);
        if (!mddev_check_plugged(mddev))
        if (!plug)
            md_wakeup_thread(mddev->thread);
    }
}
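
[Editor's note] blk_check_plugged() returns the per-task plug callback when the caller is inside a blk_start_plug()/blk_finish_plug() window, which lets a driver batch bios in a private list and only wake its daemon when no plug is active. A hedged sketch of the shape the hunk above adopts; struct my_plug_cb and my_unplug() are illustrative stand-ins for raid10_plug_cb/raid10_unplug:

    struct my_plug_cb {
        struct blk_plug_cb cb;      /* embedded for container_of() */
        struct bio_list pending;
    };

    static void queue_one(struct mddev *mddev, struct bio *bio,
                          struct bio_list *shared, spinlock_t *lock)
    {
        struct blk_plug_cb *cb;
        struct my_plug_cb *plug = NULL;

        cb = blk_check_plugged(my_unplug, mddev, sizeof(*plug));
        if (cb)
            plug = container_of(cb, struct my_plug_cb, cb);

        if (plug) {
            bio_list_add(&plug->pending, bio);  /* drained at unplug time */
        } else {
            spin_lock_irq(lock);
            bio_list_add(shared, bio);
            spin_unlock_irq(lock);
            md_wakeup_thread(mddev->thread);    /* no plug: kick now */
        }
    }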
@ -1571,7 +1585,25 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
            split = bio;
        }

        /*
         * If a bio is split, the first part of the bio will pass
         * the barrier but the bio is queued in current->bio_list (see
         * generic_make_request). If there is a raise_barrier() called
         * here, the second part of the bio can't pass the barrier. But
         * since the first part isn't dispatched to the underlying disks
         * yet, the barrier is never released, hence raise_barrier will
         * always wait. We have a deadlock.
         * Note, this only happens in the read path. For the write path,
         * the first part of the bio is dispatched in a schedule() call
         * (because of the blk plug) or offloaded to raid10d.
         * Quitting from the function immediately can change the bio
         * order queued in bio_list and avoid the deadlock.
         */
        __make_request(mddev, split);
        if (split != bio && bio_data_dir(bio) == READ) {
            generic_make_request(bio);
            break;
        }
    } while (split != bio);

    /* In case raid10d snuck in to freeze_array */

@ -3943,10 +3975,6 @@ static int raid10_resize(struct mddev *mddev, sector_t sectors)
        return ret;
    }
    md_set_array_sectors(mddev, size);
    if (mddev->queue) {
        set_capacity(mddev->gendisk, mddev->array_sectors);
        revalidate_disk(mddev->gendisk);
    }
    if (sectors > mddev->dev_sectors &&
        mddev->recovery_cp > oldsize) {
        mddev->recovery_cp = oldsize;

@ -1401,7 +1401,8 @@ static int set_syndrome_sources(struct page **srcs,
             (test_bit(R5_Wantdrain, &dev->flags) ||
              test_bit(R5_InJournal, &dev->flags))) ||
            (srctype == SYNDROME_SRC_WRITTEN &&
             dev->written)) {
             (dev->written ||
              test_bit(R5_InJournal, &dev->flags)))) {
            if (test_bit(R5_InJournal, &dev->flags))
                srcs[slot] = sh->dev[i].orig_page;
            else

@ -7605,8 +7606,6 @@ static int raid5_resize(struct mddev *mddev, sector_t sectors)
        return ret;
    }
    md_set_array_sectors(mddev, newsize);
    set_capacity(mddev->gendisk, mddev->array_sectors);
    revalidate_disk(mddev->gendisk);
    if (sectors > mddev->dev_sectors &&
        mddev->recovery_cp > mddev->dev_sectors) {
        mddev->recovery_cp = mddev->dev_sectors;

@ -2272,10 +2272,7 @@ static int xgbe_one_poll(struct napi_struct *napi, int budget)
    processed = xgbe_rx_poll(channel, budget);

    /* If we processed everything, we are done */
    if (processed < budget) {
        /* Turn off polling */
        napi_complete_done(napi, processed);

    if ((processed < budget) && napi_complete_done(napi, processed)) {
        /* Enable Tx and Rx interrupts */
        if (pdata->channel_irq_mode)
            xgbe_enable_rx_tx_int(pdata, channel);

@ -2317,10 +2314,7 @@ static int xgbe_all_poll(struct napi_struct *napi, int budget)
    } while ((processed < budget) && (processed != last_processed));

    /* If we processed everything, we are done */
    if (processed < budget) {
        /* Turn off polling */
        napi_complete_done(napi, processed);

    if ((processed < budget) && napi_complete_done(napi, processed)) {
        /* Enable Tx and Rx interrupts */
        xgbe_enable_rx_tx_ints(pdata);
    }
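
[Editor's note] napi_complete_done() returns false when the poll has been rescheduled (for example by a concurrent interrupt or busy polling), in which case device interrupts must stay masked. Gating the re-enable on its return value, as both hunks above do, closes that race. A hedged sketch of the canonical poll shape; my_rx_poll() and my_enable_irq() are placeholders:

    static int my_napi_poll(struct napi_struct *napi, int budget)
    {
        int work = my_rx_poll(napi, budget);    /* placeholder RX work */

        /* Re-arm hardware interrupts only if NAPI really went idle:
         * napi_complete_done() returns false if polling must continue. */
        if (work < budget && napi_complete_done(napi, work))
            my_enable_irq(napi);                /* placeholder IRQ unmask */

        return work;
    }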
@ -213,9 +213,9 @@ void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
        if (!((1U << i) & self->msix_entry_mask))
            continue;

        free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
        if (pdev->msix_enabled)
            irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
        free_irq(pci_irq_vector(pdev, i), self->aq_vec[i]);
        self->msix_entry_mask &= ~(1U << i);
    }
}

@ -13292,17 +13292,15 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
    dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
        NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;

    /* VF with OLD Hypervisor or old PF do not support filtering */
    if (IS_PF(bp)) {
        if (chip_is_e1x)
            bp->accept_any_vlan = true;
        else
            dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
#ifdef CONFIG_BNX2X_SRIOV
    } else if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
        dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
#endif
    }
    /* For VF we'll know whether to enable VLAN filtering after
     * getting a response to CHANNEL_TLV_ACQUIRE from PF.
     */

    dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
    dev->features |= NETIF_F_HIGHDMA;

@ -13738,7 +13736,7 @@ static int bnx2x_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
    if (!netif_running(bp->dev)) {
        DP(BNX2X_MSG_PTP,
           "PTP adjfreq called while the interface is down\n");
        return -EFAULT;
        return -ENETDOWN;
    }

    if (ppb < 0) {

@ -13797,6 +13795,12 @@ static int bnx2x_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
    struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);

    if (!netif_running(bp->dev)) {
        DP(BNX2X_MSG_PTP,
           "PTP adjtime called while the interface is down\n");
        return -ENETDOWN;
    }

    DP(BNX2X_MSG_PTP, "PTP adjtime called, delta = %llx\n", delta);

    timecounter_adjtime(&bp->timecounter, delta);

@ -13809,6 +13813,12 @@ static int bnx2x_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
    struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
    u64 ns;

    if (!netif_running(bp->dev)) {
        DP(BNX2X_MSG_PTP,
           "PTP gettime called while the interface is down\n");
        return -ENETDOWN;
    }

    ns = timecounter_read(&bp->timecounter);

    DP(BNX2X_MSG_PTP, "PTP gettime called, ns = %llu\n", ns);

@ -13824,6 +13834,12 @@ static int bnx2x_ptp_settime(struct ptp_clock_info *ptp,
    struct bnx2x *bp = container_of(ptp, struct bnx2x, ptp_clock_info);
    u64 ns;

    if (!netif_running(bp->dev)) {
        DP(BNX2X_MSG_PTP,
           "PTP settime called while the interface is down\n");
        return -ENETDOWN;
    }

    ns = timespec64_to_ns(ts);

    DP(BNX2X_MSG_PTP, "PTP settime called, ns = %llu\n", ns);

@ -13991,6 +14007,14 @@ static int bnx2x_init_one(struct pci_dev *pdev,
        rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
        if (rc)
            goto init_one_freemem;

#ifdef CONFIG_BNX2X_SRIOV
        /* VF with OLD Hypervisor or old PF do not support filtering */
        if (bp->acquire_resp.pfdev_info.pf_cap & PFVF_CAP_VLAN_FILTER) {
            dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
            dev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
        }
#endif
    }

    /* Enable SRIOV if capability found in configuration space */
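
[Editor's note] The PTP hunks above replace -EFAULT (which wrongly suggests a bad user address) with -ENETDOWN and add the same interface-up guard to every clock callback. A hedged sketch of the shared pattern; struct my_priv and its fields are illustrative:

    /* Sketch: common "interface must be up" guard for PTP callbacks. */
    static int my_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
    {
        struct my_priv *priv = container_of(ptp, struct my_priv, ptp_info);

        if (!netif_running(priv->dev))
            return -ENETDOWN;    /* refused because the link is down */

        timecounter_adjtime(&priv->tc, delta);
        return 0;
    }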
@ -434,7 +434,9 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,

    /* Add/Remove the filter */
    rc = bnx2x_config_vlan_mac(bp, &ramrod);
    if (rc && rc != -EEXIST) {
    if (rc == -EEXIST)
        return 0;
    if (rc) {
        BNX2X_ERR("Failed to %s %s\n",
                  filter->add ? "add" : "delete",
                  (filter->type == BNX2X_VF_FILTER_VLAN_MAC) ?

@ -444,6 +446,8 @@ static int bnx2x_vf_mac_vlan_config(struct bnx2x *bp,
        return rc;
    }

    filter->applied = true;

    return 0;
}

@ -469,8 +473,10 @@ int bnx2x_vf_mac_vlan_config_list(struct bnx2x *bp, struct bnx2x_virtf *vf,
    /* Rollback if needed */
    if (i != filters->count) {
        BNX2X_ERR("Managed only %d/%d filters - rolling back\n",
                  i, filters->count + 1);
                  i, filters->count);
        while (--i >= 0) {
            if (!filters->filters[i].applied)
                continue;
            filters->filters[i].add = !filters->filters[i].add;
            bnx2x_vf_mac_vlan_config(bp, vf, qid,
                                     &filters->filters[i],

@ -1899,7 +1905,8 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
            continue;
        }

        DP(BNX2X_MSG_IOV, "add addresses for vf %d\n", vf->abs_vfid);
        DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
               "add addresses for vf %d\n", vf->abs_vfid);
        for_each_vfq(vf, j) {
            struct bnx2x_vf_queue *rxq = vfq_get(vf, j);

@ -1920,11 +1927,12 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp)
                cpu_to_le32(U64_HI(q_stats_addr));
            cur_query_entry->address.lo =
                cpu_to_le32(U64_LO(q_stats_addr));
            DP(BNX2X_MSG_IOV,
               "added address %x %x for vf %d queue %d client %d\n",
               cur_query_entry->address.hi,
               cur_query_entry->address.lo, cur_query_entry->funcID,
               j, cur_query_entry->index);
            DP_AND((BNX2X_MSG_IOV | BNX2X_MSG_STATS),
                   "added address %x %x for vf %d queue %d client %d\n",
                   cur_query_entry->address.hi,
                   cur_query_entry->address.lo,
                   cur_query_entry->funcID,
                   j, cur_query_entry->index);
            cur_query_entry++;
            cur_data_offset += sizeof(struct per_queue_stats);
            stats_count++;
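
[Editor's note] The rollback above skips entries whose ramrod never completed: each filter records an applied flag when it actually takes effect, and the undo loop inverts only those. A runnable model of the idea in plain C (illustrative types):

    #include <stdbool.h>
    #include <stdio.h>

    struct filter { bool add; bool applied; };

    /* Apply filters[0..count); on failure at index i, undo only the
     * ones that were actually applied, in reverse order. */
    static int config_list(struct filter *f, int count,
                           int (*config)(struct filter *))
    {
        int i;

        for (i = 0; i < count; i++) {
            if (config(&f[i]))
                break;
            f[i].applied = true;
        }
        if (i == count)
            return 0;

        fprintf(stderr, "managed only %d/%d filters - rolling back\n",
                i, count);
        while (--i >= 0) {
            if (!f[i].applied)
                continue;
            f[i].add = !f[i].add;    /* invert the op to undo it */
            config(&f[i]);
        }
        return -1;
    }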
@ -114,6 +114,7 @@ struct bnx2x_vf_mac_vlan_filter {
        (BNX2X_VF_FILTER_MAC | BNX2X_VF_FILTER_VLAN) /*shortcut*/

    bool add;
    bool applied;
    u8 *mac;
    u16 vid;
};

@ -868,7 +868,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
    struct bnx2x *bp = netdev_priv(dev);
    struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters;
    struct pfvf_general_resp_tlv *resp = &bp->vf2pf_mbox->resp.general_resp;
    int rc, i = 0;
    int rc = 0, i = 0;
    struct netdev_hw_addr *ha;

    if (bp->state != BNX2X_STATE_OPEN) {

@ -883,6 +883,15 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
    /* Get Rx mode requested */
    DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

    /* We support PFVF_MAX_MULTICAST_PER_VF mcast addresses tops */
    if (netdev_mc_count(dev) > PFVF_MAX_MULTICAST_PER_VF) {
        DP(NETIF_MSG_IFUP,
           "VF supports not more than %d multicast MAC addresses\n",
           PFVF_MAX_MULTICAST_PER_VF);
        rc = -EINVAL;
        goto out;
    }

    netdev_for_each_mc_addr(ha, dev) {
        DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
           bnx2x_mc_addr(ha));

@ -890,16 +899,6 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
        i++;
    }

    /* We support four PFVF_MAX_MULTICAST_PER_VF mcast
     * addresses tops
     */
    if (i >= PFVF_MAX_MULTICAST_PER_VF) {
        DP(NETIF_MSG_IFUP,
           "VF supports not more than %d multicast MAC addresses\n",
           PFVF_MAX_MULTICAST_PER_VF);
        return -EINVAL;
    }

    req->n_multicast = i;
    req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
    req->vf_qid = 0;

@ -924,7 +923,7 @@ int bnx2x_vfpf_set_mcast(struct net_device *dev)
out:
    bnx2x_vfpf_finalize(bp, &req->first_tlv);

    return 0;
    return rc;
}

/* request pf to add a vlan for the vf */
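
[Editor's note] The set_mcast hunk fixes an overrun: the old code copied addresses into the fixed-size request first and only range-checked afterwards, so an oversized list had already been written past the array. Checking netdev_c_count up front rejects the list before anything is touched. A runnable model (MAX_MCAST stands in for PFVF_MAX_MULTICAST_PER_VF):

    #include <stdio.h>
    #include <string.h>

    #define MAX_MCAST 32

    /* Copy n addresses into a fixed request buffer; validate the count
     * before writing a single entry. */
    static int set_mcast(unsigned char (*dst)[6],
                         const unsigned char (*src)[6], int n)
    {
        if (n > MAX_MCAST) {
            fprintf(stderr, "supports at most %d addresses\n", MAX_MCAST);
            return -1;    /* nothing was touched */
        }
        memcpy(dst, src, (size_t)n * 6);
        return 0;
    }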
@ -1778,6 +1777,23 @@ static int bnx2x_vf_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
                goto op_err;
        }

        /* build vlan list */
        fl = NULL;

        rc = bnx2x_vf_mbx_macvlan_list(bp, vf, msg, &fl,
                                       VFPF_VLAN_FILTER);
        if (rc)
            goto op_err;

        if (fl) {
            /* set vlan list */
            rc = bnx2x_vf_mac_vlan_config_list(bp, vf, fl,
                                               msg->vf_qid,
                                               false);
            if (rc)
                goto op_err;
        }

    }

    if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {

@ -4465,6 +4465,10 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
        vf->vlan = le16_to_cpu(resp->vlan) & VLAN_VID_MASK;
    }
#endif
    if (BNXT_PF(bp) && (le16_to_cpu(resp->flags) &
                        FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED))
        bp->flags |= BNXT_FLAG_FW_LLDP_AGENT;

    switch (resp->port_partition_type) {
    case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_0:
    case FUNC_QCFG_RESP_PORT_PARTITION_TYPE_NPAR1_5:

@ -5507,8 +5511,9 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp)
        bp->lpi_tmr_hi = le32_to_cpu(resp->valid_tx_lpi_timer_high) &
                         PORT_PHY_QCAPS_RESP_TX_LPI_TIMER_HIGH_MASK;
    }
    link_info->support_auto_speeds =
        le16_to_cpu(resp->supported_speeds_auto_mode);
    if (resp->supported_speeds_auto_mode)
        link_info->support_auto_speeds =
            le16_to_cpu(resp->supported_speeds_auto_mode);

hwrm_phy_qcaps_exit:
    mutex_unlock(&bp->hwrm_cmd_lock);

@ -6495,8 +6500,14 @@ static void bnxt_reset_task(struct bnxt *bp, bool silent)
    if (!silent)
        bnxt_dbg_dump_states(bp);
    if (netif_running(bp->dev)) {
        int rc;

        if (!silent)
            bnxt_ulp_stop(bp);
        bnxt_close_nic(bp, false, false);
        bnxt_open_nic(bp, false, false);
        rc = bnxt_open_nic(bp, false, false);
        if (!silent && !rc)
            bnxt_ulp_start(bp);
    }
}

@ -7444,6 +7455,10 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
    if (rc)
        goto init_err_pci_clean;

    rc = bnxt_hwrm_func_reset(bp);
    if (rc)
        goto init_err_pci_clean;

    bnxt_hwrm_fw_set_time(bp);

    dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |

@ -7554,10 +7569,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
    if (rc)
        goto init_err_pci_clean;

    rc = bnxt_hwrm_func_reset(bp);
    if (rc)
        goto init_err_pci_clean;

    rc = bnxt_init_int_mode(bp);
    if (rc)
        goto init_err_pci_clean;

@ -993,6 +993,7 @@ struct bnxt {
                    BNXT_FLAG_ROCEV2_CAP)
    #define BNXT_FLAG_NO_AGG_RINGS    0x20000
    #define BNXT_FLAG_RX_PAGE_MODE    0x40000
    #define BNXT_FLAG_FW_LLDP_AGENT   0x80000
    #define BNXT_FLAG_CHIP_NITRO_A0   0x1000000

    #define BNXT_FLAG_ALL_CONFIG_FEATS (BNXT_FLAG_TPA |        \

@ -474,7 +474,7 @@ void bnxt_dcb_init(struct bnxt *bp)
        return;

    bp->dcbx_cap = DCB_CAP_DCBX_VER_IEEE;
    if (BNXT_PF(bp))
    if (BNXT_PF(bp) && !(bp->flags & BNXT_FLAG_FW_LLDP_AGENT))
        bp->dcbx_cap |= DCB_CAP_DCBX_HOST;
    else
        bp->dcbx_cap |= DCB_CAP_DCBX_LLD_MANAGED;

@ -1,7 +1,7 @@
/*
 * Broadcom GENET (Gigabit Ethernet) controller driver
 *
 * Copyright (c) 2014 Broadcom Corporation
 * Copyright (c) 2014-2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as

@ -450,6 +450,22 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
            genet_dma_ring_regs[r]);
}

static int bcmgenet_begin(struct net_device *dev)
{
    struct bcmgenet_priv *priv = netdev_priv(dev);

    /* Turn on the clock */
    return clk_prepare_enable(priv->clk);
}

static void bcmgenet_complete(struct net_device *dev)
{
    struct bcmgenet_priv *priv = netdev_priv(dev);

    /* Turn off the clock */
    clk_disable_unprepare(priv->clk);
}

static int bcmgenet_get_link_ksettings(struct net_device *dev,
                                       struct ethtool_link_ksettings *cmd)
{

@ -778,8 +794,9 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
    STAT_GENET_RUNT("rx_runt_bytes", mib.rx_runt_bytes),
    /* Misc UniMAC counters */
    STAT_GENET_MISC("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt,
                    UMAC_RBUF_OVFL_CNT),
    STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt, UMAC_RBUF_ERR_CNT),
                    UMAC_RBUF_OVFL_CNT_V1),
    STAT_GENET_MISC("rbuf_err_cnt", mib.rbuf_err_cnt,
                    UMAC_RBUF_ERR_CNT_V1),
    STAT_GENET_MISC("mdf_err_cnt", mib.mdf_err_cnt, UMAC_MDF_ERR_CNT),
    STAT_GENET_SOFT_MIB("alloc_rx_buff_failed", mib.alloc_rx_buff_failed),
    STAT_GENET_SOFT_MIB("rx_dma_failed", mib.rx_dma_failed),

@ -821,6 +838,45 @@ static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
        }
    }
}

static u32 bcmgenet_update_stat_misc(struct bcmgenet_priv *priv, u16 offset)
{
    u16 new_offset;
    u32 val;

    switch (offset) {
    case UMAC_RBUF_OVFL_CNT_V1:
        if (GENET_IS_V2(priv))
            new_offset = RBUF_OVFL_CNT_V2;
        else
            new_offset = RBUF_OVFL_CNT_V3PLUS;

        val = bcmgenet_rbuf_readl(priv, new_offset);
        /* clear if overflowed */
        if (val == ~0)
            bcmgenet_rbuf_writel(priv, 0, new_offset);
        break;
    case UMAC_RBUF_ERR_CNT_V1:
        if (GENET_IS_V2(priv))
            new_offset = RBUF_ERR_CNT_V2;
        else
            new_offset = RBUF_ERR_CNT_V3PLUS;

        val = bcmgenet_rbuf_readl(priv, new_offset);
        /* clear if overflowed */
        if (val == ~0)
            bcmgenet_rbuf_writel(priv, 0, new_offset);
        break;
    default:
        val = bcmgenet_umac_readl(priv, offset);
        /* clear if overflowed */
        if (val == ~0)
            bcmgenet_umac_writel(priv, 0, offset);
        break;
    }

    return val;
}

static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
{
    int i, j = 0;

@ -836,19 +892,28 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
        case BCMGENET_STAT_NETDEV:
        case BCMGENET_STAT_SOFT:
            continue;
        case BCMGENET_STAT_MIB_RX:
        case BCMGENET_STAT_MIB_TX:
        case BCMGENET_STAT_RUNT:
            if (s->type != BCMGENET_STAT_MIB_RX)
                offset = BCMGENET_STAT_OFFSET;
            offset += BCMGENET_STAT_OFFSET;
            /* fall through */
        case BCMGENET_STAT_MIB_TX:
            offset += BCMGENET_STAT_OFFSET;
            /* fall through */
        case BCMGENET_STAT_MIB_RX:
            val = bcmgenet_umac_readl(priv,
                                      UMAC_MIB_START + j + offset);
            offset = 0;    /* Reset Offset */
            break;
        case BCMGENET_STAT_MISC:
            val = bcmgenet_umac_readl(priv, s->reg_offset);
            /* clear if overflowed */
            if (val == ~0)
                bcmgenet_umac_writel(priv, 0, s->reg_offset);
            if (GENET_IS_V1(priv)) {
                val = bcmgenet_umac_readl(priv, s->reg_offset);
                /* clear if overflowed */
                if (val == ~0)
                    bcmgenet_umac_writel(priv, 0,
                                         s->reg_offset);
            } else {
                val = bcmgenet_update_stat_misc(priv,
                                                s->reg_offset);
            }
            break;
        }
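
[Editor's note] The RUNT/TX/RX cases above now form a fall-through ladder: each level adds one block-sized offset, so a RUNT stat lands two blocks past the RX base and a TX stat one block past it. A runnable model of the accumulating switch (BLOCK stands in for BCMGENET_STAT_OFFSET):

    #include <stdio.h>

    enum kind { STAT_RX, STAT_TX, STAT_RUNT };
    #define BLOCK 0x40

    /* Each case adds one block and falls through, so the result is
     * base plus the number of blocks between this group and RX. */
    static unsigned reg_offset(enum kind k, unsigned base)
    {
        unsigned offset = 0;

        switch (k) {
        case STAT_RUNT:
            offset += BLOCK;
            /* fall through */
        case STAT_TX:
            offset += BLOCK;
            /* fall through */
        case STAT_RX:
            break;
        }
        return base + offset;
    }

    int main(void)
    {
        printf("RX   at 0x%x\n", reg_offset(STAT_RX, 0x400));
        printf("TX   at 0x%x\n", reg_offset(STAT_TX, 0x400));
        printf("RUNT at 0x%x\n", reg_offset(STAT_RUNT, 0x400));
        return 0;
    }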
@ -973,6 +1038,8 @@ static int bcmgenet_set_eee(struct net_device *dev, struct ethtool_eee *e)

/* standard ethtool support functions. */
static const struct ethtool_ops bcmgenet_ethtool_ops = {
    .begin = bcmgenet_begin,
    .complete = bcmgenet_complete,
    .get_strings = bcmgenet_get_strings,
    .get_sset_count = bcmgenet_get_sset_count,
    .get_ethtool_stats = bcmgenet_get_ethtool_stats,

@ -1167,7 +1234,6 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
    struct bcmgenet_priv *priv = netdev_priv(dev);
    struct device *kdev = &priv->pdev->dev;
    struct enet_cb *tx_cb_ptr;
    struct netdev_queue *txq;
    unsigned int pkts_compl = 0;
    unsigned int bytes_compl = 0;
    unsigned int c_index;

@ -1219,13 +1285,8 @@ static unsigned int __bcmgenet_tx_reclaim(struct net_device *dev,
    dev->stats.tx_packets += pkts_compl;
    dev->stats.tx_bytes += bytes_compl;

    txq = netdev_get_tx_queue(dev, ring->queue);
    netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);

    if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
        if (netif_tx_queue_stopped(txq))
            netif_tx_wake_queue(txq);
    }
    netdev_tx_completed_queue(netdev_get_tx_queue(dev, ring->queue),
                              pkts_compl, bytes_compl);

    return pkts_compl;
}

@ -1248,8 +1309,16 @@ static int bcmgenet_tx_poll(struct napi_struct *napi, int budget)
    struct bcmgenet_tx_ring *ring =
        container_of(napi, struct bcmgenet_tx_ring, napi);
    unsigned int work_done = 0;
    struct netdev_queue *txq;
    unsigned long flags;

    work_done = bcmgenet_tx_reclaim(ring->priv->dev, ring);
    spin_lock_irqsave(&ring->lock, flags);
    work_done = __bcmgenet_tx_reclaim(ring->priv->dev, ring);
    if (ring->free_bds > (MAX_SKB_FRAGS + 1)) {
        txq = netdev_get_tx_queue(ring->priv->dev, ring->queue);
        netif_tx_wake_queue(txq);
    }
    spin_unlock_irqrestore(&ring->lock, flags);

    if (work_done == 0) {
        napi_complete(napi);

@ -2457,24 +2526,28 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
/* Interrupt bottom half */
static void bcmgenet_irq_task(struct work_struct *work)
{
    unsigned long flags;
    unsigned int status;
    struct bcmgenet_priv *priv = container_of(
            work, struct bcmgenet_priv, bcmgenet_irq_work);

    netif_dbg(priv, intr, priv->dev, "%s\n", __func__);

    if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
        priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
    spin_lock_irqsave(&priv->lock, flags);
    status = priv->irq0_stat;
    priv->irq0_stat = 0;
    spin_unlock_irqrestore(&priv->lock, flags);

    if (status & UMAC_IRQ_MPD_R) {
        netif_dbg(priv, wol, priv->dev,
                  "magic packet detected, waking up\n");
        bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
    }

    /* Link UP/DOWN event */
    if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
    if (status & UMAC_IRQ_LINK_EVENT)
        phy_mac_interrupt(priv->phydev,
                          !!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
        priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
    }
                          !!(status & UMAC_IRQ_LINK_UP));
}

/* bcmgenet_isr1: handle Rx and Tx priority queues */

@ -2483,22 +2556,21 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
    struct bcmgenet_priv *priv = dev_id;
    struct bcmgenet_rx_ring *rx_ring;
    struct bcmgenet_tx_ring *tx_ring;
    unsigned int index;
    unsigned int index, status;

    /* Save irq status for bottom-half processing. */
    priv->irq1_stat =
        bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
    /* Read irq status */
    status = bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
        ~bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_MASK_STATUS);

    /* clear interrupts */
    bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
    bcmgenet_intrl2_1_writel(priv, status, INTRL2_CPU_CLEAR);

    netif_dbg(priv, intr, priv->dev,
              "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
              "%s: IRQ=0x%x\n", __func__, status);

    /* Check Rx priority queue interrupts */
    for (index = 0; index < priv->hw_params->rx_queues; index++) {
        if (!(priv->irq1_stat & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
        if (!(status & BIT(UMAC_IRQ1_RX_INTR_SHIFT + index)))
            continue;

        rx_ring = &priv->rx_rings[index];

@ -2511,7 +2583,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)

    /* Check Tx priority queue interrupts */
    for (index = 0; index < priv->hw_params->tx_queues; index++) {
        if (!(priv->irq1_stat & BIT(index)))
        if (!(status & BIT(index)))
            continue;

        tx_ring = &priv->tx_rings[index];

@ -2531,19 +2603,20 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
    struct bcmgenet_priv *priv = dev_id;
    struct bcmgenet_rx_ring *rx_ring;
    struct bcmgenet_tx_ring *tx_ring;
    unsigned int status;
    unsigned long flags;

    /* Save irq status for bottom-half processing. */
    priv->irq0_stat =
        bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
    /* Read irq status */
    status = bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
        ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);

    /* clear interrupts */
    bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
    bcmgenet_intrl2_0_writel(priv, status, INTRL2_CPU_CLEAR);

    netif_dbg(priv, intr, priv->dev,
              "IRQ=0x%x\n", priv->irq0_stat);
              "IRQ=0x%x\n", status);

    if (priv->irq0_stat & UMAC_IRQ_RXDMA_DONE) {
    if (status & UMAC_IRQ_RXDMA_DONE) {
        rx_ring = &priv->rx_rings[DESC_INDEX];

        if (likely(napi_schedule_prep(&rx_ring->napi))) {

@ -2552,7 +2625,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
        }
    }

    if (priv->irq0_stat & UMAC_IRQ_TXDMA_DONE) {
    if (status & UMAC_IRQ_TXDMA_DONE) {
        tx_ring = &priv->tx_rings[DESC_INDEX];

        if (likely(napi_schedule_prep(&tx_ring->napi))) {

@ -2561,20 +2634,21 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
        }
    }

    if (priv->irq0_stat & (UMAC_IRQ_PHY_DET_R |
                           UMAC_IRQ_PHY_DET_F |
                           UMAC_IRQ_LINK_EVENT |
                           UMAC_IRQ_HFB_SM |
                           UMAC_IRQ_HFB_MM |
                           UMAC_IRQ_MPD_R)) {
        /* all other interested interrupts handled in bottom half */
        schedule_work(&priv->bcmgenet_irq_work);
    if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
        status & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
        wake_up(&priv->wq);
    }

    if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
        priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
        priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
        wake_up(&priv->wq);
    /* all other interested interrupts handled in bottom half */
    status &= (UMAC_IRQ_LINK_EVENT |
               UMAC_IRQ_MPD_R);
    if (status) {
        /* Save irq status for bottom-half processing. */
        spin_lock_irqsave(&priv->lock, flags);
        priv->irq0_stat |= status;
        spin_unlock_irqrestore(&priv->lock, flags);

        schedule_work(&priv->bcmgenet_irq_work);
    }

    return IRQ_HANDLED;
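
[Editor's note] The race these hunks close: the ISR used to OR bits into priv->irq0_stat while the work item cleared them without any locking, so events could be lost or handled twice. Now the ISR accumulates deferred bits under a spinlock and the worker atomically snapshots-and-zeroes them before acting. A hedged sketch of the handoff; struct my_priv and handle_events() are illustrative:

    struct my_priv {                  /* illustrative driver state */
        spinlock_t lock;
        unsigned int deferred;        /* events for the bottom half */
        struct work_struct work;
    };

    /* Top half: accumulate deferred events under the shared lock. */
    static void isr_defer(struct my_priv *priv, unsigned int events)
    {
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        priv->deferred |= events;
        spin_unlock_irqrestore(&priv->lock, flags);
        schedule_work(&priv->work);
    }

    /* Bottom half: snapshot and clear in one critical section, then act
     * on the local copy so no event is dropped or double-handled. */
    static void work_fn(struct work_struct *work)
    {
        struct my_priv *priv = container_of(work, struct my_priv, work);
        unsigned int status;
        unsigned long flags;

        spin_lock_irqsave(&priv->lock, flags);
        status = priv->deferred;
        priv->deferred = 0;
        spin_unlock_irqrestore(&priv->lock, flags);

        handle_events(priv, status);    /* placeholder */
    }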
@ -2801,6 +2875,8 @@ err_irq0:
err_fini_dma:
    bcmgenet_fini_dma(priv);
err_clk_disable:
    if (priv->internal_phy)
        bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
    clk_disable_unprepare(priv->clk);
    return ret;
}

@ -3177,6 +3253,12 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
     */
    gphy_rev = reg & 0xffff;

    /* This is reserved so should require special treatment */
    if (gphy_rev == 0 || gphy_rev == 0x01ff) {
        pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
        return;
    }

    /* This is the good old scheme, just GPHY major, no minor nor patch */
    if ((gphy_rev & 0xf0) != 0)
        priv->gphy_rev = gphy_rev << 8;

@ -3185,12 +3267,6 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
    else if ((gphy_rev & 0xff00) != 0)
        priv->gphy_rev = gphy_rev;

    /* This is reserved so should require special treatment */
    else if (gphy_rev == 0 || gphy_rev == 0x01ff) {
        pr_warn("Invalid GPHY revision detected: 0x%04x\n", gphy_rev);
        return;
    }

#ifdef CONFIG_PHYS_ADDR_T_64BIT
    if (!(params->flags & GENET_HAS_40BITS))
        pr_warn("GENET does not support 40-bits PA\n");

@ -3233,6 +3309,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
    const void *macaddr;
    struct resource *r;
    int err = -EIO;
    const char *phy_mode_str;

    /* Up to GENET_MAX_MQ_CNT + 1 TX queues and RX queues */
    dev = alloc_etherdev_mqs(sizeof(*priv), GENET_MAX_MQ_CNT + 1,

@ -3276,6 +3353,8 @@ static int bcmgenet_probe(struct platform_device *pdev)
        goto err;
    }

    spin_lock_init(&priv->lock);

    SET_NETDEV_DEV(dev, &pdev->dev);
    dev_set_drvdata(&pdev->dev, dev);
    ether_addr_copy(dev->dev_addr, macaddr);

@ -3338,6 +3417,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
        priv->clk_eee = NULL;
    }

    /* If this is an internal GPHY, power it on now, before UniMAC is
     * brought out of reset as absolutely no UniMAC activity is allowed
     */
    if (dn && !of_property_read_string(dn, "phy-mode", &phy_mode_str) &&
        !strcasecmp(phy_mode_str, "internal"))
        bcmgenet_power_up(priv, GENET_POWER_PASSIVE);

    err = reset_umac(priv);
    if (err)
        goto err_clk_disable;

@ -3502,6 +3588,8 @@ static int bcmgenet_resume(struct device *d)
    return 0;

out_clk_disable:
    if (priv->internal_phy)
        bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
    clk_disable_unprepare(priv->clk);
    return ret;
}

@ -1,5 +1,5 @@
/*
 * Copyright (c) 2014 Broadcom Corporation
 * Copyright (c) 2014-2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as

@ -214,7 +214,9 @@ struct bcmgenet_mib_counters {
#define  MDIO_REG_SHIFT            16
#define  MDIO_REG_MASK             0x1F

#define UMAC_RBUF_OVFL_CNT         0x61C
#define UMAC_RBUF_OVFL_CNT_V1      0x61C
#define RBUF_OVFL_CNT_V2           0x80
#define RBUF_OVFL_CNT_V3PLUS       0x94

#define UMAC_MPD_CTRL              0x620
#define  MPD_EN                    (1 << 0)

@ -224,7 +226,9 @@ struct bcmgenet_mib_counters {

#define UMAC_MPD_PW_MS             0x624
#define UMAC_MPD_PW_LS             0x628
#define UMAC_RBUF_ERR_CNT          0x634
#define UMAC_RBUF_ERR_CNT_V1       0x634
#define RBUF_ERR_CNT_V2            0x84
#define RBUF_ERR_CNT_V3PLUS        0x98
#define UMAC_MDF_ERR_CNT           0x638
#define UMAC_MDF_CTRL              0x650
#define UMAC_MDF_ADDR              0x654

@ -619,11 +623,13 @@ struct bcmgenet_priv {
    struct work_struct bcmgenet_irq_work;
    int irq0;
    int irq1;
    unsigned int irq0_stat;
    unsigned int irq1_stat;
    int wol_irq;
    bool wol_irq_disabled;

    /* shared status */
    spinlock_t lock;
    unsigned int irq0_stat;

    /* HW descriptors/checksum variables */
    bool desc_64b_en;
    bool desc_rxchk_en;

@ -152,7 +152,7 @@ struct octnic_gather {
     */
    struct octeon_sg_entry *sg;

    u64 sg_dma_ptr;
    dma_addr_t sg_dma_ptr;
};

struct handshake {

@ -734,6 +734,9 @@ static void delete_glists(struct lio *lio)
    struct octnic_gather *g;
    int i;

    kfree(lio->glist_lock);
    lio->glist_lock = NULL;

    if (!lio->glist)
        return;

@ -741,23 +744,26 @@ static void delete_glists(struct lio *lio)
        do {
            g = (struct octnic_gather *)
                list_delete_head(&lio->glist[i]);
            if (g) {
                if (g->sg) {
                    dma_unmap_single(&lio->oct_dev->
                                     pci_dev->dev,
                                     g->sg_dma_ptr,
                                     g->sg_size,
                                     DMA_TO_DEVICE);
                    kfree((void *)((unsigned long)g->sg -
                                   g->adjust));
                }
            if (g)
                kfree(g);
            }
        } while (g);

        if (lio->glists_virt_base && lio->glists_virt_base[i]) {
            lio_dma_free(lio->oct_dev,
                         lio->glist_entry_size * lio->tx_qsize,
                         lio->glists_virt_base[i],
                         lio->glists_dma_base[i]);
        }
    }

    kfree((void *)lio->glist);
    kfree((void *)lio->glist_lock);
    kfree(lio->glists_virt_base);
    lio->glists_virt_base = NULL;

    kfree(lio->glists_dma_base);
    lio->glists_dma_base = NULL;

    kfree(lio->glist);
    lio->glist = NULL;
}

/**

@ -772,13 +778,30 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
    lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
                              GFP_KERNEL);
    if (!lio->glist_lock)
        return 1;
        return -ENOMEM;

    lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
                         GFP_KERNEL);
    if (!lio->glist) {
        kfree((void *)lio->glist_lock);
        return 1;
        kfree(lio->glist_lock);
        lio->glist_lock = NULL;
        return -ENOMEM;
    }

    lio->glist_entry_size =
        ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

    /* allocate memory to store virtual and dma base address of
     * per glist consistent memory
     */
    lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
                                    GFP_KERNEL);
    lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
                                   GFP_KERNEL);

    if (!lio->glists_virt_base || !lio->glists_dma_base) {
        delete_glists(lio);
        return -ENOMEM;
    }

    for (i = 0; i < num_iqs; i++) {

@ -788,6 +811,16 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)

        INIT_LIST_HEAD(&lio->glist[i]);

        lio->glists_virt_base[i] =
            lio_dma_alloc(oct,
                          lio->glist_entry_size * lio->tx_qsize,
                          &lio->glists_dma_base[i]);

        if (!lio->glists_virt_base[i]) {
            delete_glists(lio);
            return -ENOMEM;
        }

        for (j = 0; j < lio->tx_qsize; j++) {
            g = kzalloc_node(sizeof(*g), GFP_KERNEL,
                             numa_node);

@ -796,43 +829,18 @@ static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
            if (!g)
                break;

            g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
                          OCT_SG_ENTRY_SIZE);
            g->sg = lio->glists_virt_base[i] +
                    (j * lio->glist_entry_size);

            g->sg = kmalloc_node(g->sg_size + 8,
                                 GFP_KERNEL, numa_node);
            if (!g->sg)
                g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
            if (!g->sg) {
                kfree(g);
                break;
            }

            /* The gather component should be aligned on 64-bit
             * boundary
             */
            if (((unsigned long)g->sg) & 7) {
                g->adjust = 8 - (((unsigned long)g->sg) & 7);
                g->sg = (struct octeon_sg_entry *)
                        ((unsigned long)g->sg + g->adjust);
            }
            g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
                                           g->sg, g->sg_size,
                                           DMA_TO_DEVICE);
            if (dma_mapping_error(&oct->pci_dev->dev,
                                  g->sg_dma_ptr)) {
                kfree((void *)((unsigned long)g->sg -
                               g->adjust));
                kfree(g);
                break;
            }
            g->sg_dma_ptr = lio->glists_dma_base[i] +
                            (j * lio->glist_entry_size);

            list_add_tail(&g->list, &lio->glist[i]);
        }

        if (j != lio->tx_qsize) {
            delete_glists(lio);
            return 1;
            return -ENOMEM;
        }
    }
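
[Editor's note] Instead of kmalloc'ing and streaming-mapping each gather buffer, the driver now allocates one coherent block per queue and carves fixed-size, 8-byte-aligned entries out of it: the CPU and device addresses of entry j are both base + j * entry_size, so no per-packet map/sync calls are needed. A hedged sketch of that carving using the generic DMA API (struct carved is illustrative):

    #include <linux/dma-mapping.h>

    struct carved {
        void *virt_base;        /* CPU address of the coherent block */
        dma_addr_t dma_base;    /* matching device address */
        size_t entry_size;      /* multiple of 8 for alignment */
    };

    static int carve_init(struct device *dev, struct carved *c,
                          size_t entry_size, unsigned int nentries)
    {
        c->entry_size = ALIGN(entry_size, 8);
        c->virt_base = dma_alloc_coherent(dev, c->entry_size * nentries,
                                          &c->dma_base, GFP_KERNEL);
        return c->virt_base ? 0 : -ENOMEM;
    }

    /* Entry j's CPU and device views are the same fixed offset in. */
    static void *carve_virt(struct carved *c, unsigned int j)
    {
        return c->virt_base + j * c->entry_size;
    }

    static dma_addr_t carve_dma(struct carved *c, unsigned int j)
    {
        return c->dma_base + j * c->entry_size;
    }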
@ -1885,9 +1893,6 @@ static void free_netsgbuf(void *buf)
        i++;
    }

    dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
                            g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);

    iq = skb_iq(lio, skb);
    spin_lock(&lio->glist_lock[iq]);
    list_add_tail(&g->list, &lio->glist[iq]);

@ -1933,9 +1938,6 @@ static void free_netsgbuf_with_resp(void *buf)
        i++;
    }

    dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
                            g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);

    iq = skb_iq(lio, skb);

    spin_lock(&lio->glist_lock[iq]);

@ -3273,8 +3275,6 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
        i++;
    }

    dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
                               g->sg_size, DMA_TO_DEVICE);
    dptr = g->sg_dma_ptr;

    if (OCTEON_CN23XX_PF(oct))

@ -108,6 +108,8 @@ struct octnic_gather {
     * received from the IP layer.
     */
    struct octeon_sg_entry *sg;

    dma_addr_t sg_dma_ptr;
};

struct octeon_device_priv {

@ -490,6 +492,9 @@ static void delete_glists(struct lio *lio)
    struct octnic_gather *g;
    int i;

    kfree(lio->glist_lock);
    lio->glist_lock = NULL;

    if (!lio->glist)
        return;

@ -497,17 +502,26 @@ static void delete_glists(struct lio *lio)
        do {
            g = (struct octnic_gather *)
                list_delete_head(&lio->glist[i]);
            if (g) {
                if (g->sg)
                    kfree((void *)((unsigned long)g->sg -
                                   g->adjust));
            if (g)
                kfree(g);
            }
        } while (g);

        if (lio->glists_virt_base && lio->glists_virt_base[i]) {
            lio_dma_free(lio->oct_dev,
                         lio->glist_entry_size * lio->tx_qsize,
                         lio->glists_virt_base[i],
                         lio->glists_dma_base[i]);
        }
    }

    kfree(lio->glists_virt_base);
    lio->glists_virt_base = NULL;

    kfree(lio->glists_dma_base);
    lio->glists_dma_base = NULL;

    kfree(lio->glist);
    kfree(lio->glist_lock);
    lio->glist = NULL;
}

/**

@ -522,13 +536,30 @@ static int setup_glists(struct lio *lio, int num_iqs)
    lio->glist_lock =
        kzalloc(sizeof(*lio->glist_lock) * num_iqs, GFP_KERNEL);
    if (!lio->glist_lock)
        return 1;
        return -ENOMEM;

    lio->glist =
        kzalloc(sizeof(*lio->glist) * num_iqs, GFP_KERNEL);
    if (!lio->glist) {
        kfree(lio->glist_lock);
        return 1;
        lio->glist_lock = NULL;
        return -ENOMEM;
    }

    lio->glist_entry_size =
        ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

    /* allocate memory to store virtual and dma base address of
     * per glist consistent memory
     */
    lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
                                    GFP_KERNEL);
    lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
                                   GFP_KERNEL);

    if (!lio->glists_virt_base || !lio->glists_dma_base) {
        delete_glists(lio);
        return -ENOMEM;
    }

    for (i = 0; i < num_iqs; i++) {

@ -536,34 +567,33 @@ static int setup_glists(struct lio *lio, int num_iqs)

        INIT_LIST_HEAD(&lio->glist[i]);

        lio->glists_virt_base[i] =
            lio_dma_alloc(lio->oct_dev,
                          lio->glist_entry_size * lio->tx_qsize,
                          &lio->glists_dma_base[i]);

        if (!lio->glists_virt_base[i]) {
            delete_glists(lio);
            return -ENOMEM;
        }

        for (j = 0; j < lio->tx_qsize; j++) {
            g = kzalloc(sizeof(*g), GFP_KERNEL);
            if (!g)
                break;

            g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
                          OCT_SG_ENTRY_SIZE);
            g->sg = lio->glists_virt_base[i] +
                    (j * lio->glist_entry_size);

            g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
            if (!g->sg) {
                kfree(g);
                break;
            }
            g->sg_dma_ptr = lio->glists_dma_base[i] +
                            (j * lio->glist_entry_size);

            /* The gather component should be aligned on 64-bit
             * boundary
             */
            if (((unsigned long)g->sg) & 7) {
                g->adjust = 8 - (((unsigned long)g->sg) & 7);
                g->sg = (struct octeon_sg_entry *)
                        ((unsigned long)g->sg + g->adjust);
            }
            list_add_tail(&g->list, &lio->glist[i]);
        }

        if (j != lio->tx_qsize) {
            delete_glists(lio);
            return 1;
            return -ENOMEM;
        }
    }

@ -1324,10 +1354,6 @@ static void free_netsgbuf(void *buf)
        i++;
    }

    dma_unmap_single(&lio->oct_dev->pci_dev->dev,
                     finfo->dptr, g->sg_size,
                     DMA_TO_DEVICE);

    iq = skb_iq(lio, skb);

    spin_lock(&lio->glist_lock[iq]);

@ -1374,10 +1400,6 @@ static void free_netsgbuf_with_resp(void *buf)
        i++;
    }

    dma_unmap_single(&lio->oct_dev->pci_dev->dev,
                     finfo->dptr, g->sg_size,
                     DMA_TO_DEVICE);

    iq = skb_iq(lio, skb);

    spin_lock(&lio->glist_lock[iq]);

@ -2382,23 +2404,7 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
        i++;
    }

    dptr = dma_map_single(&oct->pci_dev->dev,
                          g->sg, g->sg_size,
                          DMA_TO_DEVICE);
    if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
        dev_err(&oct->pci_dev->dev, "%s DMA mapping error 4\n",
                __func__);
        dma_unmap_single(&oct->pci_dev->dev, g->sg[0].ptr[0],
                         skb->len - skb->data_len,
                         DMA_TO_DEVICE);
        for (j = 1; j <= frags; j++) {
            frag = &skb_shinfo(skb)->frags[j - 1];
            dma_unmap_page(&oct->pci_dev->dev,
                           g->sg[j >> 2].ptr[j & 3],
                           frag->size, DMA_TO_DEVICE);
        }
        return NETDEV_TX_BUSY;
    }
    dptr = g->sg_dma_ptr;

    ndata.cmd.cmd3.dptr = dptr;
    finfo->dptr = dptr;

@ -71,17 +71,17 @@
#define    CN23XX_MAX_RINGS_PER_VF        8

#define    CN23XX_MAX_INPUT_QUEUES        CN23XX_MAX_RINGS_PER_PF
#define    CN23XX_MAX_IQ_DESCRIPTORS      2048
#define    CN23XX_MAX_IQ_DESCRIPTORS      512
#define    CN23XX_DB_MIN                  1
#define    CN23XX_DB_MAX                  8
#define    CN23XX_DB_TIMEOUT              1

#define    CN23XX_MAX_OUTPUT_QUEUES       CN23XX_MAX_RINGS_PER_PF
#define    CN23XX_MAX_OQ_DESCRIPTORS      2048
#define    CN23XX_MAX_OQ_DESCRIPTORS      512
#define    CN23XX_OQ_BUF_SIZE             1536
#define    CN23XX_OQ_PKTSPER_INTR         128
/*#define CAVIUM_ONLY_CN23XX_RX_PERF*/
#define    CN23XX_OQ_REFIL_THRESHOLD      128
#define    CN23XX_OQ_REFIL_THRESHOLD      16

#define    CN23XX_OQ_INTR_PKT             64
#define    CN23XX_OQ_INTR_TIME            100

@ -155,11 +155,6 @@ octeon_droq_destroy_ring_buffers(struct octeon_device *oct,
            recv_buffer_destroy(droq->recv_buf_list[i].buffer,
                                pg_info);

        if (droq->desc_ring && droq->desc_ring[i].info_ptr)
            lio_unmap_ring_info(oct->pci_dev,
                                (u64)droq->
                                desc_ring[i].info_ptr,
                                OCT_DROQ_INFO_SIZE);
        droq->recv_buf_list[i].buffer = NULL;
    }

@ -211,10 +206,7 @@ int octeon_delete_droq(struct octeon_device *oct, u32 q_no)
    vfree(droq->recv_buf_list);

    if (droq->info_base_addr)
        cnnic_free_aligned_dma(oct->pci_dev, droq->info_list,
                               droq->info_alloc_size,
                               droq->info_base_addr,
                               droq->info_list_dma);
        lio_free_info_buffer(oct, droq);

    if (droq->desc_ring)
        lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),

@ -294,12 +286,7 @@ int octeon_init_droq(struct octeon_device *oct,
    dev_dbg(&oct->pci_dev->dev, "droq[%d]: num_desc: %d\n", q_no,
            droq->max_count);

    droq->info_list =
        cnnic_numa_alloc_aligned_dma((droq->max_count *
                                      OCT_DROQ_INFO_SIZE),
                                     &droq->info_alloc_size,
                                     &droq->info_base_addr,
                                     numa_node);
    droq->info_list = lio_alloc_info_buffer(oct, droq);
    if (!droq->info_list) {
        dev_err(&oct->pci_dev->dev, "Cannot allocate memory for info list.\n");
        lio_dma_free(oct, (droq->max_count * OCT_DROQ_DESC_SIZE),

@ -325,10 +325,10 @@ struct octeon_droq {
    size_t desc_ring_dma;

    /** Info ptr list are allocated at this virtual address. */
    size_t info_base_addr;
    void *info_base_addr;

    /** DMA mapped address of the info list */
    size_t info_list_dma;
    dma_addr_t info_list_dma;

    /** Allocated size of info list. */
    u32 info_alloc_size;

@ -140,48 +140,6 @@ err_release_region:
    return 1;
}

static inline void *
cnnic_numa_alloc_aligned_dma(u32 size,
                             u32 *alloc_size,
                             size_t *orig_ptr,
                             int numa_node)
{
    int retries = 0;
    void *ptr = NULL;

#define OCTEON_MAX_ALLOC_RETRIES     1
    do {
        struct page *page = NULL;

        page = alloc_pages_node(numa_node,
                                GFP_KERNEL,
                                get_order(size));
        if (!page)
            page = alloc_pages(GFP_KERNEL,
                               get_order(size));
        ptr = (void *)page_address(page);
        if ((unsigned long)ptr & 0x07) {
            __free_pages(page, get_order(size));
            ptr = NULL;
            /* Increment the size required if the first
             * attempt failed.
             */
            if (!retries)
                size += 7;
        }
        retries++;
    } while ((retries <= OCTEON_MAX_ALLOC_RETRIES) && !ptr);

    *alloc_size = size;
    *orig_ptr = (unsigned long)ptr;
    if ((unsigned long)ptr & 0x07)
        ptr = (void *)(((unsigned long)ptr + 7) & ~(7UL));
    return ptr;
}

#define cnnic_free_aligned_dma(pci_dev, ptr, size, orig_ptr, dma_addr) \
        free_pages(orig_ptr, get_order(size))

static inline int
sleep_cond(wait_queue_head_t *wait_queue, int *condition)
{

@ -62,6 +62,9 @@ struct lio {

    /** Array of gather component linked lists */
    struct list_head *glist;
    void **glists_virt_base;
    dma_addr_t *glists_dma_base;
    u32 glist_entry_size;

    /** Pointer to the NIC properties for the Octeon device this network
     *  interface is associated with.

@ -344,6 +347,29 @@ static inline void tx_buffer_free(void *buffer)
#define lio_dma_free(oct, size, virt_addr, dma_addr) \
    dma_free_coherent(&(oct)->pci_dev->dev, size, virt_addr, dma_addr)

static inline void *
lio_alloc_info_buffer(struct octeon_device *oct,
                      struct octeon_droq *droq)
{
    void *virt_ptr;

    virt_ptr = lio_dma_alloc(oct, (droq->max_count * OCT_DROQ_INFO_SIZE),
                             &droq->info_list_dma);
    if (virt_ptr) {
        droq->info_alloc_size = droq->max_count * OCT_DROQ_INFO_SIZE;
        droq->info_base_addr = virt_ptr;
    }

    return virt_ptr;
}

static inline void lio_free_info_buffer(struct octeon_device *oct,
                                        struct octeon_droq *droq)
{
    lio_dma_free(oct, droq->info_alloc_size, droq->info_base_addr,
                 droq->info_list_dma);
}

static inline
void *get_rbd(struct sk_buff *skb)
{

@ -359,22 +385,7 @@ void *get_rbd(struct sk_buff *skb)
static inline u64
lio_map_ring_info(struct octeon_droq *droq, u32 i)
{
    dma_addr_t dma_addr;
    struct octeon_device *oct = droq->oct_dev;

    dma_addr = dma_map_single(&oct->pci_dev->dev, &droq->info_list[i],
                              OCT_DROQ_INFO_SIZE, DMA_FROM_DEVICE);

    WARN_ON(dma_mapping_error(&oct->pci_dev->dev, dma_addr));

    return (u64)dma_addr;
}

static inline void
lio_unmap_ring_info(struct pci_dev *pci_dev,
                    u64 info_ptr, u32 size)
{
    dma_unmap_single(&pci_dev->dev, info_ptr, size, DMA_FROM_DEVICE);
    return droq->info_list_dma + (i * sizeof(struct octeon_droq_info));
}

static inline u64

@ -269,6 +269,7 @@ struct nicvf {
#define    MAX_QUEUES_PER_QSET    8
    struct queue_set       *qs;
    struct nicvf_cq_poll   *napi[8];
    void                   *iommu_domain;
    u8                     vf_id;
    u8                     sqs_id;
    bool                   sqs_mode;

@ -16,6 +16,7 @@
#include <linux/log2.h>
#include <linux/prefetch.h>
#include <linux/irq.h>
#include <linux/iommu.h>

#include "nic_reg.h"
#include "nic.h"

@ -525,7 +526,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
        /* Get actual TSO descriptors and free them */
        tso_sqe =
         (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
        nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
                                 tso_sqe->subdesc_cnt);
        nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
    } else {
        nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
                                 hdr->subdesc_cnt);
    }
    nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
    prefetch(skb);

@ -576,6 +582,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
{
    struct sk_buff *skb;
    struct nicvf *nic = netdev_priv(netdev);
    struct nicvf *snic = nic;
    int err = 0;
    int rq_idx;

@ -592,7 +599,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
    if (err && !cqe_rx->rb_cnt)
        return;

    skb = nicvf_get_rcv_skb(nic, cqe_rx);
    skb = nicvf_get_rcv_skb(snic, cqe_rx);
    if (!skb) {
        netdev_dbg(nic->netdev, "Packet not received\n");
        return;

@ -1643,6 +1650,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    if (!pass1_silicon(nic->pdev))
        nic->hw_tso = true;

    /* Get iommu domain for iova to physical addr conversion */
    nic->iommu_domain = iommu_get_domain_for_dev(dev);

    pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
    if (sdevid == 0xA134)
        nic->t88 = true;

@ -10,6 +10,7 @@
#include <linux/netdevice.h>
#include <linux/ip.h>
#include <linux/etherdevice.h>
#include <linux/iommu.h>
#include <net/ip.h>
#include <net/tso.h>

@ -18,6 +19,16 @@
#include "q_struct.h"
#include "nicvf_queues.h"

#define NICVF_PAGE_ORDER ((PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0)

static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
{
    /* Translation is installed only when IOMMU is present */
    if (nic->iommu_domain)
        return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
    return dma_addr;
}

static void nicvf_get_page(struct nicvf *nic)
{
    if (!nic->rb_pageref || !nic->rb_page)
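
[Editor's note] Once receive buffers are mapped with the DMA API, the addresses the hardware hands back are IOVAs, not physical addresses; to release the backing page the driver must translate back through the IOMMU domain when one is attached (iommu_iova_to_phys() returns 0 on a miss, hence the guard). A hedged sketch of the matching free path, modeled on the code later in this file; release_rx_buffer() and the len parameter are illustrative:

    /* Sketch: unmap a hardware buffer address and release its page.
     * nicvf_iova_to_phys() is the helper introduced above. */
    static void release_rx_buffer(struct nicvf *nic, dma_addr_t buf_iova,
                                  size_t len)
    {
        u64 phys = nicvf_iova_to_phys(nic, buf_iova);

        dma_unmap_page_attrs(&nic->pdev->dev, buf_iova, len,
                             DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
        if (phys)    /* translation can fail if the mapping is gone */
            put_page(virt_to_page(phys_to_virt(phys)));
    }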
@ -87,7 +98,7 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
|
||||
static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
|
||||
u32 buf_len, u64 **rbuf)
|
||||
{
|
||||
int order = (PAGE_SIZE <= 4096) ? PAGE_ALLOC_COSTLY_ORDER : 0;
|
||||
int order = NICVF_PAGE_ORDER;
|
||||
|
||||
/* Check if request can be accomodated in previous allocated page */
if (nic->rb_page &&
@ -97,22 +108,27 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
}

nicvf_get_page(nic);
nic->rb_page = NULL;

/* Allocate a new page */
nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
order);
if (!nic->rb_page) {
nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
order);
if (!nic->rb_page) {
this_cpu_inc(nic->pnicvf->drv_stats->
rcv_buffer_alloc_failures);
return -ENOMEM;
}
nic->rb_page_offset = 0;
this_cpu_inc(nic->pnicvf->drv_stats->rcv_buffer_alloc_failures);
return -ENOMEM;
}

nic->rb_page_offset = 0;
ret:
*rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
/* HW will ensure data coherency, CPU sync not required */
*rbuf = (u64 *)((u64)dma_map_page_attrs(&nic->pdev->dev, nic->rb_page,
nic->rb_page_offset, buf_len,
DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC));
if (dma_mapping_error(&nic->pdev->dev, (dma_addr_t)*rbuf)) {
if (!nic->rb_page_offset)
__free_pages(nic->rb_page, order);
nic->rb_page = NULL;
return -ENOMEM;
}
nic->rb_page_offset += buf_len;

return 0;
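Note the shape of the mapping added above: dma_map_page_attrs() with DMA_ATTR_SKIP_CPU_SYNC skips cache maintenance, which is only safe because this hardware keeps receive buffers coherent, and every mapping is checked with dma_mapping_error() before the address is used. A hedged sketch of that map-and-verify step (the 'pdev', 'page' and 'order' names are illustrative):

	dma_addr_t daddr;

	daddr = dma_map_page_attrs(&pdev->dev, page, 0, buf_len,
				   DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(&pdev->dev, daddr)) {
		__free_pages(page, order);	/* nothing else holds it yet */
		return -ENOMEM;
	}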
@ -158,16 +174,21 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
rbdr->dma_size = buf_size;
rbdr->enable = true;
rbdr->thresh = RBDR_THRESH;
rbdr->head = 0;
rbdr->tail = 0;

nic->rb_page = NULL;
for (idx = 0; idx < ring_len; idx++) {
err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
&rbuf);
if (err)
if (err) {
/* To free already allocated and mapped ones */
rbdr->tail = idx - 1;
return err;
}

desc = GET_RBDR_DESC(rbdr, idx);
desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
}

nicvf_get_page(nic);
@ -179,7 +200,7 @@ static int nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
{
int head, tail;
u64 buf_addr;
u64 buf_addr, phys_addr;
struct rbdr_entry_t *desc;

if (!rbdr)
@ -192,18 +213,26 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
head = rbdr->head;
tail = rbdr->tail;

/* Free SKBs */
/* Release page references */
while (head != tail) {
desc = GET_RBDR_DESC(rbdr, head);
buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
put_page(virt_to_page(phys_to_virt(buf_addr)));
buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
phys_addr = nicvf_iova_to_phys(nic, buf_addr);
dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
if (phys_addr)
put_page(virt_to_page(phys_to_virt(phys_addr)));
head++;
head &= (rbdr->dmem.q_len - 1);
}
/* Free SKB of tail desc */
/* Release buffer of tail desc */
desc = GET_RBDR_DESC(rbdr, tail);
buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
put_page(virt_to_page(phys_to_virt(buf_addr)));
buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
phys_addr = nicvf_iova_to_phys(nic, buf_addr);
dma_unmap_page_attrs(&nic->pdev->dev, buf_addr, RCV_FRAG_LEN,
DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
if (phys_addr)
put_page(virt_to_page(phys_to_virt(phys_addr)));

/* Free RBDR ring */
nicvf_free_q_desc_mem(nic, &rbdr->dmem);
@ -250,7 +279,7 @@ refill:
break;

desc = GET_RBDR_DESC(rbdr, tail);
desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
refill_rb_cnt--;
new_rb++;
}
@ -361,9 +390,29 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
return 0;
}

void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
int hdr_sqe, u8 subdesc_cnt)
{
u8 idx;
struct sq_gather_subdesc *gather;

/* Unmap DMA mapped skb data buffers */
for (idx = 0; idx < subdesc_cnt; idx++) {
hdr_sqe++;
hdr_sqe &= (sq->dmem.q_len - 1);
gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
/* HW will ensure data coherency, CPU sync not required */
dma_unmap_page_attrs(&nic->pdev->dev, gather->addr,
gather->size, DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
}
}

static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
{
struct sk_buff *skb;
struct sq_hdr_subdesc *hdr;
struct sq_hdr_subdesc *tso_sqe;

if (!sq)
return;
@ -379,8 +428,22 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
smp_rmb();
while (sq->head != sq->tail) {
skb = (struct sk_buff *)sq->skbuff[sq->head];
if (skb)
dev_kfree_skb_any(skb);
if (!skb)
goto next;
hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
/* Check for dummy descriptor used for HW TSO offload on 88xx */
if (hdr->dont_send) {
/* Get actual TSO descriptors and unmap them */
tso_sqe =
(struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
tso_sqe->subdesc_cnt);
} else {
nicvf_unmap_sndq_buffers(nic, sq, sq->head,
hdr->subdesc_cnt);
}
dev_kfree_skb_any(skb);
next:
sq->head++;
sq->head &= (sq->dmem.q_len - 1);
}
@ -559,9 +622,11 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
nicvf_send_msg_to_pf(nic, &mbx);

if (!nic->sqs_mode && (qidx == 0)) {
/* Enable checking L3/L4 length and TCP/UDP checksums */
/* Enable checking L3/L4 length and TCP/UDP checksums
* Also allow IPv6 pkts with zero UDP checksum.
*/
nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
(BIT(24) | BIT(23) | BIT(21)));
(BIT(24) | BIT(23) | BIT(21) | BIT(20)));
nicvf_config_vlan_stripping(nic, nic->netdev->features);
}

@ -882,6 +947,14 @@ static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
return qentry;
}

/* Rollback to previous tail pointer when descriptors not used */
static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
int qentry, int desc_cnt)
{
sq->tail = qentry;
atomic_add(desc_cnt, &sq->free_cnt);
}
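nicvf_rollback_sq_desc() exists to pair with nicvf_get_sq_desc(): once descriptors are reserved, any later failure (a DMA mapping error, as in the hunks below) must both restore the tail and return the count to free_cnt. A hedged sketch of the reserve/map/rollback flow, illustrative rather than the driver's exact code:

	int qentry = nicvf_get_sq_desc(sq, subdesc_cnt);	/* reserve */
	dma_addr_t daddr = dma_map_page_attrs(&nic->pdev->dev, page, 0, len,
					      DMA_TO_DEVICE,
					      DMA_ATTR_SKIP_CPU_SYNC);

	if (dma_mapping_error(&nic->pdev->dev, daddr)) {
		nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);	/* undo */
		return 0;	/* drop; caller frees the skb */
	}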

/* Free descriptor back to SQ for future use */
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
{
@ -1207,8 +1280,9 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
struct sk_buff *skb, u8 sq_num)
{
int i, size;
int subdesc_cnt, tso_sqe = 0;
int subdesc_cnt, hdr_sqe = 0;
int qentry;
u64 dma_addr;

subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
if (subdesc_cnt > atomic_read(&sq->free_cnt))
@ -1223,12 +1297,21 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
/* Add SQ header subdesc */
nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
skb, skb->len);
tso_sqe = qentry;
hdr_sqe = qentry;

/* Add SQ gather subdescs */
qentry = nicvf_get_nxt_sqentry(sq, qentry);
size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
/* HW will ensure data coherency, CPU sync not required */
dma_addr = dma_map_page_attrs(&nic->pdev->dev, virt_to_page(skb->data),
offset_in_page(skb->data), size,
DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
return 0;
}

nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);

/* Check for scattered buffer */
if (!skb_is_nonlinear(skb))
@ -1241,15 +1324,26 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,

qentry = nicvf_get_nxt_sqentry(sq, qentry);
size = skb_frag_size(frag);
nicvf_sq_add_gather_subdesc(sq, qentry, size,
virt_to_phys(
skb_frag_address(frag)));
dma_addr = dma_map_page_attrs(&nic->pdev->dev,
skb_frag_page(frag),
frag->page_offset, size,
DMA_TO_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
if (dma_mapping_error(&nic->pdev->dev, dma_addr)) {
/* Free entire chain of mapped buffers
* here 'i' = frags mapped + above mapped skb->data
*/
nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
return 0;
}
nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
}

doorbell:
if (nic->t88 && skb_shinfo(skb)->gso_size) {
qentry = nicvf_get_nxt_sqentry(sq, qentry);
nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
}

nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
@ -1282,6 +1376,7 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
int offset;
u16 *rb_lens = NULL;
u64 *rb_ptrs = NULL;
u64 phys_addr;

rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
/* Except 88xx pass1 on all other chips CQE_RX2_S is added to
@ -1296,15 +1391,23 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
else
rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));

netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
__func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);

for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
payload_len = rb_lens[frag_num(frag)];
phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
if (!phys_addr) {
if (skb)
dev_kfree_skb_any(skb);
return NULL;
}

if (!frag) {
/* First fragment */
dma_unmap_page_attrs(&nic->pdev->dev,
*rb_ptrs - cqe_rx->align_pad,
RCV_FRAG_LEN, DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
skb = nicvf_rb_ptr_to_skb(nic,
*rb_ptrs - cqe_rx->align_pad,
phys_addr - cqe_rx->align_pad,
payload_len);
if (!skb)
return NULL;
@ -1312,8 +1415,11 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
skb_put(skb, payload_len);
} else {
/* Add fragments */
page = virt_to_page(phys_to_virt(*rb_ptrs));
offset = phys_to_virt(*rb_ptrs) - page_address(page);
dma_unmap_page_attrs(&nic->pdev->dev, *rb_ptrs,
RCV_FRAG_LEN, DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
page = virt_to_page(phys_to_virt(phys_addr));
offset = phys_to_virt(phys_addr) - page_address(page);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
offset, payload_len, RCV_FRAG_LEN);
}

@ -87,7 +87,7 @@
#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13))
#define RBDR_THRESH (RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN 2048 /* In multiples of 128bytes */
#define DMA_BUFFER_LEN 1536 /* In multiples of 128bytes */
#define RCV_FRAG_LEN (SKB_DATA_ALIGN(DMA_BUFFER_LEN + NET_SKB_PAD) + \
SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

@ -301,6 +301,8 @@ struct queue_set {

#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)

void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
int hdr_sqe, u8 subdesc_cnt);
void nicvf_config_vlan_stripping(struct nicvf *nic,
netdev_features_t features);
int nicvf_set_qset_resources(struct nicvf *nic);

@ -123,14 +123,44 @@ static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
return 1;
}

static int max_bgx_per_node;
static void set_max_bgx_per_node(struct pci_dev *pdev)
{
u16 sdevid;

if (max_bgx_per_node)
return;

pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
switch (sdevid) {
case PCI_SUBSYS_DEVID_81XX_BGX:
max_bgx_per_node = MAX_BGX_PER_CN81XX;
break;
case PCI_SUBSYS_DEVID_83XX_BGX:
max_bgx_per_node = MAX_BGX_PER_CN83XX;
break;
case PCI_SUBSYS_DEVID_88XX_BGX:
default:
max_bgx_per_node = MAX_BGX_PER_CN88XX;
break;
}
}

static struct bgx *get_bgx(int node, int bgx_idx)
{
int idx = (node * max_bgx_per_node) + bgx_idx;

return bgx_vnic[idx];
}
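get_bgx() flattens a (node, bgx) pair into one array slot with a per-chip stride. As a worked example, assuming a CN81XX part where max_bgx_per_node is 3 (2 BGXs + 1 RGX, per the header change below): node 0 owns slots 0..2 and node 1 owns slots 3..5, so

	struct bgx *b = get_bgx(1, 2);	/* bgx_vnic[1 * 3 + 2] == bgx_vnic[5] */

The fixed MAX_BGX_PER_NODE stride this replaces over-reserved slots on chips with fewer BGX blocks.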

/* Return number of BGX present in HW */
unsigned bgx_get_map(int node)
{
int i;
unsigned map = 0;

for (i = 0; i < MAX_BGX_PER_NODE; i++) {
if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i])
for (i = 0; i < max_bgx_per_node; i++) {
if (bgx_vnic[(node * max_bgx_per_node) + i])
map |= (1 << i);
}

@ -143,7 +173,7 @@ int bgx_get_lmac_count(int node, int bgx_idx)
{
struct bgx *bgx;

bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
bgx = get_bgx(node, bgx_idx);
if (bgx)
return bgx->lmac_count;

@ -158,7 +188,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
struct bgx *bgx;
struct lmac *lmac;

bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
bgx = get_bgx(node, bgx_idx);
if (!bgx)
return;

@ -172,7 +202,7 @@ EXPORT_SYMBOL(bgx_get_lmac_link_state);

const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
{
struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
struct bgx *bgx = get_bgx(node, bgx_idx);

if (bgx)
return bgx->lmac[lmacid].mac;
@ -183,7 +213,7 @@ EXPORT_SYMBOL(bgx_get_lmac_mac);

void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
{
struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
struct bgx *bgx = get_bgx(node, bgx_idx);

if (!bgx)
return;
@ -194,7 +224,7 @@ EXPORT_SYMBOL(bgx_set_lmac_mac);

void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
{
struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
struct bgx *bgx = get_bgx(node, bgx_idx);
struct lmac *lmac;
u64 cfg;

@ -217,7 +247,7 @@ EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
struct pfc *pfc = (struct pfc *)pause;
struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
struct bgx *bgx = get_bgx(node, bgx_idx);
struct lmac *lmac;
u64 cfg;

@ -237,7 +267,7 @@ EXPORT_SYMBOL(bgx_lmac_get_pfc);
void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
{
struct pfc *pfc = (struct pfc *)pause;
struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
struct bgx *bgx = get_bgx(node, bgx_idx);
struct lmac *lmac;
u64 cfg;

@ -369,7 +399,7 @@ u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
{
struct bgx *bgx;

bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
bgx = get_bgx(node, bgx_idx);
if (!bgx)
return 0;

@ -383,7 +413,7 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
{
struct bgx *bgx;

bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
bgx = get_bgx(node, bgx_idx);
if (!bgx)
return 0;

@ -411,7 +441,7 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx,
struct lmac *lmac;
u64 cfg;

bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
bgx = get_bgx(node, bgx_idx);
if (!bgx)
return;

@ -1011,12 +1041,6 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
dev_info(dev, "%s: 40G_KR4\n", (char *)str);
break;
case BGX_MODE_QSGMII:
if ((lmacid == 0) &&
(bgx_get_lane2sds_cfg(bgx, lmac) != lmacid))
return;
if ((lmacid == 2) &&
(bgx_get_lane2sds_cfg(bgx, lmac) == lmacid))
return;
dev_info(dev, "%s: QSGMII\n", (char *)str);
break;
case BGX_MODE_RGMII:
@ -1334,11 +1358,13 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_release_regions;
}

set_max_bgx_per_node(pdev);

pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
bgx->bgx_id = (pci_resource_start(pdev,
PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node;
bgx->max_lmac = MAX_LMAC_PER_BGX;
bgx_vnic[bgx->bgx_id] = bgx;
} else {

@ -22,7 +22,6 @@
#define MAX_BGX_PER_CN88XX 2
#define MAX_BGX_PER_CN81XX 3 /* 2 BGXs + 1 RGX */
#define MAX_BGX_PER_CN83XX 4
#define MAX_BGX_PER_NODE 4
#define MAX_LMAC_PER_BGX 4
#define MAX_BGX_CHANS_PER_LMAC 16
#define MAX_DMAC_PER_LMAC 8

@ -2589,8 +2589,6 @@ static int emac_dt_mdio_probe(struct emac_instance *dev)
static int emac_dt_phy_connect(struct emac_instance *dev,
struct device_node *phy_handle)
{
int res;

dev->phy.def = devm_kzalloc(&dev->ofdev->dev, sizeof(*dev->phy.def),
GFP_KERNEL);
if (!dev->phy.def)
@ -2617,7 +2615,7 @@ static int emac_dt_phy_probe(struct emac_instance *dev)
{
struct device_node *np = dev->ofdev->dev.of_node;
struct device_node *phy_handle;
int res = 0;
int res = 1;

phy_handle = of_parse_phandle(np, "phy-handle", 0);

@ -2714,13 +2712,24 @@ static int emac_init_phy(struct emac_instance *dev)
if (emac_has_feature(dev, EMAC_FTR_HAS_RGMII)) {
int res = emac_dt_phy_probe(dev);

mutex_unlock(&emac_phy_map_lock);
if (!res)
switch (res) {
case 1:
/* No phy-handle property configured.
* Continue with the existing phy probe
* and setup code.
*/
break;

case 0:
mutex_unlock(&emac_phy_map_lock);
goto init_phy;

dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
res);
return res;
default:
mutex_unlock(&emac_phy_map_lock);
dev_err(&dev->ofdev->dev, "failed to attach dt phy (%d).\n",
res);
return res;
}
}

if (dev->phy_address != 0xffffffff)
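emac_dt_phy_probe() (changed above to start from res = 1) now uses a tri-state return that emac_init_phy() switches on: 1 means no phy-handle property, so fall through to the legacy probe; 0 means the DT phy was attached; negative is a hard error. A hedged sketch of consuming such a convention (the helper name is hypothetical):

	int res = example_probe(dev);	/* 1, 0, or -errno */

	switch (res) {
	case 1:			/* feature absent: use legacy path */
		break;
	case 0:			/* attached successfully */
		return 0;
	default:		/* res < 0: propagate */
		return res;
	}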

@ -404,7 +404,7 @@ static int ibmvnic_open(struct net_device *netdev)
send_map_query(adapter);
for (i = 0; i < rxadd_subcrqs; i++) {
init_rx_pool(adapter, &adapter->rx_pool[i],
IBMVNIC_BUFFS_PER_POOL, i,
adapter->req_rx_add_entries_per_subcrq, i,
be64_to_cpu(size_array[i]), 1);
if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
dev_err(dev, "Couldn't alloc rx pool\n");
@ -419,23 +419,23 @@ static int ibmvnic_open(struct net_device *netdev)
for (i = 0; i < tx_subcrqs; i++) {
tx_pool = &adapter->tx_pool[i];
tx_pool->tx_buff =
kcalloc(adapter->max_tx_entries_per_subcrq,
kcalloc(adapter->req_tx_entries_per_subcrq,
sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
if (!tx_pool->tx_buff)
goto tx_pool_alloc_failed;

if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
adapter->max_tx_entries_per_subcrq *
adapter->req_tx_entries_per_subcrq *
adapter->req_mtu))
goto tx_ltb_alloc_failed;

tx_pool->free_map =
kcalloc(adapter->max_tx_entries_per_subcrq,
kcalloc(adapter->req_tx_entries_per_subcrq,
sizeof(int), GFP_KERNEL);
if (!tx_pool->free_map)
goto tx_fm_alloc_failed;

for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
for (j = 0; j < adapter->req_tx_entries_per_subcrq; j++)
tx_pool->free_map[j] = j;

tx_pool->consumer_index = 0;
@ -705,6 +705,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
struct device *dev = &adapter->vdev->dev;
struct ibmvnic_tx_buff *tx_buff = NULL;
struct ibmvnic_sub_crq_queue *tx_scrq;
struct ibmvnic_tx_pool *tx_pool;
unsigned int tx_send_failed = 0;
unsigned int tx_map_failed = 0;
@ -724,6 +725,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
int ret = 0;

tx_pool = &adapter->tx_pool[queue_num];
tx_scrq = adapter->tx_scrq[queue_num];
txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
be32_to_cpu(adapter->login_rsp_buf->
@ -744,7 +746,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)

tx_pool->consumer_index =
(tx_pool->consumer_index + 1) %
adapter->max_tx_entries_per_subcrq;
adapter->req_tx_entries_per_subcrq;

tx_buff = &tx_pool->tx_buff[index];
tx_buff->skb = skb;
@ -817,7 +819,7 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)

if (tx_pool->consumer_index == 0)
tx_pool->consumer_index =
adapter->max_tx_entries_per_subcrq - 1;
adapter->req_tx_entries_per_subcrq - 1;
else
tx_pool->consumer_index--;

@ -826,6 +828,14 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
ret = NETDEV_TX_BUSY;
goto out;
}

atomic_inc(&tx_scrq->used);

if (atomic_read(&tx_scrq->used) >= adapter->req_tx_entries_per_subcrq) {
netdev_info(netdev, "Stopping queue %d\n", queue_num);
netif_stop_subqueue(netdev, queue_num);
}

tx_packets++;
tx_bytes += skb->len;
txq->trans_start = jiffies;
@ -1213,6 +1223,7 @@ static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
scrq->adapter = adapter;
scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
scrq->cur = 0;
atomic_set(&scrq->used, 0);
scrq->rx_skb_top = NULL;
spin_lock_init(&scrq->lock);

@ -1355,14 +1366,28 @@ restart_loop:
DMA_TO_DEVICE);
}

if (txbuff->last_frag)
if (txbuff->last_frag) {
atomic_dec(&scrq->used);

if (atomic_read(&scrq->used) <=
(adapter->req_tx_entries_per_subcrq / 2) &&
netif_subqueue_stopped(adapter->netdev,
txbuff->skb)) {
netif_wake_subqueue(adapter->netdev,
scrq->pool_index);
netdev_dbg(adapter->netdev,
"Started queue %d\n",
scrq->pool_index);
}

dev_kfree_skb_any(txbuff->skb);
}

adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
producer_index] = index;
adapter->tx_pool[pool].producer_index =
(adapter->tx_pool[pool].producer_index + 1) %
adapter->max_tx_entries_per_subcrq;
adapter->req_tx_entries_per_subcrq;
}
/* remove tx_comp scrq*/
next->tx_comp.first = 0;
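The ibmvnic hunks above implement classic tx-queue flow control with hysteresis: the transmit path stops the subqueue when the in-flight count reaches the ring size, and the completion path wakes it only after the count drains to half, so the queue does not flap on every packet. A hedged sketch of the two halves (the fields are illustrative stand-ins for the adapter's real ones):

	/* transmit side */
	atomic_inc(&scrq->used);
	if (atomic_read(&scrq->used) >= limit)
		netif_stop_subqueue(netdev, queue_num);

	/* completion side */
	atomic_dec(&scrq->used);
	if (atomic_read(&scrq->used) <= limit / 2 &&
	    __netif_subqueue_stopped(netdev, queue_num))
		netif_wake_subqueue(netdev, queue_num);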

@ -863,6 +863,7 @@ struct ibmvnic_sub_crq_queue {
spinlock_t lock;
struct sk_buff *rx_skb_top;
struct ibmvnic_adapter *adapter;
atomic_t used;
};

struct ibmvnic_long_term_buff {

@ -14,6 +14,7 @@ config MLX5_CORE
config MLX5_CORE_EN
bool "Mellanox Technologies ConnectX-4 Ethernet support"
depends on NETDEVICES && ETHERNET && PCI && MLX5_CORE
depends on IPV6=y || IPV6=n || MLX5_CORE=m
imply PTP_1588_CLOCK
default n
---help---

@ -302,6 +302,9 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5e_dcbx *dcbx = &priv->dcbx;

if (mode & DCB_CAP_DCBX_LLD_MANAGED)
return 1;

if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
return 0;
@ -315,13 +318,10 @@ static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
return 1;
}

if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
if (!(mode & DCB_CAP_DCBX_HOST))
return 1;

if ((mode & DCB_CAP_DCBX_LLD_MANAGED) ||
!(mode & DCB_CAP_DCBX_VER_CEE) ||
!(mode & DCB_CAP_DCBX_VER_IEEE) ||
!(mode & DCB_CAP_DCBX_HOST))
if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
return 1;

return 0;

@ -204,9 +204,6 @@ mlx5e_test_loopback_validate(struct sk_buff *skb,
struct iphdr *iph;

/* We are only going to peek, no need to clone the SKB */
if (skb->protocol != htons(ETH_P_IP))
goto out;

if (MLX5E_TEST_PKT_SIZE - ETH_HLEN > skb_headlen(skb))
goto out;

@ -249,7 +246,7 @@ static int mlx5e_test_loopback_setup(struct mlx5e_priv *priv,
lbtp->loopback_ok = false;
init_completion(&lbtp->comp);

lbtp->pt.type = htons(ETH_P_ALL);
lbtp->pt.type = htons(ETH_P_IP);
lbtp->pt.func = mlx5e_test_loopback_validate;
lbtp->pt.dev = priv->netdev;
lbtp->pt.af_packet_priv = lbtp;

@ -48,9 +48,14 @@
#include "eswitch.h"
#include "vxlan.h"

enum {
MLX5E_TC_FLOW_ESWITCH = BIT(0),
};

struct mlx5e_tc_flow {
struct rhash_head node;
u64 cookie;
u8 flags;
struct mlx5_flow_handle *rule;
struct list_head encap; /* flows sharing the same encap */
struct mlx5_esw_flow_attr *attr;
@ -177,7 +182,7 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
mlx5_fc_destroy(priv->mdev, counter);
}

if (esw && esw->mode == SRIOV_OFFLOADS) {
if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
mlx5_eswitch_del_vlan_action(esw, flow->attr);
if (flow->attr->action & MLX5_FLOW_CONTEXT_ACTION_ENCAP)
mlx5e_detach_encap(priv, flow);
@ -598,6 +603,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
}

static int parse_cls_flower(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow,
struct mlx5_flow_spec *spec,
struct tc_cls_flower_offload *f)
{
@ -609,7 +615,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv,

err = __parse_cls_flower(priv, spec, f, &min_inline);

if (!err && esw->mode == SRIOV_OFFLOADS &&
if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH) &&
rep->vport != FDB_UPLINK_VPORT) {
if (min_inline > esw->offloads.inline_mode) {
netdev_warn(priv->netdev,
@ -1132,23 +1138,19 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
struct tc_cls_flower_offload *f)
{
struct mlx5e_tc_table *tc = &priv->fs.tc;
int err = 0;
bool fdb_flow = false;
int err, attr_size = 0;
u32 flow_tag, action;
struct mlx5e_tc_flow *flow;
struct mlx5_flow_spec *spec;
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
u8 flow_flags = 0;

if (esw && esw->mode == SRIOV_OFFLOADS)
fdb_flow = true;

if (fdb_flow)
flow = kzalloc(sizeof(*flow) +
sizeof(struct mlx5_esw_flow_attr),
GFP_KERNEL);
else
flow = kzalloc(sizeof(*flow), GFP_KERNEL);
if (esw && esw->mode == SRIOV_OFFLOADS) {
flow_flags = MLX5E_TC_FLOW_ESWITCH;
attr_size = sizeof(struct mlx5_esw_flow_attr);
}

flow = kzalloc(sizeof(*flow) + attr_size, GFP_KERNEL);
spec = mlx5_vzalloc(sizeof(*spec));
if (!spec || !flow) {
err = -ENOMEM;
@ -1156,12 +1158,13 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
}

flow->cookie = f->cookie;
flow->flags = flow_flags;

err = parse_cls_flower(priv, spec, f);
err = parse_cls_flower(priv, flow, spec, f);
if (err < 0)
goto err_free;

if (fdb_flow) {
if (flow->flags & MLX5E_TC_FLOW_ESWITCH) {
flow->attr = (struct mlx5_esw_flow_attr *)(flow + 1);
err = parse_tc_fdb_actions(priv, f->exts, flow);
if (err < 0)
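The allocation rework above folds the optional eswitch attributes into the flow's own kzalloc() and recovers them as (flow + 1), i.e. the memory immediately past the struct. A hedged sketch of the idiom with hypothetical types:

	struct foo_attr { int x; };
	struct foo {
		struct foo_attr *attr;
		/* ... */
	};

	struct foo *f = kzalloc(sizeof(*f) + attr_size, GFP_KERNEL);

	if (!f)
		return -ENOMEM;
	if (attr_size)
		f->attr = (struct foo_attr *)(f + 1);	/* trailing region */

One allocation instead of two keeps the attribute lifetime tied to the flow and avoids a separate error path.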

@ -1136,7 +1136,7 @@ static struct mlx5_flow_group *create_autogroup(struct mlx5_flow_table *ft,
u32 *match_criteria)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
struct list_head *prev = ft->node.children.prev;
struct list_head *prev = &ft->node.children;
unsigned int candidate_index = 0;
struct mlx5_flow_group *fg;
void *match_criteria_addr;

@ -1352,6 +1352,7 @@ static int init_one(struct pci_dev *pdev,
if (err)
goto clean_load;

pci_save_state(pdev);
return 0;

clean_load:
@ -1407,9 +1408,8 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,

mlx5_enter_error_state(dev);
mlx5_unload_one(dev, priv, false);
/* In case of kernel call save the pci state and drain the health wq */
/* In case of kernel call drain the health wq */
if (state) {
pci_save_state(pdev);
mlx5_drain_health_wq(dev);
mlx5_pci_disable_device(dev);
}
@ -1461,6 +1461,7 @@ static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)

pci_set_master(pdev);
pci_restore_state(pdev);
pci_save_state(pdev);

if (wait_vital(pdev)) {
dev_err(&pdev->dev, "%s: wait_vital timed out\n", __func__);
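The reason for the relocated pci_save_state() calls: pci_restore_state() consumes the snapshot kept in struct pci_dev, so without re-saving, a second trip through error recovery would have nothing to restore; saving once in init_one() provides the known-good snapshot in the first place. A hedged sketch of the discipline:

	static pci_ers_result_t example_slot_reset(struct pci_dev *pdev)
	{
		pci_set_master(pdev);
		pci_restore_state(pdev);	/* consumes the saved state */
		pci_save_state(pdev);		/* re-arm for next recovery */
		return PCI_ERS_RESULT_RECOVERED;
	}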

@ -769,7 +769,7 @@ static inline void mlxsw_reg_spvid_pack(char *payload, u8 local_port, u16 pvid)
#define MLXSW_REG_SPVM_ID 0x200F
#define MLXSW_REG_SPVM_BASE_LEN 0x04 /* base length, without records */
#define MLXSW_REG_SPVM_REC_LEN 0x04 /* record length */
#define MLXSW_REG_SPVM_REC_MAX_COUNT 256
#define MLXSW_REG_SPVM_REC_MAX_COUNT 255
#define MLXSW_REG_SPVM_LEN (MLXSW_REG_SPVM_BASE_LEN + \
MLXSW_REG_SPVM_REC_LEN * MLXSW_REG_SPVM_REC_MAX_COUNT)

@ -1702,7 +1702,7 @@ static inline void mlxsw_reg_sfmr_pack(char *payload,
#define MLXSW_REG_SPVMLR_ID 0x2020
#define MLXSW_REG_SPVMLR_BASE_LEN 0x04 /* base length, without records */
#define MLXSW_REG_SPVMLR_REC_LEN 0x04 /* record length */
#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 256
#define MLXSW_REG_SPVMLR_REC_MAX_COUNT 255
#define MLXSW_REG_SPVMLR_LEN (MLXSW_REG_SPVMLR_BASE_LEN + \
MLXSW_REG_SPVMLR_REC_LEN * \
MLXSW_REG_SPVMLR_REC_MAX_COUNT)
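The arithmetic behind the 256 -> 255 change: with a 4-byte base and 4-byte records, 0x04 + 0x04 * 255 = 1024 bytes, while 256 records would yield 1028 — presumably one record past the register's maximum payload (the 1024-byte cap is an inference here, not stated in the hunk):

	_Static_assert(0x04 + 0x04 * 255 == 1024, "fits the payload");
	_Static_assert(0x04 + 0x04 * 256 == 1028, "one record too many");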

@ -303,11 +303,11 @@ void mlxsw_sp_flower_destroy(struct mlxsw_sp_port *mlxsw_sp_port, bool ingress,
ruleset = mlxsw_sp_acl_ruleset_get(mlxsw_sp, mlxsw_sp_port->dev,
ingress,
MLXSW_SP_ACL_PROFILE_FLOWER);
if (WARN_ON(IS_ERR(ruleset)))
if (IS_ERR(ruleset))
return;

rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, f->cookie);
if (!WARN_ON(!rule)) {
if (rule) {
mlxsw_sp_acl_rule_del(mlxsw_sp, rule);
mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule);
}

@ -422,8 +422,9 @@ static void qed_cxt_set_proto_cid_count(struct qed_hwfn *p_hwfn,
u32 page_sz = p_mgr->clients[ILT_CLI_CDUC].p_size.val;
u32 cxt_size = CONN_CXT_SIZE(p_hwfn);
u32 elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size;
u32 align = elems_per_page * DQ_RANGE_ALIGN;

p_conn->cid_count = roundup(p_conn->cid_count, elems_per_page);
p_conn->cid_count = roundup(p_conn->cid_count, align);
}
}

@ -2389,9 +2389,8 @@ qed_chain_alloc_sanity_check(struct qed_dev *cdev,
* size/capacity fields are of a u32 type.
*/
if ((cnt_type == QED_CHAIN_CNT_TYPE_U16 &&
chain_size > 0x10000) ||
(cnt_type == QED_CHAIN_CNT_TYPE_U32 &&
chain_size > 0x100000000ULL)) {
chain_size > ((u32)U16_MAX + 1)) ||
(cnt_type == QED_CHAIN_CNT_TYPE_U32 && chain_size > U32_MAX)) {
DP_NOTICE(cdev,
"The actual chain size (0x%llx) is larger than the maximal possible value\n",
chain_size);

@ -190,6 +190,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
p_init->ooo_enable = p_params->ooo_enable;
p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
p_params->ll2_ooo_queue_id;
p_init->func_params.log_page_size = p_params->log_page_size;
val = p_params->num_tasks;
p_init->func_params.num_tasks = cpu_to_le16(val);
@ -786,6 +789,23 @@ static void qed_iscsi_release_connection(struct qed_hwfn *p_hwfn,
spin_unlock_bh(&p_hwfn->p_iscsi_info->lock);
}

void qed_iscsi_free_connection(struct qed_hwfn *p_hwfn,
struct qed_iscsi_conn *p_conn)
{
qed_chain_free(p_hwfn->cdev, &p_conn->xhq);
qed_chain_free(p_hwfn->cdev, &p_conn->uhq);
qed_chain_free(p_hwfn->cdev, &p_conn->r2tq);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct tcp_upload_params),
p_conn->tcp_upload_params_virt_addr,
p_conn->tcp_upload_params_phys_addr);
dma_free_coherent(&p_hwfn->cdev->pdev->dev,
sizeof(struct scsi_terminate_extra_params),
p_conn->queue_cnts_virt_addr,
p_conn->queue_cnts_phys_addr);
kfree(p_conn);
}

struct qed_iscsi_info *qed_iscsi_alloc(struct qed_hwfn *p_hwfn)
{
struct qed_iscsi_info *p_iscsi_info;
@ -807,6 +827,17 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn,
void qed_iscsi_free(struct qed_hwfn *p_hwfn,
struct qed_iscsi_info *p_iscsi_info)
{
struct qed_iscsi_conn *p_conn = NULL;

while (!list_empty(&p_hwfn->p_iscsi_info->free_list)) {
p_conn = list_first_entry(&p_hwfn->p_iscsi_info->free_list,
struct qed_iscsi_conn, list_entry);
if (p_conn) {
list_del(&p_conn->list_entry);
qed_iscsi_free_connection(p_hwfn, p_conn);
}
}

kfree(p_iscsi_info);
}
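qed_iscsi_free() drains the connection free-list with the standard pop-from-head loop: take the first entry, unlink it, then release it, so the list stays consistent at every step. A hedged, self-contained sketch with a hypothetical type:

	struct conn {
		struct list_head list_entry;
	};

	static void drain(struct list_head *free_list)
	{
		struct conn *c;

		while (!list_empty(free_list)) {
			c = list_first_entry(free_list, struct conn, list_entry);
			list_del(&c->list_entry);	/* unlink first */
			kfree(c);			/* then free */
		}
	}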

@ -211,6 +211,8 @@ static void qed_ll2b_complete_rx_packet(struct qed_hwfn *p_hwfn,
/* If need to reuse or there's no replacement buffer, repost this */
if (rc)
goto out_post;
dma_unmap_single(&cdev->pdev->dev, buffer->phys_addr,
cdev->ll2->rx_size, DMA_FROM_DEVICE);

skb = build_skb(buffer->data, 0);
if (!skb) {
@ -474,7 +476,7 @@ qed_ll2_rxq_completion_gsi(struct qed_hwfn *p_hwfn,
static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
union core_rx_cqe_union *p_cqe,
unsigned long lock_flags,
unsigned long *p_lock_flags,
bool b_last_cqe)
{
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
@ -495,10 +497,10 @@ static int qed_ll2_rxq_completion_reg(struct qed_hwfn *p_hwfn,
"Mismatch between active_descq and the LL2 Rx chain\n");
list_add_tail(&p_pkt->list_entry, &p_rx->free_descq);

spin_unlock_irqrestore(&p_rx->lock, lock_flags);
spin_unlock_irqrestore(&p_rx->lock, *p_lock_flags);
qed_ll2b_complete_rx_packet(p_hwfn, p_ll2_conn->my_id,
p_pkt, &p_cqe->rx_cqe_fp, b_last_cqe);
spin_lock_irqsave(&p_rx->lock, lock_flags);
spin_lock_irqsave(&p_rx->lock, *p_lock_flags);

return 0;
}
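The signature change above is the whole fix: spin_lock_irqsave() stores the saved IRQ flags into the variable it is handed, so a callee that drops and retakes the caller's lock must receive the flags by pointer — a by-value copy would leave the caller restoring stale flags. In sketch form:

	static void callee(spinlock_t *lock, unsigned long *flags)
	{
		spin_unlock_irqrestore(lock, *flags);
		/* ... work without the lock held ... */
		spin_lock_irqsave(lock, *flags);	/* refreshes *flags */
	}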
@ -538,7 +540,8 @@ static int qed_ll2_rxq_completion(struct qed_hwfn *p_hwfn, void *cookie)
break;
case CORE_RX_CQE_TYPE_REGULAR:
rc = qed_ll2_rxq_completion_reg(p_hwfn, p_ll2_conn,
cqe, flags, b_last_cqe);
cqe, &flags,
b_last_cqe);
break;
default:
rc = -EIO;
@ -968,7 +971,7 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
struct qed_ll2_conn ll2_info;
struct qed_ll2_conn ll2_info = { 0 };
int rc;

ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;

@ -159,6 +159,8 @@ struct qed_ooo_info *qed_ooo_alloc(struct qed_hwfn *p_hwfn)
if (!p_ooo_info->ooo_history.p_cqes)
goto no_history_mem;

p_ooo_info->ooo_history.num_of_cqes = QED_MAX_NUM_OOO_HISTORY_ENTRIES;

return p_ooo_info;

no_history_mem:

@ -1535,32 +1535,33 @@ static int smc_close(struct net_device *dev)
* Ethtool support
*/
static int
smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
smc_ethtool_get_link_ksettings(struct net_device *dev,
struct ethtool_link_ksettings *cmd)
{
struct smc_local *lp = netdev_priv(dev);
int ret;

cmd->maxtxpkt = 1;
cmd->maxrxpkt = 1;

if (lp->phy_type != 0) {
spin_lock_irq(&lp->lock);
ret = mii_ethtool_gset(&lp->mii, cmd);
ret = mii_ethtool_get_link_ksettings(&lp->mii, cmd);
spin_unlock_irq(&lp->lock);
} else {
cmd->supported = SUPPORTED_10baseT_Half |
u32 supported = SUPPORTED_10baseT_Half |
SUPPORTED_10baseT_Full |
SUPPORTED_TP | SUPPORTED_AUI;

if (lp->ctl_rspeed == 10)
ethtool_cmd_speed_set(cmd, SPEED_10);
cmd->base.speed = SPEED_10;
else if (lp->ctl_rspeed == 100)
ethtool_cmd_speed_set(cmd, SPEED_100);
cmd->base.speed = SPEED_100;

cmd->autoneg = AUTONEG_DISABLE;
cmd->transceiver = XCVR_INTERNAL;
cmd->port = 0;
cmd->duplex = lp->tcr_cur_mode & TCR_SWFDUP ? DUPLEX_FULL : DUPLEX_HALF;
cmd->base.autoneg = AUTONEG_DISABLE;
cmd->base.port = 0;
cmd->base.duplex = lp->tcr_cur_mode & TCR_SWFDUP ?
DUPLEX_FULL : DUPLEX_HALF;

ethtool_convert_legacy_u32_to_link_mode(
cmd->link_modes.supported, supported);

ret = 0;
}
@ -1569,24 +1570,26 @@ smc_ethtool_getsettings(struct net_device *dev, struct ethtool_cmd *cmd)
}

static int
smc_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
smc_ethtool_set_link_ksettings(struct net_device *dev,
const struct ethtool_link_ksettings *cmd)
{
struct smc_local *lp = netdev_priv(dev);
int ret;

if (lp->phy_type != 0) {
spin_lock_irq(&lp->lock);
ret = mii_ethtool_sset(&lp->mii, cmd);
ret = mii_ethtool_set_link_ksettings(&lp->mii, cmd);
spin_unlock_irq(&lp->lock);
} else {
if (cmd->autoneg != AUTONEG_DISABLE ||
cmd->speed != SPEED_10 ||
(cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL) ||
(cmd->port != PORT_TP && cmd->port != PORT_AUI))
if (cmd->base.autoneg != AUTONEG_DISABLE ||
cmd->base.speed != SPEED_10 ||
(cmd->base.duplex != DUPLEX_HALF &&
cmd->base.duplex != DUPLEX_FULL) ||
(cmd->base.port != PORT_TP && cmd->base.port != PORT_AUI))
return -EINVAL;

// lp->port = cmd->port;
lp->ctl_rfduplx = cmd->duplex == DUPLEX_FULL;
// lp->port = cmd->base.port;
lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL;

// if (netif_running(dev))
// smc_set_port(dev);
@ -1744,8 +1747,6 @@ static int smc_ethtool_seteeprom(struct net_device *dev,


static const struct ethtool_ops smc_ethtool_ops = {
.get_settings = smc_ethtool_getsettings,
.set_settings = smc_ethtool_setsettings,
.get_drvinfo = smc_ethtool_getdrvinfo,

.get_msglevel = smc_ethtool_getmsglevel,
@ -1755,6 +1756,8 @@ static const struct ethtool_ops smc_ethtool_ops = {
.get_eeprom_len = smc_ethtool_geteeprom_len,
.get_eeprom = smc_ethtool_geteeprom,
.set_eeprom = smc_ethtool_seteeprom,
.get_link_ksettings = smc_ethtool_get_link_ksettings,
.set_link_ksettings = smc_ethtool_set_link_ksettings,
};
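The smc91x conversion above follows the standard get_settings/set_settings to link_ksettings migration: scalar fields move under cmd->base, and legacy SUPPORTED_* bitmasks become link-mode bitmaps via ethtool_convert_legacy_u32_to_link_mode(). A hedged minimal sketch for a fixed-speed MAC with no PHY (values illustrative):

	static int example_get_link_ksettings(struct net_device *dev,
					      struct ethtool_link_ksettings *cmd)
	{
		u32 supported = SUPPORTED_10baseT_Half | SUPPORTED_TP;

		cmd->base.speed = SPEED_10;
		cmd->base.duplex = DUPLEX_HALF;
		cmd->base.autoneg = AUTONEG_DISABLE;
		cmd->base.port = PORT_TP;
		ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
							supported);
		return 0;
	}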

static const struct net_device_ops smc_netdev_ops = {

@ -700,6 +700,8 @@ struct net_device_context {

u32 tx_checksum_mask;

u32 tx_send_table[VRSS_SEND_TAB_SIZE];

/* Ethtool settings */
u8 duplex;
u32 speed;
@ -757,7 +759,6 @@ struct netvsc_device {

struct nvsp_message revoke_packet;

u32 send_table[VRSS_SEND_TAB_SIZE];
u32 max_chn;
u32 num_chn;
spinlock_t sc_lock; /* Protects num_sc_offered variable */

@ -1136,15 +1136,11 @@ static void netvsc_receive(struct net_device *ndev,
static void netvsc_send_table(struct hv_device *hdev,
struct nvsp_message *nvmsg)
{
struct netvsc_device *nvscdev;
struct net_device *ndev = hv_get_drvdata(hdev);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
int i;
u32 count, *tab;

nvscdev = get_outbound_net_device(hdev);
if (!nvscdev)
return;

count = nvmsg->msg.v5_msg.send_table.count;
if (count != VRSS_SEND_TAB_SIZE) {
netdev_err(ndev, "Received wrong send-table size:%u\n", count);
@ -1155,7 +1151,7 @@ static void netvsc_send_table(struct hv_device *hdev,
nvmsg->msg.v5_msg.send_table.offset);

for (i = 0; i < count; i++)
nvscdev->send_table[i] = tab[i];
net_device_ctx->tx_send_table[i] = tab[i];
}

static void netvsc_send_vf(struct net_device_context *net_device_ctx,

@ -206,17 +206,15 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
unsigned int num_tx_queues = ndev->real_num_tx_queues;
struct sock *sk = skb->sk;
int q_idx = sk_tx_queue_get(sk);

if (q_idx < 0 || skb->ooo_okay ||
q_idx >= ndev->real_num_tx_queues) {
if (q_idx < 0 || skb->ooo_okay || q_idx >= num_tx_queues) {
u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
int new_idx;

new_idx = nvsc_dev->send_table[hash]
% nvsc_dev->num_chn;
new_idx = net_device_ctx->tx_send_table[hash] % num_tx_queues;

if (q_idx != new_idx && sk &&
sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
@ -225,9 +223,6 @@ static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
q_idx = new_idx;
}

if (unlikely(!nvsc_dev->chan_table[q_idx].channel))
q_idx = 0;

return q_idx;
}
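Queue selection above hashes the flow into the fixed-size send indirection table and reduces modulo the live queue count; keeping tx_send_table in the persistent net_device_context means the fast path no longer dereferences the netvsc_device, which has a shorter lifetime than the net device itself. The core of the pattern, in sketch form ('ctx' is illustrative):

	u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
	u16 q = ctx->tx_send_table[hash] % ndev->real_num_tx_queues;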

@ -1883,17 +1883,6 @@ static int m88e1510_probe(struct phy_device *phydev)
return m88e1510_hwmon_probe(phydev);
}

static void marvell_remove(struct phy_device *phydev)
{
#ifdef CONFIG_HWMON

struct marvell_priv *priv = phydev->priv;

if (priv && priv->hwmon_dev)
hwmon_device_unregister(priv->hwmon_dev);
#endif
}

static struct phy_driver marvell_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88E1101,
@ -1974,7 +1963,6 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = &m88e1121_probe,
.remove = &marvell_remove,
.config_init = &m88e1121_config_init,
.config_aneg = &m88e1121_config_aneg,
.read_status = &marvell_read_status,
@ -2087,7 +2075,6 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE,
.flags = PHY_HAS_INTERRUPT,
.probe = &m88e1510_probe,
.remove = &marvell_remove,
.config_init = &m88e1510_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@ -2109,7 +2096,6 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = m88e1510_probe,
.remove = &marvell_remove,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
@ -2127,7 +2113,6 @@ static struct phy_driver marvell_drivers[] = {
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1545",
.probe = m88e1510_probe,
.remove = &marvell_remove,
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.config_init = &marvell_config_init,

@ -1864,7 +1864,7 @@ static struct phy_driver genphy_driver[] = {
.phy_id = 0xffffffff,
.phy_id_mask = 0xffffffff,
.name = "Generic PHY",
.soft_reset = genphy_soft_reset,
.soft_reset = genphy_no_soft_reset,
.config_init = genphy_config_init,
.features = PHY_GBIT_FEATURES | SUPPORTED_MII |
SUPPORTED_AUI | SUPPORTED_FIBRE |

@ -491,13 +491,14 @@ static int ks8995_probe(struct spi_device *spi)
if (err)
return err;

ks->regs_attr.size = ks->chip->regs_size;
memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
ks->regs_attr.size = ks->chip->regs_size;

err = ks8995_reset(ks);
if (err)
return err;

sysfs_attr_init(&ks->regs_attr.attr);
err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr);
if (err) {
dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",

@ -2072,6 +2072,7 @@ static int team_dev_type_check_change(struct net_device *dev,
static void team_setup(struct net_device *dev)
{
ether_setup(dev);
dev->max_mtu = ETH_MAX_MTU;

dev->netdev_ops = &team_netdev_ops;
dev->ethtool_ops = &team_ethtool_ops;

@ -822,7 +822,18 @@ static void tun_net_uninit(struct net_device *dev)
/* Net device open. */
static int tun_net_open(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
int i;

netif_tx_start_all_queues(dev);

for (i = 0; i < tun->numqueues; i++) {
struct tun_file *tfile;

tfile = rtnl_dereference(tun->tfiles[i]);
tfile->socket.sk->sk_write_space(tfile->socket.sk);
}

return 0;
}

@ -1103,9 +1114,10 @@ static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
if (!skb_array_empty(&tfile->tx_array))
mask |= POLLIN | POLLRDNORM;

if (sock_writeable(sk) ||
(!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
sock_writeable(sk)))
if (tun->dev->flags & IFF_UP &&
(sock_writeable(sk) ||
(!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
sock_writeable(sk))))
mask |= POLLOUT | POLLWRNORM;

if (tun->dev->reg_state != NETREG_REGISTERED)
@ -2570,7 +2582,6 @@ static int __init tun_init(void)
int ret = 0;

pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
pr_info("%s\n", DRV_COPYRIGHT);

ret = rtnl_link_register(&tun_link_ops);
if (ret) {

@ -340,6 +340,7 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)

static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
int len = skb->len;
netdev_tx_t ret = is_ip_tx_frame(skb, dev);

if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
@ -347,7 +348,7 @@ static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)

u64_stats_update_begin(&dstats->syncp);
dstats->tx_pkts++;
dstats->tx_bytes += skb->len;
dstats->tx_bytes += len;
u64_stats_update_end(&dstats->syncp);
} else {
||||

@ -2976,6 +2976,44 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
return 0;
}

static int __vxlan_dev_create(struct net *net, struct net_device *dev,
struct vxlan_config *conf)
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
int err;

err = vxlan_dev_configure(net, dev, conf, false);
if (err)
return err;

dev->ethtool_ops = &vxlan_ethtool_ops;

/* create an fdb entry for a valid default destination */
if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
err = vxlan_fdb_create(vxlan, all_zeros_mac,
&vxlan->default_dst.remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
NLM_F_EXCL | NLM_F_CREATE,
vxlan->cfg.dst_port,
vxlan->default_dst.remote_vni,
vxlan->default_dst.remote_vni,
vxlan->default_dst.remote_ifindex,
NTF_SELF);
if (err)
return err;
}

err = register_netdevice(dev);
if (err) {
vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
return err;
}

list_add(&vxlan->next, &vn->vxlan_list);
return 0;
}

static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
struct net_device *dev, struct vxlan_config *conf,
bool changelink)
@ -3172,8 +3210,6 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
static int vxlan_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
{
struct vxlan_net *vn = net_generic(src_net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_config conf;
int err;

@ -3181,36 +3217,7 @@ static int vxlan_newlink(struct net *src_net, struct net_device *dev,
if (err)
return err;

err = vxlan_dev_configure(src_net, dev, &conf, false);
if (err)
return err;

dev->ethtool_ops = &vxlan_ethtool_ops;

/* create an fdb entry for a valid default destination */
if (!vxlan_addr_any(&vxlan->default_dst.remote_ip)) {
err = vxlan_fdb_create(vxlan, all_zeros_mac,
&vxlan->default_dst.remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
NLM_F_EXCL | NLM_F_CREATE,
vxlan->cfg.dst_port,
vxlan->default_dst.remote_vni,
vxlan->default_dst.remote_vni,
vxlan->default_dst.remote_ifindex,
NTF_SELF);
if (err)
return err;
}

err = register_netdevice(dev);
if (err) {
vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
return err;
}

list_add(&vxlan->next, &vn->vxlan_list);

return 0;
return __vxlan_dev_create(src_net, dev, &conf);
}

static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
@ -3440,7 +3447,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name,
if (IS_ERR(dev))
return dev;

err = vxlan_dev_configure(net, dev, conf, false);
err = __vxlan_dev_create(net, dev, conf);
if (err < 0) {
free_netdev(dev);
return ERR_PTR(err);

@ -381,8 +381,8 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
/* set bd status and length */
bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;

iowrite16be(bd_status, &bd->status);
iowrite16be(skb->len, &bd->length);
iowrite16be(bd_status, &bd->status);

/* Move to next BD in the ring */
if (!(bd_status & T_W_S))
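The two-line swap in the ucc_hdlc hunk above encodes the publish-last rule for DMA descriptors: the status word carries the ready bit (T_R_S), and once that word is written the controller may fetch the descriptor, so the length must already be in place. In sketch form (READY_BIT stands in for the hardware-defined flag):

	iowrite16be(len, &bd->length);			/* payload fields first */
	iowrite16be(status | READY_BIT, &bd->status);	/* publish last */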
@ -457,7 +457,7 @@ static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
struct sk_buff *skb;
hdlc_device *hdlc = dev_to_hdlc(dev);
struct qe_bd *bd;
u32 bd_status;
u16 bd_status;
u16 length, howmany = 0;
u8 *bdbuffer;
int i;

@ -467,6 +467,9 @@ int i2400mu_probe(struct usb_interface *iface,
struct i2400mu *i2400mu;
struct usb_device *usb_dev = interface_to_usbdev(iface);

if (iface->cur_altsetting->desc.bNumEndpoints < 4)
return -ENODEV;

if (usb_dev->speed != USB_SPEED_HIGH)
dev_err(dev, "device not connected as high speed\n");

@ -165,13 +165,17 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
unsigned int num_queues = vif->num_queues;
unsigned int num_queues;
u16 index;
struct xenvif_rx_cb *cb;

BUG_ON(skb->dev != dev);

/* Drop the packet if queues are not set up */
/* Drop the packet if queues are not set up.
* This handler should be called inside an RCU read section
* so we don't need to enter it here explicitly.
*/
num_queues = READ_ONCE(vif->num_queues);
if (num_queues < 1)
goto drop;

@ -222,18 +226,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
unsigned int num_queues;
u64 rx_bytes = 0;
u64 rx_packets = 0;
u64 tx_bytes = 0;
u64 tx_packets = 0;
unsigned int index;

spin_lock(&vif->lock);
if (vif->queues == NULL)
goto out;
rcu_read_lock();
num_queues = READ_ONCE(vif->num_queues);

/* Aggregate tx and rx stats from each queue */
for (index = 0; index < vif->num_queues; ++index) {
for (index = 0; index < num_queues; ++index) {
queue = &vif->queues[index];
rx_bytes += queue->stats.rx_bytes;
rx_packets += queue->stats.rx_packets;
@ -241,8 +245,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
tx_packets += queue->stats.tx_packets;
}

out:
spin_unlock(&vif->lock);
rcu_read_unlock();

vif->dev->stats.rx_bytes = rx_bytes;
vif->dev->stats.rx_packets = rx_packets;
@ -378,10 +381,13 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 * data)
{
struct xenvif *vif = netdev_priv(dev);
unsigned int num_queues = vif->num_queues;
unsigned int num_queues;
int i;
unsigned int queue_index;

rcu_read_lock();
num_queues = READ_ONCE(vif->num_queues);

for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) {
unsigned long accum = 0;
for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@ -390,6 +396,8 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
}
data[i] = accum;
}

rcu_read_unlock();
}
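All three xen-netback handlers now follow the same reader pattern: snapshot the queue count once with READ_ONCE() inside an RCU read section and never re-read it mid-walk; backend_disconnect() (below) zeroes num_queues and calls synchronize_net() before tearing the queues down. Sketch of the reader side:

	rcu_read_lock();
	num_queues = READ_ONCE(vif->num_queues);
	for (i = 0; i < num_queues; i++)
		total += vif->queues[i].stats.tx_packets;
	rcu_read_unlock();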
|
||||
|
||||
static void xenvif_get_strings(struct net_device *dev, u32 stringset, u8 * data)
|
||||
|
@ -214,7 +214,7 @@ static void xenvif_fatal_tx_err(struct xenvif *vif)
netdev_err(vif->dev, "fatal error; disabling device\n");
vif->disabled = true;
/* Disable the vif from queue 0's kthread */
if (vif->queues)
if (vif->num_queues)
xenvif_kick_thread(&vif->queues[0]);
}

@ -495,26 +495,26 @@ static void backend_disconnect(struct backend_info *be)
struct xenvif *vif = be->vif;

if (vif) {
unsigned int num_queues = vif->num_queues;
unsigned int queue_index;
struct xenvif_queue *queues;

xen_unregister_watchers(vif);
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(vif);
#endif /* CONFIG_DEBUG_FS */
xenvif_disconnect_data(vif);
for (queue_index = 0;
queue_index < vif->num_queues;
++queue_index)

/* At this point some of the handlers may still be active
* so we need to have additional synchronization here.
*/
vif->num_queues = 0;
synchronize_net();

for (queue_index = 0; queue_index < num_queues; ++queue_index)
xenvif_deinit_queue(&vif->queues[queue_index]);

spin_lock(&vif->lock);
queues = vif->queues;
vif->num_queues = 0;
vfree(vif->queues);
vif->queues = NULL;
spin_unlock(&vif->lock);

vfree(queues);

xenvif_disconnect_ctrl(vif);
}

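backend_disconnect() above is the matching writer side: publish a zero count first, let in-flight readers drain via synchronize_net(), and only then tear the array down. A sketch under the same assumptions as the reader example (the spinlock mirrors the role of vif->lock; my_queue_deinit() is hypothetical):

    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>
    #include <linux/vmalloc.h>

    /* Writer side: retire the queue array before freeing it. */
    static void my_dev_teardown_queues(struct my_dev *dev)
    {
            struct my_queue *queues;
            unsigned int i, n = dev->num_queues;

            dev->num_queues = 0;      /* new readers now see zero queues */
            synchronize_net();        /* wait out readers already inside */

            for (i = 0; i < n; i++)
                    my_queue_deinit(&dev->queues[i]);   /* hypothetical */

            spin_lock(&dev->lock);    /* vs. paths serialized by the lock */
            queues = dev->queues;
            dev->queues = NULL;
            spin_unlock(&dev->lock);

            vfree(queues);            /* nothing can reach the array now */
    }
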
@ -103,15 +103,6 @@ static struct quirk_entry quirk_asus_x200ca = {
.wapf = 2,
};

static struct quirk_entry quirk_no_rfkill = {
.no_rfkill = true,
};

static struct quirk_entry quirk_no_rfkill_wapf4 = {
.wapf = 4,
.no_rfkill = true,
};

static struct quirk_entry quirk_asus_ux303ub = {
.wmi_backlight_native = true,
};
@ -194,7 +185,7 @@ static const struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X456UA"),
},
.driver_data = &quirk_no_rfkill_wapf4,
.driver_data = &quirk_asus_wapf4,
},
{
.callback = dmi_matched,
@ -203,7 +194,7 @@ static const struct dmi_system_id asus_quirks[] = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X456UF"),
},
.driver_data = &quirk_no_rfkill_wapf4,
.driver_data = &quirk_asus_wapf4,
},
{
.callback = dmi_matched,
@ -367,42 +358,6 @@ static const struct dmi_system_id asus_quirks[] = {
},
.driver_data = &quirk_asus_x200ca,
},
{
.callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. X555UB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "X555UB"),
},
.driver_data = &quirk_no_rfkill,
},
{
.callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. N552VW",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "N552VW"),
},
.driver_data = &quirk_no_rfkill,
},
{
.callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. U303LB",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "U303LB"),
},
.driver_data = &quirk_no_rfkill,
},
{
.callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. Z550MA",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
DMI_MATCH(DMI_PRODUCT_NAME, "Z550MA"),
},
.driver_data = &quirk_no_rfkill,
},
{
.callback = dmi_matched,
.ident = "ASUSTeK COMPUTER INC. UX303UB",

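The quirk table being trimmed here uses the stock dmi_system_id pattern: match on vendor/product strings and hang per-model data off driver_data. A hedged sketch with made-up names (quirk_example, "EX123"); dmi_matched stands in for the driver's own match callback:

    #include <linux/dmi.h>

    static struct quirk_entry quirk_example = { .wapf = 4 };  /* hypothetical */

    static const struct dmi_system_id example_quirks[] = {
            {
                    .callback = dmi_matched,        /* driver-local callback */
                    .ident = "Example Inc. EX123",
                    .matches = {
                            DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
                            DMI_MATCH(DMI_PRODUCT_NAME, "EX123"),
                    },
                    .driver_data = &quirk_example,
            },
            {}                                      /* zeroed terminator */
    };

    /* at init time:
     *     dmi_check_system(example_quirks);
     * walks the table and fires the callback on each matching entry. */
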
@ -159,6 +159,8 @@ MODULE_LICENSE("GPL");
#define USB_INTEL_XUSB2PR 0xD0
#define PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI 0x9c31

static const char * const ashs_ids[] = { "ATK4001", "ATK4002", NULL };

struct bios_args {
u32 arg0;
u32 arg1;
@ -2051,6 +2053,16 @@ static int asus_wmi_fan_init(struct asus_wmi *asus)
return 0;
}

static bool ashs_present(void)
{
int i = 0;
while (ashs_ids[i]) {
if (acpi_dev_found(ashs_ids[i++]))
return true;
}
return false;
}

/*
* WMI Driver
*/
@ -2095,7 +2107,11 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_leds;

if (!asus->driver->quirks->no_rfkill) {
asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
asus->driver->wlan_ctrl_by_user = 1;

if (!(asus->driver->wlan_ctrl_by_user && ashs_present())) {
err = asus_wmi_rfkill_init(asus);
if (err)
goto fail_rfkill;
@ -2134,10 +2150,6 @@ static int asus_wmi_add(struct platform_device *pdev)
if (err)
goto fail_debugfs;

asus_wmi_get_devstate(asus, ASUS_WMI_DEVID_WLAN, &result);
if (result & (ASUS_WMI_DSTS_PRESENCE_BIT | ASUS_WMI_DSTS_USER_BIT))
asus->driver->wlan_ctrl_by_user = 1;

return 0;

fail_debugfs:

@ -39,7 +39,6 @@ struct key_entry;
struct asus_wmi;

struct quirk_entry {
bool no_rfkill;
bool hotplug_wireless;
bool scalar_panel_brightness;
bool store_backlight_power;

@ -78,18 +78,18 @@

#define FUJITSU_LCD_N_LEVELS 8

#define ACPI_FUJITSU_CLASS "fujitsu"
#define ACPI_FUJITSU_HID "FUJ02B1"
#define ACPI_FUJITSU_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver"
#define ACPI_FUJITSU_DEVICE_NAME "Fujitsu FUJ02B1"
#define ACPI_FUJITSU_HOTKEY_HID "FUJ02E3"
#define ACPI_FUJITSU_HOTKEY_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver"
#define ACPI_FUJITSU_HOTKEY_DEVICE_NAME "Fujitsu FUJ02E3"
#define ACPI_FUJITSU_CLASS "fujitsu"
#define ACPI_FUJITSU_BL_HID "FUJ02B1"
#define ACPI_FUJITSU_BL_DRIVER_NAME "Fujitsu laptop FUJ02B1 ACPI brightness driver"
#define ACPI_FUJITSU_BL_DEVICE_NAME "Fujitsu FUJ02B1"
#define ACPI_FUJITSU_LAPTOP_HID "FUJ02E3"
#define ACPI_FUJITSU_LAPTOP_DRIVER_NAME "Fujitsu laptop FUJ02E3 ACPI hotkeys driver"
#define ACPI_FUJITSU_LAPTOP_DEVICE_NAME "Fujitsu FUJ02E3"

#define ACPI_FUJITSU_NOTIFY_CODE1 0x80

/* FUNC interface - command values */
#define FUNC_RFKILL 0x1000
#define FUNC_FLAGS 0x1000
#define FUNC_LEDS 0x1001
#define FUNC_BUTTONS 0x1002
#define FUNC_BACKLIGHT 0x1004
@ -97,6 +97,11 @@
/* FUNC interface - responses */
#define UNSUPPORTED_CMD 0x80000000

/* FUNC interface - status flags */
#define FLAG_RFKILL 0x020
#define FLAG_LID 0x100
#define FLAG_DOCK 0x200

#if IS_ENABLED(CONFIG_LEDS_CLASS)
/* FUNC interface - LED control */
#define FUNC_LED_OFF 0x1
@ -136,7 +141,7 @@
#endif

/* Device controlling the backlight and associated keys */
struct fujitsu_t {
struct fujitsu_bl {
acpi_handle acpi_handle;
struct acpi_device *dev;
struct input_dev *input;
@ -150,12 +155,12 @@ struct fujitsu_t {
unsigned int brightness_level;
};

static struct fujitsu_t *fujitsu;
static struct fujitsu_bl *fujitsu_bl;
static int use_alt_lcd_levels = -1;
static int disable_brightness_adjust = -1;

/* Device used to access other hotkeys on the laptop */
struct fujitsu_hotkey_t {
/* Device used to access hotkeys and other features on the laptop */
struct fujitsu_laptop {
acpi_handle acpi_handle;
struct acpi_device *dev;
struct input_dev *input;
@ -163,17 +168,15 @@ struct fujitsu_hotkey_t {
struct platform_device *pf_device;
struct kfifo fifo;
spinlock_t fifo_lock;
int rfkill_supported;
int rfkill_state;
int flags_supported;
int flags_state;
int logolamp_registered;
int kblamps_registered;
int radio_led_registered;
int eco_led_registered;
};

static struct fujitsu_hotkey_t *fujitsu_hotkey;

static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event);
static struct fujitsu_laptop *fujitsu_laptop;

#if IS_ENABLED(CONFIG_LEDS_CLASS)
static enum led_brightness logolamp_get(struct led_classdev *cdev);
@ -222,8 +225,6 @@ static struct led_classdev eco_led = {
static u32 dbg_level = 0x03;
#endif

static void acpi_fujitsu_notify(struct acpi_device *device, u32 event);

/* Fujitsu ACPI interface function */

static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
@ -239,7 +240,7 @@ static int call_fext_func(int cmd, int arg0, int arg1, int arg2)
unsigned long long value;
acpi_handle handle = NULL;

status = acpi_get_handle(fujitsu_hotkey->acpi_handle, "FUNC", &handle);
status = acpi_get_handle(fujitsu_laptop->acpi_handle, "FUNC", &handle);
if (ACPI_FAILURE(status)) {
vdbg_printk(FUJLAPTOP_DBG_ERROR,
"FUNC interface is not present\n");

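call_fext_func() is the usual one-integer-out ACPI method call: resolve a handle, pack integer arguments into an acpi_object_list, evaluate. A sketch of the same shape with a generic method name, not the driver's exact code:

    #include <linux/acpi.h>

    /* Evaluate FUNC(cmd, arg0) under 'parent' and return one integer. */
    static int call_acpi_method(acpi_handle parent, int cmd, int arg0)
    {
            union acpi_object params[2];
            struct acpi_object_list arg_list = { 2, params };
            unsigned long long value;
            acpi_status status;

            params[0].type = ACPI_TYPE_INTEGER;
            params[0].integer.value = cmd;
            params[1].type = ACPI_TYPE_INTEGER;
            params[1].integer.value = arg0;

            status = acpi_evaluate_integer(parent, "FUNC", &arg_list, &value);
            if (ACPI_FAILURE(status))
                    return -ENODEV;

            return value;   /* method result, e.g. a flags bitmask */
    }
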
@ -300,9 +301,9 @@ static int radio_led_set(struct led_classdev *cdev,
enum led_brightness brightness)
{
if (brightness >= LED_FULL)
return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, RADIO_LED_ON);
return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, RADIO_LED_ON);
else
return call_fext_func(FUNC_RFKILL, 0x5, RADIO_LED_ON, 0x0);
return call_fext_func(FUNC_FLAGS, 0x5, RADIO_LED_ON, 0x0);
}

static int eco_led_set(struct led_classdev *cdev,
@ -346,7 +347,7 @@ static enum led_brightness radio_led_get(struct led_classdev *cdev)
{
enum led_brightness brightness = LED_OFF;

if (call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0) & RADIO_LED_ON)
if (call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0) & RADIO_LED_ON)
brightness = LED_FULL;

return brightness;
@ -373,10 +374,10 @@ static int set_lcd_level(int level)
vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBLL [%d]\n",
level);

if (level < 0 || level >= fujitsu->max_brightness)
if (level < 0 || level >= fujitsu_bl->max_brightness)
return -EINVAL;

status = acpi_get_handle(fujitsu->acpi_handle, "SBLL", &handle);
status = acpi_get_handle(fujitsu_bl->acpi_handle, "SBLL", &handle);
if (ACPI_FAILURE(status)) {
vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBLL not present\n");
return -ENODEV;
@ -398,10 +399,10 @@ static int set_lcd_level_alt(int level)
vdbg_printk(FUJLAPTOP_DBG_TRACE, "set lcd level via SBL2 [%d]\n",
level);

if (level < 0 || level >= fujitsu->max_brightness)
if (level < 0 || level >= fujitsu_bl->max_brightness)
return -EINVAL;

status = acpi_get_handle(fujitsu->acpi_handle, "SBL2", &handle);
status = acpi_get_handle(fujitsu_bl->acpi_handle, "SBL2", &handle);
if (ACPI_FAILURE(status)) {
vdbg_printk(FUJLAPTOP_DBG_ERROR, "SBL2 not present\n");
return -ENODEV;
@ -421,19 +422,19 @@ static int get_lcd_level(void)

vdbg_printk(FUJLAPTOP_DBG_TRACE, "get lcd level via GBLL\n");

status =
acpi_evaluate_integer(fujitsu->acpi_handle, "GBLL", NULL, &state);
status = acpi_evaluate_integer(fujitsu_bl->acpi_handle, "GBLL", NULL,
&state);
if (ACPI_FAILURE(status))
return 0;

fujitsu->brightness_level = state & 0x0fffffff;
fujitsu_bl->brightness_level = state & 0x0fffffff;

if (state & 0x80000000)
fujitsu->brightness_changed = 1;
fujitsu_bl->brightness_changed = 1;
else
fujitsu->brightness_changed = 0;
fujitsu_bl->brightness_changed = 0;

return fujitsu->brightness_level;
return fujitsu_bl->brightness_level;
}

static int get_max_brightness(void)
@ -443,14 +444,14 @@ static int get_max_brightness(void)

vdbg_printk(FUJLAPTOP_DBG_TRACE, "get max lcd level via RBLL\n");

status =
acpi_evaluate_integer(fujitsu->acpi_handle, "RBLL", NULL, &state);
status = acpi_evaluate_integer(fujitsu_bl->acpi_handle, "RBLL", NULL,
&state);
if (ACPI_FAILURE(status))
return -1;

fujitsu->max_brightness = state;
fujitsu_bl->max_brightness = state;

return fujitsu->max_brightness;
return fujitsu_bl->max_brightness;
}

/* Backlight device stuff */
@ -483,7 +484,7 @@ static int bl_update_status(struct backlight_device *b)
return ret;
}

static const struct backlight_ops fujitsubl_ops = {
static const struct backlight_ops fujitsu_bl_ops = {
.get_brightness = bl_get_brightness,
.update_status = bl_update_status,
};
@ -511,7 +512,7 @@ show_brightness_changed(struct device *dev,

int ret;

ret = fujitsu->brightness_changed;
ret = fujitsu_bl->brightness_changed;
if (ret < 0)
return ret;

@ -539,7 +540,7 @@ static ssize_t store_lcd_level(struct device *dev,
int level, ret;

if (sscanf(buf, "%i", &level) != 1
|| (level < 0 || level >= fujitsu->max_brightness))
|| (level < 0 || level >= fujitsu_bl->max_brightness))
return -EINVAL;

if (use_alt_lcd_levels)
@ -567,9 +568,9 @@ static ssize_t
show_lid_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (!(fujitsu_hotkey->rfkill_supported & 0x100))
if (!(fujitsu_laptop->flags_supported & FLAG_LID))
return sprintf(buf, "unknown\n");
if (fujitsu_hotkey->rfkill_state & 0x100)
if (fujitsu_laptop->flags_state & FLAG_LID)
return sprintf(buf, "open\n");
else
return sprintf(buf, "closed\n");
@ -579,9 +580,9 @@ static ssize_t
show_dock_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (!(fujitsu_hotkey->rfkill_supported & 0x200))
if (!(fujitsu_laptop->flags_supported & FLAG_DOCK))
return sprintf(buf, "unknown\n");
if (fujitsu_hotkey->rfkill_state & 0x200)
if (fujitsu_laptop->flags_state & FLAG_DOCK)
return sprintf(buf, "docked\n");
else
return sprintf(buf, "undocked\n");
@ -591,9 +592,9 @@ static ssize_t
show_radios_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
if (!(fujitsu_hotkey->rfkill_supported & 0x20))
if (!(fujitsu_laptop->flags_supported & FLAG_RFKILL))
return sprintf(buf, "unknown\n");
if (fujitsu_hotkey->rfkill_state & 0x20)
if (fujitsu_laptop->flags_state & FLAG_RFKILL)
return sprintf(buf, "on\n");
else
return sprintf(buf, "killed\n");
@ -607,7 +608,7 @@ static DEVICE_ATTR(lid, 0444, show_lid_state, ignore_store);
static DEVICE_ATTR(dock, 0444, show_dock_state, ignore_store);
static DEVICE_ATTR(radios, 0444, show_radios_state, ignore_store);

static struct attribute *fujitsupf_attributes[] = {
static struct attribute *fujitsu_pf_attributes[] = {
&dev_attr_brightness_changed.attr,
&dev_attr_max_brightness.attr,
&dev_attr_lcd_level.attr,
@ -617,11 +618,11 @@ static struct attribute *fujitsupf_attributes[] = {
NULL
};

static struct attribute_group fujitsupf_attribute_group = {
.attrs = fujitsupf_attributes
static struct attribute_group fujitsu_pf_attribute_group = {
.attrs = fujitsu_pf_attributes
};

static struct platform_driver fujitsupf_driver = {
static struct platform_driver fujitsu_pf_driver = {
.driver = {
.name = "fujitsu-laptop",
}

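The sysfs plumbing renamed in the hunks above is the standard pattern: DEVICE_ATTR() emits a dev_attr_* object, the objects are gathered into an attribute_group, and one sysfs_create_group() call publishes them. A minimal sketch with a hypothetical read-only attribute:

    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t version_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
    {
            return sprintf(buf, "1.0\n");           /* hypothetical attribute */
    }
    static DEVICE_ATTR(version, 0444, version_show, NULL);

    static struct attribute *example_attributes[] = {
            &dev_attr_version.attr,
            NULL                                    /* must be NULL-terminated */
    };

    static const struct attribute_group example_attribute_group = {
            .attrs = example_attributes
    };

    /* at probe time:
     *     err = sysfs_create_group(&pdev->dev.kobj, &example_attribute_group);
     * creates /sys/devices/.../version */
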
@ -630,39 +631,30 @@ static struct platform_driver fujitsupf_driver = {
static void __init dmi_check_cb_common(const struct dmi_system_id *id)
{
pr_info("Identified laptop model '%s'\n", id->ident);
if (use_alt_lcd_levels == -1) {
if (acpi_has_method(NULL,
"\\_SB.PCI0.LPCB.FJEX.SBL2"))
use_alt_lcd_levels = 1;
else
use_alt_lcd_levels = 0;
vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as "
"%i\n", use_alt_lcd_levels);
}
}

static int __init dmi_check_cb_s6410(const struct dmi_system_id *id)
{
dmi_check_cb_common(id);
fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */
fujitsu->keycode2 = KEY_HELP; /* "Mobility Center" */
fujitsu_bl->keycode1 = KEY_SCREENLOCK; /* "Lock" */
fujitsu_bl->keycode2 = KEY_HELP; /* "Mobility Center" */
return 1;
}

static int __init dmi_check_cb_s6420(const struct dmi_system_id *id)
{
dmi_check_cb_common(id);
fujitsu->keycode1 = KEY_SCREENLOCK; /* "Lock" */
fujitsu->keycode2 = KEY_HELP; /* "Mobility Center" */
fujitsu_bl->keycode1 = KEY_SCREENLOCK; /* "Lock" */
fujitsu_bl->keycode2 = KEY_HELP; /* "Mobility Center" */
return 1;
}

static int __init dmi_check_cb_p8010(const struct dmi_system_id *id)
{
dmi_check_cb_common(id);
fujitsu->keycode1 = KEY_HELP; /* "Support" */
fujitsu->keycode3 = KEY_SWITCHVIDEOMODE; /* "Presentation" */
fujitsu->keycode4 = KEY_WWW; /* "Internet" */
fujitsu_bl->keycode1 = KEY_HELP; /* "Support" */
fujitsu_bl->keycode3 = KEY_SWITCHVIDEOMODE; /* "Presentation" */
fujitsu_bl->keycode4 = KEY_WWW; /* "Internet" */
return 1;
}

@ -693,7 +685,7 @@ static const struct dmi_system_id fujitsu_dmi_table[] __initconst = {

/* ACPI device for LCD brightness control */

static int acpi_fujitsu_add(struct acpi_device *device)
static int acpi_fujitsu_bl_add(struct acpi_device *device)
{
int state = 0;
struct input_dev *input;
@ -702,22 +694,22 @@ static int acpi_fujitsu_add(struct acpi_device *device)
if (!device)
return -EINVAL;

fujitsu->acpi_handle = device->handle;
sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_DEVICE_NAME);
fujitsu_bl->acpi_handle = device->handle;
sprintf(acpi_device_name(device), "%s", ACPI_FUJITSU_BL_DEVICE_NAME);
sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
device->driver_data = fujitsu;
device->driver_data = fujitsu_bl;

fujitsu->input = input = input_allocate_device();
fujitsu_bl->input = input = input_allocate_device();
if (!input) {
error = -ENOMEM;
goto err_stop;
}

snprintf(fujitsu->phys, sizeof(fujitsu->phys),
snprintf(fujitsu_bl->phys, sizeof(fujitsu_bl->phys),
"%s/video/input0", acpi_device_hid(device));

input->name = acpi_device_name(device);
input->phys = fujitsu->phys;
input->phys = fujitsu_bl->phys;
input->id.bustype = BUS_HOST;
input->id.product = 0x06;
input->dev.parent = &device->dev;
@ -730,7 +722,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
if (error)
goto err_free_input_dev;

error = acpi_bus_update_power(fujitsu->acpi_handle, &state);
error = acpi_bus_update_power(fujitsu_bl->acpi_handle, &state);
if (error) {
pr_err("Error reading power state\n");
goto err_unregister_input_dev;
@ -740,7 +732,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
acpi_device_name(device), acpi_device_bid(device),
!device->power.state ? "on" : "off");

fujitsu->dev = device;
fujitsu_bl->dev = device;

if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
@ -750,6 +742,15 @@ static int acpi_fujitsu_add(struct acpi_device *device)
pr_err("_INI Method failed\n");
}

if (use_alt_lcd_levels == -1) {
if (acpi_has_method(NULL, "\\_SB.PCI0.LPCB.FJEX.SBL2"))
use_alt_lcd_levels = 1;
else
use_alt_lcd_levels = 0;
vdbg_printk(FUJLAPTOP_DBG_TRACE, "auto-detected usealt as %i\n",
use_alt_lcd_levels);
}

/* do config (detect defaults) */
use_alt_lcd_levels = use_alt_lcd_levels == 1 ? 1 : 0;
disable_brightness_adjust = disable_brightness_adjust == 1 ? 1 : 0;
@ -758,7 +759,7 @@ static int acpi_fujitsu_add(struct acpi_device *device)
use_alt_lcd_levels, disable_brightness_adjust);

if (get_max_brightness() <= 0)
fujitsu->max_brightness = FUJITSU_LCD_N_LEVELS;
fujitsu_bl->max_brightness = FUJITSU_LCD_N_LEVELS;
get_lcd_level();

return 0;
@ -772,38 +773,38 @@ err_stop:
return error;
}

static int acpi_fujitsu_remove(struct acpi_device *device)
static int acpi_fujitsu_bl_remove(struct acpi_device *device)
{
struct fujitsu_t *fujitsu = acpi_driver_data(device);
struct input_dev *input = fujitsu->input;
struct fujitsu_bl *fujitsu_bl = acpi_driver_data(device);
struct input_dev *input = fujitsu_bl->input;

input_unregister_device(input);

fujitsu->acpi_handle = NULL;
fujitsu_bl->acpi_handle = NULL;

return 0;
}

/* Brightness notify */

static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)
static void acpi_fujitsu_bl_notify(struct acpi_device *device, u32 event)
{
struct input_dev *input;
int keycode;
int oldb, newb;

input = fujitsu->input;
input = fujitsu_bl->input;

switch (event) {
case ACPI_FUJITSU_NOTIFY_CODE1:
keycode = 0;
oldb = fujitsu->brightness_level;
oldb = fujitsu_bl->brightness_level;
get_lcd_level();
newb = fujitsu->brightness_level;
newb = fujitsu_bl->brightness_level;

vdbg_printk(FUJLAPTOP_DBG_TRACE,
"brightness button event [%i -> %i (%i)]\n",
oldb, newb, fujitsu->brightness_changed);
oldb, newb, fujitsu_bl->brightness_changed);

if (oldb < newb) {
if (disable_brightness_adjust != 1) {
@ -840,7 +841,7 @@ static void acpi_fujitsu_notify(struct acpi_device *device, u32 event)

/* ACPI device for hotkey handling */

static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
static int acpi_fujitsu_laptop_add(struct acpi_device *device)
{
int result = 0;
int state = 0;

@ -851,42 +852,42 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if (!device)
return -EINVAL;

fujitsu_hotkey->acpi_handle = device->handle;
fujitsu_laptop->acpi_handle = device->handle;
sprintf(acpi_device_name(device), "%s",
ACPI_FUJITSU_HOTKEY_DEVICE_NAME);
ACPI_FUJITSU_LAPTOP_DEVICE_NAME);
sprintf(acpi_device_class(device), "%s", ACPI_FUJITSU_CLASS);
device->driver_data = fujitsu_hotkey;
device->driver_data = fujitsu_laptop;

/* kfifo */
spin_lock_init(&fujitsu_hotkey->fifo_lock);
error = kfifo_alloc(&fujitsu_hotkey->fifo, RINGBUFFERSIZE * sizeof(int),
spin_lock_init(&fujitsu_laptop->fifo_lock);
error = kfifo_alloc(&fujitsu_laptop->fifo, RINGBUFFERSIZE * sizeof(int),
GFP_KERNEL);
if (error) {
pr_err("kfifo_alloc failed\n");
goto err_stop;
}

fujitsu_hotkey->input = input = input_allocate_device();
fujitsu_laptop->input = input = input_allocate_device();
if (!input) {
error = -ENOMEM;
goto err_free_fifo;
}

snprintf(fujitsu_hotkey->phys, sizeof(fujitsu_hotkey->phys),
snprintf(fujitsu_laptop->phys, sizeof(fujitsu_laptop->phys),
"%s/video/input0", acpi_device_hid(device));

input->name = acpi_device_name(device);
input->phys = fujitsu_hotkey->phys;
input->phys = fujitsu_laptop->phys;
input->id.bustype = BUS_HOST;
input->id.product = 0x06;
input->dev.parent = &device->dev;

set_bit(EV_KEY, input->evbit);
set_bit(fujitsu->keycode1, input->keybit);
set_bit(fujitsu->keycode2, input->keybit);
set_bit(fujitsu->keycode3, input->keybit);
set_bit(fujitsu->keycode4, input->keybit);
set_bit(fujitsu->keycode5, input->keybit);
set_bit(fujitsu_bl->keycode1, input->keybit);
set_bit(fujitsu_bl->keycode2, input->keybit);
set_bit(fujitsu_bl->keycode3, input->keybit);
set_bit(fujitsu_bl->keycode4, input->keybit);
set_bit(fujitsu_bl->keycode5, input->keybit);
set_bit(KEY_TOUCHPAD_TOGGLE, input->keybit);
set_bit(KEY_UNKNOWN, input->keybit);

@ -894,7 +895,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
if (error)
goto err_free_input_dev;

error = acpi_bus_update_power(fujitsu_hotkey->acpi_handle, &state);
error = acpi_bus_update_power(fujitsu_laptop->acpi_handle, &state);
if (error) {
pr_err("Error reading power state\n");
goto err_unregister_input_dev;
@ -904,7 +905,7 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
acpi_device_name(device), acpi_device_bid(device),
!device->power.state ? "on" : "off");

fujitsu_hotkey->dev = device;
fujitsu_laptop->dev = device;

if (acpi_has_method(device->handle, METHOD_NAME__INI)) {
vdbg_printk(FUJLAPTOP_DBG_INFO, "Invoking _INI\n");
@ -920,27 +921,27 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
; /* No action, result is discarded */
vdbg_printk(FUJLAPTOP_DBG_INFO, "Discarded %i ringbuffer entries\n", i);

fujitsu_hotkey->rfkill_supported =
call_fext_func(FUNC_RFKILL, 0x0, 0x0, 0x0);
fujitsu_laptop->flags_supported =
call_fext_func(FUNC_FLAGS, 0x0, 0x0, 0x0);

/* Make sure our bitmask of supported functions is cleared if the
RFKILL function block is not implemented, like on the S7020. */
if (fujitsu_hotkey->rfkill_supported == UNSUPPORTED_CMD)
fujitsu_hotkey->rfkill_supported = 0;
if (fujitsu_laptop->flags_supported == UNSUPPORTED_CMD)
fujitsu_laptop->flags_supported = 0;

if (fujitsu_hotkey->rfkill_supported)
fujitsu_hotkey->rfkill_state =
call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);
if (fujitsu_laptop->flags_supported)
fujitsu_laptop->flags_state =
call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0);

/* Suspect this is a keymap of the application panel, print it */
pr_info("BTNI: [0x%x]\n", call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0));

#if IS_ENABLED(CONFIG_LEDS_CLASS)
if (call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & LOGOLAMP_POWERON) {
result = led_classdev_register(&fujitsu->pf_device->dev,
result = led_classdev_register(&fujitsu_bl->pf_device->dev,
&logolamp_led);
if (result == 0) {
fujitsu_hotkey->logolamp_registered = 1;
fujitsu_laptop->logolamp_registered = 1;
} else {
pr_err("Could not register LED handler for logo lamp, error %i\n",
result);
@ -949,10 +950,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)

if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & KEYBOARD_LAMPS) &&
(call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) == 0x0)) {
result = led_classdev_register(&fujitsu->pf_device->dev,
result = led_classdev_register(&fujitsu_bl->pf_device->dev,
&kblamps_led);
if (result == 0) {
fujitsu_hotkey->kblamps_registered = 1;
fujitsu_laptop->kblamps_registered = 1;
} else {
pr_err("Could not register LED handler for keyboard lamps, error %i\n",
result);
@ -966,10 +967,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
* that an RF LED is present.
*/
if (call_fext_func(FUNC_BUTTONS, 0x0, 0x0, 0x0) & BIT(24)) {
result = led_classdev_register(&fujitsu->pf_device->dev,
result = led_classdev_register(&fujitsu_bl->pf_device->dev,
&radio_led);
if (result == 0) {
fujitsu_hotkey->radio_led_registered = 1;
fujitsu_laptop->radio_led_registered = 1;
} else {
pr_err("Could not register LED handler for radio LED, error %i\n",
result);
@ -983,10 +984,10 @@ static int acpi_fujitsu_hotkey_add(struct acpi_device *device)
*/
if ((call_fext_func(FUNC_LEDS, 0x0, 0x0, 0x0) & BIT(14)) &&
(call_fext_func(FUNC_LEDS, 0x2, ECO_LED, 0x0) != UNSUPPORTED_CMD)) {
result = led_classdev_register(&fujitsu->pf_device->dev,
result = led_classdev_register(&fujitsu_bl->pf_device->dev,
&eco_led);
if (result == 0) {
fujitsu_hotkey->eco_led_registered = 1;
fujitsu_laptop->eco_led_registered = 1;
} else {
pr_err("Could not register LED handler for eco LED, error %i\n",
result);
@ -1002,47 +1003,47 @@ err_unregister_input_dev:
err_free_input_dev:
input_free_device(input);
err_free_fifo:
kfifo_free(&fujitsu_hotkey->fifo);
kfifo_free(&fujitsu_laptop->fifo);
err_stop:
return error;
}

static int acpi_fujitsu_hotkey_remove(struct acpi_device *device)
static int acpi_fujitsu_laptop_remove(struct acpi_device *device)
{
struct fujitsu_hotkey_t *fujitsu_hotkey = acpi_driver_data(device);
struct input_dev *input = fujitsu_hotkey->input;
struct fujitsu_laptop *fujitsu_laptop = acpi_driver_data(device);
struct input_dev *input = fujitsu_laptop->input;

#if IS_ENABLED(CONFIG_LEDS_CLASS)
if (fujitsu_hotkey->logolamp_registered)
if (fujitsu_laptop->logolamp_registered)
led_classdev_unregister(&logolamp_led);

if (fujitsu_hotkey->kblamps_registered)
if (fujitsu_laptop->kblamps_registered)
led_classdev_unregister(&kblamps_led);

if (fujitsu_hotkey->radio_led_registered)
if (fujitsu_laptop->radio_led_registered)
led_classdev_unregister(&radio_led);

if (fujitsu_hotkey->eco_led_registered)
if (fujitsu_laptop->eco_led_registered)
led_classdev_unregister(&eco_led);
#endif

input_unregister_device(input);

kfifo_free(&fujitsu_hotkey->fifo);
kfifo_free(&fujitsu_laptop->fifo);

fujitsu_hotkey->acpi_handle = NULL;
fujitsu_laptop->acpi_handle = NULL;

return 0;
}

static void acpi_fujitsu_hotkey_press(int keycode)
static void acpi_fujitsu_laptop_press(int keycode)
{
struct input_dev *input = fujitsu_hotkey->input;
struct input_dev *input = fujitsu_laptop->input;
int status;

status = kfifo_in_locked(&fujitsu_hotkey->fifo,
status = kfifo_in_locked(&fujitsu_laptop->fifo,
(unsigned char *)&keycode, sizeof(keycode),
&fujitsu_hotkey->fifo_lock);
&fujitsu_laptop->fifo_lock);
if (status != sizeof(keycode)) {
vdbg_printk(FUJLAPTOP_DBG_WARN,
"Could not push keycode [0x%x]\n", keycode);

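The keycode ring buffer relies on kfifo_in_locked()/kfifo_out_locked(), which wrap a byte FIFO with a caller-supplied spinlock; the driver pushes whole int keycodes in and later pops them one at a time. A standalone sketch with hypothetical globals:

    #include <linux/kfifo.h>
    #include <linux/spinlock.h>

    static struct kfifo fifo;             /* hypothetical; kfifo_alloc()'d */
    static DEFINE_SPINLOCK(fifo_lock);

    static void push_keycode(int keycode)
    {
            /* copies the int in as raw bytes, taking fifo_lock around it */
            if (kfifo_in_locked(&fifo, (unsigned char *)&keycode,
                                sizeof(keycode), &fifo_lock) != sizeof(keycode))
                    pr_warn("fifo full, dropping keycode 0x%x\n", keycode);
    }

    static int pop_keycode(int *keycode)
    {
            /* returns bytes copied out: sizeof(*keycode), or 0 when empty */
            return kfifo_out_locked(&fifo, (unsigned char *)keycode,
                                    sizeof(*keycode), &fifo_lock);
    }
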
@ -1054,16 +1055,16 @@ static void acpi_fujitsu_hotkey_press(int keycode)
"Push keycode into ringbuffer [%d]\n", keycode);
}

static void acpi_fujitsu_hotkey_release(void)
static void acpi_fujitsu_laptop_release(void)
{
struct input_dev *input = fujitsu_hotkey->input;
struct input_dev *input = fujitsu_laptop->input;
int keycode, status;

while (true) {
status = kfifo_out_locked(&fujitsu_hotkey->fifo,
status = kfifo_out_locked(&fujitsu_laptop->fifo,
(unsigned char *)&keycode,
sizeof(keycode),
&fujitsu_hotkey->fifo_lock);
&fujitsu_laptop->fifo_lock);
if (status != sizeof(keycode))
return;
input_report_key(input, keycode, 0);
@ -1073,14 +1074,14 @@ static void acpi_fujitsu_hotkey_release(void)
}
}

static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
static void acpi_fujitsu_laptop_notify(struct acpi_device *device, u32 event)
{
struct input_dev *input;
int keycode;
unsigned int irb = 1;
int i;

input = fujitsu_hotkey->input;
input = fujitsu_laptop->input;

if (event != ACPI_FUJITSU_NOTIFY_CODE1) {
keycode = KEY_UNKNOWN;
@ -1093,9 +1094,9 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
return;
}

if (fujitsu_hotkey->rfkill_supported)
fujitsu_hotkey->rfkill_state =
call_fext_func(FUNC_RFKILL, 0x4, 0x0, 0x0);
if (fujitsu_laptop->flags_supported)
fujitsu_laptop->flags_state =
call_fext_func(FUNC_FLAGS, 0x4, 0x0, 0x0);

i = 0;
while ((irb =
@ -1103,19 +1104,19 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
&& (i++) < MAX_HOTKEY_RINGBUFFER_SIZE) {
switch (irb & 0x4ff) {
case KEY1_CODE:
keycode = fujitsu->keycode1;
keycode = fujitsu_bl->keycode1;
break;
case KEY2_CODE:
keycode = fujitsu->keycode2;
keycode = fujitsu_bl->keycode2;
break;
case KEY3_CODE:
keycode = fujitsu->keycode3;
keycode = fujitsu_bl->keycode3;
break;
case KEY4_CODE:
keycode = fujitsu->keycode4;
keycode = fujitsu_bl->keycode4;
break;
case KEY5_CODE:
keycode = fujitsu->keycode5;
keycode = fujitsu_bl->keycode5;
break;
case 0:
keycode = 0;
@ -1128,17 +1129,17 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)
}

if (keycode > 0)
acpi_fujitsu_hotkey_press(keycode);
acpi_fujitsu_laptop_press(keycode);
else if (keycode == 0)
acpi_fujitsu_hotkey_release();
acpi_fujitsu_laptop_release();
}

/* On some models (first seen on the Skylake-based Lifebook
* E736/E746/E756), the touchpad toggle hotkey (Fn+F4) is
* handled in software; its state is queried using FUNC_RFKILL
* handled in software; its state is queried using FUNC_FLAGS
*/
if ((fujitsu_hotkey->rfkill_supported & BIT(26)) &&
(call_fext_func(FUNC_RFKILL, 0x1, 0x0, 0x0) & BIT(26))) {
if ((fujitsu_laptop->flags_supported & BIT(26)) &&
(call_fext_func(FUNC_FLAGS, 0x1, 0x0, 0x0) & BIT(26))) {
keycode = KEY_TOUCHPAD_TOGGLE;
input_report_key(input, keycode, 1);
input_sync(input);

@ -1150,83 +1151,81 @@ static void acpi_fujitsu_hotkey_notify(struct acpi_device *device, u32 event)

/* Initialization */

static const struct acpi_device_id fujitsu_device_ids[] = {
{ACPI_FUJITSU_HID, 0},
static const struct acpi_device_id fujitsu_bl_device_ids[] = {
{ACPI_FUJITSU_BL_HID, 0},
{"", 0},
};

static struct acpi_driver acpi_fujitsu_driver = {
.name = ACPI_FUJITSU_DRIVER_NAME,
static struct acpi_driver acpi_fujitsu_bl_driver = {
.name = ACPI_FUJITSU_BL_DRIVER_NAME,
.class = ACPI_FUJITSU_CLASS,
.ids = fujitsu_device_ids,
.ids = fujitsu_bl_device_ids,
.ops = {
.add = acpi_fujitsu_add,
.remove = acpi_fujitsu_remove,
.notify = acpi_fujitsu_notify,
.add = acpi_fujitsu_bl_add,
.remove = acpi_fujitsu_bl_remove,
.notify = acpi_fujitsu_bl_notify,
},
};

static const struct acpi_device_id fujitsu_hotkey_device_ids[] = {
{ACPI_FUJITSU_HOTKEY_HID, 0},
static const struct acpi_device_id fujitsu_laptop_device_ids[] = {
{ACPI_FUJITSU_LAPTOP_HID, 0},
{"", 0},
};

static struct acpi_driver acpi_fujitsu_hotkey_driver = {
.name = ACPI_FUJITSU_HOTKEY_DRIVER_NAME,
static struct acpi_driver acpi_fujitsu_laptop_driver = {
.name = ACPI_FUJITSU_LAPTOP_DRIVER_NAME,
.class = ACPI_FUJITSU_CLASS,
.ids = fujitsu_hotkey_device_ids,
.ids = fujitsu_laptop_device_ids,
.ops = {
.add = acpi_fujitsu_hotkey_add,
.remove = acpi_fujitsu_hotkey_remove,
.notify = acpi_fujitsu_hotkey_notify,
.add = acpi_fujitsu_laptop_add,
.remove = acpi_fujitsu_laptop_remove,
.notify = acpi_fujitsu_laptop_notify,
},
};

static const struct acpi_device_id fujitsu_ids[] __used = {
{ACPI_FUJITSU_HID, 0},
{ACPI_FUJITSU_HOTKEY_HID, 0},
{ACPI_FUJITSU_BL_HID, 0},
{ACPI_FUJITSU_LAPTOP_HID, 0},
{"", 0}
};
MODULE_DEVICE_TABLE(acpi, fujitsu_ids);

static int __init fujitsu_init(void)
{
int ret, result, max_brightness;
int ret, max_brightness;

if (acpi_disabled)
return -ENODEV;

fujitsu = kzalloc(sizeof(struct fujitsu_t), GFP_KERNEL);
if (!fujitsu)
fujitsu_bl = kzalloc(sizeof(struct fujitsu_bl), GFP_KERNEL);
if (!fujitsu_bl)
return -ENOMEM;
fujitsu->keycode1 = KEY_PROG1;
fujitsu->keycode2 = KEY_PROG2;
fujitsu->keycode3 = KEY_PROG3;
fujitsu->keycode4 = KEY_PROG4;
fujitsu->keycode5 = KEY_RFKILL;
fujitsu_bl->keycode1 = KEY_PROG1;
fujitsu_bl->keycode2 = KEY_PROG2;
fujitsu_bl->keycode3 = KEY_PROG3;
fujitsu_bl->keycode4 = KEY_PROG4;
fujitsu_bl->keycode5 = KEY_RFKILL;
dmi_check_system(fujitsu_dmi_table);

result = acpi_bus_register_driver(&acpi_fujitsu_driver);
if (result < 0) {
ret = -ENODEV;
ret = acpi_bus_register_driver(&acpi_fujitsu_bl_driver);
if (ret)
goto fail_acpi;
}

/* Register platform stuff */

fujitsu->pf_device = platform_device_alloc("fujitsu-laptop", -1);
if (!fujitsu->pf_device) {
fujitsu_bl->pf_device = platform_device_alloc("fujitsu-laptop", -1);
if (!fujitsu_bl->pf_device) {
ret = -ENOMEM;
goto fail_platform_driver;
}

ret = platform_device_add(fujitsu->pf_device);
ret = platform_device_add(fujitsu_bl->pf_device);
if (ret)
goto fail_platform_device1;

ret =
sysfs_create_group(&fujitsu->pf_device->dev.kobj,
&fujitsupf_attribute_group);
sysfs_create_group(&fujitsu_bl->pf_device->dev.kobj,
&fujitsu_pf_attribute_group);
if (ret)
goto fail_platform_device2;

@ -1236,90 +1235,88 @@ static int __init fujitsu_init(void)
struct backlight_properties props;

memset(&props, 0, sizeof(struct backlight_properties));
max_brightness = fujitsu->max_brightness;
max_brightness = fujitsu_bl->max_brightness;
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = max_brightness - 1;
fujitsu->bl_device = backlight_device_register("fujitsu-laptop",
NULL, NULL,
&fujitsubl_ops,
&props);
if (IS_ERR(fujitsu->bl_device)) {
ret = PTR_ERR(fujitsu->bl_device);
fujitsu->bl_device = NULL;
fujitsu_bl->bl_device = backlight_device_register("fujitsu-laptop",
NULL, NULL,
&fujitsu_bl_ops,
&props);
if (IS_ERR(fujitsu_bl->bl_device)) {
ret = PTR_ERR(fujitsu_bl->bl_device);
fujitsu_bl->bl_device = NULL;
goto fail_sysfs_group;
}
fujitsu->bl_device->props.brightness = fujitsu->brightness_level;
fujitsu_bl->bl_device->props.brightness = fujitsu_bl->brightness_level;
}

ret = platform_driver_register(&fujitsupf_driver);
ret = platform_driver_register(&fujitsu_pf_driver);
if (ret)
goto fail_backlight;

/* Register hotkey driver */
/* Register laptop driver */

fujitsu_hotkey = kzalloc(sizeof(struct fujitsu_hotkey_t), GFP_KERNEL);
if (!fujitsu_hotkey) {
fujitsu_laptop = kzalloc(sizeof(struct fujitsu_laptop), GFP_KERNEL);
if (!fujitsu_laptop) {
ret = -ENOMEM;
goto fail_hotkey;
goto fail_laptop;
}

result = acpi_bus_register_driver(&acpi_fujitsu_hotkey_driver);
if (result < 0) {
ret = -ENODEV;
goto fail_hotkey1;
}
ret = acpi_bus_register_driver(&acpi_fujitsu_laptop_driver);
if (ret)
goto fail_laptop1;

/* Sync backlight power status (needs FUJ02E3 device, hence deferred) */
if (acpi_video_get_backlight_type() == acpi_backlight_vendor) {
if (call_fext_func(FUNC_BACKLIGHT, 0x2, 0x4, 0x0) == 3)
fujitsu->bl_device->props.power = FB_BLANK_POWERDOWN;
fujitsu_bl->bl_device->props.power = FB_BLANK_POWERDOWN;
else
fujitsu->bl_device->props.power = FB_BLANK_UNBLANK;
fujitsu_bl->bl_device->props.power = FB_BLANK_UNBLANK;
}

pr_info("driver " FUJITSU_DRIVER_VERSION " successfully loaded\n");

return 0;

fail_hotkey1:
kfree(fujitsu_hotkey);
fail_hotkey:
platform_driver_unregister(&fujitsupf_driver);
fail_laptop1:
kfree(fujitsu_laptop);
fail_laptop:
platform_driver_unregister(&fujitsu_pf_driver);
fail_backlight:
backlight_device_unregister(fujitsu->bl_device);
backlight_device_unregister(fujitsu_bl->bl_device);
fail_sysfs_group:
sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
&fujitsupf_attribute_group);
sysfs_remove_group(&fujitsu_bl->pf_device->dev.kobj,
&fujitsu_pf_attribute_group);
fail_platform_device2:
platform_device_del(fujitsu->pf_device);
platform_device_del(fujitsu_bl->pf_device);
fail_platform_device1:
platform_device_put(fujitsu->pf_device);
platform_device_put(fujitsu_bl->pf_device);
fail_platform_driver:
acpi_bus_unregister_driver(&acpi_fujitsu_driver);
acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);
fail_acpi:
kfree(fujitsu);
kfree(fujitsu_bl);

return ret;
}

static void __exit fujitsu_cleanup(void)
{
acpi_bus_unregister_driver(&acpi_fujitsu_hotkey_driver);
acpi_bus_unregister_driver(&acpi_fujitsu_laptop_driver);

kfree(fujitsu_hotkey);
kfree(fujitsu_laptop);

platform_driver_unregister(&fujitsupf_driver);
platform_driver_unregister(&fujitsu_pf_driver);

backlight_device_unregister(fujitsu->bl_device);
backlight_device_unregister(fujitsu_bl->bl_device);

sysfs_remove_group(&fujitsu->pf_device->dev.kobj,
&fujitsupf_attribute_group);
sysfs_remove_group(&fujitsu_bl->pf_device->dev.kobj,
&fujitsu_pf_attribute_group);

platform_device_unregister(fujitsu->pf_device);
platform_device_unregister(fujitsu_bl->pf_device);

acpi_bus_unregister_driver(&acpi_fujitsu_driver);
acpi_bus_unregister_driver(&acpi_fujitsu_bl_driver);

kfree(fujitsu);
kfree(fujitsu_bl);

pr_info("driver unloaded\n");
}
@ -1341,7 +1338,3 @@ MODULE_AUTHOR("Jonathan Woithe, Peter Gruber, Tony Vroon");
MODULE_DESCRIPTION("Fujitsu laptop extras support");
MODULE_VERSION(FUJITSU_DRIVER_VERSION);
MODULE_LICENSE("GPL");

MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1D3:*:cvrS6410:*");
MODULE_ALIAS("dmi:*:svnFUJITSUSIEMENS:*:pvr:rvnFUJITSU:rnFJNB1E6:*:cvrS6420:*");
MODULE_ALIAS("dmi:*:svnFUJITSU:*:pvr:rvnFUJITSU:rnFJNB19C:*:cvrS7020:*");