diff --git a/Documentation/ABI/testing/sysfs-bus-auxiliary b/Documentation/ABI/testing/sysfs-bus-auxiliary
new file mode 100644
index 000000000000..cc856079690f
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-auxiliary
@@ -0,0 +1,9 @@
+What:		/sys/bus/auxiliary/devices/.../irqs/
+Date:		April, 2024
+Contact:	Shay Drory
+Description:
+		The /sys/devices/.../irqs directory contains a variable set of
+		files, where each file is named after an IRQ number, similar to
+		a PCI PF's or VF's IRQ numbers found in the msi_irqs directory.
+		These irq files are added and removed dynamically as IRQs are
+		requested and freed for the PCI SF.
diff --git a/Documentation/bpf/libbpf/libbpf_overview.rst b/Documentation/bpf/libbpf/libbpf_overview.rst
index f36a2d4ffea2..f4d22f0c62b0 100644
--- a/Documentation/bpf/libbpf/libbpf_overview.rst
+++ b/Documentation/bpf/libbpf/libbpf_overview.rst
@@ -219,6 +219,14 @@ compilation and skeleton generation. Using Libbpf-rs will make building user
 space part of the BPF application easier. Note that the BPF program themselves
 must still be written in plain C.
 
+libbpf logging
+==============
+
+By default, libbpf logs informational and warning messages to stderr. The
+verbosity of these messages can be controlled by setting the environment
+variable LIBBPF_LOG_LEVEL to either warn, info, or debug. A custom log
+callback can be set using ``libbpf_set_print()``.
+
 Additional Documentation
 ========================
diff --git a/Documentation/bpf/standardization/abi.rst b/Documentation/bpf/standardization/abi.rst
index 0c2e10eeb89a..41514137cb7b 100644
--- a/Documentation/bpf/standardization/abi.rst
+++ b/Documentation/bpf/standardization/abi.rst
@@ -23,3 +23,6 @@ The BPF calling convention is defined as:
 
 R0 - R5 are scratch registers and BPF programs needs to spill/fill them if
 necessary across calls.
+
+The BPF program needs to store the return value into register R0 before doing an
+``EXIT``.
diff --git a/Documentation/bpf/standardization/instruction-set.rst b/Documentation/bpf/standardization/instruction-set.rst
index 00c93eb42613..ab820d565052 100644
--- a/Documentation/bpf/standardization/instruction-set.rst
+++ b/Documentation/bpf/standardization/instruction-set.rst
@@ -5,15 +5,29 @@
 BPF Instruction Set Architecture (ISA)
 ======================================
 
-eBPF (which is no longer an acronym for anything), also commonly
+eBPF, also commonly
 referred to as BPF, is a technology with origins in the Linux kernel
 that can run untrusted programs in a privileged context such as an
 operating system kernel. This document specifies the BPF instruction
 set architecture (ISA).
 
+As a historical note, BPF originally stood for Berkeley Packet Filter,
+but now that it can do so much more than packet filtering, the acronym
+no longer makes sense. BPF is now considered a standalone term that
+does not stand for anything. The original BPF is sometimes referred to
+as cBPF (classic BPF) to distinguish it from the now widely deployed
+eBPF (extended BPF).
+
 Documentation conventions
 =========================
 
+The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT",
+"SHOULD", "SHOULD NOT", "RECOMMENDED", "NOT RECOMMENDED", "MAY", and
+"OPTIONAL" in this document are to be interpreted as described in
+BCP 14 `RFC2119 <https://www.rfc-editor.org/info/rfc2119>`_
+`RFC8174 <https://www.rfc-editor.org/info/rfc8174>`_
+when, and only when, they appear in all capitals, as shown here.
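The libbpf logging section added above mentions that a custom log callback can
be installed with ``libbpf_set_print()``. As a minimal C sketch of how a user
space loader might use it (the filtering policy shown is purely illustrative,
not part of the patch)::

    /* Route libbpf's log output through a custom printer. */
    #include <stdarg.h>
    #include <stdio.h>
    #include <bpf/libbpf.h>

    static int my_print(enum libbpf_print_level level,
                        const char *fmt, va_list args)
    {
            /* Example policy: drop debug output, keep info/warnings. */
            if (level == LIBBPF_DEBUG)
                    return 0;
            return vfprintf(stderr, fmt, args);
    }

    int main(void)
    {
            libbpf_set_print(my_print);  /* returns the previous callback */
            /* ... open and load BPF objects as usual ... */
            return 0;
    }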
+
 For brevity and consistency, this document refers to families
 of types using a shorthand syntax and refers to several expository,
 mnemonic functions when describing the semantics of instructions.
@@ -25,7 +39,7 @@ Types
 
 This document refers to integer types with the notation `SN` to specify a
 type's signedness (`S`) and bit width (`N`), respectively.
 
-.. table:: Meaning of signedness notation.
+.. table:: Meaning of signedness notation
 
   ==== =========
   S    Meaning
   ==== =========
   u    unsigned
   s    signed
   ==== =========
 
-.. table:: Meaning of bit-width notation.
+.. table:: Meaning of bit-width notation
 
   ===== =========
   N     Bit width
@@ -52,24 +66,18 @@ numbers.
 
 Functions
 ---------
-* htobe16: Takes an unsigned 16-bit number in host-endian format and
-  returns the equivalent number as an unsigned 16-bit number in big-endian
-  format.
-* htobe32: Takes an unsigned 32-bit number in host-endian format and
-  returns the equivalent number as an unsigned 32-bit number in big-endian
-  format.
-* htobe64: Takes an unsigned 64-bit number in host-endian format and
-  returns the equivalent number as an unsigned 64-bit number in big-endian
-  format.
-* htole16: Takes an unsigned 16-bit number in host-endian format and
-  returns the equivalent number as an unsigned 16-bit number in little-endian
-  format.
-* htole32: Takes an unsigned 32-bit number in host-endian format and
-  returns the equivalent number as an unsigned 32-bit number in little-endian
-  format.
-* htole64: Takes an unsigned 64-bit number in host-endian format and
-  returns the equivalent number as an unsigned 64-bit number in little-endian
-  format.
+
+The following byteswap functions are direction-agnostic. That is,
+the same function is used for conversion in either direction discussed
+below.
+
+* be16: Takes an unsigned 16-bit number and converts it between
+  host byte order and big-endian
+  (`IEN137 <https://www.rfc-editor.org/ien/ien137.txt>`_) byte order.
+* be32: Takes an unsigned 32-bit number and converts it between
+  host byte order and big-endian byte order.
+* be64: Takes an unsigned 64-bit number and converts it between
+  host byte order and big-endian byte order.
 * bswap16: Takes an unsigned 16-bit number in either big- or little-endian
   format and returns the equivalent number with the same bit width but
   opposite endianness.
@@ -79,7 +87,12 @@ Functions
 * bswap64: Takes an unsigned 64-bit number in either big- or little-endian
   format and returns the equivalent number with the same bit width but
   opposite endianness.
-
+* le16: Takes an unsigned 16-bit number and converts it between
+  host byte order and little-endian byte order.
+* le32: Takes an unsigned 32-bit number and converts it between
+  host byte order and little-endian byte order.
+* le64: Takes an unsigned 64-bit number and converts it between
+  host byte order and little-endian byte order.
 
 Definitions
 -----------
@@ -106,9 +119,9 @@ Conformance groups
 
 An implementation does not need to support all instructions specified in this
 document (e.g., deprecated instructions). Instead, a number of conformance
-groups are specified. An implementation must support the base32 conformance
-group and may support additional conformance groups, where supporting a
-conformance group means it must support all instructions in that conformance
+groups are specified. An implementation MUST support the base32 conformance
+group and MAY support additional conformance groups, where supporting a
+conformance group means it MUST support all instructions in that conformance
 group.
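To make the direction-agnostic byteswap functions above concrete, here is a
small C sketch; the ``__BYTE_ORDER__`` macros and ``__builtin_bswap16()`` are
GCC/Clang facilities assumed only for illustration. Because a byte swap is its
own inverse, one function converts in both directions::

    #include <stdint.h>

    /* bswap16: unconditional swap, independent of host byte order. */
    static uint16_t bswap16(uint16_t x)
    {
            return __builtin_bswap16(x);
    }

    /* be16: convert between host and big-endian byte order.  On a
     * little-endian host this swaps; on a big-endian host it is the
     * identity.  Applying it twice yields the original value, which
     * is why the same function serves both directions. */
    static uint16_t be16(uint16_t x)
    {
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
            return bswap16(x);
    #else
            return x;
    #endif
    }

    /* le16: the mirror image of be16. */
    static uint16_t le16(uint16_t x)
    {
    #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
            return x;
    #else
            return bswap16(x);
    #endif
    }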
The use of named conformance groups enables interoperability between a runtime @@ -209,7 +222,7 @@ For example:: 07 1 0 00 00 11 22 33 44 r1 += 0x11223344 // big Note that most instructions do not use all of the fields. -Unused fields shall be cleared to zero. +Unused fields SHALL be cleared to zero. Wide instruction encoding -------------------------- @@ -256,18 +269,20 @@ Instruction classes The three least significant bits of the 'opcode' field store the instruction class: -===== ===== =============================== =================================== -class value description reference -===== ===== =============================== =================================== -LD 0x0 non-standard load operations `Load and store instructions`_ -LDX 0x1 load into register operations `Load and store instructions`_ -ST 0x2 store from immediate operations `Load and store instructions`_ -STX 0x3 store from register operations `Load and store instructions`_ -ALU 0x4 32-bit arithmetic operations `Arithmetic and jump instructions`_ -JMP 0x5 64-bit jump operations `Arithmetic and jump instructions`_ -JMP32 0x6 32-bit jump operations `Arithmetic and jump instructions`_ -ALU64 0x7 64-bit arithmetic operations `Arithmetic and jump instructions`_ -===== ===== =============================== =================================== +.. table:: Instruction class + + ===== ===== =============================== =================================== + class value description reference + ===== ===== =============================== =================================== + LD 0x0 non-standard load operations `Load and store instructions`_ + LDX 0x1 load into register operations `Load and store instructions`_ + ST 0x2 store from immediate operations `Load and store instructions`_ + STX 0x3 store from register operations `Load and store instructions`_ + ALU 0x4 32-bit arithmetic operations `Arithmetic and jump instructions`_ + JMP 0x5 64-bit jump operations `Arithmetic and jump instructions`_ + JMP32 0x6 32-bit jump operations `Arithmetic and jump instructions`_ + ALU64 0x7 64-bit arithmetic operations `Arithmetic and jump instructions`_ + ===== ===== =============================== =================================== Arithmetic and jump instructions ================================ @@ -285,12 +300,14 @@ For arithmetic and jump instructions (``ALU``, ``ALU64``, ``JMP`` and **s (source)** the source operand location, which unless otherwise specified is one of: - ====== ===== ============================================== - source value description - ====== ===== ============================================== - K 0 use 32-bit 'imm' value as source operand - X 1 use 'src_reg' register value as source operand - ====== ===== ============================================== + .. table:: Source operand location + + ====== ===== ============================================== + source value description + ====== ===== ============================================== + K 0 use 32-bit 'imm' value as source operand + X 1 use 'src_reg' register value as source operand + ====== ===== ============================================== **instruction class** the instruction class (see `Instruction classes`_) @@ -305,27 +322,29 @@ The 'code' field encodes the operation as below, where 'src' refers to the the source operand and 'dst' refers to the value of the destination register. 
-===== ===== ======= ========================================================== -name code offset description -===== ===== ======= ========================================================== -ADD 0x0 0 dst += src -SUB 0x1 0 dst -= src -MUL 0x2 0 dst \*= src -DIV 0x3 0 dst = (src != 0) ? (dst / src) : 0 -SDIV 0x3 1 dst = (src != 0) ? (dst s/ src) : 0 -OR 0x4 0 dst \|= src -AND 0x5 0 dst &= src -LSH 0x6 0 dst <<= (src & mask) -RSH 0x7 0 dst >>= (src & mask) -NEG 0x8 0 dst = -dst -MOD 0x9 0 dst = (src != 0) ? (dst % src) : dst -SMOD 0x9 1 dst = (src != 0) ? (dst s% src) : dst -XOR 0xa 0 dst ^= src -MOV 0xb 0 dst = src -MOVSX 0xb 8/16/32 dst = (s8,s16,s32)src -ARSH 0xc 0 :term:`sign extending` dst >>= (src & mask) -END 0xd 0 byte swap operations (see `Byte swap instructions`_ below) -===== ===== ======= ========================================================== +.. table:: Arithmetic instructions + + ===== ===== ======= ========================================================== + name code offset description + ===== ===== ======= ========================================================== + ADD 0x0 0 dst += src + SUB 0x1 0 dst -= src + MUL 0x2 0 dst \*= src + DIV 0x3 0 dst = (src != 0) ? (dst / src) : 0 + SDIV 0x3 1 dst = (src != 0) ? (dst s/ src) : 0 + OR 0x4 0 dst \|= src + AND 0x5 0 dst &= src + LSH 0x6 0 dst <<= (src & mask) + RSH 0x7 0 dst >>= (src & mask) + NEG 0x8 0 dst = -dst + MOD 0x9 0 dst = (src != 0) ? (dst % src) : dst + SMOD 0x9 1 dst = (src != 0) ? (dst s% src) : dst + XOR 0xa 0 dst ^= src + MOV 0xb 0 dst = src + MOVSX 0xb 8/16/32 dst = (s8,s16,s32)src + ARSH 0xc 0 :term:`sign extending` dst >>= (src & mask) + END 0xd 0 byte swap operations (see `Byte swap instructions`_ below) + ===== ===== ======= ========================================================== Underflow and overflow are allowed during arithmetic operations, meaning the 64-bit or 32-bit value will wrap. If BPF program execution would @@ -374,7 +393,7 @@ interpreted as a 64-bit signed value. Note that there are varying definitions of the signed modulo operation when the dividend or divisor are negative, where implementations often vary by language such that Python, Ruby, etc. differ from C, Go, Java, -etc. This specification requires that signed modulo use truncated division +etc. This specification requires that signed modulo MUST use truncated division (where -13 % 3 == -1) as implemented in C, Go, etc.:: a % n = a - n * trunc(a / n) @@ -386,6 +405,19 @@ The ``MOVSX`` instruction does a move operation with sign extension. operands into 64-bit operands. Unlike other arithmetic instructions, ``MOVSX`` is only defined for register source operands (``X``). +``{MOV, K, ALU64}`` means:: + + dst = (s64)imm + +``{MOV, X, ALU}`` means:: + + dst = (u32)src + +``{MOVSX, X, ALU}`` with 'offset' 8 means:: + + dst = (u32)(s32)(s8)src + + The ``NEG`` instruction is only defined when the source bit is clear (``K``). @@ -404,15 +436,17 @@ only and do not use a separate source register or immediate value. For ``ALU``, the 1-bit source operand field in the opcode is used to select what byte order the operation converts from or to. For ``ALU64``, the 1-bit source operand field in the opcode is reserved -and must be set to 0. +and MUST be set to 0. 
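The truncated-modulo rule and the ``MOVSX`` cast chain described above can be
checked directly in C, whose ``%`` operator and integer casts happen to match
the required semantics (a sketch for illustration only)::

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* SMOD: a % n = a - n * trunc(a / n), so -13 % 3 == -1,
             * not 2 as Python or Ruby would compute. */
            assert(-13 % 3 == -1);

            /* {MOVSX, X, ALU} with 'offset' 8: dst = (u32)(s32)(s8)src.
             * The low byte 0x80 is -128 as an s8 and sign-extends. */
            uint64_t src = 0x80;
            uint32_t dst = (uint32_t)(int32_t)(int8_t)(uint8_t)src;
            assert(dst == 0xffffff80);
            return 0;
    }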
-===== ======== ===== ================================================= -class source value description -===== ======== ===== ================================================= -ALU TO_LE 0 convert between host byte order and little endian -ALU TO_BE 1 convert between host byte order and big endian -ALU64 Reserved 0 do byte swap unconditionally -===== ======== ===== ================================================= +.. table:: Byte swap instructions + + ===== ======== ===== ================================================= + class source value description + ===== ======== ===== ================================================= + ALU LE 0 convert between host byte order and little endian + ALU BE 1 convert between host byte order and big endian + ALU64 Reserved 0 do byte swap unconditionally + ===== ======== ===== ================================================= The 'imm' field encodes the width of the swap operations. The following widths are supported: 16, 32 and 64. Width 64 operations belong to the base64 @@ -421,19 +455,19 @@ conformance group. Examples: -``{END, TO_LE, ALU}`` with 'imm' = 16/32/64 means:: +``{END, LE, ALU}`` with 'imm' = 16/32/64 means:: - dst = htole16(dst) - dst = htole32(dst) - dst = htole64(dst) + dst = le16(dst) + dst = le32(dst) + dst = le64(dst) -``{END, TO_BE, ALU}`` with 'imm' = 16/32/64 means:: +``{END, BE, ALU}`` with 'imm' = 16/32/64 means:: - dst = htobe16(dst) - dst = htobe32(dst) - dst = htobe64(dst) + dst = be16(dst) + dst = be32(dst) + dst = be64(dst) -``{END, TO_LE, ALU64}`` with 'imm' = 16/32/64 means:: +``{END, TO, ALU64}`` with 'imm' = 16/32/64 means:: dst = bswap16(dst) dst = bswap32(dst) @@ -448,27 +482,29 @@ otherwise identical operations, and indicates the base64 conformance group unless otherwise specified. The 'code' field encodes the operation as below: -======== ===== ======= ================================= =================================================== -code value src_reg description notes -======== ===== ======= ================================= =================================================== -JA 0x0 0x0 PC += offset {JA, K, JMP} only -JA 0x0 0x0 PC += imm {JA, K, JMP32} only -JEQ 0x1 any PC += offset if dst == src -JGT 0x2 any PC += offset if dst > src unsigned -JGE 0x3 any PC += offset if dst >= src unsigned -JSET 0x4 any PC += offset if dst & src -JNE 0x5 any PC += offset if dst != src -JSGT 0x6 any PC += offset if dst > src signed -JSGE 0x7 any PC += offset if dst >= src signed -CALL 0x8 0x0 call helper function by static ID {CALL, K, JMP} only, see `Helper functions`_ -CALL 0x8 0x1 call PC += imm {CALL, K, JMP} only, see `Program-local functions`_ -CALL 0x8 0x2 call helper function by BTF ID {CALL, K, JMP} only, see `Helper functions`_ -EXIT 0x9 0x0 return {CALL, K, JMP} only -JLT 0xa any PC += offset if dst < src unsigned -JLE 0xb any PC += offset if dst <= src unsigned -JSLT 0xc any PC += offset if dst < src signed -JSLE 0xd any PC += offset if dst <= src signed -======== ===== ======= ================================= =================================================== +.. 
table:: Jump instructions + + ======== ===== ======= ================================= =================================================== + code value src_reg description notes + ======== ===== ======= ================================= =================================================== + JA 0x0 0x0 PC += offset {JA, K, JMP} only + JA 0x0 0x0 PC += imm {JA, K, JMP32} only + JEQ 0x1 any PC += offset if dst == src + JGT 0x2 any PC += offset if dst > src unsigned + JGE 0x3 any PC += offset if dst >= src unsigned + JSET 0x4 any PC += offset if dst & src + JNE 0x5 any PC += offset if dst != src + JSGT 0x6 any PC += offset if dst > src signed + JSGE 0x7 any PC += offset if dst >= src signed + CALL 0x8 0x0 call helper function by static ID {CALL, K, JMP} only, see `Helper functions`_ + CALL 0x8 0x1 call PC += imm {CALL, K, JMP} only, see `Program-local functions`_ + CALL 0x8 0x2 call helper function by BTF ID {CALL, K, JMP} only, see `Helper functions`_ + EXIT 0x9 0x0 return {CALL, K, JMP} only + JLT 0xa any PC += offset if dst < src unsigned + JLE 0xb any PC += offset if dst <= src unsigned + JSLT 0xc any PC += offset if dst < src signed + JSLE 0xd any PC += offset if dst <= src signed + ======== ===== ======= ================================= =================================================== where 'PC' denotes the program counter, and the offset to increment by is in units of 64-bit instructions relative to the instruction following @@ -476,9 +512,6 @@ the jump instruction. Thus 'PC += 1' skips execution of the next instruction if it's a basic instruction or results in undefined behavior if the next instruction is a 128-bit wide instruction. -The BPF program needs to store the return value into register R0 before doing an -``EXIT``. - Example: ``{JSGE, X, JMP32}`` means:: @@ -487,6 +520,10 @@ Example: where 's>=' indicates a signed '>=' comparison. +``{JLE, K, JMP}`` means:: + + if dst <= (u64)(s64)imm goto +offset + ``{JA, K, JMP32}`` means:: gotol +imm @@ -510,19 +547,25 @@ Helper functions are a concept whereby BPF programs can call into a set of function calls exposed by the underlying platform. Historically, each helper function was identified by a static ID -encoded in the 'imm' field. The available helper functions may differ -for each program type, but static IDs are unique across all program types. +encoded in the 'imm' field. Further documentation of helper functions +is outside the scope of this document and standardization is left for +future work, but use is widely deployed and more information can be +found in platform-specific documentation (e.g., Linux kernel documentation). Platforms that support the BPF Type Format (BTF) support identifying a helper function by a BTF ID encoded in the 'imm' field, where the BTF ID -identifies the helper name and type. +identifies the helper name and type. Further documentation of BTF +is outside the scope of this document and standardization is left for +future work, but use is widely deployed and more information can be +found in platform-specific documentation (e.g., Linux kernel documentation). Program-local functions ~~~~~~~~~~~~~~~~~~~~~~~ Program-local functions are functions exposed by the same BPF program as the -caller, and are referenced by offset from the call instruction, similar to -``JA``. The offset is encoded in the 'imm' field of the call instruction. -An ``EXIT`` within the program-local function will return to the caller. 
+caller, and are referenced by offset from the instruction following the call +instruction, similar to ``JA``. The offset is encoded in the 'imm' field of +the call instruction. An ``EXIT`` within the program-local function will +return to the caller. Load and store instructions =========================== @@ -537,6 +580,8 @@ For load and store instructions (``LD``, ``LDX``, ``ST``, and ``STX``), the **mode** The mode modifier is one of: + .. table:: Mode modifier + ============= ===== ==================================== ============= mode modifier value description reference ============= ===== ==================================== ============= @@ -551,6 +596,8 @@ For load and store instructions (``LD``, ``LDX``, ``ST``, and ``STX``), the **sz (size)** The size modifier is one of: + .. table:: Size modifier + ==== ===== ===================== size value description ==== ===== ===================== @@ -619,14 +666,16 @@ The 'imm' field is used to encode the actual atomic operation. Simple atomic operation use a subset of the values defined to encode arithmetic operations in the 'imm' field to encode the atomic operation: -======== ===== =========== -imm value description -======== ===== =========== -ADD 0x00 atomic add -OR 0x40 atomic or -AND 0x50 atomic and -XOR 0xa0 atomic xor -======== ===== =========== +.. table:: Simple atomic operations + + ======== ===== =========== + imm value description + ======== ===== =========== + ADD 0x00 atomic add + OR 0x40 atomic or + AND 0x50 atomic and + XOR 0xa0 atomic xor + ======== ===== =========== ``{ATOMIC, W, STX}`` with 'imm' = ADD means:: @@ -640,13 +689,15 @@ XOR 0xa0 atomic xor In addition to the simple atomic operations, there also is a modifier and two complex atomic operations: -=========== ================ =========================== -imm value description -=========== ================ =========================== -FETCH 0x01 modifier: return old value -XCHG 0xe0 | FETCH atomic exchange -CMPXCHG 0xf0 | FETCH atomic compare and exchange -=========== ================ =========================== +.. table:: Complex atomic operations + + =========== ================ =========================== + imm value description + =========== ================ =========================== + FETCH 0x01 modifier: return old value + XCHG 0xe0 | FETCH atomic exchange + CMPXCHG 0xf0 | FETCH atomic compare and exchange + =========== ================ =========================== The ``FETCH`` modifier is optional for simple atomic operations, and always set for the complex atomic operations. If the ``FETCH`` flag @@ -673,17 +724,19 @@ The following table defines a set of ``{IMM, DW, LD}`` instructions with opcode subtypes in the 'src_reg' field, using new terms such as "map" defined further below: -======= ========================================= =========== ============== -src_reg pseudocode imm type dst type -======= ========================================= =========== ============== -0x0 dst = (next_imm << 32) | imm integer integer -0x1 dst = map_by_fd(imm) map fd map -0x2 dst = map_val(map_by_fd(imm)) + next_imm map fd data address -0x3 dst = var_addr(imm) variable id data address -0x4 dst = code_addr(imm) integer code address -0x5 dst = map_by_idx(imm) map index map -0x6 dst = map_val(map_by_idx(imm)) + next_imm map index data address -======= ========================================= =========== ============== +.. 
table:: 64-bit immediate instructions + + ======= ========================================= =========== ============== + src_reg pseudocode imm type dst type + ======= ========================================= =========== ============== + 0x0 dst = (next_imm << 32) | imm integer integer + 0x1 dst = map_by_fd(imm) map fd map + 0x2 dst = map_val(map_by_fd(imm)) + next_imm map fd data address + 0x3 dst = var_addr(imm) variable id data address + 0x4 dst = code_addr(imm) integer code address + 0x5 dst = map_by_idx(imm) map index map + 0x6 dst = map_val(map_by_idx(imm)) + next_imm map index data address + ======= ========================================= =========== ============== where @@ -725,5 +778,5 @@ carried over from classic BPF. These instructions used an instruction class of ``LD``, a size modifier of ``W``, ``H``, or ``B``, and a mode modifier of ``ABS`` or ``IND``. The 'dst_reg' and 'offset' fields were set to zero, and 'src_reg' was set to zero for ``ABS``. However, these -instructions are deprecated and should no longer be used. All legacy packet +instructions are deprecated and SHOULD no longer be used. All legacy packet access instructions belong to the "packet" conformance group. diff --git a/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml b/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml new file mode 100644 index 000000000000..c578637c5826 --- /dev/null +++ b/Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml @@ -0,0 +1,143 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/airoha,en7581-eth.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Airoha EN7581 Frame Engine Ethernet controller + +maintainers: + - Lorenzo Bianconi + +description: + The frame engine ethernet controller can be found on Airoha SoCs. + These SoCs have multi-GMAC ports. 
+ +properties: + compatible: + enum: + - airoha,en7581-eth + + reg: + items: + - description: Frame engine base address + - description: QDMA0 base address + - description: QDMA1 base address + + reg-names: + items: + - const: fe + - const: qdma0 + - const: qdma1 + + interrupts: + items: + - description: QDMA lan irq0 + - description: QDMA lan irq1 + - description: QDMA lan irq2 + - description: QDMA lan irq3 + - description: QDMA wan irq0 + - description: QDMA wan irq1 + - description: QDMA wan irq2 + - description: QDMA wan irq3 + - description: FE error irq + - description: PDMA irq + + resets: + maxItems: 8 + + reset-names: + items: + - const: fe + - const: pdma + - const: qdma + - const: xsi-mac + - const: hsi0-mac + - const: hsi1-mac + - const: hsi-mac + - const: xfp-mac + + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + +patternProperties: + "^ethernet@[1-4]$": + type: object + unevaluatedProperties: false + $ref: ethernet-controller.yaml# + description: + Ethernet GMAC port associated to the MAC controller + properties: + compatible: + const: airoha,eth-mac + + reg: + minimum: 1 + maximum: 4 + description: GMAC port identifier + + required: + - reg + - compatible + +required: + - compatible + - reg + - interrupts + - resets + - reset-names + +unevaluatedProperties: false + +examples: + - | + #include + #include + #include + + soc { + #address-cells = <2>; + #size-cells = <2>; + + eth: ethernet@1fb50000 { + compatible = "airoha,en7581-eth"; + reg = <0 0x1fb50000 0 0x2600>, + <0 0x1fb54000 0 0x2000>, + <0 0x1fb56000 0 0x2000>; + reg-names = "fe", "qdma0", "qdma1"; + + resets = <&scuclk 44>, + <&scuclk 30>, + <&scuclk 31>, + <&scuclk 6>, + <&scuclk 15>, + <&scuclk 16>, + <&scuclk 17>, + <&scuclk 26>; + reset-names = "fe", "pdma", "qdma", "xsi-mac", + "hsi0-mac", "hsi1-mac", "hsi-mac", + "xfp-mac"; + + interrupts = , + , + , + , + , + , + , + , + , + ; + + #address-cells = <1>; + #size-cells = <0>; + + mac: ethernet@1 { + compatible = "airoha,eth-mac"; + reg = <1>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/net/arc_emac.txt b/Documentation/devicetree/bindings/net/arc_emac.txt deleted file mode 100644 index c73a0e9c625e..000000000000 --- a/Documentation/devicetree/bindings/net/arc_emac.txt +++ /dev/null @@ -1,46 +0,0 @@ -* Synopsys ARC EMAC 10/100 Ethernet driver (EMAC) - -Required properties: -- compatible: Should be "snps,arc-emac" -- reg: Address and length of the register set for the device -- interrupts: Should contain the EMAC interrupts -- max-speed: see ethernet.txt file in the same directory. -- phy: see ethernet.txt file in the same directory. - -Optional properties: -- phy-reset-gpios : Should specify the gpio for phy reset -- phy-reset-duration : Reset duration in milliseconds. Should present - only if property "phy-reset-gpios" is available. Missing the property - will have the duration be 1 millisecond. Numbers greater than 1000 are - invalid and 1 millisecond will be used instead. - -Clock handling: -The clock frequency is needed to calculate and set polling period of EMAC. -It must be provided by one of: -- clock-frequency: CPU frequency. -- clocks: reference to the clock supplying the EMAC. - -Child nodes of the driver are the individual PHY devices connected to the -MDIO bus. They must have a "reg" property given the PHY address on the MDIO bus. 
-
-Examples:
-
-	ethernet@c0fc2000 {
-		compatible = "snps,arc-emac";
-		reg = <0xc0fc2000 0x3c>;
-		interrupts = <6>;
-		mac-address = [ 00 11 22 33 44 55 ];
-
-		clock-frequency = <80000000>;
-		/* or */
-		clocks = <&emac_clock>;
-
-		max-speed = <100>;
-		phy = <&phy0>;
-
-		#address-cells = <1>;
-		#size-cells = <0>;
-		phy0: ethernet-phy@0 {
-			reg = <1>;
-		};
-	};
diff --git a/Documentation/devicetree/bindings/net/bluetooth/mediatek,mt7622-bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/mediatek,mt7622-bluetooth.yaml
new file mode 100644
index 000000000000..3f9e69208127
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/bluetooth/mediatek,mt7622-bluetooth.yaml
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/bluetooth/mediatek,mt7622-bluetooth.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MediaTek SoC built-in Bluetooth
+
+description:
+  This device is a serial-attached device to the BTIF device and thus it must
+  be a child node of the serial node with BTIF. The dt-binding details for the
+  BTIF device can be found in Documentation/devicetree/bindings/serial/8250.yaml.
+
+maintainers:
+  - Sean Wang
+
+allOf:
+  - $ref: bluetooth-controller.yaml#
+
+properties:
+  compatible:
+    const: mediatek,mt7622-bluetooth
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    const: ref
+
+  power-domains:
+    maxItems: 1
+
+required:
+  - clocks
+  - clock-names
+  - power-domains
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/power/mt7622-power.h>
+
+    serial {
+        bluetooth {
+            compatible = "mediatek,mt7622-bluetooth";
+            power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>;
+            clocks = <&clk25m>;
+            clock-names = "ref";
+        };
+    };
diff --git a/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml b/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml
index f01a3988538c..37a65badb448 100644
--- a/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml
+++ b/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml
@@ -31,6 +31,9 @@ properties:
     This property depends on the module vendor's configuration.
+ firmware-name: + maxItems: 1 + required: - compatible @@ -42,5 +45,6 @@ examples: bluetooth { compatible = "nxp,88w8987-bt"; fw-init-baudrate = <3000000>; + firmware-name = "uartuart8987_bt_v0.bin"; }; }; diff --git a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml index 055a3351880b..68c5ed111417 100644 --- a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml +++ b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml @@ -62,6 +62,9 @@ properties: vdddig-supply: description: VDD_DIG supply regulator handle + vddbtcmx-supply: + description: VDD_BT_CMX supply regulator handle + vddbtcxmx-supply: description: VDD_BT_CXMX supply regulator handle @@ -74,6 +77,9 @@ properties: vddrfa1p7-supply: description: VDD_RFA_1P7 supply regulator handle + vddrfa1p8-supply: + description: VDD_RFA_1P8 supply regulator handle + vddrfa1p2-supply: description: VDD_RFA_1P2 supply regulator handle @@ -86,6 +92,12 @@ properties: vddasd-supply: description: VDD_ASD supply regulator handle + vddwlcx-supply: + description: VDD_WLCX supply regulator handle + + vddwlmx-supply: + description: VDD_WLMX supply regulator handle + max-speed: description: see Documentation/devicetree/bindings/serial/serial.yaml @@ -176,14 +188,27 @@ allOf: - qcom,wcn7850-bt then: required: - - enable-gpios - - swctrl-gpios - - vddio-supply + - vddrfacmn-supply - vddaon-supply - - vdddig-supply + - vddwlcx-supply + - vddwlmx-supply - vddrfa0p8-supply - vddrfa1p2-supply - - vddrfa1p9-supply + - vddrfa1p8-supply + - if: + properties: + compatible: + contains: + enum: + - qcom,qca6390-bt + then: + required: + - vddrfacmn-supply + - vddaon-supply + - vddbtcmx-supply + - vddrfa0p8-supply + - vddrfa1p2-supply + - vddrfa1p7-supply examples: - | diff --git a/Documentation/devicetree/bindings/net/can/xilinx,can.yaml b/Documentation/devicetree/bindings/net/can/xilinx,can.yaml index 8d4e5af6fd6c..40835497050a 100644 --- a/Documentation/devicetree/bindings/net/can/xilinx,can.yaml +++ b/Documentation/devicetree/bindings/net/can/xilinx,can.yaml @@ -5,7 +5,7 @@ $id: http://devicetree.org/schemas/net/can/xilinx,can.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# title: - Xilinx Axi CAN/Zynq CANPS controller + Xilinx CAN and CANFD controller maintainers: - Appana Durga Kedareswara rao diff --git a/Documentation/devicetree/bindings/net/cdns,macb.yaml b/Documentation/devicetree/bindings/net/cdns,macb.yaml index 2c71e2cf3a2f..3c30dd23cd4e 100644 --- a/Documentation/devicetree/bindings/net/cdns,macb.yaml +++ b/Documentation/devicetree/bindings/net/cdns,macb.yaml @@ -146,6 +146,7 @@ patternProperties: magic-packet: type: boolean + deprecated: true description: Indicates that the hardware supports waking up via magic packet. 
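For orientation, the reworked WCN7850 requirements in the qualcomm-bluetooth
schema above correspond to a board DTS node along these lines; this is a hedged
sketch in which the enclosing serial node and the ``&vreg_*`` regulator
phandles are illustrative placeholders, not part of the binding::

    serial {
        bluetooth {
            compatible = "qcom,wcn7850-bt";
            vddrfacmn-supply = <&vreg_pmu_rfa_cmn>;
            vddaon-supply = <&vreg_pmu_aon>;
            vddwlcx-supply = <&vreg_pmu_wlcx>;
            vddwlmx-supply = <&vreg_pmu_wlmx>;
            vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>;
            vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>;
            vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>;
            max-speed = <3200000>;
        };
    };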
diff --git a/Documentation/devicetree/bindings/net/dsa/lantiq,gswip.yaml b/Documentation/devicetree/bindings/net/dsa/lantiq,gswip.yaml new file mode 100644 index 000000000000..f3154b19af78 --- /dev/null +++ b/Documentation/devicetree/bindings/net/dsa/lantiq,gswip.yaml @@ -0,0 +1,202 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/dsa/lantiq,gswip.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Lantiq GSWIP Ethernet switches + +allOf: + - $ref: dsa.yaml#/$defs/ethernet-ports + +maintainers: + - Hauke Mehrtens + +properties: + compatible: + enum: + - lantiq,xrx200-gswip + - lantiq,xrx300-gswip + - lantiq,xrx330-gswip + + reg: + minItems: 3 + maxItems: 3 + + reg-names: + items: + - const: switch + - const: mdio + - const: mii + + mdio: + $ref: /schemas/net/mdio.yaml# + unevaluatedProperties: false + + properties: + compatible: + const: lantiq,xrx200-mdio + + required: + - compatible + + gphy-fw: + type: object + properties: + '#address-cells': + const: 1 + + '#size-cells': + const: 0 + + compatible: + items: + - enum: + - lantiq,xrx200-gphy-fw + - lantiq,xrx300-gphy-fw + - lantiq,xrx330-gphy-fw + - const: lantiq,gphy-fw + + lantiq,rcu: + $ref: /schemas/types.yaml#/definitions/phandle + description: phandle to the RCU syscon + + patternProperties: + "^gphy@[0-9a-f]{1,2}$": + type: object + + additionalProperties: false + + properties: + reg: + minimum: 0 + maximum: 255 + description: + Offset of the GPHY firmware register in the RCU register range + + resets: + items: + - description: GPHY reset line + + reset-names: + items: + - const: gphy + + required: + - reg + + required: + - compatible + - lantiq,rcu + + additionalProperties: false + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + switch@e108000 { + compatible = "lantiq,xrx200-gswip"; + reg = <0xe108000 0x3100>, /* switch */ + <0xe10b100 0xd8>, /* mdio */ + <0xe10b1d8 0x130>; /* mii */ + dsa,member = <0 0>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + label = "lan3"; + phy-mode = "rgmii"; + phy-handle = <&phy0>; + }; + + port@1 { + reg = <1>; + label = "lan4"; + phy-mode = "rgmii"; + phy-handle = <&phy1>; + }; + + port@2 { + reg = <2>; + label = "lan2"; + phy-mode = "internal"; + phy-handle = <&phy11>; + }; + + port@4 { + reg = <4>; + label = "lan1"; + phy-mode = "internal"; + phy-handle = <&phy13>; + }; + + port@5 { + reg = <5>; + label = "wan"; + phy-mode = "rgmii"; + phy-handle = <&phy5>; + }; + + port@6 { + reg = <0x6>; + phy-mode = "internal"; + ethernet = <ð0>; + + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + }; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + compatible = "lantiq,xrx200-mdio"; + + phy0: ethernet-phy@0 { + reg = <0x0>; + }; + phy1: ethernet-phy@1 { + reg = <0x1>; + }; + phy5: ethernet-phy@5 { + reg = <0x5>; + }; + phy11: ethernet-phy@11 { + reg = <0x11>; + }; + phy13: ethernet-phy@13 { + reg = <0x13>; + }; + }; + + gphy-fw { + #address-cells = <1>; + #size-cells = <0>; + compatible = "lantiq,xrx200-gphy-fw", "lantiq,gphy-fw"; + lantiq,rcu = <&rcu0>; + + gphy@20 { + reg = <0x20>; + + resets = <&reset0 31 30>; + reset-names = "gphy"; + }; + + gphy@68 { + reg = <0x68>; + + resets = <&reset0 29 28>; + reset-names = "gphy"; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/net/dsa/lantiq-gswip.txt b/Documentation/devicetree/bindings/net/dsa/lantiq-gswip.txt deleted file mode 100644 index 8bb1eff21cb1..000000000000 
--- a/Documentation/devicetree/bindings/net/dsa/lantiq-gswip.txt +++ /dev/null @@ -1,146 +0,0 @@ -Lantiq GSWIP Ethernet switches -================================== - -Required properties for GSWIP core: - -- compatible : "lantiq,xrx200-gswip" for the embedded GSWIP in the - xRX200 SoC - "lantiq,xrx300-gswip" for the embedded GSWIP in the - xRX300 SoC - "lantiq,xrx330-gswip" for the embedded GSWIP in the - xRX330 SoC -- reg : memory range of the GSWIP core registers - : memory range of the GSWIP MDIO registers - : memory range of the GSWIP MII registers - -See Documentation/devicetree/bindings/net/dsa/dsa.txt for a list of -additional required and optional properties. - - -Required properties for MDIO bus: -- compatible : "lantiq,xrx200-mdio" for the MDIO bus inside the GSWIP - core of the xRX200 SoC and the PHYs connected to it. - -See Documentation/devicetree/bindings/net/mdio.txt for a list of additional -required and optional properties. - - -Required properties for GPHY firmware loading: -- compatible : "lantiq,xrx200-gphy-fw", "lantiq,gphy-fw" - "lantiq,xrx300-gphy-fw", "lantiq,gphy-fw" - "lantiq,xrx330-gphy-fw", "lantiq,gphy-fw" - for the loading of the firmware into the embedded - GPHY core of the SoC. -- lantiq,rcu : reference to the rcu syscon - -The GPHY firmware loader has a list of GPHY entries, one for each -embedded GPHY - -- reg : Offset of the GPHY firmware register in the RCU - register range -- resets : list of resets of the embedded GPHY -- reset-names : list of names of the resets - -Example: - -Ethernet switch on the VRX200 SoC: - -switch@e108000 { - #address-cells = <1>; - #size-cells = <0>; - compatible = "lantiq,xrx200-gswip"; - reg = < 0xe108000 0x3100 /* switch */ - 0xe10b100 0xd8 /* mdio */ - 0xe10b1d8 0x130 /* mii */ - >; - dsa,member = <0 0>; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - port@0 { - reg = <0>; - label = "lan3"; - phy-mode = "rgmii"; - phy-handle = <&phy0>; - }; - - port@1 { - reg = <1>; - label = "lan4"; - phy-mode = "rgmii"; - phy-handle = <&phy1>; - }; - - port@2 { - reg = <2>; - label = "lan2"; - phy-mode = "internal"; - phy-handle = <&phy11>; - }; - - port@4 { - reg = <4>; - label = "lan1"; - phy-mode = "internal"; - phy-handle = <&phy13>; - }; - - port@5 { - reg = <5>; - label = "wan"; - phy-mode = "rgmii"; - phy-handle = <&phy5>; - }; - - port@6 { - reg = <0x6>; - ethernet = <ð0>; - }; - }; - - mdio { - #address-cells = <1>; - #size-cells = <0>; - compatible = "lantiq,xrx200-mdio"; - reg = <0>; - - phy0: ethernet-phy@0 { - reg = <0x0>; - }; - phy1: ethernet-phy@1 { - reg = <0x1>; - }; - phy5: ethernet-phy@5 { - reg = <0x5>; - }; - phy11: ethernet-phy@11 { - reg = <0x11>; - }; - phy13: ethernet-phy@13 { - reg = <0x13>; - }; - }; - - gphy-fw { - compatible = "lantiq,xrx200-gphy-fw", "lantiq,gphy-fw"; - lantiq,rcu = <&rcu0>; - #address-cells = <1>; - #size-cells = <0>; - - gphy@20 { - reg = <0x20>; - - resets = <&reset0 31 30>; - reset-names = "gphy"; - }; - - gphy@68 { - reg = <0x68>; - - resets = <&reset0 29 28>; - reset-names = "gphy"; - }; - }; -}; diff --git a/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml b/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml index 1c2444121e60..7e405ad96eb2 100644 --- a/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml +++ b/Documentation/devicetree/bindings/net/dsa/mediatek,mt7530.yaml @@ -22,16 +22,16 @@ description: | The MT7988 SoC comes with a built-in switch similar to MT7531 as well as four Gigabit Ethernet PHYs. 
The switch registers are directly mapped into the SoC's - memory map rather than using MDIO. The switch got an internally connected 10G + memory map rather than using MDIO. The switch has an internally connected 10G CPU port and 4 user ports connected to the built-in Gigabit Ethernet PHYs. - MT7530 in MT7620AN, MT7620DA, MT7620DAN and MT7620NN SoCs has got 10/100 PHYs + The MT7530 in MT7620AN, MT7620DA, MT7620DAN and MT7620NN SoCs has 10/100 PHYs and the switch registers are directly mapped into SoC's memory map rather than using MDIO. The DSA driver currently doesn't support MT7620 variants. There is only the standalone version of MT7531. - Port 5 on MT7530 has got various ways of configuration: + Port 5 on MT7530 supports various configurations: - Port 5 can be used as a CPU port. diff --git a/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt deleted file mode 100644 index 258bef483673..000000000000 --- a/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.txt +++ /dev/null @@ -1,129 +0,0 @@ -Vitesse VSC73xx Switches -======================== - -This defines device tree bindings for the Vitesse VSC73xx switch chips. -The Vitesse company has been acquired by Microsemi and Microsemi has -been acquired Microchip but retains this vendor branding. - -The currently supported switch chips are: -Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch -Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch -Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch -Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch - -This switch could have two different management interface. - -If SPI interface is used, the device tree node is an SPI device so it must -reside inside a SPI bus device tree node, see spi/spi-bus.txt - -When the chip is connected to a parallel memory bus and work in memory-mapped -I/O mode, a platform device is used to represent the vsc73xx. In this case it -must reside inside a platform bus device tree node. - -Required properties: - -- compatible: must be exactly one of: - "vitesse,vsc7385" - "vitesse,vsc7388" - "vitesse,vsc7395" - "vitesse,vsc7398" -- gpio-controller: indicates that this switch is also a GPIO controller, - see gpio/gpio.txt -- #gpio-cells: this must be set to <2> and indicates that we are a twocell - GPIO controller, see gpio/gpio.txt - -Optional properties: - -- reset-gpios: a handle to a GPIO line that can issue reset of the chip. - It should be tagged as active low. - -Required subnodes: - -See net/dsa/dsa.txt for a list of additional required and optional properties -and subnodes of DSA switches. 
-
-Examples:
-
-SPI:
-switch@0 {
-	compatible = "vitesse,vsc7395";
-	reg = <0>;
-	/* Specified for 2.5 MHz or below */
-	spi-max-frequency = <2500000>;
-	gpio-controller;
-	#gpio-cells = <2>;
-
-	ports {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		port@0 {
-			reg = <0>;
-			label = "lan1";
-		};
-		port@1 {
-			reg = <1>;
-			label = "lan2";
-		};
-		port@2 {
-			reg = <2>;
-			label = "lan3";
-		};
-		port@3 {
-			reg = <3>;
-			label = "lan4";
-		};
-		vsc: port@6 {
-			reg = <6>;
-			ethernet = <&gmac1>;
-			phy-mode = "rgmii";
-			fixed-link {
-				speed = <1000>;
-				full-duplex;
-				pause;
-			};
-		};
-	};
-};
-
-Platform:
-switch@2,0 {
-	#address-cells = <1>;
-	#size-cells = <1>;
-	compatible = "vitesse,vsc7385";
-	reg = <0x2 0x0 0x20000>;
-	reset-gpios = <&gpio0 12 GPIO_ACTIVE_LOW>;
-
-	ports {
-		#address-cells = <1>;
-		#size-cells = <0>;
-
-		port@0 {
-			reg = <0>;
-			label = "lan1";
-		};
-		port@1 {
-			reg = <1>;
-			label = "lan2";
-		};
-		port@2 {
-			reg = <2>;
-			label = "lan3";
-		};
-		port@3 {
-			reg = <3>;
-			label = "lan4";
-		};
-		vsc: port@6 {
-			reg = <6>;
-			ethernet = <&enet0>;
-			phy-mode = "rgmii";
-			fixed-link {
-				speed = <1000>;
-				full-duplex;
-				pause;
-			};
-		};
-	};
-
-};
diff --git a/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.yaml b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.yaml
new file mode 100644
index 000000000000..b99d7a694b70
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/dsa/vitesse,vsc73xx.yaml
@@ -0,0 +1,162 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/dsa/vitesse,vsc73xx.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Vitesse VSC73xx DSA Switches
+
+maintainers:
+  - Linus Walleij
+
+description:
+  The Vitesse DSA Switches were produced in the early-to-mid 2000s.
+
+  The Vitesse company has been acquired by Microsemi and Microsemi has
+  been acquired by Microchip, but the new owner retains this vendor branding.
+
+  The currently supported switch chips are
+  Vitesse VSC7385 SparX-G5 5+1-port Integrated Gigabit Ethernet Switch
+  Vitesse VSC7388 SparX-G8 8-port Integrated Gigabit Ethernet Switch
+  Vitesse VSC7395 SparX-G5e 5+1-port Integrated Gigabit Ethernet Switch
+  Vitesse VSC7398 SparX-G8e 8-port Integrated Gigabit Ethernet Switch
+
+  This switch can use one of two different management interfaces.
+
+  If the SPI interface is used, the device tree node is an SPI device so it
+  must reside inside a SPI bus device tree node, see spi/spi-bus.txt
+
+  When the chip is connected to a parallel memory bus and works in
+  memory-mapped I/O mode, a platform device is used to represent the vsc73xx.
+  In this case it must reside inside a platform bus device tree node.
+
+properties:
+  compatible:
+    enum:
+      - vitesse,vsc7385
+      - vitesse,vsc7388
+      - vitesse,vsc7395
+      - vitesse,vsc7398
+
+  reg:
+    maxItems: 1
+
+  gpio-controller: true
+  "#gpio-cells":
+    const: 2
+
+  reset-gpios:
+    description: GPIO to be used to reset the whole device
+    maxItems: 1
+
+allOf:
+  - $ref: dsa.yaml#/$defs/ethernet-ports
+
+# This checks if reg is a chipselect so the device is on an SPI
+# bus, the if-clause will fail if reg is a tuple such as for a
+# platform device.
+if: + properties: + reg: + minimum: 0 + maximum: 256 +then: + $ref: /schemas/spi/spi-peripheral-props.yaml# + +required: + - compatible + - reg + +unevaluatedProperties: false + +examples: + - | + #include + + spi { + #address-cells = <1>; + #size-cells = <0>; + + ethernet-switch@0 { + compatible = "vitesse,vsc7395"; + reg = <0>; + spi-max-frequency = <2500000>; + gpio-controller; + #gpio-cells = <2>; + + ethernet-ports { + #address-cells = <1>; + #size-cells = <0>; + + ethernet-port@0 { + reg = <0>; + label = "lan1"; + }; + ethernet-port@1 { + reg = <1>; + label = "lan2"; + }; + ethernet-port@2 { + reg = <2>; + label = "lan3"; + }; + ethernet-port@3 { + reg = <3>; + label = "lan4"; + }; + ethernet-port@6 { + reg = <6>; + ethernet = <&gmac1>; + phy-mode = "rgmii"; + fixed-link { + speed = <1000>; + full-duplex; + pause; + }; + }; + }; + }; + }; + + bus { + #address-cells = <1>; + #size-cells = <1>; + + ethernet-switch@10000000 { + compatible = "vitesse,vsc7385"; + reg = <0x10000000 0x20000>; + reset-gpios = <&gpio0 12 GPIO_ACTIVE_LOW>; + + ethernet-ports { + #address-cells = <1>; + #size-cells = <0>; + + ethernet-port@0 { + reg = <0>; + label = "lan1"; + }; + ethernet-port@1 { + reg = <1>; + label = "lan2"; + }; + ethernet-port@2 { + reg = <2>; + label = "lan3"; + }; + ethernet-port@3 { + reg = <3>; + label = "lan4"; + }; + ethernet-port@6 { + reg = <6>; + ethernet = <&enet0>; + phy-mode = "rgmii"; + fixed-link { + speed = <1000>; + full-duplex; + pause; + }; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/net/ethernet-controller.yaml b/Documentation/devicetree/bindings/net/ethernet-controller.yaml index b2785b03139f..45819b235800 100644 --- a/Documentation/devicetree/bindings/net/ethernet-controller.yaml +++ b/Documentation/devicetree/bindings/net/ethernet-controller.yaml @@ -103,6 +103,7 @@ properties: - usxgmii - 10gbase-r - 25gbase-r + - 10g-qxgmii phy-mode: $ref: "#/properties/phy-connection-type" diff --git a/Documentation/devicetree/bindings/net/ethernet-phy.yaml b/Documentation/devicetree/bindings/net/ethernet-phy.yaml index 8fb2a6ee7e5b..d9b62741a225 100644 --- a/Documentation/devicetree/bindings/net/ethernet-phy.yaml +++ b/Documentation/devicetree/bindings/net/ethernet-phy.yaml @@ -93,6 +93,14 @@ properties: the turn around line low at end of the control phase of the MDIO transaction. + brr-mode: + $ref: /schemas/types.yaml#/definitions/flag + description: + If set, indicates the network cable interface is an alternative one as + defined in the BroadR-Reach link mode specification under 1BR-100 and + 1BR-10 names. The PHY must be configured to operate in BroadR-Reach mode + by software. + clocks: maxItems: 1 description: diff --git a/Documentation/devicetree/bindings/net/fsl,enetc-ierb.yaml b/Documentation/devicetree/bindings/net/fsl,enetc-ierb.yaml new file mode 100644 index 000000000000..c8a654310b90 --- /dev/null +++ b/Documentation/devicetree/bindings/net/fsl,enetc-ierb.yaml @@ -0,0 +1,38 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/fsl,enetc-ierb.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Integrated Endpoint Register Block + +description: + The fsl_enetc driver can probe on the Integrated Endpoint Register Block, + which preconfigures the FIFO limits for the ENETC ports. 
+
+maintainers:
+  - Frank Li
+  - Vladimir Oltean
+  - Wei Fang
+  - Claudiu Manoil
+
+properties:
+  compatible:
+    enum:
+      - fsl,ls1028a-enetc-ierb
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    endpoint-config@f0800000 {
+        compatible = "fsl,ls1028a-enetc-ierb";
+        reg = <0xf0800000 0x10000>;
+    };
diff --git a/Documentation/devicetree/bindings/net/fsl,enetc-mdio.yaml b/Documentation/devicetree/bindings/net/fsl,enetc-mdio.yaml
new file mode 100644
index 000000000000..c1dd6aa04321
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl,enetc-mdio.yaml
@@ -0,0 +1,57 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/fsl,enetc-mdio.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ENETC external MDIO PCIe endpoint device
+
+description:
+  NETC provides an external master MDIO interface (EMDIO) for managing external
+  devices (PHYs). EMDIO supports both Clause 22 and 45 protocols. It also
+  provides a means for different software modules to share a single set of
+  MDIO signals to access their PHYs.
+
+maintainers:
+  - Frank Li
+  - Vladimir Oltean
+  - Wei Fang
+  - Claudiu Manoil
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - pci1957,ee01
+      - const: fsl,enetc-mdio
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+allOf:
+  - $ref: mdio.yaml
+  - $ref: /schemas/pci/pci-device.yaml
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    pcie {
+      #address-cells = <3>;
+      #size-cells = <2>;
+
+      mdio@0,3 {
+        compatible = "pci1957,ee01", "fsl,enetc-mdio";
+        reg = <0x000300 0 0 0 0>;
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        ethernet-phy@2 {
+          reg = <0x2>;
+        };
+      };
+    };
diff --git a/Documentation/devicetree/bindings/net/fsl,enetc.yaml b/Documentation/devicetree/bindings/net/fsl,enetc.yaml
new file mode 100644
index 000000000000..e152c93998fe
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl,enetc.yaml
@@ -0,0 +1,66 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/fsl,enetc.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: The NIC functionality of NXP NETC
+
+description:
+  The NIC functionality in NETC is known as EtherNET Controller (ENETC). ENETC
+  supports virtualization/isolation based on PCIe Single Root IO Virtualization
+  (SR-IOV), advanced QoS with 8 traffic classes and 4 drop resilience levels,
+  and a full range of TSN standards and NIC offload capabilities.
+
+maintainers:
+  - Frank Li
+  - Vladimir Oltean
+  - Wei Fang
+  - Claudiu Manoil
+
+properties:
+  compatible:
+    items:
+      - enum:
+          - pci1957,e100
+      - const: fsl,enetc
+
+  reg:
+    maxItems: 1
+
+  mdio:
+    $ref: mdio.yaml
+    unevaluatedProperties: false
+    description: Optional child node for ENETC instance, otherwise use NETC EMDIO.
+ +required: + - compatible + - reg + +allOf: + - $ref: /schemas/pci/pci-device.yaml + - $ref: ethernet-controller.yaml + +unevaluatedProperties: false + +examples: + - | + pcie { + #address-cells = <3>; + #size-cells = <2>; + + ethernet@0,0 { + compatible = "pci1957,e100", "fsl,enetc"; + reg = <0x000000 0 0 0 0>; + phy-handle = <&sgmii_phy0>; + phy-connection-type = "sgmii"; + + mdio { + #address-cells = <1>; + #size-cells = <0>; + phy@2 { + reg = <0x2>; + }; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/net/fsl,fman-mdio.yaml b/Documentation/devicetree/bindings/net/fsl,fman-mdio.yaml new file mode 100644 index 000000000000..6b2c0aa407a2 --- /dev/null +++ b/Documentation/devicetree/bindings/net/fsl,fman-mdio.yaml @@ -0,0 +1,123 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/fsl,fman-mdio.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Freescale Frame Manager MDIO Device + +maintainers: + - Frank Li + +description: FMan MDIO Node. + The MDIO is a bus to which the PHY devices are connected. + +properties: + compatible: + enum: + - fsl,fman-mdio + - fsl,fman-xmdio + - fsl,fman-memac-mdio + description: + Must include "fsl,fman-mdio" for 1 Gb/s MDIO from FMan v2. + Must include "fsl,fman-xmdio" for 10 Gb/s MDIO from FMan v2. + Must include "fsl,fman-memac-mdio" for 1/10 Gb/s MDIO from + FMan v3. + + reg: + maxItems: 1 + + clocks: + items: + - description: A reference to the input clock of the controller + from which the MDC frequency is derived. + + interrupts: + maxItems: 1 + + fsl,fman-internal-mdio: + $ref: /schemas/types.yaml#/definitions/flag + description: + Fman has internal MDIO for internal PCS(Physical + Coding Sublayer) PHYs and external MDIO for external PHYs. + The settings and programming routines for internal/external + MDIO are different. Must be included for internal MDIO. + + fsl,erratum-a009885: + $ref: /schemas/types.yaml#/definitions/flag + description: Indicates the presence of the A009885 + erratum describing that the contents of MDIO_DATA may + become corrupt unless it is read within 16 MDC cycles + of MDIO_CFG[BSY] being cleared, when performing an + MDIO read operation. + + fsl,erratum-a011043: + $ref: /schemas/types.yaml#/definitions/flag + description: + Indicates the presence of the A011043 erratum + describing that the MDIO_CFG[MDIO_RD_ER] bit may be falsely + set when reading internal PCS registers. MDIO reads to + internal PCS registers may result in having the + MDIO_CFG[MDIO_RD_ER] bit set, even when there is no error and + read data (MDIO_DATA[MDIO_DATA]) is correct. + Software may get false read error when reading internal + PCS registers through MDIO. As a workaround, all internal + MDIO accesses should ignore the MDIO_CFG[MDIO_RD_ER] bit. + + For internal PHY device on internal mdio bus, a PHY node should be created. + See the definition of the PHY node in booting-without-of.txt for an + example of how to define a PHY (Internal PHY has no interrupt line). + - For "fsl,fman-mdio" compatible internal mdio bus, the PHY is TBI PHY. + - For "fsl,fman-memac-mdio" compatible internal mdio bus, the PHY is PCS PHY. + The PCS PHY address should correspond to the value of the appropriate + MDEV_PORT. + + little-endian: + $ref: /schemas/types.yaml#/definitions/flag + description: + IP block is little-endian mode. The default endian mode is big-endian. 
+
+required:
+  - compatible
+  - reg
+
+allOf:
+  - $ref: mdio.yaml#
+
+unevaluatedProperties: false
+
+examples:
+  - |
+    mdio@f1000 {
+        compatible = "fsl,fman-xmdio";
+        reg = <0xf1000 0x1000>;
+        interrupts = <101 2 0 0>;
+    };
+
+  - |
+    mdio@e3120 {
+        compatible = "fsl,fman-mdio";
+        reg = <0xe3120 0xee0>;
+        fsl,fman-internal-mdio;
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        tbi-phy@8 {
+            reg = <0x8>;
+            device_type = "tbi-phy";
+        };
+    };
+
+  - |
+    mdio@f1000 {
+        compatible = "fsl,fman-memac-mdio";
+        reg = <0xf1000 0x1000>;
+        fsl,fman-internal-mdio;
+        #address-cells = <1>;
+        #size-cells = <0>;
+
+        pcsphy6: ethernet-phy@0 {
+            reg = <0x0>;
+        };
+    };
+
diff --git a/Documentation/devicetree/bindings/net/fsl,fman-muram.yaml b/Documentation/devicetree/bindings/net/fsl,fman-muram.yaml
new file mode 100644
index 000000000000..aa71acc7fa5b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl,fman-muram.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/fsl,fman-muram.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Frame Manager MURAM Device
+
+maintainers:
+  - Frank Li
+
+description: |
+  FMan internal memory - shared between all the FMan modules.
+  It contains common data structures that are written to or read by
+  the modules.
+
+  FMan internal memory is split into the following parts:
+    Packet buffering (Tx/Rx FIFOs)
+    Frames internal context
+
+properties:
+  compatible:
+    enum:
+      - fsl,fman-muram
+
+  reg:
+    maxItems: 1
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    muram@0 {
+        compatible = "fsl,fman-muram";
+        reg = <0x0 0x28000>;
+    };
diff --git a/Documentation/devicetree/bindings/net/fsl,fman-port.yaml b/Documentation/devicetree/bindings/net/fsl,fman-port.yaml
new file mode 100644
index 000000000000..9de445307830
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl,fman-port.yaml
@@ -0,0 +1,75 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/fsl,fman-port.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Frame Manager Port Device
+
+maintainers:
+  - Frank Li
+
+description: |
+  The Frame Manager (FMan) supports several types of hardware ports:
+    Ethernet receiver (RX)
+    Ethernet transmitter (TX)
+    Offline/Host command (O/H)
+
+properties:
+  compatible:
+    enum:
+      - fsl,fman-v2-port-oh
+      - fsl,fman-v2-port-rx
+      - fsl,fman-v2-port-tx
+      - fsl,fman-v3-port-oh
+      - fsl,fman-v3-port-rx
+      - fsl,fman-v3-port-tx
+
+  cell-index:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      Specifies the hardware port ID.
+      Each hardware port on the FMan has its own hardware PortID.
+      A superset of all hardware Port IDs is available in the FMan
+      Reference Manual, in the "FMan Hardware Ports in Freescale
+      Devices" table.
+
+      Each hardware port is assigned a 4KB, port-specific page in
+      the FMan hardware port memory region (which is part of the
+      FMan memory map). The first 4 KB in the FMan hardware ports
+      memory region is used for what are called common registers.
+      The subsequent 63 4KB pages are allocated to the hardware
+      ports. The page of a specific port is determined by the
+      cell-index.
+
+  reg:
+    items:
+      - description: There is one reg region describing the port
+          configuration registers.
+
+  fsl,fman-10g-port:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description: The default port rate is 1G.
+      If this property exists, the port is a 10G port.
+
+  fsl,fman-best-effort-port:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description: Can be defined only if 10G support
+      (fsl,fman-10g-port) is set. This property marks a best-effort
+      10G port (a 10G port that may not be capable of line rate).
+
+required:
+  - compatible
+  - reg
+  - cell-index
+
+additionalProperties: false
+
+examples:
+  - |
+    port@a8000 {
+        compatible = "fsl,fman-v2-port-tx";
+        reg = <0xa8000 0x1000>;
+        cell-index = <0x28>;
+    };
+
diff --git a/Documentation/devicetree/bindings/net/fsl,fman.yaml b/Documentation/devicetree/bindings/net/fsl,fman.yaml
new file mode 100644
index 000000000000..9bbf39ef31a2
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/fsl,fman.yaml
@@ -0,0 +1,210 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/net/fsl,fman.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale Frame Manager Device
+
+maintainers:
+  - Frank Li
+
+description:
+  Because the FMan is an aggregation of sub-engines (ports, MACs,
+  etc.), the FMan node has child nodes for each of them.
+
+properties:
+  compatible:
+    enum:
+      - fsl,fman
+    description:
+      The FMan version can be determined via the FM_IP_REV_1 register in
+      the FMan block. The offset is 0xc4 from the beginning of the
+      Frame Processing Manager memory map (0xc3000 from the
+      beginning of the FMan node).
+
+  cell-index:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: |
+      Specifies the index of the FMan unit.
+
+      The cell-index value may be used by the SoC, to identify the
+      FMan unit in the SoC memory map. In the table below,
+      there's a description of the cell-index use in each SoC:
+
+      - P1023:
+        register[bit]              FMan unit  cell-index
+        ============================================================
+        DEVDISR[1]                 1          0
+
+      - P2041, P3041, P4080, P5020, P5040:
+        register[bit]              FMan unit  cell-index
+        ============================================================
+        DCFG_DEVDISR2[6]           1          0
+        DCFG_DEVDISR2[14]          2          1
+        (Second FM available only in P4080 and P5040)
+
+      - B4860, T1040, T2080, T4240:
+        register[bit]              FMan unit  cell-index
+        ============================================================
+        DCFG_CCSR_DEVDISR2[24]     1          0
+        DCFG_CCSR_DEVDISR2[25]     2          1
+        (Second FM available only in T4240)
+
+      DEVDISR, DCFG_DEVDISR2 and DCFG_CCSR_DEVDISR2 are located in
+      the specific SoC "Device Configuration/Pin Control" Memory
+      Map.
+
+  reg:
+    items:
+      - description: BMI configuration registers.
+      - description: QMI configuration registers.
+      - description: DMA configuration registers.
+      - description: FPM configuration registers.
+      - description: FMan controller configuration registers.
+    minItems: 1
+
+  ranges: true
+
+  clocks:
+    maxItems: 1
+
+  clock-names:
+    items:
+      - const: fmanclk
+
+  interrupts:
+    items:
+      - description: The first element is associated with the event interrupts.
+      - description: The second element is associated with the error interrupts.
+
+  dma-coherent: true
+
+  ptimer-handle:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description: see ptp/fsl,ptp.yaml
+
+  fsl,qman-channel-range:
+    $ref: /schemas/types.yaml#/definitions/uint32-array
+    description:
+      Specifies the range of the available dedicated
+      channels in the FMan. The first cell specifies the beginning
+      of the range and the second cell specifies the number of
+      channels.
+    items:
+      - description: The first cell specifies the beginning of the range.
+ - description: | + The second cell specifies the number of channels. + Further information available at: + "Work Queue (WQ) Channel Assignments in the QMan" section + in DPAA Reference Manual. + + fsl,qman: + $ref: /schemas/types.yaml#/definitions/phandle + description: See soc/fsl/qman.txt + + fsl,bman: + $ref: /schemas/types.yaml#/definitions/phandle + description: See soc/fsl/bman.txt + + fsl,erratum-a050385: + $ref: /schemas/types.yaml#/definitions/flag + description: A boolean property. Indicates the presence of the + erratum A050385 which indicates that DMA transactions that are + split can result in a FMan lock. + + '#address-cells': + const: 1 + + '#size-cells': + const: 1 + +patternProperties: + '^muram@[a-f0-9]+$': + $ref: fsl,fman-muram.yaml + + '^port@[a-f0-9]+$': + $ref: fsl,fman-port.yaml + + '^ethernet@[a-f0-9]+$': + $ref: fsl,fman-dtsec.yaml + + '^mdio@[a-f0-9]+$': + $ref: fsl,fman-mdio.yaml + + '^phc@[a-f0-9]+$': + $ref: /schemas/ptp/fsl,ptp.yaml + +required: + - compatible + - cell-index + - reg + - ranges + - clocks + - clock-names + - interrupts + - fsl,qman-channel-range + +additionalProperties: false + +examples: + - | + #include + + fman@400000 { + compatible = "fsl,fman"; + reg = <0x400000 0x100000>; + ranges = <0 0x400000 0x100000>; + #address-cells = <1>; + #size-cells = <1>; + cell-index = <1>; + clocks = <&fman_clk>; + clock-names = "fmanclk"; + interrupts = <96 IRQ_TYPE_EDGE_FALLING>, + <16 IRQ_TYPE_EDGE_FALLING>; + fsl,qman-channel-range = <0x40 0xc>; + + muram@0 { + compatible = "fsl,fman-muram"; + reg = <0x0 0x28000>; + }; + + port@81000 { + cell-index = <1>; + compatible = "fsl,fman-v2-port-oh"; + reg = <0x81000 0x1000>; + }; + + fman1_rx_0x8: port@88000 { + cell-index = <0x8>; + compatible = "fsl,fman-v2-port-rx"; + reg = <0x88000 0x1000>; + }; + + fman1_tx_0x28: port@a8000 { + cell-index = <0x28>; + compatible = "fsl,fman-v2-port-tx"; + reg = <0xa8000 0x1000>; + }; + + ethernet@e0000 { + compatible = "fsl,fman-dtsec"; + cell-index = <0>; + reg = <0xe0000 0x1000>; + ptp-timer = <&ptp_timer>; + fsl,fman-ports = <&fman1_rx_0x8 &fman1_tx_0x28>; + tbi-handle = <&tbi5>; + }; + + ptp_timer: phc@fe000 { + compatible = "fsl,fman-ptp-timer"; + reg = <0xfe000 0x1000>; + interrupts = <12 IRQ_TYPE_LEVEL_LOW>; + }; + + mdio@f1000 { + compatible = "fsl,fman-xmdio"; + reg = <0xf1000 0x1000>; + interrupts = <101 IRQ_TYPE_EDGE_FALLING>; + }; + }; diff --git a/Documentation/devicetree/bindings/net/fsl-enetc.txt b/Documentation/devicetree/bindings/net/fsl-enetc.txt deleted file mode 100644 index 9b9a3f197e2d..000000000000 --- a/Documentation/devicetree/bindings/net/fsl-enetc.txt +++ /dev/null @@ -1,119 +0,0 @@ -* ENETC ethernet device tree bindings - -Depending on board design and ENETC port type (internal or -external) there are two supported link modes specified by -below device tree bindings. - -Required properties: - -- reg : Specifies PCIe Device Number and Function - Number of the ENETC endpoint device, according - to parent node bindings. -- compatible : Should be "fsl,enetc". - -1. The ENETC external port is connected to a MDIO configurable phy - -1.1. Using the local ENETC Port MDIO interface - -In this case, the ENETC node should include a "mdio" sub-node -that in turn should contain the "ethernet-phy" node describing the -external phy. Below properties are required, their bindings -already defined in Documentation/devicetree/bindings/net/ethernet.txt or -Documentation/devicetree/bindings/net/phy.txt. 
- -Required: - -- phy-handle : Phandle to a PHY on the MDIO bus. - Defined in ethernet.txt. - -- phy-connection-type : Defined in ethernet.txt. - -- mdio : "mdio" node, defined in mdio.txt. - -- ethernet-phy : "ethernet-phy" node, defined in phy.txt. - -Example: - - ethernet@0,0 { - compatible = "fsl,enetc"; - reg = <0x000000 0 0 0 0>; - phy-handle = <&sgmii_phy0>; - phy-connection-type = "sgmii"; - - mdio { - #address-cells = <1>; - #size-cells = <0>; - sgmii_phy0: ethernet-phy@2 { - reg = <0x2>; - }; - }; - }; - -1.2. Using the central MDIO PCIe endpoint device - -In this case, the mdio node should be defined as another PCIe -endpoint node, at the same level with the ENETC port nodes. - -Required properties: - -- reg : Specifies PCIe Device Number and Function - Number of the ENETC endpoint device, according - to parent node bindings. -- compatible : Should be "fsl,enetc-mdio". - -The remaining required mdio bus properties are standard, their bindings -already defined in Documentation/devicetree/bindings/net/mdio.txt. - -Example: - - ethernet@0,0 { - compatible = "fsl,enetc"; - reg = <0x000000 0 0 0 0>; - phy-handle = <&sgmii_phy0>; - phy-connection-type = "sgmii"; - }; - - mdio@0,3 { - compatible = "fsl,enetc-mdio"; - reg = <0x000300 0 0 0 0>; - #address-cells = <1>; - #size-cells = <0>; - sgmii_phy0: ethernet-phy@2 { - reg = <0x2>; - }; - }; - -2. The ENETC port is an internal port or has a fixed-link external -connection - -In this case, the ENETC port node defines a fixed link connection, -as specified by Documentation/devicetree/bindings/net/fixed-link.txt. - -Required: - -- fixed-link : "fixed-link" node, defined in "fixed-link.txt". - -Example: - ethernet@0,2 { - compatible = "fsl,enetc"; - reg = <0x000200 0 0 0 0>; - fixed-link { - speed = <1000>; - full-duplex; - }; - }; - -* Integrated Endpoint Register Block bindings - -Optionally, the fsl_enetc driver can probe on the Integrated Endpoint Register -Block, which preconfigures the FIFO limits for the ENETC ports. This is a node -with the following properties: - -- reg : Specifies the address in the SoC memory space. -- compatible : Must be "fsl,ls1028a-enetc-ierb". - -Example: - ierb@1f0800000 { - compatible = "fsl,ls1028a-enetc-ierb"; - reg = <0x01 0xf0800000 0x0 0x10000>; - }; diff --git a/Documentation/devicetree/bindings/net/fsl-fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt deleted file mode 100644 index bda4b41af074..000000000000 --- a/Documentation/devicetree/bindings/net/fsl-fman.txt +++ /dev/null @@ -1,548 +0,0 @@ -============================================================================= -Freescale Frame Manager Device Bindings - -CONTENTS - - FMan Node - - FMan Port Node - - FMan MURAM Node - - FMan dTSEC/XGEC/mEMAC Node - - FMan IEEE 1588 Node - - FMan MDIO Node - - Example - -============================================================================= -FMan Node - -DESCRIPTION - -Due to the fact that the FMan is an aggregation of sub-engines (ports, MACs, -etc.) the FMan node will have child nodes for each of them. - -PROPERTIES - -- compatible - Usage: required - Value type: - Definition: Must include "fsl,fman" - FMan version can be determined via FM_IP_REV_1 register in the - FMan block. The offset is 0xc4 from the beginning of the - Frame Processing Manager memory map (0xc3000 from the - beginning of the FMan node). - -- cell-index - Usage: required - Value type: - Definition: Specifies the index of the FMan unit. 
- - The cell-index value may be used by the SoC, to identify the - FMan unit in the SoC memory map. In the table below, - there's a description of the cell-index use in each SoC: - - - P1023: - register[bit] FMan unit cell-index - ============================================================ - DEVDISR[1] 1 0 - - - P2041, P3041, P4080 P5020, P5040: - register[bit] FMan unit cell-index - ============================================================ - DCFG_DEVDISR2[6] 1 0 - DCFG_DEVDISR2[14] 2 1 - (Second FM available only in P4080 and P5040) - - - B4860, T1040, T2080, T4240: - register[bit] FMan unit cell-index - ============================================================ - DCFG_CCSR_DEVDISR2[24] 1 0 - DCFG_CCSR_DEVDISR2[25] 2 1 - (Second FM available only in T4240) - - DEVDISR, DCFG_DEVDISR2 and DCFG_CCSR_DEVDISR2 are located in - the specific SoC "Device Configuration/Pin Control" Memory - Map. - -- reg - Usage: required - Value type: - Definition: A standard property. Specifies the offset of the - following configuration registers: - - BMI configuration registers. - - QMI configuration registers. - - DMA configuration registers. - - FPM configuration registers. - - FMan controller configuration registers. - -- ranges - Usage: required - Value type: - Definition: A standard property. - -- clocks - Usage: required - Value type: - Definition: phandle for the fman input clock. - -- clock-names - usage: required - Value type: - Definition: "fmanclk" for the fman input clock. - -- interrupts - Usage: required - Value type: - Definition: A pair of IRQs are specified in this property. - The first element is associated with the event interrupts and - the second element is associated with the error interrupts. - -- fsl,qman-channel-range - Usage: required - Value type: - Definition: Specifies the range of the available dedicated - channels in the FMan. The first cell specifies the beginning - of the range and the second cell specifies the number of - channels. - Further information available at: - "Work Queue (WQ) Channel Assignments in the QMan" section - in DPAA Reference Manual. - -- fsl,qman -- fsl,bman - Usage: required - Definition: See soc/fsl/qman.txt and soc/fsl/bman.txt - -- fsl,erratum-a050385 - Usage: optional - Value type: boolean - Definition: A boolean property. Indicates the presence of the - erratum A050385 which indicates that DMA transactions that are - split can result in a FMan lock. - -============================================================================= -FMan MURAM Node - -DESCRIPTION - -FMan Internal memory - shared between all the FMan modules. -It contains data structures that are common and written to or read by -the modules. -FMan internal memory is split into the following parts: - Packet buffering (Tx/Rx FIFOs) - Frames internal context - -PROPERTIES - -- compatible - Usage: required - Value type: - Definition: Must include "fsl,fman-muram" - -- ranges - Usage: required - Value type: - Definition: A standard property. - Specifies the multi-user memory offset and the size within - the FMan. - -EXAMPLE - -muram@0 { - compatible = "fsl,fman-muram"; - ranges = <0 0x000000 0x28000>; -}; - -============================================================================= -FMan Port Node - -DESCRIPTION - -The Frame Manager (FMan) supports several types of hardware ports: - Ethernet receiver (RX) - Ethernet transmitter (TX) - Offline/Host command (O/H) - -PROPERTIES - -- compatible - Usage: required - Value type: - Definition: A standard property. 
- Must include one of the following: - - "fsl,fman-v2-port-oh" for FManV2 OH ports - - "fsl,fman-v2-port-rx" for FManV2 RX ports - - "fsl,fman-v2-port-tx" for FManV2 TX ports - - "fsl,fman-v3-port-oh" for FManV3 OH ports - - "fsl,fman-v3-port-rx" for FManV3 RX ports - - "fsl,fman-v3-port-tx" for FManV3 TX ports - -- cell-index - Usage: required - Value type: - Definition: Specifies the hardware port id. - Each hardware port on the FMan has its own hardware PortID. - Super set of all hardware Port IDs available at FMan Reference - Manual under "FMan Hardware Ports in Freescale Devices" table. - - Each hardware port is assigned a 4KB, port-specific page in - the FMan hardware port memory region (which is part of the - FMan memory map). The first 4 KB in the FMan hardware ports - memory region is used for what are called common registers. - The subsequent 63 4KB pages are allocated to the hardware - ports. - The page of a specific port is determined by the cell-index. - -- reg - Usage: required - Value type: - Definition: There is one reg region describing the port - configuration registers. - -- fsl,fman-10g-port - Usage: optional - Value type: boolean - Definition: The default port rate is 1G. - If this property exists, the port is s 10G port. - -- fsl,fman-best-effort-port - Usage: optional - Value type: boolean - Definition: Can be defined only if 10G-support is set. - This property marks a best-effort 10G port (10G port that - may not be capable of line rate). - -EXAMPLE - -port@a8000 { - cell-index = <0x28>; - compatible = "fsl,fman-v2-port-tx"; - reg = <0xa8000 0x1000>; -}; - -port@88000 { - cell-index = <0x8>; - compatible = "fsl,fman-v2-port-rx"; - reg = <0x88000 0x1000>; -}; - -port@81000 { - cell-index = <0x1>; - compatible = "fsl,fman-v2-port-oh"; - reg = <0x81000 0x1000>; -}; - -============================================================================= -FMan dTSEC/XGEC/mEMAC Node - -Refer to Documentation/devicetree/bindings/net/fsl,fman-dtsec.yaml - -============================================================================ -FMan IEEE 1588 Node - -Refer to Documentation/devicetree/bindings/ptp/ptp-qoriq.txt - -============================================================================= -FMan MDIO Node - -DESCRIPTION - -The MDIO is a bus to which the PHY devices are connected. - -PROPERTIES - -- compatible - Usage: required - Value type: - Definition: A standard property. - Must include "fsl,fman-mdio" for 1 Gb/s MDIO from FMan v2. - Must include "fsl,fman-xmdio" for 10 Gb/s MDIO from FMan v2. - Must include "fsl,fman-memac-mdio" for 1/10 Gb/s MDIO from - FMan v3. - -- reg - Usage: required - Value type: - Definition: A standard property. - -- clocks - Usage: optional - Value type: - Definition: A reference to the input clock of the controller - from which the MDC frequency is derived. - -- clock-frequency - Usage: optional - Value type: - Definition: Specifies the external MDC frequency, in Hertz, to - be used. Requires that the input clock is specified in the - "clocks" property. See also: mdio.yaml. - -- suppress-preamble - Usage: optional - Value type: - Definition: Disable generation of preamble bits. See also: - mdio.yaml. - -- interrupts - Usage: required for external MDIO - Value type: - Definition: Event interrupt of external MDIO controller. - -- fsl,fman-internal-mdio - Usage: required for internal MDIO - Value type: boolean - Definition: Fman has internal MDIO for internal PCS(Physical - Coding Sublayer) PHYs and external MDIO for external PHYs. 
- The settings and programming routines for internal/external - MDIO are different. Must be included for internal MDIO. - -- fsl,erratum-a009885 - Usage: optional - Value type: - Definition: Indicates the presence of the A009885 - erratum describing that the contents of MDIO_DATA may - become corrupt unless it is read within 16 MDC cycles - of MDIO_CFG[BSY] being cleared, when performing an - MDIO read operation. - -- fsl,erratum-a011043 - Usage: optional - Value type: - Definition: Indicates the presence of the A011043 erratum - describing that the MDIO_CFG[MDIO_RD_ER] bit may be falsely - set when reading internal PCS registers. MDIO reads to - internal PCS registers may result in having the - MDIO_CFG[MDIO_RD_ER] bit set, even when there is no error and - read data (MDIO_DATA[MDIO_DATA]) is correct. - Software may get false read error when reading internal - PCS registers through MDIO. As a workaround, all internal - MDIO accesses should ignore the MDIO_CFG[MDIO_RD_ER] bit. - -For internal PHY device on internal mdio bus, a PHY node should be created. -See the definition of the PHY node in booting-without-of.txt for an -example of how to define a PHY (Internal PHY has no interrupt line). -- For "fsl,fman-mdio" compatible internal mdio bus, the PHY is TBI PHY. -- For "fsl,fman-memac-mdio" compatible internal mdio bus, the PHY is PCS PHY. - The PCS PHY address should correspond to the value of the appropriate - MDEV_PORT. - -EXAMPLE - -Example for FMan v2 external MDIO: - -mdio@f1000 { - compatible = "fsl,fman-xmdio"; - reg = <0xf1000 0x1000>; - interrupts = <101 2 0 0>; -}; - -Example for FMan v2 internal MDIO: - -mdio@e3120 { - compatible = "fsl,fman-mdio"; - reg = <0xe3120 0xee0>; - fsl,fman-internal-mdio; - - tbi1: tbi-phy@8 { - reg = <0x8>; - device_type = "tbi-phy"; - }; -}; - -Example for FMan v3 internal MDIO: - -mdio@f1000 { - compatible = "fsl,fman-memac-mdio"; - reg = <0xf1000 0x1000>; - fsl,fman-internal-mdio; - - pcsphy6: ethernet-phy@0 { - reg = <0x0>; - }; -}; - -============================================================================= -Example - -fman@400000 { - #address-cells = <1>; - #size-cells = <1>; - cell-index = <1>; - compatible = "fsl,fman" - ranges = <0 0x400000 0x100000>; - reg = <0x400000 0x100000>; - clocks = <&fman_clk>; - clock-names = "fmanclk"; - interrupts = < - 96 2 0 0 - 16 2 1 1>; - fsl,qman-channel-range = <0x40 0xc>; - - muram@0 { - compatible = "fsl,fman-muram"; - reg = <0x0 0x28000>; - }; - - port@81000 { - cell-index = <1>; - compatible = "fsl,fman-v2-port-oh"; - reg = <0x81000 0x1000>; - }; - - port@82000 { - cell-index = <2>; - compatible = "fsl,fman-v2-port-oh"; - reg = <0x82000 0x1000>; - }; - - port@83000 { - cell-index = <3>; - compatible = "fsl,fman-v2-port-oh"; - reg = <0x83000 0x1000>; - }; - - port@84000 { - cell-index = <4>; - compatible = "fsl,fman-v2-port-oh"; - reg = <0x84000 0x1000>; - }; - - port@85000 { - cell-index = <5>; - compatible = "fsl,fman-v2-port-oh"; - reg = <0x85000 0x1000>; - }; - - port@86000 { - cell-index = <6>; - compatible = "fsl,fman-v2-port-oh"; - reg = <0x86000 0x1000>; - }; - - fman1_rx_0x8: port@88000 { - cell-index = <0x8>; - compatible = "fsl,fman-v2-port-rx"; - reg = <0x88000 0x1000>; - }; - - fman1_rx_0x9: port@89000 { - cell-index = <0x9>; - compatible = "fsl,fman-v2-port-rx"; - reg = <0x89000 0x1000>; - }; - - fman1_rx_0xa: port@8a000 { - cell-index = <0xa>; - compatible = "fsl,fman-v2-port-rx"; - reg = <0x8a000 0x1000>; - }; - - fman1_rx_0xb: port@8b000 { - cell-index = <0xb>; - compatible = 
"fsl,fman-v2-port-rx"; - reg = <0x8b000 0x1000>; - }; - - fman1_rx_0xc: port@8c000 { - cell-index = <0xc>; - compatible = "fsl,fman-v2-port-rx"; - reg = <0x8c000 0x1000>; - }; - - fman1_rx_0x10: port@90000 { - cell-index = <0x10>; - compatible = "fsl,fman-v2-port-rx"; - reg = <0x90000 0x1000>; - }; - - fman1_tx_0x28: port@a8000 { - cell-index = <0x28>; - compatible = "fsl,fman-v2-port-tx"; - reg = <0xa8000 0x1000>; - }; - - fman1_tx_0x29: port@a9000 { - cell-index = <0x29>; - compatible = "fsl,fman-v2-port-tx"; - reg = <0xa9000 0x1000>; - }; - - fman1_tx_0x2a: port@aa000 { - cell-index = <0x2a>; - compatible = "fsl,fman-v2-port-tx"; - reg = <0xaa000 0x1000>; - }; - - fman1_tx_0x2b: port@ab000 { - cell-index = <0x2b>; - compatible = "fsl,fman-v2-port-tx"; - reg = <0xab000 0x1000>; - }; - - fman1_tx_0x2c: port@ac0000 { - cell-index = <0x2c>; - compatible = "fsl,fman-v2-port-tx"; - reg = <0xac000 0x1000>; - }; - - fman1_tx_0x30: port@b0000 { - cell-index = <0x30>; - compatible = "fsl,fman-v2-port-tx"; - reg = <0xb0000 0x1000>; - }; - - ethernet@e0000 { - compatible = "fsl,fman-dtsec"; - cell-index = <0>; - reg = <0xe0000 0x1000>; - fsl,fman-ports = <&fman1_rx_0x8 &fman1_tx_0x28>; - tbi-handle = <&tbi5>; - }; - - ethernet@e2000 { - compatible = "fsl,fman-dtsec"; - cell-index = <1>; - reg = <0xe2000 0x1000>; - fsl,fman-ports = <&fman1_rx_0x9 &fman1_tx_0x29>; - tbi-handle = <&tbi6>; - }; - - ethernet@e4000 { - compatible = "fsl,fman-dtsec"; - cell-index = <2>; - reg = <0xe4000 0x1000>; - fsl,fman-ports = <&fman1_rx_0xa &fman1_tx_0x2a>; - tbi-handle = <&tbi7>; - }; - - ethernet@e6000 { - compatible = "fsl,fman-dtsec"; - cell-index = <3>; - reg = <0xe6000 0x1000>; - fsl,fman-ports = <&fman1_rx_0xb &fman1_tx_0x2b>; - tbi-handle = <&tbi8>; - }; - - ethernet@e8000 { - compatible = "fsl,fman-dtsec"; - cell-index = <4>; - reg = <0xf0000 0x1000>; - fsl,fman-ports = <&fman1_rx_0xc &fman1_tx_0x2c>; - tbi-handle = <&tbi9>; - - ethernet@f0000 { - cell-index = <8>; - compatible = "fsl,fman-xgec"; - reg = <0xf0000 0x1000>; - fsl,fman-ports = <&fman1_rx_0x10 &fman1_tx_0x30>; - }; - - ptp-timer@fe000 { - compatible = "fsl,fman-ptp-timer"; - reg = <0xfe000 0x1000>; - }; - - mdio@f1000 { - compatible = "fsl,fman-xmdio"; - reg = <0xf1000 0x1000>; - interrupts = <101 2 0 0>; - }; -}; diff --git a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt index 047bdf7bdd2f..9c9668c1b6a2 100644 --- a/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt +++ b/Documentation/devicetree/bindings/net/fsl-tsec-phy.txt @@ -86,4 +86,4 @@ Example: * Gianfar PTP clock nodes -Refer to Documentation/devicetree/bindings/ptp/ptp-qoriq.txt +Refer to Documentation/devicetree/bindings/ptp/fsl,ptp.yaml diff --git a/Documentation/devicetree/bindings/net/mediatek,net.yaml b/Documentation/devicetree/bindings/net/mediatek,net.yaml index 3202dc7967c5..686b5c2fae40 100644 --- a/Documentation/devicetree/bindings/net/mediatek,net.yaml +++ b/Documentation/devicetree/bindings/net/mediatek,net.yaml @@ -68,6 +68,17 @@ properties: Phandle to the syscon node that handles the path from GMAC to PHY variants. + mediatek,pcie-mirror: + $ref: /schemas/types.yaml#/definitions/phandle + description: + Phandle to the mediatek pcie-mirror controller. + + mediatek,pctl: + $ref: /schemas/types.yaml#/definitions/phandle + description: + Phandle to the syscon node that handles the ports slew rate and + driver current. 
+ mediatek,sgmiisys: $ref: /schemas/types.yaml#/definitions/phandle-array minItems: 1 @@ -131,15 +142,12 @@ allOf: mediatek,infracfg: false - mediatek,pctl: - $ref: /schemas/types.yaml#/definitions/phandle - description: - Phandle to the syscon node that handles the ports slew rate and - driver current. - mediatek,wed: false mediatek,wed-pcie: false + else: + properties: + mediatek,pctl: false - if: properties: @@ -201,12 +209,10 @@ allOf: minItems: 1 maxItems: 1 - mediatek,pcie-mirror: - $ref: /schemas/types.yaml#/definitions/phandle - description: - Phandle to the mediatek pcie-mirror controller. - mediatek,wed-pcie: false + else: + properties: + mediatek,pcie-mirror: false - if: properties: diff --git a/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt b/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt index 9ef5bacda8c1..988c72685cbf 100644 --- a/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt +++ b/Documentation/devicetree/bindings/net/mediatek-bluetooth.txt @@ -1,39 +1,3 @@ -MediaTek SoC built-in Bluetooth Devices -================================== - -This device is a serial attached device to BTIF device and thus it must be a -child node of the serial node with BTIF. The dt-bindings details for BTIF -device can be known via Documentation/devicetree/bindings/serial/8250.yaml. - -Required properties: - -- compatible: Must be - "mediatek,mt7622-bluetooth": for MT7622 SoC -- clocks: Should be the clock specifiers corresponding to the entry in - clock-names property. -- clock-names: Should contain "ref" entries. -- power-domains: Phandle to the power domain that the device is part of - -Example: - - btif: serial@1100c000 { - compatible = "mediatek,mt7622-btif", - "mediatek,mtk-btif"; - reg = <0 0x1100c000 0 0x1000>; - interrupts = ; - clocks = <&pericfg CLK_PERI_BTIF_PD>; - clock-names = "main"; - reg-shift = <2>; - reg-io-width = <4>; - - bluetooth { - compatible = "mediatek,mt7622-bluetooth"; - power-domains = <&scpsys MT7622_POWER_DOMAIN_WB>; - clocks = <&clk25m>; - clock-names = "ref"; - }; - }; - MediaTek UART based Bluetooth Devices ================================== diff --git a/Documentation/devicetree/bindings/net/mscc,miim.yaml b/Documentation/devicetree/bindings/net/mscc,miim.yaml index 5b292e7c9e46..792f26b06b06 100644 --- a/Documentation/devicetree/bindings/net/mscc,miim.yaml +++ b/Documentation/devicetree/bindings/net/mscc,miim.yaml @@ -38,6 +38,16 @@ properties: clock-frequency: true + resets: + items: + - description: + Reset shared with all blocks attached to the Switch Core Register + Bus (CSR) including VRAP slave. + + reset-names: + items: + - const: switch + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/net/pcs/snps,dw-xpcs.yaml b/Documentation/devicetree/bindings/net/pcs/snps,dw-xpcs.yaml new file mode 100644 index 000000000000..e77eec9ac9ee --- /dev/null +++ b/Documentation/devicetree/bindings/net/pcs/snps,dw-xpcs.yaml @@ -0,0 +1,136 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/pcs/snps,dw-xpcs.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Synopsys DesignWare Ethernet PCS + +maintainers: + - Serge Semin + +description: + Synopsys DesignWare Ethernet Physical Coding Sublayer provides an interface + between Media Access Control and Physical Medium Attachment Sublayer through + the Media Independent Interface (XGMII, USXGMII, XLGMII, GMII, etc) + controlled by means of the IEEE std. 
+  Clause 45 register set. The PCS can be
+  optionally synthesized with a vendor-specific interface connected to
+  Synopsys PMA (also called DesignWare Consumer/Enterprise PHY) although in
+  general it can be used to communicate with any compatible PHY.
+
+  The PCS CSRs are accessible either over the Ethernet MDIO bus or directly
+  by means of the APB3/MCI interfaces. In the latter case the XPCS can be
+  mapped directly into the system IO memory space.
+
+properties:
+  compatible:
+    oneOf:
+      - description: Synopsys DesignWare XPCS with none or unknown PMA
+        const: snps,dw-xpcs
+      - description: Synopsys DesignWare XPCS with Consumer Gen1 3G PMA
+        const: snps,dw-xpcs-gen1-3g
+      - description: Synopsys DesignWare XPCS with Consumer Gen2 3G PMA
+        const: snps,dw-xpcs-gen2-3g
+      - description: Synopsys DesignWare XPCS with Consumer Gen2 6G PMA
+        const: snps,dw-xpcs-gen2-6g
+      - description: Synopsys DesignWare XPCS with Consumer Gen4 3G PMA
+        const: snps,dw-xpcs-gen4-3g
+      - description: Synopsys DesignWare XPCS with Consumer Gen4 6G PMA
+        const: snps,dw-xpcs-gen4-6g
+      - description: Synopsys DesignWare XPCS with Consumer Gen5 10G PMA
+        const: snps,dw-xpcs-gen5-10g
+      - description: Synopsys DesignWare XPCS with Consumer Gen5 12G PMA
+        const: snps,dw-xpcs-gen5-12g
+
+  reg:
+    items:
+      - description:
+          In the case of the MDIO management interface this is just a 5-bit
+          ID of the MDIO bus device. If the DW XPCS CSR space is accessed
+          over the MCI or APB3 management interfaces, then the space mapping
+          can be either 'direct' or 'indirect'. In the former case all
+          Clause 45 registers are contiguously mapped within the address
+          space MMD '[20:16]', Reg '[15:0]'. In the latter case the space is
+          divided into multiple 256-register sets. There is a special
+          viewport CSR which is responsible for the set selection. The upper
+          part of the CSR address, MMD+REG[20:8], is written there so that
+          the corresponding subset is mapped onto the lowest 255 CSRs. For
+          example, reaching MMD 1, register 0x96 (full CSR address 0x10096)
+          through the indirect mapping means writing 0x100 to the viewport
+          CSR and then accessing offset 0x96 within the mapped set.
+
+  reg-names:
+    items:
+      - enum: [ direct, indirect ]
+
+  reg-io-width:
+    description:
+      The way the CSRs are mapped to the memory is platform dependent. Since
+      each Clause 45 CSR is 16 bits wide, the accesses must be at least
+      two-byte aligned.
+    default: 2
+    enum: [ 2, 4 ]
+
+  interrupts:
+    description:
+      System interface interrupt output (sbd_intr_o) indicating Clause 73/37
+      auto-negotiation events':' Page received, AN is completed or
+      incompatible link partner.
+    maxItems: 1
+
+  clocks:
+    description:
+      The MCI and APB3 interfaces are supposed to be equipped with a clock
+      source connected to the clk_csr_i line.
+
+      The PCS/PMA layer can be clocked by an internal reference clock source
+      (phyN_core_refclk) or by an externally connected (phyN_pad_refclk)
+      clock generator. Both clocks can be supplied at the same time.
+ minItems: 1 + maxItems: 3 + + clock-names: + oneOf: + - minItems: 1 + items: # MDIO + - enum: [core, pad] + - const: pad + - minItems: 1 + items: # MCI or APB + - const: csr + - enum: [core, pad] + - const: pad + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + #include + + ethernet-pcs@1f05d000 { + compatible = "snps,dw-xpcs"; + reg = <0x1f05d000 0x1000>; + reg-names = "indirect"; + + reg-io-width = <4>; + + interrupts = <79 IRQ_TYPE_LEVEL_HIGH>; + + clocks = <&ccu_pclk>, <&ccu_core>, <&ccu_pad>; + clock-names = "csr", "core", "pad"; + }; + - | + mdio-bus { + #address-cells = <1>; + #size-cells = <0>; + + ethernet-pcs@0 { + compatible = "snps,dw-xpcs"; + reg = <0>; + + clocks = <&ccu_core>, <&ccu_pad>; + clock-names = "core", "pad"; + }; + }; +... diff --git a/Documentation/devicetree/bindings/net/realtek,rtl82xx.yaml b/Documentation/devicetree/bindings/net/realtek,rtl82xx.yaml index bb94a2388520..d248a08a2136 100644 --- a/Documentation/devicetree/bindings/net/realtek,rtl82xx.yaml +++ b/Documentation/devicetree/bindings/net/realtek,rtl82xx.yaml @@ -14,10 +14,32 @@ maintainers: description: Bindings for Realtek RTL82xx PHYs -allOf: - - $ref: ethernet-phy.yaml# - properties: + compatible: + enum: + - ethernet-phy-id001c.c800 + - ethernet-phy-id001c.c816 + - ethernet-phy-id001c.c838 + - ethernet-phy-id001c.c840 + - ethernet-phy-id001c.c848 + - ethernet-phy-id001c.c849 + - ethernet-phy-id001c.c84a + - ethernet-phy-id001c.c862 + - ethernet-phy-id001c.c878 + - ethernet-phy-id001c.c880 + - ethernet-phy-id001c.c910 + - ethernet-phy-id001c.c912 + - ethernet-phy-id001c.c913 + - ethernet-phy-id001c.c914 + - ethernet-phy-id001c.c915 + - ethernet-phy-id001c.c916 + - ethernet-phy-id001c.c942 + - ethernet-phy-id001c.c961 + - ethernet-phy-id001c.cad0 + - ethernet-phy-id001c.cb00 + + leds: true + realtek,clkout-disable: type: boolean description: @@ -31,6 +53,18 @@ properties: unevaluatedProperties: false +allOf: + - $ref: ethernet-phy.yaml# + - if: + not: + properties: + compatible: + contains: + const: ethernet-phy-id001c.c916 + then: + properties: + leds: false + examples: - | mdio { diff --git a/Documentation/devicetree/bindings/net/snps,dwmac.yaml b/Documentation/devicetree/bindings/net/snps,dwmac.yaml index 21cc27e75f50..3eb65e63fdae 100644 --- a/Documentation/devicetree/bindings/net/snps,dwmac.yaml +++ b/Documentation/devicetree/bindings/net/snps,dwmac.yaml @@ -76,6 +76,7 @@ properties: - rockchip,rk3128-gmac - rockchip,rk3228-gmac - rockchip,rk3288-gmac + - rockchip,rk3308-gmac - rockchip,rk3328-gmac - rockchip,rk3366-gmac - rockchip,rk3368-gmac @@ -435,6 +436,32 @@ properties: description: Use Address-Aligned Beats + snps,pbl: + description: + Programmable Burst Length (tx and rx) + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [1, 2, 4, 8, 16, 32] + + snps,txpbl: + description: + Tx Programmable Burst Length. If set, DMA tx will use this + value rather than snps,pbl. + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [1, 2, 4, 8, 16, 32] + + snps,rxpbl: + description: + Rx Programmable Burst Length. If set, DMA rx will use this + value rather than snps,pbl. + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [1, 2, 4, 8, 16, 32] + + snps,no-pbl-x8: + $ref: /schemas/types.yaml#/definitions/flag + description: + Don\'t multiply the pbl/txpbl/rxpbl values by 8. For core + rev < 3.50, don\'t multiply the values by 4. 
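To make the precedence between these burst-length knobs concrete, here is a sketch of a node using them (an illustrative fragment only; the compatible string, unit address and values are placeholders, and the usual required properties are omitted):

    ethernet@30000000 {
        compatible = "snps,dwmac-3.70a";
        reg = <0x30000000 0x8000>;
        snps,pbl = <8>;     /* burst length for both DMA directions */
        snps,txpbl = <16>;  /* overrides snps,pbl on the TX path only */
        /* RX keeps snps,pbl; without snps,no-pbl-x8 the core multiplies
         * these values by 8 (by 4 for core revisions below 3.50) */
    };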
+ snps,fixed-burst: $ref: /schemas/types.yaml#/definitions/flag description: @@ -485,6 +512,12 @@ properties: description: Frequency division factor for MDC clock. + snps,tso: + $ref: /schemas/types.yaml#/definitions/flag + description: + Enables the TSO feature otherwise it will be managed by MAC HW capability + register. + mdio: $ref: mdio.yaml# unevaluatedProperties: false @@ -568,95 +601,38 @@ allOf: - if: properties: compatible: - contains: - enum: - - allwinner,sun7i-a20-gmac - - allwinner,sun8i-a83t-emac - - allwinner,sun8i-h3-emac - - allwinner,sun8i-r40-gmac - - allwinner,sun8i-v3s-emac - - allwinner,sun50i-a64-emac - - ingenic,jz4775-mac - - ingenic,x1000-mac - - ingenic,x1600-mac - - ingenic,x1830-mac - - ingenic,x2000-mac - - qcom,sa8775p-ethqos - - qcom,sc8280xp-ethqos - - snps,dwmac-3.50a - - snps,dwmac-4.10a - - snps,dwmac-4.20a - - snps,dwmac-5.20 - - snps,dwxgmac - - snps,dwxgmac-2.10 - - st,spear600-gmac + not: + contains: + enum: + - allwinner,sun7i-a20-gmac + - allwinner,sun8i-a83t-emac + - allwinner,sun8i-h3-emac + - allwinner,sun8i-r40-gmac + - allwinner,sun8i-v3s-emac + - allwinner,sun50i-a64-emac + - loongson,ls2k-dwmac + - loongson,ls7a-dwmac + - ingenic,jz4775-mac + - ingenic,x1000-mac + - ingenic,x1600-mac + - ingenic,x1830-mac + - ingenic,x2000-mac + - qcom,qcs404-ethqos + - qcom,sa8775p-ethqos + - qcom,sc8280xp-ethqos + - qcom,sm8150-ethqos + - snps,dwmac-4.00 + - snps,dwmac-4.10a + - snps,dwmac-4.20a + - snps,dwmac-5.10a + - snps,dwmac-5.20 + - snps,dwxgmac + - snps,dwxgmac-2.10 + - st,spear600-gmac then: properties: - snps,pbl: - description: - Programmable Burst Length (tx and rx) - $ref: /schemas/types.yaml#/definitions/uint32 - enum: [1, 2, 4, 8, 16, 32] - - snps,txpbl: - description: - Tx Programmable Burst Length. If set, DMA tx will use this - value rather than snps,pbl. - $ref: /schemas/types.yaml#/definitions/uint32 - enum: [1, 2, 4, 8, 16, 32] - - snps,rxpbl: - description: - Rx Programmable Burst Length. If set, DMA rx will use this - value rather than snps,pbl. - $ref: /schemas/types.yaml#/definitions/uint32 - enum: [1, 2, 4, 8, 16, 32] - - snps,no-pbl-x8: - $ref: /schemas/types.yaml#/definitions/flag - description: - Don\'t multiply the pbl/txpbl/rxpbl values by 8. For core - rev < 3.50, don\'t multiply the values by 4. - - - if: - properties: - compatible: - contains: - enum: - - allwinner,sun7i-a20-gmac - - allwinner,sun8i-a83t-emac - - allwinner,sun8i-h3-emac - - allwinner,sun8i-r40-gmac - - allwinner,sun8i-v3s-emac - - allwinner,sun50i-a64-emac - - loongson,ls2k-dwmac - - loongson,ls7a-dwmac - - ingenic,jz4775-mac - - ingenic,x1000-mac - - ingenic,x1600-mac - - ingenic,x1830-mac - - ingenic,x2000-mac - - qcom,qcs404-ethqos - - qcom,sa8775p-ethqos - - qcom,sc8280xp-ethqos - - qcom,sm8150-ethqos - - snps,dwmac-4.00 - - snps,dwmac-4.10a - - snps,dwmac-4.20a - - snps,dwmac-5.10a - - snps,dwmac-5.20 - - snps,dwxgmac - - snps,dwxgmac-2.10 - - st,spear600-gmac - - then: - properties: - snps,tso: - $ref: /schemas/types.yaml#/definitions/flag - description: - Enables the TSO feature otherwise it will be managed by - MAC HW capability register. 
+        snps,tso: false

additionalProperties: true
diff --git a/Documentation/devicetree/bindings/net/stm32-dwmac.yaml b/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
index 7ccf75676b6d..bf23838fe6e8 100644
--- a/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
+++ b/Documentation/devicetree/bindings/net/stm32-dwmac.yaml
@@ -22,18 +22,22 @@ select:
       enum:
         - st,stm32-dwmac
         - st,stm32mp1-dwmac
+        - st,stm32mp13-dwmac
+        - st,stm32mp25-dwmac
   required:
     - compatible
 
-allOf:
-  - $ref: snps,dwmac.yaml#
-
 properties:
   compatible:
     oneOf:
+      - items:
+          - enum:
+              - st,stm32mp25-dwmac
+          - const: snps,dwmac-5.20
       - items:
           - enum:
               - st,stm32mp1-dwmac
+              - st,stm32mp13-dwmac
           - const: snps,dwmac-4.20a
       - items:
           - enum:
@@ -75,12 +79,15 @@ properties:
   st,syscon:
     $ref: /schemas/types.yaml#/definitions/phandle-array
     items:
-      - items:
+      - minItems: 2
+        items:
           - description: phandle to the syscon node which encompases the glue register
           - description: offset of the control register
+          - description: mask of the bitfield to set in the register
     description:
       Should be phandle/offset pair. The phandle to the syscon node which
-      encompases the glue register, and the offset of the control register
+      encompasses the glue register, the offset of the control register and
+      the mask to set the bitfield in the control register.
 
   st,ext-phyclk:
     description:
@@ -112,12 +119,40 @@ required:
 
 unevaluatedProperties: false
 
+allOf:
+  - $ref: snps,dwmac.yaml#
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - st,stm32-dwmac
+              - st,stm32mp1-dwmac
+              - st,stm32mp25-dwmac
+    then:
+      properties:
+        st,syscon:
+          items:
+            minItems: 2
+            maxItems: 2
+
+  - if:
+      properties:
+        compatible:
+          contains:
+            enum:
+              - st,stm32mp13-dwmac
+    then:
+      properties:
+        st,syscon:
+          items:
+            minItems: 3
+            maxItems: 3
+
 examples:
   - |
     #include
     #include
-    #include
-    #include
     //Example 1
     ethernet0: ethernet@5800a000 {
         compatible = "st,stm32mp1-dwmac", "snps,dwmac-4.20a";
diff --git a/Documentation/devicetree/bindings/net/ti,icss-iep.yaml b/Documentation/devicetree/bindings/net/ti,icss-iep.yaml
index f5c22d6dcaee..e36e3a622904 100644
--- a/Documentation/devicetree/bindings/net/ti,icss-iep.yaml
+++ b/Documentation/devicetree/bindings/net/ti,icss-iep.yaml
@@ -28,6 +28,15 @@ properties:
     maxItems: 1
     description: phandle to the IEP source clock
 
+  interrupts:
+    maxItems: 1
+    description:
+      Interrupt specifier for the capture/compare IRQ.
+
+  interrupt-names:
+    items:
+      - const: iep_cap_cmp
+
 required:
   - compatible
   - reg
diff --git a/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml b/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml
index e253fa786092..c296e5711848 100644
--- a/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml
+++ b/Documentation/devicetree/bindings/net/ti,icssg-prueth.yaml
@@ -55,6 +55,14 @@ properties:
     description:
       phandle to MII_RT module's syscon regmap
 
+  ti,pa-stats:
+    $ref: /schemas/types.yaml#/definitions/phandle
+    description:
+      phandle to the PA_STATS module's syscon regmap. PA_STATS is a set of
+      registers into which the ICSSG firmware dumps various statistics
+      related to the ICSSG. The PA_STATS syscon regmap lets the device
+      access (read and write) those statistics.
+ ti,iep: $ref: /schemas/types.yaml#/definitions/phandle-array maxItems: 2 @@ -194,6 +202,7 @@ examples: "tx1-0", "tx1-1", "tx1-2", "tx1-3", "rx0", "rx1"; ti,mii-g-rt = <&icssg2_mii_g_rt>; + ti,pa-stats = <&icssg2_pa_stats>; ti,iep = <&icssg2_iep0>, <&icssg2_iep1>; interrupt-parent = <&icssg2_intc>; interrupts = <24 0 2>, <25 1 3>; diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml index 5c4498b762c8..070c4c9b8643 100644 --- a/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml +++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath10k.yaml @@ -128,6 +128,11 @@ properties: Whether to skip executing an SCM call that reassigns the memory region ownership. + qcom,no-msa-ready-indicator: + type: boolean + description: + Don't wait for MSA_READY indicator to complete init. + qcom,smem-states: $ref: /schemas/types.yaml#/definitions/phandle-array description: State bits used by the AP to signal the WLAN Q6. diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml index 41d023797d7d..8675d7d0215c 100644 --- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml +++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k-pci.yaml @@ -17,6 +17,7 @@ description: | properties: compatible: enum: + - pci17cb,1101 # QCA6390 - pci17cb,1103 # WCN6855 reg: @@ -28,10 +29,55 @@ properties: string to uniquely identify variant of the calibration data for designs with colliding bus and device ids + vddrfacmn-supply: + description: VDD_RFA_CMN supply regulator handle + + vddaon-supply: + description: VDD_AON supply regulator handle + + vddwlcx-supply: + description: VDD_WL_CX supply regulator handle + + vddwlmx-supply: + description: VDD_WL_MX supply regulator handle + + vddrfa0p8-supply: + description: VDD_RFA_0P8 supply regulator handle + + vddrfa1p2-supply: + description: VDD_RFA_1P2 supply regulator handle + + vddrfa1p7-supply: + description: VDD_RFA_1P7 supply regulator handle + + vddpcie0p9-supply: + description: VDD_PCIE_0P9 supply regulator handle + + vddpcie1p8-supply: + description: VDD_PCIE_1P8 supply regulator handle + required: - compatible - reg +allOf: + - if: + properties: + compatible: + contains: + const: pci17cb,1101 + then: + required: + - vddrfacmn-supply + - vddaon-supply + - vddwlcx-supply + - vddwlmx-supply + - vddrfa0p8-supply + - vddrfa1p2-supply + - vddrfa1p7-supply + - vddpcie0p9-supply + - vddpcie1p8-supply + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml index a2d55bf4c7a5..ff5763dc66a8 100644 --- a/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml +++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath11k.yaml @@ -265,15 +265,6 @@ allOf: examples: - | - - q6v5_wcss: remoteproc@cd00000 { - compatible = "qcom,ipq8074-wcss-pil"; - reg = <0xcd00000 0x4040>, - <0x4ab000 0x20>; - reg-names = "qdsp6", - "rmb"; - }; - wifi0: wifi@c000000 { compatible = "qcom,ipq8074-wifi"; reg = <0xc000000 0x2000000>; diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml b/Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml new file mode 100644 index 000000000000..1b5884015b15 --- /dev/null +++ b/Documentation/devicetree/bindings/net/wireless/qcom,ath12k.yaml @@ -0,0 +1,99 @@ +# 
SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +# Copyright (c) 2024 Linaro Limited +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/wireless/qcom,ath12k.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Qualcomm Technologies ath12k wireless devices (PCIe) + +maintainers: + - Jeff Johnson + - Kalle Valo + +description: + Qualcomm Technologies IEEE 802.11be PCIe devices. + +properties: + compatible: + enum: + - pci17cb,1107 # WCN7850 + + reg: + maxItems: 1 + + vddaon-supply: + description: VDD_AON supply regulator handle + + vddwlcx-supply: + description: VDD_WLCX supply regulator handle + + vddwlmx-supply: + description: VDD_WLMX supply regulator handle + + vddrfacmn-supply: + description: VDD_RFA_CMN supply regulator handle + + vddrfa0p8-supply: + description: VDD_RFA_0P8 supply regulator handle + + vddrfa1p2-supply: + description: VDD_RFA_1P2 supply regulator handle + + vddrfa1p8-supply: + description: VDD_RFA_1P8 supply regulator handle + + vddpcie0p9-supply: + description: VDD_PCIE_0P9 supply regulator handle + + vddpcie1p8-supply: + description: VDD_PCIE_1P8 supply regulator handle + +required: + - compatible + - reg + - vddaon-supply + - vddwlcx-supply + - vddwlmx-supply + - vddrfacmn-supply + - vddrfa0p8-supply + - vddrfa1p2-supply + - vddrfa1p8-supply + - vddpcie0p9-supply + - vddpcie1p8-supply + +additionalProperties: false + +examples: + - | + #include + #include + pcie { + #address-cells = <3>; + #size-cells = <2>; + + pcie@0 { + device_type = "pci"; + reg = <0x0 0x0 0x0 0x0 0x0>; + #address-cells = <3>; + #size-cells = <2>; + ranges; + + bus-range = <0x01 0xff>; + + wifi@0 { + compatible = "pci17cb,1107"; + reg = <0x10000 0x0 0x0 0x0 0x0>; + + vddaon-supply = <&vreg_pmu_aon_0p59>; + vddwlcx-supply = <&vreg_pmu_wlcx_0p8>; + vddwlmx-supply = <&vreg_pmu_wlmx_0p85>; + vddrfacmn-supply = <&vreg_pmu_rfa_cmn>; + vddrfa0p8-supply = <&vreg_pmu_rfa_0p8>; + vddrfa1p2-supply = <&vreg_pmu_rfa_1p2>; + vddrfa1p8-supply = <&vreg_pmu_rfa_1p8>; + vddpcie0p9-supply = <&vreg_pmu_pcie_0p9>; + vddpcie1p8-supply = <&vreg_pmu_pcie_1p8>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/net/xlnx,gmii-to-rgmii.yaml b/Documentation/devicetree/bindings/net/xlnx,gmii-to-rgmii.yaml index 0f781dac6717..eb803ddd13e0 100644 --- a/Documentation/devicetree/bindings/net/xlnx,gmii-to-rgmii.yaml +++ b/Documentation/devicetree/bindings/net/xlnx,gmii-to-rgmii.yaml @@ -31,6 +31,10 @@ properties: phy-handle: $ref: ethernet-controller.yaml#/properties/phy-handle + clocks: + items: + - description: 200/375 MHz free-running clock is used as input clock. 
+
 required:
   - compatible
   - reg
@@ -51,5 +55,6 @@ examples:
         compatible = "xlnx,gmii-to-rgmii-1.0";
         reg = <8>;
         phy-handle = <&phy>;
+        clocks = <&dummy>;
       };
     };
diff --git a/Documentation/devicetree/bindings/ptp/fsl,ptp.yaml b/Documentation/devicetree/bindings/ptp/fsl,ptp.yaml
new file mode 100644
index 000000000000..3bb8615e3e91
--- /dev/null
+++ b/Documentation/devicetree/bindings/ptp/fsl,ptp.yaml
@@ -0,0 +1,144 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/ptp/fsl,ptp.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Freescale QorIQ 1588 timer based PTP clock
+
+maintainers:
+  - Frank Li
+
+properties:
+  compatible:
+    enum:
+      - fsl,etsec-ptp
+      - fsl,fman-ptp-timer
+      - fsl,dpaa2-ptp
+      - fsl,enetc-ptp
+
+  reg:
+    maxItems: 1
+
+  interrupts:
+    maxItems: 1
+
+  clocks:
+    maxItems: 1
+
+  fsl,cksel:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: |
+      Timer reference clock source.
+
+      The reference clock source is determined by the value held in the
+      CKSEL bits of the TMR_CTRL register. The "fsl,cksel" property keeps
+      the value which will be directly written to those bits; that is why,
+      according to the reference manual, the following clock sources can
+      be used:
+
+      For eTSEC,
+      <0> - external high precision timer reference clock (TSEC_TMR_CLK
+            input is used for this purpose);
+      <1> - eTSEC system clock;
+      <2> - eTSEC1 transmit clock;
+      <3> - RTC clock input.
+
+      For DPAA FMan,
+      <0> - external high precision timer reference clock (TMR_1588_CLK)
+      <1> - MAC system clock (1/2 FMan clock)
+      <2> - reserved
+      <3> - RTC clock oscillator
+
+  fsl,tclk-period:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: Timer reference clock period in nanoseconds.
+
+  fsl,tmr-prsc:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: Prescaler, divides the output clock.
+
+  fsl,tmr-add:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: Frequency compensation value.
+
+  fsl,tmr-fiper1:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: Fixed interval period pulse generator.
+
+  fsl,tmr-fiper2:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: Fixed interval period pulse generator.
+
+  fsl,tmr-fiper3:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description:
+      Fixed interval period pulse generator.
+      Supported only on DPAA2 and ENETC hardware.
+
+  fsl,max-adj:
+    $ref: /schemas/types.yaml#/definitions/uint32
+    description: |
+      Maximum frequency adjustment in parts per billion.
+
+      These properties set the operational parameters for the PTP
+      clock. You must choose these carefully for the clock to work right.
+      Here is how to figure good values:
+
+      TimerOsc     = selected reference clock   MHz
+      tclk_period  = desired clock period       nanoseconds
+      NominalFreq  = 1000 / tclk_period         MHz
+      FreqDivRatio = TimerOsc / NominalFreq     (must be greater than 1.0)
+      tmr_add      = ceil(2^32 / FreqDivRatio)
+      OutputClock  = NominalFreq / tmr_prsc     MHz
+      PulseWidth   = 1 / OutputClock            microseconds
+      FiperFreq1   = desired frequency in Hz
+      FiperDiv1    = 1000000 * OutputClock / FiperFreq1
+      tmr_fiper1   = tmr_prsc * tclk_period * FiperDiv1 - tclk_period
+      max_adj      = 1000000000 * (FreqDivRatio - 1.0) - 1
+
+      The calculation for tmr_fiper2 is the same as for tmr_fiper1. The
+      driver expects that tmr_fiper1 will be correctly set to produce a 1
+      Pulse Per Second (PPS) signal, since this will be offered to the PPS
+      subsystem to synchronize the Linux clock.
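+
+      As a worked example, take a hypothetical 250 MHz reference clock and
+      a desired 10 ns timer clock period (the numbers are illustrative
+      only and do not describe any particular board):
+
+      TimerOsc     = 250 MHz
+      tclk_period  = 10 ns
+      NominalFreq  = 1000 / 10 = 100 MHz
+      FreqDivRatio = 250 / 100 = 2.5
+      tmr_add      = ceil(2^32 / 2.5) = 1717986919 (0x66666667)
+      OutputClock  = 100 / 100 = 1 MHz          (with tmr_prsc = 100)
+      FiperDiv1    = 1000000 * 1 / 1 = 1000000  (for FiperFreq1 = 1 Hz)
+      tmr_fiper1   = 100 * 10 * 1000000 - 10 = 999999990  (a 1 PPS signal)
+      max_adj      = 1000000000 * (2.5 - 1.0) - 1 = 1499999999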
+
+      When this attribute is not used, the IEEE 1588 timer reference clock
+      will use the eTSEC system clock (for Gianfar) or the MAC system
+      clock (for DPAA).
+
+  fsl,extts-fifo:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description:
+      The presence of this property indicates hardware
+      support for the external trigger stamp FIFO.
+
+  little-endian:
+    $ref: /schemas/types.yaml#/definitions/flag
+    description:
+      The presence of this property indicates the 1588 timer
+      IP block is in little-endian mode. The default endian mode
+      is big-endian.
+
+required:
+  - compatible
+  - reg
+
+additionalProperties: false
+
+examples:
+  - |
+    #include
+
+    phc@24e00 {
+        compatible = "fsl,etsec-ptp";
+        reg = <0x24e00 0xb0>;
+        interrupts = <12 IRQ_TYPE_LEVEL_LOW>;
+        interrupt-parent = <&ipic>;
+        fsl,cksel = <1>;
+        fsl,tclk-period = <10>;
+        fsl,tmr-prsc = <100>;
+        fsl,tmr-add = <0x999999a4>;
+        fsl,tmr-fiper1 = <0x3b9ac9f6>;
+        fsl,tmr-fiper2 = <0x00018696>;
+        fsl,max-adj = <659999998>;
+    };
diff --git a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt b/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
deleted file mode 100644
index 743eda754e65..000000000000
--- a/Documentation/devicetree/bindings/ptp/ptp-qoriq.txt
+++ /dev/null
@@ -1,87 +0,0 @@
-* Freescale QorIQ 1588 timer based PTP clock
-
-General Properties:
-
-  - compatible   Should be "fsl,etsec-ptp" for eTSEC
-                 Should be "fsl,fman-ptp-timer" for DPAA FMan
-                 Should be "fsl,dpaa2-ptp" for DPAA2
-                 Should be "fsl,enetc-ptp" for ENETC
-  - reg          Offset and length of the register set for the device
-  - interrupts   There should be at least two interrupts. Some devices
-                 have as many as four PTP related interrupts.
-
-Clock Properties:
-
-  - fsl,cksel        Timer reference clock source.
-  - fsl,tclk-period  Timer reference clock period in nanoseconds.
-  - fsl,tmr-prsc     Prescaler, divides the output clock.
-  - fsl,tmr-add      Frequency compensation value.
-  - fsl,tmr-fiper1   Fixed interval period pulse generator.
-  - fsl,tmr-fiper2   Fixed interval period pulse generator.
-  - fsl,tmr-fiper3   Fixed interval period pulse generator.
-                     Supported only on DPAA2 and ENETC hardware.
-  - fsl,max-adj      Maximum frequency adjustment in parts per billion.
-  - fsl,extts-fifo   The presence of this property indicates hardware
-                     support for the external trigger stamp FIFO.
-  - little-endian    The presence of this property indicates the 1588 timer
-                     IP block is little-endian mode. The default endian mode
-                     is big-endian.
-
-  These properties set the operational parameters for the PTP
-  clock. You must choose these carefully for the clock to work right.
-  Here is how to figure good values:
-
-  TimerOsc     = selected reference clock   MHz
-  tclk_period  = desired clock period       nanoseconds
-  NominalFreq  = 1000 / tclk_period         MHz
-  FreqDivRatio = TimerOsc / NominalFreq     (must be greater that 1.0)
-  tmr_add      = ceil(2^32 / FreqDivRatio)
-  OutputClock  = NominalFreq / tmr_prsc     MHz
-  PulseWidth   = 1 / OutputClock            microseconds
-  FiperFreq1   = desired frequency in Hz
-  FiperDiv1    = 1000000 * OutputClock / FiperFreq1
-  tmr_fiper1   = tmr_prsc * tclk_period * FiperDiv1 - tclk_period
-  max_adj      = 1000000000 * (FreqDivRatio - 1.0) - 1
-
-  The calculation for tmr_fiper2 is the same as for tmr_fiper1. The
-  driver expects that tmr_fiper1 will be correctly set to produce a 1
-  Pulse Per Second (PPS) signal, since this will be offered to the PPS
-  subsystem to synchronize the Linux clock.
- - Reference clock source is determined by the value, which is holded - in CKSEL bits in TMR_CTRL register. "fsl,cksel" property keeps the - value, which will be directly written in those bits, that is why, - according to reference manual, the next clock sources can be used: - - For eTSEC, - <0> - external high precision timer reference clock (TSEC_TMR_CLK - input is used for this purpose); - <1> - eTSEC system clock; - <2> - eTSEC1 transmit clock; - <3> - RTC clock input. - - For DPAA FMan, - <0> - external high precision timer reference clock (TMR_1588_CLK) - <1> - MAC system clock (1/2 FMan clock) - <2> - reserved - <3> - RTC clock oscillator - - When this attribute is not used, the IEEE 1588 timer reference clock - will use the eTSEC system clock (for Gianfar) or the MAC system - clock (for DPAA). - -Example: - - ptp_clock@24e00 { - compatible = "fsl,etsec-ptp"; - reg = <0x24E00 0xB0>; - interrupts = <12 0x8 13 0x8>; - interrupt-parent = < &ipic >; - fsl,cksel = <1>; - fsl,tclk-period = <10>; - fsl,tmr-prsc = <100>; - fsl,tmr-add = <0x999999A4>; - fsl,tmr-fiper1 = <0x3B9AC9F6>; - fsl,tmr-fiper2 = <0x00018696>; - fsl,max-adj = <659999998>; - }; diff --git a/Documentation/netlink/specs/dpll.yaml b/Documentation/netlink/specs/dpll.yaml index 95b0eb1486bf..94132d30e0e0 100644 --- a/Documentation/netlink/specs/dpll.yaml +++ b/Documentation/netlink/specs/dpll.yaml @@ -479,6 +479,7 @@ operations: name: pin-get doc: | Get list of pins and its attributes. + - dump request without any attributes given - list all the pins in the system - dump request with target dpll - list all the pins registered with diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml index 4510e8d1adcb..495e35fcfb21 100644 --- a/Documentation/netlink/specs/ethtool.yaml +++ b/Documentation/netlink/specs/ethtool.yaml @@ -20,6 +20,25 @@ definitions: name: header-flags type: flags entries: [ compact-bitsets, omit-reply, stats ] + - + name: module-fw-flash-status + type: enum + entries: [ started, in_progress, completed, error ] + - + name: c33-pse-ext-state + enum-name: + type: enum + name-prefix: ethtool-c33-pse-ext-state- + entries: + - none + - error-condition + - mr-mps-valid + - mr-pse-enable + - option-detect-ted + - option-vport-lim + - ovld-detected + - power-not-available + - short-detected attribute-sets: - @@ -414,6 +433,26 @@ attribute-sets: name: combined-count type: u32 + - + name: irq-moderation + attributes: + - + name: usec + type: u32 + - + name: pkts + type: u32 + - + name: comps + type: u32 + - + name: profile + attributes: + - + name: irq-moderation + type: nest + multi-attr: true + nested-attributes: irq-moderation - name: coalesce attributes: @@ -502,6 +541,15 @@ attribute-sets: - name: tx-aggr-time-usecs type: u32 + - + name: rx-profile + type: nest + nested-attributes: profile + - + name: tx-profile + type: nest + nested-attributes: profile + - name: pause-stat attributes: @@ -891,6 +939,15 @@ attribute-sets: - name: power-mode type: u8 + - + name: c33-pse-pw-limit + attributes: + - + name: min + type: u32 + - + name: max + type: u32 - name: pse attributes: @@ -922,6 +979,33 @@ attribute-sets: name: c33-pse-pw-d-status type: u32 name-prefix: ethtool-a- + - + name: c33-pse-pw-class + type: u32 + name-prefix: ethtool-a- + - + name: c33-pse-actual-pw + type: u32 + name-prefix: ethtool-a- + - + name: c33-pse-ext-state + type: u32 + name-prefix: ethtool-a- + enum: c33-pse-ext-state + - + name: c33-pse-ext-substate + type: u32 + name-prefix: ethtool-a- + - + name: 
c33-pse-avail-pw-limit + type: u32 + name-prefix: ethtool-a- + - + name: c33-pse-pw-limit-ranges + name-prefix: ethtool-a- + type: nest + multi-attr: true + nested-attributes: c33-pse-pw-limit - name: rss attributes: @@ -975,6 +1059,32 @@ attribute-sets: - name: burst-tmr type: u32 + - + name: module-fw-flash + attributes: + - + name: header + type: nest + nested-attributes: header + - + name: file-name + type: string + - + name: password + type: u32 + - + name: status + type: u32 + enum: module-fw-flash-status + - + name: status-msg + type: string + - + name: done + type: uint + - + name: total + type: uint operations: enum-model: directional @@ -1325,6 +1435,8 @@ operations: - tx-aggr-max-bytes - tx-aggr-max-frames - tx-aggr-time-usecs + - rx-profile + - tx-profile dump: *coalesce-get-op - name: coalesce-set @@ -1611,6 +1723,12 @@ operations: - c33-pse-admin-state - c33-pse-admin-control - c33-pse-pw-d-status + - c33-pse-pw-class + - c33-pse-actual-pw + - c33-pse-ext-state + - c33-pse-ext-substate + - c33-pse-avail-pw-limit + - c33-pse-pw-limit-ranges dump: *pse-get-op - name: pse-set @@ -1624,6 +1742,7 @@ operations: - header - podl-pse-admin-control - c33-pse-admin-control + - c33-pse-avail-pw-limit - name: rss-get doc: Get RSS params. @@ -1733,3 +1852,28 @@ operations: name: mm-ntf doc: Notification for change in MAC Merge configuration. notify: mm-get + - + name: module-fw-flash-act + doc: Flash transceiver module firmware. + + attribute-set: module-fw-flash + + do: + request: + attributes: + - header + - file-name + - password + - + name: module-fw-flash-ntf + doc: Notification for firmware flashing progress and status. + + attribute-set: module-fw-flash + + event: + attributes: + - header + - status + - status-msg + - done + - total diff --git a/Documentation/netlink/specs/ovs_flow.yaml b/Documentation/netlink/specs/ovs_flow.yaml index 4fdfc6b5cae9..46f5d1cd8a5f 100644 --- a/Documentation/netlink/specs/ovs_flow.yaml +++ b/Documentation/netlink/specs/ovs_flow.yaml @@ -727,6 +727,12 @@ attribute-sets: name: dec-ttl type: nest nested-attributes: dec-ttl-attrs + - + name: psample + type: nest + nested-attributes: psample-attrs + doc: | + Sends a packet sample to psample for external observation. 
- name: tunnel-key-attrs enum-name: ovs-tunnel-key-attr @@ -938,6 +944,17 @@ attribute-sets: - name: gbp type: u32 + - + name: psample-attrs + enum-name: ovs-psample-attr + name-prefix: ovs-psample-attr- + attributes: + - + name: group + type: u32 + - + name: cookie + type: binary operations: name-prefix: ovs-flow-cmd- diff --git a/Documentation/netlink/specs/tc.yaml b/Documentation/netlink/specs/tc.yaml index 8c01e4e13195..b02d59a0349c 100644 --- a/Documentation/netlink/specs/tc.yaml +++ b/Documentation/netlink/specs/tc.yaml @@ -41,6 +41,16 @@ definitions: - in-hw - not-in-nw - verbose + - + name: tc-flower-key-ctrl-flags + type: flags + entries: + - frag + - firstfrag + - tuncsum + - tundf + - tunoam + - tuncrit - name: tc-stats type: struct @@ -2536,10 +2546,14 @@ attribute-sets: name: key-flags type: u32 byte-order: big-endian + enum: tc-flower-key-ctrl-flags + enum-as-flags: true - name: key-flags-mask type: u32 byte-order: big-endian + enum: tc-flower-key-ctrl-flags + enum-as-flags: true - name: key-icmpv4-code type: u8 @@ -2749,6 +2763,18 @@ attribute-sets: name: key-spi-mask type: u32 byte-order: big-endian + - + name: key-enc-flags + type: u32 + byte-order: big-endian + enum: tc-flower-key-ctrl-flags + enum-as-flags: true + - + name: key-enc-flags-mask + type: u32 + byte-order: big-endian + enum: tc-flower-key-ctrl-flags + enum-as-flags: true - name: tc-flower-key-enc-opts-attrs attributes: diff --git a/Documentation/netlink/specs/tcp_metrics.yaml b/Documentation/netlink/specs/tcp_metrics.yaml new file mode 100644 index 000000000000..1bd94f43e526 --- /dev/null +++ b/Documentation/netlink/specs/tcp_metrics.yaml @@ -0,0 +1,169 @@ +# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) + +name: tcp_metrics + +protocol: genetlink-legacy + +doc: | + Management interface for TCP metrics. + +c-family-name: tcp-metrics-genl-name +c-version-name: tcp-metrics-genl-version +max-by-define: true +kernel-policy: global + +definitions: + - + name: tcp-fastopen-cookie-max + type: const + value: 16 + +attribute-sets: + - + name: tcp-metrics + name-prefix: tcp-metrics-attr- + attributes: + - + name: addr-ipv4 + type: u32 + byte-order: big-endian + display-hint: ipv4 + - + name: addr-ipv6 + type: binary + checks: + min-len: 16 + byte-order: big-endian + display-hint: ipv6 + - + name: age + type: u64 + - + name: tw-tsval + type: u32 + doc: unused + - + name: tw-ts-stamp + type: s32 + doc: unused + - + name: vals + type: nest + nested-attributes: metrics + - + name: fopen-mss + type: u16 + - + name: fopen-syn-drops + type: u16 + - + name: fopen-syn-drop-ts + type: u64 + - + name: fopen-cookie + type: binary + checks: + min-len: tcp-fastopen-cookie-max + - + name: saddr-ipv4 + type: u32 + byte-order: big-endian + display-hint: ipv4 + - + name: saddr-ipv6 + type: binary + checks: + min-len: 16 + byte-order: big-endian + display-hint: ipv6 + - + name: pad + type: pad + + - + name: metrics + # Intentionally don't define the name-prefix, see below. + doc: | + Attributes with metrics. Note that the values here do not match + the TCP_METRIC_* defines in the kernel, because kernel defines + are off by one (e.g. rtt is defined as enum 0, while netlink carries + attribute type 1). + attributes: + - + name: rtt + type: u32 + doc: | + Round Trip Time (RTT), in msecs with 3 bits fractional + (left-shift by 3 to get the msec value). + - + name: rttvar + type: u32 + doc: | + Round Trip Time VARiance (RTTVAR), in msecs with 2 bits fractional + (left-shift by 2 to get the msec value).
+ - + name: ssthresh + type: u32 + doc: Slow Start THRESHold. + - + name: cwnd + type: u32 + doc: Congestion Window. + - + name: reodering + type: u32 + doc: Reordering metric. + - + name: rtt-us + type: u32 + doc: | + Round Trip Time (RTT), in usecs, with 3 bits fractional + (left-shift by 3 to get the usec value). + - + name: rttvar-us + type: u32 + doc: | + Round Trip Time VARiance (RTTVAR), in usecs, with 2 bits fractional + (left-shift by 2 to get the usec value). + +operations: + list: + - + name: get + doc: Retrieve metrics. + attribute-set: tcp-metrics + + dont-validate: [ strict, dump ] + + do: + request: &sel_attrs + attributes: + - addr-ipv4 + - addr-ipv6 + - saddr-ipv4 + - saddr-ipv6 + reply: &all_attrs + attributes: + - addr-ipv4 + - addr-ipv6 + - saddr-ipv4 + - saddr-ipv6 + - age + - vals + - fopen-mss + - fopen-syn-drops + - fopen-syn-drop-ts + - fopen-cookie + dump: + reply: *all_attrs + + - + name: del + doc: Delete metrics. + attribute-set: tcp-metrics + + dont-validate: [ strict, dump ] + flags: [ admin-perm ] + + do: + request: *sel_attrs diff --git a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst index fed821ef9b09..3bd72577af9a 100644 --- a/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst +++ b/Documentation/networking/device_drivers/ethernet/mellanox/mlx5/counters.rst @@ -189,22 +189,19 @@ the software port. * - `rx[i]_gro_packets` - Number of received packets processed using hardware-accelerated GRO. The - number of hardware GRO offloaded packets received on ring i. + number of hardware GRO offloaded packets received on ring i. Only true GRO + packets are counted: packets that are in an SKB with a GRO count > 1. - Acceleration * - `rx[i]_gro_bytes` - Number of received bytes processed using hardware-accelerated GRO. The - number of hardware GRO offloaded bytes received on ring i. + number of hardware GRO offloaded bytes received on ring i. Only true GRO + packets are counted: packets that are in an SKB with a GRO count > 1. - Acceleration * - `rx[i]_gro_skbs` - - The number of receive SKBs constructed while performing - hardware-accelerated GRO. - - Informative - - * - `rx[i]_gro_match_packets` - - Number of received packets processed using hardware-accelerated GRO that - met the flow table match criteria. + - The number of GRO SKBs constructed from hardware-accelerated GRO. Only SKBs + with a GRO count > 1 are counted. - Informative * - `rx[i]_gro_large_hds` @@ -212,6 +209,15 @@ the software port. headers that require additional memory to be allocated. - Informative + * - `rx[i]_hds_nodata_packets` + - Number of header-only packets in header/data split mode [#accel]_. + - Informative + + * - `rx[i]_hds_nodata_bytes` + - Number of bytes for header-only packets in header/data split mode + [#accel]_. + - Informative + * - `rx[i]_lro_packets` - The number of LRO packets received on ring i [#accel]_. - Acceleration diff --git a/Documentation/networking/devlink/ice.rst b/Documentation/networking/devlink/ice.rst index 830c04354222..e3972d03cea0 100644 --- a/Documentation/networking/devlink/ice.rst +++ b/Documentation/networking/devlink/ice.rst @@ -11,6 +11,7 @@ Parameters ========== .. list-table:: Generic parameters implemented + :widths: 5 5 90 * - Name - Mode @@ -68,6 +69,30 @@ Parameters To verify that value has been set: $ devlink dev param show pci/0000:16:00.0 name tx_scheduling_layers +..
list-table:: Driver-specific parameters implemented + :widths: 5 5 90 + + * - Name + - Mode + - Description + * - ``local_forwarding`` + - runtime + - Controls loopback behavior by tuning scheduler bandwidth. + It impacts all kinds of functions: physical, virtual and + subfunctions. + Supported values are: + + ``enabled`` - loopback traffic is allowed on the port + + ``disabled`` - loopback traffic is not allowed on this port + + ``prioritized`` - loopback traffic is prioritized on this port + + The default value of the ``local_forwarding`` parameter is ``enabled``. + ``prioritized`` provides the ability to adjust the loopback traffic rate + to increase the capacity of one port at the cost of another. The user + needs to disable local forwarding on one of the ports in order to gain + increased capacity on the ``prioritized`` port. Info versions ============= diff --git a/Documentation/networking/devlink/octeontx2.rst b/Documentation/networking/devlink/octeontx2.rst index 610de99b728a..d33a90dd44bf 100644 --- a/Documentation/networking/devlink/octeontx2.rst +++ b/Documentation/networking/devlink/octeontx2.rst @@ -40,3 +40,19 @@ The ``octeontx2 AF`` driver implements the following driver-specific parameters. - runtime - Use to set the quantum which hardware uses for scheduling among transmit queues. Hardware uses weighted DWRR algorithm to schedule among all transmit queues. + +The ``octeontx2 PF`` driver implements the following driver-specific parameters. + +.. list-table:: Driver-specific parameters implemented + :widths: 5 5 5 85 + + * - Name + - Type + - Mode + - Description + * - ``unicast_filter_count`` + - u8 + - runtime + - Set the maximum number of unicast filters that can be programmed for + the device. This can be used to achieve better device resource + utilization, avoiding over-consumption of unused MCAM table entries. diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index 160bfb0ae8ba..3ab423b80e91 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -228,6 +228,7 @@ Userspace to kernel: ``ETHTOOL_MSG_PLCA_GET_STATUS`` get PLCA RS status ``ETHTOOL_MSG_MM_GET`` get MAC merge layer state ``ETHTOOL_MSG_MM_SET`` set MAC merge layer parameters + ``ETHTOOL_MSG_MODULE_FW_FLASH_ACT`` flash transceiver module firmware ===================================== ================================= Kernel to userspace: @@ -274,6 +275,7 @@ Kernel to userspace: ``ETHTOOL_MSG_PLCA_GET_STATUS_REPLY`` PLCA RS status ``ETHTOOL_MSG_PLCA_NTF`` PLCA RS parameters ``ETHTOOL_MSG_MM_GET_REPLY`` MAC merge layer status + ``ETHTOOL_MSG_MODULE_FW_FLASH_NTF`` transceiver module flash updates ======================================== ================================= ``GET`` requests are sent by userspace applications to retrieve device @@ -1033,6 +1035,8 @@ Kernel response contents: ``ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES`` u32 max aggr size, Tx ``ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES`` u32 max aggr packets, Tx ``ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS`` u32 time (us), aggr, Tx + ``ETHTOOL_A_COALESCE_RX_PROFILE`` nested profile of DIM, Rx + ``ETHTOOL_A_COALESCE_TX_PROFILE`` nested profile of DIM, Tx =========================================== ====== ======================= Attributes are only included in reply if their value is not zero or the @@ -1062,6 +1066,10 @@ block should be sent. This feature is mainly of interest for specific USB devices which does not cope well with frequent small-sized URBs transmissions.
+``ETHTOOL_A_COALESCE_RX_PROFILE`` and ``ETHTOOL_A_COALESCE_TX_PROFILE`` refer +to DIM parameters; see `Generic Network Dynamic Interrupt Moderation (Net DIM) +`_. + COALESCE_SET ============ @@ -1098,6 +1106,8 @@ Request contents: ``ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES`` u32 max aggr size, Tx ``ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES`` u32 max aggr packets, Tx ``ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS`` u32 time (us), aggr, Tx + ``ETHTOOL_A_COALESCE_RX_PROFILE`` nested profile of DIM, Rx + ``ETHTOOL_A_COALESCE_TX_PROFILE`` nested profile of DIM, Tx =========================================== ====== ======================= Request is rejected if it attributes declared as unsupported by driver (i.e. @@ -1720,17 +1730,28 @@ Request contents: Kernel response contents: - ====================================== ====== ============================= - ``ETHTOOL_A_PSE_HEADER`` nested reply header - ``ETHTOOL_A_PODL_PSE_ADMIN_STATE`` u32 Operational state of the PoDL - PSE functions - ``ETHTOOL_A_PODL_PSE_PW_D_STATUS`` u32 power detection status of the - PoDL PSE. - ``ETHTOOL_A_C33_PSE_ADMIN_STATE`` u32 Operational state of the PoE - PSE functions. - ``ETHTOOL_A_C33_PSE_PW_D_STATUS`` u32 power detection status of the - PoE PSE. - ====================================== ====== ============================= + ========================================== ====== ============================= + ``ETHTOOL_A_PSE_HEADER`` nested reply header + ``ETHTOOL_A_PODL_PSE_ADMIN_STATE`` u32 Operational state of the PoDL + PSE functions + ``ETHTOOL_A_PODL_PSE_PW_D_STATUS`` u32 power detection status of the + PoDL PSE. + ``ETHTOOL_A_C33_PSE_ADMIN_STATE`` u32 Operational state of the PoE + PSE functions. + ``ETHTOOL_A_C33_PSE_PW_D_STATUS`` u32 power detection status of the + PoE PSE. + ``ETHTOOL_A_C33_PSE_PW_CLASS`` u32 power class of the PoE PSE. + ``ETHTOOL_A_C33_PSE_ACTUAL_PW`` u32 actual power drawn on the + PoE PSE. + ``ETHTOOL_A_C33_PSE_EXT_STATE`` u32 power extended state of the + PoE PSE. + ``ETHTOOL_A_C33_PSE_EXT_SUBSTATE`` u32 power extended substate of + the PoE PSE. + ``ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT`` u32 currently configured power + limit of the PoE PSE. + ``ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES`` nested Supported power limit + configuration ranges. + ========================================== ====== ============================= When set, the optional ``ETHTOOL_A_PODL_PSE_ADMIN_STATE`` attribute identifies the operational state of the PoDL PSE functions. The operational state of the @@ -1762,6 +1783,46 @@ The same goes for ``ETHTOOL_A_C33_PSE_ADMIN_PW_D_STATUS`` implementing .. kernel-doc:: include/uapi/linux/ethtool.h :identifiers: ethtool_c33_pse_pw_d_status +When set, the optional ``ETHTOOL_A_C33_PSE_PW_CLASS`` attribute identifies +the power class of the C33 PSE. It depends on the class negotiated between +the PSE and the PD. This option corresponds to ``IEEE 802.3-2022`` +30.9.1.1.8 aPSEPowerClassification. + +When set, the optional ``ETHTOOL_A_C33_PSE_ACTUAL_PW`` attribute identifies +the actual power drawn by the C33 PSE. This option corresponds to +``IEEE 802.3-2022`` 30.9.1.1.23 aPSEActualPower. Actual power is reported +in mW. + +When set, the optional ``ETHTOOL_A_C33_PSE_EXT_STATE`` attribute identifies +the extended error state of the C33 PSE. Possible values are: + +.. kernel-doc:: include/uapi/linux/ethtool.h + :identifiers: ethtool_c33_pse_ext_state + +When set, the optional ``ETHTOOL_A_C33_PSE_EXT_SUBSTATE`` attribute identifies +the extended error substate of the C33 PSE. Possible values are: + +.. kernel-doc:: include/uapi/linux/ethtool.h + :identifiers: ethtool_c33_pse_ext_substate_class_num_events + ethtool_c33_pse_ext_substate_error_condition + ethtool_c33_pse_ext_substate_mr_pse_enable + ethtool_c33_pse_ext_substate_option_detect_ted + ethtool_c33_pse_ext_substate_option_vport_lim + ethtool_c33_pse_ext_substate_ovld_detected + ethtool_c33_pse_ext_substate_pd_dll_power_type + ethtool_c33_pse_ext_substate_power_not_available + ethtool_c33_pse_ext_substate_short_detected + +When set, the optional ``ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT`` attribute +identifies the C33 PSE power limit in mW. + +When set, the optional ``ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES`` nested attribute +identifies the C33 PSE power limit ranges through +``ETHTOOL_A_C33_PSE_PWR_VAL_LIMIT_RANGE_MIN`` and +``ETHTOOL_A_C33_PSE_PWR_VAL_LIMIT_RANGE_MAX``. +If the controller works with fixed classes, the min and max values will be +equal. + PSE_SET ======= @@ -1773,6 +1834,8 @@ Request contents: ``ETHTOOL_A_PSE_HEADER`` nested request header ``ETHTOOL_A_PODL_PSE_ADMIN_CONTROL`` u32 Control PoDL PSE Admin state ``ETHTOOL_A_C33_PSE_ADMIN_CONTROL`` u32 Control PSE Admin state + ``ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT`` u32 Control PoE PSE available + power limit ====================================== ====== ============================= When set, the optional ``ETHTOOL_A_PODL_PSE_ADMIN_CONTROL`` attribute is used @@ -1783,6 +1846,18 @@ to control PoDL PSE Admin functions. This option is implementing The same goes for ``ETHTOOL_A_C33_PSE_ADMIN_CONTROL`` implementing ``IEEE 802.3-2022`` 30.9.1.2.1 acPSEAdminControl. +When set, the optional ``ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT`` attribute is +used to control the available power value limit for C33 PSE in milliwatts. +This attribute corresponds to the `pse_available_power` variable described in +``IEEE 802.3-2022`` 33.2.4.4 Variables and `pse_avail_pwr` in 145.2.5.4 +Variables, which are described in power classes. + +It was decided to use milliwatts for this interface to unify it with other +power monitoring interfaces, which also use milliwatts, and to align with +various existing products that document power consumption in watts rather than +classes. If power limit configuration based on classes is needed, the +conversion can be done in user space, for example by ethtool. + RSS_GET ======= @@ -2033,6 +2108,73 @@ The attributes are propagated to the driver through the following structure: .. kernel-doc:: include/linux/ethtool.h :identifiers: ethtool_mm_cfg +MODULE_FW_FLASH_ACT +=================== + +Flashes transceiver module firmware. + +Request contents: + + ======================================= ====== =========================== + ``ETHTOOL_A_MODULE_FW_FLASH_HEADER`` nested request header + ``ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME`` string firmware image file name + ``ETHTOOL_A_MODULE_FW_FLASH_PASSWORD`` u32 transceiver module password + ======================================= ====== =========================== + +The firmware update process consists of three logical steps: + +1. Downloading a firmware image to the transceiver module and validating it. +2. Running the firmware image. +3. Committing the firmware image so that it is run upon reset. + +When the flash command is given, these three steps are taken in that order. + +This message merely schedules the update process and returns immediately +without blocking. The process then runs asynchronously.
+Since it can take several minutes to complete, during the update process +notifications are emitted from the kernel to user space updating it about +the status and progress. + +The ``ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME`` attribute encodes the firmware +image file name. The firmware image is downloaded to the transceiver module, +validated, run and committed. + +The optional ``ETHTOOL_A_MODULE_FW_FLASH_PASSWORD`` attribute encodes a password +that might be required as part of the transceiver module firmware update +process. + +Notification contents: + + +---------------------------------------------------+--------+----------------+ + | ``ETHTOOL_A_MODULE_FW_FLASH_HEADER`` | nested | reply header | + +---------------------------------------------------+--------+----------------+ + | ``ETHTOOL_A_MODULE_FW_FLASH_STATUS`` | u32 | status | + +---------------------------------------------------+--------+----------------+ + | ``ETHTOOL_A_MODULE_FW_FLASH_STATUS_MSG`` | string | status message | + +---------------------------------------------------+--------+----------------+ + | ``ETHTOOL_A_MODULE_FW_FLASH_DONE`` | uint | progress | + +---------------------------------------------------+--------+----------------+ + | ``ETHTOOL_A_MODULE_FW_FLASH_TOTAL`` | uint | total | + +---------------------------------------------------+--------+----------------+ + +The ``ETHTOOL_A_MODULE_FW_FLASH_STATUS`` attribute encodes the current status +of the firmware update process. Possible values are: + +.. kernel-doc:: include/uapi/linux/ethtool.h + :identifiers: ethtool_module_fw_flash_status + +The ``ETHTOOL_A_MODULE_FW_FLASH_STATUS_MSG`` attribute encodes a status message +string. + +The ``ETHTOOL_A_MODULE_FW_FLASH_DONE`` and ``ETHTOOL_A_MODULE_FW_FLASH_TOTAL`` +attributes encode the completed and total amount of work, respectively. + Request translation =================== @@ -2139,4 +2281,5 @@ are netlink only. n/a ``ETHTOOL_MSG_PLCA_GET_STATUS`` n/a ``ETHTOOL_MSG_MM_GET`` n/a ``ETHTOOL_MSG_MM_SET`` + n/a ``ETHTOOL_MSG_MODULE_FW_FLASH_ACT`` =================================== ===================================== diff --git a/Documentation/networking/index.rst b/Documentation/networking/index.rst index 7664c0bfe461..d1af04b952f8 100644 --- a/Documentation/networking/index.rst +++ b/Documentation/networking/index.rst @@ -19,6 +19,7 @@ Contents: caif/index ethtool-netlink ieee802154 + iso15765-2 j1939 kapi msg_zerocopy @@ -72,6 +73,7 @@ Contents: mac80211-injection mctp mpls-sysctl + mptcp mptcp-sysctl multiqueue multi-pf-netdev @@ -104,6 +106,7 @@ Contents: seg6-sysctl skbuff smc-sysctl + sriov statistics strparser switchdev diff --git a/Documentation/networking/ip-sysctl.rst b/Documentation/networking/ip-sysctl.rst index bd50df6a5a42..3616389c8c2d 100644 --- a/Documentation/networking/ip-sysctl.rst +++ b/Documentation/networking/ip-sysctl.rst @@ -131,6 +131,20 @@ fib_multipath_hash_fields - UNSIGNED INTEGER Default: 0x0007 (source IP, destination IP and IP protocol) +fib_multipath_hash_seed - UNSIGNED INTEGER + The seed value used when calculating the hash for multipath routes. Applies + to both IPv4 and IPv6 datapaths. Only present for kernels built with + CONFIG_IP_ROUTE_MULTIPATH enabled. + + When set to 0, the seed value used for multipath routing defaults to an + internal, randomly generated one.
+ + The actual hashing algorithm is not specified -- there is no guarantee + that the next hop distribution produced by a given seed will remain stable + across kernel versions. + + Default: 0 (random) + fib_sync_mem - UNSIGNED INTEGER Amount of dirty memory from fib entries that can be backlogged before synchronize_rcu is forced. @@ -1196,6 +1210,19 @@ tcp_pingpong_thresh - INTEGER Default: 1 +tcp_rto_min_us - INTEGER + Minimal TCP retransmission timeout (in microseconds). Note that the + rto_min route option has the highest precedence for configuring this + setting, followed by the TCP_BPF_RTO_MIN socket option, followed by + this tcp_rto_min_us sysctl. + + The recommended practice is to use a value less than or equal to 200000 + microseconds. + + Possible Values: 1 - INT_MAX + + Default: 200000 + UDP variables ============= diff --git a/Documentation/networking/iso15765-2.rst b/Documentation/networking/iso15765-2.rst new file mode 100644 index 000000000000..0e9d96074178 --- /dev/null +++ b/Documentation/networking/iso15765-2.rst @@ -0,0 +1,386 @@ +.. SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) + +==================== +ISO 15765-2 (ISO-TP) +==================== + +Overview +======== + +ISO 15765-2, also known as ISO-TP, is a transport protocol specifically defined +for diagnostic communication on CAN. It is widely used in the automotive +industry, for example as the transport protocol for UDSonCAN (ISO 14229-3) or +emission-related diagnostic services (ISO 15031-5). + +ISO-TP can be used both on CAN CC (aka Classical CAN) and CAN FD (CAN with +Flexible Datarate) based networks. It is also designed to be compatible with a +CAN network using SAE J1939 as the data link layer (however, this is not a +requirement). + +Specifications used +------------------- + +* ISO 15765-2:2024 : Road vehicles - Diagnostic communication over Controller + Area Network (DoCAN). Part 2: Transport protocol and network layer services. + +Addressing +---------- + +In its simplest form, ISO-TP is based on two kinds of addressing modes for the +nodes connected to the same network: + +* physical addressing is implemented by two node-specific addresses and is used + in 1-to-1 communication. + +* functional addressing is implemented by one node-specific address and is used + in 1-to-N communication. + +Three different addressing formats can be employed: + +* "normal" : each address is represented simply by a CAN ID. + +* "extended": each address is represented by a CAN ID plus the first byte of + the CAN payload; both the CAN ID and the byte inside the payload shall be + different between two addresses. + +* "mixed": each address is represented by a CAN ID plus the first byte of + the CAN payload; the CAN ID is different between two addresses, but the + additional byte is the same. + +Transport protocol and associated frame types +--------------------------------------------- + +When transmitting data using the ISO-TP protocol, the payload either fits +inside a single CAN message or it does not, also taking into account the +overhead generated by the protocol and the optional extended addressing. In +the first case, the data is transmitted at once using a so-called Single +Frame (SF).
In the second case, +ISO-TP defines a multi-frame protocol, in which the sender provides (through a +First Frame - FF) the length of the PDU to be transmitted and also asks for a +Flow Control (FC) frame, which provides the maximum supported size of a macro +data block (``blocksize``) and the minimum time between the single CAN messages +composing such block (``stmin``). Once this information has been received, the +sender starts to send frames containing fragments of the data payload (called +Consecutive Frames - CF), stopping after every ``blocksize``-sized block to wait +for confirmation from the receiver, which should then send another Flow Control +frame to inform the sender about its availability to receive more data. + +How to Use ISO-TP +================= + +As with other CAN protocols, ISO-TP stack support is built into the +Linux network subsystem for the CAN bus, aka Linux-CAN or SocketCAN, and +thus follows the same socket API. + +Creation and basic usage of an ISO-TP socket +-------------------------------------------- + +To use the ISO-TP stack, ``#include <linux/can/isotp.h>`` shall be used. A +socket can then be created using the ``PF_CAN`` protocol family, the +``SOCK_DGRAM`` type (as the underlying protocol is datagram-based by design) +and the ``CAN_ISOTP`` protocol: + +.. code-block:: C + + s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP); + +After the socket has been successfully created, ``bind(2)`` shall be called to +bind the socket to the desired CAN interface; to do so: + +* a TX CAN ID shall be specified as part of the sockaddr supplied to the call + itself. + +* a RX CAN ID shall also be specified, unless broadcast flags have been set + through socket options (explained below). + +Once bound to an interface, the socket can be read from and written to using +the usual ``read(2)`` and ``write(2)`` system calls, as well as ``send(2)``, +``sendmsg(2)``, ``recv(2)`` and ``recvmsg(2)``. +Unlike the CAN_RAW socket API, only the ISO-TP data field (the actual payload) +is sent and received by the userspace application using these calls. The address +information and the protocol information are automatically filled in by the ISO-TP +stack using the configuration supplied during socket creation. In the same way, +the stack will use the transport mechanism when required (i.e., when the size +of the data payload exceeds the MTU of the underlying CAN bus). + +The sockaddr structure used for SocketCAN has extensions for use with ISO-TP, +as specified below: + +.. code-block:: C + + struct sockaddr_can { + sa_family_t can_family; + int can_ifindex; + union { + struct { canid_t rx_id, tx_id; } tp; + ... + } can_addr; + }; + +* ``can_family`` and ``can_ifindex`` serve the same purpose as for other + SocketCAN sockets. + +* ``can_addr.tp.rx_id`` specifies the receive (RX) CAN ID and will be used as + an RX filter. + +* ``can_addr.tp.tx_id`` specifies the transmit (TX) CAN ID. + +ISO-TP socket options +--------------------- + +When creating an ISO-TP socket, reasonable defaults are set. Some options can +be modified with ``setsockopt(2)`` and/or read back with ``getsockopt(2)``. + +General options +~~~~~~~~~~~~~~~ + +General socket options can be passed using the ``CAN_ISOTP_OPTS`` optname: + +.. code-block:: C + + struct can_isotp_options opts; + ret = setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_OPTS, &opts, sizeof(opts)); + +where the ``can_isotp_options`` structure has the following contents: + +..
code-block:: C + + struct can_isotp_options { + u32 flags; + u32 frame_txtime; + u8 ext_address; + u8 txpad_content; + u8 rxpad_content; + u8 rx_ext_address; + }; + +* ``flags``: modifiers to be applied to the default behaviour of the ISO-TP + stack. Following flags are available: + + * ``CAN_ISOTP_LISTEN_MODE``: listen only (do not send FC frames); normally + used as a testing feature. + + * ``CAN_ISOTP_EXTEND_ADDR``: use the byte specified in ``ext_address`` as an + additional address component. This enables the "mixed" addressing format if + used alone, or the "extended" addressing format if used in conjunction with + ``CAN_ISOTP_RX_EXT_ADDR``. + + * ``CAN_ISOTP_TX_PADDING``: enable padding for transmitted frames, using + ``txpad_content`` as value for the padding bytes. + + * ``CAN_ISOTP_RX_PADDING``: enable padding for the received frames, using + ``rxpad_content`` as value for the padding bytes. + + * ``CAN_ISOTP_CHK_PAD_LEN``: check for correct padding length on the received + frames. + + * ``CAN_ISOTP_CHK_PAD_DATA``: check padding bytes on the received frames + against ``rxpad_content``; if ``CAN_ISOTP_RX_PADDING`` is not specified, + this flag is ignored. + + * ``CAN_ISOTP_HALF_DUPLEX``: force ISO-TP socket in half duplex mode + (that is, transport mechanism can only be incoming or outgoing at the same + time, not both). + + * ``CAN_ISOTP_FORCE_TXSTMIN``: ignore stmin from received FC; normally + used as a testing feature. + + * ``CAN_ISOTP_FORCE_RXSTMIN``: ignore CFs depending on rx stmin; normally + used as a testing feature. + + * ``CAN_ISOTP_RX_EXT_ADDR``: use ``rx_ext_address`` instead of ``ext_address`` + as extended addressing byte on the reception path. If used in conjunction + with ``CAN_ISOTP_EXTEND_ADDR``, this flag effectively enables the "extended" + addressing format. + + * ``CAN_ISOTP_WAIT_TX_DONE``: wait until the frame is sent before returning + from ``write(2)`` and ``send(2)`` calls (i.e., blocking write operations). + + * ``CAN_ISOTP_SF_BROADCAST``: use 1-to-N functional addressing (cannot be + specified alongside ``CAN_ISOTP_CF_BROADCAST``). + + * ``CAN_ISOTP_CF_BROADCAST``: use 1-to-N transmission without flow control + (cannot be specified alongside ``CAN_ISOTP_SF_BROADCAST``). + NOTE: this is not covered by the ISO 15765-2 standard. + + * ``CAN_ISOTP_DYN_FC_PARMS``: enable dynamic update of flow control + parameters. + +* ``frame_txtime``: frame transmission time (defined as N_As/N_Ar inside the + ISO standard); if ``0``, the default (or the last set value) is used. + To set the transmission time to ``0``, the ``CAN_ISOTP_FRAME_TXTIME_ZERO`` + macro (equal to 0xFFFFFFFF) shall be used. + +* ``ext_address``: extended addressing byte, used if the + ``CAN_ISOTP_EXTEND_ADDR`` flag is specified. + +* ``txpad_content``: byte used as padding value for transmitted frames. + +* ``rxpad_content``: byte used as padding value for received frames. + +* ``rx_ext_address``: extended addressing byte for the reception path, used if + the ``CAN_ISOTP_RX_EXT_ADDR`` flag is specified. + +Flow Control options +~~~~~~~~~~~~~~~~~~~~ + +Flow Control (FC) options can be passed using the ``CAN_ISOTP_RECV_FC`` optname +to provide the communication parameters for receiving ISO-TP PDUs. + +.. code-block:: C + + struct can_isotp_fc_options fc_opts; + ret = setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_RECV_FC, &fc_opts, sizeof(fc_opts)); + +where the ``can_isotp_fc_options`` structure has the following contents: + +.. 
code-block:: C + + struct can_isotp_fc_options { + u8 bs; + u8 stmin; + u8 wftmax; + }; + +* ``bs``: blocksize provided in flow control frames. + +* ``stmin``: minimum separation time provided in flow control frames; can + have the following values (others are reserved): + + * 0x00 - 0x7F : 0 - 127 ms + + * 0xF1 - 0xF9 : 100 us - 900 us + +* ``wftmax``: maximum number of wait frames provided in flow control frames. + +Link Layer options +~~~~~~~~~~~~~~~~~~ + +Link Layer (LL) options can be passed using the ``CAN_ISOTP_LL_OPTS`` optname: + +.. code-block:: C + + struct can_isotp_ll_options ll_opts; + ret = setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_LL_OPTS, &ll_opts, sizeof(ll_opts)); + +where the ``can_isotp_ll_options`` structure has the following contents: + +.. code-block:: C + + struct can_isotp_ll_options { + u8 mtu; + u8 tx_dl; + u8 tx_flags; + }; + +* ``mtu``: generated and accepted CAN frame type, can be equal to ``CAN_MTU`` + for classical CAN frames or ``CANFD_MTU`` for CAN FD frames. + +* ``tx_dl``: maximum payload length for transmitted frames, can have one value + among: 8, 12, 16, 20, 24, 32, 48, 64. Values above 8 only apply to CAN FD + traffic (i.e.: ``mtu = CANFD_MTU``). + +* ``tx_flags``: flags set into ``struct canfd_frame.flags`` at frame creation. + Only applies to CAN FD traffic (i.e.: ``mtu = CANFD_MTU``). + +Transmission stmin +~~~~~~~~~~~~~~~~~~ + +The transmission minimum separation time (stmin) can be forced using the +``CAN_ISOTP_TX_STMIN`` optname and providing an stmin value in microseconds as +a 32bit unsigned integer; this will overwrite the value sent by the receiver in +flow control frames: + +.. code-block:: C + + uint32_t stmin; + ret = setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_TX_STMIN, &stmin, sizeof(stmin)); + +Reception stmin +~~~~~~~~~~~~~~~ + +The reception minimum separation time (stmin) can be forced using the +``CAN_ISOTP_RX_STMIN`` optname and providing an stmin value in microseconds as +a 32bit unsigned integer; received Consecutive Frames (CF) whose timestamps +differ by less than this value will be ignored: + +.. code-block:: C + + uint32_t stmin; + ret = setsockopt(s, SOL_CAN_ISOTP, CAN_ISOTP_RX_STMIN, &stmin, sizeof(stmin)); + +Multi-frame transport support +----------------------------- + +The ISO-TP stack contained inside the Linux kernel supports the multi-frame +transport mechanism defined by the standard, with the following constraints +(a short usage sketch follows the list): + +* the maximum size of a PDU is defined by a module parameter, with a hard + limit imposed at build time. + +* when a transmission is in progress, subsequent calls to ``write(2)`` will + block, while calls to ``send(2)`` will either block or fail depending on the + presence of the ``MSG_DONTWAIT`` flag. + +* no support is present for sending "wait frames": whether a PDU can be fully + received or not is decided when the First Frame is received.
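+
+As an illustration of the multi-frame path, below is a minimal sketch (not
+part of the standard ``can-utils`` examples): it assumes a ``can0`` interface
+exists, uses arbitrary 29-bit addresses, and reduces error handling to
+``exit(1)``. Because the payload is larger than a Single Frame can carry, the
+kernel transparently performs the FF/FC/CF sequence described above:
+
+.. code-block:: C
+
+    #include <net/if.h>
+    #include <stdlib.h>
+    #include <string.h>
+    #include <unistd.h>
+    #include <sys/socket.h>
+    #include <linux/can.h>
+    #include <linux/can/isotp.h>
+
+    int main(void)
+    {
+        struct sockaddr_can addr = { .can_family = AF_CAN };
+        unsigned char pdu[100];    /* larger than a Single Frame payload */
+        int s;
+
+        s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP);
+        if (s < 0)
+            exit(1);
+
+        addr.can_ifindex = if_nametoindex("can0");
+        addr.can_addr.tp.tx_id = 0x18DA42F1 | CAN_EFF_FLAG;
+        addr.can_addr.tp.rx_id = 0x18DAF142 | CAN_EFF_FLAG;
+        if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
+            exit(1);
+
+        memset(pdu, 0x55, sizeof(pdu));
+
+        /* Blocks until the whole PDU is handed to the stack; the FF/FC
+         * handshake and the CF pacing happen inside the kernel. */
+        if (write(s, pdu, sizeof(pdu)) != (ssize_t)sizeof(pdu))
+            exit(1);
+
+        close(s);
+        return 0;
+    }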
+ +Errors +------ + +The following errors are reported to userspace: + +RX path errors +~~~~~~~~~~~~~~ + +============ =============================================================== +-ETIMEDOUT timeout of data reception +-EILSEQ sequence number mismatch during a multi-frame reception +-EBADMSG data reception with wrong padding +============ =============================================================== + +TX path errors +~~~~~~~~~~~~~~ + +========== ================================================================= +-ECOMM flow control reception timeout +-EMSGSIZE flow control reception overflow +-EBADMSG flow control reception with wrong layout/padding +========== ================================================================= + +Examples +======== + +Basic node example +------------------ + +The following example implements a node using "normal" physical addressing, with +an RX ID equal to 0x18DAF142 and a TX ID equal to 0x18DA42F1. All options are left +at their defaults. + +.. code-block:: C + + int s; + struct sockaddr_can addr; + int ret; + + s = socket(PF_CAN, SOCK_DGRAM, CAN_ISOTP); + if (s < 0) + exit(1); + + addr.can_family = AF_CAN; + addr.can_ifindex = if_nametoindex("can0"); + addr.can_addr.tp.tx_id = 0x18DA42F1 | CAN_EFF_FLAG; + addr.can_addr.tp.rx_id = 0x18DAF142 | CAN_EFF_FLAG; + + ret = bind(s, (struct sockaddr *)&addr, sizeof(addr)); + if (ret < 0) + exit(1); + + /* Data can now be received using read(s, ...) and sent using write(s, ...) */ + +Additional examples +------------------- + +More complete (and complex) examples can be found inside the ``isotp*`` userland +tools, distributed as part of the ``can-utils`` utilities at: +https://github.com/linux-can/can-utils diff --git a/Documentation/networking/mptcp-sysctl.rst b/Documentation/networking/mptcp-sysctl.rst index 69975ce25a02..fd514bba8c43 100644 --- a/Documentation/networking/mptcp-sysctl.rst +++ b/Documentation/networking/mptcp-sysctl.rst @@ -7,14 +7,6 @@ MPTCP Sysfs variables /proc/sys/net/mptcp/* Variables =============================== -enabled - BOOLEAN - Control whether MPTCP sockets can be created. - - MPTCP sockets can be created if the value is 1. This is a - per-namespace sysctl. - - Default: 1 (enabled) - add_addr_timeout - INTEGER (seconds) Set the timeout after which an ADD_ADDR control message will be resent to an MPTCP peer that has not acknowledged a previous @@ -25,25 +17,6 @@ add_addr_timeout - INTEGER (seconds) Default: 120 -close_timeout - INTEGER (seconds) - Set the make-after-break timeout: in absence of any close or - shutdown syscall, MPTCP sockets will maintain the status - unchanged for such time, after the last subflow removal, before - moving to TCP_CLOSE. - - The default value matches TCP_TIMEWAIT_LEN. This is a per-namespace - sysctl. - - Default: 60 - -checksum_enabled - BOOLEAN - Control whether DSS checksum can be enabled. - - DSS checksum can be enabled if the value is nonzero. This is a - per-namespace sysctl. - - Default: 0 - allow_join_initial_addr_port - BOOLEAN Allow peers to send join requests to the IP address and port number used by the initial subflow if the value is 1. This controls a flag that is @@ -57,6 +30,37 @@ allow_join_initial_addr_port - BOOLEAN Default: 1 +available_schedulers - STRING + Shows the available scheduler choices that are registered. More packet + schedulers may be available, but not loaded. + +checksum_enabled - BOOLEAN + Control whether DSS checksum can be enabled. + + DSS checksum can be enabled if the value is nonzero. This is a + per-namespace sysctl.
+ + Default: 0 + +close_timeout - INTEGER (seconds) + Set the make-after-break timeout: in absence of any close or + shutdown syscall, MPTCP sockets will maintain the status + unchanged for such time, after the last subflow removal, before + moving to TCP_CLOSE. + + The default value matches TCP_TIMEWAIT_LEN. This is a per-namespace + sysctl. + + Default: 60 + +enabled - BOOLEAN + Control whether MPTCP sockets can be created. + + MPTCP sockets can be created if the value is 1. This is a + per-namespace sysctl. + + Default: 1 (enabled) + pm_type - INTEGER Set the default path manager type to use for each new MPTCP socket. In-kernel path management will control subflow @@ -74,6 +78,14 @@ pm_type - INTEGER Default: 0 +scheduler - STRING + Select the scheduler of your choice. + + Support for selection of different schedulers. This is a per-namespace + sysctl. + + Default: "default" + stale_loss_cnt - INTEGER The number of MPTCP-level retransmission intervals with no traffic and pending outstanding data on a given subflow required to declare it stale. @@ -85,11 +97,3 @@ stale_loss_cnt - INTEGER This is a per-namespace sysctl. Default: 4 - -scheduler - STRING - Select the scheduler of your choice. - - Support for selection of different schedulers. This is a per-namespace - sysctl. - - Default: "default" diff --git a/Documentation/networking/mptcp.rst b/Documentation/networking/mptcp.rst new file mode 100644 index 000000000000..17f2bab61164 --- /dev/null +++ b/Documentation/networking/mptcp.rst @@ -0,0 +1,156 @@ +.. SPDX-License-Identifier: GPL-2.0 + +===================== +Multipath TCP (MPTCP) +===================== + +Introduction +============ + +Multipath TCP or MPTCP is an extension to the standard TCP and is described in +`RFC 8684 (MPTCPv1) `_. It allows a +device to make use of multiple interfaces at once to send and receive TCP +packets over a single MPTCP connection. MPTCP can aggregate the bandwidth of +multiple interfaces or prefer the one with the lowest latency. It also allows +fail-over if one path is down, as the traffic is then seamlessly reinjected on +other paths. + +For more details about Multipath TCP in the Linux kernel, please see the +official website: `mptcp.dev `_. + + +Use cases +========= + +Thanks to MPTCP, being able to use multiple paths in parallel brings new use +cases compared to TCP: + +- Seamless handovers: switching from one path to another while preserving + established connections, e.g. in mobility use cases, like on + smartphones. +- Best network selection: using the "best" available path depending on some + conditions, e.g. latency, losses, cost, bandwidth, etc. +- Network aggregation: using multiple paths at the same time to have a higher + throughput, e.g. to combine fixed and mobile networks to send files faster. + + +Concepts +======== + +Technically, when a new socket is created with the ``IPPROTO_MPTCP`` protocol +(Linux-specific), a *subflow* (or *path*) is created. This *subflow* consists of +a regular TCP connection that is used to transmit data through one interface. +Additional *subflows* can be negotiated later between the hosts. For the remote +host to be able to detect the use of MPTCP, a new field is added to the TCP +*option* field of the underlying TCP *subflow*. This field contains, amongst +other things, an ``MP_CAPABLE`` option that tells the other host to use MPTCP if +it is supported.
If the remote host or any middlebox in between does not support +it, the returned ``SYN+ACK`` packet will not contain MPTCP options in the TCP +*option* field. In that case, the connection will be "downgraded" to plain TCP, +and it will continue with a single path. + +This behavior is made possible by two internal components: the path manager, and +the packet scheduler. + +Path Manager +------------ + +The Path Manager is in charge of *subflows*, from creation to deletion, and also +address announcements. Typically, it is the client side that initiates subflows, +and the server side that announces additional addresses via the ``ADD_ADDR`` and +``REMOVE_ADDR`` options. + +Path managers are controlled by the ``net.mptcp.pm_type`` sysctl knob -- see +mptcp-sysctl.rst. There are two types: the in-kernel one (type ``0``) where the +same rules are applied for all the connections (see ``ip mptcp``); and the +userspace one (type ``1``), controlled by a userspace daemon (e.g. `mptcpd +`_) where different rules can be applied for each +connection. The path managers can be controlled via a Netlink API; see +netlink_spec/mptcp_pm.rst. + +To be able to use multiple IP addresses on a host to create multiple *subflows* +(paths), the default in-kernel MPTCP path-manager needs to know which IP +addresses can be used. This can be configured with ``ip mptcp endpoint`` for +example. + +Packet Scheduler +---------------- + +The Packet Scheduler is in charge of selecting which available *subflow(s)* to +use to send the next data packet. It can decide to maximize the use of the +available bandwidth, to pick only the path with the lowest latency, or to apply +any other policy depending on the configuration. + +Packet schedulers are controlled by the ``net.mptcp.scheduler`` sysctl knob -- +see mptcp-sysctl.rst. + + +Sockets API +=========== + +Creating MPTCP sockets +---------------------- + +On Linux, MPTCP can be used by selecting MPTCP instead of TCP when creating the +``socket``: + +.. code-block:: C + + int sd = socket(AF_INET(6), SOCK_STREAM, IPPROTO_MPTCP); + +Note that ``IPPROTO_MPTCP`` is defined as ``262``. + +If MPTCP is not supported, ``errno`` will be set to: + +- ``EINVAL`` (*Invalid argument*): MPTCP is not available, on kernels < 5.6. +- ``EPROTONOSUPPORT`` (*Protocol not supported*): MPTCP has not been compiled + in, on kernels >= 5.6. +- ``ENOPROTOOPT`` (*Protocol not available*): MPTCP has been disabled using + the ``net.mptcp.enabled`` sysctl knob; see mptcp-sysctl.rst. + +MPTCP is then opt-in: applications need to explicitly request it. Note that +applications can be forced to use MPTCP with different techniques, e.g. +``LD_PRELOAD`` (see ``mptcpize``), eBPF (see ``mptcpify``), SystemTAP, +``GODEBUG`` (``GODEBUG=multipathtcp=1``), etc. + +Switching to ``IPPROTO_MPTCP`` instead of ``IPPROTO_TCP`` should be as +transparent as possible for userspace applications. + +Socket options +-------------- + +MPTCP supports most socket options handled by TCP. It is possible some less +common options are not supported, but contributions are welcome. + +Generally, the same value is propagated to all subflows, including the ones +created after the calls to ``setsockopt()``. eBPF can be used to set different +values per subflow. + +There are some MPTCP-specific socket options at the ``SOL_MPTCP`` (284) level to +retrieve info. They fill the ``optval`` buffer of the ``getsockopt()`` system +call: + +- ``MPTCP_INFO``: Uses ``struct mptcp_info``.
+- ``MPTCP_TCPINFO``: Uses ``struct mptcp_subflow_data``, followed by an array of + ``struct tcp_info``. +- ``MPTCP_SUBFLOW_ADDRS``: Uses ``struct mptcp_subflow_data``, followed by an + array of ``mptcp_subflow_addrs``. +- ``MPTCP_FULL_INFO``: Uses ``struct mptcp_full_info``, with one pointer to an + array of ``struct mptcp_subflow_info`` (including the + ``struct mptcp_subflow_addrs``), and one pointer to an array of + ``struct tcp_info``, followed by the content of ``struct mptcp_info``. + +Note that at the TCP level, the ``TCP_IS_MPTCP`` socket option can be used to +know if MPTCP is currently being used: the value will be set to 1 if it is. + + +Design choices +============== + +A new socket type has been added for the userspace-facing MPTCP socket. The +kernel is in charge of creating subflow sockets: they are TCP sockets where the +behavior is modified using TCP-ULP. + +MPTCP listen sockets will create "plain" *accepted* TCP sockets if the +connection request from the client didn't ask for MPTCP, making the performance +impact minimal when MPTCP is enabled by default. diff --git a/Documentation/networking/net_dim.rst b/Documentation/networking/net_dim.rst index 3bed9fd95336..8908fd7b0a8d 100644 --- a/Documentation/networking/net_dim.rst +++ b/Documentation/networking/net_dim.rst @@ -169,6 +169,48 @@ usage is not complete but it should make the outline of the usage clear. ... } + +Tuning DIM +========== + +Net DIM serves a range of network devices and delivers excellent acceleration +benefits. Yet, it has been observed that some preset configurations of DIM may +not align seamlessly with the varying specifications of network devices, and +this discrepancy has been identified as a factor in the suboptimal performance +of DIM-enabled network devices, related to a mismatch in profiles. + +To address this issue, Net DIM introduces a per-device control to modify and +access a device's ``rx-profile`` and ``tx-profile`` parameters. +Assume that the target network device is named ethx, and ethx only declares +support for RX profile setting and supports modification of the ``usec`` +and ``pkts`` fields (see the data structure: +:c:type:`struct dim_cq_moder `). + +You can use ethtool to modify the current RX DIM profile, in which all +values are initially 64:: + + $ ethtool -C ethx rx-profile 1,1,n_2,2,n_3,n,n_n,4,n_n,n,n + +``n`` means do not modify this field, and ``_`` separates structure +elements of the profile array. + +Query the current profiles using:: + + $ ethtool -c ethx + ... + rx-profile: + {.usec = 1, .pkts = 1, .comps = n/a,}, + {.usec = 2, .pkts = 2, .comps = n/a,}, + {.usec = 3, .pkts = 64, .comps = n/a,}, + {.usec = 64, .pkts = 4, .comps = n/a,}, + {.usec = 64, .pkts = 64, .comps = n/a,} + tx-profile: n/a + +If the network device does not support specific fields of the DIM profiles, +the corresponding fields are displayed as ``n/a``. If an ``n/a`` field is +modified, an error message will be reported. + + Dynamic Interrupt Moderation (DIM) library API ============================================== diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst index 1283240d7620..f64641417c54 100644 --- a/Documentation/networking/phy.rst +++ b/Documentation/networking/phy.rst @@ -327,6 +327,12 @@ Some of the interface modes are described below: This is the Penta SGMII mode, it is similar to QSGMII but it combines 5 SGMII lines into a single link compared to 4 on QSGMII.
+``PHY_INTERFACE_MODE_10G_QXGMII`` + Represents the 10G-QXGMII PHY-MAC interface as defined by the Cisco USXGMII + Multiport Copper Interface document. It supports 4 ports over a 10.3125 GHz + SerDes lane, each port having speeds of 2.5G / 1G / 100M / 10M achieved + through symbol replication. The PCS expects the standard USXGMII code word. + Pause frames / flow control =========================== diff --git a/Documentation/networking/sriov.rst b/Documentation/networking/sriov.rst new file mode 100644 index 000000000000..5deb4ff3154f --- /dev/null +++ b/Documentation/networking/sriov.rst @@ -0,0 +1,25 @@ +.. SPDX-License-Identifier: GPL-2.0 + +=============== +NIC SR-IOV APIs +=============== + +Modern NICs are strongly encouraged to focus on implementing the ``switchdev`` +model (see :ref:`switchdev`) to configure forwarding and security of SR-IOV +functionality. + +Legacy API +========== + +The old SR-IOV API is implemented in the ``rtnetlink`` Netlink family as part of +the ``RTM_GETLINK`` and ``RTM_SETLINK`` commands. On the driver side +it consists of a number of ``ndo_set_vf_*`` and ``ndo_get_vf_*`` callbacks. + +Since the legacy APIs do not integrate well with the rest of the stack, +the API is considered frozen; no new functionality or extensions +will be accepted. New drivers should not implement the uncommon callbacks; +namely, the following callbacks are off limits: + + - ``ndo_get_vf_port`` + - ``ndo_set_vf_port`` + - ``ndo_set_vf_rss_query_en`` diff --git a/Documentation/networking/tcp_ao.rst b/Documentation/networking/tcp_ao.rst index 8a58321acce7..e96e62d1dab3 100644 --- a/Documentation/networking/tcp_ao.rst +++ b/Documentation/networking/tcp_ao.rst @@ -337,6 +337,15 @@ TCP-AO per-socket counters are also duplicated with per-netns counters, exposed with SNMP. Those are ``TCPAOGood``, ``TCPAOBad``, ``TCPAOKeyNotFound``, ``TCPAORequired`` and ``TCPAODroppedIcmps``. +For monitoring purposes, there are the following TCP-AO trace events: +``tcp_hash_bad_header``, ``tcp_hash_ao_required``, ``tcp_ao_handshake_failure``, +``tcp_ao_wrong_maclen``, ``tcp_ao_key_not_found``, +``tcp_ao_rnext_request``, ``tcp_ao_synack_no_key``, ``tcp_ao_snd_sne_update``, +``tcp_ao_rcv_sne_update``. It's possible to enable any of them separately, and +one can filter them by net-namespace, 4-tuple, family, L3 index, and TCP header +flags. If a segment has a TCP-AO header, the filters may also include +keyid, rnext, and maclen. SNE updates include the rolled-over numbers.
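+
+For per-socket introspection, the counters mentioned above can also be read
+back by the socket owner via ``getsockopt(TCP_AO_INFO)``. Below is a minimal
+sketch, assuming a connected socket ``sk`` with TCP-AO already configured and
+uapi headers exposing ``struct tcp_ao_info_opt`` from ``linux/tcp.h``; the
+subset of fields printed here is an illustrative assumption:
+
+.. code-block:: C
+
+    #include <stdio.h>
+    #include <string.h>
+    #include <netinet/in.h>
+    #include <sys/socket.h>
+    #include <linux/tcp.h>
+
+    /* Dump a few per-socket TCP-AO counters for an established socket. */
+    static int dump_tcp_ao_counters(int sk)
+    {
+        struct tcp_ao_info_opt info;
+        socklen_t len = sizeof(info);
+
+        memset(&info, 0, sizeof(info));
+        if (getsockopt(sk, IPPROTO_TCP, TCP_AO_INFO, &info, &len))
+            return -1;
+
+        printf("segments with a good signature: %llu\n",
+               (unsigned long long)info.pkt_good);
+        printf("segments with a bad signature:  %llu\n",
+               (unsigned long long)info.pkt_bad);
+        return 0;
+    }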
+ RFC 5925 very permissively specifies how TCP port matching can be done for MKTs:: diff --git a/MAINTAINERS b/MAINTAINERS index 03d24edf3f35..1928b8094443 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -682,6 +682,15 @@ S: Supported F: fs/aio.c F: include/linux/*aio*.h +AIROHA ETHERNET DRIVER +M: Lorenzo Bianconi +L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) +L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) +L: netdev@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/net/airoha,en7581-eth.yaml +F: drivers/net/ethernet/mediatek/airoha_eth.c + AIROHA SPI SNFI DRIVER M: Lorenzo Bianconi M: Ray Liu @@ -4896,6 +4905,7 @@ W: https://github.com/linux-can T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git F: Documentation/networking/can.rst +F: Documentation/networking/iso15765-2.rst F: include/linux/can/can-ml.h F: include/linux/can/core.h F: include/linux/can/skb.h @@ -8880,14 +8890,14 @@ M: Madalin Bucur R: Sean Anderson L: netdev@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/net/fsl-fman.txt +F: Documentation/devicetree/bindings/net/fsl,fman*.yaml F: drivers/net/ethernet/freescale/fman FREESCALE QORIQ PTP CLOCK DRIVER M: Yangbo Lu L: netdev@vger.kernel.org S: Maintained -F: Documentation/devicetree/bindings/ptp/ptp-qoriq.txt +F: Documentation/devicetree/bindings/ptp/fsl,ptp.yaml F: drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp* F: drivers/net/ethernet/freescale/dpaa2/dprtc* F: drivers/net/ethernet/freescale/enetc/enetc_ptp.c @@ -11123,8 +11133,8 @@ F: include/drm/xe* F: include/uapi/drm/xe_drm.h INTEL ETHERNET DRIVERS -M: Jesse Brandeburg M: Tony Nguyen +M: Przemek Kitszel L: intel-wired-lan@lists.osuosl.org (moderated for non-subscribers) S: Supported W: https://www.intel.com/content/www/us/en/support.html @@ -12523,6 +12533,7 @@ LANTIQ / INTEL Ethernet drivers M: Hauke Mehrtens L: netdev@vger.kernel.org S: Maintained +F: Documentation/devicetree/bindings/net/dsa/lantiq,gswip.yaml F: drivers/net/dsa/lantiq_gswip.c F: drivers/net/dsa/lantiq_pce.h F: drivers/net/ethernet/lantiq_xrx200.c @@ -14643,6 +14654,13 @@ T: git git://linuxtv.org/media_tree.git F: Documentation/devicetree/bindings/media/amlogic,gx-vdec.yaml F: drivers/staging/media/meson/vdec/ +META ETHERNET DRIVERS +M: Alexander Duyck +M: Jakub Kicinski +R: kernel-team@meta.com +S: Supported +F: drivers/net/ethernet/meta/ + METHODE UDPU SUPPORT M: Robert Marko S: Maintained @@ -15847,7 +15865,7 @@ B: https://github.com/multipath-tcp/mptcp_net-next/issues T: git https://github.com/multipath-tcp/mptcp_net-next.git export-net T: git https://github.com/multipath-tcp/mptcp_net-next.git export F: Documentation/netlink/specs/mptcp_pm.yaml -F: Documentation/networking/mptcp-sysctl.rst +F: Documentation/networking/mptcp*.rst F: include/net/mptcp.h F: include/trace/events/mptcp.h F: include/uapi/linux/mptcp*.h @@ -15864,8 +15882,13 @@ F: include/linux/tcp.h F: include/net/tcp.h F: include/trace/events/tcp.h F: include/uapi/linux/tcp.h +F: net/ipv4/inet_connection_sock.c +F: net/ipv4/inet_hashtables.c +F: net/ipv4/inet_timewait_sock.c F: net/ipv4/syncookies.c F: net/ipv4/tcp*.c +F: net/ipv6/inet6_connection_sock.c +F: net/ipv6/inet6_hashtables.c F: net/ipv6/syncookies.c F: net/ipv6/tcp*.c @@ -19154,6 +19177,14 @@ F: drivers/net/ethernet/renesas/Makefile F: drivers/net/ethernet/renesas/rcar_gen4* F: drivers/net/ethernet/renesas/rswitch* +RENESAS ETHERNET TSN DRIVER 
+M: Niklas Söderlund +L: netdev@vger.kernel.org +L: linux-renesas-soc@vger.kernel.org +S: Supported +F: Documentation/devicetree/bindings/net/renesas,ethertsn.yaml +F: drivers/net/ethernet/renesas/rtsn.* + RENESAS IDT821034 ASoC CODEC M: Herve Codina L: alsa-devel@alsa-project.org (moderated for non-subscribers) @@ -22262,7 +22293,13 @@ TEHUTI ETHERNET DRIVER M: Andy Gospodarek L: netdev@vger.kernel.org S: Supported -F: drivers/net/ethernet/tehuti/* +F: drivers/net/ethernet/tehuti/tehuti.* + +TEHUTI TN40XX ETHERNET DRIVER +M: FUJITA Tomonori +L: netdev@vger.kernel.org +S: Maintained +F: drivers/net/ethernet/tehuti/tn40* TELECOM CLOCK DRIVER FOR MCPL0010 M: Mark Gross diff --git a/arch/arm/boot/dts/rockchip/rk3066a.dtsi b/arch/arm/boot/dts/rockchip/rk3066a.dtsi index 5e0750547ab5..3f6d49459734 100644 --- a/arch/arm/boot/dts/rockchip/rk3066a.dtsi +++ b/arch/arm/boot/dts/rockchip/rk3066a.dtsi @@ -896,7 +896,3 @@ &wdt { compatible = "rockchip,rk3066-wdt", "snps,dw-wdt"; }; - -&emac { - compatible = "rockchip,rk3066-emac"; -}; diff --git a/arch/arm/boot/dts/rockchip/rk3xxx.dtsi b/arch/arm/boot/dts/rockchip/rk3xxx.dtsi index f37137f298d5..e6a78bcf9163 100644 --- a/arch/arm/boot/dts/rockchip/rk3xxx.dtsi +++ b/arch/arm/boot/dts/rockchip/rk3xxx.dtsi @@ -194,17 +194,14 @@ }; emac: ethernet@10204000 { - compatible = "snps,arc-emac"; + compatible = "rockchip,rk3066-emac"; reg = <0x10204000 0x3c>; interrupts = ; - - rockchip,grf = <&grf>; - clocks = <&cru HCLK_EMAC>, <&cru SCLK_MAC>; clock-names = "hclk", "macref"; max-speed = <100>; phy-mode = "rmii"; - + rockchip,grf = <&grf>; status = "disabled"; }; diff --git a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg1.dtsi b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg1.dtsi index ef7897763ef8..0a29ed172215 100644 --- a/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg1.dtsi +++ b/arch/arm64/boot/dts/ti/k3-am65-iot2050-common-pg1.dtsi @@ -73,3 +73,15 @@ "rx0", "rx1", "rxmgm0", "rxmgm1"; }; + +&icssg0_iep0 { + interrupt-parent = <&icssg0_intc>; + interrupts = <7 7 7>; + interrupt-names = "iep_cap_cmp"; +}; + +&icssg0_iep1 { + interrupt-parent = <&icssg0_intc>; + interrupts = <56 8 8>; + interrupt-names = "iep_cap_cmp"; +}; diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c index 720336d28856..dd0bb069df4b 100644 --- a/arch/arm64/net/bpf_jit_comp.c +++ b/arch/arm64/net/bpf_jit_comp.c @@ -1244,6 +1244,13 @@ emit_cond_jmp: break; } + /* Implement helper call to bpf_get_current_task/_btf() inline */ + if (insn->src_reg == 0 && (insn->imm == BPF_FUNC_get_current_task || + insn->imm == BPF_FUNC_get_current_task_btf)) { + emit(A64_MRS_SP_EL0(r0), ctx); + break; + } + ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &func_addr, &func_addr_fixed); if (ret < 0) @@ -1829,8 +1836,7 @@ skip_init_ctx: prog->jited_len = 0; goto out_free_hdr; } - if (WARN_ON(bpf_jit_binary_pack_finalize(prog, ro_header, - header))) { + if (WARN_ON(bpf_jit_binary_pack_finalize(ro_header, header))) { /* ro_header has been freed */ ro_header = NULL; prog = orig_prog; @@ -2141,7 +2147,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, emit(A64_STR64I(A64_R(20), A64_SP, regs_off + 8), ctx); if (flags & BPF_TRAMP_F_CALL_ORIG) { - emit_addr_mov_i64(A64_R(0), (const u64)im, ctx); + emit_a64_mov_i64(A64_R(0), (const u64)im, ctx); emit_call((const u64)__bpf_tramp_enter, ctx); } @@ -2185,7 +2191,7 @@ static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im, if (flags & BPF_TRAMP_F_CALL_ORIG) { im->ip_epilogue = 
ctx->ro_image + ctx->idx; - emit_addr_mov_i64(A64_R(0), (const u64)im, ctx); + emit_a64_mov_i64(A64_R(0), (const u64)im, ctx); emit_call((const u64)__bpf_tramp_exit, ctx); } @@ -2581,6 +2587,8 @@ bool bpf_jit_inlines_helper_call(s32 imm) { switch (imm) { case BPF_FUNC_get_smp_processor_id: + case BPF_FUNC_get_current_task: + case BPF_FUNC_get_current_task_btf: return true; default: return false; diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c index 984655419da5..2a36cc2e7e9e 100644 --- a/arch/powerpc/net/bpf_jit_comp.c +++ b/arch/powerpc/net/bpf_jit_comp.c @@ -225,7 +225,7 @@ skip_init_ctx: fp->jited_len = proglen + FUNCTION_DESCR_SIZE; if (!fp->is_func || extra_pass) { - if (bpf_jit_binary_pack_finalize(fp, fhdr, hdr)) { + if (bpf_jit_binary_pack_finalize(fhdr, hdr)) { fp = org_fp; goto out_addrs; } @@ -348,7 +348,7 @@ void bpf_jit_free(struct bpf_prog *fp) * before freeing it. */ if (jit_data) { - bpf_jit_binary_pack_finalize(fp, jit_data->fhdr, jit_data->hdr); + bpf_jit_binary_pack_finalize(jit_data->fhdr, jit_data->hdr); kvfree(jit_data->addrs); kfree(jit_data); } diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 0525ee2d63c7..9f38a5ecbee3 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -610,6 +610,18 @@ config TOOLCHAIN_HAS_VECTOR_CRYPTO def_bool $(as-instr, .option arch$(comma) +v$(comma) +zvkb) depends on AS_HAS_OPTION_ARCH +config RISCV_ISA_ZBA + bool "Zba extension support for bit manipulation instructions" + default y + help + Add support for enabling optimisations in the kernel when the Zba + extension is detected at boot. + + The Zba extension provides instructions to accelerate the generation + of addresses that index into arrays of basic data types. + + If you don't know what to do here, say Y. + config RISCV_ISA_ZBB bool "Zbb extension support for bit manipulation instructions" depends on TOOLCHAIN_HAS_ZBB diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h index fdbf88ca8b70..1d1c78d4cff1 100644 --- a/arch/riscv/net/bpf_jit.h +++ b/arch/riscv/net/bpf_jit.h @@ -18,6 +18,11 @@ static inline bool rvc_enabled(void) return IS_ENABLED(CONFIG_RISCV_ISA_C); } +static inline bool rvzba_enabled(void) +{ + return IS_ENABLED(CONFIG_RISCV_ISA_ZBA) && riscv_has_extension_likely(RISCV_ISA_EXT_ZBA); +} + static inline bool rvzbb_enabled(void) { return IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && riscv_has_extension_likely(RISCV_ISA_EXT_ZBB); @@ -737,6 +742,17 @@ static inline u16 rvc_swsp(u32 imm8, u8 rs2) return rv_css_insn(0x6, imm, rs2, 0x2); } +/* RVZBA instructions. */ +static inline u32 rvzba_sh2add(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0x10, rs2, rs1, 0x4, rd, 0x33); +} + +static inline u32 rvzba_sh3add(u8 rd, u8 rs1, u8 rs2) +{ + return rv_r_insn(0x10, rs2, rs1, 0x6, rd, 0x33); +} + /* RVZBB instructions. */ static inline u32 rvzbb_sextb(u8 rd, u8 rs1) { @@ -939,6 +955,14 @@ static inline u16 rvc_sdsp(u32 imm9, u8 rs2) return rv_css_insn(0x7, imm, rs2, 0x2); } +/* RV64-only ZBA instructions. */ + +static inline u32 rvzba_zextw(u8 rd, u8 rs1) +{ + /* add.uw rd, rs1, ZERO */ + return rv_r_insn(0x04, RV_REG_ZERO, rs1, 0, rd, 0x3b); +} + #endif /* __riscv_xlen == 64 */ /* Helper functions that emit RVC instructions when possible. 
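 * (The Zba helpers below follow the same emit-or-fall-back pattern:
 * emit_sh2add()/emit_sh3add() emit a single sh2add/sh3add when Zba is
 * available and an equivalent slli+add pair otherwise, and emit_zextw()
 * uses add.uw in place of the slli/srli pair.)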
*/ @@ -1082,6 +1106,28 @@ static inline void emit_sw(u8 rs1, s32 off, u8 rs2, struct rv_jit_context *ctx) emit(rv_sw(rs1, off, rs2), ctx); } +static inline void emit_sh2add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx) +{ + if (rvzba_enabled()) { + emit(rvzba_sh2add(rd, rs1, rs2), ctx); + return; + } + + emit_slli(rd, rs1, 2, ctx); + emit_add(rd, rd, rs2, ctx); +} + +static inline void emit_sh3add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx) +{ + if (rvzba_enabled()) { + emit(rvzba_sh3add(rd, rs1, rs2), ctx); + return; + } + + emit_slli(rd, rs1, 3, ctx); + emit_add(rd, rd, rs2, ctx); +} + /* RV64-only helper functions. */ #if __riscv_xlen == 64 @@ -1161,6 +1207,11 @@ static inline void emit_zexth(u8 rd, u8 rs, struct rv_jit_context *ctx) static inline void emit_zextw(u8 rd, u8 rs, struct rv_jit_context *ctx) { + if (rvzba_enabled()) { + emit(rvzba_zextw(rd, rs), ctx); + return; + } + emit_slli(rd, rs, 32, ctx); emit_srli(rd, rd, 32, ctx); } diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c index f5ba73bb153d..592dd86fbf81 100644 --- a/arch/riscv/net/bpf_jit_comp32.c +++ b/arch/riscv/net/bpf_jit_comp32.c @@ -811,8 +811,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) * if (!prog) * goto out; */ - emit(rv_slli(RV_REG_T0, lo(idx_reg), 2), ctx); - emit(rv_add(RV_REG_T0, RV_REG_T0, lo(arr_reg)), ctx); + emit_sh2add(RV_REG_T0, lo(idx_reg), lo(arr_reg), ctx); off = offsetof(struct bpf_array, ptrs); if (is_12b_check(off, insn)) return -1; diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c index 79a001d5533e..0795efdd3519 100644 --- a/arch/riscv/net/bpf_jit_comp64.c +++ b/arch/riscv/net/bpf_jit_comp64.c @@ -15,7 +15,10 @@ #include #include "bpf_jit.h" +#define RV_MAX_REG_ARGS 8 #define RV_FENTRY_NINSNS 2 +/* imm that allows emit_imm to emit max count insns */ +#define RV_MAX_COUNT_IMM 0x7FFF7FF7FF7FF7FF #define RV_REG_TCC RV_REG_A6 #define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if program do calls */ @@ -380,8 +383,7 @@ static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx) * if (!prog) * goto out; */ - emit_slli(RV_REG_T2, RV_REG_A2, 3, ctx); - emit_add(RV_REG_T2, RV_REG_T2, RV_REG_A1, ctx); + emit_sh3add(RV_REG_T2, RV_REG_A2, RV_REG_A1, ctx); off = offsetof(struct bpf_array, ptrs); if (is_12b_check(off, insn)) return -1; @@ -537,8 +539,10 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64, /* r0 = atomic_cmpxchg(dst_reg + off16, r0, src_reg); */ case BPF_CMPXCHG: r0 = bpf_to_rv_reg(BPF_REG_0, ctx); - emit(is64 ? rv_addi(RV_REG_T2, r0, 0) : - rv_addiw(RV_REG_T2, r0, 0), ctx); + if (is64) + emit_mv(RV_REG_T2, r0, ctx); + else + emit_addiw(RV_REG_T2, r0, 0, ctx); emit(is64 ? 
rv_lr_d(r0, 0, rd, 0, 0) : rv_lr_w(r0, 0, rd, 0, 0), ctx); jmp_offset = ninsns_rvoff(8); @@ -689,26 +693,45 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type, return ret; } -static void store_args(int nregs, int args_off, struct rv_jit_context *ctx) +static void store_args(int nr_arg_slots, int args_off, struct rv_jit_context *ctx) { int i; - for (i = 0; i < nregs; i++) { - emit_sd(RV_REG_FP, -args_off, RV_REG_A0 + i, ctx); + for (i = 0; i < nr_arg_slots; i++) { + if (i < RV_MAX_REG_ARGS) { + emit_sd(RV_REG_FP, -args_off, RV_REG_A0 + i, ctx); + } else { + /* skip slots for T0 and FP of traced function */ + emit_ld(RV_REG_T1, 16 + (i - RV_MAX_REG_ARGS) * 8, RV_REG_FP, ctx); + emit_sd(RV_REG_FP, -args_off, RV_REG_T1, ctx); + } args_off -= 8; } } -static void restore_args(int nregs, int args_off, struct rv_jit_context *ctx) +static void restore_args(int nr_reg_args, int args_off, struct rv_jit_context *ctx) { int i; - for (i = 0; i < nregs; i++) { + for (i = 0; i < nr_reg_args; i++) { emit_ld(RV_REG_A0 + i, -args_off, RV_REG_FP, ctx); args_off -= 8; } } +static void restore_stack_args(int nr_stack_args, int args_off, int stk_arg_off, + struct rv_jit_context *ctx) +{ + int i; + + for (i = 0; i < nr_stack_args; i++) { + emit_ld(RV_REG_T1, -(args_off - RV_MAX_REG_ARGS * 8), RV_REG_FP, ctx); + emit_sd(RV_REG_FP, -stk_arg_off, RV_REG_T1, ctx); + args_off -= 8; + stk_arg_off -= 8; + } +} + static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_off, int run_ctx_off, bool save_ret, struct rv_jit_context *ctx) { @@ -781,8 +804,8 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, { int i, ret, offset; int *branches_off = NULL; - int stack_size = 0, nregs = m->nr_args; - int retval_off, args_off, nregs_off, ip_off, run_ctx_off, sreg_off; + int stack_size = 0, nr_arg_slots = 0; + int retval_off, args_off, nregs_off, ip_off, run_ctx_off, sreg_off, stk_arg_off; struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY]; struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT]; struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN]; @@ -828,20 +851,21 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, * FP - sreg_off [ callee saved reg ] * * [ pads ] pads for 16 bytes alignment + * + * [ stack_argN ] + * [ ... 
] + * FP - stk_arg_off [ stack_arg1 ] BPF_TRAMP_F_CALL_ORIG */ if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY)) return -ENOTSUPP; - /* extra regiters for struct arguments */ - for (i = 0; i < m->nr_args; i++) - if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) - nregs += round_up(m->arg_size[i], 8) / 8 - 1; - - /* 8 arguments passed by registers */ - if (nregs > 8) + if (m->nr_args > MAX_BPF_FUNC_ARGS) return -ENOTSUPP; + for (i = 0; i < m->nr_args; i++) + nr_arg_slots += round_up(m->arg_size[i], 8) / 8; + /* room of trampoline frame to store return address and frame pointer */ stack_size += 16; @@ -851,7 +875,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, retval_off = stack_size; } - stack_size += nregs * 8; + stack_size += nr_arg_slots * 8; args_off = stack_size; stack_size += 8; @@ -868,7 +892,13 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, stack_size += 8; sreg_off = stack_size; - stack_size = round_up(stack_size, 16); + if ((flags & BPF_TRAMP_F_CALL_ORIG) && (nr_arg_slots - RV_MAX_REG_ARGS > 0)) + stack_size += (nr_arg_slots - RV_MAX_REG_ARGS) * 8; + + stack_size = round_up(stack_size, STACK_ALIGN); + + /* room for args on stack must be at the top of stack */ + stk_arg_off = stack_size; if (!is_struct_ops) { /* For the trampoline called from function entry, @@ -905,17 +935,17 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, emit_sd(RV_REG_FP, -ip_off, RV_REG_T1, ctx); } - emit_li(RV_REG_T1, nregs, ctx); + emit_li(RV_REG_T1, nr_arg_slots, ctx); emit_sd(RV_REG_FP, -nregs_off, RV_REG_T1, ctx); - store_args(nregs, args_off, ctx); + store_args(nr_arg_slots, args_off, ctx); /* skip to actual body of traced function */ if (flags & BPF_TRAMP_F_SKIP_FRAME) orig_call += RV_FENTRY_NINSNS * 4; if (flags & BPF_TRAMP_F_CALL_ORIG) { - emit_imm(RV_REG_A0, (const s64)im, ctx); + emit_imm(RV_REG_A0, ctx->insns ? (const s64)im : RV_MAX_COUNT_IMM, ctx); ret = emit_call((const u64)__bpf_tramp_enter, true, ctx); if (ret) return ret; @@ -948,13 +978,14 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, } if (flags & BPF_TRAMP_F_CALL_ORIG) { - restore_args(nregs, args_off, ctx); + restore_args(min_t(int, nr_arg_slots, RV_MAX_REG_ARGS), args_off, ctx); + restore_stack_args(nr_arg_slots - RV_MAX_REG_ARGS, args_off, stk_arg_off, ctx); ret = emit_call((const u64)orig_call, true, ctx); if (ret) goto out; emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx); emit_sd(RV_REG_FP, -(retval_off - 8), regmap[BPF_REG_0], ctx); - im->ip_after_call = ctx->insns + ctx->ninsns; + im->ip_after_call = ctx->ro_insns + ctx->ninsns; /* 2 nops reserved for auipc+jalr pair */ emit(rv_nop(), ctx); emit(rv_nop(), ctx); @@ -975,15 +1006,15 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, } if (flags & BPF_TRAMP_F_CALL_ORIG) { - im->ip_epilogue = ctx->insns + ctx->ninsns; - emit_imm(RV_REG_A0, (const s64)im, ctx); + im->ip_epilogue = ctx->ro_insns + ctx->ninsns; + emit_imm(RV_REG_A0, ctx->insns ? (const s64)im : RV_MAX_COUNT_IMM, ctx); ret = emit_call((const u64)__bpf_tramp_exit, true, ctx); if (ret) goto out; } if (flags & BPF_TRAMP_F_RESTORE_REGS) - restore_args(nregs, args_off, ctx); + restore_args(min_t(int, nr_arg_slots, RV_MAX_REG_ARGS), args_off, ctx); if (save_ret) { emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx); @@ -1038,31 +1069,52 @@ int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, return ret < 0 ? 
ret : ninsns_rvoff(ctx.ninsns); } -int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, - void *image_end, const struct btf_func_model *m, +void *arch_alloc_bpf_trampoline(unsigned int size) +{ + return bpf_prog_pack_alloc(size, bpf_fill_ill_insns); +} + +void arch_free_bpf_trampoline(void *image, unsigned int size) +{ + bpf_prog_pack_free(image, size); +} + +int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *ro_image, + void *ro_image_end, const struct btf_func_model *m, u32 flags, struct bpf_tramp_links *tlinks, void *func_addr) { int ret; + void *image, *res; struct rv_jit_context ctx; + u32 size = ro_image_end - ro_image; + + image = kvmalloc(size, GFP_KERNEL); + if (!image) + return -ENOMEM; ctx.ninsns = 0; - /* - * The bpf_int_jit_compile() uses a RW buffer (ctx.insns) to write the - * JITed instructions and later copies it to a RX region (ctx.ro_insns). - * It also uses ctx.ro_insns to calculate offsets for jumps etc. As the - * trampoline image uses the same memory area for writing and execution, - * both ctx.insns and ctx.ro_insns can be set to image. - */ ctx.insns = image; - ctx.ro_insns = image; + ctx.ro_insns = ro_image; ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx); if (ret < 0) - return ret; + goto out; - bpf_flush_icache(ctx.insns, ctx.insns + ctx.ninsns); + if (WARN_ON(size < ninsns_rvoff(ctx.ninsns))) { + ret = -E2BIG; + goto out; + } - return ninsns_rvoff(ret); + res = bpf_arch_text_copy(ro_image, image, size); + if (IS_ERR(res)) { + ret = PTR_ERR(res); + goto out; + } + + bpf_flush_icache(ro_image, ro_image_end); +out: + kvfree(image); + return ret < 0 ? ret : size; } int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, @@ -1097,12 +1149,10 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx, /* Load current CPU number in T1 */ emit_ld(RV_REG_T1, offsetof(struct thread_info, cpu), RV_REG_TP, ctx); - /* << 3 because offsets are 8 bytes */ - emit_slli(RV_REG_T1, RV_REG_T1, 3, ctx); /* Load address of __per_cpu_offset array in T2 */ emit_addr(RV_REG_T2, (u64)&__per_cpu_offset, extra_pass, ctx); - /* Add offset of current CPU to __per_cpu_offset */ - emit_add(RV_REG_T1, RV_REG_T2, RV_REG_T1, ctx); + /* Get address of __per_cpu_offset[cpu] in T1 */ + emit_sh3add(RV_REG_T1, RV_REG_T1, RV_REG_T2, ctx); /* Load __per_cpu_offset[cpu] in T1 */ emit_ld(RV_REG_T1, 0, RV_REG_T1, ctx); /* Add the offset to Rd */ @@ -1960,7 +2010,7 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog) { int i, stack_adjust = 0, store_offset, bpf_stack_adjust; - bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16); + bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, STACK_ALIGN); if (bpf_stack_adjust) mark_fp(ctx); @@ -1982,7 +2032,7 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog) if (ctx->arena_vm_start) stack_adjust += 8; - stack_adjust = round_up(stack_adjust, 16); + stack_adjust = round_up(stack_adjust, STACK_ALIGN); stack_adjust += bpf_stack_adjust; store_offset = stack_adjust - 8; diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c index 0a96abdaca65..6de753c667f4 100644 --- a/arch/riscv/net/bpf_jit_core.c +++ b/arch/riscv/net/bpf_jit_core.c @@ -178,8 +178,7 @@ skip_init_ctx: prog->jited_len = prog_size - cfi_get_offset(); if (!prog->is_func || extra_pass) { - if (WARN_ON(bpf_jit_binary_pack_finalize(prog, jit_data->ro_header, - jit_data->header))) { + if 
(WARN_ON(bpf_jit_binary_pack_finalize(jit_data->ro_header, jit_data->header))) { /* ro_header has been freed */ jit_data->ro_header = NULL; prog = orig_prog; @@ -258,7 +257,7 @@ void bpf_jit_free(struct bpf_prog *prog) * before freeing it. */ if (jit_data) { - bpf_jit_binary_pack_finalize(prog, jit_data->ro_header, jit_data->header); + bpf_jit_binary_pack_finalize(jit_data->ro_header, jit_data->header); kfree(jit_data); } hdr = bpf_jit_binary_pack_hdr(prog); diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index 4be8f5cadd02..9d440a0b729e 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -31,11 +31,12 @@ #include #include #include +#include #include "bpf_jit.h" struct bpf_jit { u32 seen; /* Flags to remember seen eBPF instructions */ - u32 seen_reg[16]; /* Array to remember which registers are used */ + u16 seen_regs; /* Mask to remember which registers are used */ u32 *addrs; /* Array with relative instruction addresses */ u8 *prg_buf; /* Start of program */ int size; /* Size of program and literal pool */ @@ -53,6 +54,8 @@ struct bpf_jit { int excnt; /* Number of exception table entries */ int prologue_plt_ret; /* Return address for prologue hotpatch PLT */ int prologue_plt; /* Start of prologue hotpatch PLT */ + int kern_arena; /* Pool offset of kernel arena address */ + u64 user_arena; /* User arena address */ }; #define SEEN_MEM BIT(0) /* use mem[] for temporary storage */ @@ -60,6 +63,8 @@ struct bpf_jit { #define SEEN_FUNC BIT(2) /* calls C functions */ #define SEEN_STACK (SEEN_FUNC | SEEN_MEM) +#define NVREGS 0xffc0 /* %r6-%r15 */ + /* * s390 registers */ @@ -118,8 +123,8 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) { u32 r1 = reg2hex[b1]; - if (r1 >= 6 && r1 <= 15 && !jit->seen_reg[r1]) - jit->seen_reg[r1] = 1; + if (r1 >= 6 && r1 <= 15) + jit->seen_regs |= (1 << r1); } #define REG_SET_SEEN(b1) \ @@ -127,8 +132,6 @@ static inline void reg_set_seen(struct bpf_jit *jit, u32 b1) reg_set_seen(jit, b1); \ }) -#define REG_SEEN(b1) jit->seen_reg[reg2hex[(b1)]] - /* * EMIT macros for code generation */ @@ -436,12 +439,12 @@ static void restore_regs(struct bpf_jit *jit, u32 rs, u32 re, u32 stack_depth) /* * Return first seen register (from start) */ -static int get_start(struct bpf_jit *jit, int start) +static int get_start(u16 seen_regs, int start) { int i; for (i = start; i <= 15; i++) { - if (jit->seen_reg[i]) + if (seen_regs & (1 << i)) return i; } return 0; @@ -450,15 +453,15 @@ static int get_start(struct bpf_jit *jit, int start) /* * Return last seen register (from start) (gap >= 2) */ -static int get_end(struct bpf_jit *jit, int start) +static int get_end(u16 seen_regs, int start) { int i; for (i = start; i < 15; i++) { - if (!jit->seen_reg[i] && !jit->seen_reg[i + 1]) + if (!(seen_regs & (3 << i))) return i - 1; } - return jit->seen_reg[15] ? 15 : 14; + return (seen_regs & (1 << 15)) ? 15 : 14; } #define REGS_SAVE 1 @@ -467,8 +470,10 @@ static int get_end(struct bpf_jit *jit, int start) * Save and restore clobbered registers (6-15) on stack. * We save/restore registers in chunks with gap >= 2 registers. 
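 * For example, if %r6, %r7 and %r9 are seen, a single chunk %r6-%r9 is
 * saved and restored (a one-register gap is cheaper to include than to
 * split), while a gap of two or more registers starts a new chunk.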
*/ -static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth) +static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth, + u16 extra_regs) { + u16 seen_regs = jit->seen_regs | extra_regs; const int last = 15, save_restore_size = 6; int re = 6, rs; @@ -482,10 +487,10 @@ static void save_restore_regs(struct bpf_jit *jit, int op, u32 stack_depth) } do { - rs = get_start(jit, re); + rs = get_start(seen_regs, re); if (!rs) break; - re = get_end(jit, rs + 1); + re = get_end(seen_regs, rs + 1); if (op == REGS_SAVE) save_regs(jit, rs, re); else @@ -570,8 +575,21 @@ static void bpf_jit_prologue(struct bpf_jit *jit, struct bpf_prog *fp, } /* Tail calls have to skip above initialization */ jit->tail_call_start = jit->prg; - /* Save registers */ - save_restore_regs(jit, REGS_SAVE, stack_depth); + if (fp->aux->exception_cb) { + /* + * Switch stack, the new address is in the 2nd parameter. + * + * Arrange the restoration of %r6-%r15 in the epilogue. + * Do not restore them now, the prog does not need them. + */ + /* lgr %r15,%r3 */ + EMIT4(0xb9040000, REG_15, REG_3); + jit->seen_regs |= NVREGS; + } else { + /* Save registers */ + save_restore_regs(jit, REGS_SAVE, stack_depth, + fp->aux->exception_boundary ? NVREGS : 0); + } /* Setup literal pool */ if (is_first_pass(jit) || (jit->seen & SEEN_LITERAL)) { if (!is_first_pass(jit) && @@ -647,7 +665,7 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth) /* Load exit code: lgr %r2,%b0 */ EMIT4(0xb9040000, REG_2, BPF_REG_0); /* Restore registers */ - save_restore_regs(jit, REGS_RESTORE, stack_depth); + save_restore_regs(jit, REGS_RESTORE, stack_depth, 0); if (nospec_uses_trampoline()) { jit->r14_thunk_ip = jit->prg; /* Generate __s390_indirect_jump_r14 thunk */ @@ -667,50 +685,111 @@ static void bpf_jit_epilogue(struct bpf_jit *jit, u32 stack_depth) jit->prg += sizeof(struct bpf_plt); } -static int get_probe_mem_regno(const u8 *insn) -{ - /* - * insn must point to llgc, llgh, llgf, lg, lgb, lgh or lgf, which have - * destination register at the same position. - */ - if (insn[0] != 0xe3) /* common prefix */ - return -1; - if (insn[5] != 0x90 && /* llgc */ - insn[5] != 0x91 && /* llgh */ - insn[5] != 0x16 && /* llgf */ - insn[5] != 0x04 && /* lg */ - insn[5] != 0x77 && /* lgb */ - insn[5] != 0x15 && /* lgh */ - insn[5] != 0x14) /* lgf */ - return -1; - return insn[1] >> 4; -} - bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) { regs->psw.addr = extable_fixup(x); - regs->gprs[x->data] = 0; + if (x->data != -1) + regs->gprs[x->data] = 0; return true; } -static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp, - int probe_prg, int nop_prg) +/* + * A single BPF probe instruction + */ +struct bpf_jit_probe { + int prg; /* JITed instruction offset */ + int nop_prg; /* JITed nop offset */ + int reg; /* Register to clear on exception */ + int arena_reg; /* Register to use for arena addressing */ +}; + +static void bpf_jit_probe_init(struct bpf_jit_probe *probe) +{ + probe->prg = -1; + probe->nop_prg = -1; + probe->reg = -1; + probe->arena_reg = REG_0; +} + +/* + * Handlers of certain exceptions leave psw.addr pointing to the instruction + * directly after the failing one. Therefore, create two exception table + * entries and also add a nop in case two probing instructions come directly + * after each other. 
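+ * The nop gives the second extable entry a distinct address to match,
+ * so a fault reported at the address right after the probing
+ * instruction still resolves to this probe and not to a following one.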
+ */ +static void bpf_jit_probe_emit_nop(struct bpf_jit *jit, + struct bpf_jit_probe *probe) +{ + if (probe->prg == -1 || probe->nop_prg != -1) + /* The probe is not armed or nop is already emitted. */ + return; + + probe->nop_prg = jit->prg; + /* bcr 0,%0 */ + _EMIT2(0x0700); +} + +static void bpf_jit_probe_load_pre(struct bpf_jit *jit, struct bpf_insn *insn, + struct bpf_jit_probe *probe) +{ + if (BPF_MODE(insn->code) != BPF_PROBE_MEM && + BPF_MODE(insn->code) != BPF_PROBE_MEMSX && + BPF_MODE(insn->code) != BPF_PROBE_MEM32) + return; + + if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) { + /* lgrl %r1,kern_arena */ + EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena); + probe->arena_reg = REG_W1; + } + probe->prg = jit->prg; + probe->reg = reg2hex[insn->dst_reg]; +} + +static void bpf_jit_probe_store_pre(struct bpf_jit *jit, struct bpf_insn *insn, + struct bpf_jit_probe *probe) +{ + if (BPF_MODE(insn->code) != BPF_PROBE_MEM32) + return; + + /* lgrl %r1,kern_arena */ + EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena); + probe->arena_reg = REG_W1; + probe->prg = jit->prg; +} + +static void bpf_jit_probe_atomic_pre(struct bpf_jit *jit, + struct bpf_insn *insn, + struct bpf_jit_probe *probe) +{ + if (BPF_MODE(insn->code) != BPF_PROBE_ATOMIC) + return; + + /* lgrl %r1,kern_arena */ + EMIT6_PCREL_RILB(0xc4080000, REG_W1, jit->kern_arena); + /* agr %r1,%dst */ + EMIT4(0xb9080000, REG_W1, insn->dst_reg); + probe->arena_reg = REG_W1; + probe->prg = jit->prg; +} + +static int bpf_jit_probe_post(struct bpf_jit *jit, struct bpf_prog *fp, + struct bpf_jit_probe *probe) { struct exception_table_entry *ex; - int reg, prg; + int i, prg; s64 delta; u8 *insn; - int i; + if (probe->prg == -1) + /* The probe is not armed. */ + return 0; + bpf_jit_probe_emit_nop(jit, probe); if (!fp->aux->extable) /* Do nothing during early JIT passes. */ return 0; - insn = jit->prg_buf + probe_prg; - reg = get_probe_mem_regno(insn); - if (WARN_ON_ONCE(reg < 0)) - /* JIT bug - unexpected probe instruction. */ - return -1; - if (WARN_ON_ONCE(probe_prg + insn_length(*insn) != nop_prg)) + insn = jit->prg_buf + probe->prg; + if (WARN_ON_ONCE(probe->prg + insn_length(*insn) != probe->nop_prg)) /* JIT bug - gap between probe and nop instructions. */ return -1; for (i = 0; i < 2; i++) { @@ -719,23 +798,24 @@ static int bpf_jit_probe_mem(struct bpf_jit *jit, struct bpf_prog *fp, return -1; ex = &fp->aux->extable[jit->excnt]; /* Add extable entries for probe and nop instructions. */ - prg = i == 0 ? probe_prg : nop_prg; + prg = i == 0 ? probe->prg : probe->nop_prg; delta = jit->prg_buf + prg - (u8 *)&ex->insn; if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX)) /* JIT bug - code and extable must be close. */ return -1; ex->insn = delta; /* - * Always land on the nop. Note that extable infrastructure - * ignores fixup field, it is handled by ex_handler_bpf(). + * Land on the current instruction. Note that the extable + * infrastructure ignores the fixup field; it is handled by + * ex_handler_bpf(). */ - delta = jit->prg_buf + nop_prg - (u8 *)&ex->fixup; + delta = jit->prg_buf + jit->prg - (u8 *)&ex->fixup; if (WARN_ON_ONCE(delta < INT_MIN || delta > INT_MAX)) /* JIT bug - landing pad and extable must be close. 
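 * The extable stores both the instruction and the fixup as 32-bit
 * self-relative offsets, hence the INT_MIN/INT_MAX range checks.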
*/ return -1; ex->fixup = delta; ex->type = EX_TYPE_BPF; - ex->data = reg; + ex->data = probe->reg; jit->excnt++; } return 0; @@ -782,19 +862,15 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, s32 branch_oc_off = insn->off; u32 dst_reg = insn->dst_reg; u32 src_reg = insn->src_reg; + struct bpf_jit_probe probe; int last, insn_count = 1; u32 *addrs = jit->addrs; s32 imm = insn->imm; s16 off = insn->off; - int probe_prg = -1; unsigned int mask; - int nop_prg; int err; - if (BPF_CLASS(insn->code) == BPF_LDX && - (BPF_MODE(insn->code) == BPF_PROBE_MEM || - BPF_MODE(insn->code) == BPF_PROBE_MEMSX)) - probe_prg = jit->prg; + bpf_jit_probe_init(&probe); switch (insn->code) { /* @@ -823,6 +899,22 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, } break; case BPF_ALU64 | BPF_MOV | BPF_X: + if (insn_is_cast_user(insn)) { + int patch_brc; + + /* ltgr %dst,%src */ + EMIT4(0xb9020000, dst_reg, src_reg); + /* brc 8,0f */ + patch_brc = jit->prg; + EMIT4_PCREL_RIC(0xa7040000, 8, 0); + /* iihf %dst,user_arena>>32 */ + EMIT6_IMM(0xc0080000, dst_reg, jit->user_arena >> 32); + /* 0: */ + if (jit->prg_buf) + *(u16 *)(jit->prg_buf + patch_brc + 2) = + (jit->prg - patch_brc) >> 1; + break; + } switch (insn->off) { case 0: /* DST = SRC */ /* lgr %dst,%src */ @@ -1366,51 +1458,99 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, * BPF_ST(X) */ case BPF_STX | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = src_reg */ - /* stcy %src,off(%dst) */ - EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, REG_0, off); + case BPF_STX | BPF_PROBE_MEM32 | BPF_B: + bpf_jit_probe_store_pre(jit, insn, &probe); + /* stcy %src,off(%dst,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0072, src_reg, dst_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; case BPF_STX | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = src */ - /* sthy %src,off(%dst) */ - EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, REG_0, off); + case BPF_STX | BPF_PROBE_MEM32 | BPF_H: + bpf_jit_probe_store_pre(jit, insn, &probe); + /* sthy %src,off(%dst,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0070, src_reg, dst_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; case BPF_STX | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = src */ - /* sty %src,off(%dst) */ - EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, REG_0, off); + case BPF_STX | BPF_PROBE_MEM32 | BPF_W: + bpf_jit_probe_store_pre(jit, insn, &probe); + /* sty %src,off(%dst,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0050, src_reg, dst_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; case BPF_STX | BPF_MEM | BPF_DW: /* (u64 *)(dst + off) = src */ - /* stg %src,off(%dst) */ - EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, REG_0, off); + case BPF_STX | BPF_PROBE_MEM32 | BPF_DW: + bpf_jit_probe_store_pre(jit, insn, &probe); + /* stg %src,off(%dst,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0024, src_reg, dst_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; case BPF_ST | BPF_MEM | BPF_B: /* *(u8 *)(dst + off) = imm */ + case BPF_ST | BPF_PROBE_MEM32 | BPF_B: /* lhi %w0,imm */ EMIT4_IMM(0xa7080000, REG_W0, (u8) imm); - /* stcy %w0,off(dst) */ - EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, REG_0, off); + 
bpf_jit_probe_store_pre(jit, insn, &probe); + /* stcy %w0,off(%dst,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0072, REG_W0, dst_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; case BPF_ST | BPF_MEM | BPF_H: /* (u16 *)(dst + off) = imm */ + case BPF_ST | BPF_PROBE_MEM32 | BPF_H: /* lhi %w0,imm */ EMIT4_IMM(0xa7080000, REG_W0, (u16) imm); - /* sthy %w0,off(dst) */ - EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, REG_0, off); + bpf_jit_probe_store_pre(jit, insn, &probe); + /* sthy %w0,off(%dst,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0070, REG_W0, dst_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; case BPF_ST | BPF_MEM | BPF_W: /* *(u32 *)(dst + off) = imm */ + case BPF_ST | BPF_PROBE_MEM32 | BPF_W: /* llilf %w0,imm */ EMIT6_IMM(0xc00f0000, REG_W0, (u32) imm); - /* sty %w0,off(%dst) */ - EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, REG_0, off); + bpf_jit_probe_store_pre(jit, insn, &probe); + /* sty %w0,off(%dst,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0050, REG_W0, dst_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; case BPF_ST | BPF_MEM | BPF_DW: /* *(u64 *)(dst + off) = imm */ + case BPF_ST | BPF_PROBE_MEM32 | BPF_DW: /* lgfi %w0,imm */ EMIT6_IMM(0xc0010000, REG_W0, imm); - /* stg %w0,off(%dst) */ - EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, REG_0, off); + bpf_jit_probe_store_pre(jit, insn, &probe); + /* stg %w0,off(%dst,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W0, dst_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; /* @@ -1418,15 +1558,30 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, */ case BPF_STX | BPF_ATOMIC | BPF_DW: case BPF_STX | BPF_ATOMIC | BPF_W: + case BPF_STX | BPF_PROBE_ATOMIC | BPF_DW: + case BPF_STX | BPF_PROBE_ATOMIC | BPF_W: { bool is32 = BPF_SIZE(insn->code) == BPF_W; + /* + * Unlike loads and stores, atomics have only a base register, + * but no index register. For the non-arena case, simply use + * %dst as a base. For the arena case, use the work register + * %r1: first, load the arena base into it, and then add %dst + * to it. + */ + probe.arena_reg = dst_reg; + switch (insn->imm) { -/* {op32|op64} {%w0|%src},%src,off(%dst) */ #define EMIT_ATOMIC(op32, op64) do { \ + bpf_jit_probe_atomic_pre(jit, insn, &probe); \ + /* {op32|op64} {%w0|%src},%src,off(%arena) */ \ EMIT6_DISP_LH(0xeb000000, is32 ? (op32) : (op64), \ (insn->imm & BPF_FETCH) ? src_reg : REG_W0, \ - src_reg, dst_reg, off); \ + src_reg, probe.arena_reg, off); \ + err = bpf_jit_probe_post(jit, fp, &probe); \ + if (err < 0) \ + return err; \ if (insn->imm & BPF_FETCH) { \ /* bcr 14,0 - see atomic_fetch_{add,and,or,xor}() */ \ _EMIT2(0x07e0); \ @@ -1455,25 +1610,50 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, EMIT_ATOMIC(0x00f7, 0x00e7); break; #undef EMIT_ATOMIC - case BPF_XCHG: - /* {ly|lg} %w0,off(%dst) */ + case BPF_XCHG: { + struct bpf_jit_probe load_probe = probe; + int loop_start; + + bpf_jit_probe_atomic_pre(jit, insn, &load_probe); + /* {ly|lg} %w0,off(%arena) */ EMIT6_DISP_LH(0xe3000000, is32 ? 
0x0058 : 0x0004, REG_W0, REG_0, - dst_reg, off); - /* 0: {csy|csg} %w0,%src,off(%dst) */ + load_probe.arena_reg, off); + bpf_jit_probe_emit_nop(jit, &load_probe); + /* Reuse {ly|lg}'s arena_reg for {csy|csg}. */ + if (load_probe.prg != -1) { + probe.prg = jit->prg; + probe.arena_reg = load_probe.arena_reg; + } + loop_start = jit->prg; + /* 0: {csy|csg} %w0,%src,off(%arena) */ EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030, - REG_W0, src_reg, dst_reg, off); + REG_W0, src_reg, probe.arena_reg, off); + bpf_jit_probe_emit_nop(jit, &probe); /* brc 4,0b */ - EMIT4_PCREL_RIC(0xa7040000, 4, jit->prg - 6); + EMIT4_PCREL_RIC(0xa7040000, 4, loop_start); /* {llgfr|lgr} %src,%w0 */ EMIT4(is32 ? 0xb9160000 : 0xb9040000, src_reg, REG_W0); + /* Both probes should land here on exception. */ + err = bpf_jit_probe_post(jit, fp, &load_probe); + if (err < 0) + return err; + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; if (is32 && insn_is_zext(&insn[1])) insn_count = 2; break; + } case BPF_CMPXCHG: - /* 0: {csy|csg} %b0,%src,off(%dst) */ + bpf_jit_probe_atomic_pre(jit, insn, &probe); + /* 0: {csy|csg} %b0,%src,off(%arena) */ EMIT6_DISP_LH(0xeb000000, is32 ? 0x0014 : 0x0030, - BPF_REG_0, src_reg, dst_reg, off); + BPF_REG_0, src_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; break; default: pr_err("Unknown atomic operation %02x\n", insn->imm); @@ -1488,51 +1668,87 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, */ case BPF_LDX | BPF_MEM | BPF_B: /* dst = *(u8 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEM | BPF_B: - /* llgc %dst,0(off,%src) */ - EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, REG_0, off); + case BPF_LDX | BPF_PROBE_MEM32 | BPF_B: + bpf_jit_probe_load_pre(jit, insn, &probe); + /* llgc %dst,off(%src,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0090, dst_reg, src_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; if (insn_is_zext(&insn[1])) insn_count = 2; break; case BPF_LDX | BPF_MEMSX | BPF_B: /* dst = *(s8 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEMSX | BPF_B: - /* lgb %dst,0(off,%src) */ + bpf_jit_probe_load_pre(jit, insn, &probe); + /* lgb %dst,off(%src) */ EMIT6_DISP_LH(0xe3000000, 0x0077, dst_reg, src_reg, REG_0, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; case BPF_LDX | BPF_MEM | BPF_H: /* dst = *(u16 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEM | BPF_H: - /* llgh %dst,0(off,%src) */ - EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, REG_0, off); + case BPF_LDX | BPF_PROBE_MEM32 | BPF_H: + bpf_jit_probe_load_pre(jit, insn, &probe); + /* llgh %dst,off(%src,%arena) */ + EMIT6_DISP_LH(0xe3000000, 0x0091, dst_reg, src_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; if (insn_is_zext(&insn[1])) insn_count = 2; break; case BPF_LDX | BPF_MEMSX | BPF_H: /* dst = *(s16 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEMSX | BPF_H: - /* lgh %dst,0(off,%src) */ + bpf_jit_probe_load_pre(jit, insn, &probe); + /* lgh %dst,off(%src) */ EMIT6_DISP_LH(0xe3000000, 0x0015, dst_reg, src_reg, REG_0, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; jit->seen |= SEEN_MEM; break; case BPF_LDX | BPF_MEM | BPF_W: /* dst = *(u32 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEM | BPF_W: + case BPF_LDX | BPF_PROBE_MEM32 | BPF_W: + 
bpf_jit_probe_load_pre(jit, insn, &probe); /* llgf %dst,off(%src) */ jit->seen |= SEEN_MEM; - EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, REG_0, off); + EMIT6_DISP_LH(0xe3000000, 0x0016, dst_reg, src_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; if (insn_is_zext(&insn[1])) insn_count = 2; break; case BPF_LDX | BPF_MEMSX | BPF_W: /* dst = *(s32 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEMSX | BPF_W: + bpf_jit_probe_load_pre(jit, insn, &probe); /* lgf %dst,off(%src) */ jit->seen |= SEEN_MEM; EMIT6_DISP_LH(0xe3000000, 0x0014, dst_reg, src_reg, REG_0, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; break; case BPF_LDX | BPF_MEM | BPF_DW: /* dst = *(u64 *)(ul) (src + off) */ case BPF_LDX | BPF_PROBE_MEM | BPF_DW: - /* lg %dst,0(off,%src) */ + case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW: + bpf_jit_probe_load_pre(jit, insn, &probe); + /* lg %dst,off(%src,%arena) */ jit->seen |= SEEN_MEM; - EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, REG_0, off); + EMIT6_DISP_LH(0xe3000000, 0x0004, dst_reg, src_reg, + probe.arena_reg, off); + err = bpf_jit_probe_post(jit, fp, &probe); + if (err < 0) + return err; break; /* * BPF_JMP / CALL @@ -1647,7 +1863,7 @@ static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, /* * Restore registers before calling function */ - save_restore_regs(jit, REGS_RESTORE, stack_depth); + save_restore_regs(jit, REGS_RESTORE, stack_depth, 0); /* * goto *(prog->bpf_func + tail_call_start); @@ -1897,22 +2113,6 @@ branch_oc: return -1; } - if (probe_prg != -1) { - /* - * Handlers of certain exceptions leave psw.addr pointing to - * the instruction directly after the failing one. Therefore, - * create two exception table entries and also add a nop in - * case two probing instructions come directly after each - * other. - */ - nop_prg = jit->prg; - /* bcr 0,%0 */ - _EMIT2(0x0700); - err = bpf_jit_probe_mem(jit, fp, probe_prg, nop_prg); - if (err < 0) - return err; - } - return insn_count; } @@ -1958,12 +2158,18 @@ static int bpf_jit_prog(struct bpf_jit *jit, struct bpf_prog *fp, bool extra_pass, u32 stack_depth) { int i, insn_count, lit32_size, lit64_size; + u64 kern_arena; jit->lit32 = jit->lit32_start; jit->lit64 = jit->lit64_start; jit->prg = 0; jit->excnt = 0; + kern_arena = bpf_arena_get_kern_vm_start(fp->aux->arena); + if (kern_arena) + jit->kern_arena = _EMIT_CONST_U64(kern_arena); + jit->user_arena = bpf_arena_get_user_vm_start(fp->aux->arena); + bpf_jit_prologue(jit, fp, stack_depth); if (bpf_set_addr(jit, 0) < 0) return -1; @@ -2011,9 +2217,25 @@ static struct bpf_binary_header *bpf_jit_alloc(struct bpf_jit *jit, struct bpf_prog *fp) { struct bpf_binary_header *header; + struct bpf_insn *insn; u32 extable_size; u32 code_size; + int i; + for (i = 0; i < fp->len; i++) { + insn = &fp->insnsi[i]; + + if (BPF_CLASS(insn->code) == BPF_STX && + BPF_MODE(insn->code) == BPF_PROBE_ATOMIC && + (BPF_SIZE(insn->code) == BPF_DW || + BPF_SIZE(insn->code) == BPF_W) && + insn->imm == BPF_XCHG) + /* + * bpf_jit_insn() emits a load and a compare-and-swap, + * both of which need to be probed. + */ + fp->aux->num_exentries += 1; + } /* We need two entries per insn. 
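 * (one for the probing instruction itself and one for the trailing nop
 * emitted by bpf_jit_probe_emit_nop(), see bpf_jit_probe_post())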
*/ fp->aux->num_exentries *= 2; @@ -2689,3 +2911,52 @@ bool bpf_jit_supports_subprog_tailcalls(void) { return true; } + +bool bpf_jit_supports_arena(void) +{ + return true; +} + +bool bpf_jit_supports_insn(struct bpf_insn *insn, bool in_arena) +{ + /* + * Currently the verifier uses this function only to check which + * atomic stores to arena are supported, and they all are. + */ + return true; +} + +bool bpf_jit_supports_exceptions(void) +{ + /* + * Exceptions require unwinding support, which is always available, + * because the kernel is always built with backchain. + */ + return true; +} + +void arch_bpf_stack_walk(bool (*consume_fn)(void *, u64, u64, u64), + void *cookie) +{ + unsigned long addr, prev_addr = 0; + struct unwind_state state; + + unwind_for_each_frame(&state, NULL, NULL, 0) { + addr = unwind_get_return_address(&state); + if (!addr) + break; + /* + * addr is a return address and state.sp is the value of %r15 + * at this address. exception_cb needs %r15 at entry to the + * function containing addr, so take the next state.sp. + * + * There is no bp, and the exception_cb prog does not need one + * to perform a quasi-longjmp. The common code requires a + * non-zero bp, so pass sp there as well. + */ + if (prev_addr && !consume_fn(cookie, prev_addr, state.sp, + state.sp)) + break; + prev_addr = addr; + } +} diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c index 5159c7a22922..d25d81c8ecc0 100644 --- a/arch/x86/net/bpf_jit_comp.c +++ b/arch/x86/net/bpf_jit_comp.c @@ -1234,13 +1234,11 @@ bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs) } static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt, - bool *regs_used, bool *tail_call_seen) + bool *regs_used) { int i; for (i = 1; i <= insn_cnt; i++, insn++) { - if (insn->code == (BPF_JMP | BPF_TAIL_CALL)) - *tail_call_seen = true; if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6) regs_used[0] = true; if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7) @@ -1324,7 +1322,6 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image struct bpf_insn *insn = bpf_prog->insnsi; bool callee_regs_used[4] = {}; int insn_cnt = bpf_prog->len; - bool tail_call_seen = false; bool seen_exit = false; u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY]; u64 arena_vm_start, user_vm_start; @@ -1336,11 +1333,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image arena_vm_start = bpf_arena_get_kern_vm_start(bpf_prog->aux->arena); user_vm_start = bpf_arena_get_user_vm_start(bpf_prog->aux->arena); - detect_reg_usage(insn, insn_cnt, callee_regs_used, - &tail_call_seen); - - /* tail call's presence in current prog implies it is reachable */ - tail_call_reachable |= tail_call_seen; + detect_reg_usage(insn, insn_cnt, callee_regs_used); emit_prologue(&prog, bpf_prog->aux->stack_depth, bpf_prog_was_classic(bpf_prog), tail_call_reachable, @@ -3363,7 +3356,7 @@ out_image: * * Both cases are serious bugs and justify WARN_ON. */ - if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) { + if (WARN_ON(bpf_jit_binary_pack_finalize(header, rw_header))) { /* header has been freed */ header = NULL; goto out_image; @@ -3442,7 +3435,7 @@ void bpf_jit_free(struct bpf_prog *prog) * before freeing it. 
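 * (bpf_jit_binary_pack_finalize() copies the RW image into the
 * read-only pack and frees the RW copy.)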
*/ if (jit_data) { - bpf_jit_binary_pack_finalize(prog, jit_data->header, + bpf_jit_binary_pack_finalize(jit_data->header, jit_data->rw_header); kvfree(jit_data->addrs); kfree(jit_data); diff --git a/drivers/base/Makefile b/drivers/base/Makefile index 3079bfe53d04..7fb21768ca36 100644 --- a/drivers/base/Makefile +++ b/drivers/base/Makefile @@ -16,6 +16,7 @@ obj-$(CONFIG_NUMA) += node.o obj-$(CONFIG_MEMORY_HOTPLUG) += memory.o ifeq ($(CONFIG_SYSFS),y) obj-$(CONFIG_MODULES) += module.o +obj-$(CONFIG_AUXILIARY_BUS) += auxiliary_sysfs.o endif obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o obj-$(CONFIG_REGMAP) += regmap/ diff --git a/drivers/base/auxiliary.c b/drivers/base/auxiliary.c index d3a2c40c2f12..3f01f4ec69e5 100644 --- a/drivers/base/auxiliary.c +++ b/drivers/base/auxiliary.c @@ -287,6 +287,7 @@ int auxiliary_device_init(struct auxiliary_device *auxdev) dev->bus = &auxiliary_bus_type; device_initialize(&auxdev->dev); + mutex_init(&auxdev->sysfs.lock); return 0; } EXPORT_SYMBOL_GPL(auxiliary_device_init); diff --git a/drivers/base/auxiliary_sysfs.c b/drivers/base/auxiliary_sysfs.c new file mode 100644 index 000000000000..754f21730afd --- /dev/null +++ b/drivers/base/auxiliary_sysfs.c @@ -0,0 +1,113 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES + */ + +#include +#include + +#define AUXILIARY_MAX_IRQ_NAME 11 + +struct auxiliary_irq_info { + struct device_attribute sysfs_attr; + char name[AUXILIARY_MAX_IRQ_NAME]; +}; + +static struct attribute *auxiliary_irq_attrs[] = { + NULL +}; + +static const struct attribute_group auxiliary_irqs_group = { + .name = "irqs", + .attrs = auxiliary_irq_attrs, +}; + +static int auxiliary_irq_dir_prepare(struct auxiliary_device *auxdev) +{ + int ret = 0; + + guard(mutex)(&auxdev->sysfs.lock); + if (auxdev->sysfs.irq_dir_exists) + return 0; + + ret = devm_device_add_group(&auxdev->dev, &auxiliary_irqs_group); + if (ret) + return ret; + + auxdev->sysfs.irq_dir_exists = true; + xa_init(&auxdev->sysfs.irqs); + return 0; +} + +/** + * auxiliary_device_sysfs_irq_add - add a sysfs entry for the given IRQ + * @auxdev: auxiliary bus device to add the sysfs entry. + * @irq: The associated interrupt number. + * + * This function should be called after auxiliary device have successfully + * received the irq. + * The driver is responsible to add a unique irq for the auxiliary device. The + * driver can invoke this function from multiple thread context safely for + * unique irqs of the auxiliary devices. The driver must not invoke this API + * multiple times if the irq is already added previously. + * + * Return: zero on success or an error code on failure. 
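+ * Adding an IRQ that is already present fails with -EBUSY from the
+ * underlying xa_insert().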
+ */ +int auxiliary_device_sysfs_irq_add(struct auxiliary_device *auxdev, int irq) +{ + struct auxiliary_irq_info *info __free(kfree) = NULL; + struct device *dev = &auxdev->dev; + int ret; + + ret = auxiliary_irq_dir_prepare(auxdev); + if (ret) + return ret; + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) + return -ENOMEM; + + sysfs_attr_init(&info->sysfs_attr.attr); + snprintf(info->name, AUXILIARY_MAX_IRQ_NAME, "%d", irq); + + ret = xa_insert(&auxdev->sysfs.irqs, irq, info, GFP_KERNEL); + if (ret) + return ret; + + info->sysfs_attr.attr.name = info->name; + ret = sysfs_add_file_to_group(&dev->kobj, &info->sysfs_attr.attr, + auxiliary_irqs_group.name); + if (ret) + goto sysfs_add_err; + + xa_store(&auxdev->sysfs.irqs, irq, no_free_ptr(info), GFP_KERNEL); + return 0; + +sysfs_add_err: + xa_erase(&auxdev->sysfs.irqs, irq); + return ret; +} +EXPORT_SYMBOL_GPL(auxiliary_device_sysfs_irq_add); + +/** + * auxiliary_device_sysfs_irq_remove - remove a sysfs entry for the given IRQ + * @auxdev: auxiliary bus device to add the sysfs entry. + * @irq: the IRQ to remove. + * + * This function should be called to remove an IRQ sysfs entry. + * The driver must invoke this API when IRQ is released by the device. + */ +void auxiliary_device_sysfs_irq_remove(struct auxiliary_device *auxdev, int irq) +{ + struct auxiliary_irq_info *info __free(kfree) = xa_load(&auxdev->sysfs.irqs, irq); + struct device *dev = &auxdev->dev; + + if (!info) { + dev_err(&auxdev->dev, "IRQ %d doesn't exist\n", irq); + return; + } + sysfs_remove_file_from_group(&dev->kobj, &info->sysfs_attr.attr, + auxiliary_irqs_group.name); + xa_erase(&auxdev->sysfs.irqs, irq); +} +EXPORT_SYMBOL_GPL(auxiliary_device_sysfs_irq_remove); diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 0b5f218ac505..90a94a111e67 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -105,6 +105,7 @@ config BT_HCIUART tristate "HCI UART driver" depends on SERIAL_DEV_BUS || !SERIAL_DEV_BUS depends on NVMEM || !NVMEM + depends on POWER_SEQUENCING || !POWER_SEQUENCING depends on TTY help Bluetooth HCI UART driver. @@ -287,12 +288,12 @@ config BT_HCIBCM203X config BT_HCIBCM4377 - tristate "HCI BCM4377/4378/4387 PCIe driver" + tristate "HCI BCM4377/4378/4387/4388 PCIe driver" depends on PCI select FW_LOADER help - Support for Broadcom BCM4377/4378/4387 Bluetooth chipsets attached via - PCIe. These are usually found in Apple machines. + Support for Broadcom BCM4377/4378/4387/4388 Bluetooth chipsets + attached via PCIe. These are usually found in Apple machines. Say Y here to compile support for HCI BCM4377 family devices into the kernel or say M to compile it as module (hci_bcm4377). 
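A minimal usage sketch for the auxiliary IRQ sysfs API added above (hypothetical
driver: the my_drv_* names and the IRQ source are assumptions, error handling
trimmed)::

    #include <linux/auxiliary_bus.h>
    #include <linux/interrupt.h>

    static irqreturn_t my_drv_handler(int irq, void *dev_id)
    {
            /* ... handle the device interrupt ... */
            return IRQ_HANDLED;
    }

    static int my_drv_setup_irq(struct auxiliary_device *auxdev, int irq)
    {
            int ret;

            ret = request_irq(irq, my_drv_handler, 0, "my_drv", auxdev);
            if (ret)
                    return ret;

            /* Expose the IRQ under /sys/bus/auxiliary/devices/.../irqs/ */
            ret = auxiliary_device_sysfs_irq_add(auxdev, irq);
            if (ret)
                    free_irq(irq, auxdev);

            return ret;
    }

    static void my_drv_teardown_irq(struct auxiliary_device *auxdev, int irq)
    {
            /* Remove the sysfs entry before releasing the IRQ */
            auxiliary_device_sysfs_irq_remove(auxdev, irq);
            free_irq(irq, auxdev);
    }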
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 0c855c3ee1c1..e7a612504ab1 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -26,21 +26,11 @@ #define ECDSA_OFFSET 644 #define ECDSA_HEADER_LEN 320 -#define BTINTEL_PPAG_NAME "PPAG" - enum { DSM_SET_WDISABLE2_DELAY = 1, DSM_SET_RESET_METHOD = 3, }; -/* structure to store the PPAG data read from ACPI table */ -struct btintel_ppag { - u32 domain; - u32 mode; - acpi_status status; - struct hci_dev *hdev; -}; - #define CMD_WRITE_BOOT_PARAMS 0xfc0e struct cmd_write_boot_params { __le32 boot_addr; @@ -482,6 +472,7 @@ int btintel_version_info_tlv(struct hci_dev *hdev, case 0x19: /* Slr-F */ case 0x1b: /* Mgr */ case 0x1c: /* Gale Peak (GaP) */ + case 0x1d: /* BlazarU (BzrU) */ case 0x1e: /* BlazarI (Bzr) */ break; default: @@ -641,6 +632,10 @@ int btintel_parse_version_tlv(struct hci_dev *hdev, case INTEL_TLV_GIT_SHA1: version->git_sha1 = get_unaligned_le32(tlv->val); break; + case INTEL_TLV_FW_ID: + snprintf(version->fw_id, sizeof(version->fw_id), + "%s", tlv->val); + break; default: /* Ignore rest of information */ break; @@ -1324,65 +1319,6 @@ static int btintel_read_debug_features(struct hci_dev *hdev, return 0; } -static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data, - void **ret) -{ - acpi_status status; - size_t len; - struct btintel_ppag *ppag = data; - union acpi_object *p, *elements; - struct acpi_buffer string = {ACPI_ALLOCATE_BUFFER, NULL}; - struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; - struct hci_dev *hdev = ppag->hdev; - - status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); - if (ACPI_FAILURE(status)) { - bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status)); - return status; - } - - len = strlen(string.pointer); - if (len < strlen(BTINTEL_PPAG_NAME)) { - kfree(string.pointer); - return AE_OK; - } - - if (strncmp((char *)string.pointer + len - 4, BTINTEL_PPAG_NAME, 4)) { - kfree(string.pointer); - return AE_OK; - } - kfree(string.pointer); - - status = acpi_evaluate_object(handle, NULL, NULL, &buffer); - if (ACPI_FAILURE(status)) { - ppag->status = status; - bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status)); - return status; - } - - p = buffer.pointer; - ppag = (struct btintel_ppag *)data; - - if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) { - kfree(buffer.pointer); - bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d", - p->type, p->package.count); - ppag->status = AE_ERROR; - return AE_ERROR; - } - - elements = p->package.elements; - - /* PPAG table is located at element[1] */ - p = &elements[1]; - - ppag->domain = (u32)p->package.elements[0].integer.value; - ppag->mode = (u32)p->package.elements[1].integer.value; - ppag->status = AE_OK; - kfree(buffer.pointer); - return AE_CTRL_TERMINATE; -} - static int btintel_set_debug_features(struct hci_dev *hdev, const struct intel_debug_features *features) { @@ -2202,30 +2138,61 @@ static void btintel_get_fw_name_tlv(const struct intel_version_tlv *ver, const char *suffix) { const char *format; - /* The firmware file name for new generation controllers will be - * ibt-- - */ - switch (ver->cnvi_top & 0xfff) { + u32 cnvi, cnvr; + + cnvi = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top), + INTEL_CNVX_TOP_STEP(ver->cnvi_top)); + + cnvr = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top), + INTEL_CNVX_TOP_STEP(ver->cnvr_top)); + /* Only Blazar product supports downloading of 
intermediate loader * image */ - case BTINTEL_CNVI_BLAZARI: - if (ver->img_type == BTINTEL_IMG_BOOTLOADER) - format = "intel/ibt-%04x-%04x-iml.%s"; - else - format = "intel/ibt-%04x-%04x.%s"; - break; - default: - format = "intel/ibt-%04x-%04x.%s"; - break; - } + if (INTEL_HW_VARIANT(ver->cnvi_bt) >= 0x1e) { + u8 zero[BTINTEL_FWID_MAXLEN]; - snprintf(fw_name, len, format, - INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top), - INTEL_CNVX_TOP_STEP(ver->cnvi_top)), - INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top), - INTEL_CNVX_TOP_STEP(ver->cnvr_top)), - suffix); + if (ver->img_type == BTINTEL_IMG_BOOTLOADER) { + format = "intel/ibt-%04x-%04x-iml.%s"; + snprintf(fw_name, len, format, cnvi, cnvr, suffix); + return; + } + + memset(zero, 0, sizeof(zero)); + + /* ibt-- */ + if (memcmp(ver->fw_id, zero, sizeof(zero))) { + format = "intel/ibt-%04x-%04x-%s.%s"; + snprintf(fw_name, len, format, cnvi, cnvr, + ver->fw_id, suffix); + return; + } + /* If firmware id is not present, fallback to legacy naming + * convention + */ + } + /* Fallback to legacy naming convention for other controllers + * ibt-- + */ + format = "intel/ibt-%04x-%04x.%s"; + snprintf(fw_name, len, format, cnvi, cnvr, suffix); +} + +static void btintel_get_iml_tlv(const struct intel_version_tlv *ver, + char *fw_name, size_t len, + const char *suffix) +{ + const char *format; + u32 cnvi, cnvr; + + cnvi = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top), + INTEL_CNVX_TOP_STEP(ver->cnvi_top)); + + cnvr = INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top), + INTEL_CNVX_TOP_STEP(ver->cnvr_top)); + + format = "intel/ibt-%04x-%04x-iml.%s"; + snprintf(fw_name, len, format, cnvi, cnvr, suffix); } static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev, @@ -2233,7 +2200,7 @@ static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev, u32 *boot_param) { const struct firmware *fw; - char fwname[64]; + char fwname[128]; int err; ktime_t calltime; @@ -2268,7 +2235,20 @@ static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev, } } - btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi"); + if (ver->img_type == BTINTEL_IMG_OP) { + /* Controller running OP image. In case of FW downgrade, + * FWID TLV may not be present and driver may attempt to load + * firmware image which doesn't exist. 
Lets compare the version + * of IML image + */ + if (INTEL_HW_VARIANT(ver->cnvi_bt) >= 0x1e) + btintel_get_iml_tlv(ver, fwname, sizeof(fwname), "sfi"); + else + btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi"); + } else { + btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi"); + } + err = firmware_request_nowarn(&fw, fwname, &hdev->dev); if (err < 0) { if (!btintel_test_flag(hdev, INTEL_BOOTLOADER)) { @@ -2427,10 +2407,13 @@ error: static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver) { - struct btintel_ppag ppag; struct sk_buff *skb; struct hci_ppag_enable_cmd ppag_cmd; acpi_handle handle; + struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; + union acpi_object *p, *elements; + u32 domain, mode; + acpi_status status; /* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */ switch (ver->cnvr_top & 0xFFF) { @@ -2448,22 +2431,34 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver return; } - memset(&ppag, 0, sizeof(ppag)); - - ppag.hdev = hdev; - ppag.status = AE_NOT_FOUND; - acpi_walk_namespace(ACPI_TYPE_PACKAGE, handle, 1, NULL, - btintel_ppag_callback, &ppag, NULL); - - if (ACPI_FAILURE(ppag.status)) { - if (ppag.status == AE_NOT_FOUND) { + status = acpi_evaluate_object(handle, "PPAG", NULL, &buffer); + if (ACPI_FAILURE(status)) { + if (status == AE_NOT_FOUND) { bt_dev_dbg(hdev, "PPAG-BT: ACPI entry not found"); return; } + bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status)); return; } - if (ppag.domain != 0x12) { + p = buffer.pointer; + if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) { + bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d", + p->type, p->package.count); + kfree(buffer.pointer); + return; + } + + elements = p->package.elements; + + /* PPAG table is located at element[1] */ + p = &elements[1]; + + domain = (u32)p->package.elements[0].integer.value; + mode = (u32)p->package.elements[1].integer.value; + kfree(buffer.pointer); + + if (domain != 0x12) { bt_dev_dbg(hdev, "PPAG-BT: Bluetooth domain is disabled in ACPI firmware"); return; } @@ -2474,19 +2469,22 @@ static void btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver * BIT 1 : 0 Disabled in China * 1 Enabled in China */ - if ((ppag.mode & 0x01) != BIT(0) && (ppag.mode & 0x02) != BIT(1)) { - bt_dev_dbg(hdev, "PPAG-BT: EU, China mode are disabled in CB/BIOS"); + mode &= 0x03; + + if (!mode) { + bt_dev_dbg(hdev, "PPAG-BT: EU, China mode are disabled in BIOS"); return; } - ppag_cmd.ppag_enable_flags = cpu_to_le32(ppag.mode); + ppag_cmd.ppag_enable_flags = cpu_to_le32(mode); - skb = __hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd), &ppag_cmd, HCI_CMD_TIMEOUT); + skb = __hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd), + &ppag_cmd, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { bt_dev_warn(hdev, "Failed to send PPAG Enable (%ld)", PTR_ERR(skb)); return; } - bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", ppag.mode); + bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", mode); kfree_skb(skb); } @@ -2600,6 +2598,24 @@ static void btintel_set_dsm_reset_method(struct hci_dev *hdev, data->acpi_reset_method = btintel_acpi_reset_method; } +#define BTINTEL_ISODATA_HANDLE_BASE 0x900 + +static u8 btintel_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb) +{ + /* + * Distinguish ISO data packets form ACL data packets + * based on their connection handle value range. 
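+ * On these controllers, CIS/BIS handles are assumed to start at
+ * BTINTEL_ISODATA_HANDLE_BASE (0x900), while ACL handles stay below it.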
+ */ + if (hci_skb_pkt_type(skb) == HCI_ACLDATA_PKT) { + __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle); + + if (hci_handle(handle) >= BTINTEL_ISODATA_HANDLE_BASE) + return HCI_ISODATA_PKT; + } + + return hci_skb_pkt_type(skb); +} + int btintel_bootloader_setup_tlv(struct hci_dev *hdev, struct intel_version_tlv *ver) { @@ -2635,7 +2651,7 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev, return err; /* If image type returned is BTINTEL_IMG_IML, then controller supports - * intermediae loader image + * intermediate loader image */ if (ver->img_type == BTINTEL_IMG_IML) { err = btintel_prepare_fw_download_tlv(hdev, ver, &boot_param); @@ -2703,6 +2719,7 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant) case 0x19: case 0x1b: case 0x1c: + case 0x1d: case 0x1e: hci_set_msft_opcode(hdev, 0xFC1E); break; @@ -2713,7 +2730,7 @@ void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant) } EXPORT_SYMBOL_GPL(btintel_set_msft_opcode); -static void btintel_print_fseq_info(struct hci_dev *hdev) +void btintel_print_fseq_info(struct hci_dev *hdev) { struct sk_buff *skb; u8 *p; @@ -2825,6 +2842,7 @@ static void btintel_print_fseq_info(struct hci_dev *hdev) kfree_skb(skb); } +EXPORT_SYMBOL_GPL(btintel_print_fseq_info); static int btintel_setup_combined(struct hci_dev *hdev) { @@ -3039,11 +3057,15 @@ static int btintel_setup_combined(struct hci_dev *hdev) err = btintel_bootloader_setup(hdev, &ver); btintel_register_devcoredump_support(hdev); break; + case 0x18: /* GfP2 */ + case 0x1c: /* GaP */ + /* Re-classify packet type for controllers with LE audio */ + hdev->classify_pkt_type = btintel_classify_pkt_type; + fallthrough; case 0x17: - case 0x18: case 0x19: case 0x1b: - case 0x1c: + case 0x1d: case 0x1e: /* Display version information of TLV type */ btintel_version_info_tlv(hdev, &ver_tlv); diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h index b5fea735e260..aa70e4c27416 100644 --- a/drivers/bluetooth/btintel.h +++ b/drivers/bluetooth/btintel.h @@ -42,7 +42,8 @@ enum { INTEL_TLV_SBE_TYPE, INTEL_TLV_OTP_BDADDR, INTEL_TLV_UNLOCKED_STATE, - INTEL_TLV_GIT_SHA1 + INTEL_TLV_GIT_SHA1, + INTEL_TLV_FW_ID = 0x50 }; struct intel_tlv { @@ -57,6 +58,8 @@ struct intel_tlv { #define BTINTEL_IMG_IML 0x02 /* Intermediate image */ #define BTINTEL_IMG_OP 0x03 /* Operational image */ +#define BTINTEL_FWID_MAXLEN 64 + struct intel_version_tlv { u32 cnvi_top; u32 cnvr_top; @@ -77,6 +80,7 @@ struct intel_version_tlv { u8 limited_cce; u8 sbe_type; u32 git_sha1; + u8 fw_id[BTINTEL_FWID_MAXLEN]; bdaddr_t otp_bd_addr; }; @@ -244,6 +248,7 @@ int btintel_bootloader_setup_tlv(struct hci_dev *hdev, struct intel_version_tlv *ver); int btintel_shutdown_combined(struct hci_dev *hdev); void btintel_hw_error(struct hci_dev *hdev, u8 code); +void btintel_print_fseq_info(struct hci_dev *hdev); #else static inline int btintel_check_bdaddr(struct hci_dev *hdev) @@ -373,4 +378,8 @@ static inline int btintel_shutdown_combined(struct hci_dev *hdev) static inline void btintel_hw_error(struct hci_dev *hdev, u8 code) { } + +static inline void btintel_print_fseq_info(struct hci_dev *hdev) +{ +} #endif diff --git a/drivers/bluetooth/btintel_pcie.c b/drivers/bluetooth/btintel_pcie.c index dd3c0626c72d..0d1a0415557b 100644 --- a/drivers/bluetooth/btintel_pcie.c +++ b/drivers/bluetooth/btintel_pcie.c @@ -797,7 +797,6 @@ static int btintel_pcie_setup_txq_bufs(struct btintel_pcie_data *data, kfree(txq->bufs); return -ENOMEM; } - memset(txq->buf_v_addr, 0, txq->count * BTINTEL_PCIE_BUFFER_SIZE); /* 
Setup the allocated DMA buffer to bufs. Each data_buf should * have virtual address and physical address @@ -842,7 +841,6 @@ static int btintel_pcie_setup_rxq_bufs(struct btintel_pcie_data *data, kfree(rxq->bufs); return -ENOMEM; } - memset(rxq->buf_v_addr, 0, rxq->count * BTINTEL_PCIE_BUFFER_SIZE); /* Setup the allocated DMA buffer to bufs. Each data_buf should * have virtual address and physical address @@ -1197,9 +1195,11 @@ static int btintel_pcie_setup(struct hci_dev *hdev) bt_dev_err(hdev, "Unsupported Intel hw variant (%u)", INTEL_HW_VARIANT(ver_tlv.cnvi_bt)); err = -EINVAL; + goto exit_error; break; } + btintel_print_fseq_info(hdev); exit_error: kfree_skb(skb); @@ -1327,6 +1327,12 @@ static void btintel_pcie_remove(struct pci_dev *pdev) data = pci_get_drvdata(pdev); btintel_pcie_reset_bt(data); + for (int i = 0; i < data->alloc_vecs; i++) { + struct msix_entry *msix_entry; + + msix_entry = &data->msix_entries[i]; + free_irq(msix_entry->vector, msix_entry); + } pci_free_irq_vectors(pdev); diff --git a/drivers/bluetooth/btmtk.c b/drivers/bluetooth/btmtk.c index 812fd2a8f853..b7c348687a77 100644 --- a/drivers/bluetooth/btmtk.c +++ b/drivers/bluetooth/btmtk.c @@ -4,6 +4,9 @@ */ #include #include +#include +#include +#include #include #include @@ -19,6 +22,9 @@ #define MTK_SEC_MAP_COMMON_SIZE 12 #define MTK_SEC_MAP_NEED_SEND_SIZE 52 +/* Threshold used for mt79xx ISO data transmission */ +#define MTK_ISO_THRESHOLD 264 + struct btmtk_patch_header { u8 datetime[16]; u8 platform[4]; @@ -64,7 +70,7 @@ static void btmtk_coredump(struct hci_dev *hdev) static void btmtk_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb) { - struct btmediatek_data *data = hci_get_priv(hdev); + struct btmtk_data *data = hci_get_priv(hdev); char buf[80]; snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n", @@ -85,7 +91,7 @@ static void btmtk_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb) static void btmtk_coredump_notify(struct hci_dev *hdev, int state) { - struct btmediatek_data *data = hci_get_priv(hdev); + struct btmtk_data *data = hci_get_priv(hdev); switch (state) { case HCI_DEVCOREDUMP_IDLE: @@ -103,6 +109,24 @@ static void btmtk_coredump_notify(struct hci_dev *hdev, int state) } } +void btmtk_fw_get_filename(char *buf, size_t size, u32 dev_id, u32 fw_ver, + u32 fw_flavor) +{ + if (dev_id == 0x7925) + snprintf(buf, size, + "mediatek/mt%04x/BT_RAM_CODE_MT%04x_1_%x_hdr.bin", + dev_id & 0xffff, dev_id & 0xffff, (fw_ver & 0xff) + 1); + else if (dev_id == 0x7961 && fw_flavor) + snprintf(buf, size, + "mediatek/BT_RAM_CODE_MT%04x_1a_%x_hdr.bin", + dev_id & 0xffff, (fw_ver & 0xff) + 1); + else + snprintf(buf, size, + "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin", + dev_id & 0xffff, (fw_ver & 0xff) + 1); +} +EXPORT_SYMBOL_GPL(btmtk_fw_get_filename); + int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname, wmt_cmd_sync_func_t wmt_cmd_sync) { @@ -337,7 +361,7 @@ EXPORT_SYMBOL_GPL(btmtk_set_bdaddr); void btmtk_reset_sync(struct hci_dev *hdev) { - struct btmediatek_data *reset_work = hci_get_priv(hdev); + struct btmtk_data *reset_work = hci_get_priv(hdev); int err; hci_dev_lock(hdev); @@ -353,7 +377,7 @@ EXPORT_SYMBOL_GPL(btmtk_reset_sync); int btmtk_register_coredump(struct hci_dev *hdev, const char *name, u32 fw_version) { - struct btmediatek_data *data = hci_get_priv(hdev); + struct btmtk_data *data = hci_get_priv(hdev); if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) return -EOPNOTSUPP; @@ -369,7 +393,7 @@ EXPORT_SYMBOL_GPL(btmtk_register_coredump); int btmtk_process_coredump(struct hci_dev *hdev, 
struct sk_buff *skb) { - struct btmediatek_data *data = hci_get_priv(hdev); + struct btmtk_data *data = hci_get_priv(hdev); int err; if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) { @@ -413,6 +437,1057 @@ int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb) } EXPORT_SYMBOL_GPL(btmtk_process_coredump); +static void btmtk_usb_wmt_recv(struct urb *urb) +{ + struct hci_dev *hdev = urb->context; + struct btmtk_data *data = hci_get_priv(hdev); + struct sk_buff *skb; + int err; + + if (urb->status == 0 && urb->actual_length > 0) { + hdev->stat.byte_rx += urb->actual_length; + + /* WMT event shouldn't be fragmented and the size should be + * less than HCI_WMT_MAX_EVENT_SIZE. + */ + skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC); + if (!skb) { + hdev->stat.err_rx++; + kfree(urb->setup_packet); + return; + } + + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; + skb_put_data(skb, urb->transfer_buffer, urb->actual_length); + + /* When someone waits for the WMT event, the skb is cloned and + * the event is then processed from the clone. + */ + if (test_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags)) { + data->evt_skb = skb_clone(skb, GFP_ATOMIC); + if (!data->evt_skb) { + kfree_skb(skb); + kfree(urb->setup_packet); + return; + } + } + + err = hci_recv_frame(hdev, skb); + if (err < 0) { + kfree_skb(data->evt_skb); + data->evt_skb = NULL; + kfree(urb->setup_packet); + return; + } + + if (test_and_clear_bit(BTMTK_TX_WAIT_VND_EVT, + &data->flags)) { + /* Barrier to sync with other CPUs */ + smp_mb__after_atomic(); + wake_up_bit(&data->flags, + BTMTK_TX_WAIT_VND_EVT); + } + kfree(urb->setup_packet); + return; + } else if (urb->status == -ENOENT) { + /* The URB was killed by usb_kill_urb (e.g. on suspend); do not resubmit */ + return; + } + + usb_mark_last_busy(data->udev); + + /* The URB complete handler is still called with urb->actual_length = 0 + * when the event is not available, so we should keep re-submitting + * the URB until the WMT event returns. Also, it's necessary to wait some + * time between the two consecutive control URBs to relax the target device + * to generate the event. Otherwise, the WMT event cannot return from + * the device successfully. 
+ */ + udelay(500); + + usb_anchor_urb(urb, data->ctrl_anchor); + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) { + kfree(urb->setup_packet); + /* -EPERM: urb is being killed; + * -ENODEV: device got disconnected + */ + if (err != -EPERM && err != -ENODEV) + bt_dev_err(hdev, "urb %p failed to resubmit (%d)", + urb, -err); + usb_unanchor_urb(urb); + } +} + +static int btmtk_usb_submit_wmt_recv_urb(struct hci_dev *hdev) +{ + struct btmtk_data *data = hci_get_priv(hdev); + struct usb_ctrlrequest *dr; + unsigned char *buf; + int err, size = 64; + unsigned int pipe; + struct urb *urb; + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return -ENOMEM; + + dr = kmalloc(sizeof(*dr), GFP_KERNEL); + if (!dr) { + usb_free_urb(urb); + return -ENOMEM; + } + + dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_IN; + dr->bRequest = 1; + dr->wIndex = cpu_to_le16(0); + dr->wValue = cpu_to_le16(48); + dr->wLength = cpu_to_le16(size); + + buf = kmalloc(size, GFP_KERNEL); + if (!buf) { + kfree(dr); + usb_free_urb(urb); + return -ENOMEM; + } + + pipe = usb_rcvctrlpipe(data->udev, 0); + + usb_fill_control_urb(urb, data->udev, pipe, (void *)dr, + buf, size, btmtk_usb_wmt_recv, hdev); + + urb->transfer_flags |= URB_FREE_BUFFER; + + usb_anchor_urb(urb, data->ctrl_anchor); + err = usb_submit_urb(urb, GFP_KERNEL); + if (err < 0) { + if (err != -EPERM && err != -ENODEV) + bt_dev_err(hdev, "urb %p submission failed (%d)", + urb, -err); + usb_unanchor_urb(urb); + } + + usb_free_urb(urb); + + return err; +} + +static int btmtk_usb_hci_wmt_sync(struct hci_dev *hdev, + struct btmtk_hci_wmt_params *wmt_params) +{ + struct btmtk_data *data = hci_get_priv(hdev); + struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc; + u32 hlen, status = BTMTK_WMT_INVALID; + struct btmtk_hci_wmt_evt *wmt_evt; + struct btmtk_hci_wmt_cmd *wc; + struct btmtk_wmt_hdr *hdr; + int err; + + /* Send the WMT command and wait until the WMT event returns */ + hlen = sizeof(*hdr) + wmt_params->dlen; + if (hlen > 255) + return -EINVAL; + + wc = kzalloc(hlen, GFP_KERNEL); + if (!wc) + return -ENOMEM; + + hdr = &wc->hdr; + hdr->dir = 1; + hdr->op = wmt_params->op; + hdr->dlen = cpu_to_le16(wmt_params->dlen + 1); + hdr->flag = wmt_params->flag; + memcpy(wc->data, wmt_params->data, wmt_params->dlen); + + set_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags); + + /* WMT cmd/event doesn't follow the generic HCI cmd/event handling; + * the control pipe must be polled constantly until the host receives + * the WMT event. Thus we explicitly acquire a PM counter on the USB + * interface to prevent it from entering autosuspend while a WMT + * cmd/event is in progress. + */ + err = usb_autopm_get_interface(data->intf); + if (err < 0) + goto err_free_wc; + + err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc); + + if (err < 0) { + clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags); + usb_autopm_put_interface(data->intf); + goto err_free_wc; + } + + /* Submit control IN URB on demand to process the WMT event */ + err = btmtk_usb_submit_wmt_recv_urb(hdev); + + usb_autopm_put_interface(data->intf); + + if (err < 0) + goto err_free_wc; + + /* The vendor specific WMT commands are all answered by a vendor + * specific event and will have the Command Status or Command + * Complete as with usual HCI command flow control. + * + * After sending the command, wait for BTMTK_TX_WAIT_VND_EVT + * state to be cleared. The driver specific event receive routine + * will clear that state and with that indicate completion of the + * WMT command. 
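+ * wait_on_bit_timeout() returns 0 once the bit is cleared; a non-zero + * result means the wait was either interrupted (-EINTR) or timed out, + * both handled below.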
+ */ + err = wait_on_bit_timeout(&data->flags, BTMTK_TX_WAIT_VND_EVT, + TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT); + if (err == -EINTR) { + bt_dev_err(hdev, "Execution of wmt command interrupted"); + clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags); + goto err_free_wc; + } + + if (err) { + bt_dev_err(hdev, "Execution of wmt command timed out"); + clear_bit(BTMTK_TX_WAIT_VND_EVT, &data->flags); + err = -ETIMEDOUT; + goto err_free_wc; + } + + if (data->evt_skb == NULL) + goto err_free_wc; + + /* Parse and handle the return WMT event */ + wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data; + if (wmt_evt->whdr.op != hdr->op) { + bt_dev_err(hdev, "Wrong op received %d expected %d", + wmt_evt->whdr.op, hdr->op); + err = -EIO; + goto err_free_skb; + } + + switch (wmt_evt->whdr.op) { + case BTMTK_WMT_SEMAPHORE: + if (wmt_evt->whdr.flag == 2) + status = BTMTK_WMT_PATCH_UNDONE; + else + status = BTMTK_WMT_PATCH_DONE; + break; + case BTMTK_WMT_FUNC_CTRL: + wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt; + if (be16_to_cpu(wmt_evt_funcc->status) == 0x404) + status = BTMTK_WMT_ON_DONE; + else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420) + status = BTMTK_WMT_ON_PROGRESS; + else + status = BTMTK_WMT_ON_UNDONE; + break; + case BTMTK_WMT_PATCH_DWNLD: + if (wmt_evt->whdr.flag == 2) + status = BTMTK_WMT_PATCH_DONE; + else if (wmt_evt->whdr.flag == 1) + status = BTMTK_WMT_PATCH_PROGRESS; + else + status = BTMTK_WMT_PATCH_UNDONE; + break; + } + + if (wmt_params->status) + *wmt_params->status = status; + +err_free_skb: + kfree_skb(data->evt_skb); + data->evt_skb = NULL; +err_free_wc: + kfree(wc); + return err; +} + +static int btmtk_usb_func_query(struct hci_dev *hdev) +{ + struct btmtk_hci_wmt_params wmt_params; + int status, err; + u8 param = 0; + + /* Query whether the function is enabled */ + wmt_params.op = BTMTK_WMT_FUNC_CTRL; + wmt_params.flag = 4; + wmt_params.dlen = sizeof(param); + wmt_params.data = &param; + wmt_params.status = &status; + + err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to query function status (%d)", err); + return err; + } + + return status; +} + +static int btmtk_usb_uhw_reg_write(struct hci_dev *hdev, u32 reg, u32 val) +{ + struct btmtk_data *data = hci_get_priv(hdev); + int pipe, err; + void *buf; + + buf = kzalloc(4, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + put_unaligned_le32(val, buf); + + pipe = usb_sndctrlpipe(data->udev, 0); + err = usb_control_msg(data->udev, pipe, 0x02, + 0x5E, + reg >> 16, reg & 0xffff, + buf, 4, USB_CTRL_SET_TIMEOUT); + if (err < 0) + bt_dev_err(hdev, "Failed to write uhw reg(%d)", err); + + kfree(buf); + + return err; +} + +static int btmtk_usb_uhw_reg_read(struct hci_dev *hdev, u32 reg, u32 *val) +{ + struct btmtk_data *data = hci_get_priv(hdev); + int pipe, err; + void *buf; + + buf = kzalloc(4, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pipe = usb_rcvctrlpipe(data->udev, 0); + err = usb_control_msg(data->udev, pipe, 0x01, + 0xDE, + reg >> 16, reg & 0xffff, + buf, 4, USB_CTRL_GET_TIMEOUT); + if (err < 0) { + bt_dev_err(hdev, "Failed to read uhw reg(%d)", err); + goto err_free_buf; + } + + *val = get_unaligned_le32(buf); + bt_dev_dbg(hdev, "reg=%x, value=0x%08x", reg, *val); + +err_free_buf: + kfree(buf); + + return err; +} + +static int btmtk_usb_reg_read(struct hci_dev *hdev, u32 reg, u32 *val) +{ + struct btmtk_data *data = hci_get_priv(hdev); + int pipe, err, size = sizeof(u32); + void *buf; + + buf = kzalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + pipe = 
usb_rcvctrlpipe(data->udev, 0); + err = usb_control_msg(data->udev, pipe, 0x63, + USB_TYPE_VENDOR | USB_DIR_IN, + reg >> 16, reg & 0xffff, + buf, size, USB_CTRL_GET_TIMEOUT); + if (err < 0) + goto err_free_buf; + + *val = get_unaligned_le32(buf); + +err_free_buf: + kfree(buf); + + return err; +} + +static int btmtk_usb_id_get(struct hci_dev *hdev, u32 reg, u32 *id) +{ + return btmtk_usb_reg_read(hdev, reg, id); +} + +static u32 btmtk_usb_reset_done(struct hci_dev *hdev) +{ + u32 val = 0; + + btmtk_usb_uhw_reg_read(hdev, MTK_BT_MISC, &val); + + return val & MTK_BT_RST_DONE; +} + +int btmtk_usb_subsys_reset(struct hci_dev *hdev, u32 dev_id) +{ + u32 val; + int err; + + if (dev_id == 0x7922) { + err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_SUBSYS_RST, &val); + if (err < 0) + return err; + val |= 0x00002020; + err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_SUBSYS_RST, val); + if (err < 0) + return err; + err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT, 0x00010001); + if (err < 0) + return err; + err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_SUBSYS_RST, &val); + if (err < 0) + return err; + val |= BIT(0); + err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_SUBSYS_RST, val); + if (err < 0) + return err; + msleep(100); + } else if (dev_id == 0x7925) { + err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_RESET_REG_CONNV3, &val); + if (err < 0) + return err; + val |= (1 << 5); + err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_RESET_REG_CONNV3, val); + if (err < 0) + return err; + err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_RESET_REG_CONNV3, &val); + if (err < 0) + return err; + val &= 0xFFFF00FF; + val |= (1 << 13); + err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_RESET_REG_CONNV3, val); + if (err < 0) + return err; + err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT, 0x00010001); + if (err < 0) + return err; + err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_RESET_REG_CONNV3, &val); + if (err < 0) + return err; + val |= (1 << 0); + err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_RESET_REG_CONNV3, val); + if (err < 0) + return err; + err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT, 0x000000FF); + if (err < 0) + return err; + err = btmtk_usb_uhw_reg_read(hdev, MTK_UDMA_INT_STA_BT, &val); + if (err < 0) + return err; + err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT1, 0x000000FF); + if (err < 0) + return err; + err = btmtk_usb_uhw_reg_read(hdev, MTK_UDMA_INT_STA_BT1, &val); + if (err < 0) + return err; + msleep(100); + } else { + /* It's Device EndPoint Reset Option Register */ + bt_dev_dbg(hdev, "Initiating reset mechanism via uhw"); + err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT); + if (err < 0) + return err; + err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_WDT_STATUS, &val); + if (err < 0) + return err; + /* Reset the bluetooth chip via USB interface. 
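+ * The reset is driven by toggling MTK_BT_SUBSYS_RST to 1 and back to 0; + * completion is then polled via the MTK_BT_RST_DONE bit of MTK_BT_MISC + * (see btmtk_usb_reset_done()).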
*/ + err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_SUBSYS_RST, 1); + if (err < 0) + return err; + err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT, 0x000000FF); + if (err < 0) + return err; + err = btmtk_usb_uhw_reg_read(hdev, MTK_UDMA_INT_STA_BT, &val); + if (err < 0) + return err; + err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT1, 0x000000FF); + if (err < 0) + return err; + err = btmtk_usb_uhw_reg_read(hdev, MTK_UDMA_INT_STA_BT1, &val); + if (err < 0) + return err; + /* MT7921 needs a 20ms delay between toggling the reset bit */ + msleep(20); + err = btmtk_usb_uhw_reg_write(hdev, MTK_BT_SUBSYS_RST, 0); + if (err < 0) + return err; + err = btmtk_usb_uhw_reg_read(hdev, MTK_BT_SUBSYS_RST, &val); + if (err < 0) + return err; + } + + err = readx_poll_timeout(btmtk_usb_reset_done, hdev, val, + val & MTK_BT_RST_DONE, 20000, 1000000); + if (err < 0) + bt_dev_err(hdev, "Reset timeout"); + + if (dev_id == 0x7922) { + err = btmtk_usb_uhw_reg_write(hdev, MTK_UDMA_INT_STA_BT, 0x000000FF); + if (err < 0) + return err; + } + + err = btmtk_usb_id_get(hdev, 0x70010200, &val); + if (err < 0 || !val) + bt_dev_err(hdev, "Can't get device id, subsys reset failed."); + + return err; +} +EXPORT_SYMBOL_GPL(btmtk_usb_subsys_reset); + +int btmtk_usb_recv_acl(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btmtk_data *data = hci_get_priv(hdev); + u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle); + + switch (handle) { + case 0xfc6f: /* Firmware dump from device */ + /* When the firmware hangs, the device can no longer + * suspend, so disable auto-suspend. + */ + usb_disable_autosuspend(data->udev); + + /* We need to forward the diagnostic packet to userspace daemon + * for backward compatibility, so we additionally have to clone + * the packet for the in-kernel coredump support. 
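+ * The clone below is fed to the devcoredump machinery while the + * original skb still reaches userspace via hci_recv_diag().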
+ */ + if (IS_ENABLED(CONFIG_DEV_COREDUMP)) { + struct sk_buff *skb_cd = skb_clone(skb, GFP_ATOMIC); + + if (skb_cd) + btmtk_process_coredump(hdev, skb_cd); + } + + fallthrough; + case 0x05ff: /* Firmware debug logging 1 */ + case 0x05fe: /* Firmware debug logging 2 */ + return hci_recv_diag(hdev, skb); + } + + return hci_recv_frame(hdev, skb); +} +EXPORT_SYMBOL_GPL(btmtk_usb_recv_acl); + +static int btmtk_isopkt_pad(struct hci_dev *hdev, struct sk_buff *skb) +{ + if (skb->len > MTK_ISO_THRESHOLD) + return -EINVAL; + + if (skb_pad(skb, MTK_ISO_THRESHOLD - skb->len)) + return -ENOMEM; + + __skb_put(skb, MTK_ISO_THRESHOLD - skb->len); + + return 0; +} + +static int __set_mtk_intr_interface(struct hci_dev *hdev) +{ + struct btmtk_data *btmtk_data = hci_get_priv(hdev); + struct usb_interface *intf = btmtk_data->isopkt_intf; + int i, err; + + if (!btmtk_data->isopkt_intf) + return -ENODEV; + + err = usb_set_interface(btmtk_data->udev, MTK_ISO_IFNUM, 1); + if (err < 0) { + bt_dev_err(hdev, "setting interface failed (%d)", -err); + return err; + } + + btmtk_data->isopkt_tx_ep = NULL; + btmtk_data->isopkt_rx_ep = NULL; + + for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { + struct usb_endpoint_descriptor *ep_desc; + + ep_desc = &intf->cur_altsetting->endpoint[i].desc; + + if (!btmtk_data->isopkt_tx_ep && + usb_endpoint_is_int_out(ep_desc)) { + btmtk_data->isopkt_tx_ep = ep_desc; + continue; + } + + if (!btmtk_data->isopkt_rx_ep && + usb_endpoint_is_int_in(ep_desc)) { + btmtk_data->isopkt_rx_ep = ep_desc; + continue; + } + } + + if (!btmtk_data->isopkt_tx_ep || + !btmtk_data->isopkt_rx_ep) { + bt_dev_err(hdev, "invalid interrupt descriptors"); + return -ENODEV; + } + + return 0; +} + +struct urb *alloc_mtk_intr_urb(struct hci_dev *hdev, struct sk_buff *skb, + usb_complete_t tx_complete) +{ + struct btmtk_data *btmtk_data = hci_get_priv(hdev); + struct urb *urb; + unsigned int pipe; + + if (!btmtk_data->isopkt_tx_ep) + return ERR_PTR(-ENODEV); + + urb = usb_alloc_urb(0, GFP_KERNEL); + if (!urb) + return ERR_PTR(-ENOMEM); + + if (btmtk_isopkt_pad(hdev, skb)) + return ERR_PTR(-EINVAL); + + pipe = usb_sndintpipe(btmtk_data->udev, + btmtk_data->isopkt_tx_ep->bEndpointAddress); + + usb_fill_int_urb(urb, btmtk_data->udev, pipe, + skb->data, skb->len, tx_complete, + skb, btmtk_data->isopkt_tx_ep->bInterval); + + skb->dev = (void *)hdev; + + return urb; +} +EXPORT_SYMBOL_GPL(alloc_mtk_intr_urb); + +static int btmtk_recv_isopkt(struct hci_dev *hdev, void *buffer, int count) +{ + struct btmtk_data *btmtk_data = hci_get_priv(hdev); + struct sk_buff *skb; + unsigned long flags; + int err = 0; + + spin_lock_irqsave(&btmtk_data->isorxlock, flags); + skb = btmtk_data->isopkt_skb; + + while (count) { + int len; + + if (!skb) { + skb = bt_skb_alloc(HCI_MAX_ISO_SIZE, GFP_ATOMIC); + if (!skb) { + err = -ENOMEM; + break; + } + + hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; + hci_skb_expect(skb) = HCI_ISO_HDR_SIZE; + } + + len = min_t(uint, hci_skb_expect(skb), count); + skb_put_data(skb, buffer, len); + + count -= len; + buffer += len; + hci_skb_expect(skb) -= len; + + if (skb->len == HCI_ISO_HDR_SIZE) { + __le16 dlen = ((struct hci_iso_hdr *)skb->data)->dlen; + + /* Complete ISO header */ + hci_skb_expect(skb) = __le16_to_cpu(dlen); + + if (skb_tailroom(skb) < hci_skb_expect(skb)) { + kfree_skb(skb); + skb = NULL; + + err = -EILSEQ; + break; + } + } + + if (!hci_skb_expect(skb)) { + /* Complete frame */ + hci_recv_frame(hdev, skb); + skb = NULL; + } + } + + btmtk_data->isopkt_skb = skb; + 
spin_unlock_irqrestore(&btmtk_data->isorxlock, flags); + + return err; +} + +static void btmtk_intr_complete(struct urb *urb) +{ + struct hci_dev *hdev = urb->context; + struct btmtk_data *btmtk_data = hci_get_priv(hdev); + int err; + + BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, + urb->actual_length); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return; + + if (hdev->suspended) + return; + + if (urb->status == 0) { + hdev->stat.byte_rx += urb->actual_length; + + if (btmtk_recv_isopkt(hdev, urb->transfer_buffer, + urb->actual_length) < 0) { + bt_dev_err(hdev, "corrupted iso packet"); + hdev->stat.err_rx++; + } + } else if (urb->status == -ENOENT) { + /* The URB was killed by usb_kill_urb (e.g. on suspend); do not resubmit */ + return; + } + + usb_mark_last_busy(btmtk_data->udev); + usb_anchor_urb(urb, &btmtk_data->isopkt_anchor); + + err = usb_submit_urb(urb, GFP_ATOMIC); + if (err < 0) { + /* -EPERM: urb is being killed; + * -ENODEV: device got disconnected + */ + if (err != -EPERM && err != -ENODEV) + bt_dev_err(hdev, "urb %p failed to resubmit (%d)", + urb, -err); + if (err != -EPERM) + hci_cmd_sync_cancel(hdev, -err); + usb_unanchor_urb(urb); + } +} + +static int btmtk_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags) +{ + struct btmtk_data *btmtk_data = hci_get_priv(hdev); + unsigned char *buf; + unsigned int pipe; + struct urb *urb; + int err, size; + + BT_DBG("%s", hdev->name); + + if (!btmtk_data->isopkt_rx_ep) + return -ENODEV; + + urb = usb_alloc_urb(0, mem_flags); + if (!urb) + return -ENOMEM; + size = le16_to_cpu(btmtk_data->isopkt_rx_ep->wMaxPacketSize); + + buf = kmalloc(size, mem_flags); + if (!buf) { + usb_free_urb(urb); + return -ENOMEM; + } + + pipe = usb_rcvintpipe(btmtk_data->udev, + btmtk_data->isopkt_rx_ep->bEndpointAddress); + + usb_fill_int_urb(urb, btmtk_data->udev, pipe, buf, size, + btmtk_intr_complete, hdev, + btmtk_data->isopkt_rx_ep->bInterval); + + urb->transfer_flags |= URB_FREE_BUFFER; + + usb_mark_last_busy(btmtk_data->udev); + usb_anchor_urb(urb, &btmtk_data->isopkt_anchor); + + err = usb_submit_urb(urb, mem_flags); + if (err < 0) { + if (err != -EPERM && err != -ENODEV) + bt_dev_err(hdev, "urb %p submission failed (%d)", + urb, -err); + usb_unanchor_urb(urb); + } + + usb_free_urb(urb); + + return err; +} + +static int btmtk_usb_isointf_init(struct hci_dev *hdev) +{ + struct btmtk_data *btmtk_data = hci_get_priv(hdev); + u8 iso_param[2] = { 0x08, 0x01 }; + struct sk_buff *skb; + int err; + + init_usb_anchor(&btmtk_data->isopkt_anchor); + spin_lock_init(&btmtk_data->isorxlock); + + __set_mtk_intr_interface(hdev); + + err = btmtk_submit_intr_urb(hdev, GFP_KERNEL); + if (err < 0) { + usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor); + bt_dev_err(hdev, "ISO intf not supported (%d)", err); + return err; + } + + skb = __hci_cmd_sync(hdev, 0xfd98, sizeof(iso_param), iso_param, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Failed to apply iso setting (%ld)", PTR_ERR(skb)); + return PTR_ERR(skb); + } + kfree_skb(skb); + + return 0; +} + +int btmtk_usb_resume(struct hci_dev *hdev) +{ + /* Additional MediaTek-specific steps performed when the Bluetooth USB + * driver's resume function is called. 
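+ * Only the ISO interrupt URB is resubmitted here; restarting the + * ordinary HCI traffic is assumed to be handled by the generic btusb + * resume path.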
+ */ + struct btmtk_data *btmtk_data = hci_get_priv(hdev); + + /* Resubmit urb for iso data transmission */ + if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags)) { + if (btmtk_submit_intr_urb(hdev, GFP_NOIO) < 0) + clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags); + } + + return 0; +} +EXPORT_SYMBOL_GPL(btmtk_usb_resume); + +int btmtk_usb_suspend(struct hci_dev *hdev) +{ + /* Additional MediaTek-specific steps performed when the Bluetooth USB + * driver's suspend function is called. + */ + struct btmtk_data *btmtk_data = hci_get_priv(hdev); + + /* Kill the anchored urbs used for iso data transmission */ + usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor); + + return 0; +} +EXPORT_SYMBOL_GPL(btmtk_usb_suspend); + +int btmtk_usb_setup(struct hci_dev *hdev) +{ + struct btmtk_data *btmtk_data = hci_get_priv(hdev); + struct btmtk_hci_wmt_params wmt_params; + ktime_t calltime, delta, rettime; + struct btmtk_tci_sleep tci_sleep; + unsigned long long duration; + struct sk_buff *skb; + const char *fwname; + int err, status; + u32 dev_id = 0; + char fw_bin_name[64]; + u32 fw_version = 0, fw_flavor = 0; + u8 param; + + calltime = ktime_get(); + + err = btmtk_usb_id_get(hdev, 0x80000008, &dev_id); + if (err < 0) { + bt_dev_err(hdev, "Failed to get device id (%d)", err); + return err; + } + + if (!dev_id || dev_id != 0x7663) { + err = btmtk_usb_id_get(hdev, 0x70010200, &dev_id); + if (err < 0) { + bt_dev_err(hdev, "Failed to get device id (%d)", err); + return err; + } + err = btmtk_usb_id_get(hdev, 0x80021004, &fw_version); + if (err < 0) { + bt_dev_err(hdev, "Failed to get fw version (%d)", err); + return err; + } + err = btmtk_usb_id_get(hdev, 0x70010020, &fw_flavor); + if (err < 0) { + bt_dev_err(hdev, "Failed to get fw flavor (%d)", err); + return err; + } + fw_flavor = (fw_flavor & 0x00000080) >> 7; + } + + btmtk_data->dev_id = dev_id; + + err = btmtk_register_coredump(hdev, btmtk_data->drv_name, fw_version); + if (err < 0) + bt_dev_err(hdev, "Failed to register coredump (%d)", err); + + switch (dev_id) { + case 0x7663: + fwname = FIRMWARE_MT7663; + break; + case 0x7668: + fwname = FIRMWARE_MT7668; + break; + case 0x7922: + case 0x7961: + case 0x7925: + /* Reset the device to ensure it's in the initial state before + * downloading the firmware. 
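+ * The reset is skipped once BTMTK_FIRMWARE_LOADED is set, i.e. when + * firmware from a previous setup pass is still in place.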
+ */ + + if (!test_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags)) + btmtk_usb_subsys_reset(hdev, dev_id); + + btmtk_fw_get_filename(fw_bin_name, sizeof(fw_bin_name), dev_id, + fw_version, fw_flavor); + + err = btmtk_setup_firmware_79xx(hdev, fw_bin_name, + btmtk_usb_hci_wmt_sync); + if (err < 0) { + bt_dev_err(hdev, "Failed to set up firmware (%d)", err); + clear_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags); + return err; + } + + set_bit(BTMTK_FIRMWARE_LOADED, &btmtk_data->flags); + + /* It's Device EndPoint Reset Option Register */ + err = btmtk_usb_uhw_reg_write(hdev, MTK_EP_RST_OPT, + MTK_EP_RST_IN_OUT_OPT); + if (err < 0) + return err; + + /* Enable Bluetooth protocol */ + param = 1; + wmt_params.op = BTMTK_WMT_FUNC_CTRL; + wmt_params.flag = 0; + wmt_params.dlen = sizeof(param); + wmt_params.data = &param; + wmt_params.status = NULL; + + err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); + return err; + } + + hci_set_msft_opcode(hdev, 0xFD30); + hci_set_aosp_capable(hdev); + + /* Set up ISO interface after protocol enabled */ + if (test_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags)) { + if (!btmtk_usb_isointf_init(hdev)) + set_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags); + } + + goto done; + default: + bt_dev_err(hdev, "Unsupported hardware variant (%08x)", + dev_id); + return -ENODEV; + } + + /* Query whether the firmware has already been downloaded */ + wmt_params.op = BTMTK_WMT_SEMAPHORE; + wmt_params.flag = 1; + wmt_params.dlen = 0; + wmt_params.data = NULL; + wmt_params.status = &status; + + err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to query firmware status (%d)", err); + return err; + } + + if (status == BTMTK_WMT_PATCH_DONE) { + bt_dev_info(hdev, "firmware already downloaded"); + goto ignore_setup_fw; + } + + /* Setup a firmware which the device definitely requires */ + err = btmtk_setup_firmware(hdev, fwname, + btmtk_usb_hci_wmt_sync); + if (err < 0) + return err; + +ignore_setup_fw: + err = readx_poll_timeout(btmtk_usb_func_query, hdev, status, + status < 0 || status != BTMTK_WMT_ON_PROGRESS, + 2000, 5000000); + /* -ETIMEDOUT happens */ + if (err < 0) + return err; + + /* The other errors happen in btmtk_usb_func_query */ + if (status < 0) + return status; + + if (status == BTMTK_WMT_ON_DONE) { + bt_dev_info(hdev, "function already on"); + goto ignore_func_on; + } + + /* Enable Bluetooth protocol */ + param = 1; + wmt_params.op = BTMTK_WMT_FUNC_CTRL; + wmt_params.flag = 0; + wmt_params.dlen = sizeof(param); + wmt_params.data = &param; + wmt_params.status = NULL; + + err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); + return err; + } + +ignore_func_on: + /* Apply the low power environment setup */ + tci_sleep.mode = 0x5; + tci_sleep.duration = cpu_to_le16(0x640); + tci_sleep.host_duration = cpu_to_le16(0x640); + tci_sleep.host_wakeup_pin = 0; + tci_sleep.time_compensation = 0; + + skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + bt_dev_err(hdev, "Failed to apply low power setting (%d)", err); + return err; + } + kfree_skb(skb); + +done: + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long)ktime_to_ns(delta) >> 10; + + bt_dev_info(hdev, "Device setup in %llu usecs", duration); + + return 0; +} +EXPORT_SYMBOL_GPL(btmtk_usb_setup); + +int 
btmtk_usb_shutdown(struct hci_dev *hdev) +{ + struct btmtk_hci_wmt_params wmt_params; + u8 param = 0; + int err; + + /* Disable the device */ + wmt_params.op = BTMTK_WMT_FUNC_CTRL; + wmt_params.flag = 0; + wmt_params.dlen = sizeof(param); + wmt_params.data = &param; + wmt_params.status = NULL; + + err = btmtk_usb_hci_wmt_sync(hdev, &wmt_params); + if (err < 0) { + bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); + return err; + } + + return 0; +} +EXPORT_SYMBOL_GPL(btmtk_usb_shutdown); + MODULE_AUTHOR("Sean Wang "); MODULE_AUTHOR("Mark Chen "); MODULE_DESCRIPTION("Bluetooth support for MediaTek devices ver " VERSION); diff --git a/drivers/bluetooth/btmtk.h b/drivers/bluetooth/btmtk.h index cbcdb99a22e6..5df7c3296624 100644 --- a/drivers/bluetooth/btmtk.h +++ b/drivers/bluetooth/btmtk.h @@ -28,6 +28,21 @@ #define MTK_COREDUMP_END_LEN (sizeof(MTK_COREDUMP_END)) #define MTK_COREDUMP_NUM 255 +/* UHW CR mapping */ +#define MTK_BT_MISC 0x70002510 +#define MTK_BT_SUBSYS_RST 0x70002610 +#define MTK_UDMA_INT_STA_BT 0x74000024 +#define MTK_UDMA_INT_STA_BT1 0x74000308 +#define MTK_BT_WDT_STATUS 0x740003A0 +#define MTK_EP_RST_OPT 0x74011890 +#define MTK_EP_RST_IN_OUT_OPT 0x00010001 +#define MTK_BT_RST_DONE 0x00000100 +#define MTK_BT_RESET_REG_CONNV3 0x70028610 +#define MTK_BT_READ_DEV_ID 0x70010200 + +/* MediaTek ISO Interface */ +#define MTK_ISO_IFNUM 2 + enum { BTMTK_WMT_PATCH_DWNLD = 0x1, BTMTK_WMT_TEST = 0x2, @@ -126,6 +141,14 @@ struct btmtk_hci_wmt_params { u32 *status; }; +enum { + BTMTK_TX_WAIT_VND_EVT, + BTMTK_FIRMWARE_LOADED, + BTMTK_HW_RESET_ACTIVE, + BTMTK_ISOPKT_OVER_INTR, + BTMTK_ISOPKT_RUNNING, +}; + typedef int (*btmtk_reset_sync_func_t)(struct hci_dev *, void *); struct btmtk_coredump_info { @@ -135,10 +158,25 @@ struct btmtk_coredump_info { int state; }; -struct btmediatek_data { +struct btmtk_data { + const char *drv_name; + unsigned long flags; u32 dev_id; btmtk_reset_sync_func_t reset_sync; struct btmtk_coredump_info cd_info; + + struct usb_device *udev; + struct usb_interface *intf; + struct usb_anchor *ctrl_anchor; + struct sk_buff *evt_skb; + struct usb_endpoint_descriptor *isopkt_tx_ep; + struct usb_endpoint_descriptor *isopkt_rx_ep; + struct usb_interface *isopkt_intf; + struct usb_anchor isopkt_anchor; + struct sk_buff *isopkt_skb; + + /* spinlock for ISO data transmission */ + spinlock_t isorxlock; }; typedef int (*wmt_cmd_sync_func_t)(struct hci_dev *, @@ -160,6 +198,24 @@ int btmtk_register_coredump(struct hci_dev *hdev, const char *name, u32 fw_version); int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb); + +void btmtk_fw_get_filename(char *buf, size_t size, u32 dev_id, u32 fw_ver, + u32 fw_flavor); + +int btmtk_usb_subsys_reset(struct hci_dev *hdev, u32 dev_id); + +int btmtk_usb_recv_acl(struct hci_dev *hdev, struct sk_buff *skb); + +struct urb *alloc_mtk_intr_urb(struct hci_dev *hdev, struct sk_buff *skb, + usb_complete_t tx_complete); + +int btmtk_usb_resume(struct hci_dev *hdev); + +int btmtk_usb_suspend(struct hci_dev *hdev); + +int btmtk_usb_setup(struct hci_dev *hdev); + +int btmtk_usb_shutdown(struct hci_dev *hdev); #else static inline int btmtk_set_bdaddr(struct hci_dev *hdev, @@ -168,29 +224,73 @@ static inline int btmtk_set_bdaddr(struct hci_dev *hdev, return -EOPNOTSUPP; } -static int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname, - wmt_cmd_sync_func_t wmt_cmd_sync) +static inline int btmtk_setup_firmware_79xx(struct hci_dev *hdev, + const char *fwname, + wmt_cmd_sync_func_t wmt_cmd_sync) { return -EOPNOTSUPP; } 
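+/* Like the other stubs in this #else branch, the inline variants below + * keep callers compiling and report -EOPNOTSUPP (or an error pointer) + * when MediaTek support is compiled out. + */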
-static int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname, - wmt_cmd_sync_func_t wmt_cmd_sync) +static inline int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname, + wmt_cmd_sync_func_t wmt_cmd_sync) { return -EOPNOTSUPP; } -static void btmtk_reset_sync(struct hci_dev *hdev) +static inline void btmtk_reset_sync(struct hci_dev *hdev) { } -static int btmtk_register_coredump(struct hci_dev *hdev, const char *name, - u32 fw_version) +static inline int btmtk_register_coredump(struct hci_dev *hdev, + const char *name, u32 fw_version) { return -EOPNOTSUPP; } -static int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb) +static inline int btmtk_process_coredump(struct hci_dev *hdev, + struct sk_buff *skb) +{ + return -EOPNOTSUPP; +} + +static inline void btmtk_fw_get_filename(char *buf, size_t size, u32 dev_id, + u32 fw_ver, u32 fw_flavor) +{ +} + +static inline int btmtk_usb_subsys_reset(struct hci_dev *hdev, u32 dev_id) +{ + return -EOPNOTSUPP; +} + +static inline int btmtk_usb_recv_acl(struct hci_dev *hdev, struct sk_buff *skb) +{ + return -EOPNOTSUPP; +} + +static inline struct urb *alloc_mtk_intr_urb(struct hci_dev *hdev, + struct sk_buff *skb, + usb_complete_t tx_complete) +{ + return ERR_PTR(-EOPNOTSUPP); +} + +static inline int btmtk_usb_resume(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} + +static inline int btmtk_usb_suspend(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} + +static inline int btmtk_usb_setup(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} + +static inline int btmtk_usb_shutdown(struct hci_dev *hdev) { return -EOPNOTSUPP; } diff --git a/drivers/bluetooth/btmtksdio.c b/drivers/bluetooth/btmtksdio.c index 8ded9ef8089a..39d6898497a4 100644 --- a/drivers/bluetooth/btmtksdio.c +++ b/drivers/bluetooth/btmtksdio.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -1117,6 +1118,9 @@ static int btmtksdio_setup(struct hci_dev *hdev) return err; } + btmtk_fw_get_filename(fwname, sizeof(fwname), dev_id, + fw_version, 0); + snprintf(fwname, sizeof(fwname), "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin", dev_id & 0xffff, (fw_version & 0xff) + 1); diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c index e6bc4a73c9fc..aa87c3e78871 100644 --- a/drivers/bluetooth/btmtkuart.c +++ b/drivers/bluetooth/btmtkuart.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c index 9bfa9a6ad56c..31d3dd90b672 100644 --- a/drivers/bluetooth/btnxpuart.c +++ b/drivers/bluetooth/btnxpuart.c @@ -29,27 +29,38 @@ #define BTNXPUART_CHECK_BOOT_SIGNATURE 3 #define BTNXPUART_SERDEV_OPEN 4 #define BTNXPUART_IR_IN_PROGRESS 5 +#define BTNXPUART_FW_DOWNLOAD_ABORT 6 /* NXP HW err codes */ #define BTNXPUART_IR_HW_ERR 0xb0 -#define FIRMWARE_W8987 "nxp/uartuart8987_bt.bin" -#define FIRMWARE_W8997 "nxp/uartuart8997_bt_v4.bin" -#define FIRMWARE_W9098 "nxp/uartuart9098_bt_v1.bin" -#define FIRMWARE_IW416 "nxp/uartiw416_bt_v0.bin" -#define FIRMWARE_IW612 "nxp/uartspi_n61x_v1.bin.se" -#define FIRMWARE_IW624 "nxp/uartiw624_bt.bin" -#define FIRMWARE_SECURE_IW624 "nxp/uartiw624_bt.bin.se" -#define FIRMWARE_AW693 "nxp/uartaw693_bt.bin" -#define FIRMWARE_SECURE_AW693 "nxp/uartaw693_bt.bin.se" -#define FIRMWARE_HELPER "nxp/helper_uart_3000000.bin" +#define FIRMWARE_W8987 "uart8987_bt_v0.bin" +#define FIRMWARE_W8987_OLD "uartuart8987_bt.bin" +#define FIRMWARE_W8997 "uart8997_bt_v4.bin" +#define FIRMWARE_W8997_OLD "uartuart8997_bt_v4.bin" 
+#define FIRMWARE_W9098 "uart9098_bt_v1.bin" +#define FIRMWARE_W9098_OLD "uartuart9098_bt_v1.bin" +#define FIRMWARE_IW416 "uartiw416_bt_v0.bin" +#define FIRMWARE_IW612 "uartspi_n61x_v1.bin.se" +#define FIRMWARE_IW615 "uartspi_iw610_v0.bin" +#define FIRMWARE_SECURE_IW615 "uartspi_iw610_v0.bin.se" +#define FIRMWARE_IW624 "uartiw624_bt.bin" +#define FIRMWARE_SECURE_IW624 "uartiw624_bt.bin.se" +#define FIRMWARE_AW693 "uartaw693_bt.bin" +#define FIRMWARE_SECURE_AW693 "uartaw693_bt.bin.se" +#define FIRMWARE_AW693_A1 "uartaw693_bt_v1.bin" +#define FIRMWARE_SECURE_AW693_A1 "uartaw693_bt_v1.bin.se" +#define FIRMWARE_HELPER "helper_uart_3000000.bin" #define CHIP_ID_W9098 0x5c03 #define CHIP_ID_IW416 0x7201 #define CHIP_ID_IW612 0x7601 #define CHIP_ID_IW624a 0x8000 #define CHIP_ID_IW624c 0x8001 -#define CHIP_ID_AW693 0x8200 +#define CHIP_ID_AW693a0 0x8200 +#define CHIP_ID_AW693a1 0x8201 +#define CHIP_ID_IW615a0 0x8800 +#define CHIP_ID_IW615a1 0x8801 #define FW_SECURE_MASK 0xc0 #define FW_OPEN 0x00 @@ -144,6 +155,7 @@ struct psmode_cmd_payload { struct btnxpuart_data { const char *helper_fw_name; const char *fw_name; + const char *fw_name_old; }; struct btnxpuart_dev { @@ -159,6 +171,7 @@ struct btnxpuart_dev { u8 fw_name[MAX_FW_FILE_NAME_LEN]; u32 fw_dnld_v1_offset; u32 fw_v1_sent_bytes; + u32 fw_dnld_v3_offset; u32 fw_v3_offset_correction; u32 fw_v1_expected_len; u32 boot_reg_offset; @@ -187,6 +200,11 @@ struct btnxpuart_dev { #define NXP_NAK_V3 0x7b #define NXP_CRC_ERROR_V3 0x7c +/* Bootloader signature error codes */ +#define NXP_ACK_RX_TIMEOUT 0x0002 /* ACK not received from host */ +#define NXP_HDR_RX_TIMEOUT 0x0003 /* FW Header chunk not received */ +#define NXP_DATA_RX_TIMEOUT 0x0004 /* FW Data chunk not received */ + #define HDR_LEN 16 #define NXP_RECV_CHIP_VER_V1 \ @@ -277,6 +295,17 @@ struct nxp_bootloader_cmd { __be32 crc; } __packed; +struct nxp_v3_rx_timeout_nak { + u8 nak; + __le32 offset; + u8 crc; +} __packed; + +union nxp_v3_rx_timeout_nak_u { + struct nxp_v3_rx_timeout_nak pkt; + u8 buf[6]; +}; + static u8 crc8_table[CRC8_TABLE_SIZE]; /* Default configurations */ @@ -328,7 +357,7 @@ static void ps_cancel_timer(struct btnxpuart_dev *nxpdev) struct ps_data *psdata = &nxpdev->psdata; flush_work(&psdata->work); - del_timer_sync(&psdata->ps_timer); + timer_shutdown_sync(&psdata->ps_timer); } static void ps_control(struct hci_dev *hdev, u8 ps_state) @@ -550,6 +579,7 @@ static int nxp_download_firmware(struct hci_dev *hdev) nxpdev->fw_v1_sent_bytes = 0; nxpdev->fw_v1_expected_len = HDR_LEN; nxpdev->boot_reg_offset = 0; + nxpdev->fw_dnld_v3_offset = 0; nxpdev->fw_v3_offset_correction = 0; nxpdev->baudrate_changed = false; nxpdev->timeout_changed = false; @@ -564,14 +594,23 @@ static int nxp_download_firmware(struct hci_dev *hdev) !test_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state), msecs_to_jiffies(60000)); + + release_firmware(nxpdev->fw); + memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); + if (err == 0) { - bt_dev_err(hdev, "FW Download Timeout."); + bt_dev_err(hdev, "FW Download Timeout. offset: %d", + nxpdev->fw_dnld_v1_offset ? 
+ nxpdev->fw_dnld_v1_offset : + nxpdev->fw_dnld_v3_offset); return -ETIMEDOUT; } + if (test_bit(BTNXPUART_FW_DOWNLOAD_ABORT, &nxpdev->tx_state)) { + bt_dev_err(hdev, "FW Download Aborted"); + return -EINTR; + } serdev_device_set_flow_control(nxpdev->serdev, true); - release_firmware(nxpdev->fw); - memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); /* Allow the downloaded FW to initialize */ msleep(1200); @@ -682,19 +721,30 @@ static bool process_boot_signature(struct btnxpuart_dev *nxpdev) return is_fw_downloading(nxpdev); } -static int nxp_request_firmware(struct hci_dev *hdev, const char *fw_name) +static int nxp_request_firmware(struct hci_dev *hdev, const char *fw_name, + const char *fw_name_old) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + const char *fw_name_dt; int err = 0; if (!fw_name) return -ENOENT; if (!strlen(nxpdev->fw_name)) { - snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "%s", fw_name); + if (strcmp(fw_name, FIRMWARE_HELPER) && + !device_property_read_string(&nxpdev->serdev->dev, + "firmware-name", + &fw_name_dt)) + fw_name = fw_name_dt; + snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "nxp/%s", fw_name); + err = request_firmware_direct(&nxpdev->fw, nxpdev->fw_name, &hdev->dev); + if (err < 0 && fw_name_old) { + snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "nxp/%s", fw_name_old); + err = request_firmware_direct(&nxpdev->fw, nxpdev->fw_name, &hdev->dev); + } - bt_dev_dbg(hdev, "Request Firmware: %s", nxpdev->fw_name); - err = request_firmware(&nxpdev->fw, nxpdev->fw_name, &hdev->dev); + bt_dev_info(hdev, "Request Firmware: %s", nxpdev->fw_name); if (err < 0) { bt_dev_err(hdev, "Firmware file %s not found", nxpdev->fw_name); clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); @@ -773,15 +823,15 @@ static int nxp_recv_fw_req_v1(struct hci_dev *hdev, struct sk_buff *skb) } if (!nxp_data->helper_fw_name || nxpdev->helper_downloaded) { - if (nxp_request_firmware(hdev, nxp_data->fw_name)) + if (nxp_request_firmware(hdev, nxp_data->fw_name, nxp_data->fw_name_old)) goto free_skb; } else if (nxp_data->helper_fw_name && !nxpdev->helper_downloaded) { - if (nxp_request_firmware(hdev, nxp_data->helper_fw_name)) + if (nxp_request_firmware(hdev, nxp_data->helper_fw_name, NULL)) goto free_skb; } if (!len) { - bt_dev_dbg(hdev, "FW Downloaded Successfully: %zu bytes", + bt_dev_info(hdev, "FW Download Complete: %zu bytes", nxpdev->fw->size); if (nxp_data->helper_fw_name && !nxpdev->helper_downloaded) { nxpdev->helper_downloaded = true; @@ -863,7 +913,7 @@ static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid, else bt_dev_err(hdev, "Illegal loader version %02x", loader_ver); break; - case CHIP_ID_AW693: + case CHIP_ID_AW693a0: if ((loader_ver & FW_SECURE_MASK) == FW_OPEN) fw_name = FIRMWARE_AW693; else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL) @@ -871,6 +921,23 @@ static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid, else bt_dev_err(hdev, "Illegal loader version %02x", loader_ver); break; + case CHIP_ID_AW693a1: + if ((loader_ver & FW_SECURE_MASK) == FW_OPEN) + fw_name = FIRMWARE_AW693_A1; + else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL) + fw_name = FIRMWARE_SECURE_AW693_A1; + else + bt_dev_err(hdev, "Illegal loader version %02x", loader_ver); + break; + case CHIP_ID_IW615a0: + case CHIP_ID_IW615a1: + if ((loader_ver & FW_SECURE_MASK) == FW_OPEN) + fw_name = FIRMWARE_IW615; + else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL) + fw_name = FIRMWARE_SECURE_IW615; + else + bt_dev_err(hdev, "Illegal 
loader version %02x", loader_ver); + break; default: bt_dev_err(hdev, "Unknown chip signature %04x", chipid); break; @@ -878,10 +945,25 @@ static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid, return fw_name; } +static char *nxp_get_old_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid, + u8 loader_ver) +{ + char *fw_name_old = NULL; + + switch (chipid) { + case CHIP_ID_W9098: + fw_name_old = FIRMWARE_W9098_OLD; + break; + } + return fw_name_old; +} + static int nxp_recv_chip_ver_v3(struct hci_dev *hdev, struct sk_buff *skb) { struct v3_start_ind *req = skb_pull_data(skb, sizeof(*req)); struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + const char *fw_name; + const char *fw_name_old; u16 chip_id; u8 loader_ver; @@ -890,8 +972,10 @@ static int nxp_recv_chip_ver_v3(struct hci_dev *hdev, struct sk_buff *skb) chip_id = le16_to_cpu(req->chip_id); loader_ver = req->loader_ver; - if (!nxp_request_firmware(hdev, nxp_get_fw_name_from_chipid(hdev, - chip_id, loader_ver))) + bt_dev_info(hdev, "ChipID: %04x, Version: %d", chip_id, loader_ver); + fw_name = nxp_get_fw_name_from_chipid(hdev, chip_id, loader_ver); + fw_name_old = nxp_get_old_fw_name_from_chipid(hdev, chip_id, loader_ver); + if (!nxp_request_firmware(hdev, fw_name, fw_name_old)) nxp_send_ack(NXP_ACK_V3, hdev); free_skb: @@ -899,6 +983,32 @@ free_skb: return 0; } +static void nxp_handle_fw_download_error(struct hci_dev *hdev, struct v3_data_req *req) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + __u32 offset = __le32_to_cpu(req->offset); + __u16 err = __le16_to_cpu(req->error); + union nxp_v3_rx_timeout_nak_u nak_tx_buf; + + switch (err) { + case NXP_ACK_RX_TIMEOUT: + case NXP_HDR_RX_TIMEOUT: + case NXP_DATA_RX_TIMEOUT: + nak_tx_buf.pkt.nak = NXP_NAK_V3; + nak_tx_buf.pkt.offset = __cpu_to_le32(offset); + nak_tx_buf.pkt.crc = crc8(crc8_table, nak_tx_buf.buf, + sizeof(nak_tx_buf) - 1, 0xff); + serdev_device_write_buf(nxpdev->serdev, nak_tx_buf.buf, + sizeof(nak_tx_buf)); + break; + default: + bt_dev_dbg(hdev, "Unknown bootloader error code: %d", err); + break; + + } + +} + static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); @@ -913,7 +1023,12 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb) if (!req || !nxpdev->fw) goto free_skb; - nxp_send_ack(NXP_ACK_V3, hdev); + if (!req->error) { + nxp_send_ack(NXP_ACK_V3, hdev); + } else { + nxp_handle_fw_download_error(hdev, req); + goto free_skb; + } len = __le16_to_cpu(req->len); @@ -934,15 +1049,12 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb) } if (req->len == 0) { - bt_dev_dbg(hdev, "FW Downloaded Successfully: %zu bytes", + bt_dev_info(hdev, "FW Download Complete: %zu bytes", nxpdev->fw->size); clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q); goto free_skb; } - if (req->error) - bt_dev_dbg(hdev, "FW Download received err 0x%02x from chip", - req->error); offset = __le32_to_cpu(req->offset); if (offset < nxpdev->fw_v3_offset_correction) { @@ -954,8 +1066,9 @@ static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb) goto free_skb; } - serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data + offset - - nxpdev->fw_v3_offset_correction, len); + nxpdev->fw_dnld_v3_offset = offset - nxpdev->fw_v3_offset_correction; + serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data + + nxpdev->fw_dnld_v3_offset, len); free_skb: kfree_skb(skb); @@ -1037,7 +1150,7 
@@ static int nxp_setup(struct hci_dev *hdev) if (err < 0) return err; } else { - bt_dev_dbg(hdev, "FW already running."); + bt_dev_info(hdev, "FW already running."); clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); } @@ -1253,8 +1366,10 @@ static int btnxpuart_close(struct hci_dev *hdev) ps_wakeup(nxpdev); serdev_device_close(nxpdev->serdev); skb_queue_purge(&nxpdev->txq); - kfree_skb(nxpdev->rx_skb); - nxpdev->rx_skb = NULL; + if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) { + kfree_skb(nxpdev->rx_skb); + nxpdev->rx_skb = NULL; + } clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state); return 0; } @@ -1269,8 +1384,10 @@ static int btnxpuart_flush(struct hci_dev *hdev) cancel_work_sync(&nxpdev->tx_work); - kfree_skb(nxpdev->rx_skb); - nxpdev->rx_skb = NULL; + if (!IS_ERR_OR_NULL(nxpdev->rx_skb)) { + kfree_skb(nxpdev->rx_skb); + nxpdev->rx_skb = NULL; + } return 0; } @@ -1385,28 +1502,56 @@ static void nxp_serdev_remove(struct serdev_device *serdev) struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev); struct hci_dev *hdev = nxpdev->hdev; - /* Restore FW baudrate to fw_init_baudrate if changed. - * This will ensure FW baudrate is in sync with - * driver baudrate in case this driver is re-inserted. - */ - if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) { - nxpdev->new_baudrate = nxpdev->fw_init_baudrate; - nxp_set_baudrate_cmd(hdev, NULL); + if (is_fw_downloading(nxpdev)) { + set_bit(BTNXPUART_FW_DOWNLOAD_ABORT, &nxpdev->tx_state); + clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); + wake_up_interruptible(&nxpdev->check_boot_sign_wait_q); + wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q); + } else { + /* Restore FW baudrate to fw_init_baudrate if changed. + * This will ensure FW baudrate is in sync with + * driver baudrate in case this driver is re-inserted. 
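+ * (fw_init_baudrate is the baudrate the firmware starts with, saved at + * probe time, so the controller stays reachable if the driver is + * probed again.)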
+ */ + if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) { + nxpdev->new_baudrate = nxpdev->fw_init_baudrate; + nxp_set_baudrate_cmd(hdev, NULL); + } + ps_cancel_timer(nxpdev); } - - ps_cancel_timer(nxpdev); hci_unregister_dev(hdev); hci_free_dev(hdev); } +#ifdef CONFIG_PM_SLEEP +static int nxp_serdev_suspend(struct device *dev) +{ + struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev); + struct ps_data *psdata = &nxpdev->psdata; + + ps_control(psdata->hdev, PS_STATE_SLEEP); + return 0; +} + +static int nxp_serdev_resume(struct device *dev) +{ + struct btnxpuart_dev *nxpdev = dev_get_drvdata(dev); + struct ps_data *psdata = &nxpdev->psdata; + + ps_control(psdata->hdev, PS_STATE_AWAKE); + return 0; +} +#endif + static struct btnxpuart_data w8987_data __maybe_unused = { .helper_fw_name = NULL, .fw_name = FIRMWARE_W8987, + .fw_name_old = FIRMWARE_W8987_OLD, }; static struct btnxpuart_data w8997_data __maybe_unused = { .helper_fw_name = FIRMWARE_HELPER, .fw_name = FIRMWARE_W8997, + .fw_name_old = FIRMWARE_W8997_OLD, }; static const struct of_device_id nxpuart_of_match_table[] __maybe_unused = { @@ -1416,12 +1561,17 @@ static const struct of_device_id nxpuart_of_match_table[] __maybe_unused = { }; MODULE_DEVICE_TABLE(of, nxpuart_of_match_table); +static const struct dev_pm_ops nxp_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(nxp_serdev_suspend, nxp_serdev_resume) +}; + static struct serdev_device_driver nxp_serdev_driver = { .probe = nxp_serdev_probe, .remove = nxp_serdev_remove, .driver = { .name = "btnxpuart", .of_match_table = of_match_ptr(nxpuart_of_match_table), + .pm = &nxp_pm_ops, }, }; diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 4f1e37b4f780..f2f37143c454 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -811,7 +811,7 @@ static int rtl_download_firmware(struct hci_dev *hdev, struct sk_buff *skb; struct hci_rp_read_local_version *rp; - dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL); + dl_cmd = kmalloc(sizeof(*dl_cmd), GFP_KERNEL); if (!dl_cmd) return -ENOMEM; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index e384ef6ff050..acdba5d77694 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -479,6 +479,7 @@ static const struct usb_device_id quirks_table[] = { { USB_DEVICE(0x8087, 0x0036), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x0037), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x0038), .driver_info = BTUSB_INTEL_COMBINED }, + { USB_DEVICE(0x8087, 0x0039), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR }, { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL_COMBINED | BTUSB_INTEL_NO_WBS_SUPPORT | @@ -555,6 +556,10 @@ static const struct usb_device_id quirks_table[] = { BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3572), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3591), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0489, 0xe125), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, /* Realtek 8852BT/8852BE-VT Bluetooth devices */ { USB_DEVICE(0x0bda, 0x8520), .driver_info = BTUSB_REALTEK | @@ -891,6 +896,9 @@ struct btusb_data { int (*setup_on_usb)(struct hci_dev *hdev); + int (*suspend)(struct hci_dev *hdev); + int (*resume)(struct hci_dev *hdev); + int oob_wake_irq; /* irq for out-of-band wake-on-bt */ unsigned cmd_timeout_cnt; @@ -2638,410 +2646,48 @@ static int btusb_recv_event_realtek(struct hci_dev *hdev, struct 
sk_buff *skb) return hci_recv_frame(hdev, skb); } -/* UHW CR mapping */ -#define MTK_BT_MISC 0x70002510 -#define MTK_BT_SUBSYS_RST 0x70002610 -#define MTK_UDMA_INT_STA_BT 0x74000024 -#define MTK_UDMA_INT_STA_BT1 0x74000308 -#define MTK_BT_WDT_STATUS 0x740003A0 -#define MTK_EP_RST_OPT 0x74011890 -#define MTK_EP_RST_IN_OUT_OPT 0x00010001 -#define MTK_BT_RST_DONE 0x00000100 -#define MTK_BT_RESET_REG_CONNV3 0x70028610 -#define MTK_BT_READ_DEV_ID 0x70010200 - - -static void btusb_mtk_wmt_recv(struct urb *urb) +static void btusb_mtk_claim_iso_intf(struct btusb_data *data) { - struct hci_dev *hdev = urb->context; - struct btusb_data *data = hci_get_drvdata(hdev); - struct sk_buff *skb; + struct btmtk_data *btmtk_data = hci_get_priv(data->hdev); int err; - if (urb->status == 0 && urb->actual_length > 0) { - hdev->stat.byte_rx += urb->actual_length; - - /* WMT event shouldn't be fragmented and the size should be - * less than HCI_WMT_MAX_EVENT_SIZE. - */ - skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC); - if (!skb) { - hdev->stat.err_rx++; - kfree(urb->setup_packet); - return; - } - - hci_skb_pkt_type(skb) = HCI_EVENT_PKT; - skb_put_data(skb, urb->transfer_buffer, urb->actual_length); - - /* When someone waits for the WMT event, the skb is being cloned - * and being processed the events from there then. - */ - if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) { - data->evt_skb = skb_clone(skb, GFP_ATOMIC); - if (!data->evt_skb) { - kfree_skb(skb); - kfree(urb->setup_packet); - return; - } - } - - err = hci_recv_frame(hdev, skb); - if (err < 0) { - kfree_skb(data->evt_skb); - data->evt_skb = NULL; - kfree(urb->setup_packet); - return; - } - - if (test_and_clear_bit(BTUSB_TX_WAIT_VND_EVT, - &data->flags)) { - /* Barrier to sync with other CPUs */ - smp_mb__after_atomic(); - wake_up_bit(&data->flags, - BTUSB_TX_WAIT_VND_EVT); - } - kfree(urb->setup_packet); - return; - } else if (urb->status == -ENOENT) { - /* Avoid suspend failed when usb_kill_urb */ + err = usb_driver_claim_interface(&btusb_driver, + btmtk_data->isopkt_intf, data); + if (err < 0) { + btmtk_data->isopkt_intf = NULL; + bt_dev_err(data->hdev, "Failed to claim iso interface"); return; } - usb_mark_last_busy(data->udev); - - /* The URB complete handler is still called with urb->actual_length = 0 - * when the event is not available, so we should keep re-submitting - * URB until WMT event returns, Also, It's necessary to wait some time - * between the two consecutive control URBs to relax the target device - * to generate the event. Otherwise, the WMT event cannot return from - * the device successfully. 
- */ - udelay(500); - - usb_anchor_urb(urb, &data->ctrl_anchor); - err = usb_submit_urb(urb, GFP_ATOMIC); - if (err < 0) { - kfree(urb->setup_packet); - /* -EPERM: urb is being killed; - * -ENODEV: device got disconnected - */ - if (err != -EPERM && err != -ENODEV) - bt_dev_err(hdev, "urb %p failed to resubmit (%d)", - urb, -err); - usb_unanchor_urb(urb); - } + set_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags); } -static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev) +static void btusb_mtk_release_iso_intf(struct btusb_data *data) { - struct btusb_data *data = hci_get_drvdata(hdev); - struct usb_ctrlrequest *dr; - unsigned char *buf; - int err, size = 64; - unsigned int pipe; - struct urb *urb; + struct btmtk_data *btmtk_data = hci_get_priv(data->hdev); - urb = usb_alloc_urb(0, GFP_KERNEL); - if (!urb) - return -ENOMEM; + if (btmtk_data->isopkt_intf) { + usb_kill_anchored_urbs(&btmtk_data->isopkt_anchor); + clear_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags); - dr = kmalloc(sizeof(*dr), GFP_KERNEL); - if (!dr) { - usb_free_urb(urb); - return -ENOMEM; + dev_kfree_skb_irq(btmtk_data->isopkt_skb); + btmtk_data->isopkt_skb = NULL; + usb_set_intfdata(btmtk_data->isopkt_intf, NULL); + usb_driver_release_interface(&btusb_driver, + btmtk_data->isopkt_intf); } - dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_IN; - dr->bRequest = 1; - dr->wIndex = cpu_to_le16(0); - dr->wValue = cpu_to_le16(48); - dr->wLength = cpu_to_le16(size); - - buf = kmalloc(size, GFP_KERNEL); - if (!buf) { - kfree(dr); - usb_free_urb(urb); - return -ENOMEM; - } - - pipe = usb_rcvctrlpipe(data->udev, 0); - - usb_fill_control_urb(urb, data->udev, pipe, (void *)dr, - buf, size, btusb_mtk_wmt_recv, hdev); - - urb->transfer_flags |= URB_FREE_BUFFER; - - usb_anchor_urb(urb, &data->ctrl_anchor); - err = usb_submit_urb(urb, GFP_KERNEL); - if (err < 0) { - if (err != -EPERM && err != -ENODEV) - bt_dev_err(hdev, "urb %p submission failed (%d)", - urb, -err); - usb_unanchor_urb(urb); - } - - usb_free_urb(urb); - - return err; -} - -static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev, - struct btmtk_hci_wmt_params *wmt_params) -{ - struct btusb_data *data = hci_get_drvdata(hdev); - struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc; - u32 hlen, status = BTMTK_WMT_INVALID; - struct btmtk_hci_wmt_evt *wmt_evt; - struct btmtk_hci_wmt_cmd *wc; - struct btmtk_wmt_hdr *hdr; - int err; - - /* Send the WMT command and wait until the WMT event returns */ - hlen = sizeof(*hdr) + wmt_params->dlen; - if (hlen > 255) - return -EINVAL; - - wc = kzalloc(hlen, GFP_KERNEL); - if (!wc) - return -ENOMEM; - - hdr = &wc->hdr; - hdr->dir = 1; - hdr->op = wmt_params->op; - hdr->dlen = cpu_to_le16(wmt_params->dlen + 1); - hdr->flag = wmt_params->flag; - memcpy(wc->data, wmt_params->data, wmt_params->dlen); - - set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); - - /* WMT cmd/event doesn't follow up the generic HCI cmd/event handling, - * it needs constantly polling control pipe until the host received the - * WMT event, thus, we should require to specifically acquire PM counter - * on the USB to prevent the interface from entering auto suspended - * while WMT cmd/event in progress. 
- */ - err = usb_autopm_get_interface(data->intf); - if (err < 0) - goto err_free_wc; - - err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc); - - if (err < 0) { - clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); - usb_autopm_put_interface(data->intf); - goto err_free_wc; - } - - /* Submit control IN URB on demand to process the WMT event */ - err = btusb_mtk_submit_wmt_recv_urb(hdev); - - usb_autopm_put_interface(data->intf); - - if (err < 0) - goto err_free_wc; - - /* The vendor specific WMT commands are all answered by a vendor - * specific event and will have the Command Status or Command - * Complete as with usual HCI command flow control. - * - * After sending the command, wait for BTUSB_TX_WAIT_VND_EVT - * state to be cleared. The driver specific event receive routine - * will clear that state and with that indicate completion of the - * WMT command. - */ - err = wait_on_bit_timeout(&data->flags, BTUSB_TX_WAIT_VND_EVT, - TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT); - if (err == -EINTR) { - bt_dev_err(hdev, "Execution of wmt command interrupted"); - clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); - goto err_free_wc; - } - - if (err) { - bt_dev_err(hdev, "Execution of wmt command timed out"); - clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); - err = -ETIMEDOUT; - goto err_free_wc; - } - - if (data->evt_skb == NULL) - goto err_free_wc; - - /* Parse and handle the return WMT event */ - wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data; - if (wmt_evt->whdr.op != hdr->op) { - bt_dev_err(hdev, "Wrong op received %d expected %d", - wmt_evt->whdr.op, hdr->op); - err = -EIO; - goto err_free_skb; - } - - switch (wmt_evt->whdr.op) { - case BTMTK_WMT_SEMAPHORE: - if (wmt_evt->whdr.flag == 2) - status = BTMTK_WMT_PATCH_UNDONE; - else - status = BTMTK_WMT_PATCH_DONE; - break; - case BTMTK_WMT_FUNC_CTRL: - wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt; - if (be16_to_cpu(wmt_evt_funcc->status) == 0x404) - status = BTMTK_WMT_ON_DONE; - else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420) - status = BTMTK_WMT_ON_PROGRESS; - else - status = BTMTK_WMT_ON_UNDONE; - break; - case BTMTK_WMT_PATCH_DWNLD: - if (wmt_evt->whdr.flag == 2) - status = BTMTK_WMT_PATCH_DONE; - else if (wmt_evt->whdr.flag == 1) - status = BTMTK_WMT_PATCH_PROGRESS; - else - status = BTMTK_WMT_PATCH_UNDONE; - break; - } - - if (wmt_params->status) - *wmt_params->status = status; - -err_free_skb: - kfree_skb(data->evt_skb); - data->evt_skb = NULL; -err_free_wc: - kfree(wc); - return err; -} - -static int btusb_mtk_func_query(struct hci_dev *hdev) -{ - struct btmtk_hci_wmt_params wmt_params; - int status, err; - u8 param = 0; - - /* Query whether the function is enabled */ - wmt_params.op = BTMTK_WMT_FUNC_CTRL; - wmt_params.flag = 4; - wmt_params.dlen = sizeof(param); - wmt_params.data = ¶m; - wmt_params.status = &status; - - err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); - if (err < 0) { - bt_dev_err(hdev, "Failed to query function status (%d)", err); - return err; - } - - return status; -} - -static int btusb_mtk_uhw_reg_write(struct btusb_data *data, u32 reg, u32 val) -{ - struct hci_dev *hdev = data->hdev; - int pipe, err; - void *buf; - - buf = kzalloc(4, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - put_unaligned_le32(val, buf); - - pipe = usb_sndctrlpipe(data->udev, 0); - err = usb_control_msg(data->udev, pipe, 0x02, - 0x5E, - reg >> 16, reg & 0xffff, - buf, 4, USB_CTRL_SET_TIMEOUT); - if (err < 0) { - bt_dev_err(hdev, "Failed to write uhw reg(%d)", err); - goto err_free_buf; - } - -err_free_buf: - kfree(buf); - - 
return err; -} - -static int btusb_mtk_uhw_reg_read(struct btusb_data *data, u32 reg, u32 *val) -{ - struct hci_dev *hdev = data->hdev; - int pipe, err; - void *buf; - - buf = kzalloc(4, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - pipe = usb_rcvctrlpipe(data->udev, 0); - err = usb_control_msg(data->udev, pipe, 0x01, - 0xDE, - reg >> 16, reg & 0xffff, - buf, 4, USB_CTRL_GET_TIMEOUT); - if (err < 0) { - bt_dev_err(hdev, "Failed to read uhw reg(%d)", err); - goto err_free_buf; - } - - *val = get_unaligned_le32(buf); - bt_dev_dbg(hdev, "reg=%x, value=0x%08x", reg, *val); - -err_free_buf: - kfree(buf); - - return err; -} - -static int btusb_mtk_reg_read(struct btusb_data *data, u32 reg, u32 *val) -{ - int pipe, err, size = sizeof(u32); - void *buf; - - buf = kzalloc(size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - pipe = usb_rcvctrlpipe(data->udev, 0); - err = usb_control_msg(data->udev, pipe, 0x63, - USB_TYPE_VENDOR | USB_DIR_IN, - reg >> 16, reg & 0xffff, - buf, size, USB_CTRL_GET_TIMEOUT); - if (err < 0) - goto err_free_buf; - - *val = get_unaligned_le32(buf); - -err_free_buf: - kfree(buf); - - return err; -} - -static int btusb_mtk_id_get(struct btusb_data *data, u32 reg, u32 *id) -{ - return btusb_mtk_reg_read(data, reg, id); -} - -static u32 btusb_mtk_reset_done(struct hci_dev *hdev) -{ - struct btusb_data *data = hci_get_drvdata(hdev); - u32 val = 0; - - btusb_mtk_uhw_reg_read(data, MTK_BT_MISC, &val); - - return val & MTK_BT_RST_DONE; + clear_bit(BTMTK_ISOPKT_OVER_INTR, &btmtk_data->flags); } static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data) { struct btusb_data *data = hci_get_drvdata(hdev); - struct btmediatek_data *mediatek; - u32 val; + struct btmtk_data *btmtk_data = hci_get_priv(hdev); int err; /* It's MediaTek specific bluetooth reset mechanism via USB */ - if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) { + if (test_and_set_bit(BTMTK_HW_RESET_ACTIVE, &btmtk_data->flags)) { bt_dev_err(hdev, "last reset failed? Not resetting again"); return -EBUSY; } @@ -3050,302 +2696,68 @@ static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data) if (err < 0) return err; + if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags)) + btusb_mtk_release_iso_intf(data); + btusb_stop_traffic(data); usb_kill_anchored_urbs(&data->tx_anchor); - mediatek = hci_get_priv(hdev); - if (mediatek->dev_id == 0x7925) { - btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val); - val |= (1 << 5); - btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val); - btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val); - val &= 0xFFFF00FF; - val |= (1 << 13); - btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val); - btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, 0x00010001); - btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val); - val |= (1 << 0); - btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val); - btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT, 0x000000FF); - btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT, &val); - btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT1, 0x000000FF); - btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT1, &val); - msleep(100); - } else { - /* It's Device EndPoint Reset Option Register */ - bt_dev_dbg(hdev, "Initiating reset mechanism via uhw"); - btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT); - btusb_mtk_uhw_reg_read(data, MTK_BT_WDT_STATUS, &val); - - /* Reset the bluetooth chip via USB interface. 
*/ - btusb_mtk_uhw_reg_write(data, MTK_BT_SUBSYS_RST, 1); - btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT, 0x000000FF); - btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT, &val); - btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT1, 0x000000FF); - btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT1, &val); - /* MT7921 need to delay 20ms between toggle reset bit */ - msleep(20); - btusb_mtk_uhw_reg_write(data, MTK_BT_SUBSYS_RST, 0); - btusb_mtk_uhw_reg_read(data, MTK_BT_SUBSYS_RST, &val); - } - - err = readx_poll_timeout(btusb_mtk_reset_done, hdev, val, - val & MTK_BT_RST_DONE, 20000, 1000000); - if (err < 0) - bt_dev_err(hdev, "Reset timeout"); - - btusb_mtk_id_get(data, 0x70010200, &val); - if (!val) - bt_dev_err(hdev, "Can't get device id, subsys reset fail."); + err = btmtk_usb_subsys_reset(hdev, btmtk_data->dev_id); usb_queue_reset_device(data->intf); - - clear_bit(BTUSB_HW_RESET_ACTIVE, &data->flags); + clear_bit(BTMTK_HW_RESET_ACTIVE, &btmtk_data->flags); return err; } +static int btusb_send_frame_mtk(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct urb *urb; + + BT_DBG("%s", hdev->name); + + if (hci_skb_pkt_type(skb) == HCI_ISODATA_PKT) { + urb = alloc_mtk_intr_urb(hdev, skb, btusb_tx_complete); + if (IS_ERR(urb)) + return PTR_ERR(urb); + + return submit_or_queue_tx_urb(hdev, urb); + } else { + return btusb_send_frame(hdev, skb); + } +} + static int btusb_mtk_setup(struct hci_dev *hdev) { struct btusb_data *data = hci_get_drvdata(hdev); - struct btmtk_hci_wmt_params wmt_params; - ktime_t calltime, delta, rettime; - struct btmtk_tci_sleep tci_sleep; - unsigned long long duration; - struct sk_buff *skb; - const char *fwname; - int err, status; - u32 dev_id = 0; - char fw_bin_name[64]; - u32 fw_version = 0, fw_flavor = 0; - u8 param; - struct btmediatek_data *mediatek; + struct btmtk_data *btmtk_data = hci_get_priv(hdev); - calltime = ktime_get(); + /* MediaTek WMT vendor cmds require the below USB resources to + * complete the handshake.
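+ *
+ * (Editorial sketch, not part of this patch: once these fields are
+ * populated, a consumer in the shared btmtk module can reach the same
+ * USB resources through the HCI private data, for example:
+ *
+ *	struct btmtk_data *btmtk_data = hci_get_priv(hdev);
+ *
+ *	err = usb_autopm_get_interface(btmtk_data->intf);
+ *	if (err < 0)
+ *		return err;
+ *	usb_anchor_urb(urb, btmtk_data->ctrl_anchor);
+ *
+ * The field names match the assignments just below; the surrounding
+ * control flow is illustrative only.)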
+ */ + btmtk_data->drv_name = btusb_driver.name; + btmtk_data->intf = data->intf; + btmtk_data->udev = data->udev; + btmtk_data->ctrl_anchor = &data->ctrl_anchor; + btmtk_data->reset_sync = btusb_mtk_reset; - err = btusb_mtk_id_get(data, 0x80000008, &dev_id); - if (err < 0) { - bt_dev_err(hdev, "Failed to get device id (%d)", err); - return err; - } + /* Claim ISO data interface and endpoint */ + btmtk_data->isopkt_intf = usb_ifnum_to_if(data->udev, MTK_ISO_IFNUM); + if (btmtk_data->isopkt_intf) + btusb_mtk_claim_iso_intf(data); - if (!dev_id || dev_id != 0x7663) { - err = btusb_mtk_id_get(data, 0x70010200, &dev_id); - if (err < 0) { - bt_dev_err(hdev, "Failed to get device id (%d)", err); - return err; - } - err = btusb_mtk_id_get(data, 0x80021004, &fw_version); - if (err < 0) { - bt_dev_err(hdev, "Failed to get fw version (%d)", err); - return err; - } - err = btusb_mtk_id_get(data, 0x70010020, &fw_flavor); - if (err < 0) { - bt_dev_err(hdev, "Failed to get fw flavor (%d)", err); - return err; - } - fw_flavor = (fw_flavor & 0x00000080) >> 7; - } - - mediatek = hci_get_priv(hdev); - mediatek->dev_id = dev_id; - mediatek->reset_sync = btusb_mtk_reset; - - err = btmtk_register_coredump(hdev, btusb_driver.name, fw_version); - if (err < 0) - bt_dev_err(hdev, "Failed to register coredump (%d)", err); - - switch (dev_id) { - case 0x7663: - fwname = FIRMWARE_MT7663; - break; - case 0x7668: - fwname = FIRMWARE_MT7668; - break; - case 0x7922: - case 0x7961: - case 0x7925: - if (dev_id == 0x7925) - snprintf(fw_bin_name, sizeof(fw_bin_name), - "mediatek/mt%04x/BT_RAM_CODE_MT%04x_1_%x_hdr.bin", - dev_id & 0xffff, dev_id & 0xffff, (fw_version & 0xff) + 1); - else if (dev_id == 0x7961 && fw_flavor) - snprintf(fw_bin_name, sizeof(fw_bin_name), - "mediatek/BT_RAM_CODE_MT%04x_1a_%x_hdr.bin", - dev_id & 0xffff, (fw_version & 0xff) + 1); - else - snprintf(fw_bin_name, sizeof(fw_bin_name), - "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin", - dev_id & 0xffff, (fw_version & 0xff) + 1); - - err = btmtk_setup_firmware_79xx(hdev, fw_bin_name, - btusb_mtk_hci_wmt_sync); - if (err < 0) { - bt_dev_err(hdev, "Failed to set up firmware (%d)", err); - return err; - } - - /* It's Device EndPoint Reset Option Register */ - btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT); - - /* Enable Bluetooth protocol */ - param = 1; - wmt_params.op = BTMTK_WMT_FUNC_CTRL; - wmt_params.flag = 0; - wmt_params.dlen = sizeof(param); - wmt_params.data = ¶m; - wmt_params.status = NULL; - - err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); - if (err < 0) { - bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); - return err; - } - - hci_set_msft_opcode(hdev, 0xFD30); - hci_set_aosp_capable(hdev); - goto done; - default: - bt_dev_err(hdev, "Unsupported hardware variant (%08x)", - dev_id); - return -ENODEV; - } - - /* Query whether the firmware is already download */ - wmt_params.op = BTMTK_WMT_SEMAPHORE; - wmt_params.flag = 1; - wmt_params.dlen = 0; - wmt_params.data = NULL; - wmt_params.status = &status; - - err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); - if (err < 0) { - bt_dev_err(hdev, "Failed to query firmware status (%d)", err); - return err; - } - - if (status == BTMTK_WMT_PATCH_DONE) { - bt_dev_info(hdev, "firmware already downloaded"); - goto ignore_setup_fw; - } - - /* Setup a firmware which the device definitely requires */ - err = btmtk_setup_firmware(hdev, fwname, - btusb_mtk_hci_wmt_sync); - if (err < 0) - return err; - -ignore_setup_fw: - err = readx_poll_timeout(btusb_mtk_func_query, hdev, status, - 
status < 0 || status != BTMTK_WMT_ON_PROGRESS, - 2000, 5000000); - /* -ETIMEDOUT happens */ - if (err < 0) - return err; - - /* The other errors happen in btusb_mtk_func_query */ - if (status < 0) - return status; - - if (status == BTMTK_WMT_ON_DONE) { - bt_dev_info(hdev, "function already on"); - goto ignore_func_on; - } - - /* Enable Bluetooth protocol */ - param = 1; - wmt_params.op = BTMTK_WMT_FUNC_CTRL; - wmt_params.flag = 0; - wmt_params.dlen = sizeof(param); - wmt_params.data = ¶m; - wmt_params.status = NULL; - - err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); - if (err < 0) { - bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); - return err; - } - -ignore_func_on: - /* Apply the low power environment setup */ - tci_sleep.mode = 0x5; - tci_sleep.duration = cpu_to_le16(0x640); - tci_sleep.host_duration = cpu_to_le16(0x640); - tci_sleep.host_wakeup_pin = 0; - tci_sleep.time_compensation = 0; - - skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep, - HCI_INIT_TIMEOUT); - if (IS_ERR(skb)) { - err = PTR_ERR(skb); - bt_dev_err(hdev, "Failed to apply low power setting (%d)", err); - return err; - } - kfree_skb(skb); - -done: - rettime = ktime_get(); - delta = ktime_sub(rettime, calltime); - duration = (unsigned long long)ktime_to_ns(delta) >> 10; - - bt_dev_info(hdev, "Device setup in %llu usecs", duration); - - return 0; + return btmtk_usb_setup(hdev); } static int btusb_mtk_shutdown(struct hci_dev *hdev) -{ - struct btmtk_hci_wmt_params wmt_params; - u8 param = 0; - int err; - - /* Disable the device */ - wmt_params.op = BTMTK_WMT_FUNC_CTRL; - wmt_params.flag = 0; - wmt_params.dlen = sizeof(param); - wmt_params.data = ¶m; - wmt_params.status = NULL; - - err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); - if (err < 0) { - bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); - return err; - } - - return 0; -} - -static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb) { struct btusb_data *data = hci_get_drvdata(hdev); - u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle); + struct btmtk_data *btmtk_data = hci_get_priv(hdev); - switch (handle) { - case 0xfc6f: /* Firmware dump from device */ - /* When the firmware hangs, the device can no longer - * suspend and thus disable auto-suspend. - */ - usb_disable_autosuspend(data->udev); + if (test_bit(BTMTK_ISOPKT_RUNNING, &btmtk_data->flags)) + btusb_mtk_release_iso_intf(data); - /* We need to forward the diagnostic packet to userspace daemon - * for backward compatibility, so we have to clone the packet - * extraly for the in-kernel coredump support. 
- */ - if (IS_ENABLED(CONFIG_DEV_COREDUMP)) { - struct sk_buff *skb_cd = skb_clone(skb, GFP_ATOMIC); - - if (skb_cd) - btmtk_process_coredump(hdev, skb_cd); - } - - fallthrough; - case 0x05ff: /* Firmware debug logging 1 */ - case 0x05fe: /* Firmware debug logging 2 */ - return hci_recv_diag(hdev, skb); - } - - return hci_recv_frame(hdev, skb); + return btmtk_usb_shutdown(hdev); } #ifdef CONFIG_PM @@ -4347,7 +3759,7 @@ static int btusb_probe(struct usb_interface *intf, data->recv_event = btusb_recv_event_realtek; } else if (id->driver_info & BTUSB_MEDIATEK) { /* Allocate extra space for Mediatek device */ - priv_size += sizeof(struct btmediatek_data); + priv_size += sizeof(struct btmtk_data); } data->recv_acl = hci_recv_frame; @@ -4451,9 +3863,12 @@ static int btusb_probe(struct usb_interface *intf, hdev->manufacturer = 70; hdev->cmd_timeout = btmtk_reset_sync; hdev->set_bdaddr = btmtk_set_bdaddr; + hdev->send = btusb_send_frame_mtk; set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks); set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); - data->recv_acl = btusb_recv_acl_mtk; + data->recv_acl = btmtk_usb_recv_acl; + data->suspend = btmtk_usb_suspend; + data->resume = btmtk_usb_resume; } if (id->driver_info & BTUSB_SWAVE) { @@ -4694,6 +4109,9 @@ static int btusb_suspend(struct usb_interface *intf, pm_message_t message) cancel_work_sync(&data->work); + if (data->suspend) + data->suspend(data->hdev); + btusb_stop_traffic(data); usb_kill_anchored_urbs(&data->tx_anchor); @@ -4797,6 +4215,9 @@ static int btusb_resume(struct usb_interface *intf) btusb_submit_isoc_urb(hdev, GFP_NOIO); } + if (data->resume) + data->resume(hdev); + spin_lock_irq(&data->txlock); play_deferred(data); clear_bit(BTUSB_SUSPENDING, &data->flags); diff --git a/drivers/bluetooth/hci_bcm4377.c b/drivers/bluetooth/hci_bcm4377.c index d90858ea2fe5..77a5454a8721 100644 --- a/drivers/bluetooth/hci_bcm4377.c +++ b/drivers/bluetooth/hci_bcm4377.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only OR MIT /* - * Bluetooth HCI driver for Broadcom 4377/4378/4387 devices attached via PCIe + * Bluetooth HCI driver for Broadcom 4377/4378/4387/4388 devices attached via PCIe * * Copyright (C) The Asahi Linux Contributors */ @@ -26,13 +26,16 @@ enum bcm4377_chip { BCM4377 = 0, BCM4378, BCM4387, + BCM4388, }; #define BCM4377_DEVICE_ID 0x5fa0 #define BCM4378_DEVICE_ID 0x5f69 #define BCM4387_DEVICE_ID 0x5f71 +#define BCM4388_DEVICE_ID 0x5f72 -#define BCM4377_TIMEOUT 1000 +#define BCM4377_TIMEOUT msecs_to_jiffies(1000) +#define BCM4377_BOOT_TIMEOUT msecs_to_jiffies(5000) /* * These devices only support DMA transactions inside a 32bit window @@ -487,6 +490,7 @@ struct bcm4377_data; * second window in BAR0 * has_bar0_core2_window2: Set to true if this chip requires the second core's * second window to be configured + * bar2_offset: Offset to the start of the variables in BAR2 * clear_pciecfg_subsystem_ctrl_bit19: Set to true if bit 19 in the * vendor-specific subsystem control * register has to be cleared @@ -510,6 +514,7 @@ struct bcm4377_hw { u32 bar0_window1; u32 bar0_window2; u32 bar0_core2_window2; + u32 bar2_offset; unsigned long has_bar0_core2_window2 : 1; unsigned long clear_pciecfg_subsystem_ctrl_bit19 : 1; @@ -835,8 +840,8 @@ static irqreturn_t bcm4377_irq(int irq, void *data) struct bcm4377_data *bcm4377 = data; u32 bootstage, rti_status; - bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE); - rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS); + bootstage = ioread32(bcm4377->bar2 + 
bcm4377->hw->bar2_offset + BCM4377_BAR2_BOOTSTAGE); + rti_status = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_STATUS); if (bootstage != bcm4377->bootstage || rti_status != bcm4377->rti_status) { @@ -1196,6 +1201,14 @@ static int bcm4387_send_calibration(struct bcm4377_data *bcm4377) bcm4377->taurus_cal_size); } +static int bcm4388_send_calibration(struct bcm4377_data *bcm4377) +{ + /* BCM4388 always uses beamforming */ + return __bcm4378_send_calibration( + bcm4377, bcm4377->taurus_beamforming_cal_blob, + bcm4377->taurus_beamforming_cal_size); +} + static const struct firmware *bcm4377_request_blob(struct bcm4377_data *bcm4377, const char *suffix) { @@ -1819,8 +1832,8 @@ static int bcm4377_boot(struct bcm4377_data *bcm4377) int ret = 0; u32 bootstage, rti_status; - bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE); - rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS); + bootstage = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_BOOTSTAGE); + rti_status = ioread32(bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_STATUS); if (bootstage != 0) { dev_err(&bcm4377->pdev->dev, "bootstage is %d and not 0\n", @@ -1854,15 +1867,18 @@ static int bcm4377_boot(struct bcm4377_data *bcm4377) iowrite32(BCM4377_DMA_MASK, bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_SIZE); - iowrite32(lower_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_LO); - iowrite32(upper_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_HI); - iowrite32(fw->size, bcm4377->bar2 + BCM4377_BAR2_FW_SIZE); + iowrite32(lower_32_bits(fw_dma), + bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_FW_LO); + iowrite32(upper_32_bits(fw_dma), + bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_FW_HI); + iowrite32(fw->size, + bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_FW_SIZE); iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_FW_DOORBELL); dev_dbg(&bcm4377->pdev->dev, "waiting for firmware to boot\n"); ret = wait_for_completion_interruptible_timeout(&bcm4377->event, - BCM4377_TIMEOUT); + BCM4377_BOOT_TIMEOUT); if (ret == 0) { ret = -ETIMEDOUT; goto out_dma_free; @@ -1913,16 +1929,16 @@ static int bcm4377_setup_rti(struct bcm4377_data *bcm4377) dev_dbg(&bcm4377->pdev->dev, "RTI is in state 1\n"); /* allow access to the entire IOVA space again */ - iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_LO); - iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_HI); + iowrite32(0, bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_WINDOW_LO); + iowrite32(0, bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_WINDOW_HI); iowrite32(BCM4377_DMA_MASK, - bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_SIZE); + bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_RTI_WINDOW_SIZE); /* setup "Converged IPC" context */ iowrite32(lower_32_bits(bcm4377->ctx_dma), - bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_LO); + bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_CONTEXT_ADDR_LO); iowrite32(upper_32_bits(bcm4377->ctx_dma), - bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_HI); + bcm4377->bar2 + bcm4377->hw->bar2_offset + BCM4377_BAR2_CONTEXT_ADDR_HI); iowrite32(2, bcm4377->bar0 + BCM4377_BAR0_RTI_CONTROL); ret = wait_for_completion_interruptible_timeout(&bcm4377->event, @@ -2488,6 +2504,21 @@ static const struct bcm4377_hw bcm4377_hw_variants[] = { .send_calibration = bcm4387_send_calibration, .send_ptb = bcm4378_send_ptb, }, + + [BCM4388] = { + .id = 0x4388, + .otp_offset = 0x415c, + .bar2_offset = 0x200000, + .bar0_window1 = 0x18002000, + .bar0_window2 = 
0x18109000, + .bar0_core2_window2 = 0x18106000, + .has_bar0_core2_window2 = true, + .broken_mws_transport_config = true, + .broken_le_coded = true, + .broken_le_ext_adv_report_phy = true, + .send_calibration = bcm4388_send_calibration, + .send_ptb = bcm4378_send_ptb, + }, }; #define BCM4377_DEVID_ENTRY(id) \ @@ -2501,6 +2532,7 @@ static const struct pci_device_id bcm4377_devid_table[] = { BCM4377_DEVID_ENTRY(4377), BCM4377_DEVID_ENTRY(4378), BCM4377_DEVID_ENTRY(4387), + BCM4377_DEVID_ENTRY(4388), {}, }; MODULE_DEVICE_TABLE(pci, bcm4377_devid_table); @@ -2515,7 +2547,7 @@ static struct pci_driver bcm4377_pci_driver = { module_pci_driver(bcm4377_pci_driver); MODULE_AUTHOR("Sven Peter "); -MODULE_DESCRIPTION("Bluetooth support for Broadcom 4377/4378/4387 devices"); +MODULE_DESCRIPTION("Bluetooth support for Broadcom 4377/4378/4387/4388 devices"); MODULE_LICENSE("Dual MIT/GPL"); MODULE_FIRMWARE("brcm/brcmbt4377*.bin"); MODULE_FIRMWARE("brcm/brcmbt4377*.ptb"); @@ -2523,3 +2555,5 @@ MODULE_FIRMWARE("brcm/brcmbt4378*.bin"); MODULE_FIRMWARE("brcm/brcmbt4378*.ptb"); MODULE_FIRMWARE("brcm/brcmbt4387*.bin"); MODULE_FIRMWARE("brcm/brcmbt4387*.ptb"); +MODULE_FIRMWARE("brcm/brcmbt4388*.bin"); +MODULE_FIRMWARE("brcm/brcmbt4388*.ptb"); diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 17a2f158a0df..30192bb08354 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -488,7 +488,7 @@ static int hci_uart_tty_open(struct tty_struct *tty) if (tty->ops->write == NULL) return -EOPNOTSUPP; - hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL); + hu = kzalloc(sizeof(*hu), GFP_KERNEL); if (!hu) { BT_ERR("Can't allocate control structure"); return -ENFILE; diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c index 97da0b2bfd17..62633d9ba7c4 100644 --- a/drivers/bluetooth/hci_nokia.c +++ b/drivers/bluetooth/hci_nokia.c @@ -116,11 +116,6 @@ struct hci_nokia_neg_evt { #define SETUP_BAUD_RATE 921600 #define INIT_BAUD_RATE 120000 -struct hci_nokia_radio_hdr { - u8 evt; - u8 dlen; -} __packed; - struct nokia_bt_dev { struct hci_uart hu; struct serdev_device *serdev; diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 9a0bc86f9aac..ca6466676902 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include #include @@ -214,6 +215,7 @@ struct qca_power { struct regulator_bulk_data *vreg_bulk; int num_vregs; bool vregs_on; + struct pwrseq_desc *pwrseq; }; struct qca_serdev { @@ -569,7 +571,7 @@ static int qca_open(struct hci_uart *hu) if (!hci_uart_has_flow_control(hu)) return -EOPNOTSUPP; - qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL); + qca = kzalloc(sizeof(*qca), GFP_KERNEL); if (!qca) return -ENOMEM; @@ -1040,8 +1042,7 @@ static void qca_controller_memdump(struct work_struct *work) } if (!qca_memdump) { - qca_memdump = kzalloc(sizeof(struct qca_memdump_info), - GFP_ATOMIC); + qca_memdump = kzalloc(sizeof(*qca_memdump), GFP_ATOMIC); if (!qca_memdump) { mutex_unlock(&qca->hci_memdump_lock); return; @@ -1685,6 +1686,27 @@ static bool qca_wakeup(struct hci_dev *hdev) return wakeup; } +static int qca_port_reopen(struct hci_uart *hu) +{ + int ret; + + /* Now the device is in ready state to communicate with host. + * To sync host with device we need to reopen port. + * Without this, we will have RTS and CTS synchronization + * issues. 
+ */ + serdev_device_close(hu->serdev); + ret = serdev_device_open(hu->serdev); + if (ret) { + bt_dev_err(hu->hdev, "failed to open port"); + return ret; + } + + hci_uart_set_flow_control(hu, false); + + return 0; +} + static int qca_regulator_init(struct hci_uart *hu) { enum qca_btsoc_type soc_type = qca_soc_type(hu); @@ -1696,6 +1718,7 @@ static int qca_regulator_init(struct hci_uart *hu) * off the voltage regulator. */ qcadev = serdev_device_get_drvdata(hu->serdev); + if (!qcadev->bt_power->vregs_on) { serdev_device_close(hu->serdev); ret = qca_regulator_enable(qcadev); @@ -1753,21 +1776,7 @@ static int qca_regulator_init(struct hci_uart *hu) break; } - /* Now the device is in ready state to communicate with host. - * To sync host with device we need to reopen port. - * Without this, we will have RTS and CTS synchronization - * issues. - */ - serdev_device_close(hu->serdev); - ret = serdev_device_open(hu->serdev); - if (ret) { - bt_dev_err(hu->hdev, "failed to open port"); - return ret; - } - - hci_uart_set_flow_control(hu, false); - - return 0; + return qca_port_reopen(hu); } static int qca_power_on(struct hci_dev *hdev) @@ -1792,6 +1801,7 @@ static int qca_power_on(struct hci_dev *hdev) case QCA_WCN6750: case QCA_WCN6855: case QCA_WCN7850: + case QCA_QCA6390: ret = qca_regulator_init(hu); break; @@ -2130,6 +2140,7 @@ static void qca_power_shutdown(struct hci_uart *hu) unsigned long flags; enum qca_btsoc_type soc_type = qca_soc_type(hu); bool sw_ctrl_state; + struct qca_power *power; /* From this point we go into power off state. But serial port is * still open, stop queueing the IBS data and flush all the buffered @@ -2147,6 +2158,13 @@ static void qca_power_shutdown(struct hci_uart *hu) return; qcadev = serdev_device_get_drvdata(hu->serdev); + power = qcadev->bt_power; + + if (power->pwrseq) { + pwrseq_power_off(power->pwrseq); + set_bit(QCA_BT_OFF, &qca->flags); + return; + } switch (soc_type) { case QCA_WCN3988: @@ -2169,6 +2187,10 @@ static void qca_power_shutdown(struct hci_uart *hu) } break; + case QCA_QCA6390: + pwrseq_power_off(qcadev->bt_power->pwrseq); + break; + default: gpiod_set_value_cansleep(qcadev->bt_en, 0); } @@ -2204,6 +2226,9 @@ static int qca_regulator_enable(struct qca_serdev *qcadev) struct qca_power *power = qcadev->bt_power; int ret; + if (power->pwrseq) + return pwrseq_power_on(power->pwrseq); + /* Already enabled */ if (power->vregs_on) return 0; @@ -2272,6 +2297,13 @@ static int qca_init_regulators(struct qca_power *qca, return 0; } +static void qca_clk_disable_unprepare(void *data) +{ + struct clk *clk = data; + + clk_disable_unprepare(clk); +} + static int qca_serdev_probe(struct serdev_device *serdev) { struct qca_serdev *qcadev; @@ -2310,12 +2342,40 @@ static int qca_serdev_probe(struct serdev_device *serdev) case QCA_WCN6750: case QCA_WCN6855: case QCA_WCN7850: + case QCA_QCA6390: qcadev->bt_power = devm_kzalloc(&serdev->dev, sizeof(struct qca_power), GFP_KERNEL); if (!qcadev->bt_power) return -ENOMEM; + break; + default: + break; + } + switch (qcadev->btsoc_type) { + case QCA_WCN6855: + case QCA_WCN7850: + if (!device_property_present(&serdev->dev, "enable-gpios")) { + /* + * Backward compatibility with old DT sources. If the + * node doesn't have the 'enable-gpios' property then + * let's use the power sequencer. Otherwise, let's + * drive everything ourselves.
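+ *
+ * (Editorial sketch, not part of this patch: the power sequencer
+ * consumer pattern used across this hunk reduces to
+ *
+ *	pwrseq = devm_pwrseq_get(dev, "bluetooth");
+ *	if (IS_ERR(pwrseq))
+ *		return PTR_ERR(pwrseq);
+ *
+ * followed by pwrseq_power_on(pwrseq) when powering the SoC up and
+ * pwrseq_power_off(pwrseq) in qca_power_shutdown(); all three calls
+ * appear verbatim elsewhere in this diff.)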
+ */ + qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev, + "bluetooth"); + if (IS_ERR(qcadev->bt_power->pwrseq)) + return PTR_ERR(qcadev->bt_power->pwrseq); + + break; + } + fallthrough; + case QCA_WCN3988: + case QCA_WCN3990: + case QCA_WCN3991: + case QCA_WCN3998: + case QCA_WCN6750: qcadev->bt_power->dev = &serdev->dev; err = qca_init_regulators(qcadev->bt_power, data->vregs, data->num_vregs); @@ -2353,12 +2413,13 @@ static int qca_serdev_probe(struct serdev_device *serdev) dev_err(&serdev->dev, "failed to acquire clk\n"); return PTR_ERR(qcadev->susclk); } + break; - err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); - if (err) { - BT_ERR("wcn3990 serdev registration failed"); - return err; - } + case QCA_QCA6390: + qcadev->bt_power->pwrseq = devm_pwrseq_get(&serdev->dev, + "bluetooth"); + if (IS_ERR(qcadev->bt_power->pwrseq)) + return PTR_ERR(qcadev->bt_power->pwrseq); break; default: @@ -2385,12 +2446,18 @@ static int qca_serdev_probe(struct serdev_device *serdev) if (err) return err; - err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); - if (err) { - BT_ERR("Rome serdev registration failed"); - clk_disable_unprepare(qcadev->susclk); + err = devm_add_action_or_reset(&serdev->dev, + qca_clk_disable_unprepare, + qcadev->susclk); + if (err) return err; - } + + } + + err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); + if (err) { + BT_ERR("serdev registration failed"); + return err; } hdev = qcadev->serdev_hu.hdev; @@ -2428,15 +2495,11 @@ static void qca_serdev_remove(struct serdev_device *serdev) case QCA_WCN6750: case QCA_WCN6855: case QCA_WCN7850: - if (power->vregs_on) { + if (power->vregs_on) qca_power_shutdown(&qcadev->serdev_hu); - break; - } - fallthrough; - + break; default: - if (qcadev->susclk) - clk_disable_unprepare(qcadev->susclk); + break; } hci_uart_unregister_device(&qcadev->serdev_hu); diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index 28750a40f0ed..c4046f8f1985 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -633,7 +633,7 @@ static int vhci_open(struct inode *inode, struct file *file) { struct vhci_data *data; - data = kzalloc(sizeof(struct vhci_data), GFP_KERNEL); + data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; diff --git a/drivers/crypto/caam/Kconfig b/drivers/crypto/caam/Kconfig index c631f99e415f..05210a0edb8a 100644 --- a/drivers/crypto/caam/Kconfig +++ b/drivers/crypto/caam/Kconfig @@ -10,7 +10,7 @@ config CRYPTO_DEV_FSL_CAAM_AHASH_API_DESC config CRYPTO_DEV_FSL_CAAM tristate "Freescale CAAM-Multicore platform driver backend" - depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE + depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE || COMPILE_TEST select SOC_BUS select CRYPTO_DEV_FSL_CAAM_COMMON imply FSL_MC_BUS diff --git a/drivers/crypto/caam/caamalg_qi2.c b/drivers/crypto/caam/caamalg_qi2.c index a4f6884416a0..207dc422785a 100644 --- a/drivers/crypto/caam/caamalg_qi2.c +++ b/drivers/crypto/caam/caamalg_qi2.c @@ -4990,11 +4990,23 @@ err_dma_map: return err; } +static void free_dpaa2_pcpu_netdev(struct dpaa2_caam_priv *priv, const cpumask_t *cpus) +{ + struct dpaa2_caam_priv_per_cpu *ppriv; + int i; + + for_each_cpu(i, cpus) { + ppriv = per_cpu_ptr(priv->ppriv, i); + free_netdev(ppriv->net_dev); + } +} + static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) { struct device *dev = &ls_dev->dev; struct dpaa2_caam_priv *priv; struct dpaa2_caam_priv_per_cpu *ppriv; + cpumask_t clean_mask; int err, cpu; u8 i; @@ -5073,6 +5085,7 @@ 
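/*
 * (Editorial sketch, not part of this patch: the dpaa2/caam conversion
 * below swaps an embedded per-CPU struct net_device for a pointer from
 * alloc_netdev_dummy() and records each successful allocation in a
 * cpumask so that a mid-loop failure can be unwound precisely:
 *
 *	cpumask_clear(&clean_mask);
 *	for_each_online_cpu(cpu) {
 *		ppriv = per_cpu_ptr(priv->ppriv, cpu);
 *		ppriv->net_dev = alloc_netdev_dummy(0);
 *		if (!ppriv->net_dev)
 *			goto err_alloc_netdev;
 *		cpumask_set_cpu(cpu, &clean_mask);
 *	}
 *
 * with the error label calling free_dpaa2_pcpu_netdev(priv, &clean_mask),
 * mirroring the error path added in the hunks that follow.)
 */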
static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) } } + cpumask_clear(&clean_mask); i = 0; for_each_online_cpu(cpu) { u8 j; @@ -5096,15 +5109,23 @@ static int __cold dpaa2_dpseci_setup(struct fsl_mc_device *ls_dev) priv->rx_queue_attr[j].fqid, priv->tx_queue_attr[j].fqid); - ppriv->net_dev.dev = *dev; - INIT_LIST_HEAD(&ppriv->net_dev.napi_list); - netif_napi_add_tx_weight(&ppriv->net_dev, &ppriv->napi, + ppriv->net_dev = alloc_netdev_dummy(0); + if (!ppriv->net_dev) { + err = -ENOMEM; + goto err_alloc_netdev; + } + cpumask_set_cpu(cpu, &clean_mask); + ppriv->net_dev->dev = *dev; + + netif_napi_add_tx_weight(ppriv->net_dev, &ppriv->napi, dpaa2_dpseci_poll, DPAA2_CAAM_NAPI_WEIGHT); } return 0; +err_alloc_netdev: + free_dpaa2_pcpu_netdev(priv, &clean_mask); err_get_rx_queue: dpaa2_dpseci_congestion_free(priv); err_get_vers: @@ -5153,6 +5174,7 @@ static int __cold dpaa2_dpseci_disable(struct dpaa2_caam_priv *priv) ppriv = per_cpu_ptr(priv->ppriv, i); napi_disable(&ppriv->napi); netif_napi_del(&ppriv->napi); + free_netdev(ppriv->net_dev); } return 0; diff --git a/drivers/crypto/caam/caamalg_qi2.h b/drivers/crypto/caam/caamalg_qi2.h index abb502bb675c..61d1219a202f 100644 --- a/drivers/crypto/caam/caamalg_qi2.h +++ b/drivers/crypto/caam/caamalg_qi2.h @@ -81,7 +81,7 @@ struct dpaa2_caam_priv { */ struct dpaa2_caam_priv_per_cpu { struct napi_struct napi; - struct net_device net_dev; + struct net_device *net_dev; int req_fqid; int rsp_fqid; int prio; diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c index bd418dea586d..d4b39184dbdb 100644 --- a/drivers/crypto/caam/ctrl.c +++ b/drivers/crypto/caam/ctrl.c @@ -80,6 +80,7 @@ static void build_deinstantiation_desc(u32 *desc, int handle) append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT); } +#ifdef CONFIG_OF static const struct of_device_id imx8m_machine_match[] = { { .compatible = "fsl,imx8mm", }, { .compatible = "fsl,imx8mn", }, @@ -88,6 +89,7 @@ static const struct of_device_id imx8m_machine_match[] = { { .compatible = "fsl,imx8ulp", }, { } }; +#endif /* * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of diff --git a/drivers/crypto/caam/qi.c b/drivers/crypto/caam/qi.c index 46a083849a8e..ba8fb5d8a7b2 100644 --- a/drivers/crypto/caam/qi.c +++ b/drivers/crypto/caam/qi.c @@ -57,7 +57,7 @@ struct caam_napi { */ struct caam_qi_pcpu_priv { struct caam_napi caam_napi; - struct net_device net_dev; + struct net_device *net_dev; struct qman_fq *rsp_fq; } ____cacheline_aligned; @@ -144,7 +144,7 @@ static void caam_fq_ern_cb(struct qman_portal *qm, struct qman_fq *fq, { const struct qm_fd *fd; struct caam_drv_req *drv_req; - struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); + struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev); struct caam_drv_private *priv = dev_get_drvdata(qidev); fd = &msg->ern.fd; @@ -530,6 +530,7 @@ static void caam_qi_shutdown(void *data) if (kill_fq(qidev, per_cpu(pcpu_qipriv.rsp_fq, i))) dev_err(qidev, "Rsp FQ kill failed, cpu: %d\n", i); + free_netdev(per_cpu(pcpu_qipriv.net_dev, i)); } qman_delete_cgr_safe(&priv->cgr); @@ -573,7 +574,7 @@ static enum qman_cb_dqrr_result caam_rsp_fq_dqrr_cb(struct qman_portal *p, struct caam_napi *caam_napi = raw_cpu_ptr(&pcpu_qipriv.caam_napi); struct caam_drv_req *drv_req; const struct qm_fd *fd; - struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev.dev); + struct device *qidev = &(raw_cpu_ptr(&pcpu_qipriv)->net_dev->dev); struct caam_drv_private *priv = dev_get_drvdata(qidev); u32 status; @@ -718,12 
+719,24 @@ static void free_rsp_fqs(void) kfree(per_cpu(pcpu_qipriv.rsp_fq, i)); } +static void free_caam_qi_pcpu_netdev(const cpumask_t *cpus) +{ + struct caam_qi_pcpu_priv *priv; + int i; + + for_each_cpu(i, cpus) { + priv = per_cpu_ptr(&pcpu_qipriv, i); + free_netdev(priv->net_dev); + } +} + int caam_qi_init(struct platform_device *caam_pdev) { int err, i; struct device *ctrldev = &caam_pdev->dev, *qidev; struct caam_drv_private *ctrlpriv; const cpumask_t *cpus = qman_affine_cpus(); + cpumask_t clean_mask; ctrlpriv = dev_get_drvdata(ctrldev); qidev = ctrldev; @@ -743,6 +756,8 @@ int caam_qi_init(struct platform_device *caam_pdev) return err; } + cpumask_clear(&clean_mask); + /* * Enable the NAPI contexts on each of the core which has an affine * portal. @@ -751,10 +766,16 @@ int caam_qi_init(struct platform_device *caam_pdev) struct caam_qi_pcpu_priv *priv = per_cpu_ptr(&pcpu_qipriv, i); struct caam_napi *caam_napi = &priv->caam_napi; struct napi_struct *irqtask = &caam_napi->irqtask; - struct net_device *net_dev = &priv->net_dev; + struct net_device *net_dev; + net_dev = alloc_netdev_dummy(0); + if (!net_dev) { + err = -ENOMEM; + goto fail; + } + cpumask_set_cpu(i, &clean_mask); + priv->net_dev = net_dev; net_dev->dev = *qidev; - INIT_LIST_HEAD(&net_dev->napi_list); netif_napi_add_tx_weight(net_dev, irqtask, caam_qi_poll, CAAM_NAPI_WEIGHT); @@ -766,16 +787,22 @@ int caam_qi_init(struct platform_device *caam_pdev) dma_get_cache_alignment(), 0, NULL); if (!qi_cache) { dev_err(qidev, "Can't allocate CAAM cache\n"); - free_rsp_fqs(); - return -ENOMEM; + err = -ENOMEM; + goto fail2; } caam_debugfs_qi_init(ctrlpriv); err = devm_add_action_or_reset(qidev, caam_qi_shutdown, ctrlpriv); if (err) - return err; + goto fail2; dev_info(qidev, "Linux CAAM Queue I/F driver initialised\n"); return 0; + +fail2: + free_rsp_fqs(); +fail: + free_caam_qi_pcpu_netdev(&clean_mask); + return err; } diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c index f0a399cf45b2..dd1a068f905d 100644 --- a/drivers/dma/ti/k3-udma-glue.c +++ b/drivers/dma/ti/k3-udma-glue.c @@ -1528,6 +1528,9 @@ int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn, flow->virq = k3_ringacc_get_ring_irq_num(flow->ringrx); } + if (!flow->virq) + return -ENXIO; + return flow->virq; } EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq); diff --git a/drivers/infiniband/hw/mana/qp.c b/drivers/infiniband/hw/mana/qp.c index ba13c5abf8ef..2d411a16a127 100644 --- a/drivers/infiniband/hw/mana/qp.c +++ b/drivers/infiniband/hw/mana/qp.c @@ -21,7 +21,7 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev, gc = mdev_to_gc(dev); - req_buf_size = struct_size(req, indir_tab, MANA_INDIRECT_TABLE_SIZE); + req_buf_size = struct_size(req, indir_tab, MANA_INDIRECT_TABLE_DEF_SIZE); req = kzalloc(req_buf_size, GFP_KERNEL); if (!req) return -ENOMEM; @@ -41,18 +41,18 @@ static int mana_ib_cfg_vport_steering(struct mana_ib_dev *dev, if (log_ind_tbl_size) req->rss_enable = true; - req->num_indir_entries = MANA_INDIRECT_TABLE_SIZE; + req->num_indir_entries = MANA_INDIRECT_TABLE_DEF_SIZE; req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2, indir_tab); req->update_indir_tab = true; req->cqe_coalescing_enable = 1; /* The ind table passed to the hardware must have - * MANA_INDIRECT_TABLE_SIZE entries. Adjust the verb + * MANA_INDIRECT_TABLE_DEF_SIZE entries. 
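(For example, with log_ind_tbl_size == 2 the verb table holds four entries, and the fill loop below stores ind_table[i % 4] into hardware entry i, replicating those four entries across the whole table.)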
Adjust the verb * ind_table to MANA_INDIRECT_TABLE_SIZE if required */ ibdev_dbg(&dev->ib_dev, "ind table size %u\n", 1 << log_ind_tbl_size); - for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) { + for (i = 0; i < MANA_INDIRECT_TABLE_DEF_SIZE; i++) { req->indir_tab[i] = ind_table[i % (1 << log_ind_tbl_size)]; ibdev_dbg(&dev->ib_dev, "index %u handle 0x%llx\n", i, req->indir_tab[i]); @@ -137,7 +137,7 @@ static int mana_ib_create_qp_rss(struct ib_qp *ibqp, struct ib_pd *pd, } ind_tbl_size = 1 << ind_tbl->log_ind_tbl_size; - if (ind_tbl_size > MANA_INDIRECT_TABLE_SIZE) { + if (ind_tbl_size > MANA_INDIRECT_TABLE_DEF_SIZE) { ibdev_dbg(&mdev->ib_dev, "Indirect table size %d exceeding limit\n", ind_tbl_size); diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c index 8300ce622835..4f6c1968a2ee 100644 --- a/drivers/infiniband/hw/mlx5/counters.c +++ b/drivers/infiniband/hw/mlx5/counters.c @@ -83,6 +83,8 @@ static const struct mlx5_ib_counter extended_err_cnts[] = { INIT_Q_COUNTER(resp_remote_access_errors), INIT_Q_COUNTER(resp_cqe_flush_error), INIT_Q_COUNTER(req_cqe_flush_error), + INIT_Q_COUNTER(req_transport_retries_exceeded), + INIT_Q_COUNTER(req_rnr_retries_exceeded), }; static const struct mlx5_ib_counter roce_accl_cnts[] = { @@ -102,6 +104,8 @@ static const struct mlx5_ib_counter vport_extended_err_cnts[] = { INIT_VPORT_Q_COUNTER(resp_remote_access_errors), INIT_VPORT_Q_COUNTER(resp_cqe_flush_error), INIT_VPORT_Q_COUNTER(req_cqe_flush_error), + INIT_VPORT_Q_COUNTER(req_transport_retries_exceeded), + INIT_VPORT_Q_COUNTER(req_rnr_retries_exceeded), }; static const struct mlx5_ib_counter vport_roce_accl_cnts[] = { diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 43660c831b22..086de6a022f9 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1810,7 +1810,7 @@ static int set_ucontext_resp(struct ib_ucontext *uctx, } resp->qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); - if (dev->wc_support) + if (mlx5_wc_support_get(dev->mdev)) resp->bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size); resp->cache_line_size = cache_line_size(); @@ -2337,7 +2337,7 @@ static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vm switch (command) { case MLX5_IB_MMAP_WC_PAGE: case MLX5_IB_MMAP_ALLOC_WC: - if (!dev->wc_support) + if (!mlx5_wc_support_get(dev->mdev)) return -EPERM; fallthrough; case MLX5_IB_MMAP_NC_PAGE: @@ -3612,7 +3612,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)( alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC) return -EOPNOTSUPP; - if (!to_mdev(c->ibucontext.device)->wc_support && + if (!mlx5_wc_support_get(to_mdev(c->ibucontext.device)->mdev) && alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF) return -EOPNOTSUPP; @@ -3766,18 +3766,6 @@ err: return err; } -static int mlx5_ib_enable_driver(struct ib_device *dev) -{ - struct mlx5_ib_dev *mdev = to_mdev(dev); - int ret; - - ret = mlx5_ib_test_wc(mdev); - mlx5_ib_dbg(mdev, "Write-Combining %s", - mdev->wc_support ? 
"supported" : "not supported"); - - return ret; -} - static const struct ib_device_ops mlx5_ib_dev_ops = { .owner = THIS_MODULE, .driver_id = RDMA_DRIVER_MLX5, @@ -3808,7 +3796,6 @@ static const struct ib_device_ops mlx5_ib_dev_ops = { .drain_rq = mlx5_ib_drain_rq, .drain_sq = mlx5_ib_drain_sq, .device_group = &mlx5_attr_group, - .enable_driver = mlx5_ib_enable_driver, .get_dev_fw_str = get_dev_fw_str, .get_dma_mr = mlx5_ib_get_dma_mr, .get_link_layer = mlx5_ib_port_link_layer, diff --git a/drivers/infiniband/hw/mlx5/mem.c b/drivers/infiniband/hw/mlx5/mem.c index 5a22be14d958..af321f6ef7f5 100644 --- a/drivers/infiniband/hw/mlx5/mem.c +++ b/drivers/infiniband/hw/mlx5/mem.c @@ -30,10 +30,8 @@ * SOFTWARE. */ -#include #include #include "mlx5_ib.h" -#include /* * Fill in a physical address list. ib_umem_num_dma_blocks() entries will be @@ -95,199 +93,3 @@ unsigned long __mlx5_umem_find_best_quantized_pgoff( return 0; return page_size; } - -#define WR_ID_BF 0xBF -#define WR_ID_END 0xBAD -#define TEST_WC_NUM_WQES 255 -#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100) -static int post_send_nop(struct mlx5_ib_dev *dev, struct ib_qp *ibqp, u64 wr_id, - bool signaled) -{ - struct mlx5_ib_qp *qp = to_mqp(ibqp); - struct mlx5_wqe_ctrl_seg *ctrl; - struct mlx5_bf *bf = &qp->bf; - __be32 mmio_wqe[16] = {}; - unsigned long flags; - unsigned int idx; - - if (unlikely(dev->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)) - return -EIO; - - spin_lock_irqsave(&qp->sq.lock, flags); - - idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1); - ctrl = mlx5_frag_buf_get_wqe(&qp->sq.fbc, idx); - - memset(ctrl, 0, sizeof(struct mlx5_wqe_ctrl_seg)); - ctrl->fm_ce_se = signaled ? MLX5_WQE_CTRL_CQ_UPDATE : 0; - ctrl->opmod_idx_opcode = - cpu_to_be32(((u32)(qp->sq.cur_post) << 8) | MLX5_OPCODE_NOP); - ctrl->qpn_ds = cpu_to_be32((sizeof(struct mlx5_wqe_ctrl_seg) / 16) | - (qp->trans_qp.base.mqp.qpn << 8)); - - qp->sq.wrid[idx] = wr_id; - qp->sq.w_list[idx].opcode = MLX5_OPCODE_NOP; - qp->sq.wqe_head[idx] = qp->sq.head + 1; - qp->sq.cur_post += DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg), - MLX5_SEND_WQE_BB); - qp->sq.w_list[idx].next = qp->sq.cur_post; - qp->sq.head++; - - memcpy(mmio_wqe, ctrl, sizeof(*ctrl)); - ((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |= - MLX5_WQE_CTRL_CQ_UPDATE; - - /* Make sure that descriptors are written before - * updating doorbell record and ringing the doorbell - */ - wmb(); - - qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post); - - /* Make sure doorbell record is visible to the HCA before - * we hit doorbell - */ - wmb(); - __iowrite64_copy(bf->bfreg->map + bf->offset, mmio_wqe, - sizeof(mmio_wqe) / 8); - - bf->offset ^= bf->buf_size; - - spin_unlock_irqrestore(&qp->sq.lock, flags); - - return 0; -} - -static int test_wc_poll_cq_result(struct mlx5_ib_dev *dev, struct ib_cq *cq) -{ - int ret; - struct ib_wc wc = {}; - unsigned long end = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES; - - do { - ret = ib_poll_cq(cq, 1, &wc); - if (ret < 0 || wc.status) - return ret < 0 ? 
ret : -EINVAL; - if (ret) - break; - } while (!time_after(jiffies, end)); - - if (!ret) - return -ETIMEDOUT; - - if (wc.wr_id != WR_ID_BF) - ret = 0; - - return ret; -} - -static int test_wc_do_send(struct mlx5_ib_dev *dev, struct ib_qp *qp) -{ - int err, i; - - for (i = 0; i < TEST_WC_NUM_WQES; i++) { - err = post_send_nop(dev, qp, WR_ID_BF, false); - if (err) - return err; - } - - return post_send_nop(dev, qp, WR_ID_END, true); -} - -int mlx5_ib_test_wc(struct mlx5_ib_dev *dev) -{ - struct ib_cq_init_attr cq_attr = { .cqe = TEST_WC_NUM_WQES + 1 }; - int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type); - struct ib_qp_init_attr qp_init_attr = { - .cap = { .max_send_wr = TEST_WC_NUM_WQES }, - .qp_type = IB_QPT_UD, - .sq_sig_type = IB_SIGNAL_REQ_WR, - .create_flags = MLX5_IB_QP_CREATE_WC_TEST, - }; - struct ib_qp_attr qp_attr = { .port_num = 1 }; - struct ib_device *ibdev = &dev->ib_dev; - struct ib_qp *qp; - struct ib_cq *cq; - struct ib_pd *pd; - int ret; - - if (!MLX5_CAP_GEN(dev->mdev, bf)) - return 0; - - if (!dev->mdev->roce.roce_en && - port_type_cap == MLX5_CAP_PORT_TYPE_ETH) { - if (mlx5_core_is_pf(dev->mdev)) - dev->wc_support = arch_can_pci_mmap_wc(); - return 0; - } - - ret = mlx5_alloc_bfreg(dev->mdev, &dev->wc_bfreg, true, false); - if (ret) - goto print_err; - - if (!dev->wc_bfreg.wc) - goto out1; - - pd = ib_alloc_pd(ibdev, 0); - if (IS_ERR(pd)) { - ret = PTR_ERR(pd); - goto out1; - } - - cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr); - if (IS_ERR(cq)) { - ret = PTR_ERR(cq); - goto out2; - } - - qp_init_attr.recv_cq = cq; - qp_init_attr.send_cq = cq; - qp = ib_create_qp(pd, &qp_init_attr); - if (IS_ERR(qp)) { - ret = PTR_ERR(qp); - goto out3; - } - - qp_attr.qp_state = IB_QPS_INIT; - ret = ib_modify_qp(qp, &qp_attr, - IB_QP_STATE | IB_QP_PORT | IB_QP_PKEY_INDEX | - IB_QP_QKEY); - if (ret) - goto out4; - - qp_attr.qp_state = IB_QPS_RTR; - ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE); - if (ret) - goto out4; - - qp_attr.qp_state = IB_QPS_RTS; - ret = ib_modify_qp(qp, &qp_attr, IB_QP_STATE | IB_QP_SQ_PSN); - if (ret) - goto out4; - - ret = test_wc_do_send(dev, qp); - if (ret < 0) - goto out4; - - ret = test_wc_poll_cq_result(dev, cq); - if (ret > 0) { - dev->wc_support = true; - ret = 0; - } - -out4: - ib_destroy_qp(qp); -out3: - ib_destroy_cq(cq); -out2: - ib_dealloc_pd(pd); -out1: - mlx5_free_bfreg(dev->mdev, &dev->wc_bfreg); -print_err: - if (ret) - mlx5_ib_err( - dev, - "Error %d while trying to test write-combining support\n", - ret); - return ret; -} diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index f255a12e26a0..b68779e9d86c 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -341,7 +341,6 @@ struct mlx5_ib_flow_db { * rely on the range reserved for that use in the ib_qp_create_flags enum. 
*/ #define MLX5_IB_QP_CREATE_SQPN_QP1 IB_QP_CREATE_RESERVED_START -#define MLX5_IB_QP_CREATE_WC_TEST (IB_QP_CREATE_RESERVED_START << 1) struct wr_list { u16 opcode; @@ -1123,7 +1122,6 @@ struct mlx5_ib_dev { u8 ib_active:1; u8 is_rep:1; u8 lag_active:1; - u8 wc_support:1; u8 fill_delay; struct umr_common umrc; /* sync used page count stats @@ -1149,7 +1147,6 @@ struct mlx5_ib_dev { /* Array with num_ports elements */ struct mlx5_ib_port *port; struct mlx5_sq_bfreg bfreg; - struct mlx5_sq_bfreg wc_bfreg; struct mlx5_sq_bfreg fp_bfreg; struct mlx5_ib_delay_drop delay_drop; const struct mlx5_ib_profile *profile; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index e2164f813607..e8c0fead4062 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -1107,8 +1107,6 @@ static int _create_kernel_qp(struct mlx5_ib_dev *dev, if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) qp->bf.bfreg = &dev->fp_bfreg; - else if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST) - qp->bf.bfreg = &dev->wc_bfreg; else qp->bf.bfreg = &dev->bfreg; @@ -2959,14 +2957,6 @@ static void process_create_flag(struct mlx5_ib_dev *dev, int *flags, int flag, return; } - if (flag == MLX5_IB_QP_CREATE_WC_TEST) { - /* - * Special case, if condition didn't meet, it won't be error, - * just different in-kernel flow. - */ - *flags &= ~MLX5_IB_QP_CREATE_WC_TEST; - return; - } mlx5_ib_dbg(dev, "Verbs create QP flag 0x%X is not supported\n", flag); } @@ -3027,8 +3017,6 @@ static int process_create_flags(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp, IB_QP_CREATE_PCI_WRITE_END_PADDING, MLX5_CAP_GEN(mdev, end_pad), qp); - process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_WC_TEST, - qp_type != MLX5_IB_QPT_REG_UMR, qp); process_create_flag(dev, &create_flags, MLX5_IB_QP_CREATE_SQPN_QP1, true, qp); @@ -4609,10 +4597,6 @@ static bool mlx5_ib_modify_qp_allowed(struct mlx5_ib_dev *dev, if (qp->type == IB_QPT_RAW_PACKET || qp->type == MLX5_IB_QPT_REG_UMR) return true; - /* Internal QP used for wc testing, with NOPs in wq */ - if (qp->flags & MLX5_IB_QP_CREATE_WC_TEST) - return true; - return false; } diff --git a/drivers/isdn/hardware/mISDN/avmfritz.c b/drivers/isdn/hardware/mISDN/avmfritz.c index f68569bfef7a..509b362d6465 100644 --- a/drivers/isdn/hardware/mISDN/avmfritz.c +++ b/drivers/isdn/hardware/mISDN/avmfritz.c @@ -159,6 +159,7 @@ set_debug(const char *val, const struct kernel_param *kp) } MODULE_AUTHOR("Karsten Keil"); +MODULE_DESCRIPTION("mISDN driver for AVM FRITZ!CARD PCI ISDN cards"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(AVMFRITZ_REV); module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR); diff --git a/drivers/isdn/hardware/mISDN/hfcmulti.c b/drivers/isdn/hardware/mISDN/hfcmulti.c index 2e5cb9dde3ec..0d2928d8aeae 100644 --- a/drivers/isdn/hardware/mISDN/hfcmulti.c +++ b/drivers/isdn/hardware/mISDN/hfcmulti.c @@ -221,6 +221,7 @@ static uint hwid = HWID_NONE; static int HFC_cnt, E1_cnt, bmask_cnt, Port_cnt, PCM_cnt = 99; MODULE_AUTHOR("Andreas Eversberg"); +MODULE_DESCRIPTION("mISDN driver for hfc-4s/hfc-8s/hfc-e1 based cards"); MODULE_LICENSE("GPL"); MODULE_VERSION(HFC_MULTI_VERSION); module_param(debug, uint, S_IRUGO | S_IWUSR); diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index fe391de1aba3..ce7bccc9faa3 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -48,6 +48,7 @@ static struct timer_list hfc_tl; static unsigned long hfc_jiffies; MODULE_AUTHOR("Karsten 
Keil"); +MODULE_DESCRIPTION("mISDN driver for CCD's hfc-pci based cards"); MODULE_LICENSE("GPL"); module_param(debug, uint, S_IRUGO | S_IWUSR); module_param(poll, uint, S_IRUGO | S_IWUSR); diff --git a/drivers/isdn/hardware/mISDN/hfcsusb.c b/drivers/isdn/hardware/mISDN/hfcsusb.c index b82b89888a5e..e54419a4e731 100644 --- a/drivers/isdn/hardware/mISDN/hfcsusb.c +++ b/drivers/isdn/hardware/mISDN/hfcsusb.c @@ -31,6 +31,7 @@ static DEFINE_RWLOCK(HFClock); MODULE_AUTHOR("Martin Bachem"); +MODULE_DESCRIPTION("mISDN driver for Colognechip HFC-S USB chip"); MODULE_LICENSE("GPL"); module_param(debug, uint, S_IRUGO | S_IWUSR); module_param(poll, int, 0); diff --git a/drivers/isdn/hardware/mISDN/mISDNinfineon.c b/drivers/isdn/hardware/mISDN/mISDNinfineon.c index 88d592bafdb0..30876a012711 100644 --- a/drivers/isdn/hardware/mISDN/mISDNinfineon.c +++ b/drivers/isdn/hardware/mISDN/mISDNinfineon.c @@ -245,6 +245,7 @@ set_debug(const char *val, const struct kernel_param *kp) } MODULE_AUTHOR("Karsten Keil"); +MODULE_DESCRIPTION("mISDN driver for cards based on Infineon ISDN chipsets"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(INFINEON_REV); module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR); diff --git a/drivers/isdn/hardware/mISDN/mISDNipac.c b/drivers/isdn/hardware/mISDN/mISDNipac.c index 4f8d85bb3ce1..d0b7271fbda1 100644 --- a/drivers/isdn/hardware/mISDN/mISDNipac.c +++ b/drivers/isdn/hardware/mISDN/mISDNipac.c @@ -21,6 +21,7 @@ MODULE_AUTHOR("Karsten Keil"); MODULE_VERSION(ISAC_REV); +MODULE_DESCRIPTION("mISDN driver for ISAC specific functions"); MODULE_LICENSE("GPL v2"); #define ReadISAC(is, o) (is->read_reg(is->dch.hw, o + is->off)) diff --git a/drivers/isdn/hardware/mISDN/mISDNisar.c b/drivers/isdn/hardware/mISDN/mISDNisar.c index 48b3d43e2502..b3e03c410544 100644 --- a/drivers/isdn/hardware/mISDN/mISDNisar.c +++ b/drivers/isdn/hardware/mISDN/mISDNisar.c @@ -22,6 +22,7 @@ #define ISAR_REV "2.1" MODULE_AUTHOR("Karsten Keil"); +MODULE_DESCRIPTION("mISDN driver for ISAR (Siemens PSB 7110) specific functions"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(ISAR_REV); diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c index 566c790a9481..d163850c295e 100644 --- a/drivers/isdn/hardware/mISDN/netjet.c +++ b/drivers/isdn/hardware/mISDN/netjet.c @@ -114,6 +114,7 @@ set_debug(const char *val, const struct kernel_param *kp) } MODULE_AUTHOR("Karsten Keil"); +MODULE_DESCRIPTION("mISDN driver for NETJet cards"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(NETJET_REV); module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR); diff --git a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c index b530c78eca8e..0c405261d940 100644 --- a/drivers/isdn/hardware/mISDN/speedfax.c +++ b/drivers/isdn/hardware/mISDN/speedfax.c @@ -97,6 +97,7 @@ set_debug(const char *val, const struct kernel_param *kp) } MODULE_AUTHOR("Karsten Keil"); +MODULE_DESCRIPTION("mISDN driver for Sedlbauer Speedfax+ cards"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(SPEEDFAX_REV); MODULE_FIRMWARE("isdn/ISAR.BIN"); diff --git a/drivers/isdn/hardware/mISDN/w6692.c b/drivers/isdn/hardware/mISDN/w6692.c index f3b8db7b48fe..ee69212ac351 100644 --- a/drivers/isdn/hardware/mISDN/w6692.c +++ b/drivers/isdn/hardware/mISDN/w6692.c @@ -101,6 +101,7 @@ set_debug(const char *val, const struct kernel_param *kp) } MODULE_AUTHOR("Karsten Keil"); +MODULE_DESCRIPTION("mISDN driver for Winbond w6692 based cards"); MODULE_LICENSE("GPL v2"); 
MODULE_VERSION(W6692_REV); module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR); diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c index ab8513a7acd5..e34a7a46754e 100644 --- a/drivers/isdn/mISDN/core.c +++ b/drivers/isdn/mISDN/core.c @@ -14,6 +14,7 @@ static u_int debug; MODULE_AUTHOR("Karsten Keil"); +MODULE_DESCRIPTION("Modular ISDN core driver"); MODULE_LICENSE("GPL"); module_param(debug, uint, S_IRUGO | S_IWUSR); diff --git a/drivers/isdn/mISDN/dsp_blowfish.c b/drivers/isdn/mISDN/dsp_blowfish.c index 0aa572f3858d..0e77c282c862 100644 --- a/drivers/isdn/mISDN/dsp_blowfish.c +++ b/drivers/isdn/mISDN/dsp_blowfish.c @@ -73,11 +73,6 @@ * crypto-api for faster implementation */ -struct bf_ctx { - u32 p[18]; - u32 s[1024]; -}; - static const u32 bf_pbox[16 + 2] = { 0x243f6a88, 0x85a308d3, 0x13198a2e, 0x03707344, 0xa4093822, 0x299f31d0, 0x082efa98, 0xec4e6c89, diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index fae95f166688..753232e9fc36 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c @@ -172,6 +172,7 @@ module_param(debug, uint, S_IRUGO | S_IWUSR); module_param(options, uint, S_IRUGO | S_IWUSR); module_param(poll, uint, S_IRUGO | S_IWUSR); module_param(dtmfthreshold, uint, S_IRUGO | S_IWUSR); +MODULE_DESCRIPTION("mISDN driver for Digital Audio Processing of transparent data"); MODULE_LICENSE("GPL"); /*int spinnest = 0;*/ diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c index f010b35a0531..a5ad88a960d0 100644 --- a/drivers/isdn/mISDN/l1oip_core.c +++ b/drivers/isdn/mISDN/l1oip_core.c @@ -245,6 +245,7 @@ static int debug; static int ulaw; MODULE_AUTHOR("Andreas Eversberg"); +MODULE_DESCRIPTION("mISDN driver for tunneling layer 1 over IP"); MODULE_LICENSE("GPL"); module_param_array(type, uint, NULL, S_IRUGO | S_IWUSR); module_param_array(codec, uint, NULL, S_IRUGO | S_IWUSR); diff --git a/drivers/net/arcnet/com20020-isa.c b/drivers/net/arcnet/com20020-isa.c index 293a621e654c..fef2ac2852a8 100644 --- a/drivers/net/arcnet/com20020-isa.c +++ b/drivers/net/arcnet/com20020-isa.c @@ -137,6 +137,7 @@ module_param(backplane, int, 0); module_param(clockp, int, 0); module_param(clockm, int, 0); +MODULE_DESCRIPTION("ARCnet COM20020 chipset ISA driver"); MODULE_LICENSE("GPL"); static struct net_device *my_dev; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index d19aabf5d4fb..af9ddd3902cc 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -5755,10 +5755,10 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev, } static int bond_ethtool_get_ts_info(struct net_device *bond_dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct bonding *bond = netdev_priv(bond_dev); - struct ethtool_ts_info ts_info; + struct kernel_ethtool_ts_info ts_info; struct net_device *real_dev; bool sw_tx_support = false; struct list_head *iter; diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig index 2e31db55d927..7f9b60a42d29 100644 --- a/drivers/net/can/Kconfig +++ b/drivers/net/can/Kconfig @@ -187,9 +187,8 @@ config CAN_SLCAN slcand) can be found in the can-utils at the linux-can project, see https://github.com/linux-can/can-utils for details. - The slcan driver supports up to 10 CAN netdevices by default which - can be changed by the 'maxdev=xx' module option. This driver can - also be built as a module. If so, the module will be called slcan. 
+ This driver can also be built as a module. If so, the module + will be called slcan. config CAN_SUN4I tristate "Allwinner A10 CAN controller" diff --git a/drivers/net/can/dev/dev.c b/drivers/net/can/dev/dev.c index 83e724e0ab87..87828f953073 100644 --- a/drivers/net/can/dev/dev.c +++ b/drivers/net/can/dev/dev.c @@ -376,7 +376,7 @@ EXPORT_SYMBOL(can_eth_ioctl_hwts); * supporting hardware timestamps */ int can_ethtool_op_get_ts_info_hwts(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index 7b5028b67cd5..a60d9efd5f8d 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -29,10 +29,10 @@ MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); #define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U #define KVASER_PCIEFD_MAX_CAN_CHANNELS 8UL #define KVASER_PCIEFD_DMA_COUNT 2U - #define KVASER_PCIEFD_DMA_SIZE (4U * 1024U) #define KVASER_PCIEFD_VENDOR 0x1a07 + /* Altera based devices */ #define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d #define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e @@ -550,7 +550,7 @@ static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can) spin_unlock_irqrestore(&can->lock, irq); } -static void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can) +static inline void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can) { u32 msk; @@ -711,17 +711,17 @@ static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can) static int kvaser_pciefd_open(struct net_device *netdev) { - int err; + int ret; struct kvaser_pciefd_can *can = netdev_priv(netdev); - err = open_candev(netdev); - if (err) - return err; + ret = open_candev(netdev); + if (ret) + return ret; - err = kvaser_pciefd_bus_on(can); - if (err) { + ret = kvaser_pciefd_bus_on(can); + if (ret) { close_candev(netdev); - return err; + return ret; } return 0; @@ -1032,15 +1032,15 @@ static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie) int i; for (i = 0; i < pcie->nr_channels; i++) { - int err = register_candev(pcie->can[i]->can.dev); + int ret = register_candev(pcie->can[i]->can.dev); - if (err) { + if (ret) { int j; /* Unregister all successfully registered devices. */ for (j = 0; j < i; j++) unregister_candev(pcie->can[j]->can.dev); - return err; + return ret; } } @@ -1619,7 +1619,7 @@ static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos, /* Position does not point to the end of the package, * corrupted packet size? 
*/ - if ((*start_pos + size) != pos) + if (unlikely((*start_pos + size) != pos)) return -EIO; /* Point to the next packet header, if any */ @@ -1640,31 +1640,24 @@ static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf) return res; } -static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) +static u32 kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) { u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); - if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) kvaser_pciefd_read_buffer(pcie, 0); - /* Reset DMA buffer 0 */ - iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, - KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); - } - if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { + if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) kvaser_pciefd_read_buffer(pcie, 1); - /* Reset DMA buffer 1 */ - iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, - KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); - } - if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 || - irq & KVASER_PCIEFD_SRB_IRQ_DOF1 || - irq & KVASER_PCIEFD_SRB_IRQ_DUF0 || - irq & KVASER_PCIEFD_SRB_IRQ_DUF1) + if (unlikely(irq & KVASER_PCIEFD_SRB_IRQ_DOF0 || + irq & KVASER_PCIEFD_SRB_IRQ_DOF1 || + irq & KVASER_PCIEFD_SRB_IRQ_DUF0 || + irq & KVASER_PCIEFD_SRB_IRQ_DUF1)) dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq); iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); + return irq; } static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) @@ -1691,27 +1684,33 @@ static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) { struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev; const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask; - u32 board_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie)); + u32 pci_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie)); + u32 srb_irq = 0; int i; - if (!(board_irq & irq_mask->all)) + if (!(pci_irq & irq_mask->all)) return IRQ_NONE; - if (board_irq & irq_mask->kcan_rx0) - kvaser_pciefd_receive_irq(pcie); + if (pci_irq & irq_mask->kcan_rx0) + srb_irq = kvaser_pciefd_receive_irq(pcie); for (i = 0; i < pcie->nr_channels; i++) { - if (!pcie->can[i]) { - dev_err(&pcie->pci->dev, - "IRQ mask points to unallocated controller\n"); - break; - } - - /* Check that mask matches channel (i) IRQ mask */ - if (board_irq & irq_mask->kcan_tx[i]) + if (pci_irq & irq_mask->kcan_tx[i]) kvaser_pciefd_transmit_irq(pcie->can[i]); } + if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { + /* Reset DMA buffer 0, may trigger new interrupt */ + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); + } + + if (srb_irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { + /* Reset DMA buffer 1, may trigger new interrupt */ + iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, + KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); + } + return IRQ_HANDLED; } @@ -1733,7 +1732,7 @@ static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie) static int kvaser_pciefd_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - int err; + int ret; struct kvaser_pciefd *pcie; const struct kvaser_pciefd_irq_mask *irq_mask; void __iomem *irq_en_base; @@ -1747,39 +1746,52 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data; irq_mask = pcie->driver_data->irq_mask; - err = pci_enable_device(pdev); - if (err) - return err; + ret = pci_enable_device(pdev); + if (ret) + return ret; - err = pci_request_regions(pdev, 
KVASER_PCIEFD_DRV_NAME); - if (err) + ret = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME); + if (ret) goto err_disable_pci; pcie->reg_base = pci_iomap(pdev, 0, 0); if (!pcie->reg_base) { - err = -ENOMEM; + ret = -ENOMEM; goto err_release_regions; } - err = kvaser_pciefd_setup_board(pcie); - if (err) + ret = kvaser_pciefd_setup_board(pcie); + if (ret) goto err_pci_iounmap; - err = kvaser_pciefd_setup_dma(pcie); - if (err) + ret = kvaser_pciefd_setup_dma(pcie); + if (ret) goto err_pci_iounmap; pci_set_master(pdev); - err = kvaser_pciefd_setup_can_ctrls(pcie); - if (err) + ret = kvaser_pciefd_setup_can_ctrls(pcie); + if (ret) goto err_teardown_can_ctrls; - err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler, + ret = pci_alloc_irq_vectors(pcie->pci, 1, 1, PCI_IRQ_INTX | PCI_IRQ_MSI); + if (ret < 0) { + dev_err(&pcie->pci->dev, "Failed to allocate IRQ vectors.\n"); + goto err_teardown_can_ctrls; + } + + ret = pci_irq_vector(pcie->pci, 0); + if (ret < 0) + goto err_pci_free_irq_vectors; + + pcie->pci->irq = ret; + + ret = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler, IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie); - if (err) - goto err_teardown_can_ctrls; - + if (ret) { + dev_err(&pcie->pci->dev, "Failed to request IRQ %d\n", pcie->pci->irq); + goto err_pci_free_irq_vectors; + } iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); @@ -1797,8 +1809,8 @@ static int kvaser_pciefd_probe(struct pci_dev *pdev, iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); - err = kvaser_pciefd_reg_candev(pcie); - if (err) + ret = kvaser_pciefd_reg_candev(pcie); + if (ret) goto err_free_irq; return 0; @@ -1808,6 +1820,9 @@ err_free_irq: iowrite32(0, irq_en_base); free_irq(pcie->pci->irq, pcie); +err_pci_free_irq_vectors: + pci_free_irq_vectors(pcie->pci); + err_teardown_can_ctrls: kvaser_pciefd_teardown_can_ctrls(pcie); iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); @@ -1822,7 +1837,7 @@ err_release_regions: err_disable_pci: pci_disable_device(pdev); - return err; + return ret; } static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie) @@ -1853,7 +1868,7 @@ static void kvaser_pciefd_remove(struct pci_dev *pdev) iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); free_irq(pcie->pci->irq, pcie); - + pci_free_irq_vectors(pcie->pci); pci_iounmap(pdev, pcie->reg_base); pci_release_regions(pdev); pci_disable_device(pdev); diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index 14b231c4d7ec..7f63f866083e 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -379,38 +379,72 @@ m_can_txe_fifo_read(struct m_can_classdev *cdev, u32 fgi, u32 offset, u32 *val) return cdev->ops->read_fifo(cdev, addr_offset, val, 1); } -static void m_can_config_endisable(struct m_can_classdev *cdev, bool enable) +static int m_can_cccr_update_bits(struct m_can_classdev *cdev, u32 mask, u32 val) { - u32 cccr = m_can_read(cdev, M_CAN_CCCR); - u32 timeout = 10; - u32 val = 0; + u32 val_before = m_can_read(cdev, M_CAN_CCCR); + u32 val_after = (val_before & ~mask) | val; + size_t tries = 10; - /* Clear the Clock stop request if it was set */ - if (cccr & CCCR_CSR) - cccr &= ~CCCR_CSR; - - if (enable) { - /* enable m_can configuration */ - m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT); - udelay(5); - /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */ - m_can_write(cdev, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE); - } else { 
- m_can_write(cdev, M_CAN_CCCR, cccr & ~(CCCR_INIT | CCCR_CCE)); + if (!(mask & CCCR_INIT) && !(val_before & CCCR_INIT)) { + dev_err(cdev->dev, + "refusing to configure device when in normal mode\n"); + return -EBUSY; } - /* there's a delay for module initialization */ - if (enable) - val = CCCR_INIT | CCCR_CCE; + /* The chip should be in standby mode when changing the CCCR register, + * and some chips set the CSR and CSA bits when in standby. Furthermore, + * the CSR and CSA bits should be written as zeros, even when they read + * ones. + */ + val_after &= ~(CCCR_CSR | CCCR_CSA); - while ((m_can_read(cdev, M_CAN_CCCR) & (CCCR_INIT | CCCR_CCE)) != val) { - if (timeout == 0) { - netdev_warn(cdev->net, "Failed to init module\n"); - return; - } - timeout--; - udelay(1); + while (tries--) { + u32 val_read; + + /* Write the desired value in each try, as setting some bits in + * the CCCR register requires other bits to be set first. E.g. + * setting the NISO bit requires setting the CCE bit first. + */ + m_can_write(cdev, M_CAN_CCCR, val_after); + + val_read = m_can_read(cdev, M_CAN_CCCR) & ~(CCCR_CSR | CCCR_CSA); + + if (val_read == val_after) + return 0; + + usleep_range(1, 5); } + + return -ETIMEDOUT; +} + +static int m_can_config_enable(struct m_can_classdev *cdev) +{ + int err; + + /* CCCR_INIT must be set in order to set CCCR_CCE, but access to + * configuration registers should only be enabled when in standby mode, + * where CCCR_INIT is always set. + */ + err = m_can_cccr_update_bits(cdev, CCCR_CCE, CCCR_CCE); + if (err) + netdev_err(cdev->net, "failed to enable configuration mode\n"); + + return err; +} + +static int m_can_config_disable(struct m_can_classdev *cdev) +{ + int err; + + /* Only clear CCCR_CCE, since CCCR_INIT cannot be cleared while in + * standby mode + */ + err = m_can_cccr_update_bits(cdev, CCCR_CCE, 0); + if (err) + netdev_err(cdev->net, "failed to disable configuration registers\n"); + + return err; } static void m_can_interrupt_enable(struct m_can_classdev *cdev, u32 interrupts) @@ -1403,7 +1437,9 @@ static int m_can_chip_config(struct net_device *dev) interrupts &= ~(IR_ARA | IR_ELO | IR_DRX | IR_TEFF | IR_TFE | IR_TCF | IR_HPM | IR_RF1F | IR_RF1W | IR_RF1N | IR_RF0F); - m_can_config_endisable(cdev, true); + err = m_can_config_enable(cdev); + if (err) + return err; /* RX Buffer/FIFO Element Size 64 bytes data field */ m_can_write(cdev, M_CAN_RXESC, @@ -1521,7 +1557,9 @@ static int m_can_chip_config(struct net_device *dev) FIELD_PREP(TSCC_TCP_MASK, 0xf) | FIELD_PREP(TSCC_TSS_MASK, TSCC_TSS_INTERNAL)); - m_can_config_endisable(cdev, false); + err = m_can_config_disable(cdev); + if (err) + return err; if (cdev->ops->init) cdev->ops->init(cdev); @@ -1550,7 +1588,11 @@ static int m_can_start(struct net_device *dev) cdev->tx_fifo_putidx = FIELD_GET(TXFQS_TFQPI_MASK, m_can_read(cdev, M_CAN_TXFQS)); - return 0; + ret = m_can_cccr_update_bits(cdev, CCCR_INIT, 0); + if (ret) + netdev_err(dev, "failed to enter normal mode\n"); + + return ret; } static int m_can_set_mode(struct net_device *dev, enum can_mode mode) @@ -1599,43 +1641,37 @@ static int m_can_check_core_release(struct m_can_classdev *cdev) } /* Selectable Non ISO support only in version 3.2.x - * This function checks if the bit is writable. + * Return 1 if the bit is writable, 0 if it is not, or negative on error.
*/ -static bool m_can_niso_supported(struct m_can_classdev *cdev) +static int m_can_niso_supported(struct m_can_classdev *cdev) { - u32 cccr_reg, cccr_poll = 0; - int niso_timeout = -ETIMEDOUT; - int i; + int ret, niso; - m_can_config_endisable(cdev, true); - cccr_reg = m_can_read(cdev, M_CAN_CCCR); - cccr_reg |= CCCR_NISO; - m_can_write(cdev, M_CAN_CCCR, cccr_reg); + ret = m_can_config_enable(cdev); + if (ret) + return ret; - for (i = 0; i <= 10; i++) { - cccr_poll = m_can_read(cdev, M_CAN_CCCR); - if (cccr_poll == cccr_reg) { - niso_timeout = 0; - break; - } + /* First try to set the NISO bit. */ + niso = m_can_cccr_update_bits(cdev, CCCR_NISO, CCCR_NISO); - usleep_range(1, 5); + /* Then clear it again. */ + ret = m_can_cccr_update_bits(cdev, CCCR_NISO, 0); + if (ret) { + dev_err(cdev->dev, "failed to revert the NON-ISO bit in CCCR\n"); + return ret; } - /* Clear NISO */ - cccr_reg &= ~(CCCR_NISO); - m_can_write(cdev, M_CAN_CCCR, cccr_reg); + ret = m_can_config_disable(cdev); + if (ret) + return ret; - m_can_config_endisable(cdev, false); - - /* return false if time out (-ETIMEDOUT), else return true */ - return !niso_timeout; + return niso == 0; } static int m_can_dev_setup(struct m_can_classdev *cdev) { struct net_device *dev = cdev->net; - int m_can_version, err; + int m_can_version, err, niso; m_can_version = m_can_check_core_release(cdev); /* return if unsupported version */ @@ -1684,9 +1720,11 @@ static int m_can_dev_setup(struct m_can_classdev *cdev) cdev->can.bittiming_const = &m_can_bittiming_const_31X; cdev->can.data_bittiming_const = &m_can_data_bittiming_const_31X; - cdev->can.ctrlmode_supported |= - (m_can_niso_supported(cdev) ? - CAN_CTRLMODE_FD_NON_ISO : 0); + niso = m_can_niso_supported(cdev); + if (niso < 0) + return niso; + if (niso) + cdev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO; break; default: dev_err(cdev->dev, "Unsupported version number: %2d", @@ -1694,21 +1732,26 @@ static int m_can_dev_setup(struct m_can_classdev *cdev) return -EINVAL; } - if (cdev->ops->init) - cdev->ops->init(cdev); - - return 0; + /* Forcing standby mode should be redundant, as the chip should be in + * standby after a reset. Write the INIT bit anyway, in case the chip + * was configured by a previous stage.
+ */ + return m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT); } static void m_can_stop(struct net_device *dev) { struct m_can_classdev *cdev = netdev_priv(dev); + int ret; /* disable all interrupts */ m_can_disable_all_interrupts(cdev); /* Set init mode to disengage from the network */ - m_can_config_endisable(cdev, true); + ret = m_can_cccr_update_bits(cdev, CCCR_INIT, CCCR_INIT); + if (ret) + netdev_err(dev, "failed to enter standby mode: %pe\n", + ERR_PTR(ret)); /* set the state as STOPPED */ cdev->can.state = CAN_STATE_STOPPED; diff --git a/drivers/net/can/m_can/m_can.h b/drivers/net/can/m_can/m_can.h index 3a9edc292593..92b2bd8628e6 100644 --- a/drivers/net/can/m_can/m_can.h +++ b/drivers/net/can/m_can/m_can.h @@ -91,7 +91,7 @@ struct m_can_classdev { ktime_t irq_timer_wait; - struct m_can_ops *ops; + const struct m_can_ops *ops; int version; u32 irqstatus; diff --git a/drivers/net/can/m_can/m_can_pci.c b/drivers/net/can/m_can/m_can_pci.c index 45400de4163d..d72fe771dfc7 100644 --- a/drivers/net/can/m_can/m_can_pci.c +++ b/drivers/net/can/m_can/m_can_pci.c @@ -77,7 +77,7 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, return 0; } -static struct m_can_ops m_can_pci_ops = { +static const struct m_can_ops m_can_pci_ops = { .read_reg = iomap_read_reg, .write_reg = iomap_write_reg, .write_fifo = iomap_write_fifo, diff --git a/drivers/net/can/m_can/m_can_platform.c b/drivers/net/can/m_can/m_can_platform.c index df0367124b4c..983ab80260dd 100644 --- a/drivers/net/can/m_can/m_can_platform.c +++ b/drivers/net/can/m_can/m_can_platform.c @@ -68,7 +68,7 @@ static int iomap_write_fifo(struct m_can_classdev *cdev, int offset, return 0; } -static struct m_can_ops m_can_plat_ops = { +static const struct m_can_ops m_can_plat_ops = { .read_reg = iomap_read_reg, .write_reg = iomap_write_reg, .write_fifo = iomap_write_fifo, diff --git a/drivers/net/can/m_can/tcan4x5x-core.c b/drivers/net/can/m_can/tcan4x5x-core.c index a42600dac70d..2f73bf3abad8 100644 --- a/drivers/net/can/m_can/tcan4x5x-core.c +++ b/drivers/net/can/m_can/tcan4x5x-core.c @@ -357,7 +357,7 @@ static int tcan4x5x_get_gpios(struct m_can_classdev *cdev, return 0; } -static struct m_can_ops tcan4x5x_ops = { +static const struct m_can_ops tcan4x5x_ops = { .init = tcan4x5x_init, .read_reg = tcan4x5x_read_reg, .write_reg = tcan4x5x_write_reg, @@ -453,10 +453,17 @@ static int tcan4x5x_can_probe(struct spi_device *spi) goto out_power; } - ret = tcan4x5x_init(mcan_class); + tcan4x5x_check_wake(priv); + + ret = tcan4x5x_write_tcan_reg(mcan_class, TCAN4X5X_INT_EN, 0); if (ret) { - dev_err(&spi->dev, "tcan initialization failed %pe\n", - ERR_PTR(ret)); + dev_err(&spi->dev, "Disabling interrupts failed %pe\n", ERR_PTR(ret)); + goto out_power; + } + + ret = tcan4x5x_clear_interrupts(mcan_class); + if (ret) { + dev_err(&spi->dev, "Clearing interrupts failed %pe\n", ERR_PTR(ret)); goto out_power; } diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index a6829cdc0e81..8c2a7bc64d3d 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -34,12 +34,6 @@ static const struct can_bittiming_const mscan_bittiming_const = { .brp_inc = 1, }; -struct mscan_state { - u8 mode; - u8 canrier; - u8 cantier; -}; - static enum can_state state_map[] = { CAN_STATE_ERROR_ACTIVE, CAN_STATE_ERROR_WARNING, diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index 31c9c127e24b..b50005397463 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ 
b/drivers/net/can/peak_canfd/peak_canfd.c @@ -777,7 +777,7 @@ static const struct net_device_ops peak_canfd_netdev_ops = { }; static int peak_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index b82842718735..c919668bbe7a 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -508,12 +508,6 @@ */ #define RCANFD_CFFIFO_IDX 0 -/* fCAN clock select register settings */ -enum rcar_canfd_fcanclk { - RCANFD_CANFDCLK = 0, /* CANFD clock */ - RCANFD_EXTCLK, /* Externally input clock */ -}; - struct rcar_canfd_global; struct rcar_canfd_hw_info { @@ -545,8 +539,8 @@ struct rcar_canfd_global { struct platform_device *pdev; /* Respective platform device */ struct clk *clkp; /* Peripheral clock */ struct clk *can_clk; /* fCAN clock */ - enum rcar_canfd_fcanclk fcan; /* CANFD or Ext clock */ unsigned long channels_mask; /* Enabled channels mask */ + bool extclk; /* CANFD or Ext clock */ bool fdmode; /* CAN FD or Classical CAN only mode */ struct reset_control *rstc1; struct reset_control *rstc2; @@ -633,28 +627,28 @@ static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg) static inline u32 rcar_canfd_read(void __iomem *base, u32 offset) { - return readl(base + (offset)); + return readl(base + offset); } static inline void rcar_canfd_write(void __iomem *base, u32 offset, u32 val) { - writel(val, base + (offset)); + writel(val, base + offset); } static void rcar_canfd_set_bit(void __iomem *base, u32 reg, u32 val) { - rcar_canfd_update(val, val, base + (reg)); + rcar_canfd_update(val, val, base + reg); } static void rcar_canfd_clear_bit(void __iomem *base, u32 reg, u32 val) { - rcar_canfd_update(val, 0, base + (reg)); + rcar_canfd_update(val, 0, base + reg); } static void rcar_canfd_update_bit(void __iomem *base, u32 reg, u32 mask, u32 val) { - rcar_canfd_update(mask, val, base + (reg)); + rcar_canfd_update(mask, val, base + reg); } static void rcar_canfd_get_data(struct rcar_canfd_channel *priv, @@ -665,7 +659,7 @@ static void rcar_canfd_get_data(struct rcar_canfd_channel *priv, lwords = DIV_ROUND_UP(cf->len, sizeof(u32)); for (i = 0; i < lwords; i++) *((u32 *)cf->data + i) = - rcar_canfd_read(priv->base, off + (i * sizeof(u32))); + rcar_canfd_read(priv->base, off + i * sizeof(u32)); } static void rcar_canfd_put_data(struct rcar_canfd_channel *priv, @@ -675,7 +669,7 @@ static void rcar_canfd_put_data(struct rcar_canfd_channel *priv, lwords = DIV_ROUND_UP(cf->len, sizeof(u32)); for (i = 0; i < lwords; i++) - rcar_canfd_write(priv->base, off + (i * sizeof(u32)), + rcar_canfd_write(priv->base, off + i * sizeof(u32), *((u32 *)cf->data + i)); } @@ -777,7 +771,7 @@ static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv) cfg |= RCANFD_GCFG_CMPOC; /* Set External Clock if selected */ - if (gpriv->fcan != RCANFD_CANFDCLK) + if (gpriv->extclk) cfg |= RCANFD_GCFG_DCS; rcar_canfd_set_bit(gpriv->base, RCANFD_GCFG, cfg); @@ -1941,16 +1935,12 @@ static int rcar_canfd_probe(struct platform_device *pdev) return dev_err_probe(dev, PTR_ERR(gpriv->can_clk), "cannot get canfd clock\n"); - gpriv->fcan = RCANFD_CANFDCLK; - + /* CANFD clock may be further divided within the IP */ + fcan_freq = clk_get_rate(gpriv->can_clk) / info->postdiv; } else { - gpriv->fcan = RCANFD_EXTCLK; + fcan_freq = clk_get_rate(gpriv->can_clk); + gpriv->extclk = true; } - fcan_freq = 
clk_get_rate(gpriv->can_clk); - - if (gpriv->fcan == RCANFD_CANFDCLK) - /* CANFD clock is further divided by (1/2) within the IP */ - fcan_freq /= info->postdiv; addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(addr)) { @@ -2059,8 +2049,9 @@ static int rcar_canfd_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, gpriv); - dev_info(dev, "global operational state (clk %d, fdmode %d)\n", - gpriv->fcan, gpriv->fdmode); + dev_info(dev, "global operational state (%s clk, %s mode)\n", + gpriv->extclk ? "ext" : "canfd", + gpriv->fdmode ? "fd" : "classical"); return 0; fail_channel: diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index 5de1ebb0c6f0..67e5316c6372 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c @@ -122,7 +122,6 @@ struct plx_pci_card { #define TEWS_PCI_VENDOR_ID 0x1498 #define TEWS_PCI_DEVICE_ID_TMPC810 0x032A -#define CTI_PCI_VENDOR_ID 0x12c4 #define CTI_PCI_DEVICE_ID_CRG001 0x0900 #define MOXA_PCI_VENDOR_ID 0x1393 @@ -358,7 +357,7 @@ static const struct pci_device_id plx_pci_tbl[] = { { /* Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card */ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, - CTI_PCI_VENDOR_ID, CTI_PCI_DEVICE_ID_CRG001, + PCI_SUBVENDOR_ID_CONNECT_TECH, CTI_PCI_DEVICE_ID_CRG001, 0, 0, (kernel_ulong_t)&plx_pci_card_info_cti }, diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index e1b8533a602e..148d974ebb21 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -830,7 +830,6 @@ static int hi3110_can_probe(struct spi_device *spi) struct device *dev = &spi->dev; struct net_device *net; struct hi3110_priv *priv; - const void *match; struct clk *clk; u32 freq; int ret; @@ -874,11 +873,7 @@ static int hi3110_can_probe(struct spi_device *spi) CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING; - match = device_get_match_data(dev); - if (match) - priv->model = (enum hi3110_model)(uintptr_t)match; - else - priv->model = spi_get_device_id(spi)->driver_data; + priv->model = (enum hi3110_model)(uintptr_t)spi_get_device_match_data(spi); priv->net = net; priv->clk = clk; diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index 79c4bab5f724..3b8736ff0345 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include #include @@ -482,9 +481,9 @@ static int mcp251x_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) { if (mcp251x_gpio_is_input(offset)) - return GPIOF_DIR_IN; + return GPIO_LINE_DIRECTION_IN; - return GPIOF_DIR_OUT; + return GPIO_LINE_DIRECTION_OUT; } static int mcp251x_gpio_get(struct gpio_chip *chip, unsigned int offset) @@ -1301,7 +1300,6 @@ MODULE_DEVICE_TABLE(spi, mcp251x_id_table); static int mcp251x_can_probe(struct spi_device *spi) { - const void *match = device_get_match_data(&spi->dev); struct net_device *net; struct mcp251x_priv *priv; struct clk *clk; @@ -1339,10 +1337,7 @@ static int mcp251x_can_probe(struct spi_device *spi) priv->can.clock.freq = freq / 2; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY; - if (match) - priv->model = (enum mcp251x_model)(uintptr_t)match; - else - priv->model = spi_get_device_id(spi)->driver_data; + priv->model = (enum mcp251x_model)(uintptr_t)spi_get_device_match_data(spi); priv->net = net; priv->clk = clk; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c 
b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c index bf1589aef1fc..3e7526274e34 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c @@ -2,7 +2,7 @@ // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // -// Copyright (c) 2019, 2020, 2021 Pengutronix, +// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix, // Marc Kleine-Budde // // Based on: @@ -744,6 +744,7 @@ static void mcp251xfd_chip_stop(struct mcp251xfd_priv *priv, mcp251xfd_chip_interrupts_disable(priv); mcp251xfd_chip_rx_int_disable(priv); + mcp251xfd_timestamp_stop(priv); mcp251xfd_chip_sleep(priv); } @@ -763,6 +764,8 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv) if (err) goto out_chip_stop; + mcp251xfd_timestamp_start(priv); + err = mcp251xfd_set_bittiming(priv); if (err) goto out_chip_stop; @@ -791,7 +794,7 @@ static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv) return 0; - out_chip_stop: +out_chip_stop: mcp251xfd_dump(priv); mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); @@ -867,18 +870,18 @@ static int mcp251xfd_get_berr_counter(const struct net_device *ndev, static struct sk_buff * mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv, - struct can_frame **cf, u32 *timestamp) + struct can_frame **cf, u32 *ts_raw) { struct sk_buff *skb; int err; - err = mcp251xfd_get_timestamp(priv, timestamp); + err = mcp251xfd_get_timestamp_raw(priv, ts_raw); if (err) return NULL; skb = alloc_can_err_skb(priv->ndev, cf); if (skb) - mcp251xfd_skb_set_timestamp(priv, skb, *timestamp); + mcp251xfd_skb_set_timestamp_raw(priv, skb, *ts_raw); return skb; } @@ -889,7 +892,7 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv) struct mcp251xfd_rx_ring *ring; struct sk_buff *skb; struct can_frame *cf; - u32 timestamp, rxovif; + u32 ts_raw, rxovif; int err, i; stats->rx_over_errors++; @@ -924,14 +927,14 @@ static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv) return err; } - skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp); + skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw); if (!skb) return 0; cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; - err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw); if (err) stats->rx_fifo_errors++; @@ -948,12 +951,12 @@ static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv) static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv) { struct net_device_stats *stats = &priv->ndev->stats; - u32 bdiag1, timestamp; + u32 bdiag1, ts_raw; struct sk_buff *skb; struct can_frame *cf = NULL; int err; - err = mcp251xfd_get_timestamp(priv, &timestamp); + err = mcp251xfd_get_timestamp_raw(priv, &ts_raw); if (err) return err; @@ -1035,8 +1038,8 @@ static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv) if (!cf) return 0; - mcp251xfd_skb_set_timestamp(priv, skb, timestamp); - err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + mcp251xfd_skb_set_timestamp_raw(priv, skb, ts_raw); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw); if (err) stats->rx_fifo_errors++; @@ -1049,7 +1052,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv) struct sk_buff *skb; struct can_frame *cf = NULL; enum can_state new_state, rx_state, tx_state; - u32 trec, timestamp; + u32 trec, ts_raw; int err; err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec); @@ -1079,7 +1082,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv) /* The skb
allocation might fail, but can_change_state() * handles cf == NULL. */ - skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp); + skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &ts_raw); can_change_state(priv->ndev, cf, tx_state, rx_state); if (new_state == CAN_STATE_BUS_OFF) { @@ -1110,7 +1113,7 @@ static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv) cf->data[7] = bec.rxerr; } - err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); + err = can_rx_offload_queue_timestamp(&priv->offload, skb, ts_raw); if (err) stats->rx_fifo_errors++; @@ -1135,7 +1138,7 @@ mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode) return 0; } - /* According to MCP2517FD errata DS80000792B 1., during a TX + /* According to MCP2517FD errata DS80000792C 1., during a TX * MAB underflow, the controller will transition to Restricted * Operation Mode or Listen Only Mode (depending on SERR2LOM). * @@ -1180,7 +1183,7 @@ static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv) /* TX MAB underflow * - * According to MCP2517FD Errata DS80000792B 1. a TX MAB + * According to MCP2517FD Errata DS80000792C 1. a TX MAB * underflow is indicated by SERRIF and MODIF. * * In addition to the effects mentioned in the Errata, there @@ -1224,7 +1227,7 @@ static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv) /* RX MAB overflow * - * According to MCP2517FD Errata DS80000792B 1. a RX MAB + * According to MCP2517FD Errata DS80000792C 1. a RX MAB * overflow is indicated by SERRIF. * * In addition to the effects mentioned in the Errata, (most @@ -1331,7 +1334,8 @@ mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode) return err; /* Errata Reference: - * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2. + * mcp2517fd: DS80000789C 3., mcp2518fd: DS80000792E 2., + * mcp251863: DS80000984A 2.
* * ECC single error correction does not work in all cases: * @@ -1576,7 +1580,7 @@ static irqreturn_t mcp251xfd_irq(int irq, void *dev_id) handled = IRQ_HANDLED; } while (1); - out_fail: +out_fail: can_rx_offload_threaded_irq_finish(&priv->offload); netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n", @@ -1610,11 +1614,12 @@ static int mcp251xfd_open(struct net_device *ndev) if (err) goto out_mcp251xfd_ring_free; + mcp251xfd_timestamp_init(priv); + err = mcp251xfd_chip_start(priv); if (err) goto out_transceiver_disable; - mcp251xfd_timestamp_init(priv); clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags); can_rx_offload_enable(&priv->offload); @@ -1641,22 +1646,21 @@ static int mcp251xfd_open(struct net_device *ndev) return 0; - out_free_irq: +out_free_irq: free_irq(spi->irq, priv); - out_destroy_workqueue: +out_destroy_workqueue: destroy_workqueue(priv->wq); - out_can_rx_offload_disable: +out_can_rx_offload_disable: can_rx_offload_disable(&priv->offload); set_bit(MCP251XFD_FLAGS_DOWN, priv->flags); - mcp251xfd_timestamp_stop(priv); - out_transceiver_disable: +out_transceiver_disable: mcp251xfd_transceiver_disable(priv); - out_mcp251xfd_ring_free: +out_mcp251xfd_ring_free: mcp251xfd_ring_free(priv); - out_pm_runtime_put: +out_pm_runtime_put: mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); pm_runtime_put(ndev->dev.parent); - out_close_candev: +out_close_candev: close_candev(ndev); return err; @@ -1674,7 +1678,6 @@ static int mcp251xfd_stop(struct net_device *ndev) free_irq(ndev->irq, priv); destroy_workqueue(priv->wq); can_rx_offload_disable(&priv->offload); - mcp251xfd_timestamp_stop(priv); mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); mcp251xfd_transceiver_disable(priv); mcp251xfd_ring_free(priv); @@ -1820,9 +1823,9 @@ mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id, *effective_speed_hz_slow = xfer[0].effective_speed_hz; *effective_speed_hz_fast = xfer[1].effective_speed_hz; - out_kfree_buf_tx: +out_kfree_buf_tx: kfree(buf_tx); - out_kfree_buf_rx: +out_kfree_buf_rx: kfree(buf_rx); return err; @@ -1936,13 +1939,13 @@ static int mcp251xfd_register(struct mcp251xfd_priv *priv) return 0; - out_unregister_candev: +out_unregister_candev: unregister_candev(ndev); - out_chip_sleep: +out_chip_sleep: mcp251xfd_chip_sleep(priv); - out_runtime_disable: +out_runtime_disable: pm_runtime_disable(ndev->dev.parent); - out_runtime_put_noidle: +out_runtime_put_noidle: pm_runtime_put_noidle(ndev->dev.parent); mcp251xfd_clks_and_vdd_disable(priv); @@ -2001,7 +2004,6 @@ MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table); static int mcp251xfd_probe(struct spi_device *spi) { - const void *match; struct net_device *ndev; struct mcp251xfd_priv *priv; struct gpio_desc *rx_int; @@ -2093,16 +2095,11 @@ static int mcp251xfd_probe(struct spi_device *spi) priv->pll_enable = pll_enable; priv->reg_vdd = reg_vdd; priv->reg_xceiver = reg_xceiver; - - match = device_get_match_data(&spi->dev); - if (match) - priv->devtype_data = *(struct mcp251xfd_devtype_data *)match; - else - priv->devtype_data = *(struct mcp251xfd_devtype_data *) - spi_get_device_id(spi)->driver_data; + priv->devtype_data = *(struct mcp251xfd_devtype_data *)spi_get_device_match_data(spi); /* Errata Reference: - * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789C 4. + * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789E 4., + * mcp251863: DS80000984A 4. 
* * The SPI can write corrupted data to the RAM at fast SPI * speeds: @@ -2162,9 +2159,9 @@ static int mcp251xfd_probe(struct spi_device *spi) return 0; - out_can_rx_offload_del: +out_can_rx_offload_del: can_rx_offload_del(&priv->offload); - out_free_candev: +out_free_candev: spi->max_speed_hz = priv->spi_max_speed_hz_orig; free_candev(ndev); diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c index 004eaf96262b..050321345304 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c @@ -94,7 +94,7 @@ static void mcp251xfd_dump_registers(const struct mcp251xfd_priv *priv, kfree(buf); } - out: +out: mcp251xfd_dump_header(iter, MCP251XFD_DUMP_OBJECT_TYPE_REG, reg); } diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c index 92b7bc7f14b9..65150e762007 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c @@ -397,7 +397,7 @@ mcp251xfd_regmap_crc_read(void *context, return err; } - out: +out: memcpy(val_buf, buf_rx->data, val_len); return 0; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c index bfe4caa0c99d..7bd2bcb5cf87 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c @@ -206,6 +206,7 @@ mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr) int i, j; mcp251xfd_for_each_rx_ring(priv, rx_ring, i) { + rx_ring->last_valid = timecounter_read(&priv->tc); rx_ring->head = 0; rx_ring->tail = 0; rx_ring->base = *base; @@ -485,6 +486,8 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv) clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags); } + tx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(tx_ring->obj_num) - + ilog2(tx_ring->obj_num); tx_ring->obj_size = tx_obj_size; rem = priv->rx_obj_num; @@ -507,6 +510,8 @@ int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv) } rx_ring->obj_num = rx_obj_num; + rx_ring->obj_num_shift_to_u8 = BITS_PER_TYPE(rx_ring->obj_num_shift_to_u8) - + ilog2(rx_obj_num); rx_ring->obj_size = rx_obj_size; priv->rx[i] = rx_ring; } diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c index ced8d9c81f8c..fe897f3e4c12 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c @@ -2,7 +2,7 @@ // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // -// Copyright (c) 2019, 2020, 2021 Pengutronix, +// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix, // Marc Kleine-Budde // // Based on: @@ -16,23 +16,14 @@ #include "mcp251xfd.h" -static inline int -mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv, - const struct mcp251xfd_rx_ring *ring, - u8 *rx_head, bool *fifo_empty) +static inline bool mcp251xfd_rx_fifo_sta_empty(const u32 fifo_sta) { - u32 fifo_sta; - int err; + return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF); +} - err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr), - &fifo_sta); - if (err) - return err; - - *rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); - *fifo_empty = !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF); - - return 0; +static inline bool mcp251xfd_rx_fifo_sta_full(const u32 fifo_sta) +{ + return fifo_sta & MCP251XFD_REG_FIFOSTA_TFERFFIF; } static inline int @@ -80,29 +71,49 @@ mcp251xfd_check_rx_tail(const struct mcp251xfd_priv 
*priv, } static int -mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv, - struct mcp251xfd_rx_ring *ring) +mcp251xfd_get_rx_len(const struct mcp251xfd_priv *priv, + const struct mcp251xfd_rx_ring *ring, + u8 *len_p) { - u32 new_head; - u8 chip_rx_head; - bool fifo_empty; + const u8 shift = ring->obj_num_shift_to_u8; + u8 chip_head, tail, len; + u32 fifo_sta; int err; - err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head, - &fifo_empty); - if (err || fifo_empty) + err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr), + &fifo_sta); + if (err) return err; - /* chip_rx_head, is the next RX-Object filled by the HW. - * The new RX head must be >= the old head. + if (mcp251xfd_rx_fifo_sta_empty(fifo_sta)) { + *len_p = 0; + return 0; + } + + if (mcp251xfd_rx_fifo_sta_full(fifo_sta)) { + *len_p = ring->obj_num; + return 0; + } + + chip_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); + + err = mcp251xfd_check_rx_tail(priv, ring); + if (err) + return err; + tail = mcp251xfd_get_rx_tail(ring); + + /* First shift to full u8. The subtraction works on signed + * values, which keeps the difference steady around the u8 + * overflow. The right shift acts on len, which is a u8. E.g. + * with a 16 object FIFO (shift == 4), chip_head == 2 and + * tail == 14 yield (2 << 4) - (14 << 4) == 64 as u8, and + * 64 >> 4 == 4 objects pending. */ - new_head = round_down(ring->head, ring->obj_num) + chip_rx_head; - if (new_head <= ring->head) - new_head += ring->obj_num; + BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(chip_head)); + BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(tail)); + BUILD_BUG_ON(sizeof(ring->obj_num) != sizeof(len)); - ring->head = new_head; + len = (chip_head << shift) - (tail << shift); + *len_p = len >> shift; - return mcp251xfd_check_rx_tail(priv, ring); + return 0; } static void @@ -148,8 +159,6 @@ mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv, if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)) memcpy(cfd->data, hw_rx_obj->data, cfd->len); - - mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts); } static int @@ -160,8 +169,26 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv, struct net_device_stats *stats = &priv->ndev->stats; struct sk_buff *skb; struct canfd_frame *cfd; + u64 timestamp; int err; + /* According to mcp2518fd erratum DS80000789E 6. the FIFOCI + * bits of a FIFOSTA register, here the RX FIFO head index, + * might be corrupted and we might process past the RX FIFO's + * head into old CAN frames. + * + * Compare the timestamp of the currently processed CAN frame + * with the last valid frame received. Abort with -EBADMSG if + * an old CAN frame is detected. + */ + timestamp = timecounter_cyc2time(&priv->tc, hw_rx_obj->ts); + if (timestamp <= ring->last_valid) { + stats->rx_fifo_errors++; + + return -EBADMSG; + } + ring->last_valid = timestamp; + if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) skb = alloc_canfd_skb(priv->ndev, &cfd); else @@ -172,6 +199,7 @@ mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv, return 0; } + mcp251xfd_skb_set_timestamp(skb, timestamp); mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb); err = can_rx_offload_queue_timestamp(&priv->offload, skb, hw_rx_obj->ts); if (err) @@ -197,52 +225,81 @@ mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv, return err; } +static int +mcp251xfd_handle_rxif_ring_uinc(const struct mcp251xfd_priv *priv, + struct mcp251xfd_rx_ring *ring, + u8 len) +{ + int offset; + int err; + + if (!len) + return 0; + + ring->head += len; + + /* Increment the RX FIFO tail pointer 'len' times in a + * single SPI message.
+ * + * Note: + * Calculate offset, so that the SPI transfer ends on + * the last message of the uinc_xfer array, which has + * "cs_change == 0", to properly deactivate the chip + * select. + */ + offset = ARRAY_SIZE(ring->uinc_xfer) - len; + err = spi_sync_transfer(priv->spi, + ring->uinc_xfer + offset, len); + if (err) + return err; + + ring->tail += len; + + return 0; +} + static int mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv, struct mcp251xfd_rx_ring *ring) { struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj; - u8 rx_tail, len; + u8 rx_tail, len, l; int err, i; - err = mcp251xfd_rx_ring_update(priv, ring); + err = mcp251xfd_get_rx_len(priv, ring, &len); if (err) return err; - while ((len = mcp251xfd_get_rx_linear_len(ring))) { - int offset; - + while ((l = mcp251xfd_get_rx_linear_len(ring, len))) { rx_tail = mcp251xfd_get_rx_tail(ring); err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj, - rx_tail, len); + rx_tail, l); if (err) return err; - for (i = 0; i < len; i++) { + for (i = 0; i < l; i++) { err = mcp251xfd_handle_rxif_one(priv, ring, (void *)hw_rx_obj + i * ring->obj_size); - if (err) + + /* -EBADMSG means we're affected by mcp2518fd + * erratum DS80000789E 6., i.e. the timestamp + * in the RX object is older than the last + * valid received CAN frame. Don't process any + * further and mark processed frames as good. + */ + if (err == -EBADMSG) + return mcp251xfd_handle_rxif_ring_uinc(priv, ring, i); + else if (err) return err; } - /* Increment the RX FIFO tail pointer 'len' times in a - * single SPI message. - * - * Note: - * Calculate offset, so that the SPI transfer ends on - * the last message of the uinc_xfer array, which has - * "cs_change == 0", to properly deactivate the chip - * select. - */ - offset = ARRAY_SIZE(ring->uinc_xfer) - len; - err = spi_sync_transfer(priv->spi, - ring->uinc_xfer + offset, len); + err = mcp251xfd_handle_rxif_ring_uinc(priv, ring, l); if (err) return err; - ring->tail += len; + len -= l; } return 0; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c index e5bd57b65aaf..f732556d233a 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c @@ -2,7 +2,7 @@ // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // -// Copyright (c) 2019, 2020, 2021 Pengutronix, +// Copyright (c) 2019, 2020, 2021, 2023 Pengutronix, // Marc Kleine-Budde // // Based on: @@ -16,6 +16,11 @@ #include "mcp251xfd.h" +static inline bool mcp251xfd_tx_fifo_sta_full(u32 fifo_sta) +{ + return !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF); +} + static inline int mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv, u8 *tef_tail) @@ -55,61 +60,44 @@ static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv) return 0; } -static int -mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq) -{ - const struct mcp251xfd_tx_ring *tx_ring = priv->tx; - u32 tef_sta; - int err; - - err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta); - if (err) - return err; - - if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) { - netdev_err(priv->ndev, - "Transmit Event FIFO buffer overflow.\n"); - return -ENOBUFS; - } - - netdev_info(priv->ndev, - "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n", - tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ? - "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ?
- "not empty" : "empty", - seq, priv->tef->tail, priv->tef->head, tx_ring->head); - - /* The Sequence Number in the TEF doesn't match our tef_tail. */ - return -EAGAIN; -} - static int mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, const struct mcp251xfd_hw_tef_obj *hw_tef_obj, unsigned int *frame_len_ptr) { struct net_device_stats *stats = &priv->ndev->stats; + u32 seq, tef_tail_masked, tef_tail; struct sk_buff *skb; - u32 seq, seq_masked, tef_tail_masked, tef_tail; - seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, + /* Use the MCP2517FD mask on the MCP2518FD, too. We only + * compare 7 bits, this is enough to detect old TEF objects. + */ + seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK, hw_tef_obj->flags); - - /* Use the MCP2517FD mask on the MCP2518FD, too. We only - * compare 7 bits, this should be enough to detect - * net-yet-completed, i.e. old TEF objects. - */ - seq_masked = seq & - field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK); tef_tail_masked = priv->tef->tail & field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK); - if (seq_masked != tef_tail_masked) - return mcp251xfd_handle_tefif_recover(priv, seq); + + /* According to mcp2518fd erratum DS80000789E 6. the FIFOCI + * bits of a FIFOSTA register, here the TX FIFO tail index + * might be corrupted and we might process past the TEF FIFO's + * head into old CAN frames. + * + * Compare the sequence number of the currently processed CAN + * frame with the expected sequence number. Abort with + * -EBADMSG if an old CAN frame is detected. + */ + if (seq != tef_tail_masked) { + netdev_dbg(priv->ndev, "%s: chip=0x%02x ring=0x%02x\n", __func__, + seq, tef_tail_masked); + stats->tx_fifo_errors++; + + return -EBADMSG; + } tef_tail = mcp251xfd_get_tef_tail(priv); skb = priv->can.echo_skb[tef_tail]; if (skb) - mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts); + mcp251xfd_skb_set_timestamp_raw(priv, skb, hw_tef_obj->ts); stats->tx_bytes += can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload, tef_tail, hw_tef_obj->ts, @@ -120,28 +108,44 @@ mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, return 0; } -static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv) +static int +mcp251xfd_get_tef_len(struct mcp251xfd_priv *priv, u8 *len_p) { const struct mcp251xfd_tx_ring *tx_ring = priv->tx; - unsigned int new_head; - u8 chip_tx_tail; + const u8 shift = tx_ring->obj_num_shift_to_u8; + u8 chip_tx_tail, tail, len; + u32 fifo_sta; int err; - err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail); + err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(priv->tx->fifo_nr), + &fifo_sta); if (err) return err; - /* chip_tx_tail, is the next TX-Object send by the HW. - * The new TEF head must be >= the old head, ... + if (mcp251xfd_tx_fifo_sta_full(fifo_sta)) { + *len_p = tx_ring->obj_num; + return 0; + } + + chip_tx_tail = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta); + + err = mcp251xfd_check_tef_tail(priv); + if (err) + return err; + tail = mcp251xfd_get_tef_tail(priv); + + /* First shift to full u8. The subtraction works on signed + * values, that keeps the difference steady around the u8 + * overflow. The right shift acts on len, which is an u8. */ - new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail; - if (new_head <= priv->tef->head) - new_head += tx_ring->obj_num; + BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(chip_tx_tail)); + BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(tail)); + BUILD_BUG_ON(sizeof(tx_ring->obj_num) != sizeof(len)); - /* ... 
but it cannot exceed the TX head. */ - priv->tef->head = min(new_head, tx_ring->head); + len = (chip_tx_tail << shift) - (tail << shift); + *len_p = len >> shift; - return mcp251xfd_check_tef_tail(priv); + return 0; } static inline int @@ -182,13 +186,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) u8 tef_tail, len, l; int err, i; - err = mcp251xfd_tef_ring_update(priv); + err = mcp251xfd_get_tef_len(priv, &len); if (err) return err; tef_tail = mcp251xfd_get_tef_tail(priv); - len = mcp251xfd_get_tef_len(priv); - l = mcp251xfd_get_tef_linear_len(priv); + l = mcp251xfd_get_tef_linear_len(priv, len); err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l); if (err) return err; @@ -203,12 +206,12 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) unsigned int frame_len = 0; err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len); - /* -EAGAIN means the Sequence Number in the TEF - * doesn't match our tef_tail. This can happen if we - * read the TEF objects too early. Leave loop let the - * interrupt handler call us again. + /* -EBADMSG means we're affected by mcp2518fd erratum + * DS80000789E 6., i.e. the Sequence Number in the TEF + * doesn't match our tef_tail. Don't process any + * further and mark processed frames as good. */ - if (err == -EAGAIN) + if (err == -EBADMSG) goto out_netif_wake_queue; if (err) return err; @@ -216,13 +219,15 @@ int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) total_frame_len += frame_len; } - out_netif_wake_queue: +out_netif_wake_queue: len = i; /* number of handled goods TEFs */ if (len) { struct mcp251xfd_tef_ring *ring = priv->tef; struct mcp251xfd_tx_ring *tx_ring = priv->tx; int offset; + ring->head += len; + /* Increment the TEF FIFO tail pointer 'len' times in * a single SPI message. * diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c index 712e09186987..202ca0d24d03 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c @@ -2,7 +2,7 @@ // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // -// Copyright (c) 2021 Pengutronix, +// Copyright (c) 2021, 2023 Pengutronix, // Marc Kleine-Budde // @@ -11,20 +11,20 @@ #include "mcp251xfd.h" -static u64 mcp251xfd_timestamp_read(const struct cyclecounter *cc) +static u64 mcp251xfd_timestamp_raw_read(const struct cyclecounter *cc) { const struct mcp251xfd_priv *priv; - u32 timestamp = 0; + u32 ts_raw = 0; int err; priv = container_of(cc, struct mcp251xfd_priv, cc); - err = mcp251xfd_get_timestamp(priv, ×tamp); + err = mcp251xfd_get_timestamp_raw(priv, &ts_raw); if (err) netdev_err(priv->ndev, "Error %d while reading timestamp. 
HW timestamps may be inaccurate.", err); - return timestamp; + return ts_raw; } static void mcp251xfd_timestamp_work(struct work_struct *work) @@ -39,28 +39,21 @@ static void mcp251xfd_timestamp_work(struct work_struct *work) MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ); } -void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv, - struct sk_buff *skb, u32 timestamp) -{ - struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); - u64 ns; - - ns = timecounter_cyc2time(&priv->tc, timestamp); - hwtstamps->hwtstamp = ns_to_ktime(ns); -} - void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv) { struct cyclecounter *cc = &priv->cc; - cc->read = mcp251xfd_timestamp_read; + cc->read = mcp251xfd_timestamp_raw_read; cc->mask = CYCLECOUNTER_MASK(32); cc->shift = 1; cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift); - timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns()); - INIT_DELAYED_WORK(&priv->timestamp, mcp251xfd_timestamp_work); +} + +void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv) +{ + timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns()); schedule_delayed_work(&priv->timestamp, MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ); } diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h index b35bfebd23f2..dcbbd2b2fae8 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd.h +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd.h @@ -2,7 +2,7 @@ * * mcp251xfd - Microchip MCP251xFD Family CAN controller driver * - * Copyright (c) 2019, 2020, 2021 Pengutronix, + * Copyright (c) 2019, 2020, 2021, 2023 Pengutronix, * Marc Kleine-Budde * Copyright (c) 2019 Martin Sperl */ @@ -524,6 +524,7 @@ struct mcp251xfd_tef_ring { /* u8 obj_num equals tx_ring->obj_num */ /* u8 obj_size equals sizeof(struct mcp251xfd_hw_tef_obj) */ + /* u8 obj_num_shift_to_u8 equals tx_ring->obj_num_shift_to_u8 */ union mcp251xfd_write_reg_buf irq_enable_buf; struct spi_transfer irq_enable_xfer; @@ -542,6 +543,7 @@ struct mcp251xfd_tx_ring { u8 nr; u8 fifo_nr; u8 obj_num; + u8 obj_num_shift_to_u8; u8 obj_size; struct mcp251xfd_tx_obj obj[MCP251XFD_TX_OBJ_NUM_MAX]; @@ -552,10 +554,14 @@ struct mcp251xfd_rx_ring { unsigned int head; unsigned int tail; + /* timestamp of the last valid received CAN frame */ + u64 last_valid; + u16 base; u8 nr; u8 fifo_nr; u8 obj_num; + u8 obj_num_shift_to_u8; u8 obj_size; union mcp251xfd_write_reg_buf irq_enable_buf; @@ -809,10 +815,27 @@ mcp251xfd_spi_cmd_write(const struct mcp251xfd_priv *priv, return data; } -static inline int mcp251xfd_get_timestamp(const struct mcp251xfd_priv *priv, - u32 *timestamp) +static inline int mcp251xfd_get_timestamp_raw(const struct mcp251xfd_priv *priv, + u32 *ts_raw) { - return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, timestamp); + return regmap_read(priv->map_reg, MCP251XFD_REG_TBC, ts_raw); +} + +static inline void mcp251xfd_skb_set_timestamp(struct sk_buff *skb, u64 ns) +{ + struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); + + hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +static inline +void mcp251xfd_skb_set_timestamp_raw(const struct mcp251xfd_priv *priv, + struct sk_buff *skb, u32 ts_raw) +{ + u64 ns; + + ns = timecounter_cyc2time(&priv->tc, ts_raw); + mcp251xfd_skb_set_timestamp(skb, ns); } static inline u16 mcp251xfd_get_tef_obj_addr(u8 n) @@ -861,17 +884,8 @@ static inline u8 mcp251xfd_get_tef_tail(const struct mcp251xfd_priv *priv) return priv->tef->tail & (priv->tx->obj_num - 1); } -static inline u8 mcp251xfd_get_tef_len(const struct mcp251xfd_priv *priv) +static 
inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv, u8 len) { - return priv->tef->head - priv->tef->tail; -} - -static inline u8 mcp251xfd_get_tef_linear_len(const struct mcp251xfd_priv *priv) -{ - u8 len; - - len = mcp251xfd_get_tef_len(priv); - return min_t(u8, len, priv->tx->obj_num - mcp251xfd_get_tef_tail(priv)); } @@ -914,18 +928,9 @@ static inline u8 mcp251xfd_get_rx_tail(const struct mcp251xfd_rx_ring *ring) return ring->tail & (ring->obj_num - 1); } -static inline u8 mcp251xfd_get_rx_len(const struct mcp251xfd_rx_ring *ring) -{ - return ring->head - ring->tail; -} - static inline u8 -mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring) +mcp251xfd_get_rx_linear_len(const struct mcp251xfd_rx_ring *ring, u8 len) { - u8 len; - - len = mcp251xfd_get_rx_len(ring); - return min_t(u8, len, ring->obj_num - mcp251xfd_get_rx_tail(ring)); } @@ -951,9 +956,8 @@ void mcp251xfd_ring_free(struct mcp251xfd_priv *priv); int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv); int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv); int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv); -void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv, - struct sk_buff *skb, u32 timestamp); void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv); +void mcp251xfd_timestamp_start(struct mcp251xfd_priv *priv); void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv); void mcp251xfd_tx_obj_write_sync(struct work_struct *work); diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig index bd58c636d465..3e1fba12c0c3 100644 --- a/drivers/net/can/usb/Kconfig +++ b/drivers/net/can/usb/Kconfig @@ -91,6 +91,7 @@ config CAN_KVASER_USB - Kvaser Leaf Light R v2 - Kvaser Mini PCI Express HS - Kvaser Mini PCI Express 2xHS + - Kvaser Mini PCIe 1xCAN - Kvaser USBcan Light 2xHS - Kvaser USBcan II HS/HS - Kvaser USBcan II HS/LS @@ -111,12 +112,14 @@ config CAN_KVASER_USB - Kvaser USBcan Light 4xHS - Kvaser USBcan Pro 2xHS v2 - Kvaser USBcan Pro 4xHS + - Kvaser USBcan Pro 5xCAN - Kvaser USBcan Pro 5xHS - Kvaser U100 - Kvaser U100P - Kvaser U100S - ATI Memorator Pro 2xHS v2 - ATI USBcan Pro 2xHS v2 + - Vining 800 If unsure, say N. 
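The obj_num_shift_to_u8 arithmetic used by mcp251xfd_get_rx_len() and mcp251xfd_get_tef_len() in the hunks above is compact enough to misread, so here is a minimal, self-contained C sketch of the same trick; ring_len() and the example FIFO geometry are illustrative names only, not part of the patch. Shifting both the chip-side and driver-side indices up to the full u8 range makes the unsigned 8-bit wraparound perform the modulo by obj_num, and shifting back down yields the number of pending objects:

#include <stdint.h>
#include <stdio.h>

/* chip_idx and drv_idx are FIFO indices in [0, obj_num); shift is
 * BITS_PER_TYPE(u8) - ilog2(obj_num), as computed in mcp251xfd_ring_alloc().
 */
static uint8_t ring_len(uint8_t chip_idx, uint8_t drv_idx, uint8_t shift)
{
	/* The subtraction wraps modulo 256; pre-shifting the indices makes
	 * the u8 overflow coincide with the FIFO wraparound.
	 */
	uint8_t len = (uint8_t)((chip_idx << shift) - (drv_idx << shift));

	return len >> shift;
}

int main(void)
{
	const uint8_t shift = 4;	/* 8 - ilog2(16): a 16 object FIFO */

	printf("%u\n", ring_len(2, 14, shift));	/* head wrapped: prints 4 */
	printf("%u\n", ring_len(9, 3, shift));	/* no wrap: prints 6 */
	return 0;
}

The BUILD_BUG_ON()s added by the patch pin obj_num, the chip index and len to u8 precisely so that this wraparound behaviour cannot silently change.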
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 65c962f76898..bc86e9b329fd 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -40,6 +40,9 @@ #define USB_ABE_CANDEBUGGER_FD_VENDOR_ID 0x16d0 #define USB_ABE_CANDEBUGGER_FD_PRODUCT_ID 0x10b8 +#define USB_XYLANTA_SAINT3_VENDOR_ID 0x16d0 +#define USB_XYLANTA_SAINT3_PRODUCT_ID 0x0f30 + #define GS_USB_ENDPOINT_IN 1 #define GS_USB_ENDPOINT_OUT 2 @@ -1145,7 +1148,7 @@ static int gs_usb_set_phys_id(struct net_device *netdev, } static int gs_usb_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct gs_can *dev = netdev_priv(netdev); @@ -1530,6 +1533,8 @@ static const struct usb_device_id gs_usb_table[] = { USB_CES_CANEXT_FD_PRODUCT_ID, 0) }, { USB_DEVICE_INTERFACE_NUMBER(USB_ABE_CANDEBUGGER_FD_VENDOR_ID, USB_ABE_CANDEBUGGER_FD_PRODUCT_ID, 0) }, + { USB_DEVICE_INTERFACE_NUMBER(USB_XYLANTA_SAINT3_VENDOR_ID, + USB_XYLANTA_SAINT3_PRODUCT_ID, 0) }, {} /* Terminating entry */ }; diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index 024169461cad..daa34b532aa8 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -89,6 +89,9 @@ #define USB_HYBRID_CANLIN_PRODUCT_ID 0x0115 #define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 0x0116 #define USB_LEAF_V3_PRODUCT_ID 0x0117 +#define USB_VINING_800_PRODUCT_ID 0x0119 +#define USB_USBCAN_PRO_5XCAN_PRODUCT_ID 0x011A +#define USB_MINI_PCIE_1XCAN_PRODUCT_ID 0x011B static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = { .quirks = KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP, @@ -239,6 +242,12 @@ static const struct usb_device_id kvaser_usb_table[] = { .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_V3_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_VINING_800_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5XCAN_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, + { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_1XCAN_PRODUCT_ID), + .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { } }; MODULE_DEVICE_TABLE(usb, kvaser_usb_table); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 1efa39e134f4..3d68fef46ded 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -897,7 +897,7 @@ int peak_usb_set_eeprom(struct net_device *netdev, return 0; } -int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) +int pcan_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.h b/drivers/net/can/usb/peak_usb/pcan_usb_core.h index f6cf84bb718f..abab00930b9d 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.h +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.h @@ -145,7 +145,7 @@ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *tv); int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high); void peak_usb_async_complete(struct urb *urb); void peak_usb_restart_complete(struct peak_usb_device *dev); -int pcan_get_ts_info(struct net_device *dev, 
struct ethtool_ts_info *info); +int pcan_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info); /* common 32-bit CAN channel ID ethtool management */ int peak_usb_get_eeprom_len(struct net_device *netdev); diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index fae0120473f8..d944911d7f05 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -6,7 +6,7 @@ * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy * * Description: - * This driver is developed for Axi CAN IP and for Zynq CANPS Controller. + * This driver is developed for AXI CAN IP, AXI CANFD IP, CANPS and CANFD PS Controller. */ #include diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 3092b391031a..2d10b4d6cfbb 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -102,6 +102,7 @@ config NET_DSA_SMSC_LAN9303 tristate select NET_DSA_TAG_LAN9303 select REGMAP + imply SMSC_PHY help This enables support for the Microchip LAN9303/LAN9354 3 port ethernet switch chips. @@ -126,7 +127,7 @@ config NET_DSA_SMSC_LAN9303_MDIO config NET_DSA_VITESSE_VSC73XX tristate - select NET_DSA_TAG_NONE + select NET_DSA_TAG_VSC73XX_8021Q select FIXED_PHY select VITESSE_PHY select GPIOLIB diff --git a/drivers/net/dsa/hirschmann/hellcreek.h b/drivers/net/dsa/hirschmann/hellcreek.h index 6874cb9dc361..9c2ed2ba79da 100644 --- a/drivers/net/dsa/hirschmann/hellcreek.h +++ b/drivers/net/dsa/hirschmann/hellcreek.h @@ -12,14 +12,16 @@ #include #include +#include #include -#include -#include -#include #include +#include #include #include #include +#include +#include + #include #include diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c index bd7aacc71a63..ca2500aba96f 100644 --- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c +++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.c @@ -16,7 +16,7 @@ #include "hellcreek_ptp.h" int hellcreek_get_ts_info(struct dsa_switch *ds, int port, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct hellcreek *hellcreek = ds->priv; diff --git a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h index 71af77efb28b..7d88da2134f2 100644 --- a/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h +++ b/drivers/net/dsa/hirschmann/hellcreek_hwtstamp.h @@ -48,7 +48,7 @@ void hellcreek_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb); int hellcreek_get_ts_info(struct dsa_switch *ds, int port, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); long hellcreek_hwtstamp_work(struct ptp_clock_info *ptp); diff --git a/drivers/net/dsa/lan9303_i2c.c b/drivers/net/dsa/lan9303_i2c.c index bbbec322bc4f..c62d27cdc117 100644 --- a/drivers/net/dsa/lan9303_i2c.c +++ b/drivers/net/dsa/lan9303_i2c.c @@ -89,7 +89,7 @@ static void lan9303_i2c_shutdown(struct i2c_client *client) /*-------------------------------------------------------------------------*/ static const struct i2c_device_id lan9303_i2c_id[] = { - { "lan9303", 0 }, + { "lan9303" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(i2c, lan9303_i2c_id); diff --git a/drivers/net/dsa/lan9303_mdio.c b/drivers/net/dsa/lan9303_mdio.c index 167a86f39f27..0ac4857e5ee8 100644 --- a/drivers/net/dsa/lan9303_mdio.c +++ b/drivers/net/dsa/lan9303_mdio.c @@ -58,19 +58,19 @@ static int lan9303_mdio_read(void *ctx, uint32_t reg, uint32_t *val) return 0; } -static int lan9303_mdio_phy_write(struct lan9303 *chip, int phy, int reg, +static int 
lan9303_mdio_phy_write(struct lan9303 *chip, int addr, int reg, u16 val) { struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev); - return mdiobus_write_nested(sw_dev->device->bus, phy, reg, val); + return mdiobus_write_nested(sw_dev->device->bus, addr, reg, val); } -static int lan9303_mdio_phy_read(struct lan9303 *chip, int phy, int reg) +static int lan9303_mdio_phy_read(struct lan9303 *chip, int addr, int reg) { struct lan9303_mdio *sw_dev = dev_get_drvdata(chip->dev); - return mdiobus_read_nested(sw_dev->device->bus, phy, reg); + return mdiobus_read_nested(sw_dev->device->bus, addr, reg); } static const struct lan9303_phy_ops lan9303_mdio_phy_ops = { diff --git a/drivers/net/dsa/lantiq_gswip.c b/drivers/net/dsa/lantiq_gswip.c index a557049e34f5..fcd4505f4925 100644 --- a/drivers/net/dsa/lantiq_gswip.c +++ b/drivers/net/dsa/lantiq_gswip.c @@ -236,7 +236,9 @@ #define GSWIP_TABLE_ACTIVE_VLAN 0x01 #define GSWIP_TABLE_VLAN_MAPPING 0x02 #define GSWIP_TABLE_MAC_BRIDGE 0x0b -#define GSWIP_TABLE_MAC_BRIDGE_STATIC 0x01 /* Static not, aging entry */ +#define GSWIP_TABLE_MAC_BRIDGE_KEY3_FID GENMASK(5, 0) /* Filtering identifier */ +#define GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT GENMASK(7, 4) /* Port on learned entries */ +#define GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC BIT(0) /* Static, non-aging entry */ #define XRX200_GPHY_FW_ALIGN (16 * 1024) @@ -653,14 +655,8 @@ static int gswip_add_single_port_br(struct gswip_priv *priv, int port, bool add) struct gswip_pce_table_entry vlan_active = {0,}; struct gswip_pce_table_entry vlan_mapping = {0,}; unsigned int cpu_port = priv->hw_info->cpu_port; - unsigned int max_ports = priv->hw_info->max_ports; int err; - if (port >= max_ports) { - dev_err(priv->dev, "single port for %i supported\n", port); - return -EIO; - } - vlan_active.index = port + 1; vlan_active.table = GSWIP_TABLE_ACTIVE_VLAN; vlan_active.key[0] = 0; /* vid */ @@ -695,13 +691,18 @@ static int gswip_port_enable(struct dsa_switch *ds, int port, struct gswip_priv *priv = ds->priv; int err; - if (!dsa_is_user_port(ds, port)) - return 0; - if (!dsa_is_cpu_port(ds, port)) { + u32 mdio_phy = 0; + err = gswip_add_single_port_br(priv, port, true); if (err) return err; + + if (phydev) + mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK; + + gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy, + GSWIP_MDIO_PHYp(port)); } /* RMON Counter Enable for port */ @@ -714,16 +715,6 @@ static int gswip_port_enable(struct dsa_switch *ds, int port, gswip_switch_mask(priv, 0, GSWIP_SDMA_PCTRL_EN, GSWIP_SDMA_PCTRLp(port)); - if (!dsa_is_cpu_port(ds, port)) { - u32 mdio_phy = 0; - - if (phydev) - mdio_phy = phydev->mdio.addr & GSWIP_MDIO_PHY_ADDR_MASK; - - gswip_mdio_mask(priv, GSWIP_MDIO_PHY_ADDR_MASK, mdio_phy, - GSWIP_MDIO_PHYp(port)); - } - return 0; } @@ -731,9 +722,6 @@ static void gswip_port_disable(struct dsa_switch *ds, int port) { struct gswip_priv *priv = ds->priv; - if (!dsa_is_user_port(ds, port)) - return; - gswip_switch_mask(priv, GSWIP_FDMA_PCTRL_EN, 0, GSWIP_FDMA_PCTRLp(port)); gswip_switch_mask(priv, GSWIP_SDMA_PCTRL_EN, 0, @@ -792,7 +780,7 @@ static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port, } if (vlan_filtering) { - /* Use port based VLAN tag */ + /* Use tag based VLAN */ gswip_switch_mask(priv, GSWIP_PCE_VCTRL_VSR, GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR | @@ -801,7 +789,7 @@ static int gswip_port_vlan_filtering(struct dsa_switch *ds, int port, gswip_switch_mask(priv, GSWIP_PCE_PCTRL_0_TVM, 0, GSWIP_PCE_PCTRL_0p(port)); } else { - /* Use port based VLAN tag */ + /* Use 
port based VLAN */ gswip_switch_mask(priv, GSWIP_PCE_VCTRL_UVR | GSWIP_PCE_VCTRL_VIMR | GSWIP_PCE_VCTRL_VEMR, @@ -836,7 +824,7 @@ static int gswip_setup(struct dsa_switch *ds) err = gswip_pce_load_microcode(priv); if (err) { - dev_err(priv->dev, "writing PCE microcode failed, %i", err); + dev_err(priv->dev, "writing PCE microcode failed, %i\n", err); return err; } @@ -898,8 +886,6 @@ static int gswip_setup(struct dsa_switch *ds) ds->mtu_enforcement_ingress = true; - gswip_port_enable(ds, cpu_port, NULL); - ds->configure_vlan_while_not_filtering = false; return 0; @@ -1314,10 +1300,11 @@ static void gswip_port_fast_age(struct dsa_switch *ds, int port) if (!mac_bridge.valid) continue; - if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) + if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) continue; - if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) != port) + if (port != FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT, + mac_bridge.val[0])) continue; mac_bridge.valid = false; @@ -1383,7 +1370,8 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port, } if (fid == -1) { - dev_err(priv->dev, "Port not part of a bridge\n"); + dev_err(priv->dev, "no FID found for bridge %s\n", + bridge->name); return -EINVAL; } @@ -1392,9 +1380,9 @@ static int gswip_port_fdb(struct dsa_switch *ds, int port, mac_bridge.key[0] = addr[5] | (addr[4] << 8); mac_bridge.key[1] = addr[3] | (addr[2] << 8); mac_bridge.key[2] = addr[1] | (addr[0] << 8); - mac_bridge.key[3] = fid; + mac_bridge.key[3] = FIELD_PREP(GSWIP_TABLE_MAC_BRIDGE_KEY3_FID, fid); mac_bridge.val[0] = add ? BIT(port) : 0; /* port map */ - mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_STATIC; + mac_bridge.val[1] = GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC; mac_bridge.valid = add; err = gswip_pce_table_entry_write(priv, &mac_bridge); @@ -1423,7 +1411,7 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port, { struct gswip_priv *priv = ds->priv; struct gswip_pce_table_entry mac_bridge = {0,}; - unsigned char addr[6]; + unsigned char addr[ETH_ALEN]; int i; int err; @@ -1448,14 +1436,15 @@ static int gswip_port_fdb_dump(struct dsa_switch *ds, int port, addr[2] = (mac_bridge.key[1] >> 8) & 0xff; addr[1] = mac_bridge.key[2] & 0xff; addr[0] = (mac_bridge.key[2] >> 8) & 0xff; - if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_STATIC) { + if (mac_bridge.val[1] & GSWIP_TABLE_MAC_BRIDGE_VAL1_STATIC) { if (mac_bridge.val[0] & BIT(port)) { err = cb(addr, 0, true, data); if (err) return err; } } else { - if (((mac_bridge.val[0] & GENMASK(7, 4)) >> 4) == port) { + if (port == FIELD_GET(GSWIP_TABLE_MAC_BRIDGE_VAL0_PORT, + mac_bridge.val[0])) { err = cb(addr, 0, false, data); if (err) return err; @@ -1474,12 +1463,11 @@ static int gswip_port_max_mtu(struct dsa_switch *ds, int port) static int gswip_port_change_mtu(struct dsa_switch *ds, int port, int new_mtu) { struct gswip_priv *priv = ds->priv; - int cpu_port = priv->hw_info->cpu_port; /* CPU port always has maximum mtu of user ports, so use it to set * switch frame size, including 8 byte special header. 
*/ - if (port == cpu_port) { + if (dsa_is_cpu_port(ds, port)) { new_mtu += 8; gswip_switch_w(priv, VLAN_ETH_HLEN + new_mtu + ETH_FCS_LEN, GSWIP_MAC_FLEN); @@ -1516,6 +1504,7 @@ static void gswip_xrx200_phylink_get_caps(struct dsa_switch *ds, int port, case 2: case 3: case 4: + case 6: __set_bit(PHY_INTERFACE_MODE_INTERNAL, config->supported_interfaces); break; @@ -1547,6 +1536,7 @@ static void gswip_xrx300_phylink_get_caps(struct dsa_switch *ds, int port, case 2: case 3: case 4: + case 6: __set_bit(PHY_INTERFACE_MODE_INTERNAL, config->supported_interfaces); break; @@ -1790,7 +1780,7 @@ static u32 gswip_bcm_ram_entry_read(struct gswip_priv *priv, u32 table, err = gswip_switch_r_timeout(priv, GSWIP_BM_RAM_CTRL, GSWIP_BM_RAM_CTRL_BAS); if (err) { - dev_err(priv->dev, "timeout while reading table: %u, index: %u", + dev_err(priv->dev, "timeout while reading table: %u, index: %u\n", table, index); return 0; } @@ -1929,11 +1919,9 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph msleep(200); ret = request_firmware(&fw, gphy_fw->fw_name, dev); - if (ret) { - dev_err(dev, "failed to load firmware: %s, error: %i\n", - gphy_fw->fw_name, ret); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, "failed to load firmware: %s\n", + gphy_fw->fw_name); /* GPHY cores need the firmware code in a persistent and contiguous * memory area with a 16 kB boundary aligned start address. @@ -1946,9 +1934,9 @@ static int gswip_gphy_fw_load(struct gswip_priv *priv, struct gswip_gphy_fw *gph dev_addr = ALIGN(dma_addr, XRX200_GPHY_FW_ALIGN); memcpy(fw_addr, fw->data, fw->size); } else { - dev_err(dev, "failed to alloc firmware memory\n"); release_firmware(fw); - return -ENOMEM; + return dev_err_probe(dev, -ENOMEM, + "failed to alloc firmware memory\n"); } release_firmware(fw); @@ -1975,8 +1963,8 @@ static int gswip_gphy_fw_probe(struct gswip_priv *priv, gphy_fw->clk_gate = devm_clk_get(dev, gphyname); if (IS_ERR(gphy_fw->clk_gate)) { - dev_err(dev, "Failed to lookup gate clock\n"); - return PTR_ERR(gphy_fw->clk_gate); + return dev_err_probe(dev, PTR_ERR(gphy_fw->clk_gate), + "Failed to lookup gate clock\n"); } ret = of_property_read_u32(gphy_fw_np, "reg", &gphy_fw->fw_addr_offset); @@ -1996,8 +1984,8 @@ static int gswip_gphy_fw_probe(struct gswip_priv *priv, gphy_fw->fw_name = priv->gphy_fw_name_cfg->ge_firmware_name; break; default: - dev_err(dev, "Unknown GPHY mode %d\n", gphy_mode); - return -EINVAL; + return dev_err_probe(dev, -EINVAL, "Unknown GPHY mode %d\n", + gphy_mode); } gphy_fw->reset = of_reset_control_array_get_exclusive(gphy_fw_np); @@ -2019,7 +2007,7 @@ static void gswip_gphy_fw_remove(struct gswip_priv *priv, ret = regmap_write(priv->rcu_regmap, gphy_fw->fw_addr_offset, 0); if (ret) - dev_err(priv->dev, "can not reset GPHY FW pointer"); + dev_err(priv->dev, "can not reset GPHY FW pointer\n"); clk_disable_unprepare(gphy_fw->clk_gate); @@ -2048,8 +2036,9 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv, priv->gphy_fw_name_cfg = &xrx200a2x_gphy_data; break; default: - dev_err(dev, "unknown GSWIP version: 0x%x", version); - return -ENOENT; + return dev_err_probe(dev, -ENOENT, + "unknown GSWIP version: 0x%x\n", + version); } } @@ -2057,10 +2046,9 @@ static int gswip_gphy_fw_list(struct gswip_priv *priv, if (match && match->data) priv->gphy_fw_name_cfg = match->data; - if (!priv->gphy_fw_name_cfg) { - dev_err(dev, "GPHY compatible type not supported"); - return -ENOENT; - } + if (!priv->gphy_fw_name_cfg) + return dev_err_probe(dev, -ENOENT, + "GPHY compatible 
type not supported\n"); priv->num_gphy_fw = of_get_available_child_count(gphy_fw_list_np); if (!priv->num_gphy_fw) @@ -2161,8 +2149,8 @@ static int gswip_probe(struct platform_device *pdev) return -EINVAL; break; default: - dev_err(dev, "unknown GSWIP version: 0x%x", version); - return -ENOENT; + return dev_err_probe(dev, -ENOENT, + "unknown GSWIP version: 0x%x\n", version); } /* bring up the mdio bus */ @@ -2170,28 +2158,27 @@ static int gswip_probe(struct platform_device *pdev) if (gphy_fw_np) { err = gswip_gphy_fw_list(priv, gphy_fw_np, version); of_node_put(gphy_fw_np); - if (err) { - dev_err(dev, "gphy fw probe failed\n"); - return err; - } + if (err) + return dev_err_probe(dev, err, + "gphy fw probe failed\n"); } /* bring up the mdio bus */ err = gswip_mdio(priv); if (err) { - dev_err(dev, "mdio probe failed\n"); + dev_err_probe(dev, err, "mdio probe failed\n"); goto gphy_fw_remove; } err = dsa_register_switch(priv->ds); if (err) { - dev_err(dev, "dsa switch register failed: %i\n", err); + dev_err_probe(dev, err, "dsa switch registration failed\n"); goto gphy_fw_remove; } if (!dsa_is_cpu_port(priv->ds, priv->hw_info->cpu_port)) { - dev_err(dev, "wrong CPU port defined, HW only supports port: %i", - priv->hw_info->cpu_port); - err = -EINVAL; + err = dev_err_probe(dev, -EINVAL, + "wrong CPU port defined, HW only supports port: %i\n", + priv->hw_info->cpu_port); goto disable_switch; } diff --git a/drivers/net/dsa/microchip/ksz9477_i2c.c b/drivers/net/dsa/microchip/ksz9477_i2c.c index 82bebee4615c..7d7560f23a73 100644 --- a/drivers/net/dsa/microchip/ksz9477_i2c.c +++ b/drivers/net/dsa/microchip/ksz9477_i2c.c @@ -72,8 +72,8 @@ static void ksz9477_i2c_shutdown(struct i2c_client *i2c) } static const struct i2c_device_id ksz9477_i2c_id[] = { - { "ksz9477-switch", 0 }, - {}, + { "ksz9477-switch" }, + {} }; MODULE_DEVICE_TABLE(i2c, ksz9477_i2c_id); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 0580b2fee21c..b074b4bb0629 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -3116,7 +3116,8 @@ static void ksz_set_xmii(struct ksz_device *dev, int port, /* On KSZ9893, disable RGMII in-band status support */ if (dev->chip_id == KSZ9893_CHIP_ID || dev->chip_id == KSZ8563_CHIP_ID || - dev->chip_id == KSZ9563_CHIP_ID) + dev->chip_id == KSZ9563_CHIP_ID || + is_lan937x(dev)) data8 &= ~P_MII_MAC_MODE; break; default: @@ -3917,6 +3918,13 @@ static int ksz_hsr_join(struct dsa_switch *ds, int port, struct net_device *hsr, return -EOPNOTSUPP; } + /* KSZ9477 can only perform HSR offloading for up to two ports */ + if (hweight8(dev->hsr_ports) >= 2) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot offload more than two ports - using software HSR"); + return -EOPNOTSUPP; + } + /* Self MAC address filtering, to avoid frames traversing * the HSR ring more than once. 
*/ diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index ee7db46e469d..5f0a628b9849 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -22,6 +22,7 @@ /* all KSZ switches count ports from 1 */ #define KSZ_PORT_1 0 #define KSZ_PORT_2 1 +#define KSZ_PORT_4 3 struct ksz_device; struct ksz_port; @@ -637,6 +638,12 @@ static inline int is_lan937x(struct ksz_device *dev) dev->chip_id == LAN9374_CHIP_ID; } +static inline bool is_lan937x_tx_phy(struct ksz_device *dev, int port) +{ + return (dev->chip_id == LAN9371_CHIP_ID || + dev->chip_id == LAN9372_CHIP_ID) && port == KSZ_PORT_4; +} + /* STP State Defines */ #define PORT_TX_ENABLE BIT(2) #define PORT_RX_ENABLE BIT(1) diff --git a/drivers/net/dsa/microchip/ksz_ptp.c b/drivers/net/dsa/microchip/ksz_ptp.c index 1fe105913c75..f0bd46e5d4ec 100644 --- a/drivers/net/dsa/microchip/ksz_ptp.c +++ b/drivers/net/dsa/microchip/ksz_ptp.c @@ -293,7 +293,7 @@ static int ksz_ptp_enable_mode(struct ksz_device *dev) /* The function is return back the capability of timestamping feature when * requested through ethtool -T utility */ -int ksz_get_ts_info(struct dsa_switch *ds, int port, struct ethtool_ts_info *ts) +int ksz_get_ts_info(struct dsa_switch *ds, int port, struct kernel_ethtool_ts_info *ts) { struct ksz_device *dev = ds->priv; struct ksz_ptp_data *ptp_data; diff --git a/drivers/net/dsa/microchip/ksz_ptp.h b/drivers/net/dsa/microchip/ksz_ptp.h index 0ca8ca4f804e..2f1783c0d723 100644 --- a/drivers/net/dsa/microchip/ksz_ptp.h +++ b/drivers/net/dsa/microchip/ksz_ptp.h @@ -38,7 +38,7 @@ int ksz_ptp_clock_register(struct dsa_switch *ds); void ksz_ptp_clock_unregister(struct dsa_switch *ds); int ksz_get_ts_info(struct dsa_switch *ds, int port, - struct ethtool_ts_info *ts); + struct kernel_ethtool_ts_info *ts); int ksz_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr); int ksz_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr); void ksz_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb); diff --git a/drivers/net/dsa/microchip/lan937x_main.c b/drivers/net/dsa/microchip/lan937x_main.c index b479a628b1ae..824d9309a3d3 100644 --- a/drivers/net/dsa/microchip/lan937x_main.c +++ b/drivers/net/dsa/microchip/lan937x_main.c @@ -55,6 +55,9 @@ static int lan937x_vphy_ind_addr_wr(struct ksz_device *dev, int addr, int reg) u16 addr_base = REG_PORT_T1_PHY_CTRL_BASE; u16 temp; + if (is_lan937x_tx_phy(dev, addr)) + addr_base = REG_PORT_TX_PHY_CTRL_BASE; + /* get register address based on the logical port */ temp = PORT_CTRL_ADDR(addr, (addr_base + (reg << 2))); @@ -320,6 +323,9 @@ void lan937x_phylink_get_caps(struct ksz_device *dev, int port, /* MII/RMII/RGMII ports */ config->mac_capabilities |= MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_100HD | MAC_10 | MAC_1000FD; + } else if (is_lan937x_tx_phy(dev, port)) { + config->mac_capabilities |= MAC_ASYM_PAUSE | MAC_SYM_PAUSE | + MAC_100HD | MAC_10; } } @@ -370,23 +376,33 @@ int lan937x_setup(struct dsa_switch *ds) ds->vlan_filtering_is_global = true; /* Enable aggressive back off for half duplex & UNH mode */ - lan937x_cfg(dev, REG_SW_MAC_CTRL_0, - (SW_PAUSE_UNH_MODE | SW_NEW_BACKOFF | SW_AGGR_BACKOFF), - true); + ret = lan937x_cfg(dev, REG_SW_MAC_CTRL_0, (SW_PAUSE_UNH_MODE | + SW_NEW_BACKOFF | + SW_AGGR_BACKOFF), true); + if (ret < 0) + return ret; /* If NO_EXC_COLLISION_DROP bit is set, the switch will not drop * packets when 16 or more collisions occur */ - lan937x_cfg(dev, REG_SW_MAC_CTRL_1, 
NO_EXC_COLLISION_DROP, true); + ret = lan937x_cfg(dev, REG_SW_MAC_CTRL_1, NO_EXC_COLLISION_DROP, true); + if (ret < 0) + return ret; /* enable global MIB counter freeze function */ - lan937x_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true); + ret = lan937x_cfg(dev, REG_SW_MAC_CTRL_6, SW_MIB_COUNTER_FREEZE, true); + if (ret < 0) + return ret; /* disable CLK125 & CLK25, 1: disable, 0: enable */ - lan937x_cfg(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, - (SW_CLK125_ENB | SW_CLK25_ENB), true); + ret = lan937x_cfg(dev, REG_SW_GLOBAL_OUTPUT_CTRL__1, + (SW_CLK125_ENB | SW_CLK25_ENB), true); + if (ret < 0) + return ret; - return 0; + /* Disable global VPHY support. Related to CPU interface only? */ + return ksz_rmw32(dev, REG_SW_CFG_STRAP_OVR, SW_VPHY_DISABLE, + SW_VPHY_DISABLE); } void lan937x_teardown(struct dsa_switch *ds) diff --git a/drivers/net/dsa/microchip/lan937x_reg.h b/drivers/net/dsa/microchip/lan937x_reg.h index 45b606b6429f..2f22a9d01de3 100644 --- a/drivers/net/dsa/microchip/lan937x_reg.h +++ b/drivers/net/dsa/microchip/lan937x_reg.h @@ -37,6 +37,10 @@ #define SW_CLK125_ENB BIT(1) #define SW_CLK25_ENB BIT(0) +/* 2 - PHY Control */ +#define REG_SW_CFG_STRAP_OVR 0x0214 +#define SW_VPHY_DISABLE BIT(31) + /* 3 - Operation Control */ #define REG_SW_OPERATION 0x0300 @@ -147,6 +151,7 @@ /* 1 - Phy */ #define REG_PORT_T1_PHY_CTRL_BASE 0x0100 +#define REG_PORT_TX_PHY_CTRL_BASE 0x0280 /* 3 - xMII */ #define PORT_SGMII_SEL BIT(7) diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 598434d8d6e4..ec18e68bf3a8 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -1302,13 +1302,62 @@ mt7530_stp_state_set(struct dsa_switch *ds, int port, u8 state) FID_PST(FID_BRIDGED, stp_state)); } +static void mt7530_update_port_member(struct mt7530_priv *priv, int port, + const struct net_device *bridge_dev, + bool join) __must_hold(&priv->reg_mutex) +{ + struct dsa_port *dp = dsa_to_port(priv->ds, port), *other_dp; + struct mt7530_port *p = &priv->ports[port], *other_p; + struct dsa_port *cpu_dp = dp->cpu_dp; + u32 port_bitmap = BIT(cpu_dp->index); + int other_port; + bool isolated; + + dsa_switch_for_each_user_port(other_dp, priv->ds) { + other_port = other_dp->index; + other_p = &priv->ports[other_port]; + + if (dp == other_dp) + continue; + + /* Add/remove this port to/from the port matrix of the other + * ports in the same bridge. If the port is disabled, the port + * matrix is kept and not set up until the port becomes + * enabled. + */ + if (!dsa_port_offloads_bridge_dev(other_dp, bridge_dev)) + continue; + + isolated = p->isolated && other_p->isolated; + + if (join && !isolated) { + other_p->pm |= PCR_MATRIX(BIT(port)); + port_bitmap |= BIT(other_port); + } else { + other_p->pm &= ~PCR_MATRIX(BIT(port)); + } + + if (other_p->enable) + mt7530_rmw(priv, MT7530_PCR_P(other_port), + PCR_MATRIX_MASK, other_p->pm); + } + + /* Add/remove all the other ports to/from this port's port matrix. + * For !join (leaving the bridge), only the CPU port will remain in + * the port matrix of this port.
+ */ + p->pm = PCR_MATRIX(port_bitmap); + if (priv->ports[port].enable) + mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, p->pm); +} + static int mt7530_port_pre_bridge_flags(struct dsa_switch *ds, int port, struct switchdev_brport_flags flags, struct netlink_ext_ack *extack) { if (flags.mask & ~(BR_LEARNING | BR_FLOOD | BR_MCAST_FLOOD | - BR_BCAST_FLOOD)) + BR_BCAST_FLOOD | BR_ISOLATED)) return -EINVAL; return 0; @@ -1337,6 +1386,17 @@ mt7530_port_bridge_flags(struct dsa_switch *ds, int port, mt7530_rmw(priv, MT753X_MFC, BC_FFP(BIT(port)), flags.val & BR_BCAST_FLOOD ? BC_FFP(BIT(port)) : 0); + if (flags.mask & BR_ISOLATED) { + struct dsa_port *dp = dsa_to_port(ds, port); + struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp); + + priv->ports[port].isolated = !!(flags.val & BR_ISOLATED); + + mutex_lock(&priv->reg_mutex); + mt7530_update_port_member(priv, port, bridge_dev, true); + mutex_unlock(&priv->reg_mutex); + } + return 0; } @@ -1345,39 +1405,11 @@ mt7530_port_bridge_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, bool *tx_fwd_offload, struct netlink_ext_ack *extack) { - struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; - struct dsa_port *cpu_dp = dp->cpu_dp; - u32 port_bitmap = BIT(cpu_dp->index); struct mt7530_priv *priv = ds->priv; mutex_lock(&priv->reg_mutex); - dsa_switch_for_each_user_port(other_dp, ds) { - int other_port = other_dp->index; - - if (dp == other_dp) - continue; - - /* Add this port to the port matrix of the other ports in the - * same bridge. If the port is disabled, port matrix is kept - * and not being setup until the port becomes enabled. - */ - if (!dsa_port_offloads_bridge(other_dp, &bridge)) - continue; - - if (priv->ports[other_port].enable) - mt7530_set(priv, MT7530_PCR_P(other_port), - PCR_MATRIX(BIT(port))); - priv->ports[other_port].pm |= PCR_MATRIX(BIT(port)); - - port_bitmap |= BIT(other_port); - } - - /* Add the all other ports to this port matrix. */ - if (priv->ports[port].enable) - mt7530_rmw(priv, MT7530_PCR_P(port), - PCR_MATRIX_MASK, PCR_MATRIX(port_bitmap)); - priv->ports[port].pm |= PCR_MATRIX(port_bitmap); + mt7530_update_port_member(priv, port, bridge.dev, true); /* Set to fallback mode for independent VLAN learning */ mt7530_rmw(priv, MT7530_PCR_P(port), PCR_PORT_VLAN_MASK, @@ -1478,38 +1510,11 @@ static void mt7530_port_bridge_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) { - struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; - struct dsa_port *cpu_dp = dp->cpu_dp; struct mt7530_priv *priv = ds->priv; mutex_lock(&priv->reg_mutex); - dsa_switch_for_each_user_port(other_dp, ds) { - int other_port = other_dp->index; - - if (dp == other_dp) - continue; - - /* Remove this port from the port matrix of the other ports - * in the same bridge. If the port is disabled, port matrix - * is kept and not being setup until the port becomes enabled. - */ - if (!dsa_port_offloads_bridge(other_dp, &bridge)) - continue; - - if (priv->ports[other_port].enable) - mt7530_clear(priv, MT7530_PCR_P(other_port), - PCR_MATRIX(BIT(port))); - priv->ports[other_port].pm &= ~PCR_MATRIX(BIT(port)); - } - - /* Set the cpu port to be the only one in the port matrix of - * this port. 
- */ - if (priv->ports[port].enable) - mt7530_rmw(priv, MT7530_PCR_P(port), PCR_MATRIX_MASK, - PCR_MATRIX(BIT(cpu_dp->index))); - priv->ports[port].pm = PCR_MATRIX(BIT(cpu_dp->index)); + mt7530_update_port_member(priv, port, bridge.dev, false); /* When a port is removed from the bridge, the port would be set up * back to the default as is at initial boot which is a VLAN-unaware diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 2ea4e24628c6..28592123070b 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -721,6 +721,7 @@ struct mt7530_fdb { */ struct mt7530_port { bool enable; + bool isolated; u32 pm; u16 pvid; struct phylink_pcs *sgmii_pcs; diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.c b/drivers/net/dsa/mv88e6xxx/hwtstamp.c index 331b4ca089ff..49e6e1355142 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.c +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.c @@ -64,7 +64,7 @@ static int mv88e6xxx_ptp_read(struct mv88e6xxx_chip *chip, int addr, #define TX_TSTAMP_TIMEOUT msecs_to_jiffies(40) int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { const struct mv88e6xxx_ptp_ops *ptp_ops; struct mv88e6xxx_chip *chip; diff --git a/drivers/net/dsa/mv88e6xxx/hwtstamp.h b/drivers/net/dsa/mv88e6xxx/hwtstamp.h index cf7fb6d660b1..85acc758e3eb 100644 --- a/drivers/net/dsa/mv88e6xxx/hwtstamp.h +++ b/drivers/net/dsa/mv88e6xxx/hwtstamp.h @@ -121,7 +121,7 @@ void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb); int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); int mv88e6xxx_hwtstamp_setup(struct mv88e6xxx_chip *chip); void mv88e6xxx_hwtstamp_free(struct mv88e6xxx_chip *chip); @@ -157,7 +157,7 @@ static inline void mv88e6xxx_port_txtstamp(struct dsa_switch *ds, int port, } static inline int mv88e6xxx_get_ts_info(struct dsa_switch *ds, int port, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { return -EOPNOTSUPP; } diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 61e95487732d..e554699f06d4 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -1050,24 +1050,32 @@ static void felix_phylink_get_caps(struct dsa_switch *ds, int port, config->supported_interfaces); } -static void felix_phylink_mac_config(struct dsa_switch *ds, int port, +static void felix_phylink_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { - struct ocelot *ocelot = ds->priv; - struct felix *felix = ocelot_to_felix(ocelot); + struct dsa_port *dp = dsa_phylink_to_port(config); + struct ocelot *ocelot = dp->ds->priv; + int port = dp->index; + struct felix *felix; + + felix = ocelot_to_felix(ocelot); if (felix->info->phylink_mac_config) felix->info->phylink_mac_config(ocelot, port, mode, state); } -static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds, - int port, - phy_interface_t iface) +static struct phylink_pcs * +felix_phylink_mac_select_pcs(struct phylink_config *config, + phy_interface_t iface) { - struct ocelot *ocelot = ds->priv; - struct felix *felix = ocelot_to_felix(ocelot); + struct dsa_port *dp = dsa_phylink_to_port(config); + struct ocelot *ocelot = dp->ds->priv; struct phylink_pcs *pcs = NULL; + int port = dp->index; + struct felix *felix; + + felix = ocelot_to_felix(ocelot); if (felix->pcs && felix->pcs[port]) pcs = felix->pcs[port]; @@ -1075,11 
+1083,13 @@ static struct phylink_pcs *felix_phylink_mac_select_pcs(struct dsa_switch *ds, return pcs; } -static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port, +static void felix_phylink_mac_link_down(struct phylink_config *config, unsigned int link_an_mode, phy_interface_t interface) { - struct ocelot *ocelot = ds->priv; + struct dsa_port *dp = dsa_phylink_to_port(config); + struct ocelot *ocelot = dp->ds->priv; + int port = dp->index; struct felix *felix; felix = ocelot_to_felix(ocelot); @@ -1088,15 +1098,19 @@ static void felix_phylink_mac_link_down(struct dsa_switch *ds, int port, felix->info->quirks); } -static void felix_phylink_mac_link_up(struct dsa_switch *ds, int port, +static void felix_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, unsigned int link_an_mode, phy_interface_t interface, - struct phy_device *phydev, int speed, int duplex, bool tx_pause, bool rx_pause) { - struct ocelot *ocelot = ds->priv; - struct felix *felix = ocelot_to_felix(ocelot); + struct dsa_port *dp = dsa_phylink_to_port(config); + struct ocelot *ocelot = dp->ds->priv; + int port = dp->index; + struct felix *felix; + + felix = ocelot_to_felix(ocelot); ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode, interface, speed, duplex, tx_pause, rx_pause, @@ -1220,7 +1234,7 @@ static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset) } static int felix_get_ts_info(struct dsa_switch *ds, int port, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct ocelot *ocelot = ds->priv; @@ -1583,6 +1597,15 @@ static int felix_setup(struct dsa_switch *ds) felix_port_qos_map_init(ocelot, dp->index); } + if (felix->info->request_irq) { + err = felix->info->request_irq(ocelot); + if (err) { + dev_err(ocelot->dev, "Failed to request IRQ: %pe\n", + ERR_PTR(err)); + goto out_deinit_ports; + } + } + err = ocelot_devlink_sb_register(ocelot); if (err) goto out_deinit_ports; @@ -2083,7 +2106,14 @@ static void felix_get_mm_stats(struct dsa_switch *ds, int port, ocelot_port_get_mm_stats(ocelot, port, stats); } -const struct dsa_switch_ops felix_switch_ops = { +static const struct phylink_mac_ops felix_phylink_mac_ops = { + .mac_select_pcs = felix_phylink_mac_select_pcs, + .mac_config = felix_phylink_mac_config, + .mac_link_down = felix_phylink_mac_link_down, + .mac_link_up = felix_phylink_mac_link_up, +}; + +static const struct dsa_switch_ops felix_switch_ops = { .get_tag_protocol = felix_get_tag_protocol, .change_tag_protocol = felix_change_tag_protocol, .connect_tag_protocol = felix_connect_tag_protocol, @@ -2104,10 +2134,6 @@ const struct dsa_switch_ops felix_switch_ops = { .get_sset_count = felix_get_sset_count, .get_ts_info = felix_get_ts_info, .phylink_get_caps = felix_phylink_get_caps, - .phylink_mac_config = felix_phylink_mac_config, - .phylink_mac_select_pcs = felix_phylink_mac_select_pcs, - .phylink_mac_link_down = felix_phylink_mac_link_down, - .phylink_mac_link_up = felix_phylink_mac_link_up, .port_enable = felix_port_enable, .port_fast_age = felix_port_fast_age, .port_fdb_dump = felix_fdb_dump, @@ -2166,7 +2192,53 @@ const struct dsa_switch_ops felix_switch_ops = { .port_set_host_flood = felix_port_set_host_flood, .port_change_conduit = felix_port_change_conduit, }; -EXPORT_SYMBOL_GPL(felix_switch_ops); + +int felix_register_switch(struct device *dev, resource_size_t switch_base, + int num_flooding_pgids, bool ptp, + bool mm_supported, + enum dsa_tag_protocol init_tag_proto, + const struct felix_info *info) +{ + struct 
dsa_switch *ds; + struct ocelot *ocelot; + struct felix *felix; + int err; + + felix = devm_kzalloc(dev, sizeof(*felix), GFP_KERNEL); + if (!felix) + return -ENOMEM; + + ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL); + if (!ds) + return -ENOMEM; + + dev_set_drvdata(dev, felix); + + ocelot = &felix->ocelot; + ocelot->dev = dev; + ocelot->num_flooding_pgids = num_flooding_pgids; + ocelot->ptp = ptp; + ocelot->mm_supported = mm_supported; + + felix->info = info; + felix->switch_base = switch_base; + felix->ds = ds; + felix->tag_proto = init_tag_proto; + + ds->dev = dev; + ds->num_ports = info->num_ports; + ds->num_tx_queues = OCELOT_NUM_TC; + ds->ops = &felix_switch_ops; + ds->phylink_mac_ops = &felix_phylink_mac_ops; + ds->priv = ocelot; + + err = dsa_register_switch(ds); + if (err) + dev_err_probe(dev, err, "Failed to register DSA switch\n"); + + return err; +} +EXPORT_SYMBOL_GPL(felix_register_switch); struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port) { diff --git a/drivers/net/dsa/ocelot/felix.h b/drivers/net/dsa/ocelot/felix.h index dbf5872fe367..211991f494e3 100644 --- a/drivers/net/dsa/ocelot/felix.h +++ b/drivers/net/dsa/ocelot/felix.h @@ -32,7 +32,6 @@ struct felix_info { const u32 *port_modes; int num_mact_rows; int num_ports; - int num_tx_queues; struct vcap_props *vcap; u16 vcap_pol_base; u16 vcap_pol_max; @@ -64,6 +63,7 @@ struct felix_info { const struct phylink_link_state *state); int (*configure_serdes)(struct ocelot *ocelot, int port, struct device_node *portnp); + int (*request_irq)(struct ocelot *ocelot); }; /* Methods for initializing the hardware resources specific to a tagging @@ -82,8 +82,6 @@ struct felix_tag_proto_ops { struct netlink_ext_ack *extack); }; -extern const struct dsa_switch_ops felix_switch_ops; - /* DSA glue / front-end for struct ocelot */ struct felix { struct dsa_switch *ds; @@ -99,6 +97,11 @@ struct felix { unsigned long host_flood_mc_mask; }; +int felix_register_switch(struct device *dev, resource_size_t switch_base, + int num_flooding_pgids, bool ptp, + bool mm_supported, + enum dsa_tag_protocol init_tag_proto, + const struct felix_info *info); struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port); int felix_netdev_to_port(struct net_device *dev); diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 85952d841f28..ba37a566da39 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -2605,6 +2605,28 @@ set: } } +/* The INTB interrupt is shared between the PTP TX timestamp availability + * notification and MAC Merge status change on each port.
+ */ +static irqreturn_t vsc9959_irq_handler(int irq, void *data) +{ + struct ocelot *ocelot = data; + + ocelot_get_txtstamp(ocelot); + ocelot_mm_irq(ocelot); + + return IRQ_HANDLED; +} + +static int vsc9959_request_irq(struct ocelot *ocelot) +{ + struct pci_dev *pdev = to_pci_dev(ocelot->dev); + + return devm_request_threaded_irq(ocelot->dev, pdev->irq, NULL, + &vsc9959_irq_handler, IRQF_ONESHOT, + "felix-intb", ocelot); +} + static const struct ocelot_ops vsc9959_ops = { .reset = vsc9959_reset, .wm_enc = vsc9959_wm_enc, @@ -2636,7 +2658,6 @@ static const struct felix_info felix_info_vsc9959 = { .vcap_pol_max2 = 0, .num_mact_rows = 2048, .num_ports = VSC9959_NUM_PORTS, - .num_tx_queues = OCELOT_NUM_TC, .quirks = FELIX_MAC_QUIRKS, .quirk_no_xtr_irq = true, .ptp_caps = &vsc9959_ptp_caps, @@ -2645,98 +2666,36 @@ static const struct felix_info felix_info_vsc9959 = { .port_modes = vsc9959_port_modes, .port_setup_tc = vsc9959_port_setup_tc, .port_sched_speed_set = vsc9959_sched_speed_set, + .request_irq = vsc9959_request_irq, }; -/* The INTB interrupt is shared between for PTP TX timestamp availability - * notification and MAC Merge status change on each port. - */ -static irqreturn_t felix_irq_handler(int irq, void *data) -{ - struct ocelot *ocelot = (struct ocelot *)data; - - ocelot_get_txtstamp(ocelot); - ocelot_mm_irq(ocelot); - - return IRQ_HANDLED; -} - static int felix_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - struct dsa_switch *ds; - struct ocelot *ocelot; - struct felix *felix; + struct device *dev = &pdev->dev; + resource_size_t switch_base; int err; - if (pdev->dev.of_node && !of_device_is_available(pdev->dev.of_node)) { - dev_info(&pdev->dev, "device is disabled, skipping\n"); - return -ENODEV; - } - err = pci_enable_device(pdev); if (err) { - dev_err(&pdev->dev, "device enable failed\n"); - goto err_pci_enable; + dev_err(dev, "device enable failed: %pe\n", ERR_PTR(err)); + return err; } - felix = kzalloc(sizeof(struct felix), GFP_KERNEL); - if (!felix) { - err = -ENOMEM; - dev_err(&pdev->dev, "Failed to allocate driver memory\n"); - goto err_alloc_felix; - } - - pci_set_drvdata(pdev, felix); - ocelot = &felix->ocelot; - ocelot->dev = &pdev->dev; - ocelot->num_flooding_pgids = OCELOT_NUM_TC; - felix->info = &felix_info_vsc9959; - felix->switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR); - pci_set_master(pdev); - err = devm_request_threaded_irq(&pdev->dev, pdev->irq, NULL, - &felix_irq_handler, IRQF_ONESHOT, - "felix-intb", ocelot); - if (err) { - dev_err(&pdev->dev, "Failed to request irq\n"); - goto err_alloc_irq; - } + switch_base = pci_resource_start(pdev, VSC9959_SWITCH_PCI_BAR); - ocelot->ptp = 1; - ocelot->mm_supported = true; - - ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL); - if (!ds) { - err = -ENOMEM; - dev_err(&pdev->dev, "Failed to allocate DSA switch\n"); - goto err_alloc_ds; - } - - ds->dev = &pdev->dev; - ds->num_ports = felix->info->num_ports; - ds->num_tx_queues = felix->info->num_tx_queues; - ds->ops = &felix_switch_ops; - ds->priv = ocelot; - felix->ds = ds; - felix->tag_proto = DSA_TAG_PROTO_OCELOT; - - err = dsa_register_switch(ds); - if (err) { - dev_err_probe(&pdev->dev, err, "Failed to register DSA switch\n"); - goto err_register_ds; - } + err = felix_register_switch(dev, switch_base, OCELOT_NUM_TC, + true, true, DSA_TAG_PROTO_OCELOT, + &felix_info_vsc9959); + if (err) + goto out_disable; return 0; -err_register_ds: - kfree(ds); -err_alloc_ds: -err_alloc_irq: - kfree(felix); -err_alloc_felix: +out_disable: 
pci_disable_device(pdev); -err_pci_enable: return err; } @@ -2749,9 +2708,6 @@ static void felix_pci_remove(struct pci_dev *pdev) dsa_unregister_switch(felix->ds); - kfree(felix->ds); - kfree(felix); - pci_disable_device(pdev); } diff --git a/drivers/net/dsa/ocelot/ocelot_ext.c b/drivers/net/dsa/ocelot/ocelot_ext.c index 22187d831c4b..5632a7248cd4 100644 --- a/drivers/net/dsa/ocelot/ocelot_ext.c +++ b/drivers/net/dsa/ocelot/ocelot_ext.c @@ -57,7 +57,6 @@ static const struct felix_info vsc7512_info = { .vcap = vsc7514_vcap_props, .num_mact_rows = 1024, .num_ports = VSC7514_NUM_PORTS, - .num_tx_queues = OCELOT_NUM_TC, .port_modes = vsc7512_port_modes, .phylink_mac_config = ocelot_phylink_mac_config, .configure_serdes = ocelot_port_configure_serdes, @@ -65,54 +64,8 @@ static const struct felix_info vsc7512_info = { static int ocelot_ext_probe(struct platform_device *pdev) { - struct device *dev = &pdev->dev; - struct dsa_switch *ds; - struct ocelot *ocelot; - struct felix *felix; - int err; - - felix = kzalloc(sizeof(*felix), GFP_KERNEL); - if (!felix) - return -ENOMEM; - - dev_set_drvdata(dev, felix); - - ocelot = &felix->ocelot; - ocelot->dev = dev; - - ocelot->num_flooding_pgids = 1; - - felix->info = &vsc7512_info; - - ds = kzalloc(sizeof(*ds), GFP_KERNEL); - if (!ds) { - err = -ENOMEM; - dev_err_probe(dev, err, "Failed to allocate DSA switch\n"); - goto err_free_felix; - } - - ds->dev = dev; - ds->num_ports = felix->info->num_ports; - ds->num_tx_queues = felix->info->num_tx_queues; - - ds->ops = &felix_switch_ops; - ds->priv = ocelot; - felix->ds = ds; - felix->tag_proto = DSA_TAG_PROTO_OCELOT; - - err = dsa_register_switch(ds); - if (err) { - dev_err_probe(dev, err, "Failed to register DSA switch\n"); - goto err_free_ds; - } - - return 0; - -err_free_ds: - kfree(ds); -err_free_felix: - kfree(felix); - return err; + return felix_register_switch(&pdev->dev, 0, 1, false, false, + DSA_TAG_PROTO_OCELOT, &vsc7512_info); } static void ocelot_ext_remove(struct platform_device *pdev) @@ -123,9 +76,6 @@ static void ocelot_ext_remove(struct platform_device *pdev) return; dsa_unregister_switch(felix->ds); - - kfree(felix->ds); - kfree(felix); } static void ocelot_ext_shutdown(struct platform_device *pdev) diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index 049930da0521..70782649c395 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -963,7 +963,6 @@ static const struct felix_info seville_info_vsc9953 = { .quirks = FELIX_MAC_QUIRKS, .num_mact_rows = 2048, .num_ports = VSC9953_NUM_PORTS, - .num_tx_queues = OCELOT_NUM_TC, .mdio_bus_alloc = vsc9953_mdio_bus_alloc, .mdio_bus_free = vsc9953_mdio_bus_free, .port_modes = vsc9953_port_modes, @@ -971,62 +970,18 @@ static const struct felix_info seville_info_vsc9953 = { static int seville_probe(struct platform_device *pdev) { - struct dsa_switch *ds; - struct ocelot *ocelot; + struct device *dev = &pdev->dev; struct resource *res; - struct felix *felix; - int err; - - felix = kzalloc(sizeof(struct felix), GFP_KERNEL); - if (!felix) { - err = -ENOMEM; - dev_err(&pdev->dev, "Failed to allocate driver memory\n"); - goto err_alloc_felix; - } - - platform_set_drvdata(pdev, felix); - - ocelot = &felix->ocelot; - ocelot->dev = &pdev->dev; - ocelot->num_flooding_pgids = 1; - felix->info = &seville_info_vsc9953; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { - err = -EINVAL; - dev_err(&pdev->dev, "Invalid resource\n"); - goto err_alloc_felix; - } 
- felix->switch_base = res->start; - - ds = kzalloc(sizeof(struct dsa_switch), GFP_KERNEL); - if (!ds) { - err = -ENOMEM; - dev_err(&pdev->dev, "Failed to allocate DSA switch\n"); - goto err_alloc_ds; + dev_err(dev, "Invalid resource\n"); + return -EINVAL; } - ds->dev = &pdev->dev; - ds->num_ports = felix->info->num_ports; - ds->ops = &felix_switch_ops; - ds->priv = ocelot; - felix->ds = ds; - felix->tag_proto = DSA_TAG_PROTO_SEVILLE; - - err = dsa_register_switch(ds); - if (err) { - dev_err(&pdev->dev, "Failed to register DSA switch: %d\n", err); - goto err_register_ds; - } - - return 0; - -err_register_ds: - kfree(ds); -err_alloc_ds: -err_alloc_felix: - kfree(felix); - return err; + return felix_register_switch(dev, res->start, 1, false, false, + DSA_TAG_PROTO_SEVILLE, + &seville_info_vsc9953); } static void seville_remove(struct platform_device *pdev) @@ -1037,9 +992,6 @@ static void seville_remove(struct platform_device *pdev) return; dsa_unregister_switch(felix->ds); - - kfree(felix->ds); - kfree(felix); } static void seville_shutdown(struct platform_device *pdev) diff --git a/drivers/net/dsa/qca/ar9331.c b/drivers/net/dsa/qca/ar9331.c index 968cb81088bf..e9f2c67bc15f 100644 --- a/drivers/net/dsa/qca/ar9331.c +++ b/drivers/net/dsa/qca/ar9331.c @@ -1021,7 +1021,7 @@ static const struct regmap_config ar9331_mdio_regmap_config = { .cache_type = REGCACHE_MAPLE, }; -static struct regmap_bus ar9331_sw_bus = { +static const struct regmap_bus ar9331_sw_bus = { .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, .val_format_endian_default = REGMAP_ENDIAN_NATIVE, .read = ar9331_mdio_read, diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c index b3c27cf538e8..f8d8c70642c4 100644 --- a/drivers/net/dsa/qca/qca8k-8xxx.c +++ b/drivers/net/dsa/qca/qca8k-8xxx.c @@ -565,7 +565,7 @@ qca8k_regmap_update_bits(void *ctx, uint32_t reg, uint32_t mask, uint32_t write_ return qca8k_regmap_update_bits_mii(priv, reg, mask, write_val); } -static struct regmap_config qca8k_regmap_config = { +static const struct regmap_config qca8k_regmap_config = { .reg_bits = 16, .val_bits = 32, .reg_stride = 4, diff --git a/drivers/net/dsa/qca/qca8k-common.c b/drivers/net/dsa/qca/qca8k-common.c index 7f80035c5441..560c74c4ac3d 100644 --- a/drivers/net/dsa/qca/qca8k-common.c +++ b/drivers/net/dsa/qca/qca8k-common.c @@ -614,11 +614,57 @@ void qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) qca8k_port_configure_learning(ds, port, learning); } +static int qca8k_update_port_member(struct qca8k_priv *priv, int port, + const struct net_device *bridge_dev, + bool join) +{ + bool isolated = !!(priv->port_isolated_map & BIT(port)), other_isolated; + struct dsa_port *dp = dsa_to_port(priv->ds, port), *other_dp; + u32 port_mask = BIT(dp->cpu_dp->index); + int i, ret; + + for (i = 0; i < QCA8K_NUM_PORTS; i++) { + if (i == port) + continue; + if (dsa_is_cpu_port(priv->ds, i)) + continue; + + other_dp = dsa_to_port(priv->ds, i); + if (!dsa_port_offloads_bridge_dev(other_dp, bridge_dev)) + continue; + + other_isolated = !!(priv->port_isolated_map & BIT(i)); + + /* Add/remove this port to/from the portvlan mask of the other + * ports in the bridge + */ + if (join && !(isolated && other_isolated)) { + port_mask |= BIT(i); + ret = regmap_set_bits(priv->regmap, + QCA8K_PORT_LOOKUP_CTRL(i), + BIT(port)); + } else { + ret = regmap_clear_bits(priv->regmap, + QCA8K_PORT_LOOKUP_CTRL(i), + BIT(port)); + } + + if (ret) + return ret; + } + + /* Add/remove all other ports to/from this port's portvlan mask */ 
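+ /* Worked example (hypothetical indices): with the CPU port at index 0 + * and user ports 1 and 2 in the same bridge, updating port 1 on join + * yields port_mask = BIT(0) | BIT(2); if ports 1 and 2 both have + * BR_ISOLATED set, port_mask stays BIT(0), so isolated ports can only + * reach each other through the CPU port. + */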
+ ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), + QCA8K_PORT_LOOKUP_MEMBER, port_mask); + + return ret; +} + int qca8k_port_pre_bridge_flags(struct dsa_switch *ds, int port, struct switchdev_brport_flags flags, struct netlink_ext_ack *extack) { - if (flags.mask & ~BR_LEARNING) + if (flags.mask & ~(BR_LEARNING | BR_ISOLATED)) return -EINVAL; return 0; @@ -628,6 +674,7 @@ int qca8k_port_bridge_flags(struct dsa_switch *ds, int port, struct switchdev_brport_flags flags, struct netlink_ext_ack *extack) { + struct qca8k_priv *priv = ds->priv; int ret; if (flags.mask & BR_LEARNING) { @@ -637,6 +684,20 @@ int qca8k_port_bridge_flags(struct dsa_switch *ds, int port, return ret; } + if (flags.mask & BR_ISOLATED) { + struct dsa_port *dp = dsa_to_port(ds, port); + struct net_device *bridge_dev = dsa_port_bridge_dev_get(dp); + + if (flags.val & BR_ISOLATED) + priv->port_isolated_map |= BIT(port); + else + priv->port_isolated_map &= ~BIT(port); + + ret = qca8k_update_port_member(priv, port, bridge_dev, true); + if (ret) + return ret; + } + return 0; } @@ -646,62 +707,21 @@ int qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct netlink_ext_ack *extack) { struct qca8k_priv *priv = ds->priv; - int port_mask, cpu_port; - int i, ret; - cpu_port = dsa_to_port(ds, port)->cpu_dp->index; - port_mask = BIT(cpu_port); - - for (i = 0; i < QCA8K_NUM_PORTS; i++) { - if (dsa_is_cpu_port(ds, i)) - continue; - if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) - continue; - /* Add this port to the portvlan mask of the other ports - * in the bridge - */ - ret = regmap_set_bits(priv->regmap, - QCA8K_PORT_LOOKUP_CTRL(i), - BIT(port)); - if (ret) - return ret; - if (i != port) - port_mask |= BIT(i); - } - - /* Add all other ports to this ports portvlan mask */ - ret = qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), - QCA8K_PORT_LOOKUP_MEMBER, port_mask); - - return ret; + return qca8k_update_port_member(priv, port, bridge.dev, true); } void qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) { struct qca8k_priv *priv = ds->priv; - int cpu_port, i; + int err; - cpu_port = dsa_to_port(ds, port)->cpu_dp->index; - - for (i = 0; i < QCA8K_NUM_PORTS; i++) { - if (dsa_is_cpu_port(ds, i)) - continue; - if (!dsa_port_offloads_bridge(dsa_to_port(ds, i), &bridge)) - continue; - /* Remove this port to the portvlan mask of the other ports - * in the bridge - */ - regmap_clear_bits(priv->regmap, - QCA8K_PORT_LOOKUP_CTRL(i), - BIT(port)); - } - - /* Set the cpu port to be the only one in the portvlan mask of - * this port - */ - qca8k_rmw(priv, QCA8K_PORT_LOOKUP_CTRL(port), - QCA8K_PORT_LOOKUP_MEMBER, BIT(cpu_port)); + err = qca8k_update_port_member(priv, port, bridge.dev, false); + if (err) + dev_err(priv->dev, + "Failed to update switch config for bridge leave: %d\n", + err); } void qca8k_port_fast_age(struct dsa_switch *ds, int port) diff --git a/drivers/net/dsa/qca/qca8k.h b/drivers/net/dsa/qca/qca8k.h index 2184d8d2d5a9..3664a2e2f1f6 100644 --- a/drivers/net/dsa/qca/qca8k.h +++ b/drivers/net/dsa/qca/qca8k.h @@ -451,6 +451,7 @@ struct qca8k_priv { * Bit 1: port enabled. Bit 0: port disabled. 
*/ u8 port_enabled_map; + u8 port_isolated_map; struct qca8k_ports_config ports_config; struct regmap *regmap; struct mii_bus *bus; diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index ee0fb1c343f1..c7282ce3d11c 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -2133,14 +2133,13 @@ static int sja1105_bridge_join(struct dsa_switch *ds, int port, if (rc) return rc; - rc = dsa_tag_8021q_bridge_join(ds, port, bridge); + rc = dsa_tag_8021q_bridge_join(ds, port, bridge, tx_fwd_offload, + extack); if (rc) { sja1105_bridge_member(ds, port, bridge, false); return rc; } - *tx_fwd_offload = true; - return 0; } @@ -3167,8 +3166,7 @@ static int sja1105_setup(struct dsa_switch *ds) ds->vlan_filtering_is_global = true; ds->untag_bridge_pvid = true; ds->fdb_isolation = true; - /* tag_8021q has 3 bits for the VBID, and the value 0 is reserved */ - ds->max_num_bridges = 7; + ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES; /* Advertise the 8 egress queues */ ds->num_tx_queues = SJA1105_NUM_TC; diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.c b/drivers/net/dsa/sja1105/sja1105_ptp.c index a7d41e781398..a1f4ca6ad888 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.c +++ b/drivers/net/dsa/sja1105/sja1105_ptp.c @@ -111,7 +111,7 @@ int sja1105_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr) } int sja1105_get_ts_info(struct dsa_switch *ds, int port, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct sja1105_private *priv = ds->priv; struct sja1105_ptp_data *ptp_data = &priv->ptp_data; diff --git a/drivers/net/dsa/sja1105/sja1105_ptp.h b/drivers/net/dsa/sja1105/sja1105_ptp.h index 416461ee95d2..8add2bd5f728 100644 --- a/drivers/net/dsa/sja1105/sja1105_ptp.h +++ b/drivers/net/dsa/sja1105/sja1105_ptp.h @@ -101,7 +101,7 @@ void sja1105pqrs_ptp_cmd_packing(u8 *buf, struct sja1105_ptp_cmd *cmd, enum packing_op op); int sja1105_get_ts_info(struct dsa_switch *ds, int port, - struct ethtool_ts_info *ts); + struct kernel_ethtool_ts_info *ts); void sja1105_ptp_txtstamp_skb(struct dsa_switch *ds, int slot, struct sk_buff *clone); diff --git a/drivers/net/dsa/vitesse-vsc73xx-core.c b/drivers/net/dsa/vitesse-vsc73xx-core.c index 4b031fefcec6..d9d3e30fd47a 100644 --- a/drivers/net/dsa/vitesse-vsc73xx-core.c +++ b/drivers/net/dsa/vitesse-vsc73xx-core.c @@ -22,9 +22,11 @@ #include #include #include +#include #include #include #include +#include #include #include @@ -62,6 +64,8 @@ #define VSC73XX_CAT_DROP 0x6e #define VSC73XX_CAT_PR_MISC_L2 0x6f #define VSC73XX_CAT_PR_USR_PRIO 0x75 +#define VSC73XX_CAT_VLAN_MISC 0x79 +#define VSC73XX_CAT_PORT_VLAN 0x7a #define VSC73XX_Q_MISC_CONF 0xdf /* MAC_CFG register bits */ @@ -122,6 +126,17 @@ #define VSC73XX_ADVPORTM_IO_LOOPBACK BIT(1) #define VSC73XX_ADVPORTM_HOST_LOOPBACK BIT(0) +/* TXUPDCFG transmit modify setup bits */ +#define VSC73XX_TXUPDCFG_DSCP_REWR_MODE GENMASK(20, 19) +#define VSC73XX_TXUPDCFG_DSCP_REWR_ENA BIT(18) +#define VSC73XX_TXUPDCFG_TX_INT_TO_USRPRIO_ENA BIT(17) +#define VSC73XX_TXUPDCFG_TX_UNTAGGED_VID GENMASK(15, 4) +#define VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_ENA BIT(3) +#define VSC73XX_TXUPDCFG_TX_UPDATE_CRC_CPU_ENA BIT(1) +#define VSC73XX_TXUPDCFG_TX_INSERT_TAG BIT(0) + +#define VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_SHIFT 4 + /* CAT_DROP categorizer frame dropping register bits */ #define VSC73XX_CAT_DROP_DROP_MC_SMAC_ENA BIT(6) #define VSC73XX_CAT_DROP_FWD_CTRL_ENA BIT(4) @@ -135,6 +150,15 @@ #define 
VSC73XX_Q_MISC_CONF_EARLY_TX_512 (1 << 1) #define VSC73XX_Q_MISC_CONF_MAC_PAUSE_MODE BIT(0) +/* CAT_VLAN_MISC categorizer VLAN miscellaneous bits */ +#define VSC73XX_CAT_VLAN_MISC_VLAN_TCI_IGNORE_ENA BIT(8) +#define VSC73XX_CAT_VLAN_MISC_VLAN_KEEP_TAG_ENA BIT(7) + +/* CAT_PORT_VLAN categorizer port VLAN */ +#define VSC73XX_CAT_PORT_VLAN_VLAN_CFI BIT(15) +#define VSC73XX_CAT_PORT_VLAN_VLAN_USR_PRIO GENMASK(14, 12) +#define VSC73XX_CAT_PORT_VLAN_VLAN_VID GENMASK(11, 0) + /* Frame analyzer block 2 registers */ #define VSC73XX_STORMLIMIT 0x02 #define VSC73XX_ADVLEARN 0x03 @@ -164,6 +188,10 @@ #define VSC73XX_AGENCTRL 0xf0 #define VSC73XX_CAPRST 0xff +#define VSC73XX_SRCMASKS_CPU_COPY BIT(27) +#define VSC73XX_SRCMASKS_MIRROR BIT(26) +#define VSC73XX_SRCMASKS_PORTS_MASK GENMASK(7, 0) + #define VSC73XX_MACACCESS_CPU_COPY BIT(14) #define VSC73XX_MACACCESS_FWD_KILL BIT(13) #define VSC73XX_MACACCESS_IGNORE_VLAN BIT(12) @@ -185,7 +213,8 @@ #define VSC73XX_VLANACCESS_VLAN_MIRROR BIT(29) #define VSC73XX_VLANACCESS_VLAN_SRC_CHECK BIT(28) #define VSC73XX_VLANACCESS_VLAN_PORT_MASK GENMASK(9, 2) -#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK GENMASK(2, 0) +#define VSC73XX_VLANACCESS_VLAN_PORT_MASK_SHIFT 2 +#define VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK GENMASK(1, 0) #define VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE 0 #define VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY 1 #define VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY 2 @@ -343,6 +372,17 @@ static const struct vsc73xx_counter vsc73xx_tx_counters[] = { { 29, "TxQoSClass3" }, /* non-standard counter */ }; +struct vsc73xx_vlan_summary { + size_t num_tagged; + size_t num_untagged; +}; + +enum vsc73xx_port_vlan_conf { + VSC73XX_VLAN_FILTER, + VSC73XX_VLAN_FILTER_UNTAG_ALL, + VSC73XX_VLAN_IGNORE, +}; + int vsc73xx_is_addr_valid(u8 block, u8 subblock) { switch (block) { @@ -557,16 +597,103 @@ static enum dsa_tag_protocol vsc73xx_get_tag_protocol(struct dsa_switch *ds, * cannot access the tag. (See "Internal frame header" section * 3.9.1 in the manual.) 
*/ - return DSA_TAG_PROTO_NONE; + return DSA_TAG_PROTO_VSC73XX_8021Q; +} + +static int vsc73xx_wait_for_vlan_table_cmd(struct vsc73xx *vsc) +{ + int ret, err; + u32 val; + + ret = read_poll_timeout(vsc73xx_read, err, + err < 0 || + ((val & VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK) == + VSC73XX_VLANACCESS_VLAN_TBL_CMD_IDLE), + VSC73XX_POLL_SLEEP_US, VSC73XX_POLL_TIMEOUT_US, + false, vsc, VSC73XX_BLOCK_ANALYZER, + 0, VSC73XX_VLANACCESS, &val); + if (ret) + return ret; + return err; +} + +static int +vsc73xx_read_vlan_table_entry(struct vsc73xx *vsc, u16 vid, u8 *portmap) +{ + u32 val; + int ret; + + vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANTIDX, vid); + + ret = vsc73xx_wait_for_vlan_table_cmd(vsc); + if (ret) + return ret; + + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANACCESS, + VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK, + VSC73XX_VLANACCESS_VLAN_TBL_CMD_READ_ENTRY); + + ret = vsc73xx_wait_for_vlan_table_cmd(vsc); + if (ret) + return ret; + + vsc73xx_read(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANACCESS, &val); + *portmap = (val & VSC73XX_VLANACCESS_VLAN_PORT_MASK) >> + VSC73XX_VLANACCESS_VLAN_PORT_MASK_SHIFT; + + return 0; +} + +static int +vsc73xx_write_vlan_table_entry(struct vsc73xx *vsc, u16 vid, u8 portmap) +{ + int ret; + + vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANTIDX, vid); + + ret = vsc73xx_wait_for_vlan_table_cmd(vsc); + if (ret) + return ret; + + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANACCESS, + VSC73XX_VLANACCESS_VLAN_TBL_CMD_MASK | + VSC73XX_VLANACCESS_VLAN_SRC_CHECK | + VSC73XX_VLANACCESS_VLAN_PORT_MASK, + VSC73XX_VLANACCESS_VLAN_TBL_CMD_WRITE_ENTRY | + VSC73XX_VLANACCESS_VLAN_SRC_CHECK | + (portmap << VSC73XX_VLANACCESS_VLAN_PORT_MASK_SHIFT)); + + return vsc73xx_wait_for_vlan_table_cmd(vsc); +} + +static int +vsc73xx_update_vlan_table(struct vsc73xx *vsc, int port, u16 vid, bool set) +{ + u8 portmap; + int ret; + + ret = vsc73xx_read_vlan_table_entry(vsc, vid, &portmap); + if (ret) + return ret; + + if (set) + portmap |= BIT(port); + else + portmap &= ~BIT(port); + + return vsc73xx_write_vlan_table_entry(vsc, vid, portmap); } static int vsc73xx_setup(struct dsa_switch *ds) { struct vsc73xx *vsc = ds->priv; - int i; + int i, ret; dev_info(vsc->dev, "set up the switch\n"); + ds->untag_bridge_pvid = true; + ds->max_num_bridges = DSA_TAG_8021Q_MAX_NUM_BRIDGES; + /* Issue RESET */ vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GLORESET, VSC73XX_GLORESET_MASTER_RESET); @@ -594,7 +721,7 @@ static int vsc73xx_setup(struct dsa_switch *ds) VSC73XX_MACACCESS, VSC73XX_MACACCESS_CMD_CLEAR_TABLE); - /* Clear VLAN table */ + /* Set VLAN table to default values */ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANACCESS, VSC73XX_VLANACCESS_VLAN_TBL_CMD_CLEAR_TABLE); @@ -623,9 +750,9 @@ static int vsc73xx_setup(struct dsa_switch *ds) vsc73xx_write(vsc, VSC73XX_BLOCK_SYSTEM, 0, VSC73XX_GMIIDELAY, VSC73XX_GMIIDELAY_GMII0_GTXDELAY_2_0_NS | VSC73XX_GMIIDELAY_GMII0_RXDELAY_2_0_NS); - /* Enable reception of frames on all ports */ - vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_RECVMASK, - 0x5f); + /* Ingress VLAN reception mask (table 145) */ + vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_VLANMASK, + 0xff); /* IP multicast flood mask (table 144) */ vsc73xx_write(vsc, VSC73XX_BLOCK_ANALYZER, 0, VSC73XX_IFLODMSK, 0xff); @@ -638,7 +765,24 @@ static int vsc73xx_setup(struct dsa_switch *ds) udelay(4); - return 0; + /* Clear VLAN table */ + for (i = 0; i < VLAN_N_VID; i++) +
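vsc73xx_wait_for_vlan_table_cmd() above leans on read_poll_timeout() from <linux/iopoll.h>, which keeps re-invoking an accessor until a condition holds or the timeout expires; because the accessor itself can fail, both its return value (err) and the polled register value (val) appear in the condition. A minimal sketch of the same pattern, assuming a hypothetical my_read(priv, reg, &val) accessor and MY_* register constants:

#include <linux/bits.h>
#include <linux/iopoll.h>

struct my_priv;
int my_read(struct my_priv *priv, u8 reg, u32 *val); /* assumed accessor */

#define MY_STATUS		0x10	/* hypothetical status register */
#define MY_STATUS_CMD_MASK	GENMASK(1, 0)
#define MY_POLL_SLEEP_US	1000
#define MY_POLL_TIMEOUT_US	10000

static int my_wait_idle(struct my_priv *priv)
{
	int ret, err;
	u32 val;

	/* err receives my_read()'s return value on every iteration;
	 * stop early on an I/O error or once the CMD field is idle.
	 */
	ret = read_poll_timeout(my_read, err,
				err < 0 || !(val & MY_STATUS_CMD_MASK),
				MY_POLL_SLEEP_US, MY_POLL_TIMEOUT_US,
				false, priv, MY_STATUS, &val);
	if (ret)
		return ret;	/* -ETIMEDOUT */
	return err;		/* 0, or the accessor's error */
}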
vsc73xx_write_vlan_table_entry(vsc, i, 0); + + INIT_LIST_HEAD(&vsc->vlans); + + rtnl_lock(); + ret = dsa_tag_8021q_register(ds, htons(ETH_P_8021Q)); + rtnl_unlock(); + + return ret; +} + +static void vsc73xx_teardown(struct dsa_switch *ds) +{ + rtnl_lock(); + dsa_tag_8021q_unregister(ds); + rtnl_unlock(); } static void vsc73xx_init_port(struct vsc73xx *vsc, int port) @@ -788,10 +932,6 @@ static void vsc73xx_mac_link_down(struct phylink_config *config, /* Allow backward dropping of frames from this port */ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, VSC73XX_SBACKWDROP, BIT(port), BIT(port)); - - /* Receive mask (disable forwarding) */ - vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, - VSC73XX_RECVMASK, BIT(port), 0); } static void vsc73xx_mac_link_up(struct phylink_config *config, @@ -828,6 +968,12 @@ static void vsc73xx_mac_link_up(struct phylink_config *config, val |= seed << VSC73XX_MAC_CFG_SEED_OFFSET; val |= VSC73XX_MAC_CFG_SEED_LOAD; val |= VSC73XX_MAC_CFG_WEXC_DIS; + + /* These bits only affect the MTU. The kernel takes care of the MTU, + * so let's enable the +8 byte frame length unconditionally. + */ + val |= VSC73XX_MAC_CFG_VLAN_AWR | VSC73XX_MAC_CFG_VLAN_DBLAWR; + vsc73xx_write(vsc, VSC73XX_BLOCK_MAC, port, VSC73XX_MAC_CFG, val); /* Flow control for the PHY facing ports: @@ -844,10 +990,6 @@ static void vsc73xx_mac_link_up(struct phylink_config *config, vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, VSC73XX_ARBDISC, BIT(port), 0); - /* Enable port (forwarding) in the receive mask */ - vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, - VSC73XX_RECVMASK, BIT(port), BIT(port)); - /* Disallow backward dropping of frames from this port */ vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ARBITER, 0, VSC73XX_SBACKWDROP, BIT(port), 0); @@ -860,6 +1002,257 @@ static void vsc73xx_mac_link_up(struct phylink_config *config, VSC73XX_MAC_CFG_TX_EN | VSC73XX_MAC_CFG_RX_EN); } +static bool vsc73xx_tag_8021q_active(struct dsa_port *dp) +{ + return !dsa_port_is_vlan_filtering(dp); +} + +static struct vsc73xx_bridge_vlan * +vsc73xx_bridge_vlan_find(struct vsc73xx *vsc, u16 vid) +{ + struct vsc73xx_bridge_vlan *vlan; + + list_for_each_entry(vlan, &vsc->vlans, list) + if (vlan->vid == vid) + return vlan; + + return NULL; +} + +static void +vsc73xx_bridge_vlan_remove_port(struct vsc73xx_bridge_vlan *vsc73xx_vlan, + int port) +{ + vsc73xx_vlan->portmask &= ~BIT(port); + + if (vsc73xx_vlan->portmask) + return; + + list_del(&vsc73xx_vlan->list); + kfree(vsc73xx_vlan); +} + +static void vsc73xx_bridge_vlan_summary(struct vsc73xx *vsc, int port, + struct vsc73xx_vlan_summary *summary, + u16 ignored_vid) +{ + size_t num_tagged = 0, num_untagged = 0; + struct vsc73xx_bridge_vlan *vlan; + + list_for_each_entry(vlan, &vsc->vlans, list) { + if (!(vlan->portmask & BIT(port)) || vlan->vid == ignored_vid) + continue; + + if (vlan->untagged & BIT(port)) + num_untagged++; + else + num_tagged++; + } + + summary->num_untagged = num_untagged; + summary->num_tagged = num_tagged; +} + +static u16 vsc73xx_find_first_vlan_untagged(struct vsc73xx *vsc, int port) +{ + struct vsc73xx_bridge_vlan *vlan; + + list_for_each_entry(vlan, &vsc->vlans, list) + if ((vlan->portmask & BIT(port)) && + (vlan->untagged & BIT(port))) + return vlan->vid; + + return VLAN_N_VID; +} + +static int vsc73xx_set_vlan_conf(struct vsc73xx *vsc, int port, + enum vsc73xx_port_vlan_conf port_vlan_conf) +{ + u32 val = 0; + int ret; + + if (port_vlan_conf == VSC73XX_VLAN_IGNORE) + val = VSC73XX_CAT_VLAN_MISC_VLAN_TCI_IGNORE_ENA | +
VSC73XX_CAT_VLAN_MISC_VLAN_KEEP_TAG_ENA; + + ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_CAT_VLAN_MISC, + VSC73XX_CAT_VLAN_MISC_VLAN_TCI_IGNORE_ENA | + VSC73XX_CAT_VLAN_MISC_VLAN_KEEP_TAG_ENA, val); + if (ret) + return ret; + + val = (port_vlan_conf == VSC73XX_VLAN_FILTER) ? + VSC73XX_TXUPDCFG_TX_INSERT_TAG : 0; + + return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_TXUPDCFG, + VSC73XX_TXUPDCFG_TX_INSERT_TAG, val); +} + +/** + * vsc73xx_vlan_commit_conf - Update VLAN configuration of a port + * @vsc: Switch private data structure + * @port: Port index on which to operate + * + * Update the VLAN behavior of a port to make sure that when it is under + * a VLAN filtering bridge, the port is either filtering with tag + * preservation, or filtering with all VLANs egress-untagged. Otherwise, + * the port ignores VLAN tags from packets and applies the port-based + * VID. + * + * Must be called when changes are made to: + * - the bridge VLAN filtering state of the port + * - the number or attributes of VLANs from the bridge VLAN table, + * while the port is currently VLAN-aware + * + * Return: 0 on success, or negative errno on error. + */ +static int vsc73xx_vlan_commit_conf(struct vsc73xx *vsc, int port) +{ + enum vsc73xx_port_vlan_conf port_vlan_conf = VSC73XX_VLAN_IGNORE; + struct dsa_port *dp = dsa_to_port(vsc->ds, port); + + if (port == CPU_PORT) { + port_vlan_conf = VSC73XX_VLAN_FILTER; + } else if (dsa_port_is_vlan_filtering(dp)) { + struct vsc73xx_vlan_summary summary; + + port_vlan_conf = VSC73XX_VLAN_FILTER; + + vsc73xx_bridge_vlan_summary(vsc, port, &summary, VLAN_N_VID); + if (summary.num_tagged == 0) + port_vlan_conf = VSC73XX_VLAN_FILTER_UNTAG_ALL; + } + + return vsc73xx_set_vlan_conf(vsc, port, port_vlan_conf); +} + +static int +vsc73xx_vlan_change_untagged(struct vsc73xx *vsc, int port, u16 vid, bool set) +{ + u32 val = 0; + + if (set) + val = VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_ENA | + ((vid << VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_SHIFT) & + VSC73XX_TXUPDCFG_TX_UNTAGGED_VID); + + return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_TXUPDCFG, + VSC73XX_TXUPDCFG_TX_UNTAGGED_VID_ENA | + VSC73XX_TXUPDCFG_TX_UNTAGGED_VID, val); +} + +/** + * vsc73xx_vlan_commit_untagged - Update native VLAN of a port + * @vsc: Switch private data structure + * @port: Port index on which to operate + * + * Update the native VLAN of a port (the one VLAN which is transmitted + * egress-untagged on a trunk port) when the port is in VLAN filtering mode + * and only one untagged VID is configured. + * In other cases there is no need to configure it, because the switch can + * untag all VLANs on the port. + * + * Return: 0 on success, or negative errno on error. + */ +static int vsc73xx_vlan_commit_untagged(struct vsc73xx *vsc, int port) +{ + struct dsa_port *dp = dsa_to_port(vsc->ds, port); + struct vsc73xx_vlan_summary summary; + u16 vid = 0; + bool valid; + + if (!dsa_port_is_vlan_filtering(dp)) + /* In that case the port is configured to untag all VLANs; + * no need to commit an untagged config change. + */ + return 0; + + vsc73xx_bridge_vlan_summary(vsc, port, &summary, VLAN_N_VID); + + if (summary.num_untagged > 1) + /* In that case the port must untag all VLANs; + * no need to commit an untagged config change.
+ */ + return 0; + + valid = (summary.num_untagged == 1); + if (valid) + vid = vsc73xx_find_first_vlan_untagged(vsc, port); + + return vsc73xx_vlan_change_untagged(vsc, port, vid, valid); +} + +static int +vsc73xx_vlan_change_pvid(struct vsc73xx *vsc, int port, u16 vid, bool set) +{ + u32 val = 0; + int ret; + + val = set ? 0 : VSC73XX_CAT_DROP_UNTAGGED_ENA; + + ret = vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_CAT_DROP, + VSC73XX_CAT_DROP_UNTAGGED_ENA, val); + if (!set || ret) + return ret; + + return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_MAC, port, + VSC73XX_CAT_PORT_VLAN, + VSC73XX_CAT_PORT_VLAN_VLAN_VID, + vid & VSC73XX_CAT_PORT_VLAN_VLAN_VID); +} + +/** + * vsc73xx_vlan_commit_pvid - Update port-based default VLAN of a port + * @vsc: Switch private data structure + * @port: Port index on which to operate + * + * Update the PVID of a port so that it follows either the bridge PVID + * configuration, when the bridge is currently VLAN-aware, or the PVID + * from tag_8021q, when the port is standalone or under a VLAN-unaware + * bridge. A port with no PVID drops all untagged and VID 0 tagged + * traffic. + * + * Must be called when changes are made to: + * - the bridge VLAN filtering state of the port + * - the number or attributes of VLANs from the bridge VLAN table, + * while the port is currently VLAN-aware + * + * Return: 0 on success, or negative errno on error. + */ +static int vsc73xx_vlan_commit_pvid(struct vsc73xx *vsc, int port) +{ + struct vsc73xx_portinfo *portinfo = &vsc->portinfo[port]; + bool valid = portinfo->pvid_tag_8021q_configured; + struct dsa_port *dp = dsa_to_port(vsc->ds, port); + u16 vid = portinfo->pvid_tag_8021q; + + if (dsa_port_is_vlan_filtering(dp)) { + vid = portinfo->pvid_vlan_filtering; + valid = portinfo->pvid_vlan_filtering_configured; + } + + return vsc73xx_vlan_change_pvid(vsc, port, vid, valid); +} + +static int vsc73xx_vlan_commit_settings(struct vsc73xx *vsc, int port) +{ + int ret; + + ret = vsc73xx_vlan_commit_untagged(vsc, port); + if (ret) + return ret; + + ret = vsc73xx_vlan_commit_pvid(vsc, port); + if (ret) + return ret; + + return vsc73xx_vlan_commit_conf(vsc, port); +} + static int vsc73xx_port_enable(struct dsa_switch *ds, int port, struct phy_device *phy) { @@ -868,7 +1261,7 @@ static int vsc73xx_port_enable(struct dsa_switch *ds, int port, dev_info(vsc->dev, "enable port %d\n", port); vsc73xx_init_port(vsc, port); - return 0; + return vsc73xx_vlan_commit_settings(vsc, port); } static void vsc73xx_port_disable(struct dsa_switch *ds, int port) @@ -1039,6 +1432,303 @@ static void vsc73xx_phylink_get_caps(struct dsa_switch *dsa, int port, config->mac_capabilities = MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000; } +static int +vsc73xx_port_vlan_filtering(struct dsa_switch *ds, int port, + bool vlan_filtering, struct netlink_ext_ack *extack) +{ + struct vsc73xx *vsc = ds->priv; + + /* The hardware commit processed below is required because vsc73xx + * uses tag_8021q. When vlan_filtering is disabled, tag_8021q uses + * pvid/untagged vlans for port recognition. The values configured for + * vlans and pvid/untagged states are stored in the portinfo structure. + * When vlan_filtering is enabled, we need to restore pvid/untagged from + * the portinfo structure. The analogous routine runs when + * vlan_filtering is disabled, but then the values used for tag_8021q + * are restored.
+ */ + + return vsc73xx_vlan_commit_settings(vsc, port); +} + +static int vsc73xx_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) +{ + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + struct dsa_port *dp = dsa_to_port(ds, port); + struct vsc73xx_bridge_vlan *vsc73xx_vlan; + struct vsc73xx_vlan_summary summary; + struct vsc73xx_portinfo *portinfo; + struct vsc73xx *vsc = ds->priv; + bool commit_to_hardware; + int ret = 0; + + /* Be sure to deny alterations to the configuration done by tag_8021q. + */ + if (vid_is_dsa_8021q(vlan->vid)) { + NL_SET_ERR_MSG_MOD(extack, + "Range 3072-4095 reserved for dsa_8021q operation"); + return -EBUSY; + } + + /* The processed vlan->vid is excluded from the search because the VLAN + * can be re-added with a different set of flags, so it's easiest to + * ignore its old flags from the VLAN database software copy. + */ + vsc73xx_bridge_vlan_summary(vsc, port, &summary, vlan->vid); + + /* VSC73XX allows only three untagged states: none, one, or all */ + if ((untagged && summary.num_tagged > 0 && summary.num_untagged > 0) || + (!untagged && summary.num_untagged > 1)) { + NL_SET_ERR_MSG_MOD(extack, + "Port can have either none, one, or all untagged VLANs"); + return -EBUSY; + } + + vsc73xx_vlan = vsc73xx_bridge_vlan_find(vsc, vlan->vid); + + if (!vsc73xx_vlan) { + vsc73xx_vlan = kzalloc(sizeof(*vsc73xx_vlan), GFP_KERNEL); + if (!vsc73xx_vlan) + return -ENOMEM; + + vsc73xx_vlan->vid = vlan->vid; + + list_add_tail(&vsc73xx_vlan->list, &vsc->vlans); + } + + vsc73xx_vlan->portmask |= BIT(port); + + /* CPU port must always be tagged because source port identification is + * based on tag_8021q. + */ + if (port == CPU_PORT) + goto update_vlan_table; + + if (untagged) + vsc73xx_vlan->untagged |= BIT(port); + else + vsc73xx_vlan->untagged &= ~BIT(port); + + portinfo = &vsc->portinfo[port]; + + if (pvid) { + portinfo->pvid_vlan_filtering_configured = true; + portinfo->pvid_vlan_filtering = vlan->vid; + } else if (portinfo->pvid_vlan_filtering_configured && + portinfo->pvid_vlan_filtering == vlan->vid) { + portinfo->pvid_vlan_filtering_configured = false; + } + + commit_to_hardware = !vsc73xx_tag_8021q_active(dp); + if (commit_to_hardware) { + ret = vsc73xx_vlan_commit_settings(vsc, port); + if (ret) + goto err; + } + +update_vlan_table: + ret = vsc73xx_update_vlan_table(vsc, port, vlan->vid, true); + if (!ret) + return 0; +err: + vsc73xx_bridge_vlan_remove_port(vsc73xx_vlan, port); + return ret; +} + +static int vsc73xx_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct vsc73xx_bridge_vlan *vsc73xx_vlan; + struct vsc73xx_portinfo *portinfo; + struct vsc73xx *vsc = ds->priv; + bool commit_to_hardware; + int ret; + + ret = vsc73xx_update_vlan_table(vsc, port, vlan->vid, false); + if (ret) + return ret; + + portinfo = &vsc->portinfo[port]; + + if (portinfo->pvid_vlan_filtering_configured && + portinfo->pvid_vlan_filtering == vlan->vid) + portinfo->pvid_vlan_filtering_configured = false; + + vsc73xx_vlan = vsc73xx_bridge_vlan_find(vsc, vlan->vid); + + if (vsc73xx_vlan) + vsc73xx_bridge_vlan_remove_port(vsc73xx_vlan, port); + + commit_to_hardware = !vsc73xx_tag_8021q_active(dsa_to_port(ds, port)); + + if (commit_to_hardware) + return vsc73xx_vlan_commit_settings(vsc, port); + + return 0; +} + +static int vsc73xx_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid, + u16 flags) +{ +
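The untagged-state check in vsc73xx_port_vlan_add() above is dense, so it is worth restating: the hardware can untag nothing, exactly one VID, or everything on a port, and a request is acceptable only if the resulting mix keeps one of those shapes. A sketch of the same predicate, phrased positively and reusing struct vsc73xx_vlan_summary from this patch (the summary is computed with the VID being re-added excluded, exactly as above):

/* true if adding a VLAN with the given untagged state keeps the port in
 * one of the three shapes the hardware supports
 */
static bool vsc73xx_untagged_state_ok(const struct vsc73xx_vlan_summary *s,
				      bool untagged)
{
	if (untagged)
		/* "all untagged" only works with no tagged VLANs left;
		 * "one untagged" only if this will be the sole untagged VID
		 */
		return s->num_tagged == 0 || s->num_untagged == 0;

	/* a tagged VLAN tolerates at most one untagged VID on the port */
	return s->num_untagged <= 1;
}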
bool pvid = flags & BRIDGE_VLAN_INFO_PVID; + struct vsc73xx_portinfo *portinfo; + struct vsc73xx *vsc = ds->priv; + bool commit_to_hardware; + int ret; + + portinfo = &vsc->portinfo[port]; + + if (pvid) { + portinfo->pvid_tag_8021q_configured = true; + portinfo->pvid_tag_8021q = vid; + } + + commit_to_hardware = vsc73xx_tag_8021q_active(dsa_to_port(ds, port)); + if (commit_to_hardware) { + ret = vsc73xx_vlan_commit_settings(vsc, port); + if (ret) + return ret; + } + + return vsc73xx_update_vlan_table(vsc, port, vid, true); +} + +static int vsc73xx_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid) +{ + struct vsc73xx_portinfo *portinfo; + struct vsc73xx *vsc = ds->priv; + + portinfo = &vsc->portinfo[port]; + + if (portinfo->pvid_tag_8021q_configured && + portinfo->pvid_tag_8021q == vid) { + struct dsa_port *dp = dsa_to_port(ds, port); + bool commit_to_hardware; + int err; + + portinfo->pvid_tag_8021q_configured = false; + + commit_to_hardware = vsc73xx_tag_8021q_active(dp); + if (commit_to_hardware) { + err = vsc73xx_vlan_commit_settings(vsc, port); + if (err) + return err; + } + } + + return vsc73xx_update_vlan_table(vsc, port, vid, false); +} + +static int vsc73xx_port_pre_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + if (flags.mask & ~BR_LEARNING) + return -EINVAL; + + return 0; +} + +static int vsc73xx_port_bridge_flags(struct dsa_switch *ds, int port, + struct switchdev_brport_flags flags, + struct netlink_ext_ack *extack) +{ + if (flags.mask & BR_LEARNING) { + u32 val = flags.val & BR_LEARNING ? BIT(port) : 0; + struct vsc73xx *vsc = ds->priv; + + return vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_LEARNMASK, BIT(port), val); + } + + return 0; +} + +static void vsc73xx_refresh_fwd_map(struct dsa_switch *ds, int port, u8 state) +{ + struct dsa_port *other_dp, *dp = dsa_to_port(ds, port); + struct vsc73xx *vsc = ds->priv; + u16 mask; + + if (state != BR_STATE_FORWARDING) { + /* Ports that aren't in the forwarding state must not + * forward packets anywhere. + */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_SRCMASKS + port, + VSC73XX_SRCMASKS_PORTS_MASK, 0); + + dsa_switch_for_each_available_port(other_dp, ds) { + if (other_dp == dp) + continue; + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_SRCMASKS + other_dp->index, + BIT(port), 0); + } + + return; + } + + /* Forwarding ports must forward to the CPU and to other ports + * in the same bridge + */ + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_SRCMASKS + CPU_PORT, BIT(port), BIT(port)); + + mask = BIT(CPU_PORT); + + dsa_switch_for_each_user_port(other_dp, ds) { + int other_port = other_dp->index; + + if (port == other_port || !dsa_port_bridge_same(dp, other_dp) || + other_dp->stp_state != BR_STATE_FORWARDING) + continue; + + mask |= BIT(other_port); + + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_SRCMASKS + other_port, + BIT(port), BIT(port)); + } + + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_SRCMASKS + port, + VSC73XX_SRCMASKS_PORTS_MASK, mask); +} + +/* FIXME: STP frames aren't forwarded yet. BPDU frames are + * forwarded only from and to the PI/SI interface. For more info see chapter + * 2.7.1 (CPU Forwarding) in the datasheet. + * This function is required for tag_8021q operations.
+ */ +static void vsc73xx_port_stp_state_set(struct dsa_switch *ds, int port, + u8 state) +{ + struct dsa_port *dp = dsa_to_port(ds, port); + struct vsc73xx *vsc = ds->priv; + u32 val = 0; + + if (state == BR_STATE_LEARNING || state == BR_STATE_FORWARDING) + val = dp->learning ? BIT(port) : 0; + + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_LEARNMASK, BIT(port), val); + + val = (state == BR_STATE_BLOCKING || state == BR_STATE_DISABLED) ? + 0 : BIT(port); + vsc73xx_update_bits(vsc, VSC73XX_BLOCK_ANALYZER, 0, + VSC73XX_RECVMASK, BIT(port), val); + + /* The CPU port should always forward packets when user ports are + * forwarding, so let's configure it from the other ports only. + */ + if (port != CPU_PORT) + vsc73xx_refresh_fwd_map(ds, port, state); +} + static const struct phylink_mac_ops vsc73xx_phylink_mac_ops = { .mac_config = vsc73xx_mac_config, .mac_link_down = vsc73xx_mac_link_down, @@ -1048,6 +1738,7 @@ static const struct phylink_mac_ops vsc73xx_phylink_mac_ops = { static const struct dsa_switch_ops vsc73xx_ds_ops = { .get_tag_protocol = vsc73xx_get_tag_protocol, .setup = vsc73xx_setup, + .teardown = vsc73xx_teardown, .phy_read = vsc73xx_phy_read, .phy_write = vsc73xx_phy_write, .get_strings = vsc73xx_get_strings, @@ -1055,9 +1746,19 @@ static const struct dsa_switch_ops vsc73xx_ds_ops = { .get_sset_count = vsc73xx_get_sset_count, .port_enable = vsc73xx_port_enable, .port_disable = vsc73xx_port_disable, + .port_pre_bridge_flags = vsc73xx_port_pre_bridge_flags, + .port_bridge_flags = vsc73xx_port_bridge_flags, + .port_bridge_join = dsa_tag_8021q_bridge_join, + .port_bridge_leave = dsa_tag_8021q_bridge_leave, .port_change_mtu = vsc73xx_change_mtu, .port_max_mtu = vsc73xx_get_max_mtu, + .port_stp_state_set = vsc73xx_port_stp_state_set, + .port_vlan_filtering = vsc73xx_port_vlan_filtering, + .port_vlan_add = vsc73xx_port_vlan_add, + .port_vlan_del = vsc73xx_port_vlan_del, .phylink_get_caps = vsc73xx_phylink_get_caps, + .tag_8021q_vlan_add = vsc73xx_tag_8021q_vlan_add, + .tag_8021q_vlan_del = vsc73xx_tag_8021q_vlan_del, }; static int vsc73xx_gpio_get(struct gpio_chip *chip, unsigned int offset) diff --git a/drivers/net/dsa/vitesse-vsc73xx.h b/drivers/net/dsa/vitesse-vsc73xx.h index 2997f7e108b1..3ca579acc798 100644 --- a/drivers/net/dsa/vitesse-vsc73xx.h +++ b/drivers/net/dsa/vitesse-vsc73xx.h @@ -14,6 +14,22 @@ */ #define VSC73XX_MAX_NUM_PORTS 8 +/** + * struct vsc73xx_portinfo - port data structure: contains stored port data + * @pvid_vlan_filtering: pvid vlan number used in vlan filtering mode + * @pvid_tag_8021q: pvid vlan number used in tag_8021q mode + * @pvid_vlan_filtering_configured: informs if the port has a configured pvid in + * vlan filtering mode + * @pvid_tag_8021q_configured: informs if the port has a configured pvid in + * tag_8021q mode + */ +struct vsc73xx_portinfo { + u16 pvid_vlan_filtering; + u16 pvid_tag_8021q; + bool pvid_vlan_filtering_configured; + bool pvid_tag_8021q_configured; +}; + /** * struct vsc73xx - VSC73xx state container: main data structure * @dev: The device pointer * @@ -25,6 +41,10 @@ * @addr: MAC address used in flow control frames * @ops: Structure with hardware-dependent operations * @priv: Pointer to the configuration interface structure + * @portinfo: Storage table of portinfo structures + * @vlans: List of configured vlans. Contains the port mask and untagged status + * of every vlan configured in the port vlan operation. It doesn't cover + * tag_8021q vlans.
*/ struct vsc73xx { struct device *dev; @@ -35,6 +55,8 @@ struct vsc73xx { u8 addr[ETH_ALEN]; const struct vsc73xx_ops *ops; void *priv; + struct vsc73xx_portinfo portinfo[VSC73XX_MAX_NUM_PORTS]; + struct list_head vlans; }; /** @@ -49,6 +71,21 @@ struct vsc73xx_ops { u32 val); }; +/** + * struct vsc73xx_bridge_vlan - VSC73xx driver structure which keeps a copy of + * the vlan database + * @vid: VLAN number + * @portmask: each bit represents one port + * @untagged: each bit represents one port configured with @vid untagged + * @list: list structure + */ +struct vsc73xx_bridge_vlan { + u16 vid; + u8 portmask; + u8 untagged; + struct list_head list; +}; + int vsc73xx_is_addr_valid(u8 block, u8 subblock); int vsc73xx_probe(struct vsc73xx *vsc); void vsc73xx_remove(struct vsc73xx *vsc); diff --git a/drivers/net/dsa/xrs700x/xrs700x_i2c.c b/drivers/net/dsa/xrs700x/xrs700x_i2c.c index c1179d7311f7..9b731dea78c1 100644 --- a/drivers/net/dsa/xrs700x/xrs700x_i2c.c +++ b/drivers/net/dsa/xrs700x/xrs700x_i2c.c @@ -127,8 +127,8 @@ static void xrs700x_i2c_shutdown(struct i2c_client *i2c) } static const struct i2c_device_id xrs700x_i2c_id[] = { - { "xrs700x-switch", 0 }, - {}, + { "xrs700x-switch" }, + {} }; MODULE_DEVICE_TABLE(i2c, xrs700x_i2c_id); diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c index 65f56a98c0a0..1a34da07c0db 100644 --- a/drivers/net/ethernet/8390/ne2k-pci.c +++ b/drivers/net/ethernet/8390/ne2k-pci.c @@ -186,17 +186,6 @@ static void ne2k_pci_block_output(struct net_device *dev, const int count, static const struct ethtool_ops ne2k_pci_ethtool_ops; - -/* There is no room in the standard 8390 structure for extra info we need, - * so we build a meta/outer-wrapper structure.. - */ -struct ne2k_pci_card { - struct net_device *dev; - struct pci_dev *pci_dev; -}; - - - /* NEx000-clone boards have a Station Address (SA) PROM (SAPROM) in the packet * buffer memory space. By-the-spec NE2000 clones have 0x57,0x57 in bytes * 0x0e,0x0f of the SAPROM, while other supposed NE2000 clones must be diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 6a19b5393ed1..0baac25db4f8 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -122,6 +122,7 @@ source "drivers/net/ethernet/litex/Kconfig" source "drivers/net/ethernet/marvell/Kconfig" source "drivers/net/ethernet/mediatek/Kconfig" source "drivers/net/ethernet/mellanox/Kconfig" +source "drivers/net/ethernet/meta/Kconfig" source "drivers/net/ethernet/micrel/Kconfig" source "drivers/net/ethernet/microchip/Kconfig" source "drivers/net/ethernet/mscc/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index 0d872d4efcd1..c03203439c0e 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -59,6 +59,7 @@ obj-$(CONFIG_NET_VENDOR_LITEX) += litex/ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/ obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ +obj-$(CONFIG_NET_VENDOR_META) += meta/ obj-$(CONFIG_NET_VENDOR_MICREL) += micrel/ obj-$(CONFIG_NET_VENDOR_MICROCHIP) += microchip/ obj-$(CONFIG_NET_VENDOR_MICROSEMI) += mscc/ diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index 857361c74f5d..e1b8794b14c9 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -441,14 +441,6 @@ enum rx_desc_bits { }; /* Completion queue entry. */ -struct short_rx_done_desc { - __le32 status; /* Low 16 bits is length.
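The xrs700x change above is purely cosmetic: C aggregate initializers zero-initialize any members that are not spelled out, so dropping the explicit 0 (and the trailing comma on the sentinel) leaves the table bit-for-bit identical. Illustrated on the same struct:

/* struct i2c_device_id is { char name[I2C_NAME_SIZE]; kernel_ulong_t
 * driver_data; }; both entries below have driver_data == 0, and the
 * terminating { } is the customary all-zero sentinel.
 */
static const struct i2c_device_id example_i2c_id[] = {
	{ "xrs700x-switch" },	/* same as { "xrs700x-switch", 0 } */
	{ }
};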
*/ -}; -struct basic_rx_done_desc { - __le32 status; /* Low 16 bits is length. */ - __le16 vlanid; - __le16 status2; -}; struct csum_rx_done_desc { __le32 status; /* Low 16 bits is length. */ __le16 csum; /* Partial checksum */ diff --git a/drivers/net/ethernet/amd/7990.c b/drivers/net/ethernet/amd/7990.c index ef512cf89abf..27792a52b6cf 100644 --- a/drivers/net/ethernet/amd/7990.c +++ b/drivers/net/ethernet/amd/7990.c @@ -667,4 +667,5 @@ void lance_poll(struct net_device *dev) EXPORT_SYMBOL_GPL(lance_poll); #endif +MODULE_DESCRIPTION("LANCE Ethernet IC generic routines"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/a2065.c b/drivers/net/ethernet/amd/a2065.c index 68983b717145..1ca26a8c40eb 100644 --- a/drivers/net/ethernet/amd/a2065.c +++ b/drivers/net/ethernet/amd/a2065.c @@ -781,4 +781,5 @@ static void __exit a2065_cleanup_module(void) module_init(a2065_init_module); module_exit(a2065_cleanup_module); +MODULE_DESCRIPTION("Commodore A2065 Ethernet driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/ariadne.c b/drivers/net/ethernet/amd/ariadne.c index 38153e633231..fa201da567ed 100644 --- a/drivers/net/ethernet/amd/ariadne.c +++ b/drivers/net/ethernet/amd/ariadne.c @@ -790,4 +790,5 @@ static void __exit ariadne_cleanup_module(void) module_init(ariadne_init_module); module_exit(ariadne_cleanup_module); +MODULE_DESCRIPTION("Ariadne Ethernet Driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index 751454d305c6..8c8cc7d0f42d 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -79,6 +79,7 @@ static int lance_debug = 1; #endif module_param(lance_debug, int, 0); MODULE_PARM_DESC(lance_debug, "atarilance debug level (0-3)"); +MODULE_DESCRIPTION("Atari LANCE Ethernet driver"); MODULE_LICENSE("GPL"); /* Print debug messages on probing? 
*/ diff --git a/drivers/net/ethernet/amd/hplance.c b/drivers/net/ethernet/amd/hplance.c index 055fda11c572..df42294530cb 100644 --- a/drivers/net/ethernet/amd/hplance.c +++ b/drivers/net/ethernet/amd/hplance.c @@ -234,4 +234,5 @@ static void __exit hplance_cleanup_module(void) module_init(hplance_init_module); module_exit(hplance_cleanup_module); +MODULE_DESCRIPTION("HP300 on-board LANCE Ethernet driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c index 6cf38180cc01..b1e6620ad41d 100644 --- a/drivers/net/ethernet/amd/lance.c +++ b/drivers/net/ethernet/amd/lance.c @@ -385,6 +385,7 @@ static void __exit lance_cleanup_module(void) } module_exit(lance_cleanup_module); #endif /* MODULE */ +MODULE_DESCRIPTION("AMD LANCE/PCnet Ethernet driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c index 410c7b67eba4..c156566c0906 100644 --- a/drivers/net/ethernet/amd/mvme147.c +++ b/drivers/net/ethernet/amd/mvme147.c @@ -178,6 +178,7 @@ static int m147lance_close(struct net_device *dev) return 0; } +MODULE_DESCRIPTION("MVME147 LANCE Ethernet driver"); MODULE_LICENSE("GPL"); static struct net_device *dev_mvme147_lance; diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c index 246f34c43765..c60df4a21158 100644 --- a/drivers/net/ethernet/amd/sun3lance.c +++ b/drivers/net/ethernet/amd/sun3lance.c @@ -74,6 +74,7 @@ static int lance_debug = 1; #endif module_param(lance_debug, int, 0); MODULE_PARM_DESC(lance_debug, "SUN3 Lance debug level (0-3)"); +MODULE_DESCRIPTION("Sun3/Sun3x on-board LANCE Ethernet driver"); MODULE_LICENSE("GPL"); #define DPRINTK(n,a) \ diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 58e7e88aae5b..21407a26f806 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -577,7 +577,7 @@ static int xgbe_set_rxfh(struct net_device *netdev, } static int xgbe_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *ts_info) + struct kernel_ethtool_ts_info *ts_info) { struct xgbe_prv_data *pdata = netdev_priv(netdev); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index a2606ee3b0a5..d0aecd1d7357 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -652,7 +652,7 @@ static int aq_ethtool_set_wol(struct net_device *ndev, } static int aq_ethtool_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct aq_nic_s *aq_nic = netdev_priv(ndev); diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig index 0a67612af228..0d400a7d8d91 100644 --- a/drivers/net/ethernet/arc/Kconfig +++ b/drivers/net/ethernet/arc/Kconfig @@ -23,16 +23,6 @@ config ARC_EMAC_CORE select PHYLIB select CRC32 -config ARC_EMAC - tristate "ARC EMAC support" - select ARC_EMAC_CORE - depends on OF_IRQ - depends on ARC || COMPILE_TEST - help - On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x - non-standard on-chip ethernet device ARC EMAC 10/100 is used. - Say Y here if you have such a board. If unsure, say N. 
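The ethtool_ts_info → kernel_ethtool_ts_info conversions scattered through this series (xgbe, aquantia, bnx2x, sja1105, and others) only swap the callback's argument type for the kernel-internal mirror of the UAPI struct; the fields a driver fills stay the same. A minimal sketch of a .get_ts_info implementation against the new type, assuming a hypothetical foo_priv with an optional PTP clock:

static int foo_get_ts_info(struct net_device *ndev,
			   struct kernel_ethtool_ts_info *info)
{
	struct foo_priv *priv = netdev_priv(ndev);

	info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE |
				SOF_TIMESTAMPING_RX_HARDWARE |
				SOF_TIMESTAMPING_RAW_HARDWARE;

	/* -1 advertises "no PHC"; otherwise report the clock's index */
	info->phc_index = priv->ptp_clock ?
			  ptp_clock_index(priv->ptp_clock) : -1;

	info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON);
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) |
			   BIT(HWTSTAMP_FILTER_ALL);

	return 0;
}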
- config EMAC_ROCKCHIP tristate "Rockchip EMAC support" select ARC_EMAC_CORE diff --git a/drivers/net/ethernet/arc/Makefile b/drivers/net/ethernet/arc/Makefile index d63ada577c8e..23586eefec44 100644 --- a/drivers/net/ethernet/arc/Makefile +++ b/drivers/net/ethernet/arc/Makefile @@ -5,5 +5,4 @@ arc_emac-objs := emac_main.o emac_mdio.o obj-$(CONFIG_ARC_EMAC_CORE) += arc_emac.o -obj-$(CONFIG_ARC_EMAC) += emac_arc.o obj-$(CONFIG_EMAC_ROCKCHIP) += emac_rockchip.o diff --git a/drivers/net/ethernet/arc/emac_arc.c b/drivers/net/ethernet/arc/emac_arc.c deleted file mode 100644 index a3afddb23ee8..000000000000 --- a/drivers/net/ethernet/arc/emac_arc.c +++ /dev/null @@ -1,88 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/** - * DOC: emac_arc.c - ARC EMAC specific glue layer - * - * Copyright (C) 2014 Romain Perier - * - * Romain Perier - */ - -#include -#include -#include -#include - -#include "emac.h" - -#define DRV_NAME "emac_arc" - -static int emac_arc_probe(struct platform_device *pdev) -{ - struct device *dev = &pdev->dev; - struct arc_emac_priv *priv; - phy_interface_t interface; - struct net_device *ndev; - int err; - - if (!dev->of_node) - return -ENODEV; - - ndev = alloc_etherdev(sizeof(struct arc_emac_priv)); - if (!ndev) - return -ENOMEM; - platform_set_drvdata(pdev, ndev); - SET_NETDEV_DEV(ndev, dev); - - priv = netdev_priv(ndev); - priv->drv_name = DRV_NAME; - - err = of_get_phy_mode(dev->of_node, &interface); - if (err) { - if (err == -ENODEV) - interface = PHY_INTERFACE_MODE_MII; - else - goto out_netdev; - } - - priv->clk = devm_clk_get(dev, "hclk"); - if (IS_ERR(priv->clk)) { - dev_err(dev, "failed to retrieve host clock from device tree\n"); - err = -EINVAL; - goto out_netdev; - } - - err = arc_emac_probe(ndev, interface); -out_netdev: - if (err) - free_netdev(ndev); - return err; -} - -static void emac_arc_remove(struct platform_device *pdev) -{ - struct net_device *ndev = platform_get_drvdata(pdev); - - arc_emac_remove(ndev); - free_netdev(ndev); -} - -static const struct of_device_id emac_arc_dt_ids[] = { - { .compatible = "snps,arc-emac" }, - { /* Sentinel */ } -}; -MODULE_DEVICE_TABLE(of, emac_arc_dt_ids); - -static struct platform_driver emac_arc_driver = { - .probe = emac_arc_probe, - .remove_new = emac_arc_remove, - .driver = { - .name = DRV_NAME, - .of_match_table = emac_arc_dt_ids, - }, -}; - -module_platform_driver(emac_arc_driver); - -MODULE_AUTHOR("Romain Perier "); -MODULE_DESCRIPTION("ARC EMAC platform driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 58956ed8f531..c7b56a5e5425 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -3634,7 +3634,7 @@ static int bnx2x_set_channels(struct net_device *dev, } static int bnx2x_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct bnx2x *bp = netdev_priv(dev); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 43952689bfb0..bb3be33c1bbd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -456,8 +456,9 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) dma_addr_t mapping; unsigned int length, pad = 0; u32 len, free_size, vlan_tag_flags, cfa_action, flags; - u16 prod, last_frag; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; struct pci_dev *pdev 
= bp->pdev; + u16 prod, last_frag, txts_prod; struct bnxt_tx_ring_info *txr; struct bnxt_sw_tx_bd *tx_buf; __le32 lflags = 0; @@ -509,23 +510,29 @@ static netdev_tx_t bnxt_start_xmit(struct sk_buff *skb, struct net_device *dev) vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT; } - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { - struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ptp && + ptp->tx_tstamp_en) { + if (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) { + lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP); + tx_buf->is_ts_pkt = 1; + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + } else if (!skb_is_gso(skb)) { + u16 seq_id, hdr_off; - if (ptp && ptp->tx_tstamp_en && !skb_is_gso(skb) && - atomic_dec_if_positive(&ptp->tx_avail) >= 0) { - if (!bnxt_ptp_parse(skb, &ptp->tx_seqid, - &ptp->tx_hdr_off)) { + if (!bnxt_ptp_parse(skb, &seq_id, &hdr_off) && + !bnxt_ptp_get_txts_prod(ptp, &txts_prod)) { if (vlan_tag_flags) - ptp->tx_hdr_off += VLAN_HLEN; + hdr_off += VLAN_HLEN; lflags |= cpu_to_le32(TX_BD_FLAGS_STAMP); + tx_buf->is_ts_pkt = 1; skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; - } else { - atomic_inc(&bp->ptp_cfg->tx_avail); + + ptp->txts_req[txts_prod].tx_seqid = seq_id; + ptp->txts_req[txts_prod].tx_hdr_off = hdr_off; + tx_buf->txts_prod = txts_prod; } } } - if (unlikely(skb->no_fcs)) lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC); @@ -753,8 +760,13 @@ tx_dma_error: tx_free: dev_kfree_skb_any(skb); tx_kick_pending: - if (BNXT_TX_PTP_IS_SET(lflags)) - atomic_inc(&bp->ptp_cfg->tx_avail); + if (BNXT_TX_PTP_IS_SET(lflags)) { + txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0; + atomic64_inc(&bp->ptp_cfg->stats.ts_err); + if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) + /* set the SKB to an error so the PTP worker cleans up */ + ptp->txts_req[txts_prod].tx_skb = ERR_PTR(-EIO); + } if (txr->kick_pending) bnxt_txr_db_kick(bp, txr, txr->tx_prod); txr->tx_buf_ring[txr->tx_prod].skb = NULL; @@ -762,7 +774,8 @@ tx_kick_pending: return NETDEV_TX_OK; } -static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, +/* Returns true if some remaining TX packets were not processed.
*/ +static bool __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, int budget) { struct netdev_queue *txq = netdev_get_tx_queue(bp->dev, txr->txq_index); @@ -771,24 +784,33 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, unsigned int tx_bytes = 0; u16 cons = txr->tx_cons; int tx_pkts = 0; + bool rc = false; while (RING_TX(bp, cons) != hw_cons) { struct bnxt_sw_tx_bd *tx_buf; struct sk_buff *skb; + bool is_ts_pkt; int j, last; tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; - cons = NEXT_TX(cons); skb = tx_buf->skb; - tx_buf->skb = NULL; if (unlikely(!skb)) { bnxt_sched_reset_txr(bp, txr, cons); - return; + return rc; } + is_ts_pkt = tx_buf->is_ts_pkt; + if (is_ts_pkt && (bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) { + rc = true; + break; + } + + cons = NEXT_TX(cons); tx_pkts++; tx_bytes += skb->len; + tx_buf->skb = NULL; + tx_buf->is_ts_pkt = 0; if (tx_buf->is_push) { tx_buf->is_push = 0; @@ -808,13 +830,11 @@ static void __bnxt_tx_int(struct bnxt *bp, struct bnxt_tx_ring_info *txr, skb_frag_size(&skb_shinfo(skb)->frags[j]), DMA_TO_DEVICE); } - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { + if (unlikely(is_ts_pkt)) { if (BNXT_CHIP_P5(bp)) { /* PTP worker takes ownership of the skb */ - if (!bnxt_get_tx_ts_p5(bp, skb)) - skb = NULL; - else - atomic_inc(&bp->ptp_cfg->tx_avail); + bnxt_get_tx_ts_p5(bp, skb, tx_buf->txts_prod); + skb = NULL; } } @@ -829,18 +849,22 @@ next_tx_int: __netif_txq_completed_wake(txq, tx_pkts, tx_bytes, bnxt_tx_avail(bp, txr), bp->tx_wake_thresh, READ_ONCE(txr->dev_state) == BNXT_DEV_STATE_CLOSING); + + return rc; } static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int budget) { struct bnxt_tx_ring_info *txr; + bool more = false; int i; bnxt_for_each_napi_tx(i, bnapi, txr) { if (txr->tx_hw_cons != RING_TX(bp, txr->tx_cons)) - __bnxt_tx_int(bp, txr, budget); + more |= __bnxt_tx_int(bp, txr, budget); } - bnapi->events &= ~BNXT_TX_CMP_EVENT; + if (!more) + bnapi->events &= ~BNXT_TX_CMP_EVENT; } static struct page *__bnxt_alloc_rx_page(struct bnxt *bp, dma_addr_t *mapping, @@ -2906,6 +2930,8 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, cpr->has_more_work = 1; break; } + } else if (cmp_type == CMP_TYPE_TX_L2_PKT_TS_CMP) { + bnxt_tx_ts_cmp(bp, bnapi, (struct tx_ts_cmp *)txcmp); } else if (cmp_type >= CMP_TYPE_RX_L2_CMP && cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) { if (likely(budget)) @@ -2937,8 +2963,10 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, } } - if (event & BNXT_REDIRECT_EVENT) + if (event & BNXT_REDIRECT_EVENT) { xdp_do_flush(); + event &= ~BNXT_REDIRECT_EVENT; + } if (event & BNXT_TX_EVENT) { struct bnxt_tx_ring_info *txr = bnapi->tx_ring[0]; @@ -2948,6 +2976,7 @@ static int __bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, wmb(); bnxt_db_write_relaxed(bp, &txr->tx_db, prod); + event &= ~BNXT_TX_EVENT; } cpr->cp_raw_cons = raw_cons; @@ -2965,13 +2994,14 @@ static void __bnxt_poll_work_done(struct bnxt *bp, struct bnxt_napi *bnapi, struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + bnapi->events &= ~BNXT_RX_EVENT; } if (bnapi->events & BNXT_AGG_EVENT) { struct bnxt_rx_ring_info *rxr = bnapi->rx_ring; bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + bnapi->events &= ~BNXT_AGG_EVENT; } - bnapi->events &= BNXT_TX_CMP_EVENT; } static int bnxt_poll_work(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, @@ -3308,15 +3338,60 @@ static void bnxt_free_tx_skbs(struct bnxt 
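The __bnxt_tx_int() change above gives the function a return contract: it reports whether it stopped early (it parks on a timestamp packet whose TS completion has not arrived yet), and bnxt_tx_int() accumulates those results so that BNXT_TX_CMP_EVENT is only cleared once every ring has drained. The same accumulate-then-clear shape in isolation, with hypothetical foo_* names:

static void foo_tx_int(struct foo *fp)
{
	bool more = false;
	int i;

	/* Let each sub-ring report leftover work... */
	for (i = 0; i < fp->nr_rings; i++)
		more |= foo_drain_one_ring(fp, i);

	/* ...and keep the event bit armed until all of them are drained,
	 * so the next poll revisits the pending completions.
	 */
	if (!more)
		fp->events &= ~FOO_TX_CMP_EVENT;
}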
*bp) } } +static void bnxt_free_one_rx_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) +{ + struct pci_dev *pdev = bp->pdev; + int i, max_idx; + + max_idx = bp->rx_nr_pages * RX_DESC_CNT; + + for (i = 0; i < max_idx; i++) { + struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; + dma_addr_t mapping = rx_buf->mapping; + void *data = rx_buf->data; + + if (!data) + continue; + + rx_buf->data = NULL; + if (BNXT_RX_PAGE_MODE(bp)) { + page_pool_recycle_direct(rxr->page_pool, data); + } else { + dma_unmap_single_attrs(&pdev->dev, mapping, + bp->rx_buf_use_size, bp->rx_dir, + DMA_ATTR_WEAK_ORDERING); + skb_free_frag(data); + } + } +} + +static void bnxt_free_one_rx_agg_ring(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) +{ + int i, max_idx; + + max_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; + + for (i = 0; i < max_idx; i++) { + struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; + struct page *page = rx_agg_buf->page; + + if (!page) + continue; + + rx_agg_buf->page = NULL; + __clear_bit(i, rxr->rx_agg_bmap); + + page_pool_recycle_direct(rxr->page_pool, page); + } +} + static void bnxt_free_one_rx_ring_skbs(struct bnxt *bp, int ring_nr) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; struct pci_dev *pdev = bp->pdev; struct bnxt_tpa_idx_map *map; - int i, max_idx, max_agg_idx; + int i; - max_idx = bp->rx_nr_pages * RX_DESC_CNT; - max_agg_idx = bp->rx_agg_nr_pages * RX_DESC_CNT; if (!rxr->rx_tpa) goto skip_rx_tpa_free; @@ -3340,41 +3415,13 @@ skip_rx_tpa_free: if (!rxr->rx_buf_ring) goto skip_rx_buf_free; - for (i = 0; i < max_idx; i++) { - struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[i]; - dma_addr_t mapping = rx_buf->mapping; - void *data = rx_buf->data; - - if (!data) - continue; - - rx_buf->data = NULL; - if (BNXT_RX_PAGE_MODE(bp)) { - page_pool_recycle_direct(rxr->page_pool, data); - } else { - dma_unmap_single_attrs(&pdev->dev, mapping, - bp->rx_buf_use_size, bp->rx_dir, - DMA_ATTR_WEAK_ORDERING); - skb_free_frag(data); - } - } + bnxt_free_one_rx_ring(bp, rxr); skip_rx_buf_free: if (!rxr->rx_agg_ring) goto skip_rx_agg_free; - for (i = 0; i < max_agg_idx; i++) { - struct bnxt_sw_rx_agg_bd *rx_agg_buf = &rxr->rx_agg_ring[i]; - struct page *page = rx_agg_buf->page; - - if (!page) - continue; - - rx_agg_buf->page = NULL; - __clear_bit(i, rxr->rx_agg_bmap); - - page_pool_recycle_direct(rxr->page_pool, page); - } + bnxt_free_one_rx_agg_ring(bp, rxr); skip_rx_agg_free: map = rxr->rx_tpa_idx_map; @@ -3971,6 +4018,62 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp) return 0; } +static void bnxt_init_rx_ring_struct(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr) +{ + struct bnxt_ring_mem_info *rmem; + struct bnxt_ring_struct *ring; + + ring = &rxr->rx_ring_struct; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->rx_nr_pages; + rmem->page_size = HW_RXBD_RING_SIZE; + rmem->pg_arr = (void **)rxr->rx_desc_ring; + rmem->dma_arr = rxr->rx_desc_mapping; + rmem->vmem_size = SW_RXBD_RING_SIZE * bp->rx_nr_pages; + rmem->vmem = (void **)&rxr->rx_buf_ring; + + ring = &rxr->rx_agg_ring_struct; + rmem = &ring->ring_mem; + rmem->nr_pages = bp->rx_agg_nr_pages; + rmem->page_size = HW_RXBD_RING_SIZE; + rmem->pg_arr = (void **)rxr->rx_agg_desc_ring; + rmem->dma_arr = rxr->rx_agg_desc_mapping; + rmem->vmem_size = SW_RXBD_AGG_RING_SIZE * bp->rx_agg_nr_pages; + rmem->vmem = (void **)&rxr->rx_agg_ring; +} + +static void bnxt_reset_rx_ring_struct(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr) +{ + struct bnxt_ring_mem_info *rmem; + struct bnxt_ring_struct *ring; + int i; + + 
rxr->page_pool->p.napi = NULL; + rxr->page_pool = NULL; + + ring = &rxr->rx_ring_struct; + rmem = &ring->ring_mem; + rmem->pg_tbl = NULL; + rmem->pg_tbl_map = 0; + for (i = 0; i < rmem->nr_pages; i++) { + rmem->pg_arr[i] = NULL; + rmem->dma_arr[i] = 0; + } + *rmem->vmem = NULL; + + ring = &rxr->rx_agg_ring_struct; + rmem = &ring->ring_mem; + rmem->pg_tbl = NULL; + rmem->pg_tbl_map = 0; + for (i = 0; i < rmem->nr_pages; i++) { + rmem->pg_arr[i] = NULL; + rmem->dma_arr[i] = 0; + } + *rmem->vmem = NULL; +} + static void bnxt_init_ring_struct(struct bnxt *bp) { int i, j; @@ -4053,37 +4156,55 @@ static void bnxt_init_rxbd_pages(struct bnxt_ring_struct *ring, u32 type) } } -static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) +static void bnxt_alloc_one_rx_ring_skb(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + int ring_nr) { - struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; - struct net_device *dev = bp->dev; u32 prod; int i; prod = rxr->rx_prod; for (i = 0; i < bp->rx_ring_size; i++) { if (bnxt_alloc_rx_data(bp, rxr, prod, GFP_KERNEL)) { - netdev_warn(dev, "init'ed rx ring %d with %d/%d skbs only\n", + netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n", ring_nr, i, bp->rx_ring_size); break; } prod = NEXT_RX(prod); } rxr->rx_prod = prod; +} - if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) - return 0; +static void bnxt_alloc_one_rx_ring_page(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + int ring_nr) +{ + u32 prod; + int i; prod = rxr->rx_agg_prod; for (i = 0; i < bp->rx_agg_ring_size; i++) { if (bnxt_alloc_rx_page(bp, rxr, prod, GFP_KERNEL)) { - netdev_warn(dev, "init'ed rx ring %d with %d/%d pages only\n", + netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d pages only\n", ring_nr, i, bp->rx_ring_size); break; } prod = NEXT_RX_AGG(prod); } rxr->rx_agg_prod = prod; +} + +static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) +{ + struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; + int i; + + bnxt_alloc_one_rx_ring_skb(bp, rxr, ring_nr); + + if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) + return 0; + + bnxt_alloc_one_rx_ring_page(bp, rxr, ring_nr); if (rxr->rx_tpa) { dma_addr_t mapping; @@ -4102,9 +4223,9 @@ static int bnxt_alloc_one_rx_ring(struct bnxt *bp, int ring_nr) return 0; } -static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) +static void bnxt_init_one_rx_ring_rxbd(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr) { - struct bnxt_rx_ring_info *rxr; struct bnxt_ring_struct *ring; u32 type; @@ -4114,9 +4235,33 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) if (NET_IP_ALIGN == 2) type |= RX_BD_FLAGS_SOP; - rxr = &bp->rx_ring[ring_nr]; ring = &rxr->rx_ring_struct; bnxt_init_rxbd_pages(ring, type); + ring->fw_ring_id = INVALID_HW_RING_ID; +} + +static void bnxt_init_one_rx_agg_ring_rxbd(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr) +{ + struct bnxt_ring_struct *ring; + u32 type; + + ring = &rxr->rx_agg_ring_struct; + ring->fw_ring_id = INVALID_HW_RING_ID; + if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { + type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | + RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; + + bnxt_init_rxbd_pages(ring, type); + } +} + +static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) +{ + struct bnxt_rx_ring_info *rxr; + + rxr = &bp->rx_ring[ring_nr]; + bnxt_init_one_rx_ring_rxbd(bp, rxr); netif_queue_set_napi(bp->dev, ring_nr, NETDEV_QUEUE_TYPE_RX, &rxr->bnapi->napi); @@ -4125,17 +4270,8 @@ static int bnxt_init_one_rx_ring(struct bnxt *bp, int ring_nr) bpf_prog_add(bp->xdp_prog, 1); rxr->xdp_prog 
= bp->xdp_prog; } - ring->fw_ring_id = INVALID_HW_RING_ID; - ring = &rxr->rx_agg_ring_struct; - ring->fw_ring_id = INVALID_HW_RING_ID; - - if ((bp->flags & BNXT_FLAG_AGG_RINGS)) { - type = ((u32)BNXT_RX_PAGE_SIZE << RX_BD_LEN_SHIFT) | - RX_BD_TYPE_RX_AGG_BD | RX_BD_FLAGS_SOP; - - bnxt_init_rxbd_pages(ring, type); - } + bnxt_init_one_rx_agg_ring_rxbd(bp, rxr); return bnxt_alloc_one_rx_ring(bp, ring_nr); } @@ -5834,17 +5970,20 @@ bnxt_cfg_rfs_ring_tbl_idx(struct bnxt *bp, struct hwrm_cfa_ntuple_filter_alloc_input *req, struct bnxt_ntuple_filter *fltr) { - struct bnxt_rss_ctx *rss_ctx, *tmp; u16 rxq = fltr->base.rxq; if (fltr->base.flags & BNXT_ACT_RSS_CTX) { - list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) { - if (rss_ctx->index == fltr->base.fw_vnic_id) { - struct bnxt_vnic_info *vnic = &rss_ctx->vnic; + struct ethtool_rxfh_context *ctx; + struct bnxt_rss_ctx *rss_ctx; + struct bnxt_vnic_info *vnic; - req->dst_id = cpu_to_le16(vnic->fw_vnic_id); - break; - } + ctx = xa_load(&bp->dev->ethtool->rss_ctx, + fltr->base.fw_vnic_id); + if (ctx) { + rss_ctx = ethtool_rxfh_context_priv(ctx); + vnic = &rss_ctx->vnic; + + req->dst_id = cpu_to_le16(vnic->fw_vnic_id); } return; } @@ -6083,10 +6222,9 @@ static u16 bnxt_cp_ring_for_tx(struct bnxt *bp, struct bnxt_tx_ring_info *txr) return bnxt_cp_ring_from_grp(bp, &txr->tx_ring_struct); } -int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) +static int bnxt_alloc_rss_indir_tbl(struct bnxt *bp) { int entries; - u16 *tbl; if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) entries = BNXT_MAX_RSS_TABLE_ENTRIES_P5; @@ -6094,22 +6232,19 @@ int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) entries = HW_HASH_INDEX_SIZE; bp->rss_indir_tbl_entries = entries; - tbl = kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL); - if (!tbl) + bp->rss_indir_tbl = + kmalloc_array(entries, sizeof(*bp->rss_indir_tbl), GFP_KERNEL); + if (!bp->rss_indir_tbl) return -ENOMEM; - if (rss_ctx) - rss_ctx->rss_indir_tbl = tbl; - else - bp->rss_indir_tbl = tbl; - return 0; } -void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) +void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, + struct ethtool_rxfh_context *rss_ctx) { u16 max_rings, max_entries, pad, i; - u16 *rss_indir_tbl; + u32 *rss_indir_tbl; if (!bp->rx_nr_rings) return; @@ -6121,7 +6256,7 @@ void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) max_entries = bnxt_get_rxfh_indir_size(bp->dev); if (rss_ctx) - rss_indir_tbl = &rss_ctx->rss_indir_tbl[0]; + rss_indir_tbl = ethtool_rxfh_context_indir(rss_ctx); else rss_indir_tbl = &bp->rss_indir_tbl[0]; @@ -6130,12 +6265,12 @@ void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx) pad = bp->rss_indir_tbl_entries - max_entries; if (pad) - memset(&rss_indir_tbl[i], 0, pad * sizeof(u16)); + memset(&rss_indir_tbl[i], 0, pad * sizeof(*rss_indir_tbl)); } static u16 bnxt_get_max_rss_ring(struct bnxt *bp) { - u16 i, tbl_size, max_ring = 0; + u32 i, tbl_size, max_ring = 0; if (!bp->rss_indir_tbl) return 0; @@ -6146,24 +6281,6 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp) return max_ring; } -u16 bnxt_get_max_rss_ctx_ring(struct bnxt *bp) -{ - u16 i, tbl_size, max_ring = 0; - struct bnxt_rss_ctx *rss_ctx; - - if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) - return 0; - - tbl_size = bnxt_get_rxfh_indir_size(bp->dev); - - list_for_each_entry(rss_ctx, &bp->rss_ctx_list, list) { - for (i = 0; i < tbl_size; i++) - max_ring = max(max_ring, 
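The bnxt RSS-context rework in this hunk replaces the driver-private list (and the now-deleted bnxt_get_max_rss_ctx_ring()) with contexts owned by the ethtool core in an xarray; the driver's per-context state lives in the context's private area and is recovered with ethtool_rxfh_context_priv(). A sketch of the two resulting idioms, assuming a hypothetical foo_rss_ctx stored the same way:

/* Look up one context by ID, as bnxt_cfg_rfs_ring_tbl_idx() now does */
static struct foo_rss_ctx *foo_find_rss_ctx(struct net_device *dev, u32 id)
{
	struct ethtool_rxfh_context *ctx;

	ctx = xa_load(&dev->ethtool->rss_ctx, id);

	return ctx ? ethtool_rxfh_context_priv(ctx) : NULL;
}

/* Walk every context, e.g. to recompute the highest RX ring referenced
 * by any indirection table (what the removed helper used to do)
 */
static u16 foo_max_rss_ctx_ring(struct net_device *dev, u32 tbl_size)
{
	struct ethtool_rxfh_context *ctx;
	unsigned long id;
	u16 max_ring = 0;
	u32 i;

	xa_for_each(&dev->ethtool->rss_ctx, id, ctx) {
		u32 *indir = ethtool_rxfh_context_indir(ctx);

		for (i = 0; i < tbl_size; i++)
			max_ring = max_t(u16, max_ring, indir[i]);
	}

	return max_ring;
}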
rss_ctx->rss_indir_tbl[i]); - } - - return max_ring; -} - int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings) { if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { @@ -6205,7 +6322,7 @@ static void bnxt_fill_hw_rss_tbl_p5(struct bnxt *bp, if (vnic->flags & BNXT_VNIC_NTUPLE_FLAG) j = ethtool_rxfh_indir_default(i, bp->rx_nr_rings); else if (vnic->flags & BNXT_VNIC_RSSCTX_FLAG) - j = vnic->rss_ctx->rss_indir_tbl[i]; + j = ethtool_rxfh_context_indir(vnic->rss_ctx)[i]; else j = bp->rss_indir_tbl[i]; rxr = &bp->rx_ring[j]; @@ -6692,6 +6809,7 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, switch (ring_type) { case HWRM_RING_ALLOC_TX: { struct bnxt_tx_ring_info *txr; + u16 flags = 0; txr = container_of(ring, struct bnxt_tx_ring_info, tx_ring_struct); @@ -6705,6 +6823,9 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, if (bp->flags & BNXT_FLAG_TX_COAL_CMPL) req->cmpl_coal_cnt = RING_ALLOC_REQ_CMPL_COAL_CNT_COAL_64; + if ((bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP) && bp->ptp_cfg) + flags |= RING_ALLOC_REQ_FLAGS_TX_PKT_TS_CMPL_ENABLE; + req->flags = cpu_to_le16(flags); break; } case HWRM_RING_ALLOC_RX: @@ -6878,6 +6999,48 @@ static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, bnxt_set_db_mask(bp, db, ring_type); } +static int bnxt_hwrm_rx_ring_alloc(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr) +{ + struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; + struct bnxt_napi *bnapi = rxr->bnapi; + u32 type = HWRM_RING_ALLOC_RX; + u32 map_idx = bnapi->index; + int rc; + + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); + if (rc) + return rc; + + bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); + bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; + + return 0; +} + +static int bnxt_hwrm_rx_agg_ring_alloc(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr) +{ + struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; + u32 type = HWRM_RING_ALLOC_AGG; + u32 grp_idx = ring->grp_idx; + u32 map_idx; + int rc; + + map_idx = grp_idx + bp->rx_nr_rings; + rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); + if (rc) + return rc; + + bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, + ring->fw_ring_id); + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; + + return 0; +} + static int bnxt_hwrm_ring_alloc(struct bnxt *bp) { bool agg_rings = !!(bp->flags & BNXT_FLAG_AGG_RINGS); @@ -6943,24 +7106,21 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) bnxt_set_db(bp, &txr->tx_db, type, map_idx, ring->fw_ring_id); } - type = HWRM_RING_ALLOC_RX; for (i = 0; i < bp->rx_nr_rings; i++) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; - struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; - struct bnxt_napi *bnapi = rxr->bnapi; - u32 map_idx = bnapi->index; - rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); + rc = bnxt_hwrm_rx_ring_alloc(bp, rxr); if (rc) goto err_out; - bnxt_set_db(bp, &rxr->rx_db, type, map_idx, ring->fw_ring_id); /* If we have agg rings, post agg buffers first. 
*/ if (!agg_rings) bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); - bp->grp_info[map_idx].rx_fw_ring_id = ring->fw_ring_id; if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { struct bnxt_cp_ring_info *cpr2 = rxr->rx_cpr; + struct bnxt_napi *bnapi = rxr->bnapi; u32 type2 = HWRM_RING_ALLOC_CMPL; + struct bnxt_ring_struct *ring; + u32 map_idx = bnapi->index; ring = &cpr2->cp_ring_struct; ring->handle = BNXT_SET_NQ_HDL(cpr2); @@ -6974,23 +7134,10 @@ static int bnxt_hwrm_ring_alloc(struct bnxt *bp) } if (agg_rings) { - type = HWRM_RING_ALLOC_AGG; for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; - struct bnxt_ring_struct *ring = - &rxr->rx_agg_ring_struct; - u32 grp_idx = ring->grp_idx; - u32 map_idx = grp_idx + bp->rx_nr_rings; - - rc = hwrm_ring_alloc_send_msg(bp, ring, type, map_idx); + rc = bnxt_hwrm_rx_agg_ring_alloc(bp, &bp->rx_ring[i]); if (rc) goto err_out; - - bnxt_set_db(bp, &rxr->rx_agg_db, type, map_idx, - ring->fw_ring_id); - bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); - bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); - bp->grp_info[grp_idx].agg_fw_ring_id = ring->fw_ring_id; } } err_out: @@ -7030,6 +7177,50 @@ exit: return 0; } +static void bnxt_hwrm_rx_ring_free(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + bool close_path) +{ + struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; + u32 grp_idx = rxr->bnapi->index; + u32 cmpl_ring_id; + + if (ring->fw_ring_id == INVALID_HW_RING_ID) + return; + + cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); + hwrm_ring_free_send_msg(bp, ring, + RING_FREE_REQ_RING_TYPE_RX, + close_path ? cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[grp_idx].rx_fw_ring_id = INVALID_HW_RING_ID; +} + +static void bnxt_hwrm_rx_agg_ring_free(struct bnxt *bp, + struct bnxt_rx_ring_info *rxr, + bool close_path) +{ + struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; + u32 grp_idx = rxr->bnapi->index; + u32 type, cmpl_ring_id; + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) + type = RING_FREE_REQ_RING_TYPE_RX_AGG; + else + type = RING_FREE_REQ_RING_TYPE_RX; + + if (ring->fw_ring_id == INVALID_HW_RING_ID) + return; + + cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); + hwrm_ring_free_send_msg(bp, ring, type, + close_path ? cmpl_ring_id : + INVALID_HW_RING_ID); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[grp_idx].agg_fw_ring_id = INVALID_HW_RING_ID; +} + static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) { u32 type; @@ -7054,42 +7245,8 @@ static void bnxt_hwrm_ring_free(struct bnxt *bp, bool close_path) } for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; - struct bnxt_ring_struct *ring = &rxr->rx_ring_struct; - u32 grp_idx = rxr->bnapi->index; - - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); - - hwrm_ring_free_send_msg(bp, ring, - RING_FREE_REQ_RING_TYPE_RX, - close_path ? 
cmpl_ring_id : - INVALID_HW_RING_ID); - ring->fw_ring_id = INVALID_HW_RING_ID; - bp->grp_info[grp_idx].rx_fw_ring_id = - INVALID_HW_RING_ID; - } - } - - if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) - type = RING_FREE_REQ_RING_TYPE_RX_AGG; - else - type = RING_FREE_REQ_RING_TYPE_RX; - for (i = 0; i < bp->rx_nr_rings; i++) { - struct bnxt_rx_ring_info *rxr = &bp->rx_ring[i]; - struct bnxt_ring_struct *ring = &rxr->rx_agg_ring_struct; - u32 grp_idx = rxr->bnapi->index; - - if (ring->fw_ring_id != INVALID_HW_RING_ID) { - u32 cmpl_ring_id = bnxt_cp_ring_for_rx(bp, rxr); - - hwrm_ring_free_send_msg(bp, ring, type, - close_path ? cmpl_ring_id : - INVALID_HW_RING_ID); - ring->fw_ring_id = INVALID_HW_RING_ID; - bp->grp_info[grp_idx].agg_fw_ring_id = - INVALID_HW_RING_ID; - } + bnxt_hwrm_rx_ring_free(bp, &bp->rx_ring[i], close_path); + bnxt_hwrm_rx_agg_ring_free(bp, &bp->rx_ring[i], close_path); } /* The completion rings are about to be freed. After that the @@ -8849,7 +9006,7 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) u8 flags; int rc; - if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5(bp)) { + if (bp->hwrm_spec_code < 0x10801 || !BNXT_CHIP_P5_PLUS(bp)) { rc = -ENODEV; goto no_ptp; } @@ -8865,7 +9022,8 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) goto exit; flags = resp->flags; - if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) { + if (BNXT_CHIP_P5_AND_MINUS(bp) && + !(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) { rc = -ENODEV; goto exit; } @@ -8878,10 +9036,13 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) ptp->bp = bp; bp->ptp_cfg = ptp; } - if (flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK) { + + if (flags & + (PORT_MAC_PTP_QCFG_RESP_FLAGS_PARTIAL_DIRECT_ACCESS_REF_CLOCK | + PORT_MAC_PTP_QCFG_RESP_FLAGS_64B_PHC_TIME)) { ptp->refclk_regs[0] = le32_to_cpu(resp->ts_ref_clock_reg_lower); ptp->refclk_regs[1] = le32_to_cpu(resp->ts_ref_clock_reg_upper); - } else if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { + } else if (BNXT_CHIP_P5(bp)) { ptp->refclk_regs[0] = BNXT_TS_REG_TIMESYNC_TS0_LOWER; ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER; } else { @@ -8963,6 +9124,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->fw_cap |= BNXT_FW_CAP_RX_ALL_PKT_TS; if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_UDP_GSO_SUPPORTED) bp->flags |= BNXT_FLAG_UDP_GSO_CAP; + if (flags_ext2 & FUNC_QCAPS_RESP_FLAGS_EXT2_TX_PKT_TS_CMPL_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_TX_TS_CMP; bp->tx_push_thresh = 0; if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && @@ -10031,10 +10194,12 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, struct bnxt_ntuple_filter *ntp_fltr; int i; - bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic); - for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { - if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) - bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i); + if (netif_running(bp->dev)) { + bnxt_hwrm_vnic_free_one(bp, &rss_ctx->vnic); + for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) { + if (vnic->fw_rss_cos_lb_ctx[i] != INVALID_HW_RING_ID) + bnxt_hwrm_vnic_ctx_free_one(bp, vnic, i); + } } if (!all) return; @@ -10055,19 +10220,17 @@ void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, dma_free_coherent(&bp->pdev->dev, vnic->rss_table_size, vnic->rss_table, vnic->rss_table_dma_addr); - kfree(rss_ctx->rss_indir_tbl); - list_del(&rss_ctx->list); bp->num_rss_ctx--; - clear_bit(rss_ctx->index, bp->rss_ctx_bmap); - kfree(rss_ctx); } static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp) { bool set_tpa = 
!!(bp->flags & BNXT_FLAG_TPA); - struct bnxt_rss_ctx *rss_ctx, *tmp; + struct ethtool_rxfh_context *ctx; + unsigned long context; - list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) { + xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { + struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx); struct bnxt_vnic_info *vnic = &rss_ctx->vnic; if (bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings) || @@ -10076,42 +10239,20 @@ static void bnxt_hwrm_realloc_rss_ctx_vnic(struct bnxt *bp) netdev_err(bp->dev, "Failed to restore RSS ctx %d\n", rss_ctx->index); bnxt_del_one_rss_ctx(bp, rss_ctx, true); + ethtool_rxfh_context_lost(bp->dev, rss_ctx->index); } } } -struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp) +void bnxt_clear_rss_ctxs(struct bnxt *bp) { - struct bnxt_rss_ctx *rss_ctx = NULL; + struct ethtool_rxfh_context *ctx; + unsigned long context; - rss_ctx = kzalloc(sizeof(*rss_ctx), GFP_KERNEL); - if (rss_ctx) { - rss_ctx->vnic.rss_ctx = rss_ctx; - list_add_tail(&rss_ctx->list, &bp->rss_ctx_list); - bp->num_rss_ctx++; - } - return rss_ctx; -} + xa_for_each(&bp->dev->ethtool->rss_ctx, context, ctx) { + struct bnxt_rss_ctx *rss_ctx = ethtool_rxfh_context_priv(ctx); -void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all) -{ - struct bnxt_rss_ctx *rss_ctx, *tmp; - - list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) - bnxt_del_one_rss_ctx(bp, rss_ctx, all); - - if (all) - bitmap_free(bp->rss_ctx_bmap); -} - -static void bnxt_init_multi_rss_ctx(struct bnxt *bp) -{ - bp->rss_ctx_bmap = bitmap_zalloc(BNXT_RSS_CTX_BMAP_LEN, GFP_KERNEL); - if (bp->rss_ctx_bmap) { - /* burn index 0 since we cannot have context 0 */ - __set_bit(0, bp->rss_ctx_bmap); - INIT_LIST_HEAD(&bp->rss_ctx_list); - bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX; + bnxt_del_one_rss_ctx(bp, rss_ctx, false); } } @@ -12004,8 +12145,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) /* VF-reps may need to be re-opened after the PF is re-opened */ if (BNXT_PF(bp)) bnxt_vf_reps_open(bp); - if (bp->ptp_cfg) - atomic_set(&bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS); + if (bp->ptp_cfg && !(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP)) + WRITE_ONCE(bp->ptp_cfg->tx_avail, BNXT_MAX_TX_TS); bnxt_ptp_init_rtc(bp, true); bnxt_ptp_cfg_tstamp_filters(bp); if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) @@ -12158,7 +12299,7 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, msleep(20); if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) - bnxt_clear_rss_ctxs(bp, false); + bnxt_clear_rss_ctxs(bp); /* Flush rings and disable interrupts */ bnxt_shutdown_nic(bp, irq_re_init); @@ -14842,6 +14983,220 @@ static const struct netdev_stat_ops bnxt_stat_ops = { .get_base_stats = bnxt_get_base_stats, }; +static int bnxt_alloc_rx_agg_bmap(struct bnxt *bp, struct bnxt_rx_ring_info *rxr) +{ + u16 mem_size; + + rxr->rx_agg_bmap_size = bp->rx_agg_ring_mask + 1; + mem_size = rxr->rx_agg_bmap_size / 8; + rxr->rx_agg_bmap = kzalloc(mem_size, GFP_KERNEL); + if (!rxr->rx_agg_bmap) + return -ENOMEM; + + return 0; +} + +static int bnxt_queue_mem_alloc(struct net_device *dev, void *qmem, int idx) +{ + struct bnxt_rx_ring_info *rxr, *clone; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_ring_struct *ring; + int rc; + + rxr = &bp->rx_ring[idx]; + clone = qmem; + memcpy(clone, rxr, sizeof(*rxr)); + bnxt_init_rx_ring_struct(bp, clone); + bnxt_reset_rx_ring_struct(bp, clone); + + clone->rx_prod = 0; + clone->rx_agg_prod = 0; + clone->rx_sw_agg_prod = 0; + clone->rx_next_cons = 0; + + rc = bnxt_alloc_rx_page_pool(bp, clone, 
rxr->page_pool->p.nid); + if (rc) + return rc; + + ring = &clone->rx_ring_struct; + rc = bnxt_alloc_ring(bp, &ring->ring_mem); + if (rc) + goto err_free_rx_ring; + + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + ring = &clone->rx_agg_ring_struct; + rc = bnxt_alloc_ring(bp, &ring->ring_mem); + if (rc) + goto err_free_rx_agg_ring; + + rc = bnxt_alloc_rx_agg_bmap(bp, clone); + if (rc) + goto err_free_rx_agg_ring; + } + + bnxt_init_one_rx_ring_rxbd(bp, clone); + bnxt_init_one_rx_agg_ring_rxbd(bp, clone); + + bnxt_alloc_one_rx_ring_skb(bp, clone, idx); + if (bp->flags & BNXT_FLAG_AGG_RINGS) + bnxt_alloc_one_rx_ring_page(bp, clone, idx); + + return 0; + +err_free_rx_agg_ring: + bnxt_free_ring(bp, &clone->rx_agg_ring_struct.ring_mem); +err_free_rx_ring: + bnxt_free_ring(bp, &clone->rx_ring_struct.ring_mem); + clone->page_pool->p.napi = NULL; + page_pool_destroy(clone->page_pool); + clone->page_pool = NULL; + return rc; +} + +static void bnxt_queue_mem_free(struct net_device *dev, void *qmem) +{ + struct bnxt_rx_ring_info *rxr = qmem; + struct bnxt *bp = netdev_priv(dev); + struct bnxt_ring_struct *ring; + + bnxt_free_one_rx_ring(bp, rxr); + bnxt_free_one_rx_agg_ring(bp, rxr); + + page_pool_destroy(rxr->page_pool); + rxr->page_pool = NULL; + + ring = &rxr->rx_ring_struct; + bnxt_free_ring(bp, &ring->ring_mem); + + ring = &rxr->rx_agg_ring_struct; + bnxt_free_ring(bp, &ring->ring_mem); + + kfree(rxr->rx_agg_bmap); + rxr->rx_agg_bmap = NULL; +} + +static void bnxt_copy_rx_ring(struct bnxt *bp, + struct bnxt_rx_ring_info *dst, + struct bnxt_rx_ring_info *src) +{ + struct bnxt_ring_mem_info *dst_rmem, *src_rmem; + struct bnxt_ring_struct *dst_ring, *src_ring; + int i; + + dst_ring = &dst->rx_ring_struct; + dst_rmem = &dst_ring->ring_mem; + src_ring = &src->rx_ring_struct; + src_rmem = &src_ring->ring_mem; + + WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages); + WARN_ON(dst_rmem->page_size != src_rmem->page_size); + WARN_ON(dst_rmem->flags != src_rmem->flags); + WARN_ON(dst_rmem->depth != src_rmem->depth); + WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size); + WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem); + + dst_rmem->pg_tbl = src_rmem->pg_tbl; + dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map; + *dst_rmem->vmem = *src_rmem->vmem; + for (i = 0; i < dst_rmem->nr_pages; i++) { + dst_rmem->pg_arr[i] = src_rmem->pg_arr[i]; + dst_rmem->dma_arr[i] = src_rmem->dma_arr[i]; + } + + if (!(bp->flags & BNXT_FLAG_AGG_RINGS)) + return; + + dst_ring = &dst->rx_agg_ring_struct; + dst_rmem = &dst_ring->ring_mem; + src_ring = &src->rx_agg_ring_struct; + src_rmem = &src_ring->ring_mem; + + WARN_ON(dst_rmem->nr_pages != src_rmem->nr_pages); + WARN_ON(dst_rmem->page_size != src_rmem->page_size); + WARN_ON(dst_rmem->flags != src_rmem->flags); + WARN_ON(dst_rmem->depth != src_rmem->depth); + WARN_ON(dst_rmem->vmem_size != src_rmem->vmem_size); + WARN_ON(dst_rmem->ctx_mem != src_rmem->ctx_mem); + WARN_ON(dst->rx_agg_bmap_size != src->rx_agg_bmap_size); + + dst_rmem->pg_tbl = src_rmem->pg_tbl; + dst_rmem->pg_tbl_map = src_rmem->pg_tbl_map; + *dst_rmem->vmem = *src_rmem->vmem; + for (i = 0; i < dst_rmem->nr_pages; i++) { + dst_rmem->pg_arr[i] = src_rmem->pg_arr[i]; + dst_rmem->dma_arr[i] = src_rmem->dma_arr[i]; + } + + dst->rx_agg_bmap = src->rx_agg_bmap; +} + +static int bnxt_queue_start(struct net_device *dev, void *qmem, int idx) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_rx_ring_info *rxr, *clone; + struct bnxt_cp_ring_info *cpr; + int rc; + + rxr = &bp->rx_ring[idx]; + clone = qmem; + + rxr->rx_prod = 
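The five callbacks being filled in here (ndo_queue_mem_size, ndo_queue_mem_alloc, ndo_queue_mem_free, ndo_queue_start, ndo_queue_stop) are invoked by the core when a single RX queue has to be restarted. A rough sketch of that contract, with error handling trimmed; this is an assumption about the caller, not code from this patch:

    /* Sketch: expected core-side use of netdev_queue_mgmt_ops.
     * new_mem and old_mem are per-queue blobs of ndo_queue_mem_size bytes. */
    const struct netdev_queue_mgmt_ops *qops = dev->queue_mgmt_ops;
    int err;

    err = qops->ndo_queue_mem_alloc(dev, new_mem, rxq_idx);
    if (err)
            return err;
    err = qops->ndo_queue_stop(dev, old_mem, rxq_idx);  /* capture + park old */
    if (!err)
            err = qops->ndo_queue_start(dev, new_mem, rxq_idx);
    qops->ndo_queue_mem_free(dev, old_mem);             /* drop old state */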
clone->rx_prod; + rxr->rx_agg_prod = clone->rx_agg_prod; + rxr->rx_sw_agg_prod = clone->rx_sw_agg_prod; + rxr->rx_next_cons = clone->rx_next_cons; + rxr->page_pool = clone->page_pool; + + bnxt_copy_rx_ring(bp, rxr, clone); + + rc = bnxt_hwrm_rx_ring_alloc(bp, rxr); + if (rc) + return rc; + rc = bnxt_hwrm_rx_agg_ring_alloc(bp, rxr); + if (rc) + goto err_free_hwrm_rx_ring; + + bnxt_db_write(bp, &rxr->rx_db, rxr->rx_prod); + if (bp->flags & BNXT_FLAG_AGG_RINGS) + bnxt_db_write(bp, &rxr->rx_agg_db, rxr->rx_agg_prod); + + napi_enable(&rxr->bnapi->napi); + + cpr = &rxr->bnapi->cp_ring; + cpr->sw_stats->rx.rx_resets++; + + return 0; + +err_free_hwrm_rx_ring: + bnxt_hwrm_rx_ring_free(bp, rxr, false); + return rc; +} + +static int bnxt_queue_stop(struct net_device *dev, void *qmem, int idx) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_rx_ring_info *rxr; + + rxr = &bp->rx_ring[idx]; + napi_disable(&rxr->bnapi->napi); + bnxt_hwrm_rx_ring_free(bp, rxr, false); + bnxt_hwrm_rx_agg_ring_free(bp, rxr, false); + rxr->rx_next_cons = 0; + page_pool_disable_direct_recycling(rxr->page_pool); + + memcpy(qmem, rxr, sizeof(*rxr)); + bnxt_init_rx_ring_struct(bp, qmem); + + return 0; +} + +static const struct netdev_queue_mgmt_ops bnxt_queue_mgmt_ops = { + .ndo_queue_mem_size = sizeof(struct bnxt_rx_ring_info), + .ndo_queue_mem_alloc = bnxt_queue_mem_alloc, + .ndo_queue_mem_free = bnxt_queue_mem_free, + .ndo_queue_start = bnxt_queue_start, + .ndo_queue_stop = bnxt_queue_stop, +}; + static void bnxt_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); @@ -14859,8 +15214,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_free_l2_filters(bp, true); bnxt_free_ntp_fltrs(bp, true); - if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) - bnxt_clear_rss_ctxs(bp, true); + WARN_ON(bp->num_rss_ctx); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); /* Flush any pending tasks */ cancel_work_sync(&bp->sp_task); @@ -15307,6 +15661,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->stat_ops = &bnxt_stat_ops; dev->watchdog_timeo = BNXT_TX_TIMEOUT; dev->ethtool_ops = &bnxt_ethtool_ops; + dev->queue_mgmt_ops = &bnxt_queue_mgmt_ops; pci_set_drvdata(pdev, dev); rc = bnxt_alloc_hwrm_resources(bp); @@ -15329,7 +15684,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) bp->flags |= BNXT_FLAG_CHIP_P7; } - rc = bnxt_alloc_rss_indir_tbl(bp, NULL); + rc = bnxt_alloc_rss_indir_tbl(bp); if (rc) goto init_err_pci_clean; @@ -15486,8 +15841,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) INIT_LIST_HEAD(&bp->usr_fltr_list); if (BNXT_SUPPORTS_NTUPLE_VNIC(bp)) - bnxt_init_multi_rss_ctx(bp); - + bp->rss_cap |= BNXT_RSS_CAP_MULTI_RSS_CTX; rc = register_netdev(dev); if (rc) @@ -15510,8 +15864,6 @@ init_err_dl: bnxt_clear_int_mode(bp); init_err_pci_clean: - if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) - bnxt_clear_rss_ctxs(bp, true); bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); bnxt_hwmon_uninit(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 6b10a09ee1af..6bbdc718c3a7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -181,6 +181,32 @@ struct tx_cmp { #define TX_CMP_SQ_CONS_IDX(txcmp) \ (le32_to_cpu((txcmp)->sq_cons_idx) & TX_CMP_SQ_CONS_IDX_MASK) +struct tx_ts_cmp { + __le32 tx_ts_cmp_flags_type; + #define TX_TS_CMP_FLAGS_ERROR (1 << 6) + #define TX_TS_CMP_FLAGS_TS_TYPE (1 << 7) + #define 
TX_TS_CMP_FLAGS_TS_TYPE_PM (0 << 7) + #define TX_TS_CMP_FLAGS_TS_TYPE_PA (1 << 7) + #define TX_TS_CMP_FLAGS_TS_FALLBACK (1 << 8) + #define TX_TS_CMP_TS_SUB_NS (0xf << 12) + #define TX_TS_CMP_TS_NS_MID (0xffff << 16) + #define TX_TS_CMP_TS_NS_MID_SFT 16 + u32 tx_ts_cmp_opaque; + __le32 tx_ts_cmp_errors_v; + #define TX_TS_CMP_V (1 << 0) + #define TX_TS_CMP_TS_INVALID_ERR (1 << 10) + __le32 tx_ts_cmp_ts_ns_lo; +}; + +#define BNXT_GET_TX_TS_48B_NS(tscmp) \ + (le32_to_cpu((tscmp)->tx_ts_cmp_ts_ns_lo) | \ + ((u64)(le32_to_cpu((tscmp)->tx_ts_cmp_flags_type) & \ + TX_TS_CMP_TS_NS_MID) << TX_TS_CMP_TS_NS_MID_SFT)) + +#define BNXT_TX_TS_ERR(tscmp) \ + (((tscmp)->tx_ts_cmp_flags_type & cpu_to_le32(TX_TS_CMP_FLAGS_ERROR)) &&\ + ((tscmp)->tx_ts_cmp_errors_v & cpu_to_le32(TX_TS_CMP_TS_INVALID_ERR))) + struct rx_cmp { __le32 rx_cmp_len_flags_type; #define RX_CMP_CMP_TYPE (0x3f << 0) @@ -848,11 +874,14 @@ struct bnxt_sw_tx_bd { DEFINE_DMA_UNMAP_ADDR(mapping); DEFINE_DMA_UNMAP_LEN(len); struct page *page; - u8 is_gso; + u8 is_ts_pkt; u8 is_push; u8 action; unsigned short nr_frags; - u16 rx_prod; + union { + u16 rx_prod; + u16 txts_prod; + }; }; struct bnxt_sw_rx_bd { @@ -1257,19 +1286,16 @@ struct bnxt_vnic_info { #define BNXT_VNIC_RFS_NEW_RSS_FLAG 0x10 #define BNXT_VNIC_NTUPLE_FLAG 0x20 #define BNXT_VNIC_RSSCTX_FLAG 0x40 - struct bnxt_rss_ctx *rss_ctx; + struct ethtool_rxfh_context *rss_ctx; u32 vnic_id; }; struct bnxt_rss_ctx { - struct list_head list; struct bnxt_vnic_info vnic; - u16 *rss_indir_tbl; u8 index; }; #define BNXT_MAX_ETH_RSS_CTX 32 -#define BNXT_RSS_CTX_BMAP_LEN (BNXT_MAX_ETH_RSS_CTX + 1) #define BNXT_VNIC_ID_INVALID 0xffffffff struct bnxt_hw_rings { @@ -2237,9 +2263,17 @@ struct bnxt { (BNXT_CHIP_NUM_58700((bp)->chip_num) && \ !BNXT_CHIP_TYPE_NITRO_A0(bp))) +/* Chip class phase 3.x */ +#define BNXT_CHIP_P3(bp) \ + (BNXT_CHIP_NUM_57X0X((bp)->chip_num) || \ + BNXT_CHIP_TYPE_NITRO_A0(bp)) + #define BNXT_CHIP_P4_PLUS(bp) \ (BNXT_CHIP_P4(bp) || BNXT_CHIP_P5_PLUS(bp)) +#define BNXT_CHIP_P5_AND_MINUS(bp) \ + (BNXT_CHIP_P3(bp) || BNXT_CHIP_P4(bp) || BNXT_CHIP_P5(bp)) + struct bnxt_aux_priv *aux_priv; struct bnxt_en_dev *edev; @@ -2294,11 +2328,9 @@ struct bnxt { /* grp_info indexed by completion ring index */ struct bnxt_ring_grp_info *grp_info; struct bnxt_vnic_info *vnic_info; - struct list_head rss_ctx_list; - unsigned long *rss_ctx_bmap; u32 num_rss_ctx; int nr_vnics; - u16 *rss_indir_tbl; + u32 *rss_indir_tbl; u16 rss_indir_tbl_entries; u32 rss_hash_cfg; u32 rss_hash_delta; @@ -2384,6 +2416,7 @@ struct bnxt { #define BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2 BIT_ULL(16) #define BNXT_FW_CAP_PCIE_STATS_SUPPORTED BIT_ULL(17) #define BNXT_FW_CAP_EXT_STATS_SUPPORTED BIT_ULL(18) + #define BNXT_FW_CAP_TX_TS_CMP BIT_ULL(19) #define BNXT_FW_CAP_ERR_RECOVER_RELOAD BIT_ULL(20) #define BNXT_FW_CAP_HOT_RESET BIT_ULL(21) #define BNXT_FW_CAP_PTP_RTC BIT_ULL(22) @@ -2774,9 +2807,8 @@ int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, struct bnxt_vnic_info *vnic, u32 tpa_flags); void bnxt_fill_ipv6_mask(__be32 mask[4]); -int bnxt_alloc_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx); -void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx); -u16 bnxt_get_max_rss_ctx_ring(struct bnxt *bp); +void bnxt_set_dflt_rss_indir_tbl(struct bnxt *bp, + struct ethtool_rxfh_context *rss_ctx); int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings); int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic); int 
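A worked example of BNXT_GET_TX_TS_48B_NS() above: the low 32 bits of the 48-bit nanosecond timestamp arrive in tx_ts_cmp_ts_ns_lo, while bits 47:32 ride in bits 31:16 of tx_ts_cmp_flags_type, so the macro shifts them up by another 16 (byte-order conversions elided; the values are invented for illustration):

    flags_type = 0xabcd0000;        /* & TX_TS_CMP_TS_NS_MID == 0xabcd0000 */
    ts_ns_lo   = 0x12345678;
    ts = ts_ns_lo | ((u64)(flags_type & TX_TS_CMP_TS_NS_MID)
                    << TX_TS_CMP_TS_NS_MID_SFT);
    /* ts == 0x0000abcd12345678, a 48-bit nanosecond count */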
bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic, @@ -2810,8 +2842,7 @@ int bnxt_hwrm_vnic_rss_cfg_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic); int __bnxt_setup_vnic_p5(struct bnxt *bp, struct bnxt_vnic_info *vnic); void bnxt_del_one_rss_ctx(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, bool all); -struct bnxt_rss_ctx *bnxt_alloc_rss_ctx(struct bnxt *bp); -void bnxt_clear_rss_ctxs(struct bnxt *bp, bool all); +void bnxt_clear_rss_ctxs(struct bnxt *bp); int bnxt_open_nic(struct bnxt *, bool, bool); int bnxt_half_open_nic(struct bnxt *bp); void bnxt_half_close_nic(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 79c09c1cdf93..d00ef0063820 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -961,12 +961,6 @@ static int bnxt_set_channels(struct net_device *dev, return rc; } - if (req_rx_rings < bp->rx_nr_rings && - req_rx_rings <= bnxt_get_max_rss_ctx_ring(bp)) { - netdev_warn(dev, "Can't deactivate rings used by RSS contexts\n"); - return -EINVAL; - } - if (bnxt_get_nr_rss_ctxs(bp, req_rx_rings) != bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) && netif_is_rxfh_configured(dev)) { @@ -976,7 +970,7 @@ static int bnxt_set_channels(struct net_device *dev, bnxt_clear_usr_fltrs(bp, true); if (BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) - bnxt_clear_rss_ctxs(bp, false); + bnxt_clear_rss_ctxs(bp); if (netif_running(dev)) { if (BNXT_PF(bp)) { /* TODO CHIMP_FW: Send message to all VF's @@ -1216,19 +1210,18 @@ fltr_err: static struct bnxt_rss_ctx *bnxt_get_rss_ctx_from_index(struct bnxt *bp, u32 index) { - struct bnxt_rss_ctx *rss_ctx, *tmp; + struct ethtool_rxfh_context *ctx; - list_for_each_entry_safe(rss_ctx, tmp, &bp->rss_ctx_list, list) - if (rss_ctx->index == index) - return rss_ctx; - return NULL; + ctx = xa_load(&bp->dev->ethtool->rss_ctx, index); + if (!ctx) + return NULL; + return ethtool_rxfh_context_priv(ctx); } -static int bnxt_alloc_rss_ctx_rss_table(struct bnxt *bp, - struct bnxt_rss_ctx *rss_ctx) +static int bnxt_alloc_vnic_rss_table(struct bnxt *bp, + struct bnxt_vnic_info *vnic) { int size = L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5); - struct bnxt_vnic_info *vnic = &rss_ctx->vnic; vnic->rss_table_size = size + HW_HASH_KEY_SIZE; vnic->rss_table = dma_alloc_coherent(&bp->pdev->dev, @@ -1807,10 +1800,9 @@ static u32 bnxt_get_rxfh_key_size(struct net_device *dev) static int bnxt_get_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh) { - u32 rss_context = rxfh->rss_context; struct bnxt_rss_ctx *rss_ctx = NULL; struct bnxt *bp = netdev_priv(dev); - u16 *indir_tbl = bp->rss_indir_tbl; + u32 *indir_tbl = bp->rss_indir_tbl; struct bnxt_vnic_info *vnic; u32 i, tbl_size; @@ -1821,10 +1813,13 @@ static int bnxt_get_rxfh(struct net_device *dev, vnic = &bp->vnic_info[BNXT_VNIC_DEFAULT]; if (rxfh->rss_context) { - rss_ctx = bnxt_get_rss_ctx_from_index(bp, rss_context); - if (!rss_ctx) + struct ethtool_rxfh_context *ctx; + + ctx = xa_load(&bp->dev->ethtool->rss_ctx, rxfh->rss_context); + if (!ctx) return -EINVAL; - indir_tbl = rss_ctx->rss_indir_tbl; + indir_tbl = ethtool_rxfh_context_indir(ctx); + rss_ctx = ethtool_rxfh_context_priv(ctx); vnic = &rss_ctx->vnic; } @@ -1840,8 +1835,9 @@ static int bnxt_get_rxfh(struct net_device *dev, return 0; } -static void bnxt_modify_rss(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, - struct ethtool_rxfh_param *rxfh) +static void bnxt_modify_rss(struct bnxt *bp, struct ethtool_rxfh_context 
*ctx, + struct bnxt_rss_ctx *rss_ctx, + const struct ethtool_rxfh_param *rxfh) { if (rxfh->key) { if (rss_ctx) { @@ -1854,29 +1850,21 @@ static void bnxt_modify_rss(struct bnxt *bp, struct bnxt_rss_ctx *rss_ctx, } if (rxfh->indir) { u32 i, pad, tbl_size = bnxt_get_rxfh_indir_size(bp->dev); - u16 *indir_tbl = bp->rss_indir_tbl; + u32 *indir_tbl = bp->rss_indir_tbl; if (rss_ctx) - indir_tbl = rss_ctx->rss_indir_tbl; + indir_tbl = ethtool_rxfh_context_indir(ctx); for (i = 0; i < tbl_size; i++) indir_tbl[i] = rxfh->indir[i]; pad = bp->rss_indir_tbl_entries - tbl_size; if (pad) - memset(&bp->rss_indir_tbl[i], 0, pad * sizeof(u16)); + memset(&indir_tbl[i], 0, pad * sizeof(*indir_tbl)); } } -static int bnxt_set_rxfh_context(struct bnxt *bp, - struct ethtool_rxfh_param *rxfh, - struct netlink_ext_ack *extack) +static int bnxt_rxfh_context_check(struct bnxt *bp, + struct netlink_ext_ack *extack) { - u32 *rss_context = &rxfh->rss_context; - struct bnxt_rss_ctx *rss_ctx; - struct bnxt_vnic_info *vnic; - bool modify = false; - int bit_id; - int rc; - if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp)) { NL_SET_ERR_MSG_MOD(extack, "RSS contexts not supported"); return -EOPNOTSUPP; @@ -1887,21 +1875,22 @@ static int bnxt_set_rxfh_context(struct bnxt *bp, return -EAGAIN; } - if (*rss_context != ETH_RXFH_CONTEXT_ALLOC) { - rss_ctx = bnxt_get_rss_ctx_from_index(bp, *rss_context); - if (!rss_ctx) { - NL_SET_ERR_MSG_FMT_MOD(extack, "RSS context %u not found", - *rss_context); - return -EINVAL; - } - if (*rss_context && rxfh->rss_delete) { - bnxt_del_one_rss_ctx(bp, rss_ctx, true); - return 0; - } - modify = true; - vnic = &rss_ctx->vnic; - goto modify_context; - } + return 0; +} + +static int bnxt_create_rxfh_context(struct net_device *dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_rss_ctx *rss_ctx; + struct bnxt_vnic_info *vnic; + int rc; + + rc = bnxt_rxfh_context_check(bp, extack); + if (rc) + return rc; if (bp->num_rss_ctx >= BNXT_MAX_ETH_RSS_CTX) { NL_SET_ERR_MSG_FMT_MOD(extack, "Out of RSS contexts, maximum %u", @@ -1914,22 +1903,19 @@ static int bnxt_set_rxfh_context(struct bnxt *bp, return -ENOMEM; } - rss_ctx = bnxt_alloc_rss_ctx(bp); - if (!rss_ctx) - return -ENOMEM; + rss_ctx = ethtool_rxfh_context_priv(ctx); + + bp->num_rss_ctx++; vnic = &rss_ctx->vnic; + vnic->rss_ctx = ctx; vnic->flags |= BNXT_VNIC_RSSCTX_FLAG; vnic->vnic_id = BNXT_VNIC_ID_INVALID; - rc = bnxt_alloc_rss_ctx_rss_table(bp, rss_ctx); + rc = bnxt_alloc_vnic_rss_table(bp, vnic); if (rc) goto out; - rc = bnxt_alloc_rss_indir_tbl(bp, rss_ctx); - if (rc) - goto out; - - bnxt_set_dflt_rss_indir_tbl(bp, rss_ctx); + bnxt_set_dflt_rss_indir_tbl(bp, ctx); memcpy(vnic->rss_hash_key, bp->rss_hash_key, HW_HASH_KEY_SIZE); rc = bnxt_hwrm_vnic_alloc(bp, vnic, 0, bp->rx_nr_rings); @@ -1943,11 +1929,7 @@ static int bnxt_set_rxfh_context(struct bnxt *bp, NL_SET_ERR_MSG_MOD(extack, "Unable to setup TPA"); goto out; } -modify_context: - bnxt_modify_rss(bp, rss_ctx, rxfh); - - if (modify) - return bnxt_hwrm_vnic_rss_cfg_p5(bp, vnic); + bnxt_modify_rss(bp, ctx, rss_ctx, rxfh); rc = __bnxt_setup_vnic_p5(bp, vnic); if (rc) { @@ -1955,21 +1937,47 @@ modify_context: goto out; } - bit_id = bitmap_find_free_region(bp->rss_ctx_bmap, - BNXT_RSS_CTX_BMAP_LEN, 0); - if (bit_id < 0) { - rc = -ENOMEM; - goto out; - } - rss_ctx->index = (u16)bit_id; - *rss_context = rss_ctx->index; - + rss_ctx->index = rxfh->rss_context; return 0; out: 
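With contexts owned by the ethtool core, the driver no longer keeps its own list and ID bitmap: a context is resolved with one xarray load, and the driver-private area (sized by .rxfh_priv_size, see the ethtool_ops hunk below) trails the core object. A minimal sketch of the access pattern this patch uses throughout; the helper name is invented:

    /* Sketch only: my_find_rss_ctx() is hypothetical, the accessors are not. */
    static struct bnxt_rss_ctx *my_find_rss_ctx(struct net_device *dev, u32 id)
    {
            struct ethtool_rxfh_context *ctx;

            ctx = xa_load(&dev->ethtool->rss_ctx, id);  /* core-owned table */
            if (!ctx)
                    return NULL;
            return ethtool_rxfh_context_priv(ctx);      /* trailing priv area */
    }

The per-context indirection table also lives in the core object, reached via ethtool_rxfh_context_indir(ctx) and stored as u32 entries, which is why rss_indir_tbl and its users change from u16 to u32 in this series.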
bnxt_del_one_rss_ctx(bp, rss_ctx, true); return rc; } +static int bnxt_modify_rxfh_context(struct net_device *dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_rss_ctx *rss_ctx; + int rc; + + rc = bnxt_rxfh_context_check(bp, extack); + if (rc) + return rc; + + rss_ctx = ethtool_rxfh_context_priv(ctx); + + bnxt_modify_rss(bp, ctx, rss_ctx, rxfh); + + return bnxt_hwrm_vnic_rss_cfg_p5(bp, &rss_ctx->vnic); +} + +static int bnxt_remove_rxfh_context(struct net_device *dev, + struct ethtool_rxfh_context *ctx, + u32 rss_context, + struct netlink_ext_ack *extack) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_rss_ctx *rss_ctx; + + rss_ctx = ethtool_rxfh_context_priv(ctx); + + bnxt_del_one_rss_ctx(bp, rss_ctx, true); + return 0; +} + static int bnxt_set_rxfh(struct net_device *dev, struct ethtool_rxfh_param *rxfh, struct netlink_ext_ack *extack) @@ -1980,10 +1988,7 @@ static int bnxt_set_rxfh(struct net_device *dev, if (rxfh->hfunc && rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (rxfh->rss_context) - return bnxt_set_rxfh_context(bp, rxfh, extack); - - bnxt_modify_rss(bp, NULL, rxfh); + bnxt_modify_rss(bp, NULL, NULL, rxfh); bnxt_clear_usr_fltrs(bp, false); if (netif_running(bp->dev)) { @@ -5019,7 +5024,7 @@ static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump, } static int bnxt_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct bnxt *bp = netdev_priv(dev); struct bnxt_ptp_cfg *ptp; @@ -5239,6 +5244,19 @@ static void bnxt_get_rmon_stats(struct net_device *dev, *ranges = bnxt_rmon_ranges; } +static void bnxt_get_ptp_stats(struct net_device *dev, + struct ethtool_ts_stats *ts_stats) +{ + struct bnxt *bp = netdev_priv(dev); + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + if (ptp) { + ts_stats->pkts = ptp->stats.ts_pkts; + ts_stats->lost = ptp->stats.ts_lost; + ts_stats->err = atomic64_read(&ptp->stats.ts_err); + } +} + static void bnxt_get_link_ext_stats(struct net_device *dev, struct ethtool_link_ext_stats *stats) { @@ -5262,6 +5280,9 @@ void bnxt_ethtool_free(struct bnxt *bp) const struct ethtool_ops bnxt_ethtool_ops = { .cap_link_lanes_supported = 1, .cap_rss_ctx_supported = 1, + .rxfh_max_context_id = BNXT_MAX_ETH_RSS_CTX, + .rxfh_indir_space = BNXT_MAX_RSS_TABLE_ENTRIES_P5, + .rxfh_priv_size = sizeof(struct bnxt_rss_ctx), .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_MAX_FRAMES | ETHTOOL_COALESCE_USECS_IRQ | @@ -5299,6 +5320,9 @@ const struct ethtool_ops bnxt_ethtool_ops = { .get_rxfh_key_size = bnxt_get_rxfh_key_size, .get_rxfh = bnxt_get_rxfh, .set_rxfh = bnxt_set_rxfh, + .create_rxfh_context = bnxt_create_rxfh_context, + .modify_rxfh_context = bnxt_modify_rxfh_context, + .remove_rxfh_context = bnxt_remove_rxfh_context, .flash_device = bnxt_flash_device, .get_eeprom_len = bnxt_get_eeprom_len, .get_eeprom = bnxt_get_eeprom, @@ -5322,4 +5346,5 @@ const struct ethtool_ops bnxt_ethtool_ops = { .get_eth_mac_stats = bnxt_get_eth_mac_stats, .get_eth_ctrl_stats = bnxt_get_eth_ctrl_stats, .get_rmon_stats = bnxt_get_rmon_stats, + .get_ts_stats = bnxt_get_ptp_stats, }; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index e661ab154d6b..37d42423459c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -110,7 +110,7 @@ static void 
bnxt_ptp_get_current_time(struct bnxt *bp) } static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts, - u32 txts_tmo) + u32 txts_tmo, int slot) { struct hwrm_port_ts_query_output *resp; struct hwrm_port_ts_query_input *req; @@ -123,11 +123,12 @@ static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts, req->flags = cpu_to_le32(flags); if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) == PORT_TS_QUERY_REQ_FLAGS_PATH_TX) { + struct bnxt_ptp_tx_req *txts_req = &bp->ptp_cfg->txts_req[slot]; u32 tmo_us = txts_tmo * 1000; req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES); - req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid); - req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off); + req->ptp_seq_id = cpu_to_le32(txts_req->tx_seqid); + req->ptp_hdr_offset = cpu_to_le16(txts_req->tx_hdr_off); if (!tmo_us) tmo_us = BNXT_PTP_QTS_TIMEOUT; tmo_us = min(tmo_us, BNXT_PTP_QTS_MAX_TMO_US); @@ -656,6 +657,14 @@ static int bnxt_map_ptp_regs(struct bnxt *bp) (ptp->refclk_regs[i] & BNXT_GRC_OFFSET_MASK); return 0; } + if (bp->flags & BNXT_FLAG_CHIP_P7) { + for (i = 0; i < 2; i++) { + if (reg_arr[i] & BNXT_GRC_BASE_MASK) + return -EINVAL; + ptp->refclk_mapped_regs[i] = reg_arr[i]; + } + return 0; + } return -ENODEV; } @@ -674,41 +683,44 @@ static u64 bnxt_cc_read(const struct cyclecounter *cc) return ns; } -static void bnxt_stamp_tx_skb(struct bnxt *bp, struct sk_buff *skb) +static int bnxt_stamp_tx_skb(struct bnxt *bp, int slot) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; struct skb_shared_hwtstamps timestamp; + struct bnxt_ptp_tx_req *txts_req; unsigned long now = jiffies; u64 ts = 0, ns = 0; u32 tmo = 0; int rc; - if (!ptp->txts_pending) - ptp->abs_txts_tmo = now + msecs_to_jiffies(ptp->txts_tmo); - if (!time_after_eq(now, ptp->abs_txts_tmo)) - tmo = jiffies_to_msecs(ptp->abs_txts_tmo - now); + txts_req = &ptp->txts_req[slot]; + /* make sure bnxt_get_tx_ts_p5() has updated abs_txts_tmo */ + smp_rmb(); + if (!time_after_eq(now, txts_req->abs_txts_tmo)) + tmo = jiffies_to_msecs(txts_req->abs_txts_tmo - now); rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_PATH_TX, &ts, - tmo); + tmo, slot); if (!rc) { memset(&timestamp, 0, sizeof(timestamp)); spin_lock_bh(&ptp->ptp_lock); ns = timecounter_cyc2time(&ptp->tc, ts); spin_unlock_bh(&ptp->ptp_lock); timestamp.hwtstamp = ns_to_ktime(ns); - skb_tstamp_tx(ptp->tx_skb, &timestamp); + skb_tstamp_tx(txts_req->tx_skb, &timestamp); + ptp->stats.ts_pkts++; } else { - if (!time_after_eq(jiffies, ptp->abs_txts_tmo)) { - ptp->txts_pending = true; - return; - } + if (!time_after_eq(jiffies, txts_req->abs_txts_tmo)) + return -EAGAIN; + + ptp->stats.ts_lost++; netdev_warn_once(bp->dev, "TS query for TX timer failed rc = %x\n", rc); } - dev_kfree_skb_any(ptp->tx_skb); - ptp->tx_skb = NULL; - atomic_inc(&ptp->tx_avail); - ptp->txts_pending = false; + dev_kfree_skb_any(txts_req->tx_skb); + txts_req->tx_skb = NULL; + + return 0; } static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info) @@ -717,12 +729,30 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info) ptp_info); unsigned long now = jiffies; struct bnxt *bp = ptp->bp; + u16 cons = ptp->txts_cons; + u32 num_requests; + int rc = 0; - if (ptp->tx_skb) - bnxt_stamp_tx_skb(bp, ptp->tx_skb); + num_requests = BNXT_MAX_TX_TS - READ_ONCE(ptp->tx_avail); + while (num_requests--) { + if (IS_ERR(ptp->txts_req[cons].tx_skb)) + goto next_slot; + if (!ptp->txts_req[cons].tx_skb) + break; + rc = bnxt_stamp_tx_skb(bp, cons); + if (rc == -EAGAIN) + break; +next_slot: + BNXT_PTP_INC_TX_AVAIL(ptp); + 
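The worker above drains at most BNXT_MAX_TX_TS outstanding requests starting at the consumer index. The NEXT_TXTS() wrap, defined in the bnxt_ptp.h hunk further down, only works because the ring size is a power of two:

    #define BNXT_MAX_TX_TS  4
    #define NEXT_TXTS(idx)  (((idx) + 1) & (BNXT_MAX_TX_TS - 1))
    /* e.g. NEXT_TXTS(3) == (3 + 1) & 3 == 0: wraparound is a mask, not a modulo */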
cons = NEXT_TXTS(cons); + } + ptp->txts_cons = cons; - if (!time_after_eq(now, ptp->next_period)) + if (!time_after_eq(now, ptp->next_period)) { + if (rc == -EAGAIN) + return 0; return ptp->next_period - now; + } bnxt_ptp_get_current_time(bp); ptp->next_period = now + HZ; @@ -732,22 +762,37 @@ static long bnxt_ptp_ts_aux_work(struct ptp_clock_info *ptp_info) spin_unlock_bh(&ptp->ptp_lock); ptp->next_overflow_check = now + BNXT_PHC_OVERFLOW_PERIOD; } - if (ptp->txts_pending) + if (rc == -EAGAIN) return 0; return HZ; } -int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb) +int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod) +{ + spin_lock_bh(&ptp->ptp_tx_lock); + if (ptp->tx_avail) { + *prod = ptp->txts_prod; + ptp->txts_prod = NEXT_TXTS(*prod); + ptp->tx_avail--; + spin_unlock_bh(&ptp->ptp_tx_lock); + return 0; + } + spin_unlock_bh(&ptp->ptp_tx_lock); + atomic64_inc(&ptp->stats.ts_err); + return -ENOSPC; +} + +void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct bnxt_ptp_tx_req *txts_req; - if (ptp->tx_skb) { - netdev_err(bp->dev, "deferring skb:one SKB is still outstanding\n"); - return -EBUSY; - } - ptp->tx_skb = skb; + txts_req = &ptp->txts_req[prod]; + txts_req->abs_txts_tmo = jiffies + msecs_to_jiffies(ptp->txts_tmo); + /* make sure abs_txts_tmo is written first */ + smp_wmb(); + txts_req->tx_skb = skb; ptp_schedule_worker(ptp->ptp_clock, 0); - return 0; } int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts) @@ -766,6 +811,38 @@ int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts) return 0; } +void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi, + struct tx_ts_cmp *tscmp) +{ + struct skb_shared_hwtstamps timestamp = {}; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u32 opaque = tscmp->tx_ts_cmp_opaque; + struct bnxt_tx_ring_info *txr; + struct bnxt_sw_tx_bd *tx_buf; + u64 ts, ns; + u16 cons; + + txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)]; + ts = BNXT_GET_TX_TS_48B_NS(tscmp); + cons = TX_OPAQUE_IDX(opaque); + tx_buf = &txr->tx_buf_ring[RING_TX(bp, cons)]; + if (tx_buf->is_ts_pkt) { + if (BNXT_TX_TS_ERR(tscmp)) { + netdev_err(bp->dev, + "timestamp completion error 0x%x 0x%x\n", + le32_to_cpu(tscmp->tx_ts_cmp_flags_type), + le32_to_cpu(tscmp->tx_ts_cmp_errors_v)); + } else { + spin_lock_bh(&ptp->ptp_lock); + ns = timecounter_cyc2time(&ptp->tc, ts); + spin_unlock_bh(&ptp->ptp_lock); + timestamp.hwtstamp = ns_to_ktime(ns); + skb_tstamp_tx(tx_buf->skb, &timestamp); + } + tx_buf->is_ts_pkt = 0; + } +} + static const struct ptp_clock_info bnxt_ptp_caps = { .owner = THIS_MODULE, .name = "bnxt clock", @@ -912,7 +989,7 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg) return rc; } else { rc = bnxt_hwrm_port_ts_query(bp, PORT_TS_QUERY_REQ_FLAGS_CURRENT_TIME, - &ns, 0); + &ns, 0, 0); if (rc) return rc; } @@ -952,8 +1029,9 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) bnxt_ptp_free(bp); - atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS); + WRITE_ONCE(ptp->tx_avail, BNXT_MAX_TX_TS); spin_lock_init(&ptp->ptp_lock); + spin_lock_init(&ptp->ptp_tx_lock); if (BNXT_PTP_USE_RTC(bp)) { bnxt_ptp_timecounter_init(bp, false); @@ -979,7 +1057,12 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) rc = err; goto out; } - if (BNXT_CHIP_P5(bp)) { + + ptp->stats.ts_pkts = 0; + ptp->stats.ts_lost = 0; + atomic64_set(&ptp->stats.ts_err, 0); + + if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) { spin_lock_bh(&ptp->ptp_lock); bnxt_refclk_read(bp, NULL, &ptp->current_time); WRITE_ONCE(ptp->old_time, ptp->current_time); 
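The smp_wmb() in bnxt_get_tx_ts_p5() pairs with the smp_rmb() in bnxt_stamp_tx_skb(): the tx_skb pointer acts as the publish flag, and abs_txts_tmo must be globally visible before it. A standalone C11 analogue of the same ordering, illustrative only (the driver itself uses the kernel barriers shown above):

    #include <stdatomic.h>
    #include <stddef.h>

    struct tx_req {
            unsigned long abs_tmo;  /* payload, written before publishing */
            _Atomic(void *) skb;    /* non-NULL means "request posted"    */
    };

    static void publish(struct tx_req *req, void *skb, unsigned long tmo)
    {
            req->abs_tmo = tmo;
            /* release: abs_tmo is ordered before skb becomes visible */
            atomic_store_explicit(&req->skb, skb, memory_order_release);
    }

    static void *consume(struct tx_req *req, unsigned long *tmo)
    {
            void *skb = atomic_load_explicit(&req->skb, memory_order_acquire);

            if (!skb)
                    return NULL;
            /* acquire pairs with the release: abs_tmo is now stable */
            *tmo = req->abs_tmo;
            return skb;
    }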
@@ -998,6 +1081,7 @@ out: void bnxt_ptp_clear(struct bnxt *bp) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + int i; if (!ptp) return; @@ -1009,9 +1093,12 @@ void bnxt_ptp_clear(struct bnxt *bp) kfree(ptp->ptp_info.pin_config); ptp->ptp_info.pin_config = NULL; - if (ptp->tx_skb) { - dev_kfree_skb_any(ptp->tx_skb); - ptp->tx_skb = NULL; + for (i = 0; i < BNXT_MAX_TX_TS; i++) { + if (ptp->txts_req[i].tx_skb) { + dev_kfree_skb_any(ptp->txts_req[i].tx_skb); + ptp->txts_req[i].tx_skb = NULL; + } } + bnxt_unmap_ptp_regs(bp); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index 2c3415c8fc03..a9a2f9a18c9c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -79,6 +79,22 @@ struct bnxt_pps { struct pps_pin pins[BNXT_MAX_TSIO_PINS]; }; +struct bnxt_ptp_stats { + u64 ts_pkts; + u64 ts_lost; + atomic64_t ts_err; +}; + +#define BNXT_MAX_TX_TS 4 +#define NEXT_TXTS(idx) (((idx) + 1) & (BNXT_MAX_TX_TS - 1)) + +struct bnxt_ptp_tx_req { + struct sk_buff *tx_skb; + u16 tx_seqid; + u16 tx_hdr_off; + unsigned long abs_txts_tmo; +}; + struct bnxt_ptp_cfg { struct ptp_clock_info ptp_info; struct ptp_clock *ptp_clock; @@ -87,7 +103,8 @@ struct bnxt_ptp_cfg { struct bnxt_pps pps_info; /* serialize timecounter access */ spinlock_t ptp_lock; - struct sk_buff *tx_skb; + /* serialize ts tx request queuing */ + spinlock_t ptp_tx_lock; u64 current_time; u64 old_time; unsigned long next_period; @@ -96,11 +113,10 @@ struct bnxt_ptp_cfg { /* a 23b shift cyclecounter will overflow in ~36 mins. Check overflow every 18 mins. */ #define BNXT_PHC_OVERFLOW_PERIOD (18 * 60 * HZ) - u16 tx_seqid; - u16 tx_hdr_off; + struct bnxt_ptp_tx_req txts_req[BNXT_MAX_TX_TS]; + struct bnxt *bp; - atomic_t tx_avail; -#define BNXT_MAX_TX_TS 1 + u32 tx_avail; u16 rxctl; #define BNXT_PTP_MSG_SYNC (1 << 0) #define BNXT_PTP_MSG_DELAY_REQ (1 << 1) @@ -117,14 +133,16 @@ struct bnxt_ptp_cfg { BNXT_PTP_MSG_PDELAY_REQ | \ BNXT_PTP_MSG_PDELAY_RESP) u8 tx_tstamp_en:1; - u8 txts_pending:1; int rx_filter; u32 tstamp_filters; u32 refclk_regs[2]; u32 refclk_mapped_regs[2]; u32 txts_tmo; - unsigned long abs_txts_tmo; + u16 txts_prod; + u16 txts_cons; + + struct bnxt_ptp_stats stats; }; #if BITS_PER_LONG == 32 @@ -139,6 +157,13 @@ do { \ ((dst) = READ_ONCE(src)) #endif +#define BNXT_PTP_INC_TX_AVAIL(ptp) \ +do { \ + spin_lock_bh(&(ptp)->ptp_tx_lock); \ + (ptp)->tx_avail++; \ + spin_unlock_bh(&(ptp)->ptp_tx_lock); \ +} while (0) + int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off); void bnxt_ptp_update_current_time(struct bnxt *bp); void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2); @@ -146,8 +171,11 @@ int bnxt_ptp_cfg_tstamp_filters(struct bnxt *bp); void bnxt_ptp_reapply_pps(struct bnxt *bp); int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr); int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr); -int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb); +int bnxt_ptp_get_txts_prod(struct bnxt_ptp_cfg *ptp, u16 *prod); +void bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb, u16 prod); int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts); +void bnxt_tx_ts_cmp(struct bnxt *bp, struct bnxt_napi *bnapi, + struct tx_ts_cmp *tscmp); void bnxt_ptp_rtc_timecounter_init(struct bnxt_ptp_cfg *ptp, u64 ns); int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg); int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg); diff --git a/drivers/net/ethernet/broadcom/tg3.c 
b/drivers/net/ethernet/broadcom/tg3.c index 1589a49b876c..0ec5f01551f9 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6141,7 +6141,7 @@ static void tg3_refclk_write(struct tg3 *tp, u64 newval) static inline void tg3_full_lock(struct tg3 *tp, int irq_sync); static inline void tg3_full_unlock(struct tg3 *tp); -static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) +static int tg3_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info) { struct tg3 *tp = netdev_priv(dev); diff --git a/drivers/net/ethernet/brocade/bna/bna_types.h b/drivers/net/ethernet/brocade/bna/bna_types.h index a5ebd7110e07..986f43d27711 100644 --- a/drivers/net/ethernet/brocade/bna/bna_types.h +++ b/drivers/net/ethernet/brocade/bna/bna_types.h @@ -416,7 +416,7 @@ struct bna_ib { /* Tx object */ /* Tx datapath control structure */ -#define BNA_Q_NAME_SIZE 16 +#define BNA_Q_NAME_SIZE (IFNAMSIZ + 6) struct bna_tcb { /* Fast path */ void **sw_qpt; diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c index fe121d36112d..ece6f3b48327 100644 --- a/drivers/net/ethernet/brocade/bna/bnad.c +++ b/drivers/net/ethernet/brocade/bna/bnad.c @@ -1534,8 +1534,9 @@ bnad_tx_msix_register(struct bnad *bnad, struct bnad_tx_info *tx_info, for (i = 0; i < num_txqs; i++) { vector_num = tx_info->tcb[i]->intr_vector; - sprintf(tx_info->tcb[i]->name, "%s TXQ %d", bnad->netdev->name, - tx_id + tx_info->tcb[i]->id); + snprintf(tx_info->tcb[i]->name, BNA_Q_NAME_SIZE, "%s TXQ %d", + bnad->netdev->name, + tx_id + tx_info->tcb[i]->id); err = request_irq(bnad->msix_table[vector_num].vector, (irq_handler_t)bnad_msix_tx, 0, tx_info->tcb[i]->name, @@ -1585,9 +1586,9 @@ bnad_rx_msix_register(struct bnad *bnad, struct bnad_rx_info *rx_info, for (i = 0; i < num_rxps; i++) { vector_num = rx_info->rx_ctrl[i].ccb->intr_vector; - sprintf(rx_info->rx_ctrl[i].ccb->name, "%s CQ %d", - bnad->netdev->name, - rx_id + rx_info->rx_ctrl[i].ccb->id); + snprintf(rx_info->rx_ctrl[i].ccb->name, BNA_Q_NAME_SIZE, + "%s CQ %d", bnad->netdev->name, + rx_id + rx_info->rx_ctrl[i].ccb->id); err = request_irq(bnad->msix_table[vector_num].vector, (irq_handler_t)bnad_msix_rx, 0, rx_info->rx_ctrl[i].ccb->name, diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index aa5700ac9c00..ea71612f6b36 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -645,6 +645,10 @@ #define GEM_T2OFST_OFFSET 0 /* offset value */ #define GEM_T2OFST_SIZE 7 +/* Bitfields in queue pointer registers */ +#define MACB_QUEUE_DISABLE_OFFSET 0 /* disable queue */ +#define MACB_QUEUE_DISABLE_SIZE 1 + /* Offset for screener type 2 compare values (T2CMPOFST). * Note the offset is applied after the specified point, * e.g. 
GEM_T2COMPOFST_ETYPE denotes the EtherType field, so an offset @@ -733,6 +737,7 @@ #define MACB_CAPS_NEEDS_RSTONUBR 0x00000100 #define MACB_CAPS_MIIONRGMII 0x00000200 #define MACB_CAPS_NEED_TSUCLK 0x00000400 +#define MACB_CAPS_QUEUE_DISABLE 0x00000800 #define MACB_CAPS_PCS 0x01000000 #define MACB_CAPS_HIGH_SPEED 0x02000000 #define MACB_CAPS_CLK_HW_CHG 0x04000000 @@ -1163,7 +1168,7 @@ struct macb_ptp_info { s32 (*get_ptp_max_adj)(void); unsigned int (*get_tsu_rate)(struct macb *bp); int (*get_ts_info)(struct net_device *dev, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); int (*get_hwtst)(struct net_device *netdev, struct kernel_hwtstamp_config *tstamp_config); int (*set_hwtst)(struct net_device *netdev, @@ -1254,6 +1259,8 @@ struct macb { u32 (*macb_reg_readl)(struct macb *bp, int offset); void (*macb_reg_writel)(struct macb *bp, int offset, u32 value); + struct macb_dma_desc *rx_ring_tieoff; + dma_addr_t rx_ring_tieoff_dma; size_t rx_buffer_size; unsigned int rx_ring_size; @@ -1299,6 +1306,7 @@ struct macb { unsigned int jumbo_max_len; u32 wol; + u32 wolopts; /* holds value of rx watermark value for pbuf_rxcutthru register */ u32 rx_watermark; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 241ce9a2fa99..11665be3a22c 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -38,6 +38,7 @@ #include #include #include +#include #include "macb.h" /* This structure is only used for MACB on SiFive FU540 devices */ @@ -84,8 +85,7 @@ struct sifive_fu540_macb_mgmt { #define GEM_MTU_MIN_SIZE ETH_MIN_MTU #define MACB_NETIF_LSO NETIF_F_TSO -#define MACB_WOL_HAS_MAGIC_PACKET (0x1 << 0) -#define MACB_WOL_ENABLED (0x1 << 1) +#define MACB_WOL_ENABLED BIT(0) #define HS_SPEED_10000M 4 #define MACB_SERDES_RATE_10G 1 @@ -2477,6 +2477,12 @@ static void macb_free_consistent(struct macb *bp) unsigned int q; int size; + if (bp->rx_ring_tieoff) { + dma_free_coherent(&bp->pdev->dev, macb_dma_desc_get_size(bp), + bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma); + bp->rx_ring_tieoff = NULL; + } + bp->macbgem_ops.mog_free_rx_buffers(bp); for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { @@ -2568,6 +2574,16 @@ static int macb_alloc_consistent(struct macb *bp) if (bp->macbgem_ops.mog_alloc_rx_buffers(bp)) goto out_err; + /* Required for tie off descriptor for PM cases */ + if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) { + bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev, + macb_dma_desc_get_size(bp), + &bp->rx_ring_tieoff_dma, + GFP_KERNEL); + if (!bp->rx_ring_tieoff) + goto out_err; + } + return 0; out_err: @@ -2575,6 +2591,19 @@ out_err: return -ENOMEM; } +static void macb_init_tieoff(struct macb *bp) +{ + struct macb_dma_desc *desc = bp->rx_ring_tieoff; + + if (bp->caps & MACB_CAPS_QUEUE_DISABLE) + return; + /* Setup a wrapping descriptor with no free slots + * (WRAP and USED) to tie off/disable unused RX queues. 
+ */ + macb_set_addr(bp, desc, MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED)); + desc->ctrl = 0; +} + static void gem_init_rings(struct macb *bp) { struct macb_queue *queue; @@ -2598,6 +2627,7 @@ static void gem_init_rings(struct macb *bp) gem_rx_refill(queue); } + macb_init_tieoff(bp); } static void macb_init_rings(struct macb *bp) @@ -2615,6 +2645,8 @@ static void macb_init_rings(struct macb *bp) bp->queues[0].tx_head = 0; bp->queues[0].tx_tail = 0; desc->ctrl |= MACB_BIT(TX_WRAP); + + macb_init_tieoff(bp); } static void macb_reset_hw(struct macb *bp) @@ -3246,13 +3278,11 @@ static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct macb *bp = netdev_priv(netdev); - if (bp->wol & MACB_WOL_HAS_MAGIC_PACKET) { - phylink_ethtool_get_wol(bp->phylink, wol); - wol->supported |= WAKE_MAGIC; + phylink_ethtool_get_wol(bp->phylink, wol); + wol->supported |= (WAKE_MAGIC | WAKE_ARP); - if (bp->wol & MACB_WOL_ENABLED) - wol->wolopts |= WAKE_MAGIC; - } + /* Add macb wolopts to phy wolopts */ + wol->wolopts |= bp->wolopts; } static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) @@ -3262,22 +3292,15 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) /* Pass the order to phylink layer */ ret = phylink_ethtool_set_wol(bp->phylink, wol); - /* Don't manage WoL on MAC if handled by the PHY - * or if there's a failure in talking to the PHY - */ - if (!ret || ret != -EOPNOTSUPP) + /* Don't manage WoL on MAC, if PHY set_wol() fails */ + if (ret && ret != -EOPNOTSUPP) return ret; - if (!(bp->wol & MACB_WOL_HAS_MAGIC_PACKET) || - (wol->wolopts & ~WAKE_MAGIC)) - return -EOPNOTSUPP; + bp->wolopts = (wol->wolopts & WAKE_MAGIC) ? WAKE_MAGIC : 0; + bp->wolopts |= (wol->wolopts & WAKE_ARP) ? WAKE_ARP : 0; + bp->wol = (wol->wolopts) ? 
MACB_WOL_ENABLED : 0; - if (wol->wolopts & WAKE_MAGIC) - bp->wol |= MACB_WOL_ENABLED; - else - bp->wol &= ~MACB_WOL_ENABLED; - - device_set_wakeup_enable(&bp->pdev->dev, bp->wol & MACB_WOL_ENABLED); + device_set_wakeup_enable(&bp->pdev->dev, bp->wol); return 0; } @@ -3376,7 +3399,7 @@ static s32 gem_get_ptp_max_adj(void) } static int gem_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct macb *bp = netdev_priv(dev); @@ -3417,7 +3440,7 @@ static struct macb_ptp_info gem_ptp_info = { #endif static int macb_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct macb *bp = netdev_priv(netdev); @@ -4917,7 +4940,8 @@ static const struct macb_config sama7g5_emac_config = { static const struct macb_config versal_config = { .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO | - MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK, + MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH | MACB_CAPS_NEED_TSUCLK | + MACB_CAPS_QUEUE_DISABLE, .dma_burst_length = 16, .clk_init = macb_clk_init, .init = init_reset_optional, @@ -5053,9 +5077,7 @@ static int macb_probe(struct platform_device *pdev) bp->max_tx_length = GEM_MAX_TX_LEN; bp->wol = 0; - if (of_property_read_bool(np, "magic-packet")) - bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; - device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); + device_set_wakeup_capable(&pdev->dev, 1); bp->usrio = macb_config->usrio; @@ -5211,10 +5233,13 @@ static int __maybe_unused macb_suspend(struct device *dev) { struct net_device *netdev = dev_get_drvdata(dev); struct macb *bp = netdev_priv(netdev); + struct in_ifaddr *ifa = NULL; struct macb_queue *queue; + struct in_device *idev; unsigned long flags; unsigned int q; int err; + u32 tmp; if (!device_may_wakeup(&bp->dev->dev)) phy_exit(bp->sgmii_phy); @@ -5223,18 +5248,54 @@ static int __maybe_unused macb_suspend(struct device *dev) return 0; if (bp->wol & MACB_WOL_ENABLED) { + /* Check for IP address in WOL ARP mode */ + idev = __in_dev_get_rcu(bp->dev); + if (idev && idev->ifa_list) + ifa = rcu_access_pointer(idev->ifa_list); + if ((bp->wolopts & WAKE_ARP) && !ifa) { + netdev_err(netdev, "IP address not assigned as required by WoL walk ARP\n"); + return -EOPNOTSUPP; + } spin_lock_irqsave(&bp->lock, flags); - /* Flush all status bits */ - macb_writel(bp, TSR, -1); - macb_writel(bp, RSR, -1); + + /* Disable Tx and Rx engines before disabling the queues, + * this is mandatory as per the IP spec sheet + */ + tmp = macb_readl(bp, NCR); + macb_writel(bp, NCR, tmp & ~(MACB_BIT(TE) | MACB_BIT(RE))); for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { + /* Disable RX queues */ + if (bp->caps & MACB_CAPS_QUEUE_DISABLE) { + queue_writel(queue, RBQP, MACB_BIT(QUEUE_DISABLE)); + } else { + /* Tie off RX queues */ + queue_writel(queue, RBQP, + lower_32_bits(bp->rx_ring_tieoff_dma)); +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + queue_writel(queue, RBQPH, + upper_32_bits(bp->rx_ring_tieoff_dma)); +#endif + } /* Disable all interrupts */ queue_writel(queue, IDR, -1); queue_readl(queue, ISR); if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE) queue_writel(queue, ISR, -1); } + /* Enable Receive engine */ + macb_writel(bp, NCR, tmp | MACB_BIT(RE)); + /* Flush all status bits */ + macb_writel(bp, TSR, -1); + macb_writel(bp, RSR, -1); + + tmp = (bp->wolopts & WAKE_MAGIC) ? 
MACB_BIT(MAG) : 0; + if (bp->wolopts & WAKE_ARP) { + tmp |= MACB_BIT(ARP); + /* write IP address into register */ + tmp |= MACB_BFEXT(IP, be32_to_cpu(ifa->ifa_local)); + } + /* Change interrupt handler and * Enable WoL IRQ on queue 0 */ @@ -5250,7 +5311,7 @@ static int __maybe_unused macb_suspend(struct device *dev) return err; } queue_writel(bp->queues, IER, GEM_BIT(WOL)); - gem_writel(bp, WOL, MACB_BIT(MAG)); + gem_writel(bp, WOL, tmp); } else { err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt, IRQF_SHARED, netdev->name, bp->queues); @@ -5262,7 +5323,7 @@ static int __maybe_unused macb_suspend(struct device *dev) return err; } queue_writel(bp->queues, IER, MACB_BIT(WOL)); - macb_writel(bp, WOL, MACB_BIT(MAG)); + macb_writel(bp, WOL, tmp); } spin_unlock_irqrestore(&bp->lock, flags); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index d3e07b6ed5e1..5835965dbc32 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -2497,7 +2497,7 @@ ret_intrmod: } static int lio_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct lio *lio = GET_LIO(netdev); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 34f02a8ec2ca..1d79f6eaa41f 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -92,12 +92,6 @@ static int octeon_console_debug_enabled(u32 console) /* time to wait for possible in-flight requests in milliseconds */ #define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000) -struct oct_link_status_resp { - u64 rh; - struct oct_link_info link_info; - u64 status; -}; - struct oct_timestamp_resp { u64 rh; u64 timestamp; diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c index 0d6ee30affb9..eef12fdd246d 100644 --- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c +++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c @@ -30,11 +30,6 @@ #include "cn23xx_pf_device.h" #include "cn23xx_vf_device.h" -struct niclist { - struct list_head list; - void *ptr; -}; - struct __dispatch { struct list_head list; struct octeon_recv_info *rinfo; diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index 34125b8cd935..6a04d2530176 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -836,7 +836,7 @@ static int nicvf_set_pauseparam(struct net_device *dev, } static int nicvf_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct nicvf *nic = netdev_priv(netdev); diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c index a317feb8decb..a40c266c37f2 100644 --- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c +++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c @@ -54,7 +54,7 @@ struct lmac { bool link_up; int lmacid; /* ID within BGX */ int lmacid_bd; /* ID on board */ - struct net_device netdev; + struct net_device *netdev; struct phy_device *phydev; unsigned int last_duplex; unsigned int last_link; @@ -590,10 +590,12 @@ static void bgx_sgmii_change_link_state(struct lmac *lmac) static void bgx_lmac_handler(struct net_device *netdev) { - struct lmac 
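Returning briefly to the macb suspend hunk above: with both WAKE_MAGIC and WAKE_ARP requested, the WOL register value is assembled roughly as below. The address 192.168.1.10 is an invented example, and the point that only the low-order bits of the address land in the register follows from the MACB_BFEXT(IP, ...) field extraction, so treat the exact field width as per macb.h:

    tmp  = MACB_BIT(MAG);                   /* wake on magic packet */
    tmp |= MACB_BIT(ARP);                   /* wake on matching ARP request */
    /* 192.168.1.10 == 0xc0a8010a; the IP field keeps the low bits -> 0x010a */
    tmp |= MACB_BFEXT(IP, be32_to_cpu(ifa->ifa_local));
    gem_writel(bp, WOL, tmp);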
*lmac = container_of(netdev, struct lmac, netdev); struct phy_device *phydev; + struct lmac *lmac, **priv; int link_changed = 0; + priv = netdev_priv(netdev); + lmac = *priv; phydev = lmac->phydev; if (!phydev->link && lmac->last_link) @@ -1052,12 +1054,18 @@ static int phy_interface_mode(u8 lmac_type) static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) { - struct lmac *lmac; + struct lmac *lmac, **priv; u64 cfg; lmac = &bgx->lmac[lmacid]; lmac->bgx = bgx; + lmac->netdev = alloc_netdev_dummy(sizeof(struct lmac *)); + if (!lmac->netdev) + return -ENOMEM; + priv = netdev_priv(lmac->netdev); + *priv = lmac; + if ((lmac->lmac_type == BGX_MODE_SGMII) || (lmac->lmac_type == BGX_MODE_QSGMII) || (lmac->lmac_type == BGX_MODE_RGMII)) { @@ -1116,7 +1124,7 @@ static int bgx_lmac_enable(struct bgx *bgx, u8 lmacid) } lmac->phydev->dev_flags = 0; - if (phy_connect_direct(&lmac->netdev, lmac->phydev, + if (phy_connect_direct(lmac->netdev, lmac->phydev, bgx_lmac_handler, phy_interface_mode(lmac->lmac_type))) return -ENODEV; @@ -1183,6 +1191,7 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid) (lmac->lmac_type != BGX_MODE_10G_KR) && lmac->phydev) phy_disconnect(lmac->phydev); + free_netdev(lmac->netdev); lmac->phydev = NULL; } @@ -1414,7 +1423,7 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle, acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac); - SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev); + SET_NETDEV_DEV(bgx->lmac[bgx->acpi_lmac_idx].netdev, dev); bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx; bgx->acpi_lmac_idx++; /* move to next LMAC */ @@ -1483,7 +1492,7 @@ static int bgx_init_of_phy(struct bgx *bgx) of_get_mac_address(node, bgx->lmac[lmac].mac); - SET_NETDEV_DEV(&bgx->lmac[lmac].netdev, &bgx->pdev->dev); + SET_NETDEV_DEV(bgx->lmac[lmac].netdev, &bgx->pdev->dev); bgx->lmac[lmac].lmacid = lmac; phy_np = of_parse_phandle(node, "phy-handle", 0); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 47eecde36285..3d091947ae00 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -1550,7 +1550,7 @@ out_free_fw: return ret; } -static int get_ts_info(struct net_device *dev, struct ethtool_ts_info *ts_info) +static int get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *ts_info) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; diff --git a/drivers/net/ethernet/cirrus/mac89x0.c b/drivers/net/ethernet/cirrus/mac89x0.c index 887876f35f10..84b300fee2bb 100644 --- a/drivers/net/ethernet/cirrus/mac89x0.c +++ b/drivers/net/ethernet/cirrus/mac89x0.c @@ -554,6 +554,7 @@ static int set_mac_address(struct net_device *dev, void *addr) return 0; } +MODULE_DESCRIPTION("Macintosh CS89x0-based Ethernet driver"); MODULE_LICENSE("GPL"); static void mac89x0_device_remove(struct platform_device *pdev) diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 241906697019..f2f1055880b2 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -599,7 +599,7 @@ static int enic_set_rxfh(struct net_device *netdev, } static int enic_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | @@ -608,6 +608,28 @@ static int 
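The thunder_bgx conversion above swaps an embedded struct net_device, which phylib only needed as an anchor, for a dummy one whose private area holds a single back-pointer. As a self-contained sketch (the helper name is invented):

    /* Sketch of the dummy-netdev pattern; my_lmac_netdev() is hypothetical. */
    struct lmac;                    /* the object the phy handler must reach */

    static struct net_device *my_lmac_netdev(struct lmac *lmac)
    {
            struct net_device *nd = alloc_netdev_dummy(sizeof(struct lmac *));
            struct lmac **priv;

            if (!nd)
                    return NULL;
            priv = netdev_priv(nd);
            *priv = lmac;   /* bgx_lmac_handler() recovers it via netdev_priv() */
            return nd;
    }

Teardown mirrors bgx_lmac_disable() above: phy_disconnect() first, then free_netdev().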
enic_get_ts_info(struct net_device *netdev, return 0; } +static void enic_get_channels(struct net_device *netdev, + struct ethtool_channels *channels) +{ + struct enic *enic = netdev_priv(netdev); + + switch (vnic_dev_get_intr_mode(enic->vdev)) { + case VNIC_DEV_INTR_MODE_MSIX: + channels->max_rx = ENIC_RQ_MAX; + channels->max_tx = ENIC_WQ_MAX; + channels->rx_count = enic->rq_count; + channels->tx_count = enic->wq_count; + break; + case VNIC_DEV_INTR_MODE_MSI: + case VNIC_DEV_INTR_MODE_INTX: + channels->max_combined = 1; + channels->combined_count = 1; + break; + default: + break; + } +} + static const struct ethtool_ops enic_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_USE_ADAPTIVE_RX | @@ -632,6 +654,7 @@ static const struct ethtool_ops enic_ethtool_ops = { .set_rxfh = enic_set_rxfh, .get_link_ksettings = enic_get_ksettings, .get_ts_info = enic_get_ts_info, + .get_channels = enic_get_channels, }; void enic_set_ethtool_ops(struct net_device *netdev) diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index 5f0c9e1771db..73e1c71c5092 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -79,7 +79,8 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); #define GMAC0_IRQ4_8 (GMAC0_MIB_INT_BIT | GMAC0_RX_OVERRUN_INT_BIT) #define GMAC_OFFLOAD_FEATURES (NETIF_F_SG | NETIF_F_IP_CSUM | \ - NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM) + NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM | \ + NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6) /** * struct gmac_queue_page - page buffer per-page info @@ -287,13 +288,13 @@ static void gmac_set_flow_control(struct net_device *netdev, bool tx, bool rx) spin_unlock_irqrestore(&port->config_lock, flags); } -static void gmac_speed_set(struct net_device *netdev) +static void gmac_adjust_link(struct net_device *netdev) { struct gemini_ethernet_port *port = netdev_priv(netdev); struct phy_device *phydev = netdev->phydev; union gmac_status status, old_status; - int pause_tx = 0; - int pause_rx = 0; + bool pause_tx = false; + bool pause_rx = false; status.bits32 = readl(port->gmac_base + GMAC_STATUS); old_status.bits32 = status.bits32; @@ -328,14 +329,9 @@ static void gmac_speed_set(struct net_device *netdev) } if (phydev->duplex == DUPLEX_FULL) { - u16 lcladv = phy_read(phydev, MII_ADVERTISE); - u16 rmtadv = phy_read(phydev, MII_LPA); - u8 cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv); - - if (cap & FLOW_CTRL_RX) - pause_rx = 1; - if (cap & FLOW_CTRL_TX) - pause_tx = 1; + phy_get_pause(phydev, &pause_tx, &pause_rx); + netdev_dbg(netdev, "set negotiated pause params pause TX = %s, pause RX = %s\n", + pause_tx ? "ON" : "OFF", pause_rx ? "ON" : "OFF"); } gmac_set_flow_control(netdev, pause_tx, pause_rx); @@ -366,7 +362,7 @@ static int gmac_setup_phy(struct net_device *netdev) phy = of_phy_get_and_connect(netdev, dev->of_node, - gmac_speed_set); + gmac_adjust_link); if (!phy) return -ENODEV; netdev->phydev = phy; @@ -1148,13 +1144,25 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, skb_frag_t *skb_frag; dma_addr_t mapping; void *buffer; + u16 mss; int ret; - /* TODO: implement proper TSO using MTU in word3 */ word1 = skb->len; word3 = SOF_BIT; - if (skb->len >= ETH_FRAME_LEN) { + mss = skb_shinfo(skb)->gso_size; + if (mss) { + /* This means we are dealing with TCP and skb->len is the + * sum total of all the segments. The TSO will deal with + * chopping this up for us. 
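+ * As a worked example (illustrative numbers, not taken from this patch): + * with gso_size = 1448 and skb_tcp_all_headers() reporting 66 bytes of + * Ethernet/IP/TCP headers, the value programmed into word3 below is + * 1448 + 66 = 1514, the full on-wire size of each emitted segment.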
+ */ + /* The accelerator needs the full frame size here */ + mss += skb_tcp_all_headers(skb); + netdev_dbg(netdev, "segment offloading mss = %04x len=%04x\n", + mss, skb->len); + word1 |= TSS_MTU_ENABLE_BIT; + word3 |= mss; + } else if (skb->len >= ETH_FRAME_LEN) { /* Hardware offloaded checksumming isn't working on frames * bigger than 1514 bytes. A hypothesis about this is that the * checksum buffer is only 1518 bytes, so when the frames get @@ -1169,7 +1177,9 @@ static int gmac_map_tx_bufs(struct net_device *netdev, struct sk_buff *skb, return ret; } word1 |= TSS_BYPASS_BIT; - } else if (skb->ip_summed == CHECKSUM_PARTIAL) { + } + + if (skb->ip_summed == CHECKSUM_PARTIAL) { int tcp = 0; /* We do not switch off the checksumming on non TCP/UDP @@ -2116,6 +2126,19 @@ static void gmac_get_pauseparam(struct net_device *netdev, pparam->autoneg = true; } +static int gmac_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pparam) +{ + struct phy_device *phydev = netdev->phydev; + + if (!pparam->autoneg) + return -EOPNOTSUPP; + + phy_set_asym_pause(phydev, pparam->rx_pause, pparam->tx_pause); + + return 0; +} + static void gmac_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *rp, struct kernel_ethtool_ringparam *kernel_rp, @@ -2236,6 +2259,7 @@ static const struct ethtool_ops gmac_351x_ethtool_ops = { .set_link_ksettings = gmac_set_ksettings, .nway_reset = gmac_nway_reset, .get_pauseparam = gmac_get_pauseparam, + .set_pauseparam = gmac_set_pauseparam, .get_ringparam = gmac_get_ringparam, .set_ringparam = gmac_set_ringparam, .get_coalesce = gmac_get_coalesce, diff --git a/drivers/net/ethernet/engleder/tsnep_ethtool.c b/drivers/net/ethernet/engleder/tsnep_ethtool.c index 65ec1abc9442..9aa286ba1f00 100644 --- a/drivers/net/ethernet/engleder/tsnep_ethtool.c +++ b/drivers/net/ethernet/engleder/tsnep_ethtool.c @@ -305,7 +305,7 @@ static void tsnep_ethtool_get_channels(struct net_device *netdev, } static int tsnep_ethtool_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct tsnep_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index baa0b3c2ce6f..cfe6b57b1da0 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -371,6 +371,7 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type, void *type_data) { struct dpaa_priv *priv = netdev_priv(net_dev); + int num_txqs_per_tc = dpaa_num_txqs_per_tc(); struct tc_mqprio_qopt *mqprio = type_data; u8 num_tc; int i; @@ -398,12 +399,12 @@ static int dpaa_setup_tc(struct net_device *net_dev, enum tc_setup_type type, netdev_set_num_tc(net_dev, num_tc); for (i = 0; i < num_tc; i++) - netdev_set_tc_queue(net_dev, i, DPAA_TC_TXQ_NUM, - i * DPAA_TC_TXQ_NUM); + netdev_set_tc_queue(net_dev, i, num_txqs_per_tc, + i * num_txqs_per_tc); out: priv->num_tc = num_tc ? 
: 1; - netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM); + netif_set_real_num_tx_queues(net_dev, priv->num_tc * num_txqs_per_tc); return 0; } @@ -649,7 +650,7 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx) fq->wq = 6; break; case FQ_TYPE_TX: - switch (idx / DPAA_TC_TXQ_NUM) { + switch (idx / dpaa_num_txqs_per_tc()) { case 0: /* Low priority (best effort) */ fq->wq = 6; @@ -667,8 +668,8 @@ static inline void dpaa_assign_wq(struct dpaa_fq *fq, int idx) fq->wq = 0; break; default: - WARN(1, "Too many TX FQs: more than %d!\n", - DPAA_ETH_TXQ_NUM); + WARN(1, "Too many TX FQs: more than %zu!\n", + dpaa_max_num_txqs()); } break; default: @@ -740,7 +741,8 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list, port_fqs->rx_pcdq = &dpaa_fq[0]; - if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX_CONF_MQ)) + if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list, + FQ_TYPE_TX_CONF_MQ)) goto fq_alloc_failed; dpaa_fq = dpaa_fq_alloc(dev, 0, 1, list, FQ_TYPE_TX_ERROR); @@ -755,7 +757,7 @@ static int dpaa_alloc_all_fqs(struct device *dev, struct list_head *list, port_fqs->tx_defq = &dpaa_fq[0]; - if (!dpaa_fq_alloc(dev, 0, DPAA_ETH_TXQ_NUM, list, FQ_TYPE_TX)) + if (!dpaa_fq_alloc(dev, 0, dpaa_max_num_txqs(), list, FQ_TYPE_TX)) goto fq_alloc_failed; return 0; @@ -931,14 +933,18 @@ static inline void dpaa_setup_egress(const struct dpaa_priv *priv, } } -static void dpaa_fq_setup(struct dpaa_priv *priv, - const struct dpaa_fq_cbs *fq_cbs, - struct fman_port *tx_port) +static int dpaa_fq_setup(struct dpaa_priv *priv, + const struct dpaa_fq_cbs *fq_cbs, + struct fman_port *tx_port) { int egress_cnt = 0, conf_cnt = 0, num_portals = 0, portal_cnt = 0, cpu; const cpumask_t *affine_cpus = qman_affine_cpus(); - u16 channels[NR_CPUS]; struct dpaa_fq *fq; + u16 *channels; + + channels = kcalloc(num_possible_cpus(), sizeof(u16), GFP_KERNEL); + if (!channels) + return -ENOMEM; for_each_cpu_and(cpu, affine_cpus, cpu_online_mask) channels[num_portals++] = qman_affine_channel(cpu); @@ -965,11 +971,7 @@ static void dpaa_fq_setup(struct dpaa_priv *priv, case FQ_TYPE_TX: dpaa_setup_egress(priv, fq, tx_port, &fq_cbs->egress_ern); - /* If we have more Tx queues than the number of cores, - * just ignore the extra ones. - */ - if (egress_cnt < DPAA_ETH_TXQ_NUM) - priv->egress_fqs[egress_cnt++] = &fq->fq_base; + priv->egress_fqs[egress_cnt++] = &fq->fq_base; break; case FQ_TYPE_TX_CONF_MQ: priv->conf_fqs[conf_cnt++] = &fq->fq_base; @@ -987,16 +989,9 @@ static void dpaa_fq_setup(struct dpaa_priv *priv, } } - /* Make sure all CPUs receive a corresponding Tx queue. 
*/ - while (egress_cnt < DPAA_ETH_TXQ_NUM) { - list_for_each_entry(fq, &priv->dpaa_fq_list, list) { - if (fq->fq_type != FQ_TYPE_TX) - continue; - priv->egress_fqs[egress_cnt++] = &fq->fq_base; - if (egress_cnt == DPAA_ETH_TXQ_NUM) - break; - } - } + kfree(channels); + + return 0; } static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv, @@ -1004,7 +999,7 @@ static inline int dpaa_tx_fq_to_id(const struct dpaa_priv *priv, { int i; - for (i = 0; i < DPAA_ETH_TXQ_NUM; i++) + for (i = 0; i < dpaa_max_num_txqs(); i++) if (priv->egress_fqs[i] == tx_fq) return i; @@ -3324,7 +3319,7 @@ static int dpaa_eth_probe(struct platform_device *pdev) /* Allocate this early, so we can store relevant information in * the private area */ - net_dev = alloc_etherdev_mq(sizeof(*priv), DPAA_ETH_TXQ_NUM); + net_dev = alloc_etherdev_mq(sizeof(*priv), dpaa_max_num_txqs()); if (!net_dev) { dev_err(dev, "alloc_etherdev_mq() failed\n"); return -ENOMEM; @@ -3339,6 +3334,22 @@ static int dpaa_eth_probe(struct platform_device *pdev) priv->msg_enable = netif_msg_init(debug, DPAA_MSG_DEFAULT); + priv->egress_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(), + sizeof(*priv->egress_fqs), + GFP_KERNEL); + if (!priv->egress_fqs) { + err = -ENOMEM; + goto free_netdev; + } + + priv->conf_fqs = devm_kcalloc(dev, dpaa_max_num_txqs(), + sizeof(*priv->conf_fqs), + GFP_KERNEL); + if (!priv->conf_fqs) { + err = -ENOMEM; + goto free_netdev; + } + mac_dev = dpaa_mac_dev_get(pdev); if (IS_ERR(mac_dev)) { netdev_err(net_dev, "dpaa_mac_dev_get() failed\n"); @@ -3416,7 +3427,9 @@ static int dpaa_eth_probe(struct platform_device *pdev) */ dpaa_eth_add_channel(priv->channel, &pdev->dev); - dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); + err = dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]); + if (err) + goto free_dpaa_bps; /* Create a congestion group for this netdev, with * dynamically-allocated CGR ID. 
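[Editorial aside] The dpaa hunks around this point all share one pattern: arrays that used to be sized at compile time by NR_CPUS (the stack arrays u16 channels[NR_CPUS] and bool needs_revert[NR_CPUS], and the DPAA_ETH_TXQ_NUM-sized queue tables) are now sized at run time from num_possible_cpus() and allocated with kcalloc()/devm_kcalloc(). Below is a minimal userspace C sketch of the same idea; the CPU-count source, MAX_TC, and the array name are illustrative stand-ins, not the driver's real helpers.

	/* Hedged userspace analogue: size per-CPU queue arrays at run time
	 * instead of with a compile-time NR_CPUS bound.
	 */
	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>

	#define MAX_TC 4			/* mirrors DPAA_TC_NUM */

	static size_t num_txqs_per_tc(void)
	{
		long n = sysconf(_SC_NPROCESSORS_CONF);	/* CPUs seen at run time */

		return n > 0 ? (size_t)n : 1;
	}

	static size_t max_num_txqs(void)
	{
		return MAX_TC * num_txqs_per_tc();
	}

	int main(void)
	{
		/* One pointer per TX queue, sized for this machine, much like
		 * the driver's devm_kcalloc() of egress_fqs and conf_fqs.
		 */
		void **egress_fqs = calloc(max_num_txqs(), sizeof(*egress_fqs));

		if (!egress_fqs)
			return 1;
		printf("%zu TX queues (%zu per TC)\n",
		       max_num_txqs(), num_txqs_per_tc());
		free(egress_fqs);
		return 0;
	}

The payoff in the kernel is the same as in this sketch: no worst-case NR_CPUS arrays on the stack, and no over-allocation on machines with far fewer CPUs than the compile-time maximum.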
@@ -3462,7 +3475,8 @@ static int dpaa_eth_probe(struct platform_device *pdev) } priv->num_tc = 1; - netif_set_real_num_tx_queues(net_dev, priv->num_tc * DPAA_TC_TXQ_NUM); + netif_set_real_num_tx_queues(net_dev, + priv->num_tc * dpaa_num_txqs_per_tc()); /* Initialize NAPI */ err = dpaa_napi_add(net_dev); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h index ac3c8ed57bbe..7ed659eb08de 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h @@ -18,10 +18,6 @@ /* Number of prioritised traffic classes */ #define DPAA_TC_NUM 4 -/* Number of Tx queues per traffic class */ -#define DPAA_TC_TXQ_NUM NR_CPUS -/* Total number of Tx queues */ -#define DPAA_ETH_TXQ_NUM (DPAA_TC_NUM * DPAA_TC_TXQ_NUM) /* More detailed FQ types - used for fine-grained WQ assignments */ enum dpaa_fq_type { @@ -142,8 +138,8 @@ struct dpaa_priv { struct mac_device *mac_dev; struct device *rx_dma_dev; struct device *tx_dma_dev; - struct qman_fq *egress_fqs[DPAA_ETH_TXQ_NUM]; - struct qman_fq *conf_fqs[DPAA_ETH_TXQ_NUM]; + struct qman_fq **egress_fqs; + struct qman_fq **conf_fqs; u16 channel; struct list_head dpaa_fq_list; @@ -185,4 +181,16 @@ extern const struct ethtool_ops dpaa_ethtool_ops; /* from dpaa_eth_sysfs.c */ void dpaa_eth_sysfs_remove(struct device *dev); void dpaa_eth_sysfs_init(struct device *dev); + +static inline size_t dpaa_num_txqs_per_tc(void) +{ + return num_possible_cpus(); +} + +/* Total number of Tx queues */ +static inline size_t dpaa_max_num_txqs(void) +{ + return DPAA_TC_NUM * dpaa_num_txqs_per_tc(); +} + #endif /* __DPAA_H */ diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c index 4fee74c024bd..aad470e9caea 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c @@ -35,7 +35,6 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev, u32 last_fqid = 0; ssize_t bytes = 0; char *str; - int i = 0; list_for_each_entry_safe(fq, tmp, &priv->dpaa_fq_list, list) { switch (fq->fq_type) { @@ -85,7 +84,6 @@ static ssize_t dpaa_eth_show_fqids(struct device *dev, prev = fq; prevstr = str; - i++; } if (prev) { diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 5bd0b36d1feb..b0060cf96090 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -394,7 +394,7 @@ static int dpaa_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) } static int dpaa_get_ts_info(struct net_device *net_dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct device *dev = net_dev->dev.parent; struct device_node *mac_node = dev->of_node; @@ -457,12 +457,16 @@ static int dpaa_set_coalesce(struct net_device *dev, struct netlink_ext_ack *extack) { const cpumask_t *cpus = qman_affine_cpus(); - bool needs_revert[NR_CPUS] = {false}; struct qman_portal *portal; u32 period, prev_period; u8 thresh, prev_thresh; + bool *needs_revert; int cpu, res; + needs_revert = kcalloc(num_possible_cpus(), sizeof(bool), GFP_KERNEL); + if (!needs_revert) + return -ENOMEM; + period = c->rx_coalesce_usecs; thresh = c->rx_max_coalesced_frames; @@ -485,6 +489,8 @@ static int dpaa_set_coalesce(struct net_device *dev, needs_revert[cpu] = true; } + kfree(needs_revert); + return 0; revert_values: @@ -498,6 +504,8 @@ revert_values: 
qman_dqrr_set_ithresh(portal, prev_thresh); } + kfree(needs_revert); + return res; } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index e80e9388c71f..7f476519b7ad 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -794,7 +794,7 @@ int dpaa2_phc_index = -1; EXPORT_SYMBOL(dpaa2_phc_index); static int dpaa2_eth_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { if (!dpaa2_ptp) return ethtool_op_get_ts_info(dev, info); diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index f7753ea5b57e..5e684b23c5f5 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -841,7 +841,7 @@ static int enetc_set_coalesce(struct net_device *ndev, } static int enetc_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { int *phc_idx; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 881ece735dcf..a923cb95cdc6 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1361,6 +1361,12 @@ fec_stop(struct net_device *ndev) writel(FEC_ECR_ETHEREN, fep->hwp + FEC_ECNTRL); writel(rmii_mode, fep->hwp + FEC_R_CNTRL); } + + if (fep->bufdesc_ex) { + val = readl(fep->hwp + FEC_ECNTRL); + val |= FEC_ECR_EN1588; + writel(val, fep->hwp + FEC_ECNTRL); + } } static void @@ -2762,7 +2768,7 @@ static void fec_enet_get_regs(struct net_device *ndev, } static int fec_enet_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct fec_enet_private *fep = netdev_priv(ndev); diff --git a/drivers/net/ethernet/freescale/fman/fman_memac.c b/drivers/net/ethernet/freescale/fman/fman_memac.c index 92b8f4ab26f1..796e6f4e583d 100644 --- a/drivers/net/ethernet/freescale/fman/fman_memac.c +++ b/drivers/net/ethernet/freescale/fman/fman_memac.c @@ -1066,7 +1066,6 @@ int memac_initialization(struct mac_device *mac_dev, struct fman_mac_params *params) { int err; - struct device_node *fixed; struct phylink_pcs *pcs; struct fman_mac *memac; unsigned long capabilities; @@ -1222,18 +1221,15 @@ int memac_initialization(struct mac_device *mac_dev, memac->rgmii_no_half_duplex = true; /* Most boards should use MLO_AN_INBAND, but existing boards don't have - * a managed property. Default to MLO_AN_INBAND if nothing else is - * specified. We need to be careful and not enable this if we have a - * fixed link or if we are using MII or RGMII, since those - * configurations modes don't use in-band autonegotiation. + * a managed property. Default to MLO_AN_INBAND rather than MLO_AN_PHY. + * Phylink will allow this to be overridden by a fixed link. We need to + * be careful and not enable this if we are using MII or RGMII, since + * those configuration modes don't use in-band autonegotiation.
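+ * (Editor's note: phylink's default_an_inband, unlike ovr_an_inband, is + * only a default rather than a forced override, which is why the explicit + * fixed-link lookup removed below is no longer needed.)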
*/ - fixed = of_get_child_by_name(mac_node, "fixed-link"); - if (!fixed && !of_property_read_bool(mac_node, "fixed-link") && - !of_property_read_bool(mac_node, "managed") && + if (!of_property_read_bool(mac_node, "managed") && mac_dev->phy_if != PHY_INTERFACE_MODE_MII && !phy_interface_mode_is_rgmii(mac_dev->phy_if)) - mac_dev->phylink_config.ovr_an_inband = true; - of_node_put(fixed); + mac_dev->phylink_config.default_an_inband = true; err = memac_init(mac_dev->fman_mac); if (err < 0) diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 7a15b9245698..f581402ad740 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1448,7 +1448,7 @@ static int gfar_get_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd, } static int gfar_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct gfar_private *priv = netdev_priv(dev); struct platform_device *ptp_dev; diff --git a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c index 4edd0adfc6c7..7f081e6e8c87 100644 --- a/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c +++ b/drivers/net/ethernet/fungible/funeth/funeth_ethtool.c @@ -1040,7 +1040,7 @@ static int fun_set_rxfh(struct net_device *netdev, } static int fun_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | diff --git a/drivers/net/ethernet/google/gve/Makefile b/drivers/net/ethernet/google/gve/Makefile index b9a6be76531b..9ed07080b38a 100644 --- a/drivers/net/ethernet/google/gve/Makefile +++ b/drivers/net/ethernet/google/gve/Makefile @@ -1,4 +1,4 @@ # Makefile for the Google virtual Ethernet (gve) driver obj-$(CONFIG_GVE) += gve.o -gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o +gve-objs := gve_main.o gve_tx.o gve_tx_dqo.o gve_rx.o gve_rx_dqo.o gve_ethtool.o gve_adminq.o gve_utils.o gve_flow_rule.o diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index ae1e21c9b0a5..84ac004d3953 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: (GPL-2.0 OR MIT) * Google virtual Ethernet (gve) driver * - * Copyright (C) 2015-2021 Google, Inc. 
+ * Copyright (C) 2015-2024 Google LLC */ #ifndef _GVE_H_ @@ -60,6 +60,11 @@ #define GVE_DEFAULT_RX_BUFFER_OFFSET 2048 +#define GVE_FLOW_RULES_CACHE_SIZE \ + (GVE_ADMINQ_BUFFER_SIZE / sizeof(struct gve_adminq_queried_flow_rule)) +#define GVE_FLOW_RULE_IDS_CACHE_SIZE \ + (GVE_ADMINQ_BUFFER_SIZE / sizeof(((struct gve_adminq_queried_flow_rule *)0)->location)) + #define GVE_XDP_ACTIONS 5 #define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182 @@ -678,6 +683,39 @@ enum gve_queue_format { GVE_DQO_QPL_FORMAT = 0x4, }; +struct gve_flow_spec { + __be32 src_ip[4]; + __be32 dst_ip[4]; + union { + struct { + __be16 src_port; + __be16 dst_port; + }; + __be32 spi; + }; + union { + u8 tos; + u8 tclass; + }; +}; + +struct gve_flow_rule { + u32 location; + u16 flow_type; + u16 action; + struct gve_flow_spec key; + struct gve_flow_spec mask; +}; + +struct gve_flow_rules_cache { + bool rules_cache_synced; /* False if the driver's rules_cache is outdated */ + struct gve_adminq_queried_flow_rule *rules_cache; + __be32 *rule_ids_cache; + /* The total number of queried rules that are stored in the caches */ + u32 rules_cache_num; + u32 rule_ids_cache_num; +}; + struct gve_priv { struct net_device *dev; struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */ @@ -724,6 +762,7 @@ struct gve_priv { union gve_adminq_command *adminq; dma_addr_t adminq_bus_addr; struct dma_pool *adminq_pool; + struct mutex adminq_lock; /* Protects adminq command execution */ u32 adminq_mask; /* masks prod_cnt to adminq size */ u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */ u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */ @@ -743,6 +782,8 @@ struct gve_priv { u32 adminq_report_link_speed_cnt; u32 adminq_get_ptype_map_cnt; u32 adminq_verify_driver_compatibility_cnt; + u32 adminq_query_flow_rules_cnt; + u32 adminq_cfg_flow_rule_cnt; /* Global stats */ u32 interface_up_cnt; /* count of times interface turned up since last reset */ @@ -785,6 +826,11 @@ struct gve_priv { u16 header_buf_size; /* device configured, header-split supported if non-zero */ bool header_split_enabled; /* True if the header split is enabled by the user */ + + u32 max_flow_rules; + u32 num_flow_rules; + + struct gve_flow_rules_cache flow_rules_cache; }; enum gve_service_task_flags_bit { @@ -1124,6 +1170,12 @@ int gve_adjust_config(struct gve_priv *priv, int gve_adjust_queues(struct gve_priv *priv, struct gve_queue_config new_rx_config, struct gve_queue_config new_tx_config); +/* flow steering rule */ +int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd); +int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs); +int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd); +int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd); +int gve_flow_rules_reset(struct gve_priv *priv); /* report stats handling */ void gve_handle_report_stats(struct gve_priv *priv); /* exported by ethtool.c */ diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 8ca0def176ef..c5bbc1b7524e 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -44,6 +44,7 @@ void gve_parse_device_option(struct gve_priv *priv, struct gve_device_option_jumbo_frames **dev_op_jumbo_frames, struct gve_device_option_dqo_qpl **dev_op_dqo_qpl, struct gve_device_option_buffer_sizes **dev_op_buffer_sizes, + struct gve_device_option_flow_steering **dev_op_flow_steering, struct gve_device_option_modify_ring
**dev_op_modify_ring) { u32 req_feat_mask = be32_to_cpu(option->required_features_mask); @@ -189,6 +190,23 @@ void gve_parse_device_option(struct gve_priv *priv, if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE) priv->default_min_ring_size = true; break; + case GVE_DEV_OPT_ID_FLOW_STEERING: + if (option_length < sizeof(**dev_op_flow_steering) || + req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING) { + dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT, + "Flow Steering", + (int)sizeof(**dev_op_flow_steering), + GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING, + option_length, req_feat_mask); + break; + } + + if (option_length > sizeof(**dev_op_flow_steering)) + dev_warn(&priv->pdev->dev, + GVE_DEVICE_OPTION_TOO_BIG_FMT, + "Flow Steering"); + *dev_op_flow_steering = (void *)(option + 1); + break; default: /* If we don't recognize the option just continue * without doing anything. @@ -208,6 +226,7 @@ gve_process_device_options(struct gve_priv *priv, struct gve_device_option_jumbo_frames **dev_op_jumbo_frames, struct gve_device_option_dqo_qpl **dev_op_dqo_qpl, struct gve_device_option_buffer_sizes **dev_op_buffer_sizes, + struct gve_device_option_flow_steering **dev_op_flow_steering, struct gve_device_option_modify_ring **dev_op_modify_ring) { const int num_options = be16_to_cpu(descriptor->num_device_options); @@ -230,7 +249,7 @@ gve_process_device_options(struct gve_priv *priv, dev_op_gqi_rda, dev_op_gqi_qpl, dev_op_dqo_rda, dev_op_jumbo_frames, dev_op_dqo_qpl, dev_op_buffer_sizes, - dev_op_modify_ring); + dev_op_flow_steering, dev_op_modify_ring); dev_opt = next_opt; } @@ -268,6 +287,8 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv) priv->adminq_report_stats_cnt = 0; priv->adminq_report_link_speed_cnt = 0; priv->adminq_get_ptype_map_cnt = 0; + priv->adminq_query_flow_rules_cnt = 0; + priv->adminq_cfg_flow_rule_cnt = 0; /* Setup Admin queue with the device */ if (priv->pdev->revision < 0x1) { @@ -284,6 +305,7 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv) &priv->reg_bar0->adminq_base_address_lo); iowrite32be(GVE_DRIVER_STATUS_RUN_MASK, &priv->reg_bar0->driver_status); } + mutex_init(&priv->adminq_lock); gve_set_admin_queue_ok(priv); return 0; } @@ -460,6 +482,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, memcpy(cmd, cmd_orig, sizeof(*cmd_orig)); opcode = be32_to_cpu(READ_ONCE(cmd->opcode)); + if (opcode == GVE_ADMINQ_EXTENDED_COMMAND) + opcode = be32_to_cpu(cmd->extended_command.inner_opcode); switch (opcode) { case GVE_ADMINQ_DESCRIBE_DEVICE: @@ -504,6 +528,12 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY: priv->adminq_verify_driver_compatibility_cnt++; break; + case GVE_ADMINQ_QUERY_FLOW_RULES: + priv->adminq_query_flow_rules_cnt++; + break; + case GVE_ADMINQ_CONFIGURE_FLOW_RULE: + priv->adminq_cfg_flow_rule_cnt++; + break; default: dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode); } @@ -511,28 +541,58 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, return 0; } -/* This function is not threadsafe - the caller is responsible for any - * necessary locks. - * The caller is also responsible for making sure there are no commands - * waiting to be executed. 
- */ static int gve_adminq_execute_cmd(struct gve_priv *priv, union gve_adminq_command *cmd_orig) { u32 tail, head; int err; + mutex_lock(&priv->adminq_lock); tail = ioread32be(&priv->reg_bar0->adminq_event_counter); head = priv->adminq_prod_cnt; - if (tail != head) - // This is not a valid path - return -EINVAL; + if (tail != head) { + err = -EINVAL; + goto out; + } err = gve_adminq_issue_cmd(priv, cmd_orig); if (err) - return err; + goto out; - return gve_adminq_kick_and_wait(priv); + err = gve_adminq_kick_and_wait(priv); + +out: + mutex_unlock(&priv->adminq_lock); + return err; +} + +static int gve_adminq_execute_extended_cmd(struct gve_priv *priv, u32 opcode, + size_t cmd_size, void *cmd_orig) +{ + union gve_adminq_command cmd; + dma_addr_t inner_cmd_bus; + void *inner_cmd; + int err; + + inner_cmd = dma_alloc_coherent(&priv->pdev->dev, cmd_size, + &inner_cmd_bus, GFP_KERNEL); + if (!inner_cmd) + return -ENOMEM; + + memcpy(inner_cmd, cmd_orig, cmd_size); + + memset(&cmd, 0, sizeof(cmd)); + cmd.opcode = cpu_to_be32(GVE_ADMINQ_EXTENDED_COMMAND); + cmd.extended_command = (struct gve_adminq_extended_command) { + .inner_opcode = cpu_to_be32(opcode), + .inner_length = cpu_to_be32(cmd_size), + .inner_command_addr = cpu_to_be64(inner_cmd_bus), + }; + + err = gve_adminq_execute_cmd(priv, &cmd); + + dma_free_coherent(&priv->pdev->dev, cmd_size, inner_cmd, inner_cmd_bus); + return err; } /* The device specifies that the management vector can either be the first irq @@ -805,6 +865,8 @@ static void gve_enable_supported_features(struct gve_priv *priv, *dev_op_dqo_qpl, const struct gve_device_option_buffer_sizes *dev_op_buffer_sizes, + const struct gve_device_option_flow_steering + *dev_op_flow_steering, const struct gve_device_option_modify_ring *dev_op_modify_ring) { @@ -857,10 +919,23 @@ static void gve_enable_supported_features(struct gve_priv *priv, priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size); } } + + if (dev_op_flow_steering && + (supported_features_mask & GVE_SUP_FLOW_STEERING_MASK)) { + if (dev_op_flow_steering->max_flow_rules) { + priv->max_flow_rules = + be32_to_cpu(dev_op_flow_steering->max_flow_rules); + priv->dev->hw_features |= NETIF_F_NTUPLE; + dev_info(&priv->pdev->dev, + "FLOW STEERING device option enabled with max rule limit of %u.\n", + priv->max_flow_rules); + } + } } int gve_adminq_describe_device(struct gve_priv *priv) { + struct gve_device_option_flow_steering *dev_op_flow_steering = NULL; struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL; struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL; struct gve_device_option_modify_ring *dev_op_modify_ring = NULL; @@ -897,6 +972,7 @@ int gve_adminq_describe_device(struct gve_priv *priv) &dev_op_gqi_qpl, &dev_op_dqo_rda, &dev_op_jumbo_frames, &dev_op_dqo_qpl, &dev_op_buffer_sizes, + &dev_op_flow_steering, &dev_op_modify_ring); if (err) goto free_device_descriptor; @@ -958,7 +1034,8 @@ int gve_adminq_describe_device(struct gve_priv *priv) gve_enable_supported_features(priv, supported_features_mask, dev_op_jumbo_frames, dev_op_dqo_qpl, - dev_op_buffer_sizes, dev_op_modify_ring); + dev_op_buffer_sizes, dev_op_flow_steering, + dev_op_modify_ring); free_device_descriptor: dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus); @@ -1121,3 +1198,130 @@ err: ptype_map_bus); return err; } + +static int +gve_adminq_configure_flow_rule(struct gve_priv *priv, + struct gve_adminq_configure_flow_rule *flow_rule_cmd) +{ + int err = gve_adminq_execute_extended_cmd(priv, + 
GVE_ADMINQ_CONFIGURE_FLOW_RULE, + sizeof(struct gve_adminq_configure_flow_rule), + flow_rule_cmd); + + if (err) { + dev_err(&priv->pdev->dev, "Timed out configuring the flow rule, triggering reset"); + gve_reset(priv, true); + } else { + priv->flow_rules_cache.rules_cache_synced = false; + } + + return err; +} + +int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule *rule, u32 loc) +{ + struct gve_adminq_configure_flow_rule flow_rule_cmd = { + .opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_ADD), + .location = cpu_to_be32(loc), + .rule = *rule, + }; + + return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd); +} + +int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc) +{ + struct gve_adminq_configure_flow_rule flow_rule_cmd = { + .opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_DEL), + .location = cpu_to_be32(loc), + }; + + return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd); +} + +int gve_adminq_reset_flow_rules(struct gve_priv *priv) +{ + struct gve_adminq_configure_flow_rule flow_rule_cmd = { + .opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_RESET), + }; + + return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd); +} + +/* In the DMA memory that the driver allocated for the device to query the flow rules, the device + * first writes a struct gve_query_flow_rules_descriptor. Next to it, the device writes an array + * of rules or rule IDs, with the count specified in the descriptor. + * For GVE_FLOW_RULE_QUERY_STATS, the device writes only the descriptor. + */ +static int gve_adminq_process_flow_rules_query(struct gve_priv *priv, u16 query_opcode, + struct gve_query_flow_rules_descriptor *descriptor) +{ + struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache; + u32 num_queried_rules, total_memory_len, rule_info_len; + void *rule_info; + + total_memory_len = be32_to_cpu(descriptor->total_length); + num_queried_rules = be32_to_cpu(descriptor->num_queried_rules); + rule_info = (void *)(descriptor + 1); + + switch (query_opcode) { + case GVE_FLOW_RULE_QUERY_RULES: + rule_info_len = num_queried_rules * sizeof(*flow_rules_cache->rules_cache); + if (sizeof(*descriptor) + rule_info_len != total_memory_len) { + dev_err(&priv->dev->dev, "flow rules query result has unexpected length.\n"); + return -ENOMEM; + } + + memcpy(flow_rules_cache->rules_cache, rule_info, rule_info_len); + flow_rules_cache->rules_cache_num = num_queried_rules; + break; + case GVE_FLOW_RULE_QUERY_IDS: + rule_info_len = num_queried_rules * sizeof(*flow_rules_cache->rule_ids_cache); + if (sizeof(*descriptor) + rule_info_len != total_memory_len) { + dev_err(&priv->dev->dev, "flow rule ids query result has unexpected length.\n"); + return -ENOMEM; + } + + memcpy(flow_rules_cache->rule_ids_cache, rule_info, rule_info_len); + flow_rules_cache->rule_ids_cache_num = num_queried_rules; + break; + case GVE_FLOW_RULE_QUERY_STATS: + priv->num_flow_rules = be32_to_cpu(descriptor->num_flow_rules); + priv->max_flow_rules = be32_to_cpu(descriptor->max_flow_rules); + return 0; + default: + return -EINVAL; + } + + return 0; +} + +int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc) +{ + struct gve_query_flow_rules_descriptor *descriptor; + union gve_adminq_command cmd; + dma_addr_t descriptor_bus; + int err = 0; + + memset(&cmd, 0, sizeof(cmd)); + descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL, &descriptor_bus); + if (!descriptor) + return -ENOMEM; + + cmd.opcode = cpu_to_be32(GVE_ADMINQ_QUERY_FLOW_RULES); + cmd.query_flow_rules = (struct
gve_adminq_query_flow_rules) { + .opcode = cpu_to_be16(query_opcode), + .starting_rule_id = cpu_to_be32(starting_loc), + .available_length = cpu_to_be64(GVE_ADMINQ_BUFFER_SIZE), + .rule_descriptor_addr = cpu_to_be64(descriptor_bus), + }; + err = gve_adminq_execute_cmd(priv, &cmd); + if (err) + goto out; + + err = gve_adminq_process_flow_rules_query(priv, query_opcode, descriptor); + +out: + dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus); + return err; +} diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h index e64f0dbe744d..ed1370c9b197 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.h +++ b/drivers/net/ethernet/google/gve/gve_adminq.h @@ -25,6 +25,19 @@ enum gve_adminq_opcodes { GVE_ADMINQ_REPORT_LINK_SPEED = 0xD, GVE_ADMINQ_GET_PTYPE_MAP = 0xE, GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY = 0xF, + GVE_ADMINQ_QUERY_FLOW_RULES = 0x10, + + /* For commands that are larger than 56 bytes */ + GVE_ADMINQ_EXTENDED_COMMAND = 0xFF, +}; + +/* A normal adminq command is restricted to 56 bytes at maximum. A longer + * command is wrapped in a GVE_ADMINQ_EXTENDED_COMMAND, with its inner opcode + * taken from gve_adminq_extended_cmd_opcodes. The inner command itself is + * written to the DMA memory that the GVE_ADMINQ_EXTENDED_COMMAND describes. + */ +enum gve_adminq_extended_cmd_opcodes { + GVE_ADMINQ_CONFIGURE_FLOW_RULE = 0x101, }; /* Admin queue status codes */ @@ -143,6 +156,14 @@ struct gve_device_option_modify_ring { static_assert(sizeof(struct gve_device_option_modify_ring) == 12); +struct gve_device_option_flow_steering { + __be32 supported_features_mask; + __be32 reserved; + __be32 max_flow_rules; +}; + +static_assert(sizeof(struct gve_device_option_flow_steering) == 12); + /* Terminology: * * RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA @@ -160,6 +181,7 @@ enum gve_dev_opt_id { GVE_DEV_OPT_ID_DQO_QPL = 0x7, GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8, GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa, + GVE_DEV_OPT_ID_FLOW_STEERING = 0xb, }; enum gve_dev_opt_req_feat_mask { @@ -171,12 +193,14 @@ enum gve_dev_opt_req_feat_mask { GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0, GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0, + GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING = 0x0, }; enum gve_sup_feature_mask { GVE_SUP_MODIFY_RING_MASK = 1 << 0, GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2, GVE_SUP_BUFFER_SIZES_MASK = 1 << 4, + GVE_SUP_FLOW_STEERING_MASK = 1 << 5, }; #define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0 @@ -208,6 +232,14 @@ enum gve_driver_capbility { #define GVE_DRIVER_CAPABILITY_FLAGS3 0x0 #define GVE_DRIVER_CAPABILITY_FLAGS4 0x0 +struct gve_adminq_extended_command { + __be32 inner_opcode; + __be32 inner_length; + __be64 inner_command_addr; +}; + +static_assert(sizeof(struct gve_adminq_extended_command) == 16); + struct gve_driver_info { u8 os_type; /* 0x01 = Linux */ u8 driver_major; @@ -412,6 +444,71 @@ struct gve_adminq_get_ptype_map { __be64 ptype_map_addr; }; +/* Flow-steering related definitions */ +enum gve_adminq_flow_rule_cfg_opcode { + GVE_FLOW_RULE_CFG_ADD = 0, + GVE_FLOW_RULE_CFG_DEL = 1, + GVE_FLOW_RULE_CFG_RESET = 2, +}; + +enum gve_adminq_flow_rule_query_opcode { + GVE_FLOW_RULE_QUERY_RULES = 0, + GVE_FLOW_RULE_QUERY_IDS = 1, + GVE_FLOW_RULE_QUERY_STATS = 2, +}; + +enum gve_adminq_flow_type { + GVE_FLOW_TYPE_TCPV4, + GVE_FLOW_TYPE_UDPV4, + GVE_FLOW_TYPE_SCTPV4, + GVE_FLOW_TYPE_AHV4, + GVE_FLOW_TYPE_ESPV4, + GVE_FLOW_TYPE_TCPV6, + GVE_FLOW_TYPE_UDPV6, + GVE_FLOW_TYPE_SCTPV6,
GVE_FLOW_TYPE_AHV6, + GVE_FLOW_TYPE_ESPV6, +}; + +/* Flow-steering command */ +struct gve_adminq_flow_rule { + __be16 flow_type; + __be16 action; /* RX queue id */ + struct gve_flow_spec key; + struct gve_flow_spec mask; +}; + +struct gve_adminq_configure_flow_rule { + __be16 opcode; + u8 padding[2]; + struct gve_adminq_flow_rule rule; + __be32 location; +}; + +static_assert(sizeof(struct gve_adminq_configure_flow_rule) == 92); + +struct gve_query_flow_rules_descriptor { + __be32 num_flow_rules; + __be32 max_flow_rules; + __be32 num_queried_rules; + __be32 total_length; +}; + +struct gve_adminq_queried_flow_rule { + __be32 location; + struct gve_adminq_flow_rule flow_rule; +}; + +struct gve_adminq_query_flow_rules { + __be16 opcode; + u8 padding[2]; + __be32 starting_rule_id; + __be64 available_length; /* The dma memory length that the driver allocated */ + __be64 rule_descriptor_addr; /* The dma memory address */ +}; + +static_assert(sizeof(struct gve_adminq_query_flow_rules) == 24); + union gve_adminq_command { struct { __be32 opcode; @@ -432,6 +529,8 @@ union gve_adminq_command { struct gve_adminq_get_ptype_map get_ptype_map; struct gve_adminq_verify_driver_compatibility verify_driver_compatibility; + struct gve_adminq_query_flow_rules query_flow_rules; + struct gve_adminq_extended_command extended_command; }; }; u8 reserved[64]; @@ -465,6 +564,10 @@ int gve_adminq_verify_driver_compatibility(struct gve_priv *priv, u64 driver_info_len, dma_addr_t driver_info_addr); int gve_adminq_report_link_speed(struct gve_priv *priv); +int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule *rule, u32 loc); +int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc); +int gve_adminq_reset_flow_rules(struct gve_priv *priv); +int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc); struct gve_ptype_lut; int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv, diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index fe1741d482b4..3480ff5c7ed6 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0 OR MIT) /* Google virtual Ethernet (gve) driver * - * Copyright (C) 2015-2021 Google, Inc. 
+ * Copyright (C) 2015-2024 Google LLC */ #include @@ -74,7 +74,8 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = { "adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt", "adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt", "adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt", - "adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt" + "adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt", + "adminq_query_flow_rules", "adminq_cfg_flow_rule", }; static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = { @@ -450,6 +451,8 @@ gve_get_ethtool_stats(struct net_device *netdev, data[i++] = priv->adminq_report_stats_cnt; data[i++] = priv->adminq_report_link_speed_cnt; data[i++] = priv->adminq_get_ptype_map_cnt; + data[i++] = priv->adminq_query_flow_rules_cnt; + data[i++] = priv->adminq_cfg_flow_rule_cnt; } static void gve_get_channels(struct net_device *netdev, @@ -772,6 +775,69 @@ static int gve_set_coalesce(struct net_device *netdev, return 0; } +static int gve_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) +{ + struct gve_priv *priv = netdev_priv(netdev); + int err = 0; + + if (!(netdev->features & NETIF_F_NTUPLE)) + return -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + err = gve_add_flow_rule(priv, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + err = gve_del_flow_rule(priv, cmd); + break; + case ETHTOOL_SRXFH: + err = -EOPNOTSUPP; + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int gve_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + struct gve_priv *priv = netdev_priv(netdev); + int err = 0; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = priv->rx_cfg.num_queues; + break; + case ETHTOOL_GRXCLSRLCNT: + if (!priv->max_flow_rules) + return -EOPNOTSUPP; + + err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_STATS, 0); + if (err) + return err; + + cmd->rule_cnt = priv->num_flow_rules; + cmd->data = priv->max_flow_rules; + break; + case ETHTOOL_GRXCLSRULE: + err = gve_get_flow_rule_entry(priv, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + err = gve_get_flow_rule_ids(priv, cmd, (u32 *)rule_locs); + break; + case ETHTOOL_GRXFH: + err = -EOPNOTSUPP; + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + const struct ethtool_ops gve_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS, .supported_ring_params = ETHTOOL_RING_USE_TCP_DATA_SPLIT, @@ -783,6 +849,8 @@ const struct ethtool_ops gve_ethtool_ops = { .get_msglevel = gve_get_msglevel, .set_channels = gve_set_channels, .get_channels = gve_get_channels, + .set_rxnfc = gve_set_rxnfc, + .get_rxnfc = gve_get_rxnfc, .get_link = ethtool_op_get_link, .get_coalesce = gve_get_coalesce, .set_coalesce = gve_set_coalesce, diff --git a/drivers/net/ethernet/google/gve/gve_flow_rule.c b/drivers/net/ethernet/google/gve/gve_flow_rule.c new file mode 100644 index 000000000000..0bb8cd1876a3 --- /dev/null +++ b/drivers/net/ethernet/google/gve/gve_flow_rule.c @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* Google virtual Ethernet (gve) driver + * + * Copyright (C) 2015-2024 Google LLC + */ + +#include "gve.h" +#include "gve_adminq.h" + +static +int gve_fill_ethtool_flow_spec(struct ethtool_rx_flow_spec *fsp, + struct gve_adminq_queried_flow_rule *rule) +{ + struct gve_adminq_flow_rule *flow_rule = &rule->flow_rule; + static const u16 flow_type_lut[] = { + 
[GVE_FLOW_TYPE_TCPV4] = TCP_V4_FLOW, + [GVE_FLOW_TYPE_UDPV4] = UDP_V4_FLOW, + [GVE_FLOW_TYPE_SCTPV4] = SCTP_V4_FLOW, + [GVE_FLOW_TYPE_AHV4] = AH_V4_FLOW, + [GVE_FLOW_TYPE_ESPV4] = ESP_V4_FLOW, + [GVE_FLOW_TYPE_TCPV6] = TCP_V6_FLOW, + [GVE_FLOW_TYPE_UDPV6] = UDP_V6_FLOW, + [GVE_FLOW_TYPE_SCTPV6] = SCTP_V6_FLOW, + [GVE_FLOW_TYPE_AHV6] = AH_V6_FLOW, + [GVE_FLOW_TYPE_ESPV6] = ESP_V6_FLOW, + }; + + if (be16_to_cpu(flow_rule->flow_type) >= ARRAY_SIZE(flow_type_lut)) + return -EINVAL; + + fsp->flow_type = flow_type_lut[be16_to_cpu(flow_rule->flow_type)]; + + memset(&fsp->h_u, 0, sizeof(fsp->h_u)); + memset(&fsp->h_ext, 0, sizeof(fsp->h_ext)); + memset(&fsp->m_u, 0, sizeof(fsp->m_u)); + memset(&fsp->m_ext, 0, sizeof(fsp->m_ext)); + + switch (fsp->flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + fsp->h_u.tcp_ip4_spec.ip4src = flow_rule->key.src_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = flow_rule->key.dst_ip[0]; + fsp->h_u.tcp_ip4_spec.psrc = flow_rule->key.src_port; + fsp->h_u.tcp_ip4_spec.pdst = flow_rule->key.dst_port; + fsp->h_u.tcp_ip4_spec.tos = flow_rule->key.tos; + fsp->m_u.tcp_ip4_spec.ip4src = flow_rule->mask.src_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4dst = flow_rule->mask.dst_ip[0]; + fsp->m_u.tcp_ip4_spec.psrc = flow_rule->mask.src_port; + fsp->m_u.tcp_ip4_spec.pdst = flow_rule->mask.dst_port; + fsp->m_u.tcp_ip4_spec.tos = flow_rule->mask.tos; + break; + case AH_V4_FLOW: + case ESP_V4_FLOW: + fsp->h_u.ah_ip4_spec.ip4src = flow_rule->key.src_ip[0]; + fsp->h_u.ah_ip4_spec.ip4dst = flow_rule->key.dst_ip[0]; + fsp->h_u.ah_ip4_spec.spi = flow_rule->key.spi; + fsp->h_u.ah_ip4_spec.tos = flow_rule->key.tos; + fsp->m_u.ah_ip4_spec.ip4src = flow_rule->mask.src_ip[0]; + fsp->m_u.ah_ip4_spec.ip4dst = flow_rule->mask.dst_ip[0]; + fsp->m_u.ah_ip4_spec.spi = flow_rule->mask.spi; + fsp->m_u.ah_ip4_spec.tos = flow_rule->mask.tos; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + memcpy(fsp->h_u.tcp_ip6_spec.ip6src, &flow_rule->key.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->h_u.tcp_ip6_spec.ip6dst, &flow_rule->key.dst_ip, + sizeof(struct in6_addr)); + fsp->h_u.tcp_ip6_spec.psrc = flow_rule->key.src_port; + fsp->h_u.tcp_ip6_spec.pdst = flow_rule->key.dst_port; + fsp->h_u.tcp_ip6_spec.tclass = flow_rule->key.tclass; + memcpy(fsp->m_u.tcp_ip6_spec.ip6src, &flow_rule->mask.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.tcp_ip6_spec.ip6dst, &flow_rule->mask.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.tcp_ip6_spec.psrc = flow_rule->mask.src_port; + fsp->m_u.tcp_ip6_spec.pdst = flow_rule->mask.dst_port; + fsp->m_u.tcp_ip6_spec.tclass = flow_rule->mask.tclass; + break; + case AH_V6_FLOW: + case ESP_V6_FLOW: + memcpy(fsp->h_u.ah_ip6_spec.ip6src, &flow_rule->key.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->h_u.ah_ip6_spec.ip6dst, &flow_rule->key.dst_ip, + sizeof(struct in6_addr)); + fsp->h_u.ah_ip6_spec.spi = flow_rule->key.spi; + fsp->h_u.ah_ip6_spec.tclass = flow_rule->key.tclass; + memcpy(fsp->m_u.ah_ip6_spec.ip6src, &flow_rule->mask.src_ip, + sizeof(struct in6_addr)); + memcpy(fsp->m_u.ah_ip6_spec.ip6dst, &flow_rule->mask.dst_ip, + sizeof(struct in6_addr)); + fsp->m_u.ah_ip6_spec.spi = flow_rule->mask.spi; + fsp->m_u.ah_ip6_spec.tclass = flow_rule->mask.tclass; + break; + default: + return -EINVAL; + } + + fsp->ring_cookie = be16_to_cpu(flow_rule->action); + + return 0; +} + +static int gve_generate_flow_rule(struct gve_priv *priv, struct ethtool_rx_flow_spec *fsp, + struct gve_adminq_flow_rule *rule) +{ + static const u16 flow_type_lut[] = { + 
[TCP_V4_FLOW] = GVE_FLOW_TYPE_TCPV4, + [UDP_V4_FLOW] = GVE_FLOW_TYPE_UDPV4, + [SCTP_V4_FLOW] = GVE_FLOW_TYPE_SCTPV4, + [AH_V4_FLOW] = GVE_FLOW_TYPE_AHV4, + [ESP_V4_FLOW] = GVE_FLOW_TYPE_ESPV4, + [TCP_V6_FLOW] = GVE_FLOW_TYPE_TCPV6, + [UDP_V6_FLOW] = GVE_FLOW_TYPE_UDPV6, + [SCTP_V6_FLOW] = GVE_FLOW_TYPE_SCTPV6, + [AH_V6_FLOW] = GVE_FLOW_TYPE_AHV6, + [ESP_V6_FLOW] = GVE_FLOW_TYPE_ESPV6, + }; + u32 flow_type; + + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + return -EOPNOTSUPP; + + if (fsp->ring_cookie >= priv->rx_cfg.num_queues) + return -EINVAL; + + rule->action = cpu_to_be16(fsp->ring_cookie); + + flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); + if (!flow_type || flow_type >= ARRAY_SIZE(flow_type_lut)) + return -EINVAL; + + rule->flow_type = cpu_to_be16(flow_type_lut[flow_type]); + + switch (flow_type) { + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + rule->key.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + rule->key.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + rule->key.src_port = fsp->h_u.tcp_ip4_spec.psrc; + rule->key.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + rule->mask.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; + rule->mask.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; + rule->mask.src_port = fsp->m_u.tcp_ip4_spec.psrc; + rule->mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + break; + case AH_V4_FLOW: + case ESP_V4_FLOW: + rule->key.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + rule->key.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + rule->key.spi = fsp->h_u.ah_ip4_spec.spi; + rule->mask.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; + rule->mask.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; + rule->mask.spi = fsp->m_u.ah_ip4_spec.spi; + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + memcpy(&rule->key.src_ip, fsp->h_u.tcp_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&rule->key.dst_ip, fsp->h_u.tcp_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + rule->key.src_port = fsp->h_u.tcp_ip6_spec.psrc; + rule->key.dst_port = fsp->h_u.tcp_ip6_spec.pdst; + memcpy(&rule->mask.src_ip, fsp->m_u.tcp_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&rule->mask.dst_ip, fsp->m_u.tcp_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + rule->mask.src_port = fsp->m_u.tcp_ip6_spec.psrc; + rule->mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst; + break; + case AH_V6_FLOW: + case ESP_V6_FLOW: + memcpy(&rule->key.src_ip, fsp->h_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&rule->key.dst_ip, fsp->h_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + rule->key.spi = fsp->h_u.ah_ip6_spec.spi; + memcpy(&rule->mask.src_ip, fsp->m_u.usr_ip6_spec.ip6src, + sizeof(struct in6_addr)); + memcpy(&rule->mask.dst_ip, fsp->m_u.usr_ip6_spec.ip6dst, + sizeof(struct in6_addr)); + rule->mask.spi = fsp->m_u.ah_ip6_spec.spi; + break; + default: + /* not doing un-parsed flow types */ + return -EINVAL; + } + + return 0; +} + +int gve_get_flow_rule_entry(struct gve_priv *priv, struct ethtool_rxnfc *cmd) +{ + struct gve_adminq_queried_flow_rule *rules_cache = priv->flow_rules_cache.rules_cache; + struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + u32 *cache_num = &priv->flow_rules_cache.rules_cache_num; + struct gve_adminq_queried_flow_rule *rule = NULL; + int err = 0; + u32 i; + + if (!priv->max_flow_rules) + return -EOPNOTSUPP; + + if (!priv->flow_rules_cache.rules_cache_synced || + fsp->location < be32_to_cpu(rules_cache[0].location) || + fsp->location > be32_to_cpu(rules_cache[*cache_num - 1].location)) { + err = gve_adminq_query_flow_rules(priv,
GVE_FLOW_RULE_QUERY_RULES, fsp->location); + if (err) + return err; + + priv->flow_rules_cache.rules_cache_synced = true; + } + + for (i = 0; i < *cache_num; i++) { + if (fsp->location == be32_to_cpu(rules_cache[i].location)) { + rule = &rules_cache[i]; + break; + } + } + + if (!rule) + return -EINVAL; + + err = gve_fill_ethtool_flow_spec(fsp, rule); + + return err; +} + +int gve_get_flow_rule_ids(struct gve_priv *priv, struct ethtool_rxnfc *cmd, u32 *rule_locs) +{ + __be32 *rule_ids_cache = priv->flow_rules_cache.rule_ids_cache; + u32 *cache_num = &priv->flow_rules_cache.rule_ids_cache_num; + u32 starting_rule_id = 0; + u32 i = 0, j = 0; + int err = 0; + + if (!priv->max_flow_rules) + return -EOPNOTSUPP; + + do { + err = gve_adminq_query_flow_rules(priv, GVE_FLOW_RULE_QUERY_IDS, + starting_rule_id); + if (err) + return err; + + for (i = 0; i < *cache_num; i++) { + if (j >= cmd->rule_cnt) + return -EMSGSIZE; + + rule_locs[j++] = be32_to_cpu(rule_ids_cache[i]); + starting_rule_id = be32_to_cpu(rule_ids_cache[i]) + 1; + } + } while (*cache_num != 0); + cmd->data = priv->max_flow_rules; + + return err; +} + +int gve_add_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = &cmd->fs; + struct gve_adminq_flow_rule *rule = NULL; + int err; + + if (!priv->max_flow_rules) + return -EOPNOTSUPP; + + rule = kvzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + err = gve_generate_flow_rule(priv, fsp, rule); + if (err) + goto out; + + err = gve_adminq_add_flow_rule(priv, rule, fsp->location); + +out: + kvfree(rule); + if (err) + dev_err(&priv->pdev->dev, "Failed to add the flow rule: %u", fsp->location); + + return err; +} + +int gve_del_flow_rule(struct gve_priv *priv, struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = (struct ethtool_rx_flow_spec *)&cmd->fs; + + if (!priv->max_flow_rules) + return -EOPNOTSUPP; + + return gve_adminq_del_flow_rule(priv, fsp->location); +} diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index cabf7d4bcecb..9744b426940e 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: (GPL-2.0 OR MIT) /* Google virtual Ethernet (gve) driver * - * Copyright (C) 2015-2021 Google, Inc. 
+ * Copyright (C) 2015-2024 Google LLC */ #include @@ -141,6 +141,49 @@ static void gve_get_stats(struct net_device *dev, struct rtnl_link_stats64 *s) } } +static int gve_alloc_flow_rule_caches(struct gve_priv *priv) +{ + struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache; + int err = 0; + + if (!priv->max_flow_rules) + return 0; + + flow_rules_cache->rules_cache = + kvcalloc(GVE_FLOW_RULES_CACHE_SIZE, sizeof(*flow_rules_cache->rules_cache), + GFP_KERNEL); + if (!flow_rules_cache->rules_cache) { + dev_err(&priv->pdev->dev, "Cannot alloc flow rules cache\n"); + return -ENOMEM; + } + + flow_rules_cache->rule_ids_cache = + kvcalloc(GVE_FLOW_RULE_IDS_CACHE_SIZE, sizeof(*flow_rules_cache->rule_ids_cache), + GFP_KERNEL); + if (!flow_rules_cache->rule_ids_cache) { + dev_err(&priv->pdev->dev, "Cannot alloc flow rule ids cache\n"); + err = -ENOMEM; + goto free_rules_cache; + } + + return 0; + +free_rules_cache: + kvfree(flow_rules_cache->rules_cache); + flow_rules_cache->rules_cache = NULL; + return err; +} + +static void gve_free_flow_rule_caches(struct gve_priv *priv) +{ + struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache; + + kvfree(flow_rules_cache->rule_ids_cache); + flow_rules_cache->rule_ids_cache = NULL; + kvfree(flow_rules_cache->rules_cache); + flow_rules_cache->rules_cache = NULL; +} + static int gve_alloc_counter_array(struct gve_priv *priv) { priv->counter_array = @@ -521,9 +564,12 @@ static int gve_setup_device_resources(struct gve_priv *priv) { int err; - err = gve_alloc_counter_array(priv); + err = gve_alloc_flow_rule_caches(priv); if (err) return err; + err = gve_alloc_counter_array(priv); + if (err) + goto abort_with_flow_rule_caches; err = gve_alloc_notify_blocks(priv); if (err) goto abort_with_counter; @@ -575,6 +621,8 @@ abort_with_ntfy_blocks: gve_free_notify_blocks(priv); abort_with_counter: gve_free_counter_array(priv); +abort_with_flow_rule_caches: + gve_free_flow_rule_caches(priv); return err; } @@ -587,6 +635,12 @@ static void gve_teardown_device_resources(struct gve_priv *priv) /* Tell device its resources are being freed */ if (gve_get_device_resources_ok(priv)) { + err = gve_flow_rules_reset(priv); + if (err) { + dev_err(&priv->pdev->dev, + "Failed to reset flow rules: err=%d\n", err); + gve_trigger_reset(priv); + } /* detach the stats report */ err = gve_adminq_report_stats(priv, 0, 0x0, GVE_STATS_REPORT_TIMER_PERIOD); if (err) { @@ -606,6 +660,7 @@ static void gve_teardown_device_resources(struct gve_priv *priv) kvfree(priv->ptype_lut_dqo); priv->ptype_lut_dqo = NULL; + gve_free_flow_rule_caches(priv); gve_free_counter_array(priv); gve_free_notify_blocks(priv); gve_free_stats_report(priv); @@ -1730,6 +1785,14 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp) } } +int gve_flow_rules_reset(struct gve_priv *priv) +{ + if (!priv->max_flow_rules) + return 0; + + return gve_adminq_reset_flow_rules(priv); +} + int gve_adjust_config(struct gve_priv *priv, struct gve_tx_alloc_rings_cfg *tx_alloc_cfg, struct gve_rx_alloc_rings_cfg *rx_alloc_cfg) @@ -2003,15 +2066,21 @@ static int gve_set_features(struct net_device *netdev, netdev->features ^= NETIF_F_LRO; if (netif_carrier_ok(netdev)) { err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg); - if (err) { - /* Revert the change on error. 
*/ - netdev->features = orig_features; - return err; - } + if (err) + goto revert_features; } } + if ((netdev->features & NETIF_F_NTUPLE) && !(features & NETIF_F_NTUPLE)) { + err = gve_flow_rules_reset(priv); + if (err) + goto revert_features; + } return 0; + +revert_features: + netdev->features = orig_features; + return err; } static const struct net_device_ops gve_netdev_ops = { diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile index 8e9293e57bfd..e8af26da1fc1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/Makefile @@ -15,15 +15,14 @@ hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o -obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o +obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o hclge-common.o -hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o hns3vf/hclgevf_regs.o \ - hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o +hclge-common-objs += hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o -obj-$(CONFIG_HNS3_HCLGE) += hclge.o +hclgevf-objs = hns3vf/hclgevf_main.o hns3vf/hclgevf_mbx.o hns3vf/hclgevf_devlink.o hns3vf/hclgevf_regs.o + +obj-$(CONFIG_HNS3_HCLGE) += hclge.o hclge-common.o hclge-objs = hns3pf/hclge_main.o hns3pf/hclge_mdio.o hns3pf/hclge_tm.o hns3pf/hclge_regs.o \ hns3pf/hclge_mbx.o hns3pf/hclge_err.o hns3pf/hclge_debugfs.o hns3pf/hclge_ptp.o hns3pf/hclge_devlink.o \ - hns3_common/hclge_comm_cmd.o hns3_common/hclge_comm_rss.o hns3_common/hclge_comm_tqp_stats.o - hclge-$(CONFIG_HNS3_DCB) += hns3pf/hclge_dcb.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 7cebb08bd320..27dbe367f3d3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -786,7 +786,7 @@ struct hnae3_ae_ops { void (*get_rx_hwts)(struct hnae3_handle *handle, struct sk_buff *skb, u32 nsec, u32 sec); int (*get_ts_info)(struct hnae3_handle *handle, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); int (*get_link_diagnosis_info)(struct hnae3_handle *handle, u32 *status_code); void (*clean_vf_config)(struct hnae3_ae_dev *ae_dev, int num_vfs); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c index ea40b594dbac..4ad4e8ab2f1f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_cmd.c @@ -48,6 +48,7 @@ void hclge_comm_cmd_reuse_desc(struct hclge_desc *desc, bool is_read) else desc->flag &= cpu_to_le16(~HCLGE_COMM_CMD_FLAG_WR); } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_reuse_desc); static void hclge_comm_set_default_capability(struct hnae3_ae_dev *ae_dev, bool is_pf) @@ -72,6 +73,7 @@ void hclge_comm_cmd_setup_basic_desc(struct hclge_desc *desc, if (is_read) desc->flag |= cpu_to_le16(HCLGE_COMM_CMD_FLAG_WR); } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_setup_basic_desc); int hclge_comm_firmware_compat_config(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw, bool en) @@ -517,6 +519,7 @@ int hclge_comm_cmd_send(struct hclge_comm_hw *hw, struct hclge_desc *desc, return ret; } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_send); static void hclge_comm_cmd_uninit_regs(struct hclge_comm_hw *hw) { @@ -553,6 +556,7 @@ void hclge_comm_cmd_uninit(struct hnae3_ae_dev *ae_dev, 
hclge_comm_free_cmd_desc(&cmdq->csq); hclge_comm_free_cmd_desc(&cmdq->crq); } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_uninit); int hclge_comm_cmd_queue_init(struct pci_dev *pdev, struct hclge_comm_hw *hw) { @@ -591,6 +595,7 @@ err_csq: hclge_comm_free_cmd_desc(&hw->cmq.csq); return ret; } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_queue_init); void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw, const struct hclge_comm_cmq_ops *ops) @@ -602,6 +607,7 @@ void hclge_comm_cmd_init_ops(struct hclge_comm_hw *hw, cmdq->ops.trace_cmd_get = ops->trace_cmd_get; } } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_init_ops); int hclge_comm_cmd_init(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw, u32 *fw_version, bool is_pf, @@ -672,3 +678,8 @@ err_cmd_init: return ret; } +EXPORT_SYMBOL_GPL(hclge_comm_cmd_init); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet PF/VF Common Library"); +MODULE_AUTHOR("Huawei Tech. Co., Ltd."); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c index b4ae2160aff4..4e2bb6556b1c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c @@ -62,6 +62,7 @@ int hclge_comm_rss_init_cfg(struct hnae3_handle *nic, return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_rss_init_cfg); void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset, u16 *tc_valid, u16 *tc_size) @@ -78,6 +79,7 @@ void hclge_comm_get_rss_tc_info(u16 rss_size, u8 hw_tc_map, u16 *tc_offset, tc_offset[i] = (hw_tc_map & BIT(i)) ? rss_size * i : 0; } } +EXPORT_SYMBOL_GPL(hclge_comm_get_rss_tc_info); int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset, u16 *tc_valid, u16 *tc_size) @@ -113,6 +115,7 @@ int hclge_comm_set_rss_tc_mode(struct hclge_comm_hw *hw, u16 *tc_offset, return ret; } +EXPORT_SYMBOL_GPL(hclge_comm_set_rss_tc_mode); int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg, struct hclge_comm_hw *hw, const u8 *key, @@ -143,6 +146,7 @@ int hclge_comm_set_rss_hash_key(struct hclge_comm_rss_cfg *rss_cfg, return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_set_rss_hash_key); int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev, struct hclge_comm_hw *hw, @@ -185,11 +189,13 @@ int hclge_comm_set_rss_tuple(struct hnae3_ae_dev *ae_dev, rss_cfg->rss_tuple_sets.ipv6_fragment_en = req->ipv6_fragment_en; return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_set_rss_tuple); u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle) { return HCLGE_COMM_RSS_KEY_SIZE; } +EXPORT_SYMBOL_GPL(hclge_comm_get_rss_key_size); int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg, const u8 hfunc, u8 *hash_algo) @@ -217,6 +223,7 @@ void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev, for (i = 0; i < ae_dev->dev_specs.rss_ind_tbl_size; i++) rss_cfg->rss_indirection_tbl[i] = i % rss_cfg->rss_size; } +EXPORT_SYMBOL_GPL(hclge_comm_rss_indir_init_cfg); int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type, u8 *tuple_sets) @@ -250,6 +257,7 @@ int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type, return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_get_rss_tuple); static void hclge_comm_append_rss_msb_info(struct hclge_comm_rss_ind_tbl_cmd *req, @@ -304,6 +312,7 @@ int hclge_comm_set_rss_indir_table(struct hnae3_ae_dev *ae_dev, } return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_set_rss_indir_table); int hclge_comm_set_rss_input_tuple(struct hclge_comm_hw *hw, struct 
hclge_comm_rss_cfg *rss_cfg) @@ -332,6 +341,7 @@ int hclge_comm_set_rss_input_tuple(struct hclge_comm_hw *hw, "failed to configure rss input, ret = %d.\n", ret); return ret; } +EXPORT_SYMBOL_GPL(hclge_comm_set_rss_input_tuple); void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key, u8 *hfunc) @@ -355,6 +365,7 @@ void hclge_comm_get_rss_hash_info(struct hclge_comm_rss_cfg *rss_cfg, u8 *key, if (key) memcpy(key, rss_cfg->rss_hash_key, HCLGE_COMM_RSS_KEY_SIZE); } +EXPORT_SYMBOL_GPL(hclge_comm_get_rss_hash_info); void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg, u32 *indir, u16 rss_ind_tbl_size) @@ -367,6 +378,7 @@ void hclge_comm_get_rss_indir_tbl(struct hclge_comm_rss_cfg *rss_cfg, for (i = 0; i < rss_ind_tbl_size; i++) indir[i] = rss_cfg->rss_indirection_tbl[i]; } +EXPORT_SYMBOL_GPL(hclge_comm_get_rss_indir_tbl); int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc, const u8 *key) @@ -408,6 +420,7 @@ int hclge_comm_set_rss_algo_key(struct hclge_comm_hw *hw, const u8 hfunc, return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_set_rss_algo_key); static u8 hclge_comm_get_rss_hash_bits(struct ethtool_rxnfc *nfc) { @@ -502,3 +515,4 @@ u64 hclge_comm_convert_rss_tuple(u8 tuple_sets) return tuple_data; } +EXPORT_SYMBOL_GPL(hclge_comm_convert_rss_tuple); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c index 618f66d9586b..2b31188ff555 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_tqp_stats.c @@ -26,6 +26,7 @@ u64 *hclge_comm_tqps_get_stats(struct hnae3_handle *handle, u64 *data) return buff; } +EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_stats); int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle) { @@ -33,6 +34,7 @@ int hclge_comm_tqps_get_sset_count(struct hnae3_handle *handle) return kinfo->num_tqps * HCLGE_COMM_QUEUE_PAIR_SIZE; } +EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_sset_count); u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data) { @@ -56,6 +58,7 @@ u8 *hclge_comm_tqps_get_strings(struct hnae3_handle *handle, u8 *data) return buff; } +EXPORT_SYMBOL_GPL(hclge_comm_tqps_get_strings); int hclge_comm_tqps_update_stats(struct hnae3_handle *handle, struct hclge_comm_hw *hw) @@ -99,6 +102,7 @@ int hclge_comm_tqps_update_stats(struct hnae3_handle *handle, return 0; } +EXPORT_SYMBOL_GPL(hclge_comm_tqps_update_stats); void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle) { @@ -113,3 +117,4 @@ void hclge_comm_reset_tqp_stats(struct hnae3_handle *handle) memset(&tqp->tqp_stats, 0, sizeof(tqp->tqp_stats)); } } +EXPORT_SYMBOL_GPL(hclge_comm_reset_tqp_stats); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 941cb529d671..b1e988347347 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -2009,7 +2009,7 @@ static int hns3_set_tunable(struct net_device *netdev, ETHTOOL_RING_USE_TX_PUSH) static int hns3_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct hnae3_handle *handle = hns3_get_handle(netdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c index 507d7ce26d83..5fff8ed388f8 100644 --- 
a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.c @@ -378,7 +378,7 @@ int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr) } int hclge_ptp_get_ts_info(struct hnae3_handle *handle, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h index bbee74cd8404..63483636c074 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h @@ -138,6 +138,6 @@ int hclge_ptp_set_cfg(struct hclge_dev *hdev, struct ifreq *ifr); int hclge_ptp_init(struct hclge_dev *hdev); void hclge_ptp_uninit(struct hclge_dev *hdev); int hclge_ptp_get_ts_info(struct hnae3_handle *handle, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); int hclge_ptp_cfg_qry(struct hclge_dev *hdev, u32 *cfg); #endif diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index e0287fbd501d..0375c7448a57 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -384,17 +384,6 @@ config IGC_LEDS Optional support for controlling the NIC LED's with the netdev LED trigger. -config IDPF - tristate "Intel(R) Infrastructure Data Path Function Support" - depends on PCI_MSI - select DIMLIB - select PAGE_POOL - select PAGE_POOL_STATS - help - This driver supports Intel(R) Infrastructure Data Path Function - devices. - - To compile this driver as a module, choose M here. The module - will be called idpf. +source "drivers/net/ethernet/intel/idpf/Kconfig" endif # NET_VENDOR_INTEL diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 9b068d40778d..aa139b67a55b 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -161,7 +161,6 @@ #define FIRMWARE_D102E "e100/d102e_ucode.bin" MODULE_DESCRIPTION(DRV_DESCRIPTION); -MODULE_AUTHOR(DRV_COPYRIGHT); MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE(FIRMWARE_D101M); MODULE_FIRMWARE(FIRMWARE_D101S); diff --git a/drivers/net/ethernet/intel/e1000/Makefile b/drivers/net/ethernet/intel/e1000/Makefile index 314c52d44b7c..79491dec47e1 100644 --- a/drivers/net/ethernet/intel/e1000/Makefile +++ b/drivers/net/ethernet/intel/e1000/Makefile @@ -7,4 +7,4 @@ obj-$(CONFIG_E1000) += e1000.o -e1000-objs := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o +e1000-y := e1000_main.o e1000_hw.o e1000_ethtool.o e1000_param.o diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 60fff9a6c53e..ab7ae418d294 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -187,7 +187,6 @@ static struct pci_driver e1000_driver = { .err_handler = &e1000_err_handler }; -MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/intel/e1000e/Makefile b/drivers/net/ethernet/intel/e1000e/Makefile index 0baa15503c38..18f22b6374d5 100644 --- a/drivers/net/ethernet/intel/e1000e/Makefile +++ b/drivers/net/ethernet/intel/e1000e/Makefile @@ -10,7 +10,6 @@ subdir-ccflags-y += -I$(src) obj-$(CONFIG_E1000E) += e1000e.o -e1000e-objs := 82571.o ich8lan.o 80003es2lan.o \ - mac.o manage.o nvm.o phy.o \ - param.o ethtool.o netdev.o 
ptp.o - +e1000e-y := 82571.o ich8lan.o 80003es2lan.o \ + mac.o manage.o nvm.o phy.o \ + param.o ethtool.o netdev.o ptp.o diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 85da20778e0f..9364bc2b4eb1 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -2263,7 +2263,7 @@ static int e1000e_set_eee(struct net_device *netdev, struct ethtool_keee *edata) } static int e1000e_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct e1000_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 3cd161c6672b..360ee26557f7 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -7969,7 +7969,6 @@ static void __exit e1000_exit_module(void) } module_exit(e1000_exit_module); -MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index fc373472e4e1..142f07ca8bc0 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -17,7 +17,6 @@ static const char fm10k_driver_string[] = DRV_SUMMARY; static const char fm10k_copyright[] = "Copyright(c) 2013 - 2019 Intel Corporation."; -MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index cad93f323bd5..9faa4339a76c 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -10,7 +10,7 @@ subdir-ccflags-y += -I$(src) obj-$(CONFIG_I40E) += i40e.o -i40e-objs := i40e_main.o \ +i40e-y := i40e_main.o \ i40e_ethtool.o \ i40e_adminq.o \ i40e_common.o \ diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index bca2084cc54b..d546567e0286 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -735,7 +735,7 @@ __i40e_pf_next_veb(struct i40e_pf *pf, int *idx) _i++, _veb = __i40e_pf_next_veb(_pf, &_i)) /** - * i40e_mac_to_hkey - Convert a 6-byte MAC Address to a u64 hash key + * i40e_addr_to_hkey - Convert a 6-byte MAC Address to a u64 hash key * @macaddr: the MAC Address as the base key * * Simply copies the address and returns it as a u64 for hashing diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 4e28785c9fb2..1d0d2e526adb 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2546,7 +2546,7 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset, } static int i40e_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct i40e_pf *pf = i40e_netdev_to_pf(dev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 310513d9321b..cbcfada7b357 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -98,7 +98,6 @@ static int debug = -1; module_param(debug, uint, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX)"); 
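/* Hedged editor's note on the recurring ts_info conversion in this series:
 * get_ts_info handlers now fill struct kernel_ethtool_ts_info, the
 * in-kernel counterpart of the UAPI struct ethtool_ts_info. The fields the
 * drivers touch (so_timestamping, phc_index, tx_types, rx_filters) are
 * assumed to keep their names, so only the signatures change in these hunks.
 */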
-MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); MODULE_IMPORT_NS(LIBIE); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/intel/iavf/Makefile b/drivers/net/ethernet/intel/iavf/Makefile index 2d154a4e2fd7..356ac9faa5bf 100644 --- a/drivers/net/ethernet/intel/iavf/Makefile +++ b/drivers/net/ethernet/intel/iavf/Makefile @@ -11,6 +11,5 @@ subdir-ccflags-y += -I$(src) obj-$(CONFIG_IAVF) += iavf.o -iavf-objs := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \ - iavf_adv_rss.o \ - iavf_txrx.o iavf_common.o iavf_adminq.o +iavf-y := iavf_main.o iavf_ethtool.o iavf_virtchnl.o iavf_fdir.o \ + iavf_adv_rss.o iavf_txrx.o iavf_common.o iavf_adminq.o diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index c6dff0963053..ff11bafb3b4f 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -45,7 +45,6 @@ static const struct pci_device_id iavf_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, iavf_pci_tbl); MODULE_ALIAS("i40evf"); -MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver"); MODULE_IMPORT_NS(LIBETH); MODULE_IMPORT_NS(LIBIE); diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink.c b/drivers/net/ethernet/intel/ice/devlink/devlink.c index 704e9ad5144e..810a901d7afd 100644 --- a/drivers/net/ethernet/intel/ice/devlink/devlink.c +++ b/drivers/net/ethernet/intel/ice/devlink/devlink.c @@ -794,10 +794,8 @@ int ice_devlink_rate_init_tx_topology(struct devlink *devlink, struct ice_vsi *v tc_node = pi->root->children[0]; mutex_lock(&pi->sched_lock); - devl_lock(devlink); for (i = 0; i < tc_node->num_children; i++) ice_traverse_tx_tree(devlink, tc_node->children[i], tc_node, pf); - devl_unlock(devlink); mutex_unlock(&pi->sched_lock); return 0; @@ -1383,9 +1381,129 @@ ice_devlink_enable_iw_validate(struct devlink *devlink, u32 id, return 0; } +#define DEVLINK_LOCAL_FWD_DISABLED_STR "disabled" +#define DEVLINK_LOCAL_FWD_ENABLED_STR "enabled" +#define DEVLINK_LOCAL_FWD_PRIORITIZED_STR "prioritized" + +/** + * ice_devlink_local_fwd_mode_to_str - Get string for local_fwd mode. + * @mode: local forwarding for mode used in port_info struct. + * + * Return: Mode respective string or "Invalid". + */ +static const char * +ice_devlink_local_fwd_mode_to_str(enum ice_local_fwd_mode mode) +{ + switch (mode) { + case ICE_LOCAL_FWD_MODE_ENABLED: + return DEVLINK_LOCAL_FWD_ENABLED_STR; + case ICE_LOCAL_FWD_MODE_PRIORITIZED: + return DEVLINK_LOCAL_FWD_PRIORITIZED_STR; + case ICE_LOCAL_FWD_MODE_DISABLED: + return DEVLINK_LOCAL_FWD_DISABLED_STR; + } + + return "Invalid"; +} + +/** + * ice_devlink_local_fwd_str_to_mode - Get local_fwd mode from string name. + * @mode_str: local forwarding mode string. + * + * Return: Mode value or negative number if invalid. + */ +static int ice_devlink_local_fwd_str_to_mode(const char *mode_str) +{ + if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_ENABLED_STR)) + return ICE_LOCAL_FWD_MODE_ENABLED; + else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_PRIORITIZED_STR)) + return ICE_LOCAL_FWD_MODE_PRIORITIZED; + else if (!strcmp(mode_str, DEVLINK_LOCAL_FWD_DISABLED_STR)) + return ICE_LOCAL_FWD_MODE_DISABLED; + + return -EINVAL; +} + +/** + * ice_devlink_local_fwd_get - Get local_fwd parameter. + * @devlink: Pointer to the devlink instance. + * @id: The parameter ID to set. + * @ctx: Context to store the parameter value. + * + * Return: Zero. 
+ */ +static int ice_devlink_local_fwd_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct ice_pf *pf = devlink_priv(devlink); + struct ice_port_info *pi; + const char *mode_str; + + pi = pf->hw.port_info; + mode_str = ice_devlink_local_fwd_mode_to_str(pi->local_fwd_mode); + snprintf(ctx->val.vstr, sizeof(ctx->val.vstr), "%s", mode_str); + + return 0; +} + +/** + * ice_devlink_local_fwd_set - Set local_fwd parameter. + * @devlink: Pointer to the devlink instance. + * @id: The parameter ID to set. + * @ctx: Context to get the parameter value. + * @extack: Netlink extended ACK structure. + * + * Return: Zero. + */ +static int ice_devlink_local_fwd_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) +{ + int new_local_fwd_mode = ice_devlink_local_fwd_str_to_mode(ctx->val.vstr); + struct ice_pf *pf = devlink_priv(devlink); + struct device *dev = ice_pf_to_dev(pf); + struct ice_port_info *pi; + + pi = pf->hw.port_info; + if (pi->local_fwd_mode != new_local_fwd_mode) { + pi->local_fwd_mode = new_local_fwd_mode; + dev_info(dev, "Setting local_fwd to %s\n", ctx->val.vstr); + ice_schedule_reset(pf, ICE_RESET_CORER); + } + + return 0; +} + +/** + * ice_devlink_local_fwd_validate - Validate passed local_fwd parameter value. + * @devlink: Unused pointer to devlink instance. + * @id: The parameter ID to validate. + * @val: Value to validate. + * @extack: Netlink extended ACK structure. + * + * Supported values are: + * "enabled" - local_fwd is enabled, "disabled" - local_fwd is disabled + * "prioritized" - local_fwd traffic is prioritized in scheduling. + * + * Return: Zero when passed parameter value is supported. Negative value on + * error. + */ +static int ice_devlink_local_fwd_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + if (ice_devlink_local_fwd_str_to_mode(val.vstr) < 0) { + NL_SET_ERR_MSG_MOD(extack, "Error: Requested value is not supported."); + return -EINVAL; + } + + return 0; +} + enum ice_param_id { ICE_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, ICE_DEVLINK_PARAM_ID_TX_SCHED_LAYERS, + ICE_DEVLINK_PARAM_ID_LOCAL_FWD, }; static const struct devlink_param ice_dvl_rdma_params[] = { @@ -1407,6 +1525,12 @@ static const struct devlink_param ice_dvl_sched_params[] = { ice_devlink_tx_sched_layers_get, ice_devlink_tx_sched_layers_set, ice_devlink_tx_sched_layers_validate), + DEVLINK_PARAM_DRIVER(ICE_DEVLINK_PARAM_ID_LOCAL_FWD, + "local_forwarding", DEVLINK_PARAM_TYPE_STRING, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + ice_devlink_local_fwd_get, + ice_devlink_local_fwd_set, + ice_devlink_local_fwd_validate), }; static void ice_devlink_free(void *devlink_ptr) diff --git a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c index 13e6790d3cae..00fed5a61d62 100644 --- a/drivers/net/ethernet/intel/ice/devlink/devlink_port.c +++ b/drivers/net/ethernet/intel/ice/devlink/devlink_port.c @@ -372,6 +372,62 @@ void ice_devlink_destroy_pf_port(struct ice_pf *pf) devl_port_unregister(&pf->devlink_port); } +/** + * ice_devlink_port_get_vf_fn_mac - .port_fn_hw_addr_get devlink handler + * @port: devlink port structure + * @hw_addr: MAC address of the port + * @hw_addr_len: length of MAC address + * @extack: extended netdev ack structure + * + * Callback for the devlink .port_fn_hw_addr_get operation + * Return: zero on success or an error code on failure. 
+ */ +static int ice_devlink_port_get_vf_fn_mac(struct devlink_port *port, + u8 *hw_addr, int *hw_addr_len, + struct netlink_ext_ack *extack) +{ + struct ice_vf *vf = container_of(port, struct ice_vf, devlink_port); + + ether_addr_copy(hw_addr, vf->dev_lan_addr); + *hw_addr_len = ETH_ALEN; + + return 0; +} + +/** + * ice_devlink_port_set_vf_fn_mac - .port_fn_hw_addr_set devlink handler + * @port: devlink port structure + * @hw_addr: MAC address of the port + * @hw_addr_len: length of MAC address + * @extack: extended netdev ack structure + * + * Callback for the devlink .port_fn_hw_addr_set operation + * Return: zero on success or an error code on failure. + */ +static int ice_devlink_port_set_vf_fn_mac(struct devlink_port *port, + const u8 *hw_addr, + int hw_addr_len, + struct netlink_ext_ack *extack) + +{ + struct devlink_port_attrs *attrs = &port->attrs; + struct devlink_port_pci_vf_attrs *pci_vf; + struct devlink *devlink = port->devlink; + struct ice_pf *pf; + u16 vf_id; + + pf = devlink_priv(devlink); + pci_vf = &attrs->pci_vf; + vf_id = pci_vf->vf; + + return __ice_set_vf_mac(pf, vf_id, hw_addr); +} + +static const struct devlink_port_ops ice_devlink_vf_port_ops = { + .port_fn_hw_addr_get = ice_devlink_port_get_vf_fn_mac, + .port_fn_hw_addr_set = ice_devlink_port_set_vf_fn_mac, +}; + /** * ice_devlink_create_vf_port - Create a devlink port for this VF * @vf: the VF to create a port for @@ -407,7 +463,8 @@ int ice_devlink_create_vf_port(struct ice_vf *vf) devlink_port_attrs_set(devlink_port, &attrs); devlink = priv_to_devlink(pf); - err = devlink_port_register(devlink, devlink_port, vsi->idx); + err = devl_port_register_with_ops(devlink, devlink_port, vsi->idx, + &ice_devlink_vf_port_ops); if (err) { dev_err(dev, "Failed to create devlink port for VF %d, error %d\n", vf->vf_id, err); @@ -426,5 +483,5 @@ int ice_devlink_create_vf_port(struct ice_vf *vf) void ice_devlink_destroy_vf_port(struct ice_vf *vf) { devl_rate_leaf_destroy(&vf->devlink_port); - devlink_port_unregister(&vf->devlink_port); + devl_port_unregister(&vf->devlink_port); } diff --git a/drivers/net/ethernet/intel/ice/ice_adapter.c b/drivers/net/ethernet/intel/ice/ice_adapter.c index 52d15ef7f4b1..ad84d8ad49a6 100644 --- a/drivers/net/ethernet/intel/ice/ice_adapter.c +++ b/drivers/net/ethernet/intel/ice/ice_adapter.c @@ -11,6 +11,7 @@ #include "ice_adapter.h" static DEFINE_XARRAY(ice_adapters); +static DEFINE_MUTEX(ice_adapters_mutex); /* PCI bus number is 8 bits. Slot is 5 bits. Domain can have the rest. */ #define INDEX_FIELD_DOMAIN GENMASK(BITS_PER_LONG - 1, 13) @@ -47,8 +48,6 @@ static void ice_adapter_free(struct ice_adapter *adapter) kfree(adapter); } -DEFINE_FREE(ice_adapter_free, struct ice_adapter*, if (_T) ice_adapter_free(_T)) - /** * ice_adapter_get - Get a shared ice_adapter structure. * @pdev: Pointer to the pci_dev whose driver is getting the ice_adapter. 
@@ -64,27 +63,26 @@ DEFINE_FREE(ice_adapter_free, struct ice_adapter*, if (_T) ice_adapter_free(_T)) */ struct ice_adapter *ice_adapter_get(const struct pci_dev *pdev) { - struct ice_adapter *ret, __free(ice_adapter_free) *adapter = NULL; unsigned long index = ice_adapter_index(pdev); + struct ice_adapter *adapter; + int err; - adapter = ice_adapter_new(); - if (!adapter) - return ERR_PTR(-ENOMEM); + scoped_guard(mutex, &ice_adapters_mutex) { + err = xa_insert(&ice_adapters, index, NULL, GFP_KERNEL); + if (err == -EBUSY) { + adapter = xa_load(&ice_adapters, index); + refcount_inc(&adapter->refcount); + return adapter; + } + if (err) + return ERR_PTR(err); - xa_lock(&ice_adapters); - ret = __xa_cmpxchg(&ice_adapters, index, NULL, adapter, GFP_KERNEL); - if (xa_is_err(ret)) { - ret = ERR_PTR(xa_err(ret)); - goto unlock; + adapter = ice_adapter_new(); + if (!adapter) + return ERR_PTR(-ENOMEM); + xa_store(&ice_adapters, index, adapter, GFP_KERNEL); } - if (ret) { - refcount_inc(&ret->refcount); - goto unlock; - } - ret = no_free_ptr(adapter); -unlock: - xa_unlock(&ice_adapters); - return ret; + return adapter; } /** @@ -94,23 +92,21 @@ unlock: * Releases the reference to ice_adapter previously obtained with * ice_adapter_get. * - * Context: Any. + * Context: Process, may sleep. */ void ice_adapter_put(const struct pci_dev *pdev) { unsigned long index = ice_adapter_index(pdev); struct ice_adapter *adapter; - xa_lock(&ice_adapters); - adapter = xa_load(&ice_adapters, index); - if (WARN_ON(!adapter)) - goto unlock; + scoped_guard(mutex, &ice_adapters_mutex) { + adapter = xa_load(&ice_adapters, index); + if (WARN_ON(!adapter)) + return; + if (!refcount_dec_and_test(&adapter->refcount)) + return; - if (!refcount_dec_and_test(&adapter->refcount)) - goto unlock; - - WARN_ON(__xa_erase(&ice_adapters, index) != adapter); + WARN_ON(xa_erase(&ice_adapters, index) != adapter); + } ice_adapter_free(adapter); -unlock: - xa_unlock(&ice_adapters); } diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index e76c388b9905..66f02988d549 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -122,6 +122,7 @@ struct ice_aqc_list_caps_elem { #define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT 0x0077 #define ICE_AQC_CAPS_NVM_MGMT 0x0080 #define ICE_AQC_CAPS_TX_SCHED_TOPO_COMP_MODE 0x0085 +#define ICE_AQC_CAPS_NAC_TOPOLOGY 0x0087 #define ICE_AQC_CAPS_FW_LAG_SUPPORT 0x0092 #define ICE_AQC_BIT_ROCEV2_LAG 0x01 #define ICE_AQC_BIT_SRIOV_LAG 0x02 @@ -231,6 +232,13 @@ struct ice_aqc_get_sw_cfg_resp_elem { #define ICE_AQC_GET_SW_CONF_RESP_IS_VF BIT(15) }; +/* Loopback port parameter mode values. 
*/ +enum ice_local_fwd_mode { + ICE_LOCAL_FWD_MODE_ENABLED = 0, + ICE_LOCAL_FWD_MODE_DISABLED = 1, + ICE_LOCAL_FWD_MODE_PRIORITIZED = 2, +}; + /* Set Port parameters, (direct, 0x0203) */ struct ice_aqc_set_port_params { __le16 cmd_flags; @@ -239,7 +247,9 @@ struct ice_aqc_set_port_params { __le16 swid; #define ICE_AQC_PORT_SWID_VALID BIT(15) #define ICE_AQC_PORT_SWID_M 0xFF - u8 reserved[10]; + u8 local_fwd_mode; +#define ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID BIT(2) + u8 reserved[9]; }; /* These resource type defines are used for all switch resource @@ -1460,6 +1470,55 @@ struct ice_aqc_get_sensor_reading_resp { } data; }; +/* DNL call command (indirect 0x0682) + * Struct is used for both command and response + */ +struct ice_aqc_dnl_call_command { + u8 ctx; /* Used in command, reserved in response */ + u8 reserved; + __le16 activity_id; +#define ICE_AQC_ACT_ID_DNL 0x1129 + __le32 reserved1; + __le32 addr_high; + __le32 addr_low; +}; + +struct ice_aqc_dnl_equa_param { + __le16 data_in; +#define ICE_AQC_RX_EQU_SHIFT 8 +#define ICE_AQC_RX_EQU_PRE2 (0x10 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_PRE1 (0x11 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_POST1 (0x12 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_BFLF (0x13 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_BFHF (0x14 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_RX_EQU_DRATE (0x15 << ICE_AQC_RX_EQU_SHIFT) +#define ICE_AQC_TX_EQU_PRE1 0x0 +#define ICE_AQC_TX_EQU_PRE3 0x3 +#define ICE_AQC_TX_EQU_ATTEN 0x4 +#define ICE_AQC_TX_EQU_POST1 0x8 +#define ICE_AQC_TX_EQU_PRE2 0xC + __le16 op_code_serdes_sel; +#define ICE_AQC_OP_CODE_SHIFT 4 +#define ICE_AQC_OP_CODE_RX_EQU (0x9 << ICE_AQC_OP_CODE_SHIFT) +#define ICE_AQC_OP_CODE_TX_EQU (0x10 << ICE_AQC_OP_CODE_SHIFT) + __le32 reserved[3]; +}; + +struct ice_aqc_dnl_equa_respon { + /* Equalization value can be negative */ + int val; + __le32 reserved[3]; +}; + +/* DNL call command/response buffer (indirect 0x0682) */ +struct ice_aqc_dnl_call { + union { + struct ice_aqc_dnl_equa_param txrx_equa_reqs; + __le32 stores[4]; + struct ice_aqc_dnl_equa_respon txrx_equa_resp; + } sto; +}; + struct ice_aqc_link_topo_params { u8 lport_num; u8 lport_num_valid; @@ -2563,6 +2622,7 @@ struct ice_aq_desc { struct ice_aqc_get_link_status get_link_status; struct ice_aqc_event_lan_overflow lan_overflow; struct ice_aqc_get_link_topo get_link_topo; + struct ice_aqc_dnl_call_command dnl_call; struct ice_aqc_i2c read_write_i2c; struct ice_aqc_read_i2c_resp read_i2c_resp; struct ice_aqc_get_set_tx_topo get_set_tx_topo; @@ -2687,6 +2747,7 @@ enum ice_adminq_opc { ice_aqc_opc_set_phy_rec_clk_out = 0x0630, ice_aqc_opc_get_phy_rec_clk_out = 0x0631, ice_aqc_opc_get_sensor_reading = 0x0632, + ice_aqc_opc_dnl_call = 0x0682, ice_aqc_opc_get_link_topo = 0x06E0, ice_aqc_opc_read_i2c = 0x06E2, ice_aqc_opc_write_i2c = 0x06E3, diff --git a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h index 57abd52386d0..10d9d74f3545 100644 --- a/drivers/net/ethernet/intel/ice/ice_cgu_regs.h +++ b/drivers/net/ethernet/intel/ice/ice_cgu_regs.h @@ -23,7 +23,18 @@ union nac_cgu_dword9 { u32 clk_synce0_amp : 2; u32 one_pps_out_amp : 2; u32 misc24 : 12; - } field; + }; + u32 val; +}; + +#define NAC_CGU_DWORD16_E825C 0x40 +union nac_cgu_dword16_e825c { + struct { + u32 synce_remndr : 6; + u32 synce_phlmt_en : 1; + u32 misc13 : 17; + u32 tspll_ck_refclkfreq : 8; + }; u32 val; }; @@ -39,7 +50,7 @@ union nac_cgu_dword19 { u32 japll_ndivratio : 4; u32 japll_iref_ndivratio : 3; u32 misc27 : 1; - } 
field; + }; u32 val; }; @@ -63,7 +74,23 @@ union nac_cgu_dword22 { u32 fdpllclk_sel_div2 : 1; u32 time1588clk_sel_div2 : 1; u32 misc3 : 1; - } field; + }; + u32 val; +}; + +#define NAC_CGU_DWORD23_E825C 0x5C +union nac_cgu_dword23_e825c { + struct { + u32 cgupll_fbdiv_intgr : 10; + u32 ux56pll_fbdiv_intgr : 10; + u32 misc20 : 4; + u32 ts_pll_enable : 1; + u32 time_sync_tspll_align_sel : 1; + u32 ext_synce_sel : 1; + u32 ref1588_ck_div : 4; + u32 time_ref_sel : 1; + + }; u32 val; }; @@ -77,7 +104,7 @@ union nac_cgu_dword24 { u32 ext_synce_sel : 1; u32 ref1588_ck_div : 4; u32 time_ref_sel : 1; - } field; + }; u32 val; }; @@ -92,7 +119,7 @@ union tspll_cntr_bist_settings { u32 i_plllock_cnt_6_0 : 7; u32 i_plllock_cnt_10_7 : 4; u32 reserved200 : 4; - } field; + }; u32 val; }; @@ -109,7 +136,45 @@ union tspll_ro_bwm_lf { u32 afcdone_cri : 1; u32 feedfwrdgain_cal_cri_7_0 : 8; u32 m2fbdivmod_cri_7_0 : 8; - } field; + }; + u32 val; +}; + +#define TSPLL_RO_LOCK_E825C 0x3f0 +union tspll_ro_lock_e825c { + struct { + u32 bw_freqov_high_cri_7_0 : 8; + u32 bw_freqov_high_cri_9_8 : 2; + u32 reserved455 : 1; + u32 plllock_gain_tran_cri : 1; + u32 plllock_true_lock_cri : 1; + u32 pllunlock_flag_cri : 1; + u32 afcerr_cri : 1; + u32 afcdone_cri : 1; + u32 feedfwrdgain_cal_cri_7_0 : 8; + u32 reserved462 : 8; + }; + u32 val; +}; + +#define TSPLL_BW_TDC_E825C 0x31c +union tspll_bw_tdc_e825c { + struct { + u32 i_tdc_offset_lock_1_0 : 2; + u32 i_bbthresh1_2_0 : 3; + u32 i_bbthresh2_2_0 : 3; + u32 i_tdcsel_1_0 : 2; + u32 i_tdcovccorr_en_h : 1; + u32 i_divretimeren : 1; + u32 i_bw_ampmeas_window : 1; + u32 i_bw_lowerbound_2_0 : 3; + u32 i_bw_upperbound_2_0 : 3; + u32 i_bw_mode_1_0 : 2; + u32 i_ft_mode_sel_2_0 : 3; + u32 i_bwphase_4_0 : 5; + u32 i_plllock_sel_1_0 : 2; + u32 i_afc_divratio : 1; + }; u32 val; }; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 24716a3b494c..009716a12a26 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -239,6 +239,30 @@ bool ice_is_e810t(struct ice_hw *hw) return false; } +/** + * ice_is_e822 - Check if a device is E822 family device + * @hw: pointer to the hardware structure + * + * Return: true if the device is E822 based, false if not. 
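+ *
+ * Hedged usage sketch (the caller and gating shown are illustrative, not
+ * taken from this series):
+ *
+ *   if (ice_is_e822(hw) || ice_is_e823(hw))
+ *           apply_e82x_workaround(hw);  /* hypothetical helper */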
+ */ +bool ice_is_e822(struct ice_hw *hw) +{ + switch (hw->device_id) { + case ICE_DEV_ID_E822C_BACKPLANE: + case ICE_DEV_ID_E822C_QSFP: + case ICE_DEV_ID_E822C_SFP: + case ICE_DEV_ID_E822C_10G_BASE_T: + case ICE_DEV_ID_E822C_SGMII: + case ICE_DEV_ID_E822L_BACKPLANE: + case ICE_DEV_ID_E822L_SFP: + case ICE_DEV_ID_E822L_10G_BASE_T: + case ICE_DEV_ID_E822L_SGMII: + return true; + default: + return false; + } +} + /** * ice_is_e823 * @hw: pointer to the hardware structure @@ -910,6 +934,9 @@ static int ice_init_fltr_mgmt_struct(struct ice_hw *hw) INIT_LIST_HEAD(&sw->vsi_list_map_head); sw->prof_res_bm_init = 0; + /* Initialize recipe count with default recipes read from NVM */ + sw->recp_cnt = ICE_SW_LKUP_LAST; + status = ice_init_def_sw_recp(hw); if (status) { devm_kfree(ice_hw_to_dev(hw), hw->switch_info); @@ -937,14 +964,7 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) } recps = sw->recp_list; for (i = 0; i < ICE_MAX_NUM_RECIPES; i++) { - struct ice_recp_grp_entry *rg_entry, *tmprg_entry; - recps[i].root_rid = i; - list_for_each_entry_safe(rg_entry, tmprg_entry, - &recps[i].rg_list, l_entry) { - list_del(&rg_entry->l_entry); - devm_kfree(ice_hw_to_dev(hw), rg_entry); - } if (recps[i].adv_rule) { struct ice_adv_fltr_mgmt_list_entry *tmp_entry; @@ -969,7 +989,6 @@ static void ice_cleanup_fltr_mgmt_struct(struct ice_hw *hw) devm_kfree(ice_hw_to_dev(hw), lst_itr); } } - devm_kfree(ice_hw_to_dev(hw), recps[i].root_buf); } ice_rm_all_sw_replay_rule_info(hw); devm_kfree(ice_hw_to_dev(hw), sw->recp_list); @@ -1062,6 +1081,7 @@ int ice_init_hw(struct ice_hw *hw) goto err_unroll_cqinit; } + hw->port_info->local_fwd_mode = ICE_LOCAL_FWD_MODE_ENABLED; /* set the back pointer to HW */ hw->port_info->hw = hw; @@ -1473,8 +1493,9 @@ ice_sbq_send_cmd(struct ice_hw *hw, struct ice_sbq_cmd_desc *desc, * ice_sbq_rw_reg - Fill Sideband Queue command * @hw: pointer to the HW struct * @in: message info to be filled in descriptor + * @flags: control queue descriptor flags */ -int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in) +int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flags) { struct ice_sbq_cmd_desc desc = {0}; struct ice_sbq_msg_req msg = {0}; @@ -1498,7 +1519,7 @@ int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in) */ msg_len -= sizeof(msg.data); - desc.flags = cpu_to_le16(ICE_AQ_FLAG_RD); + desc.flags = cpu_to_le16(flags); desc.opcode = cpu_to_le16(ice_sbq_opc_neigh_dev_req); desc.param0.cmd_len = cpu_to_le16(msg_len); status = ice_sbq_send_cmd(hw, &desc, &msg, msg_len, NULL); @@ -2290,8 +2311,13 @@ ice_parse_1588_func_caps(struct ice_hw *hw, struct ice_hw_func_caps *func_p, info->tmr_index_owned = ((number & ICE_TS_TMR_IDX_OWND_M) != 0); info->tmr_index_assoc = ((number & ICE_TS_TMR_IDX_ASSOC_M) != 0); - info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number); - info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); + if (!ice_is_e825c(hw)) { + info->clk_freq = FIELD_GET(ICE_TS_CLK_FREQ_M, number); + info->clk_src = ((number & ICE_TS_CLK_SRC_M) != 0); + } else { + info->clk_freq = ICE_TIME_REF_FREQ_156_250; + info->clk_src = ICE_CLK_SRC_TCXO; + } if (info->clk_freq < NUM_ICE_TIME_REF_FREQ) { info->time_ref = (enum ice_time_ref_freq)info->clk_freq; @@ -2564,6 +2590,34 @@ ice_parse_sensor_reading_cap(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, dev_p->supported_sensors); } +/** + * ice_parse_nac_topo_dev_caps - Parse ICE_AQC_CAPS_NAC_TOPOLOGY cap + * @hw: pointer to the HW struct + * @dev_p: pointer to device capabilities structure 
+ * @cap: capability element to parse + * + * Parse ICE_AQC_CAPS_NAC_TOPOLOGY for device capabilities. + */ +static void ice_parse_nac_topo_dev_caps(struct ice_hw *hw, + struct ice_hw_dev_caps *dev_p, + struct ice_aqc_list_caps_elem *cap) +{ + dev_p->nac_topo.mode = le32_to_cpu(cap->number); + dev_p->nac_topo.id = le32_to_cpu(cap->phys_id) & ICE_NAC_TOPO_ID_M; + + dev_info(ice_hw_to_dev(hw), + "PF is configured in %s mode with IP instance ID %d\n", + (dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M) ? + "primary" : "secondary", dev_p->nac_topo.id); + + ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_primary = %d\n", + !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_PRIMARY_M)); + ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology is_dual = %d\n", + !!(dev_p->nac_topo.mode & ICE_NAC_TOPO_DUAL_M)); + ice_debug(hw, ICE_DBG_INIT, "dev caps: nac topology id = %d\n", + dev_p->nac_topo.id); +} + /** * ice_parse_dev_caps - Parse device capabilities * @hw: pointer to the HW struct @@ -2615,6 +2669,9 @@ ice_parse_dev_caps(struct ice_hw *hw, struct ice_hw_dev_caps *dev_p, case ICE_AQC_CAPS_SENSOR_READING: ice_parse_sensor_reading_cap(hw, dev_p, &cap_resp[i]); break; + case ICE_AQC_CAPS_NAC_TOPOLOGY: + ice_parse_nac_topo_dev_caps(hw, dev_p, &cap_resp[i]); + break; default: /* Don't list common capabilities as unknown */ if (!found) @@ -3010,6 +3067,9 @@ ice_aq_set_port_params(struct ice_port_info *pi, bool double_vlan, cmd_flags |= ICE_AQC_SET_P_PARAMS_DOUBLE_VLAN_ENA; cmd->cmd_flags = cpu_to_le16(cmd_flags); + cmd->local_fwd_mode = pi->local_fwd_mode | + ICE_AQC_SET_P_PARAMS_LOCAL_FWD_MODE_VALID; + return ice_aq_send_cmd(hw, &desc, NULL, 0, cd); } @@ -3043,11 +3103,13 @@ bool ice_is_100m_speed_supported(struct ice_hw *hw) * Note: In the structure of [phy_type_low, phy_type_high], there should * be one bit set, as this function will convert one PHY type to its * speed. - * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned - * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned + * + * Return: + * * PHY speed for recognized PHY type + * * If no bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned + * * If more than one bit gets set, ICE_AQ_LINK_SPEED_UNKNOWN will be returned */ -static u16 -ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) +u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high) { u16 speed_phy_type_high = ICE_AQ_LINK_SPEED_UNKNOWN; u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; @@ -3308,6 +3370,100 @@ int ice_update_link_info(struct ice_port_info *pi) return status; } +/** + * ice_aq_get_phy_equalization - read a serdes equalizer value from firmware + * using an admin queue command. + * @hw: pointer to the HW struct + * @data_in: the serdes equalization parameter requested + * @op_code: operation code selecting Tx or Rx equalization parameters + * @serdes_num: the serdes number + * @output: pointer to the caller-supplied buffer to return the serdes + * equalizer value + * + * Return: non-zero status on error and 0 on success.
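+ *
+ * Hedged example call, using the request encodings added in this series
+ * (the variables shown are illustrative):
+ *
+ *   int pre1;
+ *   int err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE1,
+ *                                         ICE_AQC_OP_CODE_RX_EQU,
+ *                                         serdes_num, &pre1);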
+ */ +int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code, + u8 serdes_num, int *output) +{ + struct ice_aqc_dnl_call_command *cmd; + struct ice_aqc_dnl_call buf = {}; + struct ice_aq_desc desc; + int err; + + buf.sto.txrx_equa_reqs.data_in = cpu_to_le16(data_in); + buf.sto.txrx_equa_reqs.op_code_serdes_sel = + cpu_to_le16(op_code | (serdes_num & 0xF)); + cmd = &desc.params.dnl_call; + ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_dnl_call); + desc.flags |= cpu_to_le16(ICE_AQ_FLAG_BUF | + ICE_AQ_FLAG_RD | + ICE_AQ_FLAG_SI); + desc.datalen = cpu_to_le16(sizeof(struct ice_aqc_dnl_call)); + cmd->activity_id = cpu_to_le16(ICE_AQC_ACT_ID_DNL); + + err = ice_aq_send_cmd(hw, &desc, &buf, sizeof(struct ice_aqc_dnl_call), + NULL); + *output = err ? 0 : buf.sto.txrx_equa_resp.val; + + return err; +} + +#define FEC_REG_PORT(port) { \ + FEC_CORR_LOW_REG_PORT##port, \ + FEC_CORR_HIGH_REG_PORT##port, \ + FEC_UNCORR_LOW_REG_PORT##port, \ + FEC_UNCORR_HIGH_REG_PORT##port, \ +} + +static const u32 fec_reg[][ICE_FEC_MAX] = { + FEC_REG_PORT(0), + FEC_REG_PORT(1), + FEC_REG_PORT(2), + FEC_REG_PORT(3) +}; + +/** + * ice_aq_get_fec_stats - reads fec stats from phy + * @hw: pointer to the HW struct + * @pcs_quad: represents pcsquad of user input serdes + * @pcs_port: represents the pcs port number part of above pcs quad + * @fec_type: represents FEC stats type + * @output: pointer to the caller-supplied buffer to return requested fec stats + * + * Return: non-zero status on error and 0 on success. + */ +int ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port, + enum ice_fec_stats_types fec_type, u32 *output) +{ + u16 flag = (ICE_AQ_FLAG_RD | ICE_AQ_FLAG_BUF | ICE_AQ_FLAG_SI); + struct ice_sbq_msg_input msg = {}; + u32 receiver_id, reg_offset; + int err; + + if (pcs_port > 3) + return -EINVAL; + + reg_offset = fec_reg[pcs_port][fec_type]; + + if (pcs_quad == 0) + receiver_id = FEC_RECEIVER_ID_PCS0; + else if (pcs_quad == 1) + receiver_id = FEC_RECEIVER_ID_PCS1; + else + return -EINVAL; + + msg.msg_addr_low = lower_16_bits(reg_offset); + msg.msg_addr_high = receiver_id; + msg.opcode = ice_sbq_msg_rd; + msg.dest_dev = rmn_0; + + err = ice_sbq_rw_reg(hw, &msg, flag); + if (err) + return err; + + *output = msg.data; + return 0; +} + /** * ice_cache_phy_user_req * @pi: port information structure diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h index ffb22c7ce28b..66f29bac783a 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.h +++ b/drivers/net/ethernet/intel/ice/ice_common.h @@ -17,13 +17,34 @@ #define ICE_SQ_SEND_DELAY_TIME_MS 10 #define ICE_SQ_SEND_MAX_EXECUTE 3 +#define FEC_REG_SHIFT 2 +#define FEC_RECV_ID_SHIFT 4 +#define FEC_CORR_LOW_REG_PORT0 (0x02 << FEC_REG_SHIFT) +#define FEC_CORR_HIGH_REG_PORT0 (0x03 << FEC_REG_SHIFT) +#define FEC_UNCORR_LOW_REG_PORT0 (0x04 << FEC_REG_SHIFT) +#define FEC_UNCORR_HIGH_REG_PORT0 (0x05 << FEC_REG_SHIFT) +#define FEC_CORR_LOW_REG_PORT1 (0x42 << FEC_REG_SHIFT) +#define FEC_CORR_HIGH_REG_PORT1 (0x43 << FEC_REG_SHIFT) +#define FEC_UNCORR_LOW_REG_PORT1 (0x44 << FEC_REG_SHIFT) +#define FEC_UNCORR_HIGH_REG_PORT1 (0x45 << FEC_REG_SHIFT) +#define FEC_CORR_LOW_REG_PORT2 (0x4A << FEC_REG_SHIFT) +#define FEC_CORR_HIGH_REG_PORT2 (0x4B << FEC_REG_SHIFT) +#define FEC_UNCORR_LOW_REG_PORT2 (0x4C << FEC_REG_SHIFT) +#define FEC_UNCORR_HIGH_REG_PORT2 (0x4D << FEC_REG_SHIFT) +#define FEC_CORR_LOW_REG_PORT3 (0x52 << FEC_REG_SHIFT) +#define FEC_CORR_HIGH_REG_PORT3 (0x53 << FEC_REG_SHIFT) +#define 
FEC_UNCORR_LOW_REG_PORT3 (0x54 << FEC_REG_SHIFT) +#define FEC_UNCORR_HIGH_REG_PORT3 (0x55 << FEC_REG_SHIFT) +#define FEC_RECEIVER_ID_PCS0 (0x33 << FEC_RECV_ID_SHIFT) +#define FEC_RECEIVER_ID_PCS1 (0x34 << FEC_RECV_ID_SHIFT) + int ice_init_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw); int ice_check_reset(struct ice_hw *hw); int ice_reset(struct ice_hw *hw, enum ice_reset_req req); int ice_create_all_ctrlq(struct ice_hw *hw); int ice_init_all_ctrlq(struct ice_hw *hw); -void ice_shutdown_all_ctrlq(struct ice_hw *hw); +void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading); void ice_destroy_all_ctrlq(struct ice_hw *hw); int ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq, @@ -121,6 +142,11 @@ int ice_get_link_default_override(struct ice_link_default_override_tlv *ldo, struct ice_port_info *pi); bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps); +int ice_aq_get_phy_equalization(struct ice_hw *hw, u16 data_in, u16 op_code, + u8 serdes_num, int *output); +int +ice_aq_get_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port, + enum ice_fec_stats_types fec_type, u32 *output); enum ice_fc_mode ice_caps_to_fc_mode(u8 caps); enum ice_fec_mode ice_caps_to_fec_mode(u8 caps, u8 fec_options); @@ -201,7 +227,7 @@ int ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle); void ice_replay_post(struct ice_hw *hw); struct ice_q_ctx * ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle); -int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in); +int ice_sbq_rw_reg(struct ice_hw *hw, struct ice_sbq_msg_input *in, u16 flag); int ice_aq_get_cgu_abilities(struct ice_hw *hw, struct ice_aqc_get_cgu_abilities *abilities); @@ -249,6 +275,7 @@ void ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded, u64 *prev_stat, u64 *cur_stat); bool ice_is_e810t(struct ice_hw *hw); +bool ice_is_e822(struct ice_hw *hw); bool ice_is_e823(struct ice_hw *hw); bool ice_is_e825c(struct ice_hw *hw); int @@ -261,6 +288,7 @@ int ice_aq_get_gpio(struct ice_hw *hw, u16 gpio_ctrl_handle, u8 pin_idx, bool *value, struct ice_sq_cd *cd); bool ice_is_100m_speed_supported(struct ice_hw *hw); +u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low, u64 phy_type_high); int ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, struct ice_sq_cd *cd); diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index ffe660f34992..ffaa6511c455 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -510,16 +510,19 @@ shutdown_sq_out: */ static bool ice_aq_ver_check(struct ice_hw *hw) { - if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) { + u8 exp_fw_api_ver_major = EXP_FW_API_VER_MAJOR_BY_MAC(hw); + u8 exp_fw_api_ver_minor = EXP_FW_API_VER_MINOR_BY_MAC(hw); + + if (hw->api_maj_ver > exp_fw_api_ver_major) { /* Major API version is newer than expected, don't load */ dev_warn(ice_hw_to_dev(hw), "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n"); return false; - } else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) { - if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2)) + } else if (hw->api_maj_ver == exp_fw_api_ver_major) { + if (hw->api_min_ver > (exp_fw_api_ver_minor + 2)) dev_info(ice_hw_to_dev(hw), "The driver for the device detected a newer version of the NVM image than expected. 
Please install the most recent version of the network driver.\n"); - else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR) + else if ((hw->api_min_ver + 2) < exp_fw_api_ver_minor) dev_info(ice_hw_to_dev(hw), "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n"); } else { @@ -684,10 +687,12 @@ struct ice_ctl_q_info *ice_get_sbq(struct ice_hw *hw) * ice_shutdown_ctrlq - shutdown routine for any control queue * @hw: pointer to the hardware structure * @q_type: specific Control queue type + * @unloading: is the driver unloading itself * * NOTE: this function does not destroy the control queue locks. */ -static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) +static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type, + bool unloading) { struct ice_ctl_q_info *cq; @@ -695,7 +700,7 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) case ICE_CTL_Q_ADMIN: cq = &hw->adminq; if (ice_check_sq_alive(hw, cq)) - ice_aq_q_shutdown(hw, true); + ice_aq_q_shutdown(hw, unloading); break; case ICE_CTL_Q_SB: cq = &hw->sbq; @@ -714,20 +719,21 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) /** * ice_shutdown_all_ctrlq - shutdown routine for all control queues * @hw: pointer to the hardware structure + * @unloading: is the driver unloading itself * * NOTE: this function does not destroy the control queue locks. The driver * may call this at runtime to shutdown and later restart control queues, such * as in response to a reset event. */ -void ice_shutdown_all_ctrlq(struct ice_hw *hw) +void ice_shutdown_all_ctrlq(struct ice_hw *hw, bool unloading) { /* Shutdown FW admin queue */ - ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, unloading); /* Shutdown PHY Sideband */ if (ice_is_sbq_supported(hw)) - ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_SB, unloading); /* Shutdown PF-VF Mailbox */ - ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX, unloading); } /** @@ -759,7 +765,7 @@ int ice_init_all_ctrlq(struct ice_hw *hw) break; ice_debug(hw, ICE_DBG_AQ_MSG, "Retry Admin Queue init due to FW critical error\n"); - ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN); + ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN, true); msleep(ICE_CTL_Q_ADMIN_INIT_MSEC); } while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT); @@ -840,7 +846,7 @@ static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq) void ice_destroy_all_ctrlq(struct ice_hw *hw) { /* shut down all the control queues first */ - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, true); ice_destroy_ctrlq_locks(&hw->adminq); if (ice_is_sbq_supported(hw)) diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h index 8f2fd1613a95..1d54b1cdb1c5 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -21,9 +21,18 @@ /* Defines that help manage the driver vs FW API checks. * Take a look at ice_aq_ver_check in ice_controlq.c for actual usage. */ -#define EXP_FW_API_VER_BRANCH 0x00 -#define EXP_FW_API_VER_MAJOR 0x01 -#define EXP_FW_API_VER_MINOR 0x05 +#define EXP_FW_API_VER_MAJOR_E810 0x01 +#define EXP_FW_API_VER_MINOR_E810 0x05 + +#define EXP_FW_API_VER_MAJOR_E830 0x01 +#define EXP_FW_API_VER_MINOR_E830 0x07 + +#define EXP_FW_API_VER_MAJOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? 
\ + EXP_FW_API_VER_MAJOR_E830 : \ + EXP_FW_API_VER_MAJOR_E810) +#define EXP_FW_API_VER_MINOR_BY_MAC(hw) ((hw)->mac_type == ICE_MAC_E830 ? \ + EXP_FW_API_VER_MINOR_E830 : \ + EXP_FW_API_VER_MINOR_E810) /* Different control queue types: These are mainly for SW consumption. */ enum ice_ctl_q { diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.c b/drivers/net/ethernet/intel/ice/ice_eswitch.c index b102db8b829a..3cfa071e3718 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.c @@ -117,17 +117,10 @@ static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr) struct ice_vsi *vsi = repr->src_vsi; struct metadata_dst *dst; - ice_remove_vsi_fltr(&pf->hw, vsi->idx); repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL); if (!repr->dst) - goto err_add_mac_fltr; - - if (ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof)) - goto err_dst_free; - - if (ice_vsi_add_vlan_zero(vsi)) - goto err_update_security; + return -ENOMEM; netif_keep_dst(uplink_vsi->netdev); @@ -136,16 +129,48 @@ static int ice_eswitch_setup_repr(struct ice_pf *pf, struct ice_repr *repr) dst->u.port_info.lower_dev = uplink_vsi->netdev; return 0; +} -err_update_security: +/** + * ice_eswitch_cfg_vsi - configure VSI to work in slow-path + * @vsi: VSI structure of representee + * @mac: representee MAC + * + * Return: 0 on success, non-zero on error. + */ +int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac) +{ + int err; + + ice_remove_vsi_fltr(&vsi->back->hw, vsi->idx); + + err = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); + if (err) + goto err_update_security; + + err = ice_vsi_add_vlan_zero(vsi); + if (err) + goto err_vlan_zero; + + return 0; + +err_vlan_zero: ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); -err_dst_free: - metadata_dst_free(repr->dst); - repr->dst = NULL; -err_add_mac_fltr: - ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, ICE_FWD_TO_VSI); +err_update_security: + ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI); - return -ENODEV; + return err; +} + +/** + * ice_eswitch_decfg_vsi - unroll changes done to VSI for switchdev + * @vsi: VSI structure of representee + * @mac: representee MAC + */ +void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac) +{ + ice_vsi_update_security(vsi, ice_vsi_ctx_set_antispoof); + ice_fltr_add_mac_and_broadcast(vsi, mac, ICE_FWD_TO_VSI); } /** @@ -153,16 +178,16 @@ err_add_mac_fltr: * @repr_id: representor ID * @vsi: VSI for which port representor is configured */ -void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) +void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi) { struct ice_pf *pf = vsi->back; struct ice_repr *repr; - int ret; + int err; if (!ice_is_switchdev_running(pf)) return; - repr = xa_load(&pf->eswitch.reprs, repr_id); + repr = xa_load(&pf->eswitch.reprs, *repr_id); if (!repr) return; @@ -172,12 +197,19 @@ void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) if (repr->br_port) repr->br_port->vsi = vsi; - ret = ice_vsi_update_security(vsi, ice_vsi_ctx_clear_antispoof); - if (ret) { - ice_fltr_add_mac_and_broadcast(vsi, repr->parent_mac, - ICE_FWD_TO_VSI); + err = ice_eswitch_cfg_vsi(vsi, repr->parent_mac); + if (err) dev_err(ice_pf_to_dev(pf), "Failed to update VSI of port representor %d", repr->id); + + /* The VSI number is different, reload the PR with new id */ + if (repr->id != vsi->vsi_num) { + xa_erase(&pf->eswitch.reprs, repr->id); + repr->id = vsi->vsi_num; + 
if (xa_insert(&pf->eswitch.reprs, repr->id, repr, GFP_KERNEL)) + dev_err(ice_pf_to_dev(pf), "Failed to reload port representor %d", + repr->id); + *repr_id = repr->id; } } @@ -423,6 +455,7 @@ static void ice_eswitch_start_reprs(struct ice_pf *pf) int ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) { + struct devlink *devlink = priv_to_devlink(pf); struct ice_repr *repr; int err; @@ -437,7 +470,9 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) ice_eswitch_stop_reprs(pf); + devl_lock(devlink); repr = ice_repr_add_vf(vf); + devl_unlock(devlink); if (IS_ERR(repr)) { err = PTR_ERR(repr); goto err_create_repr; @@ -460,7 +495,9 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf) err_xa_alloc: ice_eswitch_release_repr(pf, repr); err_setup_repr: + devl_lock(devlink); ice_repr_rem_vf(repr); + devl_unlock(devlink); err_create_repr: if (xa_empty(&pf->eswitch.reprs)) ice_eswitch_disable_switchdev(pf); @@ -484,6 +521,7 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) ice_eswitch_disable_switchdev(pf); ice_eswitch_release_repr(pf, repr); + devl_lock(devlink); ice_repr_rem_vf(repr); if (xa_empty(&pf->eswitch.reprs)) { @@ -491,28 +529,11 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) * no point in keeping the nodes */ ice_devlink_rate_clear_tx_topology(ice_get_main_vsi(pf)); - devl_lock(devlink); devl_rate_nodes_destroy(devlink); - devl_unlock(devlink); } else { ice_eswitch_start_reprs(pf); } -} - -/** - * ice_eswitch_rebuild - rebuild eswitch - * @pf: pointer to PF structure - */ -void ice_eswitch_rebuild(struct ice_pf *pf) -{ - struct ice_repr *repr; - unsigned long id; - - if (!ice_is_switchdev_running(pf)) - return; - - xa_for_each(&pf->eswitch.reprs, id, repr) - ice_eswitch_detach(pf, repr->vf); + devl_unlock(devlink); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch.h b/drivers/net/ethernet/intel/ice/ice_eswitch.h index e2e5c0c75e7d..78fd39a6935d 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch.h @@ -10,7 +10,6 @@ void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf); int ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf); -void ice_eswitch_rebuild(struct ice_pf *pf); int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode); int @@ -18,7 +17,7 @@ ice_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack); bool ice_is_eswitch_mode_switchdev(struct ice_pf *pf); -void ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi); +void ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi); void ice_eswitch_stop_all_tx_queues(struct ice_pf *pf); @@ -28,6 +27,9 @@ netdev_tx_t ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev); struct net_device *ice_eswitch_get_target(struct ice_rx_ring *rx_ring, union ice_32b_rx_flex_desc *rx_desc); + +int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac); +void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac); #else /* CONFIG_ICE_SWITCHDEV */ static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { } @@ -44,18 +46,13 @@ ice_eswitch_set_target_vsi(struct sk_buff *skb, struct ice_tx_offload_params *off) { } static inline void -ice_eswitch_update_repr(unsigned long repr_id, struct ice_vsi *vsi) { } +ice_eswitch_update_repr(unsigned long *repr_id, struct ice_vsi *vsi) { } static inline int ice_eswitch_configure(struct ice_pf *pf) { return 0; } -static inline int ice_eswitch_rebuild(struct ice_pf *pf) -{ - return 
-EOPNOTSUPP; -} - static inline int ice_eswitch_mode_get(struct devlink *devlink, u16 *mode) { return DEVLINK_ESWITCH_MODE_LEGACY; @@ -85,5 +82,12 @@ ice_eswitch_get_target(struct ice_rx_ring *rx_ring, { return rx_ring->netdev; } + +static inline int ice_eswitch_cfg_vsi(struct ice_vsi *vsi, const u8 *mac) +{ + return -EOPNOTSUPP; +} + +static inline void ice_eswitch_decfg_vsi(struct ice_vsi *vsi, const u8 *mac) { } #endif /* CONFIG_ICE_SWITCHDEV */ #endif /* _ICE_ESWITCH_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c index ac5beecd028b..f5aceb32bf4d 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.c +++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.c @@ -896,7 +896,8 @@ ice_eswitch_br_port_deinit(struct ice_esw_br *bridge, if (br_port->type == ICE_ESWITCH_BR_UPLINK_PORT && vsi->back) { vsi->back->br_port = NULL; } else { - struct ice_repr *repr = ice_repr_get_by_vsi(vsi); + struct ice_repr *repr = + ice_repr_get(vsi->back, br_port->repr_id); if (repr) repr->br_port = NULL; @@ -937,6 +938,7 @@ ice_eswitch_br_vf_repr_port_init(struct ice_esw_br *bridge, br_port->vsi = repr->src_vsi; br_port->vsi_idx = br_port->vsi->idx; br_port->type = ICE_ESWITCH_BR_VF_REPR_PORT; + br_port->repr_id = repr->id; repr->br_port = br_port; err = xa_insert(&bridge->ports, br_port->vsi_idx, br_port, GFP_KERNEL); diff --git a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h index 85a8fadb2928..c15c7344d7f8 100644 --- a/drivers/net/ethernet/intel/ice/ice_eswitch_br.h +++ b/drivers/net/ethernet/intel/ice/ice_eswitch_br.h @@ -46,6 +46,7 @@ struct ice_esw_br_port { enum ice_esw_br_port_type type; u16 vsi_idx; u16 pvid; + u32 repr_id; struct xarray vlans; }; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 62c8205fceba..8c990c976132 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -463,7 +463,354 @@ ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) static int ice_get_regs_len(struct net_device __always_unused *netdev) { - return sizeof(ice_regs_dump_list); + return (sizeof(ice_regs_dump_list) + + sizeof(struct ice_regdump_to_ethtool)); +} + +/** + * ice_ethtool_get_maxspeed - Get the max speed for given lport + * @hw: pointer to the HW struct + * @lport: logical port for which max speed is requested + * @max_speed: return max speed for input lport + * + * Return: 0 on success, negative on failure. + */ +static int ice_ethtool_get_maxspeed(struct ice_hw *hw, u8 lport, u8 *max_speed) +{ + struct ice_aqc_get_port_options_elem options[ICE_AQC_PORT_OPT_MAX] = {}; + bool active_valid = false, pending_valid = true; + u8 option_count = ICE_AQC_PORT_OPT_MAX; + u8 active_idx = 0, pending_idx = 0; + int status; + + status = ice_aq_get_port_options(hw, options, &option_count, lport, + true, &active_idx, &active_valid, + &pending_idx, &pending_valid); + if (status) + return -EIO; + if (!active_valid) + return -EINVAL; + + *max_speed = options[active_idx].max_lane_speed & ICE_AQC_PORT_OPT_MAX_LANE_M; + return 0; +} + +/** + * ice_is_serdes_muxed - returns whether serdes is muxed in hardware + * @hw: pointer to the HW struct + * + * Return: true when serdes is muxed, false when serdes is not muxed. 
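+ *
+ * The FIELD_GET() of the 25X4_QUAD bit reports the muxed state; the lane
+ * maps below use it, e.g. lports 1 and 3 pick different primary serdes
+ * lanes when the serdes is muxed.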
+ */ +static bool ice_is_serdes_muxed(struct ice_hw *hw) +{ + u32 reg_value = rd32(hw, GLGEN_SWITCH_MODE_CONFIG); + + return FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, reg_value); +} + +static int ice_map_port_topology_for_sfp(struct ice_port_topology *port_topology, + u8 lport, bool is_muxed) +{ + switch (lport) { + case 0: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 0; + port_topology->primary_serdes_lane = 0; + break; + case 1: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 0; + if (is_muxed) + port_topology->primary_serdes_lane = 2; + else + port_topology->primary_serdes_lane = 4; + break; + case 2: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 1; + port_topology->primary_serdes_lane = 1; + break; + case 3: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 1; + if (is_muxed) + port_topology->primary_serdes_lane = 3; + else + port_topology->primary_serdes_lane = 5; + break; + case 4: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 2; + port_topology->primary_serdes_lane = 2; + break; + case 5: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 2; + port_topology->primary_serdes_lane = 6; + break; + case 6: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 3; + port_topology->primary_serdes_lane = 3; + break; + case 7: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 3; + port_topology->primary_serdes_lane = 7; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int ice_map_port_topology_for_qsfp(struct ice_port_topology *port_topology, + u8 lport, bool is_muxed) +{ + switch (lport) { + case 0: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 0; + port_topology->primary_serdes_lane = 0; + break; + case 1: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 0; + if (is_muxed) + port_topology->primary_serdes_lane = 2; + else + port_topology->primary_serdes_lane = 4; + break; + case 2: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 1; + port_topology->primary_serdes_lane = 1; + break; + case 3: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 1; + if (is_muxed) + port_topology->primary_serdes_lane = 3; + else + port_topology->primary_serdes_lane = 5; + break; + case 4: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 2; + port_topology->primary_serdes_lane = 2; + break; + case 5: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 2; + port_topology->primary_serdes_lane = 6; + break; + case 6: + port_topology->pcs_quad_select = 0; + port_topology->pcs_port = 3; + port_topology->primary_serdes_lane = 3; + break; + case 7: + port_topology->pcs_quad_select = 1; + port_topology->pcs_port = 3; + port_topology->primary_serdes_lane = 7; + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * ice_get_port_topology - returns physical topology like pcsquad, pcsport, + * serdes number + * @hw: pointer to the HW struct + * @lport: logical port for which physical info requested + * @port_topology: buffer to hold port topology + * + * Return: 0 on success, negative on failure. 
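+ *
+ * For example, with an SFP cage and no serdes muxing, lport 1 maps to
+ * PCS quad 1, PCS port 0 and primary serdes lane 4 (see
+ * ice_map_port_topology_for_sfp()).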
+ */
+static int ice_get_port_topology(struct ice_hw *hw, u8 lport,
+				 struct ice_port_topology *port_topology)
+{
+	struct ice_aqc_get_link_topo cmd = {};
+	u16 node_handle = 0;
+	u8 cage_type = 0;
+	bool is_muxed;
+	int err;
+	u8 ctx;
+
+	ctx = ICE_AQC_LINK_TOPO_NODE_TYPE_CAGE << ICE_AQC_LINK_TOPO_NODE_TYPE_S;
+	ctx |= ICE_AQC_LINK_TOPO_NODE_CTX_PORT << ICE_AQC_LINK_TOPO_NODE_CTX_S;
+	cmd.addr.topo_params.node_type_ctx = ctx;
+
+	err = ice_aq_get_netlist_node(hw, &cmd, &cage_type, &node_handle);
+	if (err)
+		return -EINVAL;
+
+	is_muxed = ice_is_serdes_muxed(hw);
+
+	if (cage_type == 0x11 ||	/* SFP+ */
+	    cage_type == 0x12) {	/* SFP28 */
+		port_topology->serdes_lane_count = 1;
+		err = ice_map_port_topology_for_sfp(port_topology, lport, is_muxed);
+		if (err)
+			return err;
+	} else if (cage_type == 0x13 ||	/* QSFP */
+		   cage_type == 0x14) {	/* QSFP28 */
+		u8 max_speed = 0;
+
+		err = ice_ethtool_get_maxspeed(hw, lport, &max_speed);
+		if (err)
+			return err;
+
+		if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_100G)
+			port_topology->serdes_lane_count = 4;
+		else if (max_speed == ICE_AQC_PORT_OPT_MAX_LANE_50G)
+			port_topology->serdes_lane_count = 2;
+		else
+			port_topology->serdes_lane_count = 1;
+
+		err = ice_map_port_topology_for_qsfp(port_topology, lport, is_muxed);
+		if (err)
+			return err;
+	} else {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * ice_get_tx_rx_equa - read serdes Tx and Rx equalization parameters
+ * @hw: pointer to the HW struct
+ * @serdes_num: serdes number to read
+ * @ptr: structure to store the equalization parameters read for the serdes
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_get_tx_rx_equa(struct ice_hw *hw, u8 serdes_num,
+			      struct ice_serdes_equalization_to_ethtool *ptr)
+{
+	int err;
+
+	err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE1,
+					  ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+					  &ptr->tx_equalization_pre1);
+	if (err)
+		return err;
+
+	err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE3,
+					  ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+					  &ptr->tx_equalization_pre3);
+	if (err)
+		return err;
+
+	err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_ATTEN,
+					  ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+					  &ptr->tx_equalization_atten);
+	if (err)
+		return err;
+
+	err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_POST1,
+					  ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+					  &ptr->tx_equalization_post1);
+	if (err)
+		return err;
+
+	err = ice_aq_get_phy_equalization(hw, ICE_AQC_TX_EQU_PRE2,
+					  ICE_AQC_OP_CODE_TX_EQU, serdes_num,
+					  &ptr->tx_equalization_pre2);
+	if (err)
+		return err;
+
+	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE2,
+					  ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+					  &ptr->rx_equalization_pre2);
+	if (err)
+		return err;
+
+	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_PRE1,
+					  ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+					  &ptr->rx_equalization_pre1);
+	if (err)
+		return err;
+
+	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_POST1,
+					  ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+					  &ptr->rx_equalization_post1);
+	if (err)
+		return err;
+
+	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_BFLF,
+					  ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+					  &ptr->rx_equalization_bflf);
+	if (err)
+		return err;
+
+	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_BFHF,
+					  ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+					  &ptr->rx_equalization_bfhf);
+	if (err)
+		return err;
+
+	err = ice_aq_get_phy_equalization(hw, ICE_AQC_RX_EQU_DRATE,
+					  ICE_AQC_OP_CODE_RX_EQU, serdes_num,
+					  &ptr->rx_equalization_drate);
+	if (err)
+		return err;
+
+	return 0;
+}
+
+/**
+ * ice_get_extended_regs - returns serdes equalization parameters for the
+ *                         serdes lanes behind a port
+ * @netdev: pointer to net device structure
+ * @p: output buffer to fill requested register dump
+ *
+ * Return: 0 on success, negative on failure.
+ */
+static int ice_get_extended_regs(struct net_device *netdev, void *p)
+{
+	struct ice_netdev_priv *np = netdev_priv(netdev);
+	struct ice_regdump_to_ethtool *ice_prv_regs_buf;
+	struct ice_port_topology port_topology = {};
+	struct ice_port_info *pi;
+	struct ice_pf *pf;
+	struct ice_hw *hw;
+	unsigned int i;
+	int err;
+
+	pf = np->vsi->back;
+	hw = &pf->hw;
+	pi = np->vsi->port_info;
+
+	/* Serdes parameters are not supported if not the PF VSI */
+	if (np->vsi->type != ICE_VSI_PF || !pi)
+		return -EINVAL;
+
+	err = ice_get_port_topology(hw, pi->lport, &port_topology);
+	if (err)
+		return -EINVAL;
+	if (port_topology.serdes_lane_count > 4)
+		return -EINVAL;
+
+	ice_prv_regs_buf = p;
+
+	/* Get serdes equalization parameters for all available serdes */
+	for (i = 0; i < port_topology.serdes_lane_count; i++) {
+		u8 serdes_num = port_topology.primary_serdes_lane + i;
+
+		err = ice_get_tx_rx_equa(hw, serdes_num,
+					 &ice_prv_regs_buf->equalization[i]);
+		if (err)
+			return -EINVAL;
+	}
+
+	return 0;
+}
 
 static void
@@ -475,10 +822,12 @@ ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
 	u32 *regs_buf = (u32 *)p;
 	unsigned int i;
 
-	regs->version = 1;
+	regs->version = 2;
 	for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
 		regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
+
+	ice_get_extended_regs(netdev, (void *)&regs_buf[i]);
 }
 
 static u32 ice_get_msglevel(struct net_device *netdev)
@@ -3434,7 +3783,7 @@ ice_set_rxfh(struct net_device *netdev, struct ethtool_rxfh_param *rxfh,
 }
 
 static int
-ice_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
+ice_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info)
 {
 	struct ice_pf *pf = ice_netdev_to_pf(dev);
 
@@ -4282,6 +4631,94 @@ ice_get_module_eeprom(struct net_device *netdev,
 	return 0;
 }
 
+/**
+ * ice_get_port_fec_stats - returns FEC correctable, uncorrectable stats per
+ *                          pcsquad, pcsport
+ * @hw: pointer to the HW struct
+ * @pcs_quad: pcsquad for input port
+ * @pcs_port: pcsport for input port
+ * @fec_stats: buffer to hold FEC statistics for given port
+ *
+ * Return: 0 on success, negative on failure.
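+ *
+ * Each counter is read as two 16-bit halves (ICE_FEC_*_LOW/HIGH) that are
+ * combined below as total = (high << 16) + low.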
+ */
+static int ice_get_port_fec_stats(struct ice_hw *hw, u16 pcs_quad, u16 pcs_port,
+				  struct ethtool_fec_stats *fec_stats)
+{
+	u32 fec_uncorr_low_val = 0, fec_uncorr_high_val = 0;
+	u32 fec_corr_low_val = 0, fec_corr_high_val = 0;
+	int err;
+
+	if (pcs_quad > 1 || pcs_port > 3)
+		return -EINVAL;
+
+	err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_LOW,
+				   &fec_corr_low_val);
+	if (err)
+		return err;
+
+	err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port, ICE_FEC_CORR_HIGH,
+				   &fec_corr_high_val);
+	if (err)
+		return err;
+
+	err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
+				   ICE_FEC_UNCORR_LOW,
+				   &fec_uncorr_low_val);
+	if (err)
+		return err;
+
+	err = ice_aq_get_fec_stats(hw, pcs_quad, pcs_port,
+				   ICE_FEC_UNCORR_HIGH,
+				   &fec_uncorr_high_val);
+	if (err)
+		return err;
+
+	fec_stats->uncorrectable_blocks.total = (fec_uncorr_high_val << 16) +
+						fec_uncorr_low_val;
+	fec_stats->corrected_blocks.total = (fec_corr_high_val << 16) +
+					    fec_corr_low_val;
+	return 0;
+}
+
+/**
+ * ice_get_fec_stats - returns FEC correctable, uncorrectable stats per netdev
+ * @netdev: network interface device structure
+ * @fec_stats: buffer to hold FEC statistics for given port
+ */
+static void ice_get_fec_stats(struct net_device *netdev,
+			      struct ethtool_fec_stats *fec_stats)
+{
+	struct ice_netdev_priv *np = netdev_priv(netdev);
+	struct ice_port_topology port_topology;
+	struct ice_port_info *pi;
+	struct ice_pf *pf;
+	struct ice_hw *hw;
+	int err;
+
+	pf = np->vsi->back;
+	hw = &pf->hw;
+	pi = np->vsi->port_info;
+
+	/* FEC stats are not supported if not the PF VSI */
+	if (np->vsi->type != ICE_VSI_PF || !pi)
+		return;
+
+	err = ice_get_port_topology(hw, pi->lport, &port_topology);
+	if (err) {
+		netdev_info(netdev, "Failed to get port topology for Lport %d\n",
+			    pi->lport);
+		return;
+	}
+
+	/* Get FEC correctable, uncorrectable counters */
+	err = ice_get_port_fec_stats(hw, port_topology.pcs_quad_select,
+				     port_topology.pcs_port, fec_stats);
+	if (err)
+		netdev_info(netdev, "FEC stats get failed Lport %d Err %d\n",
+			    pi->lport, err);
+}
+
 static const struct ethtool_ops ice_ethtool_ops = {
 	.cap_rss_ctx_supported = true,
 	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
@@ -4290,6 +4727,7 @@ static const struct ethtool_ops ice_ethtool_ops = {
 	.cap_rss_sym_xor_supported = true,
 	.get_link_ksettings = ice_get_link_ksettings,
 	.set_link_ksettings = ice_set_link_ksettings,
+	.get_fec_stats = ice_get_fec_stats,
 	.get_drvinfo = ice_get_drvinfo,
 	.get_regs_len = ice_get_regs_len,
 	.get_regs = ice_get_regs,
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.h b/drivers/net/ethernet/intel/ice/ice_ethtool.h
index b88e3da06f13..9acccae38625 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.h
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.h
@@ -9,6 +9,35 @@ struct ice_phy_type_to_ethtool {
 	u8 link_mode;
 };
 
+struct ice_serdes_equalization_to_ethtool {
+	int rx_equalization_pre2;
+	int rx_equalization_pre1;
+	int rx_equalization_post1;
+	int rx_equalization_bflf;
+	int rx_equalization_bfhf;
+	int rx_equalization_drate;
+	int tx_equalization_pre1;
+	int tx_equalization_pre3;
+	int tx_equalization_atten;
+	int tx_equalization_post1;
+	int tx_equalization_pre2;
+};
+
+struct ice_regdump_to_ethtool {
+	/* A multilane port can have max 4 serdes */
+	struct ice_serdes_equalization_to_ethtool equalization[4];
+};
+
+/* Port topology from lport, i.e.
+ * serdes mapping, pcsquad, macport, cage, etc.
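+ * pcs_quad_select and pcs_port address the per-quad FEC counters, while
+ * primary_serdes_lane and serdes_lane_count bound the serdes
+ * equalization dump.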
+ */ +struct ice_port_topology { + u16 pcs_port; + u16 primary_serdes_lane; + u16 serdes_lane_count; + u16 pcs_quad_select; +}; + /* Macro to make PHY type to Ethtool link mode table entry. * The index is the PHY type. */ diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index cfac1d432c15..91cbae1eec89 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -157,6 +157,8 @@ #define GLGEN_RTRIG_CORER_M BIT(0) #define GLGEN_RTRIG_GLOBR_M BIT(1) #define GLGEN_STAT 0x000B612C +#define GLGEN_SWITCH_MODE_CONFIG 0x000B81E0 +#define GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M BIT(2) #define GLGEN_VFLRSTAT(_i) (0x00093A04 + ((_i) * 4)) #define PFGEN_CTRL 0x00091000 #define PFGEN_CTRL_PFSWR_M BIT(0) @@ -177,6 +179,8 @@ #define GLINT_CTL_ITR_GRAN_50_M ICE_M(0xF, 24) #define GLINT_CTL_ITR_GRAN_25_S 28 #define GLINT_CTL_ITR_GRAN_25_M ICE_M(0xF, 28) +#define GLGEN_MAC_LINK_TOPO 0x000B81DC +#define GLGEN_MAC_LINK_TOPO_LINK_TOPO_M GENMASK(1, 0) #define GLINT_DYN_CTL(_INT) (0x00160000 + ((_INT) * 4)) #define GLINT_DYN_CTL_INTENA_M BIT(0) #define GLINT_DYN_CTL_CLEARPBA_M BIT(1) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 7629b0190578..f559e60992fa 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2580,8 +2580,8 @@ void ice_vsi_free_irq(struct ice_vsi *vsi) if (!IS_ENABLED(CONFIG_RFS_ACCEL)) irq_set_affinity_notifier(irq_num, NULL); - /* clear the affinity_mask in the IRQ descriptor */ - irq_set_affinity_hint(irq_num, NULL); + /* clear the affinity_hint in the IRQ descriptor */ + irq_update_affinity_hint(irq_num, NULL); synchronize_irq(irq_num); devm_free_irq(ice_pf_to_dev(pf), irq_num, vsi->q_vectors[i]); } diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 55a42aad92a5..ec636be4d17d 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -35,7 +35,6 @@ static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation."; #define ICE_DDP_PKG_PATH "intel/ice/ddp/" #define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg" -MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_IMPORT_NS(LIBIE); MODULE_LICENSE("GPL v2"); @@ -623,7 +622,7 @@ skip: if (hw->port_info) ice_sched_clear_port(hw->port_info); - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, false); set_bit(ICE_PREPARED_FOR_RESET, pf->state); } @@ -2610,7 +2609,7 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename) } /* assign the mask for this irq */ - irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); + irq_update_affinity_hint(irq_num, &q_vector->affinity_mask); } err = ice_set_cpu_rx_rmap(vsi); @@ -2628,7 +2627,7 @@ free_q_irqs: irq_num = vsi->q_vectors[vector]->irq.virq; if (!IS_ENABLED(CONFIG_RFS_ACCEL)) irq_set_affinity_notifier(irq_num, NULL); - irq_set_affinity_hint(irq_num, NULL); + irq_update_affinity_hint(irq_num, NULL); devm_free_irq(dev, irq_num, &vsi->q_vectors[vector]); } return err; @@ -4158,13 +4157,17 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) /* set for the next time the netdev is started */ if (!netif_running(vsi->netdev)) { - ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + if (err) + goto rebuild_err; dev_dbg(ice_pf_to_dev(pf), "Link is down, queue count 
change happens when link is brought up\n"); goto done; } ice_vsi_close(vsi); - ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + err = ice_vsi_rebuild(vsi, ICE_VSI_FLAG_NO_INIT); + if (err) + goto rebuild_err; ice_for_each_traffic_class(i) { if (vsi->tc_cfg.ena_tc & BIT(i)) @@ -4175,6 +4178,11 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked) } ice_pf_dcb_recfg(pf, locked); ice_vsi_open(vsi); + goto done; + +rebuild_err: + dev_err(ice_pf_to_dev(pf), "Error during VSI rebuild: %d. Unload and reload the driver.\n", + err); done: clear_bit(ICE_CFG_BUSY, pf->state); return err; @@ -5490,7 +5498,7 @@ static void ice_prepare_for_shutdown(struct ice_pf *pf) if (pf->vsi[v]) pf->vsi[v]->vsi_num = 0; - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, true); } /** @@ -7694,8 +7702,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type) goto err_vsi_rebuild; } - ice_eswitch_rebuild(pf); - if (reset_type == ICE_RESET_PFR) { err = ice_rebuild_channels(pf); if (err) { @@ -7750,7 +7756,7 @@ err_vsi_rebuild: err_sched_init_port: ice_sched_cleanup_all(hw); err_init_ctrlq: - ice_shutdown_all_ctrlq(hw); + ice_shutdown_all_ctrlq(hw, false); set_bit(ICE_RESET_FAILED, pf->state); clear_recovery: /* set this bit in PF state to control service task scheduling */ diff --git a/drivers/net/ethernet/intel/ice/ice_protocol_type.h b/drivers/net/ethernet/intel/ice/ice_protocol_type.h index 755a9c55267c..7c09ea0f03ba 100644 --- a/drivers/net/ethernet/intel/ice/ice_protocol_type.h +++ b/drivers/net/ethernet/intel/ice/ice_protocol_type.h @@ -7,18 +7,24 @@ /* Each recipe can match up to 5 different fields. Fields to match can be meta- * data, values extracted from packet headers, or results from other recipes. - * One of the 5 fields is reserved for matching the switch ID. So, up to 4 - * recipes can provide intermediate results to another one through chaining, - * e.g. recipes 0, 1, 2, and 3 can provide intermediate results to recipe 4. + * Therefore, up to 5 recipes can provide intermediate results to another one + * through chaining, e.g. recipes 0, 1, 2, 3 and 4 can provide intermediate + * results to recipe 5. Note that one of the fields in one of the recipes must + * always be reserved for matching the switch ID. */ -#define ICE_NUM_WORDS_RECIPE 4 +#define ICE_NUM_WORDS_RECIPE 5 -/* Max recipes that can be chained */ +/* Max recipes that can be chained, not including the last one, which combines + * intermediate results. + */ #define ICE_MAX_CHAIN_RECIPE 5 -/* 1 word reserved for switch ID from allowed 5 words. - * So a recipe can have max 4 words. And you can chain 5 such recipes - * together. So maximum words that can be programmed for look up is 5 * 4. +/* Total max recipes in chain recipe (including intermediate results) */ +#define ICE_MAX_CHAIN_RECIPE_RES (ICE_MAX_CHAIN_RECIPE + 1) + +/* A recipe can have max 5 words, and 5 recipes can be chained together (using + * the 6th one, which would contain only result indexes). So maximum words that + * can be programmed for lookup is 5 * 5 (not including intermediate results). 
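+ * For example: ICE_MAX_CHAIN_WORDS = ICE_NUM_WORDS_RECIPE *
+ * ICE_MAX_CHAIN_RECIPE = 5 * 5 = 25.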
*/ #define ICE_MAX_CHAIN_WORDS (ICE_NUM_WORDS_RECIPE * ICE_MAX_CHAIN_RECIPE) @@ -449,32 +455,11 @@ struct ice_prot_ext_tbl_entry { /* Extractions to be looked up for a given recipe */ struct ice_prot_lkup_ext { - u16 prot_type; u8 n_val_words; /* create a buffer to hold max words per recipe */ - u16 field_off[ICE_MAX_CHAIN_WORDS]; u16 field_mask[ICE_MAX_CHAIN_WORDS]; struct ice_fv_word fv_words[ICE_MAX_CHAIN_WORDS]; - - /* Indicate field offsets that have field vector indices assigned */ - DECLARE_BITMAP(done, ICE_MAX_CHAIN_WORDS); }; -struct ice_pref_recipe_group { - u8 n_val_pairs; /* Number of valid pairs */ - struct ice_fv_word pairs[ICE_NUM_WORDS_RECIPE]; - u16 mask[ICE_NUM_WORDS_RECIPE]; -}; - -struct ice_recp_grp_entry { - struct list_head l_entry; - -#define ICE_INVAL_CHAIN_IND 0xFF - u16 rid; - u8 chain_idx; - u16 fv_idx[ICE_NUM_WORDS_RECIPE]; - u16 fv_mask[ICE_NUM_WORDS_RECIPE]; - struct ice_pref_recipe_group r_group; -}; #endif /* _ICE_PROTOCOL_TYPE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 927b623cedd5..51fac8f18cb0 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -7,8 +7,6 @@ #define E810_OUT_PROP_DELAY_NS 1 -#define UNKNOWN_INCVAL_E82X 0x100000000ULL - static const struct ptp_pin_desc ice_pin_desc_e810t[] = { /* name idx func chan */ { "GNSS", GNSS, PTP_PF_EXTTS, 0, { 0, } }, @@ -813,7 +811,7 @@ static enum ice_tx_tstamp_work ice_ptp_tx_tstamp_owner(struct ice_pf *pf) } mutex_unlock(&pf->ptp.ports_owner.lock); - for (i = 0; i < ICE_MAX_QUAD; i++) { + for (i = 0; i < ICE_GET_QUAD_NUM(pf->hw.ptp.num_lports); i++) { u64 tstamp_ready; int err; @@ -1013,6 +1011,28 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) tx->len = 0; } +/** + * ice_ptp_init_tx_eth56g - Initialize tracking for Tx timestamps + * @pf: Board private structure + * @tx: the Tx tracking structure to initialize + * @port: the port this structure tracks + * + * Initialize the Tx timestamp tracker for this port. ETH56G PHYs + * have independent memory blocks for all ports. 
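+ * Each port therefore uses tx->block = port with offset 0 and a full
+ * INDEX_PER_PORT_ETH56G (64) timestamp indexes, unlike E82X where four
+ * ports share a single quad's timestamp memory.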
+ * + * Return: 0 for success, -ENOMEM when failed to allocate Tx tracker + */ +static int ice_ptp_init_tx_eth56g(struct ice_pf *pf, struct ice_ptp_tx *tx, + u8 port) +{ + tx->block = port; + tx->offset = 0; + tx->len = INDEX_PER_PORT_ETH56G; + tx->has_ready_bitmap = 1; + + return ice_ptp_alloc_tx_tracker(tx); +} + /** * ice_ptp_init_tx_e82x - Initialize tracking for Tx timestamps * @pf: Board private structure @@ -1027,7 +1047,7 @@ ice_ptp_release_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) static int ice_ptp_init_tx_e82x(struct ice_pf *pf, struct ice_ptp_tx *tx, u8 port) { - tx->block = port / ICE_PORTS_PER_QUAD; + tx->block = ICE_GET_QUAD_NUM(port); tx->offset = (port % ICE_PORTS_PER_QUAD) * INDEX_PER_PORT_E82X; tx->len = INDEX_PER_PORT_E82X; tx->has_ready_bitmap = 1; @@ -1210,12 +1230,7 @@ static u64 ice_base_incval(struct ice_pf *pf) struct ice_hw *hw = &pf->hw; u64 incval; - if (ice_is_e810(hw)) - incval = ICE_PTP_NOMINAL_INCVAL_E810; - else if (ice_e82x_time_ref(hw) < NUM_ICE_TIME_REF_FREQ) - incval = ice_e82x_nominal_incval(ice_e82x_time_ref(hw)); - else - incval = UNKNOWN_INCVAL_E82X; + incval = ice_get_base_incval(hw); dev_dbg(ice_pf_to_dev(pf), "PTP: using base increment value of 0x%016llx\n", incval); @@ -1229,8 +1244,8 @@ static u64 ice_base_incval(struct ice_pf *pf) */ static int ice_ptp_check_tx_fifo(struct ice_ptp_port *port) { - int quad = port->port_num / ICE_PORTS_PER_QUAD; int offs = port->port_num % ICE_PORTS_PER_QUAD; + int quad = ICE_GET_QUAD_NUM(port->port_num); struct ice_pf *pf; struct ice_hw *hw; u32 val, phy_sts; @@ -1348,10 +1363,19 @@ ice_ptp_port_phy_stop(struct ice_ptp_port *ptp_port) mutex_lock(&ptp_port->ps_lock); - kthread_cancel_delayed_work_sync(&ptp_port->ov_work); + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + err = ice_stop_phy_timer_eth56g(hw, port, true); + break; + case ICE_PHY_E82X: + kthread_cancel_delayed_work_sync(&ptp_port->ov_work); - err = ice_stop_phy_timer_e82x(hw, port, true); - if (err) + err = ice_stop_phy_timer_e82x(hw, port, true); + break; + default: + err = -ENODEV; + } + if (err && err != -EBUSY) dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d down, err %d\n", port, err); @@ -1385,27 +1409,39 @@ ice_ptp_port_phy_restart(struct ice_ptp_port *ptp_port) mutex_lock(&ptp_port->ps_lock); - kthread_cancel_delayed_work_sync(&ptp_port->ov_work); + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + err = ice_start_phy_timer_eth56g(hw, port); + break; + case ICE_PHY_E82X: + /* Start the PHY timer in Vernier mode */ + kthread_cancel_delayed_work_sync(&ptp_port->ov_work); - /* temporarily disable Tx timestamps while calibrating PHY offset */ - spin_lock_irqsave(&ptp_port->tx.lock, flags); - ptp_port->tx.calibrating = true; - spin_unlock_irqrestore(&ptp_port->tx.lock, flags); - ptp_port->tx_fifo_busy_cnt = 0; + /* temporarily disable Tx timestamps while calibrating + * PHY offset + */ + spin_lock_irqsave(&ptp_port->tx.lock, flags); + ptp_port->tx.calibrating = true; + spin_unlock_irqrestore(&ptp_port->tx.lock, flags); + ptp_port->tx_fifo_busy_cnt = 0; - /* Start the PHY timer in Vernier mode */ - err = ice_start_phy_timer_e82x(hw, port); - if (err) - goto out_unlock; + /* Start the PHY timer in Vernier mode */ + err = ice_start_phy_timer_e82x(hw, port); + if (err) + break; - /* Enable Tx timestamps right away */ - spin_lock_irqsave(&ptp_port->tx.lock, flags); - ptp_port->tx.calibrating = false; - spin_unlock_irqrestore(&ptp_port->tx.lock, flags); + /* Enable Tx timestamps right away */ + 
spin_lock_irqsave(&ptp_port->tx.lock, flags); + ptp_port->tx.calibrating = false; + spin_unlock_irqrestore(&ptp_port->tx.lock, flags); - kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, 0); + kthread_queue_delayed_work(pf->ptp.kworker, &ptp_port->ov_work, + 0); + break; + default: + err = -ENODEV; + } -out_unlock: if (err) dev_err(ice_pf_to_dev(pf), "PTP failed to set PHY port %d up, err %d\n", port, err); @@ -1429,20 +1465,23 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) if (pf->ptp.state != ICE_PTP_READY) return; - if (WARN_ON_ONCE(port >= ICE_NUM_EXTERNAL_PORTS)) + if (WARN_ON_ONCE(port >= hw->ptp.num_lports)) return; ptp_port = &pf->ptp.port; + if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo) + port *= 2; if (WARN_ON_ONCE(ptp_port->port_num != port)) return; /* Update cached link status for this port immediately */ ptp_port->link_up = linkup; - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { case ICE_PHY_E810: /* Do not reconfigure E810 PHY */ return; + case ICE_PHY_ETH56G: case ICE_PHY_E82X: ice_ptp_port_phy_restart(ptp_port); return; @@ -1457,42 +1496,62 @@ void ice_ptp_link_change(struct ice_pf *pf, u8 port, bool linkup) * @ena: bool value to enable or disable interrupt * @threshold: Minimum number of packets at which intr is triggered * - * Utility function to enable or disable Tx timestamp interrupt and threshold + * Utility function to configure all the PHY interrupt settings, including + * whether the PHY interrupt is enabled, and what threshold to use. Also + * configures The E82X timestamp owner to react to interrupts from all PHYs. + * + * Return: 0 on success, -EOPNOTSUPP when PHY model incorrect, other error codes + * when failed to configure PHY interrupt for E82X */ static int ice_ptp_cfg_phy_interrupt(struct ice_pf *pf, bool ena, u32 threshold) { + struct device *dev = ice_pf_to_dev(pf); struct ice_hw *hw = &pf->hw; - int err = 0; - int quad; - u32 val; ice_ptp_reset_ts_memory(hw); - for (quad = 0; quad < ICE_MAX_QUAD; quad++) { - err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, - &val); - if (err) - break; + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: { + int port; - if (ena) { - val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; - val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M; - val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M, - threshold); - } else { - val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + err = ice_phy_cfg_intr_eth56g(hw, port, ena, threshold); + if (err) { + dev_err(dev, "Failed to configure PHY interrupt for port %d, err %d\n", + port, err); + return err; + } } - err = ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, - val); - if (err) - break; + return 0; } + case ICE_PHY_E82X: { + int quad; - if (err) - dev_err(ice_pf_to_dev(pf), "PTP failed in intr ena, err %d\n", - err); - return err; + for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); + quad++) { + int err; + + err = ice_phy_cfg_intr_e82x(hw, quad, ena, threshold); + if (err) { + dev_err(dev, "Failed to configure PHY interrupt for quad %d, err %d\n", + quad, err); + return err; + } + } + + return 0; + } + case ICE_PHY_E810: + return 0; + case ICE_PHY_UNSUP: + default: + dev_warn(dev, "%s: Unexpected PHY model %d\n", __func__, + hw->ptp.phy_model); + return -EOPNOTSUPP; + } } /** @@ -1767,8 +1826,7 @@ static int ice_ptp_cfg_clkout(struct ice_pf *pf, unsigned int chan, * maintaining phase */ if (start_time < current_time) - start_time = div64_u64(current_time + 
NSEC_PER_SEC - 1, - NSEC_PER_SEC) * NSEC_PER_SEC + phase; + start_time = roundup_u64(current_time, NSEC_PER_SEC) + phase; if (ice_is_e810(hw)) start_time -= E810_OUT_PROP_DELAY_NS; @@ -1994,11 +2052,14 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) struct ice_hw *hw = &pf->hw; int err; - /* For Vernier mode, we need to recalibrate after new settime - * Start with disabling timestamp block + /* For Vernier mode on E82X, we need to recalibrate after new settime. + * Start with marking timestamps as invalid. */ - if (pf->ptp.port.link_up) - ice_ptp_port_phy_stop(&pf->ptp.port); + if (hw->ptp.phy_model == ICE_PHY_E82X) { + err = ice_ptp_clear_phy_offset_ready_e82x(hw); + if (err) + dev_warn(ice_pf_to_dev(pf), "Failed to mark timestamps as invalid before settime\n"); + } if (!ice_ptp_lock(hw)) { err = -EBUSY; @@ -2018,7 +2079,7 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) ice_ptp_enable_all_clkout(pf); /* Recalibrate and re-enable timestamp blocks for E822/E823 */ - if (hw->phy_model == ICE_PHY_E82X) + if (hw->ptp.phy_model == ICE_PHY_E82X) ice_ptp_restart_all_phy(pf); exit: if (err) { @@ -2644,7 +2705,7 @@ static void ice_ptp_maybe_trigger_tx_interrupt(struct ice_pf *pf) if (!ice_pf_src_tmr_owned(pf)) return; - for (i = 0; i < ICE_MAX_QUAD; i++) { + for (i = 0; i < ICE_GET_QUAD_NUM(hw->ptp.num_lports); i++) { u64 tstamp_ready; int err; @@ -3080,12 +3141,10 @@ static int ice_ptp_init_owner(struct ice_pf *pf) /* Release the global hardware lock */ ice_ptp_unlock(hw); - if (!ice_is_e810(hw)) { - /* Enable quad interrupts */ - err = ice_ptp_cfg_phy_interrupt(pf, true, 1); - if (err) - goto err_exit; - } + /* Configure PHY interrupt settings */ + err = ice_ptp_cfg_phy_interrupt(pf, true, 1); + if (err) + goto err_exit; /* Ensure we have a clock device */ err = ice_ptp_create_clock(pf); @@ -3146,7 +3205,10 @@ static int ice_ptp_init_port(struct ice_pf *pf, struct ice_ptp_port *ptp_port) mutex_init(&ptp_port->ps_lock); - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + return ice_ptp_init_tx_eth56g(pf, &ptp_port->tx, + ptp_port->port_num); case ICE_PHY_E810: return ice_ptp_init_tx_e810(pf, &ptp_port->tx); case ICE_PHY_E82X: @@ -3241,7 +3303,7 @@ static void ice_ptp_remove_auxbus_device(struct ice_pf *pf) */ static void ice_ptp_init_tx_interrupt_mode(struct ice_pf *pf) { - switch (pf->hw.phy_model) { + switch (pf->hw.ptp.phy_model) { case ICE_PHY_E82X: /* E822 based PHY has the clock owner process the interrupt * for all ports. 
@@ -3277,7 +3339,7 @@ void ice_ptp_init(struct ice_pf *pf) ptp->state = ICE_PTP_INITIALIZING; - ice_ptp_init_phy_model(hw); + ice_ptp_init_hw(hw); ice_ptp_init_tx_interrupt_mode(pf); @@ -3291,6 +3353,9 @@ void ice_ptp_init(struct ice_pf *pf) } ptp->port.port_num = hw->pf_id; + if (ice_is_e825c(hw) && hw->ptp.is_2x50g_muxed_topo) + ptp->port.port_num = hw->pf_id * 2; + err = ice_ptp_init_port(pf, &ptp->port); if (err) goto err; diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.h b/drivers/net/ethernet/intel/ice/ice_ptp.h index e2af9749061c..2db2257a0fb2 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp.h @@ -160,6 +160,7 @@ struct ice_ptp_tx { #define INDEX_PER_QUAD 64 #define INDEX_PER_PORT_E82X 16 #define INDEX_PER_PORT_E810 64 +#define INDEX_PER_PORT_ETH56G 64 /** * struct ice_ptp_port - data used to initialize an external port for PTP diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h index 2c4dab0c48ab..e6980b94a6c1 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_consts.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_consts.h @@ -9,6 +9,321 @@ */ /* Constants defined for the PTP 1588 clock hardware. */ +const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES] = { + /* ETH56G_PHY_REG_PTP */ + { + /* base_addr */ + { + 0x092000, + 0x126000, + 0x1BA000, + 0x24E000, + 0x2E2000, + }, + /* step */ + 0x98, + }, + /* ETH56G_PHY_MEM_PTP */ + { + /* base_addr */ + { + 0x093000, + 0x127000, + 0x1BB000, + 0x24F000, + 0x2E3000, + }, + /* step */ + 0x200, + }, + /* ETH56G_PHY_REG_XPCS */ + { + /* base_addr */ + { + 0x000000, + 0x009400, + 0x128000, + 0x1BC000, + 0x250000, + }, + /* step */ + 0x21000, + }, + /* ETH56G_PHY_REG_MAC */ + { + /* base_addr */ + { + 0x085000, + 0x119000, + 0x1AD000, + 0x241000, + 0x2D5000, + }, + /* step */ + 0x1000, + }, + /* ETH56G_PHY_REG_GPCS */ + { + /* base_addr */ + { + 0x084000, + 0x118000, + 0x1AC000, + 0x240000, + 0x2D4000, + }, + /* step */ + 0x400, + }, +}; + +const +struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD] = { + [ICE_ETH56G_LNK_SPD_1G] = { + .tx_mode = { .def = 6, }, + .rx_mode = { .def = 6, }, + .blks_per_clk = 1, + .blktime = 0x4000, /* 32 */ + .tx_offset = { + .serdes = 0x6666, /* 51.2 */ + .no_fec = 0xd066, /* 104.2 */ + .sfd = 0x3000, /* 24 */ + .onestep = 0x30000 /* 384 */ + }, + .rx_offset = { + .serdes = 0xffffc59a, /* -29.2 */ + .no_fec = 0xffff0a80, /* -122.75 */ + .sfd = 0x2c00, /* 22 */ + .bs_ds = 0x19a /* 0.8 */ + /* Dynamic bitslip 0 equals to 10 */ + } + }, + [ICE_ETH56G_LNK_SPD_2_5G] = { + .tx_mode = { .def = 6, }, + .rx_mode = { .def = 6, }, + .blks_per_clk = 1, + .blktime = 0x199a, /* 12.8 */ + .tx_offset = { + .serdes = 0x28f6, /* 20.48 */ + .no_fec = 0x53b8, /* 41.86 */ + .sfd = 0x1333, /* 9.6 */ + .onestep = 0x13333 /* 153.6 */ + }, + .rx_offset = { + .serdes = 0xffffe8a4, /* -11.68 */ + .no_fec = 0xffff9a76, /* -50.77 */ + .sfd = 0xf33, /* 7.6 */ + .bs_ds = 0xa4 /* 0.32 */ + } + }, + [ICE_ETH56G_LNK_SPD_10G] = { + .tx_mode = { .def = 1, }, + .rx_mode = { .def = 1, }, + .blks_per_clk = 1, + .blktime = 0x666, /* 3.2 */ + .tx_offset = { + .serdes = 0x234c, /* 17.6484848 */ + .no_fec = 0x8e80, /* 71.25 */ + .fc = 0xb4a4, /* 90.32 */ + .sfd = 0x4a4, /* 2.32 */ + .onestep = 0x4ccd /* 38.4 */ + }, + .rx_offset = { + .serdes = 0xffffeb27, /* -10.42424 */ + .no_fec = 0xffffcccd, /* -25.6 */ + .fc = 0xfffe0014, /* -255.96 */ + .sfd = 0x4a4, /* 2.32 */ + .bs_ds = 0x32 /* 0.0969697 */ + } + }, + 
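+	/* Assumption, inferred from the inline comments rather than stated
+	 * in the patch: the blktime, mktime and offset values in this table
+	 * appear to be nanoseconds in 1/512 ns fixed-point units (e.g.
+	 * 0x4000 = 16384/512 = 32), with negative Rx offsets stored as
+	 * two's complement.
+	 */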
[ICE_ETH56G_LNK_SPD_25G] = { + .tx_mode = { + .def = 1, + .rs = 4 + }, + .tx_mk_dly = 4, + .tx_cw_dly = { + .def = 1, + .onestep = 6 + }, + .rx_mode = { + .def = 1, + .rs = 4 + }, + .rx_mk_dly = { + .def = 1, + .rs = 1 + }, + .rx_cw_dly = { + .def = 1, + .rs = 1 + }, + .blks_per_clk = 1, + .blktime = 0x28f, /* 1.28 */ + .mktime = 0x147b, /* 10.24, only if RS-FEC enabled */ + .tx_offset = { + .serdes = 0xe1e, /* 7.0593939 */ + .no_fec = 0x3857, /* 28.17 */ + .fc = 0x48c3, /* 36.38 */ + .rs = 0x8100, /* 64.5 */ + .sfd = 0x1dc, /* 0.93 */ + .onestep = 0x1eb8 /* 15.36 */ + }, + .rx_offset = { + .serdes = 0xfffff7a9, /* -4.1697 */ + .no_fec = 0xffffe71a, /* -12.45 */ + .fc = 0xfffe894d, /* -187.35 */ + .rs = 0xfffff8cd, /* -3.6 */ + .sfd = 0x1dc, /* 0.93 */ + .bs_ds = 0x14 /* 0.0387879, RS-FEC 0 */ + } + }, + [ICE_ETH56G_LNK_SPD_40G] = { + .tx_mode = { .def = 3 }, + .tx_mk_dly = 4, + .tx_cw_dly = { + .def = 1, + .onestep = 6 + }, + .rx_mode = { .def = 4 }, + .rx_mk_dly = { .def = 1 }, + .rx_cw_dly = { .def = 1 }, + .blktime = 0x333, /* 1.6 */ + .mktime = 0xccd, /* 6.4 */ + .tx_offset = { + .serdes = 0x234c, /* 17.6484848 */ + .no_fec = 0x5a8a, /* 45.27 */ + .fc = 0x81b8, /* 64.86 */ + .sfd = 0x4a4, /* 2.32 */ + .onestep = 0x1333 /* 9.6 */ + }, + .rx_offset = { + .serdes = 0xffffeb27, /* -10.42424 */ + .no_fec = 0xfffff594, /* -5.21 */ + .fc = 0xfffe3080, /* -231.75 */ + .sfd = 0x4a4, /* 2.32 */ + .bs_ds = 0xccd /* 6.4 */ + } + }, + [ICE_ETH56G_LNK_SPD_50G] = { + .tx_mode = { .def = 5 }, + .tx_mk_dly = 4, + .tx_cw_dly = { + .def = 1, + .onestep = 6 + }, + .rx_mode = { .def = 5 }, + .rx_mk_dly = { .def = 1 }, + .rx_cw_dly = { .def = 1 }, + .blktime = 0x28f, /* 1.28 */ + .mktime = 0xa3d, /* 5.12 */ + .tx_offset = { + .serdes = 0x13ba, /* 9.86353 */ + .rs = 0x5400, /* 42 */ + .sfd = 0xe6, /* 0.45 */ + .onestep = 0xf5c /* 7.68 */ + }, + .rx_offset = { + .serdes = 0xfffff7e8, /* -4.04706 */ + .rs = 0xfffff994, /* -3.21 */ + .sfd = 0xe6 /* 0.45 */ + } + }, + [ICE_ETH56G_LNK_SPD_50G2] = { + .tx_mode = { + .def = 3, + .rs = 2 + }, + .tx_mk_dly = 4, + .tx_cw_dly = { + .def = 1, + .onestep = 6 + }, + .rx_mode = { + .def = 4, + .rs = 1 + }, + .rx_mk_dly = { .def = 1 }, + .rx_cw_dly = { .def = 1 }, + .blktime = 0x28f, /* 1.28 */ + .mktime = 0xa3d, /* 5.12 */ + .tx_offset = { + .serdes = 0xe1e, /* 7.0593939 */ + .no_fec = 0x3d33, /* 30.6 */ + .rs = 0x5057, /* 40.17 */ + .sfd = 0x1dc, /* 0.93 */ + .onestep = 0xf5c /* 7.68 */ + }, + .rx_offset = { + .serdes = 0xfffff7a9, /* -4.1697 */ + .no_fec = 0xfffff8cd, /* -3.6 */ + .rs = 0xfffff21a, /* -6.95 */ + .sfd = 0x1dc, /* 0.93 */ + .bs_ds = 0xa3d /* 5.12, RS-FEC 0x633 (3.1) */ + } + }, + [ICE_ETH56G_LNK_SPD_100G] = { + .tx_mode = { + .def = 3, + .rs = 2 + }, + .tx_mk_dly = 10, + .tx_cw_dly = { + .def = 3, + .onestep = 6 + }, + .rx_mode = { + .def = 4, + .rs = 1 + }, + .rx_mk_dly = { .def = 5 }, + .rx_cw_dly = { .def = 5 }, + .blks_per_clk = 1, + .blktime = 0x148, /* 0.64 */ + .mktime = 0x199a, /* 12.8 */ + .tx_offset = { + .serdes = 0xe1e, /* 7.0593939 */ + .no_fec = 0x67ec, /* 51.96 */ + .rs = 0x44fb, /* 34.49 */ + .sfd = 0x1dc, /* 0.93 */ + .onestep = 0xf5c /* 7.68 */ + }, + .rx_offset = { + .serdes = 0xfffff7a9, /* -4.1697 */ + .no_fec = 0xfffff5a9, /* -5.17 */ + .rs = 0xfffff6e6, /* -4.55 */ + .sfd = 0x1dc, /* 0.93 */ + .bs_ds = 0x199a /* 12.8, RS-FEC 0x31b (1.552) */ + } + }, + [ICE_ETH56G_LNK_SPD_100G2] = { + .tx_mode = { .def = 5 }, + .tx_mk_dly = 10, + .tx_cw_dly = { + .def = 3, + .onestep = 6 + }, + .rx_mode = { .def = 5 }, + .rx_mk_dly = { .def = 5 }, + 
.rx_cw_dly = { .def = 5 }, + .blks_per_clk = 1, + .blktime = 0x148, /* 0.64 */ + .mktime = 0x199a, /* 12.8 */ + .tx_offset = { + .serdes = 0x13ba, /* 9.86353 */ + .rs = 0x460a, /* 35.02 */ + .sfd = 0xe6, /* 0.45 */ + .onestep = 0xf5c /* 7.68 */ + }, + .rx_offset = { + .serdes = 0xfffff7e8, /* -4.04706 */ + .rs = 0xfffff548, /* -5.36 */ + .sfd = 0xe6, /* 0.45 */ + .bs_ds = 0x303 /* 1.506 */ + } + } +}; + /* struct ice_time_ref_info_e82x * * E822 hardware can use different sources as the reference for the PTP @@ -155,6 +470,93 @@ const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ] = { }, }; +const +struct ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ] = { + /* ICE_TIME_REF_FREQ_25_000 -> 25 MHz */ + { + /* tspll_ck_refclkfreq */ + 0x19, + /* tspll_ndivratio */ + 1, + /* tspll_fbdiv_intgr */ + 320, + /* tspll_fbdiv_frac */ + 0, + /* ref1588_ck_div */ + 0, + }, + + /* ICE_TIME_REF_FREQ_122_880 -> 122.88 MHz */ + { + /* tspll_ck_refclkfreq */ + 0x29, + /* tspll_ndivratio */ + 3, + /* tspll_fbdiv_intgr */ + 195, + /* tspll_fbdiv_frac */ + 1342177280UL, + /* ref1588_ck_div */ + 0, + }, + + /* ICE_TIME_REF_FREQ_125_000 -> 125 MHz */ + { + /* tspll_ck_refclkfreq */ + 0x3E, + /* tspll_ndivratio */ + 2, + /* tspll_fbdiv_intgr */ + 128, + /* tspll_fbdiv_frac */ + 0, + /* ref1588_ck_div */ + 0, + }, + + /* ICE_TIME_REF_FREQ_153_600 -> 153.6 MHz */ + { + /* tspll_ck_refclkfreq */ + 0x33, + /* tspll_ndivratio */ + 3, + /* tspll_fbdiv_intgr */ + 156, + /* tspll_fbdiv_frac */ + 1073741824UL, + /* ref1588_ck_div */ + 0, + }, + + /* ICE_TIME_REF_FREQ_156_250 -> 156.25 MHz */ + { + /* tspll_ck_refclkfreq */ + 0x1F, + /* tspll_ndivratio */ + 5, + /* tspll_fbdiv_intgr */ + 256, + /* tspll_fbdiv_frac */ + 0, + /* ref1588_ck_div */ + 0, + }, + + /* ICE_TIME_REF_FREQ_245_760 -> 245.76 MHz */ + { + /* tspll_ck_refclkfreq */ + 0x52, + /* tspll_ndivratio */ + 3, + /* tspll_fbdiv_intgr */ + 97, + /* tspll_fbdiv_frac */ + 2818572288UL, + /* ref1588_ck_div */ + 0, + }, +}; + /* struct ice_vernier_info_e82x * * E822 hardware calibrates the delay of the timestamp indication from the diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c index 2b9423a173bb..3a33e6b9b313 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.c @@ -2,6 +2,7 @@ /* Copyright (C) 2021, Intel Corporation. */ #include +#include #include "ice_common.h" #include "ice_ptp_hw.h" #include "ice_ptp_consts.h" @@ -226,6 +227,622 @@ static u64 ice_ptp_read_src_incval(struct ice_hw *hw) return ((u64)(hi & INCVAL_HIGH_M) << 32) | lo; } +/** + * ice_read_cgu_reg_e82x - Read a CGU register + * @hw: pointer to the HW struct + * @addr: Register address to read + * @val: storage for register value read + * + * Read the contents of a register of the Clock Generation Unit. Only + * applicable to E822 devices. 
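+ * The CGU is not memory-mapped; the access goes over the sideband queue
+ * (ice_sbq_rw_reg()) with the destination device set to cgu.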
+ * + * Return: 0 on success, other error codes when failed to read from CGU + */ +static int ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val) +{ + struct ice_sbq_msg_input cgu_msg = { + .opcode = ice_sbq_msg_rd, + .dest_dev = cgu, + .msg_addr_low = addr + }; + int err; + + err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n", + addr, err); + return err; + } + + *val = cgu_msg.data; + + return 0; +} + +/** + * ice_write_cgu_reg_e82x - Write a CGU register + * @hw: pointer to the HW struct + * @addr: Register address to write + * @val: value to write into the register + * + * Write the specified value to a register of the Clock Generation Unit. Only + * applicable to E822 devices. + * + * Return: 0 on success, other error codes when failed to write to CGU + */ +static int ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val) +{ + struct ice_sbq_msg_input cgu_msg = { + .opcode = ice_sbq_msg_wr, + .dest_dev = cgu, + .msg_addr_low = addr, + .data = val + }; + int err; + + err = ice_sbq_rw_reg(hw, &cgu_msg, ICE_AQ_FLAG_RD); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n", + addr, err); + return err; + } + + return err; +} + +/** + * ice_clk_freq_str - Convert time_ref_freq to string + * @clk_freq: Clock frequency + * + * Return: specified TIME_REF clock frequency converted to a string + */ +static const char *ice_clk_freq_str(enum ice_time_ref_freq clk_freq) +{ + switch (clk_freq) { + case ICE_TIME_REF_FREQ_25_000: + return "25 MHz"; + case ICE_TIME_REF_FREQ_122_880: + return "122.88 MHz"; + case ICE_TIME_REF_FREQ_125_000: + return "125 MHz"; + case ICE_TIME_REF_FREQ_153_600: + return "153.6 MHz"; + case ICE_TIME_REF_FREQ_156_250: + return "156.25 MHz"; + case ICE_TIME_REF_FREQ_245_760: + return "245.76 MHz"; + default: + return "Unknown"; + } +} + +/** + * ice_clk_src_str - Convert time_ref_src to string + * @clk_src: Clock source + * + * Return: specified clock source converted to its string name + */ +static const char *ice_clk_src_str(enum ice_clk_src clk_src) +{ + switch (clk_src) { + case ICE_CLK_SRC_TCXO: + return "TCXO"; + case ICE_CLK_SRC_TIME_REF: + return "TIME_REF"; + default: + return "Unknown"; + } +} + +/** + * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit + * @hw: pointer to the HW struct + * @clk_freq: Clock frequency to program + * @clk_src: Clock source to select (TIME_REF, or TCXO) + * + * Configure the Clock Generation Unit with the desired clock frequency and + * time reference, enabling the PLL which drives the PTP hardware clock. 
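+ * The sequence below disables the PLL, programs the frequency select and
+ * the feedback, post and pre divisors from e822_cgu_params[], re-enables
+ * the PLL, and then polls the lock bit once after a short delay.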
+ * + * Return: + * * %0 - success + * * %-EINVAL - input parameters are incorrect + * * %-EBUSY - failed to lock TS PLL + * * %other - CGU read/write failure + */ +static int ice_cfg_cgu_pll_e82x(struct ice_hw *hw, + enum ice_time_ref_freq clk_freq, + enum ice_clk_src clk_src) +{ + union tspll_ro_bwm_lf bwm_lf; + union nac_cgu_dword19 dw19; + union nac_cgu_dword22 dw22; + union nac_cgu_dword24 dw24; + union nac_cgu_dword9 dw9; + int err; + + if (clk_freq >= NUM_ICE_TIME_REF_FREQ) { + dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n", + clk_freq); + return -EINVAL; + } + + if (clk_src >= NUM_ICE_CLK_SRC) { + dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n", + clk_src); + return -EINVAL; + } + + if (clk_src == ICE_CLK_SRC_TCXO && + clk_freq != ICE_TIME_REF_FREQ_25_000) { + dev_warn(ice_hw_to_dev(hw), + "TCXO only supports 25 MHz frequency\n"); + return -EINVAL; + } + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val); + if (err) + return err; + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); + if (err) + return err; + + err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); + if (err) + return err; + + /* Log the current clock configuration */ + ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", + dw24.ts_pll_enable ? "enabled" : "disabled", + ice_clk_src_str(dw24.time_ref_sel), + ice_clk_freq_str(dw9.time_ref_freq_sel), + bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked"); + + /* Disable the PLL before changing the clock source or frequency */ + if (dw24.ts_pll_enable) { + dw24.ts_pll_enable = 0; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); + if (err) + return err; + } + + /* Set the frequency */ + dw9.time_ref_freq_sel = clk_freq; + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val); + if (err) + return err; + + /* Configure the TS PLL feedback divisor */ + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val); + if (err) + return err; + + dw19.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div; + dw19.tspll_ndivratio = 1; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val); + if (err) + return err; + + /* Configure the TS PLL post divisor */ + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val); + if (err) + return err; + + dw22.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div; + dw22.time1588clk_sel_div2 = 0; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val); + if (err) + return err; + + /* Configure the TS PLL pre divisor and clock source */ + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); + if (err) + return err; + + dw24.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div; + dw24.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div; + dw24.time_ref_sel = clk_src; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); + if (err) + return err; + + /* Finally, enable the PLL */ + dw24.ts_pll_enable = 1; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); + if (err) + return err; + + /* Wait to verify if the PLL locks */ + usleep_range(1000, 5000); + + err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); + if (err) + return err; + + if (!bwm_lf.plllock_true_lock_cri) { + dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n"); + return -EBUSY; + } + + /* Log the current clock configuration */ + ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", + dw24.ts_pll_enable ? 
"enabled" : "disabled", + ice_clk_src_str(dw24.time_ref_sel), + ice_clk_freq_str(dw9.time_ref_freq_sel), + bwm_lf.plllock_true_lock_cri ? "locked" : "unlocked"); + + return 0; +} + +/** + * ice_cfg_cgu_pll_e825c - Configure the Clock Generation Unit for E825-C + * @hw: pointer to the HW struct + * @clk_freq: Clock frequency to program + * @clk_src: Clock source to select (TIME_REF, or TCXO) + * + * Configure the Clock Generation Unit with the desired clock frequency and + * time reference, enabling the PLL which drives the PTP hardware clock. + * + * Return: + * * %0 - success + * * %-EINVAL - input parameters are incorrect + * * %-EBUSY - failed to lock TS PLL + * * %other - CGU read/write failure + */ +static int ice_cfg_cgu_pll_e825c(struct ice_hw *hw, + enum ice_time_ref_freq clk_freq, + enum ice_clk_src clk_src) +{ + union tspll_ro_lock_e825c ro_lock; + union nac_cgu_dword16_e825c dw16; + union nac_cgu_dword23_e825c dw23; + union nac_cgu_dword19 dw19; + union nac_cgu_dword22 dw22; + union nac_cgu_dword24 dw24; + union nac_cgu_dword9 dw9; + int err; + + if (clk_freq >= NUM_ICE_TIME_REF_FREQ) { + dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n", + clk_freq); + return -EINVAL; + } + + if (clk_src >= NUM_ICE_CLK_SRC) { + dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n", + clk_src); + return -EINVAL; + } + + if (clk_src == ICE_CLK_SRC_TCXO && + clk_freq != ICE_TIME_REF_FREQ_156_250) { + dev_warn(ice_hw_to_dev(hw), + "TCXO only supports 156.25 MHz frequency\n"); + return -EINVAL; + } + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val); + if (err) + return err; + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); + if (err) + return err; + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, &dw16.val); + if (err) + return err; + + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val); + if (err) + return err; + + err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val); + if (err) + return err; + + /* Log the current clock configuration */ + ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", + dw24.ts_pll_enable ? "enabled" : "disabled", + ice_clk_src_str(dw23.time_ref_sel), + ice_clk_freq_str(dw9.time_ref_freq_sel), + ro_lock.plllock_true_lock_cri ? 
"locked" : "unlocked"); + + /* Disable the PLL before changing the clock source or frequency */ + if (dw23.ts_pll_enable) { + dw23.ts_pll_enable = 0; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, + dw23.val); + if (err) + return err; + } + + /* Set the frequency */ + dw9.time_ref_freq_sel = clk_freq; + + /* Enable the correct receiver */ + if (clk_src == ICE_CLK_SRC_TCXO) { + dw9.time_ref_en = 0; + dw9.clk_eref0_en = 1; + } else { + dw9.time_ref_en = 1; + dw9.clk_eref0_en = 0; + } + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val); + if (err) + return err; + + /* Choose the referenced frequency */ + dw16.tspll_ck_refclkfreq = + e825c_cgu_params[clk_freq].tspll_ck_refclkfreq; + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD16_E825C, dw16.val); + if (err) + return err; + + /* Configure the TS PLL feedback divisor */ + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val); + if (err) + return err; + + dw19.tspll_fbdiv_intgr = + e825c_cgu_params[clk_freq].tspll_fbdiv_intgr; + dw19.tspll_ndivratio = + e825c_cgu_params[clk_freq].tspll_ndivratio; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val); + if (err) + return err; + + /* Configure the TS PLL post divisor */ + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val); + if (err) + return err; + + /* These two are constant for E825C */ + dw22.time1588clk_div = 5; + dw22.time1588clk_sel_div2 = 0; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val); + if (err) + return err; + + /* Configure the TS PLL pre divisor and clock source */ + err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, &dw23.val); + if (err) + return err; + + dw23.ref1588_ck_div = + e825c_cgu_params[clk_freq].ref1588_ck_div; + dw23.time_ref_sel = clk_src; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val); + if (err) + return err; + + dw24.tspll_fbdiv_frac = + e825c_cgu_params[clk_freq].tspll_fbdiv_frac; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); + if (err) + return err; + + /* Finally, enable the PLL */ + dw23.ts_pll_enable = 1; + + err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD23_E825C, dw23.val); + if (err) + return err; + + /* Wait to verify if the PLL locks */ + usleep_range(1000, 5000); + + err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_LOCK_E825C, &ro_lock.val); + if (err) + return err; + + if (!ro_lock.plllock_true_lock_cri) { + dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n"); + return -EBUSY; + } + + /* Log the current clock configuration */ + ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", + dw24.ts_pll_enable ? "enabled" : "disabled", + ice_clk_src_str(dw23.time_ref_sel), + ice_clk_freq_str(dw9.time_ref_freq_sel), + ro_lock.plllock_true_lock_cri ? "locked" : "unlocked"); + + return 0; +} + +/** + * ice_cfg_cgu_pll_dis_sticky_bits_e82x - disable TS PLL sticky bits + * @hw: pointer to the HW struct + * + * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch on + * losing TS PLL lock, but always show current state. 
+ * + * Return: 0 on success, other error codes when failed to read/write CGU + */ +static int ice_cfg_cgu_pll_dis_sticky_bits_e82x(struct ice_hw *hw) +{ + union tspll_cntr_bist_settings cntr_bist; + int err; + + err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, + &cntr_bist.val); + if (err) + return err; + + /* Disable sticky lock detection so lock err reported is accurate */ + cntr_bist.i_plllock_sel_0 = 0; + cntr_bist.i_plllock_sel_1 = 0; + + return ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, + cntr_bist.val); +} + +/** + * ice_cfg_cgu_pll_dis_sticky_bits_e825c - disable TS PLL sticky bits for E825-C + * @hw: pointer to the HW struct + * + * Configure the Clock Generation Unit TS PLL sticky bits so they don't latch on + * losing TS PLL lock, but always show current state. + * + * Return: 0 on success, other error codes when failed to read/write CGU + */ +static int ice_cfg_cgu_pll_dis_sticky_bits_e825c(struct ice_hw *hw) +{ + union tspll_bw_tdc_e825c bw_tdc; + int err; + + err = ice_read_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, &bw_tdc.val); + if (err) + return err; + + bw_tdc.i_plllock_sel_1_0 = 0; + + return ice_write_cgu_reg_e82x(hw, TSPLL_BW_TDC_E825C, bw_tdc.val); +} + +/** + * ice_init_cgu_e82x - Initialize CGU with settings from firmware + * @hw: pointer to the HW structure + * + * Initialize the Clock Generation Unit of the E822 device. + * + * Return: 0 on success, other error codes when failed to read/write/cfg CGU + */ +static int ice_init_cgu_e82x(struct ice_hw *hw) +{ + struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info; + int err; + + /* Disable sticky lock detection so lock err reported is accurate */ + if (ice_is_e825c(hw)) + err = ice_cfg_cgu_pll_dis_sticky_bits_e825c(hw); + else + err = ice_cfg_cgu_pll_dis_sticky_bits_e82x(hw); + if (err) + return err; + + /* Configure the CGU PLL using the parameters from the function + * capabilities. + */ + if (ice_is_e825c(hw)) + err = ice_cfg_cgu_pll_e825c(hw, ts_info->time_ref, + (enum ice_clk_src)ts_info->clk_src); + else + err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref, + (enum ice_clk_src)ts_info->clk_src); + + return err; +} + +/** + * ice_ptp_tmr_cmd_to_src_reg - Convert to source timer command value + * @hw: pointer to HW struct + * @cmd: Timer command + * + * Return: the source timer command register value for the given PTP timer + * command. + */ +static u32 ice_ptp_tmr_cmd_to_src_reg(struct ice_hw *hw, + enum ice_ptp_tmr_cmd cmd) +{ + u32 cmd_val, tmr_idx; + + switch (cmd) { + case ICE_PTP_INIT_TIME: + cmd_val = GLTSYN_CMD_INIT_TIME; + break; + case ICE_PTP_INIT_INCVAL: + cmd_val = GLTSYN_CMD_INIT_INCVAL; + break; + case ICE_PTP_ADJ_TIME: + cmd_val = GLTSYN_CMD_ADJ_TIME; + break; + case ICE_PTP_ADJ_TIME_AT_TIME: + cmd_val = GLTSYN_CMD_ADJ_INIT_TIME; + break; + case ICE_PTP_NOP: + case ICE_PTP_READ_TIME: + cmd_val = GLTSYN_CMD_READ_TIME; + break; + default: + dev_warn(ice_hw_to_dev(hw), + "Ignoring unrecognized timer command %u\n", cmd); + cmd_val = 0; + } + + tmr_idx = ice_get_ptp_src_clock_index(hw); + + return tmr_idx << SEL_CPK_SRC | cmd_val; +} + +/** + * ice_ptp_tmr_cmd_to_port_reg- Convert to port timer command value + * @hw: pointer to HW struct + * @cmd: Timer command + * + * Note that some hardware families use a different command register value for + * the PHY ports, while other hardware families use the same register values + * as the source timer. + * + * Return: the PHY port timer command register value for the given PTP timer + * command. 
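+ *
+ * On E810, for example, the port value is the source timer command value
+ * masked with TS_CMD_MASK_E810; other PHYs use the PHY_CMD_* encodings.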
+ */ +static u32 ice_ptp_tmr_cmd_to_port_reg(struct ice_hw *hw, + enum ice_ptp_tmr_cmd cmd) +{ + u32 cmd_val, tmr_idx; + + /* Certain hardware families share the same register values for the + * port register and source timer register. + */ + switch (hw->ptp.phy_model) { + case ICE_PHY_E810: + return ice_ptp_tmr_cmd_to_src_reg(hw, cmd) & TS_CMD_MASK_E810; + default: + break; + } + + switch (cmd) { + case ICE_PTP_INIT_TIME: + cmd_val = PHY_CMD_INIT_TIME; + break; + case ICE_PTP_INIT_INCVAL: + cmd_val = PHY_CMD_INIT_INCVAL; + break; + case ICE_PTP_ADJ_TIME: + cmd_val = PHY_CMD_ADJ_TIME; + break; + case ICE_PTP_ADJ_TIME_AT_TIME: + cmd_val = PHY_CMD_ADJ_TIME_AT_TIME; + break; + case ICE_PTP_READ_TIME: + cmd_val = PHY_CMD_READ_TIME; + break; + case ICE_PTP_NOP: + cmd_val = 0; + break; + default: + dev_warn(ice_hw_to_dev(hw), + "Ignoring unrecognized timer command %u\n", cmd); + cmd_val = 0; + } + + tmr_idx = ice_get_ptp_src_clock_index(hw); + + return tmr_idx << SEL_PHY_SRC | cmd_val; +} + /** * ice_ptp_src_cmd - Prepare source timer for a timer command * @hw: pointer to HW structure @@ -235,31 +852,7 @@ static u64 ice_ptp_read_src_incval(struct ice_hw *hw) */ void ice_ptp_src_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) { - u32 cmd_val; - u8 tmr_idx; - - tmr_idx = ice_get_ptp_src_clock_index(hw); - cmd_val = tmr_idx << SEL_CPK_SRC; - - switch (cmd) { - case ICE_PTP_INIT_TIME: - cmd_val |= GLTSYN_CMD_INIT_TIME; - break; - case ICE_PTP_INIT_INCVAL: - cmd_val |= GLTSYN_CMD_INIT_INCVAL; - break; - case ICE_PTP_ADJ_TIME: - cmd_val |= GLTSYN_CMD_ADJ_TIME; - break; - case ICE_PTP_ADJ_TIME_AT_TIME: - cmd_val |= GLTSYN_CMD_ADJ_INIT_TIME; - break; - case ICE_PTP_READ_TIME: - cmd_val |= GLTSYN_CMD_READ_TIME; - break; - case ICE_PTP_NOP: - break; - } + u32 cmd_val = ice_ptp_tmr_cmd_to_src_reg(hw, cmd); wr32(hw, GLTSYN_CMD, cmd_val); } @@ -281,6 +874,1832 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) ice_flush(hw); } +/* 56G PHY device functions + * + * The following functions operate on devices with the ETH 56G PHY. 
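+ * + * A note on the sideband addressing used below: a 32-bit PHY register + * address is split into two 16-bit halves for the sideband message, so an + * illustrative address of 0x00012345 yields msg_addr_low 0x2345 and + * msg_addr_high 0x0001 via lower_16_bits() and upper_16_bits().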
+ */ + +/** + * ice_write_phy_eth56g - Write a PHY port register + * @hw: pointer to the HW struct + * @phy_idx: PHY index + * @addr: PHY register address + * @val: Value to write + * + * Return: 0 on success, other error codes on PHY write failure + */ +static int ice_write_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr, + u32 val) +{ + struct ice_sbq_msg_input phy_msg; + int err; + + phy_msg.opcode = ice_sbq_msg_wr; + + phy_msg.msg_addr_low = lower_16_bits(addr); + phy_msg.msg_addr_high = upper_16_bits(addr); + + phy_msg.data = val; + phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx]; + + err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD); + + if (err) + ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n", + err); + + return err; +} + +/** + * ice_read_phy_eth56g - Read a PHY port register + * @hw: pointer to the HW struct + * @phy_idx: PHY index + * @addr: PHY register address + * @val: Pointer to the value to read (out param) + * + * Return: 0 on success, other error codes on PHY read failure + */ +static int ice_read_phy_eth56g(struct ice_hw *hw, u8 phy_idx, u32 addr, + u32 *val) +{ + struct ice_sbq_msg_input phy_msg; + int err; + + phy_msg.opcode = ice_sbq_msg_rd; + + phy_msg.msg_addr_low = lower_16_bits(addr); + phy_msg.msg_addr_high = upper_16_bits(addr); + + phy_msg.data = 0; + phy_msg.dest_dev = hw->ptp.phy.eth56g.phy_addr[phy_idx]; + + err = ice_sbq_rw_reg(hw, &phy_msg, ICE_AQ_FLAG_RD); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "PTP failed to send msg to phy %d\n", + err); + return err; + } + + *val = phy_msg.data; + + return 0; +} + +/** + * ice_phy_res_address_eth56g - Calculate a PHY port register address + * @port: Port number to be written + * @res_type: resource type (register/memory) + * @offset: Offset from PHY port register base + * @addr: The result address + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + */ +static int ice_phy_res_address_eth56g(u8 port, enum eth56g_res_type res_type, + u32 offset, u32 *addr) +{ + u8 lane = port % ICE_PORTS_PER_QUAD; + u8 phy = ICE_GET_QUAD_NUM(port); + + if (res_type >= NUM_ETH56G_PHY_RES) + return -EINVAL; + + *addr = eth56g_phy_res[res_type].base[phy] + + lane * eth56g_phy_res[res_type].step + offset; + return 0; +} + +/** + * ice_write_port_eth56g - Write a PHY port register + * @hw: pointer to the HW struct + * @offset: PHY register offset + * @port: Port number + * @val: Value to write + * @res_type: resource type (register/memory) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to write to PHY + */ +static int ice_write_port_eth56g(struct ice_hw *hw, u8 port, u32 offset, + u32 val, enum eth56g_res_type res_type) +{ + u8 phy_port = port % hw->ptp.ports_per_phy; + u8 phy_idx = port / hw->ptp.ports_per_phy; + u32 addr; + int err; + + if (port >= hw->ptp.num_lports) + return -EINVAL; + + err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr); + if (err) + return err; + + return ice_write_phy_eth56g(hw, phy_idx, addr, val); +} + +/** + * ice_read_port_eth56g - Read a PHY port register + * @hw: pointer to the HW struct + * @offset: PHY register offset + * @port: Port number + * @val: Pointer to the value to read (out param) + * @res_type: resource type (register/memory) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to read from PHY + */ +static int ice_read_port_eth56g(struct ice_hw *hw, u8 port, u32 offset, + u32 *val, enum eth56g_res_type res_type) +{ + u8 phy_port = 
port % hw->ptp.ports_per_phy; + u8 phy_idx = port / hw->ptp.ports_per_phy; + u32 addr; + int err; + + if (port >= hw->ptp.num_lports) + return -EINVAL; + + err = ice_phy_res_address_eth56g(phy_port, res_type, offset, &addr); + if (err) + return err; + + return ice_read_phy_eth56g(hw, phy_idx, addr, val); +} + +/** + * ice_write_ptp_reg_eth56g - Write a PHY port register + * @hw: pointer to the HW struct + * @port: Port number to be written + * @offset: Offset from PHY port register base + * @val: Value to write + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to write to PHY + */ +static int ice_write_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset, + u32 val) +{ + return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_PTP); +} + +/** + * ice_write_mac_reg_eth56g - Write a MAC PHY port register + * @hw: pointer to the HW struct + * @port: Port number to be written + * @offset: Offset from PHY port register base + * @val: Value to write + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to write to PHY + */ +static int ice_write_mac_reg_eth56g(struct ice_hw *hw, u8 port, u32 offset, + u32 val) +{ + return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_MAC); +} + +/** + * ice_write_xpcs_reg_eth56g - Write a PHY port register + * @hw: pointer to the HW struct + * @port: Port number to be written + * @offset: Offset from PHY port register base + * @val: Value to write + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to write to PHY + */ +static int ice_write_xpcs_reg_eth56g(struct ice_hw *hw, u8 port, u32 offset, + u32 val) +{ + return ice_write_port_eth56g(hw, port, offset, val, + ETH56G_PHY_REG_XPCS); +} + +/** + * ice_read_ptp_reg_eth56g - Read a PHY port register + * @hw: pointer to the HW struct + * @port: Port number to be read + * @offset: Offset from PHY port register base + * @val: Pointer to the value to read (out param) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to read from PHY + */ +static int ice_read_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset, + u32 *val) +{ + return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_PTP); +} + +/** + * ice_read_mac_reg_eth56g - Read a PHY port register + * @hw: pointer to the HW struct + * @port: Port number to be read + * @offset: Offset from PHY port register base + * @val: Pointer to the value to read (out param) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to read from PHY + */ +static int ice_read_mac_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset, + u32 *val) +{ + return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_MAC); +} + +/** + * ice_read_gpcs_reg_eth56g - Read a PHY port register + * @hw: pointer to the HW struct + * @port: Port number to be read + * @offset: Offset from PHY port register base + * @val: Pointer to the value to read (out param) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to read from PHY + */ +static int ice_read_gpcs_reg_eth56g(struct ice_hw *hw, u8 port, u16 offset, + u32 *val) +{ + return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_REG_GPCS); +} + +/** + * ice_read_port_mem_eth56g - Read a PHY port memory location + * @hw: pointer to the HW struct + * 
@port: Port number to be read + * @offset: Offset from PHY port register base + * @val: Pointer to the value to read (out param) + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to read from PHY + */ +static int ice_read_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset, + u32 *val) +{ + return ice_read_port_eth56g(hw, port, offset, val, ETH56G_PHY_MEM_PTP); +} + +/** + * ice_write_port_mem_eth56g - Write a PHY port memory location + * @hw: pointer to the HW struct + * @port: Port number to be written + * @offset: Offset from PHY port register base + * @val: Value to write + * + * Return: + * * %0 - success + * * %EINVAL - invalid port number or resource type + * * %other - failed to write to PHY + */ +static int ice_write_port_mem_eth56g(struct ice_hw *hw, u8 port, u16 offset, + u32 val) +{ + return ice_write_port_eth56g(hw, port, offset, val, ETH56G_PHY_MEM_PTP); +} + +/** + * ice_is_64b_phy_reg_eth56g - Check if this is a 64bit PHY register + * @low_addr: the low address to check + * @high_addr: on return, contains the high address of the 64bit register + * + * On return, @high_addr contains the high register offset to use. + * + * Return: true if the provided low address is one of the known 64bit PHY values + * represented as two 32bit registers, false otherwise. + */ +static bool ice_is_64b_phy_reg_eth56g(u16 low_addr, u16 *high_addr) +{ + switch (low_addr) { + case PHY_REG_TX_TIMER_INC_PRE_L: + *high_addr = PHY_REG_TX_TIMER_INC_PRE_U; + return true; + case PHY_REG_RX_TIMER_INC_PRE_L: + *high_addr = PHY_REG_RX_TIMER_INC_PRE_U; + return true; + case PHY_REG_TX_CAPTURE_L: + *high_addr = PHY_REG_TX_CAPTURE_U; + return true; + case PHY_REG_RX_CAPTURE_L: + *high_addr = PHY_REG_RX_CAPTURE_U; + return true; + case PHY_REG_TOTAL_TX_OFFSET_L: + *high_addr = PHY_REG_TOTAL_TX_OFFSET_U; + return true; + case PHY_REG_TOTAL_RX_OFFSET_L: + *high_addr = PHY_REG_TOTAL_RX_OFFSET_U; + return true; + case PHY_REG_TX_MEMORY_STATUS_L: + *high_addr = PHY_REG_TX_MEMORY_STATUS_U; + return true; + default: + return false; + } +} + +/** + * ice_is_40b_phy_reg_eth56g - Check if this is a 40bit PHY register + * @low_addr: the low address to check + * @high_addr: on return, contains the high address of the 40bit value + * + * On return, @high_addr contains the high register offset to use. + * + * Return: true if the provided low address is one of the known 40bit PHY + * values split into two registers with the lower 8 bits in the low register and + * the upper 32 bits in the high register, false otherwise. + */ +static bool ice_is_40b_phy_reg_eth56g(u16 low_addr, u16 *high_addr) +{ + switch (low_addr) { + case PHY_REG_TIMETUS_L: + *high_addr = PHY_REG_TIMETUS_U; + return true; + case PHY_PCS_REF_TUS_L: + *high_addr = PHY_PCS_REF_TUS_U; + return true; + case PHY_PCS_REF_INC_L: + *high_addr = PHY_PCS_REF_INC_U; + return true; + default: + return false; + } +} + +/** + * ice_read_64b_phy_reg_eth56g - Read a 64bit value from PHY registers + * @hw: pointer to the HW struct + * @port: PHY port to read from + * @low_addr: offset of the lower register to read from + * @val: on return, the contents of the 64bit value from the PHY registers + * @res_type: resource type + * + * Check if the caller has specified a known 64 bit register offset and read + * the two registers associated with a 64bit value and return it in the val + * pointer.
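+ * + * For example, an illustrative hi of 0x11223344 and lo of 0xDDCCBBAA combine + * as ((u64)hi << 32) | lo, i.e. 0x11223344DDCCBBAA.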
+ * + * Return: + * * %0 - success + * * %EINVAL - not a 64 bit register + * * %other - failed to read from PHY + */ +static int ice_read_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr, + u64 *val, enum eth56g_res_type res_type) +{ + u16 high_addr; + u32 lo, hi; + int err; + + if (!ice_is_64b_phy_reg_eth56g(low_addr, &high_addr)) + return -EINVAL; + + err = ice_read_port_eth56g(hw, port, low_addr, &lo, res_type); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read from low register %#08x, err %d\n", + low_addr, err); + return err; + } + + err = ice_read_port_eth56g(hw, port, high_addr, &hi, res_type); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read from high register %#08x, err %d\n", + high_addr, err); + return err; + } + + *val = ((u64)hi << 32) | lo; + + return 0; +} + +/** + * ice_read_64b_ptp_reg_eth56g - Read a 64bit value from PHY registers + * @hw: pointer to the HW struct + * @port: PHY port to read from + * @low_addr: offset of the lower register to read from + * @val: on return, the contents of the 64bit value from the PHY registers + * + * Check if the caller has specified a known 64 bit register offset and read + * the two registers associated with a 64bit value and return it in the val + * pointer. + * + * Return: + * * %0 - success + * * %EINVAL - not a 64 bit register + * * %other - failed to read from PHY + */ +static int ice_read_64b_ptp_reg_eth56g(struct ice_hw *hw, u8 port, u16 low_addr, + u64 *val) +{ + return ice_read_64b_phy_reg_eth56g(hw, port, low_addr, val, + ETH56G_PHY_REG_PTP); +} + +/** + * ice_write_40b_phy_reg_eth56g - Write a 40b value to the PHY + * @hw: pointer to the HW struct + * @port: port to write to + * @low_addr: offset of the low register + * @val: 40b value to write + * @res_type: resource type + * + * Check if the caller has specified a known 40 bit register offset and write + * the provided 40b value to the two associated registers by splitting it up + * into two chunks, the lower 8 bits and the upper 32 bits. + * + * Return: + * * %0 - success + * * %EINVAL - not a 40 bit register + * * %other - failed to write to PHY + */ +static int ice_write_40b_phy_reg_eth56g(struct ice_hw *hw, u8 port, + u16 low_addr, u64 val, + enum eth56g_res_type res_type) +{ + u16 high_addr; + u32 lo, hi; + int err; + + if (!ice_is_40b_phy_reg_eth56g(low_addr, &high_addr)) + return -EINVAL; + + lo = FIELD_GET(P_REG_40B_LOW_M, val); + hi = (u32)(val >> P_REG_40B_HIGH_S); + + err = ice_write_port_eth56g(hw, port, low_addr, lo, res_type); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n", + low_addr, err); + return err; + } + + err = ice_write_port_eth56g(hw, port, high_addr, hi, res_type); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n", + high_addr, err); + return err; + } + + return 0; +} + +/** + * ice_write_40b_ptp_reg_eth56g - Write a 40b value to the PHY + * @hw: pointer to the HW struct + * @port: port to write to + * @low_addr: offset of the low register + * @val: 40b value to write + * + * Check if the caller has specified a known 40 bit register offset and write + * the provided 40b value to the two associated registers by splitting it up + * into two chunks, the lower 8 bits and the upper 32 bits.
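+ * + * For example, an illustrative 40b value of 0x1122334455 is split into + * lo == 0x55 (the lower 8 bits) and hi == 0x11223344 (the upper 32 bits) + * before the two writes are issued.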
+ * + * Return: + * * %0 - success + * * %EINVAL - not a 40 bit register + * * %other - failed to write to PHY + */ +static int ice_write_40b_ptp_reg_eth56g(struct ice_hw *hw, u8 port, + u16 low_addr, u64 val) +{ + return ice_write_40b_phy_reg_eth56g(hw, port, low_addr, val, + ETH56G_PHY_REG_PTP); +} + +/** + * ice_write_64b_phy_reg_eth56g - Write a 64bit value to PHY registers + * @hw: pointer to the HW struct + * @port: PHY port to write to + * @low_addr: offset of the lower register to write to + * @val: the contents of the 64bit value to write to PHY + * @res_type: resource type + * + * Check if the caller has specified a known 64 bit register offset and write + * the 64bit value to the two associated 32bit PHY registers. + * + * Return: + * * %0 - success + * * %EINVAL - not a 64 bit register + * * %other - failed to write to PHY + */ +static int ice_write_64b_phy_reg_eth56g(struct ice_hw *hw, u8 port, + u16 low_addr, u64 val, + enum eth56g_res_type res_type) +{ + u16 high_addr; + u32 lo, hi; + int err; + + if (!ice_is_64b_phy_reg_eth56g(low_addr, &high_addr)) + return -EINVAL; + + lo = lower_32_bits(val); + hi = upper_32_bits(val); + + err = ice_write_port_eth56g(hw, port, low_addr, lo, res_type); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to low register 0x%08x, err %d\n", + low_addr, err); + return err; + } + + err = ice_write_port_eth56g(hw, port, high_addr, hi, res_type); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write to high register 0x%08x, err %d\n", + high_addr, err); + return err; + } + + return 0; +} + +/** + * ice_write_64b_ptp_reg_eth56g - Write a 64bit value to PHY registers + * @hw: pointer to the HW struct + * @port: PHY port to write to + * @low_addr: offset of the lower register to write to + * @val: the contents of the 64bit value to write to PHY + * + * Check if the caller has specified a known 64 bit register offset and write + * the 64bit value to the two associated 32bit PHY registers. + * + * Return: + * * %0 - success + * * %EINVAL - not a 64 bit register + * * %other - failed to write to PHY + */ +static int ice_write_64b_ptp_reg_eth56g(struct ice_hw *hw, u8 port, + u16 low_addr, u64 val) +{ + return ice_write_64b_phy_reg_eth56g(hw, port, low_addr, val, + ETH56G_PHY_REG_PTP); +} + +/** + * ice_read_ptp_tstamp_eth56g - Read a PHY timestamp out of the port memory + * @hw: pointer to the HW struct + * @port: the port to read from + * @idx: the timestamp index to read + * @tstamp: on return, the 40bit timestamp value + * + * Read a 40bit timestamp value out of the two associated entries in the + * port memory block of the internal PHYs of the 56G devices. + * + * Return: + * * %0 - success + * * %other - failed to read from PHY + */ +static int ice_read_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx, + u64 *tstamp) +{ + u16 lo_addr, hi_addr; + u32 lo, hi; + int err; + + lo_addr = (u16)PHY_TSTAMP_L(idx); + hi_addr = (u16)PHY_TSTAMP_U(idx); + + err = ice_read_port_mem_eth56g(hw, port, lo_addr, &lo); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read low PTP timestamp register, err %d\n", + err); + return err; + } + + err = ice_read_port_mem_eth56g(hw, port, hi_addr, &hi); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read high PTP timestamp register, err %d\n", + err); + return err; + } + + /* For 56G based internal PHYs, the timestamp is reported with the + * lower 8 bits in the low register, and the upper 32 bits in the high + * register.
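+ * + * For example, an illustrative hi of 0x12345678 and lo of 0xAB combine to a + * 40bit timestamp of 0x12345678AB, assuming TS_PHY_HIGH_S is a shift of 8 + * and TS_PHY_LOW_M masks the low 8 bits.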
+ */ + *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M); + + return 0; +} + +/** + * ice_clear_ptp_tstamp_eth56g - Clear a timestamp from the port block + * @hw: pointer to the HW struct + * @port: the port to read from + * @idx: the timestamp index to reset + * + * Read and then forcibly clear the timestamp index to ensure the valid bit is + * cleared and the timestamp status bit is reset in the PHY port memory of + * internal PHYs of the 56G devices. + * + * To directly clear the contents of the timestamp block entirely, discarding + * all timestamp data at once, software should instead use + * ice_ptp_reset_ts_memory_quad_eth56g(). + * + * This function should only be called on an idx whose bit is set according to + * ice_get_phy_tx_tstamp_ready(). + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_clear_ptp_tstamp_eth56g(struct ice_hw *hw, u8 port, u8 idx) +{ + u64 unused_tstamp; + u16 lo_addr; + int err; + + /* Read the timestamp register to ensure the timestamp status bit is + * cleared. + */ + err = ice_read_ptp_tstamp_eth56g(hw, port, idx, &unused_tstamp); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read the PHY timestamp register for port %u, idx %u, err %d\n", + port, idx, err); + } + + lo_addr = (u16)PHY_TSTAMP_L(idx); + + err = ice_write_port_mem_eth56g(hw, port, lo_addr, 0); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to clear low PTP timestamp register for port %u, idx %u, err %d\n", + port, idx, err); + return err; + } + + return 0; +} + +/** + * ice_ptp_reset_ts_memory_eth56g - Clear all timestamps from the port block + * @hw: pointer to the HW struct + */ +static void ice_ptp_reset_ts_memory_eth56g(struct ice_hw *hw) +{ + unsigned int port; + + for (port = 0; port < hw->ptp.num_lports; port++) { + ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_L, + 0); + ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_U, + 0); + } +} + +/** + * ice_ptp_prep_port_time_eth56g - Prepare one PHY port with initial time + * @hw: pointer to the HW struct + * @port: port number + * @time: time to initialize the PHY port clocks to + * + * Write a new initial time value into registers of a specific PHY port. + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_ptp_prep_port_time_eth56g(struct ice_hw *hw, u8 port, + u64 time) +{ + int err; + + /* Tx case */ + err = ice_write_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_L, + time); + if (err) + return err; + + /* Rx case */ + return ice_write_64b_ptp_reg_eth56g(hw, port, + PHY_REG_RX_TIMER_INC_PRE_L, time); +} + +/** + * ice_ptp_prep_phy_time_eth56g - Prepare PHY ports with initial time + * @hw: pointer to the HW struct + * @time: Time to initialize the PHY port clocks to + * + * Program the PHY port registers with a new initial time value. The port + * clock will be initialized once the driver issues an ICE_PTP_INIT_TIME sync + * command. The time value is the upper 32 bits of the PHY timer, usually in + * units of nominal nanoseconds. + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_ptp_prep_phy_time_eth56g(struct ice_hw *hw, u32 time) +{ + u64 phy_time; + u8 port; + + /* The time represents the upper 32 bits of the PHY timer, so we need + * to shift to account for this when programming.
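+ * + * For example, an illustrative time of 0x00000002 is programmed as + * phy_time == (u64)0x00000002 << 32 == 0x0000000200000000.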
+ */ + phy_time = (u64)time << 32; + + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + err = ice_ptp_prep_port_time_eth56g(hw, port, phy_time); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write init time for port %u, err %d\n", + port, err); + return err; + } + } + + return 0; +} + +/** + * ice_ptp_prep_port_adj_eth56g - Prepare a single port for time adjust + * @hw: pointer to HW struct + * @port: Port number to be programmed + * @time: time in cycles to adjust the port clocks + * + * Program the port for an atomic adjustment by writing the Tx and Rx timer + * registers. The atomic adjustment won't be completed until the driver issues + * an ICE_PTP_ADJ_TIME command. + * + * Note that time is not in units of nanoseconds. It is in clock time + * including the lower sub-nanosecond portion of the port timer. + * + * Negative adjustments are supported using 2s complement arithmetic. + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_ptp_prep_port_adj_eth56g(struct ice_hw *hw, u8 port, s64 time) +{ + u32 l_time, u_time; + int err; + + l_time = lower_32_bits(time); + u_time = upper_32_bits(time); + + /* Tx case */ + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_L, + l_time); + if (err) + goto exit_err; + + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TIMER_INC_PRE_U, + u_time); + if (err) + goto exit_err; + + /* Rx case */ + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TIMER_INC_PRE_L, + l_time); + if (err) + goto exit_err; + + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TIMER_INC_PRE_U, + u_time); + if (err) + goto exit_err; + + return 0; + +exit_err: + ice_debug(hw, ICE_DBG_PTP, "Failed to write time adjust for port %u, err %d\n", + port, err); + return err; +} + +/** + * ice_ptp_prep_phy_adj_eth56g - Prep PHY ports for a time adjustment + * @hw: pointer to HW struct + * @adj: adjustment in nanoseconds + * + * Prepare the PHY ports for an atomic time adjustment by programming the PHY + * Tx and Rx port registers. The actual adjustment is completed by issuing an + * ICE_PTP_ADJ_TIME or ICE_PTP_ADJ_TIME_AT_TIME sync command. + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_ptp_prep_phy_adj_eth56g(struct ice_hw *hw, s32 adj) +{ + s64 cycles; + u8 port; + + /* The port clock supports adjustment of the sub-nanosecond portion of + * the clock (lowest 32 bits). We shift the provided adjustment in + * nanoseconds by 32 to calculate the appropriate adjustment to program + * into the PHY ports. + */ + cycles = (s64)adj << 32; + + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + err = ice_ptp_prep_port_adj_eth56g(hw, port, cycles); + if (err) + return err; + } + + return 0; +} + +/** + * ice_ptp_prep_phy_incval_eth56g - Prepare PHY ports with a new increment value + * @hw: pointer to HW struct + * @incval: new increment value to prepare + * + * Prepare each of the PHY ports for a new increment value by programming the + * port's TIMETUS registers. The new increment value will be updated after + * issuing an ICE_PTP_INIT_INCVAL command.
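+ * + * One possible calling sequence, sketched after ice_start_phy_timer_eth56g() + * below, would be + * + *	err = ice_ptp_prep_phy_incval_eth56g(hw, incval); + *	if (!err) + *		err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL); + *	if (!err) + *		ice_ptp_exec_tmr_cmd(hw);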
+ * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_ptp_prep_phy_incval_eth56g(struct ice_hw *hw, u64 incval) +{ + u8 port; + + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L, + incval); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write incval for port %u, err %d\n", + port, err); + return err; + } + } + + return 0; +} + +/** + * ice_ptp_read_port_capture_eth56g - Read a port's local time capture + * @hw: pointer to HW struct + * @port: Port number to read + * @tx_ts: on return, the Tx port time capture + * @rx_ts: on return, the Rx port time capture + * + * Read the port's Tx and Rx local time capture values. + * + * Return: + * * %0 - success + * * %other - failed to read from PHY + */ +static int ice_ptp_read_port_capture_eth56g(struct ice_hw *hw, u8 port, + u64 *tx_ts, u64 *rx_ts) +{ + int err; + + /* Tx case */ + err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_CAPTURE_L, + tx_ts); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read REG_TX_CAPTURE, err %d\n", + err); + return err; + } + + ice_debug(hw, ICE_DBG_PTP, "tx_init = %#016llx\n", *tx_ts); + + /* Rx case */ + err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_RX_CAPTURE_L, + rx_ts); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_CAPTURE, err %d\n", + err); + return err; + } + + ice_debug(hw, ICE_DBG_PTP, "rx_init = %#016llx\n", *rx_ts); + + return 0; +} + +/** + * ice_ptp_write_port_cmd_eth56g - Prepare a single PHY port for a timer command + * @hw: pointer to HW struct + * @port: Port to which cmd has to be sent + * @cmd: Command to be sent to the port + * + * Prepare the requested port for an upcoming timer sync command. + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_ptp_write_port_cmd_eth56g(struct ice_hw *hw, u8 port, + enum ice_ptp_tmr_cmd cmd) +{ + u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd); + int err; + + /* Tx case */ + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_TMR_CMD, val); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n", + err); + return err; + } + + /* Rx case */ + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_TMR_CMD, val); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n", + err); + return err; + } + + return 0; +} + +/** + * ice_phy_get_speed_eth56g - Get link speed based on PHY link type + * @li: pointer to link information struct + * + * Return: simplified ETH56G PHY speed + */ +static enum ice_eth56g_link_spd +ice_phy_get_speed_eth56g(struct ice_link_status *li) +{ + u16 speed = ice_get_link_speed_based_on_phy_type(li->phy_type_low, + li->phy_type_high); + + switch (speed) { + case ICE_AQ_LINK_SPEED_1000MB: + return ICE_ETH56G_LNK_SPD_1G; + case ICE_AQ_LINK_SPEED_2500MB: + return ICE_ETH56G_LNK_SPD_2_5G; + case ICE_AQ_LINK_SPEED_10GB: + return ICE_ETH56G_LNK_SPD_10G; + case ICE_AQ_LINK_SPEED_25GB: + return ICE_ETH56G_LNK_SPD_25G; + case ICE_AQ_LINK_SPEED_40GB: + return ICE_ETH56G_LNK_SPD_40G; + case ICE_AQ_LINK_SPEED_50GB: + switch (li->phy_type_low) { + case ICE_PHY_TYPE_LOW_50GBASE_SR: + case ICE_PHY_TYPE_LOW_50GBASE_FR: + case ICE_PHY_TYPE_LOW_50GBASE_LR: + case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4: + case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC: + case ICE_PHY_TYPE_LOW_50G_AUI1: + return ICE_ETH56G_LNK_SPD_50G; + default: + return ICE_ETH56G_LNK_SPD_50G2; + } + case ICE_AQ_LINK_SPEED_100GB: + if (li->phy_type_high || + 
li->phy_type_low == ICE_PHY_TYPE_LOW_100GBASE_SR2) + return ICE_ETH56G_LNK_SPD_100G2; + else + return ICE_ETH56G_LNK_SPD_100G; + default: + return ICE_ETH56G_LNK_SPD_1G; + } +} + +/** + * ice_phy_cfg_parpcs_eth56g - Configure TUs per PAR/PCS clock cycle + * @hw: pointer to the HW struct + * @port: port to configure + * + * Configure the number of TUs for the PAR and PCS clocks used as part of the + * timestamp calibration process. + * + * Return: + * * %0 - success + * * %other - PHY read/write failed + */ +static int ice_phy_cfg_parpcs_eth56g(struct ice_hw *hw, u8 port) +{ + u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1); + u32 val; + int err; + + err = ice_write_xpcs_reg_eth56g(hw, port, PHY_VENDOR_TXLANE_THRESH, + ICE_ETH56G_NOMINAL_THRESH4); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write VENDOR_TXLANE_THRESH, status: %d", + err); + return err; + } + + switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) { + case ICE_ETH56G_LNK_SPD_1G: + case ICE_ETH56G_LNK_SPD_2_5G: + err = ice_read_ptp_reg_eth56g(hw, port_blk, + PHY_GPCS_CONFIG_REG0, &val); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read PHY_GPCS_CONFIG_REG0, status: %d", + err); + return err; + } + + val &= ~PHY_GPCS_CONFIG_REG0_TX_THR_M; + val |= FIELD_PREP(PHY_GPCS_CONFIG_REG0_TX_THR_M, + ICE_ETH56G_NOMINAL_TX_THRESH); + + err = ice_write_ptp_reg_eth56g(hw, port_blk, + PHY_GPCS_CONFIG_REG0, val); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_GPCS_CONFIG_REG0, status: %d", + err); + return err; + } + break; + default: + break; + } + + err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_PCS_REF_TUS_L, + ICE_ETH56G_NOMINAL_PCS_REF_TUS); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_PCS_REF_TUS, status: %d", + err); + return err; + } + + err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_PCS_REF_INC_L, + ICE_ETH56G_NOMINAL_PCS_REF_INC); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to write PHY_PCS_REF_INC, status: %d", + err); + return err; + } + + return 0; +} + +/** + * ice_phy_cfg_ptp_1step_eth56g - Configure 1-step PTP settings + * @hw: Pointer to the HW struct + * @port: Port to configure + * + * Return: + * * %0 - success + * * %other - PHY read/write failed + */ +int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port) +{ + u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1); + u8 blk_port = port & (ICE_PORTS_PER_QUAD - 1); + bool enable, sfd_ena; + u32 val, peer_delay; + int err; + + enable = hw->ptp.phy.eth56g.onestep_ena; + peer_delay = hw->ptp.phy.eth56g.peer_delay; + sfd_ena = hw->ptp.phy.eth56g.sfd_ena; + + /* PHY_PTP_1STEP_CONFIG */ + err = ice_read_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, &val); + if (err) + return err; + + if (enable) + val |= blk_port; + else + val &= ~blk_port; + + val &= ~(PHY_PTP_1STEP_T1S_UP64_M | PHY_PTP_1STEP_T1S_DELTA_M); + + err = ice_write_ptp_reg_eth56g(hw, port_blk, PHY_PTP_1STEP_CONFIG, val); + if (err) + return err; + + /* PHY_PTP_1STEP_PEER_DELAY */ + val = FIELD_PREP(PHY_PTP_1STEP_PD_DELAY_M, peer_delay); + if (peer_delay) + val |= PHY_PTP_1STEP_PD_ADD_PD_M; + val |= PHY_PTP_1STEP_PD_DLY_V_M; + err = ice_write_ptp_reg_eth56g(hw, port_blk, + PHY_PTP_1STEP_PEER_DELAY(blk_port), val); + if (err) + return err; + + val &= ~PHY_PTP_1STEP_PD_DLY_V_M; + err = ice_write_ptp_reg_eth56g(hw, port_blk, + PHY_PTP_1STEP_PEER_DELAY(blk_port), val); + if (err) + return err; + + /* PHY_MAC_XIF_MODE */ + err = ice_read_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, &val); + if (err) + return err; + + val &= ~(PHY_MAC_XIF_1STEP_ENA_M | 
PHY_MAC_XIF_TS_BIN_MODE_M | + PHY_MAC_XIF_TS_SFD_ENA_M | PHY_MAC_XIF_GMII_TS_SEL_M); + + switch (ice_phy_get_speed_eth56g(&hw->port_info->phy.link_info)) { + case ICE_ETH56G_LNK_SPD_1G: + case ICE_ETH56G_LNK_SPD_2_5G: + val |= PHY_MAC_XIF_GMII_TS_SEL_M; + break; + default: + break; + } + + val |= FIELD_PREP(PHY_MAC_XIF_1STEP_ENA_M, enable) | + FIELD_PREP(PHY_MAC_XIF_TS_BIN_MODE_M, enable) | + FIELD_PREP(PHY_MAC_XIF_TS_SFD_ENA_M, sfd_ena); + + return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_XIF_MODE, val); +} + +/** + * mul_u32_u32_fx_q9 - Multiply two u32 fixed point Q9 values + * @a: multiplier value + * @b: multiplicand value + * + * Return: result of multiplication + */ +static u32 mul_u32_u32_fx_q9(u32 a, u32 b) +{ + return (u32)(((u64)a * b) >> ICE_ETH56G_MAC_CFG_FRAC_W); +} + +/** + * add_u32_u32_fx - Add two u32 fixed point values and discard overflow + * @a: first value + * @b: second value + * + * Return: result of addition + */ +static u32 add_u32_u32_fx(u32 a, u32 b) +{ + return lower_32_bits(((u64)a + b)); +} + +/** + * ice_ptp_calc_bitslip_eth56g - Calculate bitslip value + * @hw: pointer to the HW struct + * @port: port to configure + * @bs: bitslip multiplier + * @fc: FC-FEC enabled + * @rs: RS-FEC enabled + * @spd: link speed + * + * Return: calculated bitslip value + */ +static u32 ice_ptp_calc_bitslip_eth56g(struct ice_hw *hw, u8 port, u32 bs, + bool fc, bool rs, + enum ice_eth56g_link_spd spd) +{ + u8 port_offset = port & (ICE_PORTS_PER_QUAD - 1); + u8 port_blk = port & ~(ICE_PORTS_PER_QUAD - 1); + u32 bitslip; + int err; + + if (!bs || rs) + return 0; + + if (spd == ICE_ETH56G_LNK_SPD_1G || spd == ICE_ETH56G_LNK_SPD_2_5G) + err = ice_read_gpcs_reg_eth56g(hw, port, PHY_GPCS_BITSLIP, + &bitslip); + else + err = ice_read_ptp_reg_eth56g(hw, port_blk, + PHY_REG_SD_BIT_SLIP(port_offset), + &bitslip); + if (err) + return 0; + + if (spd == ICE_ETH56G_LNK_SPD_1G && !bitslip) { + /* Bitslip register value of 0 corresponds to 10 so substitute + * it for calculations + */ + bitslip = 10; + } else if (spd == ICE_ETH56G_LNK_SPD_10G || + spd == ICE_ETH56G_LNK_SPD_25G) { + if (fc) + bitslip = bitslip * 2 + 32; + else + bitslip = (u32)((s32)bitslip * -1 + 20); + } + + bitslip <<= ICE_ETH56G_MAC_CFG_FRAC_W; + return mul_u32_u32_fx_q9(bitslip, bs); +} + +/** + * ice_ptp_calc_deskew_eth56g - Calculate deskew value + * @hw: pointer to the HW struct + * @port: port to configure + * @ds: deskew multiplier + * @rs: RS-FEC enabled + * @spd: link speed + * + * Return: calculated deskew value + */ +static u32 ice_ptp_calc_deskew_eth56g(struct ice_hw *hw, u8 port, u32 ds, + bool rs, enum ice_eth56g_link_spd spd) +{ + u32 deskew_i, deskew_f; + int err; + + if (!ds) + return 0; + + read_poll_timeout(ice_read_ptp_reg_eth56g, err, + FIELD_GET(PHY_REG_DESKEW_0_VALID, deskew_i), 500, + 50 * USEC_PER_MSEC, false, hw, port, PHY_REG_DESKEW_0, + &deskew_i); + if (err) + return err; + + deskew_f = FIELD_GET(PHY_REG_DESKEW_0_RLEVEL_FRAC, deskew_i); + deskew_i = FIELD_GET(PHY_REG_DESKEW_0_RLEVEL, deskew_i); + + if (rs && spd == ICE_ETH56G_LNK_SPD_50G2) + ds = 0x633; /* 3.1 */ + else if (rs && spd == ICE_ETH56G_LNK_SPD_100G) + ds = 0x31b; /* 1.552 */ + + deskew_i = FIELD_PREP(ICE_ETH56G_MAC_CFG_RX_OFFSET_INT, deskew_i); + /* Shift 3 fractional bits to the end of the integer part */ + deskew_f <<= ICE_ETH56G_MAC_CFG_FRAC_W - PHY_REG_DESKEW_0_RLEVEL_FRAC_W; + return mul_u32_u32_fx_q9(deskew_i | deskew_f, ds); +} + +/** + * ice_phy_set_offsets_eth56g - Set Tx/Rx offset values + * @hw: pointer to the HW struct + 
* @port: port to configure + * @spd: link speed + * @cfg: structure to store output values + * @fc: FC-FEC enabled + * @rs: RS-FEC enabled + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_phy_set_offsets_eth56g(struct ice_hw *hw, u8 port, + enum ice_eth56g_link_spd spd, + const struct ice_eth56g_mac_reg_cfg *cfg, + bool fc, bool rs) +{ + u32 rx_offset, tx_offset, bs_ds; + bool onestep, sfd; + + onestep = hw->ptp.phy.eth56g.onestep_ena; + sfd = hw->ptp.phy.eth56g.sfd_ena; + bs_ds = cfg->rx_offset.bs_ds; + + if (fc) + rx_offset = cfg->rx_offset.fc; + else if (rs) + rx_offset = cfg->rx_offset.rs; + else + rx_offset = cfg->rx_offset.no_fec; + + rx_offset = add_u32_u32_fx(rx_offset, cfg->rx_offset.serdes); + if (sfd) + rx_offset = add_u32_u32_fx(rx_offset, cfg->rx_offset.sfd); + + if (spd < ICE_ETH56G_LNK_SPD_40G) + bs_ds = ice_ptp_calc_bitslip_eth56g(hw, port, bs_ds, fc, rs, + spd); + else + bs_ds = ice_ptp_calc_deskew_eth56g(hw, port, bs_ds, rs, spd); + rx_offset = add_u32_u32_fx(rx_offset, bs_ds); + rx_offset &= ICE_ETH56G_MAC_CFG_RX_OFFSET_INT | + ICE_ETH56G_MAC_CFG_RX_OFFSET_FRAC; + + if (fc) + tx_offset = cfg->tx_offset.fc; + else if (rs) + tx_offset = cfg->tx_offset.rs; + else + tx_offset = cfg->tx_offset.no_fec; + tx_offset += cfg->tx_offset.serdes + cfg->tx_offset.sfd * sfd + + cfg->tx_offset.onestep * onestep; + + ice_write_mac_reg_eth56g(hw, port, PHY_MAC_RX_OFFSET, rx_offset); + return ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TX_OFFSET, tx_offset); +} + +/** + * ice_phy_cfg_mac_eth56g - Configure MAC for PTP + * @hw: Pointer to the HW struct + * @port: Port to configure + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +static int ice_phy_cfg_mac_eth56g(struct ice_hw *hw, u8 port) +{ + const struct ice_eth56g_mac_reg_cfg *cfg; + enum ice_eth56g_link_spd spd; + struct ice_link_status *li; + bool fc = false; + bool rs = false; + bool onestep; + u32 val; + int err; + + onestep = hw->ptp.phy.eth56g.onestep_ena; + li = &hw->port_info->phy.link_info; + spd = ice_phy_get_speed_eth56g(li); + if (!!(li->an_info & ICE_AQ_FEC_EN)) { + if (spd == ICE_ETH56G_LNK_SPD_10G) { + fc = true; + } else { + fc = !!(li->fec_info & ICE_AQ_LINK_25G_KR_FEC_EN); + rs = !!(li->fec_info & ~ICE_AQ_LINK_25G_KR_FEC_EN); + } + } + cfg = &eth56g_mac_cfg[spd]; + + err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_RX_MODULO, 0); + if (err) + return err; + + err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TX_MODULO, 0); + if (err) + return err; + + val = FIELD_PREP(PHY_MAC_TSU_CFG_TX_MODE_M, + cfg->tx_mode.def + rs * cfg->tx_mode.rs) | + FIELD_PREP(PHY_MAC_TSU_CFG_TX_MII_MK_DLY_M, cfg->tx_mk_dly) | + FIELD_PREP(PHY_MAC_TSU_CFG_TX_MII_CW_DLY_M, + cfg->tx_cw_dly.def + + onestep * cfg->tx_cw_dly.onestep) | + FIELD_PREP(PHY_MAC_TSU_CFG_RX_MODE_M, + cfg->rx_mode.def + rs * cfg->rx_mode.rs) | + FIELD_PREP(PHY_MAC_TSU_CFG_RX_MII_MK_DLY_M, + cfg->rx_mk_dly.def + rs * cfg->rx_mk_dly.rs) | + FIELD_PREP(PHY_MAC_TSU_CFG_RX_MII_CW_DLY_M, + cfg->rx_cw_dly.def + rs * cfg->rx_cw_dly.rs) | + FIELD_PREP(PHY_MAC_TSU_CFG_BLKS_PER_CLK_M, cfg->blks_per_clk); + err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_TSU_CONFIG, val); + if (err) + return err; + + err = ice_write_mac_reg_eth56g(hw, port, PHY_MAC_BLOCKTIME, + cfg->blktime); + if (err) + return err; + + err = ice_phy_set_offsets_eth56g(hw, port, spd, cfg, fc, rs); + if (err) + return err; + + if (spd == ICE_ETH56G_LNK_SPD_25G && !rs) + val = 0; + else + val = cfg->mktime; + + return ice_write_mac_reg_eth56g(hw, 
port, PHY_MAC_MARKERTIME, val); +} + +/** + * ice_phy_cfg_intr_eth56g - Configure TX timestamp interrupt + * @hw: pointer to the HW struct + * @port: the timestamp port + * @ena: enable or disable interrupt + * @threshold: interrupt threshold + * + * Configure TX timestamp interrupt for the specified port + * + * Return: + * * %0 - success + * * %other - PHY read/write failed + */ +int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold) +{ + int err; + u32 val; + + err = ice_read_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, &val); + if (err) + return err; + + if (ena) { + val |= PHY_TS_INT_CONFIG_ENA_M; + val &= ~PHY_TS_INT_CONFIG_THRESHOLD_M; + val |= FIELD_PREP(PHY_TS_INT_CONFIG_THRESHOLD_M, threshold); + } else { + val &= ~PHY_TS_INT_CONFIG_ENA_M; + } + + return ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TS_INT_CONFIG, val); +} + +/** + * ice_read_phy_and_phc_time_eth56g - Simultaneously capture PHC and PHY time + * @hw: pointer to the HW struct + * @port: the PHY port to read + * @phy_time: on return, the 64bit PHY timer value + * @phc_time: on return, the lower 64bits of PHC time + * + * Issue a ICE_PTP_READ_TIME timer command to simultaneously capture the PHY + * and PHC timer values. + * + * Return: + * * %0 - success + * * %other - PHY read/write failed + */ +static int ice_read_phy_and_phc_time_eth56g(struct ice_hw *hw, u8 port, + u64 *phy_time, u64 *phc_time) +{ + u64 tx_time, rx_time; + u32 zo, lo; + u8 tmr_idx; + int err; + + tmr_idx = ice_get_ptp_src_clock_index(hw); + + /* Prepare the PHC timer for a ICE_PTP_READ_TIME capture command */ + ice_ptp_src_cmd(hw, ICE_PTP_READ_TIME); + + /* Prepare the PHY timer for a ICE_PTP_READ_TIME capture command */ + err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_READ_TIME); + if (err) + return err; + + /* Issue the sync to start the ICE_PTP_READ_TIME capture */ + ice_ptp_exec_tmr_cmd(hw); + + /* Read the captured PHC time from the shadow time registers */ + zo = rd32(hw, GLTSYN_SHTIME_0(tmr_idx)); + lo = rd32(hw, GLTSYN_SHTIME_L(tmr_idx)); + *phc_time = (u64)lo << 32 | zo; + + /* Read the captured PHY time from the PHY shadow registers */ + err = ice_ptp_read_port_capture_eth56g(hw, port, &tx_time, &rx_time); + if (err) + return err; + + /* If the PHY Tx and Rx timers don't match, log a warning message. + * Note that this should not happen in normal circumstances since the + * driver always programs them together. + */ + if (tx_time != rx_time) + dev_warn(ice_hw_to_dev(hw), "PHY port %u Tx and Rx timers do not match, tx_time 0x%016llX, rx_time 0x%016llX\n", + port, tx_time, rx_time); + + *phy_time = tx_time; + + return 0; +} + +/** + * ice_sync_phy_timer_eth56g - Synchronize the PHY timer with PHC timer + * @hw: pointer to the HW struct + * @port: the PHY port to synchronize + * + * Perform an adjustment to ensure that the PHY and PHC timers are in sync. + * This is done by issuing a ICE_PTP_READ_TIME command which triggers a + * simultaneous read of the PHY timer and PHC timer. Then we use the + * difference to calculate an appropriate 2s complement addition to add + * to the PHY timer in order to ensure it reads the same value as the + * primary PHC timer. 
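+ * + * For example, with an illustrative phc_time of 1000 and phy_time of 1100, + * phc_time - phy_time computed in u64 is 0xFFFFFFFFFFFFFF9C, which the port + * applies as an adjustment of -100 time units.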
+ * + * Return: + * * %0 - success + * * %-EBUSY - failed to acquire PTP semaphore + * * %other - PHY read/write failed + */ +static int ice_sync_phy_timer_eth56g(struct ice_hw *hw, u8 port) +{ + u64 phc_time, phy_time, difference; + int err; + + if (!ice_ptp_lock(hw)) { + ice_debug(hw, ICE_DBG_PTP, "Failed to acquire PTP semaphore\n"); + return -EBUSY; + } + + err = ice_read_phy_and_phc_time_eth56g(hw, port, &phy_time, &phc_time); + if (err) + goto err_unlock; + + /* Calculate the amount required to add to the port time in order for + * it to match the PHC time. + * + * Note that the port adjustment is done using 2s complement + * arithmetic. This is convenient since it means that we can simply + * calculate the difference between the PHC time and the port time, + * and it will be interpreted correctly. + */ + + ice_ptp_src_cmd(hw, ICE_PTP_NOP); + difference = phc_time - phy_time; + + err = ice_ptp_prep_port_adj_eth56g(hw, port, (s64)difference); + if (err) + goto err_unlock; + + err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME); + if (err) + goto err_unlock; + + /* Issue the sync to activate the time adjustment */ + ice_ptp_exec_tmr_cmd(hw); + + /* Re-capture the timer values to flush the command registers and + * verify that the time was properly adjusted. + */ + err = ice_read_phy_and_phc_time_eth56g(hw, port, &phy_time, &phc_time); + if (err) + goto err_unlock; + + dev_info(ice_hw_to_dev(hw), + "Port %u PHY time synced to PHC: 0x%016llX, 0x%016llX\n", + port, phy_time, phc_time); + +err_unlock: + ice_ptp_unlock(hw); + return err; +} + +/** + * ice_stop_phy_timer_eth56g - Stop the PHY clock timer + * @hw: pointer to the HW struct + * @port: the PHY port to stop + * @soft_reset: if true, hold the SOFT_RESET bit of PHY_REG_PS + * + * Stop the clock of a PHY port. This must be done as part of the flow to + * re-calibrate Tx and Rx timestamping offsets whenever the clock time is + * initialized or when link speed changes. + * + * Return: + * * %0 - success + * * %other - failed to write to PHY + */ +int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset) +{ + int err; + + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_OFFSET_READY, 0); + if (err) + return err; + + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_OFFSET_READY, 0); + if (err) + return err; + + ice_debug(hw, ICE_DBG_PTP, "Disabled clock on PHY port %u\n", port); + + return 0; +} + +/** + * ice_start_phy_timer_eth56g - Start the PHY clock timer + * @hw: pointer to the HW struct + * @port: the PHY port to start + * + * Start the clock of a PHY port. This must be done as part of the flow to + * re-calibrate Tx and Rx timestamping offsets whenever the clock time is + * initialized or when link speed changes.
+ * + * Return: + * * %0 - success + * * %other - PHY read/write failed + */ +int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port) +{ + u32 lo, hi; + u64 incval; + u8 tmr_idx; + int err; + + tmr_idx = ice_get_ptp_src_clock_index(hw); + + err = ice_stop_phy_timer_eth56g(hw, port, false); + if (err) + return err; + + ice_ptp_src_cmd(hw, ICE_PTP_NOP); + + err = ice_phy_cfg_parpcs_eth56g(hw, port); + if (err) + return err; + + err = ice_phy_cfg_ptp_1step_eth56g(hw, port); + if (err) + return err; + + err = ice_phy_cfg_mac_eth56g(hw, port); + if (err) + return err; + + lo = rd32(hw, GLTSYN_INCVAL_L(tmr_idx)); + hi = rd32(hw, GLTSYN_INCVAL_H(tmr_idx)); + incval = (u64)hi << 32 | lo; + + err = ice_write_40b_ptp_reg_eth56g(hw, port, PHY_REG_TIMETUS_L, incval); + if (err) + return err; + + err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_INIT_INCVAL); + if (err) + return err; + + ice_ptp_exec_tmr_cmd(hw); + + err = ice_sync_phy_timer_eth56g(hw, port); + if (err) + return err; + + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_TX_OFFSET_READY, 1); + if (err) + return err; + + err = ice_write_ptp_reg_eth56g(hw, port, PHY_REG_RX_OFFSET_READY, 1); + if (err) + return err; + + ice_debug(hw, ICE_DBG_PTP, "Enabled clock on PHY port %u\n", port); + + return 0; +} + +/** + * ice_sb_access_ena_eth56g - Enable or disable SB devices (PHY and others) access + * @hw: pointer to HW struct + * @enable: Enable or disable access + * + * Enable or disable sideband devices (PHY and others) access. + */ +static void ice_sb_access_ena_eth56g(struct ice_hw *hw, bool enable) +{ + u32 val = rd32(hw, PF_SB_REM_DEV_CTL); + + if (enable) + val |= BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1); + else + val &= ~(BIT(eth56g_phy_0) | BIT(cgu) | BIT(eth56g_phy_1)); + + wr32(hw, PF_SB_REM_DEV_CTL, val); +} + +/** + * ice_ptp_init_phc_eth56g - Perform ETH56G specific PHC initialization + * @hw: pointer to HW struct + * + * Perform PHC initialization steps specific to devices with the ETH 56G PHY. + * + * Return: + * * %0 - success + * * %other - failed to initialize CGU + */ +static int ice_ptp_init_phc_eth56g(struct ice_hw *hw) +{ + ice_sb_access_ena_eth56g(hw, true); + /* Initialize the Clock Generation Unit */ + return ice_init_cgu_e82x(hw); +} + +/** + * ice_ptp_read_tx_hwtstamp_status_eth56g - Get TX timestamp status + * @hw: pointer to the HW struct + * @ts_status: the timestamp mask pointer + * + * Read the PHY Tx timestamp status mask indicating which ports have Tx + * timestamps available. + * + * Return: + * * %0 - success + * * %other - failed to read from PHY + */ +int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status) +{ + const struct ice_eth56g_params *params = &hw->ptp.phy.eth56g; + u8 phy, mask; + u32 status; + + mask = (1 << hw->ptp.ports_per_phy) - 1; + *ts_status = 0; + + for (phy = 0; phy < params->num_phys; phy++) { + int err; + + err = ice_read_phy_eth56g(hw, phy, PHY_PTP_INT_STATUS, &status); + if (err) + return err; + + *ts_status |= (status & mask) << (phy * hw->ptp.ports_per_phy); + } + + ice_debug(hw, ICE_DBG_PTP, "PHY interrupt status: %x\n", *ts_status); + + return 0; +} + +/** + * ice_get_phy_tx_tstamp_ready_eth56g - Read the Tx memory status register + * @hw: pointer to the HW struct + * @port: the PHY port to read from + * @tstamp_ready: contents of the Tx memory status register + * + * Read the PHY_REG_TX_MEMORY_STATUS register indicating which timestamps in + * the PHY are ready. A set bit means the corresponding timestamp is valid and + * ready to be captured from the PHY timestamp block.
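+ * + * A caller might consume the returned mask along the lines of + * + *	if (tstamp_ready & BIT_ULL(idx)) + *		err = ice_read_ptp_tstamp_eth56g(hw, port, idx, &tstamp); + * + * a sketch only, not a prescribed usage.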
+ * + * Return: + * * %0 - success + * * %other - failed to read from PHY + */ +static int ice_get_phy_tx_tstamp_ready_eth56g(struct ice_hw *hw, u8 port, + u64 *tstamp_ready) +{ + int err; + + err = ice_read_64b_ptp_reg_eth56g(hw, port, PHY_REG_TX_MEMORY_STATUS_L, + tstamp_ready); + if (err) { + ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_MEMORY_STATUS for port %u, err %d\n", + port, err); + return err; + } + + return 0; +} + +/** + * ice_is_muxed_topo - detect breakout 2x50G topology for E825C + * @hw: pointer to the HW struct + * + * Return: true if it's 2x50 breakout topology, false otherwise + */ +static bool ice_is_muxed_topo(struct ice_hw *hw) +{ + u8 link_topo; + bool mux; + u32 val; + + val = rd32(hw, GLGEN_SWITCH_MODE_CONFIG); + mux = FIELD_GET(GLGEN_SWITCH_MODE_CONFIG_25X4_QUAD_M, val); + val = rd32(hw, GLGEN_MAC_LINK_TOPO); + link_topo = FIELD_GET(GLGEN_MAC_LINK_TOPO_LINK_TOPO_M, val); + + return (mux && link_topo == ICE_LINK_TOPO_UP_TO_2_LINKS); +} + +/** + * ice_ptp_init_phy_e825c - initialize PHY parameters + * @hw: pointer to the HW struct + */ +static void ice_ptp_init_phy_e825c(struct ice_hw *hw) +{ + struct ice_ptp_hw *ptp = &hw->ptp; + struct ice_eth56g_params *params; + u8 phy; + + ptp->phy_model = ICE_PHY_ETH56G; + params = &ptp->phy.eth56g; + params->onestep_ena = false; + params->peer_delay = 0; + params->sfd_ena = false; + params->phy_addr[0] = eth56g_phy_0; + params->phy_addr[1] = eth56g_phy_1; + params->num_phys = 2; + ptp->ports_per_phy = 4; + ptp->num_lports = params->num_phys * ptp->ports_per_phy; + + ice_sb_access_ena_eth56g(hw, true); + for (phy = 0; phy < params->num_phys; phy++) { + u32 phy_rev; + int err; + + err = ice_read_phy_eth56g(hw, phy, PHY_REG_REVISION, &phy_rev); + if (err || phy_rev != PHY_REVISION_ETH56G) { + ptp->phy_model = ICE_PHY_UNSUP; + return; + } + } + + ptp->is_2x50g_muxed_topo = ice_is_muxed_topo(hw); +} + /* E822 family functions * * The following functions operate on the E822 family of devices. 
@@ -288,18 +2707,21 @@ static void ice_ptp_exec_tmr_cmd(struct ice_hw *hw) /** * ice_fill_phy_msg_e82x - Fill message data for a PHY register access + * @hw: pointer to the HW struct * @msg: the PHY message buffer to fill in * @port: the port to access * @offset: the register offset */ -static void -ice_fill_phy_msg_e82x(struct ice_sbq_msg_input *msg, u8 port, u16 offset) +static void ice_fill_phy_msg_e82x(struct ice_hw *hw, + struct ice_sbq_msg_input *msg, u8 port, + u16 offset) { int phy_port, phy, quadtype; - phy_port = port % ICE_PORTS_PER_PHY_E82X; - phy = port / ICE_PORTS_PER_PHY_E82X; - quadtype = (port / ICE_PORTS_PER_QUAD) % ICE_QUADS_PER_PHY_E82X; + phy_port = port % hw->ptp.ports_per_phy; + phy = port / hw->ptp.ports_per_phy; + quadtype = ICE_GET_QUAD_NUM(port) % + ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy); if (quadtype == 0) { msg->msg_addr_low = P_Q0_L(P_0_BASE + offset, phy_port); @@ -430,10 +2852,10 @@ ice_read_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 *val) struct ice_sbq_msg_input msg = {0}; int err; - ice_fill_phy_msg_e82x(&msg, port, offset); + ice_fill_phy_msg_e82x(hw, &msg, port, offset); msg.opcode = ice_sbq_msg_rd; - err = ice_sbq_rw_reg(hw, &msg); + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", err); @@ -507,11 +2929,11 @@ ice_write_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 offset, u32 val) struct ice_sbq_msg_input msg = {0}; int err; - ice_fill_phy_msg_e82x(&msg, port, offset); + ice_fill_phy_msg_e82x(hw, &msg, port, offset); msg.opcode = ice_sbq_msg_wr; msg.data = val; - err = ice_sbq_rw_reg(hw, &msg); + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", err); @@ -546,8 +2968,7 @@ ice_write_40b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) low_addr); return -EINVAL; } - - low = (u32)(val & P_REG_40B_LOW_M); + low = FIELD_GET(P_REG_40B_LOW_M, val); high = (u32)(val >> P_REG_40B_HIGH_S); err = ice_write_phy_reg_e82x(hw, port, low_addr, low); @@ -617,24 +3038,30 @@ ice_write_64b_phy_reg_e82x(struct ice_hw *hw, u8 port, u16 low_addr, u64 val) /** * ice_fill_quad_msg_e82x - Fill message data for quad register access + * @hw: pointer to the HW struct * @msg: the PHY message buffer to fill in * @quad: the quad to access * @offset: the register offset * * Fill a message buffer for accessing a register in a quad shared between * multiple PHYs. 
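+ * + * For example, with eight ports per PHY and four ports per quad (illustrative + * E82X numbers) there are two quads per PHY; even quads resolve to + * Q_0_BASE + offset and odd quads to Q_1_BASE + offset, as computed below.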
+ * + * Return: + * * %0 - OK + * * %-EINVAL - invalid quad number */ -static int -ice_fill_quad_msg_e82x(struct ice_sbq_msg_input *msg, u8 quad, u16 offset) +static int ice_fill_quad_msg_e82x(struct ice_hw *hw, + struct ice_sbq_msg_input *msg, u8 quad, + u16 offset) { u32 addr; - if (quad >= ICE_MAX_QUAD) + if (quad >= ICE_GET_QUAD_NUM(hw->ptp.num_lports)) return -EINVAL; msg->dest_dev = rmn_0; - if ((quad % ICE_QUADS_PER_PHY_E82X) == 0) + if (!(quad % ICE_GET_QUAD_NUM(hw->ptp.ports_per_phy))) addr = Q_0_BASE + offset; else addr = Q_1_BASE + offset; @@ -661,13 +3088,13 @@ ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val) struct ice_sbq_msg_input msg = {0}; int err; - err = ice_fill_quad_msg_e82x(&msg, quad, offset); + err = ice_fill_quad_msg_e82x(hw, &msg, quad, offset); if (err) return err; msg.opcode = ice_sbq_msg_rd; - err = ice_sbq_rw_reg(hw, &msg); + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", err); @@ -695,14 +3122,14 @@ ice_write_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 val) struct ice_sbq_msg_input msg = {0}; int err; - err = ice_fill_quad_msg_e82x(&msg, quad, offset); + err = ice_fill_quad_msg_e82x(hw, &msg, quad, offset); if (err) return err; msg.opcode = ice_sbq_msg_wr; msg.data = val; - err = ice_sbq_rw_reg(hw, &msg); + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", err); @@ -751,7 +3178,7 @@ ice_read_phy_tstamp_e82x(struct ice_hw *hw, u8 quad, u8 idx, u64 *tstamp) * lower 8 bits in the low register, and the upper 32 bits in the high * register. */ - *tstamp = ((u64)hi) << TS_PHY_HIGH_S | ((u64)lo & TS_PHY_LOW_M); + *tstamp = FIELD_PREP(TS_PHY_HIGH_M, hi) | FIELD_PREP(TS_PHY_LOW_M, lo); return 0; } @@ -816,293 +3243,10 @@ static void ice_ptp_reset_ts_memory_e82x(struct ice_hw *hw) { unsigned int quad; - for (quad = 0; quad < ICE_MAX_QUAD; quad++) + for (quad = 0; quad < ICE_GET_QUAD_NUM(hw->ptp.num_lports); quad++) ice_ptp_reset_ts_memory_quad_e82x(hw, quad); } -/** - * ice_read_cgu_reg_e82x - Read a CGU register - * @hw: pointer to the HW struct - * @addr: Register address to read - * @val: storage for register value read - * - * Read the contents of a register of the Clock Generation Unit. Only - * applicable to E822 devices. - */ -static int -ice_read_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 *val) -{ - struct ice_sbq_msg_input cgu_msg; - int err; - - cgu_msg.opcode = ice_sbq_msg_rd; - cgu_msg.dest_dev = cgu; - cgu_msg.msg_addr_low = addr; - cgu_msg.msg_addr_high = 0x0; - - err = ice_sbq_rw_reg(hw, &cgu_msg); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read CGU register 0x%04x, err %d\n", - addr, err); - return err; - } - - *val = cgu_msg.data; - - return err; -} - -/** - * ice_write_cgu_reg_e82x - Write a CGU register - * @hw: pointer to the HW struct - * @addr: Register address to write - * @val: value to write into the register - * - * Write the specified value to a register of the Clock Generation Unit. Only - * applicable to E822 devices. 
- */ -static int -ice_write_cgu_reg_e82x(struct ice_hw *hw, u32 addr, u32 val) -{ - struct ice_sbq_msg_input cgu_msg; - int err; - - cgu_msg.opcode = ice_sbq_msg_wr; - cgu_msg.dest_dev = cgu; - cgu_msg.msg_addr_low = addr; - cgu_msg.msg_addr_high = 0x0; - cgu_msg.data = val; - - err = ice_sbq_rw_reg(hw, &cgu_msg); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to write CGU register 0x%04x, err %d\n", - addr, err); - return err; - } - - return err; -} - -/** - * ice_clk_freq_str - Convert time_ref_freq to string - * @clk_freq: Clock frequency - * - * Convert the specified TIME_REF clock frequency to a string. - */ -static const char *ice_clk_freq_str(u8 clk_freq) -{ - switch ((enum ice_time_ref_freq)clk_freq) { - case ICE_TIME_REF_FREQ_25_000: - return "25 MHz"; - case ICE_TIME_REF_FREQ_122_880: - return "122.88 MHz"; - case ICE_TIME_REF_FREQ_125_000: - return "125 MHz"; - case ICE_TIME_REF_FREQ_153_600: - return "153.6 MHz"; - case ICE_TIME_REF_FREQ_156_250: - return "156.25 MHz"; - case ICE_TIME_REF_FREQ_245_760: - return "245.76 MHz"; - default: - return "Unknown"; - } -} - -/** - * ice_clk_src_str - Convert time_ref_src to string - * @clk_src: Clock source - * - * Convert the specified clock source to its string name. - */ -static const char *ice_clk_src_str(u8 clk_src) -{ - switch ((enum ice_clk_src)clk_src) { - case ICE_CLK_SRC_TCX0: - return "TCX0"; - case ICE_CLK_SRC_TIME_REF: - return "TIME_REF"; - default: - return "Unknown"; - } -} - -/** - * ice_cfg_cgu_pll_e82x - Configure the Clock Generation Unit - * @hw: pointer to the HW struct - * @clk_freq: Clock frequency to program - * @clk_src: Clock source to select (TIME_REF, or TCX0) - * - * Configure the Clock Generation Unit with the desired clock frequency and - * time reference, enabling the PLL which drives the PTP hardware clock. - */ -static int -ice_cfg_cgu_pll_e82x(struct ice_hw *hw, enum ice_time_ref_freq clk_freq, - enum ice_clk_src clk_src) -{ - union tspll_ro_bwm_lf bwm_lf; - union nac_cgu_dword19 dw19; - union nac_cgu_dword22 dw22; - union nac_cgu_dword24 dw24; - union nac_cgu_dword9 dw9; - int err; - - if (clk_freq >= NUM_ICE_TIME_REF_FREQ) { - dev_warn(ice_hw_to_dev(hw), "Invalid TIME_REF frequency %u\n", - clk_freq); - return -EINVAL; - } - - if (clk_src >= NUM_ICE_CLK_SRC) { - dev_warn(ice_hw_to_dev(hw), "Invalid clock source %u\n", - clk_src); - return -EINVAL; - } - - if (clk_src == ICE_CLK_SRC_TCX0 && - clk_freq != ICE_TIME_REF_FREQ_25_000) { - dev_warn(ice_hw_to_dev(hw), - "TCX0 only supports 25 MHz frequency\n"); - return -EINVAL; - } - - err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD9, &dw9.val); - if (err) - return err; - - err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); - if (err) - return err; - - err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); - if (err) - return err; - - /* Log the current clock configuration */ - ice_debug(hw, ICE_DBG_PTP, "Current CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", - dw24.field.ts_pll_enable ? "enabled" : "disabled", - ice_clk_src_str(dw24.field.time_ref_sel), - ice_clk_freq_str(dw9.field.time_ref_freq_sel), - bwm_lf.field.plllock_true_lock_cri ? 
"locked" : "unlocked"); - - /* Disable the PLL before changing the clock source or frequency */ - if (dw24.field.ts_pll_enable) { - dw24.field.ts_pll_enable = 0; - - err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); - if (err) - return err; - } - - /* Set the frequency */ - dw9.field.time_ref_freq_sel = clk_freq; - err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD9, dw9.val); - if (err) - return err; - - /* Configure the TS PLL feedback divisor */ - err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD19, &dw19.val); - if (err) - return err; - - dw19.field.tspll_fbdiv_intgr = e822_cgu_params[clk_freq].feedback_div; - dw19.field.tspll_ndivratio = 1; - - err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD19, dw19.val); - if (err) - return err; - - /* Configure the TS PLL post divisor */ - err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD22, &dw22.val); - if (err) - return err; - - dw22.field.time1588clk_div = e822_cgu_params[clk_freq].post_pll_div; - dw22.field.time1588clk_sel_div2 = 0; - - err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD22, dw22.val); - if (err) - return err; - - /* Configure the TS PLL pre divisor and clock source */ - err = ice_read_cgu_reg_e82x(hw, NAC_CGU_DWORD24, &dw24.val); - if (err) - return err; - - dw24.field.ref1588_ck_div = e822_cgu_params[clk_freq].refclk_pre_div; - dw24.field.tspll_fbdiv_frac = e822_cgu_params[clk_freq].frac_n_div; - dw24.field.time_ref_sel = clk_src; - - err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); - if (err) - return err; - - /* Finally, enable the PLL */ - dw24.field.ts_pll_enable = 1; - - err = ice_write_cgu_reg_e82x(hw, NAC_CGU_DWORD24, dw24.val); - if (err) - return err; - - /* Wait to verify if the PLL locks */ - usleep_range(1000, 5000); - - err = ice_read_cgu_reg_e82x(hw, TSPLL_RO_BWM_LF, &bwm_lf.val); - if (err) - return err; - - if (!bwm_lf.field.plllock_true_lock_cri) { - dev_warn(ice_hw_to_dev(hw), "CGU PLL failed to lock\n"); - return -EBUSY; - } - - /* Log the current clock configuration */ - ice_debug(hw, ICE_DBG_PTP, "New CGU configuration -- %s, clk_src %s, clk_freq %s, PLL %s\n", - dw24.field.ts_pll_enable ? "enabled" : "disabled", - ice_clk_src_str(dw24.field.time_ref_sel), - ice_clk_freq_str(dw9.field.time_ref_freq_sel), - bwm_lf.field.plllock_true_lock_cri ? "locked" : "unlocked"); - - return 0; -} - -/** - * ice_init_cgu_e82x - Initialize CGU with settings from firmware - * @hw: pointer to the HW structure - * - * Initialize the Clock Generation Unit of the E822 device. - */ -static int ice_init_cgu_e82x(struct ice_hw *hw) -{ - struct ice_ts_func_info *ts_info = &hw->func_caps.ts_func_info; - union tspll_cntr_bist_settings cntr_bist; - int err; - - err = ice_read_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, - &cntr_bist.val); - if (err) - return err; - - /* Disable sticky lock detection so lock err reported is accurate */ - cntr_bist.field.i_plllock_sel_0 = 0; - cntr_bist.field.i_plllock_sel_1 = 0; - - err = ice_write_cgu_reg_e82x(hw, TSPLL_CNTR_BIST_SETTINGS, - cntr_bist.val); - if (err) - return err; - - /* Configure the CGU PLL using the parameters from the function - * capabilities. 
- */ - err = ice_cfg_cgu_pll_e82x(hw, ts_info->time_ref, - (enum ice_clk_src)ts_info->clk_src); - if (err) - return err; - - return 0; -} - /** * ice_ptp_set_vernier_wl - Set the window length for vernier calibration * @hw: pointer to the HW struct @@ -1113,7 +3257,7 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw) { u8 port; - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + for (port = 0; port < hw->ptp.num_lports; port++) { int err; err = ice_write_phy_reg_e82x(hw, port, P_REG_WL, @@ -1137,15 +3281,14 @@ static int ice_ptp_set_vernier_wl(struct ice_hw *hw) static int ice_ptp_init_phc_e82x(struct ice_hw *hw) { int err; - u32 regval; + u32 val; /* Enable reading switch and PHY registers over the sideband queue */ #define PF_SB_REM_DEV_CTL_SWITCH_READ BIT(1) #define PF_SB_REM_DEV_CTL_PHY0 BIT(2) - regval = rd32(hw, PF_SB_REM_DEV_CTL); - regval |= (PF_SB_REM_DEV_CTL_SWITCH_READ | - PF_SB_REM_DEV_CTL_PHY0); - wr32(hw, PF_SB_REM_DEV_CTL, regval); + val = rd32(hw, PF_SB_REM_DEV_CTL); + val |= (PF_SB_REM_DEV_CTL_SWITCH_READ | PF_SB_REM_DEV_CTL_PHY0); + wr32(hw, PF_SB_REM_DEV_CTL, val); /* Initialize the Clock Generation Unit */ err = ice_init_cgu_e82x(hw); @@ -1178,7 +3321,7 @@ ice_ptp_prep_phy_time_e82x(struct ice_hw *hw, u32 time) */ phy_time = (u64)time << 32; - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + for (port = 0; port < hw->ptp.num_lports; port++) { /* Tx case */ err = ice_write_64b_phy_reg_e82x(hw, port, P_REG_TX_TIMER_INC_PRE_L, @@ -1281,7 +3424,7 @@ ice_ptp_prep_phy_adj_e82x(struct ice_hw *hw, s32 adj) else cycles = -(((s64)-adj) << 32); - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + for (port = 0; port < hw->ptp.num_lports; port++) { int err; err = ice_ptp_prep_port_adj_e82x(hw, port, cycles); @@ -1307,7 +3450,7 @@ ice_ptp_prep_phy_incval_e82x(struct ice_hw *hw, u64 incval) int err; u8 port; - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { + for (port = 0; port < hw->ptp.num_lports; port++) { err = ice_write_40b_phy_reg_e82x(hw, port, P_REG_TIMETUS_L, incval); if (err) @@ -1372,51 +3515,20 @@ ice_ptp_read_port_capture(struct ice_hw *hw, u8 port, u64 *tx_ts, u64 *rx_ts) * * Prepare the requested port for an upcoming timer sync command. * - * Do not use this function directly. If you want to configure exactly one - * port, use ice_ptp_one_port_cmd() instead. + * Note there is no equivalent of this operation on E810, as that device + * always handles all external PHYs internally. 
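The "phy_time = (u64)time << 32" in ice_ptp_prep_phy_time_e82x() above reflects the PHY timer format: a Q32.32-style value with whole nanoseconds in the upper 32 bits and fractional nanoseconds in the lower 32. A quick standalone illustration of the encoding:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint32_t ns = 1000000000u; /* one second's worth of ns */
        uint64_t phy_time = (uint64_t)ns << 32;

        printf("ns=%u -> phy_time=0x%016llx\n",
               ns, (unsigned long long)phy_time);

        /* Half a nanosecond occupies the fractional half. */
        printf("0.5 ns -> 0x%016llx\n",
               (unsigned long long)(1ULL << 31));
        return 0;
    }

Initialization only ever programs whole nanoseconds, so the fractional half starts out zero; the increment value is what carries sub-nanosecond resolution.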
+ * + * Return: + * * %0 - success + * * %other - failed to write to PHY */ static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port, enum ice_ptp_tmr_cmd cmd) { - u32 cmd_val, val; - u8 tmr_idx; + u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd); int err; - tmr_idx = ice_get_ptp_src_clock_index(hw); - cmd_val = tmr_idx << SEL_PHY_SRC; - switch (cmd) { - case ICE_PTP_INIT_TIME: - cmd_val |= PHY_CMD_INIT_TIME; - break; - case ICE_PTP_INIT_INCVAL: - cmd_val |= PHY_CMD_INIT_INCVAL; - break; - case ICE_PTP_ADJ_TIME: - cmd_val |= PHY_CMD_ADJ_TIME; - break; - case ICE_PTP_READ_TIME: - cmd_val |= PHY_CMD_READ_TIME; - break; - case ICE_PTP_ADJ_TIME_AT_TIME: - cmd_val |= PHY_CMD_ADJ_TIME_AT_TIME; - break; - case ICE_PTP_NOP: - break; - } - /* Tx case */ - /* Read, modify, write */ - err = ice_read_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, &val); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read TX_TMR_CMD, err %d\n", - err); - return err; - } - - /* Modify necessary bits only and perform write */ - val &= ~TS_CMD_MASK; - val |= cmd_val; - err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_TMR_CMD, val); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write back TX_TMR_CMD, err %d\n", @@ -1425,19 +3537,8 @@ static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port, } /* Rx case */ - /* Read, modify, write */ - err = ice_read_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, &val); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read RX_TMR_CMD, err %d\n", - err); - return err; - } - - /* Modify necessary bits only and perform write */ - val &= ~TS_CMD_MASK; - val |= cmd_val; - - err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, val); + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_TMR_CMD, + val | TS_CMD_RX_TYPE); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to write back RX_TMR_CMD, err %d\n", err); @@ -1447,63 +3548,6 @@ static int ice_ptp_write_port_cmd_e82x(struct ice_hw *hw, u8 port, return 0; } -/** - * ice_ptp_one_port_cmd - Prepare one port for a timer command - * @hw: pointer to the HW struct - * @configured_port: the port to configure with configured_cmd - * @configured_cmd: timer command to prepare on the configured_port - * - * Prepare the configured_port for the configured_cmd, and prepare all other - * ports for ICE_PTP_NOP. This causes the configured_port to execute the - * desired command while all other ports perform no operation. - */ -static int -ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, - enum ice_ptp_tmr_cmd configured_cmd) -{ - u8 port; - - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { - enum ice_ptp_tmr_cmd cmd; - int err; - - if (port == configured_port) - cmd = configured_cmd; - else - cmd = ICE_PTP_NOP; - - err = ice_ptp_write_port_cmd_e82x(hw, port, cmd); - if (err) - return err; - } - - return 0; -} - -/** - * ice_ptp_port_cmd_e82x - Prepare all ports for a timer command - * @hw: pointer to the HW struct - * @cmd: timer command to prepare - * - * Prepare all ports connected to this device for an upcoming timer sync - * command. 
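The slimmed-down ice_ptp_write_port_cmd_e82x() above works because ice_ptp_tmr_cmd_to_port_reg() now precomputes the complete TMR_CMD register value, so each port write is a single access instead of the old read-modify-write of the TS_CMD_MASK bits. A rough standalone sketch of what such a helper computes; SEL_PHY_SRC and the PHY_CMD_* encodings below are placeholders for the driver's real definitions, which this hunk only deletes:

    #include <stdint.h>
    #include <stdio.h>

    /* Placeholder encodings, for illustration only. */
    #define SEL_PHY_SRC       6
    #define PHY_CMD_INIT_TIME 0x1
    #define PHY_CMD_ADJ_TIME  0x3

    enum tmr_cmd { CMD_INIT_TIME, CMD_ADJ_TIME, CMD_NOP };

    /* Build the full command word once per timer command; callers can
     * then write it to every port's TX/RX TMR_CMD register directly.
     */
    static uint32_t tmr_cmd_to_port_reg(uint8_t src_tmr_idx, enum tmr_cmd cmd)
    {
        uint32_t val = (uint32_t)src_tmr_idx << SEL_PHY_SRC;

        switch (cmd) {
        case CMD_INIT_TIME:
            return val | PHY_CMD_INIT_TIME;
        case CMD_ADJ_TIME:
            return val | PHY_CMD_ADJ_TIME;
        case CMD_NOP:
        default:
            return val;
        }
    }

    int main(void)
    {
        printf("INIT_TIME word for source timer 0: 0x%08x\n",
               tmr_cmd_to_port_reg(0, CMD_INIT_TIME));
        return 0;
    }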
- */ -static int -ice_ptp_port_cmd_e82x(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) -{ - u8 port; - - for (port = 0; port < ICE_NUM_EXTERNAL_PORTS; port++) { - int err; - - err = ice_ptp_write_port_cmd_e82x(hw, port, cmd); - if (err) - return err; - } - - return 0; -} - /* E822 Vernier calibration functions * * The following functions are used as part of the vernier calibration of @@ -1606,7 +3650,7 @@ static void ice_phy_cfg_lane_e82x(struct ice_hw *hw, u8 port) return; } - quad = port / ICE_PORTS_PER_QUAD; + quad = ICE_GET_QUAD_NUM(port); err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val); if (err) { @@ -2326,6 +4370,40 @@ int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port) return 0; } +/** + * ice_ptp_clear_phy_offset_ready_e82x - Clear PHY TX_/RX_OFFSET_READY registers + * @hw: pointer to the HW struct + * + * Clear PHY TX_/RX_OFFSET_READY registers, effectively marking all transmitted + * and received timestamps as invalid. + * + * Return: 0 on success, other error codes when failed to write to PHY + */ +int ice_ptp_clear_phy_offset_ready_e82x(struct ice_hw *hw) +{ + u8 port; + + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + err = ice_write_phy_reg_e82x(hw, port, P_REG_TX_OR, 0); + if (err) { + dev_warn(ice_hw_to_dev(hw), + "Failed to clear PHY TX_OFFSET_READY register\n"); + return err; + } + + err = ice_write_phy_reg_e82x(hw, port, P_REG_RX_OR, 0); + if (err) { + dev_warn(ice_hw_to_dev(hw), + "Failed to clear PHY RX_OFFSET_READY register\n"); + return err; + } + } + + return 0; +} + /** * ice_read_phy_and_phc_time_e82x - Simultaneously capture PHC and PHY time * @hw: pointer to the HW struct @@ -2636,6 +4714,48 @@ ice_get_phy_tx_tstamp_ready_e82x(struct ice_hw *hw, u8 quad, u64 *tstamp_ready) return 0; } +/** + * ice_phy_cfg_intr_e82x - Configure TX timestamp interrupt + * @hw: pointer to the HW struct + * @quad: the timestamp quad + * @ena: enable or disable interrupt + * @threshold: interrupt threshold + * + * Configure TX timestamp interrupt for the specified quad + * + * Return: 0 on success, other error codes when failed to read/write quad + */ + +int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold) +{ + int err; + u32 val; + + err = ice_read_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, &val); + if (err) + return err; + + val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; + if (ena) { + val |= Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M; + val &= ~Q_REG_TX_MEM_GBL_CFG_INTR_THR_M; + val |= FIELD_PREP(Q_REG_TX_MEM_GBL_CFG_INTR_THR_M, threshold); + } + + return ice_write_quad_reg_e82x(hw, quad, Q_REG_TX_MEM_GBL_CFG, val); +} + +/** + * ice_ptp_init_phy_e82x - initialize PHY parameters + * @ptp: pointer to the PTP HW struct + */ +static void ice_ptp_init_phy_e82x(struct ice_ptp_hw *ptp) +{ + ptp->phy_model = ICE_PHY_E82X; + ptp->num_lports = 8; + ptp->ports_per_phy = 8; +} + /* E810 functions * * The following functions operate on the E810 series devices which use @@ -2660,7 +4780,7 @@ static int ice_read_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 *val) msg.opcode = ice_sbq_msg_rd; msg.dest_dev = rmn_0; - err = ice_sbq_rw_reg(hw, &msg); + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to send message to PHY, err %d\n", err); @@ -2691,7 +4811,7 @@ static int ice_write_phy_reg_e810(struct ice_hw *hw, u32 addr, u32 val) msg.dest_dev = rmn_0; msg.data = val; - err = ice_sbq_rw_reg(hw, &msg); + err = ice_sbq_rw_reg(hw, &msg, ICE_AQ_FLAG_RD); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to 
send message to PHY, err %d\n", err); @@ -2863,17 +4983,21 @@ static int ice_clear_phy_tstamp_e810(struct ice_hw *hw, u8 lport, u8 idx) } /** - * ice_ptp_init_phy_e810 - Enable PTP function on the external PHY + * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization * @hw: pointer to HW struct * - * Enable the timesync PTP functionality for the external PHY connected to - * this function. + * Perform E810-specific PTP hardware clock initialization steps. + * + * Return: 0 on success, other error codes when failed to initialize TimeSync */ -int ice_ptp_init_phy_e810(struct ice_hw *hw) +static int ice_ptp_init_phc_e810(struct ice_hw *hw) { u8 tmr_idx; int err; + /* Ensure synchronization delay is zero */ + wr32(hw, GLTSYN_SYNC_DLAY, 0); + tmr_idx = hw->func_caps.ts_func_info.tmr_index_owned; err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_ENA(tmr_idx), GLTSYN_ENA_TSYN_ENA_M); @@ -2884,21 +5008,6 @@ int ice_ptp_init_phy_e810(struct ice_hw *hw) return err; } -/** - * ice_ptp_init_phc_e810 - Perform E810 specific PHC initialization - * @hw: pointer to HW struct - * - * Perform E810-specific PTP hardware clock initialization steps. - */ -static int ice_ptp_init_phc_e810(struct ice_hw *hw) -{ - /* Ensure synchronization delay is zero */ - wr32(hw, GLTSYN_SYNC_DLAY, 0); - - /* Initialize the PHY */ - return ice_ptp_init_phy_e810(hw); -} - /** * ice_ptp_prep_phy_time_e810 - Prepare PHY port with initial time * @hw: Board private structure @@ -3020,47 +5129,9 @@ static int ice_ptp_prep_phy_incval_e810(struct ice_hw *hw, u64 incval) */ static int ice_ptp_port_cmd_e810(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) { - u32 cmd_val, val; - int err; + u32 val = ice_ptp_tmr_cmd_to_port_reg(hw, cmd); - switch (cmd) { - case ICE_PTP_INIT_TIME: - cmd_val = GLTSYN_CMD_INIT_TIME; - break; - case ICE_PTP_INIT_INCVAL: - cmd_val = GLTSYN_CMD_INIT_INCVAL; - break; - case ICE_PTP_ADJ_TIME: - cmd_val = GLTSYN_CMD_ADJ_TIME; - break; - case ICE_PTP_READ_TIME: - cmd_val = GLTSYN_CMD_READ_TIME; - break; - case ICE_PTP_ADJ_TIME_AT_TIME: - cmd_val = GLTSYN_CMD_ADJ_INIT_TIME; - break; - case ICE_PTP_NOP: - return 0; - } - - /* Read, modify, write */ - err = ice_read_phy_reg_e810(hw, ETH_GLTSYN_CMD, &val); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to read GLTSYN_CMD, err %d\n", err); - return err; - } - - /* Modify necessary bits only and perform write */ - val &= ~TS_CMD_MASK_E810; - val |= cmd_val; - - err = ice_write_phy_reg_e810(hw, ETH_GLTSYN_CMD, val); - if (err) { - ice_debug(hw, ICE_DBG_PTP, "Failed to write back GLTSYN_CMD, err %d\n", err); - return err; - } - - return 0; + return ice_write_phy_reg_e810(hw, E810_ETH_GLTSYN_CMD, val); } /** @@ -3242,6 +5313,17 @@ int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data) return ice_aq_read_i2c(hw, link_topo, 0, addr, 1, data, NULL); } +/** + * ice_ptp_init_phy_e810 - initialize PHY parameters + * @ptp: pointer to the PTP HW struct + */ +static void ice_ptp_init_phy_e810(struct ice_ptp_hw *ptp) +{ + ptp->phy_model = ICE_PHY_E810; + ptp->num_lports = 8; + ptp->ports_per_phy = 4; +} + /* Device agnostic functions * * The following functions implement shared behavior common to both E822 and @@ -3299,18 +5381,126 @@ void ice_ptp_unlock(struct ice_hw *hw) } /** - * ice_ptp_init_phy_model - Initialize hw->phy_model based on device type + * ice_ptp_init_hw - Initialize hw based on device type * @hw: pointer to the HW structure * - * Determine the PHY model for the device, and initialize hw->phy_model + * Determine the PHY model for the device, and 
initialize hw * for use by other functions. */ -void ice_ptp_init_phy_model(struct ice_hw *hw) +void ice_ptp_init_hw(struct ice_hw *hw) { - if (ice_is_e810(hw)) - hw->phy_model = ICE_PHY_E810; + struct ice_ptp_hw *ptp = &hw->ptp; + + if (ice_is_e822(hw) || ice_is_e823(hw)) + ice_ptp_init_phy_e82x(ptp); + else if (ice_is_e810(hw)) + ice_ptp_init_phy_e810(ptp); + else if (ice_is_e825c(hw)) + ice_ptp_init_phy_e825c(hw); else - hw->phy_model = ICE_PHY_E82X; + ptp->phy_model = ICE_PHY_UNSUP; +} + +/** + * ice_ptp_write_port_cmd - Prepare a single PHY port for a timer command + * @hw: pointer to HW struct + * @port: Port to which cmd has to be sent + * @cmd: Command to be sent to the port + * + * Prepare one port for the upcoming timer sync command. Do not use this + * function directly to program only a single port; instead, use + * ice_ptp_one_port_cmd() to ensure non-modified ports get properly + * initialized to ICE_PTP_NOP. + * + * Return: + * * %0 - success + * * %-EOPNOTSUPP - PHY type not supported + * * %other - failed to write port command + */ +static int ice_ptp_write_port_cmd(struct ice_hw *hw, u8 port, + enum ice_ptp_tmr_cmd cmd) +{ + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + return ice_ptp_write_port_cmd_eth56g(hw, port, cmd); + case ICE_PHY_E82X: + return ice_ptp_write_port_cmd_e82x(hw, port, cmd); + default: + return -EOPNOTSUPP; + } +} + +/** + * ice_ptp_one_port_cmd - Program one PHY port for a timer command + * @hw: pointer to HW struct + * @configured_port: the port that should execute the command + * @configured_cmd: the command to be executed on the configured port + * + * Prepare one port for executing a timer command, while preparing all other + * ports for ICE_PTP_NOP. This allows executing a command on a single port + * while ensuring all other ports do not execute stale commands. + * + * Return: + * * %0 - success + * * %other - failed to write port command + */ +int ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, + enum ice_ptp_tmr_cmd configured_cmd) +{ + u32 port; + + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + /* Program the configured port with the configured command, + * program all other ports with ICE_PTP_NOP. + */ + if (port == configured_port) + err = ice_ptp_write_port_cmd(hw, port, configured_cmd); + else + err = ice_ptp_write_port_cmd(hw, port, ICE_PTP_NOP); + + if (err) + return err; + } + + return 0; +} + +/** + * ice_ptp_port_cmd - Prepare PHY ports for a timer sync command + * @hw: pointer to HW struct + * @cmd: the timer command to set up + * + * Prepare all PHY ports on this device for the requested timer command. For + * some families this can be done in one shot, but for other families each + * port must be configured individually.
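With ice_ptp_one_port_cmd() now exported (see the header hunk below), PHY-specific start/stop paths can arm exactly one port and then trigger the sync. A kernel-context sketch of the intended call pattern, with error handling trimmed; ice_ptp_exec_tmr_cmd() is the driver's existing trigger helper visible in the first hunk header of this file, not new API:

    /* Sketch: adjust the timer on one port while every other port is
     * programmed with ICE_PTP_NOP, then execute the queued command.
     */
    static int example_adj_single_port(struct ice_hw *hw, u8 port)
    {
        int err;

        err = ice_ptp_one_port_cmd(hw, port, ICE_PTP_ADJ_TIME);
        if (err)
            return err;

        ice_ptp_exec_tmr_cmd(hw);

        return 0;
    }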
+ * + * Return: + * * %0 - success + * * %other - failed to write port command + */ +static int ice_ptp_port_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) +{ + u32 port; + + /* PHY models which can program all ports simultaneously */ + switch (hw->ptp.phy_model) { + case ICE_PHY_E810: + return ice_ptp_port_cmd_e810(hw, cmd); + default: + break; + } + + /* PHY models which require programming each port separately */ + for (port = 0; port < hw->ptp.num_lports; port++) { + int err; + + err = ice_ptp_write_port_cmd(hw, port, cmd); + if (err) + return err; + } + + return 0; } /** @@ -3331,17 +5521,7 @@ static int ice_ptp_tmr_cmd(struct ice_hw *hw, enum ice_ptp_tmr_cmd cmd) ice_ptp_src_cmd(hw, cmd); /* Next, prepare the ports */ - switch (hw->phy_model) { - case ICE_PHY_E810: - err = ice_ptp_port_cmd_e810(hw, cmd); - break; - case ICE_PHY_E82X: - err = ice_ptp_port_cmd_e82x(hw, cmd); - break; - default: - err = -EOPNOTSUPP; - } - + err = ice_ptp_port_cmd(hw, cmd); if (err) { ice_debug(hw, ICE_DBG_PTP, "Failed to prepare PHY ports for timer command %u, err %d\n", cmd, err); @@ -3383,7 +5563,11 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time) /* PHY timers */ /* Fill Rx and Tx ports and send msg to PHY */ - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + err = ice_ptp_prep_phy_time_eth56g(hw, + (u32)(time & 0xFFFFFFFF)); + break; case ICE_PHY_E810: err = ice_ptp_prep_phy_time_e810(hw, time & 0xFFFFFFFF); break; @@ -3425,7 +5609,10 @@ int ice_ptp_write_incval(struct ice_hw *hw, u64 incval) wr32(hw, GLTSYN_SHADJ_L(tmr_idx), lower_32_bits(incval)); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), upper_32_bits(incval)); - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + err = ice_ptp_prep_phy_incval_eth56g(hw, incval); + break; case ICE_PHY_E810: err = ice_ptp_prep_phy_incval_e810(hw, incval); break; @@ -3491,7 +5678,10 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) wr32(hw, GLTSYN_SHADJ_L(tmr_idx), 0); wr32(hw, GLTSYN_SHADJ_H(tmr_idx), adj); - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + err = ice_ptp_prep_phy_adj_eth56g(hw, adj); + break; case ICE_PHY_E810: err = ice_ptp_prep_phy_adj_e810(hw, adj); break; @@ -3521,7 +5711,9 @@ int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj) */ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) { - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + return ice_read_ptp_tstamp_eth56g(hw, block, idx, tstamp); case ICE_PHY_E810: return ice_read_phy_tstamp_e810(hw, block, idx, tstamp); case ICE_PHY_E82X: @@ -3549,7 +5741,9 @@ int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp) */ int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx) { - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + return ice_clear_ptp_tstamp_eth56g(hw, block, idx); case ICE_PHY_E810: return ice_clear_phy_tstamp_e810(hw, block, idx); case ICE_PHY_E82X: @@ -3610,7 +5804,10 @@ static int ice_get_pf_c827_idx(struct ice_hw *hw, u8 *idx) */ void ice_ptp_reset_ts_memory(struct ice_hw *hw) { - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + ice_ptp_reset_ts_memory_eth56g(hw); + break; case ICE_PHY_E82X: ice_ptp_reset_ts_memory_e82x(hw); break; @@ -3636,7 +5833,9 @@ int ice_ptp_init_phc(struct ice_hw *hw) /* Clear event err indications for auxiliary pins */ (void)rd32(hw, GLTSYN_STAT(src_idx)); - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { + case 
ICE_PHY_ETH56G: + return ice_ptp_init_phc_eth56g(hw); case ICE_PHY_E810: return ice_ptp_init_phc_e810(hw); case ICE_PHY_E82X: @@ -3659,7 +5858,10 @@ int ice_ptp_init_phc(struct ice_hw *hw) */ int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready) { - switch (hw->phy_model) { + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + return ice_get_phy_tx_tstamp_ready_eth56g(hw, block, + tstamp_ready); case ICE_PHY_E810: return ice_get_phy_tx_tstamp_ready_e810(hw, block, tstamp_ready); diff --git a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h index 1f3e03124430..0852a34ade91 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp_hw.h +++ b/drivers/net/ethernet/intel/ice/ice_ptp_hw.h @@ -41,6 +41,41 @@ enum ice_ptp_fec_mode { ICE_PTP_FEC_MODE_RS_FEC }; +enum eth56g_res_type { + ETH56G_PHY_REG_PTP, + ETH56G_PHY_MEM_PTP, + ETH56G_PHY_REG_XPCS, + ETH56G_PHY_REG_MAC, + ETH56G_PHY_REG_GPCS, + NUM_ETH56G_PHY_RES +}; + +enum ice_eth56g_link_spd { + ICE_ETH56G_LNK_SPD_1G, + ICE_ETH56G_LNK_SPD_2_5G, + ICE_ETH56G_LNK_SPD_10G, + ICE_ETH56G_LNK_SPD_25G, + ICE_ETH56G_LNK_SPD_40G, + ICE_ETH56G_LNK_SPD_50G, + ICE_ETH56G_LNK_SPD_50G2, + ICE_ETH56G_LNK_SPD_100G, + ICE_ETH56G_LNK_SPD_100G2, + NUM_ICE_ETH56G_LNK_SPD /* Must be last */ +}; + +/** + * struct ice_phy_reg_info_eth56g - ETH56G PHY register parameters + * @base: base address for each PHY block + * @step: step between PHY lanes + * + * Characteristic information for the various PHY register parameters in the + * ETH56G devices + */ +struct ice_phy_reg_info_eth56g { + u32 base[NUM_ETH56G_PHY_RES]; + u32 step; +}; + /** * struct ice_time_ref_info_e82x * @pll_freq: Frequency of PLL that drives timer ticks in Hz @@ -94,8 +129,75 @@ struct ice_vernier_info_e82x { u32 rx_fixed_delay; }; +#define ICE_ETH56G_MAC_CFG_RX_OFFSET_INT GENMASK(19, 9) +#define ICE_ETH56G_MAC_CFG_RX_OFFSET_FRAC GENMASK(8, 0) +#define ICE_ETH56G_MAC_CFG_FRAC_W 9 /** - * struct ice_cgu_pll_params_e82x + * struct ice_eth56g_mac_reg_cfg - MAC config values for specific PTP registers + * @tx_mode: Tx timestamp compensation mode + * @tx_mk_dly: Tx timestamp marker start strobe delay + * @tx_cw_dly: Tx timestamp codeword start strobe delay + * @rx_mode: Rx timestamp compensation mode + * @rx_mk_dly: Rx timestamp marker start strobe delay + * @rx_cw_dly: Rx timestamp codeword start strobe delay + * @blks_per_clk: number of blocks transferred per clock cycle + * @blktime: block time, fixed point + * @mktime: marker time, fixed point + * @tx_offset: total Tx offset, fixed point + * @rx_offset: total Rx offset, contains value for bitslip/deskew, fixed point + * + * All fixed point registers except Rx offset are 23 bit unsigned ints with + * a 9 bit fractional. + * Rx offset is 11 bit unsigned int with a 9 bit fractional. 
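The fixed-point convention documented above stores a value x as round(x * 2^ICE_ETH56G_MAC_CFG_FRAC_W), i.e. with a 9-bit fraction. A standalone encoder using the Rx offset layout from the #defines above (GENMASK(19, 9) integer part, GENMASK(8, 0) fraction):

    #include <stdint.h>
    #include <stdio.h>

    #define FRAC_W        9
    #define RX_OFF_INT_M  (0x7FFu << FRAC_W) /* GENMASK(19, 9) */
    #define RX_OFF_FRAC_M 0x1FFu             /* GENMASK(8, 0)  */

    int main(void)
    {
        double x = 12.25; /* arbitrary demo value */
        uint32_t fp = (uint32_t)(x * (1u << FRAC_W) + 0.5);

        printf("%.2f -> 0x%05x (int=%u frac=%u/512)\n", x, fp,
               (fp & RX_OFF_INT_M) >> FRAC_W, fp & RX_OFF_FRAC_M);
        return 0;
    }

This prints "12.25 -> 0x01880 (int=12 frac=128/512)", i.e. 12 + 128/512 = 12.25.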
+ */ +struct ice_eth56g_mac_reg_cfg { + struct { + u8 def; + u8 rs; + } tx_mode; + u8 tx_mk_dly; + struct { + u8 def; + u8 onestep; + } tx_cw_dly; + struct { + u8 def; + u8 rs; + } rx_mode; + struct { + u8 def; + u8 rs; + } rx_mk_dly; + struct { + u8 def; + u8 rs; + } rx_cw_dly; + u8 blks_per_clk; + u16 blktime; + u16 mktime; + struct { + u32 serdes; + u32 no_fec; + u32 fc; + u32 rs; + u32 sfd; + u32 onestep; + } tx_offset; + struct { + u32 serdes; + u32 no_fec; + u32 fc; + u32 rs; + u32 sfd; + u32 bs_ds; + } rx_offset; +}; + +extern +const struct ice_eth56g_mac_reg_cfg eth56g_mac_cfg[NUM_ICE_ETH56G_LNK_SPD]; + +/** + * struct ice_cgu_pll_params_e82x - E82X CGU parameters * @refclk_pre_div: Reference clock pre-divisor * @feedback_div: Feedback divisor * @frac_n_div: Fractional divisor @@ -185,9 +287,34 @@ struct ice_cgu_pin_desc { extern const struct ice_cgu_pll_params_e82x e822_cgu_params[NUM_ICE_TIME_REF_FREQ]; +/** + * struct ice_cgu_pll_params_e825c - E825C CGU parameters + * @tspll_ck_refclkfreq: tspll_ck_refclkfreq selection + * @tspll_ndivratio: ndiv ratio that goes directly to the pll + * @tspll_fbdiv_intgr: TS PLL integer feedback divide + * @tspll_fbdiv_frac: TS PLL fractional feedback divide + * @ref1588_ck_div: clock divider for tspll ref + * + * Clock Generation Unit parameters used to program the PLL based on the + * selected TIME_REF/TCXO frequency. + */ +struct ice_cgu_pll_params_e825c { + u32 tspll_ck_refclkfreq; + u32 tspll_ndivratio; + u32 tspll_fbdiv_intgr; + u32 tspll_fbdiv_frac; + u32 ref1588_ck_div; +}; + +extern const struct +ice_cgu_pll_params_e825c e825c_cgu_params[NUM_ICE_TIME_REF_FREQ]; + #define E810C_QSFP_C827_0_HANDLE 2 #define E810C_QSFP_C827_1_HANDLE 3 +/* Table of constants related to possible ETH56G PHY resources */ +extern const struct ice_phy_reg_info_eth56g eth56g_phy_res[NUM_ETH56G_PHY_RES]; + /* Table of constants related to possible TIME_REF sources */ extern const struct ice_time_ref_info_e82x e822_time_ref[NUM_ICE_TIME_REF_FREQ]; @@ -197,7 +324,9 @@ extern const struct ice_vernier_info_e82x e822_vernier[NUM_ICE_PTP_LNK_SPD]; /* Increment value to generate nanoseconds in the GLTSYN_TIME_L register for * the E810 devices. Based off of a PLL with an 812.5 MHz frequency. 
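The E810 nominal increment below follows directly from that 812.5 MHz figure: one PLL tick is 1e9 / 812.5e6 = 16/13 ns, stored as Q32.32 fixed-point nanoseconds. A standalone check of the constant:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* 2^32 * 16 / 13, truncated, should match
         * ICE_PTP_NOMINAL_INCVAL_E810 (0x13b13b13bULL).
         */
        uint64_t incval = ((1ULL << 32) * 16) / 13;

        printf("incval = 0x%llx\n", (unsigned long long)incval);
        return 0;
    }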
*/ -#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL +#define ICE_E810_PLL_FREQ 812500000 +#define ICE_PTP_NOMINAL_INCVAL_E810 0x13b13b13bULL +#define E810_OUT_PROP_DELAY_NS 1 /* Device agnostic functions */ u8 ice_get_ptp_src_clock_index(struct ice_hw *hw); @@ -208,11 +337,15 @@ int ice_ptp_init_time(struct ice_hw *hw, u64 time); int ice_ptp_write_incval(struct ice_hw *hw, u64 incval); int ice_ptp_write_incval_locked(struct ice_hw *hw, u64 incval); int ice_ptp_adj_clock(struct ice_hw *hw, s32 adj); +int ice_ptp_clear_phy_offset_ready_e82x(struct ice_hw *hw); int ice_read_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx, u64 *tstamp); int ice_clear_phy_tstamp(struct ice_hw *hw, u8 block, u8 idx); void ice_ptp_reset_ts_memory(struct ice_hw *hw); int ice_ptp_init_phc(struct ice_hw *hw); +void ice_ptp_init_hw(struct ice_hw *hw); int ice_get_phy_tx_tstamp_ready(struct ice_hw *hw, u8 block, u64 *tstamp_ready); +int ice_ptp_one_port_cmd(struct ice_hw *hw, u8 configured_port, + enum ice_ptp_tmr_cmd configured_cmd); /* E822 family functions */ int ice_read_quad_reg_e82x(struct ice_hw *hw, u8 quad, u16 offset, u32 *val); @@ -264,9 +397,9 @@ int ice_stop_phy_timer_e82x(struct ice_hw *hw, u8 port, bool soft_reset); int ice_start_phy_timer_e82x(struct ice_hw *hw, u8 port); int ice_phy_cfg_tx_offset_e82x(struct ice_hw *hw, u8 port); int ice_phy_cfg_rx_offset_e82x(struct ice_hw *hw, u8 port); +int ice_phy_cfg_intr_e82x(struct ice_hw *hw, u8 quad, bool ena, u8 threshold); /* E810 family functions */ -int ice_ptp_init_phy_e810(struct ice_hw *hw); int ice_read_sma_ctrl_e810t(struct ice_hw *hw, u8 *data); int ice_write_sma_ctrl_e810t(struct ice_hw *hw, u8 data); int ice_read_pca9575_reg_e810t(struct ice_hw *hw, u8 offset, u8 *data); @@ -280,11 +413,44 @@ int ice_get_cgu_state(struct ice_hw *hw, u8 dpll_idx, u8 *ref_state, u8 *eec_mode, s64 *phase_offset, enum dpll_lock_status *dpll_state); int ice_get_cgu_rclk_pin_info(struct ice_hw *hw, u8 *base_idx, u8 *pin_num); - -void ice_ptp_init_phy_model(struct ice_hw *hw); int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, unsigned long *caps); +/* ETH56G family functions */ +int ice_ptp_read_tx_hwtstamp_status_eth56g(struct ice_hw *hw, u32 *ts_status); +int ice_stop_phy_timer_eth56g(struct ice_hw *hw, u8 port, bool soft_reset); +int ice_start_phy_timer_eth56g(struct ice_hw *hw, u8 port); +int ice_phy_cfg_tx_offset_eth56g(struct ice_hw *hw, u8 port); +int ice_phy_cfg_rx_offset_eth56g(struct ice_hw *hw, u8 port); +int ice_phy_cfg_intr_eth56g(struct ice_hw *hw, u8 port, bool ena, u8 threshold); +int ice_phy_cfg_ptp_1step_eth56g(struct ice_hw *hw, u8 port); + +#define ICE_ETH56G_NOMINAL_INCVAL 0x140000000ULL +#define ICE_ETH56G_NOMINAL_PCS_REF_TUS 0x100000000ULL +#define ICE_ETH56G_NOMINAL_PCS_REF_INC 0x300000000ULL +#define ICE_ETH56G_NOMINAL_THRESH4 0x7777 +#define ICE_ETH56G_NOMINAL_TX_THRESH 0x6 + +/** + * ice_get_base_incval - Get base clock increment value + * @hw: pointer to the HW struct + * + * Return: base clock increment value for supported PHYs, 0 otherwise + */ +static inline u64 ice_get_base_incval(struct ice_hw *hw) +{ + switch (hw->ptp.phy_model) { + case ICE_PHY_ETH56G: + return ICE_ETH56G_NOMINAL_INCVAL; + case ICE_PHY_E810: + return ICE_PTP_NOMINAL_INCVAL_E810; + case ICE_PHY_E82X: + return ice_e82x_nominal_incval(ice_e82x_time_ref(hw)); + default: + return 0; + } +} + #define PFTSYN_SEM_BYTES 4 #define ICE_PTP_CLOCK_INDEX_0 0x00 @@ -312,6 +478,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, #define 
TS_CMD_MASK_E810 0xFF #define TS_CMD_MASK 0xF #define SYNC_EXEC_CMD 0x3 +#define TS_CMD_RX_TYPE ICE_M(0x18, 0x4) /* Macros to derive port low and high addresses on both quads */ #define P_Q0_L(a, p) ((((a) + (0x2000 * (p)))) & 0xFFFF) @@ -344,11 +511,8 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, #define Q_REG_TX_MEM_GBL_CFG 0xC08 #define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_S 0 #define Q_REG_TX_MEM_GBL_CFG_LANE_TYPE_M BIT(0) -#define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_S 1 #define Q_REG_TX_MEM_GBL_CFG_TX_TYPE_M ICE_M(0xFF, 1) -#define Q_REG_TX_MEM_GBL_CFG_INTR_THR_S 9 #define Q_REG_TX_MEM_GBL_CFG_INTR_THR_M ICE_M(0x3F, 9) -#define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_S 15 #define Q_REG_TX_MEM_GBL_CFG_INTR_ENA_M BIT(15) /* Tx Timestamp data registers */ @@ -380,7 +544,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, #define P_REG_TIMETUS_L 0x410 #define P_REG_TIMETUS_U 0x414 -#define P_REG_40B_LOW_M 0xFF +#define P_REG_40B_LOW_M GENMASK(7, 0) #define P_REG_40B_HIGH_S 8 /* PHY window length registers */ @@ -487,7 +651,7 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, #define ETH_GLTSYN_SHADJ_H(_i) (0x0300037C + ((_i) * 32)) /* E810 timer command register */ -#define ETH_GLTSYN_CMD 0x03000344 +#define E810_ETH_GLTSYN_CMD 0x03000344 /* Source timer incval macros */ #define INCVAL_HIGH_M 0xFF @@ -549,4 +713,115 @@ int ice_cgu_get_output_pin_state_caps(struct ice_hw *hw, u8 pin_id, /* E810T PCA9575 IO controller pin control */ #define ICE_E810T_P0_GNSS_PRSNT_N BIT(4) +/* ETH56G PHY register addresses */ +/* Timestamp PHY incval registers */ +#define PHY_REG_TIMETUS_L 0x8 +#define PHY_REG_TIMETUS_U 0xC + +/* Timestamp PCS registers */ +#define PHY_PCS_REF_TUS_L 0x18 +#define PHY_PCS_REF_TUS_U 0x1C + +/* Timestamp PCS ref incval registers */ +#define PHY_PCS_REF_INC_L 0x20 +#define PHY_PCS_REF_INC_U 0x24 + +/* Timestamp init registers */ +#define PHY_REG_RX_TIMER_INC_PRE_L 0x64 +#define PHY_REG_RX_TIMER_INC_PRE_U 0x68 +#define PHY_REG_TX_TIMER_INC_PRE_L 0x44 +#define PHY_REG_TX_TIMER_INC_PRE_U 0x48 + +/* Timestamp match and adjust target registers */ +#define PHY_REG_RX_TIMER_CNT_ADJ_L 0x6C +#define PHY_REG_RX_TIMER_CNT_ADJ_U 0x70 +#define PHY_REG_TX_TIMER_CNT_ADJ_L 0x4C +#define PHY_REG_TX_TIMER_CNT_ADJ_U 0x50 + +/* Timestamp command registers */ +#define PHY_REG_TX_TMR_CMD 0x40 +#define PHY_REG_RX_TMR_CMD 0x60 + +/* Phy offset ready registers */ +#define PHY_REG_TX_OFFSET_READY 0x54 +#define PHY_REG_RX_OFFSET_READY 0x74 + +/* Phy total offset registers */ +#define PHY_REG_TOTAL_TX_OFFSET_L 0x38 +#define PHY_REG_TOTAL_TX_OFFSET_U 0x3C +#define PHY_REG_TOTAL_RX_OFFSET_L 0x58 +#define PHY_REG_TOTAL_RX_OFFSET_U 0x5C + +/* Timestamp capture registers */ +#define PHY_REG_TX_CAPTURE_L 0x78 +#define PHY_REG_TX_CAPTURE_U 0x7C +#define PHY_REG_RX_CAPTURE_L 0x8C +#define PHY_REG_RX_CAPTURE_U 0x90 + +/* Memory status registers */ +#define PHY_REG_TX_MEMORY_STATUS_L 0x80 +#define PHY_REG_TX_MEMORY_STATUS_U 0x84 + +/* Interrupt config register */ +#define PHY_REG_TS_INT_CONFIG 0x88 + +/* XIF mode config register */ +#define PHY_MAC_XIF_MODE 0x24 +#define PHY_MAC_XIF_1STEP_ENA_M ICE_M(0x1, 5) +#define PHY_MAC_XIF_TS_BIN_MODE_M ICE_M(0x1, 11) +#define PHY_MAC_XIF_TS_SFD_ENA_M ICE_M(0x1, 20) +#define PHY_MAC_XIF_GMII_TS_SEL_M ICE_M(0x1, 21) + +/* GPCS config register */ +#define PHY_GPCS_CONFIG_REG0 0x268 +#define PHY_GPCS_CONFIG_REG0_TX_THR_M ICE_M(0xF, 24) +#define PHY_GPCS_BITSLIP 0x5C + +#define PHY_TS_INT_CONFIG_THRESHOLD_M ICE_M(0x3F, 0) +#define 
PHY_TS_INT_CONFIG_ENA_M BIT(6) + +/* 1-step PTP config */ +#define PHY_PTP_1STEP_CONFIG 0x270 +#define PHY_PTP_1STEP_T1S_UP64_M ICE_M(0xF, 4) +#define PHY_PTP_1STEP_T1S_DELTA_M ICE_M(0xF, 8) +#define PHY_PTP_1STEP_PEER_DELAY(_port) (0x274 + 4 * (_port)) +#define PHY_PTP_1STEP_PD_ADD_PD_M ICE_M(0x1, 0) +#define PHY_PTP_1STEP_PD_DELAY_M ICE_M(0x3fffffff, 1) +#define PHY_PTP_1STEP_PD_DLY_V_M ICE_M(0x1, 31) + +/* Macros to derive offsets for TimeStampLow and TimeStampHigh */ +#define PHY_TSTAMP_L(x) (((x) * 8) + 0) +#define PHY_TSTAMP_U(x) (((x) * 8) + 4) + +#define PHY_REG_REVISION 0x85000 + +#define PHY_REG_DESKEW_0 0x94 +#define PHY_REG_DESKEW_0_RLEVEL GENMASK(6, 0) +#define PHY_REG_DESKEW_0_RLEVEL_FRAC GENMASK(9, 7) +#define PHY_REG_DESKEW_0_RLEVEL_FRAC_W 3 +#define PHY_REG_DESKEW_0_VALID GENMASK(10, 10) + +#define PHY_REG_GPCS_BITSLIP 0x5C +#define PHY_REG_SD_BIT_SLIP(_port_offset) (0x29C + 4 * (_port_offset)) +#define PHY_REVISION_ETH56G 0x10200 +#define PHY_VENDOR_TXLANE_THRESH 0x2000C + +#define PHY_MAC_TSU_CONFIG 0x40 +#define PHY_MAC_TSU_CFG_RX_MODE_M ICE_M(0x7, 0) +#define PHY_MAC_TSU_CFG_RX_MII_CW_DLY_M ICE_M(0x7, 4) +#define PHY_MAC_TSU_CFG_RX_MII_MK_DLY_M ICE_M(0x7, 8) +#define PHY_MAC_TSU_CFG_TX_MODE_M ICE_M(0x7, 12) +#define PHY_MAC_TSU_CFG_TX_MII_CW_DLY_M ICE_M(0x1F, 16) +#define PHY_MAC_TSU_CFG_TX_MII_MK_DLY_M ICE_M(0x1F, 21) +#define PHY_MAC_TSU_CFG_BLKS_PER_CLK_M ICE_M(0x1, 28) +#define PHY_MAC_RX_MODULO 0x44 +#define PHY_MAC_RX_OFFSET 0x48 +#define PHY_MAC_RX_OFFSET_M ICE_M(0xFFFFFF, 0) +#define PHY_MAC_TX_MODULO 0x4C +#define PHY_MAC_BLOCKTIME 0x50 +#define PHY_MAC_MARKERTIME 0x54 +#define PHY_MAC_TX_OFFSET 0x58 + +#define PHY_PTP_INT_STATUS 0x7FD140 + #endif /* _ICE_PTP_HW_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_repr.c b/drivers/net/ethernet/intel/ice/ice_repr.c index d367f4c66dcd..bdda3401e343 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.c +++ b/drivers/net/ethernet/intel/ice/ice_repr.c @@ -285,9 +285,7 @@ ice_repr_reg_netdev(struct net_device *netdev) static void ice_repr_remove_node(struct devlink_port *devlink_port) { - devl_lock(devlink_port->devlink); devl_rate_leaf_destroy(devlink_port); - devl_unlock(devlink_port->devlink); } /** @@ -308,6 +306,7 @@ static void ice_repr_rem(struct ice_repr *repr) void ice_repr_rem_vf(struct ice_repr *repr) { ice_repr_remove_node(&repr->vf->devlink_port); + ice_eswitch_decfg_vsi(repr->src_vsi, repr->parent_mac); unregister_netdev(repr->netdev); ice_devlink_destroy_vf_port(repr->vf); ice_virtchnl_set_dflt_ops(repr->vf); @@ -403,11 +402,17 @@ struct ice_repr *ice_repr_add_vf(struct ice_vf *vf) if (err) goto err_netdev; + err = ice_eswitch_cfg_vsi(repr->src_vsi, repr->parent_mac); + if (err) + goto err_cfg_vsi; + ice_virtchnl_set_repr_ops(vf); ice_repr_set_tx_topology(vf->pf); return repr; +err_cfg_vsi: + unregister_netdev(repr->netdev); err_netdev: ice_repr_rem(repr); err_repr_add: @@ -415,12 +420,9 @@ err_repr_add: return ERR_PTR(err); } -struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi) +struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id) { - if (!vsi->vf) - return NULL; - - return xa_load(&vsi->back->eswitch.reprs, vsi->vf->repr_id); + return xa_load(&pf->eswitch.reprs, id); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_repr.h b/drivers/net/ethernet/intel/ice/ice_repr.h index cff730b15ca0..488661b2900b 100644 --- a/drivers/net/ethernet/intel/ice/ice_repr.h +++ b/drivers/net/ethernet/intel/ice/ice_repr.h @@ -35,9 +35,8 @@ void ice_repr_stop_tx_queues(struct ice_repr *repr); struct 
ice_repr *ice_netdev_to_repr(const struct net_device *netdev); bool ice_is_port_repr_netdev(const struct net_device *netdev); -struct ice_repr *ice_repr_get_by_vsi(struct ice_vsi *vsi); - void ice_repr_inc_tx_stats(struct ice_repr *repr, unsigned int len, int xmit_status); void ice_repr_inc_rx_stats(struct net_device *netdev, unsigned int len); +struct ice_repr *ice_repr_get(struct ice_pf *pf, u32 id); #endif diff --git a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h index ead75fe2bcda..3b0054faf70c 100644 --- a/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_sbq_cmd.h @@ -47,10 +47,12 @@ struct ice_sbq_evt_desc { }; enum ice_sbq_msg_dev { - rmn_0 = 0x02, - rmn_1 = 0x03, - rmn_2 = 0x04, - cgu = 0x06 + eth56g_phy_0 = 0x02, + rmn_0 = 0x02, + rmn_1 = 0x03, + rmn_2 = 0x04, + cgu = 0x06, + eth56g_phy_1 = 0x0D, }; enum ice_sbq_msg_opcode { diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 067712f4923f..55ef33208456 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -1416,21 +1416,23 @@ out_put_vf: } /** - * ice_set_vf_mac - * @netdev: network interface device structure + * __ice_set_vf_mac - program VF MAC address + * @pf: PF to be configured + * @vf_id: VF identifier + * @mac: MAC address + * + * program VF MAC address + * Return: zero on success or an error code on failure */ -int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac) { - struct ice_pf *pf = ice_netdev_to_pf(netdev); + struct device *dev; struct ice_vf *vf; int ret; + dev = ice_pf_to_dev(pf); if (is_multicast_ether_addr(mac)) { - netdev_err(netdev, "%pM not a valid unicast address\n", mac); + dev_err(dev, "%pM not a valid unicast address\n", mac); return -EINVAL; } @@ -1459,13 +1461,13 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) if (is_zero_ether_addr(mac)) { /* VF will send VIRTCHNL_OP_ADD_ETH_ADDR message with its MAC */ vf->pf_set_mac = false; - netdev_info(netdev, "Removing MAC on VF %d. VF driver will be reinitialized\n", - vf->vf_id); + dev_info(dev, "Removing MAC on VF %d. VF driver will be reinitialized\n", + vf->vf_id); } else { /* PF will add MAC rule for the VF */ vf->pf_set_mac = true; - netdev_info(netdev, "Setting MAC %pM on VF %d. VF driver will be reinitialized\n", - mac, vf_id); + dev_info(dev, "Setting MAC %pM on VF %d. 
VF driver will be reinitialized\n", + mac, vf_id); } ice_reset_vf(vf, ICE_VF_RESET_NOTIFY); @@ -1476,6 +1478,20 @@ out_put_vf: return ret; } +/** + * ice_set_vf_mac - .ndo_set_vf_mac handler + * @netdev: network interface device structure + * @vf_id: VF identifier + * @mac: MAC address + * + * program VF MAC address + * Return: zero on success or an error code on failure + */ +int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) +{ + return __ice_set_vf_mac(ice_netdev_to_pf(netdev), vf_id, mac); +} + /** * ice_set_vf_trust * @netdev: network interface device structure diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.h b/drivers/net/ethernet/intel/ice/ice_sriov.h index 8f22313474d6..96549ca5c52c 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.h +++ b/drivers/net/ethernet/intel/ice/ice_sriov.h @@ -28,6 +28,7 @@ #ifdef CONFIG_PCI_IOV void ice_process_vflr_event(struct ice_pf *pf); int ice_sriov_configure(struct pci_dev *pdev, int num_vfs); +int __ice_set_vf_mac(struct ice_pf *pf, u16 vf_id, const u8 *mac); int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac); int ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi); @@ -80,6 +81,13 @@ ice_sriov_configure(struct pci_dev __always_unused *pdev, return -EOPNOTSUPP; } +static inline int +__ice_set_vf_mac(struct ice_pf __always_unused *pf, + u16 __always_unused vf_id, const u8 __always_unused *mac) +{ + return -EOPNOTSUPP; +} + static inline int ice_set_vf_mac(struct net_device __always_unused *netdev, int __always_unused vf_id, u8 __always_unused *mac) diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 1191031b2a43..3caafcdc301f 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -3,6 +3,7 @@ #include "ice_lib.h" #include "ice_switch.h" +#include "ice_trace.h" #define ICE_ETH_DA_OFFSET 0 #define ICE_ETH_ETHTYPE_OFFSET 12 @@ -1471,7 +1472,6 @@ int ice_init_def_sw_recp(struct ice_hw *hw) recps[i].root_rid = i; INIT_LIST_HEAD(&recps[i].filt_rules); INIT_LIST_HEAD(&recps[i].filt_replay_rules); - INIT_LIST_HEAD(&recps[i].rg_list); mutex_init(&recps[i].filt_rule_lock); } @@ -1962,6 +1962,15 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz, hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT) status = -ENOENT; + if (!status) { + if (opc == ice_aqc_opc_add_sw_rules) + hw->switch_info->rule_cnt += num_rules; + else if (opc == ice_aqc_opc_remove_sw_rules) + hw->switch_info->rule_cnt -= num_rules; + } + + trace_ice_aq_sw_rules(hw->switch_info); + return status; } @@ -2182,8 +2191,10 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) sw_buf->res_type = cpu_to_le16(res_type); status = ice_aq_alloc_free_res(hw, sw_buf, buf_len, ice_aqc_opc_alloc_res); - if (!status) + if (!status) { *rid = le16_to_cpu(sw_buf->elem[0].e.sw_resp); + hw->switch_info->recp_cnt++; + } return status; } @@ -2197,7 +2208,13 @@ int ice_alloc_recipe(struct ice_hw *hw, u16 *rid) */ static int ice_free_recipe_res(struct ice_hw *hw, u16 rid) { - return ice_free_hw_res(hw, ICE_AQC_RES_TYPE_RECIPE, 1, &rid); + int status; + + status = ice_free_hw_res(hw, ICE_AQC_RES_TYPE_RECIPE, 1, &rid); + if (!status) + hw->switch_info->recp_cnt--; + + return status; } /** @@ -2281,20 +2298,6 @@ static void ice_get_recp_to_prof_map(struct ice_hw *hw) } } -/** - * ice_collect_result_idx - copy result index values - * @buf: buffer that contains the result index - * @recp: the recipe struct to copy data into - */ 
-static void -ice_collect_result_idx(struct ice_aqc_recipe_data_elem *buf, - struct ice_sw_recipe *recp) -{ - if (buf->content.result_indx & ICE_AQ_RECIPE_RESULT_EN) - set_bit(buf->content.result_indx & ~ICE_AQ_RECIPE_RESULT_EN, - recp->res_idxs); -} - /** * ice_get_recp_frm_fw - update SW bookkeeping from FW recipe entries * @hw: pointer to hardware structure @@ -2353,18 +2356,10 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, for (sub_recps = 0; sub_recps < num_recps; sub_recps++) { struct ice_aqc_recipe_data_elem root_bufs = tmp[sub_recps]; - struct ice_recp_grp_entry *rg_entry; u8 i, prof, idx, prot = 0; bool is_root; u16 off = 0; - rg_entry = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*rg_entry), - GFP_KERNEL); - if (!rg_entry) { - status = -ENOMEM; - goto err_unroll; - } - idx = root_bufs.recipe_indx; is_root = root_bufs.content.rid & ICE_AQ_RECIPE_ID_IS_ROOT; @@ -2377,11 +2372,8 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, prof = find_first_bit(recipe_to_profile[idx], ICE_MAX_NUM_PROFILES); for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) { - u8 lkup_indx = root_bufs.content.lkup_indx[i + 1]; - - rg_entry->fv_idx[i] = lkup_indx; - rg_entry->fv_mask[i] = - le16_to_cpu(root_bufs.content.mask[i + 1]); + u8 lkup_indx = root_bufs.content.lkup_indx[i]; + u16 lkup_mask = le16_to_cpu(root_bufs.content.mask[i]); /* If the recipe is a chained recipe then all its * child recipe's result will have a result index. @@ -2392,26 +2384,21 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, * has ICE_AQ_RECIPE_LKUP_IGNORE or 0 since it isn't a * valid offset value. */ - if (test_bit(rg_entry->fv_idx[i], hw->switch_info->prof_res_bm[prof]) || - rg_entry->fv_idx[i] & ICE_AQ_RECIPE_LKUP_IGNORE || - rg_entry->fv_idx[i] == 0) + if (!lkup_indx || + (lkup_indx & ICE_AQ_RECIPE_LKUP_IGNORE) || + test_bit(lkup_indx, + hw->switch_info->prof_res_bm[prof])) continue; - ice_find_prot_off(hw, ICE_BLK_SW, prof, - rg_entry->fv_idx[i], &prot, &off); + ice_find_prot_off(hw, ICE_BLK_SW, prof, lkup_indx, + &prot, &off); lkup_exts->fv_words[fv_word_idx].prot_id = prot; lkup_exts->fv_words[fv_word_idx].off = off; - lkup_exts->field_mask[fv_word_idx] = - rg_entry->fv_mask[i]; + lkup_exts->field_mask[fv_word_idx] = lkup_mask; fv_word_idx++; } - /* populate rg_list with the data from the child entry of this - * recipe - */ - list_add(&rg_entry->l_entry, &recps[rid].rg_list); /* Propagate some data to the recipe database */ - recps[idx].is_root = !!is_root; recps[idx].priority = root_bufs.content.act_ctrl_fwd_priority; recps[idx].need_pass_l2 = root_bufs.content.act_ctrl & ICE_AQ_RECIPE_ACT_NEED_PASS_L2; @@ -2419,11 +2406,8 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2; bitmap_zero(recps[idx].res_idxs, ICE_MAX_FV_WORDS); if (root_bufs.content.result_indx & ICE_AQ_RECIPE_RESULT_EN) { - recps[idx].chain_idx = root_bufs.content.result_indx & - ~ICE_AQ_RECIPE_RESULT_EN; - set_bit(recps[idx].chain_idx, recps[idx].res_idxs); - } else { - recps[idx].chain_idx = ICE_INVAL_CHAIN_IND; + set_bit(root_bufs.content.result_indx & + ~ICE_AQ_RECIPE_RESULT_EN, recps[idx].res_idxs); } if (!is_root) { @@ -2443,15 +2427,6 @@ ice_get_recp_frm_fw(struct ice_hw *hw, struct ice_sw_recipe *recps, u8 rid, /* Complete initialization of the root recipe entry */ lkup_exts->n_val_words = fv_word_idx; - recps[rid].big_recp = (num_recps > 1); - recps[rid].n_grp_count = (u8)num_recps; - recps[rid].root_buf = 
devm_kmemdup(ice_hw_to_dev(hw), tmp, - recps[rid].n_grp_count * sizeof(*recps[rid].root_buf), - GFP_KERNEL); - if (!recps[rid].root_buf) { - status = -ENOMEM; - goto err_unroll; - } /* Copy result indexes */ bitmap_copy(recps[rid].res_idxs, result_bm, ICE_MAX_FV_WORDS); @@ -4768,11 +4743,6 @@ ice_find_recp(struct ice_hw *hw, struct ice_prot_lkup_ext *lkup_exts, continue; } - /* Skip inverse action recipes */ - if (recp[i].root_buf && recp[i].root_buf->content.act_ctrl & - ICE_AQ_RECIPE_ACT_INV_ACT) - continue; - /* if number of words we are looking for match */ if (lkup_exts->n_val_words == recp[i].lkup_exts.n_val_words) { struct ice_fv_word *ar = recp[i].lkup_exts.fv_words; @@ -4896,111 +4866,56 @@ ice_fill_valid_words(struct ice_adv_lkup_elem *rule, return ret_val; } -/** - * ice_create_first_fit_recp_def - Create a recipe grouping - * @hw: pointer to the hardware structure - * @lkup_exts: an array of protocol header extractions - * @rg_list: pointer to a list that stores new recipe groups - * @recp_cnt: pointer to a variable that stores returned number of recipe groups - * - * Using first fit algorithm, take all the words that are still not done - * and start grouping them in 4-word groups. Each group makes up one - * recipe. - */ -static int -ice_create_first_fit_recp_def(struct ice_hw *hw, - struct ice_prot_lkup_ext *lkup_exts, - struct list_head *rg_list, - u8 *recp_cnt) -{ - struct ice_pref_recipe_group *grp = NULL; - u8 j; - - *recp_cnt = 0; - - /* Walk through every word in the rule to check if it is not done. If so - * then this word needs to be part of a new recipe. - */ - for (j = 0; j < lkup_exts->n_val_words; j++) - if (!test_bit(j, lkup_exts->done)) { - if (!grp || - grp->n_val_pairs == ICE_NUM_WORDS_RECIPE) { - struct ice_recp_grp_entry *entry; - - entry = devm_kzalloc(ice_hw_to_dev(hw), - sizeof(*entry), - GFP_KERNEL); - if (!entry) - return -ENOMEM; - list_add(&entry->l_entry, rg_list); - grp = &entry->r_group; - (*recp_cnt)++; - } - - grp->pairs[grp->n_val_pairs].prot_id = - lkup_exts->fv_words[j].prot_id; - grp->pairs[grp->n_val_pairs].off = - lkup_exts->fv_words[j].off; - grp->mask[grp->n_val_pairs] = lkup_exts->field_mask[j]; - grp->n_val_pairs++; - } - - return 0; -} - /** * ice_fill_fv_word_index - fill in the field vector indices for a recipe group * @hw: pointer to the hardware structure - * @fv_list: field vector with the extraction sequence information - * @rg_list: recipe groupings with protocol-offset pairs + * @rm: recipe management list entry * * Helper function to fill in the field vector indices for protocol-offset * pairs. These indexes are then ultimately programmed into a recipe. */ static int -ice_fill_fv_word_index(struct ice_hw *hw, struct list_head *fv_list, - struct list_head *rg_list) +ice_fill_fv_word_index(struct ice_hw *hw, struct ice_sw_recipe *rm) { struct ice_sw_fv_list_entry *fv; - struct ice_recp_grp_entry *rg; struct ice_fv_word *fv_ext; + u8 i; - if (list_empty(fv_list)) - return 0; + if (list_empty(&rm->fv_list)) + return -EINVAL; - fv = list_first_entry(fv_list, struct ice_sw_fv_list_entry, + fv = list_first_entry(&rm->fv_list, struct ice_sw_fv_list_entry, list_entry); fv_ext = fv->fv_ptr->ew; - list_for_each_entry(rg, rg_list, l_entry) { - u8 i; + /* Add switch id as the first word. 
*/ + rm->fv_idx[0] = ICE_AQ_SW_ID_LKUP_IDX; + rm->fv_mask[0] = ICE_AQ_SW_ID_LKUP_MASK; + rm->n_ext_words++; - for (i = 0; i < rg->r_group.n_val_pairs; i++) { - struct ice_fv_word *pr; - bool found = false; - u16 mask; - u8 j; + for (i = 1; i < rm->n_ext_words; i++) { + struct ice_fv_word *fv_word = &rm->ext_words[i - 1]; + u16 fv_mask = rm->word_masks[i - 1]; + bool found = false; + u8 j; - pr = &rg->r_group.pairs[i]; - mask = rg->r_group.mask[i]; + for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) { + if (fv_ext[j].prot_id == fv_word->prot_id && + fv_ext[j].off == fv_word->off) { + found = true; - for (j = 0; j < hw->blk[ICE_BLK_SW].es.fvw; j++) - if (fv_ext[j].prot_id == pr->prot_id && - fv_ext[j].off == pr->off) { - found = true; - - /* Store index of field vector */ - rg->fv_idx[i] = j; - rg->fv_mask[i] = mask; - break; - } - - /* Protocol/offset could not be found, caller gave an - * invalid pair - */ - if (!found) - return -EINVAL; + /* Store index of field vector */ + rm->fv_idx[i] = j; + rm->fv_mask[i] = fv_mask; + break; + } } + + /* Protocol/offset could not be found, caller gave an invalid + * pair. + */ + if (!found) + return -EINVAL; } return 0; @@ -5073,6 +4988,73 @@ ice_find_free_recp_res_idx(struct ice_hw *hw, const unsigned long *profiles, return (u16)bitmap_weight(free_idx, ICE_MAX_FV_WORDS); } +/** + * ice_calc_recp_cnt - calculate number of recipes based on word count + * @word_cnt: number of lookup words + * + * Word count should include switch ID word and regular lookup words. + * Returns: number of recipes required to fit @word_cnt, including extra recipes + * needed for recipe chaining (if needed). + */ +static int ice_calc_recp_cnt(u8 word_cnt) +{ + /* All words fit in a single recipe, no need for chaining. */ + if (word_cnt <= ICE_NUM_WORDS_RECIPE) + return 1; + + /* Recipe chaining required. Result indexes are fitted right after + * regular lookup words. In some cases a new recipe must be added in + * order to fit result indexes. + * + * While the word count increases, every 5 words an extra recipe needs + * to be added. However, by adding a recipe, one word for its result + * index must also be added, therefore every 4 words recipe count + * increases by 1. This calculation does not apply to word count == 1, + * which is handled above. + */ + return (word_cnt + 2) / (ICE_NUM_WORDS_RECIPE - 1); +} + +static void fill_recipe_template(struct ice_aqc_recipe_data_elem *recp, u16 rid, + const struct ice_sw_recipe *rm) +{ + int i; + + recp->recipe_indx = rid; + recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_PRUNE_INDX_M; + + for (i = 0; i < ICE_NUM_WORDS_RECIPE; i++) { + recp->content.lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE; + recp->content.mask[i] = cpu_to_le16(0); + } + + set_bit(rid, (unsigned long *)recp->recipe_bitmap); + recp->content.act_ctrl_fwd_priority = rm->priority; + + if (rm->need_pass_l2) + recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2; + + if (rm->allow_pass_l2) + recp->content.act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2; +} + +static void bookkeep_recipe(struct ice_sw_recipe *recipe, + struct ice_aqc_recipe_data_elem *r, + const struct ice_sw_recipe *rm) +{ + memcpy(recipe->r_bitmap, r->recipe_bitmap, sizeof(recipe->r_bitmap)); + + recipe->priority = r->content.act_ctrl_fwd_priority; + recipe->tun_type = rm->tun_type; + recipe->need_pass_l2 = rm->need_pass_l2; + recipe->allow_pass_l2 = rm->allow_pass_l2; + recipe->recp_created = true; +} + +/* For memcpy in ice_add_sw_recipe. 
*/ +static_assert(sizeof_field(struct ice_aqc_recipe_data_elem, recipe_bitmap) == + sizeof_field(struct ice_sw_recipe, r_bitmap)); + /** * ice_add_sw_recipe - function to call AQ calls to create switch recipe * @hw: pointer to hardware structure @@ -5083,326 +5065,147 @@ static int ice_add_sw_recipe(struct ice_hw *hw, struct ice_sw_recipe *rm, unsigned long *profiles) { + struct ice_aqc_recipe_data_elem *buf __free(kfree) = NULL; DECLARE_BITMAP(result_idx_bm, ICE_MAX_FV_WORDS); - struct ice_aqc_recipe_content *content; - struct ice_aqc_recipe_data_elem *tmp; - struct ice_aqc_recipe_data_elem *buf; - struct ice_recp_grp_entry *entry; - u16 free_res_idx; - u16 recipe_count; - u8 chain_idx; - u8 recps = 0; + struct ice_aqc_recipe_data_elem *root; + struct ice_sw_recipe *recipe; + u16 free_res_idx, rid; + int lookup = 0; + int recp_cnt; int status; + int word; + int i; + + recp_cnt = ice_calc_recp_cnt(rm->n_ext_words); - /* When more than one recipe are required, another recipe is needed to - * chain them together. Matching a tunnel metadata ID takes up one of - * the match fields in the chaining recipe reducing the number of - * chained recipes by one. - */ - /* check number of free result indices */ bitmap_zero(result_idx_bm, ICE_MAX_FV_WORDS); + bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES); + + /* Check number of free result indices */ free_res_idx = ice_find_free_recp_res_idx(hw, profiles, result_idx_bm); ice_debug(hw, ICE_DBG_SW, "Result idx slots: %d, need %d\n", - free_res_idx, rm->n_grp_count); + free_res_idx, recp_cnt); - if (rm->n_grp_count > 1) { - if (rm->n_grp_count > free_res_idx) - return -ENOSPC; - - rm->n_grp_count++; - } - - if (rm->n_grp_count > ICE_MAX_CHAIN_RECIPE) + /* Last recipe doesn't need result index */ + if (recp_cnt - 1 > free_res_idx) return -ENOSPC; - tmp = kcalloc(ICE_MAX_NUM_RECIPES, sizeof(*tmp), GFP_KERNEL); - if (!tmp) + if (recp_cnt > ICE_MAX_CHAIN_RECIPE_RES) + return -E2BIG; + + buf = kcalloc(recp_cnt, sizeof(*buf), GFP_KERNEL); + if (!buf) return -ENOMEM; - buf = devm_kcalloc(ice_hw_to_dev(hw), rm->n_grp_count, sizeof(*buf), - GFP_KERNEL); - if (!buf) { - status = -ENOMEM; - goto err_mem; - } - - bitmap_zero(rm->r_bitmap, ICE_MAX_NUM_RECIPES); - recipe_count = ICE_MAX_NUM_RECIPES; - status = ice_aq_get_recipe(hw, tmp, &recipe_count, ICE_SW_LKUP_MAC, - NULL); - if (status || recipe_count == 0) - goto err_unroll; - - /* Allocate the recipe resources, and configure them according to the - * match fields from protocol headers and extracted field vectors. + /* Setup the non-root subrecipes. These do not contain lookups for other + * subrecipes results. Set associated recipe only to own recipe index. + * Each non-root subrecipe needs a free result index from FV. + * + * Note: only done if there is more than one recipe. */ - chain_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); - list_for_each_entry(entry, &rm->rg_list, l_entry) { - u8 i; + for (i = 0; i < recp_cnt - 1; i++) { + struct ice_aqc_recipe_content *content; + u8 result_idx; - status = ice_alloc_recipe(hw, &entry->rid); - if (status) - goto err_unroll; - - content = &buf[recps].content; - - /* Clear the result index of the located recipe, as this will be - * updated, if needed, later in the recipe creation process. - */ - tmp[0].content.result_indx = 0; - - buf[recps] = tmp[0]; - buf[recps].recipe_indx = (u8)entry->rid; - /* if the recipe is a non-root recipe RID should be programmed - * as 0 for the rules to be applied correctly. 
- */ - content->rid = 0; - memset(&content->lkup_indx, 0, - sizeof(content->lkup_indx)); - - /* All recipes use look-up index 0 to match switch ID. */ - content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; - content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); - /* Setup lkup_indx 1..4 to INVALID/ignore and set the mask - * to be 0 - */ - for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { - content->lkup_indx[i] = 0x80; - content->mask[i] = 0; - } - - for (i = 0; i < entry->r_group.n_val_pairs; i++) { - content->lkup_indx[i + 1] = entry->fv_idx[i]; - content->mask[i + 1] = cpu_to_le16(entry->fv_mask[i]); - } - - if (rm->n_grp_count > 1) { - /* Checks to see if there really is a valid result index - * that can be used. - */ - if (chain_idx >= ICE_MAX_FV_WORDS) { - ice_debug(hw, ICE_DBG_SW, "No chain index available\n"); - status = -ENOSPC; - goto err_unroll; - } - - entry->chain_idx = chain_idx; - content->result_indx = - ICE_AQ_RECIPE_RESULT_EN | - FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M, - chain_idx); - clear_bit(chain_idx, result_idx_bm); - chain_idx = find_first_bit(result_idx_bm, - ICE_MAX_FV_WORDS); - } - - /* fill recipe dependencies */ - bitmap_zero((unsigned long *)buf[recps].recipe_bitmap, - ICE_MAX_NUM_RECIPES); - set_bit(buf[recps].recipe_indx, - (unsigned long *)buf[recps].recipe_bitmap); - content->act_ctrl_fwd_priority = rm->priority; - - if (rm->need_pass_l2) - content->act_ctrl |= ICE_AQ_RECIPE_ACT_NEED_PASS_L2; - - if (rm->allow_pass_l2) - content->act_ctrl |= ICE_AQ_RECIPE_ACT_ALLOW_PASS_L2; - recps++; - } - - if (rm->n_grp_count == 1) { - rm->root_rid = buf[0].recipe_indx; - set_bit(buf[0].recipe_indx, rm->r_bitmap); - buf[0].content.rid = rm->root_rid | ICE_AQ_RECIPE_ID_IS_ROOT; - if (sizeof(buf[0].recipe_bitmap) >= sizeof(rm->r_bitmap)) { - memcpy(buf[0].recipe_bitmap, rm->r_bitmap, - sizeof(buf[0].recipe_bitmap)); - } else { - status = -EINVAL; - goto err_unroll; - } - /* Applicable only for ROOT_RECIPE, set the fwd_priority for - * the recipe which is getting created if specified - * by user. Usually any advanced switch filter, which results - * into new extraction sequence, ended up creating a new recipe - * of type ROOT and usually recipes are associated with profiles - * Switch rule referreing newly created recipe, needs to have - * either/or 'fwd' or 'join' priority, otherwise switch rule - * evaluation will not happen correctly. In other words, if - * switch rule to be evaluated on priority basis, then recipe - * needs to have priority, otherwise it will be evaluated last. - */ - buf[0].content.act_ctrl_fwd_priority = rm->priority; - } else { - struct ice_recp_grp_entry *last_chain_entry; - u16 rid, i; - - /* Allocate the last recipe that will chain the outcomes of the - * other recipes together - */ status = ice_alloc_recipe(hw, &rid); if (status) - goto err_unroll; + return status; - content = &buf[recps].content; + fill_recipe_template(&buf[i], rid, rm); - buf[recps].recipe_indx = (u8)rid; - content->rid = (u8)rid; - content->rid |= ICE_AQ_RECIPE_ID_IS_ROOT; - /* the new entry created should also be part of rg_list to - * make sure we have complete recipe + result_idx = find_first_bit(result_idx_bm, ICE_MAX_FV_WORDS); + /* Check if there really is a valid result index that can be + * used. 
*/ - last_chain_entry = devm_kzalloc(ice_hw_to_dev(hw), - sizeof(*last_chain_entry), - GFP_KERNEL); - if (!last_chain_entry) { - status = -ENOMEM; - goto err_unroll; - } - last_chain_entry->rid = rid; - memset(&content->lkup_indx, 0, sizeof(content->lkup_indx)); - /* All recipes use look-up index 0 to match switch ID. */ - content->lkup_indx[0] = ICE_AQ_SW_ID_LKUP_IDX; - content->mask[0] = cpu_to_le16(ICE_AQ_SW_ID_LKUP_MASK); - for (i = 1; i <= ICE_NUM_WORDS_RECIPE; i++) { - content->lkup_indx[i] = ICE_AQ_RECIPE_LKUP_IGNORE; - content->mask[i] = 0; + if (result_idx >= ICE_MAX_FV_WORDS) { + ice_debug(hw, ICE_DBG_SW, "No chain index available\n"); + return -ENOSPC; } + clear_bit(result_idx, result_idx_bm); - i = 1; - /* update r_bitmap with the recp that is used for chaining */ + content = &buf[i].content; + content->result_indx = ICE_AQ_RECIPE_RESULT_EN | + FIELD_PREP(ICE_AQ_RECIPE_RESULT_DATA_M, + result_idx); + + /* Set recipe association to be used for root recipe */ set_bit(rid, rm->r_bitmap); - /* this is the recipe that chains all the other recipes so it - * should not have a chaining ID to indicate the same - */ - last_chain_entry->chain_idx = ICE_INVAL_CHAIN_IND; - list_for_each_entry(entry, &rm->rg_list, l_entry) { - last_chain_entry->fv_idx[i] = entry->chain_idx; - content->lkup_indx[i] = entry->chain_idx; - content->mask[i++] = cpu_to_le16(0xFFFF); - set_bit(entry->rid, rm->r_bitmap); - } - list_add(&last_chain_entry->l_entry, &rm->rg_list); - if (sizeof(buf[recps].recipe_bitmap) >= - sizeof(rm->r_bitmap)) { - memcpy(buf[recps].recipe_bitmap, rm->r_bitmap, - sizeof(buf[recps].recipe_bitmap)); - } else { - status = -EINVAL; - goto err_unroll; - } - content->act_ctrl_fwd_priority = rm->priority; - recps++; - rm->root_rid = (u8)rid; + word = 0; + while (lookup < rm->n_ext_words && + word < ICE_NUM_WORDS_RECIPE) { + content->lkup_indx[word] = rm->fv_idx[lookup]; + content->mask[word] = cpu_to_le16(rm->fv_mask[lookup]); + + lookup++; + word++; + } + + recipe = &hw->switch_info->recp_list[rid]; + set_bit(result_idx, recipe->res_idxs); + bookkeep_recipe(recipe, &buf[i], rm); } + + /* Setup the root recipe */ + status = ice_alloc_recipe(hw, &rid); + if (status) + return status; + + recipe = &hw->switch_info->recp_list[rid]; + root = &buf[recp_cnt - 1]; + fill_recipe_template(root, rid, rm); + + /* Set recipe association, use previously set bitmap and own rid */ + set_bit(rid, rm->r_bitmap); + memcpy(root->recipe_bitmap, rm->r_bitmap, sizeof(root->recipe_bitmap)); + + /* For non-root recipes rid should be 0, for root it should be correct + * rid value ored with 0x80 (is root bit). + */ + root->content.rid = rid | ICE_AQ_RECIPE_ID_IS_ROOT; + + /* Fill remaining lookups in root recipe */ + word = 0; + while (lookup < rm->n_ext_words && + word < ICE_NUM_WORDS_RECIPE /* should always be true */) { + root->content.lkup_indx[word] = rm->fv_idx[lookup]; + root->content.mask[word] = cpu_to_le16(rm->fv_mask[lookup]); + + lookup++; + word++; + } + + /* Fill result indexes as lookups */ + i = 0; + while (i < recp_cnt - 1 && + word < ICE_NUM_WORDS_RECIPE /* should always be true */) { + root->content.lkup_indx[word] = buf[i].content.result_indx & + ~ICE_AQ_RECIPE_RESULT_EN; + root->content.mask[word] = cpu_to_le16(0xffff); + /* For bookkeeping, it is needed to mark FV index as used for + * intermediate result. 
+ */ + set_bit(root->content.lkup_indx[word], recipe->res_idxs); + + i++; + word++; + } + + rm->root_rid = rid; + bookkeep_recipe(&hw->switch_info->recp_list[rid], root, rm); + + /* Program the recipe */ status = ice_acquire_change_lock(hw, ICE_RES_WRITE); if (status) - goto err_unroll; + return status; - status = ice_aq_add_recipe(hw, buf, rm->n_grp_count, NULL); + status = ice_aq_add_recipe(hw, buf, recp_cnt, NULL); ice_release_change_lock(hw); if (status) - goto err_unroll; + return status; - /* Every recipe that just got created add it to the recipe - * book keeping list - */ - list_for_each_entry(entry, &rm->rg_list, l_entry) { - struct ice_switch_info *sw = hw->switch_info; - bool is_root, idx_found = false; - struct ice_sw_recipe *recp; - u16 idx, buf_idx = 0; - - /* find buffer index for copying some data */ - for (idx = 0; idx < rm->n_grp_count; idx++) - if (buf[idx].recipe_indx == entry->rid) { - buf_idx = idx; - idx_found = true; - } - - if (!idx_found) { - status = -EIO; - goto err_unroll; - } - - recp = &sw->recp_list[entry->rid]; - is_root = (rm->root_rid == entry->rid); - recp->is_root = is_root; - - recp->root_rid = entry->rid; - recp->big_recp = (is_root && rm->n_grp_count > 1); - - memcpy(&recp->ext_words, entry->r_group.pairs, - entry->r_group.n_val_pairs * sizeof(struct ice_fv_word)); - - memcpy(recp->r_bitmap, buf[buf_idx].recipe_bitmap, - sizeof(recp->r_bitmap)); - - /* Copy non-result fv index values and masks to recipe. This - * call will also update the result recipe bitmask. - */ - ice_collect_result_idx(&buf[buf_idx], recp); - - /* for non-root recipes, also copy to the root, this allows - * easier matching of a complete chained recipe - */ - if (!is_root) - ice_collect_result_idx(&buf[buf_idx], - &sw->recp_list[rm->root_rid]); - - recp->n_ext_words = entry->r_group.n_val_pairs; - recp->chain_idx = entry->chain_idx; - recp->priority = buf[buf_idx].content.act_ctrl_fwd_priority; - recp->n_grp_count = rm->n_grp_count; - recp->tun_type = rm->tun_type; - recp->need_pass_l2 = rm->need_pass_l2; - recp->allow_pass_l2 = rm->allow_pass_l2; - recp->recp_created = true; - } - rm->root_buf = buf; - kfree(tmp); - return status; - -err_unroll: -err_mem: - kfree(tmp); - devm_kfree(ice_hw_to_dev(hw), buf); - return status; -} - -/** - * ice_create_recipe_group - creates recipe group - * @hw: pointer to hardware structure - * @rm: recipe management list entry - * @lkup_exts: lookup elements - */ -static int -ice_create_recipe_group(struct ice_hw *hw, struct ice_sw_recipe *rm, - struct ice_prot_lkup_ext *lkup_exts) -{ - u8 recp_count = 0; - int status; - - rm->n_grp_count = 0; - - /* Create recipes for words that are marked not done by packing them - * as best fit. 
- */ - status = ice_create_first_fit_recp_def(hw, lkup_exts, - &rm->rg_list, &recp_count); - if (!status) { - rm->n_grp_count += recp_count; - rm->n_ext_words = lkup_exts->n_val_words; - memcpy(&rm->ext_words, lkup_exts->fv_words, - sizeof(rm->ext_words)); - memcpy(rm->word_masks, lkup_exts->field_mask, - sizeof(rm->word_masks)); - } - - return status; + return 0; } /* ice_get_compat_fv_bitmap - Get compatible field vector bitmap for rule @@ -5509,9 +5312,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, DECLARE_BITMAP(fv_bitmap, ICE_MAX_NUM_PROFILES); DECLARE_BITMAP(profiles, ICE_MAX_NUM_PROFILES); struct ice_prot_lkup_ext *lkup_exts; - struct ice_recp_grp_entry *r_entry; struct ice_sw_fv_list_entry *fvit; - struct ice_recp_grp_entry *r_tmp; struct ice_sw_fv_list_entry *tmp; struct ice_sw_recipe *rm; int status = 0; @@ -5553,7 +5354,6 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, * headers being programmed. */ INIT_LIST_HEAD(&rm->fv_list); - INIT_LIST_HEAD(&rm->rg_list); /* Get bitmap of field vectors (profiles) that are compatible with the * rule request; only these will be searched in the subsequent call to @@ -5565,12 +5365,10 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, if (status) goto err_unroll; - /* Group match words into recipes using preferred recipe grouping - * criteria. - */ - status = ice_create_recipe_group(hw, rm, lkup_exts); - if (status) - goto err_unroll; + /* Copy FV words and masks from lkup_exts to recipe struct. */ + rm->n_ext_words = lkup_exts->n_val_words; + memcpy(rm->ext_words, lkup_exts->fv_words, sizeof(rm->ext_words)); + memcpy(rm->word_masks, lkup_exts->field_mask, sizeof(rm->word_masks)); /* set the recipe priority if specified */ rm->priority = (u8)rinfo->priority; @@ -5581,7 +5379,7 @@ ice_add_adv_recipe(struct ice_hw *hw, struct ice_adv_lkup_elem *lkups, /* Find offsets from the field vector. Pick the first one for all the * recipes. 
*/ - status = ice_fill_fv_word_index(hw, &rm->fv_list, &rm->rg_list); + status = ice_fill_fv_word_index(hw, rm); if (status) goto err_unroll; @@ -5659,17 +5457,11 @@ err_free_recipe: } err_unroll: - list_for_each_entry_safe(r_entry, r_tmp, &rm->rg_list, l_entry) { - list_del(&r_entry->l_entry); - devm_kfree(ice_hw_to_dev(hw), r_entry); - } - list_for_each_entry_safe(fvit, tmp, &rm->fv_list, list_entry) { list_del(&fvit->list_entry); devm_kfree(ice_hw_to_dev(hw), fvit); } - devm_kfree(ice_hw_to_dev(hw), rm->root_buf); kfree(rm); err_free_lkup_exts: diff --git a/drivers/net/ethernet/intel/ice/ice_switch.h b/drivers/net/ethernet/intel/ice/ice_switch.h index ad98e98c812d..671d7a5f359f 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.h +++ b/drivers/net/ethernet/intel/ice/ice_switch.h @@ -216,7 +216,6 @@ struct ice_sw_recipe { /* For a chained recipe the root recipe is what should be used for * programming rules */ - u8 is_root; u8 root_rid; u8 recp_created; @@ -227,19 +226,8 @@ struct ice_sw_recipe { */ struct ice_fv_word ext_words[ICE_MAX_CHAIN_WORDS]; u16 word_masks[ICE_MAX_CHAIN_WORDS]; - - /* if this recipe is a collection of other recipe */ - u8 big_recp; - - /* if this recipe is part of another bigger recipe then chain index - * corresponding to this recipe - */ - u8 chain_idx; - - /* if this recipe is a collection of other recipe then count of other - * recipes and recipe IDs of those recipes - */ - u8 n_grp_count; + u8 fv_idx[ICE_MAX_CHAIN_WORDS]; + u16 fv_mask[ICE_MAX_CHAIN_WORDS]; /* Bit map specifying the IDs associated with this group of recipe */ DECLARE_BITMAP(r_bitmap, ICE_MAX_NUM_RECIPES); @@ -272,10 +260,6 @@ struct ice_sw_recipe { u8 need_pass_l2:1; u8 allow_pass_l2:1; - struct list_head rg_list; - - /* AQ buffer associated with this recipe */ - struct ice_aqc_recipe_data_elem *root_buf; /* This struct saves the fv_words for a given lookup */ struct ice_prot_lkup_ext lkup_exts; }; diff --git a/drivers/net/ethernet/intel/ice/ice_tc_lib.c b/drivers/net/ethernet/intel/ice/ice_tc_lib.c index 8bd24b33f3a6..e6923f8121a9 100644 --- a/drivers/net/ethernet/intel/ice/ice_tc_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_tc_lib.c @@ -1353,6 +1353,7 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, struct ice_tc_flower_fltr *fltr) { struct ice_tc_flower_lyr_2_4_hdrs *headers = &fltr->outer_headers; + struct netlink_ext_ack *extack = fltr->extack; struct flow_match_control enc_control; fltr->tunnel_type = ice_tc_tun_get_type(dev); @@ -1373,6 +1374,9 @@ ice_parse_tunnel_attr(struct net_device *dev, struct flow_rule *rule, flow_rule_match_enc_control(rule, &enc_control); + if (flow_rule_has_enc_control_flags(enc_control.mask->flags, extack)) + return -EOPNOTSUPP; + if (enc_control.key->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { struct flow_match_ipv4_addrs match; diff --git a/drivers/net/ethernet/intel/ice/ice_trace.h b/drivers/net/ethernet/intel/ice/ice_trace.h index 244cddd2a9ea..07aab6e130cd 100644 --- a/drivers/net/ethernet/intel/ice/ice_trace.h +++ b/drivers/net/ethernet/intel/ice/ice_trace.h @@ -330,6 +330,24 @@ DEFINE_EVENT(ice_esw_br_port_template, TP_ARGS(port) ); +DECLARE_EVENT_CLASS(ice_switch_stats_template, + TP_PROTO(struct ice_switch_info *sw_info), + TP_ARGS(sw_info), + TP_STRUCT__entry(__field(u16, rule_cnt) + __field(u8, recp_cnt)), + TP_fast_assign(__entry->rule_cnt = sw_info->rule_cnt; + __entry->recp_cnt = sw_info->recp_cnt;), + TP_printk("rules=%u recipes=%u", + __entry->rule_cnt, + __entry->recp_cnt) +); + 
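The DECLARE_EVENT_CLASS() just above defines the trace template once; each DEFINE_EVENT() that follows stamps out a real tracepoint sharing that layout and format string. As a minimal sketch of the generated hook in use (the wrapper below is hypothetical and not part of this patch; trace_ice_aq_sw_rules() is the static inline that the following DEFINE_EVENT emits):

	/* Sketch only: hypothetical caller of the generated tracepoint. */
	static void ice_rule_cnt_update(struct ice_switch_info *sw, int delta)
	{
		sw->rule_cnt += delta;		/* counter added by this series */
		trace_ice_aq_sw_rules(sw);	/* logs "rules=%u recipes=%u" */
	}
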
+DEFINE_EVENT(ice_switch_stats_template, + ice_aq_sw_rules, + TP_PROTO(struct ice_switch_info *sw_info), + TP_ARGS(sw_info) +); + /* End tracepoints */ #endif /* _ICE_TRACE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index eef397e5baa0..96037bef3e78 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -71,6 +71,14 @@ enum ice_aq_res_ids { ICE_GLOBAL_CFG_LOCK_RES_ID }; +enum ice_fec_stats_types { + ICE_FEC_CORR_LOW, + ICE_FEC_CORR_HIGH, + ICE_FEC_UNCORR_LOW, + ICE_FEC_UNCORR_HIGH, + ICE_FEC_MAX +}; + /* FW update timeout definitions are in milliseconds */ #define ICE_NVM_TIMEOUT 180000 #define ICE_CHANGE_LOCK_TIMEOUT 1000 @@ -322,12 +330,14 @@ enum ice_time_ref_freq { ICE_TIME_REF_FREQ_156_250 = 4, ICE_TIME_REF_FREQ_245_760 = 5, - NUM_ICE_TIME_REF_FREQ + NUM_ICE_TIME_REF_FREQ, + + ICE_TIME_REF_FREQ_INVALID = -1, }; /* Clock source specification */ enum ice_clk_src { - ICE_CLK_SRC_TCX0 = 0, /* Temperature compensated oscillator */ + ICE_CLK_SRC_TCXO = 0, /* Temperature compensated oscillator */ ICE_CLK_SRC_TIME_REF = 1, /* Use TIME_REF reference clock */ NUM_ICE_CLK_SRC @@ -372,6 +382,15 @@ struct ice_ts_dev_info { u8 ts_ll_int_read; }; +#define ICE_NAC_TOPO_PRIMARY_M BIT(0) +#define ICE_NAC_TOPO_DUAL_M BIT(1) +#define ICE_NAC_TOPO_ID_M GENMASK(0xF, 0) + +struct ice_nac_topology { + u32 mode; + u8 id; +}; + /* Function specific capabilities */ struct ice_hw_func_caps { struct ice_hw_common_caps common_cap; @@ -393,6 +412,7 @@ struct ice_hw_dev_caps { u32 num_flow_director_fltr; /* Number of FD filters available */ struct ice_ts_dev_info ts_dev_info; u32 num_funcs; + struct ice_nac_topology nac_topo; /* bitmap of supported sensors * bit 0 - internal temperature sensor * bit 31:1 - Reserved @@ -718,6 +738,7 @@ struct ice_port_info { u16 sw_id; /* Initial switch ID belongs to port */ u16 pf_vf_num; u8 port_state; + u8 local_fwd_mode; #define ICE_SCHED_PORT_STATE_INIT 0x0 #define ICE_SCHED_PORT_STATE_READY 0x1 u8 lport; @@ -741,6 +762,8 @@ struct ice_switch_info { struct ice_sw_recipe *recp_list; u16 prof_res_bm_init; u16 max_used_prof_index; + u16 rule_cnt; + u8 recp_cnt; DECLARE_BITMAP(prof_res_bm[ICE_MAX_NUM_PROFILES], ICE_MAX_FV_WORDS); }; @@ -820,11 +843,43 @@ struct ice_mbx_data { u16 async_watermark_val; }; +#define ICE_PORTS_PER_QUAD 4 +#define ICE_GET_QUAD_NUM(port) ((port) / ICE_PORTS_PER_QUAD) + +struct ice_eth56g_params { + u8 num_phys; + u8 phy_addr[2]; + bool onestep_ena; + bool sfd_ena; + u32 peer_delay; +}; + +union ice_phy_params { + struct ice_eth56g_params eth56g; +}; + /* PHY model */ enum ice_phy_model { ICE_PHY_UNSUP = -1, - ICE_PHY_E810 = 1, + ICE_PHY_E810 = 1, ICE_PHY_E82X, + ICE_PHY_ETH56G, +}; + +/* Global Link Topology */ +enum ice_global_link_topo { + ICE_LINK_TOPO_UP_TO_2_LINKS, + ICE_LINK_TOPO_UP_TO_4_LINKS, + ICE_LINK_TOPO_UP_TO_8_LINKS, + ICE_LINK_TOPO_RESERVED, +}; + +struct ice_ptp_hw { + enum ice_phy_model phy_model; + union ice_phy_params phy; + u8 num_lports; + u8 ports_per_phy; + bool is_2x50g_muxed_topo; }; /* Port hardware description */ @@ -848,7 +903,6 @@ struct ice_hw { u8 revision_id; u8 pf_id; /* device profile info */ - enum ice_phy_model phy_model; u16 max_burst_size; /* driver sets this value */ @@ -911,12 +965,7 @@ struct ice_hw { /* INTRL granularity in 1 us */ u8 intrl_gran; -#define ICE_MAX_QUAD 2 -#define ICE_QUADS_PER_PHY_E82X 2 -#define ICE_PORTS_PER_PHY_E82X 8 -#define ICE_PORTS_PER_QUAD 4 -#define ICE_PORTS_PER_PHY_E810 4 
-#define ICE_NUM_EXTERNAL_PORTS		(ICE_MAX_QUAD * ICE_PORTS_PER_QUAD)
+	struct ice_ptp_hw ptp;
 
 	/* Active package version (currently active) */
 	struct ice_pkg_ver active_pkg_ver;
diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
index 48a8d462d76a..5635e9da2212 100644
--- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c
@@ -948,7 +948,7 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags)
 		goto out_unlock;
 	}
 
-	ice_eswitch_update_repr(vf->repr_id, vsi);
+	ice_eswitch_update_repr(&vf->repr_id, vsi);
 
 	/* if the VF has been reset allow it to come up again */
 	ice_mbx_clear_malvf(&vf->mbx_info);
diff --git a/drivers/net/ethernet/intel/idpf/Kconfig b/drivers/net/ethernet/intel/idpf/Kconfig
new file mode 100644
index 000000000000..1addd663acad
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/Kconfig
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copyright (C) 2024 Intel Corporation
+
+config IDPF
+	tristate "Intel(R) Infrastructure Data Path Function Support"
+	depends on PCI_MSI
+	select DIMLIB
+	select LIBETH
+	help
+	  This driver supports Intel(R) Infrastructure Data Path Function
+	  devices.
+
+	  To compile this driver as a module, choose M here. The module
+	  will be called idpf.
+
+if IDPF
+
+config IDPF_SINGLEQ
+	bool "idpf singleq support"
+	help
+	  This option enables support for legacy single Rx/Tx queues w/no
+	  completion and fill queues. Only enable if you have hardware which
+	  wants to work in this mode as it increases the driver size and adds
+	  runtime checks on hotpath.
+
+endif # IDPF
diff --git a/drivers/net/ethernet/intel/idpf/Makefile b/drivers/net/ethernet/intel/idpf/Makefile
index 6844ead2f3ac..2ce01a0b5898 100644
--- a/drivers/net/ethernet/intel/idpf/Makefile
+++ b/drivers/net/ethernet/intel/idpf/Makefile
@@ -12,7 +12,8 @@ idpf-y := \
 	idpf_ethtool.o \
 	idpf_lib.o \
 	idpf_main.o \
-	idpf_singleq_txrx.o \
 	idpf_txrx.o \
 	idpf_virtchnl.o \
 	idpf_vf_dev.o
+
+idpf-$(CONFIG_IDPF_SINGLEQ) += idpf_singleq_txrx.o
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index e7a036538246..2c31ad87587a 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -17,10 +17,8 @@ struct idpf_vport_max_q;
 #include
 #include
 #include
-#include
 
 #include "virtchnl2.h"
-#include "idpf_lan_txrx.h"
 #include "idpf_txrx.h"
 #include "idpf_controlq.h"
 
@@ -266,7 +264,6 @@ struct idpf_port_stats {
  *		      the worst case.
  * @num_bufqs_per_qgrp: Buffer queues per RX queue in a given grouping
  * @bufq_desc_count: Buffer queue descriptor count
- * @bufq_size: Size of buffers in ring (e.g. 2K, 4K, etc)
  * @num_rxq_grp: Number of RX queues in a group
  * @rxq_grps: Total number of RX groups. Number of groups * number of RX per
  *	      group will yield total number of RX queues.
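Because idpf_singleq_txrx.o is now linked only when CONFIG_IDPF_SINGLEQ is set, code in the always-built objects must avoid undefined references when the option is off. The usual kernel idiom, which the queue-model check later in this header relies on, is an IS_ENABLED() guard that lets the compiler drop the singleq branch entirely. A minimal sketch under that assumption (the helper names below are illustrative stand-ins, not the driver's real symbols):

	#include <linux/kconfig.h>

	/* Hypothetical stand-ins for the two Tx clean implementations;
	 * the singleq one would live in the conditionally built object.
	 */
	bool sketch_tx_splitq_clean(void *q, int budget);
	bool sketch_tx_singleq_clean(void *q, int budget);

	/* With CONFIG_IDPF_SINGLEQ=n, IS_ENABLED() folds to constant 0,
	 * the singleq branch is eliminated as dead code, and no reference
	 * to the singleq-only symbol survives into the object file, so
	 * the link succeeds without idpf_singleq_txrx.o.
	 */
	static inline bool sketch_tx_clean(void *q, int budget, bool splitq)
	{
		if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) || splitq)
			return sketch_tx_splitq_clean(q, budget);

		return sketch_tx_singleq_clean(q, budget);
	}

In the series itself, this dispatch is centralized in idpf_is_queue_model_split(), whose IS_ENABLED() change appears in the next hunk.
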
@@ -302,7 +299,7 @@ struct idpf_vport { u16 num_txq_grp; struct idpf_txq_group *txq_grps; u32 txq_model; - struct idpf_queue **txqs; + struct idpf_tx_queue **txqs; bool crc_enable; u16 num_rxq; @@ -310,11 +307,10 @@ struct idpf_vport { u32 rxq_desc_count; u8 num_bufqs_per_qgrp; u32 bufq_desc_count[IDPF_MAX_BUFQS_PER_RXQ_GRP]; - u32 bufq_size[IDPF_MAX_BUFQS_PER_RXQ_GRP]; u16 num_rxq_grp; struct idpf_rxq_group *rxq_grps; u32 rxq_model; - struct idpf_rx_ptype_decoded rx_ptype_lkup[IDPF_RX_MAX_PTYPE]; + struct libeth_rx_pt *rx_ptype_lkup; struct idpf_adapter *adapter; struct net_device *netdev; @@ -601,7 +597,8 @@ struct idpf_adapter { */ static inline int idpf_is_queue_model_split(u16 q_model) { - return q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT; + return !IS_ENABLED(CONFIG_IDPF_SINGLEQ) || + q_model == VIRTCHNL2_QUEUE_MODEL_SPLIT; } #define idpf_is_cap_ena(adapter, field, flag) \ diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c index 1885ba618981..3806ddd3ce4a 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c +++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c @@ -437,22 +437,24 @@ struct idpf_stats { .stat_offset = offsetof(_type, _stat) \ } -/* Helper macro for defining some statistics related to queues */ -#define IDPF_QUEUE_STAT(_name, _stat) \ - IDPF_STAT(struct idpf_queue, _name, _stat) +/* Helper macros for defining some statistics related to queues */ +#define IDPF_RX_QUEUE_STAT(_name, _stat) \ + IDPF_STAT(struct idpf_rx_queue, _name, _stat) +#define IDPF_TX_QUEUE_STAT(_name, _stat) \ + IDPF_STAT(struct idpf_tx_queue, _name, _stat) /* Stats associated with a Tx queue */ static const struct idpf_stats idpf_gstrings_tx_queue_stats[] = { - IDPF_QUEUE_STAT("pkts", q_stats.tx.packets), - IDPF_QUEUE_STAT("bytes", q_stats.tx.bytes), - IDPF_QUEUE_STAT("lso_pkts", q_stats.tx.lso_pkts), + IDPF_TX_QUEUE_STAT("pkts", q_stats.packets), + IDPF_TX_QUEUE_STAT("bytes", q_stats.bytes), + IDPF_TX_QUEUE_STAT("lso_pkts", q_stats.lso_pkts), }; /* Stats associated with an Rx queue */ static const struct idpf_stats idpf_gstrings_rx_queue_stats[] = { - IDPF_QUEUE_STAT("pkts", q_stats.rx.packets), - IDPF_QUEUE_STAT("bytes", q_stats.rx.bytes), - IDPF_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rx.rsc_pkts), + IDPF_RX_QUEUE_STAT("pkts", q_stats.packets), + IDPF_RX_QUEUE_STAT("bytes", q_stats.bytes), + IDPF_RX_QUEUE_STAT("rx_gro_hw_pkts", q_stats.rsc_pkts), }; #define IDPF_TX_QUEUE_STATS_LEN ARRAY_SIZE(idpf_gstrings_tx_queue_stats) @@ -563,8 +565,6 @@ static void idpf_get_stat_strings(struct net_device *netdev, u8 *data) for (i = 0; i < vport_config->max_q.max_rxq; i++) idpf_add_qstat_strings(&data, idpf_gstrings_rx_queue_stats, "rx", i); - - page_pool_ethtool_stats_get_strings(data); } /** @@ -598,7 +598,6 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset) struct idpf_netdev_priv *np = netdev_priv(netdev); struct idpf_vport_config *vport_config; u16 max_txq, max_rxq; - unsigned int size; if (sset != ETH_SS_STATS) return -EINVAL; @@ -617,11 +616,8 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset) max_txq = vport_config->max_q.max_txq; max_rxq = vport_config->max_q.max_rxq; - size = IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) + + return IDPF_PORT_STATS_LEN + (IDPF_TX_QUEUE_STATS_LEN * max_txq) + (IDPF_RX_QUEUE_STATS_LEN * max_rxq); - size += page_pool_ethtool_stats_get_count(); - - return size; } /** @@ -633,7 +629,7 @@ static int idpf_get_sset_count(struct net_device *netdev, int sset) * Copies the 
stat data defined by the pointer and stat structure pair into * the memory supplied as data. If the pointer is null, data will be zero'd. */ -static void idpf_add_one_ethtool_stat(u64 *data, void *pstat, +static void idpf_add_one_ethtool_stat(u64 *data, const void *pstat, const struct idpf_stats *stat) { char *p; @@ -671,6 +667,7 @@ static void idpf_add_one_ethtool_stat(u64 *data, void *pstat, * idpf_add_queue_stats - copy queue statistics into supplied buffer * @data: ethtool stats buffer * @q: the queue to copy + * @type: type of the queue * * Queue statistics must be copied while protected by u64_stats_fetch_begin, * so we can't directly use idpf_add_ethtool_stats. Assumes that queue stats @@ -681,19 +678,23 @@ static void idpf_add_one_ethtool_stat(u64 *data, void *pstat, * * This function expects to be called while under rcu_read_lock(). */ -static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q) +static void idpf_add_queue_stats(u64 **data, const void *q, + enum virtchnl2_queue_type type) { + const struct u64_stats_sync *stats_sync; const struct idpf_stats *stats; unsigned int start; unsigned int size; unsigned int i; - if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) { + if (type == VIRTCHNL2_QUEUE_TYPE_RX) { size = IDPF_RX_QUEUE_STATS_LEN; stats = idpf_gstrings_rx_queue_stats; + stats_sync = &((const struct idpf_rx_queue *)q)->stats_sync; } else { size = IDPF_TX_QUEUE_STATS_LEN; stats = idpf_gstrings_tx_queue_stats; + stats_sync = &((const struct idpf_tx_queue *)q)->stats_sync; } /* To avoid invalid statistics values, ensure that we keep retrying @@ -701,10 +702,10 @@ static void idpf_add_queue_stats(u64 **data, struct idpf_queue *q) * u64_stats_fetch_retry. */ do { - start = u64_stats_fetch_begin(&q->stats_sync); + start = u64_stats_fetch_begin(stats_sync); for (i = 0; i < size; i++) idpf_add_one_ethtool_stat(&(*data)[i], q, &stats[i]); - } while (u64_stats_fetch_retry(&q->stats_sync, start)); + } while (u64_stats_fetch_retry(stats_sync, start)); /* Once we successfully copy the stats in, update the data pointer */ *data += size; @@ -793,7 +794,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) for (j = 0; j < num_rxq; j++) { u64 hw_csum_err, hsplit, hsplit_hbo, bad_descs; struct idpf_rx_queue_stats *stats; - struct idpf_queue *rxq; + struct idpf_rx_queue *rxq; unsigned int start; if (idpf_is_queue_model_split(vport->rxq_model)) @@ -807,7 +808,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) do { start = u64_stats_fetch_begin(&rxq->stats_sync); - stats = &rxq->q_stats.rx; + stats = &rxq->q_stats; hw_csum_err = u64_stats_read(&stats->hw_csum_err); hsplit = u64_stats_read(&stats->hsplit_pkts); hsplit_hbo = u64_stats_read(&stats->hsplit_buf_ovf); @@ -828,7 +829,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) for (j = 0; j < txq_grp->num_txq; j++) { u64 linearize, qbusy, skb_drops, dma_map_errs; - struct idpf_queue *txq = txq_grp->txqs[j]; + struct idpf_tx_queue *txq = txq_grp->txqs[j]; struct idpf_tx_queue_stats *stats; unsigned int start; @@ -838,7 +839,7 @@ static void idpf_collect_queue_stats(struct idpf_vport *vport) do { start = u64_stats_fetch_begin(&txq->stats_sync); - stats = &txq->q_stats.tx; + stats = &txq->q_stats; linearize = u64_stats_read(&stats->linearize); qbusy = u64_stats_read(&stats->q_busy); skb_drops = u64_stats_read(&stats->skb_drops); @@ -869,7 +870,6 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, { struct idpf_netdev_priv *np = netdev_priv(netdev); struct idpf_vport_config 
*vport_config; - struct page_pool_stats pp_stats = { }; struct idpf_vport *vport; unsigned int total = 0; unsigned int i, j; @@ -896,12 +896,12 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, qtype = VIRTCHNL2_QUEUE_TYPE_TX; for (j = 0; j < txq_grp->num_txq; j++, total++) { - struct idpf_queue *txq = txq_grp->txqs[j]; + struct idpf_tx_queue *txq = txq_grp->txqs[j]; if (!txq) idpf_add_empty_queue_stats(&data, qtype); else - idpf_add_queue_stats(&data, txq); + idpf_add_queue_stats(&data, txq, qtype); } } @@ -929,7 +929,7 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, num_rxq = rxq_grp->singleq.num_rxq; for (j = 0; j < num_rxq; j++, total++) { - struct idpf_queue *rxq; + struct idpf_rx_queue *rxq; if (is_splitq) rxq = &rxq_grp->splitq.rxq_sets[j]->rxq; @@ -938,93 +938,77 @@ static void idpf_get_ethtool_stats(struct net_device *netdev, if (!rxq) idpf_add_empty_queue_stats(&data, qtype); else - idpf_add_queue_stats(&data, rxq); - - /* In splitq mode, don't get page pool stats here since - * the pools are attached to the buffer queues - */ - if (is_splitq) - continue; - - if (rxq) - page_pool_get_stats(rxq->pp, &pp_stats); - } - } - - for (i = 0; i < vport->num_rxq_grp; i++) { - for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { - struct idpf_queue *rxbufq = - &vport->rxq_grps[i].splitq.bufq_sets[j].bufq; - - page_pool_get_stats(rxbufq->pp, &pp_stats); + idpf_add_queue_stats(&data, rxq, qtype); } } for (; total < vport_config->max_q.max_rxq; total++) idpf_add_empty_queue_stats(&data, VIRTCHNL2_QUEUE_TYPE_RX); - page_pool_ethtool_stats_get(data, &pp_stats); - rcu_read_unlock(); idpf_vport_ctrl_unlock(netdev); } /** - * idpf_find_rxq - find rxq from q index + * idpf_find_rxq_vec - find rxq vector from q index * @vport: virtual port associated to queue * @q_num: q index used to find queue * - * returns pointer to rx queue + * returns pointer to rx vector */ -static struct idpf_queue *idpf_find_rxq(struct idpf_vport *vport, int q_num) +static struct idpf_q_vector *idpf_find_rxq_vec(const struct idpf_vport *vport, + int q_num) { int q_grp, q_idx; if (!idpf_is_queue_model_split(vport->rxq_model)) - return vport->rxq_grps->singleq.rxqs[q_num]; + return vport->rxq_grps->singleq.rxqs[q_num]->q_vector; q_grp = q_num / IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; q_idx = q_num % IDPF_DFLT_SPLITQ_RXQ_PER_GROUP; - return &vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq; + return vport->rxq_grps[q_grp].splitq.rxq_sets[q_idx]->rxq.q_vector; } /** - * idpf_find_txq - find txq from q index + * idpf_find_txq_vec - find txq vector from q index * @vport: virtual port associated to queue * @q_num: q index used to find queue * - * returns pointer to tx queue + * returns pointer to tx vector */ -static struct idpf_queue *idpf_find_txq(struct idpf_vport *vport, int q_num) +static struct idpf_q_vector *idpf_find_txq_vec(const struct idpf_vport *vport, + int q_num) { int q_grp; if (!idpf_is_queue_model_split(vport->txq_model)) - return vport->txqs[q_num]; + return vport->txqs[q_num]->q_vector; q_grp = q_num / IDPF_DFLT_SPLITQ_TXQ_PER_GROUP; - return vport->txq_grps[q_grp].complq; + return vport->txq_grps[q_grp].complq->q_vector; } /** * __idpf_get_q_coalesce - get ITR values for specific queue * @ec: ethtool structure to fill with driver's coalesce settings - * @q: quuee of Rx or Tx + * @q_vector: queue vector corresponding to this queue + * @type: queue type */ static void __idpf_get_q_coalesce(struct ethtool_coalesce *ec, - struct idpf_queue *q) + const struct idpf_q_vector *q_vector, + enum 
virtchnl2_queue_type type) { - if (q->q_type == VIRTCHNL2_QUEUE_TYPE_RX) { + if (type == VIRTCHNL2_QUEUE_TYPE_RX) { ec->use_adaptive_rx_coalesce = - IDPF_ITR_IS_DYNAMIC(q->q_vector->rx_intr_mode); - ec->rx_coalesce_usecs = q->q_vector->rx_itr_value; + IDPF_ITR_IS_DYNAMIC(q_vector->rx_intr_mode); + ec->rx_coalesce_usecs = q_vector->rx_itr_value; } else { ec->use_adaptive_tx_coalesce = - IDPF_ITR_IS_DYNAMIC(q->q_vector->tx_intr_mode); - ec->tx_coalesce_usecs = q->q_vector->tx_itr_value; + IDPF_ITR_IS_DYNAMIC(q_vector->tx_intr_mode); + ec->tx_coalesce_usecs = q_vector->tx_itr_value; } } @@ -1040,8 +1024,8 @@ static int idpf_get_q_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, u32 q_num) { - struct idpf_netdev_priv *np = netdev_priv(netdev); - struct idpf_vport *vport; + const struct idpf_netdev_priv *np = netdev_priv(netdev); + const struct idpf_vport *vport; int err = 0; idpf_vport_ctrl_lock(netdev); @@ -1056,10 +1040,12 @@ static int idpf_get_q_coalesce(struct net_device *netdev, } if (q_num < vport->num_rxq) - __idpf_get_q_coalesce(ec, idpf_find_rxq(vport, q_num)); + __idpf_get_q_coalesce(ec, idpf_find_rxq_vec(vport, q_num), + VIRTCHNL2_QUEUE_TYPE_RX); if (q_num < vport->num_txq) - __idpf_get_q_coalesce(ec, idpf_find_txq(vport, q_num)); + __idpf_get_q_coalesce(ec, idpf_find_txq_vec(vport, q_num), + VIRTCHNL2_QUEUE_TYPE_TX); unlock_mutex: idpf_vport_ctrl_unlock(netdev); @@ -1103,16 +1089,15 @@ static int idpf_get_per_q_coalesce(struct net_device *netdev, u32 q_num, /** * __idpf_set_q_coalesce - set ITR values for specific queue * @ec: ethtool structure from user to update ITR settings - * @q: queue for which itr values has to be set + * @qv: queue vector for which itr values has to be set * @is_rxq: is queue type rx * * Returns 0 on success, negative otherwise. */ -static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, - struct idpf_queue *q, bool is_rxq) +static int __idpf_set_q_coalesce(const struct ethtool_coalesce *ec, + struct idpf_q_vector *qv, bool is_rxq) { u32 use_adaptive_coalesce, coalesce_usecs; - struct idpf_q_vector *qv = q->q_vector; bool is_dim_ena = false; u16 itr_val; @@ -1128,7 +1113,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, itr_val = qv->tx_itr_value; } if (coalesce_usecs != itr_val && use_adaptive_coalesce) { - netdev_err(q->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n"); + netdev_err(qv->vport->netdev, "Cannot set coalesce usecs if adaptive enabled\n"); return -EINVAL; } @@ -1137,7 +1122,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, return 0; if (coalesce_usecs > IDPF_ITR_MAX) { - netdev_err(q->vport->netdev, + netdev_err(qv->vport->netdev, "Invalid value, %d-usecs range is 0-%d\n", coalesce_usecs, IDPF_ITR_MAX); @@ -1146,7 +1131,7 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, if (coalesce_usecs % 2) { coalesce_usecs--; - netdev_info(q->vport->netdev, + netdev_info(qv->vport->netdev, "HW only supports even ITR values, ITR rounded to %d\n", coalesce_usecs); } @@ -1185,15 +1170,16 @@ static int __idpf_set_q_coalesce(struct ethtool_coalesce *ec, * * Return 0 on success, and negative on failure */ -static int idpf_set_q_coalesce(struct idpf_vport *vport, - struct ethtool_coalesce *ec, +static int idpf_set_q_coalesce(const struct idpf_vport *vport, + const struct ethtool_coalesce *ec, int q_num, bool is_rxq) { - struct idpf_queue *q; + struct idpf_q_vector *qv; - q = is_rxq ? idpf_find_rxq(vport, q_num) : idpf_find_txq(vport, q_num); + qv = is_rxq ? 
idpf_find_rxq_vec(vport, q_num) : + idpf_find_txq_vec(vport, q_num); - if (q && __idpf_set_q_coalesce(ec, q, is_rxq)) + if (qv && __idpf_set_q_coalesce(ec, qv, is_rxq)) return -EINVAL; return 0; diff --git a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h index a5752dcab888..8c7f8ef8f1a1 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_lan_txrx.h @@ -4,6 +4,8 @@ #ifndef _IDPF_LAN_TXRX_H_ #define _IDPF_LAN_TXRX_H_ +#include + enum idpf_rss_hash { IDPF_HASH_INVALID = 0, /* Values 1 - 28 are reserved for future use */ diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c index f1ee5584e8fa..5dbf2b4ba1b0 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_lib.c +++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c @@ -4,8 +4,7 @@ #include "idpf.h" #include "idpf_virtchnl.h" -static const struct net_device_ops idpf_netdev_ops_splitq; -static const struct net_device_ops idpf_netdev_ops_singleq; +static const struct net_device_ops idpf_netdev_ops; /** * idpf_init_vector_stack - Fill the MSIX vector stack with vector index @@ -69,7 +68,7 @@ static void idpf_deinit_vector_stack(struct idpf_adapter *adapter) static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter) { clear_bit(IDPF_MB_INTR_MODE, adapter->flags); - free_irq(adapter->msix_entries[0].vector, adapter); + kfree(free_irq(adapter->msix_entries[0].vector, adapter)); queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0); } @@ -124,15 +123,14 @@ static void idpf_mb_irq_enable(struct idpf_adapter *adapter) */ static int idpf_mb_intr_req_irq(struct idpf_adapter *adapter) { - struct idpf_q_vector *mb_vector = &adapter->mb_vector; int irq_num, mb_vidx = 0, err; + char *name; irq_num = adapter->msix_entries[mb_vidx].vector; - mb_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d", - dev_driver_string(&adapter->pdev->dev), - "Mailbox", mb_vidx); - err = request_irq(irq_num, adapter->irq_mb_handler, 0, - mb_vector->name, adapter); + name = kasprintf(GFP_KERNEL, "%s-%s-%d", + dev_driver_string(&adapter->pdev->dev), + "Mailbox", mb_vidx); + err = request_irq(irq_num, adapter->irq_mb_handler, 0, name, adapter); if (err) { dev_err(&adapter->pdev->dev, "IRQ request for mailbox failed, error: %d\n", err); @@ -765,10 +763,7 @@ static int idpf_cfg_netdev(struct idpf_vport *vport) } /* assign netdev_ops */ - if (idpf_is_queue_model_split(vport->txq_model)) - netdev->netdev_ops = &idpf_netdev_ops_splitq; - else - netdev->netdev_ops = &idpf_netdev_ops_singleq; + netdev->netdev_ops = &idpf_netdev_ops; /* setup watchdog timeout value to be 5 second */ netdev->watchdog_timeo = 5 * HZ; @@ -946,6 +941,9 @@ static void idpf_decfg_netdev(struct idpf_vport *vport) { struct idpf_adapter *adapter = vport->adapter; + kfree(vport->rx_ptype_lkup); + vport->rx_ptype_lkup = NULL; + unregister_netdev(vport->netdev); free_netdev(vport->netdev); vport->netdev = NULL; @@ -1318,14 +1316,14 @@ static void idpf_rx_init_buf_tail(struct idpf_vport *vport) if (idpf_is_queue_model_split(vport->rxq_model)) { for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { - struct idpf_queue *q = + const struct idpf_buf_queue *q = &grp->splitq.bufq_sets[j].bufq; writel(q->next_to_alloc, q->tail); } } else { for (j = 0; j < grp->singleq.num_rxq; j++) { - struct idpf_queue *q = + const struct idpf_rx_queue *q = grp->singleq.rxqs[j]; writel(q->next_to_alloc, q->tail); @@ -1855,7 +1853,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, enum 
idpf_vport_state current_state = np->state; struct idpf_adapter *adapter = vport->adapter; struct idpf_vport *new_vport; - int err, i; + int err; /* If the system is low on memory, we can end up in bad state if we * free all the memory for queue resources and try to allocate them @@ -1929,46 +1927,6 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport, */ memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps)); - /* Since idpf_vport_queues_alloc was called with new_port, the queue - * back pointers are currently pointing to the local new_vport. Reset - * the backpointers to the original vport here - */ - for (i = 0; i < vport->num_txq_grp; i++) { - struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; - int j; - - tx_qgrp->vport = vport; - for (j = 0; j < tx_qgrp->num_txq; j++) - tx_qgrp->txqs[j]->vport = vport; - - if (idpf_is_queue_model_split(vport->txq_model)) - tx_qgrp->complq->vport = vport; - } - - for (i = 0; i < vport->num_rxq_grp; i++) { - struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; - struct idpf_queue *q; - u16 num_rxq; - int j; - - rx_qgrp->vport = vport; - for (j = 0; j < vport->num_bufqs_per_qgrp; j++) - rx_qgrp->splitq.bufq_sets[j].bufq.vport = vport; - - if (idpf_is_queue_model_split(vport->rxq_model)) - num_rxq = rx_qgrp->splitq.num_rxq_sets; - else - num_rxq = rx_qgrp->singleq.num_rxq; - - for (j = 0; j < num_rxq; j++) { - if (idpf_is_queue_model_split(vport->rxq_model)) - q = &rx_qgrp->splitq.rxq_sets[j]->rxq; - else - q = rx_qgrp->singleq.rxqs[j]; - q->vport = vport; - } - } - if (reset_cause == IDPF_SR_Q_CHANGE) idpf_vport_alloc_vec_indexes(vport); @@ -2393,24 +2351,10 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem) mem->pa = 0; } -static const struct net_device_ops idpf_netdev_ops_splitq = { +static const struct net_device_ops idpf_netdev_ops = { .ndo_open = idpf_open, .ndo_stop = idpf_stop, - .ndo_start_xmit = idpf_tx_splitq_start, - .ndo_features_check = idpf_features_check, - .ndo_set_rx_mode = idpf_set_rx_mode, - .ndo_validate_addr = eth_validate_addr, - .ndo_set_mac_address = idpf_set_mac, - .ndo_change_mtu = idpf_change_mtu, - .ndo_get_stats64 = idpf_get_stats64, - .ndo_set_features = idpf_set_features, - .ndo_tx_timeout = idpf_tx_timeout, -}; - -static const struct net_device_ops idpf_netdev_ops_singleq = { - .ndo_open = idpf_open, - .ndo_stop = idpf_stop, - .ndo_start_xmit = idpf_tx_singleq_start, + .ndo_start_xmit = idpf_tx_start, .ndo_features_check = idpf_features_check, .ndo_set_rx_mode = idpf_set_rx_mode, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c index f784eea044bd..db476b3314c8 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_main.c +++ b/drivers/net/ethernet/intel/idpf/idpf_main.c @@ -8,6 +8,7 @@ #define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver" MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_IMPORT_NS(LIBETH); MODULE_LICENSE("GPL"); /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c index 27b93592c4ba..fe64febf7436 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_singleq_txrx.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2023 Intel Corporation */ +#include + #include "idpf.h" /** @@ -186,7 +188,7 @@ static int idpf_tx_singleq_csum(struct sk_buff *skb, * and gets a physical address for each memory location and programs * 
it and the length into the transmit base mode descriptor. */ -static void idpf_tx_singleq_map(struct idpf_queue *tx_q, +static void idpf_tx_singleq_map(struct idpf_tx_queue *tx_q, struct idpf_tx_buf *first, struct idpf_tx_offload_params *offloads) { @@ -205,12 +207,12 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q, data_len = skb->data_len; size = skb_headlen(skb); - tx_desc = IDPF_BASE_TX_DESC(tx_q, i); + tx_desc = &tx_q->base_tx[i]; dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE); /* write each descriptor with CRC bit */ - if (tx_q->vport->crc_enable) + if (idpf_queue_has(CRC_EN, tx_q)) td_cmd |= IDPF_TX_DESC_CMD_ICRC; for (frag = &skb_shinfo(skb)->frags[0];; frag++) { @@ -239,7 +241,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q, i++; if (i == tx_q->desc_count) { - tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + tx_desc = &tx_q->base_tx[0]; i = 0; } @@ -259,7 +261,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q, i++; if (i == tx_q->desc_count) { - tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + tx_desc = &tx_q->base_tx[0]; i = 0; } @@ -285,7 +287,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q, /* set next_to_watch value indicating a packet is present */ first->next_to_watch = tx_desc; - nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); netdev_tx_sent_queue(nq, first->bytecount); idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more()); @@ -299,7 +301,7 @@ static void idpf_tx_singleq_map(struct idpf_queue *tx_q, * ring entry to reflect that this index is a context descriptor */ static struct idpf_base_tx_ctx_desc * -idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq) +idpf_tx_singleq_get_ctx_desc(struct idpf_tx_queue *txq) { struct idpf_base_tx_ctx_desc *ctx_desc; int ntu = txq->next_to_use; @@ -307,7 +309,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq) memset(&txq->tx_buf[ntu], 0, sizeof(struct idpf_tx_buf)); txq->tx_buf[ntu].ctx_entry = true; - ctx_desc = IDPF_BASE_TX_CTX_DESC(txq, ntu); + ctx_desc = &txq->base_ctx[ntu]; IDPF_SINGLEQ_BUMP_RING_IDX(txq, ntu); txq->next_to_use = ntu; @@ -320,7 +322,7 @@ idpf_tx_singleq_get_ctx_desc(struct idpf_queue *txq) * @txq: queue to send buffer on * @offload: offload parameter structure **/ -static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq, +static void idpf_tx_singleq_build_ctx_desc(struct idpf_tx_queue *txq, struct idpf_tx_offload_params *offload) { struct idpf_base_tx_ctx_desc *desc = idpf_tx_singleq_get_ctx_desc(txq); @@ -333,7 +335,7 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq, qw1 |= FIELD_PREP(IDPF_TXD_CTX_QW1_MSS_M, offload->mss); u64_stats_update_begin(&txq->stats_sync); - u64_stats_inc(&txq->q_stats.tx.lso_pkts); + u64_stats_inc(&txq->q_stats.lso_pkts); u64_stats_update_end(&txq->stats_sync); } @@ -351,8 +353,8 @@ static void idpf_tx_singleq_build_ctx_desc(struct idpf_queue *txq, * * Returns NETDEV_TX_OK if sent, else an error code */ -static netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, - struct idpf_queue *tx_q) +netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, + struct idpf_tx_queue *tx_q) { struct idpf_tx_offload_params offload = { }; struct idpf_tx_buf *first; @@ -408,33 +410,6 @@ out_drop: return idpf_tx_drop_skb(tx_q, skb); } -/** - * idpf_tx_singleq_start - Selects the right Tx queue to send buffer - * @skb: send buffer - * @netdev: network interface device structure - * - * Returns NETDEV_TX_OK if sent, else an error code - */ -netdev_tx_t 
idpf_tx_singleq_start(struct sk_buff *skb, - struct net_device *netdev) -{ - struct idpf_vport *vport = idpf_netdev_to_vport(netdev); - struct idpf_queue *tx_q; - - tx_q = vport->txqs[skb_get_queue_mapping(skb)]; - - /* hardware can't handle really short frames, hardware padding works - * beyond this point - */ - if (skb_put_padto(skb, IDPF_TX_MIN_PKT_LEN)) { - idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); - - return NETDEV_TX_OK; - } - - return idpf_tx_singleq_frame(skb, tx_q); -} - /** * idpf_tx_singleq_clean - Reclaim resources from queue * @tx_q: Tx queue to clean @@ -442,20 +417,19 @@ netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb, * @cleaned: returns number of packets cleaned * */ -static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget, +static bool idpf_tx_singleq_clean(struct idpf_tx_queue *tx_q, int napi_budget, int *cleaned) { - unsigned int budget = tx_q->vport->compln_clean_budget; unsigned int total_bytes = 0, total_pkts = 0; struct idpf_base_tx_desc *tx_desc; + u32 budget = tx_q->clean_budget; s16 ntc = tx_q->next_to_clean; struct idpf_netdev_priv *np; struct idpf_tx_buf *tx_buf; - struct idpf_vport *vport; struct netdev_queue *nq; bool dont_wake; - tx_desc = IDPF_BASE_TX_DESC(tx_q, ntc); + tx_desc = &tx_q->base_tx[ntc]; tx_buf = &tx_q->tx_buf[ntc]; ntc -= tx_q->desc_count; @@ -517,7 +491,7 @@ static bool idpf_tx_singleq_clean(struct idpf_queue *tx_q, int napi_budget, if (unlikely(!ntc)) { ntc -= tx_q->desc_count; tx_buf = tx_q->tx_buf; - tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + tx_desc = &tx_q->base_tx[0]; } /* unmap any remaining paged data */ @@ -540,7 +514,7 @@ fetch_next_txq_desc: if (unlikely(!ntc)) { ntc -= tx_q->desc_count; tx_buf = tx_q->tx_buf; - tx_desc = IDPF_BASE_TX_DESC(tx_q, 0); + tx_desc = &tx_q->base_tx[0]; } } while (likely(budget)); @@ -550,16 +524,15 @@ fetch_next_txq_desc: *cleaned += total_pkts; u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_add(&tx_q->q_stats.tx.packets, total_pkts); - u64_stats_add(&tx_q->q_stats.tx.bytes, total_bytes); + u64_stats_add(&tx_q->q_stats.packets, total_pkts); + u64_stats_add(&tx_q->q_stats.bytes, total_bytes); u64_stats_update_end(&tx_q->stats_sync); - vport = tx_q->vport; - np = netdev_priv(vport->netdev); - nq = netdev_get_tx_queue(vport->netdev, tx_q->idx); + np = netdev_priv(tx_q->netdev); + nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); dont_wake = np->state != __IDPF_VPORT_UP || - !netif_carrier_ok(vport->netdev); + !netif_carrier_ok(tx_q->netdev); __netif_txq_completed_wake(nq, total_pkts, total_bytes, IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH, dont_wake); @@ -584,7 +557,7 @@ static bool idpf_tx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget, budget_per_q = num_txq ? 
max(budget / num_txq, 1) : 0; for (i = 0; i < num_txq; i++) { - struct idpf_queue *q; + struct idpf_tx_queue *q; q = q_vec->tx[i]; clean_complete &= idpf_tx_singleq_clean(q, budget_per_q, @@ -614,14 +587,9 @@ static bool idpf_rx_singleq_test_staterr(const union virtchnl2_rx_desc *rx_desc, /** * idpf_rx_singleq_is_non_eop - process handling of non-EOP buffers - * @rxq: Rx ring being processed * @rx_desc: Rx descriptor for current buffer - * @skb: Current socket buffer containing buffer in progress - * @ntc: next to clean */ -static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq, - union virtchnl2_rx_desc *rx_desc, - struct sk_buff *skb, u16 ntc) +static bool idpf_rx_singleq_is_non_eop(const union virtchnl2_rx_desc *rx_desc) { /* if we are the last buffer then there is nothing else to do */ if (likely(idpf_rx_singleq_test_staterr(rx_desc, IDPF_RXD_EOF_SINGLEQ))) @@ -635,98 +603,82 @@ static bool idpf_rx_singleq_is_non_eop(struct idpf_queue *rxq, * @rxq: Rx ring being processed * @skb: skb currently being received and modified * @csum_bits: checksum bits from descriptor - * @ptype: the packet type decoded by hardware + * @decoded: the packet type decoded by hardware * * skb->protocol must be set before this function is called */ -static void idpf_rx_singleq_csum(struct idpf_queue *rxq, struct sk_buff *skb, - struct idpf_rx_csum_decoded *csum_bits, - u16 ptype) +static void idpf_rx_singleq_csum(struct idpf_rx_queue *rxq, + struct sk_buff *skb, + struct idpf_rx_csum_decoded csum_bits, + struct libeth_rx_pt decoded) { - struct idpf_rx_ptype_decoded decoded; bool ipv4, ipv6; /* check if Rx checksum is enabled */ - if (unlikely(!(rxq->vport->netdev->features & NETIF_F_RXCSUM))) + if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded)) return; /* check if HW has decoded the packet and checksum */ - if (unlikely(!(csum_bits->l3l4p))) + if (unlikely(!csum_bits.l3l4p)) return; - decoded = rxq->vport->rx_ptype_lkup[ptype]; - if (unlikely(!(decoded.known && decoded.outer_ip))) - return; - - ipv4 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV4); - ipv6 = IDPF_RX_PTYPE_TO_IPV(&decoded, IDPF_RX_PTYPE_OUTER_IPV6); + ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; /* Check if there were any checksum errors */ - if (unlikely(ipv4 && (csum_bits->ipe || csum_bits->eipe))) + if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe))) goto checksum_fail; /* Device could not do any checksum offload for certain extension * headers as indicated by setting IPV6EXADD bit */ - if (unlikely(ipv6 && csum_bits->ipv6exadd)) + if (unlikely(ipv6 && csum_bits.ipv6exadd)) return; /* check for L4 errors and handle packets that were not able to be * checksummed due to arrival speed */ - if (unlikely(csum_bits->l4e)) + if (unlikely(csum_bits.l4e)) goto checksum_fail; - if (unlikely(csum_bits->nat && csum_bits->eudpe)) + if (unlikely(csum_bits.nat && csum_bits.eudpe)) goto checksum_fail; /* Handle packets that were not able to be checksummed due to arrival * speed, in this case the stack can compute the csum. */ - if (unlikely(csum_bits->pprs)) + if (unlikely(csum_bits.pprs)) return; /* If there is an outer header present that might contain a checksum * we need to bump the checksum level by 1 to reflect the fact that * we are indicating we validated the inner checksum. 
*/ - if (decoded.tunnel_type >= IDPF_RX_PTYPE_TUNNEL_IP_GRENAT) + if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT) skb->csum_level = 1; - /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */ - switch (decoded.inner_prot) { - case IDPF_RX_PTYPE_INNER_PROT_ICMP: - case IDPF_RX_PTYPE_INNER_PROT_TCP: - case IDPF_RX_PTYPE_INNER_PROT_UDP: - case IDPF_RX_PTYPE_INNER_PROT_SCTP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - return; - default: - return; - } + skb->ip_summed = CHECKSUM_UNNECESSARY; + return; checksum_fail: u64_stats_update_begin(&rxq->stats_sync); - u64_stats_inc(&rxq->q_stats.rx.hw_csum_err); + u64_stats_inc(&rxq->q_stats.hw_csum_err); u64_stats_update_end(&rxq->stats_sync); } /** * idpf_rx_singleq_base_csum - Indicate in skb if hw indicated a good cksum - * @rx_q: Rx completion queue - * @skb: skb currently being received and modified * @rx_desc: the receive descriptor - * @ptype: Rx packet type * * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte * descriptor writeback format. + * + * Return: parsed checksum status. **/ -static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q, - struct sk_buff *skb, - union virtchnl2_rx_desc *rx_desc, - u16 ptype) +static struct idpf_rx_csum_decoded +idpf_rx_singleq_base_csum(const union virtchnl2_rx_desc *rx_desc) { - struct idpf_rx_csum_decoded csum_bits; + struct idpf_rx_csum_decoded csum_bits = { }; u32 rx_error, rx_status; u64 qword; @@ -745,28 +697,23 @@ static void idpf_rx_singleq_base_csum(struct idpf_queue *rx_q, rx_status); csum_bits.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_BASE_DESC_STATUS_IPV6EXADD_M, rx_status); - csum_bits.nat = 0; - csum_bits.eudpe = 0; - idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype); + return csum_bits; } /** * idpf_rx_singleq_flex_csum - Indicate in skb if hw indicated a good cksum - * @rx_q: Rx completion queue - * @skb: skb currently being received and modified * @rx_desc: the receive descriptor - * @ptype: Rx packet type * * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible * descriptor writeback format. + * + * Return: parsed checksum status. **/ -static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q, - struct sk_buff *skb, - union virtchnl2_rx_desc *rx_desc, - u16 ptype) +static struct idpf_rx_csum_decoded +idpf_rx_singleq_flex_csum(const union virtchnl2_rx_desc *rx_desc) { - struct idpf_rx_csum_decoded csum_bits; + struct idpf_rx_csum_decoded csum_bits = { }; u16 rx_status0, rx_status1; rx_status0 = le16_to_cpu(rx_desc->flex_nic_wb.status_error0); @@ -786,9 +733,8 @@ static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q, rx_status0); csum_bits.nat = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS1_NAT_M, rx_status1); - csum_bits.pprs = 0; - idpf_rx_singleq_csum(rx_q, skb, &csum_bits, ptype); + return csum_bits; } /** @@ -801,14 +747,14 @@ static void idpf_rx_singleq_flex_csum(struct idpf_queue *rx_q, * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte * descriptor writeback format. 
**/ -static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q, +static void idpf_rx_singleq_base_hash(struct idpf_rx_queue *rx_q, struct sk_buff *skb, - union virtchnl2_rx_desc *rx_desc, - struct idpf_rx_ptype_decoded *decoded) + const union virtchnl2_rx_desc *rx_desc, + struct libeth_rx_pt decoded) { u64 mask, qw1; - if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH))) + if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded)) return; mask = VIRTCHNL2_RX_BASE_DESC_FLTSTAT_RSS_HASH_M; @@ -817,7 +763,7 @@ static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q, if (FIELD_GET(mask, qw1) == mask) { u32 hash = le32_to_cpu(rx_desc->base_wb.qword0.hi_dword.rss); - skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded)); + libeth_rx_pt_set_hash(skb, hash, decoded); } } @@ -831,18 +777,20 @@ static void idpf_rx_singleq_base_hash(struct idpf_queue *rx_q, * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible * descriptor writeback format. **/ -static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q, +static void idpf_rx_singleq_flex_hash(struct idpf_rx_queue *rx_q, struct sk_buff *skb, - union virtchnl2_rx_desc *rx_desc, - struct idpf_rx_ptype_decoded *decoded) + const union virtchnl2_rx_desc *rx_desc, + struct libeth_rx_pt decoded) { - if (unlikely(!(rx_q->vport->netdev->features & NETIF_F_RXHASH))) + if (!libeth_rx_pt_has_hash(rx_q->netdev, decoded)) return; if (FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_STATUS0_RSS_VALID_M, - le16_to_cpu(rx_desc->flex_nic_wb.status_error0))) - skb_set_hash(skb, le32_to_cpu(rx_desc->flex_nic_wb.rss_hash), - idpf_ptype_to_htype(decoded)); + le16_to_cpu(rx_desc->flex_nic_wb.status_error0))) { + u32 hash = le32_to_cpu(rx_desc->flex_nic_wb.rss_hash); + + libeth_rx_pt_set_hash(skb, hash, decoded); + } } /** @@ -857,25 +805,45 @@ static void idpf_rx_singleq_flex_hash(struct idpf_queue *rx_q, * order to populate the hash, checksum, VLAN, protocol, and * other fields within the skb. 
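+ *
+ * The descriptor ID configured on the queue (base vs. flex) selects which
+ * hash and checksum parsers are used for the descriptor.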
*/ -static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q, - struct sk_buff *skb, - union virtchnl2_rx_desc *rx_desc, - u16 ptype) +static void +idpf_rx_singleq_process_skb_fields(struct idpf_rx_queue *rx_q, + struct sk_buff *skb, + const union virtchnl2_rx_desc *rx_desc, + u16 ptype) { - struct idpf_rx_ptype_decoded decoded = - rx_q->vport->rx_ptype_lkup[ptype]; + struct libeth_rx_pt decoded = rx_q->rx_ptype_lkup[ptype]; + struct idpf_rx_csum_decoded csum_bits; /* modifies the skb - consumes the enet header */ - skb->protocol = eth_type_trans(skb, rx_q->vport->netdev); + skb->protocol = eth_type_trans(skb, rx_q->netdev); /* Check if we're using base mode descriptor IDs */ if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) { - idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, &decoded); - idpf_rx_singleq_base_csum(rx_q, skb, rx_desc, ptype); + idpf_rx_singleq_base_hash(rx_q, skb, rx_desc, decoded); + csum_bits = idpf_rx_singleq_base_csum(rx_desc); } else { - idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, &decoded); - idpf_rx_singleq_flex_csum(rx_q, skb, rx_desc, ptype); + idpf_rx_singleq_flex_hash(rx_q, skb, rx_desc, decoded); + csum_bits = idpf_rx_singleq_flex_csum(rx_desc); } + + idpf_rx_singleq_csum(rx_q, skb, csum_bits, decoded); + skb_record_rx_queue(skb, rx_q->idx); +} + +/** + * idpf_rx_buf_hw_update - Store the new tail and head values + * @rxq: queue to bump + * @val: new head index + */ +static void idpf_rx_buf_hw_update(struct idpf_rx_queue *rxq, u32 val) +{ + rxq->next_to_use = val; + + if (unlikely(!rxq->tail)) + return; + + /* writel has an implicit memory barrier */ + writel(val, rxq->tail); } /** @@ -885,24 +853,28 @@ static void idpf_rx_singleq_process_skb_fields(struct idpf_queue *rx_q, * * Returns false if all allocations were successful, true if any fail */ -bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q, +bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rx_q, u16 cleaned_count) { struct virtchnl2_singleq_rx_buf_desc *desc; + const struct libeth_fq_fp fq = { + .pp = rx_q->pp, + .fqes = rx_q->rx_buf, + .truesize = rx_q->truesize, + .count = rx_q->desc_count, + }; u16 nta = rx_q->next_to_alloc; - struct idpf_rx_buf *buf; if (!cleaned_count) return false; - desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, nta); - buf = &rx_q->rx_buf.buf[nta]; + desc = &rx_q->single_buf[nta]; do { dma_addr_t addr; - addr = idpf_alloc_page(rx_q->pp, buf, rx_q->rx_buf_size); - if (unlikely(addr == DMA_MAPPING_ERROR)) + addr = libeth_rx_alloc(&fq, nta); + if (addr == DMA_MAPPING_ERROR) break; /* Refresh the desc even if buffer_addrs didn't change @@ -912,11 +884,9 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q, desc->hdr_addr = 0; desc++; - buf++; nta++; if (unlikely(nta == rx_q->desc_count)) { - desc = IDPF_SINGLEQ_RX_BUF_DESC(rx_q, 0); - buf = rx_q->rx_buf.buf; + desc = &rx_q->single_buf[0]; nta = 0; } @@ -933,7 +903,6 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q, /** * idpf_rx_singleq_extract_base_fields - Extract fields from the Rx descriptor - * @rx_q: Rx descriptor queue * @rx_desc: the descriptor to process * @fields: storage for extracted values * @@ -943,9 +912,9 @@ bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_queue *rx_q, * This function only operates on the VIRTCHNL2_RXDID_1_32B_BASE_M base 32byte * descriptor writeback format. 
*/ -static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q, - union virtchnl2_rx_desc *rx_desc, - struct idpf_rx_extracted *fields) +static void +idpf_rx_singleq_extract_base_fields(const union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_extracted *fields) { u64 qword; @@ -957,7 +926,6 @@ static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q, /** * idpf_rx_singleq_extract_flex_fields - Extract fields from the Rx descriptor - * @rx_q: Rx descriptor queue * @rx_desc: the descriptor to process * @fields: storage for extracted values * @@ -967,9 +935,9 @@ static void idpf_rx_singleq_extract_base_fields(struct idpf_queue *rx_q, * This function only operates on the VIRTCHNL2_RXDID_2_FLEX_SQ_NIC flexible * descriptor writeback format. */ -static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q, - union virtchnl2_rx_desc *rx_desc, - struct idpf_rx_extracted *fields) +static void +idpf_rx_singleq_extract_flex_fields(const union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_extracted *fields) { fields->size = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_PKT_LEN_M, le16_to_cpu(rx_desc->flex_nic_wb.pkt_len)); @@ -984,14 +952,15 @@ static void idpf_rx_singleq_extract_flex_fields(struct idpf_queue *rx_q, * @fields: storage for extracted values * */ -static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q, - union virtchnl2_rx_desc *rx_desc, - struct idpf_rx_extracted *fields) +static void +idpf_rx_singleq_extract_fields(const struct idpf_rx_queue *rx_q, + const union virtchnl2_rx_desc *rx_desc, + struct idpf_rx_extracted *fields) { if (rx_q->rxdids == VIRTCHNL2_RXDID_1_32B_BASE_M) - idpf_rx_singleq_extract_base_fields(rx_q, rx_desc, fields); + idpf_rx_singleq_extract_base_fields(rx_desc, fields); else - idpf_rx_singleq_extract_flex_fields(rx_q, rx_desc, fields); + idpf_rx_singleq_extract_flex_fields(rx_desc, fields); } /** @@ -1001,7 +970,7 @@ static void idpf_rx_singleq_extract_fields(struct idpf_queue *rx_q, * * Returns true if there's any budget left (e.g. 
the clean is finished) */ -static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget) +static int idpf_rx_singleq_clean(struct idpf_rx_queue *rx_q, int budget) { unsigned int total_rx_bytes = 0, total_rx_pkts = 0; struct sk_buff *skb = rx_q->skb; @@ -1016,7 +985,7 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget) struct idpf_rx_buf *rx_buf; /* get the Rx desc from Rx queue based on 'next_to_clean' */ - rx_desc = IDPF_RX_DESC(rx_q, ntc); + rx_desc = &rx_q->rx[ntc]; /* status_error_ptype_len will always be zero for unused * descriptors because it's cleared in cleanup, and overlaps @@ -1036,29 +1005,27 @@ static int idpf_rx_singleq_clean(struct idpf_queue *rx_q, int budget) idpf_rx_singleq_extract_fields(rx_q, rx_desc, &fields); - rx_buf = &rx_q->rx_buf.buf[ntc]; - if (!fields.size) { - idpf_rx_put_page(rx_buf); + rx_buf = &rx_q->rx_buf[ntc]; + if (!libeth_rx_sync_for_cpu(rx_buf, fields.size)) goto skip_data; - } - idpf_rx_sync_for_cpu(rx_buf, fields.size); if (skb) idpf_rx_add_frag(rx_buf, skb, fields.size); else - skb = idpf_rx_construct_skb(rx_q, rx_buf, fields.size); + skb = idpf_rx_build_skb(rx_buf, fields.size); /* exit if we failed to retrieve a buffer */ if (!skb) break; skip_data: - IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc); + rx_buf->page = NULL; + IDPF_SINGLEQ_BUMP_RING_IDX(rx_q, ntc); cleaned_count++; /* skip if it is non EOP desc */ - if (idpf_rx_singleq_is_non_eop(rx_q, rx_desc, skb, ntc)) + if (idpf_rx_singleq_is_non_eop(rx_desc) || unlikely(!skb)) continue; #define IDPF_RXD_ERR_S FIELD_PREP(VIRTCHNL2_RX_BASE_DESC_QW1_ERROR_M, \ @@ -1084,7 +1051,7 @@ skip_data: rx_desc, fields.rx_ptype); /* send completed skb up the stack */ - napi_gro_receive(&rx_q->q_vector->napi, skb); + napi_gro_receive(rx_q->pp->p.napi, skb); skb = NULL; /* update budget accounting */ @@ -1095,12 +1062,13 @@ skip_data: rx_q->next_to_clean = ntc; + page_pool_nid_changed(rx_q->pp, numa_mem_id()); if (cleaned_count) failure = idpf_rx_singleq_buf_hw_alloc_all(rx_q, cleaned_count); u64_stats_update_begin(&rx_q->stats_sync); - u64_stats_add(&rx_q->q_stats.rx.packets, total_rx_pkts); - u64_stats_add(&rx_q->q_stats.rx.bytes, total_rx_bytes); + u64_stats_add(&rx_q->q_stats.packets, total_rx_pkts); + u64_stats_add(&rx_q->q_stats.bytes, total_rx_bytes); u64_stats_update_end(&rx_q->stats_sync); /* guarantee a trip back through this routine if there was a failure */ @@ -1127,7 +1095,7 @@ static bool idpf_rx_singleq_clean_all(struct idpf_q_vector *q_vec, int budget, */ budget_per_q = num_rxq ? 
max(budget / num_rxq, 1) : 0; for (i = 0; i < num_rxq; i++) { - struct idpf_queue *rxq = q_vec->rx[i]; + struct idpf_rx_queue *rxq = q_vec->rx[i]; int pkts_cleaned_per_q; pkts_cleaned_per_q = idpf_rx_singleq_clean(rxq, budget_per_q); diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c index b023704bbbda..af2879f03b8d 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c @@ -1,9 +1,14 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2023 Intel Corporation */ +#include + #include "idpf.h" #include "idpf_virtchnl.h" +static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, + unsigned int count); + /** * idpf_buf_lifo_push - push a buffer pointer onto stack * @stack: pointer to stack struct @@ -60,7 +65,8 @@ void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue) * @tx_q: the queue that owns the buffer * @tx_buf: the buffer to free */ -static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf) +static void idpf_tx_buf_rel(struct idpf_tx_queue *tx_q, + struct idpf_tx_buf *tx_buf) { if (tx_buf->skb) { if (dma_unmap_len(tx_buf, len)) @@ -86,8 +92,9 @@ static void idpf_tx_buf_rel(struct idpf_queue *tx_q, struct idpf_tx_buf *tx_buf) * idpf_tx_buf_rel_all - Free any empty Tx buffers * @txq: queue to be cleaned */ -static void idpf_tx_buf_rel_all(struct idpf_queue *txq) +static void idpf_tx_buf_rel_all(struct idpf_tx_queue *txq) { + struct idpf_buf_lifo *buf_stack; u16 i; /* Buffers already cleared, nothing to do */ @@ -101,38 +108,57 @@ static void idpf_tx_buf_rel_all(struct idpf_queue *txq) kfree(txq->tx_buf); txq->tx_buf = NULL; - if (!txq->buf_stack.bufs) + if (!idpf_queue_has(FLOW_SCH_EN, txq)) return; - for (i = 0; i < txq->buf_stack.size; i++) - kfree(txq->buf_stack.bufs[i]); + buf_stack = &txq->stash->buf_stack; + if (!buf_stack->bufs) + return; - kfree(txq->buf_stack.bufs); - txq->buf_stack.bufs = NULL; + for (i = 0; i < buf_stack->size; i++) + kfree(buf_stack->bufs[i]); + + kfree(buf_stack->bufs); + buf_stack->bufs = NULL; } /** * idpf_tx_desc_rel - Free Tx resources per queue * @txq: Tx descriptor ring for a specific queue - * @bufq: buffer q or completion q * * Free all transmit software resources */ -static void idpf_tx_desc_rel(struct idpf_queue *txq, bool bufq) +static void idpf_tx_desc_rel(struct idpf_tx_queue *txq) { - if (bufq) - idpf_tx_buf_rel_all(txq); + idpf_tx_buf_rel_all(txq); if (!txq->desc_ring) return; dmam_free_coherent(txq->dev, txq->size, txq->desc_ring, txq->dma); txq->desc_ring = NULL; - txq->next_to_alloc = 0; txq->next_to_use = 0; txq->next_to_clean = 0; } +/** + * idpf_compl_desc_rel - Free completion resources per queue + * @complq: completion queue + * + * Free all completion software resources. 
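+ *
+ * Unlike the Tx descriptor ring, which is devres-managed, this ring is
+ * allocated with plain dma_alloc_coherent() and must be freed explicitly.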
+ */ +static void idpf_compl_desc_rel(struct idpf_compl_queue *complq) +{ + if (!complq->comp) + return; + + dma_free_coherent(complq->netdev->dev.parent, complq->size, + complq->comp, complq->dma); + complq->comp = NULL; + complq->next_to_use = 0; + complq->next_to_clean = 0; +} + /** * idpf_tx_desc_rel_all - Free Tx Resources for All Queues * @vport: virtual port structure @@ -150,10 +176,10 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport) struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; for (j = 0; j < txq_grp->num_txq; j++) - idpf_tx_desc_rel(txq_grp->txqs[j], true); + idpf_tx_desc_rel(txq_grp->txqs[j]); if (idpf_is_queue_model_split(vport->txq_model)) - idpf_tx_desc_rel(txq_grp->complq, false); + idpf_compl_desc_rel(txq_grp->complq); } } @@ -163,8 +189,9 @@ static void idpf_tx_desc_rel_all(struct idpf_vport *vport) * * Returns 0 on success, negative on failure */ -static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q) +static int idpf_tx_buf_alloc_all(struct idpf_tx_queue *tx_q) { + struct idpf_buf_lifo *buf_stack; int buf_size; int i; @@ -180,22 +207,26 @@ static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q) for (i = 0; i < tx_q->desc_count; i++) tx_q->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; + if (!idpf_queue_has(FLOW_SCH_EN, tx_q)) + return 0; + + buf_stack = &tx_q->stash->buf_stack; + /* Initialize tx buf stack for out-of-order completions if * flow scheduling offload is enabled */ - tx_q->buf_stack.bufs = - kcalloc(tx_q->desc_count, sizeof(struct idpf_tx_stash *), - GFP_KERNEL); - if (!tx_q->buf_stack.bufs) + buf_stack->bufs = kcalloc(tx_q->desc_count, sizeof(*buf_stack->bufs), + GFP_KERNEL); + if (!buf_stack->bufs) return -ENOMEM; - tx_q->buf_stack.size = tx_q->desc_count; - tx_q->buf_stack.top = tx_q->desc_count; + buf_stack->size = tx_q->desc_count; + buf_stack->top = tx_q->desc_count; for (i = 0; i < tx_q->desc_count; i++) { - tx_q->buf_stack.bufs[i] = kzalloc(sizeof(*tx_q->buf_stack.bufs[i]), - GFP_KERNEL); - if (!tx_q->buf_stack.bufs[i]) + buf_stack->bufs[i] = kzalloc(sizeof(*buf_stack->bufs[i]), + GFP_KERNEL); + if (!buf_stack->bufs[i]) return -ENOMEM; } @@ -204,28 +235,22 @@ static int idpf_tx_buf_alloc_all(struct idpf_queue *tx_q) /** * idpf_tx_desc_alloc - Allocate the Tx descriptors + * @vport: vport to allocate resources for * @tx_q: the tx ring to set up - * @bufq: buffer or completion queue * * Returns 0 on success, negative on failure */ -static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq) +static int idpf_tx_desc_alloc(const struct idpf_vport *vport, + struct idpf_tx_queue *tx_q) { struct device *dev = tx_q->dev; - u32 desc_sz; int err; - if (bufq) { - err = idpf_tx_buf_alloc_all(tx_q); - if (err) - goto err_alloc; + err = idpf_tx_buf_alloc_all(tx_q); + if (err) + goto err_alloc; - desc_sz = sizeof(struct idpf_base_tx_desc); - } else { - desc_sz = sizeof(struct idpf_splitq_tx_compl_desc); - } - - tx_q->size = tx_q->desc_count * desc_sz; + tx_q->size = tx_q->desc_count * sizeof(*tx_q->base_tx); /* Allocate descriptors also round up to nearest 4K */ tx_q->size = ALIGN(tx_q->size, 4096); @@ -238,19 +263,43 @@ static int idpf_tx_desc_alloc(struct idpf_queue *tx_q, bool bufq) goto err_alloc; } - tx_q->next_to_alloc = 0; tx_q->next_to_use = 0; tx_q->next_to_clean = 0; - set_bit(__IDPF_Q_GEN_CHK, tx_q->flags); + idpf_queue_set(GEN_CHK, tx_q); return 0; err_alloc: - idpf_tx_desc_rel(tx_q, bufq); + idpf_tx_desc_rel(tx_q); return err; } +/** + * idpf_compl_desc_alloc - allocate completion descriptors + * @vport: vport to 
allocate resources for + * @complq: completion queue to set up + * + * Return: 0 on success, -errno on failure. + */ +static int idpf_compl_desc_alloc(const struct idpf_vport *vport, + struct idpf_compl_queue *complq) +{ + complq->size = array_size(complq->desc_count, sizeof(*complq->comp)); + + complq->comp = dma_alloc_coherent(complq->netdev->dev.parent, + complq->size, &complq->dma, + GFP_KERNEL); + if (!complq->comp) + return -ENOMEM; + + complq->next_to_use = 0; + complq->next_to_clean = 0; + idpf_queue_set(GEN_CHK, complq); + + return 0; +} + /** * idpf_tx_desc_alloc_all - allocate all queues Tx resources * @vport: virtual port private structure @@ -259,7 +308,6 @@ err_alloc: */ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) { - struct device *dev = &vport->adapter->pdev->dev; int err = 0; int i, j; @@ -268,13 +316,14 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) */ for (i = 0; i < vport->num_txq_grp; i++) { for (j = 0; j < vport->txq_grps[i].num_txq; j++) { - struct idpf_queue *txq = vport->txq_grps[i].txqs[j]; + struct idpf_tx_queue *txq = vport->txq_grps[i].txqs[j]; u8 gen_bits = 0; u16 bufidx_mask; - err = idpf_tx_desc_alloc(txq, true); + err = idpf_tx_desc_alloc(vport, txq); if (err) { - dev_err(dev, "Allocation for Tx Queue %u failed\n", + pci_err(vport->adapter->pdev, + "Allocation for Tx Queue %u failed\n", i); goto err_out; } @@ -312,9 +361,10 @@ static int idpf_tx_desc_alloc_all(struct idpf_vport *vport) continue; /* Setup completion queues */ - err = idpf_tx_desc_alloc(vport->txq_grps[i].complq, false); + err = idpf_compl_desc_alloc(vport, vport->txq_grps[i].complq); if (err) { - dev_err(dev, "Allocation for Tx Completion Queue %u failed\n", + pci_err(vport->adapter->pdev, + "Allocation for Tx Completion Queue %u failed\n", i); goto err_out; } @@ -329,70 +379,97 @@ err_out: /** * idpf_rx_page_rel - Release an rx buffer page - * @rxq: the queue that owns the buffer * @rx_buf: the buffer to free */ -static void idpf_rx_page_rel(struct idpf_queue *rxq, struct idpf_rx_buf *rx_buf) +static void idpf_rx_page_rel(struct libeth_fqe *rx_buf) { if (unlikely(!rx_buf->page)) return; - page_pool_put_full_page(rxq->pp, rx_buf->page, false); + page_pool_put_full_page(rx_buf->page->pp, rx_buf->page, false); rx_buf->page = NULL; - rx_buf->page_offset = 0; + rx_buf->offset = 0; } /** * idpf_rx_hdr_buf_rel_all - Release header buffer memory - * @rxq: queue to use + * @bufq: queue to use */ -static void idpf_rx_hdr_buf_rel_all(struct idpf_queue *rxq) +static void idpf_rx_hdr_buf_rel_all(struct idpf_buf_queue *bufq) { - struct idpf_adapter *adapter = rxq->vport->adapter; + struct libeth_fq fq = { + .fqes = bufq->hdr_buf, + .pp = bufq->hdr_pp, + }; - dma_free_coherent(&adapter->pdev->dev, - rxq->desc_count * IDPF_HDR_BUF_SIZE, - rxq->rx_buf.hdr_buf_va, - rxq->rx_buf.hdr_buf_pa); - rxq->rx_buf.hdr_buf_va = NULL; + for (u32 i = 0; i < bufq->desc_count; i++) + idpf_rx_page_rel(&bufq->hdr_buf[i]); + + libeth_rx_fq_destroy(&fq); + bufq->hdr_buf = NULL; + bufq->hdr_pp = NULL; } /** - * idpf_rx_buf_rel_all - Free all Rx buffer resources for a queue - * @rxq: queue to be cleaned + * idpf_rx_buf_rel_bufq - Free all Rx buffer resources for a buffer queue + * @bufq: queue to be cleaned */ -static void idpf_rx_buf_rel_all(struct idpf_queue *rxq) +static void idpf_rx_buf_rel_bufq(struct idpf_buf_queue *bufq) { - u16 i; + struct libeth_fq fq = { + .fqes = bufq->buf, + .pp = bufq->pp, + }; /* queue already cleared, nothing to do */ - if (!rxq->rx_buf.buf) + if (!bufq->buf) 
return; /* Free all the bufs allocated and given to hw on Rx queue */ - for (i = 0; i < rxq->desc_count; i++) - idpf_rx_page_rel(rxq, &rxq->rx_buf.buf[i]); + for (u32 i = 0; i < bufq->desc_count; i++) + idpf_rx_page_rel(&bufq->buf[i]); - if (rxq->rx_hsplit_en) - idpf_rx_hdr_buf_rel_all(rxq); + if (idpf_queue_has(HSPLIT_EN, bufq)) + idpf_rx_hdr_buf_rel_all(bufq); - page_pool_destroy(rxq->pp); + libeth_rx_fq_destroy(&fq); + bufq->buf = NULL; + bufq->pp = NULL; +} + +/** + * idpf_rx_buf_rel_all - Free all Rx buffer resources for a receive queue + * @rxq: queue to be cleaned + */ +static void idpf_rx_buf_rel_all(struct idpf_rx_queue *rxq) +{ + struct libeth_fq fq = { + .fqes = rxq->rx_buf, + .pp = rxq->pp, + }; + + if (!rxq->rx_buf) + return; + + for (u32 i = 0; i < rxq->desc_count; i++) + idpf_rx_page_rel(&rxq->rx_buf[i]); + + libeth_rx_fq_destroy(&fq); + rxq->rx_buf = NULL; rxq->pp = NULL; - - kfree(rxq->rx_buf.buf); - rxq->rx_buf.buf = NULL; } /** * idpf_rx_desc_rel - Free a specific Rx q resources * @rxq: queue to clean the resources from - * @bufq: buffer q or completion q - * @q_model: single or split q model + * @dev: device to free DMA memory + * @model: single or split queue model * * Free a specific rx queue resources */ -static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model) +static void idpf_rx_desc_rel(struct idpf_rx_queue *rxq, struct device *dev, + u32 model) { if (!rxq) return; @@ -402,7 +479,7 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model) rxq->skb = NULL; } - if (bufq || !idpf_is_queue_model_split(q_model)) + if (!idpf_is_queue_model_split(model)) idpf_rx_buf_rel_all(rxq); rxq->next_to_alloc = 0; @@ -411,10 +488,34 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model) if (!rxq->desc_ring) return; - dmam_free_coherent(rxq->dev, rxq->size, rxq->desc_ring, rxq->dma); + dmam_free_coherent(dev, rxq->size, rxq->desc_ring, rxq->dma); rxq->desc_ring = NULL; } +/** + * idpf_rx_desc_rel_bufq - free buffer queue resources + * @bufq: buffer queue to clean the resources from + * @dev: device to free DMA memory + */ +static void idpf_rx_desc_rel_bufq(struct idpf_buf_queue *bufq, + struct device *dev) +{ + if (!bufq) + return; + + idpf_rx_buf_rel_bufq(bufq); + + bufq->next_to_alloc = 0; + bufq->next_to_clean = 0; + bufq->next_to_use = 0; + + if (!bufq->split_buf) + return; + + dma_free_coherent(dev, bufq->size, bufq->split_buf, bufq->dma); + bufq->split_buf = NULL; +} + /** * idpf_rx_desc_rel_all - Free Rx Resources for All Queues * @vport: virtual port structure @@ -423,6 +524,7 @@ static void idpf_rx_desc_rel(struct idpf_queue *rxq, bool bufq, s32 q_model) */ static void idpf_rx_desc_rel_all(struct idpf_vport *vport) { + struct device *dev = &vport->adapter->pdev->dev; struct idpf_rxq_group *rx_qgrp; u16 num_rxq; int i, j; @@ -435,15 +537,15 @@ static void idpf_rx_desc_rel_all(struct idpf_vport *vport) if (!idpf_is_queue_model_split(vport->rxq_model)) { for (j = 0; j < rx_qgrp->singleq.num_rxq; j++) - idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], - false, vport->rxq_model); + idpf_rx_desc_rel(rx_qgrp->singleq.rxqs[j], dev, + VIRTCHNL2_QUEUE_MODEL_SINGLE); continue; } num_rxq = rx_qgrp->splitq.num_rxq_sets; for (j = 0; j < num_rxq; j++) idpf_rx_desc_rel(&rx_qgrp->splitq.rxq_sets[j]->rxq, - false, vport->rxq_model); + dev, VIRTCHNL2_QUEUE_MODEL_SPLIT); if (!rx_qgrp->splitq.bufq_sets) continue; @@ -452,45 +554,50 @@ static void idpf_rx_desc_rel_all(struct idpf_vport *vport) struct idpf_bufq_set *bufq_set = 
&rx_qgrp->splitq.bufq_sets[j]; - idpf_rx_desc_rel(&bufq_set->bufq, true, - vport->rxq_model); + idpf_rx_desc_rel_bufq(&bufq_set->bufq, dev); } } } /** * idpf_rx_buf_hw_update - Store the new tail and head values - * @rxq: queue to bump + * @bufq: queue to bump * @val: new head index */ -void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val) +static void idpf_rx_buf_hw_update(struct idpf_buf_queue *bufq, u32 val) { - rxq->next_to_use = val; + bufq->next_to_use = val; - if (unlikely(!rxq->tail)) + if (unlikely(!bufq->tail)) return; /* writel has an implicit memory barrier */ - writel(val, rxq->tail); + writel(val, bufq->tail); } /** * idpf_rx_hdr_buf_alloc_all - Allocate memory for header buffers - * @rxq: ring to use + * @bufq: ring to use * * Returns 0 on success, negative on failure. */ -static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq) +static int idpf_rx_hdr_buf_alloc_all(struct idpf_buf_queue *bufq) { - struct idpf_adapter *adapter = rxq->vport->adapter; + struct libeth_fq fq = { + .count = bufq->desc_count, + .type = LIBETH_FQE_HDR, + .nid = idpf_q_vector_to_mem(bufq->q_vector), + }; + int ret; - rxq->rx_buf.hdr_buf_va = - dma_alloc_coherent(&adapter->pdev->dev, - IDPF_HDR_BUF_SIZE * rxq->desc_count, - &rxq->rx_buf.hdr_buf_pa, - GFP_KERNEL); - if (!rxq->rx_buf.hdr_buf_va) - return -ENOMEM; + ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi); + if (ret) + return ret; + + bufq->hdr_pp = fq.pp; + bufq->hdr_buf = fq.fqes; + bufq->hdr_truesize = fq.truesize; + bufq->rx_hbuf_size = fq.buf_len; return 0; } @@ -502,19 +609,20 @@ static int idpf_rx_hdr_buf_alloc_all(struct idpf_queue *rxq) */ static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id) { - u16 nta = refillq->next_to_alloc; + u32 nta = refillq->next_to_use; /* store the buffer ID and the SW maintained GEN bit to the refillq */ refillq->ring[nta] = FIELD_PREP(IDPF_RX_BI_BUFID_M, buf_id) | FIELD_PREP(IDPF_RX_BI_GEN_M, - test_bit(__IDPF_Q_GEN_CHK, refillq->flags)); + idpf_queue_has(GEN_CHK, refillq)); if (unlikely(++nta == refillq->desc_count)) { nta = 0; - change_bit(__IDPF_Q_GEN_CHK, refillq->flags); + idpf_queue_change(GEN_CHK, refillq); } - refillq->next_to_alloc = nta; + + refillq->next_to_use = nta; } /** @@ -524,24 +632,35 @@ static void idpf_rx_post_buf_refill(struct idpf_sw_queue *refillq, u16 buf_id) * * Returns false if buffer could not be allocated, true otherwise. 
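+ *
+ * When header split is enabled, a header buffer is allocated and posted
+ * in the same descriptor as the payload buffer.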
*/ -static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id) +static bool idpf_rx_post_buf_desc(struct idpf_buf_queue *bufq, u16 buf_id) { struct virtchnl2_splitq_rx_buf_desc *splitq_rx_desc = NULL; + struct libeth_fq_fp fq = { + .count = bufq->desc_count, + }; u16 nta = bufq->next_to_alloc; - struct idpf_rx_buf *buf; dma_addr_t addr; - splitq_rx_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, nta); - buf = &bufq->rx_buf.buf[buf_id]; + splitq_rx_desc = &bufq->split_buf[nta]; - if (bufq->rx_hsplit_en) { - splitq_rx_desc->hdr_addr = - cpu_to_le64(bufq->rx_buf.hdr_buf_pa + - (u32)buf_id * IDPF_HDR_BUF_SIZE); + if (idpf_queue_has(HSPLIT_EN, bufq)) { + fq.pp = bufq->hdr_pp; + fq.fqes = bufq->hdr_buf; + fq.truesize = bufq->hdr_truesize; + + addr = libeth_rx_alloc(&fq, buf_id); + if (addr == DMA_MAPPING_ERROR) + return false; + + splitq_rx_desc->hdr_addr = cpu_to_le64(addr); } - addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size); - if (unlikely(addr == DMA_MAPPING_ERROR)) + fq.pp = bufq->pp; + fq.fqes = bufq->buf; + fq.truesize = bufq->truesize; + + addr = libeth_rx_alloc(&fq, buf_id); + if (addr == DMA_MAPPING_ERROR) return false; splitq_rx_desc->pkt_addr = cpu_to_le64(addr); @@ -562,7 +681,8 @@ static bool idpf_rx_post_buf_desc(struct idpf_queue *bufq, u16 buf_id) * * Returns true if @working_set bufs were posted successfully, false otherwise. */ -static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set) +static bool idpf_rx_post_init_bufs(struct idpf_buf_queue *bufq, + u16 working_set) { int i; @@ -571,95 +691,114 @@ static bool idpf_rx_post_init_bufs(struct idpf_queue *bufq, u16 working_set) return false; } - idpf_rx_buf_hw_update(bufq, - bufq->next_to_alloc & ~(bufq->rx_buf_stride - 1)); + idpf_rx_buf_hw_update(bufq, ALIGN_DOWN(bufq->next_to_alloc, + IDPF_RX_BUF_STRIDE)); return true; } /** - * idpf_rx_create_page_pool - Create a page pool - * @rxbufq: RX queue to create page pool for + * idpf_rx_buf_alloc_singleq - Allocate memory for all buffer resources + * @rxq: queue for which the buffers are allocated * - * Returns &page_pool on success, casted -errno on failure + * Return: 0 on success, -ENOMEM on failure. */ -static struct page_pool *idpf_rx_create_page_pool(struct idpf_queue *rxbufq) +static int idpf_rx_buf_alloc_singleq(struct idpf_rx_queue *rxq) { - struct page_pool_params pp = { - .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, - .order = 0, - .pool_size = rxbufq->desc_count, - .nid = NUMA_NO_NODE, - .dev = rxbufq->vport->netdev->dev.parent, - .max_len = PAGE_SIZE, - .dma_dir = DMA_FROM_DEVICE, - .offset = 0, - }; + if (idpf_rx_singleq_buf_hw_alloc_all(rxq, rxq->desc_count - 1)) + goto err; - return page_pool_create(&pp); + return 0; + +err: + idpf_rx_buf_rel_all(rxq); + + return -ENOMEM; +} + +/** + * idpf_rx_bufs_init_singleq - Initialize page pool and allocate Rx bufs + * @rxq: buffer queue to create page pool for + * + * Return: 0 on success, -errno on failure. 
+ */ +static int idpf_rx_bufs_init_singleq(struct idpf_rx_queue *rxq) +{ + struct libeth_fq fq = { + .count = rxq->desc_count, + .type = LIBETH_FQE_MTU, + .nid = idpf_q_vector_to_mem(rxq->q_vector), + }; + int ret; + + ret = libeth_rx_fq_create(&fq, &rxq->q_vector->napi); + if (ret) + return ret; + + rxq->pp = fq.pp; + rxq->rx_buf = fq.fqes; + rxq->truesize = fq.truesize; + rxq->rx_buf_size = fq.buf_len; + + return idpf_rx_buf_alloc_singleq(rxq); } /** * idpf_rx_buf_alloc_all - Allocate memory for all buffer resources - * @rxbufq: queue for which the buffers are allocated; equivalent to - * rxq when operating in singleq mode + * @rxbufq: queue for which the buffers are allocated * * Returns 0 on success, negative on failure */ -static int idpf_rx_buf_alloc_all(struct idpf_queue *rxbufq) +static int idpf_rx_buf_alloc_all(struct idpf_buf_queue *rxbufq) { int err = 0; - /* Allocate book keeping buffers */ - rxbufq->rx_buf.buf = kcalloc(rxbufq->desc_count, - sizeof(struct idpf_rx_buf), GFP_KERNEL); - if (!rxbufq->rx_buf.buf) { - err = -ENOMEM; - goto rx_buf_alloc_all_out; - } - - if (rxbufq->rx_hsplit_en) { + if (idpf_queue_has(HSPLIT_EN, rxbufq)) { err = idpf_rx_hdr_buf_alloc_all(rxbufq); if (err) goto rx_buf_alloc_all_out; } /* Allocate buffers to be given to HW. */ - if (idpf_is_queue_model_split(rxbufq->vport->rxq_model)) { - int working_set = IDPF_RX_BUFQ_WORKING_SET(rxbufq); - - if (!idpf_rx_post_init_bufs(rxbufq, working_set)) - err = -ENOMEM; - } else { - if (idpf_rx_singleq_buf_hw_alloc_all(rxbufq, - rxbufq->desc_count - 1)) - err = -ENOMEM; - } + if (!idpf_rx_post_init_bufs(rxbufq, IDPF_RX_BUFQ_WORKING_SET(rxbufq))) + err = -ENOMEM; rx_buf_alloc_all_out: if (err) - idpf_rx_buf_rel_all(rxbufq); + idpf_rx_buf_rel_bufq(rxbufq); return err; } /** * idpf_rx_bufs_init - Initialize page pool, allocate rx bufs, and post to HW - * @rxbufq: RX queue to create page pool for + * @bufq: buffer queue to create page pool for + * @type: type of Rx buffers to allocate * * Returns 0 on success, negative on failure */ -static int idpf_rx_bufs_init(struct idpf_queue *rxbufq) +static int idpf_rx_bufs_init(struct idpf_buf_queue *bufq, + enum libeth_fqe_type type) { - struct page_pool *pool; + struct libeth_fq fq = { + .truesize = bufq->truesize, + .count = bufq->desc_count, + .type = type, + .hsplit = idpf_queue_has(HSPLIT_EN, bufq), + .nid = idpf_q_vector_to_mem(bufq->q_vector), + }; + int ret; - pool = idpf_rx_create_page_pool(rxbufq); - if (IS_ERR(pool)) - return PTR_ERR(pool); + ret = libeth_rx_fq_create(&fq, &bufq->q_vector->napi); + if (ret) + return ret; - rxbufq->pp = pool; + bufq->pp = fq.pp; + bufq->buf = fq.fqes; + bufq->truesize = fq.truesize; + bufq->rx_buf_size = fq.buf_len; - return idpf_rx_buf_alloc_all(rxbufq); + return idpf_rx_buf_alloc_all(bufq); } /** @@ -670,20 +809,22 @@ static int idpf_rx_bufs_init(struct idpf_queue *rxbufq) */ int idpf_rx_bufs_init_all(struct idpf_vport *vport) { - struct idpf_rxq_group *rx_qgrp; - struct idpf_queue *q; + bool split = idpf_is_queue_model_split(vport->rxq_model); int i, j, err; for (i = 0; i < vport->num_rxq_grp; i++) { - rx_qgrp = &vport->rxq_grps[i]; + struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; + u32 truesize = 0; /* Allocate bufs for the rxq itself in singleq */ - if (!idpf_is_queue_model_split(vport->rxq_model)) { + if (!split) { int num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq; j++) { + struct idpf_rx_queue *q; + q = rx_qgrp->singleq.rxqs[j]; - err = idpf_rx_bufs_init(q); + err = idpf_rx_bufs_init_singleq(q); if (err) 
return err; } @@ -693,10 +834,19 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport) /* Otherwise, allocate bufs for the buffer queues */ for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + enum libeth_fqe_type type; + struct idpf_buf_queue *q; + q = &rx_qgrp->splitq.bufq_sets[j].bufq; - err = idpf_rx_bufs_init(q); + q->truesize = truesize; + + type = truesize ? LIBETH_FQE_SHORT : LIBETH_FQE_MTU; + + err = idpf_rx_bufs_init(q, type); if (err) return err; + + truesize = q->truesize >> 1; } } @@ -705,22 +855,17 @@ int idpf_rx_bufs_init_all(struct idpf_vport *vport) /** * idpf_rx_desc_alloc - Allocate queue Rx resources + * @vport: vport to allocate resources for * @rxq: Rx queue for which the resources are setup - * @bufq: buffer or completion queue - * @q_model: single or split queue model * * Returns 0 on success, negative on failure */ -static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model) +static int idpf_rx_desc_alloc(const struct idpf_vport *vport, + struct idpf_rx_queue *rxq) { - struct device *dev = rxq->dev; + struct device *dev = &vport->adapter->pdev->dev; - if (bufq) - rxq->size = rxq->desc_count * - sizeof(struct virtchnl2_splitq_rx_buf_desc); - else - rxq->size = rxq->desc_count * - sizeof(union virtchnl2_rx_desc); + rxq->size = rxq->desc_count * sizeof(union virtchnl2_rx_desc); /* Allocate descriptors and also round up to nearest 4K */ rxq->size = ALIGN(rxq->size, 4096); @@ -735,7 +880,35 @@ static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model) rxq->next_to_alloc = 0; rxq->next_to_clean = 0; rxq->next_to_use = 0; - set_bit(__IDPF_Q_GEN_CHK, rxq->flags); + idpf_queue_set(GEN_CHK, rxq); + + return 0; +} + +/** + * idpf_bufq_desc_alloc - Allocate buffer queue descriptor ring + * @vport: vport to allocate resources for + * @bufq: buffer queue for which the resources are set up + * + * Return: 0 on success, -ENOMEM on failure. 
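+ *
+ * Note: the ring size is computed with array_size(), which saturates on
+ * multiplication overflow instead of wrapping.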
+ */ +static int idpf_bufq_desc_alloc(const struct idpf_vport *vport, + struct idpf_buf_queue *bufq) +{ + struct device *dev = &vport->adapter->pdev->dev; + + bufq->size = array_size(bufq->desc_count, sizeof(*bufq->split_buf)); + + bufq->split_buf = dma_alloc_coherent(dev, bufq->size, &bufq->dma, + GFP_KERNEL); + if (!bufq->split_buf) + return -ENOMEM; + + bufq->next_to_alloc = 0; + bufq->next_to_clean = 0; + bufq->next_to_use = 0; + + idpf_queue_set(GEN_CHK, bufq); return 0; } @@ -748,9 +921,7 @@ static int idpf_rx_desc_alloc(struct idpf_queue *rxq, bool bufq, s32 q_model) */ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) { - struct device *dev = &vport->adapter->pdev->dev; struct idpf_rxq_group *rx_qgrp; - struct idpf_queue *q; int i, j, err; u16 num_rxq; @@ -762,13 +933,17 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq; j++) { + struct idpf_rx_queue *q; + if (idpf_is_queue_model_split(vport->rxq_model)) q = &rx_qgrp->splitq.rxq_sets[j]->rxq; else q = rx_qgrp->singleq.rxqs[j]; - err = idpf_rx_desc_alloc(q, false, vport->rxq_model); + + err = idpf_rx_desc_alloc(vport, q); if (err) { - dev_err(dev, "Memory allocation for Rx Queue %u failed\n", + pci_err(vport->adapter->pdev, + "Memory allocation for Rx Queue %u failed\n", i); goto err_out; } @@ -778,10 +953,14 @@ static int idpf_rx_desc_alloc_all(struct idpf_vport *vport) continue; for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + struct idpf_buf_queue *q; + q = &rx_qgrp->splitq.bufq_sets[j].bufq; - err = idpf_rx_desc_alloc(q, true, vport->rxq_model); + + err = idpf_bufq_desc_alloc(vport, q); if (err) { - dev_err(dev, "Memory allocation for Rx Buffer Queue %u failed\n", + pci_err(vport->adapter->pdev, + "Memory allocation for Rx Buffer Queue %u failed\n", i); goto err_out; } @@ -802,11 +981,16 @@ err_out: */ static void idpf_txq_group_rel(struct idpf_vport *vport) { + bool split, flow_sch_en; int i, j; if (!vport->txq_grps) return; + split = idpf_is_queue_model_split(vport->txq_model); + flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, + VIRTCHNL2_CAP_SPLITQ_QSCHED); + for (i = 0; i < vport->num_txq_grp; i++) { struct idpf_txq_group *txq_grp = &vport->txq_grps[i]; @@ -814,8 +998,15 @@ static void idpf_txq_group_rel(struct idpf_vport *vport) kfree(txq_grp->txqs[j]); txq_grp->txqs[j] = NULL; } + + if (!split) + continue; + kfree(txq_grp->complq); txq_grp->complq = NULL; + + if (flow_sch_en) + kfree(txq_grp->stashes); } kfree(vport->txq_grps); vport->txq_grps = NULL; @@ -919,7 +1110,7 @@ static int idpf_vport_init_fast_path_txqs(struct idpf_vport *vport) { int i, j, k = 0; - vport->txqs = kcalloc(vport->num_txq, sizeof(struct idpf_queue *), + vport->txqs = kcalloc(vport->num_txq, sizeof(*vport->txqs), GFP_KERNEL); if (!vport->txqs) @@ -967,17 +1158,11 @@ void idpf_vport_init_num_qs(struct idpf_vport *vport, /* Adjust number of buffer queues per Rx queue group. 
*/ if (!idpf_is_queue_model_split(vport->rxq_model)) { vport->num_bufqs_per_qgrp = 0; - vport->bufq_size[0] = IDPF_RX_BUF_2048; return; } vport->num_bufqs_per_qgrp = IDPF_MAX_BUFQS_PER_RXQ_GRP; - /* Bufq[0] default buffer size is 4K - * Bufq[1] default buffer size is 2K - */ - vport->bufq_size[0] = IDPF_RX_BUF_4096; - vport->bufq_size[1] = IDPF_RX_BUF_2048; } /** @@ -1137,9 +1322,10 @@ static void idpf_vport_calc_numq_per_grp(struct idpf_vport *vport, * @q: rx queue for which descids are set * */ -static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q) +static void idpf_rxq_set_descids(const struct idpf_vport *vport, + struct idpf_rx_queue *q) { - if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { + if (idpf_is_queue_model_split(vport->rxq_model)) { q->rxdids = VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M; } else { if (vport->base_rxd) @@ -1158,20 +1344,22 @@ static void idpf_rxq_set_descids(struct idpf_vport *vport, struct idpf_queue *q) */ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) { - bool flow_sch_en; - int err, i; + bool split, flow_sch_en; + int i; vport->txq_grps = kcalloc(vport->num_txq_grp, sizeof(*vport->txq_grps), GFP_KERNEL); if (!vport->txq_grps) return -ENOMEM; + split = idpf_is_queue_model_split(vport->txq_model); flow_sch_en = !idpf_is_cap_ena(vport->adapter, IDPF_OTHER_CAPS, VIRTCHNL2_CAP_SPLITQ_QSCHED); for (i = 0; i < vport->num_txq_grp; i++) { struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; struct idpf_adapter *adapter = vport->adapter; + struct idpf_txq_stash *stashes; int j; tx_qgrp->vport = vport; @@ -1180,45 +1368,62 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 num_txq) for (j = 0; j < tx_qgrp->num_txq; j++) { tx_qgrp->txqs[j] = kzalloc(sizeof(*tx_qgrp->txqs[j]), GFP_KERNEL); - if (!tx_qgrp->txqs[j]) { - err = -ENOMEM; + if (!tx_qgrp->txqs[j]) goto err_alloc; - } + } + + if (split && flow_sch_en) { + stashes = kcalloc(num_txq, sizeof(*stashes), + GFP_KERNEL); + if (!stashes) + goto err_alloc; + + tx_qgrp->stashes = stashes; } for (j = 0; j < tx_qgrp->num_txq; j++) { - struct idpf_queue *q = tx_qgrp->txqs[j]; + struct idpf_tx_queue *q = tx_qgrp->txqs[j]; q->dev = &adapter->pdev->dev; q->desc_count = vport->txq_desc_count; q->tx_max_bufs = idpf_get_max_tx_bufs(adapter); q->tx_min_pkt_len = idpf_get_min_tx_pkt_len(adapter); - q->vport = vport; + q->netdev = vport->netdev; q->txq_grp = tx_qgrp; - hash_init(q->sched_buf_hash); - if (flow_sch_en) - set_bit(__IDPF_Q_FLOW_SCH_EN, q->flags); + if (!split) { + q->clean_budget = vport->compln_clean_budget; + idpf_queue_assign(CRC_EN, q, + vport->crc_enable); + } + + if (!flow_sch_en) + continue; + + if (split) { + q->stash = &stashes[j]; + hash_init(q->stash->sched_buf_hash); + } + + idpf_queue_set(FLOW_SCH_EN, q); } - if (!idpf_is_queue_model_split(vport->txq_model)) + if (!split) continue; tx_qgrp->complq = kcalloc(IDPF_COMPLQ_PER_GROUP, sizeof(*tx_qgrp->complq), GFP_KERNEL); - if (!tx_qgrp->complq) { - err = -ENOMEM; + if (!tx_qgrp->complq) goto err_alloc; - } - tx_qgrp->complq->dev = &adapter->pdev->dev; tx_qgrp->complq->desc_count = vport->complq_desc_count; - tx_qgrp->complq->vport = vport; tx_qgrp->complq->txq_grp = tx_qgrp; + tx_qgrp->complq->netdev = vport->netdev; + tx_qgrp->complq->clean_budget = vport->compln_clean_budget; if (flow_sch_en) - __set_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags); + idpf_queue_set(FLOW_SCH_EN, tx_qgrp->complq); } return 0; @@ -1226,7 +1431,7 @@ static int idpf_txq_group_alloc(struct idpf_vport *vport, u16 
num_txq) err_alloc: idpf_txq_group_rel(vport); - return err; + return -ENOMEM; } /** @@ -1238,8 +1443,6 @@ err_alloc: */ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) { - struct idpf_adapter *adapter = vport->adapter; - struct idpf_queue *q; int i, k, err = 0; bool hs; @@ -1292,21 +1495,13 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) struct idpf_bufq_set *bufq_set = &rx_qgrp->splitq.bufq_sets[j]; int swq_size = sizeof(struct idpf_sw_queue); + struct idpf_buf_queue *q; q = &rx_qgrp->splitq.bufq_sets[j].bufq; - q->dev = &adapter->pdev->dev; q->desc_count = vport->bufq_desc_count[j]; - q->vport = vport; - q->rxq_grp = rx_qgrp; - q->idx = j; - q->rx_buf_size = vport->bufq_size[j]; q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; - q->rx_buf_stride = IDPF_RX_BUF_STRIDE; - if (hs) { - q->rx_hsplit_en = true; - q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; - } + idpf_queue_assign(HSPLIT_EN, q, hs); bufq_set->num_refillqs = num_rxq; bufq_set->refillqs = kcalloc(num_rxq, swq_size, @@ -1319,13 +1514,12 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) struct idpf_sw_queue *refillq = &bufq_set->refillqs[k]; - refillq->dev = &vport->adapter->pdev->dev; refillq->desc_count = vport->bufq_desc_count[j]; - set_bit(__IDPF_Q_GEN_CHK, refillq->flags); - set_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags); + idpf_queue_set(GEN_CHK, refillq); + idpf_queue_set(RFL_GEN_CHK, refillq); refillq->ring = kcalloc(refillq->desc_count, - sizeof(u16), + sizeof(*refillq->ring), GFP_KERNEL); if (!refillq->ring) { err = -ENOMEM; @@ -1336,36 +1530,30 @@ static int idpf_rxq_group_alloc(struct idpf_vport *vport, u16 num_rxq) skip_splitq_rx_init: for (j = 0; j < num_rxq; j++) { + struct idpf_rx_queue *q; + if (!idpf_is_queue_model_split(vport->rxq_model)) { q = rx_qgrp->singleq.rxqs[j]; goto setup_rxq; } q = &rx_qgrp->splitq.rxq_sets[j]->rxq; - rx_qgrp->splitq.rxq_sets[j]->refillq0 = + rx_qgrp->splitq.rxq_sets[j]->refillq[0] = &rx_qgrp->splitq.bufq_sets[0].refillqs[j]; if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) - rx_qgrp->splitq.rxq_sets[j]->refillq1 = + rx_qgrp->splitq.rxq_sets[j]->refillq[1] = &rx_qgrp->splitq.bufq_sets[1].refillqs[j]; - if (hs) { - q->rx_hsplit_en = true; - q->rx_hbuf_size = IDPF_HDR_BUF_SIZE; - } + idpf_queue_assign(HSPLIT_EN, q, hs); setup_rxq: - q->dev = &adapter->pdev->dev; q->desc_count = vport->rxq_desc_count; - q->vport = vport; - q->rxq_grp = rx_qgrp; + q->rx_ptype_lkup = vport->rx_ptype_lkup; + q->netdev = vport->netdev; + q->bufq_sets = rx_qgrp->splitq.bufq_sets; q->idx = (i * num_rxq) + j; - /* In splitq mode, RXQ buffer size should be - * set to that of the first buffer queue - * associated with this RXQ - */ - q->rx_buf_size = vport->bufq_size[0]; q->rx_buffer_low_watermark = IDPF_LOW_WATERMARK; q->rx_max_pkt_size = vport->netdev->mtu + - IDPF_PACKET_HDR_PAD; + LIBETH_RX_LL_LEN; idpf_rxq_set_descids(vport, q); } } @@ -1445,12 +1633,13 @@ err_out: * idpf_tx_handle_sw_marker - Handle queue marker packet * @tx_q: tx queue to handle software marker */ -static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q) +static void idpf_tx_handle_sw_marker(struct idpf_tx_queue *tx_q) { - struct idpf_vport *vport = tx_q->vport; + struct idpf_netdev_priv *priv = netdev_priv(tx_q->netdev); + struct idpf_vport *vport = priv->vport; int i; - clear_bit(__IDPF_Q_SW_MARKER, tx_q->flags); + idpf_queue_clear(SW_MARKER, tx_q); /* Hardware must write marker packets to all queues associated with * completion queues. 
So check if all queues received marker packets */ @@ -1458,7 +1647,7 @@ static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q) /* If we're still waiting on any other TXQ marker completions, * just return now since we cannot wake up the marker_wq yet. */ - if (test_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags)) + if (idpf_queue_has(SW_MARKER, vport->txqs[i])) return; /* Drain complete */ @@ -1474,7 +1663,7 @@ static void idpf_tx_handle_sw_marker(struct idpf_queue *tx_q) * @cleaned: pointer to stats struct to track cleaned packets/bytes * @napi_budget: Used to determine if we are in netpoll */ -static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q, +static void idpf_tx_splitq_clean_hdr(struct idpf_tx_queue *tx_q, struct idpf_tx_buf *tx_buf, struct idpf_cleaned_stats *cleaned, int napi_budget) @@ -1505,7 +1694,8 @@ static void idpf_tx_splitq_clean_hdr(struct idpf_queue *tx_q, * @cleaned: pointer to stats struct to track cleaned packets/bytes * @budget: Used to determine if we are in netpoll */ -static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag, +static void idpf_tx_clean_stashed_bufs(struct idpf_tx_queue *txq, + u16 compl_tag, struct idpf_cleaned_stats *cleaned, int budget) { @@ -1513,7 +1703,7 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag, struct hlist_node *tmp_buf; /* Buffer completion */ - hash_for_each_possible_safe(txq->sched_buf_hash, stash, tmp_buf, + hash_for_each_possible_safe(txq->stash->sched_buf_hash, stash, tmp_buf, hlist, compl_tag) { if (unlikely(stash->buf.compl_tag != (int)compl_tag)) continue; @@ -1530,7 +1720,7 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag, } /* Push shadow buf back onto stack */ - idpf_buf_lifo_push(&txq->buf_stack, stash); + idpf_buf_lifo_push(&txq->stash->buf_stack, stash); hash_del(&stash->hlist); } @@ -1542,7 +1732,7 @@ static void idpf_tx_clean_stashed_bufs(struct idpf_queue *txq, u16 compl_tag, * @txq: Tx queue to clean * @tx_buf: buffer to store */ -static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq, +static int idpf_stash_flow_sch_buffers(struct idpf_tx_queue *txq, struct idpf_tx_buf *tx_buf) { struct idpf_tx_stash *stash; @@ -1551,10 +1741,10 @@ static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq, !dma_unmap_len(tx_buf, len))) return 0; - stash = idpf_buf_lifo_pop(&txq->buf_stack); + stash = idpf_buf_lifo_pop(&txq->stash->buf_stack); if (unlikely(!stash)) { net_err_ratelimited("%s: No out-of-order TX buffers left!\n", - txq->vport->netdev->name); + netdev_name(txq->netdev)); return -ENOMEM; } @@ -1568,7 +1758,8 @@ static int idpf_stash_flow_sch_buffers(struct idpf_queue *txq, stash->buf.compl_tag = tx_buf->compl_tag; /* Add buffer to buf_hash table to be freed later */ - hash_add(txq->sched_buf_hash, &stash->hlist, stash->buf.compl_tag); + hash_add(txq->stash->sched_buf_hash, &stash->hlist, + stash->buf.compl_tag); memset(tx_buf, 0, sizeof(struct idpf_tx_buf)); @@ -1584,7 +1775,7 @@ do { \ if (unlikely(!(ntc))) { \ ntc -= (txq)->desc_count; \ buf = (txq)->tx_buf; \ - desc = IDPF_FLEX_TX_DESC(txq, 0); \ + desc = &(txq)->flex_tx[0]; \ } else { \ (buf)++; \ (desc)++; \ @@ -1607,7 +1798,7 @@ do { \ * and the buffers will be cleaned separately. The stats are not updated from * this function when using flow-based scheduling. 
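+ *
+ * With flow-based scheduling, stashed buffers are freed later by
+ * idpf_tx_clean_stashed_bufs() once their completion tag comes back.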
*/ -static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end, +static void idpf_tx_splitq_clean(struct idpf_tx_queue *tx_q, u16 end, int napi_budget, struct idpf_cleaned_stats *cleaned, bool descs_only) @@ -1617,8 +1808,8 @@ static void idpf_tx_splitq_clean(struct idpf_queue *tx_q, u16 end, s16 ntc = tx_q->next_to_clean; struct idpf_tx_buf *tx_buf; - tx_desc = IDPF_FLEX_TX_DESC(tx_q, ntc); - next_pending_desc = IDPF_FLEX_TX_DESC(tx_q, end); + tx_desc = &tx_q->flex_tx[ntc]; + next_pending_desc = &tx_q->flex_tx[end]; tx_buf = &tx_q->tx_buf[ntc]; ntc -= tx_q->desc_count; @@ -1703,7 +1894,7 @@ do { \ * stashed. Returns the byte/segment count for the cleaned packet associated * this completion tag. */ -static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag, +static bool idpf_tx_clean_buf_ring(struct idpf_tx_queue *txq, u16 compl_tag, struct idpf_cleaned_stats *cleaned, int budget) { @@ -1772,14 +1963,14 @@ static bool idpf_tx_clean_buf_ring(struct idpf_queue *txq, u16 compl_tag, * * Returns bytes/packets cleaned */ -static void idpf_tx_handle_rs_completion(struct idpf_queue *txq, +static void idpf_tx_handle_rs_completion(struct idpf_tx_queue *txq, struct idpf_splitq_tx_compl_desc *desc, struct idpf_cleaned_stats *cleaned, int budget) { u16 compl_tag; - if (!test_bit(__IDPF_Q_FLOW_SCH_EN, txq->flags)) { + if (!idpf_queue_has(FLOW_SCH_EN, txq)) { u16 head = le16_to_cpu(desc->q_head_compl_tag.q_head); return idpf_tx_splitq_clean(txq, head, budget, cleaned, false); @@ -1802,24 +1993,23 @@ static void idpf_tx_handle_rs_completion(struct idpf_queue *txq, * * Returns true if there's any budget left (e.g. the clean is finished) */ -static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, +static bool idpf_tx_clean_complq(struct idpf_compl_queue *complq, int budget, int *cleaned) { struct idpf_splitq_tx_compl_desc *tx_desc; - struct idpf_vport *vport = complq->vport; s16 ntc = complq->next_to_clean; struct idpf_netdev_priv *np; unsigned int complq_budget; bool complq_ok = true; int i; - complq_budget = vport->compln_clean_budget; - tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, ntc); + complq_budget = complq->clean_budget; + tx_desc = &complq->comp[ntc]; ntc -= complq->desc_count; do { struct idpf_cleaned_stats cleaned_stats = { }; - struct idpf_queue *tx_q; + struct idpf_tx_queue *tx_q; int rel_tx_qid; u16 hw_head; u8 ctype; /* completion type */ @@ -1828,7 +2018,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, /* if the descriptor isn't done, no work yet to do */ gen = le16_get_bits(tx_desc->qid_comptype_gen, IDPF_TXD_COMPLQ_GEN_M); - if (test_bit(__IDPF_Q_GEN_CHK, complq->flags) != gen) + if (idpf_queue_has(GEN_CHK, complq) != gen) break; /* Find necessary info of TX queue to clean buffers */ @@ -1836,8 +2026,7 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, IDPF_TXD_COMPLQ_QID_M); if (rel_tx_qid >= complq->txq_grp->num_txq || !complq->txq_grp->txqs[rel_tx_qid]) { - dev_err(&complq->vport->adapter->pdev->dev, - "TxQ not found\n"); + netdev_err(complq->netdev, "TxQ not found\n"); goto fetch_next_desc; } tx_q = complq->txq_grp->txqs[rel_tx_qid]; @@ -1860,15 +2049,14 @@ static bool idpf_tx_clean_complq(struct idpf_queue *complq, int budget, idpf_tx_handle_sw_marker(tx_q); break; default: - dev_err(&tx_q->vport->adapter->pdev->dev, - "Unknown TX completion type: %d\n", - ctype); + netdev_err(tx_q->netdev, + "Unknown TX completion type: %d\n", ctype); goto fetch_next_desc; } 
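+		/* Stats are updated under the u64_stats seqcount so that
+		 * readers on 32-bit systems see consistent 64-bit counters.
+		 */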
u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_add(&tx_q->q_stats.tx.packets, cleaned_stats.packets); - u64_stats_add(&tx_q->q_stats.tx.bytes, cleaned_stats.bytes); + u64_stats_add(&tx_q->q_stats.packets, cleaned_stats.packets); + u64_stats_add(&tx_q->q_stats.bytes, cleaned_stats.bytes); tx_q->cleaned_pkts += cleaned_stats.packets; tx_q->cleaned_bytes += cleaned_stats.bytes; complq->num_completions++; @@ -1879,8 +2067,8 @@ fetch_next_desc: ntc++; if (unlikely(!ntc)) { ntc -= complq->desc_count; - tx_desc = IDPF_SPLITQ_TX_COMPLQ_DESC(complq, 0); - change_bit(__IDPF_Q_GEN_CHK, complq->flags); + tx_desc = &complq->comp[0]; + idpf_queue_change(GEN_CHK, complq); } prefetch(tx_desc); @@ -1896,9 +2084,9 @@ fetch_next_desc: IDPF_TX_COMPLQ_OVERFLOW_THRESH(complq))) complq_ok = false; - np = netdev_priv(complq->vport->netdev); + np = netdev_priv(complq->netdev); for (i = 0; i < complq->txq_grp->num_txq; ++i) { - struct idpf_queue *tx_q = complq->txq_grp->txqs[i]; + struct idpf_tx_queue *tx_q = complq->txq_grp->txqs[i]; struct netdev_queue *nq; bool dont_wake; @@ -1909,11 +2097,11 @@ fetch_next_desc: *cleaned += tx_q->cleaned_pkts; /* Update BQL */ - nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); dont_wake = !complq_ok || IDPF_TX_BUF_RSV_LOW(tx_q) || np->state != __IDPF_VPORT_UP || - !netif_carrier_ok(tx_q->vport->netdev); + !netif_carrier_ok(tx_q->netdev); /* Check if the TXQ needs to and can be restarted */ __netif_txq_completed_wake(nq, tx_q->cleaned_pkts, tx_q->cleaned_bytes, IDPF_DESC_UNUSED(tx_q), IDPF_TX_WAKE_THRESH, @@ -1976,7 +2164,7 @@ void idpf_tx_splitq_build_flow_desc(union idpf_tx_flex_desc *desc, * * Returns 0 if stop is not needed */ -int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size) +int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size) { struct netdev_queue *nq; @@ -1984,10 +2172,10 @@ int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size) return 0; u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_inc(&tx_q->q_stats.tx.q_busy); + u64_stats_inc(&tx_q->q_stats.q_busy); u64_stats_update_end(&tx_q->stats_sync); - nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); return netif_txq_maybe_stop(nq, IDPF_DESC_UNUSED(tx_q), size, size); } @@ -1999,7 +2187,7 @@ int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size) * * Returns 0 if stop is not needed */ -static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q, +static int idpf_tx_maybe_stop_splitq(struct idpf_tx_queue *tx_q, unsigned int descs_needed) { if (idpf_tx_maybe_stop_common(tx_q, descs_needed)) @@ -2023,9 +2211,9 @@ static int idpf_tx_maybe_stop_splitq(struct idpf_queue *tx_q, splitq_stop: u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_inc(&tx_q->q_stats.tx.q_busy); + u64_stats_inc(&tx_q->q_stats.q_busy); u64_stats_update_end(&tx_q->stats_sync); - netif_stop_subqueue(tx_q->vport->netdev, tx_q->idx); + netif_stop_subqueue(tx_q->netdev, tx_q->idx); return -EBUSY; } @@ -2040,12 +2228,12 @@ splitq_stop: * to do a register write to update our queue status. We know this can only * mean tail here as HW should be owning head for TX. 
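+ *
+ * When @xmit_more is set the tail write may be deferred, allowing several
+ * frames to be posted before a single doorbell update.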
*/ -void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val, +void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val, bool xmit_more) { struct netdev_queue *nq; - nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); tx_q->next_to_use = val; idpf_tx_maybe_stop_common(tx_q, IDPF_TX_DESC_NEEDED); @@ -2069,7 +2257,7 @@ void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val, * * Returns number of data descriptors needed for this skb. */ -unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, +unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq, struct sk_buff *skb) { const struct skb_shared_info *shinfo; @@ -2102,7 +2290,7 @@ unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, count = idpf_size_to_txd_count(skb->len); u64_stats_update_begin(&txq->stats_sync); - u64_stats_inc(&txq->q_stats.tx.linearize); + u64_stats_inc(&txq->q_stats.linearize); u64_stats_update_end(&txq->stats_sync); } @@ -2116,11 +2304,11 @@ unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, * @first: original first buffer info buffer for packet * @idx: starting point on ring to unwind */ -void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, +void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb, struct idpf_tx_buf *first, u16 idx) { u64_stats_update_begin(&txq->stats_sync); - u64_stats_inc(&txq->q_stats.tx.dma_map_errs); + u64_stats_inc(&txq->q_stats.dma_map_errs); u64_stats_update_end(&txq->stats_sync); /* clear dma mappings for failed tx_buf map */ @@ -2143,7 +2331,7 @@ void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, * used one additional descriptor for a context * descriptor. Reset that here. */ - tx_desc = IDPF_FLEX_TX_DESC(txq, idx); + tx_desc = &txq->flex_tx[idx]; memset(tx_desc, 0, sizeof(struct idpf_flex_tx_ctx_desc)); if (idx == 0) idx = txq->desc_count; @@ -2159,7 +2347,7 @@ void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, * @txq: the tx ring to wrap * @ntu: ring index to bump */ -static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu) +static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_tx_queue *txq, u16 ntu) { ntu++; @@ -2181,7 +2369,7 @@ static unsigned int idpf_tx_splitq_bump_ntu(struct idpf_queue *txq, u16 ntu) * and gets a physical address for each memory location and programs * it and the length into the transmit flex descriptor. 
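+ *
+ * The skb head is DMA-mapped with dma_map_single(); a mapping failure is
+ * unwound via idpf_tx_dma_map_error().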
*/ -static void idpf_tx_splitq_map(struct idpf_queue *tx_q, +static void idpf_tx_splitq_map(struct idpf_tx_queue *tx_q, struct idpf_tx_splitq_params *params, struct idpf_tx_buf *first) { @@ -2202,7 +2390,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q, data_len = skb->data_len; size = skb_headlen(skb); - tx_desc = IDPF_FLEX_TX_DESC(tx_q, i); + tx_desc = &tx_q->flex_tx[i]; dma = dma_map_single(tx_q->dev, skb->data, size, DMA_TO_DEVICE); @@ -2275,7 +2463,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q, i++; if (i == tx_q->desc_count) { - tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0); + tx_desc = &tx_q->flex_tx[0]; i = 0; tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); @@ -2320,7 +2508,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q, i++; if (i == tx_q->desc_count) { - tx_desc = IDPF_FLEX_TX_DESC(tx_q, 0); + tx_desc = &tx_q->flex_tx[0]; i = 0; tx_q->compl_tag_cur_gen = IDPF_TX_ADJ_COMPL_TAG_GEN(tx_q); } @@ -2348,7 +2536,7 @@ static void idpf_tx_splitq_map(struct idpf_queue *tx_q, tx_q->txq_grp->num_completions_pending++; /* record bytecount for BQL */ - nq = netdev_get_tx_queue(tx_q->vport->netdev, tx_q->idx); + nq = netdev_get_tx_queue(tx_q->netdev, tx_q->idx); netdev_tx_sent_queue(nq, first->bytecount); idpf_tx_buf_hw_update(tx_q, i, netdev_xmit_more()); @@ -2525,8 +2713,8 @@ static bool __idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs) * E.g.: a packet with 7 fragments can require 9 DMA transactions; 1 for TSO * header, 1 for segment payload, and then 7 for the fragments. */ -bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, - unsigned int count) +static bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, + unsigned int count) { if (likely(count < max_bufs)) return false; @@ -2544,7 +2732,7 @@ bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, * ring entry to reflect that this index is a context descriptor */ static struct idpf_flex_tx_ctx_desc * -idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq) +idpf_tx_splitq_get_ctx_desc(struct idpf_tx_queue *txq) { struct idpf_flex_tx_ctx_desc *desc; int i = txq->next_to_use; @@ -2553,7 +2741,7 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq) txq->tx_buf[i].compl_tag = IDPF_SPLITQ_TX_INVAL_COMPL_TAG; /* grab the next descriptor */ - desc = IDPF_FLEX_TX_CTX_DESC(txq, i); + desc = &txq->flex_ctx[i]; txq->next_to_use = idpf_tx_splitq_bump_ntu(txq, i); return desc; @@ -2564,10 +2752,10 @@ idpf_tx_splitq_get_ctx_desc(struct idpf_queue *txq) * @tx_q: queue to send buffer on * @skb: pointer to skb */ -netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb) +netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb) { u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_inc(&tx_q->q_stats.tx.skb_drops); + u64_stats_inc(&tx_q->q_stats.skb_drops); u64_stats_update_end(&tx_q->stats_sync); idpf_tx_buf_hw_update(tx_q, tx_q->next_to_use, false); @@ -2585,7 +2773,7 @@ netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb) * Returns NETDEV_TX_OK if sent, else an error code */ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, - struct idpf_queue *tx_q) + struct idpf_tx_queue *tx_q) { struct idpf_tx_splitq_params tx_params = { }; struct idpf_tx_buf *first; @@ -2625,7 +2813,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, ctx_desc->tso.qw0.hdr_len = tx_params.offload.tso_hdr_len; u64_stats_update_begin(&tx_q->stats_sync); - u64_stats_inc(&tx_q->q_stats.tx.lso_pkts); + 
u64_stats_inc(&tx_q->q_stats.lso_pkts); u64_stats_update_end(&tx_q->stats_sync); } @@ -2642,7 +2830,7 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, first->bytecount = max_t(unsigned int, skb->len, ETH_ZLEN); } - if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_q->flags)) { + if (idpf_queue_has(FLOW_SCH_EN, tx_q)) { tx_params.dtype = IDPF_TX_DESC_DTYPE_FLEX_FLOW_SCHE; tx_params.eop_cmd = IDPF_TXD_FLEX_FLOW_CMD_EOP; /* Set the RE bit to catch any packets that may have not been @@ -2672,17 +2860,16 @@ static netdev_tx_t idpf_tx_splitq_frame(struct sk_buff *skb, } /** - * idpf_tx_splitq_start - Selects the right Tx queue to send buffer + * idpf_tx_start - Selects the right Tx queue to send buffer * @skb: send buffer * @netdev: network interface device structure * * Returns NETDEV_TX_OK if sent, else an error code */ -netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb, - struct net_device *netdev) +netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev) { struct idpf_vport *vport = idpf_netdev_to_vport(netdev); - struct idpf_queue *tx_q; + struct idpf_tx_queue *tx_q; if (unlikely(skb_get_queue_mapping(skb) >= vport->num_txq)) { dev_kfree_skb_any(skb); @@ -2701,31 +2888,10 @@ netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb, return NETDEV_TX_OK; } - return idpf_tx_splitq_frame(skb, tx_q); -} - -/** - * idpf_ptype_to_htype - get a hash type - * @decoded: Decoded Rx packet type related fields - * - * Returns appropriate hash type (such as PKT_HASH_TYPE_L2/L3/L4) to be used by - * skb_set_hash based on PTYPE as parsed by HW Rx pipeline and is part of - * Rx desc. - */ -enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded) -{ - if (!decoded->known) - return PKT_HASH_TYPE_NONE; - if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 && - decoded->inner_prot) - return PKT_HASH_TYPE_L4; - if (decoded->payload_layer == IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 && - decoded->outer_ip) - return PKT_HASH_TYPE_L3; - if (decoded->outer_ip == IDPF_RX_PTYPE_OUTER_L2) - return PKT_HASH_TYPE_L2; - - return PKT_HASH_TYPE_NONE; + if (idpf_is_queue_model_split(vport->txq_model)) + return idpf_tx_splitq_frame(skb, tx_q); + else + return idpf_tx_singleq_frame(skb, tx_q); } /** @@ -2735,20 +2901,21 @@ enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *deco * @rx_desc: Receive descriptor * @decoded: Decoded Rx packet type related fields */ -static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb, - struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, - struct idpf_rx_ptype_decoded *decoded) +static void +idpf_rx_hash(const struct idpf_rx_queue *rxq, struct sk_buff *skb, + const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, + struct libeth_rx_pt decoded) { u32 hash; - if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXHASH))) + if (!libeth_rx_pt_has_hash(rxq->netdev, decoded)) return; hash = le16_to_cpu(rx_desc->hash1) | (rx_desc->ff2_mirrid_hash2.hash2 << 16) | (rx_desc->hash3 << 24); - skb_set_hash(skb, hash, idpf_ptype_to_htype(decoded)); + libeth_rx_pt_set_hash(skb, hash, decoded); } /** @@ -2760,92 +2927,83 @@ static void idpf_rx_hash(struct idpf_queue *rxq, struct sk_buff *skb, * * skb->protocol must be set before this function is called */ -static void idpf_rx_csum(struct idpf_queue *rxq, struct sk_buff *skb, - struct idpf_rx_csum_decoded *csum_bits, - struct idpf_rx_ptype_decoded *decoded) +static void idpf_rx_csum(struct idpf_rx_queue *rxq, struct sk_buff *skb, + struct idpf_rx_csum_decoded csum_bits, + 
struct libeth_rx_pt decoded) { bool ipv4, ipv6; /* check if Rx checksum is enabled */ - if (unlikely(!idpf_is_feature_ena(rxq->vport, NETIF_F_RXCSUM))) + if (!libeth_rx_pt_has_checksum(rxq->netdev, decoded)) return; /* check if HW has decoded the packet and checksum */ - if (!(csum_bits->l3l4p)) + if (unlikely(!csum_bits.l3l4p)) return; - ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4); - ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6); + ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; - if (ipv4 && (csum_bits->ipe || csum_bits->eipe)) + if (unlikely(ipv4 && (csum_bits.ipe || csum_bits.eipe))) goto checksum_fail; - if (ipv6 && csum_bits->ipv6exadd) + if (unlikely(ipv6 && csum_bits.ipv6exadd)) return; /* check for L4 errors and handle packets that were not able to be * checksummed */ - if (csum_bits->l4e) + if (unlikely(csum_bits.l4e)) goto checksum_fail; - /* Only report checksum unnecessary for ICMP, TCP, UDP, or SCTP */ - switch (decoded->inner_prot) { - case IDPF_RX_PTYPE_INNER_PROT_ICMP: - case IDPF_RX_PTYPE_INNER_PROT_TCP: - case IDPF_RX_PTYPE_INNER_PROT_UDP: - if (!csum_bits->raw_csum_inv) { - u16 csum = csum_bits->raw_csum; - - skb->csum = csum_unfold((__force __sum16)~swab16(csum)); - skb->ip_summed = CHECKSUM_COMPLETE; - } else { - skb->ip_summed = CHECKSUM_UNNECESSARY; - } - break; - case IDPF_RX_PTYPE_INNER_PROT_SCTP: + if (csum_bits.raw_csum_inv || + decoded.inner_prot == LIBETH_RX_PT_INNER_SCTP) { skb->ip_summed = CHECKSUM_UNNECESSARY; - break; - default: - break; + return; } + skb->csum = csum_unfold((__force __sum16)~swab16(csum_bits.raw_csum)); + skb->ip_summed = CHECKSUM_COMPLETE; + return; checksum_fail: u64_stats_update_begin(&rxq->stats_sync); - u64_stats_inc(&rxq->q_stats.rx.hw_csum_err); + u64_stats_inc(&rxq->q_stats.hw_csum_err); u64_stats_update_end(&rxq->stats_sync); } /** * idpf_rx_splitq_extract_csum_bits - Extract checksum bits from descriptor * @rx_desc: receive descriptor - * @csum: structure to extract checksum fields * + * Return: parsed checksum status. 
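The rewritten idpf_rx_csum() above collapses the old per-protocol switch into two outcomes: CHECKSUM_UNNECESSARY when only the HW validity bits are usable, and CHECKSUM_COMPLETE when a raw checksum can be handed to the stack. A condensed, runnable restatement of that decision ladder (the bit names mirror the driver's idpf_rx_csum_decoded; everything else is invented for the model):

#include <stdbool.h>
#include <stdio.h>

struct model_csum_bits {
        bool l3l4p, ipe, eipe, ipv6exadd, l4e, raw_csum_inv;
};

enum model_verdict { MODEL_NONE, MODEL_FAIL, MODEL_UNNECESSARY, MODEL_COMPLETE };

static enum model_verdict model_rx_csum(struct model_csum_bits b,
                                        bool ipv4, bool ipv6, bool sctp)
{
        if (!b.l3l4p)                   /* HW didn't parse L3/L4 at all */
                return MODEL_NONE;
        if (ipv4 && (b.ipe || b.eipe))  /* inner/outer IP checksum error */
                return MODEL_FAIL;
        if (ipv6 && b.ipv6exadd)        /* extension headers: bail out */
                return MODEL_NONE;
        if (b.l4e)                      /* L4 checksum error */
                return MODEL_FAIL;
        if (b.raw_csum_inv || sctp)     /* no usable raw checksum */
                return MODEL_UNNECESSARY;
        return MODEL_COMPLETE;          /* feed the raw csum to the stack */
}

int main(void)
{
        struct model_csum_bits ok = { .l3l4p = true };

        printf("verdict: %d\n", model_rx_csum(ok, true, false, false));
        return 0;
}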
**/ -static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, - struct idpf_rx_csum_decoded *csum) +static struct idpf_rx_csum_decoded +idpf_rx_splitq_extract_csum_bits(const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) { + struct idpf_rx_csum_decoded csum = { }; u8 qword0, qword1; qword0 = rx_desc->status_err0_qw0; qword1 = rx_desc->status_err0_qw1; - csum->ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M, + csum.ipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_IPE_M, + qword1); + csum.eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M, qword1); - csum->eipe = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_EIPE_M, + csum.l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M, + qword1); + csum.l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M, qword1); - csum->l4e = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_XSUM_L4E_M, - qword1); - csum->l3l4p = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_L3L4P_M, - qword1); - csum->ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M, - qword0); - csum->raw_csum_inv = + csum.ipv6exadd = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_IPV6EXADD_M, + qword0); + csum.raw_csum_inv = le16_get_bits(rx_desc->ptype_err_fflags0, VIRTCHNL2_RX_FLEX_DESC_ADV_RAW_CSUM_INV_M); - csum->raw_csum = le16_to_cpu(rx_desc->misc.raw_cs); + csum.raw_csum = le16_to_cpu(rx_desc->misc.raw_cs); + + return csum; } /** @@ -2860,23 +3018,24 @@ static void idpf_rx_splitq_extract_csum_bits(struct virtchnl2_rx_flex_desc_adv_n * Populate the skb fields with the total number of RSC segments, RSC payload * length and packet type. */ -static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb, - struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, - struct idpf_rx_ptype_decoded *decoded) +static int idpf_rx_rsc(struct idpf_rx_queue *rxq, struct sk_buff *skb, + const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc, + struct libeth_rx_pt decoded) { u16 rsc_segments, rsc_seg_len; bool ipv4, ipv6; int len; - if (unlikely(!decoded->outer_ip)) + if (unlikely(libeth_rx_pt_get_ip_ver(decoded) == + LIBETH_RX_PT_OUTER_L2)) return -EINVAL; rsc_seg_len = le16_to_cpu(rx_desc->misc.rscseglen); if (unlikely(!rsc_seg_len)) return -EINVAL; - ipv4 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV4); - ipv6 = IDPF_RX_PTYPE_TO_IPV(decoded, IDPF_RX_PTYPE_OUTER_IPV6); + ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; if (unlikely(!(ipv4 ^ ipv6))) return -EINVAL; @@ -2914,7 +3073,7 @@ static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb, tcp_gro_complete(skb); u64_stats_update_begin(&rxq->stats_sync); - u64_stats_inc(&rxq->q_stats.rx.rsc_pkts); + u64_stats_inc(&rxq->q_stats.rsc_pkts); u64_stats_update_end(&rxq->stats_sync); return 0; @@ -2930,35 +3089,31 @@ static int idpf_rx_rsc(struct idpf_queue *rxq, struct sk_buff *skb, * order to populate the hash, checksum, protocol, and * other fields within the skb. 
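idpf_rx_splitq_extract_csum_bits() above is a string of FIELD_GET() calls. For readers who don't have that kernel helper in their head, here is a standalone equivalent of the mask-and-shift it performs, using illustrative bit positions rather than the real descriptor layout:

#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's FIELD_GET(): shift the masked bits
 * down by the mask's trailing-zero count. The real macro additionally
 * validates the mask at compile time.
 */
#define MODEL_FIELD_GET(mask, reg) (((reg) & (mask)) >> __builtin_ctz(mask))

#define MODEL_STATUS0_XSUM_IPE_M        0x08    /* illustrative bit layout */
#define MODEL_STATUS0_L3L4P_M           0x20

int main(void)
{
        uint8_t qword1 = 0x28;  /* L3L4P and IPE set */

        printf("ipe=%lu l3l4p=%lu\n",
               (unsigned long)MODEL_FIELD_GET(MODEL_STATUS0_XSUM_IPE_M, qword1),
               (unsigned long)MODEL_FIELD_GET(MODEL_STATUS0_L3L4P_M, qword1));
        return 0;
}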
*/ -static int idpf_rx_process_skb_fields(struct idpf_queue *rxq, - struct sk_buff *skb, - struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) +static int +idpf_rx_process_skb_fields(struct idpf_rx_queue *rxq, struct sk_buff *skb, + const struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc) { - struct idpf_rx_csum_decoded csum_bits = { }; - struct idpf_rx_ptype_decoded decoded; + struct idpf_rx_csum_decoded csum_bits; + struct libeth_rx_pt decoded; u16 rx_ptype; rx_ptype = le16_get_bits(rx_desc->ptype_err_fflags0, VIRTCHNL2_RX_FLEX_DESC_ADV_PTYPE_M); - - skb->protocol = eth_type_trans(skb, rxq->vport->netdev); - - decoded = rxq->vport->rx_ptype_lkup[rx_ptype]; - /* If we don't know the ptype we can't do anything else with it. Just - * pass it up the stack as-is. - */ - if (!decoded.known) - return 0; + decoded = rxq->rx_ptype_lkup[rx_ptype]; /* process RSS/hash */ - idpf_rx_hash(rxq, skb, rx_desc, &decoded); + idpf_rx_hash(rxq, skb, rx_desc, decoded); + + skb->protocol = eth_type_trans(skb, rxq->netdev); if (le16_get_bits(rx_desc->hdrlen_flags, VIRTCHNL2_RX_FLEX_DESC_ADV_RSC_M)) - return idpf_rx_rsc(rxq, skb, rx_desc, &decoded); + return idpf_rx_rsc(rxq, skb, rx_desc, decoded); - idpf_rx_splitq_extract_csum_bits(rx_desc, &csum_bits); - idpf_rx_csum(rxq, skb, &csum_bits, &decoded); + csum_bits = idpf_rx_splitq_extract_csum_bits(rx_desc); + idpf_rx_csum(rxq, skb, csum_bits, decoded); + + skb_record_rx_queue(skb, rxq->idx); return 0; } @@ -2976,103 +3131,73 @@ static int idpf_rx_process_skb_fields(struct idpf_queue *rxq, void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, unsigned int size) { + u32 hr = rx_buf->page->pp->p.offset; + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buf->page, - rx_buf->page_offset, size, rx_buf->truesize); - - rx_buf->page = NULL; + rx_buf->offset + hr, size, rx_buf->truesize); } /** - * idpf_rx_construct_skb - Allocate skb and populate it - * @rxq: Rx descriptor queue - * @rx_buf: Rx buffer to pull data from - * @size: the length of the packet + * idpf_rx_hsplit_wa - handle header buffer overflows and split errors + * @hdr: Rx buffer for the headers + * @buf: Rx buffer for the payload + * @data_len: number of bytes received into the payload buffer * - * This function allocates an skb. It then populates it with the page - * data from the current receive descriptor, taking care to set up the - * skb correctly. + * When a header buffer overflow occurs or the HW was unable to parse the + * packet type to perform header split, the whole frame gets placed in the + * payload buffer. We can't build a valid skb around a payload buffer when + * the header split is active since it doesn't reserve any head- or tailroom. + * In that case, copy either the whole frame when it's short or just the + * Ethernet header to the header buffer to be able to build an skb and adjust + * the data offset in the payload buffer, IOW emulate the header split. + * + * Return: number of bytes copied to the header buffer. */ -struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, - struct idpf_rx_buf *rx_buf, - unsigned int size) +static u32 idpf_rx_hsplit_wa(const struct libeth_fqe *hdr, + struct libeth_fqe *buf, u32 data_len) { - unsigned int headlen; - struct sk_buff *skb; - void *va; + u32 copy = data_len <= L1_CACHE_BYTES ?
data_len : ETH_HLEN; + const void *src; + void *dst; - va = page_address(rx_buf->page) + rx_buf->page_offset; + if (!libeth_rx_sync_for_cpu(buf, copy)) + return 0; - /* prefetch first cache line of first page */ - net_prefetch(va); - /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rxq->q_vector->napi, IDPF_RX_HDR_SIZE); - if (unlikely(!skb)) { - idpf_rx_put_page(rx_buf); + dst = page_address(hdr->page) + hdr->offset + hdr->page->pp->p.offset; + src = page_address(buf->page) + buf->offset + buf->page->pp->p.offset; + memcpy(dst, src, LARGEST_ALIGN(copy)); - return NULL; - } + buf->offset += copy; - skb_record_rx_queue(skb, rxq->idx); - skb_mark_for_recycle(skb); - - /* Determine available headroom for copy */ - headlen = size; - if (headlen > IDPF_RX_HDR_SIZE) - headlen = eth_get_headlen(skb->dev, va, IDPF_RX_HDR_SIZE); - - /* align pull length to size of long to optimize memcpy performance */ - memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long))); - - /* if we exhaust the linear part then add what is left as a frag */ - size -= headlen; - if (!size) { - idpf_rx_put_page(rx_buf); - - return skb; - } - - skb_add_rx_frag(skb, 0, rx_buf->page, rx_buf->page_offset + headlen, - size, rx_buf->truesize); - - /* Since we're giving the page to the stack, clear our reference to it. - * We'll get a new one during buffer posting. - */ - rx_buf->page = NULL; - - return skb; + return copy; } /** - * idpf_rx_hdr_construct_skb - Allocate skb and populate it from header buffer - * @rxq: Rx descriptor queue - * @va: Rx buffer to pull data from + * idpf_rx_build_skb - Allocate skb and populate it from header buffer + * @buf: Rx buffer to pull data from * @size: the length of the packet * * This function allocates an skb. It then populates it with the page data from * the current receive descriptor, taking care to set up the skb correctly. - * This specifically uses a header buffer to start building the skb. */ -static struct sk_buff *idpf_rx_hdr_construct_skb(struct idpf_queue *rxq, - const void *va, - unsigned int size) +struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size) { + u32 hr = buf->page->pp->p.offset; struct sk_buff *skb; + void *va; - /* allocate a skb to store the frags */ - skb = napi_alloc_skb(&rxq->q_vector->napi, size); + va = page_address(buf->page) + buf->offset; + prefetch(va + hr); + + skb = napi_build_skb(va, buf->truesize); if (unlikely(!skb)) return NULL; - skb_record_rx_queue(skb, rxq->idx); - - memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); - - /* More than likely, a payload fragment, which will use a page from - * page_pool will be added to the SKB so mark it for recycle - * preemptively. And if not, it's inconsequential. 
- */ skb_mark_for_recycle(skb); + skb_reserve(skb, hr); + __skb_put(skb, size); + return skb; } @@ -3115,31 +3240,27 @@ static bool idpf_rx_splitq_is_eop(struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_de * * Returns amount of work completed */ -static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) +static int idpf_rx_splitq_clean(struct idpf_rx_queue *rxq, int budget) { int total_rx_bytes = 0, total_rx_pkts = 0; - struct idpf_queue *rx_bufq = NULL; + struct idpf_buf_queue *rx_bufq = NULL; struct sk_buff *skb = rxq->skb; u16 ntc = rxq->next_to_clean; /* Process Rx packets bounded by budget */ while (likely(total_rx_pkts < budget)) { struct virtchnl2_rx_flex_desc_adv_nic_3 *rx_desc; + struct libeth_fqe *hdr, *rx_buf = NULL; struct idpf_sw_queue *refillq = NULL; struct idpf_rxq_set *rxq_set = NULL; - struct idpf_rx_buf *rx_buf = NULL; - union virtchnl2_rx_desc *desc; unsigned int pkt_len = 0; unsigned int hdr_len = 0; u16 gen_id, buf_id = 0; - /* Header buffer overflow only valid for header split */ - bool hbo = false; int bufq_id; u8 rxdid; /* get the Rx desc from Rx queue based on 'next_to_clean' */ - desc = IDPF_RX_DESC(rxq, ntc); - rx_desc = (struct virtchnl2_rx_flex_desc_adv_nic_3 *)desc; + rx_desc = &rxq->rx[ntc].flex_adv_nic_3_wb; /* This memory barrier is needed to keep us from reading * any other fields out of the rx_desc @@ -3150,7 +3271,7 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) gen_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id, VIRTCHNL2_RX_FLEX_DESC_ADV_GEN_M); - if (test_bit(__IDPF_Q_GEN_CHK, rxq->flags) != gen_id) + if (idpf_queue_has(GEN_CHK, rxq) != gen_id) break; rxdid = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_RXDID_M, @@ -3158,7 +3279,7 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) if (rxdid != VIRTCHNL2_RXDID_2_FLEX_SPLITQ) { IDPF_RX_BUMP_NTC(rxq, ntc); u64_stats_update_begin(&rxq->stats_sync); - u64_stats_inc(&rxq->q_stats.rx.bad_descs); + u64_stats_inc(&rxq->q_stats.bad_descs); u64_stats_update_end(&rxq->stats_sync); continue; } @@ -3166,71 +3287,79 @@ static int idpf_rx_splitq_clean(struct idpf_queue *rxq, int budget) pkt_len = le16_get_bits(rx_desc->pktlen_gen_bufq_id, VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_PBUF_M); - hbo = FIELD_GET(VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M, - rx_desc->status_err0_qw1); + bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id, + VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M); - if (unlikely(hbo)) { + rxq_set = container_of(rxq, struct idpf_rxq_set, rxq); + refillq = rxq_set->refillq[bufq_id]; + + /* retrieve buffer from the rxq */ + rx_bufq = &rxq->bufq_sets[bufq_id].bufq; + + buf_id = le16_to_cpu(rx_desc->buf_id); + + rx_buf = &rx_bufq->buf[buf_id]; + + if (!rx_bufq->hdr_pp) + goto payload; + +#define __HBO_BIT VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_HBO_M +#define __HDR_LEN_MASK VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M + if (likely(!(rx_desc->status_err0_qw1 & __HBO_BIT))) /* If a header buffer overflow occurs, i.e. the header is * too large to fit in the header split buffer, HW will * put the entire packet, including headers, in the * data/payload buffer.
*/ + hdr_len = le16_get_bits(rx_desc->hdrlen_flags, + __HDR_LEN_MASK); +#undef __HDR_LEN_MASK +#undef __HBO_BIT + + hdr = &rx_bufq->hdr_buf[buf_id]; + + if (unlikely(!hdr_len && !skb)) { + hdr_len = idpf_rx_hsplit_wa(hdr, rx_buf, pkt_len); + pkt_len -= hdr_len; + u64_stats_update_begin(&rxq->stats_sync); - u64_stats_inc(&rxq->q_stats.rx.hsplit_buf_ovf); + u64_stats_inc(&rxq->q_stats.hsplit_buf_ovf); u64_stats_update_end(&rxq->stats_sync); - goto bypass_hsplit; } - hdr_len = le16_get_bits(rx_desc->hdrlen_flags, - VIRTCHNL2_RX_FLEX_DESC_ADV_LEN_HDR_M); + if (libeth_rx_sync_for_cpu(hdr, hdr_len)) { + skb = idpf_rx_build_skb(hdr, hdr_len); + if (!skb) + break; -bypass_hsplit: - bufq_id = le16_get_bits(rx_desc->pktlen_gen_bufq_id, - VIRTCHNL2_RX_FLEX_DESC_ADV_BUFQ_ID_M); + u64_stats_update_begin(&rxq->stats_sync); + u64_stats_inc(&rxq->q_stats.hsplit_pkts); + u64_stats_update_end(&rxq->stats_sync); + } - rxq_set = container_of(rxq, struct idpf_rxq_set, rxq); - if (!bufq_id) - refillq = rxq_set->refillq0; + hdr->page = NULL; + +payload: + if (!libeth_rx_sync_for_cpu(rx_buf, pkt_len)) + goto skip_data; + + if (skb) + idpf_rx_add_frag(rx_buf, skb, pkt_len); else - refillq = rxq_set->refillq1; - - /* retrieve buffer from the rxq */ - rx_bufq = &rxq->rxq_grp->splitq.bufq_sets[bufq_id].bufq; - - buf_id = le16_to_cpu(rx_desc->buf_id); - - rx_buf = &rx_bufq->rx_buf.buf[buf_id]; - - if (hdr_len) { - const void *va = (u8 *)rx_bufq->rx_buf.hdr_buf_va + - (u32)buf_id * IDPF_HDR_BUF_SIZE; - - skb = idpf_rx_hdr_construct_skb(rxq, va, hdr_len); - u64_stats_update_begin(&rxq->stats_sync); - u64_stats_inc(&rxq->q_stats.rx.hsplit_pkts); - u64_stats_update_end(&rxq->stats_sync); - } - - if (pkt_len) { - idpf_rx_sync_for_cpu(rx_buf, pkt_len); - if (skb) - idpf_rx_add_frag(rx_buf, skb, pkt_len); - else - skb = idpf_rx_construct_skb(rxq, rx_buf, - pkt_len); - } else { - idpf_rx_put_page(rx_buf); - } + skb = idpf_rx_build_skb(rx_buf, pkt_len); /* exit if we failed to retrieve a buffer */ if (!skb) break; - idpf_rx_post_buf_refill(refillq, buf_id); +skip_data: + rx_buf->page = NULL; + idpf_rx_post_buf_refill(refillq, buf_id); IDPF_RX_BUMP_NTC(rxq, ntc); + /* skip if it is non EOP desc */ - if (!idpf_rx_splitq_is_eop(rx_desc)) + if (!idpf_rx_splitq_is_eop(rx_desc) || unlikely(!skb)) continue; /* pad skb if needed (to make valid ethernet frame) */ @@ -3250,7 +3379,7 @@ bypass_hsplit: } /* send completed skb up the stack */ - napi_gro_receive(&rxq->q_vector->napi, skb); + napi_gro_receive(rxq->napi, skb); skb = NULL; /* update budget accounting */ @@ -3261,8 +3390,8 @@ bypass_hsplit: rxq->skb = skb; u64_stats_update_begin(&rxq->stats_sync); - u64_stats_add(&rxq->q_stats.rx.packets, total_rx_pkts); - u64_stats_add(&rxq->q_stats.rx.bytes, total_rx_bytes); + u64_stats_add(&rxq->q_stats.packets, total_rx_pkts); + u64_stats_add(&rxq->q_stats.bytes, total_rx_bytes); u64_stats_update_end(&rxq->stats_sync); /* guarantee a trip back through this routine if there was a failure */ @@ -3272,34 +3401,41 @@ bypass_hsplit: /** * idpf_rx_update_bufq_desc - Update buffer queue descriptor * @bufq: Pointer to the buffer queue - * @refill_desc: SW Refill queue descriptor containing buffer ID + * @buf_id: buffer ID * @buf_desc: Buffer queue descriptor * * Return 0 on success and negative on failure. 
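The idpf_rx_hsplit_wa() call exercised by this loop deserves a closer look: when the HW failed to split the frame, the driver emulates the split itself. A standalone model of just the copy-length choice and offset adjustment (simplified flat buffers, invented names):

#include <stdio.h>
#include <string.h>

#define MODEL_CACHELINE 64      /* stand-in for L1_CACHE_BYTES */
#define MODEL_ETH_HLEN  14

struct model_fqe {
        unsigned char data[2048];
        unsigned int offset;    /* where valid payload data begins */
};

/* Copy the whole frame when it fits in one cacheline, else just the
 * Ethernet header, and advance the payload offset past what moved.
 * Returns the number of bytes now living in the header buffer.
 */
static unsigned int model_hsplit_wa(struct model_fqe *hdr,
                                    struct model_fqe *buf,
                                    unsigned int data_len)
{
        unsigned int copy = data_len <= MODEL_CACHELINE ? data_len
                                                        : MODEL_ETH_HLEN;

        memcpy(hdr->data, buf->data + buf->offset, copy);
        buf->offset += copy;

        return copy;
}

int main(void)
{
        struct model_fqe hdr = { 0 }, buf = { 0 };

        printf("short frame: %u bytes to hdr\n",
               model_hsplit_wa(&hdr, &buf, 60));       /* whole frame */
        buf.offset = 0;
        printf("long frame: %u bytes to hdr\n",
               model_hsplit_wa(&hdr, &buf, 1500));     /* Ethernet hdr only */
        return 0;
}

The real function additionally syncs the buffer for CPU access first and rounds the memcpy length up for alignment, the LARGEST_ALIGN() visible in the hunk above.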
*/ -static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc, +static int idpf_rx_update_bufq_desc(struct idpf_buf_queue *bufq, u32 buf_id, struct virtchnl2_splitq_rx_buf_desc *buf_desc) { - struct idpf_rx_buf *buf; + struct libeth_fq_fp fq = { + .pp = bufq->pp, + .fqes = bufq->buf, + .truesize = bufq->truesize, + .count = bufq->desc_count, + }; dma_addr_t addr; - u16 buf_id; - buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc); - - buf = &bufq->rx_buf.buf[buf_id]; - - addr = idpf_alloc_page(bufq->pp, buf, bufq->rx_buf_size); - if (unlikely(addr == DMA_MAPPING_ERROR)) + addr = libeth_rx_alloc(&fq, buf_id); + if (addr == DMA_MAPPING_ERROR) return -ENOMEM; buf_desc->pkt_addr = cpu_to_le64(addr); buf_desc->qword0.buf_id = cpu_to_le16(buf_id); - if (!bufq->rx_hsplit_en) + if (!idpf_queue_has(HSPLIT_EN, bufq)) return 0; - buf_desc->hdr_addr = cpu_to_le64(bufq->rx_buf.hdr_buf_pa + - (u32)buf_id * IDPF_HDR_BUF_SIZE); + fq.pp = bufq->hdr_pp; + fq.fqes = bufq->hdr_buf; + fq.truesize = bufq->hdr_truesize; + + addr = libeth_rx_alloc(&fq, buf_id); + if (addr == DMA_MAPPING_ERROR) + return -ENOMEM; + + buf_desc->hdr_addr = cpu_to_le64(addr); return 0; } @@ -3311,38 +3447,37 @@ static int idpf_rx_update_bufq_desc(struct idpf_queue *bufq, u16 refill_desc, * * This function takes care of the buffer refill management */ -static void idpf_rx_clean_refillq(struct idpf_queue *bufq, +static void idpf_rx_clean_refillq(struct idpf_buf_queue *bufq, struct idpf_sw_queue *refillq) { struct virtchnl2_splitq_rx_buf_desc *buf_desc; u16 bufq_nta = bufq->next_to_alloc; u16 ntc = refillq->next_to_clean; int cleaned = 0; - u16 gen; - buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, bufq_nta); + buf_desc = &bufq->split_buf[bufq_nta]; /* make sure we stop at ring wrap in the unlikely case ring is full */ while (likely(cleaned < refillq->desc_count)) { - u16 refill_desc = IDPF_SPLITQ_RX_BI_DESC(refillq, ntc); + u32 buf_id, refill_desc = refillq->ring[ntc]; bool failure; - gen = FIELD_GET(IDPF_RX_BI_GEN_M, refill_desc); - if (test_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags) != gen) + if (idpf_queue_has(RFL_GEN_CHK, refillq) != + !!(refill_desc & IDPF_RX_BI_GEN_M)) break; - failure = idpf_rx_update_bufq_desc(bufq, refill_desc, - buf_desc); + buf_id = FIELD_GET(IDPF_RX_BI_BUFID_M, refill_desc); + failure = idpf_rx_update_bufq_desc(bufq, buf_id, buf_desc); if (failure) break; if (unlikely(++ntc == refillq->desc_count)) { - change_bit(__IDPF_RFLQ_GEN_CHK, refillq->flags); + idpf_queue_change(RFL_GEN_CHK, refillq); ntc = 0; } if (unlikely(++bufq_nta == bufq->desc_count)) { - buf_desc = IDPF_SPLITQ_RX_BUF_DESC(bufq, 0); + buf_desc = &bufq->split_buf[0]; bufq_nta = 0; } else { buf_desc++; @@ -3371,16 +3506,21 @@ static void idpf_rx_clean_refillq(struct idpf_queue *bufq, /** * idpf_rx_clean_refillq_all - Clean all refill queues * @bufq: buffer queue with refill queues + * @nid: ID of the closest NUMA node with memory * * Iterates through all refill queues assigned to the buffer queue assigned to * this vector. Returns true if clean is complete within budget, false * otherwise. 
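idpf_rx_clean_refillq() above consumes refill descriptors only while their generation bit matches the queue's RFL_GEN_CHK expectation, flipping that expectation on every wrap. A compact, runnable model of the ownership protocol, using the same 16-bit buffer id plus BIT(16) generation layout as the new IDPF_RX_BI_* defines (the producer side here is sketched by analogy; everything else is invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MODEL_BI_BUFID_M        0xFFFFu         /* GENMASK(15, 0) */
#define MODEL_BI_GEN_M          (1u << 16)      /* BIT(16) */
#define MODEL_RING_SZ           4

/* The producer posts buf_id tagged with its current gen; the consumer
 * owns an entry only while the entry's gen bit matches its own
 * expectation. Both sides flip their gen on wrap, so entries from the
 * previous pass never match.
 */
struct model_refillq {
        uint32_t ring[MODEL_RING_SZ];
        unsigned int ntu, ntc;
        bool prod_gen, cons_gen;
};

static void model_post(struct model_refillq *q, uint32_t buf_id)
{
        q->ring[q->ntu] = buf_id | (q->prod_gen ? MODEL_BI_GEN_M : 0);
        if (++q->ntu == MODEL_RING_SZ) {
                q->ntu = 0;
                q->prod_gen = !q->prod_gen;
        }
}

static int model_consume(struct model_refillq *q, uint32_t *buf_id)
{
        uint32_t desc = q->ring[q->ntc];

        if (!!(desc & MODEL_BI_GEN_M) != q->cons_gen)
                return -1;                      /* not written yet */

        *buf_id = desc & MODEL_BI_BUFID_M;
        if (++q->ntc == MODEL_RING_SZ) {
                q->ntc = 0;
                q->cons_gen = !q->cons_gen;
        }
        return 0;
}

int main(void)
{
        struct model_refillq q = { .prod_gen = true, .cons_gen = true };
        uint32_t id;

        model_post(&q, 42);
        while (!model_consume(&q, &id))
                printf("got buf_id %u\n", id);  /* prints 42, then stops */
        return 0;
}

Nothing in this scheme needs a lock or a shared producer/consumer counter; ownership travels inside the entries themselves, which is what lets the refill queues stay strictly software-only constructs.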
*/ -static void idpf_rx_clean_refillq_all(struct idpf_queue *bufq) +static void idpf_rx_clean_refillq_all(struct idpf_buf_queue *bufq, int nid) { struct idpf_bufq_set *bufq_set; int i; + page_pool_nid_changed(bufq->pp, nid); + if (bufq->hdr_pp) + page_pool_nid_changed(bufq->hdr_pp, nid); + bufq_set = container_of(bufq, struct idpf_bufq_set, bufq); for (i = 0; i < bufq_set->num_refillqs; i++) idpf_rx_clean_refillq(bufq, &bufq_set->refillqs[i]); @@ -3441,12 +3581,16 @@ void idpf_vport_intr_rel(struct idpf_vport *vport) for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { struct idpf_q_vector *q_vector = &vport->q_vectors[v_idx]; + kfree(q_vector->complq); + q_vector->complq = NULL; kfree(q_vector->bufq); q_vector->bufq = NULL; kfree(q_vector->tx); q_vector->tx = NULL; kfree(q_vector->rx); q_vector->rx = NULL; + + free_cpumask_var(q_vector->affinity_mask); } /* Clean up the mapping of queues to vectors */ @@ -3495,7 +3639,7 @@ static void idpf_vport_intr_rel_irq(struct idpf_vport *vport) /* clear the affinity_mask in the IRQ descriptor */ irq_set_affinity_hint(irq_num, NULL); - free_irq(irq_num, q_vector); + kfree(free_irq(irq_num, q_vector)); } } @@ -3579,13 +3723,13 @@ static void idpf_net_dim(struct idpf_q_vector *q_vector) goto check_rx_itr; for (i = 0, packets = 0, bytes = 0; i < q_vector->num_txq; i++) { - struct idpf_queue *txq = q_vector->tx[i]; + struct idpf_tx_queue *txq = q_vector->tx[i]; unsigned int start; do { start = u64_stats_fetch_begin(&txq->stats_sync); - packets += u64_stats_read(&txq->q_stats.tx.packets); - bytes += u64_stats_read(&txq->q_stats.tx.bytes); + packets += u64_stats_read(&txq->q_stats.packets); + bytes += u64_stats_read(&txq->q_stats.bytes); } while (u64_stats_fetch_retry(&txq->stats_sync, start)); } @@ -3598,13 +3742,13 @@ check_rx_itr: return; for (i = 0, packets = 0, bytes = 0; i < q_vector->num_rxq; i++) { - struct idpf_queue *rxq = q_vector->rx[i]; + struct idpf_rx_queue *rxq = q_vector->rx[i]; unsigned int start; do { start = u64_stats_fetch_begin(&rxq->stats_sync); - packets += u64_stats_read(&rxq->q_stats.rx.packets); - bytes += u64_stats_read(&rxq->q_stats.rx.bytes); + packets += u64_stats_read(&rxq->q_stats.packets); + bytes += u64_stats_read(&rxq->q_stats.bytes); } while (u64_stats_fetch_retry(&rxq->stats_sync, start)); } @@ -3646,6 +3790,7 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename) for (vector = 0; vector < vport->num_q_vectors; vector++) { struct idpf_q_vector *q_vector = &vport->q_vectors[vector]; + char *name; vidx = vport->q_vector_idxs[vector]; irq_num = adapter->msix_entries[vidx].vector; @@ -3659,18 +3804,18 @@ static int idpf_vport_intr_req_irq(struct idpf_vport *vport, char *basename) else continue; - q_vector->name = kasprintf(GFP_KERNEL, "%s-%s-%d", - basename, vec_name, vidx); + name = kasprintf(GFP_KERNEL, "%s-%s-%d", basename, vec_name, + vidx); err = request_irq(irq_num, idpf_vport_intr_clean_queues, 0, - q_vector->name, q_vector); + name, q_vector); if (err) { netdev_err(vport->netdev, "Request_irq failed, error: %d\n", err); goto free_q_irqs; } /* assign the mask for this irq */ - irq_set_affinity_hint(irq_num, &q_vector->affinity_mask); + irq_set_affinity_hint(irq_num, q_vector->affinity_mask); } return 0; @@ -3679,7 +3824,7 @@ free_q_irqs: while (--vector >= 0) { vidx = vport->q_vector_idxs[vector]; irq_num = adapter->msix_entries[vidx].vector; - free_irq(irq_num, &vport->q_vectors[vector]); + kfree(free_irq(irq_num, &vport->q_vectors[vector])); } return err; @@ -3846,16 +3991,17 @@ static 
void idpf_vport_intr_napi_ena_all(struct idpf_vport *vport) static bool idpf_tx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget, int *cleaned) { - u16 num_txq = q_vec->num_txq; + u16 num_complq = q_vec->num_complq; bool clean_complete = true; int i, budget_per_q; - if (unlikely(!num_txq)) + if (unlikely(!num_complq)) return true; - budget_per_q = DIV_ROUND_UP(budget, num_txq); - for (i = 0; i < num_txq; i++) - clean_complete &= idpf_tx_clean_complq(q_vec->tx[i], + budget_per_q = DIV_ROUND_UP(budget, num_complq); + + for (i = 0; i < num_complq; i++) + clean_complete &= idpf_tx_clean_complq(q_vec->complq[i], budget_per_q, cleaned); return clean_complete; @@ -3876,13 +4022,14 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget, bool clean_complete = true; int pkts_cleaned = 0; int i, budget_per_q; + int nid; /* We attempt to distribute budget to each Rx queue fairly, but don't * allow the budget to go below 1 because that would exit polling early. */ budget_per_q = num_rxq ? max(budget / num_rxq, 1) : 0; for (i = 0; i < num_rxq; i++) { - struct idpf_queue *rxq = q_vec->rx[i]; + struct idpf_rx_queue *rxq = q_vec->rx[i]; int pkts_cleaned_per_q; pkts_cleaned_per_q = idpf_rx_splitq_clean(rxq, budget_per_q); @@ -3893,8 +4040,10 @@ static bool idpf_rx_splitq_clean_all(struct idpf_q_vector *q_vec, int budget, } *cleaned = pkts_cleaned; + nid = numa_mem_id(); + for (i = 0; i < q_vec->num_bufq; i++) - idpf_rx_clean_refillq_all(q_vec->bufq[i]); + idpf_rx_clean_refillq_all(q_vec->bufq[i], nid); return clean_complete; } @@ -3937,8 +4086,8 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) * queues virtchnl message, as the interrupts will be disabled after * that */ - if (unlikely(q_vector->num_txq && test_bit(__IDPF_Q_POLL_MODE, - q_vector->tx[0]->flags))) + if (unlikely(q_vector->num_txq && idpf_queue_has(POLL_MODE, + q_vector->tx[0]))) return budget; else return work_done; @@ -3952,27 +4101,28 @@ static int idpf_vport_splitq_napi_poll(struct napi_struct *napi, int budget) */ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport) { + bool split = idpf_is_queue_model_split(vport->rxq_model); u16 num_txq_grp = vport->num_txq_grp; - int i, j, qv_idx, bufq_vidx = 0; struct idpf_rxq_group *rx_qgrp; struct idpf_txq_group *tx_qgrp; - struct idpf_queue *q, *bufq; - u16 q_index; + u32 i, qv_idx, q_index; for (i = 0, qv_idx = 0; i < vport->num_rxq_grp; i++) { u16 num_rxq; + if (qv_idx >= vport->num_q_vectors) + qv_idx = 0; + rx_qgrp = &vport->rxq_grps[i]; - if (idpf_is_queue_model_split(vport->rxq_model)) + if (split) num_rxq = rx_qgrp->splitq.num_rxq_sets; else num_rxq = rx_qgrp->singleq.num_rxq; - for (j = 0; j < num_rxq; j++) { - if (qv_idx >= vport->num_q_vectors) - qv_idx = 0; + for (u32 j = 0; j < num_rxq; j++) { + struct idpf_rx_queue *q; - if (idpf_is_queue_model_split(vport->rxq_model)) + if (split) q = &rx_qgrp->splitq.rxq_sets[j]->rxq; else q = rx_qgrp->singleq.rxqs[j]; @@ -3980,52 +4130,53 @@ static void idpf_vport_intr_map_vector_to_qs(struct idpf_vport *vport) q_index = q->q_vector->num_rxq; q->q_vector->rx[q_index] = q; q->q_vector->num_rxq++; - qv_idx++; + + if (split) + q->napi = &q->q_vector->napi; } - if (idpf_is_queue_model_split(vport->rxq_model)) { - for (j = 0; j < vport->num_bufqs_per_qgrp; j++) { + if (split) { + for (u32 j = 0; j < vport->num_bufqs_per_qgrp; j++) { + struct idpf_buf_queue *bufq; + bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; - bufq->q_vector = &vport->q_vectors[bufq_vidx]; + bufq->q_vector = 
&vport->q_vectors[qv_idx]; q_index = bufq->q_vector->num_bufq; bufq->q_vector->bufq[q_index] = bufq; bufq->q_vector->num_bufq++; } - if (++bufq_vidx >= vport->num_q_vectors) - bufq_vidx = 0; } + + qv_idx++; } + split = idpf_is_queue_model_split(vport->txq_model); + for (i = 0, qv_idx = 0; i < num_txq_grp; i++) { u16 num_txq; + if (qv_idx >= vport->num_q_vectors) + qv_idx = 0; + tx_qgrp = &vport->txq_grps[i]; num_txq = tx_qgrp->num_txq; - if (idpf_is_queue_model_split(vport->txq_model)) { - if (qv_idx >= vport->num_q_vectors) - qv_idx = 0; + for (u32 j = 0; j < num_txq; j++) { + struct idpf_tx_queue *q; - q = tx_qgrp->complq; + q = tx_qgrp->txqs[j]; q->q_vector = &vport->q_vectors[qv_idx]; - q_index = q->q_vector->num_txq; - q->q_vector->tx[q_index] = q; - q->q_vector->num_txq++; - qv_idx++; - } else { - for (j = 0; j < num_txq; j++) { - if (qv_idx >= vport->num_q_vectors) - qv_idx = 0; - - q = tx_qgrp->txqs[j]; - q->q_vector = &vport->q_vectors[qv_idx]; - q_index = q->q_vector->num_txq; - q->q_vector->tx[q_index] = q; - q->q_vector->num_txq++; - - qv_idx++; - } + q->q_vector->tx[q->q_vector->num_txq++] = q; } + + if (split) { + struct idpf_compl_queue *q = tx_qgrp->complq; + + q->q_vector = &vport->q_vectors[qv_idx]; + q->q_vector->complq[q->q_vector->num_complq++] = q; + } + + qv_idx++; } } @@ -4086,7 +4237,7 @@ static void idpf_vport_intr_napi_add_all(struct idpf_vport *vport) /* only set affinity_mask if the CPU is online */ if (cpu_online(v_idx)) - cpumask_set_cpu(v_idx, &q_vector->affinity_mask); + cpumask_set_cpu(v_idx, q_vector->affinity_mask); } } @@ -4101,18 +4252,22 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport) { u16 txqs_per_vector, rxqs_per_vector, bufqs_per_vector; struct idpf_q_vector *q_vector; - int v_idx, err; + u32 complqs_per_vector, v_idx; vport->q_vectors = kcalloc(vport->num_q_vectors, sizeof(struct idpf_q_vector), GFP_KERNEL); if (!vport->q_vectors) return -ENOMEM; - txqs_per_vector = DIV_ROUND_UP(vport->num_txq, vport->num_q_vectors); - rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq, vport->num_q_vectors); + txqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp, + vport->num_q_vectors); + rxqs_per_vector = DIV_ROUND_UP(vport->num_rxq_grp, + vport->num_q_vectors); bufqs_per_vector = vport->num_bufqs_per_qgrp * DIV_ROUND_UP(vport->num_rxq_grp, vport->num_q_vectors); + complqs_per_vector = DIV_ROUND_UP(vport->num_txq_grp, + vport->num_q_vectors); for (v_idx = 0; v_idx < vport->num_q_vectors; v_idx++) { q_vector = &vport->q_vectors[v_idx]; @@ -4126,32 +4281,33 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport) q_vector->rx_intr_mode = IDPF_ITR_DYNAMIC; q_vector->rx_itr_idx = VIRTCHNL2_ITR_IDX_0; - q_vector->tx = kcalloc(txqs_per_vector, - sizeof(struct idpf_queue *), - GFP_KERNEL); - if (!q_vector->tx) { - err = -ENOMEM; + if (!zalloc_cpumask_var(&q_vector->affinity_mask, GFP_KERNEL)) goto error; - } - q_vector->rx = kcalloc(rxqs_per_vector, - sizeof(struct idpf_queue *), + q_vector->tx = kcalloc(txqs_per_vector, sizeof(*q_vector->tx), GFP_KERNEL); - if (!q_vector->rx) { - err = -ENOMEM; + if (!q_vector->tx) + goto error; + + q_vector->rx = kcalloc(rxqs_per_vector, sizeof(*q_vector->rx), + GFP_KERNEL); + if (!q_vector->rx) goto error; - } if (!idpf_is_queue_model_split(vport->rxq_model)) continue; q_vector->bufq = kcalloc(bufqs_per_vector, - sizeof(struct idpf_queue *), + sizeof(*q_vector->bufq), GFP_KERNEL); - if (!q_vector->bufq) { - err = -ENOMEM; + if (!q_vector->bufq) + goto error; + + q_vector->complq = kcalloc(complqs_per_vector, + 
sizeof(*q_vector->complq), + GFP_KERNEL); + if (!q_vector->complq) goto error; - } } return 0; @@ -4159,7 +4315,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport) error: idpf_vport_intr_rel(vport); - return err; + return -ENOMEM; } /** diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h index 551391e20464..6215dbee5546 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h +++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h @@ -4,10 +4,13 @@ #ifndef _IDPF_TXRX_H_ #define _IDPF_TXRX_H_ -#include +#include + +#include #include #include +#include "idpf_lan_txrx.h" #include "virtchnl2_lan_desc.h" #define IDPF_LARGE_MAX_Q 256 @@ -83,7 +86,7 @@ do { \ if (unlikely(++(ntc) == (rxq)->desc_count)) { \ ntc = 0; \ - change_bit(__IDPF_Q_GEN_CHK, (rxq)->flags); \ + idpf_queue_change(GEN_CHK, rxq); \ } \ } while (0) @@ -93,16 +96,10 @@ do { \ idx = 0; \ } while (0) -#define IDPF_RX_HDR_SIZE 256 -#define IDPF_RX_BUF_2048 2048 -#define IDPF_RX_BUF_4096 4096 #define IDPF_RX_BUF_STRIDE 32 #define IDPF_RX_BUF_POST_STRIDE 16 #define IDPF_LOW_WATERMARK 64 -/* Size of header buffer specifically for header split */ -#define IDPF_HDR_BUF_SIZE 256 -#define IDPF_PACKET_HDR_PAD \ - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN * 2) + #define IDPF_TX_TSO_MIN_MSS 88 /* Minimum number of descriptors between 2 descriptors with the RE bit set; @@ -110,36 +107,17 @@ do { \ */ #define IDPF_TX_SPLITQ_RE_MIN_GAP 64 -#define IDPF_RX_BI_BUFID_S 0 -#define IDPF_RX_BI_BUFID_M GENMASK(14, 0) -#define IDPF_RX_BI_GEN_S 15 -#define IDPF_RX_BI_GEN_M BIT(IDPF_RX_BI_GEN_S) +#define IDPF_RX_BI_GEN_M BIT(16) +#define IDPF_RX_BI_BUFID_M GENMASK(15, 0) + #define IDPF_RXD_EOF_SPLITQ VIRTCHNL2_RX_FLEX_DESC_ADV_STATUS0_EOF_M #define IDPF_RXD_EOF_SINGLEQ VIRTCHNL2_RX_BASE_DESC_STATUS_EOF_M -#define IDPF_SINGLEQ_RX_BUF_DESC(rxq, i) \ - (&(((struct virtchnl2_singleq_rx_buf_desc *)((rxq)->desc_ring))[i])) -#define IDPF_SPLITQ_RX_BUF_DESC(rxq, i) \ - (&(((struct virtchnl2_splitq_rx_buf_desc *)((rxq)->desc_ring))[i])) -#define IDPF_SPLITQ_RX_BI_DESC(rxq, i) ((((rxq)->ring))[i]) - -#define IDPF_BASE_TX_DESC(txq, i) \ - (&(((struct idpf_base_tx_desc *)((txq)->desc_ring))[i])) -#define IDPF_BASE_TX_CTX_DESC(txq, i) \ - (&(((struct idpf_base_tx_ctx_desc *)((txq)->desc_ring))[i])) -#define IDPF_SPLITQ_TX_COMPLQ_DESC(txcq, i) \ - (&(((struct idpf_splitq_tx_compl_desc *)((txcq)->desc_ring))[i])) - -#define IDPF_FLEX_TX_DESC(txq, i) \ - (&(((union idpf_tx_flex_desc *)((txq)->desc_ring))[i])) -#define IDPF_FLEX_TX_CTX_DESC(txq, i) \ - (&(((struct idpf_flex_tx_ctx_desc *)((txq)->desc_ring))[i])) - #define IDPF_DESC_UNUSED(txq) \ ((((txq)->next_to_clean > (txq)->next_to_use) ? 
0 : (txq)->desc_count) + \ (txq)->next_to_clean - (txq)->next_to_use - 1) -#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->buf_stack.top) +#define IDPF_TX_BUF_RSV_UNUSED(txq) ((txq)->stash->buf_stack.top) #define IDPF_TX_BUF_RSV_LOW(txq) (IDPF_TX_BUF_RSV_UNUSED(txq) < \ (txq)->desc_count >> 2) @@ -315,16 +293,7 @@ struct idpf_rx_extracted { #define IDPF_TX_MAX_DESC_DATA_ALIGNED \ ALIGN_DOWN(IDPF_TX_MAX_DESC_DATA, IDPF_TX_MAX_READ_REQ_SIZE) -#define IDPF_RX_DMA_ATTR \ - (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) -#define IDPF_RX_DESC(rxq, i) \ - (&(((union virtchnl2_rx_desc *)((rxq)->desc_ring))[i])) - -struct idpf_rx_buf { - struct page *page; - unsigned int page_offset; - u16 truesize; -}; +#define idpf_rx_buf libeth_fqe #define IDPF_RX_MAX_PTYPE_PROTO_IDS 32 #define IDPF_RX_MAX_PTYPE_SZ (sizeof(struct virtchnl2_ptype) + \ @@ -348,72 +317,6 @@ struct idpf_rx_buf { #define IDPF_RX_MAX_BASE_PTYPE 256 #define IDPF_INVALID_PTYPE_ID 0xFFFF -/* Packet type non-ip values */ -enum idpf_rx_ptype_l2 { - IDPF_RX_PTYPE_L2_RESERVED = 0, - IDPF_RX_PTYPE_L2_MAC_PAY2 = 1, - IDPF_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, - IDPF_RX_PTYPE_L2_FIP_PAY2 = 3, - IDPF_RX_PTYPE_L2_OUI_PAY2 = 4, - IDPF_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, - IDPF_RX_PTYPE_L2_LLDP_PAY2 = 6, - IDPF_RX_PTYPE_L2_ECP_PAY2 = 7, - IDPF_RX_PTYPE_L2_EVB_PAY2 = 8, - IDPF_RX_PTYPE_L2_QCN_PAY2 = 9, - IDPF_RX_PTYPE_L2_EAPOL_PAY2 = 10, - IDPF_RX_PTYPE_L2_ARP = 11, -}; - -enum idpf_rx_ptype_outer_ip { - IDPF_RX_PTYPE_OUTER_L2 = 0, - IDPF_RX_PTYPE_OUTER_IP = 1, -}; - -#define IDPF_RX_PTYPE_TO_IPV(ptype, ipv) \ - (((ptype)->outer_ip == IDPF_RX_PTYPE_OUTER_IP) && \ - ((ptype)->outer_ip_ver == (ipv))) - -enum idpf_rx_ptype_outer_ip_ver { - IDPF_RX_PTYPE_OUTER_NONE = 0, - IDPF_RX_PTYPE_OUTER_IPV4 = 1, - IDPF_RX_PTYPE_OUTER_IPV6 = 2, -}; - -enum idpf_rx_ptype_outer_fragmented { - IDPF_RX_PTYPE_NOT_FRAG = 0, - IDPF_RX_PTYPE_FRAG = 1, -}; - -enum idpf_rx_ptype_tunnel_type { - IDPF_RX_PTYPE_TUNNEL_NONE = 0, - IDPF_RX_PTYPE_TUNNEL_IP_IP = 1, - IDPF_RX_PTYPE_TUNNEL_IP_GRENAT = 2, - IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, - IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, -}; - -enum idpf_rx_ptype_tunnel_end_prot { - IDPF_RX_PTYPE_TUNNEL_END_NONE = 0, - IDPF_RX_PTYPE_TUNNEL_END_IPV4 = 1, - IDPF_RX_PTYPE_TUNNEL_END_IPV6 = 2, -}; - -enum idpf_rx_ptype_inner_prot { - IDPF_RX_PTYPE_INNER_PROT_NONE = 0, - IDPF_RX_PTYPE_INNER_PROT_UDP = 1, - IDPF_RX_PTYPE_INNER_PROT_TCP = 2, - IDPF_RX_PTYPE_INNER_PROT_SCTP = 3, - IDPF_RX_PTYPE_INNER_PROT_ICMP = 4, - IDPF_RX_PTYPE_INNER_PROT_TIMESYNC = 5, -}; - -enum idpf_rx_ptype_payload_layer { - IDPF_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, - IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, - IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, - IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, -}; - enum idpf_tunnel_state { IDPF_PTYPE_TUNNEL_IP = BIT(0), IDPF_PTYPE_TUNNEL_IP_GRENAT = BIT(1), @@ -421,22 +324,9 @@ enum idpf_tunnel_state { }; struct idpf_ptype_state { - bool outer_ip; - bool outer_frag; - u8 tunnel_state; -}; - -struct idpf_rx_ptype_decoded { - u32 ptype:10; - u32 known:1; - u32 outer_ip:1; - u32 outer_ip_ver:2; - u32 outer_frag:1; - u32 tunnel_type:3; - u32 tunnel_end_prot:2; - u32 tunnel_end_frag:1; - u32 inner_prot:4; - u32 payload_layer:3; + bool outer_ip:1; + bool outer_frag:1; + u8 tunnel_state:6; }; /** @@ -452,23 +342,37 @@ struct idpf_rx_ptype_decoded { * to 1 and knows that reading a gen bit of 1 in any * descriptor on the initial pass of the ring indicates a * writeback. It also flips on every ring wrap. 
- * @__IDPF_RFLQ_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW bit - * and RFLGQ_GEN is the SW bit. + * @__IDPF_Q_RFL_GEN_CHK: Refill queues are SW only, so Q_GEN acts as the HW + * bit and Q_RFL_GEN is the SW bit. * @__IDPF_Q_FLOW_SCH_EN: Enable flow scheduling * @__IDPF_Q_SW_MARKER: Used to indicate TX queue marker completions * @__IDPF_Q_POLL_MODE: Enable poll mode + * @__IDPF_Q_CRC_EN: enable CRC offload in singleq mode + * @__IDPF_Q_HSPLIT_EN: enable header split on Rx (splitq) * @__IDPF_Q_FLAGS_NBITS: Must be last */ enum idpf_queue_flags_t { __IDPF_Q_GEN_CHK, - __IDPF_RFLQ_GEN_CHK, + __IDPF_Q_RFL_GEN_CHK, __IDPF_Q_FLOW_SCH_EN, __IDPF_Q_SW_MARKER, __IDPF_Q_POLL_MODE, + __IDPF_Q_CRC_EN, + __IDPF_Q_HSPLIT_EN, __IDPF_Q_FLAGS_NBITS, }; +#define idpf_queue_set(f, q) __set_bit(__IDPF_Q_##f, (q)->flags) +#define idpf_queue_clear(f, q) __clear_bit(__IDPF_Q_##f, (q)->flags) +#define idpf_queue_change(f, q) __change_bit(__IDPF_Q_##f, (q)->flags) +#define idpf_queue_has(f, q) test_bit(__IDPF_Q_##f, (q)->flags) + +#define idpf_queue_has_clear(f, q) \ + __test_and_clear_bit(__IDPF_Q_##f, (q)->flags) +#define idpf_queue_assign(f, q, v) \ + __assign_bit(__IDPF_Q_##f, (q)->flags, v) + /** * struct idpf_vec_regs * @dyn_ctl_reg: Dynamic control interrupt register offset @@ -509,54 +413,68 @@ struct idpf_intr_reg { /** * struct idpf_q_vector * @vport: Vport back pointer - * @affinity_mask: CPU affinity mask - * @napi: napi handler - * @v_idx: Vector index - * @intr_reg: See struct idpf_intr_reg + * @num_rxq: Number of RX queues * @num_txq: Number of TX queues + * @num_bufq: Number of buffer queues + * @num_complq: number of completion queues + * @rx: Array of RX queues to service * @tx: Array of TX queues to service + * @bufq: Array of buffer queues to service + * @complq: array of completion queues + * @intr_reg: See struct idpf_intr_reg + * @napi: napi handler + * @total_events: Number of interrupts processed * @tx_dim: Data for TX net_dim algorithm * @tx_itr_value: TX interrupt throttling rate * @tx_intr_mode: Dynamic ITR or not * @tx_itr_idx: TX ITR index - * @num_rxq: Number of RX queues - * @rx: Array of RX queues to service * @rx_dim: Data for RX net_dim algorithm * @rx_itr_value: RX interrupt throttling rate * @rx_intr_mode: Dynamic ITR or not * @rx_itr_idx: RX ITR index - * @num_bufq: Number of buffer queues - * @bufq: Array of buffer queues to service - * @total_events: Number of interrupts processed - * @name: Queue vector name + * @v_idx: Vector index + * @affinity_mask: CPU affinity mask */ struct idpf_q_vector { + __cacheline_group_begin_aligned(read_mostly); struct idpf_vport *vport; - cpumask_t affinity_mask; - struct napi_struct napi; - u16 v_idx; - struct idpf_intr_reg intr_reg; + u16 num_rxq; u16 num_txq; - struct idpf_queue **tx; + u16 num_bufq; + u16 num_complq; + struct idpf_rx_queue **rx; + struct idpf_tx_queue **tx; + struct idpf_buf_queue **bufq; + struct idpf_compl_queue **complq; + + struct idpf_intr_reg intr_reg; + __cacheline_group_end_aligned(read_mostly); + + __cacheline_group_begin_aligned(read_write); + struct napi_struct napi; + u16 total_events; + struct dim tx_dim; u16 tx_itr_value; bool tx_intr_mode; u32 tx_itr_idx; - u16 num_rxq; - struct idpf_queue **rx; struct dim rx_dim; u16 rx_itr_value; bool rx_intr_mode; u32 rx_itr_idx; + __cacheline_group_end_aligned(read_write); - u16 num_bufq; - struct idpf_queue **bufq; + __cacheline_group_begin_aligned(cold); + u16 v_idx; - u16 total_events; - char *name; + cpumask_var_t affinity_mask; + 
__cacheline_group_end_aligned(cold); }; +libeth_cacheline_set_assert(struct idpf_q_vector, 104, + 424 + 2 * sizeof(struct dim), + 8 + sizeof(cpumask_var_t)); struct idpf_rx_queue_stats { u64_stats_t packets; @@ -583,11 +501,6 @@ struct idpf_cleaned_stats { u32 bytes; }; -union idpf_queue_stats { - struct idpf_rx_queue_stats rx; - struct idpf_tx_queue_stats tx; -}; - #define IDPF_ITR_DYNAMIC 1 #define IDPF_ITR_MAX 0x1FE0 #define IDPF_ITR_20K 0x0032 @@ -603,68 +516,123 @@ union idpf_queue_stats { #define IDPF_DIM_DEFAULT_PROFILE_IX 1 /** - * struct idpf_queue - * @dev: Device back pointer for DMA mapping - * @vport: Back pointer to associated vport - * @txq_grp: See struct idpf_txq_group - * @rxq_grp: See struct idpf_rxq_group - * @idx: For buffer queue, it is used as group id, either 0 or 1. On clean, - * buffer queue uses this index to determine which group of refill queues - * to clean. - * For TX queue, it is used as index to map between TX queue group and - * hot path TX pointers stored in vport. Used in both singleq/splitq. - * For RX queue, it is used to index to total RX queue across groups and - * used for skb reporting. - * @tail: Tail offset. Used for both queue models single and split. In splitq - * model relevant only for TX queue and RX queue. - * @tx_buf: See struct idpf_tx_buf - * @rx_buf: Struct with RX buffer related members - * @rx_buf.buf: See struct idpf_rx_buf - * @rx_buf.hdr_buf_pa: DMA handle - * @rx_buf.hdr_buf_va: Virtual address - * @pp: Page pool pointer - * @skb: Pointer to the skb - * @q_type: Queue type (TX, RX, TX completion, RX buffer) - * @q_id: Queue id - * @desc_count: Number of descriptors - * @next_to_use: Next descriptor to use. Relevant in both split & single txq - * and bufq. - * @next_to_clean: Next descriptor to clean. In split queue model, only - * relevant to TX completion queue and RX queue. - * @next_to_alloc: RX buffer to allocate at. Used only for RX. In splitq model - * only relevant to RX queue. + * struct idpf_txq_stash - Tx buffer stash for Flow-based scheduling mode + * @buf_stack: Stack of empty buffers to store buffer info for out of order + * buffer completions. See struct idpf_buf_lifo + * @sched_buf_hash: Hash table to store buffers + */ +struct idpf_txq_stash { + struct idpf_buf_lifo buf_stack; + DECLARE_HASHTABLE(sched_buf_hash, 12); +} ____cacheline_aligned; + +/** + * struct idpf_rx_queue - software structure representing a receive queue + * @rx: universal receive descriptor array + * @single_buf: buffer descriptor array in singleq + * @desc_ring: virtual descriptor ring address + * @bufq_sets: Pointer to the array of buffer queues in splitq mode + * @napi: NAPI instance corresponding to this queue (splitq) + * @rx_buf: See struct &libeth_fqe + * @pp: Page pool pointer in singleq mode + * @netdev: &net_device corresponding to this queue + * @tail: Tail offset. Used for both queue models single and split. * @flags: See enum idpf_queue_flags_t - * @q_stats: See union idpf_queue_stats + * @idx: For RX queue, it is used to index to total RX queue across groups and + * used for skb reporting. 
+ * @desc_count: Number of descriptors + * @rxdids: Supported RX descriptor ids + * @rx_ptype_lkup: LUT of Rx ptypes + * @next_to_use: Next descriptor to use + * @next_to_clean: Next descriptor to clean + * @next_to_alloc: RX buffer to allocate at + * @skb: Pointer to the skb + * @truesize: data buffer truesize in singleq * @stats_sync: See struct u64_stats_sync - * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on - * the TX completion queue, it can be for any TXQ associated - * with that completion queue. This means we can clean up to - * N TXQs during a single call to clean the completion queue. - * cleaned_bytes|pkts tracks the clean stats per TXQ during - * that single call to clean the completion queue. By doing so, - * we can update BQL with aggregate cleaned stats for each TXQ - * only once at the end of the cleaning routine. - * @cleaned_pkts: Number of packets cleaned for the above said case - * @rx_hsplit_en: RX headsplit enable + * @q_stats: See union idpf_rx_queue_stats + * @q_id: Queue id + * @size: Length of descriptor ring in bytes + * @dma: Physical address of ring + * @q_vector: Backreference to associated vector + * @rx_buffer_low_watermark: RX buffer low watermark * @rx_hbuf_size: Header buffer size * @rx_buf_size: Buffer size * @rx_max_pkt_size: RX max packet size - * @rx_buf_stride: RX buffer stride - * @rx_buffer_low_watermark: RX buffer low watermark - * @rxdids: Supported RX descriptor ids - * @q_vector: Backreference to associated vector - * @size: Length of descriptor ring in bytes - * @dma: Physical address of ring - * @desc_ring: Descriptor ring memory - * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather + */ +struct idpf_rx_queue { + __cacheline_group_begin_aligned(read_mostly); + union { + union virtchnl2_rx_desc *rx; + struct virtchnl2_singleq_rx_buf_desc *single_buf; + + void *desc_ring; + }; + union { + struct { + struct idpf_bufq_set *bufq_sets; + struct napi_struct *napi; + }; + struct { + struct libeth_fqe *rx_buf; + struct page_pool *pp; + }; + }; + struct net_device *netdev; + void __iomem *tail; + + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); + u16 idx; + u16 desc_count; + + u32 rxdids; + const struct libeth_rx_pt *rx_ptype_lkup; + __cacheline_group_end_aligned(read_mostly); + + __cacheline_group_begin_aligned(read_write); + u16 next_to_use; + u16 next_to_clean; + u16 next_to_alloc; + + struct sk_buff *skb; + u32 truesize; + + struct u64_stats_sync stats_sync; + struct idpf_rx_queue_stats q_stats; + __cacheline_group_end_aligned(read_write); + + __cacheline_group_begin_aligned(cold); + u32 q_id; + u32 size; + dma_addr_t dma; + + struct idpf_q_vector *q_vector; + + u16 rx_buffer_low_watermark; + u16 rx_hbuf_size; + u16 rx_buf_size; + u16 rx_max_pkt_size; + __cacheline_group_end_aligned(cold); +}; +libeth_cacheline_set_assert(struct idpf_rx_queue, 64, + 80 + sizeof(struct u64_stats_sync), + 32); + +/** + * struct idpf_tx_queue - software structure representing a transmit queue + * @base_tx: base Tx descriptor array + * @base_ctx: base Tx context descriptor array + * @flex_tx: flex Tx descriptor array + * @flex_ctx: flex Tx context descriptor array + * @desc_ring: virtual descriptor ring address + * @tx_buf: See struct idpf_tx_buf + * @txq_grp: See struct idpf_txq_group + * @dev: Device back pointer for DMA mapping + * @tail: Tail offset. 
Used for both queue models single and split + * @flags: See enum idpf_queue_flags_t + * @idx: For TX queue, it is used as index to map between TX queue group and + * hot path TX pointers stored in vport. Used in both singleq/splitq. + * @desc_count: Number of descriptors * @tx_min_pkt_len: Min supported packet length - * @num_completions: Only relevant for TX completion queue. It tracks the - * number of completions received to compare against the - * number of completions pending, as accumulated by the - * TX queues. - * @buf_stack: Stack of empty buffers to store buffer info for out of order - * buffer completions. See struct idpf_buf_lifo. - * @compl_tag_bufid_m: Completion tag buffer id mask * @compl_tag_gen_s: Completion tag generation bit * The format of the completion tag will change based on the TXQ * descriptor ring size so that we can maintain roughly the same level @@ -685,108 +653,238 @@ union idpf_queue_stats { * -------------------------------- * * This gives us 8*8160 = 65280 possible unique values. + * @netdev: &net_device corresponding to this queue + * @next_to_use: Next descriptor to use + * @next_to_clean: Next descriptor to clean + * @cleaned_bytes: Splitq only, TXQ only: When a TX completion is received on + * the TX completion queue, it can be for any TXQ associated + * with that completion queue. This means we can clean up to + * N TXQs during a single call to clean the completion queue. + * cleaned_bytes|pkts tracks the clean stats per TXQ during + * that single call to clean the completion queue. By doing so, + * we can update BQL with aggregate cleaned stats for each TXQ + * only once at the end of the cleaning routine. + * @clean_budget: singleq only, queue cleaning budget + * @cleaned_pkts: Number of packets cleaned for the above said case + * @tx_max_bufs: Max buffers that can be transmitted with scatter-gather + * @stash: Tx buffer stash for Flow-based scheduling mode + * @compl_tag_bufid_m: Completion tag buffer id mask * @compl_tag_cur_gen: Used to keep track of current completion tag generation * @compl_tag_gen_max: To determine when compl_tag_cur_gen should be reset - * @sched_buf_hash: Hash table to stores buffers + * @stats_sync: See struct u64_stats_sync + * @q_stats: See union idpf_tx_queue_stats + * @q_id: Queue id + * @size: Length of descriptor ring in bytes + * @dma: Physical address of ring + * @q_vector: Backreference to associated vector */ -struct idpf_queue { +struct idpf_tx_queue { + __cacheline_group_begin_aligned(read_mostly); + union { + struct idpf_base_tx_desc *base_tx; + struct idpf_base_tx_ctx_desc *base_ctx; + union idpf_tx_flex_desc *flex_tx; + struct idpf_flex_tx_ctx_desc *flex_ctx; + + void *desc_ring; + }; + struct idpf_tx_buf *tx_buf; + struct idpf_txq_group *txq_grp; struct device *dev; - struct idpf_vport *vport; - union { - struct idpf_txq_group *txq_grp; - struct idpf_rxq_group *rxq_grp; - }; - u16 idx; void __iomem *tail; - union { - struct idpf_tx_buf *tx_buf; - struct { - struct idpf_rx_buf *buf; - dma_addr_t hdr_buf_pa; - void *hdr_buf_va; - } rx_buf; - }; - struct page_pool *pp; - struct sk_buff *skb; - u16 q_type; - u32 q_id; + + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); + u16 idx; u16 desc_count; - u16 next_to_use; - u16 next_to_clean; - u16 next_to_alloc; - DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); - - union idpf_queue_stats q_stats; - struct u64_stats_sync stats_sync; - - u32 cleaned_bytes; - u16 cleaned_pkts; - - bool rx_hsplit_en; - u16 rx_hbuf_size; - u16 rx_buf_size; - u16 rx_max_pkt_size; - u16 
rx_buf_stride; - u8 rx_buffer_low_watermark; - u64 rxdids; - struct idpf_q_vector *q_vector; - unsigned int size; - dma_addr_t dma; - void *desc_ring; - - u16 tx_max_bufs; - u8 tx_min_pkt_len; - - u32 num_completions; - - struct idpf_buf_lifo buf_stack; - - u16 compl_tag_bufid_m; + u16 tx_min_pkt_len; u16 compl_tag_gen_s; + struct net_device *netdev; + __cacheline_group_end_aligned(read_mostly); + + __cacheline_group_begin_aligned(read_write); + u16 next_to_use; + u16 next_to_clean; + + union { + u32 cleaned_bytes; + u32 clean_budget; + }; + u16 cleaned_pkts; + + u16 tx_max_bufs; + struct idpf_txq_stash *stash; + + u16 compl_tag_bufid_m; u16 compl_tag_cur_gen; u16 compl_tag_gen_max; - DECLARE_HASHTABLE(sched_buf_hash, 12); -} ____cacheline_internodealigned_in_smp; + struct u64_stats_sync stats_sync; + struct idpf_tx_queue_stats q_stats; + __cacheline_group_end_aligned(read_write); + + __cacheline_group_begin_aligned(cold); + u32 q_id; + u32 size; + dma_addr_t dma; + + struct idpf_q_vector *q_vector; + __cacheline_group_end_aligned(cold); +}; +libeth_cacheline_set_assert(struct idpf_tx_queue, 64, + 88 + sizeof(struct u64_stats_sync), + 24); + +/** + * struct idpf_buf_queue - software structure representing a buffer queue + * @split_buf: buffer descriptor array + * @hdr_buf: &libeth_fqe for header buffers + * @hdr_pp: &page_pool for header buffers + * @buf: &libeth_fqe for data buffers + * @pp: &page_pool for data buffers + * @tail: Tail offset + * @flags: See enum idpf_queue_flags_t + * @desc_count: Number of descriptors + * @next_to_use: Next descriptor to use + * @next_to_clean: Next descriptor to clean + * @next_to_alloc: RX buffer to allocate at + * @hdr_truesize: truesize for buffer headers + * @truesize: truesize for data buffers + * @q_id: Queue id + * @size: Length of descriptor ring in bytes + * @dma: Physical address of ring + * @q_vector: Backreference to associated vector + * @rx_buffer_low_watermark: RX buffer low watermark + * @rx_hbuf_size: Header buffer size + * @rx_buf_size: Buffer size + */ +struct idpf_buf_queue { + __cacheline_group_begin_aligned(read_mostly); + struct virtchnl2_splitq_rx_buf_desc *split_buf; + struct libeth_fqe *hdr_buf; + struct page_pool *hdr_pp; + struct libeth_fqe *buf; + struct page_pool *pp; + void __iomem *tail; + + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); + u32 desc_count; + __cacheline_group_end_aligned(read_mostly); + + __cacheline_group_begin_aligned(read_write); + u32 next_to_use; + u32 next_to_clean; + u32 next_to_alloc; + + u32 hdr_truesize; + u32 truesize; + __cacheline_group_end_aligned(read_write); + + __cacheline_group_begin_aligned(cold); + u32 q_id; + u32 size; + dma_addr_t dma; + + struct idpf_q_vector *q_vector; + + u16 rx_buffer_low_watermark; + u16 rx_hbuf_size; + u16 rx_buf_size; + __cacheline_group_end_aligned(cold); +}; +libeth_cacheline_set_assert(struct idpf_buf_queue, 64, 24, 32); + +/** + * struct idpf_compl_queue - software structure representing a completion queue + * @comp: completion descriptor array + * @txq_grp: See struct idpf_txq_group + * @flags: See enum idpf_queue_flags_t + * @desc_count: Number of descriptors + * @clean_budget: queue cleaning budget + * @netdev: &net_device corresponding to this queue + * @next_to_use: Next descriptor to use. Relevant in both split & single txq + * and bufq. + * @next_to_clean: Next descriptor to clean + * @num_completions: Only relevant for TX completion queue. 
It tracks the + * number of completions received to compare against the + * number of completions pending, as accumulated by the + * TX queues. + * @q_id: Queue id + * @size: Length of descriptor ring in bytes + * @dma: Physical address of ring + * @q_vector: Backreference to associated vector + */ +struct idpf_compl_queue { + __cacheline_group_begin_aligned(read_mostly); + struct idpf_splitq_tx_compl_desc *comp; + struct idpf_txq_group *txq_grp; + + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); + u32 desc_count; + + u32 clean_budget; + struct net_device *netdev; + __cacheline_group_end_aligned(read_mostly); + + __cacheline_group_begin_aligned(read_write); + u32 next_to_use; + u32 next_to_clean; + + u32 num_completions; + __cacheline_group_end_aligned(read_write); + + __cacheline_group_begin_aligned(cold); + u32 q_id; + u32 size; + dma_addr_t dma; + + struct idpf_q_vector *q_vector; + __cacheline_group_end_aligned(cold); +}; +libeth_cacheline_set_assert(struct idpf_compl_queue, 40, 16, 24); /** * struct idpf_sw_queue - * @next_to_clean: Next descriptor to clean - * @next_to_alloc: Buffer to allocate at - * @flags: See enum idpf_queue_flags_t * @ring: Pointer to the ring + * @flags: See enum idpf_queue_flags_t * @desc_count: Descriptor count - * @dev: Device back pointer for DMA mapping + * @next_to_use: Buffer to allocate at + * @next_to_clean: Next descriptor to clean * * Software queues are used in splitq mode to manage buffers between rxq * producer and the bufq consumer. These are required in order to maintain a * lockless buffer management system and are strictly software only constructs. */ struct idpf_sw_queue { - u16 next_to_clean; - u16 next_to_alloc; + __cacheline_group_begin_aligned(read_mostly); + u32 *ring; + DECLARE_BITMAP(flags, __IDPF_Q_FLAGS_NBITS); - u16 *ring; - u16 desc_count; - struct device *dev; -} ____cacheline_internodealigned_in_smp; + u32 desc_count; + __cacheline_group_end_aligned(read_mostly); + + __cacheline_group_begin_aligned(read_write); + u32 next_to_use; + u32 next_to_clean; + __cacheline_group_end_aligned(read_write); +}; +libeth_cacheline_group_assert(struct idpf_sw_queue, read_mostly, 24); +libeth_cacheline_group_assert(struct idpf_sw_queue, read_write, 8); +libeth_cacheline_struct_assert(struct idpf_sw_queue, 24, 8); /** * struct idpf_rxq_set * @rxq: RX queue - * @refillq0: Pointer to refill queue 0 - * @refillq1: Pointer to refill queue 1 + * @refillq: pointers to refill queues * * Splitq only. idpf_rxq_set associates an rxq with an array of refillqs. * Each rxq needs a refillq to return used buffers back to the respective bufq. * Bufqs then clean these refillqs for buffers to give to hardware. */ struct idpf_rxq_set { - struct idpf_queue rxq; - struct idpf_sw_queue *refillq0; - struct idpf_sw_queue *refillq1; + struct idpf_rx_queue rxq; + struct idpf_sw_queue *refillq[IDPF_MAX_BUFQS_PER_RXQ_GRP]; }; /** @@ -805,7 +903,7 @@ struct idpf_rxq_set { * managed by at most two bufqs (depending on performance configuration). 
*/ struct idpf_bufq_set { - struct idpf_queue bufq; + struct idpf_buf_queue bufq; int num_refillqs; struct idpf_sw_queue *refillqs; }; @@ -831,7 +929,7 @@ struct idpf_rxq_group { union { struct { u16 num_rxq; - struct idpf_queue *rxqs[IDPF_LARGE_MAX_Q]; + struct idpf_rx_queue *rxqs[IDPF_LARGE_MAX_Q]; } singleq; struct { u16 num_rxq_sets; @@ -846,6 +944,7 @@ struct idpf_rxq_group { * @vport: Vport back pointer * @num_txq: Number of TX queues associated * @txqs: Array of TX queue pointers + * @stashes: array of OOO stashes for the queues * @complq: Associated completion queue pointer, split queue only * @num_completions_pending: Total number of completions pending for the * completion queue, accumulated for all TX queues @@ -859,13 +958,26 @@ struct idpf_txq_group { struct idpf_vport *vport; u16 num_txq; - struct idpf_queue *txqs[IDPF_LARGE_MAX_Q]; + struct idpf_tx_queue *txqs[IDPF_LARGE_MAX_Q]; + struct idpf_txq_stash *stashes; - struct idpf_queue *complq; + struct idpf_compl_queue *complq; u32 num_completions_pending; }; +static inline int idpf_q_vector_to_mem(const struct idpf_q_vector *q_vector) +{ + u32 cpu; + + if (!q_vector) + return NUMA_NO_NODE; + + cpu = cpumask_first(q_vector->affinity_mask); + + return cpu < nr_cpu_ids ? cpu_to_mem(cpu) : NUMA_NO_NODE; +} + /** * idpf_size_to_txd_count - Get number of descriptors needed for large Tx frag * @size: transmit request size in bytes @@ -921,60 +1033,6 @@ static inline void idpf_tx_splitq_build_desc(union idpf_tx_flex_desc *desc, idpf_tx_splitq_build_flow_desc(desc, params, td_cmd, size); } -/** - * idpf_alloc_page - Allocate a new RX buffer from the page pool - * @pool: page_pool to allocate from - * @buf: metadata struct to populate with page info - * @buf_size: 2K or 4K - * - * Returns &dma_addr_t to be passed to HW for Rx, %DMA_MAPPING_ERROR otherwise. 
- */ -static inline dma_addr_t idpf_alloc_page(struct page_pool *pool, - struct idpf_rx_buf *buf, - unsigned int buf_size) -{ - if (buf_size == IDPF_RX_BUF_2048) - buf->page = page_pool_dev_alloc_frag(pool, &buf->page_offset, - buf_size); - else - buf->page = page_pool_dev_alloc_pages(pool); - - if (!buf->page) - return DMA_MAPPING_ERROR; - - buf->truesize = buf_size; - - return page_pool_get_dma_addr(buf->page) + buf->page_offset + - pool->p.offset; -} - -/** - * idpf_rx_put_page - Return RX buffer page to pool - * @rx_buf: RX buffer metadata struct - */ -static inline void idpf_rx_put_page(struct idpf_rx_buf *rx_buf) -{ - page_pool_put_page(rx_buf->page->pp, rx_buf->page, - rx_buf->truesize, true); - rx_buf->page = NULL; -} - -/** - * idpf_rx_sync_for_cpu - Synchronize DMA buffer - * @rx_buf: RX buffer metadata struct - * @len: frame length from descriptor - */ -static inline void idpf_rx_sync_for_cpu(struct idpf_rx_buf *rx_buf, u32 len) -{ - struct page *page = rx_buf->page; - struct page_pool *pp = page->pp; - - dma_sync_single_range_for_cpu(pp->p.dev, - page_pool_get_dma_addr(page), - rx_buf->page_offset + pp->p.offset, len, - page_pool_get_dma_dir(pp)); -} - int idpf_vport_singleq_napi_poll(struct napi_struct *napi, int budget); void idpf_vport_init_num_qs(struct idpf_vport *vport, struct virtchnl2_create_vport *vport_msg); @@ -991,35 +1049,27 @@ void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector); void idpf_vport_intr_deinit(struct idpf_vport *vport); int idpf_vport_intr_init(struct idpf_vport *vport); void idpf_vport_intr_ena(struct idpf_vport *vport); -enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded); int idpf_config_rss(struct idpf_vport *vport); int idpf_init_rss(struct idpf_vport *vport); void idpf_deinit_rss(struct idpf_vport *vport); int idpf_rx_bufs_init_all(struct idpf_vport *vport); void idpf_rx_add_frag(struct idpf_rx_buf *rx_buf, struct sk_buff *skb, unsigned int size); -struct sk_buff *idpf_rx_construct_skb(struct idpf_queue *rxq, - struct idpf_rx_buf *rx_buf, - unsigned int size); -bool idpf_init_rx_buf_hw_alloc(struct idpf_queue *rxq, struct idpf_rx_buf *buf); -void idpf_rx_buf_hw_update(struct idpf_queue *rxq, u32 val); -void idpf_tx_buf_hw_update(struct idpf_queue *tx_q, u32 val, +struct sk_buff *idpf_rx_build_skb(const struct libeth_fqe *buf, u32 size); +void idpf_tx_buf_hw_update(struct idpf_tx_queue *tx_q, u32 val, bool xmit_more); unsigned int idpf_size_to_txd_count(unsigned int size); -netdev_tx_t idpf_tx_drop_skb(struct idpf_queue *tx_q, struct sk_buff *skb); -void idpf_tx_dma_map_error(struct idpf_queue *txq, struct sk_buff *skb, +netdev_tx_t idpf_tx_drop_skb(struct idpf_tx_queue *tx_q, struct sk_buff *skb); +void idpf_tx_dma_map_error(struct idpf_tx_queue *txq, struct sk_buff *skb, struct idpf_tx_buf *first, u16 ring_idx); -unsigned int idpf_tx_desc_count_required(struct idpf_queue *txq, +unsigned int idpf_tx_desc_count_required(struct idpf_tx_queue *txq, struct sk_buff *skb); -bool idpf_chk_linearize(struct sk_buff *skb, unsigned int max_bufs, - unsigned int count); -int idpf_tx_maybe_stop_common(struct idpf_queue *tx_q, unsigned int size); +int idpf_tx_maybe_stop_common(struct idpf_tx_queue *tx_q, unsigned int size); void idpf_tx_timeout(struct net_device *netdev, unsigned int txqueue); -netdev_tx_t idpf_tx_splitq_start(struct sk_buff *skb, - struct net_device *netdev); -netdev_tx_t idpf_tx_singleq_start(struct sk_buff *skb, - struct net_device *netdev); -bool idpf_rx_singleq_buf_hw_alloc_all(struct 
idpf_queue *rxq, +netdev_tx_t idpf_tx_singleq_frame(struct sk_buff *skb, + struct idpf_tx_queue *tx_q); +netdev_tx_t idpf_tx_start(struct sk_buff *skb, struct net_device *netdev); +bool idpf_rx_singleq_buf_hw_alloc_all(struct idpf_rx_queue *rxq, u16 cleaned_count); int idpf_tso(struct sk_buff *skb, struct idpf_tx_offload_params *off); diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c index a5f9b7a5effe..70986e12da28 100644 --- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c +++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c @@ -1,6 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2023 Intel Corporation */ +#include + #include "idpf.h" #include "idpf_virtchnl.h" @@ -750,7 +752,7 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport) int i; for (i = 0; i < vport->num_txq; i++) - set_bit(__IDPF_Q_SW_MARKER, vport->txqs[i]->flags); + idpf_queue_set(SW_MARKER, vport->txqs[i]); event = wait_event_timeout(vport->sw_marker_wq, test_and_clear_bit(IDPF_VPORT_SW_MARKER, @@ -758,7 +760,7 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport) msecs_to_jiffies(500)); for (i = 0; i < vport->num_txq; i++) - clear_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags); + idpf_queue_clear(POLL_MODE, vport->txqs[i]); if (event) return 0; @@ -1092,7 +1094,6 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, int num_regs, u32 q_type) { struct idpf_adapter *adapter = vport->adapter; - struct idpf_queue *q; int i, j, k = 0; switch (q_type) { @@ -1111,6 +1112,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, u16 num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq && k < num_regs; j++, k++) { + struct idpf_rx_queue *q; + q = rx_qgrp->singleq.rxqs[j]; q->tail = idpf_get_reg_addr(adapter, reg_vals[k]); @@ -1123,6 +1126,8 @@ static int __idpf_queue_reg_init(struct idpf_vport *vport, u32 *reg_vals, u8 num_bufqs = vport->num_bufqs_per_qgrp; for (j = 0; j < num_bufqs && k < num_regs; j++, k++) { + struct idpf_buf_queue *q; + q = &rx_qgrp->splitq.bufq_sets[j].bufq; q->tail = idpf_get_reg_addr(adapter, reg_vals[k]); @@ -1253,12 +1258,12 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter, vport_msg->vport_type = cpu_to_le16(VIRTCHNL2_VPORT_TYPE_DEFAULT); vport_msg->vport_index = cpu_to_le16(idx); - if (adapter->req_tx_splitq) + if (adapter->req_tx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ)) vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); else vport_msg->txq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); - if (adapter->req_rx_splitq) + if (adapter->req_rx_splitq || !IS_ENABLED(CONFIG_IDPF_SINGLEQ)) vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SPLIT); else vport_msg->rxq_model = cpu_to_le16(VIRTCHNL2_QUEUE_MODEL_SINGLE); @@ -1320,10 +1325,17 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport) vport_msg = adapter->vport_params_recvd[vport->idx]; + if (!IS_ENABLED(CONFIG_IDPF_SINGLEQ) && + (vport_msg->rxq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE || + vport_msg->txq_model == VIRTCHNL2_QUEUE_MODEL_SINGLE)) { + pci_err(adapter->pdev, "singleq mode requested, but not compiled-in\n"); + return -EOPNOTSUPP; + } + rx_desc_ids = le64_to_cpu(vport_msg->rx_desc_ids); tx_desc_ids = le64_to_cpu(vport_msg->tx_desc_ids); - if (vport->rxq_model == VIRTCHNL2_QUEUE_MODEL_SPLIT) { + if (idpf_is_queue_model_split(vport->rxq_model)) { if (!(rx_desc_ids & VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M)) { dev_info(&adapter->pdev->dev, "Minimum RX 
descriptor support not provided, using the default\n"); vport_msg->rx_desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); @@ -1333,7 +1345,7 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport) vport->base_rxd = true; } - if (vport->txq_model != VIRTCHNL2_QUEUE_MODEL_SPLIT) + if (!idpf_is_queue_model_split(vport->txq_model)) return 0; if ((tx_desc_ids & MIN_SUPPORT_TXDID) != MIN_SUPPORT_TXDID) { @@ -1449,19 +1461,19 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) qi[k].model = cpu_to_le16(vport->txq_model); qi[k].type = - cpu_to_le32(tx_qgrp->txqs[j]->q_type); + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX); qi[k].ring_len = cpu_to_le16(tx_qgrp->txqs[j]->desc_count); qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->txqs[j]->dma); if (idpf_is_queue_model_split(vport->txq_model)) { - struct idpf_queue *q = tx_qgrp->txqs[j]; + struct idpf_tx_queue *q = tx_qgrp->txqs[j]; qi[k].tx_compl_queue_id = cpu_to_le16(tx_qgrp->complq->q_id); qi[k].relative_queue_id = cpu_to_le16(j); - if (test_bit(__IDPF_Q_FLOW_SCH_EN, q->flags)) + if (idpf_queue_has(FLOW_SCH_EN, q)) qi[k].sched_mode = cpu_to_le16(VIRTCHNL2_TXQ_SCHED_MODE_FLOW); else @@ -1478,11 +1490,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport) qi[k].queue_id = cpu_to_le32(tx_qgrp->complq->q_id); qi[k].model = cpu_to_le16(vport->txq_model); - qi[k].type = cpu_to_le32(tx_qgrp->complq->q_type); + qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION); qi[k].ring_len = cpu_to_le16(tx_qgrp->complq->desc_count); qi[k].dma_ring_addr = cpu_to_le64(tx_qgrp->complq->dma); - if (test_bit(__IDPF_Q_FLOW_SCH_EN, tx_qgrp->complq->flags)) + if (idpf_queue_has(FLOW_SCH_EN, tx_qgrp->complq)) sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_FLOW; else sched_mode = VIRTCHNL2_TXQ_SCHED_MODE_QUEUE; @@ -1567,17 +1579,18 @@ static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport) goto setup_rxqs; for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { - struct idpf_queue *bufq = + struct idpf_buf_queue *bufq = &rx_qgrp->splitq.bufq_sets[j].bufq; qi[k].queue_id = cpu_to_le32(bufq->q_id); qi[k].model = cpu_to_le16(vport->rxq_model); - qi[k].type = cpu_to_le32(bufq->q_type); + qi[k].type = + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); qi[k].desc_ids = cpu_to_le64(VIRTCHNL2_RXDID_2_FLEX_SPLITQ_M); qi[k].ring_len = cpu_to_le16(bufq->desc_count); qi[k].dma_ring_addr = cpu_to_le64(bufq->dma); qi[k].data_buffer_size = cpu_to_le32(bufq->rx_buf_size); - qi[k].buffer_notif_stride = bufq->rx_buf_stride; + qi[k].buffer_notif_stride = IDPF_RX_BUF_STRIDE; qi[k].rx_buffer_low_watermark = cpu_to_le16(bufq->rx_buffer_low_watermark); if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) @@ -1591,35 +1604,47 @@ setup_rxqs: num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq; j++, k++) { - struct idpf_queue *rxq; + const struct idpf_bufq_set *sets; + struct idpf_rx_queue *rxq; if (!idpf_is_queue_model_split(vport->rxq_model)) { rxq = rx_qgrp->singleq.rxqs[j]; goto common_qi_fields; } + rxq = &rx_qgrp->splitq.rxq_sets[j]->rxq; - qi[k].rx_bufq1_id = - cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[0].bufq.q_id); + sets = rxq->bufq_sets; + + /* In splitq mode, RXQ buffer size should be + * set to that of the first buffer queue + * associated with this RXQ. 
+ */ + rxq->rx_buf_size = sets[0].bufq.rx_buf_size; + + qi[k].rx_bufq1_id = cpu_to_le16(sets[0].bufq.q_id); if (vport->num_bufqs_per_qgrp > IDPF_SINGLE_BUFQ_PER_RXQ_GRP) { qi[k].bufq2_ena = IDPF_BUFQ2_ENA; qi[k].rx_bufq2_id = - cpu_to_le16(rxq->rxq_grp->splitq.bufq_sets[1].bufq.q_id); + cpu_to_le16(sets[1].bufq.q_id); } qi[k].rx_buffer_low_watermark = cpu_to_le16(rxq->rx_buffer_low_watermark); if (idpf_is_feature_ena(vport, NETIF_F_GRO_HW)) qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_RSC); -common_qi_fields: - if (rxq->rx_hsplit_en) { + rxq->rx_hbuf_size = sets[0].bufq.rx_hbuf_size; + + if (idpf_queue_has(HSPLIT_EN, rxq)) { qi[k].qflags |= cpu_to_le16(VIRTCHNL2_RXQ_HDR_SPLIT); qi[k].hdr_buffer_size = cpu_to_le16(rxq->rx_hbuf_size); } + +common_qi_fields: qi[k].queue_id = cpu_to_le32(rxq->q_id); qi[k].model = cpu_to_le16(vport->rxq_model); - qi[k].type = cpu_to_le32(rxq->q_type); + qi[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); qi[k].ring_len = cpu_to_le16(rxq->desc_count); qi[k].dma_ring_addr = cpu_to_le64(rxq->dma); qi[k].max_pkt_size = cpu_to_le32(rxq->rx_max_pkt_size); @@ -1706,7 +1731,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena) struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; for (j = 0; j < tx_qgrp->num_txq; j++, k++) { - qc[k].type = cpu_to_le32(tx_qgrp->txqs[j]->q_type); + qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX); qc[k].start_queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); } @@ -1720,7 +1745,7 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena) for (i = 0; i < vport->num_txq_grp; i++, k++) { struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; - qc[k].type = cpu_to_le32(tx_qgrp->complq->q_type); + qc[k].type = cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION); qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id); qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); } @@ -1741,12 +1766,12 @@ setup_rx: qc[k].start_queue_id = cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_id); qc[k].type = - cpu_to_le32(rx_qgrp->splitq.rxq_sets[j]->rxq.q_type); + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); } else { qc[k].start_queue_id = cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_id); qc[k].type = - cpu_to_le32(rx_qgrp->singleq.rxqs[j]->q_type); + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); } qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); } @@ -1761,10 +1786,11 @@ setup_rx: struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i]; for (j = 0; j < vport->num_bufqs_per_qgrp; j++, k++) { - struct idpf_queue *q; + const struct idpf_buf_queue *q; q = &rx_qgrp->splitq.bufq_sets[j].bufq; - qc[k].type = cpu_to_le32(q->q_type); + qc[k].type = + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX_BUFFER); qc[k].start_queue_id = cpu_to_le32(q->q_id); qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK); } @@ -1849,7 +1875,8 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; for (j = 0; j < tx_qgrp->num_txq; j++, k++) { - vqv[k].queue_type = cpu_to_le32(tx_qgrp->txqs[j]->q_type); + vqv[k].queue_type = + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_TX); vqv[k].queue_id = cpu_to_le32(tx_qgrp->txqs[j]->q_id); if (idpf_is_queue_model_split(vport->txq_model)) { @@ -1879,14 +1906,15 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map) num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq; j++, k++) { - struct idpf_queue *rxq; + struct idpf_rx_queue *rxq; if (idpf_is_queue_model_split(vport->rxq_model)) rxq = 
&rx_qgrp->splitq.rxq_sets[j]->rxq; else rxq = rx_qgrp->singleq.rxqs[j]; - vqv[k].queue_type = cpu_to_le32(rxq->q_type); + vqv[k].queue_type = + cpu_to_le32(VIRTCHNL2_QUEUE_TYPE_RX); vqv[k].queue_id = cpu_to_le32(rxq->q_id); vqv[k].vector_id = cpu_to_le16(rxq->q_vector->v_idx); vqv[k].itr_idx = cpu_to_le32(rxq->q_vector->rx_itr_idx); @@ -1975,7 +2003,7 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport) * queues virtchnl message is sent */ for (i = 0; i < vport->num_txq; i++) - set_bit(__IDPF_Q_POLL_MODE, vport->txqs[i]->flags); + idpf_queue_set(POLL_MODE, vport->txqs[i]); /* schedule the napi to receive all the marker packets */ local_bh_disable(); @@ -2469,39 +2497,52 @@ do_memcpy: * @frag: fragmentation allowed * */ -static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype, +static void idpf_fill_ptype_lookup(struct libeth_rx_pt *ptype, struct idpf_ptype_state *pstate, bool ipv4, bool frag) { if (!pstate->outer_ip || !pstate->outer_frag) { - ptype->outer_ip = IDPF_RX_PTYPE_OUTER_IP; pstate->outer_ip = true; if (ipv4) - ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV4; + ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV4; else - ptype->outer_ip_ver = IDPF_RX_PTYPE_OUTER_IPV6; + ptype->outer_ip = LIBETH_RX_PT_OUTER_IPV6; if (frag) { - ptype->outer_frag = IDPF_RX_PTYPE_FRAG; + ptype->outer_frag = LIBETH_RX_PT_FRAG; pstate->outer_frag = true; } } else { - ptype->tunnel_type = IDPF_RX_PTYPE_TUNNEL_IP_IP; + ptype->tunnel_type = LIBETH_RX_PT_TUNNEL_IP_IP; pstate->tunnel_state = IDPF_PTYPE_TUNNEL_IP; if (ipv4) - ptype->tunnel_end_prot = - IDPF_RX_PTYPE_TUNNEL_END_IPV4; + ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV4; else - ptype->tunnel_end_prot = - IDPF_RX_PTYPE_TUNNEL_END_IPV6; + ptype->tunnel_end_prot = LIBETH_RX_PT_TUNNEL_END_IPV6; if (frag) - ptype->tunnel_end_frag = IDPF_RX_PTYPE_FRAG; + ptype->tunnel_end_frag = LIBETH_RX_PT_FRAG; } } +static void idpf_finalize_ptype_lookup(struct libeth_rx_pt *ptype) +{ + if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && + ptype->inner_prot) + ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L4; + else if (ptype->payload_layer == LIBETH_RX_PT_PAYLOAD_L2 && + ptype->outer_ip) + ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L3; + else if (ptype->outer_ip == LIBETH_RX_PT_OUTER_L2) + ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_L2; + else + ptype->payload_layer = LIBETH_RX_PT_PAYLOAD_NONE; + + libeth_rx_pt_gen_hash_type(ptype); +} + /** * idpf_send_get_rx_ptype_msg - Send virtchnl for ptype info * @vport: virtual port data structure @@ -2512,7 +2553,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) { struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL; struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL; - struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup; + struct libeth_rx_pt *ptype_lkup __free(kfree) = NULL; int max_ptype, ptypes_recvd = 0, ptype_offset; struct idpf_adapter *adapter = vport->adapter; struct idpf_vc_xn_params xn_params = {}; @@ -2520,12 +2561,17 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) ssize_t reply_sz; int i, j, k; + if (vport->rx_ptype_lkup) + return 0; + if (idpf_is_queue_model_split(vport->rxq_model)) max_ptype = IDPF_RX_MAX_PTYPE; else max_ptype = IDPF_RX_MAX_BASE_PTYPE; - memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup)); + ptype_lkup = kcalloc(max_ptype, sizeof(*ptype_lkup), GFP_KERNEL); + if (!ptype_lkup) + return -ENOMEM; get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL); if (!get_ptype_info) @@ -2583,16 +2629,13 @@ 
int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) /* 0xFFFF indicates end of ptypes */ if (le16_to_cpu(ptype->ptype_id_10) == IDPF_INVALID_PTYPE_ID) - return 0; + goto out; if (idpf_is_queue_model_split(vport->rxq_model)) k = le16_to_cpu(ptype->ptype_id_10); else k = ptype->ptype_id_8; - if (ptype->proto_id_count) - ptype_lkup[k].known = 1; - for (j = 0; j < ptype->proto_id_count; j++) { id = le16_to_cpu(ptype->proto_id[j]); switch (id) { @@ -2600,18 +2643,18 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) if (pstate.tunnel_state == IDPF_PTYPE_TUNNEL_IP) { ptype_lkup[k].tunnel_type = - IDPF_RX_PTYPE_TUNNEL_IP_GRENAT; + LIBETH_RX_PT_TUNNEL_IP_GRENAT; pstate.tunnel_state |= IDPF_PTYPE_TUNNEL_IP_GRENAT; } break; case VIRTCHNL2_PROTO_HDR_MAC: ptype_lkup[k].outer_ip = - IDPF_RX_PTYPE_OUTER_L2; + LIBETH_RX_PT_OUTER_L2; if (pstate.tunnel_state == IDPF_TUN_IP_GRE) { ptype_lkup[k].tunnel_type = - IDPF_RX_PTYPE_TUNNEL_IP_GRENAT_MAC; + LIBETH_RX_PT_TUNNEL_IP_GRENAT_MAC; pstate.tunnel_state |= IDPF_PTYPE_TUNNEL_IP_GRENAT_MAC; } @@ -2638,23 +2681,23 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) break; case VIRTCHNL2_PROTO_HDR_UDP: ptype_lkup[k].inner_prot = - IDPF_RX_PTYPE_INNER_PROT_UDP; + LIBETH_RX_PT_INNER_UDP; break; case VIRTCHNL2_PROTO_HDR_TCP: ptype_lkup[k].inner_prot = - IDPF_RX_PTYPE_INNER_PROT_TCP; + LIBETH_RX_PT_INNER_TCP; break; case VIRTCHNL2_PROTO_HDR_SCTP: ptype_lkup[k].inner_prot = - IDPF_RX_PTYPE_INNER_PROT_SCTP; + LIBETH_RX_PT_INNER_SCTP; break; case VIRTCHNL2_PROTO_HDR_ICMP: ptype_lkup[k].inner_prot = - IDPF_RX_PTYPE_INNER_PROT_ICMP; + LIBETH_RX_PT_INNER_ICMP; break; case VIRTCHNL2_PROTO_HDR_PAY: ptype_lkup[k].payload_layer = - IDPF_RX_PTYPE_PAYLOAD_LAYER_PAY2; + LIBETH_RX_PT_PAYLOAD_L2; break; case VIRTCHNL2_PROTO_HDR_ICMPV6: case VIRTCHNL2_PROTO_HDR_IPV6_EH: @@ -2708,9 +2751,14 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport) break; } } + + idpf_finalize_ptype_lookup(&ptype_lkup[k]); } } +out: + vport->rx_ptype_lkup = no_free_ptr(ptype_lkup); + return 0; } @@ -3125,7 +3173,7 @@ void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q) rss_data->rss_lut_size = le16_to_cpu(vport_msg->rss_lut_size); ether_addr_copy(vport->default_mac_addr, vport_msg->default_mac_addr); - vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - IDPF_PACKET_HDR_PAD; + vport->max_mtu = le16_to_cpu(vport_msg->max_mtu) - LIBETH_RX_LL_LEN; /* Initialize Tx and Rx profiles for Dynamic Interrupt Moderation */ memcpy(vport->rx_itr_profile, rx_itr, IDPF_DIM_PROFILE_SLOTS); @@ -3242,7 +3290,6 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, int num_qids, u32 q_type) { - struct idpf_queue *q; int i, j, k = 0; switch (q_type) { @@ -3250,11 +3297,8 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, for (i = 0; i < vport->num_txq_grp; i++) { struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; - for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) { + for (j = 0; j < tx_qgrp->num_txq && k < num_qids; j++, k++) tx_qgrp->txqs[j]->q_id = qids[k]; - tx_qgrp->txqs[j]->q_type = - VIRTCHNL2_QUEUE_TYPE_TX; - } } break; case VIRTCHNL2_QUEUE_TYPE_RX: @@ -3268,12 +3312,13 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, num_rxq = rx_qgrp->singleq.num_rxq; for (j = 0; j < num_rxq && k < num_qids; j++, k++) { + struct idpf_rx_queue *q; + if (idpf_is_queue_model_split(vport->rxq_model)) q = &rx_qgrp->splitq.rxq_sets[j]->rxq; else q = rx_qgrp->singleq.rxqs[j]; q->q_id = qids[k]; - q->q_type = 
VIRTCHNL2_QUEUE_TYPE_RX; } } break; @@ -3282,8 +3327,6 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, struct idpf_txq_group *tx_qgrp = &vport->txq_grps[i]; tx_qgrp->complq->q_id = qids[k]; - tx_qgrp->complq->q_type = - VIRTCHNL2_QUEUE_TYPE_TX_COMPLETION; } break; case VIRTCHNL2_QUEUE_TYPE_RX_BUFFER: @@ -3292,9 +3335,10 @@ static int __idpf_vport_queue_ids_init(struct idpf_vport *vport, u8 num_bufqs = vport->num_bufqs_per_qgrp; for (j = 0; j < num_bufqs && k < num_qids; j++, k++) { + struct idpf_buf_queue *q; + q = &rx_qgrp->splitq.bufq_sets[j].bufq; q->q_id = qids[k]; - q->q_type = VIRTCHNL2_QUEUE_TYPE_RX_BUFFER; } } break; diff --git a/drivers/net/ethernet/intel/igb/Makefile b/drivers/net/ethernet/intel/igb/Makefile index 394c1e0656b9..463c0d26b9d4 100644 --- a/drivers/net/ethernet/intel/igb/Makefile +++ b/drivers/net/ethernet/intel/igb/Makefile @@ -6,6 +6,6 @@ obj-$(CONFIG_IGB) += igb.o -igb-objs := igb_main.o igb_ethtool.o e1000_82575.o \ - e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \ - e1000_i210.o igb_ptp.o igb_hwmon.o +igb-y := igb_main.o igb_ethtool.o e1000_82575.o \ + e1000_mac.o e1000_nvm.o e1000_phy.o e1000_mbx.o \ + e1000_i210.o igb_ptp.o igb_hwmon.o diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 61d72250c0ed..06b9970dffad 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2381,7 +2381,7 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) } static int igb_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct igb_adapter *adapter = netdev_priv(dev); diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index fce2930ae6af..11be39f435f3 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -203,7 +203,6 @@ static const struct pci_error_handlers igb_err_handler = { static void igb_init_dmac(struct igb_adapter *adapter, u32 pba); -MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); MODULE_LICENSE("GPL v2"); @@ -9139,6 +9138,10 @@ static int igb_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) return -EIO; break; case SIOCSMIIREG: + if (igb_write_phy_reg(&adapter->hw, data->reg_num & 0x1F, + data->val_in)) + return -EIO; + break; default: return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/intel/igbvf/Makefile b/drivers/net/ethernet/intel/igbvf/Makefile index afd3e36eae75..902711d5e691 100644 --- a/drivers/net/ethernet/intel/igbvf/Makefile +++ b/drivers/net/ethernet/intel/igbvf/Makefile @@ -6,8 +6,4 @@ obj-$(CONFIG_IGBVF) += igbvf.o -igbvf-objs := vf.o \ - mbx.o \ - ethtool.o \ - netdev.o - +igbvf-y := vf.o mbx.o ethtool.o netdev.o diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 7661edd7d0f2..925d7286a8ee 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -3001,7 +3001,6 @@ static void __exit igbvf_exit_module(void) } module_exit(igbvf_exit_module); -MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) Gigabit Virtual Function Network Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/intel/igc/Makefile b/drivers/net/ethernet/intel/igc/Makefile index ebffd3054285..efc5e7983dad 100644 --- a/drivers/net/ethernet/intel/igc/Makefile +++ 
b/drivers/net/ethernet/intel/igc/Makefile @@ -6,7 +6,7 @@ # obj-$(CONFIG_IGC) += igc.o -igc-$(CONFIG_IGC_LEDS) += igc_leds.o -igc-objs := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \ -igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o +igc-y := igc_main.o igc_mac.o igc_i225.o igc_base.o igc_nvm.o igc_phy.o \ + igc_diag.o igc_ethtool.o igc_ptp.o igc_dump.o igc_tsn.o igc_xdp.o +igc-$(CONFIG_IGC_LEDS) += igc_leds.o diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 8b14c029eda1..c38b4d0f00ce 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -202,7 +202,6 @@ struct igc_adapter { struct net_device *netdev; struct ethtool_keee eee; - u16 eee_advert; unsigned long state; unsigned int flags; diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index 0cd2bd695db1..3d3ef4e1547c 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -1559,7 +1559,7 @@ static int igc_ethtool_set_channels(struct net_device *netdev, } static int igc_ethtool_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct igc_adapter *adapter = netdev_priv(dev); @@ -1636,10 +1636,6 @@ static int igc_ethtool_get_eee(struct net_device *netdev, linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, edata->supported); - if (hw->dev_spec._base.eee_enable) - mii_eee_cap1_mod_linkmode_t(edata->advertised, - adapter->eee_advert); - eeer = rd32(IGC_EEER); /* EEE status on negotiated link */ @@ -1700,8 +1696,6 @@ static int igc_ethtool_set_eee(struct net_device *netdev, return -EINVAL; } - adapter->eee_advert = linkmode_to_mii_eee_cap1_t(edata->advertised); - if (hw->dev_spec._base.eee_enable != edata->eee_enabled) { hw->dev_spec._base.eee_enable = edata->eee_enabled; adapter->flags |= IGC_FLAG_EEE; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 87b655b839c1..cb5c7b09e8a0 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -32,7 +32,6 @@ static int debug = -1; -MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION(DRV_SUMMARY); MODULE_LICENSE("GPL v2"); module_param(debug, int, 0); @@ -4976,9 +4975,6 @@ void igc_up(struct igc_adapter *adapter) /* start the watchdog. 
*/ hw->mac.get_link_status = true; schedule_work(&adapter->watchdog_task); - - adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T | - MDIO_EEE_2_5GT; } /** diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile index 4fb0d9e3f2da..965e5ce1b326 100644 --- a/drivers/net/ethernet/intel/ixgbe/Makefile +++ b/drivers/net/ethernet/intel/ixgbe/Makefile @@ -6,10 +6,10 @@ obj-$(CONFIG_IXGBE) += ixgbe.o -ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ - ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ - ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \ - ixgbe_xsk.o +ixgbe-y := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ + ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ + ixgbe_mbx.o ixgbe_x540.o ixgbe_x550.o ixgbe_lib.o ixgbe_ptp.o \ + ixgbe_xsk.o ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ ixgbe_dcb_82599.o ixgbe_dcb_nl.o diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 6e6e6f1847b6..4cac76254966 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -3170,7 +3170,7 @@ static int ixgbe_set_rxfh(struct net_device *netdev, } static int ixgbe_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct ixgbe_adapter *adapter = netdev_priv(dev); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 094653e81b97..8057cef61f39 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -162,7 +162,6 @@ static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); -MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/intel/ixgbevf/Makefile b/drivers/net/ethernet/intel/ixgbevf/Makefile index 186a4bb24fde..01d3e892f3fa 100644 --- a/drivers/net/ethernet/intel/ixgbevf/Makefile +++ b/drivers/net/ethernet/intel/ixgbevf/Makefile @@ -6,9 +6,5 @@ obj-$(CONFIG_IXGBEVF) += ixgbevf.o -ixgbevf-objs := vf.o \ - mbx.o \ - ethtool.o \ - ixgbevf_main.o +ixgbevf-y := vf.o mbx.o ethtool.o ixgbevf_main.o ixgbevf-$(CONFIG_IXGBEVF_IPSEC) += ipsec.o - diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index b938dc06045d..149911e3002a 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -76,7 +76,6 @@ static const struct pci_device_id ixgbevf_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, ixgbevf_pci_tbl); -MODULE_AUTHOR("Intel Corporation, "); MODULE_DESCRIPTION("Intel(R) 10 Gigabit Virtual Function Network Driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/intel/libeth/Makefile b/drivers/net/ethernet/intel/libeth/Makefile index cb99203d1dd2..52492b081132 100644 --- a/drivers/net/ethernet/intel/libeth/Makefile +++ b/drivers/net/ethernet/intel/libeth/Makefile @@ -3,4 +3,4 @@ obj-$(CONFIG_LIBETH) += libeth.o -libeth-objs += rx.o +libeth-y := rx.o diff --git a/drivers/net/ethernet/intel/libeth/rx.c b/drivers/net/ethernet/intel/libeth/rx.c index 6221b88c34ac..f20926669318 100644 --- a/drivers/net/ethernet/intel/libeth/rx.c +++ b/drivers/net/ethernet/intel/libeth/rx.c @@ -6,7 +6,7 @@ /* Rx buffer management */ /** - * 
libeth_rx_hw_len - get the actual buffer size to be passed to HW + * libeth_rx_hw_len_mtu - get the actual buffer size to be passed to HW * @pp: &page_pool_params of the netdev to calculate the size for * @max_len: maximum buffer size for a single descriptor * @@ -14,7 +14,7 @@ * MTU the @dev has, HW required alignment, minimum and maximum allowed values, * and system's page size. */ -static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len) +static u32 libeth_rx_hw_len_mtu(const struct page_pool_params *pp, u32 max_len) { u32 len; @@ -26,6 +26,118 @@ static u32 libeth_rx_hw_len(const struct page_pool_params *pp, u32 max_len) return len; } +/** + * libeth_rx_hw_len_truesize - get the short buffer size to be passed to HW + * @pp: &page_pool_params of the netdev to calculate the size for + * @max_len: maximum buffer size for a single descriptor + * @truesize: desired truesize for the buffers + * + * Return: HW-writeable length per one buffer to pass it to the HW ignoring the + * MTU and closest to the passed truesize. Can be used for "short" buffer + * queues to fragment pages more efficiently. + */ +static u32 libeth_rx_hw_len_truesize(const struct page_pool_params *pp, + u32 max_len, u32 truesize) +{ + u32 min, len; + + min = SKB_HEAD_ALIGN(pp->offset + LIBETH_RX_BUF_STRIDE); + truesize = clamp(roundup_pow_of_two(truesize), roundup_pow_of_two(min), + PAGE_SIZE << LIBETH_RX_PAGE_ORDER); + + len = SKB_WITH_OVERHEAD(truesize - pp->offset); + len = ALIGN_DOWN(len, LIBETH_RX_BUF_STRIDE) ? : LIBETH_RX_BUF_STRIDE; + len = min3(len, ALIGN_DOWN(max_len ? : U32_MAX, LIBETH_RX_BUF_STRIDE), + pp->max_len); + + return len; +} + +/** + * libeth_rx_page_pool_params - calculate params with the stack overhead + * @fq: buffer queue to calculate the size for + * @pp: &page_pool_params of the netdev + * + * Set the PP params to fill all needed stack overhead (headroom, tailroom) and + * both the HW buffer length and the truesize for all types of buffers. For + * "short" buffers, truesize never exceeds the "wanted" one; for the rest, + * it can be up to the page size. + * + * Return: true on success, false on invalid input params. + */ +static bool libeth_rx_page_pool_params(struct libeth_fq *fq, + struct page_pool_params *pp) +{ + pp->offset = LIBETH_SKB_HEADROOM; + /* HW-writeable / syncable length per one page */ + pp->max_len = LIBETH_RX_PAGE_LEN(pp->offset); + + /* HW-writeable length per buffer */ + switch (fq->type) { + case LIBETH_FQE_MTU: + fq->buf_len = libeth_rx_hw_len_mtu(pp, fq->buf_len); + break; + case LIBETH_FQE_SHORT: + fq->buf_len = libeth_rx_hw_len_truesize(pp, fq->buf_len, + fq->truesize); + break; + case LIBETH_FQE_HDR: + fq->buf_len = ALIGN(LIBETH_MAX_HEAD, LIBETH_RX_BUF_STRIDE); + break; + default: + return false; + } + + /* Buffer size to allocate */ + fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp->offset + + fq->buf_len)); + + return true; +} + +/** + * libeth_rx_page_pool_params_zc - calculate params without the stack overhead + * @fq: buffer queue to calculate the size for + * @pp: &page_pool_params of the netdev + * + * Set the PP params to exclude the stack overhead and both the buffer length + * and the truesize, which are equal for the data buffers. Note that this + * requires separate header buffers to be always active and account the + * overhead. + * With the MTU == ``PAGE_SIZE``, this allows the kernel to enable the zerocopy + * mode. + * + * Return: true on success, false on invalid input params. 
+ */ +static bool libeth_rx_page_pool_params_zc(struct libeth_fq *fq, + struct page_pool_params *pp) +{ + u32 mtu, max; + + pp->offset = 0; + pp->max_len = PAGE_SIZE << LIBETH_RX_PAGE_ORDER; + + switch (fq->type) { + case LIBETH_FQE_MTU: + mtu = READ_ONCE(pp->netdev->mtu); + break; + case LIBETH_FQE_SHORT: + mtu = fq->truesize; + break; + default: + return false; + } + + mtu = roundup_pow_of_two(mtu); + max = min(rounddown_pow_of_two(fq->buf_len ? : U32_MAX), + pp->max_len); + + fq->buf_len = clamp(mtu, LIBETH_RX_BUF_STRIDE, max); + fq->truesize = fq->buf_len; + + return true; +} + /** * libeth_rx_fq_create - create a PP with the default libeth settings * @fq: buffer queue struct to fill @@ -44,19 +156,17 @@ int libeth_rx_fq_create(struct libeth_fq *fq, struct napi_struct *napi) .netdev = napi->dev, .napi = napi, .dma_dir = DMA_FROM_DEVICE, - .offset = LIBETH_SKB_HEADROOM, }; struct libeth_fqe *fqes; struct page_pool *pool; + bool ret; - /* HW-writeable / syncable length per one page */ - pp.max_len = LIBETH_RX_PAGE_LEN(pp.offset); - - /* HW-writeable length per buffer */ - fq->buf_len = libeth_rx_hw_len(&pp, fq->buf_len); - /* Buffer size to allocate */ - fq->truesize = roundup_pow_of_two(SKB_HEAD_ALIGN(pp.offset + - fq->buf_len)); + if (!fq->hsplit) + ret = libeth_rx_page_pool_params(fq, &pp); + else + ret = libeth_rx_page_pool_params_zc(fq, &pp); + if (!ret) + return -EINVAL; pool = page_pool_create(&pp); if (IS_ERR(pool)) @@ -145,6 +255,5 @@ EXPORT_SYMBOL_NS_GPL(libeth_rx_pt_gen_hash_type, LIBETH); /* Module */ -MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("Common Ethernet library"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/intel/libie/Makefile b/drivers/net/ethernet/intel/libie/Makefile index bf42c5aeeedd..ffd27fab916a 100644 --- a/drivers/net/ethernet/intel/libie/Makefile +++ b/drivers/net/ethernet/intel/libie/Makefile @@ -3,4 +3,4 @@ obj-$(CONFIG_LIBIE) += libie.o -libie-objs += rx.o +libie-y := rx.o diff --git a/drivers/net/ethernet/intel/libie/rx.c b/drivers/net/ethernet/intel/libie/rx.c index 38201ee1e891..aceb8d8813c4 100644 --- a/drivers/net/ethernet/intel/libie/rx.c +++ b/drivers/net/ethernet/intel/libie/rx.c @@ -118,7 +118,6 @@ const struct libeth_rx_pt libie_rx_pt_lut[LIBIE_RX_PT_NUM] = { }; EXPORT_SYMBOL_NS_GPL(libie_rx_pt_lut, LIBIE); -MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("Intel(R) Ethernet common library"); MODULE_IMPORT_NS(LIBETH); MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 0b9982804370..9e6984815386 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -675,7 +675,6 @@ ltq_etop_probe(struct platform_device *pdev) err = -ENOMEM; goto err_out; } - strcpy(dev->name, "eth%d"); dev->netdev_ops = <q_eth_netdev_ops; dev->ethtool_ops = <q_etop_ethtool_ops; priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 9adf4301c9b1..8c45ad983abc 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -5259,7 +5259,7 @@ static int mvpp2_get_ts_config(struct mvpp2_port *port, struct ifreq *ifr) } static int mvpp2_ethtool_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct mvpp2_port *port = netdev_priv(dev); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index 
05b84581d5c5..ed2160cc5acb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -139,6 +139,7 @@ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \ M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \ M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \ M(GET_HW_CAP, 0x008, get_hw_cap, msg_req, get_hw_cap_rsp) \ +M(NDC_SYNC_OP, 0x009, ndc_sync_op, ndc_sync_op, msg_rsp) \ M(LMTST_TBL_SETUP, 0x00a, lmtst_tbl_setup, lmtst_tbl_setup_req, \ msg_rsp) \ M(SET_VF_PERM, 0x00b, set_vf_perm, set_vf_perm, msg_rsp) \ @@ -1716,6 +1717,13 @@ struct lmtst_tbl_setup_req { u64 rsvd[4]; }; +struct ndc_sync_op { + struct mbox_msghdr hdr; + u8 nix_lf_tx_sync; + u8 nix_lf_rx_sync; + u8 npa_lf_sync; +}; + /* CPT mailbox error codes * Range 901 - 1000. */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 5f661e67ccbc..ac7ee3f3598c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -2014,6 +2014,13 @@ int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, return 0; } +int rvu_ndc_sync(struct rvu *rvu, int lfblkaddr, int lfidx, u64 lfoffset) +{ + /* Sync cached info for this LF in NDC to LLC/DRAM */ + rvu_write64(rvu, lfblkaddr, lfoffset, BIT_ULL(12) | lfidx); + return rvu_poll_reg(rvu, lfblkaddr, lfoffset, BIT_ULL(12), true); +} + int rvu_mbox_handler_get_hw_cap(struct rvu *rvu, struct msg_req *req, struct get_hw_cap_rsp *rsp) { @@ -2068,6 +2075,65 @@ int rvu_mbox_handler_set_vf_perm(struct rvu *rvu, struct set_vf_perm *req, return 0; } +int rvu_mbox_handler_ndc_sync_op(struct rvu *rvu, + struct ndc_sync_op *req, + struct msg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + int err, lfidx, lfblkaddr; + + if (req->npa_lf_sync) { + /* Get NPA LF data */ + lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, pcifunc); + if (lfblkaddr < 0) + return NPA_AF_ERR_AF_LF_INVALID; + + lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0); + if (lfidx < 0) + return NPA_AF_ERR_AF_LF_INVALID; + + /* Sync NPA NDC */ + err = rvu_ndc_sync(rvu, lfblkaddr, + lfidx, NPA_AF_NDC_SYNC); + if (err) + dev_err(rvu->dev, + "NDC-NPA sync failed for LF %u\n", lfidx); + } + + if (!req->nix_lf_tx_sync && !req->nix_lf_rx_sync) + return 0; + + /* Get NIX LF data */ + lfblkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (lfblkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + lfidx = rvu_get_lf(rvu, &hw->block[lfblkaddr], pcifunc, 0); + if (lfidx < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + if (req->nix_lf_tx_sync) { + /* Sync NIX TX NDC */ + err = rvu_ndc_sync(rvu, lfblkaddr, + lfidx, NIX_AF_NDC_TX_SYNC); + if (err) + dev_err(rvu->dev, + "NDC-NIX-TX sync failed for LF %u\n", lfidx); + } + + if (req->nix_lf_rx_sync) { + /* Sync NIX RX NDC */ + err = rvu_ndc_sync(rvu, lfblkaddr, + lfidx, NIX_AF_NDC_RX_SYNC); + if (err) + dev_err(rvu->dev, + "NDC-NIX-RX sync failed for LF %u\n", lfidx); + } + + return 0; +} + static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid, struct mbox_msghdr *req) { diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 35834687e40f..03ee93fd9e94 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -76,6 +76,7 @@ struct rvu_debugfs { struct dump_ctx nix_cq_ctx; struct dump_ctx nix_rq_ctx; struct dump_ctx nix_sq_ctx; + struct dump_ctx nix_tm_ctx; 
struct cpt_ctx cpt_ctx[MAX_CPT_BLKS]; int npa_qsize_id; int nix_qsize_id; @@ -799,6 +800,7 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf); int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc); int rvu_poll_reg(struct rvu *rvu, u64 block, u64 offset, u64 mask, bool zero); int rvu_get_num_lbk_chans(void); +int rvu_ndc_sync(struct rvu *rvu, int lfblkaddr, int lfidx, u64 lfoffset); int rvu_get_blkaddr_from_slot(struct rvu *rvu, int blktype, u16 pcifunc, u16 global_slot, u16 *slot_in_block); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 881d704644fb..4a4ef5bd9e0b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -1603,6 +1603,367 @@ static void print_nix_cn10k_sq_ctx(struct seq_file *m, (u64)sq_ctx->dropped_pkts); } +static void print_tm_tree(struct seq_file *m, + struct nix_aq_enq_rsp *rsp, u64 sq) +{ + struct nix_sq_ctx_s *sq_ctx = &rsp->sq; + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + u16 p1, p2, p3, p4, schq; + int blkaddr; + u64 cfg; + + blkaddr = nix_hw->blkaddr; + schq = sq_ctx->smq; + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq)); + p1 = FIELD_GET(NIX_AF_MDQ_PARENT_MASK, cfg); + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(p1)); + p2 = FIELD_GET(NIX_AF_TL4_PARENT_MASK, cfg); + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(p2)); + p3 = FIELD_GET(NIX_AF_TL3_PARENT_MASK, cfg); + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(p3)); + p4 = FIELD_GET(NIX_AF_TL2_PARENT_MASK, cfg); + seq_printf(m, + "SQ(%llu) -> SMQ(%u) -> TL4(%u) -> TL3(%u) -> TL2(%u) -> TL1(%u)\n", + sq, schq, p1, p2, p3, p4); +} + +/* Dumps given tm_tree registers */ +static int rvu_dbg_nix_tm_tree_display(struct seq_file *m, void *unused) +{ + int qidx, nixlf, rc, id, max_id = 0; + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + struct nix_aq_enq_req aq_req; + struct nix_aq_enq_rsp rsp; + struct rvu_pfvf *pfvf; + u16 pcifunc; + + nixlf = rvu->rvu_dbg.nix_tm_ctx.lf; + id = rvu->rvu_dbg.nix_tm_ctx.id; + + if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + max_id = pfvf->sq_ctx->qsize; + + memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); + aq_req.hdr.pcifunc = pcifunc; + aq_req.ctype = NIX_AQ_CTYPE_SQ; + aq_req.op = NIX_AQ_INSTOP_READ; + seq_printf(m, "pcifunc is 0x%x\n", pcifunc); + for (qidx = id; qidx < max_id; qidx++) { + aq_req.qidx = qidx; + + /* Skip SQs if not initialized */ + if (!test_bit(qidx, pfvf->sq_bmap)) + continue; + + rc = rvu_mbox_handler_nix_aq_enq(rvu, &aq_req, &rsp); + + if (rc) { + seq_printf(m, "Failed to read SQ(%d) context\n", + aq_req.qidx); + continue; + } + print_tm_tree(m, &rsp, aq_req.qidx); + } + return 0; +} + +static ssize_t rvu_dbg_nix_tm_tree_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + struct rvu_pfvf *pfvf; + u16 pcifunc; + u64 nixlf; + int ret; + + ret = kstrtoull_from_user(buffer, count, 10, &nixlf); + if (ret) + return ret; + + if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (!pfvf->sq_ctx) { + dev_warn(rvu->dev, "SQ context is not initialized\n"); + return -EINVAL; + } + + rvu->rvu_dbg.nix_tm_ctx.lf = 
nixlf; + return count; +} + +RVU_DEBUG_SEQ_FOPS(nix_tm_tree, nix_tm_tree_display, nix_tm_tree_write); + +static void print_tm_topo(struct seq_file *m, u64 schq, u32 lvl) +{ + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + int blkaddr, link, link_level; + struct rvu_hwinfo *hw; + + hw = rvu->hw; + blkaddr = nix_hw->blkaddr; + if (lvl == NIX_TXSCH_LVL_MDQ) { + seq_printf(m, "NIX_AF_SMQ[%llu]_CFG =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq))); + seq_printf(m, "NIX_AF_SMQ[%llu]_STATUS =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_SMQX_STATUS(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_OUT_MD_COUNT =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_MDQX_OUT_MD_COUNT(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_SCHEDULE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_MDQX_SCHEDULE(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_SHAPE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SHAPE(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_CIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_MDQX_CIR(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_PIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PIR(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_SW_XOFF =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_MDQX_SW_XOFF(schq))); + seq_printf(m, "NIX_AF_MDQ[%llu]_PARENT =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_MDQX_PARENT(schq))); + seq_puts(m, "\n"); + } + + if (lvl == NIX_TXSCH_LVL_TL4) { + seq_printf(m, "NIX_AF_TL4[%llu]_SDP_LINK_CFG =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL4X_SDP_LINK_CFG(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_SCHEDULE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL4X_SCHEDULE(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_SHAPE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SHAPE(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_CIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL4X_CIR(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_PIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PIR(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_SW_XOFF =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL4X_SW_XOFF(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_TOPOLOGY =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL4X_TOPOLOGY(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_PARENT =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL4X_PARENT(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG0 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL4X_MD_DEBUG0(schq))); + seq_printf(m, "NIX_AF_TL4[%llu]_MD_DEBUG1 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL4X_MD_DEBUG1(schq))); + seq_puts(m, "\n"); + } + + if (lvl == NIX_TXSCH_LVL_TL3) { + seq_printf(m, "NIX_AF_TL3[%llu]_SCHEDULE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL3X_SCHEDULE(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_SHAPE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SHAPE(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_CIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL3X_CIR(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_PIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PIR(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_SW_XOFF =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL3X_SW_XOFF(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_TOPOLOGY =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL3X_TOPOLOGY(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_PARENT =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL3X_PARENT(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG0 
=0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL3X_MD_DEBUG0(schq))); + seq_printf(m, "NIX_AF_TL3[%llu]_MD_DEBUG1 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL3X_MD_DEBUG1(schq))); + + link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) + & 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; + if (lvl == link_level) { + seq_printf(m, + "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n", + schq, rvu_read64(rvu, blkaddr, + NIX_AF_TL3_TL2X_BP_STATUS(schq))); + for (link = 0; link < hw->cgx_links; link++) + seq_printf(m, + "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n", + schq, link, + rvu_read64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(schq, link))); + } + seq_puts(m, "\n"); + } + + if (lvl == NIX_TXSCH_LVL_TL2) { + seq_printf(m, "NIX_AF_TL2[%llu]_SHAPE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SHAPE(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_CIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL2X_CIR(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_PIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PIR(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_SW_XOFF =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL2X_SW_XOFF(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_TOPOLOGY =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL2X_TOPOLOGY(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_PARENT =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL2X_PARENT(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG0 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL2X_MD_DEBUG0(schq))); + seq_printf(m, "NIX_AF_TL2[%llu]_MD_DEBUG1 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL2X_MD_DEBUG1(schq))); + + link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) + & 0x01 ? NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; + if (lvl == link_level) { + seq_printf(m, + "NIX_AF_TL3_TL2[%llu]_BP_STATUS =0x%llx\n", + schq, rvu_read64(rvu, blkaddr, + NIX_AF_TL3_TL2X_BP_STATUS(schq))); + for (link = 0; link < hw->cgx_links; link++) + seq_printf(m, + "NIX_AF_TL3_TL2[%llu]_LINK[%d]_CFG =0x%llx\n", + schq, link, rvu_read64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(schq, link))); + } + seq_puts(m, "\n"); + } + + if (lvl == NIX_TXSCH_LVL_TL1) { + seq_printf(m, "NIX_AF_TX_LINK[%llu]_NORM_CREDIT =0x%llx\n", + schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TX_LINKX_NORM_CREDIT(schq))); + seq_printf(m, "NIX_AF_TX_LINK[%llu]_HW_XOFF =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TX_LINKX_HW_XOFF(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_SCHEDULE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_SCHEDULE(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_SHAPE =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SHAPE(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_CIR =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_SW_XOFF =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_TOPOLOGY =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_TOPOLOGY(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG0 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_MD_DEBUG0(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_MD_DEBUG1 =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_MD_DEBUG1(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_PACKETS =0x%llx\n", + schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_DROPPED_PACKETS(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_DROPPED_BYTES =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_DROPPED_BYTES(schq))); + 
seq_printf(m, "NIX_AF_TL1[%llu]_RED_PACKETS =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_RED_PACKETS(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_RED_BYTES =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_RED_BYTES(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_PACKETS =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_YELLOW_PACKETS(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_YELLOW_BYTES =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_YELLOW_BYTES(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_PACKETS =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_GREEN_PACKETS(schq))); + seq_printf(m, "NIX_AF_TL1[%llu]_GREEN_BYTES =0x%llx\n", schq, + rvu_read64(rvu, blkaddr, + NIX_AF_TL1X_GREEN_BYTES(schq))); + seq_puts(m, "\n"); + } +} + +/*dumps given tm_topo registers*/ +static int rvu_dbg_nix_tm_topo_display(struct seq_file *m, void *unused) +{ + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + struct nix_aq_enq_req aq_req; + struct nix_txsch *txsch; + int nixlf, lvl, schq; + u16 pcifunc; + + nixlf = rvu->rvu_dbg.nix_tm_ctx.lf; + + if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc)) + return -EINVAL; + + memset(&aq_req, 0, sizeof(struct nix_aq_enq_req)); + aq_req.hdr.pcifunc = pcifunc; + aq_req.ctype = NIX_AQ_CTYPE_SQ; + aq_req.op = NIX_AQ_INSTOP_READ; + seq_printf(m, "pcifunc is 0x%x\n", pcifunc); + + for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + txsch = &nix_hw->txsch[lvl]; + for (schq = 0; schq < txsch->schq.max; schq++) { + if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) == pcifunc) + print_tm_topo(m, schq, lvl); + } + } + return 0; +} + +static ssize_t rvu_dbg_nix_tm_topo_write(struct file *filp, + const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct seq_file *m = filp->private_data; + struct nix_hw *nix_hw = m->private; + struct rvu *rvu = nix_hw->rvu; + struct rvu_pfvf *pfvf; + u16 pcifunc; + u64 nixlf; + int ret; + + ret = kstrtoull_from_user(buffer, count, 10, &nixlf); + if (ret) + return ret; + + if (!rvu_dbg_is_valid_lf(rvu, nix_hw->blkaddr, nixlf, &pcifunc)) + return -EINVAL; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (!pfvf->sq_ctx) { + dev_warn(rvu->dev, "SQ context is not initialized\n"); + return -EINVAL; + } + + rvu->rvu_dbg.nix_tm_ctx.lf = nixlf; + return count; +} + +RVU_DEBUG_SEQ_FOPS(nix_tm_topo, nix_tm_topo_display, nix_tm_topo_write); + /* Dumps given nix_sq's context */ static void print_nix_sq_ctx(struct seq_file *m, struct nix_aq_enq_rsp *rsp) { @@ -2349,6 +2710,10 @@ static void rvu_dbg_nix_init(struct rvu *rvu, int blkaddr) nix_hw = &rvu->hw->nix[1]; } + debugfs_create_file("tm_tree", 0600, rvu->rvu_dbg.nix, nix_hw, + &rvu_dbg_nix_tm_tree_fops); + debugfs_create_file("tm_topo", 0600, rvu->rvu_dbg.nix, nix_hw, + &rvu_dbg_nix_tm_topo_fops); debugfs_create_file("sq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, &rvu_dbg_nix_sq_ctx_fops); debugfs_create_file("rq_ctx", 0600, rvu->rvu_dbg.nix, nix_hw, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 3dc828cf6c5a..222f9e00b836 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -2497,9 +2497,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) } mutex_unlock(&rvu->rsrc_lock); - /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ - rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); - err = rvu_poll_reg(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12), true); 
+ err = rvu_ndc_sync(rvu, blkaddr, nixlf, NIX_AF_NDC_TX_SYNC); if (err) dev_err(rvu->dev, "NDC-TX sync failed for NIXLF %d\n", nixlf); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 086f05c0376f..d56be5fb7eb4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -121,6 +121,7 @@ #define NPA_AF_LF_RST (0x0020) #define NPA_AF_GEN_CFG (0x0030) #define NPA_AF_NDC_CFG (0x0040) +#define NPA_AF_NDC_SYNC (0x0050) #define NPA_AF_INP_CTL (0x00D0) #define NPA_AF_ACTIVE_CYCLES_PC (0x00F0) #define NPA_AF_AVG_DELAY (0x0100) @@ -239,6 +240,7 @@ #define NIX_AF_RX_CPTX_INST_ADDR (0x0310) #define NIX_AF_RX_CPTX_INST_QSEL(a) (0x0320ull | (uint64_t)(a) << 3) #define NIX_AF_RX_CPTX_CREDIT(a) (0x0360ull | (uint64_t)(a) << 3) +#define NIX_AF_NDC_RX_SYNC (0x03E0) #define NIX_AF_NDC_TX_SYNC (0x03F0) #define NIX_AF_AQ_CFG (0x0400) #define NIX_AF_AQ_BASE (0x0410) @@ -429,6 +431,8 @@ #define NIX_AF_RX_ACTIVE_CYCLES_PCX(a) (0x4800 | (a) << 16) #define NIX_AF_LINKX_CFG(a) (0x4010 | (a) << 17) #define NIX_AF_MDQX_IN_MD_COUNT(a) (0x14e0 | (a) << 16) +#define NIX_AF_SMQX_STATUS(a) (0x730 | (a) << 16) +#define NIX_AF_MDQX_OUT_MD_COUNT(a) (0xdb0 | (a) << 16) #define NIX_PRIV_AF_INT_CFG (0x8000000) #define NIX_PRIV_LFX_CFG (0x8000010) @@ -442,6 +446,11 @@ #define NIX_CONST_MAX_BPIDS GENMASK_ULL(23, 12) #define NIX_CONST_SDP_CHANS GENMASK_ULL(11, 0) +#define NIX_AF_MDQ_PARENT_MASK GENMASK_ULL(24, 16) +#define NIX_AF_TL4_PARENT_MASK GENMASK_ULL(23, 16) +#define NIX_AF_TL3_PARENT_MASK GENMASK_ULL(23, 16) +#define NIX_AF_TL2_PARENT_MASK GENMASK_ULL(20, 16) + /* SSO */ #define SSO_AF_CONST (0x1000) #define SSO_AF_CONST1 (0x1008) diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 24fbbef265a6..f27a3456ae64 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -346,12 +346,9 @@ struct otx2_flow_config { u16 *def_ent; u16 nr_flows; #define OTX2_DEFAULT_FLOWCOUNT 16 -#define OTX2_MAX_UNICAST_FLOWS 8 +#define OTX2_DEFAULT_UNICAST_FLOWS 4 #define OTX2_MAX_VLAN_FLOWS 1 #define OTX2_MAX_TC_FLOWS OTX2_DEFAULT_FLOWCOUNT -#define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \ - OTX2_MAX_UNICAST_FLOWS + \ - OTX2_MAX_VLAN_FLOWS) u16 unicast_offset; u16 rx_vlan_offset; u16 vf_vlan_offset; @@ -365,6 +362,7 @@ struct otx2_flow_config { u16 max_flows; refcount_t mark_flows; struct list_head flow_list_tc; + u8 ucast_flt_cnt; bool ntuple; }; @@ -1067,6 +1065,7 @@ int otx2_handle_ntuple_tc_features(struct net_device *netdev, int otx2_smq_flush(struct otx2_nic *pfvf, int smq); void otx2_free_bufs(struct otx2_nic *pfvf, struct otx2_pool *pool, u64 iova, int size); +int otx2_mcam_entry_init(struct otx2_nic *pfvf); /* tc support */ int otx2_init_tc(struct otx2_nic *nic); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c index 458d34a62e18..53f14aa944bd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c @@ -64,9 +64,68 @@ static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id, return 0; } +static int otx2_dl_ucast_flt_cnt_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) +{ + struct otx2_devlink *otx2_dl = 
devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + int err; + + pfvf->flow_cfg->ucast_flt_cnt = ctx->val.vu8; + + otx2_mcam_flow_del(pfvf); + err = otx2_mcam_entry_init(pfvf); + if (err) + return err; + + return 0; +} + +static int otx2_dl_ucast_flt_cnt_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + + ctx->val.vu8 = pfvf->flow_cfg ? pfvf->flow_cfg->ucast_flt_cnt : 0; + + return 0; +} + +static int otx2_dl_ucast_flt_cnt_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + + /* Check for UNICAST filter support */ + if (!(pfvf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT)) { + NL_SET_ERR_MSG_MOD(extack, + "Unicast filter not enabled"); + return -EINVAL; + } + + if (!pfvf->flow_cfg) { + NL_SET_ERR_MSG_MOD(extack, + "pfvf->flow_cfg not initialized"); + return -EINVAL; + } + + if (pfvf->flow_cfg->nr_flows) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot modify count when there are active rules"); + return -EINVAL; + } + + return 0; +} + enum otx2_dl_param_id { OTX2_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, OTX2_DEVLINK_PARAM_ID_MCAM_COUNT, + OTX2_DEVLINK_PARAM_ID_UCAST_FLT_CNT, }; static const struct devlink_param otx2_dl_params[] = { @@ -75,6 +134,11 @@ static const struct devlink_param otx2_dl_params[] = { BIT(DEVLINK_PARAM_CMODE_RUNTIME), otx2_dl_mcam_count_get, otx2_dl_mcam_count_set, otx2_dl_mcam_count_validate), + DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_UCAST_FLT_CNT, + "unicast_filter_count", DEVLINK_PARAM_TYPE_U8, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + otx2_dl_ucast_flt_cnt_get, otx2_dl_ucast_flt_cnt_set, + otx2_dl_ucast_flt_cnt_validate), }; static const struct devlink_ops otx2_devlink_ops = { diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index 7f786de61014..0db62eb0dab3 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -954,7 +954,7 @@ static u32 otx2_get_link(struct net_device *netdev) } static int otx2_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct otx2_nic *pfvf = netdev_priv(netdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index bc5819237ed7..98c31a16c70b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -12,8 +12,6 @@ #define OTX2_DEFAULT_ACTION 0x1 -static int otx2_mcam_entry_init(struct otx2_nic *pfvf); - struct otx2_flow { struct ethtool_rx_flow_spec flow_spec; struct list_head list; @@ -161,7 +159,7 @@ exit: } EXPORT_SYMBOL(otx2_alloc_mcam_entries); -static int otx2_mcam_entry_init(struct otx2_nic *pfvf) +int otx2_mcam_entry_init(struct otx2_nic *pfvf) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct npc_get_field_status_req *freq; @@ -172,7 +170,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf) int ent, count; vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS; - count = OTX2_MAX_UNICAST_FLOWS + + count = flow_cfg->ucast_flt_cnt + OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows; flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count, @@ -214,7
+212,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf) flow_cfg->vf_vlan_offset = 0; flow_cfg->unicast_offset = vf_vlan_max_flows; flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset + - OTX2_MAX_UNICAST_FLOWS; + flow_cfg->ucast_flt_cnt; pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT; /* Check if NPC_DMAC field is supported @@ -255,6 +253,7 @@ static int otx2_mcam_entry_init(struct otx2_nic *pfvf) refcount_set(&flow_cfg->mark_flows, 1); return 0; } +EXPORT_SYMBOL(otx2_mcam_entry_init); /* TODO : revisit on size */ #define OTX2_DMAC_FLTR_BITMAP_SZ (4 * 2048 + 32) @@ -302,6 +301,8 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) INIT_LIST_HEAD(&pf->flow_cfg->flow_list); INIT_LIST_HEAD(&pf->flow_cfg->flow_list_tc); + pf->flow_cfg->ucast_flt_cnt = OTX2_DEFAULT_UNICAST_FLOWS; + /* Allocate bare minimum number of MCAM entries needed for * unicast and ntuple filters. */ @@ -314,7 +315,7 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) return 0; pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table) - * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL); + * pf->flow_cfg->ucast_flt_cnt, GFP_KERNEL); if (!pf->mac_table) return -ENOMEM; @@ -356,7 +357,7 @@ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac) return -ENOMEM; /* dont have free mcam entries or uc list is greater than alloted */ - if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS) + if (netdev_uc_count(pf->netdev) > pf->flow_cfg->ucast_flt_cnt) return -ENOMEM; mutex_lock(&pf->mbox.lock); @@ -367,7 +368,7 @@ static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac) } /* unicast offset starts with 32 0..31 for ntuple */ - for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) { + for (i = 0; i < pf->flow_cfg->ucast_flt_cnt; i++) { if (pf->mac_table[i].inuse) continue; ether_addr_copy(pf->mac_table[i].addr, mac); @@ -410,7 +411,7 @@ static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac, { int i; - for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) { + for (i = 0; i < pf->flow_cfg->ucast_flt_cnt; i++) { if (!pf->mac_table[i].inuse) continue; @@ -1394,6 +1395,7 @@ int otx2_destroy_mcam_flows(struct otx2_nic *pfvf) } pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC; + flow_cfg->max_flows = 0; mutex_unlock(&pfvf->mbox.lock); return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index f5bce3e326cc..5492dea547a1 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1714,7 +1714,7 @@ static void otx2_do_set_rx_mode(struct otx2_nic *pf) return; if ((netdev->flags & IFF_PROMISC) || - (netdev_uc_count(netdev) > OTX2_MAX_UNICAST_FLOWS)) { + (netdev_uc_count(netdev) > pf->flow_cfg->ucast_flt_cnt)) { promisc = true; } @@ -3245,6 +3245,29 @@ static int otx2_sriov_configure(struct pci_dev *pdev, int numvfs) return otx2_sriov_enable(pdev, numvfs); } +static void otx2_ndc_sync(struct otx2_nic *pf) +{ + struct mbox *mbox = &pf->mbox; + struct ndc_sync_op *req; + + mutex_lock(&mbox->lock); + + req = otx2_mbox_alloc_msg_ndc_sync_op(mbox); + if (!req) { + mutex_unlock(&mbox->lock); + return; + } + + req->nix_lf_tx_sync = 1; + req->nix_lf_rx_sync = 1; + req->npa_lf_sync = 1; + + if (otx2_sync_mbox_msg(mbox)) + dev_err(pf->dev, "NDC sync operation failed\n"); + + mutex_unlock(&mbox->lock); +} + static void otx2_remove(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); @@ -3293,6 +3316,7 @@ static void otx2_remove(struct pci_dev *pdev)
otx2_mcam_flow_del(pf); otx2_shutdown_tc(pf); otx2_shutdown_qos(pf); + otx2_ndc_sync(pf); otx2_detach_resources(&pf->mbox); if (pf->hw.lmt_info) free_percpu(pf->hw.lmt_info); diff --git a/drivers/net/ethernet/mediatek/Kconfig b/drivers/net/ethernet/mediatek/Kconfig index da0db417ab69..95c4405b7d7b 100644 --- a/drivers/net/ethernet/mediatek/Kconfig +++ b/drivers/net/ethernet/mediatek/Kconfig @@ -1,12 +1,20 @@ # SPDX-License-Identifier: GPL-2.0-only config NET_VENDOR_MEDIATEK bool "MediaTek devices" - depends on ARCH_MEDIATEK || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST + depends on ARCH_MEDIATEK || ARCH_AIROHA || SOC_MT7621 || SOC_MT7620 || COMPILE_TEST help If you have a Mediatek SoC with ethernet, say Y. if NET_VENDOR_MEDIATEK +config NET_AIROHA + tristate "Airoha SoC Gigabit Ethernet support" + depends on NET_DSA || !NET_DSA + select PAGE_POOL + help + This driver supports the gigabit ethernet MACs in the + Airoha SoC family. + config NET_MEDIATEK_SOC_WED depends on ARCH_MEDIATEK || COMPILE_TEST def_bool NET_MEDIATEK_SOC != n diff --git a/drivers/net/ethernet/mediatek/Makefile b/drivers/net/ethernet/mediatek/Makefile index 03e008fbc859..ddbb7f4a516c 100644 --- a/drivers/net/ethernet/mediatek/Makefile +++ b/drivers/net/ethernet/mediatek/Makefile @@ -11,3 +11,4 @@ mtk_eth-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_debugfs.o endif obj-$(CONFIG_NET_MEDIATEK_SOC_WED) += mtk_wed_ops.o obj-$(CONFIG_NET_MEDIATEK_STAR_EMAC) += mtk_star_emac.o +obj-$(CONFIG_NET_AIROHA) += airoha_eth.o diff --git a/drivers/net/ethernet/mediatek/airoha_eth.c b/drivers/net/ethernet/mediatek/airoha_eth.c new file mode 100644 index 000000000000..7967a92803c2 --- /dev/null +++ b/drivers/net/ethernet/mediatek/airoha_eth.c @@ -0,0 +1,2730 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2024 AIROHA Inc + * Author: Lorenzo Bianconi + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define AIROHA_MAX_NUM_GDM_PORTS 1 +#define AIROHA_MAX_NUM_RSTS 3 +#define AIROHA_MAX_NUM_XSI_RSTS 5 +#define AIROHA_MAX_MTU 2000 +#define AIROHA_MAX_PACKET_SIZE 2048 +#define AIROHA_NUM_TX_RING 32 +#define AIROHA_NUM_RX_RING 32 +#define AIROHA_FE_MC_MAX_VLAN_TABLE 64 +#define AIROHA_FE_MC_MAX_VLAN_PORT 16 +#define AIROHA_NUM_TX_IRQ 2 +#define HW_DSCP_NUM 2048 +#define IRQ_QUEUE_LEN(_n) ((_n) ? 1024 : 2048) +#define TX_DSCP_NUM 1024 +#define RX_DSCP_NUM(_n) \ + ((_n) == 2 ? 128 : \ + (_n) == 11 ? 128 : \ + (_n) == 15 ? 128 : \ + (_n) == 0 ? 1024 : 16) + +#define PSE_RSV_PAGES 128 +#define PSE_QUEUE_RSV_PAGES 64 + +/* FE */ +#define PSE_BASE 0x0100 +#define CSR_IFC_BASE 0x0200 +#define CDM1_BASE 0x0400 +#define GDM1_BASE 0x0500 +#define PPE1_BASE 0x0c00 + +#define CDM2_BASE 0x1400 +#define GDM2_BASE 0x1500 + +#define GDM3_BASE 0x1100 +#define GDM4_BASE 0x2500 + +#define GDM_BASE(_n) \ + ((_n) == 4 ? GDM4_BASE : \ + (_n) == 3 ? GDM3_BASE : \ + (_n) == 2 ? 
GDM2_BASE : GDM1_BASE) + +#define REG_FE_DMA_GLO_CFG 0x0000 +#define FE_DMA_GLO_L2_SPACE_MASK GENMASK(7, 4) +#define FE_DMA_GLO_PG_SZ_MASK BIT(3) + +#define REG_FE_RST_GLO_CFG 0x0004 +#define FE_RST_GDM4_MBI_ARB_MASK BIT(3) +#define FE_RST_GDM3_MBI_ARB_MASK BIT(2) +#define FE_RST_CORE_MASK BIT(0) + +#define REG_FE_LAN_MAC_H 0x0040 +#define REG_FE_LAN_MAC_LMIN 0x0044 +#define REG_FE_LAN_MAC_LMAX 0x0048 + +#define REG_FE_CDM1_OQ_MAP0 0x0050 +#define REG_FE_CDM1_OQ_MAP1 0x0054 +#define REG_FE_CDM1_OQ_MAP2 0x0058 +#define REG_FE_CDM1_OQ_MAP3 0x005c + +#define REG_FE_PCE_CFG 0x0070 +#define PCE_DPI_EN_MASK BIT(2) +#define PCE_KA_EN_MASK BIT(1) +#define PCE_MC_EN_MASK BIT(0) + +#define REG_FE_PSE_QUEUE_CFG_WR 0x0080 +#define PSE_CFG_PORT_ID_MASK GENMASK(27, 24) +#define PSE_CFG_QUEUE_ID_MASK GENMASK(20, 16) +#define PSE_CFG_WR_EN_MASK BIT(8) +#define PSE_CFG_OQRSV_SEL_MASK BIT(0) + +#define REG_FE_PSE_QUEUE_CFG_VAL 0x0084 +#define PSE_CFG_OQ_RSV_MASK GENMASK(13, 0) + +#define PSE_FQ_CFG 0x008c +#define PSE_FQ_LIMIT_MASK GENMASK(14, 0) + +#define REG_FE_PSE_BUF_SET 0x0090 +#define PSE_SHARE_USED_LTHD_MASK GENMASK(31, 16) +#define PSE_ALLRSV_MASK GENMASK(14, 0) + +#define REG_PSE_SHARE_USED_THD 0x0094 +#define PSE_SHARE_USED_MTHD_MASK GENMASK(31, 16) +#define PSE_SHARE_USED_HTHD_MASK GENMASK(15, 0) + +#define REG_GDM_MISC_CFG 0x0148 +#define GDM2_RDM_ACK_WAIT_PREF_MASK BIT(9) +#define GDM2_CHN_VLD_MODE_MASK BIT(5) + +#define REG_FE_CSR_IFC_CFG CSR_IFC_BASE +#define FE_IFC_EN_MASK BIT(0) + +#define REG_FE_VIP_PORT_EN 0x01f0 +#define REG_FE_IFC_PORT_EN 0x01f4 + +#define REG_PSE_IQ_REV1 (PSE_BASE + 0x08) +#define PSE_IQ_RES1_P2_MASK GENMASK(23, 16) + +#define REG_PSE_IQ_REV2 (PSE_BASE + 0x0c) +#define PSE_IQ_RES2_P5_MASK GENMASK(15, 8) +#define PSE_IQ_RES2_P4_MASK GENMASK(7, 0) + +#define REG_FE_VIP_EN(_n) (0x0300 + ((_n) << 3)) +#define PATN_FCPU_EN_MASK BIT(7) +#define PATN_SWP_EN_MASK BIT(6) +#define PATN_DP_EN_MASK BIT(5) +#define PATN_SP_EN_MASK BIT(4) +#define PATN_TYPE_MASK GENMASK(3, 1) +#define PATN_EN_MASK BIT(0) + +#define REG_FE_VIP_PATN(_n) (0x0304 + ((_n) << 3)) +#define PATN_DP_MASK GENMASK(31, 16) +#define PATN_SP_MASK GENMASK(15, 0) + +#define REG_CDM1_VLAN_CTRL CDM1_BASE +#define CDM1_VLAN_MASK GENMASK(31, 16) + +#define REG_CDM1_FWD_CFG (CDM1_BASE + 0x08) +#define CDM1_VIP_QSEL_MASK GENMASK(24, 20) + +#define REG_CDM1_CRSN_QSEL(_n) (CDM1_BASE + 0x10 + ((_n) << 2)) +#define CDM1_CRSN_QSEL_REASON_MASK(_n) \ + GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3)) + +#define REG_CDM2_FWD_CFG (CDM2_BASE + 0x08) +#define CDM2_OAM_QSEL_MASK GENMASK(31, 27) +#define CDM2_VIP_QSEL_MASK GENMASK(24, 20) + +#define REG_CDM2_CRSN_QSEL(_n) (CDM2_BASE + 0x10 + ((_n) << 2)) +#define CDM2_CRSN_QSEL_REASON_MASK(_n) \ + GENMASK(4 + (((_n) % 4) << 3), (((_n) % 4) << 3)) + +#define REG_GDM_FWD_CFG(_n) GDM_BASE(_n) +#define GDM_DROP_CRC_ERR BIT(23) +#define GDM_IP4_CKSUM BIT(22) +#define GDM_TCP_CKSUM BIT(21) +#define GDM_UDP_CKSUM BIT(20) +#define GDM_UCFQ_MASK GENMASK(15, 12) +#define GDM_BCFQ_MASK GENMASK(11, 8) +#define GDM_MCFQ_MASK GENMASK(7, 4) +#define GDM_OCFQ_MASK GENMASK(3, 0) + +#define REG_GDM_INGRESS_CFG(_n) (GDM_BASE(_n) + 0x10) +#define GDM_INGRESS_FC_EN_MASK BIT(1) +#define GDM_STAG_EN_MASK BIT(0) + +#define REG_GDM_LEN_CFG(_n) (GDM_BASE(_n) + 0x14) +#define GDM_SHORT_LEN_MASK GENMASK(13, 0) +#define GDM_LONG_LEN_MASK GENMASK(29, 16) + +#define REG_FE_CPORT_CFG (GDM1_BASE + 0x40) +#define FE_CPORT_PAD BIT(26) +#define FE_CPORT_PORT_XFC_MASK BIT(25) +#define FE_CPORT_QUEUE_XFC_MASK BIT(24) + 
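/* Worked examples of the GDM_BASE() selector above, computed directly from
 * the GDMn_BASE defines (illustrative, not part of the patch):
 *   REG_GDM_FWD_CFG(1)     = GDM1_BASE        = 0x0500
 *   REG_GDM_INGRESS_CFG(2) = GDM2_BASE + 0x10 = 0x1510
 *   REG_GDM_LEN_CFG(3)     = GDM3_BASE + 0x14 = 0x1114
 *   REG_GDM_FWD_CFG(4)     = GDM4_BASE        = 0x2500
 */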
+#define REG_FE_GDM_MIB_CLEAR(_n) (GDM_BASE(_n) + 0xf0) +#define FE_GDM_MIB_RX_CLEAR_MASK BIT(1) +#define FE_GDM_MIB_TX_CLEAR_MASK BIT(0) + +#define REG_FE_GDM1_MIB_CFG (GDM1_BASE + 0xf4) +#define FE_STRICT_RFC2819_MODE_MASK BIT(31) +#define FE_GDM1_TX_MIB_SPLIT_EN_MASK BIT(17) +#define FE_GDM1_RX_MIB_SPLIT_EN_MASK BIT(16) +#define FE_TX_MIB_ID_MASK GENMASK(15, 8) +#define FE_RX_MIB_ID_MASK GENMASK(7, 0) + +#define REG_FE_GDM_TX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x104) +#define REG_FE_GDM_TX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x10c) +#define REG_FE_GDM_TX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x110) +#define REG_FE_GDM_TX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x114) +#define REG_FE_GDM_TX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x118) +#define REG_FE_GDM_TX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x11c) +#define REG_FE_GDM_TX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x120) +#define REG_FE_GDM_TX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x124) +#define REG_FE_GDM_TX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x128) +#define REG_FE_GDM_TX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x12c) +#define REG_FE_GDM_TX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x130) +#define REG_FE_GDM_TX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x134) +#define REG_FE_GDM_TX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x138) +#define REG_FE_GDM_TX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x13c) +#define REG_FE_GDM_TX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x140) + +#define REG_FE_GDM_RX_OK_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x148) +#define REG_FE_GDM_RX_FC_DROP_CNT(_n) (GDM_BASE(_n) + 0x14c) +#define REG_FE_GDM_RX_RC_DROP_CNT(_n) (GDM_BASE(_n) + 0x150) +#define REG_FE_GDM_RX_OVERFLOW_DROP_CNT(_n) (GDM_BASE(_n) + 0x154) +#define REG_FE_GDM_RX_ERROR_DROP_CNT(_n) (GDM_BASE(_n) + 0x158) +#define REG_FE_GDM_RX_OK_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x15c) +#define REG_FE_GDM_RX_ETH_PKT_CNT_L(_n) (GDM_BASE(_n) + 0x160) +#define REG_FE_GDM_RX_ETH_BYTE_CNT_L(_n) (GDM_BASE(_n) + 0x164) +#define REG_FE_GDM_RX_ETH_DROP_CNT(_n) (GDM_BASE(_n) + 0x168) +#define REG_FE_GDM_RX_ETH_BC_CNT(_n) (GDM_BASE(_n) + 0x16c) +#define REG_FE_GDM_RX_ETH_MC_CNT(_n) (GDM_BASE(_n) + 0x170) +#define REG_FE_GDM_RX_ETH_CRC_ERR_CNT(_n) (GDM_BASE(_n) + 0x174) +#define REG_FE_GDM_RX_ETH_FRAG_CNT(_n) (GDM_BASE(_n) + 0x178) +#define REG_FE_GDM_RX_ETH_JABBER_CNT(_n) (GDM_BASE(_n) + 0x17c) +#define REG_FE_GDM_RX_ETH_RUNT_CNT(_n) (GDM_BASE(_n) + 0x180) +#define REG_FE_GDM_RX_ETH_LONG_CNT(_n) (GDM_BASE(_n) + 0x184) +#define REG_FE_GDM_RX_ETH_E64_CNT_L(_n) (GDM_BASE(_n) + 0x188) +#define REG_FE_GDM_RX_ETH_L64_CNT_L(_n) (GDM_BASE(_n) + 0x18c) +#define REG_FE_GDM_RX_ETH_L127_CNT_L(_n) (GDM_BASE(_n) + 0x190) +#define REG_FE_GDM_RX_ETH_L255_CNT_L(_n) (GDM_BASE(_n) + 0x194) +#define REG_FE_GDM_RX_ETH_L511_CNT_L(_n) (GDM_BASE(_n) + 0x198) +#define REG_FE_GDM_RX_ETH_L1023_CNT_L(_n) (GDM_BASE(_n) + 0x19c) + +#define REG_PPE1_TB_HASH_CFG (PPE1_BASE + 0x250) +#define PPE1_SRAM_TABLE_EN_MASK BIT(0) +#define PPE1_SRAM_HASH1_EN_MASK BIT(8) +#define PPE1_DRAM_TABLE_EN_MASK BIT(16) +#define PPE1_DRAM_HASH1_EN_MASK BIT(24) + +#define REG_FE_GDM_TX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x280) +#define REG_FE_GDM_TX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x284) +#define REG_FE_GDM_TX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x288) +#define REG_FE_GDM_TX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x28c) + +#define REG_FE_GDM_RX_OK_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x290) +#define REG_FE_GDM_RX_OK_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x294) +#define REG_FE_GDM_RX_ETH_PKT_CNT_H(_n) (GDM_BASE(_n) + 0x298) +#define REG_FE_GDM_RX_ETH_BYTE_CNT_H(_n) (GDM_BASE(_n) + 0x29c) +#define REG_FE_GDM_TX_ETH_E64_CNT_H(_n) 
(GDM_BASE(_n) + 0x2b8) +#define REG_FE_GDM_TX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2bc) +#define REG_FE_GDM_TX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2c0) +#define REG_FE_GDM_TX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2c4) +#define REG_FE_GDM_TX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2c8) +#define REG_FE_GDM_TX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2cc) +#define REG_FE_GDM_RX_ETH_E64_CNT_H(_n) (GDM_BASE(_n) + 0x2e8) +#define REG_FE_GDM_RX_ETH_L64_CNT_H(_n) (GDM_BASE(_n) + 0x2ec) +#define REG_FE_GDM_RX_ETH_L127_CNT_H(_n) (GDM_BASE(_n) + 0x2f0) +#define REG_FE_GDM_RX_ETH_L255_CNT_H(_n) (GDM_BASE(_n) + 0x2f4) +#define REG_FE_GDM_RX_ETH_L511_CNT_H(_n) (GDM_BASE(_n) + 0x2f8) +#define REG_FE_GDM_RX_ETH_L1023_CNT_H(_n) (GDM_BASE(_n) + 0x2fc) + +#define REG_GDM2_CHN_RLS (GDM2_BASE + 0x20) +#define MBI_RX_AGE_SEL_MASK GENMASK(26, 25) +#define MBI_TX_AGE_SEL_MASK GENMASK(18, 17) + +#define REG_GDM3_FWD_CFG GDM3_BASE +#define GDM3_PAD_EN_MASK BIT(28) + +#define REG_GDM4_FWD_CFG (GDM4_BASE + 0x100) +#define GDM4_PAD_EN_MASK BIT(28) +#define GDM4_SPORT_OFFSET0_MASK GENMASK(11, 8) + +#define REG_GDM4_SRC_PORT_SET (GDM4_BASE + 0x33c) +#define GDM4_SPORT_OFF2_MASK GENMASK(19, 16) +#define GDM4_SPORT_OFF1_MASK GENMASK(15, 12) +#define GDM4_SPORT_OFF0_MASK GENMASK(11, 8) + +#define REG_IP_FRAG_FP 0x2010 +#define IP_ASSEMBLE_PORT_MASK GENMASK(24, 21) +#define IP_ASSEMBLE_NBQ_MASK GENMASK(20, 16) +#define IP_FRAGMENT_PORT_MASK GENMASK(8, 5) +#define IP_FRAGMENT_NBQ_MASK GENMASK(4, 0) + +#define REG_MC_VLAN_EN 0x2100 +#define MC_VLAN_EN_MASK BIT(0) + +#define REG_MC_VLAN_CFG 0x2104 +#define MC_VLAN_CFG_CMD_DONE_MASK BIT(31) +#define MC_VLAN_CFG_TABLE_ID_MASK GENMASK(21, 16) +#define MC_VLAN_CFG_PORT_ID_MASK GENMASK(11, 8) +#define MC_VLAN_CFG_TABLE_SEL_MASK BIT(4) +#define MC_VLAN_CFG_RW_MASK BIT(0) + +#define REG_MC_VLAN_DATA 0x2108 + +#define REG_CDM5_RX_OQ1_DROP_CNT 0x29d4 + +/* QDMA */ +#define REG_QDMA_GLOBAL_CFG 0x0004 +#define GLOBAL_CFG_RX_2B_OFFSET_MASK BIT(31) +#define GLOBAL_CFG_DMA_PREFERENCE_MASK GENMASK(30, 29) +#define GLOBAL_CFG_CPU_TXR_RR_MASK BIT(28) +#define GLOBAL_CFG_DSCP_BYTE_SWAP_MASK BIT(27) +#define GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK BIT(26) +#define GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK BIT(25) +#define GLOBAL_CFG_OAM_MODIFY_MASK BIT(24) +#define GLOBAL_CFG_RESET_MASK BIT(23) +#define GLOBAL_CFG_RESET_DONE_MASK BIT(22) +#define GLOBAL_CFG_MULTICAST_EN_MASK BIT(21) +#define GLOBAL_CFG_IRQ1_EN_MASK BIT(20) +#define GLOBAL_CFG_IRQ0_EN_MASK BIT(19) +#define GLOBAL_CFG_LOOPCNT_EN_MASK BIT(18) +#define GLOBAL_CFG_RD_BYPASS_WR_MASK BIT(17) +#define GLOBAL_CFG_QDMA_LOOPBACK_MASK BIT(16) +#define GLOBAL_CFG_LPBK_RXQ_SEL_MASK GENMASK(13, 8) +#define GLOBAL_CFG_CHECK_DONE_MASK BIT(7) +#define GLOBAL_CFG_TX_WB_DONE_MASK BIT(6) +#define GLOBAL_CFG_MAX_ISSUE_NUM_MASK GENMASK(5, 4) +#define GLOBAL_CFG_RX_DMA_BUSY_MASK BIT(3) +#define GLOBAL_CFG_RX_DMA_EN_MASK BIT(2) +#define GLOBAL_CFG_TX_DMA_BUSY_MASK BIT(1) +#define GLOBAL_CFG_TX_DMA_EN_MASK BIT(0) + +#define REG_FWD_DSCP_BASE 0x0010 +#define REG_FWD_BUF_BASE 0x0014 + +#define REG_HW_FWD_DSCP_CFG 0x0018 +#define HW_FWD_DSCP_PAYLOAD_SIZE_MASK GENMASK(29, 28) +#define HW_FWD_DSCP_SCATTER_LEN_MASK GENMASK(17, 16) +#define HW_FWD_DSCP_MIN_SCATTER_LEN_MASK GENMASK(15, 0) + +#define REG_INT_STATUS(_n) \ (((_n) == 4) ? 0x0730 : \ ((_n) == 3) ? 0x0724 : \ ((_n) == 2) ? 0x0720 : \ ((_n) == 1) ? 0x0024 : 0x0020) + +#define REG_INT_ENABLE(_n) \ (((_n) == 4) ? 0x0750 : \ ((_n) == 3) ? 0x0744 : \ ((_n) == 2) ? 0x0740 : \ ((_n) == 1) ?
0x002c : 0x0028) + +/* QDMA_CSR_INT_ENABLE1 */ +#define RX15_COHERENT_INT_MASK BIT(31) +#define RX14_COHERENT_INT_MASK BIT(30) +#define RX13_COHERENT_INT_MASK BIT(29) +#define RX12_COHERENT_INT_MASK BIT(28) +#define RX11_COHERENT_INT_MASK BIT(27) +#define RX10_COHERENT_INT_MASK BIT(26) +#define RX9_COHERENT_INT_MASK BIT(25) +#define RX8_COHERENT_INT_MASK BIT(24) +#define RX7_COHERENT_INT_MASK BIT(23) +#define RX6_COHERENT_INT_MASK BIT(22) +#define RX5_COHERENT_INT_MASK BIT(21) +#define RX4_COHERENT_INT_MASK BIT(20) +#define RX3_COHERENT_INT_MASK BIT(19) +#define RX2_COHERENT_INT_MASK BIT(18) +#define RX1_COHERENT_INT_MASK BIT(17) +#define RX0_COHERENT_INT_MASK BIT(16) +#define TX7_COHERENT_INT_MASK BIT(15) +#define TX6_COHERENT_INT_MASK BIT(14) +#define TX5_COHERENT_INT_MASK BIT(13) +#define TX4_COHERENT_INT_MASK BIT(12) +#define TX3_COHERENT_INT_MASK BIT(11) +#define TX2_COHERENT_INT_MASK BIT(10) +#define TX1_COHERENT_INT_MASK BIT(9) +#define TX0_COHERENT_INT_MASK BIT(8) +#define CNT_OVER_FLOW_INT_MASK BIT(7) +#define IRQ1_FULL_INT_MASK BIT(5) +#define IRQ1_INT_MASK BIT(4) +#define HWFWD_DSCP_LOW_INT_MASK BIT(3) +#define HWFWD_DSCP_EMPTY_INT_MASK BIT(2) +#define IRQ0_FULL_INT_MASK BIT(1) +#define IRQ0_INT_MASK BIT(0) + +#define TX_DONE_INT_MASK(_n) \ + ((_n) ? IRQ1_INT_MASK | IRQ1_FULL_INT_MASK \ + : IRQ0_INT_MASK | IRQ0_FULL_INT_MASK) + +#define INT_TX_MASK \ + (IRQ1_INT_MASK | IRQ1_FULL_INT_MASK | \ + IRQ0_INT_MASK | IRQ0_FULL_INT_MASK) + +#define INT_IDX0_MASK \ + (TX0_COHERENT_INT_MASK | TX1_COHERENT_INT_MASK | \ + TX2_COHERENT_INT_MASK | TX3_COHERENT_INT_MASK | \ + TX4_COHERENT_INT_MASK | TX5_COHERENT_INT_MASK | \ + TX6_COHERENT_INT_MASK | TX7_COHERENT_INT_MASK | \ + RX0_COHERENT_INT_MASK | RX1_COHERENT_INT_MASK | \ + RX2_COHERENT_INT_MASK | RX3_COHERENT_INT_MASK | \ + RX4_COHERENT_INT_MASK | RX7_COHERENT_INT_MASK | \ + RX8_COHERENT_INT_MASK | RX9_COHERENT_INT_MASK | \ + RX15_COHERENT_INT_MASK | INT_TX_MASK) + +/* QDMA_CSR_INT_ENABLE2 */ +#define RX15_NO_CPU_DSCP_INT_MASK BIT(31) +#define RX14_NO_CPU_DSCP_INT_MASK BIT(30) +#define RX13_NO_CPU_DSCP_INT_MASK BIT(29) +#define RX12_NO_CPU_DSCP_INT_MASK BIT(28) +#define RX11_NO_CPU_DSCP_INT_MASK BIT(27) +#define RX10_NO_CPU_DSCP_INT_MASK BIT(26) +#define RX9_NO_CPU_DSCP_INT_MASK BIT(25) +#define RX8_NO_CPU_DSCP_INT_MASK BIT(24) +#define RX7_NO_CPU_DSCP_INT_MASK BIT(23) +#define RX6_NO_CPU_DSCP_INT_MASK BIT(22) +#define RX5_NO_CPU_DSCP_INT_MASK BIT(21) +#define RX4_NO_CPU_DSCP_INT_MASK BIT(20) +#define RX3_NO_CPU_DSCP_INT_MASK BIT(19) +#define RX2_NO_CPU_DSCP_INT_MASK BIT(18) +#define RX1_NO_CPU_DSCP_INT_MASK BIT(17) +#define RX0_NO_CPU_DSCP_INT_MASK BIT(16) +#define RX15_DONE_INT_MASK BIT(15) +#define RX14_DONE_INT_MASK BIT(14) +#define RX13_DONE_INT_MASK BIT(13) +#define RX12_DONE_INT_MASK BIT(12) +#define RX11_DONE_INT_MASK BIT(11) +#define RX10_DONE_INT_MASK BIT(10) +#define RX9_DONE_INT_MASK BIT(9) +#define RX8_DONE_INT_MASK BIT(8) +#define RX7_DONE_INT_MASK BIT(7) +#define RX6_DONE_INT_MASK BIT(6) +#define RX5_DONE_INT_MASK BIT(5) +#define RX4_DONE_INT_MASK BIT(4) +#define RX3_DONE_INT_MASK BIT(3) +#define RX2_DONE_INT_MASK BIT(2) +#define RX1_DONE_INT_MASK BIT(1) +#define RX0_DONE_INT_MASK BIT(0) + +#define RX_DONE_INT_MASK \ + (RX0_DONE_INT_MASK | RX1_DONE_INT_MASK | \ + RX2_DONE_INT_MASK | RX3_DONE_INT_MASK | \ + RX4_DONE_INT_MASK | RX7_DONE_INT_MASK | \ + RX8_DONE_INT_MASK | RX9_DONE_INT_MASK | \ + RX15_DONE_INT_MASK) +#define INT_IDX1_MASK \ + (RX_DONE_INT_MASK | \ + RX0_NO_CPU_DSCP_INT_MASK | RX1_NO_CPU_DSCP_INT_MASK | \ + 
RX2_NO_CPU_DSCP_INT_MASK | RX3_NO_CPU_DSCP_INT_MASK | \ + RX4_NO_CPU_DSCP_INT_MASK | RX7_NO_CPU_DSCP_INT_MASK | \ + RX8_NO_CPU_DSCP_INT_MASK | RX9_NO_CPU_DSCP_INT_MASK | \ + RX15_NO_CPU_DSCP_INT_MASK) + +/* QDMA_CSR_INT_ENABLE5 */ +#define TX31_COHERENT_INT_MASK BIT(31) +#define TX30_COHERENT_INT_MASK BIT(30) +#define TX29_COHERENT_INT_MASK BIT(29) +#define TX28_COHERENT_INT_MASK BIT(28) +#define TX27_COHERENT_INT_MASK BIT(27) +#define TX26_COHERENT_INT_MASK BIT(26) +#define TX25_COHERENT_INT_MASK BIT(25) +#define TX24_COHERENT_INT_MASK BIT(24) +#define TX23_COHERENT_INT_MASK BIT(23) +#define TX22_COHERENT_INT_MASK BIT(22) +#define TX21_COHERENT_INT_MASK BIT(21) +#define TX20_COHERENT_INT_MASK BIT(20) +#define TX19_COHERENT_INT_MASK BIT(19) +#define TX18_COHERENT_INT_MASK BIT(18) +#define TX17_COHERENT_INT_MASK BIT(17) +#define TX16_COHERENT_INT_MASK BIT(16) +#define TX15_COHERENT_INT_MASK BIT(15) +#define TX14_COHERENT_INT_MASK BIT(14) +#define TX13_COHERENT_INT_MASK BIT(13) +#define TX12_COHERENT_INT_MASK BIT(12) +#define TX11_COHERENT_INT_MASK BIT(11) +#define TX10_COHERENT_INT_MASK BIT(10) +#define TX9_COHERENT_INT_MASK BIT(9) +#define TX8_COHERENT_INT_MASK BIT(8) + +#define INT_IDX4_MASK \ + (TX8_COHERENT_INT_MASK | TX9_COHERENT_INT_MASK | \ + TX10_COHERENT_INT_MASK | TX11_COHERENT_INT_MASK | \ + TX12_COHERENT_INT_MASK | TX13_COHERENT_INT_MASK | \ + TX14_COHERENT_INT_MASK | TX15_COHERENT_INT_MASK | \ + TX16_COHERENT_INT_MASK | TX17_COHERENT_INT_MASK | \ + TX18_COHERENT_INT_MASK | TX19_COHERENT_INT_MASK | \ + TX20_COHERENT_INT_MASK | TX21_COHERENT_INT_MASK | \ + TX22_COHERENT_INT_MASK | TX23_COHERENT_INT_MASK | \ + TX24_COHERENT_INT_MASK | TX25_COHERENT_INT_MASK | \ + TX26_COHERENT_INT_MASK | TX27_COHERENT_INT_MASK | \ + TX28_COHERENT_INT_MASK | TX29_COHERENT_INT_MASK | \ + TX30_COHERENT_INT_MASK | TX31_COHERENT_INT_MASK) + +#define REG_TX_IRQ_BASE(_n) ((_n) ? 0x0048 : 0x0050) + +#define REG_TX_IRQ_CFG(_n) ((_n) ? 0x004c : 0x0054) +#define TX_IRQ_THR_MASK GENMASK(27, 16) +#define TX_IRQ_DEPTH_MASK GENMASK(11, 0) + +#define REG_IRQ_CLEAR_LEN(_n) ((_n) ? 0x0064 : 0x0058) +#define IRQ_CLEAR_LEN_MASK GENMASK(7, 0) + +#define REG_IRQ_STATUS(_n) ((_n) ? 0x0068 : 0x005c) +#define IRQ_ENTRY_LEN_MASK GENMASK(27, 16) +#define IRQ_HEAD_IDX_MASK GENMASK(11, 0) + +#define REG_TX_RING_BASE(_n) \ + (((_n) < 8) ? 0x0100 + ((_n) << 5) : 0x0b00 + (((_n) - 8) << 5)) + +#define REG_TX_RING_BLOCKING(_n) \ + (((_n) < 8) ? 0x0104 + ((_n) << 5) : 0x0b04 + (((_n) - 8) << 5)) + +#define TX_RING_IRQ_BLOCKING_MAP_MASK BIT(6) +#define TX_RING_IRQ_BLOCKING_CFG_MASK BIT(4) +#define TX_RING_IRQ_BLOCKING_TX_DROP_EN_MASK BIT(2) +#define TX_RING_IRQ_BLOCKING_MAX_TH_TXRING_EN_MASK BIT(1) +#define TX_RING_IRQ_BLOCKING_MIN_TH_TXRING_EN_MASK BIT(0) + +#define REG_TX_CPU_IDX(_n) \ + (((_n) < 8) ? 0x0108 + ((_n) << 5) : 0x0b08 + (((_n) - 8) << 5)) + +#define TX_RING_CPU_IDX_MASK GENMASK(15, 0) + +#define REG_TX_DMA_IDX(_n) \ + (((_n) < 8) ? 0x010c + ((_n) << 5) : 0x0b0c + (((_n) - 8) << 5)) + +#define TX_RING_DMA_IDX_MASK GENMASK(15, 0) + +#define IRQ_RING_IDX_MASK GENMASK(20, 16) +#define IRQ_DESC_IDX_MASK GENMASK(15, 0) + +#define REG_RX_RING_BASE(_n) \ + (((_n) < 16) ? 0x0200 + ((_n) << 5) : 0x0e00 + (((_n) - 16) << 5)) + +#define REG_RX_RING_SIZE(_n) \ + (((_n) < 16) ? 0x0204 + ((_n) << 5) : 0x0e04 + (((_n) - 16) << 5)) + +#define RX_RING_THR_MASK GENMASK(31, 16) +#define RX_RING_SIZE_MASK GENMASK(15, 0) + +#define REG_RX_CPU_IDX(_n) \ + (((_n) < 16) ? 
0x0208 + ((_n) << 5) : 0x0e08 + (((_n) - 16) << 5)) + +#define RX_RING_CPU_IDX_MASK GENMASK(15, 0) + +#define REG_RX_DMA_IDX(_n) \ + (((_n) < 16) ? 0x020c + ((_n) << 5) : 0x0e0c + (((_n) - 16) << 5)) + +#define REG_RX_DELAY_INT_IDX(_n) \ + (((_n) < 16) ? 0x0210 + ((_n) << 5) : 0x0e10 + (((_n) - 16) << 5)) + +#define RX_DELAY_INT_MASK GENMASK(15, 0) + +#define RX_RING_DMA_IDX_MASK GENMASK(15, 0) + +#define REG_INGRESS_TRTCM_CFG 0x0070 +#define INGRESS_TRTCM_EN_MASK BIT(31) +#define INGRESS_TRTCM_MODE_MASK BIT(30) +#define INGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16) +#define INGRESS_FAST_TICK_MASK GENMASK(15, 0) + +#define REG_TXQ_DIS_CFG_BASE(_n) ((_n) ? 0x20a0 : 0x00a0) +#define REG_TXQ_DIS_CFG(_n, _m) (REG_TXQ_DIS_CFG_BASE((_n)) + ((_m) << 2)) + +#define REG_LMGR_INIT_CFG 0x1000 +#define LMGR_INIT_START BIT(31) +#define LMGR_SRAM_MODE_MASK BIT(30) +#define HW_FWD_PKTSIZE_OVERHEAD_MASK GENMASK(27, 20) +#define HW_FWD_DESC_NUM_MASK GENMASK(16, 0) + +#define REG_FWD_DSCP_LOW_THR 0x1004 +#define FWD_DSCP_LOW_THR_MASK GENMASK(17, 0) + +#define REG_EGRESS_RATE_METER_CFG 0x100c +#define EGRESS_RATE_METER_EN_MASK BIT(29) +#define EGRESS_RATE_METER_EQ_RATE_EN_MASK BIT(17) +#define EGRESS_RATE_METER_WINDOW_SZ_MASK GENMASK(16, 12) +#define EGRESS_RATE_METER_TIMESLICE_MASK GENMASK(10, 0) + +#define REG_EGRESS_TRTCM_CFG 0x1010 +#define EGRESS_TRTCM_EN_MASK BIT(31) +#define EGRESS_TRTCM_MODE_MASK BIT(30) +#define EGRESS_SLOW_TICK_RATIO_MASK GENMASK(29, 16) +#define EGRESS_FAST_TICK_MASK GENMASK(15, 0) + +#define REG_TXWRR_MODE_CFG 0x1020 +#define TWRR_WEIGHT_SCALE_MASK BIT(31) +#define TWRR_WEIGHT_BASE_MASK BIT(3) + +#define REG_PSE_BUF_USAGE_CFG 0x1028 +#define PSE_BUF_ESTIMATE_EN_MASK BIT(29) + +#define REG_GLB_TRTCM_CFG 0x1080 +#define GLB_TRTCM_EN_MASK BIT(31) +#define GLB_TRTCM_MODE_MASK BIT(30) +#define GLB_SLOW_TICK_RATIO_MASK GENMASK(29, 16) +#define GLB_FAST_TICK_MASK GENMASK(15, 0) + +#define REG_TXQ_CNGST_CFG 0x10a0 +#define TXQ_CNGST_DROP_EN BIT(31) +#define TXQ_CNGST_DEI_DROP_EN BIT(30) + +#define REG_SLA_TRTCM_CFG 0x1150 +#define SLA_TRTCM_EN_MASK BIT(31) +#define SLA_TRTCM_MODE_MASK BIT(30) +#define SLA_SLOW_TICK_RATIO_MASK GENMASK(29, 16) +#define SLA_FAST_TICK_MASK GENMASK(15, 0) + +/* CTRL */ +#define QDMA_DESC_DONE_MASK BIT(31) +#define QDMA_DESC_DROP_MASK BIT(30) /* tx: drop - rx: overflow */ +#define QDMA_DESC_MORE_MASK BIT(29) /* more SG elements */ +#define QDMA_DESC_DEI_MASK BIT(25) +#define QDMA_DESC_NO_DROP_MASK BIT(24) +#define QDMA_DESC_LEN_MASK GENMASK(15, 0) +/* DATA */ +#define QDMA_DESC_NEXT_ID_MASK GENMASK(15, 0) +/* TX MSG0 */ +#define QDMA_ETH_TXMSG_MIC_IDX_MASK BIT(30) +#define QDMA_ETH_TXMSG_SP_TAG_MASK GENMASK(29, 14) +#define QDMA_ETH_TXMSG_ICO_MASK BIT(13) +#define QDMA_ETH_TXMSG_UCO_MASK BIT(12) +#define QDMA_ETH_TXMSG_TCO_MASK BIT(11) +#define QDMA_ETH_TXMSG_TSO_MASK BIT(10) +#define QDMA_ETH_TXMSG_FAST_MASK BIT(9) +#define QDMA_ETH_TXMSG_OAM_MASK BIT(8) +#define QDMA_ETH_TXMSG_CHAN_MASK GENMASK(7, 3) +#define QDMA_ETH_TXMSG_QUEUE_MASK GENMASK(2, 0) +/* TX MSG1 */ +#define QDMA_ETH_TXMSG_NO_DROP BIT(31) +#define QDMA_ETH_TXMSG_METER_MASK GENMASK(30, 24) /* 0x7f no meters */ +#define QDMA_ETH_TXMSG_FPORT_MASK GENMASK(23, 20) +#define QDMA_ETH_TXMSG_NBOQ_MASK GENMASK(19, 15) +#define QDMA_ETH_TXMSG_HWF_MASK BIT(14) +#define QDMA_ETH_TXMSG_HOP_MASK BIT(13) +#define QDMA_ETH_TXMSG_PTP_MASK BIT(12) +#define QDMA_ETH_TXMSG_ACNT_G1_MASK GENMASK(10, 6) /* 0x1f do not count */ +#define QDMA_ETH_TXMSG_ACNT_G0_MASK GENMASK(5, 0) /* 0x3f do not count */ + +/* RX MSG1 */
+#define QDMA_ETH_RXMSG_DEI_MASK BIT(31) +#define QDMA_ETH_RXMSG_IP6_MASK BIT(30) +#define QDMA_ETH_RXMSG_IP4_MASK BIT(29) +#define QDMA_ETH_RXMSG_IP4F_MASK BIT(28) +#define QDMA_ETH_RXMSG_L4_VALID_MASK BIT(27) +#define QDMA_ETH_RXMSG_L4F_MASK BIT(26) +#define QDMA_ETH_RXMSG_SPORT_MASK GENMASK(25, 21) +#define QDMA_ETH_RXMSG_CRSN_MASK GENMASK(20, 16) +#define QDMA_ETH_RXMSG_PPE_ENTRY_MASK GENMASK(15, 0) + +struct airoha_qdma_desc { + __le32 rsv; + __le32 ctrl; + __le32 addr; + __le32 data; + __le32 msg0; + __le32 msg1; + __le32 msg2; + __le32 msg3; +}; + +/* CTRL0 */ +#define QDMA_FWD_DESC_CTX_MASK BIT(31) +#define QDMA_FWD_DESC_RING_MASK GENMASK(30, 28) +#define QDMA_FWD_DESC_IDX_MASK GENMASK(27, 16) +#define QDMA_FWD_DESC_LEN_MASK GENMASK(15, 0) +/* CTRL1 */ +#define QDMA_FWD_DESC_FIRST_IDX_MASK GENMASK(15, 0) +/* CTRL2 */ +#define QDMA_FWD_DESC_MORE_PKT_NUM_MASK GENMASK(2, 0) + +struct airoha_qdma_fwd_desc { + __le32 addr; + __le32 ctrl0; + __le32 ctrl1; + __le32 ctrl2; + __le32 msg0; + __le32 msg1; + __le32 rsv0; + __le32 rsv1; +}; + +enum { + QDMA_INT_REG_IDX0, + QDMA_INT_REG_IDX1, + QDMA_INT_REG_IDX2, + QDMA_INT_REG_IDX3, + QDMA_INT_REG_IDX4, + QDMA_INT_REG_MAX +}; + +enum { + XSI_PCIE0_PORT, + XSI_PCIE1_PORT, + XSI_USB_PORT, + XSI_AE_PORT, + XSI_ETH_PORT, +}; + +enum { + XSI_PCIE0_VIP_PORT_MASK = BIT(22), + XSI_PCIE1_VIP_PORT_MASK = BIT(23), + XSI_USB_VIP_PORT_MASK = BIT(25), + XSI_ETH_VIP_PORT_MASK = BIT(24), +}; + +enum { + DEV_STATE_INITIALIZED, +}; + +enum { + CDM_CRSN_QSEL_Q1 = 1, + CDM_CRSN_QSEL_Q5 = 5, + CDM_CRSN_QSEL_Q6 = 6, + CDM_CRSN_QSEL_Q15 = 15, +}; + +enum { + CRSN_08 = 0x8, + CRSN_21 = 0x15, /* KA */ + CRSN_22 = 0x16, /* hit bind and force route to CPU */ + CRSN_24 = 0x18, + CRSN_25 = 0x19, +}; + +enum { + FE_PSE_PORT_CDM1, + FE_PSE_PORT_GDM1, + FE_PSE_PORT_GDM2, + FE_PSE_PORT_GDM3, + FE_PSE_PORT_PPE1, + FE_PSE_PORT_CDM2, + FE_PSE_PORT_CDM3, + FE_PSE_PORT_CDM4, + FE_PSE_PORT_PPE2, + FE_PSE_PORT_GDM4, + FE_PSE_PORT_CDM5, + FE_PSE_PORT_DROP = 0xf, +}; + +struct airoha_queue_entry { + union { + void *buf; + struct sk_buff *skb; + }; + dma_addr_t dma_addr; + u16 dma_len; +}; + +struct airoha_queue { + struct airoha_eth *eth; + + /* protect concurrent queue accesses */ + spinlock_t lock; + struct airoha_queue_entry *entry; + struct airoha_qdma_desc *desc; + u16 head; + u16 tail; + + int queued; + int ndesc; + int free_thr; + int buf_size; + + struct napi_struct napi; + struct page_pool *page_pool; +}; + +struct airoha_tx_irq_queue { + struct airoha_eth *eth; + + struct napi_struct napi; + u32 *q; + + int size; + int queued; + u16 head; +}; + +struct airoha_hw_stats { + /* protect concurrent hw_stats accesses */ + spinlock_t lock; + struct u64_stats_sync syncp; + + /* get_stats64 */ + u64 rx_ok_pkts; + u64 tx_ok_pkts; + u64 rx_ok_bytes; + u64 tx_ok_bytes; + u64 rx_multicast; + u64 rx_errors; + u64 rx_drops; + u64 tx_drops; + u64 rx_crc_error; + u64 rx_over_errors; + /* ethtool stats */ + u64 tx_broadcast; + u64 tx_multicast; + u64 tx_len[7]; + u64 rx_broadcast; + u64 rx_fragment; + u64 rx_jabber; + u64 rx_len[7]; +}; + +struct airoha_gdm_port { + struct net_device *dev; + struct airoha_eth *eth; + int id; + + struct airoha_hw_stats stats; +}; + +struct airoha_eth { + struct device *dev; + + unsigned long state; + + void __iomem *qdma_regs; + void __iomem *fe_regs; + + /* protect concurrent irqmask accesses */ + spinlock_t irq_lock; + u32 irqmask[QDMA_INT_REG_MAX]; + int irq; + + struct reset_control_bulk_data rsts[AIROHA_MAX_NUM_RSTS]; + struct reset_control_bulk_data 
xsi_rsts[AIROHA_MAX_NUM_XSI_RSTS]; + + struct airoha_gdm_port *ports[AIROHA_MAX_NUM_GDM_PORTS]; + + struct net_device *napi_dev; + struct airoha_queue q_tx[AIROHA_NUM_TX_RING]; + struct airoha_queue q_rx[AIROHA_NUM_RX_RING]; + + struct airoha_tx_irq_queue q_tx_irq[AIROHA_NUM_TX_IRQ]; + + /* descriptor and packet buffers for qdma hw forward */ + struct { + void *desc; + void *q; + } hfwd; +}; + +static u32 airoha_rr(void __iomem *base, u32 offset) +{ + return readl(base + offset); +} + +static void airoha_wr(void __iomem *base, u32 offset, u32 val) +{ + writel(val, base + offset); +} + +static u32 airoha_rmw(void __iomem *base, u32 offset, u32 mask, u32 val) +{ + val |= (airoha_rr(base, offset) & ~mask); + airoha_wr(base, offset, val); + + return val; +} + +#define airoha_fe_rr(eth, offset) \ + airoha_rr((eth)->fe_regs, (offset)) +#define airoha_fe_wr(eth, offset, val) \ + airoha_wr((eth)->fe_regs, (offset), (val)) +#define airoha_fe_rmw(eth, offset, mask, val) \ + airoha_rmw((eth)->fe_regs, (offset), (mask), (val)) +#define airoha_fe_set(eth, offset, val) \ + airoha_rmw((eth)->fe_regs, (offset), 0, (val)) +#define airoha_fe_clear(eth, offset, val) \ + airoha_rmw((eth)->fe_regs, (offset), (val), 0) + +#define airoha_qdma_rr(eth, offset) \ + airoha_rr((eth)->qdma_regs, (offset)) +#define airoha_qdma_wr(eth, offset, val) \ + airoha_wr((eth)->qdma_regs, (offset), (val)) +#define airoha_qdma_rmw(eth, offset, mask, val) \ + airoha_rmw((eth)->qdma_regs, (offset), (mask), (val)) +#define airoha_qdma_set(eth, offset, val) \ + airoha_rmw((eth)->qdma_regs, (offset), 0, (val)) +#define airoha_qdma_clear(eth, offset, val) \ + airoha_rmw((eth)->qdma_regs, (offset), (val), 0) + +static void airoha_qdma_set_irqmask(struct airoha_eth *eth, int index, + u32 clear, u32 set) +{ + unsigned long flags; + + if (WARN_ON_ONCE(index >= ARRAY_SIZE(eth->irqmask))) + return; + + spin_lock_irqsave(ð->irq_lock, flags); + + eth->irqmask[index] &= ~clear; + eth->irqmask[index] |= set; + airoha_qdma_wr(eth, REG_INT_ENABLE(index), eth->irqmask[index]); + /* Read irq_enable register in order to guarantee the update above + * completes in the spinlock critical section. + */ + airoha_qdma_rr(eth, REG_INT_ENABLE(index)); + + spin_unlock_irqrestore(ð->irq_lock, flags); +} + +static void airoha_qdma_irq_enable(struct airoha_eth *eth, int index, + u32 mask) +{ + airoha_qdma_set_irqmask(eth, index, 0, mask); +} + +static void airoha_qdma_irq_disable(struct airoha_eth *eth, int index, + u32 mask) +{ + airoha_qdma_set_irqmask(eth, index, mask, 0); +} + +static void airoha_set_macaddr(struct airoha_eth *eth, const u8 *addr) +{ + u32 val; + + val = (addr[0] << 16) | (addr[1] << 8) | addr[2]; + airoha_fe_wr(eth, REG_FE_LAN_MAC_H, val); + + val = (addr[3] << 16) | (addr[4] << 8) | addr[5]; + airoha_fe_wr(eth, REG_FE_LAN_MAC_LMIN, val); + airoha_fe_wr(eth, REG_FE_LAN_MAC_LMAX, val); +} + +static void airoha_set_gdm_port_fwd_cfg(struct airoha_eth *eth, u32 addr, + u32 val) +{ + airoha_fe_rmw(eth, addr, GDM_OCFQ_MASK, + FIELD_PREP(GDM_OCFQ_MASK, val)); + airoha_fe_rmw(eth, addr, GDM_MCFQ_MASK, + FIELD_PREP(GDM_MCFQ_MASK, val)); + airoha_fe_rmw(eth, addr, GDM_BCFQ_MASK, + FIELD_PREP(GDM_BCFQ_MASK, val)); + airoha_fe_rmw(eth, addr, GDM_UCFQ_MASK, + FIELD_PREP(GDM_UCFQ_MASK, val)); +} + +static int airoha_set_gdm_port(struct airoha_eth *eth, int port, bool enable) +{ + u32 val = enable ? 
FE_PSE_PORT_PPE1 : FE_PSE_PORT_DROP; + u32 vip_port, cfg_addr; + + switch (port) { + case XSI_PCIE0_PORT: + vip_port = XSI_PCIE0_VIP_PORT_MASK; + cfg_addr = REG_GDM_FWD_CFG(3); + break; + case XSI_PCIE1_PORT: + vip_port = XSI_PCIE1_VIP_PORT_MASK; + cfg_addr = REG_GDM_FWD_CFG(3); + break; + case XSI_USB_PORT: + vip_port = XSI_USB_VIP_PORT_MASK; + cfg_addr = REG_GDM_FWD_CFG(4); + break; + case XSI_ETH_PORT: + vip_port = XSI_ETH_VIP_PORT_MASK; + cfg_addr = REG_GDM_FWD_CFG(4); + break; + default: + return -EINVAL; + } + + if (enable) { + airoha_fe_set(eth, REG_FE_VIP_PORT_EN, vip_port); + airoha_fe_set(eth, REG_FE_IFC_PORT_EN, vip_port); + } else { + airoha_fe_clear(eth, REG_FE_VIP_PORT_EN, vip_port); + airoha_fe_clear(eth, REG_FE_IFC_PORT_EN, vip_port); + } + + airoha_set_gdm_port_fwd_cfg(eth, cfg_addr, val); + + return 0; +} + +static int airoha_set_gdm_ports(struct airoha_eth *eth, bool enable) +{ + const int port_list[] = { + XSI_PCIE0_PORT, + XSI_PCIE1_PORT, + XSI_USB_PORT, + XSI_ETH_PORT + }; + int i, err; + + for (i = 0; i < ARRAY_SIZE(port_list); i++) { + err = airoha_set_gdm_port(eth, port_list[i], enable); + if (err) + goto error; + } + + return 0; + +error: + for (i--; i >= 0; i--) + airoha_set_gdm_port(eth, port_list[i], false); + + return err; +} + +static void airoha_fe_maccr_init(struct airoha_eth *eth) +{ + int p; + + for (p = 1; p <= ARRAY_SIZE(eth->ports); p++) { + airoha_fe_set(eth, REG_GDM_FWD_CFG(p), + GDM_TCP_CKSUM | GDM_UDP_CKSUM | GDM_IP4_CKSUM | + GDM_DROP_CRC_ERR); + airoha_set_gdm_port_fwd_cfg(eth, REG_GDM_FWD_CFG(p), + FE_PSE_PORT_CDM1); + airoha_fe_rmw(eth, REG_GDM_LEN_CFG(p), + GDM_SHORT_LEN_MASK | GDM_LONG_LEN_MASK, + FIELD_PREP(GDM_SHORT_LEN_MASK, 60) | + FIELD_PREP(GDM_LONG_LEN_MASK, 4004)); + } + + airoha_fe_rmw(eth, REG_CDM1_VLAN_CTRL, CDM1_VLAN_MASK, + FIELD_PREP(CDM1_VLAN_MASK, 0x8100)); + + airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PAD); +} + +static void airoha_fe_vip_setup(struct airoha_eth *eth) +{ + airoha_fe_wr(eth, REG_FE_VIP_PATN(3), ETH_P_PPP_DISC); + airoha_fe_wr(eth, REG_FE_VIP_EN(3), PATN_FCPU_EN_MASK | PATN_EN_MASK); + + airoha_fe_wr(eth, REG_FE_VIP_PATN(4), PPP_LCP); + airoha_fe_wr(eth, REG_FE_VIP_EN(4), + PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | + PATN_EN_MASK); + + airoha_fe_wr(eth, REG_FE_VIP_PATN(6), PPP_IPCP); + airoha_fe_wr(eth, REG_FE_VIP_EN(6), + PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | + PATN_EN_MASK); + + airoha_fe_wr(eth, REG_FE_VIP_PATN(7), PPP_CHAP); + airoha_fe_wr(eth, REG_FE_VIP_EN(7), + PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | + PATN_EN_MASK); + + /* BOOTP (0x43) */ + airoha_fe_wr(eth, REG_FE_VIP_PATN(8), 0x43); + airoha_fe_wr(eth, REG_FE_VIP_EN(8), + PATN_FCPU_EN_MASK | PATN_SP_EN_MASK | + FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); + + /* BOOTP (0x44) */ + airoha_fe_wr(eth, REG_FE_VIP_PATN(9), 0x44); + airoha_fe_wr(eth, REG_FE_VIP_EN(9), + PATN_FCPU_EN_MASK | PATN_SP_EN_MASK | + FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); + + /* ISAKMP */ + airoha_fe_wr(eth, REG_FE_VIP_PATN(10), 0x1f401f4); + airoha_fe_wr(eth, REG_FE_VIP_EN(10), + PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK | + FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); + + airoha_fe_wr(eth, REG_FE_VIP_PATN(11), PPP_IPV6CP); + airoha_fe_wr(eth, REG_FE_VIP_EN(11), + PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | + PATN_EN_MASK); + + /* DHCPv6 */ + airoha_fe_wr(eth, REG_FE_VIP_PATN(12), 0x2220223); + airoha_fe_wr(eth, REG_FE_VIP_EN(12), + PATN_FCPU_EN_MASK | PATN_DP_EN_MASK | PATN_SP_EN_MASK | +
FIELD_PREP(PATN_TYPE_MASK, 4) | PATN_EN_MASK); + + airoha_fe_wr(eth, REG_FE_VIP_PATN(19), PPP_PAP); + airoha_fe_wr(eth, REG_FE_VIP_EN(19), + PATN_FCPU_EN_MASK | FIELD_PREP(PATN_TYPE_MASK, 1) | + PATN_EN_MASK); + + /* ETH->ETH_P_1905 (0x893a) */ + airoha_fe_wr(eth, REG_FE_VIP_PATN(20), 0x893a); + airoha_fe_wr(eth, REG_FE_VIP_EN(20), + PATN_FCPU_EN_MASK | PATN_EN_MASK); + + airoha_fe_wr(eth, REG_FE_VIP_PATN(21), ETH_P_LLDP); + airoha_fe_wr(eth, REG_FE_VIP_EN(21), + PATN_FCPU_EN_MASK | PATN_EN_MASK); +} + +static u32 airoha_fe_get_pse_queue_rsv_pages(struct airoha_eth *eth, + u32 port, u32 queue) +{ + u32 val; + + airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR, + PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK, + FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) | + FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue)); + val = airoha_fe_rr(eth, REG_FE_PSE_QUEUE_CFG_VAL); + + return FIELD_GET(PSE_CFG_OQ_RSV_MASK, val); +} + +static void airoha_fe_set_pse_queue_rsv_pages(struct airoha_eth *eth, + u32 port, u32 queue, u32 val) +{ + airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_VAL, PSE_CFG_OQ_RSV_MASK, + FIELD_PREP(PSE_CFG_OQ_RSV_MASK, val)); + airoha_fe_rmw(eth, REG_FE_PSE_QUEUE_CFG_WR, + PSE_CFG_PORT_ID_MASK | PSE_CFG_QUEUE_ID_MASK | + PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK, + FIELD_PREP(PSE_CFG_PORT_ID_MASK, port) | + FIELD_PREP(PSE_CFG_QUEUE_ID_MASK, queue) | + PSE_CFG_WR_EN_MASK | PSE_CFG_OQRSV_SEL_MASK); +} + +static int airoha_fe_set_pse_oq_rsv(struct airoha_eth *eth, + u32 port, u32 queue, u32 val) +{ + u32 orig_val, tmp, all_rsv, fq_limit; + + airoha_fe_set_pse_queue_rsv_pages(eth, port, queue, val); + + /* modify all rsv */ + orig_val = airoha_fe_get_pse_queue_rsv_pages(eth, port, queue); + tmp = airoha_fe_rr(eth, REG_FE_PSE_BUF_SET); + all_rsv = FIELD_GET(PSE_ALLRSV_MASK, tmp); + all_rsv += (val - orig_val); + airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, PSE_ALLRSV_MASK, + FIELD_PREP(PSE_ALLRSV_MASK, all_rsv)); + + /* modify hthd */ + tmp = airoha_fe_rr(eth, PSE_FQ_CFG); + fq_limit = FIELD_GET(PSE_FQ_LIMIT_MASK, tmp); + tmp = fq_limit - all_rsv - 0x20; + airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD, + PSE_SHARE_USED_HTHD_MASK, + FIELD_PREP(PSE_SHARE_USED_HTHD_MASK, tmp)); + + tmp = fq_limit - all_rsv - 0x100; + airoha_fe_rmw(eth, REG_PSE_SHARE_USED_THD, + PSE_SHARE_USED_MTHD_MASK, + FIELD_PREP(PSE_SHARE_USED_MTHD_MASK, tmp)); + tmp = (3 * tmp) >> 2; + airoha_fe_rmw(eth, REG_FE_PSE_BUF_SET, + PSE_SHARE_USED_LTHD_MASK, + FIELD_PREP(PSE_SHARE_USED_LTHD_MASK, tmp)); + + return 0; +} + +static void airoha_fe_pse_ports_init(struct airoha_eth *eth) +{ + const u32 pse_port_num_queues[] = { + [FE_PSE_PORT_CDM1] = 6, + [FE_PSE_PORT_GDM1] = 6, + [FE_PSE_PORT_GDM2] = 32, + [FE_PSE_PORT_GDM3] = 6, + [FE_PSE_PORT_PPE1] = 4, + [FE_PSE_PORT_CDM2] = 6, + [FE_PSE_PORT_CDM3] = 8, + [FE_PSE_PORT_CDM4] = 10, + [FE_PSE_PORT_PPE2] = 4, + [FE_PSE_PORT_GDM4] = 2, + [FE_PSE_PORT_CDM5] = 2, + }; + int q; + + /* hw misses PPE2 oq rsv */ + airoha_fe_set(eth, REG_FE_PSE_BUF_SET, + PSE_RSV_PAGES * pse_port_num_queues[FE_PSE_PORT_PPE2]); + + /* CDM1 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM1]; q++) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM1, q, + PSE_QUEUE_RSV_PAGES); + /* GDM1 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM1]; q++) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM1, q, + PSE_QUEUE_RSV_PAGES); + /* GDM2 */ + for (q = 6; q < pse_port_num_queues[FE_PSE_PORT_GDM2]; q++) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM2, q, 0); + /* GDM3 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM3]; q++) +
airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM3, q, + PSE_QUEUE_RSV_PAGES); + /* PPE1 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE1]; q++) { + if (q < pse_port_num_queues[FE_PSE_PORT_PPE1] / 2) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, + PSE_QUEUE_RSV_PAGES); + else + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE1, q, 0); + } + /* CDM2 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM2]; q++) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM2, q, + PSE_QUEUE_RSV_PAGES); + /* CDM3 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM3] - 1; q++) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM3, q, 0); + /* CDM4 */ + for (q = 4; q < pse_port_num_queues[FE_PSE_PORT_CDM4]; q++) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM4, q, + PSE_QUEUE_RSV_PAGES); + /* PPE2 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_PPE2]; q++) { + if (q < pse_port_num_queues[FE_PSE_PORT_PPE2] / 2) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, + PSE_QUEUE_RSV_PAGES); + else + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_PPE2, q, 0); + } + /* GDM4 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_GDM4]; q++) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_GDM4, q, + PSE_QUEUE_RSV_PAGES); + /* CDM5 */ + for (q = 0; q < pse_port_num_queues[FE_PSE_PORT_CDM5]; q++) + airoha_fe_set_pse_oq_rsv(eth, FE_PSE_PORT_CDM5, q, + PSE_QUEUE_RSV_PAGES); +} + +static int airoha_fe_mc_vlan_clear(struct airoha_eth *eth) +{ + int i; + + for (i = 0; i < AIROHA_FE_MC_MAX_VLAN_TABLE; i++) { + int err, j; + u32 val; + + airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0); + + val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) | + MC_VLAN_CFG_TABLE_SEL_MASK | MC_VLAN_CFG_RW_MASK; + airoha_fe_wr(eth, REG_MC_VLAN_CFG, val); + err = read_poll_timeout(airoha_fe_rr, val, + val & MC_VLAN_CFG_CMD_DONE_MASK, + USEC_PER_MSEC, 5 * USEC_PER_MSEC, + false, eth, REG_MC_VLAN_CFG); + if (err) + return err; + + for (j = 0; j < AIROHA_FE_MC_MAX_VLAN_PORT; j++) { + airoha_fe_wr(eth, REG_MC_VLAN_DATA, 0x0); + + val = FIELD_PREP(MC_VLAN_CFG_TABLE_ID_MASK, i) | + FIELD_PREP(MC_VLAN_CFG_PORT_ID_MASK, j) | + MC_VLAN_CFG_RW_MASK; + airoha_fe_wr(eth, REG_MC_VLAN_CFG, val); + err = read_poll_timeout(airoha_fe_rr, val, + val & MC_VLAN_CFG_CMD_DONE_MASK, + USEC_PER_MSEC, + 5 * USEC_PER_MSEC, false, eth, + REG_MC_VLAN_CFG); + if (err) + return err; + } + } + + return 0; +} + +static void airoha_fe_crsn_qsel_init(struct airoha_eth *eth) +{ + /* CDM1_CRSN_QSEL */ + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_22 >> 2), + CDM1_CRSN_QSEL_REASON_MASK(CRSN_22), + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_22), + CDM_CRSN_QSEL_Q1)); + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_08 >> 2), + CDM1_CRSN_QSEL_REASON_MASK(CRSN_08), + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_08), + CDM_CRSN_QSEL_Q1)); + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_21 >> 2), + CDM1_CRSN_QSEL_REASON_MASK(CRSN_21), + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_21), + CDM_CRSN_QSEL_Q1)); + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_24 >> 2), + CDM1_CRSN_QSEL_REASON_MASK(CRSN_24), + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_24), + CDM_CRSN_QSEL_Q6)); + airoha_fe_rmw(eth, REG_CDM1_CRSN_QSEL(CRSN_25 >> 2), + CDM1_CRSN_QSEL_REASON_MASK(CRSN_25), + FIELD_PREP(CDM1_CRSN_QSEL_REASON_MASK(CRSN_25), + CDM_CRSN_QSEL_Q1)); + /* CDM2_CRSN_QSEL */ + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_08 >> 2), + CDM2_CRSN_QSEL_REASON_MASK(CRSN_08), + FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_08), + CDM_CRSN_QSEL_Q1)); + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_21 >> 2), + CDM2_CRSN_QSEL_REASON_MASK(CRSN_21),
+ FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_21), + CDM_CRSN_QSEL_Q1)); + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_22 >> 2), + CDM2_CRSN_QSEL_REASON_MASK(CRSN_22), + FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_22), + CDM_CRSN_QSEL_Q1)); + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_24 >> 2), + CDM2_CRSN_QSEL_REASON_MASK(CRSN_24), + FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_24), + CDM_CRSN_QSEL_Q6)); + airoha_fe_rmw(eth, REG_CDM2_CRSN_QSEL(CRSN_25 >> 2), + CDM2_CRSN_QSEL_REASON_MASK(CRSN_25), + FIELD_PREP(CDM2_CRSN_QSEL_REASON_MASK(CRSN_25), + CDM_CRSN_QSEL_Q1)); +} + +static int airoha_fe_init(struct airoha_eth *eth) +{ + airoha_fe_maccr_init(eth); + + /* PSE IQ reserve */ + airoha_fe_rmw(eth, REG_PSE_IQ_REV1, PSE_IQ_RES1_P2_MASK, + FIELD_PREP(PSE_IQ_RES1_P2_MASK, 0x10)); + airoha_fe_rmw(eth, REG_PSE_IQ_REV2, + PSE_IQ_RES2_P5_MASK | PSE_IQ_RES2_P4_MASK, + FIELD_PREP(PSE_IQ_RES2_P5_MASK, 0x40) | + FIELD_PREP(PSE_IQ_RES2_P4_MASK, 0x34)); + + /* enable FE copy engine for MC/KA/DPI */ + airoha_fe_wr(eth, REG_FE_PCE_CFG, + PCE_DPI_EN_MASK | PCE_KA_EN_MASK | PCE_MC_EN_MASK); + /* set vip queue selection to ring 1 */ + airoha_fe_rmw(eth, REG_CDM1_FWD_CFG, CDM1_VIP_QSEL_MASK, + FIELD_PREP(CDM1_VIP_QSEL_MASK, 0x4)); + airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_VIP_QSEL_MASK, + FIELD_PREP(CDM2_VIP_QSEL_MASK, 0x4)); + /* set GDM4 source interface offset to 8 */ + airoha_fe_rmw(eth, REG_GDM4_SRC_PORT_SET, + GDM4_SPORT_OFF2_MASK | + GDM4_SPORT_OFF1_MASK | + GDM4_SPORT_OFF0_MASK, + FIELD_PREP(GDM4_SPORT_OFF2_MASK, 8) | + FIELD_PREP(GDM4_SPORT_OFF1_MASK, 8) | + FIELD_PREP(GDM4_SPORT_OFF0_MASK, 8)); + + /* set PSE Page as 128B */ + airoha_fe_rmw(eth, REG_FE_DMA_GLO_CFG, + FE_DMA_GLO_L2_SPACE_MASK | FE_DMA_GLO_PG_SZ_MASK, + FIELD_PREP(FE_DMA_GLO_L2_SPACE_MASK, 2) | + FE_DMA_GLO_PG_SZ_MASK); + airoha_fe_wr(eth, REG_FE_RST_GLO_CFG, + FE_RST_CORE_MASK | FE_RST_GDM3_MBI_ARB_MASK | + FE_RST_GDM4_MBI_ARB_MASK); + usleep_range(1000, 2000); + + /* connect RxRing1 and RxRing15 to PSE Port0 OQ-1 + * connect other rings to PSE Port0 OQ-0 + */ + airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP0, BIT(4)); + airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP1, BIT(28)); + airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP2, BIT(4)); + airoha_fe_wr(eth, REG_FE_CDM1_OQ_MAP3, BIT(28)); + + airoha_fe_vip_setup(eth); + airoha_fe_pse_ports_init(eth); + + airoha_fe_set(eth, REG_GDM_MISC_CFG, + GDM2_RDM_ACK_WAIT_PREF_MASK | + GDM2_CHN_VLD_MODE_MASK); + airoha_fe_rmw(eth, REG_CDM2_FWD_CFG, CDM2_OAM_QSEL_MASK, 15); + + /* init fragment and assemble Force Port */ + /* NPU Core-3, NPU Bridge Channel-3 */ + airoha_fe_rmw(eth, REG_IP_FRAG_FP, + IP_FRAGMENT_PORT_MASK | IP_FRAGMENT_NBQ_MASK, + FIELD_PREP(IP_FRAGMENT_PORT_MASK, 6) | + FIELD_PREP(IP_FRAGMENT_NBQ_MASK, 3)); + /* QDMA LAN, RX Ring-22 */ + airoha_fe_rmw(eth, REG_IP_FRAG_FP, + IP_ASSEMBLE_PORT_MASK | IP_ASSEMBLE_NBQ_MASK, + FIELD_PREP(IP_ASSEMBLE_PORT_MASK, 0) | + FIELD_PREP(IP_ASSEMBLE_NBQ_MASK, 22)); + + airoha_fe_set(eth, REG_GDM3_FWD_CFG, GDM3_PAD_EN_MASK); + airoha_fe_set(eth, REG_GDM4_FWD_CFG, GDM4_PAD_EN_MASK); + + airoha_fe_crsn_qsel_init(eth); + + airoha_fe_clear(eth, REG_FE_CPORT_CFG, FE_CPORT_QUEUE_XFC_MASK); + airoha_fe_set(eth, REG_FE_CPORT_CFG, FE_CPORT_PORT_XFC_MASK); + + /* default aging mode for mbi unlock issue */ + airoha_fe_rmw(eth, REG_GDM2_CHN_RLS, + MBI_RX_AGE_SEL_MASK | MBI_TX_AGE_SEL_MASK, + FIELD_PREP(MBI_RX_AGE_SEL_MASK, 3) | + FIELD_PREP(MBI_TX_AGE_SEL_MASK, 3)); + + /* disable IFC by default */ + airoha_fe_clear(eth, REG_FE_CSR_IFC_CFG, FE_IFC_EN_MASK); + + /* enable 1:N vlan action, 
init vlan table */ + airoha_fe_set(eth, REG_MC_VLAN_EN, MC_VLAN_EN_MASK); + + return airoha_fe_mc_vlan_clear(eth); +} + +static int airoha_qdma_fill_rx_queue(struct airoha_queue *q) +{ + enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); + struct airoha_eth *eth = q->eth; + int qid = q - &eth->q_rx[0]; + int nframes = 0; + + while (q->queued < q->ndesc - 1) { + struct airoha_queue_entry *e = &q->entry[q->head]; + struct airoha_qdma_desc *desc = &q->desc[q->head]; + struct page *page; + int offset; + u32 val; + + page = page_pool_dev_alloc_frag(q->page_pool, &offset, + q->buf_size); + if (!page) + break; + + q->head = (q->head + 1) % q->ndesc; + q->queued++; + nframes++; + + e->buf = page_address(page) + offset; + e->dma_addr = page_pool_get_dma_addr(page) + offset; + e->dma_len = SKB_WITH_OVERHEAD(q->buf_size); + + dma_sync_single_for_device(eth->dev, e->dma_addr, e->dma_len, + dir); + + val = FIELD_PREP(QDMA_DESC_LEN_MASK, e->dma_len); + WRITE_ONCE(desc->ctrl, cpu_to_le32(val)); + WRITE_ONCE(desc->addr, cpu_to_le32(e->dma_addr)); + val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, q->head); + WRITE_ONCE(desc->data, cpu_to_le32(val)); + WRITE_ONCE(desc->msg0, 0); + WRITE_ONCE(desc->msg1, 0); + WRITE_ONCE(desc->msg2, 0); + WRITE_ONCE(desc->msg3, 0); + + airoha_qdma_rmw(eth, REG_RX_CPU_IDX(qid), RX_RING_CPU_IDX_MASK, + FIELD_PREP(RX_RING_CPU_IDX_MASK, q->head)); + } + + return nframes; +} + +static int airoha_qdma_get_gdm_port(struct airoha_eth *eth, + struct airoha_qdma_desc *desc) +{ + u32 port, sport, msg1 = le32_to_cpu(desc->msg1); + + sport = FIELD_GET(QDMA_ETH_RXMSG_SPORT_MASK, msg1); + switch (sport) { + case 0x10 ... 0x13: + port = 0; + break; + case 0x2 ... 0x4: + port = sport - 1; + break; + default: + return -EINVAL; + } + + return port >= ARRAY_SIZE(eth->ports) ? 
-EINVAL : port; +} + +static int airoha_qdma_rx_process(struct airoha_queue *q, int budget) +{ + enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); + struct airoha_eth *eth = q->eth; + int qid = q - &eth->q_rx[0]; + int done = 0; + + while (done < budget) { + struct airoha_queue_entry *e = &q->entry[q->tail]; + struct airoha_qdma_desc *desc = &q->desc[q->tail]; + dma_addr_t dma_addr = le32_to_cpu(desc->addr); + u32 desc_ctrl = le32_to_cpu(desc->ctrl); + struct sk_buff *skb; + int len, p; + + if (!(desc_ctrl & QDMA_DESC_DONE_MASK)) + break; + + if (!dma_addr) + break; + + len = FIELD_GET(QDMA_DESC_LEN_MASK, desc_ctrl); + if (!len) + break; + + q->tail = (q->tail + 1) % q->ndesc; + q->queued--; + + dma_sync_single_for_cpu(eth->dev, dma_addr, + SKB_WITH_OVERHEAD(q->buf_size), dir); + + p = airoha_qdma_get_gdm_port(eth, desc); + if (p < 0 || !eth->ports[p]) { + page_pool_put_full_page(q->page_pool, + virt_to_head_page(e->buf), + true); + continue; + } + + skb = napi_build_skb(e->buf, q->buf_size); + if (!skb) { + page_pool_put_full_page(q->page_pool, + virt_to_head_page(e->buf), + true); + break; + } + + skb_reserve(skb, 2); + __skb_put(skb, len); + skb_mark_for_recycle(skb); + skb->dev = eth->ports[p]->dev; + skb->protocol = eth_type_trans(skb, skb->dev); + skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_record_rx_queue(skb, qid); + napi_gro_receive(&q->napi, skb); + + done++; + } + airoha_qdma_fill_rx_queue(q); + + return done; +} + +static int airoha_qdma_rx_napi_poll(struct napi_struct *napi, int budget) +{ + struct airoha_queue *q = container_of(napi, struct airoha_queue, napi); + struct airoha_eth *eth = q->eth; + int cur, done = 0; + + do { + cur = airoha_qdma_rx_process(q, budget - done); + done += cur; + } while (cur && done < budget); + + if (done < budget && napi_complete(napi)) + airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1, + RX_DONE_INT_MASK); + + return done; +} + +static int airoha_qdma_init_rx_queue(struct airoha_eth *eth, + struct airoha_queue *q, int ndesc) +{ + const struct page_pool_params pp_params = { + .order = 0, + .pool_size = 256, + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .dma_dir = DMA_FROM_DEVICE, + .max_len = PAGE_SIZE, + .nid = NUMA_NO_NODE, + .dev = eth->dev, + .napi = &q->napi, + }; + int qid = q - &eth->q_rx[0], thr; + dma_addr_t dma_addr; + + q->buf_size = PAGE_SIZE / 2; + q->ndesc = ndesc; + q->eth = eth; + + q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), + GFP_KERNEL); + if (!q->entry) + return -ENOMEM; + + q->page_pool = page_pool_create(&pp_params); + if (IS_ERR(q->page_pool)) { + int err = PTR_ERR(q->page_pool); + + q->page_pool = NULL; + return err; + } + + q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc), + &dma_addr, GFP_KERNEL); + if (!q->desc) + return -ENOMEM; + + netif_napi_add(eth->napi_dev, &q->napi, airoha_qdma_rx_napi_poll); + + airoha_qdma_wr(eth, REG_RX_RING_BASE(qid), dma_addr); + airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_SIZE_MASK, + FIELD_PREP(RX_RING_SIZE_MASK, ndesc)); + + thr = clamp(ndesc >> 3, 1, 32); + airoha_qdma_rmw(eth, REG_RX_RING_SIZE(qid), RX_RING_THR_MASK, + FIELD_PREP(RX_RING_THR_MASK, thr)); + airoha_qdma_rmw(eth, REG_RX_DMA_IDX(qid), RX_RING_DMA_IDX_MASK, + FIELD_PREP(RX_RING_DMA_IDX_MASK, q->head)); + + airoha_qdma_fill_rx_queue(q); + + return 0; +} + +static void airoha_qdma_cleanup_rx_queue(struct airoha_queue *q) +{ + enum dma_data_direction dir = page_pool_get_dma_dir(q->page_pool); + struct airoha_eth *eth = q->eth; + + while (q->queued) { + struct 
airoha_queue_entry *e = &q->entry[q->tail]; + struct page *page = virt_to_head_page(e->buf); + + dma_sync_single_for_cpu(eth->dev, e->dma_addr, e->dma_len, + dir); + page_pool_put_full_page(q->page_pool, page, false); + q->tail = (q->tail + 1) % q->ndesc; + q->queued--; + } +} + +static int airoha_qdma_init_rx(struct airoha_eth *eth) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) { + int err; + + if (!(RX_DONE_INT_MASK & BIT(i))) { + /* rx-queue not bound to irq */ + continue; + } + + err = airoha_qdma_init_rx_queue(eth, &eth->q_rx[i], + RX_DSCP_NUM(i)); + if (err) + return err; + } + + return 0; +} + +static int airoha_qdma_tx_napi_poll(struct napi_struct *napi, int budget) +{ + struct airoha_tx_irq_queue *irq_q; + struct airoha_eth *eth; + int id, done = 0; + + irq_q = container_of(napi, struct airoha_tx_irq_queue, napi); + eth = irq_q->eth; + id = irq_q - &eth->q_tx_irq[0]; + + while (irq_q->queued > 0 && done < budget) { + u32 qid, last, val = irq_q->q[irq_q->head]; + struct airoha_queue *q; + + if (val == 0xff) + break; + + irq_q->q[irq_q->head] = 0xff; /* mark as done */ + irq_q->head = (irq_q->head + 1) % irq_q->size; + irq_q->queued--; + done++; + + last = FIELD_GET(IRQ_DESC_IDX_MASK, val); + qid = FIELD_GET(IRQ_RING_IDX_MASK, val); + + if (qid >= ARRAY_SIZE(eth->q_tx)) + continue; + + q = &eth->q_tx[qid]; + if (!q->ndesc) + continue; + + spin_lock_bh(&q->lock); + + while (q->queued > 0) { + struct airoha_qdma_desc *desc = &q->desc[q->tail]; + struct airoha_queue_entry *e = &q->entry[q->tail]; + u32 desc_ctrl = le32_to_cpu(desc->ctrl); + struct sk_buff *skb = e->skb; + u16 index = q->tail; + + if (!(desc_ctrl & QDMA_DESC_DONE_MASK) && + !(desc_ctrl & QDMA_DESC_DROP_MASK)) + break; + + q->tail = (q->tail + 1) % q->ndesc; + q->queued--; + + dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, + DMA_TO_DEVICE); + + WRITE_ONCE(desc->msg0, 0); + WRITE_ONCE(desc->msg1, 0); + + if (skb) { + struct netdev_queue *txq; + + txq = netdev_get_tx_queue(skb->dev, qid); + if (netif_tx_queue_stopped(txq) && + q->ndesc - q->queued >= q->free_thr) + netif_tx_wake_queue(txq); + + dev_kfree_skb_any(skb); + e->skb = NULL; + } + + if (index == last) + break; + } + + spin_unlock_bh(&q->lock); + } + + if (done) { + int i, len = done >> 7; + + for (i = 0; i < len; i++) + airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id), + IRQ_CLEAR_LEN_MASK, 0x80); + airoha_qdma_rmw(eth, REG_IRQ_CLEAR_LEN(id), + IRQ_CLEAR_LEN_MASK, (done & 0x7f)); + } + + if (done < budget && napi_complete(napi)) + airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, + TX_DONE_INT_MASK(id)); + + return done; +} + +static int airoha_qdma_init_tx_queue(struct airoha_eth *eth, + struct airoha_queue *q, int size) +{ + int i, qid = q - &eth->q_tx[0]; + dma_addr_t dma_addr; + + spin_lock_init(&q->lock); + q->ndesc = size; + q->eth = eth; + q->free_thr = 1 + MAX_SKB_FRAGS; + + q->entry = devm_kzalloc(eth->dev, q->ndesc * sizeof(*q->entry), + GFP_KERNEL); + if (!q->entry) + return -ENOMEM; + + q->desc = dmam_alloc_coherent(eth->dev, q->ndesc * sizeof(*q->desc), + &dma_addr, GFP_KERNEL); + if (!q->desc) + return -ENOMEM; + + for (i = 0; i < q->ndesc; i++) { + u32 val; + + val = FIELD_PREP(QDMA_DESC_DONE_MASK, 1); + WRITE_ONCE(q->desc[i].ctrl, cpu_to_le32(val)); + } + + airoha_qdma_wr(eth, REG_TX_RING_BASE(qid), dma_addr); + airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK, + FIELD_PREP(TX_RING_CPU_IDX_MASK, q->head)); + airoha_qdma_rmw(eth, REG_TX_DMA_IDX(qid), TX_RING_DMA_IDX_MASK, + FIELD_PREP(TX_RING_DMA_IDX_MASK, q->head)); + + return 0; +} + 
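[Editor's aside, not part of the patch: the RX and TX paths above all share the same single-producer/single-consumer ring discipline -- head is the producer index, tail the consumer index, queued the occupancy, and the RX fill loop deliberately keeps one descriptor slot free. The sketch below only illustrates that modulo arithmetic; the struct and function names are hypothetical, not driver API.]

/* Minimal sketch of the ring bookkeeping used by the airoha queues
 * above (hypothetical names, illustration only).
 */
struct ring {
	unsigned int head;	/* next slot the producer fills */
	unsigned int tail;	/* next slot the consumer drains */
	unsigned int queued;	/* current occupancy */
	unsigned int ndesc;	/* ring size */
};

static int ring_produce(struct ring *r)
{
	/* keep one slot unused, as airoha_qdma_fill_rx_queue does */
	if (r->queued >= r->ndesc - 1)
		return -1;
	r->head = (r->head + 1) % r->ndesc;
	r->queued++;
	return 0;
}

static int ring_consume(struct ring *r)
{
	if (!r->queued)
		return -1;
	r->tail = (r->tail + 1) % r->ndesc;
	r->queued--;
	return 0;
}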
+static int airoha_qdma_tx_irq_init(struct airoha_eth *eth, + struct airoha_tx_irq_queue *irq_q, + int size) +{ + int id = irq_q - &eth->q_tx_irq[0]; + dma_addr_t dma_addr; + + netif_napi_add_tx(eth->napi_dev, &irq_q->napi, + airoha_qdma_tx_napi_poll); + irq_q->q = dmam_alloc_coherent(eth->dev, size * sizeof(u32), + &dma_addr, GFP_KERNEL); + if (!irq_q->q) + return -ENOMEM; + + memset(irq_q->q, 0xff, size * sizeof(u32)); + irq_q->size = size; + irq_q->eth = eth; + + airoha_qdma_wr(eth, REG_TX_IRQ_BASE(id), dma_addr); + airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_DEPTH_MASK, + FIELD_PREP(TX_IRQ_DEPTH_MASK, size)); + airoha_qdma_rmw(eth, REG_TX_IRQ_CFG(id), TX_IRQ_THR_MASK, + FIELD_PREP(TX_IRQ_THR_MASK, 1)); + + return 0; +} + +static int airoha_qdma_init_tx(struct airoha_eth *eth) +{ + int i, err; + + for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) { + err = airoha_qdma_tx_irq_init(eth, &eth->q_tx_irq[i], + IRQ_QUEUE_LEN(i)); + if (err) + return err; + } + + for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) { + err = airoha_qdma_init_tx_queue(eth, &eth->q_tx[i], + TX_DSCP_NUM); + if (err) + return err; + } + + return 0; +} + +static void airoha_qdma_cleanup_tx_queue(struct airoha_queue *q) +{ + struct airoha_eth *eth = q->eth; + + spin_lock_bh(&q->lock); + while (q->queued) { + struct airoha_queue_entry *e = &q->entry[q->tail]; + + dma_unmap_single(eth->dev, e->dma_addr, e->dma_len, + DMA_TO_DEVICE); + dev_kfree_skb_any(e->skb); + e->skb = NULL; + + q->tail = (q->tail + 1) % q->ndesc; + q->queued--; + } + spin_unlock_bh(&q->lock); +} + +static int airoha_qdma_init_hfwd_queues(struct airoha_eth *eth) +{ + dma_addr_t dma_addr; + u32 status; + int size; + + size = HW_DSCP_NUM * sizeof(struct airoha_qdma_fwd_desc); + eth->hfwd.desc = dmam_alloc_coherent(eth->dev, size, &dma_addr, + GFP_KERNEL); + if (!eth->hfwd.desc) + return -ENOMEM; + + airoha_qdma_wr(eth, REG_FWD_DSCP_BASE, dma_addr); + + size = AIROHA_MAX_PACKET_SIZE * HW_DSCP_NUM; + eth->hfwd.q = dmam_alloc_coherent(eth->dev, size, &dma_addr, + GFP_KERNEL); + if (!eth->hfwd.q) + return -ENOMEM; + + airoha_qdma_wr(eth, REG_FWD_BUF_BASE, dma_addr); + + airoha_qdma_rmw(eth, REG_HW_FWD_DSCP_CFG, + HW_FWD_DSCP_PAYLOAD_SIZE_MASK, + FIELD_PREP(HW_FWD_DSCP_PAYLOAD_SIZE_MASK, 0)); + airoha_qdma_rmw(eth, REG_FWD_DSCP_LOW_THR, FWD_DSCP_LOW_THR_MASK, + FIELD_PREP(FWD_DSCP_LOW_THR_MASK, 128)); + airoha_qdma_rmw(eth, REG_LMGR_INIT_CFG, + LMGR_INIT_START | LMGR_SRAM_MODE_MASK | + HW_FWD_DESC_NUM_MASK, + FIELD_PREP(HW_FWD_DESC_NUM_MASK, HW_DSCP_NUM) | + LMGR_INIT_START); + + return read_poll_timeout(airoha_qdma_rr, status, + !(status & LMGR_INIT_START), USEC_PER_MSEC, + 30 * USEC_PER_MSEC, true, eth, + REG_LMGR_INIT_CFG); +} + +static void airoha_qdma_init_qos(struct airoha_eth *eth) +{ + airoha_qdma_clear(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_SCALE_MASK); + airoha_qdma_set(eth, REG_TXWRR_MODE_CFG, TWRR_WEIGHT_BASE_MASK); + + airoha_qdma_clear(eth, REG_PSE_BUF_USAGE_CFG, + PSE_BUF_ESTIMATE_EN_MASK); + + airoha_qdma_set(eth, REG_EGRESS_RATE_METER_CFG, + EGRESS_RATE_METER_EN_MASK | + EGRESS_RATE_METER_EQ_RATE_EN_MASK); + /* 2047us x 31 = 63.457ms */ + airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG, + EGRESS_RATE_METER_WINDOW_SZ_MASK, + FIELD_PREP(EGRESS_RATE_METER_WINDOW_SZ_MASK, 0x1f)); + airoha_qdma_rmw(eth, REG_EGRESS_RATE_METER_CFG, + EGRESS_RATE_METER_TIMESLICE_MASK, + FIELD_PREP(EGRESS_RATE_METER_TIMESLICE_MASK, 0x7ff)); + + /* ratelimit init */ + airoha_qdma_set(eth, REG_GLB_TRTCM_CFG, GLB_TRTCM_EN_MASK); + /* fast-tick 25us */ + airoha_qdma_rmw(eth, 
REG_GLB_TRTCM_CFG, GLB_FAST_TICK_MASK, + FIELD_PREP(GLB_FAST_TICK_MASK, 25)); + airoha_qdma_rmw(eth, REG_GLB_TRTCM_CFG, GLB_SLOW_TICK_RATIO_MASK, + FIELD_PREP(GLB_SLOW_TICK_RATIO_MASK, 40)); + + airoha_qdma_set(eth, REG_EGRESS_TRTCM_CFG, EGRESS_TRTCM_EN_MASK); + airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG, EGRESS_FAST_TICK_MASK, + FIELD_PREP(EGRESS_FAST_TICK_MASK, 25)); + airoha_qdma_rmw(eth, REG_EGRESS_TRTCM_CFG, + EGRESS_SLOW_TICK_RATIO_MASK, + FIELD_PREP(EGRESS_SLOW_TICK_RATIO_MASK, 40)); + + airoha_qdma_set(eth, REG_INGRESS_TRTCM_CFG, INGRESS_TRTCM_EN_MASK); + airoha_qdma_clear(eth, REG_INGRESS_TRTCM_CFG, + INGRESS_TRTCM_MODE_MASK); + airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG, INGRESS_FAST_TICK_MASK, + FIELD_PREP(INGRESS_FAST_TICK_MASK, 125)); + airoha_qdma_rmw(eth, REG_INGRESS_TRTCM_CFG, + INGRESS_SLOW_TICK_RATIO_MASK, + FIELD_PREP(INGRESS_SLOW_TICK_RATIO_MASK, 8)); + + airoha_qdma_set(eth, REG_SLA_TRTCM_CFG, SLA_TRTCM_EN_MASK); + airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_FAST_TICK_MASK, + FIELD_PREP(SLA_FAST_TICK_MASK, 25)); + airoha_qdma_rmw(eth, REG_SLA_TRTCM_CFG, SLA_SLOW_TICK_RATIO_MASK, + FIELD_PREP(SLA_SLOW_TICK_RATIO_MASK, 40)); +} + +static int airoha_qdma_hw_init(struct airoha_eth *eth) +{ + int i; + + /* clear pending irqs */ + for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) + airoha_qdma_wr(eth, REG_INT_STATUS(i), 0xffffffff); + + /* setup irqs */ + airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX0, INT_IDX0_MASK); + airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX1, INT_IDX1_MASK); + airoha_qdma_irq_enable(eth, QDMA_INT_REG_IDX4, INT_IDX4_MASK); + + /* setup irq binding */ + for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) { + if (!eth->q_tx[i].ndesc) + continue; + + if (TX_RING_IRQ_BLOCKING_MAP_MASK & BIT(i)) + airoha_qdma_set(eth, REG_TX_RING_BLOCKING(i), + TX_RING_IRQ_BLOCKING_CFG_MASK); + else + airoha_qdma_clear(eth, REG_TX_RING_BLOCKING(i), + TX_RING_IRQ_BLOCKING_CFG_MASK); + } + + airoha_qdma_wr(eth, REG_QDMA_GLOBAL_CFG, + GLOBAL_CFG_RX_2B_OFFSET_MASK | + FIELD_PREP(GLOBAL_CFG_DMA_PREFERENCE_MASK, 3) | + GLOBAL_CFG_CPU_TXR_RR_MASK | + GLOBAL_CFG_PAYLOAD_BYTE_SWAP_MASK | + GLOBAL_CFG_MULTICAST_MODIFY_FP_MASK | + GLOBAL_CFG_MULTICAST_EN_MASK | + GLOBAL_CFG_IRQ0_EN_MASK | GLOBAL_CFG_IRQ1_EN_MASK | + GLOBAL_CFG_TX_WB_DONE_MASK | + FIELD_PREP(GLOBAL_CFG_MAX_ISSUE_NUM_MASK, 2)); + + airoha_qdma_init_qos(eth); + + /* disable qdma rx delay interrupt */ + for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) { + if (!eth->q_rx[i].ndesc) + continue; + + airoha_qdma_clear(eth, REG_RX_DELAY_INT_IDX(i), + RX_DELAY_INT_MASK); + } + + airoha_qdma_set(eth, REG_TXQ_CNGST_CFG, + TXQ_CNGST_DROP_EN | TXQ_CNGST_DEI_DROP_EN); + + return 0; +} + +static irqreturn_t airoha_irq_handler(int irq, void *dev_instance) +{ + struct airoha_eth *eth = dev_instance; + u32 intr[ARRAY_SIZE(eth->irqmask)]; + int i; + + for (i = 0; i < ARRAY_SIZE(eth->irqmask); i++) { + intr[i] = airoha_qdma_rr(eth, REG_INT_STATUS(i)); + intr[i] &= eth->irqmask[i]; + airoha_qdma_wr(eth, REG_INT_STATUS(i), intr[i]); + } + + if (!test_bit(DEV_STATE_INITIALIZED, &eth->state)) + return IRQ_NONE; + + if (intr[1] & RX_DONE_INT_MASK) { + airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX1, + RX_DONE_INT_MASK); + + for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) { + if (!eth->q_rx[i].ndesc) + continue; + + if (intr[1] & BIT(i)) + napi_schedule(&eth->q_rx[i].napi); + } + } + + if (intr[0] & INT_TX_MASK) { + for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) { + struct airoha_tx_irq_queue *irq_q = &eth->q_tx_irq[i]; + u32 status, head; + + if (!(intr[0] & 
TX_DONE_INT_MASK(i))) + continue; + + airoha_qdma_irq_disable(eth, QDMA_INT_REG_IDX0, + TX_DONE_INT_MASK(i)); + + status = airoha_qdma_rr(eth, REG_IRQ_STATUS(i)); + head = FIELD_GET(IRQ_HEAD_IDX_MASK, status); + irq_q->head = head % irq_q->size; + irq_q->queued = FIELD_GET(IRQ_ENTRY_LEN_MASK, status); + + napi_schedule(&eth->q_tx_irq[i].napi); + } + } + + return IRQ_HANDLED; +} + +static int airoha_qdma_init(struct airoha_eth *eth) +{ + int err; + + err = devm_request_irq(eth->dev, eth->irq, airoha_irq_handler, + IRQF_SHARED, KBUILD_MODNAME, eth); + if (err) + return err; + + err = airoha_qdma_init_rx(eth); + if (err) + return err; + + err = airoha_qdma_init_tx(eth); + if (err) + return err; + + err = airoha_qdma_init_hfwd_queues(eth); + if (err) + return err; + + err = airoha_qdma_hw_init(eth); + if (err) + return err; + + set_bit(DEV_STATE_INITIALIZED, &eth->state); + + return 0; +} + +static int airoha_hw_init(struct airoha_eth *eth) +{ + int err; + + /* disable xsi */ + reset_control_bulk_assert(ARRAY_SIZE(eth->xsi_rsts), eth->xsi_rsts); + + reset_control_bulk_assert(ARRAY_SIZE(eth->rsts), eth->rsts); + msleep(20); + reset_control_bulk_deassert(ARRAY_SIZE(eth->rsts), eth->rsts); + msleep(20); + + err = airoha_fe_init(eth); + if (err) + return err; + + return airoha_qdma_init(eth); +} + +static void airoha_hw_cleanup(struct airoha_eth *eth) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) { + if (!eth->q_rx[i].ndesc) + continue; + + napi_disable(&eth->q_rx[i].napi); + netif_napi_del(&eth->q_rx[i].napi); + airoha_qdma_cleanup_rx_queue(&eth->q_rx[i]); + if (eth->q_rx[i].page_pool) + page_pool_destroy(eth->q_rx[i].page_pool); + } + + for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) { + napi_disable(&eth->q_tx_irq[i].napi); + netif_napi_del(&eth->q_tx_irq[i].napi); + } + + for (i = 0; i < ARRAY_SIZE(eth->q_tx); i++) { + if (!eth->q_tx[i].ndesc) + continue; + + airoha_qdma_cleanup_tx_queue(&eth->q_tx[i]); + } +} + +static void airoha_qdma_start_napi(struct airoha_eth *eth) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(eth->q_tx_irq); i++) + napi_enable(&eth->q_tx_irq[i].napi); + + for (i = 0; i < ARRAY_SIZE(eth->q_rx); i++) { + if (!eth->q_rx[i].ndesc) + continue; + + napi_enable(&eth->q_rx[i].napi); + } +} + +static void airoha_update_hw_stats(struct airoha_gdm_port *port) +{ + struct airoha_eth *eth = port->eth; + u32 val, i = 0; + + spin_lock(&port->stats.lock); + u64_stats_update_begin(&port->stats.syncp); + + /* TX */ + val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_H(port->id)); + port->stats.tx_ok_pkts += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_PKT_CNT_L(port->id)); + port->stats.tx_ok_pkts += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_H(port->id)); + port->stats.tx_ok_bytes += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_TX_OK_BYTE_CNT_L(port->id)); + port->stats.tx_ok_bytes += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_DROP_CNT(port->id)); + port->stats.tx_drops += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_BC_CNT(port->id)); + port->stats.tx_broadcast += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_MC_CNT(port->id)); + port->stats.tx_multicast += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_RUNT_CNT(port->id)); + port->stats.tx_len[i] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_H(port->id)); + port->stats.tx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_E64_CNT_L(port->id)); + port->stats.tx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_H(port->id)); + 
port->stats.tx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L64_CNT_L(port->id)); + port->stats.tx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_H(port->id)); + port->stats.tx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L127_CNT_L(port->id)); + port->stats.tx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_H(port->id)); + port->stats.tx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L255_CNT_L(port->id)); + port->stats.tx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_H(port->id)); + port->stats.tx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L511_CNT_L(port->id)); + port->stats.tx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_H(port->id)); + port->stats.tx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_L1023_CNT_L(port->id)); + port->stats.tx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_TX_ETH_LONG_CNT(port->id)); + port->stats.tx_len[i++] += val; + + /* RX */ + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_H(port->id)); + port->stats.rx_ok_pkts += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_PKT_CNT_L(port->id)); + port->stats.rx_ok_pkts += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_H(port->id)); + port->stats.rx_ok_bytes += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OK_BYTE_CNT_L(port->id)); + port->stats.rx_ok_bytes += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_DROP_CNT(port->id)); + port->stats.rx_drops += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_BC_CNT(port->id)); + port->stats.rx_broadcast += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_MC_CNT(port->id)); + port->stats.rx_multicast += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ERROR_DROP_CNT(port->id)); + port->stats.rx_errors += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_CRC_ERR_CNT(port->id)); + port->stats.rx_crc_error += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_OVERFLOW_DROP_CNT(port->id)); + port->stats.rx_over_errors += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_FRAG_CNT(port->id)); + port->stats.rx_fragment += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_JABBER_CNT(port->id)); + port->stats.rx_jabber += val; + + i = 0; + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_RUNT_CNT(port->id)); + port->stats.rx_len[i] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_H(port->id)); + port->stats.rx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_E64_CNT_L(port->id)); + port->stats.rx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_H(port->id)); + port->stats.rx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L64_CNT_L(port->id)); + port->stats.rx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_H(port->id)); + port->stats.rx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L127_CNT_L(port->id)); + port->stats.rx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_H(port->id)); + port->stats.rx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L255_CNT_L(port->id)); + port->stats.rx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_H(port->id)); + port->stats.rx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L511_CNT_L(port->id)); + port->stats.rx_len[i++] += val; + + val = 
airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_H(port->id)); + port->stats.rx_len[i] += ((u64)val << 32); + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_L1023_CNT_L(port->id)); + port->stats.rx_len[i++] += val; + + val = airoha_fe_rr(eth, REG_FE_GDM_RX_ETH_LONG_CNT(port->id)); + port->stats.rx_len[i++] += val; + + /* reset mib counters */ + airoha_fe_set(eth, REG_FE_GDM_MIB_CLEAR(port->id), + FE_GDM_MIB_RX_CLEAR_MASK | FE_GDM_MIB_TX_CLEAR_MASK); + + u64_stats_update_end(&port->stats.syncp); + spin_unlock(&port->stats.lock); +} + +static int airoha_dev_open(struct net_device *dev) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + struct airoha_eth *eth = port->eth; + int err; + + netif_tx_start_all_queues(dev); + err = airoha_set_gdm_ports(eth, true); + if (err) + return err; + + if (netdev_uses_dsa(dev)) + airoha_fe_set(eth, REG_GDM_INGRESS_CFG(port->id), + GDM_STAG_EN_MASK); + else + airoha_fe_clear(eth, REG_GDM_INGRESS_CFG(port->id), + GDM_STAG_EN_MASK); + + airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK); + airoha_qdma_set(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK); + + return 0; +} + +static int airoha_dev_stop(struct net_device *dev) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + struct airoha_eth *eth = port->eth; + int err; + + netif_tx_disable(dev); + err = airoha_set_gdm_ports(eth, false); + if (err) + return err; + + airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_TX_DMA_EN_MASK); + airoha_qdma_clear(eth, REG_QDMA_GLOBAL_CFG, GLOBAL_CFG_RX_DMA_EN_MASK); + + return 0; +} + +static int airoha_dev_set_macaddr(struct net_device *dev, void *p) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + int err; + + err = eth_mac_addr(dev, p); + if (err) + return err; + + airoha_set_macaddr(port->eth, dev->dev_addr); + + return 0; +} + +static int airoha_dev_init(struct net_device *dev) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + + airoha_set_macaddr(port->eth, dev->dev_addr); + + return 0; +} + +static void airoha_dev_get_stats64(struct net_device *dev, + struct rtnl_link_stats64 *storage) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + unsigned int start; + + airoha_update_hw_stats(port); + do { + start = u64_stats_fetch_begin(&port->stats.syncp); + storage->rx_packets = port->stats.rx_ok_pkts; + storage->tx_packets = port->stats.tx_ok_pkts; + storage->rx_bytes = port->stats.rx_ok_bytes; + storage->tx_bytes = port->stats.tx_ok_bytes; + storage->multicast = port->stats.rx_multicast; + storage->rx_errors = port->stats.rx_errors; + storage->rx_dropped = port->stats.rx_drops; + storage->tx_dropped = port->stats.tx_drops; + storage->rx_crc_errors = port->stats.rx_crc_error; + storage->rx_over_errors = port->stats.rx_over_errors; + } while (u64_stats_fetch_retry(&port->stats.syncp, start)); +} + +static netdev_tx_t airoha_dev_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct skb_shared_info *sinfo = skb_shinfo(skb); + struct airoha_gdm_port *port = netdev_priv(dev); + u32 msg0 = 0, msg1, len = skb_headlen(skb); + int i, qid = skb_get_queue_mapping(skb); + struct airoha_eth *eth = port->eth; + u32 nr_frags = 1 + sinfo->nr_frags; + struct netdev_queue *txq; + struct airoha_queue *q; + void *data = skb->data; + u16 index; + u8 fport; + + if (skb->ip_summed == CHECKSUM_PARTIAL) + msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TCO_MASK, 1) | + FIELD_PREP(QDMA_ETH_TXMSG_UCO_MASK, 1) | + FIELD_PREP(QDMA_ETH_TXMSG_ICO_MASK, 1); + + /* TSO: fill MSS info in tcp checksum field */ + if (skb_is_gso(skb)) { + if (skb_cow_head(skb, 0)) + 
goto error; + + if (sinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { + __be16 csum = cpu_to_be16(sinfo->gso_size); + + tcp_hdr(skb)->check = (__force __sum16)csum; + msg0 |= FIELD_PREP(QDMA_ETH_TXMSG_TSO_MASK, 1); + } + } + + fport = port->id == 4 ? FE_PSE_PORT_GDM4 : port->id; + msg1 = FIELD_PREP(QDMA_ETH_TXMSG_FPORT_MASK, fport) | + FIELD_PREP(QDMA_ETH_TXMSG_METER_MASK, 0x7f); + + q = &eth->q_tx[qid]; + if (WARN_ON_ONCE(!q->ndesc)) + goto error; + + spin_lock_bh(&q->lock); + + txq = netdev_get_tx_queue(dev, qid); + if (q->queued + nr_frags > q->ndesc) { + /* not enough space in the queue */ + netif_tx_stop_queue(txq); + spin_unlock_bh(&q->lock); + return NETDEV_TX_BUSY; + } + + index = q->head; + for (i = 0; i < nr_frags; i++) { + struct airoha_qdma_desc *desc = &q->desc[index]; + struct airoha_queue_entry *e = &q->entry[index]; + skb_frag_t *frag = &sinfo->frags[i]; + dma_addr_t addr; + u32 val; + + addr = dma_map_single(dev->dev.parent, data, len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(dev->dev.parent, addr))) + goto error_unmap; + + index = (index + 1) % q->ndesc; + + val = FIELD_PREP(QDMA_DESC_LEN_MASK, len); + if (i < nr_frags - 1) + val |= FIELD_PREP(QDMA_DESC_MORE_MASK, 1); + WRITE_ONCE(desc->ctrl, cpu_to_le32(val)); + WRITE_ONCE(desc->addr, cpu_to_le32(addr)); + val = FIELD_PREP(QDMA_DESC_NEXT_ID_MASK, index); + WRITE_ONCE(desc->data, cpu_to_le32(val)); + WRITE_ONCE(desc->msg0, cpu_to_le32(msg0)); + WRITE_ONCE(desc->msg1, cpu_to_le32(msg1)); + WRITE_ONCE(desc->msg2, cpu_to_le32(0xffff)); + + e->skb = i ? NULL : skb; + e->dma_addr = addr; + e->dma_len = len; + + airoha_qdma_rmw(eth, REG_TX_CPU_IDX(qid), TX_RING_CPU_IDX_MASK, + FIELD_PREP(TX_RING_CPU_IDX_MASK, index)); + + data = skb_frag_address(frag); + len = skb_frag_size(frag); + } + + q->head = index; + q->queued += i; + + skb_tx_timestamp(skb); + if (q->ndesc - q->queued < q->free_thr) + netif_tx_stop_queue(txq); + + spin_unlock_bh(&q->lock); + + return NETDEV_TX_OK; + +error_unmap: + for (i--; i >= 0; i--) + dma_unmap_single(dev->dev.parent, q->entry[i].dma_addr, + q->entry[i].dma_len, DMA_TO_DEVICE); + + spin_unlock_bh(&q->lock); +error: + dev_kfree_skb_any(skb); + dev->stats.tx_dropped++; + + return NETDEV_TX_OK; +} + +static void airoha_ethtool_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + struct airoha_eth *eth = port->eth; + + strscpy(info->driver, eth->dev->driver->name, sizeof(info->driver)); + strscpy(info->bus_info, dev_name(eth->dev), sizeof(info->bus_info)); +} + +static void airoha_ethtool_get_mac_stats(struct net_device *dev, + struct ethtool_eth_mac_stats *stats) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + unsigned int start; + + airoha_update_hw_stats(port); + do { + start = u64_stats_fetch_begin(&port->stats.syncp); + stats->MulticastFramesXmittedOK = port->stats.tx_multicast; + stats->BroadcastFramesXmittedOK = port->stats.tx_broadcast; + stats->BroadcastFramesReceivedOK = port->stats.rx_broadcast; + } while (u64_stats_fetch_retry(&port->stats.syncp, start)); +} + +static const struct ethtool_rmon_hist_range airoha_ethtool_rmon_ranges[] = { + { 0, 64 }, + { 65, 127 }, + { 128, 255 }, + { 256, 511 }, + { 512, 1023 }, + { 1024, 1518 }, + { 1519, 10239 }, + {}, +}; + +static void +airoha_ethtool_get_rmon_stats(struct net_device *dev, + struct ethtool_rmon_stats *stats, + const struct ethtool_rmon_hist_range **ranges) +{ + struct airoha_gdm_port *port = netdev_priv(dev); + struct airoha_hw_stats *hw_stats = 
&port->stats; + unsigned int start; + + BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) != + ARRAY_SIZE(hw_stats->tx_len) + 1); + BUILD_BUG_ON(ARRAY_SIZE(airoha_ethtool_rmon_ranges) != + ARRAY_SIZE(hw_stats->rx_len) + 1); + + *ranges = airoha_ethtool_rmon_ranges; + airoha_update_hw_stats(port); + do { + int i; + + start = u64_stats_fetch_begin(&port->stats.syncp); + stats->fragments = hw_stats->rx_fragment; + stats->jabbers = hw_stats->rx_jabber; + for (i = 0; i < ARRAY_SIZE(airoha_ethtool_rmon_ranges) - 1; + i++) { + stats->hist[i] = hw_stats->rx_len[i]; + stats->hist_tx[i] = hw_stats->tx_len[i]; + } + } while (u64_stats_fetch_retry(&port->stats.syncp, start)); +} + +static const struct net_device_ops airoha_netdev_ops = { + .ndo_init = airoha_dev_init, + .ndo_open = airoha_dev_open, + .ndo_stop = airoha_dev_stop, + .ndo_start_xmit = airoha_dev_xmit, + .ndo_get_stats64 = airoha_dev_get_stats64, + .ndo_set_mac_address = airoha_dev_set_macaddr, +}; + +static const struct ethtool_ops airoha_ethtool_ops = { + .get_drvinfo = airoha_ethtool_get_drvinfo, + .get_eth_mac_stats = airoha_ethtool_get_mac_stats, + .get_rmon_stats = airoha_ethtool_get_rmon_stats, +}; + +static int airoha_alloc_gdm_port(struct airoha_eth *eth, struct device_node *np) +{ + const __be32 *id_ptr = of_get_property(np, "reg", NULL); + struct airoha_gdm_port *port; + struct net_device *dev; + int err, index; + u32 id; + + if (!id_ptr) { + dev_err(eth->dev, "missing gdm port id\n"); + return -EINVAL; + } + + id = be32_to_cpup(id_ptr); + index = id - 1; + + if (!id || id > ARRAY_SIZE(eth->ports)) { + dev_err(eth->dev, "invalid gdm port id: %d\n", id); + return -EINVAL; + } + + if (eth->ports[index]) { + dev_err(eth->dev, "duplicate gdm port id: %d\n", id); + return -EINVAL; + } + + dev = devm_alloc_etherdev_mqs(eth->dev, sizeof(*port), + AIROHA_NUM_TX_RING, AIROHA_NUM_RX_RING); + if (!dev) { + dev_err(eth->dev, "alloc_etherdev failed\n"); + return -ENOMEM; + } + + dev->netdev_ops = &airoha_netdev_ops; + dev->ethtool_ops = &airoha_ethtool_ops; + dev->max_mtu = AIROHA_MAX_MTU; + dev->watchdog_timeo = 5 * HZ; + dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM | + NETIF_F_TSO6 | NETIF_F_IPV6_CSUM | + NETIF_F_SG | NETIF_F_TSO; + dev->features |= dev->hw_features; + dev->dev.of_node = np; + SET_NETDEV_DEV(dev, eth->dev); + + err = of_get_ethdev_address(np, dev); + if (err) { + if (err == -EPROBE_DEFER) + return err; + + eth_hw_addr_random(dev); + dev_info(eth->dev, "generated random MAC address %pM\n", + dev->dev_addr); + } + + port = netdev_priv(dev); + u64_stats_init(&port->stats.syncp); + spin_lock_init(&port->stats.lock); + port->dev = dev; + port->eth = eth; + port->id = id; + eth->ports[index] = port; + + return register_netdev(dev); +} + +static int airoha_probe(struct platform_device *pdev) +{ + struct device_node *np; + struct airoha_eth *eth; + int i, err; + + eth = devm_kzalloc(&pdev->dev, sizeof(*eth), GFP_KERNEL); + if (!eth) + return -ENOMEM; + + eth->dev = &pdev->dev; + + err = dma_set_mask_and_coherent(eth->dev, DMA_BIT_MASK(32)); + if (err) { + dev_err(eth->dev, "failed configuring DMA mask\n"); + return err; + } + + eth->fe_regs = devm_platform_ioremap_resource_byname(pdev, "fe"); + if (IS_ERR(eth->fe_regs)) + return dev_err_probe(eth->dev, PTR_ERR(eth->fe_regs), + "failed to iomap fe regs\n"); + + eth->qdma_regs = devm_platform_ioremap_resource_byname(pdev, "qdma0"); + if (IS_ERR(eth->qdma_regs)) + return dev_err_probe(eth->dev, PTR_ERR(eth->qdma_regs), + "failed to iomap qdma regs\n"); + + eth->rsts[0].id = 
"fe"; + eth->rsts[1].id = "pdma"; + eth->rsts[2].id = "qdma"; + err = devm_reset_control_bulk_get_exclusive(eth->dev, + ARRAY_SIZE(eth->rsts), + eth->rsts); + if (err) { + dev_err(eth->dev, "failed to get bulk reset lines\n"); + return err; + } + + eth->xsi_rsts[0].id = "xsi-mac"; + eth->xsi_rsts[1].id = "hsi0-mac"; + eth->xsi_rsts[2].id = "hsi1-mac"; + eth->xsi_rsts[3].id = "hsi-mac"; + eth->xsi_rsts[4].id = "xfp-mac"; + err = devm_reset_control_bulk_get_exclusive(eth->dev, + ARRAY_SIZE(eth->xsi_rsts), + eth->xsi_rsts); + if (err) { + dev_err(eth->dev, "failed to get bulk xsi reset lines\n"); + return err; + } + + spin_lock_init(ð->irq_lock); + eth->irq = platform_get_irq(pdev, 0); + if (eth->irq < 0) + return eth->irq; + + eth->napi_dev = alloc_netdev_dummy(0); + if (!eth->napi_dev) + return -ENOMEM; + + /* Enable threaded NAPI by default */ + eth->napi_dev->threaded = true; + strscpy(eth->napi_dev->name, "qdma_eth", sizeof(eth->napi_dev->name)); + platform_set_drvdata(pdev, eth); + + err = airoha_hw_init(eth); + if (err) + goto error; + + airoha_qdma_start_napi(eth); + for_each_child_of_node(pdev->dev.of_node, np) { + if (!of_device_is_compatible(np, "airoha,eth-mac")) + continue; + + if (!of_device_is_available(np)) + continue; + + err = airoha_alloc_gdm_port(eth, np); + if (err) { + of_node_put(np); + goto error; + } + } + + return 0; + +error: + airoha_hw_cleanup(eth); + for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { + struct airoha_gdm_port *port = eth->ports[i]; + + if (port && port->dev->reg_state == NETREG_REGISTERED) + unregister_netdev(port->dev); + } + free_netdev(eth->napi_dev); + platform_set_drvdata(pdev, NULL); + + return err; +} + +static void airoha_remove(struct platform_device *pdev) +{ + struct airoha_eth *eth = platform_get_drvdata(pdev); + int i; + + airoha_hw_cleanup(eth); + for (i = 0; i < ARRAY_SIZE(eth->ports); i++) { + struct airoha_gdm_port *port = eth->ports[i]; + + if (!port) + continue; + + airoha_dev_stop(port->dev); + unregister_netdev(port->dev); + } + free_netdev(eth->napi_dev); + + platform_set_drvdata(pdev, NULL); +} + +static const struct of_device_id of_airoha_match[] = { + { .compatible = "airoha,en7581-eth" }, + { /* sentinel */ } +}; + +static struct platform_driver airoha_driver = { + .probe = airoha_probe, + .remove_new = airoha_remove, + .driver = { + .name = KBUILD_MODNAME, + .of_match_table = of_airoha_match, + }, +}; +module_platform_driver(airoha_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Lorenzo Bianconi "); +MODULE_DESCRIPTION("Ethernet driver for Airoha SoC"); diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index c84ce54a84a0..0cc2dd85652f 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -80,7 +80,9 @@ static const struct mtk_reg_map mtk_reg_map = { .fq_blen = 0x1b2c, }, .gdm1_cnt = 0x2400, - .gdma_to_ppe = 0x4444, + .gdma_to_ppe = { + [0] = 0x4444, + }, .ppe_base = 0x0c00, .wdma_base = { [0] = 0x2800, @@ -144,7 +146,10 @@ static const struct mtk_reg_map mt7986_reg_map = { .tx_sch_rate = 0x4798, }, .gdm1_cnt = 0x1c00, - .gdma_to_ppe = 0x3333, + .gdma_to_ppe = { + [0] = 0x3333, + [1] = 0x4444, + }, .ppe_base = 0x2000, .wdma_base = { [0] = 0x4800, @@ -192,7 +197,11 @@ static const struct mtk_reg_map mt7988_reg_map = { .tx_sch_rate = 0x4798, }, .gdm1_cnt = 0x1c00, - .gdma_to_ppe = 0x3333, + .gdma_to_ppe = { + [0] = 0x3333, + [1] = 0x4444, + [2] = 0xcccc, + }, .ppe_base = 0x2000, .wdma_base = { [0] = 0x4800, @@ -2015,6 
+2024,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, struct mtk_rx_dma_v2 *rxd, trxd; int done = 0, bytes = 0; dma_addr_t dma_addr = DMA_MAPPING_ERROR; + int ppe_idx = 0; while (done < budget) { unsigned int pktlen, *rxdcsum; @@ -2058,6 +2068,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, goto release_desc; netdev = eth->netdev[mac]; + ppe_idx = eth->mac[mac]->ppe_idx; if (unlikely(test_bit(MTK_RESETTING, ð->state))) goto release_desc; @@ -2181,7 +2192,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget, } if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) - mtk_ppe_check_skb(eth->ppe[0], skb, hash); + mtk_ppe_check_skb(eth->ppe[ppe_idx], skb, hash); skb_record_rx_queue(skb, 0); napi_gro_receive(napi, skb); @@ -3276,37 +3287,27 @@ static int mtk_start_dma(struct mtk_eth *eth) return 0; } -static void mtk_gdm_config(struct mtk_eth *eth, u32 config) +static void mtk_gdm_config(struct mtk_eth *eth, u32 id, u32 config) { - int i; + u32 val; if (MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628)) return; - for (i = 0; i < MTK_MAX_DEVS; i++) { - u32 val; + val = mtk_r32(eth, MTK_GDMA_FWD_CFG(id)); - if (!eth->netdev[i]) - continue; + /* default setup the forward port to send frame to PDMA */ + val &= ~0xffff; - val = mtk_r32(eth, MTK_GDMA_FWD_CFG(i)); + /* Enable RX checksum */ + val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; - /* default setup the forward port to send frame to PDMA */ - val &= ~0xffff; + val |= config; - /* Enable RX checksum */ - val |= MTK_GDMA_ICS_EN | MTK_GDMA_TCS_EN | MTK_GDMA_UCS_EN; + if (eth->netdev[id] && netdev_uses_dsa(eth->netdev[id])) + val |= MTK_GDMA_SPECIAL_TAG; - val |= config; - - if (netdev_uses_dsa(eth->netdev[i])) - val |= MTK_GDMA_SPECIAL_TAG; - - mtk_w32(eth, val, MTK_GDMA_FWD_CFG(i)); - } - /* Reset and enable PSE */ - mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); - mtk_w32(eth, 0, MTK_RST_GL); + mtk_w32(eth, val, MTK_GDMA_FWD_CFG(id)); } @@ -3366,7 +3367,10 @@ static int mtk_open(struct net_device *dev) { struct mtk_mac *mac = netdev_priv(dev); struct mtk_eth *eth = mac->hw; - int i, err; + struct mtk_mac *target_mac; + int i, err, ppe_num; + + ppe_num = eth->soc->ppe_num; err = phylink_of_phy_connect(mac->phylink, mac->of_node, 0); if (err) { @@ -3390,18 +3394,38 @@ static int mtk_open(struct net_device *dev) for (i = 0; i < ARRAY_SIZE(eth->ppe); i++) mtk_ppe_start(eth->ppe[i]); - gdm_config = soc->offload_version ? 
soc->reg_map->gdma_to_ppe - : MTK_GDMA_TO_PDMA; - mtk_gdm_config(eth, gdm_config); + for (i = 0; i < MTK_MAX_DEVS; i++) { + if (!eth->netdev[i]) + continue; + + target_mac = netdev_priv(eth->netdev[i]); + if (!soc->offload_version) { + target_mac->ppe_idx = 0; + gdm_config = MTK_GDMA_TO_PDMA; + } else if (ppe_num >= 3 && target_mac->id == 2) { + target_mac->ppe_idx = 2; + gdm_config = soc->reg_map->gdma_to_ppe[2]; + } else if (ppe_num >= 2 && target_mac->id == 1) { + target_mac->ppe_idx = 1; + gdm_config = soc->reg_map->gdma_to_ppe[1]; + } else { + target_mac->ppe_idx = 0; + gdm_config = soc->reg_map->gdma_to_ppe[0]; + } + mtk_gdm_config(eth, target_mac->id, gdm_config); + } + /* Reset and enable PSE */ + mtk_w32(eth, RST_GL_PSE, MTK_RST_GL); + mtk_w32(eth, 0, MTK_RST_GL); napi_enable(&eth->tx_napi); napi_enable(&eth->rx_napi); mtk_tx_irq_enable(eth, MTK_TX_DONE_INT); mtk_rx_irq_enable(eth, soc->rx.irq_done_mask); refcount_set(&eth->dma_refcnt, 1); - } - else + } else { refcount_inc(&eth->dma_refcnt); + } phylink_start(mac->phylink); netif_tx_start_all_queues(dev); @@ -3478,7 +3502,8 @@ static int mtk_stop(struct net_device *dev) if (!refcount_dec_and_test(&eth->dma_refcnt)) return 0; - mtk_gdm_config(eth, MTK_GDMA_DROP_ALL); + for (i = 0; i < MTK_MAX_DEVS; i++) + mtk_gdm_config(eth, i, MTK_GDMA_DROP_ALL); mtk_tx_irq_disable(eth, MTK_TX_DONE_INT); mtk_rx_irq_disable(eth, eth->soc->rx.irq_done_mask); @@ -4439,6 +4464,20 @@ static int mtk_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return ret; } +static void mtk_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause) +{ + struct mtk_mac *mac = netdev_priv(dev); + + phylink_ethtool_get_pauseparam(mac->phylink, pause); +} + +static int mtk_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause) +{ + struct mtk_mac *mac = netdev_priv(dev); + + return phylink_ethtool_set_pauseparam(mac->phylink, pause); +} + static u16 mtk_select_queue(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev) { @@ -4467,8 +4506,10 @@ static const struct ethtool_ops mtk_ethtool_ops = { .get_strings = mtk_get_strings, .get_sset_count = mtk_get_sset_count, .get_ethtool_stats = mtk_get_ethtool_stats, + .get_pauseparam = mtk_get_pauseparam, + .set_pauseparam = mtk_set_pauseparam, .get_rxnfc = mtk_get_rxnfc, - .set_rxnfc = mtk_set_rxnfc, + .set_rxnfc = mtk_set_rxnfc, }; static const struct net_device_ops mtk_netdev_ops = { @@ -4959,23 +5000,24 @@ static int mtk_probe(struct platform_device *pdev) } if (eth->soc->offload_version) { - u32 num_ppe = mtk_is_netsys_v2_or_greater(eth) ? 2 : 1; + u8 ppe_num = eth->soc->ppe_num; - num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe); - for (i = 0; i < num_ppe; i++) { - u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400; + ppe_num = min_t(u8, ARRAY_SIZE(eth->ppe), ppe_num); + for (i = 0; i < ppe_num; i++) { + u32 ppe_addr = eth->soc->reg_map->ppe_base; + ppe_addr += (i == 2 ? 
0xc00 : i * 0x400); eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr, i); if (!eth->ppe[i]) { err = -ENOMEM; goto err_deinit_ppe; } - } + err = mtk_eth_offload_init(eth, i); - err = mtk_eth_offload_init(eth); - if (err) - goto err_deinit_ppe; + if (err) + goto err_deinit_ppe; + } } for (i = 0; i < MTK_MAX_DEVS; i++) { @@ -5083,6 +5125,7 @@ static const struct mtk_soc_data mt7621_data = { .required_pctl = false, .version = 1, .offload_version = 1, + .ppe_num = 1, .hash_offset = 2, .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, .tx = { @@ -5111,6 +5154,7 @@ static const struct mtk_soc_data mt7622_data = { .required_pctl = false, .version = 1, .offload_version = 2, + .ppe_num = 1, .hash_offset = 2, .has_accounting = true, .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, @@ -5139,6 +5183,7 @@ static const struct mtk_soc_data mt7623_data = { .required_pctl = true, .version = 1, .offload_version = 1, + .ppe_num = 1, .hash_offset = 2, .foe_entry_size = MTK_FOE_ENTRY_V1_SIZE, .disable_pll_modes = true, @@ -5194,6 +5239,7 @@ static const struct mtk_soc_data mt7981_data = { .required_pctl = false, .version = 2, .offload_version = 2, + .ppe_num = 2, .hash_offset = 4, .has_accounting = true, .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE, @@ -5223,6 +5269,7 @@ static const struct mtk_soc_data mt7986_data = { .required_pctl = false, .version = 2, .offload_version = 2, + .ppe_num = 2, .hash_offset = 4, .has_accounting = true, .foe_entry_size = MTK_FOE_ENTRY_V2_SIZE, @@ -5252,6 +5299,7 @@ static const struct mtk_soc_data mt7988_data = { .required_pctl = false, .version = 3, .offload_version = 2, + .ppe_num = 3, .hash_offset = 4, .has_accounting = true, .foe_entry_size = MTK_FOE_ENTRY_V3_SIZE, diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index f5174f6cb1bb..eb1708b43aa3 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -1132,7 +1132,7 @@ struct mtk_reg_map { u32 tx_sch_rate; /* tx scheduler rate control registers */ } qdma; u32 gdm1_cnt; - u32 gdma_to_ppe; + u32 gdma_to_ppe[3]; u32 ppe_base; u32 wdma_base[3]; u32 pse_iq_sta; @@ -1170,6 +1170,7 @@ struct mtk_soc_data { u8 offload_version; u8 hash_offset; u8 version; + u8 ppe_num; u16 foe_entry_size; netdev_features_t hw_features; bool has_accounting; @@ -1294,7 +1295,7 @@ struct mtk_eth { struct metadata_dst *dsa_meta[MTK_MAX_DSA_PORTS]; - struct mtk_ppe *ppe[2]; + struct mtk_ppe *ppe[3]; struct rhashtable flow_table; struct bpf_prog __rcu *prog; @@ -1319,6 +1320,7 @@ struct mtk_eth { struct mtk_mac { int id; phy_interface_t interface; + u8 ppe_idx; int speed; struct device_node *of_node; struct phylink *phylink; @@ -1440,7 +1442,7 @@ int mtk_gmac_sgmii_path_setup(struct mtk_eth *eth, int mac_id); int mtk_gmac_gephy_path_setup(struct mtk_eth *eth, int mac_id); int mtk_gmac_rgmii_path_setup(struct mtk_eth *eth, int mac_id); -int mtk_eth_offload_init(struct mtk_eth *eth); +int mtk_eth_offload_init(struct mtk_eth *eth, u8 id); int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data); int mtk_flow_offload_cmd(struct mtk_eth *eth, struct flow_cls_offload *cls, diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h index 691806bca372..223f709e2704 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@ -8,7 +8,7 @@ #include #include -#define MTK_PPE_ENTRIES_SHIFT 3 +#define MTK_PPE_ENTRIES_SHIFT 4 #define MTK_PPE_ENTRIES (1024 << 
MTK_PPE_ENTRIES_SHIFT) #define MTK_PPE_HASH_MASK (MTK_PPE_ENTRIES - 1) #define MTK_PPE_WAIT_TIMEOUT_US 1000000 diff --git a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c index aa262e6f4b85..f20bb390df3a 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c @@ -245,10 +245,10 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f, int ppe_index) { struct flow_rule *rule = flow_cls_offload_flow_rule(f); + struct net_device *idev = NULL, *odev = NULL; struct flow_action_entry *act; struct mtk_flow_data data = {}; struct mtk_foe_entry foe; - struct net_device *odev = NULL; struct mtk_flow_entry *entry; int offload_type = 0; int wed_index = -1; @@ -264,6 +264,17 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f, struct flow_match_meta match; flow_rule_match_meta(rule, &match); + if (mtk_is_netsys_v2_or_greater(eth)) { + idev = __dev_get_by_index(&init_net, match.key->ingress_ifindex); + if (idev && idev->netdev_ops == eth->netdev[0]->netdev_ops) { + struct mtk_mac *mac = netdev_priv(idev); + + if (WARN_ON(mac->ppe_idx >= eth->soc->ppe_num)) + return -EINVAL; + + ppe_index = mac->ppe_idx; + } + } } else { return -EOPNOTSUPP; } @@ -637,7 +648,9 @@ int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type, } } -int mtk_eth_offload_init(struct mtk_eth *eth) +int mtk_eth_offload_init(struct mtk_eth *eth, u8 id) { + if (!eth->ppe[id] || !eth->ppe[id]->foe_table) + return 0; return rhashtable_init(ð->flow_table, &mtk_flow_ht_params); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 1184ac5751e1..461cc2c79c71 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -126,6 +126,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, cq_idx = cq_idx % priv->rx_ring_num; rx_cq = priv->rx_cq[cq_idx]; cq->vector = rx_cq->vector; + irq = mlx4_eq_get_irq(mdev->dev, cq->vector); } if (cq->type == RX) @@ -142,18 +143,23 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, if (err) goto free_eq; + cq->cq_idx = cq_idx; cq->mcq.event = mlx4_en_cq_event; switch (cq->type) { case TX: cq->mcq.comp = mlx4_en_tx_irq; netif_napi_add_tx(cq->dev, &cq->napi, mlx4_en_poll_tx_cq); + netif_napi_set_irq(&cq->napi, irq); napi_enable(&cq->napi); + netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_TX, &cq->napi); break; case RX: cq->mcq.comp = mlx4_en_rx_irq; netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq); + netif_napi_set_irq(&cq->napi, irq); napi_enable(&cq->napi); + netif_queue_set_napi(cq->dev, cq_idx, NETDEV_QUEUE_TYPE_RX, &cq->napi); break; case TX_XDP: /* nothing regarding napi, it's shared with rx ring */ @@ -189,6 +195,14 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq) void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq) { if (cq->type != TX_XDP) { + enum netdev_queue_type qtype; + + if (cq->type == RX) + qtype = NETDEV_QUEUE_TYPE_RX; + else + qtype = NETDEV_QUEUE_TYPE_TX; + + netif_queue_set_napi(cq->dev, cq->cq_idx, qtype, NULL); napi_disable(&cq->napi); netif_napi_del(&cq->napi); } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 619e1c3ef7f9..943d6918c2ec 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ 
b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -450,7 +450,6 @@ static void mlx4_en_get_strings(struct net_device *dev, uint32_t stringset, uint8_t *data) { struct mlx4_en_priv *priv = netdev_priv(dev); - int index = 0; int i, strings = 0; struct bitmap_iterator it; @@ -459,10 +458,10 @@ static void mlx4_en_get_strings(struct net_device *dev, switch (stringset) { case ETH_SS_TEST: for (i = 0; i < MLX4_EN_NUM_SELF_TEST - 2; i++) - strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]); + ethtool_puts(&data, mlx4_en_test_names[i]); if (priv->mdev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) for (; i < MLX4_EN_NUM_SELF_TEST; i++) - strcpy(data + i * ETH_GSTRING_LEN, mlx4_en_test_names[i]); + ethtool_puts(&data, mlx4_en_test_names[i]); break; case ETH_SS_STATS: @@ -470,74 +469,56 @@ static void mlx4_en_get_strings(struct net_device *dev, for (i = 0; i < NUM_MAIN_STATS; i++, strings++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) - strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[strings]); + ethtool_puts(&data, main_strings[strings]); for (i = 0; i < NUM_PORT_STATS; i++, strings++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) - strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[strings]); + ethtool_puts(&data, main_strings[strings]); for (i = 0; i < NUM_PF_STATS; i++, strings++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) - strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[strings]); + ethtool_puts(&data, main_strings[strings]); for (i = 0; i < NUM_FLOW_STATS; i++, strings++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) - strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[strings]); + ethtool_puts(&data, main_strings[strings]); for (i = 0; i < NUM_PKT_STATS; i++, strings++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) - strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[strings]); + ethtool_puts(&data, main_strings[strings]); for (i = 0; i < NUM_XDP_STATS; i++, strings++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) - strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[strings]); + ethtool_puts(&data, main_strings[strings]); for (i = 0; i < NUM_PHY_STATS; i++, strings++, bitmap_iterator_inc(&it)) if (bitmap_iterator_test(&it)) - strcpy(data + (index++) * ETH_GSTRING_LEN, - main_strings[strings]); + ethtool_puts(&data, main_strings[strings]); for (i = 0; i < priv->tx_ring_num[TX]; i++) { - sprintf(data + (index++) * ETH_GSTRING_LEN, - "tx%d_packets", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "tx%d_bytes", i); + ethtool_sprintf(&data, "tx%d_packets", i); + ethtool_sprintf(&data, "tx%d_bytes", i); } for (i = 0; i < priv->rx_ring_num; i++) { - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_packets", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_bytes", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_dropped", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_xdp_drop", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_xdp_redirect", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_xdp_redirect_fail", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_xdp_tx", i); - sprintf(data + (index++) * ETH_GSTRING_LEN, - "rx%d_xdp_tx_full", i); + ethtool_sprintf(&data, "rx%d_packets", i); + ethtool_sprintf(&data, "rx%d_bytes", i); + ethtool_sprintf(&data, "rx%d_dropped", i); + ethtool_sprintf(&data, "rx%d_xdp_drop", i); + ethtool_sprintf(&data, "rx%d_xdp_redirect", i); + ethtool_sprintf(&data, 
"rx%d_xdp_redirect_fail", i); + ethtool_sprintf(&data, "rx%d_xdp_tx", i); + ethtool_sprintf(&data, "rx%d_xdp_tx_full", i); } break; case ETH_SS_PRIV_FLAGS: for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++) - strcpy(data + i * ETH_GSTRING_LEN, - mlx4_en_priv_flags[i]); + ethtool_puts(&data, mlx4_en_priv_flags[i]); break; } @@ -1903,7 +1884,7 @@ out: } static int mlx4_en_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct mlx4_en_priv *priv = netdev_priv(dev); struct mlx4_en_dev *mdev = priv->mdev; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 4c089cfa027a..281b34af0bb4 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include @@ -2073,6 +2074,7 @@ static void mlx4_en_clear_stats(struct net_device *dev) priv->rx_ring[i]->csum_ok = 0; priv->rx_ring[i]->csum_none = 0; priv->rx_ring[i]->csum_complete = 0; + priv->rx_ring[i]->alloc_fail = 0; } } @@ -3099,6 +3101,77 @@ void mlx4_en_set_stats_bitmap(struct mlx4_dev *dev, last_i += NUM_PHY_STATS; } +static void mlx4_get_queue_stats_rx(struct net_device *dev, int i, + struct netdev_queue_stats_rx *stats) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + const struct mlx4_en_rx_ring *ring; + + spin_lock_bh(&priv->stats_lock); + + if (!priv->port_up || mlx4_is_master(priv->mdev->dev)) + goto out_unlock; + + ring = priv->rx_ring[i]; + stats->packets = READ_ONCE(ring->packets); + stats->bytes = READ_ONCE(ring->bytes); + stats->alloc_fail = READ_ONCE(ring->alloc_fail); + +out_unlock: + spin_unlock_bh(&priv->stats_lock); +} + +static void mlx4_get_queue_stats_tx(struct net_device *dev, int i, + struct netdev_queue_stats_tx *stats) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + const struct mlx4_en_tx_ring *ring; + + spin_lock_bh(&priv->stats_lock); + + if (!priv->port_up || mlx4_is_master(priv->mdev->dev)) + goto out_unlock; + + ring = priv->tx_ring[TX][i]; + stats->packets = READ_ONCE(ring->packets); + stats->bytes = READ_ONCE(ring->bytes); + +out_unlock: + spin_unlock_bh(&priv->stats_lock); +} + +static void mlx4_get_base_stats(struct net_device *dev, + struct netdev_queue_stats_rx *rx, + struct netdev_queue_stats_tx *tx) +{ + struct mlx4_en_priv *priv = netdev_priv(dev); + + spin_lock_bh(&priv->stats_lock); + + if (!priv->port_up || mlx4_is_master(priv->mdev->dev)) + goto out_unlock; + + if (priv->rx_ring_num) { + rx->packets = 0; + rx->bytes = 0; + rx->alloc_fail = 0; + } + + if (priv->tx_ring_num[TX]) { + tx->packets = 0; + tx->bytes = 0; + } + +out_unlock: + spin_unlock_bh(&priv->stats_lock); +} + +static const struct netdev_stat_ops mlx4_stat_ops = { + .get_queue_stats_rx = mlx4_get_queue_stats_rx, + .get_queue_stats_tx = mlx4_get_queue_stats_tx, + .get_base_stats = mlx4_get_base_stats, +}; + int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, struct mlx4_en_port_profile *prof) { @@ -3262,6 +3335,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]); netif_set_real_num_rx_queues(dev, priv->rx_ring_num); + dev->stat_ops = &mlx4_stat_ops; dev->ethtool_ops = &mlx4_en_ethtool_ops; /* diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 8328df8645d5..15c57e9517e9 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ 
b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -82,8 +82,10 @@ static int mlx4_en_alloc_frags(struct mlx4_en_priv *priv, for (i = 0; i < priv->num_frags; i++, frags++) { if (!frags->page) { - if (mlx4_alloc_page(priv, frags, gfp)) + if (mlx4_alloc_page(priv, frags, gfp)) { + ring->alloc_fail++; return -ENOMEM; + } ring->rx_alloc_pages++; } rx_desc->data[i].addr = cpu_to_be64(frags->dma + diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 98688e4dbec5..febeadfdd5a5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -169,12 +169,6 @@ module_param_array(port_type_array, int, &arr_argc, 0444); MODULE_PARM_DESC(port_type_array, "Array of port types: HW_DEFAULT (0) is default " "1 for IB, 2 for Ethernet"); -struct mlx4_port_config { - struct list_head list; - enum mlx4_port_type port_type[MLX4_MAX_PORTS + 1]; - struct pci_dev *pdev; -}; - static atomic_t pf_loading = ATOMIC_INIT(0); static int mlx4_devlink_ierr_reset_get(struct devlink *devlink, u32 id, diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h index efe3f97b874f..28b70dcc652e 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h +++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h @@ -355,6 +355,7 @@ struct mlx4_en_rx_ring { unsigned long xdp_tx; unsigned long xdp_tx_full; unsigned long dropped; + unsigned long alloc_fail; int hwtstamp_rx_filter; cpumask_var_t affinity_mask; struct xdp_rxq_info xdp_rxq; @@ -379,6 +380,7 @@ struct mlx4_en_cq { #define MLX4_EN_OPCODE_ERROR 0x1e const struct cpumask *aff_mask; + int cq_idx; }; struct mlx4_en_port_profile { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index 76dc5a9b9648..1289475e7be7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -17,7 +17,7 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ fs_counters.o fs_ft_pool.o rl.o lag/debugfs.o lag/lag.o dev.o events.o wq.o lib/gid.o \ lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \ diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o diag/reporter_vnic.o \ - fw_reset.o qos.o lib/tout.o lib/aso.o + fw_reset.o qos.o lib/tout.o lib/aso.o wc.o # # Netdev basic diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index e85fb71bf0b4..5fd82c67b6ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -80,6 +80,7 @@ struct page_pool; SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) #define MLX5E_RX_MAX_HEAD (256) +#define MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE (8) #define MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE (9) #define MLX5E_SHAMPO_WQ_HEADER_PER_PAGE (PAGE_SIZE >> MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE) #define MLX5E_SHAMPO_WQ_BASE_HEAD_ENTRY_SIZE (64) @@ -146,25 +147,6 @@ struct page_pool; #define MLX5E_TX_XSK_POLL_BUDGET 64 #define MLX5E_SQ_RECOVER_MIN_INTERVAL 500 /* msecs */ -#define MLX5E_KLM_UMR_WQE_SZ(sgl_len)\ - (sizeof(struct mlx5e_umr_wqe) +\ - (sizeof(struct mlx5_klm) * (sgl_len))) - -#define MLX5E_KLM_UMR_WQEBBS(klm_entries) \ - (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_BB)) - -#define MLX5E_KLM_UMR_DS_CNT(klm_entries)\ - (DIV_ROUND_UP(MLX5E_KLM_UMR_WQE_SZ(klm_entries), MLX5_SEND_WQE_DS)) - -#define MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size)\ - (((wqe_size) - 
sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_klm)) - -#define MLX5E_KLM_ENTRIES_PER_WQE(wqe_size)\ - ALIGN_DOWN(MLX5E_KLM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT) - -#define MLX5E_MAX_KLM_PER_WQE(mdev) \ - MLX5E_KLM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev)) - #define mlx5e_state_dereference(priv, p) \ rcu_dereference_protected((p), lockdep_is_held(&(priv)->state_lock)) @@ -885,6 +867,8 @@ struct mlx5e_priv { /* priv data path fields - start */ struct mlx5e_selq selq; struct mlx5e_txqsq **txq2sq; + struct mlx5e_sq_stats **txq2sq_stats; + #ifdef CONFIG_MLX5_CORE_EN_DCB struct mlx5e_dcbx_dp dcbx_dp; #endif @@ -1014,7 +998,7 @@ void mlx5e_build_ptys2ethtool_map(void); bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift, enum mlx5e_mpwrq_umr_mode umr_mode); -void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close); +void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq); void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s); @@ -1207,7 +1191,7 @@ int mlx5e_set_per_queue_coalesce(struct net_device *dev, u32 queue, u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv); u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv); int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, struct ethtool_flash *flash); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 4d6225e0eec7..1e8b7d330701 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -154,6 +154,19 @@ struct mlx5e_tc_table *mlx5e_fs_get_tc(struct mlx5e_flow_steering *fs); struct mlx5e_l2_table *mlx5e_fs_get_l2(struct mlx5e_flow_steering *fs); struct mlx5_flow_namespace *mlx5e_fs_get_ns(struct mlx5e_flow_steering *fs, bool egress); void mlx5e_fs_set_ns(struct mlx5e_flow_steering *fs, struct mlx5_flow_namespace *ns, bool egress); + +static inline bool mlx5e_fs_has_arfs(struct net_device *netdev) +{ + return IS_ENABLED(CONFIG_MLX5_EN_ARFS) && + netdev->hw_features & NETIF_F_NTUPLE; +} + +static inline bool mlx5e_fs_want_arfs(struct net_device *netdev) +{ + return IS_ENABLED(CONFIG_MLX5_EN_ARFS) && + netdev->features & NETIF_F_NTUPLE; +} + #ifdef CONFIG_MLX5_EN_RXNFC struct mlx5e_ethtool_steering *mlx5e_fs_get_ethtool(struct mlx5e_flow_steering *fs); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index ec819dfc98be..6c9ccccca81e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -1071,18 +1071,18 @@ static u32 mlx5e_shampo_icosq_sz(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_rq_param *rq_param) { - int max_num_of_umr_per_wqe, max_hd_per_wqe, max_klm_per_umr, rest; + int max_num_of_umr_per_wqe, max_hd_per_wqe, max_ksm_per_umr, rest; void *wqc = MLX5_ADDR_OF(rqc, rq_param->rqc, wq); int wq_size = BIT(MLX5_GET(wq, wqc, log_wq_sz)); u32 wqebbs; - max_klm_per_umr = MLX5E_MAX_KLM_PER_WQE(mdev); + max_ksm_per_umr = MLX5E_MAX_KSM_PER_WQE(mdev); max_hd_per_wqe = mlx5e_shampo_hd_per_wqe(mdev, params, rq_param); - max_num_of_umr_per_wqe = max_hd_per_wqe / max_klm_per_umr; - rest = 
max_hd_per_wqe % max_klm_per_umr; - wqebbs = MLX5E_KLM_UMR_WQEBBS(max_klm_per_umr) * max_num_of_umr_per_wqe; + max_num_of_umr_per_wqe = max_hd_per_wqe / max_ksm_per_umr; + rest = max_hd_per_wqe % max_ksm_per_umr; + wqebbs = MLX5E_KSM_UMR_WQEBBS(max_ksm_per_umr) * max_num_of_umr_per_wqe; if (rest) - wqebbs += MLX5E_KLM_UMR_WQEBBS(rest); + wqebbs += MLX5E_KSM_UMR_WQEBBS(rest); wqebbs *= wq_size; return wqebbs; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c index 6743806b8480..f0744a45db92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c @@ -170,6 +170,7 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id) mlx5e_tx_disable_queue(netdev_get_tx_queue(priv->netdev, qid)); priv->txq2sq[qid] = sq; + priv->txq2sq_stats[qid] = sq->stats; /* Make the change to txq2sq visible before the queue is started. * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE, @@ -186,6 +187,7 @@ int mlx5e_activate_qos_sq(void *data, u16 node_qid, u32 hw_id) void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid) { struct mlx5e_txqsq *sq; + u16 txq_ix; sq = mlx5e_get_qos_sq(priv, qid); if (!sq) /* Handle the case when the SQ failed to open. */ @@ -194,7 +196,10 @@ void mlx5e_deactivate_qos_sq(struct mlx5e_priv *priv, u16 qid) qos_dbg(sq->mdev, "Deactivate QoS SQ qid %u\n", qid); mlx5e_deactivate_txqsq(sq); - priv->txq2sq[mlx5e_qid_from_qos(&priv->channels, qid)] = NULL; + txq_ix = mlx5e_qid_from_qos(&priv->channels, qid); + + priv->txq2sq[txq_ix] = NULL; + priv->txq2sq_stats[txq_ix] = NULL; /* Make the change to txq2sq visible before the queue is started again. * As mlx5e_xmit runs under a spinlock, there is an implicit ACQUIRE, @@ -325,6 +330,7 @@ void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c) { struct mlx5e_params *params = &c->priv->channels.params; struct mlx5e_txqsq __rcu **qos_sqs; + u16 txq_ix; int i; qos_sqs = mlx5e_state_dereference(c->priv, c->qos_sqs); @@ -342,8 +348,11 @@ void mlx5e_qos_deactivate_queues(struct mlx5e_channel *c) qos_dbg(c->mdev, "Deactivate QoS SQ qid %u\n", qid); mlx5e_deactivate_txqsq(sq); + txq_ix = mlx5e_qid_from_qos(&c->priv->channels, qid); + /* The queue is disabled, no synchronization with datapath is needed. 
*/ - c->priv->txq2sq[mlx5e_qid_from_qos(&c->priv->channels, qid)] = NULL; + c->priv->txq2sq[txq_ix] = NULL; + c->priv->txq2sq_stats[txq_ix] = NULL; } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index fadfa8b50beb..8cf8ba2622f2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -69,6 +69,8 @@ struct mlx5_tc_ct_priv { struct rhashtable ct_tuples_nat_ht; struct mlx5_flow_table *ct; struct mlx5_flow_table *ct_nat; + struct mlx5_flow_group *ct_nat_miss_group; + struct mlx5_flow_handle *ct_nat_miss_rule; struct mlx5e_post_act *post_act; struct mutex control_lock; /* guards parallel adds/dels */ struct mapping_ctx *zone_mapping; @@ -141,6 +143,8 @@ struct mlx5_ct_counter { enum { MLX5_CT_ENTRY_FLAG_VALID, + MLX5_CT_ENTRY_IN_CT_TABLE, + MLX5_CT_ENTRY_IN_CT_NAT_TABLE, }; struct mlx5_ct_entry { @@ -198,9 +202,15 @@ static const struct rhashtable_params tuples_nat_ht_params = { }; static bool -mlx5_tc_ct_entry_has_nat(struct mlx5_ct_entry *entry) +mlx5_tc_ct_entry_in_ct_table(struct mlx5_ct_entry *entry) { - return !!(entry->tuple_nat_node.next); + return test_bit(MLX5_CT_ENTRY_IN_CT_TABLE, &entry->flags); +} + +static bool +mlx5_tc_ct_entry_in_ct_nat_table(struct mlx5_ct_entry *entry) +{ + return test_bit(MLX5_CT_ENTRY_IN_CT_NAT_TABLE, &entry->flags); } static int @@ -526,8 +536,10 @@ static void mlx5_tc_ct_entry_del_rules(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry) { - mlx5_tc_ct_entry_del_rule(ct_priv, entry, true); - mlx5_tc_ct_entry_del_rule(ct_priv, entry, false); + if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) + mlx5_tc_ct_entry_del_rule(ct_priv, entry, true); + if (mlx5_tc_ct_entry_in_ct_table(entry)) + mlx5_tc_ct_entry_del_rule(ct_priv, entry, false); atomic_dec(&ct_priv->debugfs.stats.offloaded); } @@ -814,7 +826,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, &zone_rule->mh, zone_restore_id, nat, - mlx5_tc_ct_entry_has_nat(entry)); + mlx5_tc_ct_entry_in_ct_nat_table(entry)); if (err) { ct_dbg("Failed to create ct entry mod hdr"); goto err_mod_hdr; @@ -888,7 +900,7 @@ mlx5_tc_ct_entry_replace_rule(struct mlx5_tc_ct_priv *ct_priv, *old_attr = *attr; err = mlx5_tc_ct_entry_create_mod_hdr(ct_priv, attr, flow_rule, &mh, zone_restore_id, - nat, mlx5_tc_ct_entry_has_nat(entry)); + nat, mlx5_tc_ct_entry_in_ct_nat_table(entry)); if (err) { ct_dbg("Failed to create ct entry mod hdr"); goto err_mod_hdr; @@ -957,11 +969,13 @@ static void mlx5_tc_ct_entry_remove_from_tuples(struct mlx5_ct_entry *entry) { struct mlx5_tc_ct_priv *ct_priv = entry->ct_priv; - rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht, - &entry->tuple_nat_node, - tuples_nat_ht_params); - rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node, - tuples_ht_params); + if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) + rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht, + &entry->tuple_nat_node, + tuples_nat_ht_params); + if (mlx5_tc_ct_entry_in_ct_table(entry)) + rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node, + tuples_ht_params); } static void mlx5_tc_ct_entry_del(struct mlx5_ct_entry *entry) @@ -1100,21 +1114,26 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv, return err; } - err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false, - zone_restore_id); - if (err) - goto err_orig; + if (mlx5_tc_ct_entry_in_ct_table(entry)) { + err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, false, + zone_restore_id); 
+ if (err) + goto err_orig; + } - err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true, - zone_restore_id); - if (err) - goto err_nat; + if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) { + err = mlx5_tc_ct_entry_add_rule(ct_priv, flow_rule, entry, true, + zone_restore_id); + if (err) + goto err_nat; + } atomic_inc(&ct_priv->debugfs.stats.offloaded); return 0; err_nat: - mlx5_tc_ct_entry_del_rule(ct_priv, entry, false); + if (mlx5_tc_ct_entry_in_ct_table(entry)) + mlx5_tc_ct_entry_del_rule(ct_priv, entry, false); err_orig: mlx5_tc_ct_counter_put(ct_priv, entry); return err; @@ -1126,17 +1145,21 @@ mlx5_tc_ct_entry_replace_rules(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry, u8 zone_restore_id) { - int err; + int err = 0; - err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false, - zone_restore_id); - if (err) - return err; + if (mlx5_tc_ct_entry_in_ct_table(entry)) { + err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, false, + zone_restore_id); + if (err) + return err; + } - err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true, - zone_restore_id); - if (err) - mlx5_tc_ct_entry_del_rule(ct_priv, entry, false); + if (mlx5_tc_ct_entry_in_ct_nat_table(entry)) { + err = mlx5_tc_ct_entry_replace_rule(ct_priv, flow_rule, entry, true, + zone_restore_id); + if (err && mlx5_tc_ct_entry_in_ct_table(entry)) + mlx5_tc_ct_entry_del_rule(ct_priv, entry, false); + } return err; } @@ -1224,18 +1247,24 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft, if (err) goto err_entries; - err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht, - &entry->tuple_node, - tuples_ht_params); - if (err) - goto err_tuple; - if (memcmp(&entry->tuple, &entry->tuple_nat, sizeof(entry->tuple))) { err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_nat_ht, &entry->tuple_nat_node, tuples_nat_ht_params); if (err) goto err_tuple_nat; + + set_bit(MLX5_CT_ENTRY_IN_CT_NAT_TABLE, &entry->flags); + } + + if (!mlx5_tc_ct_entry_in_ct_nat_table(entry)) { + err = rhashtable_lookup_insert_fast(&ct_priv->ct_tuples_ht, + &entry->tuple_node, + tuples_ht_params); + if (err) + goto err_tuple; + + set_bit(MLX5_CT_ENTRY_IN_CT_TABLE, &entry->flags); } spin_unlock_bh(&ct_priv->ht_lock); @@ -1251,17 +1280,10 @@ mlx5_tc_ct_block_flow_offload_add(struct mlx5_ct_ft *ft, err_rules: spin_lock_bh(&ct_priv->ht_lock); - if (mlx5_tc_ct_entry_has_nat(entry)) - rhashtable_remove_fast(&ct_priv->ct_tuples_nat_ht, - &entry->tuple_nat_node, tuples_nat_ht_params); -err_tuple_nat: - rhashtable_remove_fast(&ct_priv->ct_tuples_ht, - &entry->tuple_node, - tuples_ht_params); err_tuple: - rhashtable_remove_fast(&ft->ct_entries_ht, - &entry->node, - cts_ht_params); + mlx5_tc_ct_entry_remove_from_tuples(entry); +err_tuple_nat: + rhashtable_remove_fast(&ft->ct_entries_ht, &entry->node, cts_ht_params); err_entries: spin_unlock_bh(&ct_priv->ht_lock); err_set: @@ -2149,6 +2171,76 @@ mlx5_ct_tc_remove_dbgfs(struct mlx5_tc_ct_priv *ct_priv) debugfs_remove_recursive(ct_priv->debugfs.root); } +static struct mlx5_flow_handle * +tc_ct_add_miss_rule(struct mlx5_flow_table *ft, + struct mlx5_flow_table *next_ft) +{ + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act act = {}; + + act.flags = FLOW_ACT_IGNORE_FLOW_LEVEL | FLOW_ACT_NO_APPEND; + act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + dest.ft = next_ft; + + return mlx5_add_flow_rules(ft, NULL, &act, &dest, 1); +} + +static int +tc_ct_add_ct_table_miss_rule(struct mlx5_flow_table *from, 
+ struct mlx5_flow_table *to, + struct mlx5_flow_group **miss_group, + struct mlx5_flow_handle **miss_rule) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_flow_group *group; + struct mlx5_flow_handle *rule; + unsigned int max_fte = from->max_fte; + u32 *flow_group_in; + int err = 0; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + + /* create miss group */ + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, + max_fte - 2); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, + max_fte - 1); + group = mlx5_create_flow_group(from, flow_group_in); + if (IS_ERR(group)) { + err = PTR_ERR(group); + goto err_miss_grp; + } + + /* add miss rule to next fdb */ + rule = tc_ct_add_miss_rule(from, to); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + goto err_miss_rule; + } + + *miss_group = group; + *miss_rule = rule; + kvfree(flow_group_in); + return 0; + +err_miss_rule: + mlx5_destroy_flow_group(group); +err_miss_grp: + kvfree(flow_group_in); + return err; +} + +static void +tc_ct_del_ct_table_miss_rule(struct mlx5_flow_group *miss_group, + struct mlx5_flow_handle *miss_rule) +{ + mlx5_del_flow_rules(miss_rule); + mlx5_destroy_flow_group(miss_group); +} + #define INIT_ERR_PREFIX "tc ct offload init failed" struct mlx5_tc_ct_priv * @@ -2212,6 +2304,12 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, goto err_ct_nat_tbl; } + err = tc_ct_add_ct_table_miss_rule(ct_priv->ct_nat, ct_priv->ct, + &ct_priv->ct_nat_miss_group, + &ct_priv->ct_nat_miss_rule); + if (err) + goto err_ct_zone_ht; + ct_priv->post_act = post_act; mutex_init(&ct_priv->control_lock); if (rhashtable_init(&ct_priv->zone_ht, &zone_params)) @@ -2273,6 +2371,7 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv) ct_priv->fs_ops->destroy(ct_priv->fs); kfree(ct_priv->fs); + tc_ct_del_ct_table_miss_rule(ct_priv->ct_nat_miss_group, ct_priv->ct_nat_miss_rule); mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat); mlx5_chains_destroy_global_table(chains, ct_priv->ct); mapping_destroy(ct_priv->zone_mapping); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 8dfb57f712b0..721f35e59757 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -850,6 +850,12 @@ int mlx5e_tc_tun_parse(struct net_device *filter_dev, flow_rule_match_enc_control(rule, &match); addr_type = match.key->addr_type; + if (flow_rule_has_enc_control_flags(match.mask->flags, + extack)) { + err = -EOPNOTSUPP; + goto out; + } + /* For tunnel addr_type used same key id`s as for non-tunnel */ if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { struct flow_match_ipv4_addrs match; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 879d698b6119..5ec468268d1a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -6,6 +6,8 @@ #include "en.h" #include +#include +#include #define MLX5E_TX_WQE_EMPTY_DS_COUNT (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) @@ -34,6 +36,25 @@ #define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND) +#define MLX5E_KSM_UMR_WQE_SZ(sgl_len)\ + (sizeof(struct mlx5e_umr_wqe) +\ + (sizeof(struct mlx5_ksm) * (sgl_len))) + +#define MLX5E_KSM_UMR_WQEBBS(ksm_entries) \ + (DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_BB)) + +#define 
MLX5E_KSM_UMR_DS_CNT(ksm_entries)\ + (DIV_ROUND_UP(MLX5E_KSM_UMR_WQE_SZ(ksm_entries), MLX5_SEND_WQE_DS)) + +#define MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size)\ + (((wqe_size) - sizeof(struct mlx5e_umr_wqe)) / sizeof(struct mlx5_ksm)) + +#define MLX5E_KSM_ENTRIES_PER_WQE(wqe_size)\ + ALIGN_DOWN(MLX5E_KSM_MAX_ENTRIES_PER_WQE(wqe_size), MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT) + +#define MLX5E_MAX_KSM_PER_WQE(mdev) \ + MLX5E_KSM_ENTRIES_PER_WQE(MLX5_SEND_WQE_BB * mlx5e_get_max_sq_aligned_wqebbs(mdev)) + static inline ktime_t mlx5e_cqe_ts_to_ns(cqe_ts_to_ns func, struct mlx5_clock *clock, u64 cqe_ts) { @@ -460,6 +481,41 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, } } +static inline void +mlx5e_swp_encap_csum_partial(struct mlx5_core_dev *mdev, struct sk_buff *skb, bool tunnel) +{ + const struct iphdr *ip = tunnel ? inner_ip_hdr(skb) : ip_hdr(skb); + const struct ipv6hdr *ip6; + struct tcphdr *th; + struct udphdr *uh; + int len; + + if (!MLX5_CAP_ETH(mdev, swp_csum_l4_partial) || !skb_is_gso(skb)) + return; + + if (skb_is_gso_tcp(skb)) { + th = inner_tcp_hdr(skb); + len = skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb); + + if (ip->version == 4) { + th->check = ~tcp_v4_check(len, ip->saddr, ip->daddr, 0); + } else { + ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + th->check = ~tcp_v6_check(len, &ip6->saddr, &ip6->daddr, 0); + } + } else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { + uh = (struct udphdr *)skb_inner_transport_header(skb); + len = skb_shinfo(skb)->gso_size + sizeof(struct udphdr); + + if (ip->version == 4) { + uh->check = ~udp_v4_check(len, ip->saddr, ip->daddr, 0); + } else { + ip6 = tunnel ? inner_ipv6_hdr(skb) : ipv6_hdr(skb); + uh->check = ~udp_v6_check(len, &ip6->saddr, &ip6->daddr, 0); + } + } +} + #define MLX5E_STOP_ROOM(wqebbs) ((wqebbs) * 2 - 1) static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index 359050f0b54d..3cc640669247 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -116,6 +116,7 @@ static inline bool mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg) { + struct mlx5_core_dev *mdev = sq->mdev; u8 inner_ipproto; if (!mlx5e_ipsec_eseg_meta(eseg)) @@ -125,9 +126,12 @@ mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, inner_ipproto = xfrm_offload(skb)->inner_ipproto; if (inner_ipproto) { eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM; - if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP) + if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP) { + mlx5e_swp_encap_csum_partial(mdev, skb, true); eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM; + } } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { + mlx5e_swp_encap_csum_partial(mdev, skb, false); eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; sq->stats->csum_partial_inner++; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 3320f12ba2db..279dcb54af14 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -525,7 +525,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, opened = test_bit(MLX5E_STATE_OPENED, &priv->state); - arfs_enabled = opened && 
(priv->netdev->features & NETIF_F_NTUPLE); + arfs_enabled = opened && mlx5e_fs_want_arfs(priv->netdev); if (arfs_enabled) mlx5e_arfs_disable(priv->fs); @@ -1658,7 +1658,7 @@ static int mlx5e_set_pauseparam(struct net_device *netdev, } int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct mlx5_core_dev *mdev = priv->mdev; @@ -1682,7 +1682,7 @@ int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, } static int mlx5e_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct mlx5e_priv *priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 8c5b291a171f..05058710d2c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -1307,8 +1307,7 @@ int mlx5e_create_flow_steering(struct mlx5e_flow_steering *fs, return -EOPNOTSUPP; mlx5e_fs_set_ns(fs, ns, false); - err = mlx5e_arfs_create_tables(fs, rx_res, - !!(netdev->hw_features & NETIF_F_NTUPLE)); + err = mlx5e_arfs_create_tables(fs, rx_res, mlx5e_fs_has_arfs(netdev)); if (err) { fs_err(fs, "Failed to create arfs tables, err=%d\n", err); netdev->hw_features &= ~NETIF_F_NTUPLE; @@ -1355,7 +1354,7 @@ err_destroy_ttc_table: err_destroy_inner_ttc_table: mlx5e_destroy_inner_ttc_table(fs); err_destroy_arfs_tables: - mlx5e_arfs_destroy_tables(fs, !!(netdev->hw_features & NETIF_F_NTUPLE)); + mlx5e_arfs_destroy_tables(fs, mlx5e_fs_has_arfs(netdev)); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index eedbcba22689..6f686fabed44 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -74,6 +75,27 @@ #include "lib/devcom.h" #include "lib/sd.h" +static bool mlx5e_hw_gro_supported(struct mlx5_core_dev *mdev) +{ + if (!MLX5_CAP_GEN(mdev, shampo)) + return false; + + /* Our HW-GRO implementation relies on "KSM Mkey" for + * SHAMPO headers buffer mapping + */ + if (!MLX5_CAP_GEN(mdev, fixed_buffer_size)) + return false; + + if (!MLX5_CAP_GEN_2(mdev, min_mkey_log_entity_size_fixed_buffer_valid)) + return false; + + if (MLX5_CAP_GEN_2(mdev, min_mkey_log_entity_size_fixed_buffer) > + MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE) + return false; + + return true; +} + bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev, u8 page_shift, enum mlx5e_mpwrq_umr_mode umr_mode) { @@ -504,8 +526,8 @@ static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev, return err; } -static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev, - u64 nentries, +static int mlx5e_create_umr_ksm_mkey(struct mlx5_core_dev *mdev, + u64 nentries, u8 log_entry_size, u32 *umr_mkey) { int inlen; @@ -525,12 +547,13 @@ static int mlx5e_create_umr_klm_mkey(struct mlx5_core_dev *mdev, MLX5_SET(mkc, mkc, umr_en, 1); MLX5_SET(mkc, mkc, lw, 1); MLX5_SET(mkc, mkc, lr, 1); - MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KLMS); + MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_KSM); mlx5e_mkey_set_relaxed_ordering(mdev, mkc); MLX5_SET(mkc, mkc, qpn, 0xffffff); MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.hw_objs.pdn); MLX5_SET(mkc, mkc, translations_octword_size, nentries); - MLX5_SET(mkc, mkc, length64, 1); + MLX5_SET(mkc, mkc, log_page_size, log_entry_size); + 
MLX5_SET64(mkc, mkc, len, nentries << log_entry_size); err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen); kvfree(in); @@ -565,14 +588,16 @@ static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq static int mlx5e_create_rq_hd_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq) { - u32 max_klm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size)); + u32 max_ksm_size = BIT(MLX5_CAP_GEN(mdev, log_max_klm_list_size)); - if (max_klm_size < rq->mpwqe.shampo->hd_per_wq) { - mlx5_core_err(mdev, "max klm list size 0x%x is smaller than shampo header buffer list size 0x%x\n", - max_klm_size, rq->mpwqe.shampo->hd_per_wq); + if (max_ksm_size < rq->mpwqe.shampo->hd_per_wq) { + mlx5_core_err(mdev, "max ksm list size 0x%x is smaller than shampo header buffer list size 0x%x\n", + max_ksm_size, rq->mpwqe.shampo->hd_per_wq); return -EINVAL; } - return mlx5e_create_umr_klm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq, + + return mlx5e_create_umr_ksm_mkey(mdev, rq->mpwqe.shampo->hd_per_wq, + MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE, &rq->mpwqe.shampo->mkey); } @@ -1208,15 +1233,6 @@ void mlx5e_free_rx_missing_descs(struct mlx5e_rq *rq) head = mlx5_wq_ll_get_wqe_next_ix(wq, head); } - if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) { - u16 len; - - len = (rq->mpwqe.shampo->pi - rq->mpwqe.shampo->ci) & - (rq->mpwqe.shampo->hd_per_wq - 1); - mlx5e_shampo_dealloc_hd(rq, len, rq->mpwqe.shampo->ci, false); - rq->mpwqe.shampo->pi = rq->mpwqe.shampo->ci; - } - rq->mpwqe.actual_wq_head = wq->head; rq->mpwqe.umr_in_progress = 0; rq->mpwqe.umr_completed = 0; @@ -1244,8 +1260,7 @@ void mlx5e_free_rx_descs(struct mlx5e_rq *rq) } if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) - mlx5e_shampo_dealloc_hd(rq, rq->mpwqe.shampo->hd_per_wq, - 0, true); + mlx5e_shampo_dealloc_hd(rq); } else { struct mlx5_wq_cyc *wq = &rq->wqe.wq; u16 missing = mlx5_wq_cyc_missing(wq); @@ -3111,6 +3126,7 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) struct mlx5e_txqsq *sq = &c->sq[tc]; priv->txq2sq[sq->txq_ix] = sq; + priv->txq2sq_stats[sq->txq_ix] = sq->stats; } } @@ -3125,6 +3141,7 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) struct mlx5e_txqsq *sq = &c->ptpsq[tc].txqsq; priv->txq2sq[sq->txq_ix] = sq; + priv->txq2sq_stats[sq->txq_ix] = sq->stats; } out: @@ -4259,13 +4276,19 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) #define MLX5E_HANDLE_FEATURE(feature, handler) \ mlx5e_handle_feature(netdev, &oper_features, feature, handler) - err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); - err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro); + if (features & (NETIF_F_GRO_HW | NETIF_F_LRO)) { + err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro); + } else { + err |= MLX5E_HANDLE_FEATURE(NETIF_F_LRO, set_feature_lro); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_GRO_HW, set_feature_hw_gro); + err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs); + } err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_FILTER, set_feature_cvlan_filter); err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_TC, set_feature_hw_tc); err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXALL, set_feature_rx_all); - err |= MLX5E_HANDLE_FEATURE(NETIF_F_RXFCS, set_feature_rx_fcs); err |= MLX5E_HANDLE_FEATURE(NETIF_F_HW_VLAN_CTAG_RX, set_feature_rx_vlan); #ifdef CONFIG_MLX5_EN_ARFS err |= MLX5E_HANDLE_FEATURE(NETIF_F_NTUPLE, set_feature_arfs); @@ -4890,7 +4913,7 @@ 
static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv, } out: - /* Disable CSUM and GSO if the udp dport is not offloaded by HW */ + /* Disable CSUM and GSO if skb cannot be offloaded by HW */ return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } @@ -5276,6 +5299,136 @@ static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev) return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)); } +static void mlx5e_get_queue_stats_rx(struct net_device *dev, int i, + struct netdev_queue_stats_rx *stats) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_channel_stats *channel_stats; + struct mlx5e_rq_stats *xskrq_stats; + struct mlx5e_rq_stats *rq_stats; + + ASSERT_RTNL(); + if (mlx5e_is_uplink_rep(priv)) + return; + + channel_stats = priv->channel_stats[i]; + xskrq_stats = &channel_stats->xskrq; + rq_stats = &channel_stats->rq; + + stats->packets = rq_stats->packets + xskrq_stats->packets; + stats->bytes = rq_stats->bytes + xskrq_stats->bytes; + stats->alloc_fail = rq_stats->buff_alloc_err + + xskrq_stats->buff_alloc_err; +} + +static void mlx5e_get_queue_stats_tx(struct net_device *dev, int i, + struct netdev_queue_stats_tx *stats) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_sq_stats *sq_stats; + + ASSERT_RTNL(); + /* no special case needed for ptp htb etc since txq2sq_stats is kept up + * to date for active sq_stats, otherwise get_base_stats takes care of + * inactive sqs. + */ + sq_stats = priv->txq2sq_stats[i]; + stats->packets = sq_stats->packets; + stats->bytes = sq_stats->bytes; +} + +static void mlx5e_get_base_stats(struct net_device *dev, + struct netdev_queue_stats_rx *rx, + struct netdev_queue_stats_tx *tx) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_ptp *ptp_channel; + int i, tc; + + ASSERT_RTNL(); + if (!mlx5e_is_uplink_rep(priv)) { + rx->packets = 0; + rx->bytes = 0; + rx->alloc_fail = 0; + + for (i = priv->channels.params.num_channels; i < priv->stats_nch; i++) { + struct netdev_queue_stats_rx rx_i = {0}; + + mlx5e_get_queue_stats_rx(dev, i, &rx_i); + + rx->packets += rx_i.packets; + rx->bytes += rx_i.bytes; + rx->alloc_fail += rx_i.alloc_fail; + } + + /* always report PTP RX stats from base as there is no + * corresponding channel to report them under in + * mlx5e_get_queue_stats_rx. + */ + if (priv->rx_ptp_opened) { + struct mlx5e_rq_stats *rq_stats = &priv->ptp_stats.rq; + + rx->packets += rq_stats->packets; + rx->bytes += rq_stats->bytes; + } + } + + tx->packets = 0; + tx->bytes = 0; + + for (i = 0; i < priv->stats_nch; i++) { + struct mlx5e_channel_stats *channel_stats = priv->channel_stats[i]; + + /* handle two cases: + * + * 1. channels which are active. In this case, + * report only deactivated TCs on these channels. + * + * 2. channels which were deactivated + * (i > priv->channels.params.num_channels) + * must have all of their TCs [0 .. priv->max_opened_tc) + * examined because deactivated channels will not be in the + * range of [0..real_num_tx_queues) and will not have their + * stats reported by mlx5e_get_queue_stats_tx. 
+ */ + if (i < priv->channels.params.num_channels) + tc = mlx5e_get_dcb_num_tc(&priv->channels.params); + else + tc = 0; + + for (; tc < priv->max_opened_tc; tc++) { + struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[tc]; + + tx->packets += sq_stats->packets; + tx->bytes += sq_stats->bytes; + } + } + + /* if PTP TX was opened at some point and has since either: + * - been shutdown and set to NULL, or + * - simply disabled (bit unset) + * + * report stats directly from the ptp_stats structures as these queues + * are now unavailable and there is no txq index to retrieve these + * stats via calls to mlx5e_get_queue_stats_tx. + */ + ptp_channel = priv->channels.ptp; + if (priv->tx_ptp_opened && (!ptp_channel || !test_bit(MLX5E_PTP_STATE_TX, ptp_channel->state))) { + for (tc = 0; tc < priv->max_opened_tc; tc++) { + struct mlx5e_sq_stats *sq_stats = &priv->ptp_stats.sq[tc]; + + tx->packets += sq_stats->packets; + tx->bytes += sq_stats->bytes; + } + } +} + +static const struct netdev_stat_ops mlx5e_stat_ops = { + .get_queue_stats_rx = mlx5e_get_queue_stats_rx, + .get_queue_stats_tx = mlx5e_get_queue_stats_tx, + .get_base_stats = mlx5e_get_base_stats, +}; + static void mlx5e_build_nic_netdev(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -5293,6 +5446,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->watchdog_timeo = 15 * HZ; + netdev->stat_ops = &mlx5e_stat_ops; netdev->ethtool_ops = &mlx5e_ethtool_ops; netdev->vlan_features |= NETIF_F_SG; @@ -5331,6 +5485,11 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->hw_features |= NETIF_F_HW_VLAN_STAG_TX; + if (mlx5e_hw_gro_supported(mdev) && + mlx5e_check_fragmented_striding_rq_cap(mdev, PAGE_SHIFT, + MLX5E_MPWRQ_UMR_MODE_ALIGNED)) + netdev->hw_features |= NETIF_F_GRO_HW; + if (mlx5e_tunnel_any_tx_proto_supported(mdev)) { netdev->hw_enc_features |= NETIF_F_HW_CSUM; netdev->hw_enc_features |= NETIF_F_TSO; @@ -5397,8 +5556,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) netdev->hw_features |= NETIF_F_HW_TC; #endif -#ifdef CONFIG_MLX5_EN_ARFS +#if IS_ENABLED(CONFIG_MLX5_EN_ARFS) netdev->hw_features |= NETIF_F_NTUPLE; +#elif IS_ENABLED(CONFIG_MLX5_EN_RXNFC) + netdev->features |= NETIF_F_NTUPLE; #endif } @@ -5572,7 +5733,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) err_tc_nic_cleanup: mlx5e_tc_nic_cleanup(priv); err_destroy_flow_steering: - mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE), + mlx5e_destroy_flow_steering(priv->fs, mlx5e_fs_has_arfs(priv->netdev), priv->profile); err_destroy_rx_res: mlx5e_rx_res_destroy(priv->rx_res); @@ -5588,7 +5749,7 @@ static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) { mlx5e_accel_cleanup_rx(priv); mlx5e_tc_nic_cleanup(priv); - mlx5e_destroy_flow_steering(priv->fs, !!(priv->netdev->hw_features & NETIF_F_NTUPLE), + mlx5e_destroy_flow_steering(priv->fs, mlx5e_fs_has_arfs(priv->netdev), priv->profile); mlx5e_rx_res_destroy(priv->rx_res); priv->rx_res = NULL; @@ -5823,9 +5984,13 @@ int mlx5e_priv_init(struct mlx5e_priv *priv, if (!priv->txq2sq) goto err_destroy_workqueue; + priv->txq2sq_stats = kcalloc_node(num_txqs, sizeof(*priv->txq2sq_stats), GFP_KERNEL, node); + if (!priv->txq2sq_stats) + goto err_free_txq2sq; + priv->tx_rates = kcalloc_node(num_txqs, sizeof(*priv->tx_rates), GFP_KERNEL, node); if (!priv->tx_rates) - goto err_free_txq2sq; + goto err_free_txq2sq_stats; 
priv->channel_stats = kcalloc_node(nch, sizeof(*priv->channel_stats), GFP_KERNEL, node); @@ -5836,6 +6001,8 @@ int mlx5e_priv_init(struct mlx5e_priv *priv, err_free_tx_rates: kfree(priv->tx_rates); +err_free_txq2sq_stats: + kfree(priv->txq2sq_stats); err_free_txq2sq: kfree(priv->txq2sq); err_destroy_workqueue: @@ -5859,6 +6026,7 @@ void mlx5e_priv_cleanup(struct mlx5e_priv *priv) kvfree(priv->channel_stats[i]); kfree(priv->channel_stats); kfree(priv->tx_rates); + kfree(priv->txq2sq_stats); kfree(priv->txq2sq); destroy_workqueue(priv->wq); mlx5e_selq_cleanup(&priv->selq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index b5333da20e8a..225da8d691fc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -523,15 +523,23 @@ mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinf static inline void mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb, - struct page *page, u32 frag_offset, u32 len, + struct mlx5e_frag_page *frag_page, + u32 frag_offset, u32 len, unsigned int truesize) { - dma_addr_t addr = page_pool_get_dma_addr(page); + dma_addr_t addr = page_pool_get_dma_addr(frag_page->page); + u8 next_frag = skb_shinfo(skb)->nr_frags; dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir); - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, - page, frag_offset, len, truesize); + + if (skb_can_coalesce(skb, next_frag, frag_page->page, frag_offset)) { + skb_coalesce_rx_frag(skb, next_frag - 1, len, truesize); + } else { + frag_page->frags++; + skb_add_rx_frag(skb, next_frag, frag_page->page, + frag_offset, len, truesize); + } } static inline void @@ -619,25 +627,25 @@ static int bitmap_find_window(unsigned long *bitmap, int len, return min(len, count); } -static void build_klm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe, - __be32 key, u16 offset, u16 klm_len, u16 wqe_bbs) +static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe, + __be32 key, u16 offset, u16 ksm_len) { - memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_klms)); + memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_ksms)); umr_wqe->ctrl.opmod_idx_opcode = cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_UMR); umr_wqe->ctrl.umr_mkey = key; umr_wqe->ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) - | MLX5E_KLM_UMR_DS_CNT(klm_len)); + | MLX5E_KSM_UMR_DS_CNT(ksm_len)); umr_wqe->uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE; umr_wqe->uctrl.xlt_offset = cpu_to_be16(offset); - umr_wqe->uctrl.xlt_octowords = cpu_to_be16(klm_len); + umr_wqe->uctrl.xlt_octowords = cpu_to_be16(ksm_len); umr_wqe->uctrl.mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE); } static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, struct mlx5e_icosq *sq, - u16 klm_entries, u16 index) + u16 ksm_entries, u16 index) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; u16 entries, pi, header_offset, err, wqe_bbs, new_entries; @@ -650,20 +658,20 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, int headroom, i; headroom = rq->buff.headroom; - new_entries = klm_entries - (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1)); - entries = ALIGN(klm_entries, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT); - wqe_bbs = MLX5E_KLM_UMR_WQEBBS(entries); + new_entries = ksm_entries - (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1)); + entries = ALIGN(ksm_entries, 
MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT); + wqe_bbs = MLX5E_KSM_UMR_WQEBBS(entries); pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs); umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi); - build_klm_umr(sq, umr_wqe, shampo->key, index, entries, wqe_bbs); + build_ksm_umr(sq, umr_wqe, shampo->key, index, entries); frag_page = &shampo->pages[page_index]; for (i = 0; i < entries; i++, index++) { dma_info = &shampo->info[index]; - if (i >= klm_entries || (index < shampo->pi && shampo->pi - index < - MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT)) - goto update_klm; + if (i >= ksm_entries || (index < shampo->pi && shampo->pi - index < + MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT)) + goto update_ksm; header_offset = (index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) << MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE; if (!(header_offset & (PAGE_SIZE - 1))) { @@ -683,12 +691,11 @@ static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq, dma_info->frag_page = frag_page; } -update_klm: - umr_wqe->inline_klms[i].bcount = - cpu_to_be32(MLX5E_RX_MAX_HEAD); - umr_wqe->inline_klms[i].key = cpu_to_be32(lkey); - umr_wqe->inline_klms[i].va = - cpu_to_be64(dma_info->addr + headroom); +update_ksm: + umr_wqe->inline_ksms[i] = (struct mlx5_ksm) { + .key = cpu_to_be32(lkey), + .va = cpu_to_be64(dma_info->addr + headroom), + }; } sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) { @@ -720,37 +727,37 @@ err_unmap: static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - u16 klm_entries, num_wqe, index, entries_before; + u16 ksm_entries, num_wqe, index, entries_before; struct mlx5e_icosq *sq = rq->icosq; - int i, err, max_klm_entries, len; + int i, err, max_ksm_entries, len; - max_klm_entries = MLX5E_MAX_KLM_PER_WQE(rq->mdev); - klm_entries = bitmap_find_window(shampo->bitmap, + max_ksm_entries = MLX5E_MAX_KSM_PER_WQE(rq->mdev); + ksm_entries = bitmap_find_window(shampo->bitmap, shampo->hd_per_wqe, shampo->hd_per_wq, shampo->pi); - if (!klm_entries) + if (!ksm_entries) return 0; - klm_entries += (shampo->pi & (MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT - 1)); - index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT); + ksm_entries += (shampo->pi & (MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT - 1)); + index = ALIGN_DOWN(shampo->pi, MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT); entries_before = shampo->hd_per_wq - index; - if (unlikely(entries_before < klm_entries)) - num_wqe = DIV_ROUND_UP(entries_before, max_klm_entries) + - DIV_ROUND_UP(klm_entries - entries_before, max_klm_entries); + if (unlikely(entries_before < ksm_entries)) + num_wqe = DIV_ROUND_UP(entries_before, max_ksm_entries) + + DIV_ROUND_UP(ksm_entries - entries_before, max_ksm_entries); else - num_wqe = DIV_ROUND_UP(klm_entries, max_klm_entries); + num_wqe = DIV_ROUND_UP(ksm_entries, max_ksm_entries); for (i = 0; i < num_wqe; i++) { - len = (klm_entries > max_klm_entries) ? max_klm_entries : - klm_entries; + len = (ksm_entries > max_ksm_entries) ? max_ksm_entries : + ksm_entries; if (unlikely(index + len > shampo->hd_per_wq)) len = shampo->hd_per_wq - index; err = mlx5e_build_shampo_hd_umr(rq, sq, len, index); if (unlikely(err)) return err; index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1); - klm_entries -= len; + ksm_entries -= len; } return 0; @@ -839,44 +846,28 @@ err: return err; } -/* This function is responsible to dealloc SHAMPO header buffer. 
- * close == true specifies that we are in the middle of closing RQ operation so - * we go over all the entries and if they are not in use we free them, - * otherwise we only go over a specific range inside the header buffer that are - * not in use. - */ -void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq, u16 len, u16 start, bool close) +static void +mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index) { struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - struct mlx5e_frag_page *deleted_page = NULL; - int hd_per_wq = shampo->hd_per_wq; - struct mlx5e_dma_info *hd_info; - int i, index = start; + u64 addr = shampo->info[header_index].addr; - for (i = 0; i < len; i++, index++) { - if (index == hd_per_wq) - index = 0; + if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) { + struct mlx5e_dma_info *dma_info = &shampo->info[header_index]; - if (close && !test_bit(index, shampo->bitmap)) - continue; - - hd_info = &shampo->info[index]; - hd_info->addr = ALIGN_DOWN(hd_info->addr, PAGE_SIZE); - if (hd_info->frag_page && hd_info->frag_page != deleted_page) { - deleted_page = hd_info->frag_page; - mlx5e_page_release_fragmented(rq, hd_info->frag_page); - } - - hd_info->frag_page = NULL; + dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE); + mlx5e_page_release_fragmented(rq, dma_info->frag_page); } + clear_bit(header_index, shampo->bitmap); +} - if (start + len > hd_per_wq) { - len -= hd_per_wq - start; - bitmap_clear(shampo->bitmap, start, hd_per_wq - start); - start = 0; - } +void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq) +{ + struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; + int i; - bitmap_clear(shampo->bitmap, start, len); + for_each_set_bit(i, shampo->bitmap, rq->mpwqe.shampo->hd_per_wq) + mlx5e_free_rx_shampo_hd_entry(rq, i); } static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix) @@ -1191,9 +1182,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, check = csum_partial(tcp, tcp->doff * 4, csum_unfold((__force __sum16)cqe->check_sum)); /* Almost done, don't forget the pseudo header */ - tcp->check = csum_tcpudp_magic(ipv4->saddr, ipv4->daddr, - tot_len - sizeof(struct iphdr), - IPPROTO_TCP, check); + tcp->check = tcp_v4_check(tot_len - sizeof(struct iphdr), + ipv4->saddr, ipv4->daddr, check); } else { u16 payload_len = tot_len - sizeof(struct ipv6hdr); struct ipv6hdr *ipv6 = ip_p; @@ -1208,8 +1198,8 @@ static void mlx5e_lro_update_hdr(struct sk_buff *skb, struct mlx5_cqe64 *cqe, check = csum_partial(tcp, tcp->doff * 4, csum_unfold((__force __sum16)cqe->check_sum)); /* Almost done, don't forget the pseudo header */ - tcp->check = csum_ipv6_magic(&ipv6->saddr, &ipv6->daddr, payload_len, - IPPROTO_TCP, check); + tcp->check = tcp_v6_check(payload_len, &ipv6->saddr, + &ipv6->daddr, check); } } @@ -1612,9 +1602,7 @@ static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq, struct mlx5e_rq_stats *stats = rq->stats; stats->packets++; - stats->gro_packets++; stats->bytes += cqe_bcnt; - stats->gro_bytes += cqe_bcnt; if (NAPI_GRO_CB(skb)->count != 1) return; mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb); @@ -1964,30 +1952,24 @@ const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = { #endif static void -mlx5e_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, - struct mlx5e_frag_page *frag_page, - u32 data_bcnt, u32 data_offset) +mlx5e_shampo_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq, + struct mlx5e_frag_page *frag_page, + u32 data_bcnt, u32 data_offset) { net_prefetchw(skb->data); - while (data_bcnt) { + do { /* 
Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */ u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt); - unsigned int truesize; + unsigned int truesize = pg_consumed_bytes; - if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) - truesize = pg_consumed_bytes; - else - truesize = ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz)); - - frag_page->frags++; - mlx5e_add_skb_frag(rq, skb, frag_page->page, data_offset, + mlx5e_add_skb_frag(rq, skb, frag_page, data_offset, pg_consumed_bytes, truesize); data_bcnt -= pg_consumed_bytes; data_offset = 0; frag_page++; - } + } while (data_bcnt); } static struct sk_buff * @@ -2212,8 +2194,8 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) { /* build SKB around header */ dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir); - prefetchw(hdr); - prefetch(data); + net_prefetchw(hdr); + net_prefetch(data); skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0); if (unlikely(!skb)) @@ -2230,7 +2212,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, return NULL; } - prefetchw(skb->data); + net_prefetchw(skb->data); mlx5e_copy_skb_header(rq, skb, head->frag_page->page, head->addr, head_offset + rx_headroom, rx_headroom, head_size); @@ -2261,12 +2243,19 @@ mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match) { struct sk_buff *skb = rq->hw_gro_data->skb; struct mlx5e_rq_stats *stats = rq->stats; + u16 gro_count = NAPI_GRO_CB(skb)->count; - stats->gro_skbs++; if (likely(skb_shinfo(skb)->nr_frags)) mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz); - if (NAPI_GRO_CB(skb)->count > 1) + if (gro_count > 1) { + stats->gro_skbs++; + stats->gro_packets += gro_count; + stats->gro_bytes += skb->data_len + skb_headlen(skb) * gro_count; + mlx5e_shampo_update_hdr(rq, cqe, match); + } else { + skb_shinfo(skb)->gso_size = 0; + } napi_gro_receive(rq->cq.napi, skb); rq->hw_gro_data->skb = NULL; } @@ -2279,21 +2268,6 @@ mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt) return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE; } -static void -mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index) -{ - struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo; - u64 addr = shampo->info[header_index].addr; - - if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) { - struct mlx5e_dma_info *dma_info = &shampo->info[header_index]; - - dma_info->addr = ALIGN_DOWN(addr, PAGE_SIZE); - mlx5e_page_release_fragmented(rq, dma_info->frag_page); - } - bitmap_clear(shampo->bitmap, header_index, 1); -} - static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size; @@ -2327,8 +2301,6 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq goto mpwrq_cqe_out; } - stats->gro_match_packets += match; - if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) { match = false; mlx5e_shampo_flush_skb(rq, cqe, match); @@ -2359,21 +2331,30 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq } if (likely(head_size)) { - struct mlx5e_frag_page *frag_page; + if (data_bcnt) { + struct mlx5e_frag_page *frag_page; - frag_page = &wi->alloc_units.frag_pages[page_idx]; - mlx5e_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset); + 
frag_page = &wi->alloc_units.frag_pages[page_idx]; + mlx5e_shampo_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset); + } else { + stats->hds_nodata_packets++; + stats->hds_nodata_bytes += head_size; + } } mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb); - if (flush) + if (flush && rq->hw_gro_data->skb) mlx5e_shampo_flush_skb(rq, cqe, match); free_hd_entry: - mlx5e_free_rx_shampo_hd_entry(rq, header_index); + if (likely(head_size)) + mlx5e_free_rx_shampo_hd_entry(rq, header_index); mpwrq_cqe_out: if (likely(wi->consumed_strides < rq->mpwqe.num_strides)) return; + if (unlikely(!cstrides)) + return; + wq = &rq->mpwqe.wq; wqe = mlx5_wq_ll_get_wqe(wq, wqe_id); mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index e1ed214e8651..e7a3290a708a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -141,8 +141,9 @@ static const struct counter_desc sw_stats_desc[] = { { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_skbs) }, - { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_match_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_gro_large_hds) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_packets) }, + { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_hds_nodata_bytes) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_ecn_mark) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_removed_vlan_packets) }, { MLX5E_DECLARE_STAT(struct mlx5e_sw_stats, rx_csum_unnecessary) }, @@ -343,8 +344,9 @@ static void mlx5e_stats_grp_sw_update_stats_rq_stats(struct mlx5e_sw_stats *s, s->rx_gro_packets += rq_stats->gro_packets; s->rx_gro_bytes += rq_stats->gro_bytes; s->rx_gro_skbs += rq_stats->gro_skbs; - s->rx_gro_match_packets += rq_stats->gro_match_packets; s->rx_gro_large_hds += rq_stats->gro_large_hds; + s->rx_hds_nodata_packets += rq_stats->hds_nodata_packets; + s->rx_hds_nodata_bytes += rq_stats->hds_nodata_bytes; s->rx_ecn_mark += rq_stats->ecn_mark; s->rx_removed_vlan_packets += rq_stats->removed_vlan_packets; s->rx_csum_none += rq_stats->csum_none; @@ -2057,8 +2059,9 @@ static const struct counter_desc rq_stats_desc[] = { { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_bytes) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_skbs) }, - { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_match_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, gro_large_hds) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_packets) }, + { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, hds_nodata_bytes) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, ecn_mark) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, removed_vlan_packets) }, { MLX5E_DECLARE_RX_STAT(struct mlx5e_rq_stats, wqe_err) }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 650732288616..4c5858c1dd82 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -153,8 +153,9 @@ struct mlx5e_sw_stats { u64 rx_gro_packets; u64 rx_gro_bytes; u64 rx_gro_skbs; - u64 rx_gro_match_packets; u64 rx_gro_large_hds; + u64 rx_hds_nodata_packets; + u64 rx_hds_nodata_bytes; u64 rx_mcast_packets; 
u64 rx_ecn_mark; u64 rx_removed_vlan_packets; @@ -352,8 +353,9 @@ struct mlx5e_rq_stats { u64 gro_packets; u64 gro_bytes; u64 gro_skbs; - u64 gro_match_packets; u64 gro_large_hds; + u64 hds_nodata_packets; + u64 hds_nodata_bytes; u64 mcast_packets; u64 ecn_mark; u64 removed_vlan_packets; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index ac1565c0c8af..cb7e7e4104af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -714,7 +714,7 @@ err2: err1: mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); - mlx5_ctrl_irq_release(table->ctrl_irq); + mlx5_ctrl_irq_release(dev, table->ctrl_irq); return err; } @@ -730,7 +730,7 @@ static void destroy_async_eqs(struct mlx5_core_dev *dev) cleanup_async_eq(dev, &table->cmd_eq, "cmd"); mlx5_cmd_allowed_opcode(dev, CMD_ALLOWED_OPCODE_ALL); mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); - mlx5_ctrl_irq_release(table->ctrl_irq); + mlx5_ctrl_irq_release(dev, table->ctrl_irq); } struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev) @@ -918,7 +918,7 @@ static int comp_irq_request_sf(struct mlx5_core_dev *dev, u16 vecidx) af_desc.is_managed = 1; cpumask_copy(&af_desc.mask, cpu_online_mask); cpumask_andnot(&af_desc.mask, &af_desc.mask, &table->used_cpus); - irq = mlx5_irq_affinity_request(pool, &af_desc); + irq = mlx5_irq_affinity_request(dev, pool, &af_desc); if (IS_ERR(irq)) return PTR_ERR(irq); @@ -1187,7 +1187,6 @@ static int get_num_eqs(struct mlx5_core_dev *dev) { struct mlx5_eq_table *eq_table = dev->priv.eq_table; int max_dev_eqs; - int max_eqs_sf; int num_eqs; /* If ethernet is disabled we use just a single completion vector to @@ -1202,7 +1201,11 @@ static int get_num_eqs(struct mlx5_core_dev *dev) num_eqs = min_t(int, mlx5_irq_table_get_num_comp(eq_table->irq_table), max_dev_eqs - MLX5_MAX_ASYNC_EQS); if (mlx5_core_is_sf(dev)) { - max_eqs_sf = min_t(int, MLX5_COMP_EQS_PER_SF, + int max_eqs_sf = MLX5_CAP_GEN_2(dev, sf_eq_usage) ? 
+ MLX5_CAP_GEN_2(dev, max_num_eqs_24b) : + MLX5_COMP_EQS_PER_SF; + + max_eqs_sf = min_t(int, max_eqs_sf, mlx5_irq_table_get_sfs_vec(eq_table->irq_table)); num_eqs = min_t(int, num_eqs, max_eqs_sf); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c index d2ebe56c3977..20146a2dc7f4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -531,7 +531,7 @@ static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type) switch (type) { case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR: return MLX5_CAP_QOS(dev, esw_element_type) & - ELEMENT_TYPE_CAP_MASK_TASR; + ELEMENT_TYPE_CAP_MASK_TSAR; case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT: return MLX5_CAP_QOS(dev, esw_element_type) & ELEMENT_TYPE_CAP_MASK_VPORT; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index 88745dc6aed5..578466d69f21 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -223,6 +223,7 @@ struct mlx5_vport { u16 vport; bool enabled; + bool max_eqs_set; enum mlx5_eswitch_vport_event enabled_events; int index; struct mlx5_devlink_port *dl_port; @@ -579,6 +580,8 @@ int mlx5_devlink_port_fn_max_io_eqs_get(struct devlink_port *port, int mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs, struct netlink_ext_ack *extack); +int mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port, + struct netlink_ext_ack *extack); void *mlx5_eswitch_get_uplink_priv(struct mlx5_eswitch *esw, u8 rep_type); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 72949cb85244..768199d2255a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -68,6 +68,7 @@ #define MLX5_ESW_FT_OFFLOADS_DROP_RULE (1) #define MLX5_ESW_MAX_CTRL_EQS 4 +#define MLX5_ESW_DEFAULT_SF_COMP_EQS 8 static struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_mirror_ns = { .max_fte = MLX5_ESW_VPORT_TBL_SIZE, @@ -4676,13 +4677,25 @@ mlx5_devlink_port_fn_max_io_eqs_set(struct devlink_port *port, u32 max_io_eqs, hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability); MLX5_SET(cmd_hca_cap_2, hca_caps, max_num_eqs_24b, max_eqs); + if (mlx5_esw_is_sf_vport(esw, vport_num)) + MLX5_SET(cmd_hca_cap_2, hca_caps, sf_eq_usage, 1); + err = mlx5_vport_set_other_func_cap(esw->dev, hca_caps, vport_num, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE2); if (err) NL_SET_ERR_MSG_MOD(extack, "Failed setting HCA caps"); - + vport->max_eqs_set = true; out: mutex_unlock(&esw->state_lock); kfree(query_ctx); return err; } + +int +mlx5_devlink_port_fn_max_io_eqs_set_sf_default(struct devlink_port *port, + struct netlink_ext_ack *extack) +{ + return mlx5_devlink_port_fn_max_io_eqs_set(port, + MLX5_ESW_DEFAULT_SF_COMP_EQS, + extack); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 32cdacc34a0d..a47d6419160d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -3353,9 +3353,9 @@ static int mlx5_fs_mode_get(struct devlink *devlink, u32 id, struct mlx5_core_dev *dev = devlink_priv(devlink); if (dev->priv.steering->mode == MLX5_FLOW_STEERING_MODE_SMFS) - strcpy(ctx->val.vstr, "smfs"); + 
strscpy(ctx->val.vstr, "smfs", sizeof(ctx->val.vstr)); else - strcpy(ctx->val.vstr, "dmfs"); + strscpy(ctx->val.vstr, "dmfs", sizeof(ctx->val.vstr)); return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index 779d92b762d3..905bdbaffb9a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -136,7 +136,7 @@ static int mlx5i_get_coalesce(struct net_device *netdev, } static int mlx5i_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 8e0404c0d1ca..0979d672d47f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -372,7 +372,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) mlx5e_fs_set_ns(priv->fs, ns, false); err = mlx5e_arfs_create_tables(priv->fs, priv->rx_res, - !!(priv->netdev->hw_features & NETIF_F_NTUPLE)); + mlx5e_fs_has_arfs(priv->netdev)); if (err) { netdev_err(priv->netdev, "Failed to create arfs tables, err=%d\n", err); @@ -391,8 +391,7 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) return 0; err_destroy_arfs_tables: - mlx5e_arfs_destroy_tables(priv->fs, - !!(priv->netdev->hw_features & NETIF_F_NTUPLE)); + mlx5e_arfs_destroy_tables(priv->fs, mlx5e_fs_has_arfs(priv->netdev)); return err; } @@ -400,8 +399,7 @@ err_destroy_arfs_tables: static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) { mlx5e_destroy_ttc_table(priv->fs); - mlx5e_arfs_destroy_tables(priv->fs, - !!(priv->netdev->hw_features & NETIF_F_NTUPLE)); + mlx5e_arfs_destroy_tables(priv->fs, mlx5e_fs_has_arfs(priv->netdev)); mlx5e_ethtool_cleanup_steering(priv->fs); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c index 612e666ec263..f7b01b3f0cba 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/irq_affinity.c @@ -112,15 +112,18 @@ irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, const struct cpumask *req /** * mlx5_irq_affinity_request - request an IRQ according to the given mask. + * @dev: mlx5 core device which is requesting the IRQ. * @pool: IRQ pool to request from. * @af_desc: affinity descriptor for this IRQ. * * This function returns a pointer to IRQ, or ERR_PTR in case of error. 
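 * An illustrative call sequence, mirroring the comp_irq_request_sf() caller
 * in eq.c above (a sketch of typical use, not an additional API):
 *
 *	struct irq_affinity_desc af_desc;
 *	struct mlx5_irq *irq;
 *
 *	af_desc.is_managed = 1;
 *	cpumask_copy(&af_desc.mask, cpu_online_mask);
 *	irq = mlx5_irq_affinity_request(dev, pool, &af_desc);
 *	if (IS_ERR(irq))
 *		return PTR_ERR(irq);
 *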
*/ struct mlx5_irq * -mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc) +mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool, + struct irq_affinity_desc *af_desc) { struct mlx5_irq *least_loaded_irq, *new_irq; + int ret; mutex_lock(&pool->lock); least_loaded_irq = irq_pool_find_least_loaded(pool, &af_desc->mask); @@ -153,6 +156,16 @@ out: mlx5_irq_read_locked(least_loaded_irq) / MLX5_EQ_REFS_PER_IRQ); unlock: mutex_unlock(&pool->lock); + if (mlx5_irq_pool_is_sf_pool(pool)) { + ret = auxiliary_device_sysfs_irq_add(mlx5_sf_coredev_to_adev(dev), + mlx5_irq_get_irq(least_loaded_irq)); + if (ret) { + mlx5_core_err(dev, "Failed to create sysfs entry for irq %d, ret = %d\n", + mlx5_irq_get_irq(least_loaded_irq), ret); + mlx5_irq_put(least_loaded_irq); + least_loaded_irq = ERR_PTR(ret); + } + } return least_loaded_irq; } @@ -164,6 +177,9 @@ void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *i cpu = cpumask_first(mlx5_irq_get_affinity_mask(irq)); synchronize_irq(pci_irq_vector(pool->dev->pdev, mlx5_irq_get_index(irq))); + if (mlx5_irq_pool_is_sf_pool(pool)) + auxiliary_device_sysfs_irq_remove(mlx5_sf_coredev_to_adev(dev), + mlx5_irq_get_irq(irq)); if (mlx5_irq_put(irq)) if (pool->irqs_per_cpu) cpu_put(pool, cpu); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 459a836a5d9c..527da58c7953 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1819,6 +1819,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) mutex_init(&dev->intf_state_mutex); lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key); mutex_init(&dev->mlx5e_res.uplink_netdev_lock); + mutex_init(&dev->wc_state_lock); mutex_init(&priv->bfregs.reg_head.lock); mutex_init(&priv->bfregs.wc_head.lock); @@ -1916,6 +1917,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev) mutex_destroy(&priv->alloc_mutex); mutex_destroy(&priv->bfregs.wc_head.lock); mutex_destroy(&priv->bfregs.reg_head.lock); + mutex_destroy(&dev->wc_state_lock); mutex_destroy(&dev->mlx5e_res.uplink_netdev_lock); mutex_destroy(&dev->intf_state_mutex); lockdep_unregister_key(&dev->lock_key); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index a7fd18888b6e..62c770b0eaa8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -320,6 +320,12 @@ static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev) return dev->coredev_type == MLX5_COREDEV_SF; } +static inline struct auxiliary_device * +mlx5_sf_coredev_to_adev(struct mlx5_core_dev *mdev) +{ + return container_of(mdev->device, struct auxiliary_device, dev); +} + int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx); void mlx5_mdev_uninit(struct mlx5_core_dev *dev); int mlx5_init_one(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h index 1088114e905d..0881e961d8b1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h @@ -25,7 +25,7 @@ int mlx5_set_msix_vec_count(struct mlx5_core_dev *dev, int devfn, int mlx5_get_default_msix_vec_count(struct mlx5_core_dev *dev, int num_vfs); struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev); -void mlx5_ctrl_irq_release(struct 
mlx5_irq *ctrl_irq); +void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq); struct mlx5_irq *mlx5_irq_request(struct mlx5_core_dev *dev, u16 vecidx, struct irq_affinity_desc *af_desc, struct cpu_rmap **rmap); @@ -36,13 +36,15 @@ int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb); int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb); struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq); int mlx5_irq_get_index(struct mlx5_irq *irq); +int mlx5_irq_get_irq(const struct mlx5_irq *irq); struct mlx5_irq_pool; #ifdef CONFIG_MLX5_SF struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev, struct cpumask *used_cpus, u16 vecidx); -struct mlx5_irq *mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, - struct irq_affinity_desc *af_desc); +struct mlx5_irq * +mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool, + struct irq_affinity_desc *af_desc); void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq); #else static inline struct mlx5_irq *mlx5_irq_affinity_irq_request_auto(struct mlx5_core_dev *dev, } static inline struct mlx5_irq * -mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc *af_desc) +mlx5_irq_affinity_request(struct mlx5_core_dev *dev, struct mlx5_irq_pool *pool, + struct irq_affinity_desc *af_desc) { return ERR_PTR(-EOPNOTSUPP); } @@ -61,6 +64,7 @@ mlx5_irq_affinity_request(struct mlx5_irq_pool *pool, struct irq_affinity_desc * static inline void mlx5_irq_affinity_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *irq) { + mlx5_irq_release_vector(irq); } #endif #endif /* __MLX5_IRQ_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 401d39069680..81a9232a03e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -16,6 +16,7 @@ #endif #define MLX5_SFS_PER_CTRL_IRQ 64 +#define MLX5_MAX_MSIX_PER_SF 256 #define MLX5_IRQ_CTRL_SF_MAX 8 /* min num of vectors for SFs to be enabled */ #define MLX5_IRQ_VEC_COMP_BASE_SF 2 @@ -367,6 +368,11 @@ struct cpumask *mlx5_irq_get_affinity_mask(struct mlx5_irq *irq) return irq->mask; } +int mlx5_irq_get_irq(const struct mlx5_irq *irq) +{ + return irq->map.virq; +} + int mlx5_irq_get_index(struct mlx5_irq *irq) { return irq->map.index; @@ -440,11 +446,12 @@ static void _mlx5_irq_release(struct mlx5_irq *irq) /** * mlx5_ctrl_irq_release - release a ctrl IRQ back to the system. + * @dev: mlx5 device that is releasing the IRQ. * @ctrl_irq: ctrl IRQ to be released. */ -void mlx5_ctrl_irq_release(struct mlx5_irq *ctrl_irq) +void mlx5_ctrl_irq_release(struct mlx5_core_dev *dev, struct mlx5_irq *ctrl_irq) { - _mlx5_irq_release(ctrl_irq); + mlx5_irq_affinity_irq_release(dev, ctrl_irq); } /** @@ -473,7 +480,7 @@ struct mlx5_irq *mlx5_ctrl_irq_request(struct mlx5_core_dev *dev) /* Allocate the IRQ in index 0. 
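On PFs and VFs the control IRQ always uses vector index 0 of the function's IRQ pool; SFs instead take the mlx5_irq_affinity_request() path below.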
The vector was already allocated */ irq = irq_pool_request_vector(pool, 0, &af_desc, NULL); } else { - irq = mlx5_irq_affinity_request(pool, &af_desc); + irq = mlx5_irq_affinity_request(dev, pool, &af_desc); } return irq; @@ -589,8 +596,6 @@ static void irq_pool_free(struct mlx5_irq_pool *pool) static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec) { struct mlx5_irq_table *table = dev->priv.irq_table; - int num_sf_ctrl_by_msix; - int num_sf_ctrl_by_sfs; int num_sf_ctrl; int err; @@ -608,10 +613,8 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pcif_vec) } /* init sf_ctrl_pool */ - num_sf_ctrl_by_msix = DIV_ROUND_UP(sf_vec, MLX5_COMP_EQS_PER_SF); - num_sf_ctrl_by_sfs = DIV_ROUND_UP(mlx5_sf_max_functions(dev), - MLX5_SFS_PER_CTRL_IRQ); - num_sf_ctrl = min_t(int, num_sf_ctrl_by_msix, num_sf_ctrl_by_sfs); + num_sf_ctrl = DIV_ROUND_UP(mlx5_sf_max_functions(dev), + MLX5_SFS_PER_CTRL_IRQ); num_sf_ctrl = min_t(int, MLX5_IRQ_CTRL_SF_MAX, num_sf_ctrl); table->sf_ctrl_pool = irq_pool_alloc(dev, pcif_vec, num_sf_ctrl, "mlx5_sf_ctrl", @@ -726,8 +729,7 @@ int mlx5_irq_table_create(struct mlx5_core_dev *dev) total_vec = pcif_vec; if (mlx5_sf_max_functions(dev)) - total_vec += MLX5_IRQ_CTRL_SF_MAX + - MLX5_COMP_EQS_PER_SF * mlx5_sf_max_functions(dev); + total_vec += MLX5_MAX_MSIX_PER_SF * mlx5_sf_max_functions(dev); total_vec = min_t(int, total_vec, pci_msix_vec_count(dev->pdev)); pcif_vec = min_t(int, pcif_vec, pci_msix_vec_count(dev->pdev)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index 6c11e075cab0..a96be98be032 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -161,6 +161,7 @@ int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port, static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf, struct netlink_ext_ack *extack) { + struct mlx5_vport *vport; int err; if (mlx5_sf_is_active(sf)) @@ -170,6 +171,13 @@ static int mlx5_sf_activate(struct mlx5_core_dev *dev, struct mlx5_sf *sf, return -EBUSY; } + vport = mlx5_devlink_port_vport_get(&sf->dl_port.dl_port); + if (!vport->max_eqs_set && MLX5_CAP_GEN_2(dev, max_num_eqs_24b)) { + err = mlx5_devlink_port_fn_max_io_eqs_set_sf_default(&sf->dl_port.dl_port, + extack); + if (err) + return err; + } err = mlx5_cmd_sf_enable_hca(dev, sf->hw_fn_id); if (err) return err; @@ -318,7 +326,11 @@ int mlx5_devlink_sf_port_new(struct devlink *devlink, static void mlx5_sf_dealloc(struct mlx5_sf_table *table, struct mlx5_sf *sf) { + struct mlx5_vport *vport; + mutex_lock(&table->sf_state_lock); + vport = mlx5_devlink_port_vport_get(&sf->dl_port.dl_port); + vport->max_eqs_set = false; mlx5_sf_function_id_erase(table, sf); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index 81eff6c410ce..7618c6147f86 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -1379,6 +1379,11 @@ int mlx5dr_cmd_create_modify_header_arg(struct mlx5_core_dev *dev, void mlx5dr_cmd_destroy_modify_header_arg(struct mlx5_core_dev *dev, u32 obj_id); +int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id, + u8 *dw_selectors, u8 *byte_selectors, + u8 *match_mask, u32 *definer_id); +void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id); + struct mlx5dr_icm_pool 
*mlx5dr_icm_pool_create(struct mlx5dr_domain *dmn, enum mlx5dr_icm_type icm_type); void mlx5dr_icm_pool_destroy(struct mlx5dr_icm_pool *pool); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index 89fced86936f..3ac7dc67509f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -153,11 +153,6 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action); u32 mlx5dr_action_get_pkt_reformat_id(struct mlx5dr_action *action); -int mlx5dr_definer_get(struct mlx5dr_domain *dmn, u16 format_id, - u8 *dw_selectors, u8 *byte_selectors, - u8 *match_mask, u32 *definer_id); -void mlx5dr_definer_put(struct mlx5dr_domain *dmn, u32 definer_id); - static inline bool mlx5dr_is_supported(struct mlx5_core_dev *dev) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wc.c b/drivers/net/ethernet/mellanox/mlx5/core/wc.c new file mode 100644 index 000000000000..1bed75eca97d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/wc.c @@ -0,0 +1,434 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. + +#include <linux/io.h> +#include <linux/mlx5/transobj.h> +#include "lib/clock.h" +#include "mlx5_core.h" +#include "wq.h" + +#define TEST_WC_NUM_WQES 255 +#define TEST_WC_LOG_CQ_SZ (order_base_2(TEST_WC_NUM_WQES)) +#define TEST_WC_SQ_LOG_WQ_SZ TEST_WC_LOG_CQ_SZ +#define TEST_WC_POLLING_MAX_TIME_JIFFIES msecs_to_jiffies(100) + +struct mlx5_wc_cq { + /* data path - accessed per cqe */ + struct mlx5_cqwq wq; + + /* data path - accessed per napi poll */ + struct mlx5_core_cq mcq; + + /* control */ + struct mlx5_core_dev *mdev; + struct mlx5_wq_ctrl wq_ctrl; +}; + +struct mlx5_wc_sq { + /* data path */ + u16 cc; + u16 pc; + + /* read only */ + struct mlx5_wq_cyc wq; + u32 sqn; + + /* control path */ + struct mlx5_wq_ctrl wq_ctrl; + + struct mlx5_wc_cq cq; + struct mlx5_sq_bfreg bfreg; +}; + +static int mlx5_wc_create_cqwq(struct mlx5_core_dev *mdev, void *cqc, + struct mlx5_wc_cq *cq) +{ + struct mlx5_core_cq *mcq = &cq->mcq; + struct mlx5_wq_param param = {}; + int err; + u32 i; + + err = mlx5_cqwq_create(mdev, &param, cqc, &cq->wq, &cq->wq_ctrl); + if (err) + return err; + + mcq->cqe_sz = 64; + mcq->set_ci_db = cq->wq_ctrl.db.db; + mcq->arm_db = cq->wq_ctrl.db.db + 1; + + for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) { + struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i); + + cqe->op_own = 0xf1; + } + + cq->mdev = mdev; + + return 0; +} + +static int create_wc_cq(struct mlx5_wc_cq *cq, void *cqc_data) +{ + u32 out[MLX5_ST_SZ_DW(create_cq_out)]; + struct mlx5_core_dev *mdev = cq->mdev; + struct mlx5_core_cq *mcq = &cq->mcq; + int err, inlen, eqn; + void *in, *cqc; + + err = mlx5_comp_eqn_get(mdev, 0, &eqn); + if (err) + return err; + + inlen = MLX5_ST_SZ_BYTES(create_cq_in) + + sizeof(u64) * cq->wq_ctrl.buf.npages; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); + + memcpy(cqc, cqc_data, MLX5_ST_SZ_BYTES(cqc)); + + mlx5_fill_page_frag_array(&cq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); + + MLX5_SET(cqc, cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE); + MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); + MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); + MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - + MLX5_ADAPTER_PAGE_SHIFT); + MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma); + + err 
= mlx5_core_create_cq(mdev, mcq, in, inlen, out, sizeof(out)); + + kvfree(in); + + return err; +} + +static int mlx5_wc_create_cq(struct mlx5_core_dev *mdev, struct mlx5_wc_cq *cq) +{ + void *cqc; + int err; + + cqc = kvzalloc(MLX5_ST_SZ_BYTES(cqc), GFP_KERNEL); + if (!cqc) + return -ENOMEM; + + MLX5_SET(cqc, cqc, log_cq_size, TEST_WC_LOG_CQ_SZ); + MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); + if (MLX5_CAP_GEN(mdev, cqe_128_always) && cache_line_size() >= 128) + MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD); + + err = mlx5_wc_create_cqwq(mdev, cqc, cq); + if (err) { + mlx5_core_err(mdev, "Failed to create wc cq wq, err=%d\n", err); + goto err_create_cqwq; + } + + err = create_wc_cq(cq, cqc); + if (err) { + mlx5_core_err(mdev, "Failed to create wc cq, err=%d\n", err); + goto err_create_cq; + } + + kvfree(cqc); + return 0; + +err_create_cq: + mlx5_wq_destroy(&cq->wq_ctrl); +err_create_cqwq: + kvfree(cqc); + return err; +} + +static void mlx5_wc_destroy_cq(struct mlx5_wc_cq *cq) +{ + mlx5_core_destroy_cq(cq->mdev, &cq->mcq); + mlx5_wq_destroy(&cq->wq_ctrl); +} + +static int create_wc_sq(struct mlx5_core_dev *mdev, void *sqc_data, + struct mlx5_wc_sq *sq) +{ + void *in, *sqc, *wq; + int inlen, err; + u8 ts_format; + + inlen = MLX5_ST_SZ_BYTES(create_sq_in) + + sizeof(u64) * sq->wq_ctrl.buf.npages; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + sqc = MLX5_ADDR_OF(create_sq_in, in, ctx); + wq = MLX5_ADDR_OF(sqc, sqc, wq); + + memcpy(sqc, sqc_data, MLX5_ST_SZ_BYTES(sqc)); + MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn); + + MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST); + MLX5_SET(sqc, sqc, flush_in_error_en, 1); + + ts_format = mlx5_is_real_time_sq(mdev) ? + MLX5_TIMESTAMP_FORMAT_REAL_TIME : + MLX5_TIMESTAMP_FORMAT_FREE_RUNNING; + MLX5_SET(sqc, sqc, ts_format, ts_format); + + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); + MLX5_SET(wq, wq, uar_page, sq->bfreg.index); + MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - + MLX5_ADAPTER_PAGE_SHIFT); + MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma); + + mlx5_fill_page_frag_array(&sq->wq_ctrl.buf, + (__be64 *)MLX5_ADDR_OF(wq, wq, pas)); + + err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn); + if (err) { + mlx5_core_err(mdev, "Failed to create wc sq, err=%d\n", err); + goto err_create_sq; + } + + memset(in, 0, MLX5_ST_SZ_BYTES(modify_sq_in)); + MLX5_SET(modify_sq_in, in, sq_state, MLX5_SQC_STATE_RST); + sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx); + MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RDY); + + err = mlx5_core_modify_sq(mdev, sq->sqn, in); + if (err) { + mlx5_core_err(mdev, "Failed to set wc sq(sqn=0x%x) ready, err=%d\n", + sq->sqn, err); + goto err_modify_sq; + } + + kvfree(in); + return 0; + +err_modify_sq: + mlx5_core_destroy_sq(mdev, sq->sqn); +err_create_sq: + kvfree(in); + return err; +} + +static int mlx5_wc_create_sq(struct mlx5_core_dev *mdev, struct mlx5_wc_sq *sq) +{ + struct mlx5_wq_param param = {}; + void *sqc_data, *wq; + int err; + + sqc_data = kvzalloc(MLX5_ST_SZ_BYTES(sqc), GFP_KERNEL); + if (!sqc_data) + return -ENOMEM; + + wq = MLX5_ADDR_OF(sqc, sqc_data, wq); + MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB)); + MLX5_SET(wq, wq, pd, mdev->mlx5e_res.hw_objs.pdn); + MLX5_SET(wq, wq, log_wq_sz, TEST_WC_SQ_LOG_WQ_SZ); + + err = mlx5_wq_cyc_create(mdev, &param, wq, &sq->wq, &sq->wq_ctrl); + if (err) { + mlx5_core_err(mdev, "Failed to create wc sq wq, err=%d\n", err); + goto err_create_wq_cyc; + } + + err = create_wc_sq(mdev, sqc_data, sq); + if (err) + goto err_create_sq; + + 
mlx5_core_dbg(mdev, "wc sq->sqn = 0x%x created\n", sq->sqn); + + kvfree(sqc_data); + return 0; + +err_create_sq: + mlx5_wq_destroy(&sq->wq_ctrl); +err_create_wq_cyc: + kvfree(sqc_data); + return err; +} + +static void mlx5_wc_destroy_sq(struct mlx5_wc_sq *sq) +{ + mlx5_core_destroy_sq(sq->cq.mdev, sq->sqn); + mlx5_wq_destroy(&sq->wq_ctrl); +} + +static void mlx5_wc_post_nop(struct mlx5_wc_sq *sq, bool signaled) +{ + int buf_size = (1 << MLX5_CAP_GEN(sq->cq.mdev, log_bf_reg_size)) / 2; + struct mlx5_wqe_ctrl_seg *ctrl; + __be32 mmio_wqe[16] = {}; + u16 pi; + + pi = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->pc); + ctrl = mlx5_wq_cyc_get_wqe(&sq->wq, pi); + memset(ctrl, 0, sizeof(*ctrl)); + ctrl->opmod_idx_opcode = + cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) | MLX5_OPCODE_NOP); + ctrl->qpn_ds = + cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) | + DIV_ROUND_UP(sizeof(struct mlx5_wqe_ctrl_seg), MLX5_SEND_WQE_DS)); + if (signaled) + ctrl->fm_ce_se |= MLX5_WQE_CTRL_CQ_UPDATE; + + memcpy(mmio_wqe, ctrl, sizeof(*ctrl)); + ((struct mlx5_wqe_ctrl_seg *)&mmio_wqe)->fm_ce_se |= + MLX5_WQE_CTRL_CQ_UPDATE; + + /* ensure wqe is visible to device before updating doorbell record */ + dma_wmb(); + + sq->pc++; + sq->wq.db[MLX5_SND_DBR] = cpu_to_be32(sq->pc); + + /* ensure doorbell record is visible to device before ringing the + * doorbell + */ + wmb(); + + __iowrite64_copy(sq->bfreg.map + sq->bfreg.offset, mmio_wqe, + sizeof(mmio_wqe) / 8); + + sq->bfreg.offset ^= buf_size; +} + +static int mlx5_wc_poll_cq(struct mlx5_wc_sq *sq) +{ + struct mlx5_wc_cq *cq = &sq->cq; + struct mlx5_cqe64 *cqe; + + cqe = mlx5_cqwq_get_cqe(&cq->wq); + if (!cqe) + return -ETIMEDOUT; + + /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), + * otherwise a cq overrun may occur + */ + mlx5_cqwq_pop(&cq->wq); + + if (get_cqe_opcode(cqe) == MLX5_CQE_REQ) { + int wqe_counter = be16_to_cpu(cqe->wqe_counter); + struct mlx5_core_dev *mdev = cq->mdev; + + if (wqe_counter == TEST_WC_NUM_WQES - 1) + mdev->wc_state = MLX5_WC_STATE_UNSUPPORTED; + else + mdev->wc_state = MLX5_WC_STATE_SUPPORTED; + + mlx5_core_dbg(mdev, "wc wqe_counter = 0x%x\n", wqe_counter); + } + + mlx5_cqwq_update_db_record(&cq->wq); + + /* ensure cq space is freed before enabling more cqes */ + wmb(); + + sq->cc++; + + return 0; +} + +static void mlx5_core_test_wc(struct mlx5_core_dev *mdev) +{ + unsigned long expires; + struct mlx5_wc_sq *sq; + int i, err; + + if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED) + return; + + sq = kzalloc(sizeof(*sq), GFP_KERNEL); + if (!sq) + return; + + err = mlx5_alloc_bfreg(mdev, &sq->bfreg, true, false); + if (err) { + mlx5_core_err(mdev, "Failed to alloc bfreg for wc, err=%d\n", err); + goto err_alloc_bfreg; + } + + err = mlx5_wc_create_cq(mdev, &sq->cq); + if (err) + goto err_create_cq; + + err = mlx5_wc_create_sq(mdev, sq); + if (err) + goto err_create_sq; + + for (i = 0; i < TEST_WC_NUM_WQES - 1; i++) + mlx5_wc_post_nop(sq, false); + + mlx5_wc_post_nop(sq, true); + + expires = jiffies + TEST_WC_POLLING_MAX_TIME_JIFFIES; + do { + err = mlx5_wc_poll_cq(sq); + if (err) + usleep_range(2, 10); + } while (mdev->wc_state == MLX5_WC_STATE_UNINITIALIZED && + time_is_after_jiffies(expires)); + + mlx5_wc_destroy_sq(sq); + +err_create_sq: + mlx5_wc_destroy_cq(&sq->cq); +err_create_cq: + mlx5_free_bfreg(mdev, &sq->bfreg); +err_alloc_bfreg: + kfree(sq); +} + +bool mlx5_wc_support_get(struct mlx5_core_dev *mdev) +{ + struct mlx5_core_dev *parent = NULL; + + if (!MLX5_CAP_GEN(mdev, bf)) { + mlx5_core_dbg(mdev, "BlueFlame not 
supported\n"); + goto out; + } + + if (!MLX5_CAP_GEN(mdev, log_max_sq)) { + mlx5_core_dbg(mdev, "SQ not supported\n"); + goto out; + } + + if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED) + /* No need to lock anything as we perform WC test only + * once for whole device and was already done. + */ + goto out; + + mutex_lock(&mdev->wc_state_lock); + + if (mdev->wc_state != MLX5_WC_STATE_UNINITIALIZED) + goto unlock; + +#ifdef CONFIG_MLX5_SF + if (mlx5_core_is_sf(mdev)) + parent = mdev->priv.parent_mdev; +#endif + + if (parent) { + mutex_lock(&parent->wc_state_lock); + + mlx5_core_test_wc(parent); + + mlx5_core_dbg(mdev, "parent set wc_state=%d\n", + parent->wc_state); + mdev->wc_state = parent->wc_state; + + mutex_unlock(&parent->wc_state_lock); + } + + mlx5_core_test_wc(mdev); + +unlock: + mutex_unlock(&mdev->wc_state_lock); +out: + mlx5_core_dbg(mdev, "wc_state=%d\n", mdev->wc_state); + + return mdev->wc_state == MLX5_WC_STATE_SUPPORTED; +} +EXPORT_SYMBOL(mlx5_wc_support_get); diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index a510bf2cff2f..74f7e27b490f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -33,6 +33,7 @@ config MLXSW_CORE_THERMAL config MLXSW_PCI tristate "PCI bus implementation for Mellanox Technologies Switch ASICs" depends on PCI && HAS_IOMEM && MLXSW_CORE + select PAGE_POOL default m help This is PCI bus implementation for Mellanox Technologies Switch ASICs. diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_env.c b/drivers/net/ethernet/mellanox/mlxsw/core_env.c index 6c06b0592760..294e758f1067 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.c @@ -513,6 +513,63 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, } EXPORT_SYMBOL(mlxsw_env_get_module_eeprom_by_page); +int +mlxsw_env_set_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, + u8 slot_index, u8 module, + const struct ethtool_module_eeprom *page, + struct netlink_ext_ack *extack) +{ + struct mlxsw_env *mlxsw_env = mlxsw_core_env(mlxsw_core); + u32 bytes_written = 0; + u16 device_addr; + int err; + + if (!mlxsw_env_linecard_is_active(mlxsw_env, slot_index)) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot write to EEPROM of a module on an inactive line card"); + return -EIO; + } + + err = mlxsw_env_validate_module_type(mlxsw_core, slot_index, module); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "EEPROM is not equipped on port module type"); + return err; + } + + device_addr = page->offset; + + while (bytes_written < page->length) { + char mcia_pl[MLXSW_REG_MCIA_LEN]; + char eeprom_tmp[128] = {}; + u8 size; + + size = min_t(u8, page->length - bytes_written, + mlxsw_env->max_eeprom_len); + + mlxsw_reg_mcia_pack(mcia_pl, slot_index, module, page->page, + device_addr + bytes_written, size, + page->i2c_address); + mlxsw_reg_mcia_bank_number_set(mcia_pl, page->bank); + memcpy(eeprom_tmp, page->data + bytes_written, size); + mlxsw_reg_mcia_eeprom_memcpy_to(mcia_pl, eeprom_tmp); + + err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(mcia), mcia_pl); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to access module's EEPROM"); + return err; + } + + err = mlxsw_env_mcia_status_process(mcia_pl, extack); + if (err) + return err; + + bytes_written += size; + } + + return 0; +} +EXPORT_SYMBOL(mlxsw_env_set_module_eeprom_by_page); + static int mlxsw_env_module_reset(struct mlxsw_core *mlxsw_core, u8 slot_index, u8 module) { diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/core_env.h b/drivers/net/ethernet/mellanox/mlxsw/core_env.h index a197e3ae069c..e4ff17869400 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_env.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_env.h @@ -28,6 +28,12 @@ mlxsw_env_get_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, const struct ethtool_module_eeprom *page, struct netlink_ext_ack *extack); +int +mlxsw_env_set_module_eeprom_by_page(struct mlxsw_core *mlxsw_core, + u8 slot_index, u8 module, + const struct ethtool_module_eeprom *page, + struct netlink_ext_ack *extack); + int mlxsw_env_reset_module(struct net_device *netdev, struct mlxsw_core *mlxsw_core, u8 slot_index, u8 module, u32 *flags); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 5c511e1a8efa..d61478c0c632 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -100,6 +100,12 @@ static const struct mlxsw_cooling_states default_cooling_states[] = { struct mlxsw_thermal; +struct mlxsw_thermal_cooling_device { + struct mlxsw_thermal *thermal; + struct thermal_cooling_device *cdev; + unsigned int idx; +}; + struct mlxsw_thermal_module { struct mlxsw_thermal *parent; struct thermal_zone_device *tzdev; @@ -123,7 +129,7 @@ struct mlxsw_thermal { const struct mlxsw_bus_info *bus_info; struct thermal_zone_device *tzdev; int polling_delay; - struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX]; + struct mlxsw_thermal_cooling_device cdevs[MLXSW_MFCR_PWMS_MAX]; struct thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS]; struct mlxsw_cooling_states cooling_states[MLXSW_THERMAL_NUM_TRIPS]; struct mlxsw_thermal_area line_cards[]; @@ -147,7 +153,7 @@ static int mlxsw_get_cooling_device_idx(struct mlxsw_thermal *thermal, int i; for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) - if (thermal->cdevs[i] == cdev) + if (thermal->cdevs[i].cdev == cdev) return i; /* Allow mlxsw thermal zone binding to an external cooling device */ @@ -352,17 +358,14 @@ static int mlxsw_thermal_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *p_state) { - struct mlxsw_thermal *thermal = cdev->devdata; + struct mlxsw_thermal_cooling_device *mlxsw_cdev = cdev->devdata; + struct mlxsw_thermal *thermal = mlxsw_cdev->thermal; struct device *dev = thermal->bus_info->dev; char mfsc_pl[MLXSW_REG_MFSC_LEN]; - int err, idx; u8 duty; + int err; - idx = mlxsw_get_cooling_device_idx(thermal, cdev); - if (idx < 0) - return idx; - - mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0); + mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_cdev->idx, 0); err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl); if (err) { dev_err(dev, "Failed to query PWM duty\n"); @@ -378,22 +381,19 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) { - struct mlxsw_thermal *thermal = cdev->devdata; + struct mlxsw_thermal_cooling_device *mlxsw_cdev = cdev->devdata; + struct mlxsw_thermal *thermal = mlxsw_cdev->thermal; struct device *dev = thermal->bus_info->dev; char mfsc_pl[MLXSW_REG_MFSC_LEN]; - int idx; int err; if (state > MLXSW_THERMAL_MAX_STATE) return -EINVAL; - idx = mlxsw_get_cooling_device_idx(thermal, cdev); - if (idx < 0) - return idx; - /* Normalize the state to the valid speed range. 
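Values below MLXSW_THERMAL_MIN_STATE are clamped up via max_t() rather than rejected, so the fan is never driven below its minimal duty cycle.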
*/ state = max_t(unsigned long, MLXSW_THERMAL_MIN_STATE, state); - mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state)); + mlxsw_reg_mfsc_pack(mfsc_pl, mlxsw_cdev->idx, + mlxsw_state_to_duty(state)); err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl); if (err) { dev_err(dev, "Failed to write PWM duty\n"); @@ -753,17 +753,21 @@ int mlxsw_thermal_init(struct mlxsw_core *core, } for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) { if (pwm_active & BIT(i)) { + struct mlxsw_thermal_cooling_device *mlxsw_cdev; struct thermal_cooling_device *cdev; + mlxsw_cdev = &thermal->cdevs[i]; + mlxsw_cdev->thermal = thermal; + mlxsw_cdev->idx = i; cdev = thermal_cooling_device_register("mlxsw_fan", - thermal, + mlxsw_cdev, &mlxsw_cooling_ops); if (IS_ERR(cdev)) { err = PTR_ERR(cdev); dev_err(dev, "Failed to register cooling device\n"); goto err_thermal_cooling_device_register; } - thermal->cdevs[i] = cdev; + mlxsw_cdev->cdev = cdev; } } @@ -824,8 +828,7 @@ err_thermal_modules_init: err_thermal_zone_device_register: err_thermal_cooling_device_register: for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) - if (thermal->cdevs[i]) - thermal_cooling_device_unregister(thermal->cdevs[i]); + thermal_cooling_device_unregister(thermal->cdevs[i].cdev); err_reg_write: err_reg_query: kfree(thermal); @@ -847,12 +850,8 @@ void mlxsw_thermal_fini(struct mlxsw_thermal *thermal) thermal->tzdev = NULL; } - for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) { - if (thermal->cdevs[i]) { - thermal_cooling_device_unregister(thermal->cdevs[i]); - thermal->cdevs[i] = NULL; - } - } + for (i = 0; i < MLXSW_MFCR_PWMS_MAX; i++) + thermal_cooling_device_unregister(thermal->cdevs[i].cdev); kfree(thermal); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/item.h b/drivers/net/ethernet/mellanox/mlxsw/item.h index cfafbeb42586..a619a0736bd1 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/item.h +++ b/drivers/net/ethernet/mellanox/mlxsw/item.h @@ -218,6 +218,10 @@ __mlxsw_item_bit_array_offset(const struct mlxsw_item *item, } max_index = (item->size.bytes << 3) / item->element_size - 1; + if (WARN_ONCE(index > max_index, + "name=%s,index=%u,max_index=%u\n", item->name, index, + max_index)) + index = 0; be_index = max_index - index; offset = be_index * item->element_size >> 3; in_byte_index = index % (BITS_PER_BYTE / item->element_size); diff --git a/drivers/net/ethernet/mellanox/mlxsw/minimal.c b/drivers/net/ethernet/mellanox/mlxsw/minimal.c index f0ceb196a6ce..828c65036a4c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/minimal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/minimal.c @@ -140,6 +140,20 @@ mlxsw_m_get_module_eeprom_by_page(struct net_device *netdev, page, extack); } +static int +mlxsw_m_set_module_eeprom_by_page(struct net_device *netdev, + const struct ethtool_module_eeprom *page, + struct netlink_ext_ack *extack) +{ + struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); + struct mlxsw_core *core = mlxsw_m_port->mlxsw_m->core; + + return mlxsw_env_set_module_eeprom_by_page(core, + mlxsw_m_port->slot_index, + mlxsw_m_port->module, + page, extack); +} + static int mlxsw_m_reset(struct net_device *netdev, u32 *flags) { struct mlxsw_m_port *mlxsw_m_port = netdev_priv(netdev); @@ -181,6 +195,7 @@ static const struct ethtool_ops mlxsw_m_port_ethtool_ops = { .get_module_info = mlxsw_m_get_module_info, .get_module_eeprom = mlxsw_m_get_module_eeprom, .get_module_eeprom_by_page = mlxsw_m_get_module_eeprom_by_page, + .set_module_eeprom_by_page = mlxsw_m_set_module_eeprom_by_page, .reset = mlxsw_m_reset, .get_module_power_mode = 
mlxsw_m_get_module_power_mode, .set_module_power_mode = mlxsw_m_set_module_power_mode, @@ -702,8 +717,8 @@ static struct mlxsw_driver mlxsw_m_driver = { }; static const struct i2c_device_id mlxsw_m_i2c_id[] = { - { "mlxsw_minimal", 0}, - { }, + { "mlxsw_minimal" }, + { } }; static struct i2c_driver mlxsw_m_i2c_driver = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index c0ced4d315f3..060e5b939211 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -13,6 +13,7 @@ #include <linux/if_vlan.h> #include <linux/log2.h> #include <linux/string.h> +#include <net/page_pool/helpers.h> #include "pci_hw.h" #include "pci.h" @@ -61,15 +62,11 @@ struct mlxsw_pci_mem_item { }; struct mlxsw_pci_queue_elem_info { + struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES]; char *elem; /* pointer to actual dma mapped element mem chunk */ - union { - struct { - struct sk_buff *skb; - } sdq; - struct { - struct sk_buff *skb; - } rdq; - } u; + struct { + struct sk_buff *skb; + } sdq; }; struct mlxsw_pci_queue { @@ -88,10 +85,14 @@ struct mlxsw_pci_queue { enum mlxsw_pci_cqe_v v; struct mlxsw_pci_queue *dq; struct napi_struct napi; + struct page_pool *page_pool; } cq; struct { struct tasklet_struct tasklet; } eq; + struct { + struct mlxsw_pci_queue *cq; + } rdq; } u; }; @@ -110,6 +111,7 @@ struct mlxsw_pci { bool cff_support; enum mlxsw_cmd_mbox_config_profile_lag_mode lag_mode; enum mlxsw_cmd_mbox_config_profile_flood_mode flood_mode; + u8 num_sg_entries; /* Number of scatter/gather entries for packets. */ struct mlxsw_pci_queue_type_group queues[MLXSW_PCI_QUEUE_TYPE_COUNT]; u32 doorbell_offset; struct mlxsw_core *core; @@ -335,6 +337,29 @@ static void mlxsw_pci_sdq_fini(struct mlxsw_pci *mlxsw_pci, mlxsw_cmd_hw2sw_sdq(mlxsw_pci->core, q->num); } +#define MLXSW_PCI_SKB_HEADROOM (NET_SKB_PAD + NET_IP_ALIGN) + +#define MLXSW_PCI_RX_BUF_SW_OVERHEAD \ + (MLXSW_PCI_SKB_HEADROOM + \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) + +static void +mlxsw_pci_wqe_rx_frag_set(struct mlxsw_pci *mlxsw_pci, struct page *page, + char *wqe, int index, size_t frag_len) +{ + dma_addr_t mapaddr; + + mapaddr = page_pool_get_dma_addr(page); + + if (index == 0) { + mapaddr += MLXSW_PCI_SKB_HEADROOM; + frag_len = frag_len - MLXSW_PCI_RX_BUF_SW_OVERHEAD; + } + + mlxsw_pci_wqe_address_set(wqe, index, mapaddr); + mlxsw_pci_wqe_byte_count_set(wqe, index, frag_len); +} + static int mlxsw_pci_wqe_frag_map(struct mlxsw_pci *mlxsw_pci, char *wqe, int index, char *frag_data, size_t frag_len, int direction) @@ -364,43 +389,140 @@ static void mlxsw_pci_wqe_frag_unmap(struct mlxsw_pci *mlxsw_pci, char *wqe, dma_unmap_single(&pdev->dev, mapaddr, frag_len, direction); } -static int mlxsw_pci_rdq_skb_alloc(struct mlxsw_pci *mlxsw_pci, - struct mlxsw_pci_queue_elem_info *elem_info, - gfp_t gfp) +static struct sk_buff *mlxsw_pci_rdq_build_skb(struct page *pages[], + u16 byte_count) { - size_t buf_len = MLXSW_PORT_MAX_MTU; - char *wqe = elem_info->elem; + unsigned int linear_data_size; struct sk_buff *skb; - int err; + int page_index = 0; + bool linear_only; + void *data; - skb = __netdev_alloc_skb_ip_align(NULL, buf_len, gfp); - if (!skb) + data = page_address(pages[page_index]); + net_prefetch(data); + + skb = napi_build_skb(data, PAGE_SIZE); + if (unlikely(!skb)) + return ERR_PTR(-ENOMEM); + + linear_only = byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD <= PAGE_SIZE; + linear_data_size = linear_only ? 
byte_count : + PAGE_SIZE - + MLXSW_PCI_RX_BUF_SW_OVERHEAD; + + skb_reserve(skb, MLXSW_PCI_SKB_HEADROOM); + skb_put(skb, linear_data_size); + + if (linear_only) + return skb; + + byte_count -= linear_data_size; + page_index++; + + while (byte_count > 0) { + unsigned int frag_size; + struct page *page; + + page = pages[page_index]; + frag_size = min(byte_count, PAGE_SIZE); + skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, + page, 0, frag_size, PAGE_SIZE); + byte_count -= frag_size; + page_index++; + } + + return skb; +} + +static int mlxsw_pci_rdq_page_alloc(struct mlxsw_pci_queue *q, + struct mlxsw_pci_queue_elem_info *elem_info, + int index) +{ + struct mlxsw_pci_queue *cq = q->u.rdq.cq; + char *wqe = elem_info->elem; + struct page *page; + + page = page_pool_dev_alloc_pages(cq->u.cq.page_pool); + if (unlikely(!page)) return -ENOMEM; - err = mlxsw_pci_wqe_frag_map(mlxsw_pci, wqe, 0, skb->data, - buf_len, DMA_FROM_DEVICE); - if (err) - goto err_frag_map; + mlxsw_pci_wqe_rx_frag_set(q->pci, page, wqe, index, PAGE_SIZE); + elem_info->pages[index] = page; + return 0; +} + +static void mlxsw_pci_rdq_page_free(struct mlxsw_pci_queue *q, + struct mlxsw_pci_queue_elem_info *elem_info, + int index) +{ + struct mlxsw_pci_queue *cq = q->u.rdq.cq; + + page_pool_put_page(cq->u.cq.page_pool, elem_info->pages[index], -1, + false); +} + +static u8 mlxsw_pci_num_sg_entries_get(u16 byte_count) +{ + return DIV_ROUND_UP(byte_count + MLXSW_PCI_RX_BUF_SW_OVERHEAD, + PAGE_SIZE); +} + +static int +mlxsw_pci_elem_info_pages_ref_store(const struct mlxsw_pci_queue *q, + const struct mlxsw_pci_queue_elem_info *el, + u16 byte_count, struct page *pages[], + u8 *p_num_sg_entries) +{ + u8 num_sg_entries; + int i; + + num_sg_entries = mlxsw_pci_num_sg_entries_get(byte_count); + if (WARN_ON_ONCE(num_sg_entries > q->pci->num_sg_entries)) + return -EINVAL; + + for (i = 0; i < num_sg_entries; i++) + pages[i] = el->pages[i]; + + *p_num_sg_entries = num_sg_entries; + return 0; +} + +static int +mlxsw_pci_rdq_pages_alloc(struct mlxsw_pci_queue *q, + struct mlxsw_pci_queue_elem_info *elem_info, + u8 num_sg_entries) +{ + struct page *old_pages[MLXSW_PCI_WQE_SG_ENTRIES]; + struct mlxsw_pci_queue *cq = q->u.rdq.cq; + int i, err; + + for (i = 0; i < num_sg_entries; i++) { + old_pages[i] = elem_info->pages[i]; + err = mlxsw_pci_rdq_page_alloc(q, elem_info, i); + if (err) { + dev_err_ratelimited(&q->pci->pdev->dev, "Failed to alloc page\n"); + goto err_page_alloc; + } + } - elem_info->u.rdq.skb = skb; return 0; -err_frag_map: - dev_kfree_skb_any(skb); +err_page_alloc: + for (i--; i >= 0; i--) + page_pool_recycle_direct(cq->u.cq.page_pool, old_pages[i]); + return err; } -static void mlxsw_pci_rdq_skb_free(struct mlxsw_pci *mlxsw_pci, - struct mlxsw_pci_queue_elem_info *elem_info) +static void +mlxsw_pci_rdq_pages_recycle(struct mlxsw_pci_queue *q, struct page *pages[], + u8 num_sg_entries) { - struct sk_buff *skb; - char *wqe; + struct mlxsw_pci_queue *cq = q->u.rdq.cq; + int i; - skb = elem_info->u.rdq.skb; - wqe = elem_info->elem; - - mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE); - dev_kfree_skb_any(skb); + for (i = 0; i < num_sg_entries; i++) + page_pool_recycle_direct(cq->u.cq.page_pool, pages[i]); } static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, @@ -410,7 +532,7 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, u8 sdq_count = mlxsw_pci->num_sdqs; struct mlxsw_pci_queue *cq; u8 cq_num; - int i; + int i, j; int err; q->producer_counter = 0; @@ -434,15 +556,19 @@ static 
int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num); cq->u.cq.dq = q; + q->u.rdq.cq = cq; mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); for (i = 0; i < q->count; i++) { elem_info = mlxsw_pci_queue_elem_info_producer_get(q); BUG_ON(!elem_info); - err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_KERNEL); - if (err) - goto rollback; + + for (j = 0; j < mlxsw_pci->num_sg_entries; j++) { + err = mlxsw_pci_rdq_page_alloc(q, elem_info, j); + if (err) + goto rollback; + } /* Everything is set up, ring doorbell to pass elem to HW */ q->producer_counter++; mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); @@ -453,8 +579,11 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, rollback: for (i--; i >= 0; i--) { elem_info = mlxsw_pci_queue_elem_info_get(q, i); - mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info); + for (j--; j >= 0; j--) + mlxsw_pci_rdq_page_free(q, elem_info, j); + j = mlxsw_pci->num_sg_entries; } + q->u.rdq.cq = NULL; cq->u.cq.dq = NULL; mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num); @@ -465,12 +594,13 @@ static void mlxsw_pci_rdq_fini(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q) { struct mlxsw_pci_queue_elem_info *elem_info; - int i; + int i, j; mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num); for (i = 0; i < q->count; i++) { elem_info = mlxsw_pci_queue_elem_info_get(q, i); - mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info); + for (j = 0; j < mlxsw_pci->num_sg_entries; j++) + mlxsw_pci_rdq_page_free(q, elem_info, j); } } @@ -515,7 +645,7 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q, u16 consumer_counter_limit, enum mlxsw_pci_cqe_v cqe_v, - char *cqe) + char *cqe, int budget) { struct pci_dev *pdev = mlxsw_pci->pdev; struct mlxsw_pci_queue_elem_info *elem_info; @@ -526,8 +656,8 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci, spin_lock(&q->lock); elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); - tx_info = mlxsw_skb_cb(elem_info->u.sdq.skb)->tx_info; - skb = elem_info->u.sdq.skb; + tx_info = mlxsw_skb_cb(elem_info->sdq.skb)->tx_info; + skb = elem_info->sdq.skb; wqe = elem_info->elem; for (i = 0; i < MLXSW_PCI_WQE_SG_ENTRIES; i++) mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, i, DMA_TO_DEVICE); @@ -541,8 +671,8 @@ static void mlxsw_pci_cqe_sdq_handle(struct mlxsw_pci *mlxsw_pci, } if (skb) - dev_kfree_skb_any(skb); - elem_info->u.sdq.skb = NULL; + napi_consume_skb(skb, budget); + elem_info->sdq.skb = NULL; if (q->consumer_counter++ != consumer_counter_limit) dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in SDQ\n"); @@ -604,27 +734,40 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, enum mlxsw_pci_cqe_v cqe_v, char *cqe) { struct pci_dev *pdev = mlxsw_pci->pdev; + struct page *pages[MLXSW_PCI_WQE_SG_ENTRIES]; struct mlxsw_pci_queue_elem_info *elem_info; struct mlxsw_rx_info rx_info = {}; - char wqe[MLXSW_PCI_WQE_SIZE]; struct sk_buff *skb; + u8 num_sg_entries; u16 byte_count; int err; elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); - skb = elem_info->u.rdq.skb; - memcpy(wqe, elem_info->elem, MLXSW_PCI_WQE_SIZE); if (q->consumer_counter++ != consumer_counter_limit) dev_dbg_ratelimited(&pdev->dev, "Consumer counter does not match limit in RDQ\n"); - err = mlxsw_pci_rdq_skb_alloc(mlxsw_pci, elem_info, GFP_ATOMIC); - if (err) { - dev_err_ratelimited(&pdev->dev, "Failed to alloc skb for RDQ\n"); + byte_count = mlxsw_pci_cqe_byte_count_get(cqe); + if (mlxsw_pci_cqe_crc_get(cqe_v, 
cqe)) + byte_count -= ETH_FCS_LEN; + + err = mlxsw_pci_elem_info_pages_ref_store(q, elem_info, byte_count, + pages, &num_sg_entries); + if (err) + goto out; + + err = mlxsw_pci_rdq_pages_alloc(q, elem_info, num_sg_entries); + if (err) + goto out; + + skb = mlxsw_pci_rdq_build_skb(pages, byte_count); + if (IS_ERR(skb)) { + dev_err_ratelimited(&pdev->dev, "Failed to build skb for RDQ\n"); + mlxsw_pci_rdq_pages_recycle(q, pages, num_sg_entries); goto out; } - mlxsw_pci_wqe_frag_unmap(mlxsw_pci, wqe, 0, DMA_FROM_DEVICE); + skb_mark_for_recycle(skb); if (mlxsw_pci_cqe_lag_get(cqe_v, cqe)) { rx_info.is_lag = true; @@ -657,10 +800,6 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, mlxsw_pci_skb_cb_ts_set(mlxsw_pci, skb, cqe_v, cqe); - byte_count = mlxsw_pci_cqe_byte_count_get(cqe); - if (mlxsw_pci_cqe_crc_get(cqe_v, cqe)) - byte_count -= ETH_FCS_LEN; - skb_put(skb, byte_count); mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info); out: @@ -785,7 +924,7 @@ static int mlxsw_pci_napi_poll_cq_tx(struct napi_struct *napi, int budget) mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, - wqe_counter, q->u.cq.v, ncqe); + wqe_counter, q->u.cq.v, ncqe, budget); work_done++; } @@ -832,19 +971,51 @@ static void mlxsw_pci_cq_napi_setup(struct mlxsw_pci_queue *q, mlxsw_pci_napi_poll_cq_rx); break; } - - napi_enable(&q->u.cq.napi); } static void mlxsw_pci_cq_napi_teardown(struct mlxsw_pci_queue *q) { - napi_disable(&q->u.cq.napi); netif_napi_del(&q->u.cq.napi); } +static int mlxsw_pci_cq_page_pool_init(struct mlxsw_pci_queue *q, + enum mlxsw_pci_cq_type cq_type) +{ + struct page_pool_params pp_params = {}; + struct mlxsw_pci *mlxsw_pci = q->pci; + struct page_pool *page_pool; + + if (cq_type != MLXSW_PCI_CQ_RDQ) + return 0; + + pp_params.flags = PP_FLAG_DMA_MAP; + pp_params.pool_size = MLXSW_PCI_WQE_COUNT * mlxsw_pci->num_sg_entries; + pp_params.nid = dev_to_node(&mlxsw_pci->pdev->dev); + pp_params.dev = &mlxsw_pci->pdev->dev; + pp_params.napi = &q->u.cq.napi; + pp_params.dma_dir = DMA_FROM_DEVICE; + + page_pool = page_pool_create(&pp_params); + if (IS_ERR(page_pool)) + return PTR_ERR(page_pool); + + q->u.cq.page_pool = page_pool; + return 0; +} + +static void mlxsw_pci_cq_page_pool_fini(struct mlxsw_pci_queue *q, + enum mlxsw_pci_cq_type cq_type) +{ + if (cq_type != MLXSW_PCI_CQ_RDQ) + return; + + page_pool_destroy(q->u.cq.page_pool); +} + static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { + enum mlxsw_pci_cq_type cq_type = mlxsw_pci_cq_type(mlxsw_pci, q); int i; int err; @@ -874,15 +1045,29 @@ static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num); if (err) return err; - mlxsw_pci_cq_napi_setup(q, mlxsw_pci_cq_type(mlxsw_pci, q)); + mlxsw_pci_cq_napi_setup(q, cq_type); + + err = mlxsw_pci_cq_page_pool_init(q, cq_type); + if (err) + goto err_page_pool_init; + + napi_enable(&q->u.cq.napi); mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); return 0; + +err_page_pool_init: + mlxsw_pci_cq_napi_teardown(q); + return err; } static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q) { + enum mlxsw_pci_cq_type cq_type = mlxsw_pci_cq_type(mlxsw_pci, q); + + napi_disable(&q->u.cq.napi); + mlxsw_pci_cq_page_pool_fini(q, cq_type); mlxsw_pci_cq_napi_teardown(q); mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num); } @@ -1599,6 +1784,7 @@ static int 
mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci, { struct pci_dev *pdev = mlxsw_pci->pdev; char mrsr_pl[MLXSW_REG_MRSR_LEN]; + struct pci_dev *bridge; int err; if (!pci_reset_sbr_supported) { @@ -1615,6 +1801,9 @@ static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci, sbr: device_lock_assert(&pdev->dev); + bridge = pci_upstream_bridge(pdev); + if (bridge) + pci_cfg_access_lock(bridge); pci_cfg_access_lock(pdev); pci_save_state(pdev); @@ -1624,6 +1813,8 @@ sbr: pci_restore_state(pdev); pci_cfg_access_unlock(pdev); + if (bridge) + pci_cfg_access_unlock(bridge); return err; } @@ -1703,6 +1894,17 @@ static void mlxsw_pci_free_irq_vectors(struct mlxsw_pci *mlxsw_pci) pci_free_irq_vectors(mlxsw_pci->pdev); } +static void mlxsw_pci_num_sg_entries_set(struct mlxsw_pci *mlxsw_pci) +{ + u8 num_sg_entries; + + num_sg_entries = mlxsw_pci_num_sg_entries_get(MLXSW_PORT_MAX_MTU); + mlxsw_pci->num_sg_entries = min(num_sg_entries, + MLXSW_PCI_WQE_SG_ENTRIES); + + WARN_ON(num_sg_entries > MLXSW_PCI_WQE_SG_ENTRIES); +} + static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, const struct mlxsw_config_profile *profile, struct mlxsw_res *res) @@ -1825,6 +2027,8 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_requery_resources; + mlxsw_pci_num_sg_entries_set(mlxsw_pci); + err = mlxsw_pci_napi_devs_init(mlxsw_pci); if (err) goto err_napi_devs_init; @@ -1931,7 +2135,7 @@ static int mlxsw_pci_skb_transmit(void *bus_priv, struct sk_buff *skb, goto unlock; } mlxsw_skb_cb(skb)->tx_info = *tx_info; - elem_info->u.sdq.skb = skb; + elem_info->sdq.skb = skb; wqe = elem_info->elem; mlxsw_pci_wqe_c_set(wqe, 1); /* always report completion */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h index ac4d4ea51597..0a73b1a4526e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/port.h +++ b/drivers/net/ethernet/mellanox/mlxsw/port.h @@ -6,7 +6,8 @@ #include -#define MLXSW_PORT_MAX_MTU 10000 +#define MLXSW_PORT_MAX_MTU (10 * 1024) +#define MLXSW_PORT_ETH_FRAME_HDR (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) #define MLXSW_PORT_DEFAULT_VID 1 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 030ed71f945d..f064789f3240 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -405,29 +405,12 @@ static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port) mlxsw_sp_port->dev->dev_addr); } -static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - char pmtu_pl[MLXSW_REG_PMTU_LEN]; - int err; - - mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0); - err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); - if (err) - return err; - - *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl); - return 0; -} - static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; char pmtu_pl[MLXSW_REG_PMTU_LEN]; - mtu += MLXSW_TXHDR_LEN + ETH_HLEN; - if (mtu > mlxsw_sp_port->max_mtu) - return -EINVAL; + mtu += MLXSW_PORT_ETH_FRAME_HDR; mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu); return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl); @@ -1697,8 +1680,8 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port, NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC; 
dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK; - dev->min_mtu = 0; - dev->max_mtu = ETH_MAX_MTU; + dev->min_mtu = ETH_MIN_MTU; + dev->max_mtu = MLXSW_PORT_MAX_MTU - MLXSW_PORT_ETH_FRAME_HDR; /* Each packet needs to have a Tx header (metadata) on top all other * headers. @@ -1727,13 +1710,6 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port, goto err_max_speed_get; } - err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n", - mlxsw_sp_port->local_port); - goto err_port_max_mtu_get; - } - err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n", @@ -1877,7 +1853,6 @@ err_port_ets_init: err_port_buffers_init: err_port_admin_status_set: err_port_mtu_set: -err_port_max_mtu_get: err_max_speed_get: err_port_speed_by_width_set: err_port_system_port_mapping_set: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 3beb5d0847ab..8d3c61287696 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -238,7 +238,7 @@ struct mlxsw_sp_ptp_ops { struct hwtstamp_config *config); void (*shaper_work)(struct work_struct *work); int (*get_ts_info)(struct mlxsw_sp *mlxsw_sp, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); int (*get_stats_count)(void); void (*get_stats_strings)(u8 **p); void (*get_stats)(struct mlxsw_sp_port *mlxsw_sp_port, @@ -359,7 +359,6 @@ struct mlxsw_sp_port { u16 egr_types; struct mlxsw_sp_ptp_port_stats stats; } ptp; - int max_mtu; u32 max_speed; struct mlxsw_sp_hdroom *hdroom; u64 module_overheat_initial_val; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c index 4b713832fdd5..07cb1e26ca3e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -391,7 +391,8 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, if (err) return err; - lkey_id = aregion->ops->lkey_id_get(aregion, aentry->enc_key, erp_id); + lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key, + erp_id); if (IS_ERR(lkey_id)) return PTR_ERR(lkey_id); aentry->lkey_id = lkey_id; @@ -399,7 +400,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block); mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE, priority, region->tcam_region_info, - aentry->enc_key, erp_id, + aentry->ht_key.enc_key, erp_id, aentry->delta_info.start, aentry->delta_info.mask, aentry->delta_info.value, @@ -428,7 +429,7 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0, region->tcam_region_info, - aentry->enc_key, erp_id, + aentry->ht_key.enc_key, erp_id, aentry->delta_info.start, aentry->delta_info.mask, aentry->delta_info.value, @@ -457,7 +458,7 @@ mlxsw_sp_acl_atcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp, kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block); mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_UPDATE, priority, region->tcam_region_info, - aentry->enc_key, erp_id, + aentry->ht_key.enc_key, erp_id, aentry->delta_info.start, aentry->delta_info.mask, aentry->delta_info.value, @@ 
-480,26 +481,23 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp, int err; mlxsw_afk_encode(afk, region->key_info, &rulei->values, - aentry->ht_key.full_enc_key, mask); + aentry->ht_key.enc_key, mask); erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false); if (IS_ERR(erp_mask)) return PTR_ERR(erp_mask); aentry->erp_mask = erp_mask; aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask); - memcpy(aentry->enc_key, aentry->ht_key.full_enc_key, - sizeof(aentry->enc_key)); /* Compute all needed delta information and clear the delta bits - * from the encrypted key. + * from the encoded key. */ delta = mlxsw_sp_acl_erp_delta(aentry->erp_mask); aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta); aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta); aentry->delta_info.value = - mlxsw_sp_acl_erp_delta_value(delta, - aentry->ht_key.full_enc_key); - mlxsw_sp_acl_erp_delta_clear(delta, aentry->enc_key); + mlxsw_sp_acl_erp_delta_value(delta, aentry->ht_key.enc_key); + mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key); /* Add rule to the list of A-TCAM rules, assuming this * rule is intended to A-TCAM. In case this rule does diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c index 95f63fcf4ba1..a54eedb69a3f 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c @@ -249,7 +249,7 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion, memcpy(chunk + pad_bytes, &erp_region_id, sizeof(erp_region_id)); memcpy(chunk + key_offset, - &aentry->enc_key[chunk_key_offsets[chunk_index]], + &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]], chunk_key_len); chunk += chunk_len; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c index d231f4d2888b..9eee229303cc 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c @@ -1217,18 +1217,6 @@ static bool mlxsw_sp_acl_erp_delta_check(void *priv, const void *parent_obj, return err ? false : true; } -static int mlxsw_sp_acl_erp_hints_obj_cmp(const void *obj1, const void *obj2) -{ - const struct mlxsw_sp_acl_erp_key *key1 = obj1; - const struct mlxsw_sp_acl_erp_key *key2 = obj2; - - /* For hints purposes, two objects are considered equal - * in case the masks are the same. Does not matter what - * the "ctcam" value is. 
- */ - return memcmp(key1->mask, key2->mask, sizeof(key1->mask)); -} - static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj, void *obj) { @@ -1308,7 +1296,6 @@ static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv) static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = { .obj_size = sizeof(struct mlxsw_sp_acl_erp_key), .delta_check = mlxsw_sp_acl_erp_delta_check, - .hints_obj_cmp = mlxsw_sp_acl_erp_hints_obj_cmp, .delta_create = mlxsw_sp_acl_erp_delta_create, .delta_destroy = mlxsw_sp_acl_erp_delta_destroy, .root_create = mlxsw_sp_acl_erp_root_create, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index 79a1d8606512..010204f73ea4 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -167,9 +167,9 @@ struct mlxsw_sp_acl_atcam_region { }; struct mlxsw_sp_acl_atcam_entry_ht_key { - char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded - * key. - */ + char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key, minus + * delta bits. + */ u8 erp_id; }; @@ -181,9 +181,6 @@ struct mlxsw_sp_acl_atcam_entry { struct rhash_head ht_node; struct list_head list; /* Member in entries_list */ struct mlxsw_sp_acl_atcam_entry_ht_key ht_key; - char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key, - * minus delta bits. - */ struct { u16 start; u8 mask; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index ba090262e27e..2c0cfa79d138 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -399,11 +399,13 @@ void mlxsw_sp_hdroom_bufs_reset_sizes(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_hdroom *hdroom) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + unsigned int max_mtu = mlxsw_sp_port->dev->max_mtu; u16 reserve_cells; int i; + max_mtu += MLXSW_PORT_ETH_FRAME_HDR; /* Internal buffer. */ - reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, mlxsw_sp_port->max_mtu, + reserve_cells = mlxsw_sp_hdroom_int_buf_size_get(mlxsw_sp, max_mtu, mlxsw_sp_port->max_speed); reserve_cells = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, reserve_cells); hdroom->int_buf.reserve_cells = reserve_cells; @@ -613,7 +615,9 @@ static int mlxsw_sp_port_headroom_init(struct mlxsw_sp_port *mlxsw_sp_port) mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom); /* Buffer 9 is used for control traffic. 
*/ - size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, mlxsw_sp_port->max_mtu); + size9 = mlxsw_sp_port_headroom_8x_adjust(mlxsw_sp_port, + mlxsw_sp_port->dev->max_mtu + + MLXSW_PORT_ETH_FRAME_HDR); hdroom.bufs.buf[9].size_cells = mlxsw_sp_bytes_cells(mlxsw_sp, size9); return __mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom, true); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c index ca80af06465f..fa6eddd27ecf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_dpipe.c @@ -283,7 +283,7 @@ static u64 mlxsw_sp_dpipe_table_erif_size_get(void *priv) return MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); } -static struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = { +static const struct devlink_dpipe_table_ops mlxsw_sp_erif_ops = { .matches_dump = mlxsw_sp_dpipe_table_erif_matches_dump, .actions_dump = mlxsw_sp_dpipe_table_erif_actions_dump, .entries_dump = mlxsw_sp_dpipe_table_erif_entries_dump, @@ -734,7 +734,7 @@ static u64 mlxsw_sp_dpipe_table_host4_size_get(void *priv) return mlxsw_sp_dpipe_table_host_size_get(mlxsw_sp, AF_INET); } -static struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = { +static const struct devlink_dpipe_table_ops mlxsw_sp_host4_ops = { .matches_dump = mlxsw_sp_dpipe_table_host4_matches_dump, .actions_dump = mlxsw_sp_dpipe_table_host_actions_dump, .entries_dump = mlxsw_sp_dpipe_table_host4_entries_dump, @@ -811,7 +811,7 @@ static u64 mlxsw_sp_dpipe_table_host6_size_get(void *priv) return mlxsw_sp_dpipe_table_host_size_get(mlxsw_sp, AF_INET6); } -static struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = { +static const struct devlink_dpipe_table_ops mlxsw_sp_host6_ops = { .matches_dump = mlxsw_sp_dpipe_table_host6_matches_dump, .actions_dump = mlxsw_sp_dpipe_table_host_actions_dump, .entries_dump = mlxsw_sp_dpipe_table_host6_entries_dump, @@ -1230,7 +1230,7 @@ mlxsw_sp_dpipe_table_adj_size_get(void *priv) return size; } -static struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = { +static const struct devlink_dpipe_table_ops mlxsw_sp_dpipe_table_adj_ops = { .matches_dump = mlxsw_sp_dpipe_table_adj_matches_dump, .actions_dump = mlxsw_sp_dpipe_table_adj_actions_dump, .entries_dump = mlxsw_sp_dpipe_table_adj_entries_dump, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index a755b0a901d3..2bed8c86b7cf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -1068,7 +1068,21 @@ mlxsw_sp_get_module_eeprom_by_page(struct net_device *dev, } static int -mlxsw_sp_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) +mlxsw_sp_set_module_eeprom_by_page(struct net_device *dev, + const struct ethtool_module_eeprom *page, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + u8 slot_index = mlxsw_sp_port->mapping.slot_index; + u8 module = mlxsw_sp_port->mapping.module; + + return mlxsw_env_set_module_eeprom_by_page(mlxsw_sp->core, slot_index, + module, page, extack); +} + +static int +mlxsw_sp_get_ts_info(struct net_device *netdev, struct kernel_ethtool_ts_info *info) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev); struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; @@ -1256,6 +1270,7 @@ const struct ethtool_ops mlxsw_sp_port_ethtool_ops = { 
.get_module_info = mlxsw_sp_get_module_info, .get_module_eeprom = mlxsw_sp_get_module_eeprom, .get_module_eeprom_by_page = mlxsw_sp_get_module_eeprom_by_page, + .set_module_eeprom_by_page = mlxsw_sp_set_module_eeprom_by_page, .get_ts_info = mlxsw_sp_get_ts_info, .get_eth_phy_stats = mlxsw_sp_get_eth_phy_stats, .get_eth_mac_stats = mlxsw_sp_get_eth_mac_stats, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c index cbb6c75a6620..5b174cb95eb8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c @@ -1276,7 +1276,7 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, } int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp); @@ -1661,7 +1661,7 @@ err_get_message_types: } int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { info->phc_index = ptp_clock_index(mlxsw_sp->clock->ptp); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h index a8b88230959a..769095d4932d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.h @@ -11,7 +11,7 @@ struct mlxsw_sp; struct mlxsw_sp_port; struct mlxsw_sp_ptp_clock; -static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct ethtool_ts_info *info) +static inline int mlxsw_sp_ptp_get_ts_info_noptp(struct kernel_ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE; @@ -50,7 +50,7 @@ int mlxsw_sp1_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, void mlxsw_sp1_ptp_shaper_work(struct work_struct *work); int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); int mlxsw_sp1_get_stats_count(void); void mlxsw_sp1_get_stats_strings(u8 **p); @@ -84,7 +84,7 @@ int mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, struct hwtstamp_config *config); int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); int mlxsw_sp2_ptp_txhdr_construct(struct mlxsw_core *mlxsw_core, struct mlxsw_sp_port *mlxsw_sp_port, @@ -152,7 +152,7 @@ static inline void mlxsw_sp1_ptp_shaper_work(struct work_struct *work) } static inline int mlxsw_sp1_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { return mlxsw_sp_ptp_get_ts_info_noptp(info); } @@ -227,7 +227,7 @@ mlxsw_sp2_ptp_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port, } static inline int mlxsw_sp2_ptp_get_ts_info(struct mlxsw_sp *mlxsw_sp, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { return mlxsw_sp_ptp_get_ts_info_noptp(info); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 40ba314fbc72..800dfb64ec83 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -11450,12 +11450,16 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp) { bool old_inc_parsing_depth, new_inc_parsing_depth; struct mlxsw_sp_mp_hash_config config = {}; + struct net *net = mlxsw_sp_net(mlxsw_sp); char 
recr2_pl[MLXSW_REG_RECR2_LEN]; unsigned long bit; u32 seed; int err; - seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0); + seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).user_seed; + if (!seed) + seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0); + mlxsw_reg_recr2_pack(recr2_pl, seed); mlxsw_sp_mp4_hash_init(mlxsw_sp, &config); mlxsw_sp_mp6_hash_init(mlxsw_sp, &config); diff --git a/drivers/net/ethernet/meta/Kconfig b/drivers/net/ethernet/meta/Kconfig new file mode 100644 index 000000000000..d8f5e9f9bb33 --- /dev/null +++ b/drivers/net/ethernet/meta/Kconfig @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Meta Platforms network device configuration +# + +config NET_VENDOR_META + bool "Meta Platforms devices" + default y + help + If you have a network (Ethernet) card designed by Meta, say Y. + That's Meta as in the parent company of Facebook. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about Meta cards. If you say Y, you will be asked for + your specific card in the following questions. + +if NET_VENDOR_META + +config FBNIC + tristate "Meta Platforms Host Network Interface" + depends on X86_64 || COMPILE_TEST + depends on PCI_MSI + select PHYLINK + help + This driver supports Meta Platforms Host Network Interface. + + To compile this driver as a module, choose M here. The module + will be called fbnic. MSI-X interrupt support is required. + +endif # NET_VENDOR_META diff --git a/drivers/net/ethernet/meta/Makefile b/drivers/net/ethernet/meta/Makefile new file mode 100644 index 000000000000..88804f3de963 --- /dev/null +++ b/drivers/net/ethernet/meta/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Meta Platforms network device drivers. +# + +obj-$(CONFIG_FBNIC) += fbnic/ diff --git a/drivers/net/ethernet/meta/fbnic/Makefile b/drivers/net/ethernet/meta/fbnic/Makefile new file mode 100644 index 000000000000..9373b558fdc9 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/Makefile @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0 +# Copyright (c) Meta Platforms, Inc. and affiliates. + +# +# Makefile for the Meta(R) Host Network Interface +# + +obj-$(CONFIG_FBNIC) += fbnic.o + +fbnic-y := fbnic_devlink.o \ + fbnic_fw.o \ + fbnic_irq.o \ + fbnic_mac.o \ + fbnic_netdev.o \ + fbnic_pci.o \ + fbnic_phylink.o \ + fbnic_rpc.o \ + fbnic_tlv.o \ + fbnic_txrx.o diff --git a/drivers/net/ethernet/meta/fbnic/fbnic.h b/drivers/net/ethernet/meta/fbnic/fbnic.h new file mode 100644 index 000000000000..ad2689bfd6cb --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic.h @@ -0,0 +1,144 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#ifndef _FBNIC_H_ +#define _FBNIC_H_ + +#include +#include +#include +#include + +#include "fbnic_csr.h" +#include "fbnic_fw.h" +#include "fbnic_mac.h" +#include "fbnic_rpc.h" + +struct fbnic_dev { + struct device *dev; + struct net_device *netdev; + + u32 __iomem *uc_addr0; + u32 __iomem *uc_addr4; + const struct fbnic_mac *mac; + unsigned int fw_msix_vector; + unsigned int pcs_msix_vector; + unsigned short num_irqs; + + struct delayed_work service_task; + + struct fbnic_fw_mbx mbx[FBNIC_IPC_MBX_INDICES]; + struct fbnic_fw_cap fw_cap; + /* Lock protecting Tx Mailbox queue to prevent possible races */ + spinlock_t fw_tx_lock; + + unsigned long last_heartbeat_request; + unsigned long last_heartbeat_response; + u8 fw_heartbeat_enabled; + + u64 dsn; + u32 mps; + u32 readrq; + + /* Local copy of the device's TCAM */ + struct fbnic_act_tcam act_tcam[FBNIC_RPC_TCAM_ACT_NUM_ENTRIES]; + struct fbnic_mac_addr mac_addr[FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES]; + u8 mac_addr_boundary; + + /* Number of TCQs/RCQs available on hardware */ + u16 max_num_queues; +}; + +/* Reserve entry 0 in the MSI-X "others" array until we have filled all + * 32 of the possible interrupt slots. By doing this we can avoid any + * potential conflicts should we need to enable one of the debug interrupt + * causes later. + */ +enum { + FBNIC_FW_MSIX_ENTRY, + FBNIC_PCS_MSIX_ENTRY, + FBNIC_NON_NAPI_VECTORS +}; + +static inline bool fbnic_present(struct fbnic_dev *fbd) +{ + return !!READ_ONCE(fbd->uc_addr0); +} + +static inline void fbnic_wr32(struct fbnic_dev *fbd, u32 reg, u32 val) +{ + u32 __iomem *csr = READ_ONCE(fbd->uc_addr0); + + if (csr) + writel(val, csr + reg); +} + +u32 fbnic_rd32(struct fbnic_dev *fbd, u32 reg); + +static inline void fbnic_wrfl(struct fbnic_dev *fbd) +{ + fbnic_rd32(fbd, FBNIC_MASTER_SPARE_0); +} + +static inline void +fbnic_rmw32(struct fbnic_dev *fbd, u32 reg, u32 mask, u32 val) +{ + u32 v; + + v = fbnic_rd32(fbd, reg); + v &= ~mask; + v |= val; + fbnic_wr32(fbd, reg, v); +} + +#define wr32(_f, _r, _v) fbnic_wr32(_f, _r, _v) +#define rd32(_f, _r) fbnic_rd32(_f, _r) +#define wrfl(_f) fbnic_wrfl(_f) + +bool fbnic_fw_present(struct fbnic_dev *fbd); +u32 fbnic_fw_rd32(struct fbnic_dev *fbd, u32 reg); +void fbnic_fw_wr32(struct fbnic_dev *fbd, u32 reg, u32 val); + +#define fw_rd32(_f, _r) fbnic_fw_rd32(_f, _r) +#define fw_wr32(_f, _r, _v) fbnic_fw_wr32(_f, _r, _v) +#define fw_wrfl(_f) fbnic_fw_rd32(_f, FBNIC_FW_ZERO_REG) + +static inline bool fbnic_bmc_present(struct fbnic_dev *fbd) +{ + return fbd->fw_cap.bmc_present; +} + +static inline bool fbnic_init_failure(struct fbnic_dev *fbd) +{ + return !fbd->netdev; +} + +extern char fbnic_driver_name[]; + +void fbnic_devlink_free(struct fbnic_dev *fbd); +struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev); +void fbnic_devlink_register(struct fbnic_dev *fbd); +void fbnic_devlink_unregister(struct fbnic_dev *fbd); + +int fbnic_fw_enable_mbx(struct fbnic_dev *fbd); +void fbnic_fw_disable_mbx(struct fbnic_dev *fbd); + +int fbnic_pcs_irq_enable(struct fbnic_dev *fbd); +void fbnic_pcs_irq_disable(struct fbnic_dev *fbd); + +int fbnic_request_irq(struct fbnic_dev *dev, int nr, irq_handler_t handler, + unsigned long flags, const char *name, void *data); +void fbnic_free_irq(struct fbnic_dev *dev, int nr, void *data); +void fbnic_free_irqs(struct fbnic_dev *fbd); +int fbnic_alloc_irqs(struct fbnic_dev *fbd); + +enum fbnic_boards { + fbnic_board_asic +}; + +struct fbnic_info { + unsigned int max_num_queues; + unsigned int bar_mask; +}; + +#endif /* _FBNIC_H_ */
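As an aside, here is an illustrative sketch (not part of the patch) of how the accessors above compose: fbnic_rmw32() layers a read-modify-write on top of fbnic_rd32()/fbnic_wr32(), and wrfl() flushes posted writes by reading back the FBNIC_MASTER_SPARE_0 scratch register. The register and bit names are taken from fbnic_csr.h later in this patch; the function name and the trailing flush are assumptions made for the example.

static void fbnic_tmi_drop_enable_example(struct fbnic_dev *fbd)
{
	/* Set the TMI drop-enable bit while preserving the rest of the
	 * register, then read back the scratch register to flush the
	 * posted write (whether a flush is needed here is an assumption
	 * for illustration; the driver decides that per call site).
	 */
	fbnic_rmw32(fbd, FBNIC_TMI_DROP_CTRL, FBNIC_TMI_DROP_CTRL_EN,
		    FBNIC_TMI_DROP_CTRL_EN);
	wrfl(fbd);
}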
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_csr.h b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h new file mode 100644 index 000000000000..a64360de0552 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_csr.h @@ -0,0 +1,838 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#ifndef _FBNIC_CSR_H_ +#define _FBNIC_CSR_H_ + +#include + +#define CSR_BIT(nr) (1u << (nr)) +#define CSR_GENMASK(h, l) GENMASK(h, l) + +#define DESC_BIT(nr) BIT_ULL(nr) +#define DESC_GENMASK(h, l) GENMASK_ULL(h, l) + +/* Defines the minimum firmware version required by the driver */ +#define MIN_FW_MAJOR_VERSION 0 +#define MIN_FW_MINOR_VERSION 10 +#define MIN_FW_BUILD_VERSION 6 +#define MIN_FW_VERSION_CODE (MIN_FW_MAJOR_VERSION * (1u << 24) + \ + MIN_FW_MINOR_VERSION * (1u << 16) + \ + MIN_FW_BUILD_VERSION) + +#define PCI_DEVICE_ID_META_FBNIC_ASIC 0x0013 + +#define FBNIC_CLOCK_FREQ (600 * (1000 * 1000)) + +/* Transmit Work Descriptor Format */ +/* Length, Type, Offset Masks and Shifts */ +#define FBNIC_TWD_L2_HLEN_MASK DESC_GENMASK(5, 0) + +#define FBNIC_TWD_L3_TYPE_MASK DESC_GENMASK(7, 6) +enum { + FBNIC_TWD_L3_TYPE_OTHER = 0, + FBNIC_TWD_L3_TYPE_IPV4 = 1, + FBNIC_TWD_L3_TYPE_IPV6 = 2, + FBNIC_TWD_L3_TYPE_V6V6 = 3, +}; + +#define FBNIC_TWD_L3_OHLEN_MASK DESC_GENMASK(15, 8) +#define FBNIC_TWD_L3_IHLEN_MASK DESC_GENMASK(23, 16) + +enum { + FBNIC_TWD_L4_TYPE_OTHER = 0, + FBNIC_TWD_L4_TYPE_TCP = 1, + FBNIC_TWD_L4_TYPE_UDP = 2, +}; + +#define FBNIC_TWD_CSUM_OFFSET_MASK DESC_GENMASK(27, 24) +#define FBNIC_TWD_L4_HLEN_MASK DESC_GENMASK(31, 28) + +/* Flags and Type */ +#define FBNIC_TWD_L4_TYPE_MASK DESC_GENMASK(33, 32) +#define FBNIC_TWD_FLAG_REQ_TS DESC_BIT(34) +#define FBNIC_TWD_FLAG_REQ_LSO DESC_BIT(35) +#define FBNIC_TWD_FLAG_REQ_CSO DESC_BIT(36) +#define FBNIC_TWD_FLAG_REQ_COMPLETION DESC_BIT(37) +#define FBNIC_TWD_FLAG_DEST_MAC DESC_BIT(43) +#define FBNIC_TWD_FLAG_DEST_BMC DESC_BIT(44) +#define FBNIC_TWD_FLAG_DEST_FW DESC_BIT(45) +#define FBNIC_TWD_TYPE_MASK DESC_GENMASK(47, 46) +enum { + FBNIC_TWD_TYPE_META = 0, + FBNIC_TWD_TYPE_OPT_META = 1, + FBNIC_TWD_TYPE_AL = 2, + FBNIC_TWD_TYPE_LAST_AL = 3, +}; + +/* MSS and Completion Req */ +#define FBNIC_TWD_MSS_MASK DESC_GENMASK(61, 48) + +#define FBNIC_TWD_TS_MASK DESC_GENMASK(39, 0) +#define FBNIC_TWD_ADDR_MASK DESC_GENMASK(45, 0) +#define FBNIC_TWD_LEN_MASK DESC_GENMASK(63, 48) + +/* Tx Completion Descriptor Format */ +#define FBNIC_TCD_TYPE0_HEAD0_MASK DESC_GENMASK(15, 0) +#define FBNIC_TCD_TYPE0_HEAD1_MASK DESC_GENMASK(31, 16) + +#define FBNIC_TCD_TYPE1_TS_MASK DESC_GENMASK(39, 0) + +#define FBNIC_TCD_STATUS_MASK DESC_GENMASK(59, 48) +#define FBNIC_TCD_STATUS_TS_INVALID DESC_BIT(48) +#define FBNIC_TCD_STATUS_ILLEGAL_TS_REQ DESC_BIT(49) +#define FBNIC_TCD_TWQ1 DESC_BIT(60) +#define FBNIC_TCD_TYPE_MASK DESC_GENMASK(62, 61) +enum { + FBNIC_TCD_TYPE_0 = 0, + FBNIC_TCD_TYPE_1 = 1, +}; + +#define FBNIC_TCD_DONE DESC_BIT(63) + +/* Rx Buffer Descriptor Format + * + * The layout of this can vary depending on the page size of the system. + * + * If the page size is 4K then the layout will simply consist of ID for + * the 16 most significant bits, and the lower 46 are essentially the page + * address with the lowest 12 bits being reserved 0 due to the fact that + * a page will be aligned. + * + * If the page size is larger than 4K then the lower n bits of the ID and + * page address will be reserved for the fragment ID. 
This fragment will + * be 4K in size and will be used to index both the DMA address and the ID + * by the same amount. + */ +#define FBNIC_BD_DESC_ADDR_MASK DESC_GENMASK(45, 12) +#define FBNIC_BD_DESC_ID_MASK DESC_GENMASK(63, 48) +#define FBNIC_BD_FRAG_SIZE \ + (FBNIC_BD_DESC_ADDR_MASK & ~(FBNIC_BD_DESC_ADDR_MASK - 1)) +#define FBNIC_BD_FRAG_COUNT \ + (PAGE_SIZE / FBNIC_BD_FRAG_SIZE) +#define FBNIC_BD_FRAG_ADDR_MASK \ + (FBNIC_BD_DESC_ADDR_MASK & \ + ~(FBNIC_BD_DESC_ADDR_MASK * FBNIC_BD_FRAG_COUNT)) +#define FBNIC_BD_FRAG_ID_MASK \ + (FBNIC_BD_DESC_ID_MASK & \ + ~(FBNIC_BD_DESC_ID_MASK * FBNIC_BD_FRAG_COUNT)) +#define FBNIC_BD_PAGE_ADDR_MASK \ + (FBNIC_BD_DESC_ADDR_MASK & ~FBNIC_BD_FRAG_ADDR_MASK) +#define FBNIC_BD_PAGE_ID_MASK \ + (FBNIC_BD_DESC_ID_MASK & ~FBNIC_BD_FRAG_ID_MASK) + +/* Rx Completion Queue Descriptors */ +#define FBNIC_RCD_TYPE_MASK DESC_GENMASK(62, 61) +enum { + FBNIC_RCD_TYPE_HDR_AL = 0, + FBNIC_RCD_TYPE_PAY_AL = 1, + FBNIC_RCD_TYPE_OPT_META = 2, + FBNIC_RCD_TYPE_META = 3, +}; + +#define FBNIC_RCD_DONE DESC_BIT(63) + +/* Address/Length Completion Descriptors */ +#define FBNIC_RCD_AL_BUFF_ID_MASK DESC_GENMASK(15, 0) +#define FBNIC_RCD_AL_BUFF_FRAG_MASK (FBNIC_BD_FRAG_COUNT - 1) +#define FBNIC_RCD_AL_BUFF_PAGE_MASK \ + (FBNIC_RCD_AL_BUFF_ID_MASK & ~FBNIC_RCD_AL_BUFF_FRAG_MASK) +#define FBNIC_RCD_AL_BUFF_LEN_MASK DESC_GENMASK(28, 16) +#define FBNIC_RCD_AL_BUFF_OFF_MASK DESC_GENMASK(43, 32) +#define FBNIC_RCD_AL_PAGE_FIN DESC_BIT(60) + +/* Header AL specific values */ +#define FBNIC_RCD_HDR_AL_OVERFLOW DESC_BIT(53) +#define FBNIC_RCD_HDR_AL_DMA_HINT_MASK DESC_GENMASK(59, 54) +enum { + FBNIC_RCD_HDR_AL_DMA_HINT_NONE = 0, + FBNIC_RCD_HDR_AL_DMA_HINT_L2 = 1, + FBNIC_RCD_HDR_AL_DMA_HINT_L3 = 2, + FBNIC_RCD_HDR_AL_DMA_HINT_L4 = 4, +}; + +/* Optional Metadata Completion Descriptors */ +#define FBNIC_RCD_OPT_META_TS_MASK DESC_GENMASK(39, 0) +#define FBNIC_RCD_OPT_META_ACTION_MASK DESC_GENMASK(45, 40) +#define FBNIC_RCD_OPT_META_ACTION DESC_BIT(57) +#define FBNIC_RCD_OPT_META_TS DESC_BIT(58) +#define FBNIC_RCD_OPT_META_TYPE_MASK DESC_GENMASK(60, 59) + +/* Metadata Completion Descriptors */ +#define FBNIC_RCD_META_RSS_HASH_MASK DESC_GENMASK(31, 0) +#define FBNIC_RCD_META_L2_CSUM_MASK DESC_GENMASK(47, 32) +#define FBNIC_RCD_META_L3_TYPE_MASK DESC_GENMASK(49, 48) +enum { + FBNIC_RCD_META_L3_TYPE_OTHER = 0, + FBNIC_RCD_META_L3_TYPE_IPV4 = 1, + FBNIC_RCD_META_L3_TYPE_IPV6 = 2, + FBNIC_RCD_META_L3_TYPE_V6V6 = 3, +}; + +#define FBNIC_RCD_META_L4_TYPE_MASK DESC_GENMASK(51, 50) +enum { + FBNIC_RCD_META_L4_TYPE_OTHER = 0, + FBNIC_RCD_META_L4_TYPE_TCP = 1, + FBNIC_RCD_META_L4_TYPE_UDP = 2, +}; + +#define FBNIC_RCD_META_L4_CSUM_UNNECESSARY DESC_BIT(52) +#define FBNIC_RCD_META_ERR_MAC_EOP DESC_BIT(53) +#define FBNIC_RCD_META_ERR_TRUNCATED_FRAME DESC_BIT(54) +#define FBNIC_RCD_META_ERR_PARSER DESC_BIT(55) +#define FBNIC_RCD_META_UNCORRECTABLE_ERR_MASK \ + (FBNIC_RCD_META_ERR_MAC_EOP | FBNIC_RCD_META_ERR_TRUNCATED_FRAME) +#define FBNIC_RCD_META_ECN DESC_BIT(60) + +/* Register Definitions + * + * The registers are laid out as indexes into an le32 array. As such, the + * actual address is 4 times the index value. Below, each register is defined + * by 3 fields: name, index, and address.
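+ *
+ * For example, FBNIC_INTR_MASK(n) below has index 0x00008 + (n), so its
+ * byte address is 4 * (0x00008 + (n)) = 0x00020 + 4*n, which is exactly
+ * the address annotation in the comment beside it.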
+ * + * Name Index Address + *************************************************************************/ +/* Interrupt Registers */ +#define FBNIC_CSR_START_INTR 0x00000 /* CSR section delimiter */ +#define FBNIC_INTR_STATUS(n) (0x00000 + (n)) /* 0x00000 + 4*n */ +#define FBNIC_INTR_STATUS_CNT 8 +#define FBNIC_INTR_MASK(n) (0x00008 + (n)) /* 0x00020 + 4*n */ +#define FBNIC_INTR_MASK_CNT 8 +#define FBNIC_INTR_SET(n) (0x00010 + (n)) /* 0x00040 + 4*n */ +#define FBNIC_INTR_SET_CNT 8 +#define FBNIC_INTR_CLEAR(n) (0x00018 + (n)) /* 0x00060 + 4*n */ +#define FBNIC_INTR_CLEAR_CNT 8 +#define FBNIC_INTR_SW_STATUS(n) (0x00020 + (n)) /* 0x00080 + 4*n */ +#define FBNIC_INTR_SW_STATUS_CNT 8 +#define FBNIC_INTR_SW_AC_MODE(n) (0x00028 + (n)) /* 0x000a0 + 4*n */ +#define FBNIC_INTR_SW_AC_MODE_CNT 8 +#define FBNIC_INTR_MASK_SET(n) (0x00030 + (n)) /* 0x000c0 + 4*n */ +#define FBNIC_INTR_MASK_SET_CNT 8 +#define FBNIC_INTR_MASK_CLEAR(n) (0x00038 + (n)) /* 0x000e0 + 4*n */ +#define FBNIC_INTR_MASK_CLEAR_CNT 8 +#define FBNIC_MAX_MSIX_VECS 256U +#define FBNIC_INTR_MSIX_CTRL(n) (0x00040 + (n)) /* 0x00100 + 4*n */ +#define FBNIC_INTR_MSIX_CTRL_VECTOR_MASK CSR_GENMASK(7, 0) +#define FBNIC_INTR_MSIX_CTRL_ENABLE CSR_BIT(31) +enum { + FBNIC_INTR_MSIX_CTRL_PCS_IDX = 34, +}; + +#define FBNIC_CSR_END_INTR 0x0005f /* CSR section delimiter */ + +/* Interrupt MSIX Registers */ +#define FBNIC_CSR_START_INTR_CQ 0x00400 /* CSR section delimiter */ +#define FBNIC_INTR_CQ_REARM(n) \ + (0x00400 + 4 * (n)) /* 0x01000 + 16*n */ +#define FBNIC_INTR_CQ_REARM_CNT 256 +#define FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT CSR_GENMASK(13, 0) +#define FBNIC_INTR_CQ_REARM_RCQ_TIMEOUT_UPD_EN CSR_BIT(14) +#define FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT CSR_GENMASK(28, 15) +#define FBNIC_INTR_CQ_REARM_TCQ_TIMEOUT_UPD_EN CSR_BIT(29) +#define FBNIC_INTR_CQ_REARM_INTR_RELOAD CSR_BIT(30) +#define FBNIC_INTR_CQ_REARM_INTR_UNMASK CSR_BIT(31) + +#define FBNIC_INTR_RCQ_TIMEOUT(n) \ + (0x00401 + 4 * (n)) /* 0x01004 + 16*n */ +#define FBNIC_INTR_RCQ_TIMEOUT_CNT 256 +#define FBNIC_INTR_TCQ_TIMEOUT(n) \ + (0x00402 + 4 * (n)) /* 0x01008 + 16*n */ +#define FBNIC_INTR_TCQ_TIMEOUT_CNT 256 +#define FBNIC_CSR_END_INTR_CQ 0x007fe /* CSR section delimiter */ + +/* Global QM Tx registers */ +#define FBNIC_CSR_START_QM_TX 0x00800 /* CSR section delimiter */ +#define FBNIC_QM_TWQ_IDLE(n) (0x00800 + (n)) /* 0x02000 + 4*n */ +#define FBNIC_QM_TWQ_IDLE_CNT 8 +#define FBNIC_QM_TWQ_DEFAULT_META_L 0x00818 /* 0x02060 */ +#define FBNIC_QM_TWQ_DEFAULT_META_H 0x00819 /* 0x02064 */ + +#define FBNIC_QM_TQS_CTL0 0x0081b /* 0x0206c */ +#define FBNIC_QM_TQS_CTL0_LSO_TS_MASK CSR_BIT(0) +enum { + FBNIC_QM_TQS_CTL0_LSO_TS_FIRST = 0, + FBNIC_QM_TQS_CTL0_LSO_TS_LAST = 1, +}; + +#define FBNIC_QM_TQS_CTL0_PREFETCH_THRESH CSR_GENMASK(7, 1) +enum { + FBNIC_QM_TQS_CTL0_PREFETCH_THRESH_MIN = 16, +}; + +#define FBNIC_QM_TQS_CTL1 0x0081c /* 0x02070 */ +#define FBNIC_QM_TQS_CTL1_MC_MAX_CREDITS CSR_GENMASK(7, 0) +#define FBNIC_QM_TQS_CTL1_BULK_MAX_CREDITS CSR_GENMASK(15, 8) +#define FBNIC_QM_TQS_MTU_CTL0 0x0081d /* 0x02074 */ +#define FBNIC_QM_TQS_MTU_CTL1 0x0081e /* 0x02078 */ +#define FBNIC_QM_TQS_MTU_CTL1_BULK CSR_GENMASK(13, 0) +#define FBNIC_QM_TCQ_IDLE(n) (0x00821 + (n)) /* 0x02084 + 4*n */ +#define FBNIC_QM_TCQ_IDLE_CNT 4 +#define FBNIC_QM_TCQ_CTL0 0x0082d /* 0x020b4 */ +#define FBNIC_QM_TCQ_CTL0_COAL_WAIT CSR_GENMASK(15, 0) +#define FBNIC_QM_TCQ_CTL0_TICK_CYCLES CSR_GENMASK(26, 16) +#define FBNIC_QM_TQS_IDLE(n) (0x00830 + (n)) /* 0x020c0 + 4*n */ +#define FBNIC_QM_TQS_IDLE_CNT 8 +#define 
FBNIC_QM_TQS_EDT_TS_RANGE 0x00849 /* 0x2124 */ +#define FBNIC_QM_TDE_IDLE(n) (0x00853 + (n)) /* 0x0214c + 4*n */ +#define FBNIC_QM_TDE_IDLE_CNT 8 +#define FBNIC_QM_TNI_TDF_CTL 0x0086c /* 0x021b0 */ +#define FBNIC_QM_TNI_TDF_CTL_MRRS CSR_GENMASK(1, 0) +#define FBNIC_QM_TNI_TDF_CTL_CLS CSR_GENMASK(3, 2) +#define FBNIC_QM_TNI_TDF_CTL_MAX_OT CSR_GENMASK(11, 4) +#define FBNIC_QM_TNI_TDF_CTL_MAX_OB CSR_GENMASK(23, 12) +#define FBNIC_QM_TNI_TDE_CTL 0x0086d /* 0x021b4 */ +#define FBNIC_QM_TNI_TDE_CTL_MRRS CSR_GENMASK(1, 0) +#define FBNIC_QM_TNI_TDE_CTL_CLS CSR_GENMASK(3, 2) +#define FBNIC_QM_TNI_TDE_CTL_MAX_OT CSR_GENMASK(11, 4) +#define FBNIC_QM_TNI_TDE_CTL_MAX_OB CSR_GENMASK(24, 12) +#define FBNIC_QM_TNI_TDE_CTL_MRRS_1K CSR_BIT(25) +#define FBNIC_QM_TNI_TCM_CTL 0x0086e /* 0x021b8 */ +#define FBNIC_QM_TNI_TCM_CTL_MPS CSR_GENMASK(1, 0) +#define FBNIC_QM_TNI_TCM_CTL_CLS CSR_GENMASK(3, 2) +#define FBNIC_QM_TNI_TCM_CTL_MAX_OT CSR_GENMASK(11, 4) +#define FBNIC_QM_TNI_TCM_CTL_MAX_OB CSR_GENMASK(23, 12) +#define FBNIC_CSR_END_QM_TX 0x00873 /* CSR section delimiter */ + +/* Global QM Rx registers */ +#define FBNIC_CSR_START_QM_RX 0x00c00 /* CSR section delimiter */ +#define FBNIC_QM_RCQ_IDLE(n) (0x00c00 + (n)) /* 0x03000 + 4*n */ +#define FBNIC_QM_RCQ_IDLE_CNT 4 +#define FBNIC_QM_RCQ_CTL0 0x00c0c /* 0x03030 */ +#define FBNIC_QM_RCQ_CTL0_COAL_WAIT CSR_GENMASK(15, 0) +#define FBNIC_QM_RCQ_CTL0_TICK_CYCLES CSR_GENMASK(26, 16) +#define FBNIC_QM_HPQ_IDLE(n) (0x00c0f + (n)) /* 0x0303c + 4*n */ +#define FBNIC_QM_HPQ_IDLE_CNT 4 +#define FBNIC_QM_PPQ_IDLE(n) (0x00c13 + (n)) /* 0x0304c + 4*n */ +#define FBNIC_QM_PPQ_IDLE_CNT 4 +#define FBNIC_QM_RNI_RBP_CTL 0x00c2d /* 0x030b4 */ +#define FBNIC_QM_RNI_RBP_CTL_MRRS CSR_GENMASK(1, 0) +#define FBNIC_QM_RNI_RBP_CTL_CLS CSR_GENMASK(3, 2) +#define FBNIC_QM_RNI_RBP_CTL_MAX_OT CSR_GENMASK(11, 4) +#define FBNIC_QM_RNI_RBP_CTL_MAX_OB CSR_GENMASK(23, 12) +#define FBNIC_QM_RNI_RDE_CTL 0x00c2e /* 0x030b8 */ +#define FBNIC_QM_RNI_RDE_CTL_MPS CSR_GENMASK(1, 0) +#define FBNIC_QM_RNI_RDE_CTL_CLS CSR_GENMASK(3, 2) +#define FBNIC_QM_RNI_RDE_CTL_MAX_OT CSR_GENMASK(11, 4) +#define FBNIC_QM_RNI_RDE_CTL_MAX_OB CSR_GENMASK(23, 12) +#define FBNIC_QM_RNI_RCM_CTL 0x00c2f /* 0x030bc */ +#define FBNIC_QM_RNI_RCM_CTL_MPS CSR_GENMASK(1, 0) +#define FBNIC_QM_RNI_RCM_CTL_CLS CSR_GENMASK(3, 2) +#define FBNIC_QM_RNI_RCM_CTL_MAX_OT CSR_GENMASK(11, 4) +#define FBNIC_QM_RNI_RCM_CTL_MAX_OB CSR_GENMASK(23, 12) +#define FBNIC_CSR_END_QM_RX 0x00c34 /* CSR section delimiter */ + +/* TCE registers */ +#define FBNIC_CSR_START_TCE 0x04000 /* CSR section delimiter */ +#define FBNIC_TCE_REG_BASE 0x04000 /* 0x10000 */ + +#define FBNIC_TCE_LSO_CTRL 0x04000 /* 0x10000 */ +#define FBNIC_TCE_LSO_CTRL_TCPF_CLR_1ST CSR_GENMASK(8, 0) +#define FBNIC_TCE_LSO_CTRL_TCPF_CLR_MID CSR_GENMASK(17, 9) +#define FBNIC_TCE_LSO_CTRL_TCPF_CLR_END CSR_GENMASK(26, 18) +#define FBNIC_TCE_LSO_CTRL_IPID_MODE_INC CSR_BIT(27) + +#define FBNIC_TCE_CSO_CTRL 0x04001 /* 0x10004 */ +#define FBNIC_TCE_CSO_CTRL_TCP_ZERO_CSUM CSR_BIT(0) + +#define FBNIC_TCE_TXB_CTRL 0x04002 /* 0x10008 */ +#define FBNIC_TCE_TXB_CTRL_LOAD CSR_BIT(0) +#define FBNIC_TCE_TXB_CTRL_TCAM_ENABLE CSR_BIT(1) +#define FBNIC_TCE_TXB_CTRL_DISABLE CSR_BIT(2) + +#define FBNIC_TCE_TXB_ENQ_WRR_CTRL 0x04003 /* 0x1000c */ +#define FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT0 CSR_GENMASK(7, 0) +#define FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT1 CSR_GENMASK(15, 8) +#define FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT2 CSR_GENMASK(23, 16) + +#define FBNIC_TCE_TXB_TEI_Q0_CTRL 0x04004 /* 0x10010 */ +#define 
FBNIC_TCE_TXB_TEI_Q1_CTRL 0x04005 /* 0x10014 */ +#define FBNIC_TCE_TXB_MC_Q_CTRL 0x04006 /* 0x10018 */ +#define FBNIC_TCE_TXB_RX_TEI_Q_CTRL 0x04007 /* 0x1001c */ +#define FBNIC_TCE_TXB_RX_BMC_Q_CTRL 0x04008 /* 0x10020 */ +#define FBNIC_TCE_TXB_Q_CTRL_START CSR_GENMASK(10, 0) +#define FBNIC_TCE_TXB_Q_CTRL_SIZE CSR_GENMASK(22, 11) + +#define FBNIC_TCE_TXB_TEI_DWRR_CTRL 0x04009 /* 0x10024 */ +#define FBNIC_TCE_TXB_TEI_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0) +#define FBNIC_TCE_TXB_TEI_DWRR_CTRL_QUANTUM1 CSR_GENMASK(15, 8) +#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL 0x0400a /* 0x10028 */ +#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0) +#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM1 CSR_GENMASK(15, 8) +#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2 CSR_GENMASK(23, 16) + +#define FBNIC_TCE_TXB_CLDR_CFG 0x0400b /* 0x1002c */ +#define FBNIC_TCE_TXB_CLDR_CFG_NUM_SLOT CSR_GENMASK(5, 0) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG(n) (0x0400c + (n)) /* 0x10030 + 4*n */ +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_CNT 16 +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_0 CSR_GENMASK(1, 0) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_1 CSR_GENMASK(3, 2) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_2 CSR_GENMASK(5, 4) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_0_3 CSR_GENMASK(7, 6) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_0 CSR_GENMASK(9, 8) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_1 CSR_GENMASK(11, 10) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_2 CSR_GENMASK(13, 12) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_1_3 CSR_GENMASK(15, 14) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_0 CSR_GENMASK(17, 16) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_1 CSR_GENMASK(19, 18) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_2 CSR_GENMASK(21, 20) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_2_3 CSR_GENMASK(23, 22) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_0 CSR_GENMASK(25, 24) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_1 CSR_GENMASK(27, 26) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_2 CSR_GENMASK(29, 28) +#define FBNIC_TCE_TXB_CLDR_SLOT_CFG_DEST_ID_3_3 CSR_GENMASK(31, 30) + +#define FBNIC_TCE_BMC_MAX_PKTSZ 0x0403a /* 0x100e8 */ +#define FBNIC_TCE_BMC_MAX_PKTSZ_TX CSR_GENMASK(13, 0) +#define FBNIC_TCE_BMC_MAX_PKTSZ_RX CSR_GENMASK(27, 14) +#define FBNIC_TCE_MC_MAX_PKTSZ 0x0403b /* 0x100ec */ +#define FBNIC_TCE_MC_MAX_PKTSZ_TMI CSR_GENMASK(13, 0) + +#define FBNIC_TCE_SOP_PROT_CTRL 0x0403c /* 0x100f0 */ +#define FBNIC_TCE_SOP_PROT_CTRL_TBI CSR_GENMASK(7, 0) +#define FBNIC_TCE_SOP_PROT_CTRL_TTI_FRM CSR_GENMASK(14, 8) +#define FBNIC_TCE_SOP_PROT_CTRL_TTI_CM CSR_GENMASK(18, 15) + +#define FBNIC_TCE_DROP_CTRL 0x0403d /* 0x100f4 */ +#define FBNIC_TCE_DROP_CTRL_TTI_CM_DROP_EN CSR_BIT(0) +#define FBNIC_TCE_DROP_CTRL_TTI_FRM_DROP_EN CSR_BIT(1) +#define FBNIC_TCE_DROP_CTRL_TTI_TBI_DROP_EN CSR_BIT(2) + +#define FBNIC_TCE_TXB_TX_BMC_Q_CTRL 0x0404B /* 0x1012c */ +#define FBNIC_TCE_TXB_BMC_DWRR_CTRL 0x0404C /* 0x10130 */ +#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM0 CSR_GENMASK(7, 0) +#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM1 CSR_GENMASK(15, 8) +#define FBNIC_TCE_TXB_TEI_DWRR_CTRL_EXT 0x0404D /* 0x10134 */ +#define FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_EXT \ + 0x0404E /* 0x10138 */ +#define FBNIC_TCE_TXB_BMC_DWRR_CTRL_EXT 0x0404F /* 0x1013c */ +#define FBNIC_CSR_END_TCE 0x04050 /* CSR section delimiter */ + +/* TMI registers */ +#define FBNIC_CSR_START_TMI 0x04400 /* CSR section delimiter */ +#define FBNIC_TMI_SOP_PROT_CTRL 0x04400 /* 0x11000 */ +#define FBNIC_TMI_DROP_CTRL 0x04401 /* 0x11004 */ +#define 
FBNIC_TMI_DROP_CTRL_EN CSR_BIT(0) +#define FBNIC_CSR_END_TMI 0x0443f /* CSR section delimiter */ +/* Rx Buffer Registers */ +#define FBNIC_CSR_START_RXB 0x08000 /* CSR section delimiter */ +enum { + FBNIC_RXB_FIFO_MC = 0, + /* Unused */ + /* Unused */ + FBNIC_RXB_FIFO_NET_TO_BMC = 3, + FBNIC_RXB_FIFO_HOST = 4, + /* Unused */ + FBNIC_RXB_FIFO_BMC_TO_HOST = 6, + /* Unused */ + FBNIC_RXB_FIFO_INDICES = 8 +}; + +#define FBNIC_RXB_CT_SIZE(n) (0x08000 + (n)) /* 0x20000 + 4*n */ +#define FBNIC_RXB_CT_SIZE_CNT 8 +#define FBNIC_RXB_CT_SIZE_HEADER CSR_GENMASK(5, 0) +#define FBNIC_RXB_CT_SIZE_PAYLOAD CSR_GENMASK(11, 6) +#define FBNIC_RXB_CT_SIZE_ENABLE CSR_BIT(12) +#define FBNIC_RXB_PAUSE_DROP_CTRL 0x08008 /* 0x20020 */ +#define FBNIC_RXB_PAUSE_DROP_CTRL_DROP_ENABLE CSR_GENMASK(7, 0) +#define FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE CSR_GENMASK(15, 8) +#define FBNIC_RXB_PAUSE_DROP_CTRL_ECN_ENABLE CSR_GENMASK(23, 16) +#define FBNIC_RXB_PAUSE_DROP_CTRL_PS_ENABLE CSR_GENMASK(27, 24) +#define FBNIC_RXB_PAUSE_THLD(n) (0x08009 + (n)) /* 0x20024 + 4*n */ +#define FBNIC_RXB_PAUSE_THLD_CNT 8 +#define FBNIC_RXB_PAUSE_THLD_ON CSR_GENMASK(12, 0) +#define FBNIC_RXB_PAUSE_THLD_OFF CSR_GENMASK(25, 13) +#define FBNIC_RXB_DROP_THLD(n) (0x08011 + (n)) /* 0x20044 + 4*n */ +#define FBNIC_RXB_DROP_THLD_CNT 8 +#define FBNIC_RXB_DROP_THLD_ON CSR_GENMASK(12, 0) +#define FBNIC_RXB_DROP_THLD_OFF CSR_GENMASK(25, 13) +#define FBNIC_RXB_ECN_THLD(n) (0x0801e + (n)) /* 0x20078 + 4*n */ +#define FBNIC_RXB_ECN_THLD_CNT 8 +#define FBNIC_RXB_ECN_THLD_ON CSR_GENMASK(12, 0) +#define FBNIC_RXB_ECN_THLD_OFF CSR_GENMASK(25, 13) +#define FBNIC_RXB_PBUF_CFG(n) (0x08027 + (n)) /* 0x2009c + 4*n */ +#define FBNIC_RXB_PBUF_CFG_CNT 8 +#define FBNIC_RXB_PBUF_BASE_ADDR CSR_GENMASK(12, 0) +#define FBNIC_RXB_PBUF_SIZE CSR_GENMASK(21, 13) +#define FBNIC_RXB_DWRR_RDE_WEIGHT0 0x0802f /* 0x200bc */ +#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0 CSR_GENMASK(7, 0) +#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM1 CSR_GENMASK(15, 8) +#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM2 CSR_GENMASK(23, 16) +#define FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM3 CSR_GENMASK(31, 24) +#define FBNIC_RXB_DWRR_RDE_WEIGHT1 0x08030 /* 0x200c0 */ +#define FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4 CSR_GENMASK(7, 0) +#define FBNIC_RXB_DWRR_BMC_WEIGHT 0x08031 /* 0x200c4 */ +#define FBNIC_RXB_CLDR_PRIO_CFG(n) (0x8034 + (n)) /* 0x200d0 + 4*n */ +#define FBNIC_RXB_CLDR_PRIO_CFG_CNT 16 +#define FBNIC_RXB_ENDIAN_FCS 0x08044 /* 0x20110 */ +enum { + /* Unused */ + /* Unused */ + FBNIC_RXB_DEQUEUE_BMC = 2, + FBNIC_RXB_DEQUEUE_HOST = 3, + FBNIC_RXB_DEQUEUE_INDICES = 4 +}; + +#define FBNIC_RXB_PBUF_CREDIT(n) (0x08047 + (n)) /* 0x2011C + 4*n */ +#define FBNIC_RXB_PBUF_CREDIT_CNT 8 +#define FBNIC_RXB_PBUF_CREDIT_MASK CSR_GENMASK(13, 0) +#define FBNIC_RXB_INTF_CREDIT 0x0804f /* 0x2013C */ +#define FBNIC_RXB_INTF_CREDIT_MASK0 CSR_GENMASK(3, 0) +#define FBNIC_RXB_INTF_CREDIT_MASK1 CSR_GENMASK(7, 4) +#define FBNIC_RXB_INTF_CREDIT_MASK2 CSR_GENMASK(11, 8) +#define FBNIC_RXB_INTF_CREDIT_MASK3 CSR_GENMASK(15, 12) + +#define FBNIC_RXB_PAUSE_EVENT_CNT(n) (0x08053 + (n)) /* 0x2014c + 4*n */ +#define FBNIC_RXB_DROP_FRMS_STS(n) (0x08057 + (n)) /* 0x2015c + 4*n */ +#define FBNIC_RXB_DROP_BYTES_STS_L(n) \ + (0x08080 + 2 * (n)) /* 0x20200 + 8*n */ +#define FBNIC_RXB_DROP_BYTES_STS_H(n) \ + (0x08081 + 2 * (n)) /* 0x20204 + 8*n */ +#define FBNIC_RXB_TRUN_FRMS_STS(n) (0x08091 + (n)) /* 0x20244 + 4*n */ +#define FBNIC_RXB_TRUN_BYTES_STS_L(n) \ + (0x080c0 + 2 * (n)) /* 0x20300 + 8*n */ +#define FBNIC_RXB_TRUN_BYTES_STS_H(n) \ + 
(0x080c1 + 2 * (n)) /* 0x20304 + 8*n */ +#define FBNIC_RXB_TRANS_PAUSE_STS(n) (0x080d1 + (n)) /* 0x20344 + 4*n */ +#define FBNIC_RXB_TRANS_DROP_STS(n) (0x080d9 + (n)) /* 0x20364 + 4*n */ +#define FBNIC_RXB_TRANS_ECN_STS(n) (0x080e1 + (n)) /* 0x20384 + 4*n */ +enum { + FBNIC_RXB_ENQUEUE_NET = 0, + FBNIC_RXB_ENQUEUE_BMC = 1, + /* Unused */ + /* Unused */ + FBNIC_RXB_ENQUEUE_INDICES = 4 +}; + +#define FBNIC_RXB_DRBO_FRM_CNT_SRC(n) (0x080f9 + (n)) /* 0x203e4 + 4*n */ +#define FBNIC_RXB_DRBO_BYTE_CNT_SRC_L(n) \ + (0x080fd + (n)) /* 0x203f4 + 4*n */ +#define FBNIC_RXB_DRBO_BYTE_CNT_SRC_H(n) \ + (0x08101 + (n)) /* 0x20404 + 4*n */ +#define FBNIC_RXB_INTF_FRM_CNT_DST(n) (0x08105 + (n)) /* 0x20414 + 4*n */ +#define FBNIC_RXB_INTF_BYTE_CNT_DST_L(n) \ + (0x08109 + (n)) /* 0x20424 + 4*n */ +#define FBNIC_RXB_INTF_BYTE_CNT_DST_H(n) \ + (0x0810d + (n)) /* 0x20434 + 4*n */ +#define FBNIC_RXB_PBUF_FRM_CNT_DST(n) (0x08111 + (n)) /* 0x20444 + 4*n */ +#define FBNIC_RXB_PBUF_BYTE_CNT_DST_L(n) \ + (0x08115 + (n)) /* 0x20454 + 4*n */ +#define FBNIC_RXB_PBUF_BYTE_CNT_DST_H(n) \ + (0x08119 + (n)) /* 0x20464 + 4*n */ + +#define FBNIC_RXB_PBUF_FIFO_LEVEL(n) (0x0811d + (n)) /* 0x20474 + 4*n */ + +#define FBNIC_RXB_INTEGRITY_ERR(n) (0x0812f + (n)) /* 0x204bc + 4*n */ +#define FBNIC_RXB_MAC_ERR(n) (0x08133 + (n)) /* 0x204cc + 4*n */ +#define FBNIC_RXB_PARSER_ERR(n) (0x08137 + (n)) /* 0x204dc + 4*n */ +#define FBNIC_RXB_FRM_ERR(n) (0x0813b + (n)) /* 0x204ec + 4*n */ + +#define FBNIC_RXB_DWRR_RDE_WEIGHT0_EXT 0x08143 /* 0x2050c */ +#define FBNIC_RXB_DWRR_RDE_WEIGHT1_EXT 0x08144 /* 0x20510 */ +#define FBNIC_CSR_END_RXB 0x081b1 /* CSR section delimiter */ + +/* Rx Parser and Classifier Registers */ +#define FBNIC_CSR_START_RPC 0x08400 /* CSR section delimiter */ +#define FBNIC_RPC_RMI_CONFIG 0x08400 /* 0x21000 */ +#define FBNIC_RPC_RMI_CONFIG_OH_BYTES CSR_GENMASK(4, 0) +#define FBNIC_RPC_RMI_CONFIG_FCS_PRESENT CSR_BIT(8) +#define FBNIC_RPC_RMI_CONFIG_ENABLE CSR_BIT(12) +#define FBNIC_RPC_RMI_CONFIG_MTU CSR_GENMASK(31, 16) + +#define FBNIC_RPC_ACT_TBL0_DEFAULT 0x0840a /* 0x21028 */ +#define FBNIC_RPC_ACT_TBL0_DROP CSR_BIT(0) +#define FBNIC_RPC_ACT_TBL0_DEST_MASK CSR_GENMASK(3, 1) +enum { + FBNIC_RPC_ACT_TBL0_DEST_HOST = 1, + FBNIC_RPC_ACT_TBL0_DEST_BMC = 2, + FBNIC_RPC_ACT_TBL0_DEST_EI = 4, +}; + +#define FBNIC_RPC_ACT_TBL0_DMA_HINT CSR_GENMASK(24, 16) +#define FBNIC_RPC_ACT_TBL0_RSS_CTXT_ID CSR_BIT(30) + +#define FBNIC_RPC_ACT_TBL1_DEFAULT 0x0840b /* 0x2102c */ +#define FBNIC_RPC_ACT_TBL1_RSS_ENA_MASK CSR_GENMASK(15, 0) +enum { + FBNIC_RPC_ACT_TBL1_RSS_ENA_IP_SRC = 1, + FBNIC_RPC_ACT_TBL1_RSS_ENA_IP_DST = 2, + FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_SRC = 4, + FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_DST = 8, + FBNIC_RPC_ACT_TBL1_RSS_ENA_L2_DA = 16, + FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_RSS_BYTE = 32, + FBNIC_RPC_ACT_TBL1_RSS_ENA_IV6_FL_LBL = 64, + FBNIC_RPC_ACT_TBL1_RSS_ENA_OV6_FL_LBL = 128, + FBNIC_RPC_ACT_TBL1_RSS_ENA_DSCP = 256, + FBNIC_RPC_ACT_TBL1_RSS_ENA_L3_PROT = 512, + FBNIC_RPC_ACT_TBL1_RSS_ENA_L4_PROT = 1024, +}; + +#define FBNIC_RPC_RSS_KEY(n) (0x0840c + (n)) /* 0x21030 + 4*n */ +#define FBNIC_RPC_RSS_KEY_BIT_LEN 425 +#define FBNIC_RPC_RSS_KEY_BYTE_LEN \ + DIV_ROUND_UP(FBNIC_RPC_RSS_KEY_BIT_LEN, 8) +#define FBNIC_RPC_RSS_KEY_DWORD_LEN \ + DIV_ROUND_UP(FBNIC_RPC_RSS_KEY_BIT_LEN, 32) +#define FBNIC_RPC_RSS_KEY_LAST_IDX \ + (FBNIC_RPC_RSS_KEY_DWORD_LEN - 1) +#define FBNIC_RPC_RSS_KEY_LAST_MASK \ + CSR_GENMASK(31, \ + FBNIC_RPC_RSS_KEY_DWORD_LEN * 32 - \ + FBNIC_RPC_RSS_KEY_BIT_LEN) + +#define FBNIC_RPC_TCAM_MACDA_VALIDATE 0x0852d /* 0x214b4 
*/ +#define FBNIC_CSR_END_RPC 0x0856b /* CSR section delimiter */ + +/* RPC RAM Registers */ + +#define FBNIC_CSR_START_RPC_RAM 0x08800 /* CSR section delimiter */ +#define FBNIC_RPC_ACT_TBL0(n) (0x08800 + (n)) /* 0x22000 + 4*n */ +#define FBNIC_RPC_ACT_TBL1(n) (0x08840 + (n)) /* 0x22100 + 4*n */ +#define FBNIC_RPC_ACT_TBL_NUM_ENTRIES 64 + +/* TCAM Tables */ +#define FBNIC_RPC_TCAM_VALIDATE CSR_BIT(31) + +/* 64 Action TCAM Entries, 12 registers + * 3 mixed, src port, dst port, 6 L4 words, and Validate + */ +#define FBNIC_RPC_TCAM_ACT(m, n) \ + (0x08880 + 0x40 * (n) + (m)) /* 0x22200 + 256*n + 4*m */ + +#define FBNIC_RPC_TCAM_ACT_VALUE CSR_GENMASK(15, 0) +#define FBNIC_RPC_TCAM_ACT_MASK CSR_GENMASK(31, 16) + +#define FBNIC_RPC_TCAM_MACDA(m, n) \ + (0x08b80 + 0x20 * (n) + (m)) /* 0x022e00 + 128*n + 4*m */ +#define FBNIC_RPC_TCAM_MACDA_VALUE CSR_GENMASK(15, 0) +#define FBNIC_RPC_TCAM_MACDA_MASK CSR_GENMASK(31, 16) + +#define FBNIC_RPC_RSS_TBL(n, m) \ + (0x08d20 + 0x100 * (n) + (m)) /* 0x023480 + 1024*n + 4*m */ +#define FBNIC_RPC_RSS_TBL_COUNT 2 +#define FBNIC_RPC_RSS_TBL_SIZE 256 +#define FBNIC_CSR_END_RPC_RAM 0x08f1f /* CSR section delimiter */ + +/* Fab Registers */ +#define FBNIC_CSR_START_FAB 0x0C000 /* CSR section delimiter */ +#define FBNIC_FAB_AXI4_AR_SPACER_2_CFG 0x0C005 /* 0x30014 */ +#define FBNIC_FAB_AXI4_AR_SPACER_MASK CSR_BIT(16) +#define FBNIC_FAB_AXI4_AR_SPACER_THREADSHOLD CSR_GENMASK(15, 0) +#define FBNIC_CSR_END_FAB 0x0C020 /* CSR section delimiter */ + +/* Master Registers */ +#define FBNIC_CSR_START_MASTER 0x0C400 /* CSR section delimiter */ +#define FBNIC_MASTER_SPARE_0 0x0C41B /* 0x3106c */ +#define FBNIC_CSR_END_MASTER 0x0C452 /* CSR section delimiter */ + +/* MAC MAC registers (ASIC only) */ +#define FBNIC_CSR_START_MAC_MAC 0x11000 /* CSR section delimiter */ +#define FBNIC_MAC_COMMAND_CONFIG 0x11002 /* 0x44008 */ +#define FBNIC_MAC_COMMAND_CONFIG_RX_PAUSE_DIS CSR_BIT(29) +#define FBNIC_MAC_COMMAND_CONFIG_TX_PAUSE_DIS CSR_BIT(28) +#define FBNIC_MAC_COMMAND_CONFIG_FLT_HDL_DIS CSR_BIT(27) +#define FBNIC_MAC_COMMAND_CONFIG_TX_PAD_EN CSR_BIT(11) +#define FBNIC_MAC_COMMAND_CONFIG_LOOPBACK_EN CSR_BIT(10) +#define FBNIC_MAC_COMMAND_CONFIG_PROMISC_EN CSR_BIT(4) +#define FBNIC_MAC_COMMAND_CONFIG_RX_ENA CSR_BIT(1) +#define FBNIC_MAC_COMMAND_CONFIG_TX_ENA CSR_BIT(0) +#define FBNIC_MAC_CL01_PAUSE_QUANTA 0x11015 /* 0x44054 */ +#define FBNIC_MAC_CL01_QUANTA_THRESH 0x11019 /* 0x44064 */ +#define FBNIC_CSR_END_MAC_MAC 0x11028 /* CSR section delimiter */ + +/* Signals from MAC, AN, PCS, and LED CSR registers (ASIC only) */ +#define FBNIC_CSR_START_SIG 0x11800 /* CSR section delimiter */ +#define FBNIC_SIG_MAC_IN0 0x11800 /* 0x46000 */ +#define FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK CSR_BIT(14) +#define FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK CSR_BIT(13) +#define FBNIC_SIG_MAC_IN0_RESET_TX_CLK CSR_BIT(12) +#define FBNIC_SIG_MAC_IN0_RESET_RX_CLK CSR_BIT(11) +#define FBNIC_SIG_MAC_IN0_TX_CRC CSR_BIT(8) +#define FBNIC_SIG_MAC_IN0_CFG_MODE128 CSR_BIT(10) +#define FBNIC_SIG_PCS_OUT0 0x11808 /* 0x46020 */ +#define FBNIC_SIG_PCS_OUT0_LINK CSR_BIT(27) +#define FBNIC_SIG_PCS_OUT0_BLOCK_LOCK CSR_GENMASK(24, 5) +#define FBNIC_SIG_PCS_OUT0_AMPS_LOCK CSR_GENMASK(4, 1) +#define FBNIC_SIG_PCS_OUT1 0x11809 /* 0x46024 */ +#define FBNIC_SIG_PCS_OUT1_FCFEC_LOCK CSR_GENMASK(11, 8) +#define FBNIC_SIG_PCS_INTR_STS 0x11814 /* 0x46050 */ +#define FBNIC_SIG_PCS_INTR_LINK_DOWN CSR_BIT(1) +#define FBNIC_SIG_PCS_INTR_LINK_UP CSR_BIT(0) +#define FBNIC_SIG_PCS_INTR_MASK 0x11816 /* 0x46058 */ +#define FBNIC_CSR_END_SIG 0x1184e 
/* CSR section delimiter */ + +/* PUL User Registers */ +#define FBNIC_CSR_START_PUL_USER 0x31000 /* CSR section delimiter */ +#define FBNIC_PUL_OB_TLP_HDR_AW_CFG 0x3103d /* 0xc40f4 */ +#define FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME CSR_BIT(18) +#define FBNIC_PUL_OB_TLP_HDR_AR_CFG 0x3103e /* 0xc40f8 */ +#define FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME CSR_BIT(18) +#define FBNIC_CSR_END_PUL_USER 0x31080 /* CSR section delimiter */ + +/* Queue Registers + * + * The queue register offsets are specific for a given queue grouping. So to + * find the actual register offset it is necessary to combine FBNIC_QUEUE(n) + * with the register to get the actual register offset like so: + * FBNIC_QUEUE_TWQ0_CTL(n) == FBNIC_QUEUE(n) + FBNIC_QUEUE_TWQ0_CTL + */ +#define FBNIC_CSR_START_QUEUE 0x40000 /* CSR section delimiter */ +#define FBNIC_QUEUE_STRIDE 0x400 /* 0x1000 */ +#define FBNIC_QUEUE(n)\ + (0x40000 + FBNIC_QUEUE_STRIDE * (n)) /* 0x100000 + 4096*n */ + +#define FBNIC_QUEUE_TWQ0_CTL 0x000 /* 0x000 */ +#define FBNIC_QUEUE_TWQ1_CTL 0x001 /* 0x004 */ +#define FBNIC_QUEUE_TWQ_CTL_RESET CSR_BIT(0) +#define FBNIC_QUEUE_TWQ_CTL_ENABLE CSR_BIT(1) +#define FBNIC_QUEUE_TWQ0_TAIL 0x002 /* 0x008 */ +#define FBNIC_QUEUE_TWQ1_TAIL 0x003 /* 0x00c */ + +#define FBNIC_QUEUE_TWQ0_SIZE 0x00a /* 0x028 */ +#define FBNIC_QUEUE_TWQ1_SIZE 0x00b /* 0x02c */ +#define FBNIC_QUEUE_TWQ_SIZE_MASK CSR_GENMASK(3, 0) + +#define FBNIC_QUEUE_TWQ0_BAL 0x020 /* 0x080 */ +#define FBNIC_QUEUE_BAL_MASK CSR_GENMASK(31, 7) +#define FBNIC_QUEUE_TWQ0_BAH 0x021 /* 0x084 */ +#define FBNIC_QUEUE_TWQ1_BAL 0x022 /* 0x088 */ +#define FBNIC_QUEUE_TWQ1_BAH 0x023 /* 0x08c */ + +/* Tx Completion Queue Registers */ +#define FBNIC_QUEUE_TCQ_CTL 0x080 /* 0x200 */ +#define FBNIC_QUEUE_TCQ_CTL_RESET CSR_BIT(0) +#define FBNIC_QUEUE_TCQ_CTL_ENABLE CSR_BIT(1) + +#define FBNIC_QUEUE_TCQ_HEAD 0x081 /* 0x204 */ + +#define FBNIC_QUEUE_TCQ_SIZE 0x084 /* 0x210 */ +#define FBNIC_QUEUE_TCQ_SIZE_MASK CSR_GENMASK(3, 0) + +#define FBNIC_QUEUE_TCQ_BAL 0x0a0 /* 0x280 */ +#define FBNIC_QUEUE_TCQ_BAH 0x0a1 /* 0x284 */ + +/* Tx Interrupt Manager Registers */ +#define FBNIC_QUEUE_TIM_CTL 0x0c0 /* 0x300 */ +#define FBNIC_QUEUE_TIM_CTL_MSIX_MASK CSR_GENMASK(7, 0) + +#define FBNIC_QUEUE_TIM_THRESHOLD 0x0c1 /* 0x304 */ +#define FBNIC_QUEUE_TIM_THRESHOLD_TWD_MASK CSR_GENMASK(14, 0) + +#define FBNIC_QUEUE_TIM_CLEAR 0x0c2 /* 0x308 */ +#define FBNIC_QUEUE_TIM_CLEAR_MASK CSR_BIT(0) +#define FBNIC_QUEUE_TIM_SET 0x0c3 /* 0x30c */ +#define FBNIC_QUEUE_TIM_SET_MASK CSR_BIT(0) +#define FBNIC_QUEUE_TIM_MASK 0x0c4 /* 0x310 */ +#define FBNIC_QUEUE_TIM_MASK_MASK CSR_BIT(0) + +#define FBNIC_QUEUE_TIM_TIMER 0x0c5 /* 0x314 */ + +#define FBNIC_QUEUE_TIM_COUNTS 0x0c6 /* 0x318 */ +#define FBNIC_QUEUE_TIM_COUNTS_CNT1_MASK CSR_GENMASK(30, 16) +#define FBNIC_QUEUE_TIM_COUNTS_CNT0_MASK CSR_GENMASK(14, 0) + +/* Rx Completion Queue Registers */ +#define FBNIC_QUEUE_RCQ_CTL 0x200 /* 0x800 */ +#define FBNIC_QUEUE_RCQ_CTL_RESET CSR_BIT(0) +#define FBNIC_QUEUE_RCQ_CTL_ENABLE CSR_BIT(1) + +#define FBNIC_QUEUE_RCQ_HEAD 0x201 /* 0x804 */ + +#define FBNIC_QUEUE_RCQ_SIZE 0x204 /* 0x810 */ +#define FBNIC_QUEUE_RCQ_SIZE_MASK CSR_GENMASK(3, 0) + +#define FBNIC_QUEUE_RCQ_BAL 0x220 /* 0x880 */ +#define FBNIC_QUEUE_RCQ_BAH 0x221 /* 0x884 */ + +/* Rx Buffer Descriptor Queue Registers */ +#define FBNIC_QUEUE_BDQ_CTL 0x240 /* 0x900 */ +#define FBNIC_QUEUE_BDQ_CTL_RESET CSR_BIT(0) +#define FBNIC_QUEUE_BDQ_CTL_ENABLE CSR_BIT(1) +#define FBNIC_QUEUE_BDQ_CTL_PPQ_ENABLE CSR_BIT(30) + +#define FBNIC_QUEUE_BDQ_HPQ_TAIL 0x241 /* 0x904 */ +#define 
FBNIC_QUEUE_BDQ_PPQ_TAIL 0x242 /* 0x908 */ + +#define FBNIC_QUEUE_BDQ_HPQ_SIZE 0x247 /* 0x91c */ +#define FBNIC_QUEUE_BDQ_PPQ_SIZE 0x248 /* 0x920 */ +#define FBNIC_QUEUE_BDQ_SIZE_MASK CSR_GENMASK(3, 0) + +#define FBNIC_QUEUE_BDQ_HPQ_BAL 0x260 /* 0x980 */ +#define FBNIC_QUEUE_BDQ_HPQ_BAH 0x261 /* 0x984 */ +#define FBNIC_QUEUE_BDQ_PPQ_BAL 0x262 /* 0x988 */ +#define FBNIC_QUEUE_BDQ_PPQ_BAH 0x263 /* 0x98c */ + +/* Rx DMA Engine Configuration */ +#define FBNIC_QUEUE_RDE_CTL0 0x2a0 /* 0xa80 */ +#define FBNIC_QUEUE_RDE_CTL0_EN_HDR_SPLIT CSR_BIT(31) +#define FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK CSR_GENMASK(30, 29) +enum { + FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE = 0, + FBNIC_QUEUE_RDE_CTL0_DROP_WAIT = 1, + FBNIC_QUEUE_RDE_CTL0_DROP_NEVER = 2, +}; + +#define FBNIC_QUEUE_RDE_CTL0_MIN_HROOM_MASK CSR_GENMASK(28, 20) +#define FBNIC_QUEUE_RDE_CTL0_MIN_TROOM_MASK CSR_GENMASK(19, 11) + +#define FBNIC_QUEUE_RDE_CTL1 0x2a1 /* 0xa84 */ +#define FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK CSR_GENMASK(24, 12) +#define FBNIC_QUEUE_RDE_CTL1_PAYLD_OFF_MASK CSR_GENMASK(11, 9) +#define FBNIC_QUEUE_RDE_CTL1_PAYLD_PG_CL_MASK CSR_GENMASK(8, 6) +#define FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK CSR_GENMASK(5, 2) +#define FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_MASK CSR_GENMASK(1, 0) +enum { + FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_NONE = 0, + FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_ALL = 1, + FBNIC_QUEUE_RDE_CTL1_PAYLD_PACK_RSS = 2, +}; + +/* Rx Interrupt Manager Registers */ +#define FBNIC_QUEUE_RIM_CTL 0x2c0 /* 0xb00 */ +#define FBNIC_QUEUE_RIM_CTL_MSIX_MASK CSR_GENMASK(7, 0) + +#define FBNIC_QUEUE_RIM_THRESHOLD 0x2c1 /* 0xb04 */ +#define FBNIC_QUEUE_RIM_THRESHOLD_RCD_MASK CSR_GENMASK(14, 0) + +#define FBNIC_QUEUE_RIM_CLEAR 0x2c2 /* 0xb08 */ +#define FBNIC_QUEUE_RIM_CLEAR_MASK CSR_BIT(0) +#define FBNIC_QUEUE_RIM_SET 0x2c3 /* 0xb0c */ +#define FBNIC_QUEUE_RIM_SET_MASK CSR_BIT(0) +#define FBNIC_QUEUE_RIM_MASK 0x2c4 /* 0xb10 */ +#define FBNIC_QUEUE_RIM_MASK_MASK CSR_BIT(0) + +#define FBNIC_QUEUE_RIM_COAL_STATUS 0x2c5 /* 0xb14 */ +#define FBNIC_QUEUE_RIM_RCD_COUNT_MASK CSR_GENMASK(30, 16) +#define FBNIC_QUEUE_RIM_TIMER_MASK CSR_GENMASK(13, 0) +#define FBNIC_MAX_QUEUES 128 +#define FBNIC_CSR_END_QUEUE (0x40000 + 0x400 * FBNIC_MAX_QUEUES - 1) + +/* BAR 4 CSRs */ + +/* The IPC mailbox consists of 32 mailboxes, with each mailbox consisting + * of 32 4 byte registers. We will use 2 registers per descriptor so the + * length of the mailbox is reduced to 16. + * + * Currently we use an offset of 0x6000 on BAR4 for the mailbox so we just + * have to do the math and determine the offset based on the mailbox + * direction and index inside that mailbox. 
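+ *
+ * As a worked example using the macro below: descriptor 5 of the Tx
+ * mailbox (index 1) occupies the register pair FBNIC_IPC_MBX(1, 5) and
+ * FBNIC_IPC_MBX(1, 5) + 1, i.e. ((1 * 16 + 5) * 2) + 0x6000 = 0x602a
+ * and 0x602b.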
+ */ +#define FBNIC_IPC_MBX_DESC_LEN 16 +#define FBNIC_IPC_MBX(mbx_idx, desc_idx) \ + ((((mbx_idx) * FBNIC_IPC_MBX_DESC_LEN + (desc_idx)) * 2) + 0x6000) + +/* Use first register in mailbox to flush writes */ +#define FBNIC_FW_ZERO_REG FBNIC_IPC_MBX(0, 0) + +enum { + FBNIC_IPC_MBX_RX_IDX, + FBNIC_IPC_MBX_TX_IDX, + FBNIC_IPC_MBX_INDICES, +}; + +#define FBNIC_IPC_MBX_DESC_LEN_MASK DESC_GENMASK(63, 48) +#define FBNIC_IPC_MBX_DESC_EOM DESC_BIT(46) +#define FBNIC_IPC_MBX_DESC_ADDR_MASK DESC_GENMASK(45, 3) +#define FBNIC_IPC_MBX_DESC_FW_CMPL DESC_BIT(1) +#define FBNIC_IPC_MBX_DESC_HOST_CMPL DESC_BIT(0) + +#endif /* _FBNIC_CSR_H_ */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c new file mode 100644 index 000000000000..e87049dfd223 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_devlink.c @@ -0,0 +1,88 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include +#include +#include +#include + +#include "fbnic.h" + +#define FBNIC_SN_STR_LEN 24 + +static int fbnic_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct fbnic_dev *fbd = devlink_priv(devlink); + int err; + + if (fbd->dsn) { + unsigned char serial[FBNIC_SN_STR_LEN]; + u8 dsn[8]; + + put_unaligned_be64(fbd->dsn, dsn); + err = snprintf(serial, FBNIC_SN_STR_LEN, "%8phD", dsn); + if (err < 0) + return err; + + err = devlink_info_serial_number_put(req, serial); + if (err) + return err; + } + + return 0; +} + +static const struct devlink_ops fbnic_devlink_ops = { + .info_get = fbnic_devlink_info_get, +}; + +void fbnic_devlink_free(struct fbnic_dev *fbd) +{ + struct devlink *devlink = priv_to_devlink(fbd); + + devlink_free(devlink); +} + +struct fbnic_dev *fbnic_devlink_alloc(struct pci_dev *pdev) +{ + void __iomem * const *iomap_table; + struct devlink *devlink; + struct fbnic_dev *fbd; + + devlink = devlink_alloc(&fbnic_devlink_ops, sizeof(struct fbnic_dev), + &pdev->dev); + if (!devlink) + return NULL; + + fbd = devlink_priv(devlink); + pci_set_drvdata(pdev, fbd); + fbd->dev = &pdev->dev; + + iomap_table = pcim_iomap_table(pdev); + fbd->uc_addr0 = iomap_table[0]; + fbd->uc_addr4 = iomap_table[4]; + + fbd->dsn = pci_get_dsn(pdev); + fbd->mps = pcie_get_mps(pdev); + fbd->readrq = pcie_get_readrq(pdev); + + fbd->mac_addr_boundary = FBNIC_RPC_TCAM_MACDA_DEFAULT_BOUNDARY; + + return fbd; +} + +void fbnic_devlink_register(struct fbnic_dev *fbd) +{ + struct devlink *devlink = priv_to_devlink(fbd); + + devlink_register(devlink); +} + +void fbnic_devlink_unregister(struct fbnic_dev *fbd) +{ + struct devlink *devlink = priv_to_devlink(fbd); + + devlink_unregister(devlink); +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h b/drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h new file mode 100644 index 000000000000..809ba6729442 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_drvinfo.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#define DRV_NAME "fbnic" +#define DRV_SUMMARY "Meta(R) Host Network Interface Driver" diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.c b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c new file mode 100644 index 000000000000..0c6e1b4c119b --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.c @@ -0,0 +1,791 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#include +#include +#include +#include +#include +#include +#include + +#include "fbnic.h" +#include "fbnic_tlv.h" + +static void __fbnic_mbx_wr_desc(struct fbnic_dev *fbd, int mbx_idx, + int desc_idx, u64 desc) +{ + u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx); + + fw_wr32(fbd, desc_offset + 1, upper_32_bits(desc)); + fw_wrfl(fbd); + fw_wr32(fbd, desc_offset, lower_32_bits(desc)); +} + +static u64 __fbnic_mbx_rd_desc(struct fbnic_dev *fbd, int mbx_idx, int desc_idx) +{ + u32 desc_offset = FBNIC_IPC_MBX(mbx_idx, desc_idx); + u64 desc; + + desc = fw_rd32(fbd, desc_offset); + desc |= (u64)fw_rd32(fbd, desc_offset + 1) << 32; + + return desc; +} + +static void fbnic_mbx_init_desc_ring(struct fbnic_dev *fbd, int mbx_idx) +{ + int desc_idx; + + /* Initialize first descriptor to all 0s. Doing this gives us a + * solid stop for the firmware to hit when it is done looping + * through the ring. + */ + __fbnic_mbx_wr_desc(fbd, mbx_idx, 0, 0); + + fw_wrfl(fbd); + + /* We then fill the rest of the ring starting at the end and moving + * back toward descriptor 0 with skip descriptors that have neither + * length nor address, telling the firmware that it can skip them + * and just move past them to the one we initialized to 0. + */ + for (desc_idx = FBNIC_IPC_MBX_DESC_LEN; --desc_idx;) { + __fbnic_mbx_wr_desc(fbd, mbx_idx, desc_idx, + FBNIC_IPC_MBX_DESC_FW_CMPL | + FBNIC_IPC_MBX_DESC_HOST_CMPL); + fw_wrfl(fbd); + } +} + +void fbnic_mbx_init(struct fbnic_dev *fbd) +{ + int i; + + /* Initialize lock to protect Tx ring */ + spin_lock_init(&fbd->fw_tx_lock); + + /* Reinitialize mailbox memory */ + for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++) + memset(&fbd->mbx[i], 0, sizeof(struct fbnic_fw_mbx)); + + /* Do not auto-clear the FW mailbox interrupt, let SW clear it */ + wr32(fbd, FBNIC_INTR_SW_AC_MODE(0), ~(1u << FBNIC_FW_MSIX_ENTRY)); + + /* Clear any stale causes in vector 0 as that is used for doorbell */ + wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); + + for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++) + fbnic_mbx_init_desc_ring(fbd, i); +} + +static int fbnic_mbx_map_msg(struct fbnic_dev *fbd, int mbx_idx, + struct fbnic_tlv_msg *msg, u16 length, u8 eom) +{ + struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx]; + u8 tail = mbx->tail; + dma_addr_t addr; + int direction; + + if (!mbx->ready || !fbnic_fw_present(fbd)) + return -ENODEV; + + direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ? DMA_FROM_DEVICE : + DMA_TO_DEVICE; + + if (mbx->head == ((tail + 1) % FBNIC_IPC_MBX_DESC_LEN)) + return -EBUSY; + + addr = dma_map_single(fbd->dev, msg, PAGE_SIZE, direction); + if (dma_mapping_error(fbd->dev, addr)) { + free_page((unsigned long)msg); + + return -ENOSPC; + } + + mbx->buf_info[tail].msg = msg; + mbx->buf_info[tail].addr = addr; + + mbx->tail = (tail + 1) % FBNIC_IPC_MBX_DESC_LEN; + + fw_wr32(fbd, FBNIC_IPC_MBX(mbx_idx, mbx->tail), 0); + + __fbnic_mbx_wr_desc(fbd, mbx_idx, tail, + FIELD_PREP(FBNIC_IPC_MBX_DESC_LEN_MASK, length) | + (addr & FBNIC_IPC_MBX_DESC_ADDR_MASK) | + (eom ? FBNIC_IPC_MBX_DESC_EOM : 0) | + FBNIC_IPC_MBX_DESC_HOST_CMPL); + + return 0; +} + +static void fbnic_mbx_unmap_and_free_msg(struct fbnic_dev *fbd, int mbx_idx, + int desc_idx) +{ + struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx]; + int direction; + + if (!mbx->buf_info[desc_idx].msg) + return; + + direction = (mbx_idx == FBNIC_IPC_MBX_RX_IDX) ?
DMA_FROM_DEVICE :
+						  DMA_TO_DEVICE;
+	dma_unmap_single(fbd->dev, mbx->buf_info[desc_idx].addr,
+			 PAGE_SIZE, direction);
+
+	free_page((unsigned long)mbx->buf_info[desc_idx].msg);
+	mbx->buf_info[desc_idx].msg = NULL;
+}
+
+static void fbnic_mbx_clean_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+{
+	int i;
+
+	fbnic_mbx_init_desc_ring(fbd, mbx_idx);
+
+	for (i = FBNIC_IPC_MBX_DESC_LEN; i--;)
+		fbnic_mbx_unmap_and_free_msg(fbd, mbx_idx, i);
+}
+
+void fbnic_mbx_clean(struct fbnic_dev *fbd)
+{
+	int i;
+
+	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+		fbnic_mbx_clean_desc_ring(fbd, i);
+}
+
+#define FBNIC_MBX_MAX_PAGE_SIZE	FIELD_MAX(FBNIC_IPC_MBX_DESC_LEN_MASK)
+#define FBNIC_RX_PAGE_SIZE	min_t(int, PAGE_SIZE, FBNIC_MBX_MAX_PAGE_SIZE)
+
+static int fbnic_mbx_alloc_rx_msgs(struct fbnic_dev *fbd)
+{
+	struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX];
+	u8 tail = rx_mbx->tail, head = rx_mbx->head, count;
+	int err = 0;
+
+	/* Do nothing if the mailbox is not ready, or we already have pages
+	 * on the ring that can be used by the firmware
+	 */
+	if (!rx_mbx->ready)
+		return -ENODEV;
+
+	/* Fill all but one of the unused descriptors in the Rx queue. */
+	count = (head - tail - 1) % FBNIC_IPC_MBX_DESC_LEN;
+	while (!err && count--) {
+		struct fbnic_tlv_msg *msg;
+
+		msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_ATOMIC |
+							      __GFP_NOWARN);
+		if (!msg) {
+			err = -ENOMEM;
+			break;
+		}
+
+		err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_RX_IDX, msg,
+					FBNIC_RX_PAGE_SIZE, 0);
+		if (err)
+			free_page((unsigned long)msg);
+	}
+
+	return err;
+}
+
+static int fbnic_mbx_map_tlv_msg(struct fbnic_dev *fbd,
+				 struct fbnic_tlv_msg *msg)
+{
+	unsigned long flags;
+	int err;
+
+	spin_lock_irqsave(&fbd->fw_tx_lock, flags);
+
+	err = fbnic_mbx_map_msg(fbd, FBNIC_IPC_MBX_TX_IDX, msg,
+				le16_to_cpu(msg->hdr.len) * sizeof(u32), 1);
+
+	spin_unlock_irqrestore(&fbd->fw_tx_lock, flags);
+
+	return err;
+}
+
+static void fbnic_mbx_process_tx_msgs(struct fbnic_dev *fbd)
+{
+	struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+	u8 head = tx_mbx->head;
+	u64 desc;
+
+	while (head != tx_mbx->tail) {
+		desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_TX_IDX, head);
+		if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL))
+			break;
+
+		fbnic_mbx_unmap_and_free_msg(fbd, FBNIC_IPC_MBX_TX_IDX, head);
+
+		head++;
+		head %= FBNIC_IPC_MBX_DESC_LEN;
+	}
+
+	/* Record head for next interrupt */
+	tx_mbx->head = head;
+}
+
+/**
+ * fbnic_fw_xmit_simple_msg - Transmit a simple single TLV message w/o data
+ * @fbd: FBNIC device structure
+ * @msg_type: ENUM value indicating message type to send
+ *
+ * Return:
+ * One of the following values:
+ * 0: Success, the message was placed in the mailbox
+ * -EOPNOTSUPP: Not an ASIC, so the mailbox is not supported
+ * -ENODEV: Device I/O error
+ * -ENOMEM: Failed to allocate message
+ * -EBUSY: No space in mailbox
+ * -ENOSPC: DMA mapping failed
+ *
+ * This function sends a single TLV header indicating the host wants to take
+ * some action. However, there are no other side effects, which means that any
+ * response will need to be caught via a completion if this action is
+ * expected to kick off a resultant action.
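+ *
+ * A minimal caller sketch (hedged; this mirrors fbnic_fw_xmit_cap_msg()
+ * below, and any completion plumbing is driver-specific and elided):
+ *
+ *	err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
+ *	if (err && err != -EOPNOTSUPP)
+ *		dev_warn(fbd->dev, "Failed to send message: %d\n", err);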
+ */
+static int fbnic_fw_xmit_simple_msg(struct fbnic_dev *fbd, u32 msg_type)
+{
+	struct fbnic_tlv_msg *msg;
+	int err = 0;
+
+	if (!fbnic_fw_present(fbd))
+		return -ENODEV;
+
+	msg = fbnic_tlv_msg_alloc(msg_type);
+	if (!msg)
+		return -ENOMEM;
+
+	err = fbnic_mbx_map_tlv_msg(fbd, msg);
+	if (err)
+		free_page((unsigned long)msg);
+
+	return err;
+}
+
+/**
+ * fbnic_fw_xmit_cap_msg - Allocate and populate a FW capabilities message
+ * @fbd: FBNIC device structure
+ *
+ * Return: zero on success, or a negative error code on failure.
+ *
+ * Sends a single TLV header indicating the host wants the firmware to
+ * confirm the capabilities and version.
+ **/
+static int fbnic_fw_xmit_cap_msg(struct fbnic_dev *fbd)
+{
+	int err = fbnic_fw_xmit_simple_msg(fbd, FBNIC_TLV_MSG_ID_HOST_CAP_REQ);
+
+	/* Return 0 if we are not calling this on ASIC */
+	return (err == -EOPNOTSUPP) ? 0 : err;
+}
+
+static void fbnic_mbx_postinit_desc_ring(struct fbnic_dev *fbd, int mbx_idx)
+{
+	struct fbnic_fw_mbx *mbx = &fbd->mbx[mbx_idx];
+
+	/* This is a one-time init, so just exit if it is completed */
+	if (mbx->ready)
+		return;
+
+	mbx->ready = true;
+
+	switch (mbx_idx) {
+	case FBNIC_IPC_MBX_RX_IDX:
+		/* Make sure we have a page for the FW to write to */
+		fbnic_mbx_alloc_rx_msgs(fbd);
+		break;
+	case FBNIC_IPC_MBX_TX_IDX:
+		/* Force version to 1 if we successfully requested an update
+		 * from the firmware. This should be overwritten once we get
+		 * the actual version from the firmware in the capabilities
+		 * request message.
+		 */
+		if (!fbnic_fw_xmit_cap_msg(fbd) &&
+		    !fbd->fw_cap.running.mgmt.version)
+			fbd->fw_cap.running.mgmt.version = 1;
+		break;
+	}
+}
+
+static void fbnic_mbx_postinit(struct fbnic_dev *fbd)
+{
+	int i;
+
+	/* We only need to do this on the first interrupt following init.
+	 * This primes the mailbox so that we will have cleared all the
+	 * skip descriptors.
+	 */
+	if (!(rd32(fbd, FBNIC_INTR_STATUS(0)) & (1u << FBNIC_FW_MSIX_ENTRY)))
+		return;
+
+	wr32(fbd, FBNIC_INTR_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY);
+
+	for (i = 0; i < FBNIC_IPC_MBX_INDICES; i++)
+		fbnic_mbx_postinit_desc_ring(fbd, i);
+}
+
+/**
+ * fbnic_fw_xmit_ownership_msg - Create and transmit a host ownership message
+ * to FW mailbox
+ *
+ * @fbd: FBNIC device structure
+ * @take_ownership: take/release the ownership
+ *
+ * Return: zero on success, negative value on failure
+ *
+ * Notifies the firmware that the driver either takes ownership of the NIC
+ * (when @take_ownership is true) or releases it.
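+ *
+ * Usage sketch taken from this patch's open path (__fbnic_open()), shown
+ * here for illustration:
+ *
+ *	err = fbnic_fw_xmit_ownership_msg(fbd, true);
+ *	if (err)
+ *		dev_warn(fbd->dev,
+ *			 "Error %d sending host ownership message\n", err);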
+ */ +int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership) +{ + unsigned long req_time = jiffies; + struct fbnic_tlv_msg *msg; + int err = 0; + + if (!fbnic_fw_present(fbd)) + return -ENODEV; + + msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_OWNERSHIP_REQ); + if (!msg) + return -ENOMEM; + + if (take_ownership) { + err = fbnic_tlv_attr_put_flag(msg, FBNIC_FW_OWNERSHIP_FLAG); + if (err) + goto free_message; + } + + err = fbnic_mbx_map_tlv_msg(fbd, msg); + if (err) + goto free_message; + + /* Initialize heartbeat, set last response to 1 second in the past + * so that we will trigger a timeout if the firmware doesn't respond + */ + fbd->last_heartbeat_response = req_time - HZ; + + fbd->last_heartbeat_request = req_time; + + /* Set heartbeat detection based on if we are taking ownership */ + fbd->fw_heartbeat_enabled = take_ownership; + + return err; + +free_message: + free_page((unsigned long)msg); + return err; +} + +static const struct fbnic_tlv_index fbnic_fw_cap_resp_index[] = { + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_VERSION), + FBNIC_TLV_ATTR_FLAG(FBNIC_FW_CAP_RESP_BMC_PRESENT), + FBNIC_TLV_ATTR_MAC_ADDR(FBNIC_FW_CAP_RESP_BMC_MAC_ADDR), + FBNIC_TLV_ATTR_ARRAY(FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY), + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_VERSION), + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT), + FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE), + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI), + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_SPEED), + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_FW_LINK_FEC), + FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE), + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_CMRT_VERSION), + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION), + FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE), + FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE), + FBNIC_TLV_ATTR_U32(FBNIC_FW_CAP_RESP_UEFI_VERSION), + FBNIC_TLV_ATTR_STRING(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE), + FBNIC_TLV_ATTR_LAST +}; + +static int fbnic_fw_parse_bmc_addrs(u8 bmc_mac_addr[][ETH_ALEN], + struct fbnic_tlv_msg *attr, int len) +{ + int attr_len = le16_to_cpu(attr->hdr.len) / sizeof(u32) - 1; + struct fbnic_tlv_msg *mac_results[8]; + int err, i = 0; + + /* Make sure we have enough room to process all the MAC addresses */ + if (len > 8) + return -ENOSPC; + + /* Parse the array */ + err = fbnic_tlv_attr_parse_array(&attr[1], attr_len, mac_results, + fbnic_fw_cap_resp_index, + FBNIC_FW_CAP_RESP_BMC_MAC_ADDR, len); + if (err) + return err; + + /* Copy results into MAC addr array */ + for (i = 0; i < len && mac_results[i]; i++) + fbnic_tlv_attr_addr_copy(bmc_mac_addr[i], mac_results[i]); + + /* Zero remaining unused addresses */ + while (i < len) + eth_zero_addr(bmc_mac_addr[i++]); + + return 0; +} + +static int fbnic_fw_parse_cap_resp(void *opaque, struct fbnic_tlv_msg **results) +{ + u32 active_slot = 0, all_multi = 0; + struct fbnic_dev *fbd = opaque; + u32 speed = 0, fec = 0; + size_t commit_size = 0; + bool bmc_present; + int err; + + get_unsigned_result(FBNIC_FW_CAP_RESP_VERSION, + fbd->fw_cap.running.mgmt.version); + + if (!fbd->fw_cap.running.mgmt.version) + return -EINVAL; + + if (fbd->fw_cap.running.mgmt.version < MIN_FW_VERSION_CODE) { + char running_ver[FBNIC_FW_VER_MAX_SIZE]; + + fbnic_mk_fw_ver_str(fbd->fw_cap.running.mgmt.version, + 
running_ver); + dev_err(fbd->dev, "Device firmware version(%s) is older than minimum required version(%02d.%02d.%02d)\n", + running_ver, + MIN_FW_MAJOR_VERSION, + MIN_FW_MINOR_VERSION, + MIN_FW_BUILD_VERSION); + /* Disable TX mailbox to prevent card use until firmware is + * updated. + */ + fbd->mbx[FBNIC_IPC_MBX_TX_IDX].ready = false; + return -EINVAL; + } + + get_string_result(FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR, commit_size, + fbd->fw_cap.running.mgmt.commit, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); + if (!commit_size) + dev_warn(fbd->dev, "Firmware did not send mgmt commit!\n"); + + get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_VERSION, + fbd->fw_cap.stored.mgmt.version); + get_string_result(FBNIC_FW_CAP_RESP_STORED_COMMIT_STR, commit_size, + fbd->fw_cap.stored.mgmt.commit, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); + + get_unsigned_result(FBNIC_FW_CAP_RESP_CMRT_VERSION, + fbd->fw_cap.running.bootloader.version); + get_string_result(FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR, commit_size, + fbd->fw_cap.running.bootloader.commit, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); + + get_unsigned_result(FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION, + fbd->fw_cap.stored.bootloader.version); + get_string_result(FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR, commit_size, + fbd->fw_cap.stored.bootloader.commit, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); + + get_unsigned_result(FBNIC_FW_CAP_RESP_UEFI_VERSION, + fbd->fw_cap.stored.undi.version); + get_string_result(FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR, commit_size, + fbd->fw_cap.stored.undi.commit, + FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE); + + get_unsigned_result(FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT, active_slot); + fbd->fw_cap.active_slot = active_slot; + + get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_SPEED, speed); + get_unsigned_result(FBNIC_FW_CAP_RESP_FW_LINK_FEC, fec); + fbd->fw_cap.link_speed = speed; + fbd->fw_cap.link_fec = fec; + + bmc_present = !!results[FBNIC_FW_CAP_RESP_BMC_PRESENT]; + if (bmc_present) { + struct fbnic_tlv_msg *attr; + + attr = results[FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY]; + if (!attr) + return -EINVAL; + + err = fbnic_fw_parse_bmc_addrs(fbd->fw_cap.bmc_mac_addr, + attr, 4); + if (err) + return err; + + get_unsigned_result(FBNIC_FW_CAP_RESP_BMC_ALL_MULTI, all_multi); + } else { + memset(fbd->fw_cap.bmc_mac_addr, 0, + sizeof(fbd->fw_cap.bmc_mac_addr)); + } + + fbd->fw_cap.bmc_present = bmc_present; + + if (results[FBNIC_FW_CAP_RESP_BMC_ALL_MULTI] || !bmc_present) + fbd->fw_cap.all_multi = all_multi; + + return 0; +} + +static const struct fbnic_tlv_index fbnic_ownership_resp_index[] = { + FBNIC_TLV_ATTR_LAST +}; + +static int fbnic_fw_parse_ownership_resp(void *opaque, + struct fbnic_tlv_msg **results) +{ + struct fbnic_dev *fbd = (struct fbnic_dev *)opaque; + + /* Count the ownership response as a heartbeat reply */ + fbd->last_heartbeat_response = jiffies; + + return 0; +} + +static const struct fbnic_tlv_index fbnic_heartbeat_resp_index[] = { + FBNIC_TLV_ATTR_LAST +}; + +static int fbnic_fw_parse_heartbeat_resp(void *opaque, + struct fbnic_tlv_msg **results) +{ + struct fbnic_dev *fbd = (struct fbnic_dev *)opaque; + + fbd->last_heartbeat_response = jiffies; + + return 0; +} + +static int fbnic_fw_xmit_heartbeat_message(struct fbnic_dev *fbd) +{ + unsigned long req_time = jiffies; + struct fbnic_tlv_msg *msg; + int err = 0; + + if (!fbnic_fw_present(fbd)) + return -ENODEV; + + msg = fbnic_tlv_msg_alloc(FBNIC_TLV_MSG_ID_HEARTBEAT_REQ); + if (!msg) + return -ENOMEM; + + err = fbnic_mbx_map_tlv_msg(fbd, msg); + if (err) + goto free_message; + + fbd->last_heartbeat_request = 
req_time; + + return err; + +free_message: + free_page((unsigned long)msg); + return err; +} + +static bool fbnic_fw_heartbeat_current(struct fbnic_dev *fbd) +{ + unsigned long last_response = fbd->last_heartbeat_response; + unsigned long last_request = fbd->last_heartbeat_request; + + return !time_before(last_response, last_request); +} + +int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll) +{ + int err = -ETIMEDOUT; + int attempts = 50; + + if (!fbnic_fw_present(fbd)) + return -ENODEV; + + while (attempts--) { + msleep(200); + if (poll) + fbnic_mbx_poll(fbd); + + if (!fbnic_fw_heartbeat_current(fbd)) + continue; + + /* Place new message on mailbox to elicit a response */ + err = fbnic_fw_xmit_heartbeat_message(fbd); + if (err) + dev_warn(fbd->dev, + "Failed to send heartbeat message: %d\n", + err); + break; + } + + return err; +} + +void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd) +{ + unsigned long last_request = fbd->last_heartbeat_request; + int err; + + /* Do not check heartbeat or send another request until current + * period has expired. Otherwise we might start spamming requests. + */ + if (time_is_after_jiffies(last_request + FW_HEARTBEAT_PERIOD)) + return; + + /* We already reported no mailbox. Wait for it to come back */ + if (!fbd->fw_heartbeat_enabled) + return; + + /* Was the last heartbeat response long time ago? */ + if (!fbnic_fw_heartbeat_current(fbd)) { + dev_warn(fbd->dev, + "Firmware did not respond to heartbeat message\n"); + fbd->fw_heartbeat_enabled = false; + } + + /* Place new message on mailbox to elicit a response */ + err = fbnic_fw_xmit_heartbeat_message(fbd); + if (err) + dev_warn(fbd->dev, "Failed to send heartbeat message\n"); +} + +static const struct fbnic_tlv_parser fbnic_fw_tlv_parser[] = { + FBNIC_TLV_PARSER(FW_CAP_RESP, fbnic_fw_cap_resp_index, + fbnic_fw_parse_cap_resp), + FBNIC_TLV_PARSER(OWNERSHIP_RESP, fbnic_ownership_resp_index, + fbnic_fw_parse_ownership_resp), + FBNIC_TLV_PARSER(HEARTBEAT_RESP, fbnic_heartbeat_resp_index, + fbnic_fw_parse_heartbeat_resp), + FBNIC_TLV_MSG_ERROR +}; + +static void fbnic_mbx_process_rx_msgs(struct fbnic_dev *fbd) +{ + struct fbnic_fw_mbx *rx_mbx = &fbd->mbx[FBNIC_IPC_MBX_RX_IDX]; + u8 head = rx_mbx->head; + u64 desc, length; + + while (head != rx_mbx->tail) { + struct fbnic_tlv_msg *msg; + int err; + + desc = __fbnic_mbx_rd_desc(fbd, FBNIC_IPC_MBX_RX_IDX, head); + if (!(desc & FBNIC_IPC_MBX_DESC_FW_CMPL)) + break; + + dma_unmap_single(fbd->dev, rx_mbx->buf_info[head].addr, + PAGE_SIZE, DMA_FROM_DEVICE); + + msg = rx_mbx->buf_info[head].msg; + + length = FIELD_GET(FBNIC_IPC_MBX_DESC_LEN_MASK, desc); + + /* Ignore NULL mailbox descriptors */ + if (!length) + goto next_page; + + /* Report descriptors with length greater than page size */ + if (length > PAGE_SIZE) { + dev_warn(fbd->dev, + "Invalid mailbox descriptor length: %lld\n", + length); + goto next_page; + } + + if (le16_to_cpu(msg->hdr.len) * sizeof(u32) > length) + dev_warn(fbd->dev, "Mailbox message length mismatch\n"); + + /* If parsing fails dump contents of message to dmesg */ + err = fbnic_tlv_msg_parse(fbd, msg, fbnic_fw_tlv_parser); + if (err) { + dev_warn(fbd->dev, "Unable to process message: %d\n", + err); + print_hex_dump(KERN_WARNING, "fbnic:", + DUMP_PREFIX_OFFSET, 16, 2, + msg, length, true); + } + + dev_dbg(fbd->dev, "Parsed msg type %d\n", msg->hdr.type); +next_page: + + free_page((unsigned long)rx_mbx->buf_info[head].msg); + rx_mbx->buf_info[head].msg = NULL; + + head++; + head %= FBNIC_IPC_MBX_DESC_LEN; + } + + /* Record head 
for next interrupt */
+	rx_mbx->head = head;
+
+	/* Make sure we have at least one page for the FW to write to */
+	fbnic_mbx_alloc_rx_msgs(fbd);
+}
+
+void fbnic_mbx_poll(struct fbnic_dev *fbd)
+{
+	fbnic_mbx_postinit(fbd);
+
+	fbnic_mbx_process_tx_msgs(fbd);
+	fbnic_mbx_process_rx_msgs(fbd);
+}
+
+int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd)
+{
+	struct fbnic_fw_mbx *tx_mbx;
+	int attempts = 50;
+
+	/* Fail immediately if BAR4 isn't there */
+	if (!fbnic_fw_present(fbd))
+		return -ENODEV;
+
+	tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+	while (!tx_mbx->ready && --attempts) {
+		/* Force the firmware to trigger an interrupt response to
+		 * avoid the mailbox getting stuck closed if the interrupt
+		 * is reset.
+		 */
+		fbnic_mbx_init_desc_ring(fbd, FBNIC_IPC_MBX_TX_IDX);
+
+		msleep(200);
+
+		fbnic_mbx_poll(fbd);
+	}
+
+	return attempts ? 0 : -ETIMEDOUT;
+}
+
+void fbnic_mbx_flush_tx(struct fbnic_dev *fbd)
+{
+	struct fbnic_fw_mbx *tx_mbx;
+	int attempts = 50;
+	u8 count = 0;
+
+	/* Nothing to do if there is no mailbox */
+	if (!fbnic_fw_present(fbd))
+		return;
+
+	/* Record current Tx mailbox state */
+	tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+
+	/* Nothing to do if the mailbox never got to ready */
+	if (!tx_mbx->ready)
+		return;
+
+	/* Give the firmware time to process the packets; we will wait up
+	 * to 10 seconds, which is 50 waits of 200ms.
+	 */
+	do {
+		u8 head = tx_mbx->head;
+
+		if (head == tx_mbx->tail)
+			break;
+
+		msleep(200);
+		fbnic_mbx_process_tx_msgs(fbd);
+
+		count += (tx_mbx->head - head) % FBNIC_IPC_MBX_DESC_LEN;
+	} while (count < FBNIC_IPC_MBX_DESC_LEN && --attempts);
+}
diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_fw.h b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
new file mode 100644
index 000000000000..c65bca613665
--- /dev/null
+++ b/drivers/net/ethernet/meta/fbnic/fbnic_fw.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) Meta Platforms, Inc. and affiliates.
*/ + +#ifndef _FBNIC_FW_H_ +#define _FBNIC_FW_H_ + +#include +#include + +struct fbnic_dev; +struct fbnic_tlv_msg; + +struct fbnic_fw_mbx { + u8 ready, head, tail; + struct { + struct fbnic_tlv_msg *msg; + dma_addr_t addr; + } buf_info[FBNIC_IPC_MBX_DESC_LEN]; +}; + +// FW_VER_MAX_SIZE must match ETHTOOL_FWVERS_LEN +#define FBNIC_FW_VER_MAX_SIZE 32 +// Formatted version is in the format XX.YY.ZZ_RRR_COMMIT +#define FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE (FBNIC_FW_VER_MAX_SIZE - 13) +#define FBNIC_FW_LOG_MAX_SIZE 256 + +struct fbnic_fw_ver { + u32 version; + char commit[FBNIC_FW_CAP_RESP_COMMIT_MAX_SIZE]; +}; + +struct fbnic_fw_cap { + struct { + struct fbnic_fw_ver mgmt, bootloader; + } running; + struct { + struct fbnic_fw_ver mgmt, bootloader, undi; + } stored; + u8 active_slot; + u8 bmc_mac_addr[4][ETH_ALEN]; + u8 bmc_present : 1; + u8 all_multi : 1; + u8 link_speed; + u8 link_fec; +}; + +void fbnic_mbx_init(struct fbnic_dev *fbd); +void fbnic_mbx_clean(struct fbnic_dev *fbd); +void fbnic_mbx_poll(struct fbnic_dev *fbd); +int fbnic_mbx_poll_tx_ready(struct fbnic_dev *fbd); +void fbnic_mbx_flush_tx(struct fbnic_dev *fbd); +int fbnic_fw_xmit_ownership_msg(struct fbnic_dev *fbd, bool take_ownership); +int fbnic_fw_init_heartbeat(struct fbnic_dev *fbd, bool poll); +void fbnic_fw_check_heartbeat(struct fbnic_dev *fbd); + +#define fbnic_mk_full_fw_ver_str(_rev_id, _delim, _commit, _str) \ +do { \ + const u32 __rev_id = _rev_id; \ + snprintf(_str, sizeof(_str), "%02lu.%02lu.%02lu-%03lu%s%s", \ + FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_MAJOR, __rev_id), \ + FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_MINOR, __rev_id), \ + FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_PATCH, __rev_id), \ + FIELD_GET(FBNIC_FW_CAP_RESP_VERSION_BUILD, __rev_id), \ + _delim, _commit); \ +} while (0) + +#define fbnic_mk_fw_ver_str(_rev_id, _str) \ + fbnic_mk_full_fw_ver_str(_rev_id, "", "", _str) + +#define FW_HEARTBEAT_PERIOD (10 * HZ) + +enum { + FBNIC_TLV_MSG_ID_HOST_CAP_REQ = 0x10, + FBNIC_TLV_MSG_ID_FW_CAP_RESP = 0x11, + FBNIC_TLV_MSG_ID_OWNERSHIP_REQ = 0x12, + FBNIC_TLV_MSG_ID_OWNERSHIP_RESP = 0x13, + FBNIC_TLV_MSG_ID_HEARTBEAT_REQ = 0x14, + FBNIC_TLV_MSG_ID_HEARTBEAT_RESP = 0x15, +}; + +#define FBNIC_FW_CAP_RESP_VERSION_MAJOR CSR_GENMASK(31, 24) +#define FBNIC_FW_CAP_RESP_VERSION_MINOR CSR_GENMASK(23, 16) +#define FBNIC_FW_CAP_RESP_VERSION_PATCH CSR_GENMASK(15, 8) +#define FBNIC_FW_CAP_RESP_VERSION_BUILD CSR_GENMASK(7, 0) +enum { + FBNIC_FW_CAP_RESP_VERSION = 0x0, + FBNIC_FW_CAP_RESP_BMC_PRESENT = 0x1, + FBNIC_FW_CAP_RESP_BMC_MAC_ADDR = 0x2, + FBNIC_FW_CAP_RESP_BMC_MAC_ARRAY = 0x3, + FBNIC_FW_CAP_RESP_STORED_VERSION = 0x4, + FBNIC_FW_CAP_RESP_ACTIVE_FW_SLOT = 0x5, + FBNIC_FW_CAP_RESP_VERSION_COMMIT_STR = 0x6, + FBNIC_FW_CAP_RESP_BMC_ALL_MULTI = 0x8, + FBNIC_FW_CAP_RESP_FW_STATE = 0x9, + FBNIC_FW_CAP_RESP_FW_LINK_SPEED = 0xa, + FBNIC_FW_CAP_RESP_FW_LINK_FEC = 0xb, + FBNIC_FW_CAP_RESP_STORED_COMMIT_STR = 0xc, + FBNIC_FW_CAP_RESP_CMRT_VERSION = 0xd, + FBNIC_FW_CAP_RESP_STORED_CMRT_VERSION = 0xe, + FBNIC_FW_CAP_RESP_CMRT_COMMIT_STR = 0xf, + FBNIC_FW_CAP_RESP_STORED_CMRT_COMMIT_STR = 0x10, + FBNIC_FW_CAP_RESP_UEFI_VERSION = 0x11, + FBNIC_FW_CAP_RESP_UEFI_COMMIT_STR = 0x12, + FBNIC_FW_CAP_RESP_MSG_MAX +}; + +enum { + FBNIC_FW_LINK_SPEED_25R1 = 1, + FBNIC_FW_LINK_SPEED_50R2 = 2, + FBNIC_FW_LINK_SPEED_50R1 = 3, + FBNIC_FW_LINK_SPEED_100R2 = 4, +}; + +enum { + FBNIC_FW_LINK_FEC_NONE = 1, + FBNIC_FW_LINK_FEC_RS = 2, + FBNIC_FW_LINK_FEC_BASER = 3, +}; + +enum { + FBNIC_FW_OWNERSHIP_FLAG = 0x0, + FBNIC_FW_OWNERSHIP_MSG_MAX +}; +#endif /* 
_FBNIC_FW_H_ */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_irq.c b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c new file mode 100644 index 000000000000..914362195920 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_irq.c @@ -0,0 +1,208 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include +#include + +#include "fbnic.h" +#include "fbnic_netdev.h" +#include "fbnic_txrx.h" + +static irqreturn_t fbnic_fw_msix_intr(int __always_unused irq, void *data) +{ + struct fbnic_dev *fbd = (struct fbnic_dev *)data; + + fbnic_mbx_poll(fbd); + + fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); + + return IRQ_HANDLED; +} + +/** + * fbnic_fw_enable_mbx - Configure and initialize Firmware Mailbox + * @fbd: Pointer to device to initialize + * + * This function will initialize the firmware mailbox rings, enable the IRQ + * and initialize the communication between the Firmware and the host. The + * firmware is expected to respond to the initialization by sending an + * interrupt essentially notifying the host that it has seen the + * initialization and is now synced up. + * + * Return: non-zero on failure. + **/ +int fbnic_fw_enable_mbx(struct fbnic_dev *fbd) +{ + u32 vector = fbd->fw_msix_vector; + int err; + + /* Request the IRQ for FW Mailbox vector. */ + err = request_threaded_irq(vector, NULL, &fbnic_fw_msix_intr, + IRQF_ONESHOT, dev_name(fbd->dev), fbd); + if (err) + return err; + + /* Initialize mailbox and attempt to poll it into ready state */ + fbnic_mbx_init(fbd); + err = fbnic_mbx_poll_tx_ready(fbd); + if (err) { + dev_warn(fbd->dev, "FW mailbox did not enter ready state\n"); + free_irq(vector, fbd); + return err; + } + + /* Enable interrupts */ + fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_FW_MSIX_ENTRY); + + return 0; +} + +/** + * fbnic_fw_disable_mbx - Disable mailbox and place it in standby state + * @fbd: Pointer to device to disable + * + * This function will disable the mailbox interrupt, free any messages still + * in the mailbox and place it into a standby state. The firmware is + * expected to see the update and assume that the host is in the reset state. + **/ +void fbnic_fw_disable_mbx(struct fbnic_dev *fbd) +{ + /* Disable interrupt and free vector */ + fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_FW_MSIX_ENTRY); + + /* Free the vector */ + free_irq(fbd->fw_msix_vector, fbd); + + /* Make sure disabling logs message is sent, must be done here to + * avoid risk of completing without a running interrupt. + */ + fbnic_mbx_flush_tx(fbd); + + /* Reset the mailboxes to the initialized state */ + fbnic_mbx_clean(fbd); +} + +static irqreturn_t fbnic_pcs_msix_intr(int __always_unused irq, void *data) +{ + struct fbnic_dev *fbd = data; + struct fbnic_net *fbn; + + if (fbd->mac->pcs_get_link_event(fbd) == FBNIC_LINK_EVENT_NONE) { + fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), + 1u << FBNIC_PCS_MSIX_ENTRY); + return IRQ_HANDLED; + } + + fbn = netdev_priv(fbd->netdev); + + phylink_pcs_change(&fbn->phylink_pcs, false); + + return IRQ_HANDLED; +} + +/** + * fbnic_pcs_irq_enable - Configure the MAC to enable it to advertise link + * @fbd: Pointer to device to initialize + * + * This function provides basic bringup for the MAC/PCS IRQ. For now the IRQ + * will remain disabled until we start the MAC/PCS/PHY logic via phylink. + * + * Return: non-zero on failure. 
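+ *
+ * A sketch of the expected pairing, as read from this patch's netdev
+ * paths (an observation, not a documented contract):
+ *
+ *	err = fbnic_pcs_irq_enable(fbd);	// in __fbnic_open()
+ *	...
+ *	fbnic_pcs_irq_disable(fbd);		// in fbnic_stop()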
+ **/ +int fbnic_pcs_irq_enable(struct fbnic_dev *fbd) +{ + u32 vector = fbd->pcs_msix_vector; + int err; + + /* Request the IRQ for MAC link vector. + * Map MAC cause to it, and unmask it + */ + err = request_irq(vector, &fbnic_pcs_msix_intr, 0, + fbd->netdev->name, fbd); + if (err) + return err; + + fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX), + FBNIC_PCS_MSIX_ENTRY | FBNIC_INTR_MSIX_CTRL_ENABLE); + + return 0; +} + +/** + * fbnic_pcs_irq_disable - Teardown the MAC IRQ to prepare for stopping + * @fbd: Pointer to device that is stopping + * + * This function undoes the work done in fbnic_pcs_irq_enable and prepares + * the device to no longer receive traffic on the host interface. + **/ +void fbnic_pcs_irq_disable(struct fbnic_dev *fbd) +{ + /* Disable interrupt */ + fbnic_wr32(fbd, FBNIC_INTR_MSIX_CTRL(FBNIC_INTR_MSIX_CTRL_PCS_IDX), + FBNIC_PCS_MSIX_ENTRY); + fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(0), 1u << FBNIC_PCS_MSIX_ENTRY); + + /* Free the vector */ + free_irq(fbd->pcs_msix_vector, fbd); +} + +int fbnic_request_irq(struct fbnic_dev *fbd, int nr, irq_handler_t handler, + unsigned long flags, const char *name, void *data) +{ + struct pci_dev *pdev = to_pci_dev(fbd->dev); + int irq = pci_irq_vector(pdev, nr); + + if (irq < 0) + return irq; + + return request_irq(irq, handler, flags, name, data); +} + +void fbnic_free_irq(struct fbnic_dev *fbd, int nr, void *data) +{ + struct pci_dev *pdev = to_pci_dev(fbd->dev); + int irq = pci_irq_vector(pdev, nr); + + if (irq < 0) + return; + + free_irq(irq, data); +} + +void fbnic_free_irqs(struct fbnic_dev *fbd) +{ + struct pci_dev *pdev = to_pci_dev(fbd->dev); + + fbd->pcs_msix_vector = 0; + fbd->fw_msix_vector = 0; + + fbd->num_irqs = 0; + + pci_free_irq_vectors(pdev); +} + +int fbnic_alloc_irqs(struct fbnic_dev *fbd) +{ + unsigned int wanted_irqs = FBNIC_NON_NAPI_VECTORS; + struct pci_dev *pdev = to_pci_dev(fbd->dev); + int num_irqs; + + wanted_irqs += min_t(unsigned int, num_online_cpus(), FBNIC_MAX_RXQS); + num_irqs = pci_alloc_irq_vectors(pdev, FBNIC_NON_NAPI_VECTORS + 1, + wanted_irqs, PCI_IRQ_MSIX); + if (num_irqs < 0) { + dev_err(fbd->dev, "Failed to allocate MSI-X entries\n"); + return num_irqs; + } + + if (num_irqs < wanted_irqs) + dev_warn(fbd->dev, "Allocated %d IRQs, expected %d\n", + num_irqs, wanted_irqs); + + fbd->num_irqs = num_irqs; + + fbd->pcs_msix_vector = pci_irq_vector(pdev, FBNIC_PCS_MSIX_ENTRY); + fbd->fw_msix_vector = pci_irq_vector(pdev, FBNIC_FW_MSIX_ENTRY); + + return 0; +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.c b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c new file mode 100644 index 000000000000..7920e7af82d9 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.c @@ -0,0 +1,666 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include +#include + +#include "fbnic.h" +#include "fbnic_mac.h" +#include "fbnic_netdev.h" + +static void fbnic_init_readrq(struct fbnic_dev *fbd, unsigned int offset, + unsigned int cls, unsigned int readrq) +{ + u32 val = rd32(fbd, offset); + + /* The TDF_CTL masks are a superset of the RNI_RBP ones. So we can + * use them when setting either the TDE_CTF or RNI_RBP registers. 
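+ *
+ * In other words this is a read-modify-write: the MAX_OT/MAX_OB fields
+ * are preserved from the current register value while MRRS and CLS are
+ * re-encoded, roughly (illustrative shorthand, not real identifiers):
+ *
+ *	val = (val & keep_mask) | FIELD_PREP(MRRS, readrq) |
+ *	      FIELD_PREP(CLS, cls);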
+ */
+	val &= FBNIC_QM_TNI_TDF_CTL_MAX_OT | FBNIC_QM_TNI_TDF_CTL_MAX_OB;
+
+	val |= FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_MRRS, readrq) |
+	       FIELD_PREP(FBNIC_QM_TNI_TDF_CTL_CLS, cls);
+
+	wr32(fbd, offset, val);
+}
+
+static void fbnic_init_mps(struct fbnic_dev *fbd, unsigned int offset,
+			   unsigned int cls, unsigned int mps)
+{
+	u32 val = rd32(fbd, offset);
+
+	/* Currently all MPS masks are identical so just use the first one */
+	val &= ~(FBNIC_QM_TNI_TCM_CTL_MPS | FBNIC_QM_TNI_TCM_CTL_CLS);
+
+	val |= FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_MPS, mps) |
+	       FIELD_PREP(FBNIC_QM_TNI_TCM_CTL_CLS, cls);
+
+	wr32(fbd, offset, val);
+}
+
+static void fbnic_mac_init_axi(struct fbnic_dev *fbd)
+{
+	bool override_1k = false;
+	int readrq, mps, cls;
+
+	/* All of the values are based on being a power of 2 starting
+	 * with 64 == 0. Therefore we can either divide by 64 in the
+	 * case of constants, or just subtract 6 from the log2 of the value
+	 * in order to get the value we will be programming into the
+	 * registers.
+	 */
+	readrq = ilog2(fbd->readrq) - 6;
+	if (readrq > 3)
+		override_1k = true;
+	readrq = clamp(readrq, 0, 3);
+
+	mps = ilog2(fbd->mps) - 6;
+	mps = clamp(mps, 0, 3);
+
+	cls = ilog2(L1_CACHE_BYTES) - 6;
+	cls = clamp(cls, 0, 3);
+
+	/* Configure Tx/Rx AXI Paths w/ Read Request and Max Payload sizes */
+	fbnic_init_readrq(fbd, FBNIC_QM_TNI_TDF_CTL, cls, readrq);
+	fbnic_init_mps(fbd, FBNIC_QM_TNI_TCM_CTL, cls, mps);
+
+	/* Configure QM TNI TDE:
+	 * - Max outstanding AXI beats to 704 (768 - 64) - guarantees 8% of
+	 *   buffer capacity to descriptors.
+	 * - Max outstanding transactions to 128
+	 */
+	wr32(fbd, FBNIC_QM_TNI_TDE_CTL,
+	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MRRS_1K, override_1k ? 1 : 0) |
+	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MAX_OB, 704) |
+	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MAX_OT, 128) |
+	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_MRRS, readrq) |
+	     FIELD_PREP(FBNIC_QM_TNI_TDE_CTL_CLS, cls));
+
+	fbnic_init_readrq(fbd, FBNIC_QM_RNI_RBP_CTL, cls, readrq);
+	fbnic_init_mps(fbd, FBNIC_QM_RNI_RDE_CTL, cls, mps);
+	fbnic_init_mps(fbd, FBNIC_QM_RNI_RCM_CTL, cls, mps);
+
+	/* Enable XALI AR/AW outbound */
+	wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AW_CFG,
+	     FBNIC_PUL_OB_TLP_HDR_AW_CFG_BME);
+	wr32(fbd, FBNIC_PUL_OB_TLP_HDR_AR_CFG,
+	     FBNIC_PUL_OB_TLP_HDR_AR_CFG_BME);
+}
+
+static void fbnic_mac_init_qm(struct fbnic_dev *fbd)
+{
+	u32 clock_freq;
+
+	/* Configure TSO behavior */
+	wr32(fbd, FBNIC_QM_TQS_CTL0,
+	     FIELD_PREP(FBNIC_QM_TQS_CTL0_LSO_TS_MASK,
+			FBNIC_QM_TQS_CTL0_LSO_TS_LAST) |
+	     FIELD_PREP(FBNIC_QM_TQS_CTL0_PREFETCH_THRESH,
+			FBNIC_QM_TQS_CTL0_PREFETCH_THRESH_MIN));
+
+	/* Limit EDT to INT_MAX as this is the limit of the EDT Qdisc */
+	wr32(fbd, FBNIC_QM_TQS_EDT_TS_RANGE, INT_MAX);
+
+	/* Configure MTU
+	 * Due to a known HW issue we cannot set the MTU to within 16 octets
+	 * of a 64 octet aligned boundary. So we will set the TQS_MTU(s) to
+	 * MTU + 1.
+	 */
+	wr32(fbd, FBNIC_QM_TQS_MTU_CTL0, FBNIC_MAX_JUMBO_FRAME_SIZE + 1);
+	wr32(fbd, FBNIC_QM_TQS_MTU_CTL1,
+	     FIELD_PREP(FBNIC_QM_TQS_MTU_CTL1_BULK,
+			FBNIC_MAX_JUMBO_FRAME_SIZE + 1));
+
+	clock_freq = FBNIC_CLOCK_FREQ;
+
+	/* Be aggressive on the timings. We will have the interrupt
+	 * threshold timer tick once every 1 usec and coalesce writes for
+	 * up to 80 usecs.
+	 */
+	wr32(fbd, FBNIC_QM_TCQ_CTL0,
+	     FIELD_PREP(FBNIC_QM_TCQ_CTL0_TICK_CYCLES,
+			clock_freq / 1000000) |
+	     FIELD_PREP(FBNIC_QM_TCQ_CTL0_COAL_WAIT,
+			clock_freq / 12500));
+
+	/* We will have the interrupt threshold timer tick once every
+	 * 1 usec and coalesce writes for up to 2 usecs.
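+ *
+ * Worked numbers (our reading of the code, not authoritative): with a
+ * clock of FBNIC_CLOCK_FREQ cycles per second,
+ *	clock_freq / 1000000 = cycles per 1 usec tick
+ *	clock_freq / 500000  = cycles per 2 usec coalesce window
+ * matching the 80 usec Tx window above (clock_freq / 12500).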
+ */ + wr32(fbd, FBNIC_QM_RCQ_CTL0, + FIELD_PREP(FBNIC_QM_RCQ_CTL0_TICK_CYCLES, + clock_freq / 1000000) | + FIELD_PREP(FBNIC_QM_RCQ_CTL0_COAL_WAIT, + clock_freq / 500000)); + + /* Configure spacer control to 64 beats. */ + wr32(fbd, FBNIC_FAB_AXI4_AR_SPACER_2_CFG, + FBNIC_FAB_AXI4_AR_SPACER_MASK | + FIELD_PREP(FBNIC_FAB_AXI4_AR_SPACER_THREADSHOLD, 2)); +} + +#define FBNIC_DROP_EN_MASK 0x7d +#define FBNIC_PAUSE_EN_MASK 0x14 +#define FBNIC_ECN_EN_MASK 0x10 + +struct fbnic_fifo_config { + unsigned int addr; + unsigned int size; +}; + +/* Rx FIFO Configuration + * The table consists of 8 entries, of which only 4 are currently used + * The starting addr is in units of 64B and the size is in 2KB units + * Below is the human readable version of the table defined below: + * Function Addr Size + * ---------------------------------- + * Network to Host/BMC 384K 64K + * Unused + * Unused + * Network to BMC 448K 32K + * Network to Host 0 384K + * Unused + * BMC to Host 480K 32K + * Unused + */ +static const struct fbnic_fifo_config fifo_config[] = { + { .addr = 0x1800, .size = 0x20 }, /* Network to Host/BMC */ + { }, /* Unused */ + { }, /* Unused */ + { .addr = 0x1c00, .size = 0x10 }, /* Network to BMC */ + { .addr = 0x0000, .size = 0xc0 }, /* Network to Host */ + { }, /* Unused */ + { .addr = 0x1e00, .size = 0x10 }, /* BMC to Host */ + { } /* Unused */ +}; + +static void fbnic_mac_init_rxb(struct fbnic_dev *fbd) +{ + bool rx_enable; + int i; + + rx_enable = !!(rd32(fbd, FBNIC_RPC_RMI_CONFIG) & + FBNIC_RPC_RMI_CONFIG_ENABLE); + + for (i = 0; i < 8; i++) { + unsigned int size = fifo_config[i].size; + + /* If we are coming up on a system that already has the + * Rx data path enabled we don't need to reconfigure the + * FIFOs. Instead we can check to verify the values are + * large enough to meet our needs, and use the values to + * populate the flow control, ECN, and drop thresholds. + */ + if (rx_enable) { + size = FIELD_GET(FBNIC_RXB_PBUF_SIZE, + rd32(fbd, FBNIC_RXB_PBUF_CFG(i))); + if (size < fifo_config[i].size) + dev_warn(fbd->dev, + "fifo%d size of %d smaller than expected value of %d\n", + i, size << 11, + fifo_config[i].size << 11); + } else { + /* Program RXB Cuthrough */ + wr32(fbd, FBNIC_RXB_CT_SIZE(i), + FIELD_PREP(FBNIC_RXB_CT_SIZE_HEADER, 4) | + FIELD_PREP(FBNIC_RXB_CT_SIZE_PAYLOAD, 2)); + + /* The granularity for the packet buffer size is 2KB + * granularity while the packet buffer base address is + * only 64B granularity + */ + wr32(fbd, FBNIC_RXB_PBUF_CFG(i), + FIELD_PREP(FBNIC_RXB_PBUF_BASE_ADDR, + fifo_config[i].addr) | + FIELD_PREP(FBNIC_RXB_PBUF_SIZE, size)); + + /* The granularity for the credits is 64B. This is + * based on RXB_PBUF_SIZE * 32 + 4. + */ + wr32(fbd, FBNIC_RXB_PBUF_CREDIT(i), + FIELD_PREP(FBNIC_RXB_PBUF_CREDIT_MASK, + size ? size * 32 + 4 : 0)); + } + + if (!size) + continue; + + /* Pause is size of FIFO with 56KB skid to start/stop */ + wr32(fbd, FBNIC_RXB_PAUSE_THLD(i), + !(FBNIC_PAUSE_EN_MASK & (1u << i)) ? 0x1fff : + FIELD_PREP(FBNIC_RXB_PAUSE_THLD_ON, + size * 32 - 0x380) | + FIELD_PREP(FBNIC_RXB_PAUSE_THLD_OFF, 0x380)); + + /* Enable Drop when only one packet is left in the FIFO */ + wr32(fbd, FBNIC_RXB_DROP_THLD(i), + !(FBNIC_DROP_EN_MASK & (1u << i)) ? 
0x1fff : + FIELD_PREP(FBNIC_RXB_DROP_THLD_ON, + size * 32 - + FBNIC_MAX_JUMBO_FRAME_SIZE / 64) | + FIELD_PREP(FBNIC_RXB_DROP_THLD_OFF, + size * 32 - + FBNIC_MAX_JUMBO_FRAME_SIZE / 64)); + + /* Enable ECN bit when 1/4 of RXB is filled with at least + * 1 room for one full jumbo frame before setting ECN + */ + wr32(fbd, FBNIC_RXB_ECN_THLD(i), + !(FBNIC_ECN_EN_MASK & (1u << i)) ? 0x1fff : + FIELD_PREP(FBNIC_RXB_ECN_THLD_ON, + max_t(unsigned int, + size * 32 / 4, + FBNIC_MAX_JUMBO_FRAME_SIZE / 64)) | + FIELD_PREP(FBNIC_RXB_ECN_THLD_OFF, + max_t(unsigned int, + size * 32 / 4, + FBNIC_MAX_JUMBO_FRAME_SIZE / 64))); + } + + /* For now only enable drop and ECN. We need to add driver/kernel + * interfaces for configuring pause. + */ + wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL, + FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_DROP_ENABLE, + FBNIC_DROP_EN_MASK) | + FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_ECN_ENABLE, + FBNIC_ECN_EN_MASK)); + + /* Program INTF credits */ + wr32(fbd, FBNIC_RXB_INTF_CREDIT, + FBNIC_RXB_INTF_CREDIT_MASK0 | + FBNIC_RXB_INTF_CREDIT_MASK1 | + FBNIC_RXB_INTF_CREDIT_MASK2 | + FIELD_PREP(FBNIC_RXB_INTF_CREDIT_MASK3, 8)); + + /* Configure calendar slots. + * Rx: 0 - 62 RDE 1st, BMC 2nd + * 63 BMC 1st, RDE 2nd + */ + for (i = 0; i < 16; i++) { + u32 calendar_val = (i == 15) ? 0x1e1b1b1b : 0x1b1b1b1b; + + wr32(fbd, FBNIC_RXB_CLDR_PRIO_CFG(i), calendar_val); + } + + /* Split the credits for the DRR up as follows: + * Quantum0: 8000 Network to Host + * Quantum1: 0 Not used + * Quantum2: 80 BMC to Host + * Quantum3: 0 Not used + * Quantum4: 8000 Multicast to Host and BMC + */ + wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT0, + FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0, 0x40) | + FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM2, 0x50)); + wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT0_EXT, + FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT0_QUANTUM0, 0x1f)); + wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT1, + FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4, 0x40)); + wr32(fbd, FBNIC_RXB_DWRR_RDE_WEIGHT1_EXT, + FIELD_PREP(FBNIC_RXB_DWRR_RDE_WEIGHT1_QUANTUM4, 0x1f)); + + /* Program RXB FCS Endian register */ + wr32(fbd, FBNIC_RXB_ENDIAN_FCS, 0x0aaaaaa0); +} + +static void fbnic_mac_init_txb(struct fbnic_dev *fbd) +{ + int i; + + wr32(fbd, FBNIC_TCE_TXB_CTRL, 0); + + /* Configure Tx QM Credits */ + wr32(fbd, FBNIC_QM_TQS_CTL1, + FIELD_PREP(FBNIC_QM_TQS_CTL1_MC_MAX_CREDITS, 0x40) | + FIELD_PREP(FBNIC_QM_TQS_CTL1_BULK_MAX_CREDITS, 0x20)); + + /* Initialize internal Tx queues */ + wr32(fbd, FBNIC_TCE_TXB_TEI_Q0_CTRL, 0); + wr32(fbd, FBNIC_TCE_TXB_TEI_Q1_CTRL, 0); + wr32(fbd, FBNIC_TCE_TXB_MC_Q_CTRL, + FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x400) | + FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x000)); + wr32(fbd, FBNIC_TCE_TXB_RX_TEI_Q_CTRL, 0); + wr32(fbd, FBNIC_TCE_TXB_TX_BMC_Q_CTRL, + FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x200) | + FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x400)); + wr32(fbd, FBNIC_TCE_TXB_RX_BMC_Q_CTRL, + FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_SIZE, 0x200) | + FIELD_PREP(FBNIC_TCE_TXB_Q_CTRL_START, 0x600)); + + wr32(fbd, FBNIC_TCE_LSO_CTRL, + FBNIC_TCE_LSO_CTRL_IPID_MODE_INC | + FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_1ST, TCPHDR_PSH | + TCPHDR_FIN) | + FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_MID, TCPHDR_PSH | + TCPHDR_CWR | + TCPHDR_FIN) | + FIELD_PREP(FBNIC_TCE_LSO_CTRL_TCPF_CLR_END, TCPHDR_CWR)); + wr32(fbd, FBNIC_TCE_CSO_CTRL, 0); + + wr32(fbd, FBNIC_TCE_BMC_MAX_PKTSZ, + FIELD_PREP(FBNIC_TCE_BMC_MAX_PKTSZ_TX, + FBNIC_MAX_JUMBO_FRAME_SIZE) | + FIELD_PREP(FBNIC_TCE_BMC_MAX_PKTSZ_RX, + FBNIC_MAX_JUMBO_FRAME_SIZE)); + wr32(fbd, 
FBNIC_TCE_MC_MAX_PKTSZ, + FIELD_PREP(FBNIC_TCE_MC_MAX_PKTSZ_TMI, + FBNIC_MAX_JUMBO_FRAME_SIZE)); + + /* Configure calendar slots. + * Tx: 0 - 62 TMI 1st, BMC 2nd + * 63 BMC 1st, TMI 2nd + */ + for (i = 0; i < 16; i++) { + u32 calendar_val = (i == 15) ? 0x1e1b1b1b : 0x1b1b1b1b; + + wr32(fbd, FBNIC_TCE_TXB_CLDR_SLOT_CFG(i), calendar_val); + } + + /* Configure DWRR */ + wr32(fbd, FBNIC_TCE_TXB_ENQ_WRR_CTRL, + FIELD_PREP(FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT0, 0x64) | + FIELD_PREP(FBNIC_TCE_TXB_ENQ_WRR_CTRL_WEIGHT2, 0x04)); + wr32(fbd, FBNIC_TCE_TXB_TEI_DWRR_CTRL, 0); + wr32(fbd, FBNIC_TCE_TXB_TEI_DWRR_CTRL_EXT, 0); + wr32(fbd, FBNIC_TCE_TXB_BMC_DWRR_CTRL, + FIELD_PREP(FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM0, 0x50) | + FIELD_PREP(FBNIC_TCE_TXB_BMC_DWRR_CTRL_QUANTUM1, 0x82)); + wr32(fbd, FBNIC_TCE_TXB_BMC_DWRR_CTRL_EXT, 0); + wr32(fbd, FBNIC_TCE_TXB_NTWRK_DWRR_CTRL, + FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM1, 0x50) | + FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2, 0x20)); + wr32(fbd, FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_EXT, + FIELD_PREP(FBNIC_TCE_TXB_NTWRK_DWRR_CTRL_QUANTUM2, 0x03)); + + /* Configure SOP protocol protection */ + wr32(fbd, FBNIC_TCE_SOP_PROT_CTRL, + FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TBI, 0x78) | + FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TTI_FRM, 0x40) | + FIELD_PREP(FBNIC_TCE_SOP_PROT_CTRL_TTI_CM, 0x0c)); + + /* Conservative configuration on MAC interface Start of Packet + * protection FIFO. This sets the minimum depth of the FIFO before + * we start sending packets to the MAC measured in 64B units and + * up to 160 entries deep. + * + * For the ASIC the clock is fast enough that we will likely fill + * the SOP FIFO before the MAC can drain it. So just use a minimum + * value of 8. + */ + wr32(fbd, FBNIC_TMI_SOP_PROT_CTRL, 8); + + wrfl(fbd); + wr32(fbd, FBNIC_TCE_TXB_CTRL, FBNIC_TCE_TXB_CTRL_TCAM_ENABLE | + FBNIC_TCE_TXB_CTRL_LOAD); +} + +static void fbnic_mac_init_regs(struct fbnic_dev *fbd) +{ + fbnic_mac_init_axi(fbd); + fbnic_mac_init_qm(fbd); + fbnic_mac_init_rxb(fbd); + fbnic_mac_init_txb(fbd); +} + +static void fbnic_mac_tx_pause_config(struct fbnic_dev *fbd, bool tx_pause) +{ + u32 rxb_pause_ctrl; + + /* Enable generation of pause frames if enabled */ + rxb_pause_ctrl = rd32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL); + rxb_pause_ctrl &= ~FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE; + if (tx_pause) + rxb_pause_ctrl |= + FIELD_PREP(FBNIC_RXB_PAUSE_DROP_CTRL_PAUSE_ENABLE, + FBNIC_PAUSE_EN_MASK); + wr32(fbd, FBNIC_RXB_PAUSE_DROP_CTRL, rxb_pause_ctrl); +} + +static int fbnic_pcs_get_link_event_asic(struct fbnic_dev *fbd) +{ + u32 pcs_intr_mask = rd32(fbd, FBNIC_SIG_PCS_INTR_STS); + + if (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_DOWN) + return FBNIC_LINK_EVENT_DOWN; + + return (pcs_intr_mask & FBNIC_SIG_PCS_INTR_LINK_UP) ? 
+ FBNIC_LINK_EVENT_UP : FBNIC_LINK_EVENT_NONE; +} + +static u32 __fbnic_mac_cmd_config_asic(struct fbnic_dev *fbd, + bool tx_pause, bool rx_pause) +{ + /* Enable MAC Promiscuous mode and Tx padding */ + u32 command_config = FBNIC_MAC_COMMAND_CONFIG_TX_PAD_EN | + FBNIC_MAC_COMMAND_CONFIG_PROMISC_EN; + struct fbnic_net *fbn = netdev_priv(fbd->netdev); + + /* Disable pause frames if not enabled */ + if (!tx_pause) + command_config |= FBNIC_MAC_COMMAND_CONFIG_TX_PAUSE_DIS; + if (!rx_pause) + command_config |= FBNIC_MAC_COMMAND_CONFIG_RX_PAUSE_DIS; + + /* Disable fault handling if no FEC is requested */ + if ((fbn->fec & FBNIC_FEC_MODE_MASK) == FBNIC_FEC_OFF) + command_config |= FBNIC_MAC_COMMAND_CONFIG_FLT_HDL_DIS; + + return command_config; +} + +static bool fbnic_mac_get_pcs_link_status(struct fbnic_dev *fbd) +{ + struct fbnic_net *fbn = netdev_priv(fbd->netdev); + u32 pcs_status, lane_mask = ~0; + + pcs_status = rd32(fbd, FBNIC_SIG_PCS_OUT0); + if (!(pcs_status & FBNIC_SIG_PCS_OUT0_LINK)) + return false; + + /* Define the expected lane mask for the status bits we need to check */ + switch (fbn->link_mode & FBNIC_LINK_MODE_MASK) { + case FBNIC_LINK_100R2: + lane_mask = 0xf; + break; + case FBNIC_LINK_50R1: + lane_mask = 3; + break; + case FBNIC_LINK_50R2: + switch (fbn->fec & FBNIC_FEC_MODE_MASK) { + case FBNIC_FEC_OFF: + lane_mask = 0x63; + break; + case FBNIC_FEC_RS: + lane_mask = 5; + break; + case FBNIC_FEC_BASER: + lane_mask = 0xf; + break; + } + break; + case FBNIC_LINK_25R1: + lane_mask = 1; + break; + } + + /* Use an XOR to remove the bits we expect to see set */ + switch (fbn->fec & FBNIC_FEC_MODE_MASK) { + case FBNIC_FEC_OFF: + lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_BLOCK_LOCK, + pcs_status); + break; + case FBNIC_FEC_RS: + lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT0_AMPS_LOCK, + pcs_status); + break; + case FBNIC_FEC_BASER: + lane_mask ^= FIELD_GET(FBNIC_SIG_PCS_OUT1_FCFEC_LOCK, + rd32(fbd, FBNIC_SIG_PCS_OUT1)); + break; + } + + /* If all lanes cancelled then we have a lock on all lanes */ + return !lane_mask; +} + +static bool fbnic_pcs_get_link_asic(struct fbnic_dev *fbd) +{ + bool link; + + /* Flush status bits to clear possible stale data, + * bits should reset themselves back to 1 if link is truly up + */ + wr32(fbd, FBNIC_SIG_PCS_OUT0, FBNIC_SIG_PCS_OUT0_LINK | + FBNIC_SIG_PCS_OUT0_BLOCK_LOCK | + FBNIC_SIG_PCS_OUT0_AMPS_LOCK); + wr32(fbd, FBNIC_SIG_PCS_OUT1, FBNIC_SIG_PCS_OUT1_FCFEC_LOCK); + wrfl(fbd); + + /* Clear interrupt state due to recent changes. 
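+ * The status bits are assumed to be write-1-to-clear here, so writing
+ * the LINK_DOWN/LINK_UP bits back dismisses any stale events before we
+ * sample the live link state below.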
*/ + wr32(fbd, FBNIC_SIG_PCS_INTR_STS, + FBNIC_SIG_PCS_INTR_LINK_DOWN | FBNIC_SIG_PCS_INTR_LINK_UP); + + link = fbnic_mac_get_pcs_link_status(fbd); + + /* Enable interrupt to only capture changes in link state */ + wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, + ~FBNIC_SIG_PCS_INTR_LINK_DOWN & ~FBNIC_SIG_PCS_INTR_LINK_UP); + wr32(fbd, FBNIC_INTR_MASK_CLEAR(0), 1u << FBNIC_PCS_MSIX_ENTRY); + + return link; +} + +static void fbnic_pcs_get_fw_settings(struct fbnic_dev *fbd) +{ + struct fbnic_net *fbn = netdev_priv(fbd->netdev); + u8 link_mode = fbn->link_mode; + u8 fec = fbn->fec; + + /* Update FEC first to reflect FW current mode */ + if (fbn->fec & FBNIC_FEC_AUTO) { + switch (fbd->fw_cap.link_fec) { + case FBNIC_FW_LINK_FEC_NONE: + fec = FBNIC_FEC_OFF; + break; + case FBNIC_FW_LINK_FEC_RS: + fec = FBNIC_FEC_RS; + break; + case FBNIC_FW_LINK_FEC_BASER: + fec = FBNIC_FEC_BASER; + break; + default: + return; + } + + fbn->fec = fec; + } + + /* Do nothing if AUTO mode is not engaged */ + if (fbn->link_mode & FBNIC_LINK_AUTO) { + switch (fbd->fw_cap.link_speed) { + case FBNIC_FW_LINK_SPEED_25R1: + link_mode = FBNIC_LINK_25R1; + break; + case FBNIC_FW_LINK_SPEED_50R2: + link_mode = FBNIC_LINK_50R2; + break; + case FBNIC_FW_LINK_SPEED_50R1: + link_mode = FBNIC_LINK_50R1; + fec = FBNIC_FEC_RS; + break; + case FBNIC_FW_LINK_SPEED_100R2: + link_mode = FBNIC_LINK_100R2; + fec = FBNIC_FEC_RS; + break; + default: + return; + } + + fbn->link_mode = link_mode; + } +} + +static int fbnic_pcs_enable_asic(struct fbnic_dev *fbd) +{ + /* Mask and clear the PCS interrupt, will be enabled by link handler */ + wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0); + wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0); + + /* Pull in settings from FW */ + fbnic_pcs_get_fw_settings(fbd); + + return 0; +} + +static void fbnic_pcs_disable_asic(struct fbnic_dev *fbd) +{ + /* Mask and clear the PCS interrupt */ + wr32(fbd, FBNIC_SIG_PCS_INTR_MASK, ~0); + wr32(fbd, FBNIC_SIG_PCS_INTR_STS, ~0); +} + +static void fbnic_mac_link_down_asic(struct fbnic_dev *fbd) +{ + u32 cmd_cfg, mac_ctrl; + + cmd_cfg = __fbnic_mac_cmd_config_asic(fbd, false, false); + mac_ctrl = rd32(fbd, FBNIC_SIG_MAC_IN0); + + mac_ctrl |= FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK | + FBNIC_SIG_MAC_IN0_RESET_TX_CLK | + FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK | + FBNIC_SIG_MAC_IN0_RESET_RX_CLK; + + wr32(fbd, FBNIC_SIG_MAC_IN0, mac_ctrl); + wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg); +} + +static void fbnic_mac_link_up_asic(struct fbnic_dev *fbd, + bool tx_pause, bool rx_pause) +{ + u32 cmd_cfg, mac_ctrl; + + fbnic_mac_tx_pause_config(fbd, tx_pause); + + cmd_cfg = __fbnic_mac_cmd_config_asic(fbd, tx_pause, rx_pause); + mac_ctrl = rd32(fbd, FBNIC_SIG_MAC_IN0); + + mac_ctrl &= ~(FBNIC_SIG_MAC_IN0_RESET_FF_TX_CLK | + FBNIC_SIG_MAC_IN0_RESET_TX_CLK | + FBNIC_SIG_MAC_IN0_RESET_FF_RX_CLK | + FBNIC_SIG_MAC_IN0_RESET_RX_CLK); + cmd_cfg |= FBNIC_MAC_COMMAND_CONFIG_RX_ENA | + FBNIC_MAC_COMMAND_CONFIG_TX_ENA; + + wr32(fbd, FBNIC_SIG_MAC_IN0, mac_ctrl); + wr32(fbd, FBNIC_MAC_COMMAND_CONFIG, cmd_cfg); +} + +static const struct fbnic_mac fbnic_mac_asic = { + .init_regs = fbnic_mac_init_regs, + .pcs_enable = fbnic_pcs_enable_asic, + .pcs_disable = fbnic_pcs_disable_asic, + .pcs_get_link = fbnic_pcs_get_link_asic, + .pcs_get_link_event = fbnic_pcs_get_link_event_asic, + .link_down = fbnic_mac_link_down_asic, + .link_up = fbnic_mac_link_up_asic, +}; + +/** + * fbnic_mac_init - Assign a MAC type and initialize the fbnic device + * @fbd: Device pointer to device to initialize + * + * Return: zero on success, negative on failure 
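+ *
+ * (As of this patch only the ASIC MAC backend exists, so this simply
+ * assigns fbnic_mac_asic and invokes its init_regs hook.)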
+ * + * Initialize the MAC function pointers and initializes the MAC of + * the device. + **/ +int fbnic_mac_init(struct fbnic_dev *fbd) +{ + fbd->mac = &fbnic_mac_asic; + + fbd->mac->init_regs(fbd); + + return 0; +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_mac.h b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h new file mode 100644 index 000000000000..f53be6e6aef9 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_mac.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#ifndef _FBNIC_MAC_H_ +#define _FBNIC_MAC_H_ + +#include + +struct fbnic_dev; + +#define FBNIC_MAX_JUMBO_FRAME_SIZE 9742 + +enum { + FBNIC_LINK_EVENT_NONE = 0, + FBNIC_LINK_EVENT_UP = 1, + FBNIC_LINK_EVENT_DOWN = 2, +}; + +/* Treat the FEC bits as a bitmask laid out as follows: + * Bit 0: RS Enabled + * Bit 1: BASER(Firecode) Enabled + * Bit 2: Retrieve FEC from FW + */ +enum { + FBNIC_FEC_OFF = 0, + FBNIC_FEC_RS = 1, + FBNIC_FEC_BASER = 2, + FBNIC_FEC_AUTO = 4, +}; + +#define FBNIC_FEC_MODE_MASK (FBNIC_FEC_AUTO - 1) + +/* Treat the link modes as a set of modulation/lanes bitmask: + * Bit 0: Lane Count, 0 = R1, 1 = R2 + * Bit 1: Modulation, 0 = NRZ, 1 = PAM4 + * Bit 2: Retrieve link mode from FW + */ +enum { + FBNIC_LINK_25R1 = 0, + FBNIC_LINK_50R2 = 1, + FBNIC_LINK_50R1 = 2, + FBNIC_LINK_100R2 = 3, + FBNIC_LINK_AUTO = 4, +}; + +#define FBNIC_LINK_MODE_R2 (FBNIC_LINK_50R2) +#define FBNIC_LINK_MODE_PAM4 (FBNIC_LINK_50R1) +#define FBNIC_LINK_MODE_MASK (FBNIC_LINK_AUTO - 1) + +/* This structure defines the interface hooks for the MAC. The MAC hooks + * will be configured as a const struct provided with a set of function + * pointers. + * + * void (*init_regs)(struct fbnic_dev *fbd); + * Initialize MAC registers to enable Tx/Rx paths and FIFOs. + * + * void (*pcs_enable)(struct fbnic_dev *fbd); + * Configure and enable PCS to enable link if not already enabled + * void (*pcs_disable)(struct fbnic_dev *fbd); + * Shutdown the link if we are the only consumer of it. + * bool (*pcs_get_link)(struct fbnic_dev *fbd); + * Check PCS link status + * int (*pcs_get_link_event)(struct fbnic_dev *fbd) + * Get the current link event status, reports true if link has + * changed to either FBNIC_LINK_EVENT_DOWN or FBNIC_LINK_EVENT_UP + * + * void (*link_down)(struct fbnic_dev *fbd); + * Configure MAC for link down event + * void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause); + * Configure MAC for link up event; + * + */ +struct fbnic_mac { + void (*init_regs)(struct fbnic_dev *fbd); + + int (*pcs_enable)(struct fbnic_dev *fbd); + void (*pcs_disable)(struct fbnic_dev *fbd); + bool (*pcs_get_link)(struct fbnic_dev *fbd); + int (*pcs_get_link_event)(struct fbnic_dev *fbd); + + void (*link_down)(struct fbnic_dev *fbd); + void (*link_up)(struct fbnic_dev *fbd, bool tx_pause, bool rx_pause); +}; + +int fbnic_mac_init(struct fbnic_dev *fbd); +#endif /* _FBNIC_MAC_H_ */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c new file mode 100644 index 000000000000..b7ce6da68543 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.c @@ -0,0 +1,488 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#include +#include +#include + +#include "fbnic.h" +#include "fbnic_netdev.h" +#include "fbnic_txrx.h" + +int __fbnic_open(struct fbnic_net *fbn) +{ + struct fbnic_dev *fbd = fbn->fbd; + int err; + + err = fbnic_alloc_napi_vectors(fbn); + if (err) + return err; + + err = fbnic_alloc_resources(fbn); + if (err) + goto free_napi_vectors; + + err = netif_set_real_num_tx_queues(fbn->netdev, + fbn->num_tx_queues); + if (err) + goto free_resources; + + err = netif_set_real_num_rx_queues(fbn->netdev, + fbn->num_rx_queues); + if (err) + goto free_resources; + + /* Send ownership message and flush to verify FW has seen it */ + err = fbnic_fw_xmit_ownership_msg(fbd, true); + if (err) { + dev_warn(fbd->dev, + "Error %d sending host ownership message to the firmware\n", + err); + goto free_resources; + } + + err = fbnic_fw_init_heartbeat(fbd, false); + if (err) + goto release_ownership; + + err = fbnic_pcs_irq_enable(fbd); + if (err) + goto release_ownership; + /* Pull the BMC config and initialize the RPC */ + fbnic_bmc_rpc_init(fbd); + fbnic_rss_reinit(fbd, fbn); + + return 0; +release_ownership: + fbnic_fw_xmit_ownership_msg(fbn->fbd, false); +free_resources: + fbnic_free_resources(fbn); +free_napi_vectors: + fbnic_free_napi_vectors(fbn); + return err; +} + +static int fbnic_open(struct net_device *netdev) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + int err; + + err = __fbnic_open(fbn); + if (!err) + fbnic_up(fbn); + + return err; +} + +static int fbnic_stop(struct net_device *netdev) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + + fbnic_down(fbn); + fbnic_pcs_irq_disable(fbn->fbd); + + fbnic_fw_xmit_ownership_msg(fbn->fbd, false); + + fbnic_free_resources(fbn); + fbnic_free_napi_vectors(fbn); + + return 0; +} + +static int fbnic_uc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_mac_addr *avail_addr; + + if (WARN_ON(!is_valid_ether_addr(addr))) + return -EADDRNOTAVAIL; + + avail_addr = __fbnic_uc_sync(fbn->fbd, addr); + if (!avail_addr) + return -ENOSPC; + + /* Add type flag indicating this address is in use by the host */ + set_bit(FBNIC_MAC_ADDR_T_UNICAST, avail_addr->act_tcam); + + return 0; +} + +static int fbnic_uc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_dev *fbd = fbn->fbd; + int i, ret; + + /* Scan from middle of list to bottom, filling bottom up. + * Skip the first entry which is reserved for dev_addr and + * leave the last entry to use for promiscuous filtering. 
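+ *
+ * A sketch of the assumed MACDA TCAM layout (indices illustrative,
+ * pieced together from the constants used in this patch):
+ *
+ *	[0]				broadcast
+ *	[1 .. boundary - 1]		multicast, filled top down
+ *	[boundary .. HOST_ADDR - 1]	unicast, filled bottom up
+ *	[HOST_ADDR]			dev_addr
+ *	[PROMISC (last)]		promiscuous catch-all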
+ */ + for (i = fbd->mac_addr_boundary, ret = -ENOENT; + i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX && ret; i++) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i]; + + if (!ether_addr_equal(mac_addr->value.addr8, addr)) + continue; + + ret = __fbnic_uc_unsync(mac_addr); + } + + return ret; +} + +static int fbnic_mc_sync(struct net_device *netdev, const unsigned char *addr) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_mac_addr *avail_addr; + + if (WARN_ON(!is_multicast_ether_addr(addr))) + return -EADDRNOTAVAIL; + + avail_addr = __fbnic_mc_sync(fbn->fbd, addr); + if (!avail_addr) + return -ENOSPC; + + /* Add type flag indicating this address is in use by the host */ + set_bit(FBNIC_MAC_ADDR_T_MULTICAST, avail_addr->act_tcam); + + return 0; +} + +static int fbnic_mc_unsync(struct net_device *netdev, const unsigned char *addr) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_dev *fbd = fbn->fbd; + int i, ret; + + /* Scan from middle of list to top, filling top down. + * Skip over the address reserved for the BMC MAC and + * exclude index 0 as that belongs to the broadcast address + */ + for (i = fbd->mac_addr_boundary, ret = -ENOENT; + --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX && ret;) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i]; + + if (!ether_addr_equal(mac_addr->value.addr8, addr)) + continue; + + ret = __fbnic_mc_unsync(mac_addr); + } + + return ret; +} + +void __fbnic_set_rx_mode(struct net_device *netdev) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + bool uc_promisc = false, mc_promisc = false; + struct fbnic_dev *fbd = fbn->fbd; + struct fbnic_mac_addr *mac_addr; + int err; + + /* Populate host address from dev_addr */ + mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX]; + if (!ether_addr_equal(mac_addr->value.addr8, netdev->dev_addr) || + mac_addr->state != FBNIC_TCAM_S_VALID) { + ether_addr_copy(mac_addr->value.addr8, netdev->dev_addr); + mac_addr->state = FBNIC_TCAM_S_UPDATE; + set_bit(FBNIC_MAC_ADDR_T_UNICAST, mac_addr->act_tcam); + } + + /* Populate broadcast address if broadcast is enabled */ + mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX]; + if (netdev->flags & IFF_BROADCAST) { + if (!is_broadcast_ether_addr(mac_addr->value.addr8) || + mac_addr->state != FBNIC_TCAM_S_VALID) { + eth_broadcast_addr(mac_addr->value.addr8); + mac_addr->state = FBNIC_TCAM_S_ADD; + } + set_bit(FBNIC_MAC_ADDR_T_BROADCAST, mac_addr->act_tcam); + } else if (mac_addr->state == FBNIC_TCAM_S_VALID) { + __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_BROADCAST); + } + + /* Synchronize unicast and multicast address lists */ + err = __dev_uc_sync(netdev, fbnic_uc_sync, fbnic_uc_unsync); + if (err == -ENOSPC) + uc_promisc = true; + err = __dev_mc_sync(netdev, fbnic_mc_sync, fbnic_mc_unsync); + if (err == -ENOSPC) + mc_promisc = true; + + uc_promisc |= !!(netdev->flags & IFF_PROMISC); + mc_promisc |= !!(netdev->flags & IFF_ALLMULTI) || uc_promisc; + + /* Populate last TCAM entry with promiscuous entry and 0/1 bit mask */ + mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_PROMISC_IDX]; + if (uc_promisc) { + if (!is_zero_ether_addr(mac_addr->value.addr8) || + mac_addr->state != FBNIC_TCAM_S_VALID) { + eth_zero_addr(mac_addr->value.addr8); + eth_broadcast_addr(mac_addr->mask.addr8); + clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI, + mac_addr->act_tcam); + set_bit(FBNIC_MAC_ADDR_T_PROMISC, + mac_addr->act_tcam); + mac_addr->state = FBNIC_TCAM_S_ADD; + } + } else if (mc_promisc && + (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi)) { + /* We have 
to add a special handler for multicast as the
+ * BMC may have an all-multi rule already in place. As such,
+ * adding a rule ourselves won't do any good, so we will have
+ * to modify the rules for the ALL MULTI below if the BMC
+ * already has the rule in place.
+ */
+ if (!is_multicast_ether_addr(mac_addr->value.addr8) ||
+ mac_addr->state != FBNIC_TCAM_S_VALID) {
+ eth_zero_addr(mac_addr->value.addr8);
+ eth_broadcast_addr(mac_addr->mask.addr8);
+ mac_addr->value.addr8[0] ^= 1;
+ mac_addr->mask.addr8[0] ^= 1;
+ set_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ mac_addr->state = FBNIC_TCAM_S_ADD;
+ }
+ } else if (mac_addr->state == FBNIC_TCAM_S_VALID) {
+ if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) {
+ clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI,
+ mac_addr->act_tcam);
+ clear_bit(FBNIC_MAC_ADDR_T_PROMISC,
+ mac_addr->act_tcam);
+ } else {
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+ }
+ }
+
+ /* Add rules for BMC all multicast if it is enabled */
+ fbnic_bmc_rpc_all_multi_config(fbd, mc_promisc);
+
+ /* Sift out any unshared BMC rules and place them in the BMC-only section */
+ fbnic_sift_macda(fbd);
+
+ /* Write updates to hardware */
+ fbnic_write_rules(fbd);
+ fbnic_write_macda(fbd);
+}
+
+static void fbnic_set_rx_mode(struct net_device *netdev)
+{
+ /* No need to update the hardware if we are not running */
+ if (netif_running(netdev))
+ __fbnic_set_rx_mode(netdev);
+}
+
+static int fbnic_set_mac(struct net_device *netdev, void *p)
+{
+ struct sockaddr *addr = p;
+
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ eth_hw_addr_set(netdev, addr->sa_data);
+
+ fbnic_set_rx_mode(netdev);
+
+ return 0;
+}
+
+void fbnic_clear_rx_mode(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ int idx;
+
+ for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) {
+ struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx];
+
+ if (mac_addr->state != FBNIC_TCAM_S_VALID)
+ continue;
+
+ bitmap_clear(mac_addr->act_tcam,
+ FBNIC_MAC_ADDR_T_HOST_START,
+ FBNIC_MAC_ADDR_T_HOST_LEN);
+
+ if (bitmap_empty(mac_addr->act_tcam,
+ FBNIC_RPC_TCAM_ACT_NUM_ENTRIES))
+ mac_addr->state = FBNIC_TCAM_S_DELETE;
+ }
+
+ /* Write updates to hardware */
+ fbnic_write_macda(fbd);
+
+ __dev_uc_unsync(netdev, NULL);
+ __dev_mc_unsync(netdev, NULL);
+}
+
+static const struct net_device_ops fbnic_netdev_ops = {
+ .ndo_open = fbnic_open,
+ .ndo_stop = fbnic_stop,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_start_xmit = fbnic_xmit_frame,
+ .ndo_features_check = fbnic_features_check,
+ .ndo_set_mac_address = fbnic_set_mac,
+ .ndo_set_rx_mode = fbnic_set_rx_mode,
+};
+
+void fbnic_reset_queues(struct fbnic_net *fbn,
+ unsigned int tx, unsigned int rx)
+{
+ struct fbnic_dev *fbd = fbn->fbd;
+ unsigned int max_napis;
+
+ max_napis = fbd->num_irqs - FBNIC_NON_NAPI_VECTORS;
+
+ tx = min(tx, max_napis);
+ fbn->num_tx_queues = tx;
+
+ rx = min(rx, max_napis);
+ fbn->num_rx_queues = rx;
+
+ fbn->num_napi = max(tx, rx);
+}
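+
+/* Illustrative example (numbers assumed, not taken from the driver):
+ * if 9 IRQs were allocated and FBNIC_NON_NAPI_VECTORS were 1, max_napis
+ * would be 8, so a call such as fbnic_reset_queues(fbn, 16, 4) would
+ * clamp to 8 Tx queues and 4 Rx queues and set num_napi to 8.
+ */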
+
+/**
+ * fbnic_netdev_free - Free the netdev associated with fbnic
+ * @fbd: Driver specific structure to free netdev from
+ *
+ * Destroy the phylink instance and free the netdev along with its
+ * private structure, then clear the netdev pointer in the fbnic device.
+ **/
+void fbnic_netdev_free(struct fbnic_dev *fbd)
+{
+ struct fbnic_net *fbn = netdev_priv(fbd->netdev);
+
+ if (fbn->phylink)
+ phylink_destroy(fbn->phylink);
+
+ free_netdev(fbd->netdev);
+ fbd->netdev = NULL;
+}
+
+/**
+ * fbnic_netdev_alloc - Allocate a netdev and associate with fbnic
+ * @fbd: Driver specific structure to associate netdev with
+ *
+ * Allocate and initialize the netdev and netdev private structure. Bind
+ * together the hardware, netdev, and pci data structures.
+ *
+ * Return: Pointer to the allocated netdev on success, NULL on failure
+ **/
+struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd)
+{
+ struct net_device *netdev;
+ struct fbnic_net *fbn;
+ int default_queues;
+
+ netdev = alloc_etherdev_mq(sizeof(*fbn), FBNIC_MAX_RXQS);
+ if (!netdev)
+ return NULL;
+
+ SET_NETDEV_DEV(netdev, fbd->dev);
+ fbd->netdev = netdev;
+
+ netdev->netdev_ops = &fbnic_netdev_ops;
+
+ fbn = netdev_priv(netdev);
+
+ fbn->netdev = netdev;
+ fbn->fbd = fbd;
+ INIT_LIST_HEAD(&fbn->napis);
+
+ fbn->txq_size = FBNIC_TXQ_SIZE_DEFAULT;
+ fbn->hpq_size = FBNIC_HPQ_SIZE_DEFAULT;
+ fbn->ppq_size = FBNIC_PPQ_SIZE_DEFAULT;
+ fbn->rcq_size = FBNIC_RCQ_SIZE_DEFAULT;
+
+ default_queues = netif_get_num_default_rss_queues();
+ if (default_queues > fbd->max_num_queues)
+ default_queues = fbd->max_num_queues;
+
+ fbnic_reset_queues(fbn, default_queues, default_queues);
+
+ fbnic_reset_indir_tbl(fbn);
+ fbnic_rss_key_fill(fbn->rss_key);
+ fbnic_rss_init_en_mask(fbn);
+
+ netdev->features |=
+ NETIF_F_RXHASH |
+ NETIF_F_SG |
+ NETIF_F_HW_CSUM |
+ NETIF_F_RXCSUM;
+
+ netdev->hw_features |= netdev->features;
+ netdev->vlan_features |= netdev->features;
+ netdev->hw_enc_features |= netdev->features;
+
+ netdev->min_mtu = IPV6_MIN_MTU;
+ netdev->max_mtu = FBNIC_MAX_JUMBO_FRAME_SIZE - ETH_HLEN;
+
+ /* TBD: This is a workaround for BMC as phylink doesn't have support
+ * for leaving the link enabled if a BMC is present.
+ */
+ netdev->ethtool->wol_enabled = true;
+
+ fbn->fec = FBNIC_FEC_AUTO | FBNIC_FEC_RS;
+ fbn->link_mode = FBNIC_LINK_AUTO | FBNIC_LINK_50R2;
+ netif_carrier_off(netdev);
+
+ netif_tx_stop_all_queues(netdev);
+
+ if (fbnic_phylink_init(netdev)) {
+ fbnic_netdev_free(fbd);
+ return NULL;
+ }
+
+ return netdev;
+}
+
+static int fbnic_dsn_to_mac_addr(u64 dsn, char *addr)
+{
+ addr[0] = (dsn >> 56) & 0xFF;
+ addr[1] = (dsn >> 48) & 0xFF;
+ addr[2] = (dsn >> 40) & 0xFF;
+ addr[3] = (dsn >> 16) & 0xFF;
+ addr[4] = (dsn >> 8) & 0xFF;
+ addr[5] = dsn & 0xFF;
+
+ return is_valid_ether_addr(addr) ? 0 : -EINVAL;
+}
+
+/**
+ * fbnic_netdev_register - Initialize general software structures
+ * @netdev: Netdev containing structure to initialize and register
+ *
+ * Initialize the MAC address for the netdev and register it.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+int fbnic_netdev_register(struct net_device *netdev)
+{
+ struct fbnic_net *fbn = netdev_priv(netdev);
+ struct fbnic_dev *fbd = fbn->fbd;
+ u64 dsn = fbd->dsn;
+ u8 addr[ETH_ALEN];
+ int err;
+
+ err = fbnic_dsn_to_mac_addr(dsn, addr);
+ if (!err) {
+ ether_addr_copy(netdev->perm_addr, addr);
+ eth_hw_addr_set(netdev, addr);
+ } else {
+ /* A randomly assigned MAC address will cause provisioning
+ * issues so instead just fail to spawn the netdev and
+ * avoid any confusion.
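+ * (The DSN is expected to be an EUI-64 style identifier;
+ * fbnic_dsn_to_mac_addr() above drops the two filler bytes, so a
+ * hypothetical DSN of 0x112233fffe445566 would map to the MAC
+ * address 11:22:33:44:55:66.)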
+ */ + dev_err(fbd->dev, "MAC addr %pM invalid\n", addr); + return err; + } + + return register_netdev(netdev); +} + +void fbnic_netdev_unregister(struct net_device *netdev) +{ + unregister_netdev(netdev); +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h new file mode 100644 index 000000000000..6bc0ebeb8182 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_netdev.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#ifndef _FBNIC_NETDEV_H_ +#define _FBNIC_NETDEV_H_ + +#include +#include + +#include "fbnic_csr.h" +#include "fbnic_rpc.h" +#include "fbnic_txrx.h" + +struct fbnic_net { + struct fbnic_ring *tx[FBNIC_MAX_TXQS]; + struct fbnic_ring *rx[FBNIC_MAX_RXQS]; + + struct net_device *netdev; + struct fbnic_dev *fbd; + + u32 txq_size; + u32 hpq_size; + u32 ppq_size; + u32 rcq_size; + + u16 num_napi; + + struct phylink *phylink; + struct phylink_config phylink_config; + struct phylink_pcs phylink_pcs; + + /* TBD: Remove these when phylink supports FEC and lane config */ + u8 fec; + u8 link_mode; + + u16 num_tx_queues; + u16 num_rx_queues; + + u8 indir_tbl[FBNIC_RPC_RSS_TBL_COUNT][FBNIC_RPC_RSS_TBL_SIZE]; + u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN]; + u32 rss_flow_hash[FBNIC_NUM_HASH_OPT]; + + u64 link_down_events; + + struct list_head napis; +}; + +int __fbnic_open(struct fbnic_net *fbn); +void fbnic_up(struct fbnic_net *fbn); +void fbnic_down(struct fbnic_net *fbn); + +struct net_device *fbnic_netdev_alloc(struct fbnic_dev *fbd); +void fbnic_netdev_free(struct fbnic_dev *fbd); +int fbnic_netdev_register(struct net_device *netdev); +void fbnic_netdev_unregister(struct net_device *netdev); +void fbnic_reset_queues(struct fbnic_net *fbn, + unsigned int tx, unsigned int rx); + +void __fbnic_set_rx_mode(struct net_device *netdev); +void fbnic_clear_rx_mode(struct net_device *netdev); + +int fbnic_phylink_init(struct net_device *netdev); +#endif /* _FBNIC_NETDEV_H_ */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_pci.c b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c new file mode 100644 index 000000000000..a4809fe0fc24 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_pci.c @@ -0,0 +1,564 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#include +#include +#include +#include +#include + +#include "fbnic.h" +#include "fbnic_drvinfo.h" +#include "fbnic_netdev.h" + +char fbnic_driver_name[] = DRV_NAME; + +MODULE_DESCRIPTION(DRV_SUMMARY); +MODULE_LICENSE("GPL"); + +static const struct fbnic_info fbnic_asic_info = { + .max_num_queues = FBNIC_MAX_QUEUES, + .bar_mask = BIT(0) | BIT(4) +}; + +static const struct fbnic_info *fbnic_info_tbl[] = { + [fbnic_board_asic] = &fbnic_asic_info, +}; + +static const struct pci_device_id fbnic_pci_tbl[] = { + { PCI_DEVICE_DATA(META, FBNIC_ASIC, fbnic_board_asic) }, + /* Required last entry */ + {0, } +}; +MODULE_DEVICE_TABLE(pci, fbnic_pci_tbl); + +u32 fbnic_rd32(struct fbnic_dev *fbd, u32 reg) +{ + u32 __iomem *csr = READ_ONCE(fbd->uc_addr0); + u32 value; + + if (!csr) + return ~0U; + + value = readl(csr + reg); + + /* If any bits are 0 value should be valid */ + if (~value) + return value; + + /* All 1's may be valid if ZEROs register still works */ + if (reg != FBNIC_MASTER_SPARE_0 && ~readl(csr + FBNIC_MASTER_SPARE_0)) + return value; + + /* Hardware is giving us all 1's reads, assume it is gone */ + WRITE_ONCE(fbd->uc_addr0, NULL); + WRITE_ONCE(fbd->uc_addr4, NULL); + + dev_err(fbd->dev, + "Failed read (idx 0x%x AKA addr 0x%x), disabled CSR access, awaiting reset\n", + reg, reg << 2); + + /* Notify stack that device has lost (PCIe) link */ + if (!fbnic_init_failure(fbd)) + netif_device_detach(fbd->netdev); + + return ~0U; +} + +bool fbnic_fw_present(struct fbnic_dev *fbd) +{ + return !!READ_ONCE(fbd->uc_addr4); +} + +void fbnic_fw_wr32(struct fbnic_dev *fbd, u32 reg, u32 val) +{ + u32 __iomem *csr = READ_ONCE(fbd->uc_addr4); + + if (csr) + writel(val, csr + reg); +} + +u32 fbnic_fw_rd32(struct fbnic_dev *fbd, u32 reg) +{ + u32 __iomem *csr = READ_ONCE(fbd->uc_addr4); + u32 value; + + if (!csr) + return ~0U; + + value = readl(csr + reg); + + /* If any bits are 0 value should be valid */ + if (~value) + return value; + + /* All 1's may be valid if ZEROs register still works */ + if (reg != FBNIC_FW_ZERO_REG && ~readl(csr + FBNIC_FW_ZERO_REG)) + return value; + + /* Hardware is giving us all 1's reads, assume it is gone */ + WRITE_ONCE(fbd->uc_addr0, NULL); + WRITE_ONCE(fbd->uc_addr4, NULL); + + dev_err(fbd->dev, + "Failed read (idx 0x%x AKA addr 0x%x), disabled CSR access, awaiting reset\n", + reg, reg << 2); + + /* Notify stack that device has lost (PCIe) link */ + if (!fbnic_init_failure(fbd)) + netif_device_detach(fbd->netdev); + + return ~0U; +} + +static void fbnic_service_task_start(struct fbnic_net *fbn) +{ + struct fbnic_dev *fbd = fbn->fbd; + + schedule_delayed_work(&fbd->service_task, HZ); + phylink_resume(fbn->phylink); +} + +static void fbnic_service_task_stop(struct fbnic_net *fbn) +{ + struct fbnic_dev *fbd = fbn->fbd; + + phylink_suspend(fbn->phylink, fbnic_bmc_present(fbd)); + cancel_delayed_work(&fbd->service_task); +} + +void fbnic_up(struct fbnic_net *fbn) +{ + fbnic_enable(fbn); + + fbnic_fill(fbn); + + fbnic_rss_reinit_hw(fbn->fbd, fbn); + + __fbnic_set_rx_mode(fbn->netdev); + + /* Enable Tx/Rx processing */ + fbnic_napi_enable(fbn); + netif_tx_start_all_queues(fbn->netdev); + + fbnic_service_task_start(fbn); +} + +static void fbnic_down_noidle(struct fbnic_net *fbn) +{ + fbnic_service_task_stop(fbn); + + /* Disable Tx/Rx Processing */ + fbnic_napi_disable(fbn); + netif_tx_disable(fbn->netdev); + + fbnic_clear_rx_mode(fbn->netdev); + fbnic_clear_rules(fbn->fbd); + fbnic_rss_disable_hw(fbn->fbd); + fbnic_disable(fbn); +} + +void fbnic_down(struct fbnic_net *fbn) 
+{
+ fbnic_down_noidle(fbn);
+
+ fbnic_wait_all_queues_idle(fbn->fbd, false);
+
+ fbnic_flush(fbn);
+}
+
+static void fbnic_health_check(struct fbnic_dev *fbd)
+{
+ struct fbnic_fw_mbx *tx_mbx = &fbd->mbx[FBNIC_IPC_MBX_TX_IDX];
+
+ /* As long as the heart is beating, the FW is healthy */
+ if (fbd->fw_heartbeat_enabled)
+ return;
+
+ /* If the Tx mailbox still has messages sitting in it then there likely
+ * isn't anything we can do. We will wait until the mailbox is empty to
+ * report the fault so we can collect the crashlog.
+ */
+ if (tx_mbx->head != tx_mbx->tail)
+ return;
+
+ /* TBD: Need to add a more thorough recovery here.
+ * Specifically I need to verify what the firmware will have
+ * changed since we set it up and it rebooted. May just need to
+ * perform a down/up. For now we will just reclaim ownership so
+ * the heartbeat can catch the next fault.
+ */
+ fbnic_fw_xmit_ownership_msg(fbd, true);
+}
+
+static void fbnic_service_task(struct work_struct *work)
+{
+ struct fbnic_dev *fbd = container_of(to_delayed_work(work),
+ struct fbnic_dev, service_task);
+
+ rtnl_lock();
+
+ fbnic_fw_check_heartbeat(fbd);
+
+ fbnic_health_check(fbd);
+
+ if (netif_carrier_ok(fbd->netdev))
+ fbnic_napi_depletion_check(fbd->netdev);
+
+ if (netif_running(fbd->netdev))
+ schedule_delayed_work(&fbd->service_task, HZ);
+
+ rtnl_unlock();
+}
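+
+/* Lifecycle note (summarizing the code above): the service task is
+ * started by fbnic_service_task_start() when the interface comes up,
+ * reschedules itself roughly once per second (HZ) while the netdev is
+ * running, and is cancelled by fbnic_service_task_stop() on the way down.
+ */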
+
+/**
+ * fbnic_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in fbnic_pci_tbl
+ *
+ * Initializes a PCI device identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ *
+ * Return: 0 on success, negative on failure
+ **/
+static int fbnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ const struct fbnic_info *info = fbnic_info_tbl[ent->driver_data];
+ struct net_device *netdev;
+ struct fbnic_dev *fbd;
+ int err;
+
+ if (pdev->error_state != pci_channel_io_normal) {
+ dev_err(&pdev->dev,
+ "PCI device still in an error state. Unable to load...\n");
+ return -EIO;
+ }
+
+ err = pcim_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "PCI enable device failed: %d\n", err);
+ return err;
+ }
+
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(46));
+ if (err)
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err) {
+ dev_err(&pdev->dev, "DMA configuration failed: %d\n", err);
+ return err;
+ }
+
+ err = pcim_iomap_regions(pdev, info->bar_mask, fbnic_driver_name);
+ if (err) {
+ dev_err(&pdev->dev,
+ "pci_request_selected_regions failed: %d\n", err);
+ return err;
+ }
+
+ fbd = fbnic_devlink_alloc(pdev);
+ if (!fbd) {
+ dev_err(&pdev->dev, "Devlink allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* Populate driver with hardware-specific info and handlers */
+ fbd->max_num_queues = info->max_num_queues;
+
+ pci_set_master(pdev);
+ pci_save_state(pdev);
+
+ INIT_DELAYED_WORK(&fbd->service_task, fbnic_service_task);
+
+ err = fbnic_alloc_irqs(fbd);
+ if (err)
+ goto free_fbd;
+
+ err = fbnic_mac_init(fbd);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to initialize MAC: %d\n", err);
+ goto free_irqs;
+ }
+
+ err = fbnic_fw_enable_mbx(fbd);
+ if (err) {
+ dev_err(&pdev->dev,
+ "Firmware mailbox initialization failure\n");
+ goto free_irqs;
+ }
+
+ fbnic_devlink_register(fbd);
+
+ if (!fbd->dsn) {
+ dev_warn(&pdev->dev, "Reading serial number failed\n");
+ goto init_failure_mode;
+ }
+
+ netdev = fbnic_netdev_alloc(fbd);
+ if (!netdev) {
+ dev_err(&pdev->dev, "Netdev allocation failed\n");
+ goto init_failure_mode;
+ }
+
+ err = fbnic_netdev_register(netdev);
+ if (err) {
+ dev_err(&pdev->dev, "Netdev registration failed: %d\n", err);
+ goto ifm_free_netdev;
+ }
+
+ return 0;
+
+ifm_free_netdev:
+ fbnic_netdev_free(fbd);
+init_failure_mode:
+ dev_warn(&pdev->dev, "Probe error encountered, entering init failure mode. Normal networking functionality will not be available.\n");
+ /* Always return 0 even on error so devlink is registered to allow
+ * firmware updates for fixes.
+ */
+ return 0;
+free_irqs:
+ fbnic_free_irqs(fbd);
+free_fbd:
+ pci_disable_device(pdev);
+ fbnic_devlink_free(fbd);
+
+ return err;
+}
+
+/**
+ * fbnic_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * Called by the PCI subsystem to alert the driver that it should release
+ * a PCI device. This could be caused by a Hot-Plug event, or because the
+ * driver is going to be removed from memory.
+ **/ +static void fbnic_remove(struct pci_dev *pdev) +{ + struct fbnic_dev *fbd = pci_get_drvdata(pdev); + + if (!fbnic_init_failure(fbd)) { + struct net_device *netdev = fbd->netdev; + + fbnic_netdev_unregister(netdev); + cancel_delayed_work_sync(&fbd->service_task); + fbnic_netdev_free(fbd); + } + + fbnic_devlink_unregister(fbd); + fbnic_fw_disable_mbx(fbd); + fbnic_free_irqs(fbd); + + pci_disable_device(pdev); + fbnic_devlink_free(fbd); +} + +static int fbnic_pm_suspend(struct device *dev) +{ + struct fbnic_dev *fbd = dev_get_drvdata(dev); + struct net_device *netdev = fbd->netdev; + + if (fbnic_init_failure(fbd)) + goto null_uc_addr; + + rtnl_lock(); + + netif_device_detach(netdev); + + if (netif_running(netdev)) + netdev->netdev_ops->ndo_stop(netdev); + + rtnl_unlock(); + +null_uc_addr: + fbnic_fw_disable_mbx(fbd); + + /* Free the IRQs so they aren't trying to occupy sleeping CPUs */ + fbnic_free_irqs(fbd); + + /* Hardware is about to go away, so switch off MMIO access internally */ + WRITE_ONCE(fbd->uc_addr0, NULL); + WRITE_ONCE(fbd->uc_addr4, NULL); + + return 0; +} + +static int __fbnic_pm_resume(struct device *dev) +{ + struct fbnic_dev *fbd = dev_get_drvdata(dev); + struct net_device *netdev = fbd->netdev; + void __iomem * const *iomap_table; + struct fbnic_net *fbn; + int err; + + /* Restore MMIO access */ + iomap_table = pcim_iomap_table(to_pci_dev(dev)); + fbd->uc_addr0 = iomap_table[0]; + fbd->uc_addr4 = iomap_table[4]; + + /* Rerequest the IRQs */ + err = fbnic_alloc_irqs(fbd); + if (err) + goto err_invalidate_uc_addr; + + fbd->mac->init_regs(fbd); + + /* Re-enable mailbox */ + err = fbnic_fw_enable_mbx(fbd); + if (err) + goto err_free_irqs; + + /* No netdev means there isn't a network interface to bring up */ + if (fbnic_init_failure(fbd)) + return 0; + + fbn = netdev_priv(netdev); + + /* Reset the queues if needed */ + fbnic_reset_queues(fbn, fbn->num_tx_queues, fbn->num_rx_queues); + + rtnl_lock(); + + if (netif_running(netdev)) { + err = __fbnic_open(fbn); + if (err) + goto err_disable_mbx; + } + + rtnl_unlock(); + + return 0; +err_disable_mbx: + rtnl_unlock(); + fbnic_fw_disable_mbx(fbd); +err_free_irqs: + fbnic_free_irqs(fbd); +err_invalidate_uc_addr: + WRITE_ONCE(fbd->uc_addr0, NULL); + WRITE_ONCE(fbd->uc_addr4, NULL); + return err; +} + +static void __fbnic_pm_attach(struct device *dev) +{ + struct fbnic_dev *fbd = dev_get_drvdata(dev); + struct net_device *netdev = fbd->netdev; + struct fbnic_net *fbn; + + if (fbnic_init_failure(fbd)) + return; + + fbn = netdev_priv(netdev); + + if (netif_running(netdev)) + fbnic_up(fbn); + + netif_device_attach(netdev); +} + +static int __maybe_unused fbnic_pm_resume(struct device *dev) +{ + int err; + + err = __fbnic_pm_resume(dev); + if (!err) + __fbnic_pm_attach(dev); + + return err; +} + +static const struct dev_pm_ops fbnic_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(fbnic_pm_suspend, fbnic_pm_resume) +}; + +static void fbnic_shutdown(struct pci_dev *pdev) +{ + fbnic_pm_suspend(&pdev->dev); +} + +static pci_ers_result_t fbnic_err_error_detected(struct pci_dev *pdev, + pci_channel_state_t state) +{ + /* Disconnect device if failure is not recoverable via reset */ + if (state == pci_channel_io_perm_failure) + return PCI_ERS_RESULT_DISCONNECT; + + fbnic_pm_suspend(&pdev->dev); + + /* Request a slot reset */ + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t fbnic_err_slot_reset(struct pci_dev *pdev) +{ + int err; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_save_state(pdev); + + if 
(pci_enable_device_mem(pdev)) { + dev_err(&pdev->dev, + "Cannot re-enable PCI device after reset.\n"); + return PCI_ERS_RESULT_DISCONNECT; + } + + /* Restore device to previous state */ + err = __fbnic_pm_resume(&pdev->dev); + + return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED; +} + +static void fbnic_err_resume(struct pci_dev *pdev) +{ + __fbnic_pm_attach(&pdev->dev); +} + +static const struct pci_error_handlers fbnic_err_handler = { + .error_detected = fbnic_err_error_detected, + .slot_reset = fbnic_err_slot_reset, + .resume = fbnic_err_resume, +}; + +static struct pci_driver fbnic_driver = { + .name = fbnic_driver_name, + .id_table = fbnic_pci_tbl, + .probe = fbnic_probe, + .remove = fbnic_remove, + .driver.pm = &fbnic_pm_ops, + .shutdown = fbnic_shutdown, + .err_handler = &fbnic_err_handler, +}; + +/** + * fbnic_init_module - Driver Registration Routine + * + * The first routine called when the driver is loaded. All it does is + * register with the PCI subsystem. + * + * Return: 0 on success, negative on failure + **/ +static int __init fbnic_init_module(void) +{ + int err; + + err = pci_register_driver(&fbnic_driver); + if (err) + goto out; + + pr_info(DRV_SUMMARY " (%s)", fbnic_driver.name); +out: + return err; +} +module_init(fbnic_init_module); + +/** + * fbnic_exit_module - Driver Exit Cleanup Routine + * + * Called just before the driver is removed from memory. + **/ +static void __exit fbnic_exit_module(void) +{ + pci_unregister_driver(&fbnic_driver); +} +module_exit(fbnic_exit_module); diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c new file mode 100644 index 000000000000..1a5e1e719b30 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_phylink.c @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include +#include + +#include "fbnic.h" +#include "fbnic_mac.h" +#include "fbnic_netdev.h" + +static struct fbnic_net * +fbnic_pcs_to_net(struct phylink_pcs *pcs) +{ + return container_of(pcs, struct fbnic_net, phylink_pcs); +} + +static void +fbnic_phylink_pcs_get_state(struct phylink_pcs *pcs, + struct phylink_link_state *state) +{ + struct fbnic_net *fbn = fbnic_pcs_to_net(pcs); + struct fbnic_dev *fbd = fbn->fbd; + + /* For now we use hard-coded defaults and FW config to determine + * the current values. In future patches we will add support for + * reconfiguring these values and changing link settings. 
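+ *
+ * (The Rn suffix in the firmware speed names encodes the lane count,
+ * e.g. FBNIC_FW_LINK_SPEED_50R2 presumably means 50G over two 25G
+ * lanes.)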
+ */ + switch (fbd->fw_cap.link_speed) { + case FBNIC_FW_LINK_SPEED_25R1: + state->speed = SPEED_25000; + break; + case FBNIC_FW_LINK_SPEED_50R2: + state->speed = SPEED_50000; + break; + case FBNIC_FW_LINK_SPEED_100R2: + state->speed = SPEED_100000; + break; + default: + state->speed = SPEED_UNKNOWN; + break; + } + + state->duplex = DUPLEX_FULL; + + state->link = fbd->mac->pcs_get_link(fbd); +} + +static int +fbnic_phylink_pcs_enable(struct phylink_pcs *pcs) +{ + struct fbnic_net *fbn = fbnic_pcs_to_net(pcs); + struct fbnic_dev *fbd = fbn->fbd; + + return fbd->mac->pcs_enable(fbd); +} + +static void +fbnic_phylink_pcs_disable(struct phylink_pcs *pcs) +{ + struct fbnic_net *fbn = fbnic_pcs_to_net(pcs); + struct fbnic_dev *fbd = fbn->fbd; + + return fbd->mac->pcs_disable(fbd); +} + +static int +fbnic_phylink_pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, + phy_interface_t interface, + const unsigned long *advertising, + bool permit_pause_to_mac) +{ + return 0; +} + +static const struct phylink_pcs_ops fbnic_phylink_pcs_ops = { + .pcs_config = fbnic_phylink_pcs_config, + .pcs_enable = fbnic_phylink_pcs_enable, + .pcs_disable = fbnic_phylink_pcs_disable, + .pcs_get_state = fbnic_phylink_pcs_get_state, +}; + +static struct phylink_pcs * +fbnic_phylink_mac_select_pcs(struct phylink_config *config, + phy_interface_t interface) +{ + struct net_device *netdev = to_net_dev(config->dev); + struct fbnic_net *fbn = netdev_priv(netdev); + + return &fbn->phylink_pcs; +} + +static void +fbnic_phylink_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ +} + +static void +fbnic_phylink_mac_link_down(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct net_device *netdev = to_net_dev(config->dev); + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_dev *fbd = fbn->fbd; + + fbd->mac->link_down(fbd); + + fbn->link_down_events++; +} + +static void +fbnic_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phy, unsigned int mode, + phy_interface_t interface, int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct net_device *netdev = to_net_dev(config->dev); + struct fbnic_net *fbn = netdev_priv(netdev); + struct fbnic_dev *fbd = fbn->fbd; + + fbd->mac->link_up(fbd, tx_pause, rx_pause); +} + +static const struct phylink_mac_ops fbnic_phylink_mac_ops = { + .mac_select_pcs = fbnic_phylink_mac_select_pcs, + .mac_config = fbnic_phylink_mac_config, + .mac_link_down = fbnic_phylink_mac_link_down, + .mac_link_up = fbnic_phylink_mac_link_up, +}; + +int fbnic_phylink_init(struct net_device *netdev) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + struct phylink *phylink; + + fbn->phylink_pcs.neg_mode = true; + fbn->phylink_pcs.ops = &fbnic_phylink_pcs_ops; + + fbn->phylink_config.dev = &netdev->dev; + fbn->phylink_config.type = PHYLINK_NETDEV; + fbn->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | + MAC_10000FD | MAC_25000FD | + MAC_40000FD | MAC_50000FD | + MAC_100000FD; + fbn->phylink_config.default_an_inband = true; + + __set_bit(PHY_INTERFACE_MODE_XGMII, + fbn->phylink_config.supported_interfaces); + __set_bit(PHY_INTERFACE_MODE_XLGMII, + fbn->phylink_config.supported_interfaces); + + phylink = phylink_create(&fbn->phylink_config, NULL, + PHY_INTERFACE_MODE_XLGMII, + &fbnic_phylink_mac_ops); + if (IS_ERR(phylink)) + return PTR_ERR(phylink); + + fbn->phylink = phylink; + + return 0; +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c 
b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c new file mode 100644 index 000000000000..c8aa29fc052b --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.c @@ -0,0 +1,651 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include +#include + +#include "fbnic.h" +#include "fbnic_netdev.h" +#include "fbnic_rpc.h" + +void fbnic_reset_indir_tbl(struct fbnic_net *fbn) +{ + unsigned int num_rx = fbn->num_rx_queues; + unsigned int i; + + for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) { + fbn->indir_tbl[0][i] = ethtool_rxfh_indir_default(i, num_rx); + fbn->indir_tbl[1][i] = ethtool_rxfh_indir_default(i, num_rx); + } +} + +void fbnic_rss_key_fill(u32 *buffer) +{ + static u32 rss_key[FBNIC_RPC_RSS_KEY_DWORD_LEN]; + + net_get_random_once(rss_key, sizeof(rss_key)); + rss_key[FBNIC_RPC_RSS_KEY_LAST_IDX] &= FBNIC_RPC_RSS_KEY_LAST_MASK; + + memcpy(buffer, rss_key, sizeof(rss_key)); +} + +#define RX_HASH_OPT_L4 \ + (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3) +#define RX_HASH_OPT_L3 \ + (RXH_IP_SRC | RXH_IP_DST) +#define RX_HASH_OPT_L2 RXH_L2DA + +void fbnic_rss_init_en_mask(struct fbnic_net *fbn) +{ + fbn->rss_flow_hash[FBNIC_TCP4_HASH_OPT] = RX_HASH_OPT_L4; + fbn->rss_flow_hash[FBNIC_TCP6_HASH_OPT] = RX_HASH_OPT_L4; + + fbn->rss_flow_hash[FBNIC_UDP4_HASH_OPT] = RX_HASH_OPT_L3; + fbn->rss_flow_hash[FBNIC_UDP6_HASH_OPT] = RX_HASH_OPT_L3; + fbn->rss_flow_hash[FBNIC_IPV4_HASH_OPT] = RX_HASH_OPT_L3; + fbn->rss_flow_hash[FBNIC_IPV6_HASH_OPT] = RX_HASH_OPT_L3; + + fbn->rss_flow_hash[FBNIC_ETHER_HASH_OPT] = RX_HASH_OPT_L2; +} + +void fbnic_rss_disable_hw(struct fbnic_dev *fbd) +{ + /* Disable RPC by clearing enable bit and configuration */ + if (!fbnic_bmc_present(fbd)) + wr32(fbd, FBNIC_RPC_RMI_CONFIG, + FIELD_PREP(FBNIC_RPC_RMI_CONFIG_OH_BYTES, 20)); +} + +#define FBNIC_FH_2_RSSEM_BIT(_fh, _rssem, _val) \ + FIELD_PREP(FBNIC_RPC_ACT_TBL1_RSS_ENA_##_rssem, \ + FIELD_GET(RXH_##_fh, _val)) +static u16 fbnic_flow_hash_2_rss_en_mask(struct fbnic_net *fbn, int flow_type) +{ + u32 flow_hash = fbn->rss_flow_hash[flow_type]; + u32 rss_en_mask = 0; + + rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L2DA, L2_DA, flow_hash); + rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_SRC, IP_SRC, flow_hash); + rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(IP_DST, IP_DST, flow_hash); + rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_0_1, L4_SRC, flow_hash); + rss_en_mask |= FBNIC_FH_2_RSSEM_BIT(L4_B_2_3, L4_DST, flow_hash); + + return rss_en_mask; +} + +void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn) +{ + unsigned int i; + + for (i = 0; i < FBNIC_RPC_RSS_TBL_SIZE; i++) { + wr32(fbd, FBNIC_RPC_RSS_TBL(0, i), fbn->indir_tbl[0][i]); + wr32(fbd, FBNIC_RPC_RSS_TBL(1, i), fbn->indir_tbl[1][i]); + } + + for (i = 0; i < FBNIC_RPC_RSS_KEY_DWORD_LEN; i++) + wr32(fbd, FBNIC_RPC_RSS_KEY(i), fbn->rss_key[i]); + + /* Default action for this to drop w/ no destination */ + wr32(fbd, FBNIC_RPC_ACT_TBL0_DEFAULT, FBNIC_RPC_ACT_TBL0_DROP); + wrfl(fbd); + + wr32(fbd, FBNIC_RPC_ACT_TBL1_DEFAULT, 0); + + /* If it isn't already enabled set the RMI Config value to enable RPC */ + wr32(fbd, FBNIC_RPC_RMI_CONFIG, + FIELD_PREP(FBNIC_RPC_RMI_CONFIG_MTU, FBNIC_MAX_JUMBO_FRAME_SIZE) | + FIELD_PREP(FBNIC_RPC_RMI_CONFIG_OH_BYTES, 20) | + FBNIC_RPC_RMI_CONFIG_ENABLE); +} + +void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd, + bool enable_host) +{ + struct fbnic_act_tcam *act_tcam; + struct fbnic_mac_addr *mac_addr; + int j; + + /* We need to add the all multicast filter at the end of the + * multicast 
address list. This way if there are any that are + * shared between the host and the BMC they can be directed to + * both. Otherwise the remainder just get sent directly to the + * BMC. + */ + mac_addr = &fbd->mac_addr[fbd->mac_addr_boundary - 1]; + if (fbnic_bmc_present(fbd) && fbd->fw_cap.all_multi) { + if (mac_addr->state != FBNIC_TCAM_S_VALID) { + eth_zero_addr(mac_addr->value.addr8); + eth_broadcast_addr(mac_addr->mask.addr8); + mac_addr->value.addr8[0] ^= 1; + mac_addr->mask.addr8[0] ^= 1; + set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam); + mac_addr->state = FBNIC_TCAM_S_ADD; + } + if (enable_host) + set_bit(FBNIC_MAC_ADDR_T_ALLMULTI, + mac_addr->act_tcam); + else + clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI, + mac_addr->act_tcam); + } else if (!test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam) && + !is_zero_ether_addr(mac_addr->mask.addr8) && + mac_addr->state == FBNIC_TCAM_S_VALID) { + clear_bit(FBNIC_MAC_ADDR_T_ALLMULTI, mac_addr->act_tcam); + clear_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam); + mac_addr->state = FBNIC_TCAM_S_DELETE; + } + + /* We have to add a special handler for multicast as the + * BMC may have an all-multi rule already in place. As such + * adding a rule ourselves won't do any good so we will have + * to modify the rules for the ALL MULTI below if the BMC + * already has the rule in place. + */ + act_tcam = &fbd->act_tcam[FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET]; + + /* If we are not enabling the rule just delete it. We will fall + * back to the RSS rules that support the multicast addresses. + */ + if (!fbnic_bmc_present(fbd) || !fbd->fw_cap.all_multi || enable_host) { + if (act_tcam->state == FBNIC_TCAM_S_VALID) + act_tcam->state = FBNIC_TCAM_S_DELETE; + return; + } + + /* Rewrite TCAM rule 23 to handle BMC all-multi traffic */ + act_tcam->dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK, + FBNIC_RPC_ACT_TBL0_DEST_BMC); + act_tcam->mask.tcam[0] = 0xffff; + + /* MACDA 0 - 3 is reserved for the BMC MAC address */ + act_tcam->value.tcam[1] = + FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX, + fbd->mac_addr_boundary - 1) | + FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID; + act_tcam->mask.tcam[1] = 0xffff & + ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX & + ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID; + + for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++) + act_tcam->mask.tcam[j] = 0xffff; + + act_tcam->state = FBNIC_TCAM_S_UPDATE; +} + +void fbnic_bmc_rpc_init(struct fbnic_dev *fbd) +{ + int i = FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX; + struct fbnic_act_tcam *act_tcam; + struct fbnic_mac_addr *mac_addr; + int j; + + /* Check if BMC is present */ + if (!fbnic_bmc_present(fbd)) + return; + + /* Fetch BMC MAC addresses from firmware capabilities */ + for (j = 0; j < 4; j++) { + u8 *bmc_mac = fbd->fw_cap.bmc_mac_addr[j]; + + /* Validate BMC MAC addresses */ + if (is_zero_ether_addr(bmc_mac)) + continue; + + if (is_multicast_ether_addr(bmc_mac)) + mac_addr = __fbnic_mc_sync(fbd, bmc_mac); + else + mac_addr = &fbd->mac_addr[i++]; + + if (!mac_addr) { + netdev_err(fbd->netdev, + "No slot for BMC MAC address[%d]\n", j); + continue; + } + + ether_addr_copy(mac_addr->value.addr8, bmc_mac); + eth_zero_addr(mac_addr->mask.addr8); + + set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam); + mac_addr->state = FBNIC_TCAM_S_ADD; + } + + /* Validate Broadcast is also present, record it and tag it */ + mac_addr = &fbd->mac_addr[FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX]; + eth_broadcast_addr(mac_addr->value.addr8); + set_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam); + mac_addr->state = FBNIC_TCAM_S_ADD; + + /* Rewrite TCAM rule 0 
if it isn't present to relocate BMC rules */
+ act_tcam = &fbd->act_tcam[FBNIC_RPC_ACT_TBL_BMC_OFFSET];
+ act_tcam->dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+ act_tcam->mask.tcam[0] = 0xffff;
+
+ /* MACDA 0 - 3 are reserved for the BMC MAC address. To account
+ * for that we have to mask out the lower 2 bits of the MACDA
+ * index by performing an &= with 0x1c.
+ */
+ act_tcam->value.tcam[1] = FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+ act_tcam->mask.tcam[1] = 0xffff &
+ ~FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX, 0x1c) &
+ ~FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID;
+
+ for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++)
+ act_tcam->mask.tcam[j] = 0xffff;
+
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+
+ fbnic_bmc_rpc_all_multi_config(fbd, false);
+}
+
+#define FBNIC_ACT1_INIT(_l4, _udp, _ip, _v6) \
+ (((_l4) ? FBNIC_RPC_TCAM_ACT1_L4_VALID : 0) | \
+ ((_udp) ? FBNIC_RPC_TCAM_ACT1_L4_IS_UDP : 0) | \
+ ((_ip) ? FBNIC_RPC_TCAM_ACT1_IP_VALID : 0) | \
+ ((_v6) ? FBNIC_RPC_TCAM_ACT1_IP_IS_V6 : 0))
+
+void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn)
+{
+ static const u32 act1_value[FBNIC_NUM_HASH_OPT] = {
+ FBNIC_ACT1_INIT(1, 1, 1, 1), /* UDP6 */
+ FBNIC_ACT1_INIT(1, 1, 1, 0), /* UDP4 */
+ FBNIC_ACT1_INIT(1, 0, 1, 1), /* TCP6 */
+ FBNIC_ACT1_INIT(1, 0, 1, 0), /* TCP4 */
+ FBNIC_ACT1_INIT(0, 0, 1, 1), /* IP6 */
+ FBNIC_ACT1_INIT(0, 0, 1, 0), /* IP4 */
+ 0 /* Ether */
+ };
+ unsigned int i;
+
+ /* To support scenarios where a BMC is present we must write the
+ * rules twice, once for the unicast cases, and once again for
+ * the broadcast/multicast cases as we have to support 2 destinations.
+ */
+ BUILD_BUG_ON(FBNIC_RSS_EN_NUM_UNICAST * 2 != FBNIC_RSS_EN_NUM_ENTRIES);
+ BUILD_BUG_ON(ARRAY_SIZE(act1_value) != FBNIC_NUM_HASH_OPT);
+
+ /* Program RSS hash enable mask for host in action TCAM/table. */
+ for (i = fbnic_bmc_present(fbd) ? 0 : FBNIC_RSS_EN_NUM_UNICAST;
+ i < FBNIC_RSS_EN_NUM_ENTRIES; i++) {
+ unsigned int idx = i + FBNIC_RPC_ACT_TBL_RSS_OFFSET;
+ struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[idx];
+ u32 flow_hash, dest, rss_en_mask;
+ int flow_type, j;
+ u16 value = 0;
+
+ flow_type = i % FBNIC_RSS_EN_NUM_UNICAST;
+ flow_hash = fbn->rss_flow_hash[flow_type];
+
+ /* Set DEST_HOST based on absence of RXH_DISCARD */
+ dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ !(RXH_DISCARD & flow_hash) ?
+ FBNIC_RPC_ACT_TBL0_DEST_HOST : 0);
+
+ if (i >= FBNIC_RSS_EN_NUM_UNICAST && fbnic_bmc_present(fbd))
+ dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK,
+ FBNIC_RPC_ACT_TBL0_DEST_BMC);
+
+ if (!dest)
+ dest = FBNIC_RPC_ACT_TBL0_DROP;
+
+ if (act1_value[flow_type] & FBNIC_RPC_TCAM_ACT1_L4_VALID)
+ dest |= FIELD_PREP(FBNIC_RPC_ACT_TBL0_DMA_HINT,
+ FBNIC_RCD_HDR_AL_DMA_HINT_L4);
+
+ rss_en_mask = fbnic_flow_hash_2_rss_en_mask(fbn, flow_type);
+
+ act_tcam->dest = dest;
+ act_tcam->rss_en_mask = rss_en_mask;
+ act_tcam->state = FBNIC_TCAM_S_UPDATE;
+
+ act_tcam->mask.tcam[0] = 0xffff;
+
+ /* We reserve the upper 8 MACDA TCAM entries for host
+ * unicast. So we set the value to 24, and mask the
+ * lower bits so that the lower entries can be used as
+ * multicast or BMC addresses.
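+ * As a result, any MACDA index in the range 24-31 matches the
+ * host unicast rules here, while indices below 24 fall through
+ * to the multicast/BMC handling.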
+ */ + if (i < FBNIC_RSS_EN_NUM_UNICAST) + value = FIELD_PREP(FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX, + fbd->mac_addr_boundary); + value |= FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID; + + flow_type = i % FBNIC_RSS_EN_NUM_UNICAST; + value |= act1_value[flow_type]; + + act_tcam->value.tcam[1] = value; + act_tcam->mask.tcam[1] = ~value; + + for (j = 2; j < FBNIC_RPC_TCAM_ACT_WORD_LEN; j++) + act_tcam->mask.tcam[j] = 0xffff; + + act_tcam->state = FBNIC_TCAM_S_UPDATE; + } +} + +struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd, + const unsigned char *addr) +{ + struct fbnic_mac_addr *avail_addr = NULL; + unsigned int i; + + /* Scan from middle of list to bottom, filling bottom up. + * Skip the first entry which is reserved for dev_addr and + * leave the last entry to use for promiscuous filtering. + */ + for (i = fbd->mac_addr_boundary - 1; + i < FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX; i++) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i]; + + if (mac_addr->state == FBNIC_TCAM_S_DISABLED) { + avail_addr = mac_addr; + } else if (ether_addr_equal(mac_addr->value.addr8, addr)) { + avail_addr = mac_addr; + break; + } + } + + if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) { + ether_addr_copy(avail_addr->value.addr8, addr); + eth_zero_addr(avail_addr->mask.addr8); + avail_addr->state = FBNIC_TCAM_S_ADD; + } + + return avail_addr; +} + +struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd, + const unsigned char *addr) +{ + struct fbnic_mac_addr *avail_addr = NULL; + unsigned int i; + + /* Scan from middle of list to top, filling top down. + * Skip over the address reserved for the BMC MAC and + * exclude index 0 as that belongs to the broadcast address + */ + for (i = fbd->mac_addr_boundary; + --i > FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX;) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i]; + + if (mac_addr->state == FBNIC_TCAM_S_DISABLED) { + avail_addr = mac_addr; + } else if (ether_addr_equal(mac_addr->value.addr8, addr)) { + avail_addr = mac_addr; + break; + } + } + + /* Scan the BMC addresses to see if it may have already + * reserved the address. 
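+ * If it has, the matching entry is copied into the shared region
+ * found above so both the host and the BMC can match on it, and
+ * the BMC-only copy is marked for deletion.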
+ */ + while (--i) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[i]; + + if (!is_zero_ether_addr(mac_addr->mask.addr8)) + continue; + + /* Only move on if we find a match */ + if (!ether_addr_equal(mac_addr->value.addr8, addr)) + continue; + + /* We need to pull this address to the shared area */ + if (avail_addr) { + memcpy(avail_addr, mac_addr, sizeof(*mac_addr)); + mac_addr->state = FBNIC_TCAM_S_DELETE; + avail_addr->state = FBNIC_TCAM_S_ADD; + } + + break; + } + + if (avail_addr && avail_addr->state == FBNIC_TCAM_S_DISABLED) { + ether_addr_copy(avail_addr->value.addr8, addr); + eth_zero_addr(avail_addr->mask.addr8); + avail_addr->state = FBNIC_TCAM_S_ADD; + } + + return avail_addr; +} + +int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx) +{ + if (!test_and_clear_bit(tcam_idx, mac_addr->act_tcam)) + return -ENOENT; + + if (bitmap_empty(mac_addr->act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES)) + mac_addr->state = FBNIC_TCAM_S_DELETE; + + return 0; +} + +void fbnic_sift_macda(struct fbnic_dev *fbd) +{ + int dest, src; + + /* Move BMC only addresses back into BMC region */ + for (dest = FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX, + src = FBNIC_RPC_TCAM_MACDA_MULTICAST_IDX; + ++dest < FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX && + src < fbd->mac_addr_boundary;) { + struct fbnic_mac_addr *dest_addr = &fbd->mac_addr[dest]; + + if (dest_addr->state != FBNIC_TCAM_S_DISABLED) + continue; + + while (src < fbd->mac_addr_boundary) { + struct fbnic_mac_addr *src_addr = &fbd->mac_addr[src++]; + + /* Verify BMC bit is set */ + if (!test_bit(FBNIC_MAC_ADDR_T_BMC, src_addr->act_tcam)) + continue; + + /* Verify filter isn't already disabled */ + if (src_addr->state == FBNIC_TCAM_S_DISABLED || + src_addr->state == FBNIC_TCAM_S_DELETE) + continue; + + /* Verify only BMC bit is set */ + if (bitmap_weight(src_addr->act_tcam, + FBNIC_RPC_TCAM_ACT_NUM_ENTRIES) != 1) + continue; + + /* Verify we are not moving wildcard address */ + if (!is_zero_ether_addr(src_addr->mask.addr8)) + continue; + + memcpy(dest_addr, src_addr, sizeof(*src_addr)); + src_addr->state = FBNIC_TCAM_S_DELETE; + dest_addr->state = FBNIC_TCAM_S_ADD; + } + } +} + +static void fbnic_clear_macda_entry(struct fbnic_dev *fbd, unsigned int idx) +{ + int i; + + /* Invalidate entry and clear addr state info */ + for (i = 0; i <= FBNIC_RPC_TCAM_MACDA_WORD_LEN; i++) + wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i), 0); +} + +static void fbnic_clear_macda(struct fbnic_dev *fbd) +{ + int idx; + + for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx]; + + if (mac_addr->state == FBNIC_TCAM_S_DISABLED) + continue; + + if (test_bit(FBNIC_MAC_ADDR_T_BMC, mac_addr->act_tcam)) { + if (fbnic_bmc_present(fbd)) + continue; + dev_warn_once(fbd->dev, + "Found BMC MAC address w/ BMC not present\n"); + } + + fbnic_clear_macda_entry(fbd, idx); + + /* If rule was already destined for deletion just wipe it now */ + if (mac_addr->state == FBNIC_TCAM_S_DELETE) { + memset(mac_addr, 0, sizeof(*mac_addr)); + continue; + } + + /* Change state to update so that we will rewrite + * this tcam the next time fbnic_write_macda is called. 
+ */ + mac_addr->state = FBNIC_TCAM_S_UPDATE; + } +} + +static void fbnic_write_macda_entry(struct fbnic_dev *fbd, unsigned int idx, + struct fbnic_mac_addr *mac_addr) +{ + __be16 *mask, *value; + int i; + + mask = &mac_addr->mask.addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN - 1]; + value = &mac_addr->value.addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN - 1]; + + for (i = 0; i < FBNIC_RPC_TCAM_MACDA_WORD_LEN; i++) + wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i), + FIELD_PREP(FBNIC_RPC_TCAM_MACDA_MASK, ntohs(*mask--)) | + FIELD_PREP(FBNIC_RPC_TCAM_MACDA_VALUE, ntohs(*value--))); + + wrfl(fbd); + + wr32(fbd, FBNIC_RPC_TCAM_MACDA(idx, i), FBNIC_RPC_TCAM_VALIDATE); +} + +void fbnic_write_macda(struct fbnic_dev *fbd) +{ + int idx; + + for (idx = ARRAY_SIZE(fbd->mac_addr); idx--;) { + struct fbnic_mac_addr *mac_addr = &fbd->mac_addr[idx]; + + /* Check if update flag is set else exit. */ + if (!(mac_addr->state & FBNIC_TCAM_S_UPDATE)) + continue; + + /* Clear by writing 0s. */ + if (mac_addr->state == FBNIC_TCAM_S_DELETE) { + /* Invalidate entry and clear addr state info */ + fbnic_clear_macda_entry(fbd, idx); + memset(mac_addr, 0, sizeof(*mac_addr)); + + continue; + } + + fbnic_write_macda_entry(fbd, idx, mac_addr); + + mac_addr->state = FBNIC_TCAM_S_VALID; + } +} + +static void fbnic_clear_act_tcam(struct fbnic_dev *fbd, unsigned int idx) +{ + int i; + + /* Invalidate entry and clear addr state info */ + for (i = 0; i <= FBNIC_RPC_TCAM_ACT_WORD_LEN; i++) + wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), 0); +} + +void fbnic_clear_rules(struct fbnic_dev *fbd) +{ + u32 dest = FIELD_PREP(FBNIC_RPC_ACT_TBL0_DEST_MASK, + FBNIC_RPC_ACT_TBL0_DEST_BMC); + int i = FBNIC_RPC_TCAM_ACT_NUM_ENTRIES - 1; + struct fbnic_act_tcam *act_tcam; + + /* Clear MAC rules */ + fbnic_clear_macda(fbd); + + /* If BMC is present we need to preserve the last rule which + * will be used to route traffic to the BMC if it is received. + * + * At this point it should be the only MAC address in the MACDA + * so any unicast or multicast traffic received should be routed + * to it. So leave the last rule in place. + * + * It will be rewritten to add the host again when we bring + * the interface back up. 
+ */ + if (fbnic_bmc_present(fbd)) { + act_tcam = &fbd->act_tcam[i]; + + if (act_tcam->state == FBNIC_TCAM_S_VALID && + (act_tcam->dest & dest)) { + wr32(fbd, FBNIC_RPC_ACT_TBL0(i), dest); + wr32(fbd, FBNIC_RPC_ACT_TBL1(i), 0); + + act_tcam->state = FBNIC_TCAM_S_UPDATE; + + i--; + } + } + + /* Work from the bottom up deleting all other rules from hardware */ + do { + act_tcam = &fbd->act_tcam[i]; + + if (act_tcam->state != FBNIC_TCAM_S_VALID) + continue; + + fbnic_clear_act_tcam(fbd, i); + act_tcam->state = FBNIC_TCAM_S_UPDATE; + } while (i--); +} + +static void fbnic_delete_act_tcam(struct fbnic_dev *fbd, unsigned int idx) +{ + fbnic_clear_act_tcam(fbd, idx); + memset(&fbd->act_tcam[idx], 0, sizeof(struct fbnic_act_tcam)); +} + +static void fbnic_update_act_tcam(struct fbnic_dev *fbd, unsigned int idx) +{ + struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[idx]; + int i; + + /* Update entry by writing the destination and RSS mask */ + wr32(fbd, FBNIC_RPC_ACT_TBL0(idx), act_tcam->dest); + wr32(fbd, FBNIC_RPC_ACT_TBL1(idx), act_tcam->rss_en_mask); + + /* Write new TCAM rule to hardware */ + for (i = 0; i < FBNIC_RPC_TCAM_ACT_WORD_LEN; i++) + wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), + FIELD_PREP(FBNIC_RPC_TCAM_ACT_MASK, + act_tcam->mask.tcam[i]) | + FIELD_PREP(FBNIC_RPC_TCAM_ACT_VALUE, + act_tcam->value.tcam[i])); + + wrfl(fbd); + + wr32(fbd, FBNIC_RPC_TCAM_ACT(idx, i), FBNIC_RPC_TCAM_VALIDATE); + act_tcam->state = FBNIC_TCAM_S_VALID; +} + +void fbnic_write_rules(struct fbnic_dev *fbd) +{ + int i; + + /* Flush any pending action table rules */ + for (i = 0; i < FBNIC_RPC_ACT_TBL_NUM_ENTRIES; i++) { + struct fbnic_act_tcam *act_tcam = &fbd->act_tcam[i]; + + /* Check if update flag is set else exit. */ + if (!(act_tcam->state & FBNIC_TCAM_S_UPDATE)) + continue; + + if (act_tcam->state == FBNIC_TCAM_S_DELETE) + fbnic_delete_act_tcam(fbd, i); + else + fbnic_update_act_tcam(fbd, i); + } +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h new file mode 100644 index 000000000000..d62935f722a2 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_rpc.h @@ -0,0 +1,189 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#ifndef _FBNIC_RPC_H_ +#define _FBNIC_RPC_H_ + +#include +#include + +/* The TCAM state definitions follow an expected ordering. 
+ * They start out disabled, then move through the following states: + * Disabled 0 -> Add 2 + * Add 2 -> Valid 1 + * + * Valid 1 -> Add/Update 2 + * Add 2 -> Valid 1 + * + * Valid 1 -> Delete 3 + * Delete 3 -> Disabled 0 + */ +enum { + FBNIC_TCAM_S_DISABLED = 0, + FBNIC_TCAM_S_VALID = 1, + FBNIC_TCAM_S_ADD = 2, + FBNIC_TCAM_S_UPDATE = FBNIC_TCAM_S_ADD, + FBNIC_TCAM_S_DELETE = 3, +}; + +/* 32 MAC Destination Address TCAM Entries + * 4 registers DA[1:0], DA[3:2], DA[5:4], Validate + */ +#define FBNIC_RPC_TCAM_MACDA_WORD_LEN 3 +#define FBNIC_RPC_TCAM_MACDA_NUM_ENTRIES 32 + +#define FBNIC_RPC_TCAM_ACT_WORD_LEN 11 +#define FBNIC_RPC_TCAM_ACT_NUM_ENTRIES 64 + +struct fbnic_mac_addr { + union { + unsigned char addr8[ETH_ALEN]; + __be16 addr16[FBNIC_RPC_TCAM_MACDA_WORD_LEN]; + } mask, value; + unsigned char state; + DECLARE_BITMAP(act_tcam, FBNIC_RPC_TCAM_ACT_NUM_ENTRIES); +}; + +struct fbnic_act_tcam { + struct { + u16 tcam[FBNIC_RPC_TCAM_ACT_WORD_LEN]; + } mask, value; + unsigned char state; + u16 rss_en_mask; + u32 dest; +}; + +enum { + FBNIC_RSS_EN_HOST_UDP6, + FBNIC_RSS_EN_HOST_UDP4, + FBNIC_RSS_EN_HOST_TCP6, + FBNIC_RSS_EN_HOST_TCP4, + FBNIC_RSS_EN_HOST_IP6, + FBNIC_RSS_EN_HOST_IP4, + FBNIC_RSS_EN_HOST_ETHER, + FBNIC_RSS_EN_XCAST_UDP6, +#define FBNIC_RSS_EN_NUM_UNICAST FBNIC_RSS_EN_XCAST_UDP6 + FBNIC_RSS_EN_XCAST_UDP4, + FBNIC_RSS_EN_XCAST_TCP6, + FBNIC_RSS_EN_XCAST_TCP4, + FBNIC_RSS_EN_XCAST_IP6, + FBNIC_RSS_EN_XCAST_IP4, + FBNIC_RSS_EN_XCAST_ETHER, + FBNIC_RSS_EN_NUM_ENTRIES +}; + +/* Reserve the first 2 entries for the use by the BMC so that we can + * avoid allowing rules to get in the way of BMC unicast traffic. + */ +#define FBNIC_RPC_ACT_TBL_BMC_OFFSET 0 +#define FBNIC_RPC_ACT_TBL_BMC_ALL_MULTI_OFFSET 1 + +/* We reserve the last 14 entries for RSS rules on the host. The BMC + * unicast rule will need to be populated above these and is expected to + * use MACDA TCAM entry 23 to store the BMC MAC address. + */ +#define FBNIC_RPC_ACT_TBL_RSS_OFFSET \ + (FBNIC_RPC_ACT_TBL_NUM_ENTRIES - FBNIC_RSS_EN_NUM_ENTRIES) + +/* Flags used to identify the owner for this MAC filter. Note that any + * flags set for Broadcast thru Promisc indicate that the rule belongs + * to the RSS filters for the host. + */ +enum { + FBNIC_MAC_ADDR_T_BMC = 0, + FBNIC_MAC_ADDR_T_BROADCAST = FBNIC_RPC_ACT_TBL_RSS_OFFSET, +#define FBNIC_MAC_ADDR_T_HOST_START FBNIC_MAC_ADDR_T_BROADCAST + FBNIC_MAC_ADDR_T_MULTICAST, + FBNIC_MAC_ADDR_T_UNICAST, + FBNIC_MAC_ADDR_T_ALLMULTI, /* BROADCAST ... MULTICAST*/ + FBNIC_MAC_ADDR_T_PROMISC, /* BROADCAST ... 
UNICAST */
+ FBNIC_MAC_ADDR_T_HOST_LAST
+};
+
+#define FBNIC_MAC_ADDR_T_HOST_LEN \
+ (FBNIC_MAC_ADDR_T_HOST_LAST - FBNIC_MAC_ADDR_T_HOST_START)
+
+#define FBNIC_RPC_TCAM_ACT0_IPSRC_IDX CSR_GENMASK(2, 0)
+#define FBNIC_RPC_TCAM_ACT0_IPSRC_VALID CSR_BIT(3)
+#define FBNIC_RPC_TCAM_ACT0_IPDST_IDX CSR_GENMASK(6, 4)
+#define FBNIC_RPC_TCAM_ACT0_IPDST_VALID CSR_BIT(7)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_IDX CSR_GENMASK(10, 8)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPSRC_VALID CSR_BIT(11)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_IDX CSR_GENMASK(14, 12)
+#define FBNIC_RPC_TCAM_ACT0_OUTER_IPDST_VALID CSR_BIT(15)
+
+#define FBNIC_RPC_TCAM_ACT1_L2_MACDA_IDX CSR_GENMASK(9, 5)
+#define FBNIC_RPC_TCAM_ACT1_L2_MACDA_VALID CSR_BIT(10)
+#define FBNIC_RPC_TCAM_ACT1_IP_IS_V6 CSR_BIT(11)
+#define FBNIC_RPC_TCAM_ACT1_IP_VALID CSR_BIT(12)
+#define FBNIC_RPC_TCAM_ACT1_OUTER_IP_VALID CSR_BIT(13)
+#define FBNIC_RPC_TCAM_ACT1_L4_IS_UDP CSR_BIT(14)
+#define FBNIC_RPC_TCAM_ACT1_L4_VALID CSR_BIT(15)
+
+/* TCAM 0 - 3 reserved for BMC MAC addresses */
+#define FBNIC_RPC_TCAM_MACDA_BMC_ADDR_IDX 0
+/* TCAM 4 reserved for broadcast MAC address */
+#define FBNIC_RPC_TCAM_MACDA_BROADCAST_IDX 4
+/* TCAMs 5 - 30 will be used for multicast and unicast addresses. The
+ * boundary between the two is variable; it is currently set to 24,
+ * which is where the unicast addresses start. The general idea is that
+ * we will always go top-down with unicast, and bottom-up with multicast,
+ * so that there should be free space in the middle between the two.
+ *
+ * The entry at MACDA_DEFAULT_BOUNDARY is a special case as it can be used
+ * for the ALL MULTI address if the list is full, or the BMC has requested
+ * it.
+ */
+#define FBNIC_RPC_TCAM_MACDA_MULTICAST_IDX 5
+#define FBNIC_RPC_TCAM_MACDA_DEFAULT_BOUNDARY 24
+#define FBNIC_RPC_TCAM_MACDA_HOST_ADDR_IDX 30
+/* Reserved for recording Multicast promisc, or Promiscuous */
+#define FBNIC_RPC_TCAM_MACDA_PROMISC_IDX 31
+
+enum {
+ FBNIC_UDP6_HASH_OPT,
+ FBNIC_UDP4_HASH_OPT,
+ FBNIC_TCP6_HASH_OPT,
+ FBNIC_TCP4_HASH_OPT,
+#define FBNIC_L4_HASH_OPT FBNIC_TCP4_HASH_OPT
+ FBNIC_IPV6_HASH_OPT,
+ FBNIC_IPV4_HASH_OPT,
+#define FBNIC_IP_HASH_OPT FBNIC_IPV4_HASH_OPT
+ FBNIC_ETHER_HASH_OPT,
+ FBNIC_NUM_HASH_OPT,
+};
+
+struct fbnic_dev;
+struct fbnic_net;
+
+void fbnic_bmc_rpc_init(struct fbnic_dev *fbd);
+void fbnic_bmc_rpc_all_multi_config(struct fbnic_dev *fbd, bool enable_host);
+
+void fbnic_reset_indir_tbl(struct fbnic_net *fbn);
+void fbnic_rss_key_fill(u32 *buffer);
+void fbnic_rss_init_en_mask(struct fbnic_net *fbn);
+void fbnic_rss_disable_hw(struct fbnic_dev *fbd);
+void fbnic_rss_reinit_hw(struct fbnic_dev *fbd, struct fbnic_net *fbn);
+void fbnic_rss_reinit(struct fbnic_dev *fbd, struct fbnic_net *fbn);
+
+int __fbnic_xc_unsync(struct fbnic_mac_addr *mac_addr, unsigned int tcam_idx);
+struct fbnic_mac_addr *__fbnic_uc_sync(struct fbnic_dev *fbd,
+ const unsigned char *addr);
+struct fbnic_mac_addr *__fbnic_mc_sync(struct fbnic_dev *fbd,
+ const unsigned char *addr);
+void fbnic_sift_macda(struct fbnic_dev *fbd);
+void fbnic_write_macda(struct fbnic_dev *fbd);
+
+static inline int __fbnic_uc_unsync(struct fbnic_mac_addr *mac_addr)
+{
+ return __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_UNICAST);
+}
+
+static inline int __fbnic_mc_unsync(struct fbnic_mac_addr *mac_addr)
+{
+ return __fbnic_xc_unsync(mac_addr, FBNIC_MAC_ADDR_T_MULTICAST);
+}
+
+void fbnic_clear_rules(struct fbnic_dev *fbd);
+void fbnic_write_rules(struct fbnic_dev *fbd);
+#endif /* _FBNIC_RPC_H_ */
diff --git 
a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c new file mode 100644 index 000000000000..2a174ab062a3 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.c @@ -0,0 +1,529 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include +#include +#include +#include +#include +#include + +#include "fbnic_tlv.h" + +/** + * fbnic_tlv_msg_alloc - Allocate page and initialize FW message header + * @msg_id: Identifier for new message we are starting + * + * Return: pointer to start of message, or NULL on failure. + * + * Allocates a page and initializes message header at start of page. + * Initial message size is 1 DWORD which is just the header. + **/ +struct fbnic_tlv_msg *fbnic_tlv_msg_alloc(u16 msg_id) +{ + struct fbnic_tlv_hdr hdr = { 0 }; + struct fbnic_tlv_msg *msg; + + msg = (struct fbnic_tlv_msg *)__get_free_page(GFP_KERNEL); + if (!msg) + return NULL; + + /* Start with zero filled header and then back fill with data */ + hdr.type = msg_id; + hdr.is_msg = 1; + hdr.len = cpu_to_le16(1); + + /* Copy header into start of message */ + msg->hdr = hdr; + + return msg; +} + +/** + * fbnic_tlv_attr_put_flag - Add flag value to message + * @msg: Message header we are adding flag attribute to + * @attr_id: ID of flag attribute we are adding to message + * + * Return: -ENOSPC if there is no room for the attribute. Otherwise 0. + * + * Adds a 1 DWORD flag attribute to the message. The presence of this + * attribute can be used as a boolean value indicating true, otherwise the + * value is considered false. + **/ +int fbnic_tlv_attr_put_flag(struct fbnic_tlv_msg *msg, const u16 attr_id) +{ + int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg); + struct fbnic_tlv_hdr hdr = { 0 }; + struct fbnic_tlv_msg *attr; + + attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32); + if (attr_max_len < sizeof(*attr)) + return -ENOSPC; + + /* Get header pointer and bump attr to start of data */ + attr = &msg[le16_to_cpu(msg->hdr.len)]; + + /* Record attribute type and size */ + hdr.type = attr_id; + hdr.len = cpu_to_le16(sizeof(hdr)); + + attr->hdr = hdr; + le16_add_cpu(&msg->hdr.len, + FBNIC_TLV_MSG_SIZE(le16_to_cpu(hdr.len))); + + return 0; +} + +/** + * fbnic_tlv_attr_put_value - Add data to message + * @msg: Message header we are adding flag attribute to + * @attr_id: ID of flag attribute we are adding to message + * @value: Pointer to data to be stored + * @len: Size of data to be stored. + * + * Return: -ENOSPC if there is no room for the attribute. Otherwise 0. + * + * Adds header and copies data pointed to by value into the message. The + * result is rounded up to the nearest DWORD for sizing so that the + * headers remain aligned. + * + * The assumption is that the value field is in a format where byte + * ordering can be guaranteed such as a byte array or a little endian + * format. 
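+ *
+ * As an illustration (sizes inferred from the code): storing a 6-byte
+ * value sets the attribute header length to 10 bytes (4-byte header
+ * plus 6 bytes of data), which the message accounts for as 3 DWORDs
+ * with the final 2 bytes zero padded.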
+ **/
+int fbnic_tlv_attr_put_value(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ const void *value, const int len)
+{
+ int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg);
+ struct fbnic_tlv_hdr hdr = { 0 };
+ struct fbnic_tlv_msg *attr;
+
+ attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32);
+ if (attr_max_len < sizeof(*attr) + len)
+ return -ENOSPC;
+
+ /* Get header pointer and bump attr to start of data */
+ attr = &msg[le16_to_cpu(msg->hdr.len)];
+
+ /* Record attribute type and size */
+ hdr.type = attr_id;
+ hdr.len = cpu_to_le16(sizeof(hdr) + len);
+
+ /* Zero pad end of region to be written if we aren't aligned */
+ if (len % sizeof(hdr))
+ attr->value[len / sizeof(hdr)] = 0;
+
+ /* Copy data over */
+ memcpy(attr->value, value, len);
+
+ attr->hdr = hdr;
+ le16_add_cpu(&msg->hdr.len,
+ FBNIC_TLV_MSG_SIZE(le16_to_cpu(hdr.len)));
+
+ return 0;
+}
+
+/**
+ * __fbnic_tlv_attr_put_int - Add integer to message
+ * @msg: Message header we are adding the attribute to
+ * @attr_id: ID of the attribute we are adding to the message
+ * @value: Data to be stored
+ * @len: Size of data to be stored, either 4 or 8 bytes.
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds header and copies data pointed to by value into the message. Will
+ * format the data as little endian.
+ **/
+int __fbnic_tlv_attr_put_int(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ s64 value, const int len)
+{
+ __le64 le64_value = cpu_to_le64(value);
+
+ return fbnic_tlv_attr_put_value(msg, attr_id, &le64_value, len);
+}
+
+/**
+ * fbnic_tlv_attr_put_mac_addr - Add mac_addr to message
+ * @msg: Message header we are adding the attribute to
+ * @attr_id: ID of the attribute we are adding to the message
+ * @mac_addr: Byte pointer to MAC address to be stored
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds header and copies data pointed to by mac_addr into the message. Will
+ * copy the address raw so it will be in big endian with start of MAC
+ * address at start of attribute.
+ **/
+int fbnic_tlv_attr_put_mac_addr(struct fbnic_tlv_msg *msg, const u16 attr_id,
+ const u8 *mac_addr)
+{
+ return fbnic_tlv_attr_put_value(msg, attr_id, mac_addr, ETH_ALEN);
+}
+
+/**
+ * fbnic_tlv_attr_put_string - Add string to message
+ * @msg: Message header we are adding the attribute to
+ * @attr_id: ID of the attribute we are adding to the message
+ * @string: Byte pointer to null terminated string to be stored
+ *
+ * Return: -ENOSPC if there is no room for the attribute. Otherwise 0.
+ *
+ * Adds header and copies data pointed to by string into the message. Will
+ * copy the string raw so the byte ordering is preserved.
+ **/
+int fbnic_tlv_attr_put_string(struct fbnic_tlv_msg *msg, u16 attr_id,
+ const char *string)
+{
+ int attr_max_len = PAGE_SIZE - sizeof(*msg);
+ int str_len = 1;
+
+ /* The max length will be the message size minus the length already
+ * used and the new attribute header. Since the message length is
+ * measured in DWORDs we have to multiply the size by 4.
+ *
+ * The string length doesn't include the \0 so we have to add one to
+ * the final value, so start with that as our initial value.
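+ * For example, a hypothetical 7-character string such as "fw.mgmt"
+ * would give a str_len of 8, including the terminating \0.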
+ * + * We will verify if the string will fit in fbnic_tlv_attr_put_value() + */ + attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32); + str_len += strnlen(string, attr_max_len); + + return fbnic_tlv_attr_put_value(msg, attr_id, string, str_len); +} + +/** + * fbnic_tlv_attr_get_unsigned - Retrieve unsigned value from result + * @attr: Attribute to retrieve data from + * + * Return: unsigned 64b value containing integer value + **/ +u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr) +{ + __le64 le64_value = 0; + + memcpy(&le64_value, &attr->value[0], + le16_to_cpu(attr->hdr.len) - sizeof(*attr)); + + return le64_to_cpu(le64_value); +} + +/** + * fbnic_tlv_attr_get_signed - Retrieve signed value from result + * @attr: Attribute to retrieve data from + * + * Return: signed 64b value containing integer value + **/ +s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr) +{ + int shift = (8 + sizeof(*attr) - le16_to_cpu(attr->hdr.len)) * 8; + __le64 le64_value = 0; + s64 value; + + /* Copy the value and adjust for byte ordering */ + memcpy(&le64_value, &attr->value[0], + le16_to_cpu(attr->hdr.len) - sizeof(*attr)); + value = le64_to_cpu(le64_value); + + /* Sign extend the return value by using a pair of shifts */ + return (value << shift) >> shift; +} + +/** + * fbnic_tlv_attr_get_string - Retrieve string value from result + * @attr: Attribute to retrieve data from + * @str: Pointer to an allocated string to store the data + * @max_size: The maximum size which can be in str + * + * Return: the size of the string read from firmware + **/ +size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str, + size_t max_size) +{ + max_size = min_t(size_t, max_size, + (le16_to_cpu(attr->hdr.len) * 4) - sizeof(*attr)); + memcpy(str, &attr->value, max_size); + + return max_size; +} + +/** + * fbnic_tlv_attr_nest_start - Add nested attribute header to message + * @msg: Message header we are adding flag attribute to + * @attr_id: ID of flag attribute we are adding to message + * + * Return: NULL if there is no room for the attribute. Otherwise a pointer + * to the new attribute header. + * + * New header length is stored initially in DWORDs. + **/ +struct fbnic_tlv_msg *fbnic_tlv_attr_nest_start(struct fbnic_tlv_msg *msg, + u16 attr_id) +{ + int attr_max_len = PAGE_SIZE - offset_in_page(msg) - sizeof(*msg); + struct fbnic_tlv_msg *attr = &msg[le16_to_cpu(msg->hdr.len)]; + struct fbnic_tlv_hdr hdr = { 0 }; + + /* Make sure we have space for at least the nest header plus one more */ + attr_max_len -= le16_to_cpu(msg->hdr.len) * sizeof(u32); + if (attr_max_len < sizeof(*attr) * 2) + return NULL; + + /* Record attribute type and size */ + hdr.type = attr_id; + + /* Add current message length to account for consumption within the + * page and leave it as a multiple of DWORDs, we will shift to + * bytes when we close it out. + */ + hdr.len = cpu_to_le16(1); + + attr->hdr = hdr; + + return attr; +} + +/** + * fbnic_tlv_attr_nest_stop - Close out nested attribute and add it to message + * @msg: Message header we are adding flag attribute to + * + * Closes out nested attribute, adds length to message, and then bumps + * length from DWORDs to bytes to match other attributes. 
+ **/ +void fbnic_tlv_attr_nest_stop(struct fbnic_tlv_msg *msg) +{ + struct fbnic_tlv_msg *attr = &msg[le16_to_cpu(msg->hdr.len)]; + u16 len = le16_to_cpu(attr->hdr.len); + + /* Add attribute to message if there is more than just a header */ + if (len <= 1) + return; + + le16_add_cpu(&msg->hdr.len, len); + + /* Convert from DWORDs to bytes */ + attr->hdr.len = cpu_to_le16(len * sizeof(u32)); +} + +static int +fbnic_tlv_attr_validate(struct fbnic_tlv_msg *attr, + const struct fbnic_tlv_index *tlv_index) +{ + u16 len = le16_to_cpu(attr->hdr.len) - sizeof(*attr); + u16 attr_id = attr->hdr.type; + __le32 *value = &attr->value[0]; + + if (attr->hdr.is_msg) + return -EINVAL; + + if (attr_id >= FBNIC_TLV_RESULTS_MAX) + return -EINVAL; + + while (tlv_index->id != attr_id) { + if (tlv_index->id == FBNIC_TLV_ATTR_ID_UNKNOWN) { + if (attr->hdr.cannot_ignore) + return -ENOENT; + return le16_to_cpu(attr->hdr.len); + } + + tlv_index++; + } + + if (offset_in_page(attr) + len > PAGE_SIZE - sizeof(*attr)) + return -E2BIG; + + switch (tlv_index->type) { + case FBNIC_TLV_STRING: + if (!len || len > tlv_index->len) + return -EINVAL; + if (((char *)value)[len - 1]) + return -EINVAL; + break; + case FBNIC_TLV_FLAG: + if (len) + return -EINVAL; + break; + case FBNIC_TLV_UNSIGNED: + case FBNIC_TLV_SIGNED: + if (tlv_index->len > sizeof(__le64)) + return -EINVAL; + fallthrough; + case FBNIC_TLV_BINARY: + if (!len || len > tlv_index->len) + return -EINVAL; + break; + case FBNIC_TLV_NESTED: + case FBNIC_TLV_ARRAY: + if (len % 4) + return -EINVAL; + break; + default: + return -EINVAL; + } + + return 0; +} + +/** + * fbnic_tlv_attr_parse_array - Parse array of attributes into results array + * @attr: Start of attributes in the message + * @len: Length of attributes in the message + * @results: Array of pointers to store the results of parsing + * @tlv_index: List of TLV attributes to be parsed from message + * @tlv_attr_id: Specific ID that is repeated in array + * @array_len: Number of results to store in results array + * + * Return: zero on success, or negative value on error. + * + * Will take a list of attributes and a parser definition and will capture + * the results in the results array to have the data extracted later. + **/ +int fbnic_tlv_attr_parse_array(struct fbnic_tlv_msg *attr, int len, + struct fbnic_tlv_msg **results, + const struct fbnic_tlv_index *tlv_index, + u16 tlv_attr_id, size_t array_len) +{ + int i = 0; + + /* Initialize results table to NULL. */ + memset(results, 0, array_len * sizeof(results[0])); + + /* Nothing to parse if header was only thing there */ + if (!len) + return 0; + + /* Work through list of attributes, parsing them as necessary */ + while (len > 0) { + u16 attr_id = attr->hdr.type; + u16 attr_len; + int err; + + if (tlv_attr_id != attr_id) + return -EINVAL; + + /* Stop parsing on full error */ + err = fbnic_tlv_attr_validate(attr, tlv_index); + if (err < 0) + return err; + + if (i >= array_len) + return -ENOSPC; + + results[i++] = attr; + + attr_len = FBNIC_TLV_MSG_SIZE(le16_to_cpu(attr->hdr.len)); + len -= attr_len; + attr += attr_len; + } + + return len == 0 ? 0 : -EINVAL; +} + +/** + * fbnic_tlv_attr_parse - Parse attributes into a list of attribute results + * @attr: Start of attributes in the message + * @len: Length of attributes in the message + * @results: Array of pointers to store the results of parsing + * @tlv_index: List of TLV attributes to be parsed from message + * + * Return: zero on success, or negative value on error. 
+ * + * Will take a list of attributes and a parser definition and will capture + * the results in the results array to have the data extracted later. + **/ +int fbnic_tlv_attr_parse(struct fbnic_tlv_msg *attr, int len, + struct fbnic_tlv_msg **results, + const struct fbnic_tlv_index *tlv_index) +{ + /* Initialize results table to NULL. */ + memset(results, 0, sizeof(results[0]) * FBNIC_TLV_RESULTS_MAX); + + /* Nothing to parse if header was only thing there */ + if (!len) + return 0; + + /* Work through list of attributes, parsing them as necessary */ + while (len > 0) { + int err = fbnic_tlv_attr_validate(attr, tlv_index); + u16 attr_id = attr->hdr.type; + u16 attr_len; + + /* Stop parsing on full error */ + if (err < 0) + return err; + + /* Ignore results for unsupported values */ + if (!err) { + /* Do not overwrite existing entries */ + if (results[attr_id]) + return -EADDRINUSE; + + results[attr_id] = attr; + } + + attr_len = FBNIC_TLV_MSG_SIZE(le16_to_cpu(attr->hdr.len)); + len -= attr_len; + attr += attr_len; + } + + return len == 0 ? 0 : -EINVAL; +} + +/** + * fbnic_tlv_msg_parse - Parse message and process via predetermined functions + * @opaque: Value passed to parser function to enable driver access + * @msg: Message to be parsed. + * @parser: TLV message parser definition. + * + * Return: zero on success, or negative value on error. + * + * Will take a message a number of message types via the attribute parsing + * definitions and function provided for the parser array. + **/ +int fbnic_tlv_msg_parse(void *opaque, struct fbnic_tlv_msg *msg, + const struct fbnic_tlv_parser *parser) +{ + struct fbnic_tlv_msg *results[FBNIC_TLV_RESULTS_MAX]; + u16 msg_id = msg->hdr.type; + int err; + + if (!msg->hdr.is_msg) + return -EINVAL; + + if (le16_to_cpu(msg->hdr.len) > PAGE_SIZE / sizeof(u32)) + return -E2BIG; + + while (parser->id != msg_id) { + if (parser->id == FBNIC_TLV_MSG_ID_UNKNOWN) + return -ENOENT; + parser++; + } + + err = fbnic_tlv_attr_parse(&msg[1], le16_to_cpu(msg->hdr.len) - 1, + results, parser->attr); + if (err) + return err; + + return parser->func(opaque, results); +} + +/** + * fbnic_tlv_parser_error - called if message doesn't match known type + * @opaque: (unused) + * @results: (unused) + * + * Return: -EBADMSG to indicate the message is an unsupported type + **/ +int fbnic_tlv_parser_error(void *opaque, struct fbnic_tlv_msg **results) +{ + return -EBADMSG; +} + +void fbnic_tlv_attr_addr_copy(u8 *dest, struct fbnic_tlv_msg *src) +{ + u8 *mac_addr; + + mac_addr = fbnic_tlv_attr_get_value_ptr(src); + memcpy(dest, mac_addr, ETH_ALEN); +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h new file mode 100644 index 000000000000..67300ab44353 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_tlv.h @@ -0,0 +1,175 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#ifndef _FBNIC_TLV_H_ +#define _FBNIC_TLV_H_ + +#include +#include +#include +#include + +#define FBNIC_TLV_MSG_ALIGN(len) ALIGN(len, sizeof(u32)) +#define FBNIC_TLV_MSG_SIZE(len) \ + (FBNIC_TLV_MSG_ALIGN(len) / sizeof(u32)) + +/* TLV Header Format + * 3 2 1 + * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * | Length |M|I|RSV| Type / ID | + * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + * + * The TLV header format described above will be used for transferring + * messages between the host and the firmware. To ensure byte ordering + * we have defined all fields as being little endian. + * Type/ID: Identifier for message and/or attribute + * RSV: Reserved field for future use, likely as additional flags + * I: cannot_ignore flag, identifies if unrecognized attribute can be ignored + * M: is_msg, indicates that this is the start of a new message + * Length: Total length of message in dwords including header + * or + * Total length of attribute in bytes including header + */ +struct fbnic_tlv_hdr { +#if defined(__LITTLE_ENDIAN_BITFIELD) + u16 type : 12; /* 0 .. 11 Type / ID */ + u16 rsvd : 2; /* 12 .. 13 Reserved for future use */ + u16 cannot_ignore : 1; /* 14 Attribute can be ignored */ + u16 is_msg : 1; /* 15 Header belongs to message */ +#elif defined(__BIG_ENDIAN_BITFIELD) + u16 is_msg : 1; /* 15 Header belongs to message */ + u16 cannot_ignore : 1; /* 14 Attribute can be ignored */ + u16 rsvd : 2; /* 13 .. 12 Reserved for future use */ + u16 type : 12; /* 11 .. 0 Type / ID */ +#else +#error "Missing defines from byteorder.h" +#endif + __le16 len; /* 16 .. 32 length including TLV header */ +}; + +#define FBNIC_TLV_RESULTS_MAX 32 + +struct fbnic_tlv_msg { + struct fbnic_tlv_hdr hdr; + __le32 value[]; +}; + +#define FBNIC_TLV_MSG_ID_UNKNOWN USHRT_MAX + +enum fbnic_tlv_type { + FBNIC_TLV_STRING, + FBNIC_TLV_FLAG, + FBNIC_TLV_UNSIGNED, + FBNIC_TLV_SIGNED, + FBNIC_TLV_BINARY, + FBNIC_TLV_NESTED, + FBNIC_TLV_ARRAY, + __FBNIC_TLV_MAX_TYPE +}; + +/* TLV Index + * Defines the relationship between the attribute IDs and their types. + * For each entry in the index there will be a size and type associated + * with it so that we can use this to parse the data and verify it matches + * the expected layout. 
+ */ +struct fbnic_tlv_index { + u16 id; + u16 len; + enum fbnic_tlv_type type; +}; + +#define TLV_MAX_DATA (PAGE_SIZE - 512) +#define FBNIC_TLV_ATTR_ID_UNKNOWN USHRT_MAX +#define FBNIC_TLV_ATTR_STRING(id, len) { id, len, FBNIC_TLV_STRING } +#define FBNIC_TLV_ATTR_FLAG(id) { id, 0, FBNIC_TLV_FLAG } +#define FBNIC_TLV_ATTR_U32(id) { id, sizeof(u32), FBNIC_TLV_UNSIGNED } +#define FBNIC_TLV_ATTR_U64(id) { id, sizeof(u64), FBNIC_TLV_UNSIGNED } +#define FBNIC_TLV_ATTR_S32(id) { id, sizeof(s32), FBNIC_TLV_SIGNED } +#define FBNIC_TLV_ATTR_S64(id) { id, sizeof(s64), FBNIC_TLV_SIGNED } +#define FBNIC_TLV_ATTR_MAC_ADDR(id) { id, ETH_ALEN, FBNIC_TLV_BINARY } +#define FBNIC_TLV_ATTR_NESTED(id) { id, 0, FBNIC_TLV_NESTED } +#define FBNIC_TLV_ATTR_ARRAY(id) { id, 0, FBNIC_TLV_ARRAY } +#define FBNIC_TLV_ATTR_RAW_DATA(id) { id, TLV_MAX_DATA, FBNIC_TLV_BINARY } +#define FBNIC_TLV_ATTR_LAST { FBNIC_TLV_ATTR_ID_UNKNOWN, 0, 0 } + +struct fbnic_tlv_parser { + u16 id; + const struct fbnic_tlv_index *attr; + int (*func)(void *opaque, + struct fbnic_tlv_msg **results); +}; + +#define FBNIC_TLV_PARSER(id, attr, func) { FBNIC_TLV_MSG_ID_##id, attr, func } + +static inline void * +fbnic_tlv_attr_get_value_ptr(struct fbnic_tlv_msg *attr) +{ + return (void *)&attr->value[0]; +} + +static inline bool fbnic_tlv_attr_get_bool(struct fbnic_tlv_msg *attr) +{ + return !!attr; +} + +u64 fbnic_tlv_attr_get_unsigned(struct fbnic_tlv_msg *attr); +s64 fbnic_tlv_attr_get_signed(struct fbnic_tlv_msg *attr); +size_t fbnic_tlv_attr_get_string(struct fbnic_tlv_msg *attr, char *str, + size_t max_size); + +#define get_unsigned_result(id, location) \ +do { \ + struct fbnic_tlv_msg *result = results[id]; \ + if (result) \ + location = fbnic_tlv_attr_get_unsigned(result); \ +} while (0) + +#define get_signed_result(id, location) \ +do { \ + struct fbnic_tlv_msg *result = results[id]; \ + if (result) \ + location = fbnic_tlv_attr_get_signed(result); \ +} while (0) + +#define get_string_result(id, size, str, max_size) \ +do { \ + struct fbnic_tlv_msg *result = results[id]; \ + if (result) \ + size = fbnic_tlv_attr_get_string(result, str, max_size); \ +} while (0) + +#define get_bool(id) (!!(results[id])) + +struct fbnic_tlv_msg *fbnic_tlv_msg_alloc(u16 msg_id); +int fbnic_tlv_attr_put_flag(struct fbnic_tlv_msg *msg, const u16 attr_id); +int fbnic_tlv_attr_put_value(struct fbnic_tlv_msg *msg, const u16 attr_id, + const void *value, const int len); +int __fbnic_tlv_attr_put_int(struct fbnic_tlv_msg *msg, const u16 attr_id, + s64 value, const int len); +#define fbnic_tlv_attr_put_int(msg, attr_id, value) \ + __fbnic_tlv_attr_put_int(msg, attr_id, value, \ + FBNIC_TLV_MSG_ALIGN(sizeof(value))) +int fbnic_tlv_attr_put_mac_addr(struct fbnic_tlv_msg *msg, const u16 attr_id, + const u8 *mac_addr); +int fbnic_tlv_attr_put_string(struct fbnic_tlv_msg *msg, u16 attr_id, + const char *string); +struct fbnic_tlv_msg *fbnic_tlv_attr_nest_start(struct fbnic_tlv_msg *msg, + u16 attr_id); +void fbnic_tlv_attr_nest_stop(struct fbnic_tlv_msg *msg); +void fbnic_tlv_attr_addr_copy(u8 *dest, struct fbnic_tlv_msg *src); +int fbnic_tlv_attr_parse_array(struct fbnic_tlv_msg *attr, int len, + struct fbnic_tlv_msg **results, + const struct fbnic_tlv_index *tlv_index, + u16 tlv_attr_id, size_t array_len); +int fbnic_tlv_attr_parse(struct fbnic_tlv_msg *attr, int len, + struct fbnic_tlv_msg **results, + const struct fbnic_tlv_index *tlv_index); +int fbnic_tlv_msg_parse(void *opaque, struct fbnic_tlv_msg *msg, + const struct fbnic_tlv_parser *parser); +int 
fbnic_tlv_parser_error(void *opaque, struct fbnic_tlv_msg **results); + +#define FBNIC_TLV_MSG_ERROR \ + FBNIC_TLV_PARSER(UNKNOWN, NULL, fbnic_tlv_parser_error) +#endif /* _FBNIC_TLV_H_ */ diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c new file mode 100644 index 000000000000..0ed4c9fff5d8 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.c @@ -0,0 +1,1913 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) Meta Platforms, Inc. and affiliates. */ + +#include +#include +#include +#include +#include + +#include "fbnic.h" +#include "fbnic_csr.h" +#include "fbnic_netdev.h" +#include "fbnic_txrx.h" + +struct fbnic_xmit_cb { + u32 bytecount; + u8 desc_count; + int hw_head; +}; + +#define FBNIC_XMIT_CB(__skb) ((struct fbnic_xmit_cb *)((__skb)->cb)) + +static u32 __iomem *fbnic_ring_csr_base(const struct fbnic_ring *ring) +{ + unsigned long csr_base = (unsigned long)ring->doorbell; + + csr_base &= ~(FBNIC_QUEUE_STRIDE * sizeof(u32) - 1); + + return (u32 __iomem *)csr_base; +} + +static u32 fbnic_ring_rd32(struct fbnic_ring *ring, unsigned int csr) +{ + u32 __iomem *csr_base = fbnic_ring_csr_base(ring); + + return readl(csr_base + csr); +} + +static void fbnic_ring_wr32(struct fbnic_ring *ring, unsigned int csr, u32 val) +{ + u32 __iomem *csr_base = fbnic_ring_csr_base(ring); + + writel(val, csr_base + csr); +} + +static unsigned int fbnic_desc_unused(struct fbnic_ring *ring) +{ + return (ring->head - ring->tail - 1) & ring->size_mask; +} + +static unsigned int fbnic_desc_used(struct fbnic_ring *ring) +{ + return (ring->tail - ring->head) & ring->size_mask; +} + +static struct netdev_queue *txring_txq(const struct net_device *dev, + const struct fbnic_ring *ring) +{ + return netdev_get_tx_queue(dev, ring->q_idx); +} + +static int fbnic_maybe_stop_tx(const struct net_device *dev, + struct fbnic_ring *ring, + const unsigned int size) +{ + struct netdev_queue *txq = txring_txq(dev, ring); + int res; + + res = netif_txq_maybe_stop(txq, fbnic_desc_unused(ring), size, + FBNIC_TX_DESC_WAKEUP); + + return !res; +} + +static bool fbnic_tx_sent_queue(struct sk_buff *skb, struct fbnic_ring *ring) +{ + struct netdev_queue *dev_queue = txring_txq(skb->dev, ring); + unsigned int bytecount = FBNIC_XMIT_CB(skb)->bytecount; + bool xmit_more = netdev_xmit_more(); + + /* TBD: Request completion more often if xmit_more becomes large */ + + return __netdev_tx_sent_queue(dev_queue, bytecount, xmit_more); +} + +static void fbnic_unmap_single_twd(struct device *dev, __le64 *twd) +{ + u64 raw_twd = le64_to_cpu(*twd); + unsigned int len; + dma_addr_t dma; + + dma = FIELD_GET(FBNIC_TWD_ADDR_MASK, raw_twd); + len = FIELD_GET(FBNIC_TWD_LEN_MASK, raw_twd); + + dma_unmap_single(dev, dma, len, DMA_TO_DEVICE); +} + +static void fbnic_unmap_page_twd(struct device *dev, __le64 *twd) +{ + u64 raw_twd = le64_to_cpu(*twd); + unsigned int len; + dma_addr_t dma; + + dma = FIELD_GET(FBNIC_TWD_ADDR_MASK, raw_twd); + len = FIELD_GET(FBNIC_TWD_LEN_MASK, raw_twd); + + dma_unmap_page(dev, dma, len, DMA_TO_DEVICE); +} + +#define FBNIC_TWD_TYPE(_type) \ + cpu_to_le64(FIELD_PREP(FBNIC_TWD_TYPE_MASK, FBNIC_TWD_TYPE_##_type)) + +static bool +fbnic_tx_offloads(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta) +{ + unsigned int l2len, i3len; + + if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) + return false; + + l2len = skb_mac_header_len(skb); + i3len = skb_checksum_start(skb) - skb_network_header(skb); + + *meta |= 
cpu_to_le64(FIELD_PREP(FBNIC_TWD_CSUM_OFFSET_MASK, + skb->csum_offset / 2)); + + *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_CSO); + + *meta |= cpu_to_le64(FIELD_PREP(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) | + FIELD_PREP(FBNIC_TWD_L3_IHLEN_MASK, i3len / 2)); + return false; +} + +static void +fbnic_rx_csum(u64 rcd, struct sk_buff *skb, struct fbnic_ring *rcq) +{ + skb_checksum_none_assert(skb); + + if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) + return; + + if (FIELD_GET(FBNIC_RCD_META_L4_CSUM_UNNECESSARY, rcd)) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + u16 csum = FIELD_GET(FBNIC_RCD_META_L2_CSUM_MASK, rcd); + + skb->ip_summed = CHECKSUM_COMPLETE; + skb->csum = (__force __wsum)csum; + } +} + +static bool +fbnic_tx_map(struct fbnic_ring *ring, struct sk_buff *skb, __le64 *meta) +{ + struct device *dev = skb->dev->dev.parent; + unsigned int tail = ring->tail, first; + unsigned int size, data_len; + skb_frag_t *frag; + dma_addr_t dma; + __le64 *twd; + + ring->tx_buf[tail] = skb; + + tail++; + tail &= ring->size_mask; + first = tail; + + size = skb_headlen(skb); + data_len = skb->data_len; + + if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK)) + goto dma_error; + + dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); + + for (frag = &skb_shinfo(skb)->frags[0];; frag++) { + twd = &ring->desc[tail]; + + if (dma_mapping_error(dev, dma)) + goto dma_error; + + *twd = cpu_to_le64(FIELD_PREP(FBNIC_TWD_ADDR_MASK, dma) | + FIELD_PREP(FBNIC_TWD_LEN_MASK, size) | + FIELD_PREP(FBNIC_TWD_TYPE_MASK, + FBNIC_TWD_TYPE_AL)); + + tail++; + tail &= ring->size_mask; + + if (!data_len) + break; + + size = skb_frag_size(frag); + data_len -= size; + + if (size > FIELD_MAX(FBNIC_TWD_LEN_MASK)) + goto dma_error; + + dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE); + } + + *twd |= FBNIC_TWD_TYPE(LAST_AL); + + FBNIC_XMIT_CB(skb)->desc_count = ((twd - meta) + 1) & ring->size_mask; + + ring->tail = tail; + + /* Verify there is room for another packet */ + fbnic_maybe_stop_tx(skb->dev, ring, FBNIC_MAX_SKB_DESC); + + if (fbnic_tx_sent_queue(skb, ring)) { + *meta |= cpu_to_le64(FBNIC_TWD_FLAG_REQ_COMPLETION); + + /* Force DMA writes to flush before writing to tail */ + dma_wmb(); + + writel(tail, ring->doorbell); + } + + return false; +dma_error: + if (net_ratelimit()) + netdev_err(skb->dev, "TX DMA map failed\n"); + + while (tail != first) { + tail--; + tail &= ring->size_mask; + twd = &ring->desc[tail]; + if (tail == first) + fbnic_unmap_single_twd(dev, twd); + else + fbnic_unmap_page_twd(dev, twd); + } + + return true; +} + +#define FBNIC_MIN_FRAME_LEN 60 + +static netdev_tx_t +fbnic_xmit_frame_ring(struct sk_buff *skb, struct fbnic_ring *ring) +{ + __le64 *meta = &ring->desc[ring->tail]; + u16 desc_needed; + + if (skb_put_padto(skb, FBNIC_MIN_FRAME_LEN)) + goto err_count; + + /* Need: 1 descriptor per page, + * + 1 desc for skb_head, + * + 2 desc for metadata and timestamp metadata + * + 7 desc gap to keep tail from touching head + * otherwise try next time + */ + desc_needed = skb_shinfo(skb)->nr_frags + 10; + if (fbnic_maybe_stop_tx(skb->dev, ring, desc_needed)) + return NETDEV_TX_BUSY; + + *meta = cpu_to_le64(FBNIC_TWD_FLAG_DEST_MAC); + + /* Write all members within DWORD to condense this into 2 4B writes */ + FBNIC_XMIT_CB(skb)->bytecount = skb->len; + FBNIC_XMIT_CB(skb)->desc_count = 0; + + if (fbnic_tx_offloads(ring, skb, meta)) + goto err_free; + + if (fbnic_tx_map(ring, skb, meta)) + goto err_free; + + return NETDEV_TX_OK; + +err_free: + dev_kfree_skb_any(skb); +err_count: + return 
NETDEV_TX_OK; +} + +netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev) +{ + struct fbnic_net *fbn = netdev_priv(dev); + unsigned int q_map = skb->queue_mapping; + + return fbnic_xmit_frame_ring(skb, fbn->tx[q_map]); +} + +netdev_features_t +fbnic_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + unsigned int l2len, l3len; + + if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) + return features; + + l2len = skb_mac_header_len(skb); + l3len = skb_checksum_start(skb) - skb_network_header(skb); + + /* Check header lengths are multiple of 2. + * In case of 6in6 we support longer headers (IHLEN + OHLEN) + * but keep things simple for now, 512B is plenty. + */ + if ((l2len | l3len | skb->csum_offset) % 2 || + !FIELD_FIT(FBNIC_TWD_L2_HLEN_MASK, l2len / 2) || + !FIELD_FIT(FBNIC_TWD_L3_IHLEN_MASK, l3len / 2) || + !FIELD_FIT(FBNIC_TWD_CSUM_OFFSET_MASK, skb->csum_offset / 2)) + return features & ~NETIF_F_CSUM_MASK; + + return features; +} + +static void fbnic_clean_twq0(struct fbnic_napi_vector *nv, int napi_budget, + struct fbnic_ring *ring, bool discard, + unsigned int hw_head) +{ + u64 total_bytes = 0, total_packets = 0; + unsigned int head = ring->head; + struct netdev_queue *txq; + unsigned int clean_desc; + + clean_desc = (hw_head - head) & ring->size_mask; + + while (clean_desc) { + struct sk_buff *skb = ring->tx_buf[head]; + unsigned int desc_cnt; + + desc_cnt = FBNIC_XMIT_CB(skb)->desc_count; + if (desc_cnt > clean_desc) + break; + + ring->tx_buf[head] = NULL; + + clean_desc -= desc_cnt; + + while (!(ring->desc[head] & FBNIC_TWD_TYPE(AL))) { + head++; + head &= ring->size_mask; + desc_cnt--; + } + + fbnic_unmap_single_twd(nv->dev, &ring->desc[head]); + head++; + head &= ring->size_mask; + desc_cnt--; + + while (desc_cnt--) { + fbnic_unmap_page_twd(nv->dev, &ring->desc[head]); + head++; + head &= ring->size_mask; + } + + total_bytes += FBNIC_XMIT_CB(skb)->bytecount; + total_packets += 1; + + napi_consume_skb(skb, napi_budget); + } + + if (!total_bytes) + return; + + ring->head = head; + + txq = txring_txq(nv->napi.dev, ring); + + if (unlikely(discard)) { + netdev_tx_completed_queue(txq, total_packets, total_bytes); + return; + } + + netif_txq_completed_wake(txq, total_packets, total_bytes, + fbnic_desc_unused(ring), + FBNIC_TX_DESC_WAKEUP); +} + +static void fbnic_page_pool_init(struct fbnic_ring *ring, unsigned int idx, + struct page *page) +{ + struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx]; + + page_pool_fragment_page(page, PAGECNT_BIAS_MAX); + rx_buf->pagecnt_bias = PAGECNT_BIAS_MAX; + rx_buf->page = page; +} + +static struct page *fbnic_page_pool_get(struct fbnic_ring *ring, + unsigned int idx) +{ + struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx]; + + rx_buf->pagecnt_bias--; + + return rx_buf->page; +} + +static void fbnic_page_pool_drain(struct fbnic_ring *ring, unsigned int idx, + struct fbnic_napi_vector *nv, int budget) +{ + struct fbnic_rx_buf *rx_buf = &ring->rx_buf[idx]; + struct page *page = rx_buf->page; + + if (!page_pool_unref_page(page, rx_buf->pagecnt_bias)) + page_pool_put_unrefed_page(nv->page_pool, page, -1, !!budget); + + rx_buf->page = NULL; +} + +static void fbnic_clean_twq(struct fbnic_napi_vector *nv, int napi_budget, + struct fbnic_q_triad *qt, s32 head0) +{ + if (head0 >= 0) + fbnic_clean_twq0(nv, napi_budget, &qt->sub0, false, head0); +} + +static void +fbnic_clean_tcq(struct fbnic_napi_vector *nv, struct fbnic_q_triad *qt, + int napi_budget) +{ + struct fbnic_ring *cmpl = &qt->cmpl; + __le64 
*raw_tcd, done; + u32 head = cmpl->head; + s32 head0 = -1; + + done = (head & (cmpl->size_mask + 1)) ? 0 : cpu_to_le64(FBNIC_TCD_DONE); + raw_tcd = &cmpl->desc[head & cmpl->size_mask]; + + /* Walk the completion queue collecting the heads reported by NIC */ + while ((*raw_tcd & cpu_to_le64(FBNIC_TCD_DONE)) == done) { + u64 tcd; + + dma_rmb(); + + tcd = le64_to_cpu(*raw_tcd); + + switch (FIELD_GET(FBNIC_TCD_TYPE_MASK, tcd)) { + case FBNIC_TCD_TYPE_0: + if (!(tcd & FBNIC_TCD_TWQ1)) + head0 = FIELD_GET(FBNIC_TCD_TYPE0_HEAD0_MASK, + tcd); + /* Currently all err status bits are related to + * timestamps and as those have yet to be added + * they are skipped for now. + */ + break; + default: + break; + } + + raw_tcd++; + head++; + if (!(head & cmpl->size_mask)) { + done ^= cpu_to_le64(FBNIC_TCD_DONE); + raw_tcd = &cmpl->desc[0]; + } + } + + /* Record the current head/tail of the queue */ + if (cmpl->head != head) { + cmpl->head = head; + writel(head & cmpl->size_mask, cmpl->doorbell); + } + + /* Unmap and free processed buffers */ + fbnic_clean_twq(nv, napi_budget, qt, head0); +} + +static void fbnic_clean_bdq(struct fbnic_napi_vector *nv, int napi_budget, + struct fbnic_ring *ring, unsigned int hw_head) +{ + unsigned int head = ring->head; + + if (head == hw_head) + return; + + do { + fbnic_page_pool_drain(ring, head, nv, napi_budget); + + head++; + head &= ring->size_mask; + } while (head != hw_head); + + ring->head = head; +} + +static void fbnic_bd_prep(struct fbnic_ring *bdq, u16 id, struct page *page) +{ + __le64 *bdq_desc = &bdq->desc[id * FBNIC_BD_FRAG_COUNT]; + dma_addr_t dma = page_pool_get_dma_addr(page); + u64 bd, i = FBNIC_BD_FRAG_COUNT; + + bd = (FBNIC_BD_PAGE_ADDR_MASK & dma) | + FIELD_PREP(FBNIC_BD_PAGE_ID_MASK, id); + + /* In the case that a page size is larger than 4K we will map a + * single page to multiple fragments. The fragments will be + * FBNIC_BD_FRAG_COUNT in size and the lower n bits will be use + * to indicate the individual fragment IDs. + */ + do { + *bdq_desc = cpu_to_le64(bd); + bd += FIELD_PREP(FBNIC_BD_DESC_ADDR_MASK, 1) | + FIELD_PREP(FBNIC_BD_DESC_ID_MASK, 1); + } while (--i); +} + +static void fbnic_fill_bdq(struct fbnic_napi_vector *nv, struct fbnic_ring *bdq) +{ + unsigned int count = fbnic_desc_unused(bdq); + unsigned int i = bdq->tail; + + if (!count) + return; + + do { + struct page *page; + + page = page_pool_dev_alloc_pages(nv->page_pool); + if (!page) + break; + + fbnic_page_pool_init(bdq, i, page); + fbnic_bd_prep(bdq, i, page); + + i++; + i &= bdq->size_mask; + + count--; + } while (count); + + if (bdq->tail != i) { + bdq->tail = i; + + /* Force DMA writes to flush before writing to tail */ + dma_wmb(); + + writel(i, bdq->doorbell); + } +} + +static unsigned int fbnic_hdr_pg_start(unsigned int pg_off) +{ + /* The headroom of the first header may be larger than FBNIC_RX_HROOM + * due to alignment. So account for that by just making the page + * offset 0 if we are starting at the first header. + */ + if (ALIGN(FBNIC_RX_HROOM, 128) > FBNIC_RX_HROOM && + pg_off == ALIGN(FBNIC_RX_HROOM, 128)) + return 0; + + return pg_off - FBNIC_RX_HROOM; +} + +static unsigned int fbnic_hdr_pg_end(unsigned int pg_off, unsigned int len) +{ + /* Determine the end of the buffer by finding the start of the next + * and then subtracting the headroom from that frame. 
+ */ + pg_off += len + FBNIC_RX_TROOM + FBNIC_RX_HROOM; + + return ALIGN(pg_off, 128) - FBNIC_RX_HROOM; +} + +static void fbnic_pkt_prepare(struct fbnic_napi_vector *nv, u64 rcd, + struct fbnic_pkt_buff *pkt, + struct fbnic_q_triad *qt) +{ + unsigned int hdr_pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd); + unsigned int hdr_pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd); + struct page *page = fbnic_page_pool_get(&qt->sub0, hdr_pg_idx); + unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd); + unsigned int frame_sz, hdr_pg_start, hdr_pg_end, headroom; + unsigned char *hdr_start; + + /* data_hard_start should always be NULL when this is called */ + WARN_ON_ONCE(pkt->buff.data_hard_start); + + /* Short-cut the end calculation if we know page is fully consumed */ + hdr_pg_end = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ? + FBNIC_BD_FRAG_SIZE : fbnic_hdr_pg_end(hdr_pg_off, len); + hdr_pg_start = fbnic_hdr_pg_start(hdr_pg_off); + + headroom = hdr_pg_off - hdr_pg_start + FBNIC_RX_PAD; + frame_sz = hdr_pg_end - hdr_pg_start; + xdp_init_buff(&pkt->buff, frame_sz, NULL); + hdr_pg_start += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) * + FBNIC_BD_FRAG_SIZE; + + /* Sync DMA buffer */ + dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page), + hdr_pg_start, frame_sz, + DMA_BIDIRECTIONAL); + + /* Build frame around buffer */ + hdr_start = page_address(page) + hdr_pg_start; + + xdp_prepare_buff(&pkt->buff, hdr_start, headroom, + len - FBNIC_RX_PAD, true); + + pkt->data_truesize = 0; + pkt->data_len = 0; + pkt->nr_frags = 0; +} + +static void fbnic_add_rx_frag(struct fbnic_napi_vector *nv, u64 rcd, + struct fbnic_pkt_buff *pkt, + struct fbnic_q_triad *qt) +{ + unsigned int pg_idx = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd); + unsigned int pg_off = FIELD_GET(FBNIC_RCD_AL_BUFF_OFF_MASK, rcd); + unsigned int len = FIELD_GET(FBNIC_RCD_AL_BUFF_LEN_MASK, rcd); + struct page *page = fbnic_page_pool_get(&qt->sub1, pg_idx); + struct skb_shared_info *shinfo; + unsigned int truesize; + + truesize = FIELD_GET(FBNIC_RCD_AL_PAGE_FIN, rcd) ? 
+ FBNIC_BD_FRAG_SIZE - pg_off : ALIGN(len, 128); + + pg_off += (FBNIC_RCD_AL_BUFF_FRAG_MASK & rcd) * + FBNIC_BD_FRAG_SIZE; + + /* Sync DMA buffer */ + dma_sync_single_range_for_cpu(nv->dev, page_pool_get_dma_addr(page), + pg_off, truesize, DMA_BIDIRECTIONAL); + + /* Add page to xdp shared info */ + shinfo = xdp_get_shared_info_from_buff(&pkt->buff); + + /* We use gso_segs to store truesize */ + pkt->data_truesize += truesize; + + __skb_fill_page_desc_noacc(shinfo, pkt->nr_frags++, page, pg_off, len); + + /* Store data_len in gso_size */ + pkt->data_len += len; +} + +static void fbnic_put_pkt_buff(struct fbnic_napi_vector *nv, + struct fbnic_pkt_buff *pkt, int budget) +{ + struct skb_shared_info *shinfo; + struct page *page; + int nr_frags; + + if (!pkt->buff.data_hard_start) + return; + + shinfo = xdp_get_shared_info_from_buff(&pkt->buff); + nr_frags = pkt->nr_frags; + + while (nr_frags--) { + page = skb_frag_page(&shinfo->frags[nr_frags]); + page_pool_put_full_page(nv->page_pool, page, !!budget); + } + + page = virt_to_page(pkt->buff.data_hard_start); + page_pool_put_full_page(nv->page_pool, page, !!budget); +} + +static struct sk_buff *fbnic_build_skb(struct fbnic_napi_vector *nv, + struct fbnic_pkt_buff *pkt) +{ + unsigned int nr_frags = pkt->nr_frags; + struct skb_shared_info *shinfo; + unsigned int truesize; + struct sk_buff *skb; + + truesize = xdp_data_hard_end(&pkt->buff) + FBNIC_RX_TROOM - + pkt->buff.data_hard_start; + + /* Build frame around buffer */ + skb = napi_build_skb(pkt->buff.data_hard_start, truesize); + if (unlikely(!skb)) + return NULL; + + /* Push data pointer to start of data, put tail to end of data */ + skb_reserve(skb, pkt->buff.data - pkt->buff.data_hard_start); + __skb_put(skb, pkt->buff.data_end - pkt->buff.data); + + /* Add tracking for metadata at the start of the frame */ + skb_metadata_set(skb, pkt->buff.data - pkt->buff.data_meta); + + /* Add Rx frags */ + if (nr_frags) { + /* Verify that shared info didn't move */ + shinfo = xdp_get_shared_info_from_buff(&pkt->buff); + WARN_ON(skb_shinfo(skb) != shinfo); + + skb->truesize += pkt->data_truesize; + skb->data_len += pkt->data_len; + shinfo->nr_frags = nr_frags; + skb->len += pkt->data_len; + } + + skb_mark_for_recycle(skb); + + /* Set MAC header specific fields */ + skb->protocol = eth_type_trans(skb, nv->napi.dev); + + return skb; +} + +static enum pkt_hash_types fbnic_skb_hash_type(u64 rcd) +{ + return (FBNIC_RCD_META_L4_TYPE_MASK & rcd) ? PKT_HASH_TYPE_L4 : + (FBNIC_RCD_META_L3_TYPE_MASK & rcd) ? PKT_HASH_TYPE_L3 : + PKT_HASH_TYPE_L2; +} + +static void fbnic_populate_skb_fields(struct fbnic_napi_vector *nv, + u64 rcd, struct sk_buff *skb, + struct fbnic_q_triad *qt) +{ + struct net_device *netdev = nv->napi.dev; + struct fbnic_ring *rcq = &qt->cmpl; + + fbnic_rx_csum(rcd, skb, rcq); + + if (netdev->features & NETIF_F_RXHASH) + skb_set_hash(skb, + FIELD_GET(FBNIC_RCD_META_RSS_HASH_MASK, rcd), + fbnic_skb_hash_type(rcd)); + + skb_record_rx_queue(skb, rcq->q_idx); +} + +static bool fbnic_rcd_metadata_err(u64 rcd) +{ + return !!(FBNIC_RCD_META_UNCORRECTABLE_ERR_MASK & rcd); +} + +static int fbnic_clean_rcq(struct fbnic_napi_vector *nv, + struct fbnic_q_triad *qt, int budget) +{ + struct fbnic_ring *rcq = &qt->cmpl; + struct fbnic_pkt_buff *pkt; + s32 head0 = -1, head1 = -1; + __le64 *raw_rcd, done; + u32 head = rcq->head; + u64 packets = 0; + + done = (head & (rcq->size_mask + 1)) ? 
cpu_to_le64(FBNIC_RCD_DONE) : 0; + raw_rcd = &rcq->desc[head & rcq->size_mask]; + pkt = rcq->pkt; + + /* Walk the completion queue collecting the heads reported by NIC */ + while (likely(packets < budget)) { + struct sk_buff *skb = ERR_PTR(-EINVAL); + u64 rcd; + + if ((*raw_rcd & cpu_to_le64(FBNIC_RCD_DONE)) == done) + break; + + dma_rmb(); + + rcd = le64_to_cpu(*raw_rcd); + + switch (FIELD_GET(FBNIC_RCD_TYPE_MASK, rcd)) { + case FBNIC_RCD_TYPE_HDR_AL: + head0 = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd); + fbnic_pkt_prepare(nv, rcd, pkt, qt); + + break; + case FBNIC_RCD_TYPE_PAY_AL: + head1 = FIELD_GET(FBNIC_RCD_AL_BUFF_PAGE_MASK, rcd); + fbnic_add_rx_frag(nv, rcd, pkt, qt); + + break; + case FBNIC_RCD_TYPE_OPT_META: + /* Only type 0 is currently supported */ + if (FIELD_GET(FBNIC_RCD_OPT_META_TYPE_MASK, rcd)) + break; + + /* We currently ignore the action table index */ + break; + case FBNIC_RCD_TYPE_META: + if (likely(!fbnic_rcd_metadata_err(rcd))) + skb = fbnic_build_skb(nv, pkt); + + /* Populate skb and invalidate XDP */ + if (!IS_ERR_OR_NULL(skb)) { + fbnic_populate_skb_fields(nv, rcd, skb, qt); + + packets++; + + napi_gro_receive(&nv->napi, skb); + } else { + fbnic_put_pkt_buff(nv, pkt, 1); + } + + pkt->buff.data_hard_start = NULL; + + break; + } + + raw_rcd++; + head++; + if (!(head & rcq->size_mask)) { + done ^= cpu_to_le64(FBNIC_RCD_DONE); + raw_rcd = &rcq->desc[0]; + } + } + + /* Unmap and free processed buffers */ + if (head0 >= 0) + fbnic_clean_bdq(nv, budget, &qt->sub0, head0); + fbnic_fill_bdq(nv, &qt->sub0); + + if (head1 >= 0) + fbnic_clean_bdq(nv, budget, &qt->sub1, head1); + fbnic_fill_bdq(nv, &qt->sub1); + + /* Record the current head/tail of the queue */ + if (rcq->head != head) { + rcq->head = head; + writel(head & rcq->size_mask, rcq->doorbell); + } + + return packets; +} + +static void fbnic_nv_irq_disable(struct fbnic_napi_vector *nv) +{ + struct fbnic_dev *fbd = nv->fbd; + u32 v_idx = nv->v_idx; + + fbnic_wr32(fbd, FBNIC_INTR_MASK_SET(v_idx / 32), 1 << (v_idx % 32)); +} + +static void fbnic_nv_irq_rearm(struct fbnic_napi_vector *nv) +{ + struct fbnic_dev *fbd = nv->fbd; + u32 v_idx = nv->v_idx; + + fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(v_idx), + FBNIC_INTR_CQ_REARM_INTR_UNMASK); +} + +static int fbnic_poll(struct napi_struct *napi, int budget) +{ + struct fbnic_napi_vector *nv = container_of(napi, + struct fbnic_napi_vector, + napi); + int i, j, work_done = 0; + + for (i = 0; i < nv->txt_count; i++) + fbnic_clean_tcq(nv, &nv->qt[i], budget); + + for (j = 0; j < nv->rxt_count; j++, i++) + work_done += fbnic_clean_rcq(nv, &nv->qt[i], budget); + + if (work_done >= budget) + return budget; + + if (likely(napi_complete_done(napi, work_done))) + fbnic_nv_irq_rearm(nv); + + return 0; +} + +static irqreturn_t fbnic_msix_clean_rings(int __always_unused irq, void *data) +{ + struct fbnic_napi_vector *nv = data; + + napi_schedule_irqoff(&nv->napi); + + return IRQ_HANDLED; +} + +static void fbnic_remove_tx_ring(struct fbnic_net *fbn, + struct fbnic_ring *txr) +{ + if (!(txr->flags & FBNIC_RING_F_STATS)) + return; + + /* Remove pointer to the Tx ring */ + WARN_ON(fbn->tx[txr->q_idx] && fbn->tx[txr->q_idx] != txr); + fbn->tx[txr->q_idx] = NULL; +} + +static void fbnic_remove_rx_ring(struct fbnic_net *fbn, + struct fbnic_ring *rxr) +{ + if (!(rxr->flags & FBNIC_RING_F_STATS)) + return; + + /* Remove pointer to the Rx ring */ + WARN_ON(fbn->rx[rxr->q_idx] && fbn->rx[rxr->q_idx] != rxr); + fbn->rx[rxr->q_idx] = NULL; +} + +static void fbnic_free_napi_vector(struct fbnic_net *fbn, + 
struct fbnic_napi_vector *nv) +{ + struct fbnic_dev *fbd = nv->fbd; + u32 v_idx = nv->v_idx; + int i, j; + + for (i = 0; i < nv->txt_count; i++) { + fbnic_remove_tx_ring(fbn, &nv->qt[i].sub0); + fbnic_remove_tx_ring(fbn, &nv->qt[i].cmpl); + } + + for (j = 0; j < nv->rxt_count; j++, i++) { + fbnic_remove_rx_ring(fbn, &nv->qt[i].sub0); + fbnic_remove_rx_ring(fbn, &nv->qt[i].sub1); + fbnic_remove_rx_ring(fbn, &nv->qt[i].cmpl); + } + + fbnic_free_irq(fbd, v_idx, nv); + page_pool_destroy(nv->page_pool); + netif_napi_del(&nv->napi); + list_del(&nv->napis); + kfree(nv); +} + +void fbnic_free_napi_vectors(struct fbnic_net *fbn) +{ + struct fbnic_napi_vector *nv, *temp; + + list_for_each_entry_safe(nv, temp, &fbn->napis, napis) + fbnic_free_napi_vector(fbn, nv); +} + +static void fbnic_name_napi_vector(struct fbnic_napi_vector *nv) +{ + unsigned char *dev_name = nv->napi.dev->name; + + if (!nv->rxt_count) + snprintf(nv->name, sizeof(nv->name), "%s-Tx-%u", dev_name, + nv->v_idx - FBNIC_NON_NAPI_VECTORS); + else + snprintf(nv->name, sizeof(nv->name), "%s-TxRx-%u", dev_name, + nv->v_idx - FBNIC_NON_NAPI_VECTORS); +} + +#define FBNIC_PAGE_POOL_FLAGS \ + (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV) + +static int fbnic_alloc_nv_page_pool(struct fbnic_net *fbn, + struct fbnic_napi_vector *nv) +{ + struct page_pool_params pp_params = { + .order = 0, + .flags = FBNIC_PAGE_POOL_FLAGS, + .pool_size = (fbn->hpq_size + fbn->ppq_size) * nv->rxt_count, + .nid = NUMA_NO_NODE, + .dev = nv->dev, + .dma_dir = DMA_BIDIRECTIONAL, + .offset = 0, + .max_len = PAGE_SIZE + }; + struct page_pool *pp; + + /* Page pool cannot exceed a size of 32768. This doesn't limit the + * pages on the ring but the number we can have cached waiting on + * the next use. + * + * TBD: Can this be reduced further? Would a multiple of + * NAPI_POLL_WEIGHT possibly make more sense? The question is how + * may pages do we need to hold in reserve to get the best return + * without hogging too much system memory. 
+ */ + if (pp_params.pool_size > 32768) + pp_params.pool_size = 32768; + + pp = page_pool_create(&pp_params); + if (IS_ERR(pp)) + return PTR_ERR(pp); + + nv->page_pool = pp; + + return 0; +} + +static void fbnic_ring_init(struct fbnic_ring *ring, u32 __iomem *doorbell, + int q_idx, u8 flags) +{ + ring->doorbell = doorbell; + ring->q_idx = q_idx; + ring->flags = flags; +} + +static int fbnic_alloc_napi_vector(struct fbnic_dev *fbd, struct fbnic_net *fbn, + unsigned int v_count, unsigned int v_idx, + unsigned int txq_count, unsigned int txq_idx, + unsigned int rxq_count, unsigned int rxq_idx) +{ + int txt_count = txq_count, rxt_count = rxq_count; + u32 __iomem *uc_addr = fbd->uc_addr0; + struct fbnic_napi_vector *nv; + struct fbnic_q_triad *qt; + int qt_count, err; + u32 __iomem *db; + + qt_count = txt_count + rxq_count; + if (!qt_count) + return -EINVAL; + + /* If MMIO has already failed there are no rings to initialize */ + if (!uc_addr) + return -EIO; + + /* Allocate NAPI vector and queue triads */ + nv = kzalloc(struct_size(nv, qt, qt_count), GFP_KERNEL); + if (!nv) + return -ENOMEM; + + /* Record queue triad counts */ + nv->txt_count = txt_count; + nv->rxt_count = rxt_count; + + /* Provide pointer back to fbnic and MSI-X vectors */ + nv->fbd = fbd; + nv->v_idx = v_idx; + + /* Record IRQ to NAPI struct */ + netif_napi_set_irq(&nv->napi, + pci_irq_vector(to_pci_dev(fbd->dev), nv->v_idx)); + + /* Tie napi to netdev */ + list_add(&nv->napis, &fbn->napis); + netif_napi_add(fbn->netdev, &nv->napi, fbnic_poll); + + /* Tie nv back to PCIe dev */ + nv->dev = fbd->dev; + + /* Allocate page pool */ + if (rxq_count) { + err = fbnic_alloc_nv_page_pool(fbn, nv); + if (err) + goto napi_del; + } + + /* Initialize vector name */ + fbnic_name_napi_vector(nv); + + /* Request the IRQ for napi vector */ + err = fbnic_request_irq(fbd, v_idx, &fbnic_msix_clean_rings, + IRQF_SHARED, nv->name, nv); + if (err) + goto pp_destroy; + + /* Initialize queue triads */ + qt = nv->qt; + + while (txt_count) { + /* Configure Tx queue */ + db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TWQ0_TAIL]; + + /* Assign Tx queue to netdev if applicable */ + if (txq_count > 0) { + u8 flags = FBNIC_RING_F_CTX | FBNIC_RING_F_STATS; + + fbnic_ring_init(&qt->sub0, db, txq_idx, flags); + fbn->tx[txq_idx] = &qt->sub0; + txq_count--; + } else { + fbnic_ring_init(&qt->sub0, db, 0, + FBNIC_RING_F_DISABLED); + } + + /* Configure Tx completion queue */ + db = &uc_addr[FBNIC_QUEUE(txq_idx) + FBNIC_QUEUE_TCQ_HEAD]; + fbnic_ring_init(&qt->cmpl, db, 0, 0); + + /* Update Tx queue index */ + txt_count--; + txq_idx += v_count; + + /* Move to next queue triad */ + qt++; + } + + while (rxt_count) { + /* Configure header queue */ + db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_HPQ_TAIL]; + fbnic_ring_init(&qt->sub0, db, 0, FBNIC_RING_F_CTX); + + /* Configure payload queue */ + db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_BDQ_PPQ_TAIL]; + fbnic_ring_init(&qt->sub1, db, 0, FBNIC_RING_F_CTX); + + /* Configure Rx completion queue */ + db = &uc_addr[FBNIC_QUEUE(rxq_idx) + FBNIC_QUEUE_RCQ_HEAD]; + fbnic_ring_init(&qt->cmpl, db, rxq_idx, FBNIC_RING_F_STATS); + fbn->rx[rxq_idx] = &qt->cmpl; + + /* Update Rx queue index */ + rxt_count--; + rxq_idx += v_count; + + /* Move to next queue triad */ + qt++; + } + + return 0; + +pp_destroy: + page_pool_destroy(nv->page_pool); +napi_del: + netif_napi_del(&nv->napi); + list_del(&nv->napis); + kfree(nv); + return err; +} + +int fbnic_alloc_napi_vectors(struct fbnic_net *fbn) +{ + unsigned int txq_idx = 0, 
rxq_idx = 0, v_idx = FBNIC_NON_NAPI_VECTORS; + unsigned int num_tx = fbn->num_tx_queues; + unsigned int num_rx = fbn->num_rx_queues; + unsigned int num_napi = fbn->num_napi; + struct fbnic_dev *fbd = fbn->fbd; + int err; + + /* Allocate 1 Tx queue per napi vector */ + if (num_napi < FBNIC_MAX_TXQS && num_napi == num_tx + num_rx) { + while (num_tx) { + err = fbnic_alloc_napi_vector(fbd, fbn, + num_napi, v_idx, + 1, txq_idx, 0, 0); + if (err) + goto free_vectors; + + /* Update counts and index */ + num_tx--; + txq_idx++; + + v_idx++; + } + } + + /* Allocate Tx/Rx queue pairs per vector, or allocate remaining Rx */ + while (num_rx | num_tx) { + int tqpv = DIV_ROUND_UP(num_tx, num_napi - txq_idx); + int rqpv = DIV_ROUND_UP(num_rx, num_napi - rxq_idx); + + err = fbnic_alloc_napi_vector(fbd, fbn, num_napi, v_idx, + tqpv, txq_idx, rqpv, rxq_idx); + if (err) + goto free_vectors; + + /* Update counts and index */ + num_tx -= tqpv; + txq_idx++; + + num_rx -= rqpv; + rxq_idx++; + + v_idx++; + } + + return 0; + +free_vectors: + fbnic_free_napi_vectors(fbn); + + return -ENOMEM; +} + +static void fbnic_free_ring_resources(struct device *dev, + struct fbnic_ring *ring) +{ + kvfree(ring->buffer); + ring->buffer = NULL; + + /* If size is not set there are no descriptors present */ + if (!ring->size) + return; + + dma_free_coherent(dev, ring->size, ring->desc, ring->dma); + ring->size_mask = 0; + ring->size = 0; +} + +static int fbnic_alloc_tx_ring_desc(struct fbnic_net *fbn, + struct fbnic_ring *txr) +{ + struct device *dev = fbn->netdev->dev.parent; + size_t size; + + /* Round size up to nearest 4K */ + size = ALIGN(array_size(sizeof(*txr->desc), fbn->txq_size), 4096); + + txr->desc = dma_alloc_coherent(dev, size, &txr->dma, + GFP_KERNEL | __GFP_NOWARN); + if (!txr->desc) + return -ENOMEM; + + /* txq_size should be a power of 2, so mask is just that -1 */ + txr->size_mask = fbn->txq_size - 1; + txr->size = size; + + return 0; +} + +static int fbnic_alloc_tx_ring_buffer(struct fbnic_ring *txr) +{ + size_t size = array_size(sizeof(*txr->tx_buf), txr->size_mask + 1); + + txr->tx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN); + + return txr->tx_buf ? 
0 : -ENOMEM; +} + +static int fbnic_alloc_tx_ring_resources(struct fbnic_net *fbn, + struct fbnic_ring *txr) +{ + struct device *dev = fbn->netdev->dev.parent; + int err; + + if (txr->flags & FBNIC_RING_F_DISABLED) + return 0; + + err = fbnic_alloc_tx_ring_desc(fbn, txr); + if (err) + return err; + + if (!(txr->flags & FBNIC_RING_F_CTX)) + return 0; + + err = fbnic_alloc_tx_ring_buffer(txr); + if (err) + goto free_desc; + + return 0; + +free_desc: + fbnic_free_ring_resources(dev, txr); + return err; +} + +static int fbnic_alloc_rx_ring_desc(struct fbnic_net *fbn, + struct fbnic_ring *rxr) +{ + struct device *dev = fbn->netdev->dev.parent; + size_t desc_size = sizeof(*rxr->desc); + u32 rxq_size; + size_t size; + + switch (rxr->doorbell - fbnic_ring_csr_base(rxr)) { + case FBNIC_QUEUE_BDQ_HPQ_TAIL: + rxq_size = fbn->hpq_size / FBNIC_BD_FRAG_COUNT; + desc_size *= FBNIC_BD_FRAG_COUNT; + break; + case FBNIC_QUEUE_BDQ_PPQ_TAIL: + rxq_size = fbn->ppq_size / FBNIC_BD_FRAG_COUNT; + desc_size *= FBNIC_BD_FRAG_COUNT; + break; + case FBNIC_QUEUE_RCQ_HEAD: + rxq_size = fbn->rcq_size; + break; + default: + return -EINVAL; + } + + /* Round size up to nearest 4K */ + size = ALIGN(array_size(desc_size, rxq_size), 4096); + + rxr->desc = dma_alloc_coherent(dev, size, &rxr->dma, + GFP_KERNEL | __GFP_NOWARN); + if (!rxr->desc) + return -ENOMEM; + + /* rxq_size should be a power of 2, so mask is just that -1 */ + rxr->size_mask = rxq_size - 1; + rxr->size = size; + + return 0; +} + +static int fbnic_alloc_rx_ring_buffer(struct fbnic_ring *rxr) +{ + size_t size = array_size(sizeof(*rxr->rx_buf), rxr->size_mask + 1); + + if (rxr->flags & FBNIC_RING_F_CTX) + size = sizeof(*rxr->rx_buf) * (rxr->size_mask + 1); + else + size = sizeof(*rxr->pkt); + + rxr->rx_buf = kvzalloc(size, GFP_KERNEL | __GFP_NOWARN); + + return rxr->rx_buf ? 
0 : -ENOMEM; +} + +static int fbnic_alloc_rx_ring_resources(struct fbnic_net *fbn, + struct fbnic_ring *rxr) +{ + struct device *dev = fbn->netdev->dev.parent; + int err; + + err = fbnic_alloc_rx_ring_desc(fbn, rxr); + if (err) + return err; + + err = fbnic_alloc_rx_ring_buffer(rxr); + if (err) + goto free_desc; + + return 0; + +free_desc: + fbnic_free_ring_resources(dev, rxr); + return err; +} + +static void fbnic_free_qt_resources(struct fbnic_net *fbn, + struct fbnic_q_triad *qt) +{ + struct device *dev = fbn->netdev->dev.parent; + + fbnic_free_ring_resources(dev, &qt->cmpl); + fbnic_free_ring_resources(dev, &qt->sub1); + fbnic_free_ring_resources(dev, &qt->sub0); +} + +static int fbnic_alloc_tx_qt_resources(struct fbnic_net *fbn, + struct fbnic_q_triad *qt) +{ + struct device *dev = fbn->netdev->dev.parent; + int err; + + err = fbnic_alloc_tx_ring_resources(fbn, &qt->sub0); + if (err) + return err; + + err = fbnic_alloc_tx_ring_resources(fbn, &qt->cmpl); + if (err) + goto free_sub1; + + return 0; + +free_sub1: + fbnic_free_ring_resources(dev, &qt->sub0); + return err; +} + +static int fbnic_alloc_rx_qt_resources(struct fbnic_net *fbn, + struct fbnic_q_triad *qt) +{ + struct device *dev = fbn->netdev->dev.parent; + int err; + + err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub0); + if (err) + return err; + + err = fbnic_alloc_rx_ring_resources(fbn, &qt->sub1); + if (err) + goto free_sub0; + + err = fbnic_alloc_rx_ring_resources(fbn, &qt->cmpl); + if (err) + goto free_sub1; + + return 0; + +free_sub1: + fbnic_free_ring_resources(dev, &qt->sub1); +free_sub0: + fbnic_free_ring_resources(dev, &qt->sub0); + return err; +} + +static void fbnic_free_nv_resources(struct fbnic_net *fbn, + struct fbnic_napi_vector *nv) +{ + int i, j; + + /* Free Tx Resources */ + for (i = 0; i < nv->txt_count; i++) + fbnic_free_qt_resources(fbn, &nv->qt[i]); + + for (j = 0; j < nv->rxt_count; j++, i++) + fbnic_free_qt_resources(fbn, &nv->qt[i]); +} + +static int fbnic_alloc_nv_resources(struct fbnic_net *fbn, + struct fbnic_napi_vector *nv) +{ + int i, j, err; + + /* Allocate Tx Resources */ + for (i = 0; i < nv->txt_count; i++) { + err = fbnic_alloc_tx_qt_resources(fbn, &nv->qt[i]); + if (err) + goto free_resources; + } + + /* Allocate Rx Resources */ + for (j = 0; j < nv->rxt_count; j++, i++) { + err = fbnic_alloc_rx_qt_resources(fbn, &nv->qt[i]); + if (err) + goto free_resources; + } + + return 0; + +free_resources: + while (i--) + fbnic_free_qt_resources(fbn, &nv->qt[i]); + return err; +} + +void fbnic_free_resources(struct fbnic_net *fbn) +{ + struct fbnic_napi_vector *nv; + + list_for_each_entry(nv, &fbn->napis, napis) + fbnic_free_nv_resources(fbn, nv); +} + +int fbnic_alloc_resources(struct fbnic_net *fbn) +{ + struct fbnic_napi_vector *nv; + int err = -ENODEV; + + list_for_each_entry(nv, &fbn->napis, napis) { + err = fbnic_alloc_nv_resources(fbn, nv); + if (err) + goto free_resources; + } + + return 0; + +free_resources: + list_for_each_entry_continue_reverse(nv, &fbn->napis, napis) + fbnic_free_nv_resources(fbn, nv); + + return err; +} + +static void fbnic_disable_twq0(struct fbnic_ring *txr) +{ + u32 twq_ctl = fbnic_ring_rd32(txr, FBNIC_QUEUE_TWQ0_CTL); + + twq_ctl &= ~FBNIC_QUEUE_TWQ_CTL_ENABLE; + + fbnic_ring_wr32(txr, FBNIC_QUEUE_TWQ0_CTL, twq_ctl); +} + +static void fbnic_disable_tcq(struct fbnic_ring *txr) +{ + fbnic_ring_wr32(txr, FBNIC_QUEUE_TCQ_CTL, 0); + fbnic_ring_wr32(txr, FBNIC_QUEUE_TIM_MASK, FBNIC_QUEUE_TIM_MASK_MASK); +} + +static void fbnic_disable_bdq(struct fbnic_ring *hpq, struct 
fbnic_ring *ppq) +{ + u32 bdq_ctl = fbnic_ring_rd32(hpq, FBNIC_QUEUE_BDQ_CTL); + + bdq_ctl &= ~FBNIC_QUEUE_BDQ_CTL_ENABLE; + + fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, bdq_ctl); +} + +static void fbnic_disable_rcq(struct fbnic_ring *rxr) +{ + fbnic_ring_wr32(rxr, FBNIC_QUEUE_RCQ_CTL, 0); + fbnic_ring_wr32(rxr, FBNIC_QUEUE_RIM_MASK, FBNIC_QUEUE_RIM_MASK_MASK); +} + +void fbnic_napi_disable(struct fbnic_net *fbn) +{ + struct fbnic_napi_vector *nv; + + list_for_each_entry(nv, &fbn->napis, napis) { + napi_disable(&nv->napi); + + fbnic_nv_irq_disable(nv); + } +} + +void fbnic_disable(struct fbnic_net *fbn) +{ + struct fbnic_dev *fbd = fbn->fbd; + struct fbnic_napi_vector *nv; + int i, j; + + list_for_each_entry(nv, &fbn->napis, napis) { + /* Disable Tx queue triads */ + for (i = 0; i < nv->txt_count; i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + fbnic_disable_twq0(&qt->sub0); + fbnic_disable_tcq(&qt->cmpl); + } + + /* Disable Rx queue triads */ + for (j = 0; j < nv->rxt_count; j++, i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + fbnic_disable_bdq(&qt->sub0, &qt->sub1); + fbnic_disable_rcq(&qt->cmpl); + } + } + + fbnic_wrfl(fbd); +} + +static void fbnic_tx_flush(struct fbnic_dev *fbd) +{ + netdev_warn(fbd->netdev, "triggering Tx flush\n"); + + fbnic_rmw32(fbd, FBNIC_TMI_DROP_CTRL, FBNIC_TMI_DROP_CTRL_EN, + FBNIC_TMI_DROP_CTRL_EN); +} + +static void fbnic_tx_flush_off(struct fbnic_dev *fbd) +{ + fbnic_rmw32(fbd, FBNIC_TMI_DROP_CTRL, FBNIC_TMI_DROP_CTRL_EN, 0); +} + +struct fbnic_idle_regs { + u32 reg_base; + u8 reg_cnt; +}; + +static bool fbnic_all_idle(struct fbnic_dev *fbd, + const struct fbnic_idle_regs *regs, + unsigned int nregs) +{ + unsigned int i, j; + + for (i = 0; i < nregs; i++) { + for (j = 0; j < regs[i].reg_cnt; j++) { + if (fbnic_rd32(fbd, regs[i].reg_base + j) != ~0U) + return false; + } + } + return true; +} + +static void fbnic_idle_dump(struct fbnic_dev *fbd, + const struct fbnic_idle_regs *regs, + unsigned int nregs, const char *dir, int err) +{ + unsigned int i, j; + + netdev_err(fbd->netdev, "error waiting for %s idle %d\n", dir, err); + for (i = 0; i < nregs; i++) + for (j = 0; j < regs[i].reg_cnt; j++) + netdev_err(fbd->netdev, "0x%04x: %08x\n", + regs[i].reg_base + j, + fbnic_rd32(fbd, regs[i].reg_base + j)); +} + +int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail) +{ + static const struct fbnic_idle_regs tx[] = { + { FBNIC_QM_TWQ_IDLE(0), FBNIC_QM_TWQ_IDLE_CNT, }, + { FBNIC_QM_TQS_IDLE(0), FBNIC_QM_TQS_IDLE_CNT, }, + { FBNIC_QM_TDE_IDLE(0), FBNIC_QM_TDE_IDLE_CNT, }, + { FBNIC_QM_TCQ_IDLE(0), FBNIC_QM_TCQ_IDLE_CNT, }, + }, rx[] = { + { FBNIC_QM_HPQ_IDLE(0), FBNIC_QM_HPQ_IDLE_CNT, }, + { FBNIC_QM_PPQ_IDLE(0), FBNIC_QM_PPQ_IDLE_CNT, }, + { FBNIC_QM_RCQ_IDLE(0), FBNIC_QM_RCQ_IDLE_CNT, }, + }; + bool idle; + int err; + + err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle, 2, 500000, + false, fbd, tx, ARRAY_SIZE(tx)); + if (err == -ETIMEDOUT) { + fbnic_tx_flush(fbd); + err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle, + 2, 500000, false, + fbd, tx, ARRAY_SIZE(tx)); + fbnic_tx_flush_off(fbd); + } + if (err) { + fbnic_idle_dump(fbd, tx, ARRAY_SIZE(tx), "Tx", err); + if (may_fail) + return err; + } + + err = read_poll_timeout_atomic(fbnic_all_idle, idle, idle, 2, 500000, + false, fbd, rx, ARRAY_SIZE(rx)); + if (err) + fbnic_idle_dump(fbd, rx, ARRAY_SIZE(rx), "Rx", err); + return err; +} + +void fbnic_flush(struct fbnic_net *fbn) +{ + struct fbnic_napi_vector *nv; + + list_for_each_entry(nv, &fbn->napis, napis) { + int i, j; + + /* 
Flush any processed Tx Queue Triads and drop the rest */ + for (i = 0; i < nv->txt_count; i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + struct netdev_queue *tx_queue; + + /* Clean the work queues of unprocessed work */ + fbnic_clean_twq0(nv, 0, &qt->sub0, true, qt->sub0.tail); + + /* Reset completion queue descriptor ring */ + memset(qt->cmpl.desc, 0, qt->cmpl.size); + + /* Nothing else to do if Tx queue is disabled */ + if (qt->sub0.flags & FBNIC_RING_F_DISABLED) + continue; + + /* Reset BQL associated with Tx queue */ + tx_queue = netdev_get_tx_queue(nv->napi.dev, + qt->sub0.q_idx); + netdev_tx_reset_queue(tx_queue); + + /* Disassociate Tx queue from NAPI */ + netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx, + NETDEV_QUEUE_TYPE_TX, NULL); + } + + /* Flush any processed Rx Queue Triads and drop the rest */ + for (j = 0; j < nv->rxt_count; j++, i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + /* Clean the work queues of unprocessed work */ + fbnic_clean_bdq(nv, 0, &qt->sub0, qt->sub0.tail); + fbnic_clean_bdq(nv, 0, &qt->sub1, qt->sub1.tail); + + /* Reset completion queue descriptor ring */ + memset(qt->cmpl.desc, 0, qt->cmpl.size); + + fbnic_put_pkt_buff(nv, qt->cmpl.pkt, 0); + qt->cmpl.pkt->buff.data_hard_start = NULL; + + /* Disassociate Rx queue from NAPI */ + netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx, + NETDEV_QUEUE_TYPE_RX, NULL); + } + } +} + +void fbnic_fill(struct fbnic_net *fbn) +{ + struct fbnic_napi_vector *nv; + + list_for_each_entry(nv, &fbn->napis, napis) { + int i, j; + + /* Configure NAPI mapping for Tx */ + for (i = 0; i < nv->txt_count; i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + /* Nothing to do if Tx queue is disabled */ + if (qt->sub0.flags & FBNIC_RING_F_DISABLED) + continue; + + /* Associate Tx queue with NAPI */ + netif_queue_set_napi(nv->napi.dev, qt->sub0.q_idx, + NETDEV_QUEUE_TYPE_TX, &nv->napi); + } + + /* Configure NAPI mapping and populate pages + * in the BDQ rings to use for Rx + */ + for (j = 0; j < nv->rxt_count; j++, i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + /* Associate Rx queue with NAPI */ + netif_queue_set_napi(nv->napi.dev, qt->cmpl.q_idx, + NETDEV_QUEUE_TYPE_RX, &nv->napi); + + /* Populate the header and payload BDQs */ + fbnic_fill_bdq(nv, &qt->sub0); + fbnic_fill_bdq(nv, &qt->sub1); + } + } +} + +static void fbnic_enable_twq0(struct fbnic_ring *twq) +{ + u32 log_size = fls(twq->size_mask); + + if (!twq->size_mask) + return; + + /* Reset head/tail */ + fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_RESET); + twq->tail = 0; + twq->head = 0; + + /* Store descriptor ring address and size */ + fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAL, lower_32_bits(twq->dma)); + fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_BAH, upper_32_bits(twq->dma)); + + /* Write lower 4 bits of log size as 64K ring size is 0 */ + fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_SIZE, log_size & 0xf); + + fbnic_ring_wr32(twq, FBNIC_QUEUE_TWQ0_CTL, FBNIC_QUEUE_TWQ_CTL_ENABLE); +} + +static void fbnic_enable_tcq(struct fbnic_napi_vector *nv, + struct fbnic_ring *tcq) +{ + u32 log_size = fls(tcq->size_mask); + + if (!tcq->size_mask) + return; + + /* Reset head/tail */ + fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_CTL, FBNIC_QUEUE_TCQ_CTL_RESET); + tcq->tail = 0; + tcq->head = 0; + + /* Store descriptor ring address and size */ + fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAL, lower_32_bits(tcq->dma)); + fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_BAH, upper_32_bits(tcq->dma)); + + /* Write lower 4 bits of log size as 64K ring size is 0 */ + fbnic_ring_wr32(tcq, 
FBNIC_QUEUE_TCQ_SIZE, log_size & 0xf); + + /* Store interrupt information for the completion queue */ + fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_CTL, nv->v_idx); + fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_THRESHOLD, tcq->size_mask / 2); + fbnic_ring_wr32(tcq, FBNIC_QUEUE_TIM_MASK, 0); + + /* Enable queue */ + fbnic_ring_wr32(tcq, FBNIC_QUEUE_TCQ_CTL, FBNIC_QUEUE_TCQ_CTL_ENABLE); +} + +static void fbnic_enable_bdq(struct fbnic_ring *hpq, struct fbnic_ring *ppq) +{ + u32 bdq_ctl = FBNIC_QUEUE_BDQ_CTL_ENABLE; + u32 log_size; + + /* Reset head/tail */ + fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, FBNIC_QUEUE_BDQ_CTL_RESET); + ppq->tail = 0; + ppq->head = 0; + hpq->tail = 0; + hpq->head = 0; + + log_size = fls(hpq->size_mask); + + /* Store descriptor ring address and size */ + fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAL, lower_32_bits(hpq->dma)); + fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_BAH, upper_32_bits(hpq->dma)); + + /* Write lower 4 bits of log size as 64K ring size is 0 */ + fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_HPQ_SIZE, log_size & 0xf); + + if (!ppq->size_mask) + goto write_ctl; + + log_size = fls(ppq->size_mask); + + /* Add enabling of PPQ to BDQ control */ + bdq_ctl |= FBNIC_QUEUE_BDQ_CTL_PPQ_ENABLE; + + /* Store descriptor ring address and size */ + fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAL, lower_32_bits(ppq->dma)); + fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_BAH, upper_32_bits(ppq->dma)); + fbnic_ring_wr32(ppq, FBNIC_QUEUE_BDQ_PPQ_SIZE, log_size & 0xf); + +write_ctl: + fbnic_ring_wr32(hpq, FBNIC_QUEUE_BDQ_CTL, bdq_ctl); +} + +static void fbnic_config_drop_mode_rcq(struct fbnic_napi_vector *nv, + struct fbnic_ring *rcq) +{ + u32 drop_mode, rcq_ctl; + + drop_mode = FBNIC_QUEUE_RDE_CTL0_DROP_IMMEDIATE; + + /* Specify packet layout */ + rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_DROP_MODE_MASK, drop_mode) | + FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_HROOM_MASK, FBNIC_RX_HROOM) | + FIELD_PREP(FBNIC_QUEUE_RDE_CTL0_MIN_TROOM_MASK, FBNIC_RX_TROOM); + + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL0, rcq_ctl); +} + +static void fbnic_enable_rcq(struct fbnic_napi_vector *nv, + struct fbnic_ring *rcq) +{ + u32 log_size = fls(rcq->size_mask); + u32 rcq_ctl; + + fbnic_config_drop_mode_rcq(nv, rcq); + + rcq_ctl = FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PADLEN_MASK, FBNIC_RX_PAD) | + FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_MAX_HDR_MASK, + FBNIC_RX_MAX_HDR) | + FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_OFF_MASK, + FBNIC_RX_PAYLD_OFFSET) | + FIELD_PREP(FBNIC_QUEUE_RDE_CTL1_PAYLD_PG_CL_MASK, + FBNIC_RX_PAYLD_PG_CL); + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RDE_CTL1, rcq_ctl); + + /* Reset head/tail */ + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_RESET); + rcq->head = 0; + rcq->tail = 0; + + /* Store descriptor ring address and size */ + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAL, lower_32_bits(rcq->dma)); + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_BAH, upper_32_bits(rcq->dma)); + + /* Write lower 4 bits of log size as 64K ring size is 0 */ + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_SIZE, log_size & 0xf); + + /* Store interrupt information for the completion queue */ + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_CTL, nv->v_idx); + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_THRESHOLD, rcq->size_mask / 2); + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RIM_MASK, 0); + + /* Enable queue */ + fbnic_ring_wr32(rcq, FBNIC_QUEUE_RCQ_CTL, FBNIC_QUEUE_RCQ_CTL_ENABLE); +} + +void fbnic_enable(struct fbnic_net *fbn) +{ + struct fbnic_dev *fbd = fbn->fbd; + struct fbnic_napi_vector *nv; + int i, j; + + list_for_each_entry(nv, &fbn->napis, napis) { + /* Setup Tx Queue 
Triads */ + for (i = 0; i < nv->txt_count; i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + fbnic_enable_twq0(&qt->sub0); + fbnic_enable_tcq(nv, &qt->cmpl); + } + + /* Setup Rx Queue Triads */ + for (j = 0; j < nv->rxt_count; j++, i++) { + struct fbnic_q_triad *qt = &nv->qt[i]; + + fbnic_enable_bdq(&qt->sub0, &qt->sub1); + fbnic_config_drop_mode_rcq(nv, &qt->cmpl); + fbnic_enable_rcq(nv, &qt->cmpl); + } + } + + fbnic_wrfl(fbd); +} + +static void fbnic_nv_irq_enable(struct fbnic_napi_vector *nv) +{ + struct fbnic_dev *fbd = nv->fbd; + u32 val; + + val = FBNIC_INTR_CQ_REARM_INTR_UNMASK; + + fbnic_wr32(fbd, FBNIC_INTR_CQ_REARM(nv->v_idx), val); +} + +void fbnic_napi_enable(struct fbnic_net *fbn) +{ + u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {}; + struct fbnic_dev *fbd = fbn->fbd; + struct fbnic_napi_vector *nv; + int i; + + list_for_each_entry(nv, &fbn->napis, napis) { + napi_enable(&nv->napi); + + fbnic_nv_irq_enable(nv); + + /* Record bit used for NAPI IRQs so we can + * set the mask appropriately + */ + irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32); + } + + /* Force the first interrupt on the device to guarantee + * that any packets that may have been enqueued during the + * bringup are processed. + */ + for (i = 0; i < ARRAY_SIZE(irqs); i++) { + if (!irqs[i]) + continue; + fbnic_wr32(fbd, FBNIC_INTR_SET(i), irqs[i]); + } + + fbnic_wrfl(fbd); +} + +void fbnic_napi_depletion_check(struct net_device *netdev) +{ + struct fbnic_net *fbn = netdev_priv(netdev); + u32 irqs[FBNIC_MAX_MSIX_VECS / 32] = {}; + struct fbnic_dev *fbd = fbn->fbd; + struct fbnic_napi_vector *nv; + int i, j; + + list_for_each_entry(nv, &fbn->napis, napis) { + /* Find RQs which are completely out of pages */ + for (i = nv->txt_count, j = 0; j < nv->rxt_count; j++, i++) { + /* Assume 4 pages is always enough to fit a packet + * and therefore generate a completion and an IRQ. + */ + if (fbnic_desc_used(&nv->qt[i].sub0) < 4 || + fbnic_desc_used(&nv->qt[i].sub1) < 4) + irqs[nv->v_idx / 32] |= BIT(nv->v_idx % 32); + } + } + + for (i = 0; i < ARRAY_SIZE(irqs); i++) { + if (!irqs[i]) + continue; + fbnic_wr32(fbd, FBNIC_INTR_MASK_CLEAR(i), irqs[i]); + fbnic_wr32(fbd, FBNIC_INTR_SET(i), irqs[i]); + } + + fbnic_wrfl(fbd); +} diff --git a/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h new file mode 100644 index 000000000000..4a206c0e7192 --- /dev/null +++ b/drivers/net/ethernet/meta/fbnic/fbnic_txrx.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) Meta Platforms, Inc. and affiliates. 
*/ + +#ifndef _FBNIC_TXRX_H_ +#define _FBNIC_TXRX_H_ + +#include <linux/netdevice.h> +#include <linux/skbuff.h> +#include <linux/types.h> +#include <net/xdp.h> + +struct fbnic_net; + +/* Guarantee we have space needed for storing the buffer. + * To store the buffer we need: + * 1 descriptor per page + * + 1 descriptor for skb head + * + 2 descriptors for metadata and optional metadata + * + 7 descriptors to keep tail out of the same cacheline as head + * If we cannot guarantee that, we should return TX_BUSY + */ +#define FBNIC_MAX_SKB_DESC (MAX_SKB_FRAGS + 10) +#define FBNIC_TX_DESC_WAKEUP (FBNIC_MAX_SKB_DESC * 2) +#define FBNIC_TX_DESC_MIN roundup_pow_of_two(FBNIC_TX_DESC_WAKEUP) + +#define FBNIC_MAX_TXQS 128u +#define FBNIC_MAX_RXQS 128u + +#define FBNIC_TXQ_SIZE_DEFAULT 1024 +#define FBNIC_HPQ_SIZE_DEFAULT 256 +#define FBNIC_PPQ_SIZE_DEFAULT 256 +#define FBNIC_RCQ_SIZE_DEFAULT 1024 + +#define FBNIC_RX_TROOM \ + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +#define FBNIC_RX_HROOM \ + (ALIGN(FBNIC_RX_TROOM + NET_SKB_PAD, 128) - FBNIC_RX_TROOM) +#define FBNIC_RX_PAD 0 +#define FBNIC_RX_MAX_HDR (1536 - FBNIC_RX_PAD) +#define FBNIC_RX_PAYLD_OFFSET 0 +#define FBNIC_RX_PAYLD_PG_CL 0 + +#define FBNIC_RING_F_DISABLED BIT(0) +#define FBNIC_RING_F_CTX BIT(1) +#define FBNIC_RING_F_STATS BIT(2) /* Ring's stats may be used */ + +struct fbnic_pkt_buff { + struct xdp_buff buff; + u32 data_truesize; + u16 data_len; + u16 nr_frags; +}; + +/* Pagecnt bias is long max to reserve the last bit to catch overflow + * cases where, if we overcharge the bias, it flips over to negative. + */ +#define PAGECNT_BIAS_MAX LONG_MAX +struct fbnic_rx_buf { + struct page *page; + long pagecnt_bias; +}; + +struct fbnic_ring { + /* Pointer to buffer specific info */ + union { + struct fbnic_pkt_buff *pkt; /* RCQ */ + struct fbnic_rx_buf *rx_buf; /* BDQ */ + void **tx_buf; /* TWQ */ + void *buffer; /* Generic pointer */ + }; + + u32 __iomem *doorbell; /* Pointer to CSR space for ring */ + __le64 *desc; /* Descriptor ring memory */ + u16 size_mask; /* Size of ring in descriptors - 1 */ + u8 q_idx; /* Logical netdev ring index */ + u8 flags; /* Ring flags (FBNIC_RING_F_*) */ + + u32 head, tail; /* Head/Tail of ring */ + + /* Slow path fields follow */ + dma_addr_t dma; /* Phys addr of descriptor memory */ + size_t size; /* Size of descriptor ring in memory */ +}; + +struct fbnic_q_triad { + struct fbnic_ring sub0, sub1, cmpl; +}; + +struct fbnic_napi_vector { + struct napi_struct napi; + struct device *dev; /* Device for DMA unmapping */ + struct page_pool *page_pool; + struct fbnic_dev *fbd; + char name[IFNAMSIZ + 9]; + + u16 v_idx; + u8 txt_count; + u8 rxt_count; + + struct list_head napis; + + struct fbnic_q_triad qt[]; +}; + +netdev_tx_t fbnic_xmit_frame(struct sk_buff *skb, struct net_device *dev); +netdev_features_t +fbnic_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features); + +int fbnic_alloc_napi_vectors(struct fbnic_net *fbn); +void fbnic_free_napi_vectors(struct fbnic_net *fbn); +int fbnic_alloc_resources(struct fbnic_net *fbn); +void fbnic_free_resources(struct fbnic_net *fbn); +void fbnic_napi_enable(struct fbnic_net *fbn); +void fbnic_napi_disable(struct fbnic_net *fbn); +void fbnic_enable(struct fbnic_net *fbn); +void fbnic_disable(struct fbnic_net *fbn); +void fbnic_flush(struct fbnic_net *fbn); +void fbnic_fill(struct fbnic_net *fbn); + +void fbnic_napi_depletion_check(struct net_device *netdev); +int fbnic_wait_all_queues_idle(struct fbnic_dev *fbd, bool may_fail); + 
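The "Write lower 4 bits of log size as 64K ring size is 0" comments in the fbnic_enable_*() helpers above boil down to this: the SIZE registers take log2(ring size) in a 4-bit field, so the maximum 64K-entry ring wraps to 0. A minimal standalone sketch of the encoding (an editor's example, not driver code; fls_u32() stands in for the kernel's fls()):

	#include <stdio.h>

	/* Highest set bit, 1-based; fls(size - 1) == log2(size) for
	 * power-of-two ring sizes.
	 */
	static int fls_u32(unsigned int x)
	{
		int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	int main(void)
	{
		unsigned int sizes[] = { 256, 1024, 65536 };

		for (int i = 0; i < 3; i++) {
			unsigned int mask = sizes[i] - 1;	/* ring size_mask */

			printf("size %6u -> log %2d -> field 0x%x\n",
			       sizes[i], fls_u32(mask), fls_u32(mask) & 0xf);
		}
		return 0;
	}

Running it prints field 0x8 for 256, 0xa for 1024, and 0x0 for 65536, the "64K ring size is 0" case.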
+#endif /* _FBNIC_TXRX_H_ */ diff --git a/drivers/net/ethernet/microchip/encx24j600-regmap.c b/drivers/net/ethernet/microchip/encx24j600-regmap.c index 3885d6fbace1..26b00e66d912 100644 --- a/drivers/net/ethernet/microchip/encx24j600-regmap.c +++ b/drivers/net/ethernet/microchip/encx24j600-regmap.c @@ -474,13 +474,13 @@ static struct regmap_config regcfg = { .unlock = regmap_unlock_mutex, }; -static struct regmap_bus regmap_encx24j600 = { +static const struct regmap_bus regmap_encx24j600 = { .write = regmap_encx24j600_write, .read = regmap_encx24j600_read, .reg_update_bits = regmap_encx24j600_reg_update_bits, }; -static struct regmap_config phycfg = { +static const struct regmap_config phycfg = { .name = "phy", .reg_bits = 8, .val_bits = 16, @@ -492,7 +492,7 @@ static struct regmap_config phycfg = { .volatile_reg = encx24j600_phymap_volatile, }; -static struct regmap_bus phymap_encx24j600 = { +static const struct regmap_bus phymap_encx24j600 = { .reg_write = regmap_encx24j600_phy_reg_write, .reg_read = regmap_encx24j600_phy_reg_read, }; diff --git a/drivers/net/ethernet/microchip/lan743x_ethtool.c b/drivers/net/ethernet/microchip/lan743x_ethtool.c index 0d1740d64676..3a63ec091413 100644 --- a/drivers/net/ethernet/microchip/lan743x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan743x_ethtool.c @@ -1029,7 +1029,7 @@ static int lan743x_ethtool_set_rxfh(struct net_device *netdev, } static int lan743x_ethtool_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *ts_info) + struct kernel_ethtool_ts_info *ts_info) { struct lan743x_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c index 06811c60d598..aec7066d83b3 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c @@ -376,7 +376,6 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev, lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC]; mac_stats->SingleCollisionFrames = lan966x->stats[idx + SYS_COUNT_TX_COL]; - mac_stats->MultipleCollisionFrames = 0; mac_stats->FramesReceivedOK = lan966x->stats[idx + SYS_COUNT_RX_UC] + lan966x->stats[idx + SYS_COUNT_RX_MC] + @@ -384,26 +383,19 @@ static void lan966x_get_eth_mac_stats(struct net_device *dev, mac_stats->FrameCheckSequenceErrors = lan966x->stats[idx + SYS_COUNT_RX_CRC] + lan966x->stats[idx + SYS_COUNT_RX_CRC]; - mac_stats->AlignmentErrors = 0; mac_stats->OctetsTransmittedOK = lan966x->stats[idx + SYS_COUNT_TX_OCT] + lan966x->stats[idx + SYS_COUNT_TX_PMAC_OCT]; mac_stats->FramesWithDeferredXmissions = lan966x->stats[idx + SYS_COUNT_TX_MM_HOLD]; - mac_stats->LateCollisions = 0; - mac_stats->FramesAbortedDueToXSColls = 0; - mac_stats->FramesLostDueToIntMACXmitError = 0; - mac_stats->CarrierSenseErrors = 0; mac_stats->OctetsReceivedOK = lan966x->stats[idx + SYS_COUNT_RX_OCT]; - mac_stats->FramesLostDueToIntMACRcvError = 0; mac_stats->MulticastFramesXmittedOK = lan966x->stats[idx + SYS_COUNT_TX_MC] + lan966x->stats[idx + SYS_COUNT_TX_PMAC_MC]; mac_stats->BroadcastFramesXmittedOK = lan966x->stats[idx + SYS_COUNT_TX_BC] + lan966x->stats[idx + SYS_COUNT_TX_PMAC_BC]; - mac_stats->FramesWithExcessiveDeferral = 0; mac_stats->MulticastFramesReceivedOK = lan966x->stats[idx + SYS_COUNT_RX_MC]; mac_stats->BroadcastFramesReceivedOK = @@ -546,7 +538,7 @@ static int lan966x_set_pauseparam(struct net_device *dev, } static int lan966x_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + 
struct kernel_ethtool_ts_info *info) { struct lan966x_port *port = netdev_priv(dev); struct lan966x *lan966x = port->lan966x; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c index a4414f63c9b1..a1471e38d118 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vcap_impl.c @@ -581,7 +581,7 @@ static void lan966x_vcap_move(struct net_device *dev, lan966x_vcap_wait_update(lan966x, admin->tgt_inst); } -static struct vcap_operations lan966x_vcap_ops = { +static const struct vcap_operations lan966x_vcap_ops = { .validate_keyset = lan966x_vcap_validate_keyset, .add_default_fields = lan966x_vcap_add_default_fields, .cache_erase = lan966x_vcap_cache_erase, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c index a06dc5a9b355..4f800c1a435d 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c @@ -1183,7 +1183,7 @@ static void sparx5_config_port_stats(struct sparx5 *sparx5, int portno) } static int sparx5_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct sparx5_port *port = netdev_priv(dev); struct sparx5 *sparx5 = port->sparx5; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c index 187efa1fc904..967c8621c250 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_vcap_impl.c @@ -1507,7 +1507,7 @@ static void sparx5_vcap_move(struct net_device *ndev, struct vcap_admin *admin, } } -static struct vcap_operations sparx5_vcap_ops = { +static const struct vcap_operations sparx5_vcap_ops = { .validate_keyset = sparx5_vcap_validate_keyset, .add_default_fields = sparx5_vcap_add_default_fields, .cache_erase = sparx5_vcap_cache_erase, diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api.h b/drivers/net/ethernet/microchip/vcap/vcap_api.h index 9eccfa633c1a..6069ad95c27e 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api.h +++ b/drivers/net/ethernet/microchip/vcap/vcap_api.h @@ -271,7 +271,7 @@ struct vcap_operations { /* VCAP API Client control interface */ struct vcap_control { - struct vcap_operations *ops; /* client supplied operations */ + const struct vcap_operations *ops; /* client supplied operations */ const struct vcap_info *vcaps; /* client supplied vcap models */ const struct vcap_statistics *stats; /* client supplied vcap stats */ struct list_head list; /* list of vcap instances */ diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c index b23c11b0647c..9c9d38042125 100644 --- a/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_debugfs_kunit.c @@ -221,7 +221,7 @@ static int vcap_test_port_info(struct net_device *ndev, return 0; } -static struct vcap_operations test_callbacks = { +static const struct vcap_operations test_callbacks = { .validate_keyset = test_val_keyset, .add_default_fields = test_add_def_fields, .cache_erase = test_cache_erase, diff --git a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c index fe4e166de8a0..51d9423b08a6 100644 --- 
a/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c +++ b/drivers/net/ethernet/microchip/vcap/vcap_api_kunit.c @@ -211,7 +211,7 @@ static int vcap_test_port_info(struct net_device *ndev, return 0; } -static struct vcap_operations test_callbacks = { +static const struct vcap_operations test_callbacks = { .validate_keyset = test_val_keyset, .add_default_fields = test_add_def_fields, .cache_erase = test_cache_erase, diff --git a/drivers/net/ethernet/microsoft/Kconfig b/drivers/net/ethernet/microsoft/Kconfig index 286f0d5697a1..901fbffbf718 100644 --- a/drivers/net/ethernet/microsoft/Kconfig +++ b/drivers/net/ethernet/microsoft/Kconfig @@ -18,7 +18,7 @@ if NET_VENDOR_MICROSOFT config MICROSOFT_MANA tristate "Microsoft Azure Network Adapter (MANA) support" depends on PCI_MSI - depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN && ARM64_4K_PAGES) + depends on X86_64 || (ARM64 && !CPU_BIG_ENDIAN) depends on PCI_HYPERV select AUXILIARY_BUS select PAGE_POOL diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 1332db9a08eb..e1d70d21e207 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -182,7 +182,7 @@ int mana_gd_alloc_memory(struct gdma_context *gc, unsigned int length, dma_addr_t dma_handle; void *buf; - if (length < PAGE_SIZE || !is_power_of_2(length)) + if (length < MANA_PAGE_SIZE || !is_power_of_2(length)) return -EINVAL; gmi->dev = gc->dev; @@ -717,7 +717,7 @@ EXPORT_SYMBOL_NS(mana_gd_destroy_dma_region, NET_MANA); static int mana_gd_create_dma_region(struct gdma_dev *gd, struct gdma_mem_info *gmi) { - unsigned int num_page = gmi->length / PAGE_SIZE; + unsigned int num_page = gmi->length / MANA_PAGE_SIZE; struct gdma_create_dma_region_req *req = NULL; struct gdma_create_dma_region_resp resp = {}; struct gdma_context *gc = gd->gdma_context; @@ -727,10 +727,10 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd, int err; int i; - if (length < PAGE_SIZE || !is_power_of_2(length)) + if (length < MANA_PAGE_SIZE || !is_power_of_2(length)) return -EINVAL; - if (offset_in_page(gmi->virt_addr) != 0) + if (!MANA_PAGE_ALIGNED(gmi->virt_addr)) return -EINVAL; hwc = gc->hwc.driver_data; @@ -751,7 +751,7 @@ static int mana_gd_create_dma_region(struct gdma_dev *gd, req->page_addr_list_len = num_page; for (i = 0; i < num_page; i++) - req->page_addr_list[i] = gmi->dma_handle + i * PAGE_SIZE; + req->page_addr_list[i] = gmi->dma_handle + i * MANA_PAGE_SIZE; err = mana_gd_send_request(gc, req_msg_size, req, sizeof(resp), &resp); if (err) diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index bbc4f9e16c98..cafded2f9382 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -362,12 +362,12 @@ static int mana_hwc_create_cq(struct hw_channel_context *hwc, u16 q_depth, int err; eq_size = roundup_pow_of_two(GDMA_EQE_SIZE * q_depth); - if (eq_size < MINIMUM_SUPPORTED_PAGE_SIZE) - eq_size = MINIMUM_SUPPORTED_PAGE_SIZE; + if (eq_size < MANA_MIN_QSIZE) + eq_size = MANA_MIN_QSIZE; cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth); - if (cq_size < MINIMUM_SUPPORTED_PAGE_SIZE) - cq_size = MINIMUM_SUPPORTED_PAGE_SIZE; + if (cq_size < MANA_MIN_QSIZE) + cq_size = MANA_MIN_QSIZE; hwc_cq = kzalloc(sizeof(*hwc_cq), GFP_KERNEL); if (!hwc_cq) @@ -429,7 +429,7 @@ static int mana_hwc_alloc_dma_buf(struct hw_channel_context *hwc, u16 q_depth, dma_buf->num_reqs = 
q_depth; - buf_size = PAGE_ALIGN(q_depth * max_msg_size); + buf_size = MANA_PAGE_ALIGN(q_depth * max_msg_size); gmi = &dma_buf->mem_info; err = mana_gd_alloc_memory(gc, buf_size, gmi); @@ -497,8 +497,8 @@ static int mana_hwc_create_wq(struct hw_channel_context *hwc, else queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth); - if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE) - queue_size = MINIMUM_SUPPORTED_PAGE_SIZE; + if (queue_size < MANA_MIN_QSIZE) + queue_size = MANA_MIN_QSIZE; hwc_wq = kzalloc(sizeof(*hwc_wq), GFP_KERNEL); if (!hwc_wq) diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index 608ad31a9702..91f10910ea44 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -481,7 +481,7 @@ static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb, struct sock *sk = skb->sk; int txq; - txq = apc->indir_table[hash & MANA_INDIRECT_TABLE_MASK]; + txq = apc->indir_table[hash & (apc->indir_table_sz - 1)]; if (txq != old_q && sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache)) @@ -721,6 +721,13 @@ static void mana_cleanup_port_context(struct mana_port_context *apc) apc->rxqs = NULL; } +static void mana_cleanup_indir_table(struct mana_port_context *apc) +{ + apc->indir_table_sz = 0; + kfree(apc->indir_table); + kfree(apc->rxobj_table); +} + static int mana_init_port_context(struct mana_port_context *apc) { apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *), @@ -962,7 +969,16 @@ static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index, *max_sq = resp.max_num_sq; *max_rq = resp.max_num_rq; - *num_indir_entry = resp.num_indirection_ent; + if (resp.num_indirection_ent > 0 && + resp.num_indirection_ent <= MANA_INDIRECT_TABLE_MAX_SIZE && + is_power_of_2(resp.num_indirection_ent)) { + *num_indir_entry = resp.num_indirection_ent; + } else { + netdev_warn(apc->ndev, + "Setting indirection table size to default %d for vPort %d\n", + MANA_INDIRECT_TABLE_DEF_SIZE, apc->port_idx); + *num_indir_entry = MANA_INDIRECT_TABLE_DEF_SIZE; + } apc->port_handle = resp.vport; ether_addr_copy(apc->mac_addr, resp.mac_addr); @@ -1054,14 +1070,13 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc, bool update_default_rxobj, bool update_key, bool update_tab) { - u16 num_entries = MANA_INDIRECT_TABLE_SIZE; struct mana_cfg_rx_steer_req_v2 *req; struct mana_cfg_rx_steer_resp resp = {}; struct net_device *ndev = apc->ndev; u32 req_buf_size; int err; - req_buf_size = struct_size(req, indir_tab, num_entries); + req_buf_size = struct_size(req, indir_tab, apc->indir_table_sz); req = kzalloc(req_buf_size, GFP_KERNEL); if (!req) return -ENOMEM; @@ -1072,7 +1087,7 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc, req->hdr.req.msg_version = GDMA_MESSAGE_V2; req->vport = apc->port_handle; - req->num_indir_entries = num_entries; + req->num_indir_entries = apc->indir_table_sz; req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2, indir_tab); req->rx_enable = rx; @@ -1111,7 +1126,7 @@ static int mana_cfg_vport_steering(struct mana_port_context *apc, } netdev_info(ndev, "Configured steering vPort %llu entries %u\n", - apc->port_handle, num_entries); + apc->port_handle, apc->indir_table_sz); out: kfree(req); return err; @@ -1889,10 +1904,10 @@ static int mana_create_txq(struct mana_port_context *apc, * to prevent overflow. 
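* (This also keeps txq_size MANA_PAGE_ALIGNED, which the BUILD_BUG_ON below asserts.)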
*/ txq_size = MAX_SEND_BUFFERS_PER_QUEUE * 32; - BUILD_BUG_ON(!PAGE_ALIGNED(txq_size)); + BUILD_BUG_ON(!MANA_PAGE_ALIGNED(txq_size)); cq_size = MAX_SEND_BUFFERS_PER_QUEUE * COMP_ENTRY_SIZE; - cq_size = PAGE_ALIGN(cq_size); + cq_size = MANA_PAGE_ALIGN(cq_size); gc = gd->gdma_context; @@ -2189,8 +2204,8 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, if (err) goto out; - rq_size = PAGE_ALIGN(rq_size); - cq_size = PAGE_ALIGN(cq_size); + rq_size = MANA_PAGE_ALIGN(rq_size); + cq_size = MANA_PAGE_ALIGN(cq_size); /* Create RQ */ memset(&spec, 0, sizeof(spec)); @@ -2344,11 +2359,33 @@ static int mana_create_vport(struct mana_port_context *apc, return mana_create_txq(apc, net); } +static int mana_rss_table_alloc(struct mana_port_context *apc) +{ + if (!apc->indir_table_sz) { + netdev_err(apc->ndev, + "Indirection table size not set for vPort %d\n", + apc->port_idx); + return -EINVAL; + } + + apc->indir_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL); + if (!apc->indir_table) + return -ENOMEM; + + apc->rxobj_table = kcalloc(apc->indir_table_sz, sizeof(mana_handle_t), GFP_KERNEL); + if (!apc->rxobj_table) { + kfree(apc->indir_table); + return -ENOMEM; + } + + return 0; +} + static void mana_rss_table_init(struct mana_port_context *apc) { int i; - for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) + for (i = 0; i < apc->indir_table_sz; i++) apc->indir_table[i] = ethtool_rxfh_indir_default(i, apc->num_queues); } @@ -2361,7 +2398,7 @@ int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx, int i; if (update_tab) { - for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) { + for (i = 0; i < apc->indir_table_sz; i++) { queue_idx = apc->indir_table[i]; apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj; } @@ -2466,7 +2503,6 @@ static int mana_init_port(struct net_device *ndev) struct mana_port_context *apc = netdev_priv(ndev); u32 max_txq, max_rxq, max_queues; int port_idx = apc->port_idx; - u32 num_indirect_entries; int err; err = mana_init_port_context(apc); @@ -2474,7 +2510,7 @@ static int mana_init_port(struct net_device *ndev) return err; err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq, - &num_indirect_entries); + &apc->indir_table_sz); if (err) { netdev_err(ndev, "Failed to query info for vPort %d\n", port_idx); @@ -2493,8 +2529,7 @@ static int mana_init_port(struct net_device *ndev) return 0; reset_apc: - kfree(apc->rxqs); - apc->rxqs = NULL; + mana_cleanup_port_context(apc); return err; } @@ -2723,6 +2758,10 @@ static int mana_probe_port(struct mana_context *ac, int port_idx, if (err) goto free_net; + err = mana_rss_table_alloc(apc); + if (err) + goto reset_apc; + netdev_lockdep_set_classes(ndev); ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; @@ -2739,14 +2778,15 @@ static int mana_probe_port(struct mana_context *ac, int port_idx, err = register_netdev(ndev); if (err) { netdev_err(ndev, "Unable to register netdev.\n"); - goto reset_apc; + goto free_indir; } return 0; +free_indir: + mana_cleanup_indir_table(apc); reset_apc: - kfree(apc->rxqs); - apc->rxqs = NULL; + mana_cleanup_port_context(apc); free_net: *ndev_storage = NULL; netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err); @@ -2874,16 +2914,30 @@ int mana_probe(struct gdma_dev *gd, bool resuming) if (!resuming) { for (i = 0; i < ac->num_ports; i++) { err = mana_probe_port(ac, i, &ac->ports[i]); - if (err) + /* we log the port for which the probe failed and stop + * probes for subsequent ports. 
+ * Note that ports whose probes were successful are kept + * running, unless add_adev fails too. + */ + if (err) { + dev_err(dev, "Probe Failed for port %d\n", i); break; + } } } else { for (i = 0; i < ac->num_ports; i++) { rtnl_lock(); err = mana_attach(ac->ports[i]); rtnl_unlock(); - if (err) + /* we log the port for which the attach failed and stop + * attaching subsequent ports. + * Note that ports whose attach was successful are kept + * running, unless add_adev fails too. + */ + if (err) { + dev_err(dev, "Attach Failed for port %d\n", i); break; + } } } @@ -2899,6 +2953,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending) { struct gdma_context *gc = gd->gdma_context; struct mana_context *ac = gd->driver_data; + struct mana_port_context *apc; struct device *dev = gc->dev; struct net_device *ndev; int err; @@ -2910,6 +2965,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending) for (i = 0; i < ac->num_ports; i++) { ndev = ac->ports[i]; + apc = netdev_priv(ndev); if (!ndev) { if (i == 0) dev_err(dev, "No net device to remove\n"); @@ -2933,6 +2989,7 @@ void mana_remove(struct gdma_dev *gd, bool suspending) } unregister_netdevice(ndev); + mana_cleanup_indir_table(apc); rtnl_unlock(); diff --git a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c index ab2413d71f6c..146d5db1792f 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_ethtool.c +++ b/drivers/net/ethernet/microsoft/mana/mana_ethtool.c @@ -245,7 +245,9 @@ static u32 mana_get_rxfh_key_size(struct net_device *ndev) static u32 mana_rss_indir_size(struct net_device *ndev) { - return MANA_INDIRECT_TABLE_SIZE; + struct mana_port_context *apc = netdev_priv(ndev); + + return apc->indir_table_sz; } static int mana_get_rxfh(struct net_device *ndev, @@ -257,7 +259,7 @@ static int mana_get_rxfh(struct net_device *ndev, rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */ if (rxfh->indir) { - for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) + for (i = 0; i < apc->indir_table_sz; i++) rxfh->indir[i] = apc->indir_table[i]; } @@ -273,8 +275,8 @@ static int mana_set_rxfh(struct net_device *ndev, { struct mana_port_context *apc = netdev_priv(ndev); bool update_hash = false, update_table = false; - u32 save_table[MANA_INDIRECT_TABLE_SIZE]; u8 save_key[MANA_HASH_KEY_SIZE]; + u32 *save_table; int i, err; if (!apc->port_is_up) @@ -284,13 +286,19 @@ static int mana_set_rxfh(struct net_device *ndev, rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; + save_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL); + if (!save_table) + return -ENOMEM; + if (rxfh->indir) { - for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) - if (rxfh->indir[i] >= apc->num_queues) - return -EINVAL; + for (i = 0; i < apc->indir_table_sz; i++) + if (rxfh->indir[i] >= apc->num_queues) { + err = -EINVAL; + goto cleanup; + } update_table = true; - for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) { + for (i = 0; i < apc->indir_table_sz; i++) { save_table[i] = apc->indir_table[i]; apc->indir_table[i] = rxfh->indir[i]; } @@ -306,7 +314,7 @@ static int mana_set_rxfh(struct net_device *ndev, if (err) { /* recover to original values */ if (update_table) { - for (i = 0; i < MANA_INDIRECT_TABLE_SIZE; i++) + for (i = 0; i < apc->indir_table_sz; i++) apc->indir_table[i] = save_table[i]; } @@ -316,6 +324,9 @@ static int mana_set_rxfh(struct net_device *ndev, mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table); } +cleanup: + kfree(save_table); + return err; } diff --git 
a/drivers/net/ethernet/microsoft/mana/shm_channel.c b/drivers/net/ethernet/microsoft/mana/shm_channel.c index 5553af9c8085..0f1679ebad96 100644 --- a/drivers/net/ethernet/microsoft/mana/shm_channel.c +++ b/drivers/net/ethernet/microsoft/mana/shm_channel.c @@ -6,6 +6,7 @@ #include #include +#include #include #define PAGE_FRAME_L48_WIDTH_BYTES 6 @@ -155,8 +156,8 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr, return err; } - if (!PAGE_ALIGNED(eq_addr) || !PAGE_ALIGNED(cq_addr) || - !PAGE_ALIGNED(rq_addr) || !PAGE_ALIGNED(sq_addr)) + if (!MANA_PAGE_ALIGNED(eq_addr) || !MANA_PAGE_ALIGNED(cq_addr) || + !MANA_PAGE_ALIGNED(rq_addr) || !MANA_PAGE_ALIGNED(sq_addr)) return -EINVAL; if ((eq_msix_index & VECTOR_MASK) != eq_msix_index) @@ -183,7 +184,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr, /* EQ addr: low 48 bits of frame address */ shmem = (u64 *)ptr; - frame_addr = PHYS_PFN(eq_addr); + frame_addr = MANA_PFN(eq_addr); *shmem = frame_addr & PAGE_FRAME_L48_MASK; all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); @@ -191,7 +192,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr, /* CQ addr: low 48 bits of frame address */ shmem = (u64 *)ptr; - frame_addr = PHYS_PFN(cq_addr); + frame_addr = MANA_PFN(cq_addr); *shmem = frame_addr & PAGE_FRAME_L48_MASK; all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); @@ -199,7 +200,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr, /* RQ addr: low 48 bits of frame address */ shmem = (u64 *)ptr; - frame_addr = PHYS_PFN(rq_addr); + frame_addr = MANA_PFN(rq_addr); *shmem = frame_addr & PAGE_FRAME_L48_MASK; all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); @@ -207,7 +208,7 @@ int mana_smc_setup_hwc(struct shm_channel *sc, bool reset_vf, u64 eq_addr, /* SQ addr: low 48 bits of frame address */ shmem = (u64 *)ptr; - frame_addr = PHYS_PFN(sq_addr); + frame_addr = MANA_PFN(sq_addr); *shmem = frame_addr & PAGE_FRAME_L48_MASK; all_addr_h4bits |= (frame_addr >> PAGE_FRAME_L48_WIDTH_BITS) << (frame_addr_seq++ * PAGE_FRAME_H4_WIDTH_BITS); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 21a87a3fc556..7c9540a71725 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -980,7 +980,7 @@ static int ocelot_port_get_sset_count(struct net_device *dev, int sset) } static int ocelot_port_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct ocelot_port_private *priv = netdev_priv(dev); struct ocelot *ocelot = priv->port.ocelot; diff --git a/drivers/net/ethernet/mscc/ocelot_ptp.c b/drivers/net/ethernet/mscc/ocelot_ptp.c index cb32234a5bf1..b3c28260adf8 100644 --- a/drivers/net/ethernet/mscc/ocelot_ptp.c +++ b/drivers/net/ethernet/mscc/ocelot_ptp.c @@ -580,7 +580,7 @@ int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr) EXPORT_SYMBOL(ocelot_hwstamp_set); int ocelot_get_ts_info(struct ocelot *ocelot, int port, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { info->phc_index = ocelot->ptp_clock ? 
ptp_clock_index(ocelot->ptp_clock) : -1; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 8e0a890381b6..46ffc2c20893 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -321,6 +321,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, flow_rule_match_enc_control(rule, &enc_ctl); + if (flow_rule_has_enc_control_flags(enc_ctl.mask->flags, + extack)) + return -EOPNOTSUPP; + if (enc_ctl.mask->addr_type != 0xffff) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: wildcarded protocols on tunnels are not supported"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/pensando/ionic/ionic.h b/drivers/net/ethernet/pensando/ionic/ionic.h index 2ccc2c2a06e3..1c61390677f7 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic.h +++ b/drivers/net/ethernet/pensando/ionic/ionic.h @@ -18,6 +18,8 @@ struct ionic_lif; #define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_PF 0x1002 #define PCI_DEVICE_ID_PENSANDO_IONIC_ETH_VF 0x1003 +#define IONIC_ASIC_TYPE_ELBA 2 + #define DEVCMD_TIMEOUT 5 #define IONIC_ADMINQ_TIME_SLICE msecs_to_jiffies(100) @@ -47,6 +49,7 @@ struct ionic { struct ionic_dev_bar bars[IONIC_BARS_MAX]; unsigned int num_bars; struct ionic_identity ident; + struct workqueue_struct *wq; struct ionic_lif *lif; unsigned int nnqs_per_lif; unsigned int neqs_per_lif; @@ -54,6 +57,8 @@ struct ionic { unsigned int nrxqs_per_lif; unsigned int nintrs; DECLARE_BITMAP(intrs, IONIC_INTR_CTRL_REGS_MAX); + cpumask_var_t *affinity_masks; + struct delayed_work doorbell_check_dwork; struct work_struct nb_work; struct notifier_block nb; struct rw_semaphore vf_op_lock; /* lock for VF operations */ @@ -93,4 +98,6 @@ int ionic_port_identify(struct ionic *ionic); int ionic_port_init(struct ionic *ionic); int ionic_port_reset(struct ionic *ionic); +bool ionic_doorbell_wa(struct ionic *ionic); + #endif /* _IONIC_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c index 6ba8d4aca0a0..b93791d6b593 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -326,6 +326,11 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_out; } +#ifdef CONFIG_PPC64 + /* Ensure MSI/MSI-X interrupts lie within addressable physical memory */ + pdev->no_64bit_msi = 1; +#endif + err = ionic_setup_one(ionic); if (err) goto err_out; @@ -372,6 +377,7 @@ static int ionic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mod_timer(&ionic->watchdog_timer, round_jiffies(jiffies + ionic->watchdog_period)); + ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE); return 0; @@ -406,6 +412,8 @@ static void ionic_remove(struct pci_dev *pdev) if (test_and_clear_bit(IONIC_LIF_F_FW_RESET, ionic->lif->state)) set_bit(IONIC_LIF_F_FW_STOPPING, ionic->lif->state); + if (ionic->lif->doorbell_wa) + cancel_delayed_work_sync(&ionic->doorbell_check_dwork); ionic_lif_unregister(ionic->lif); ionic_devlink_unregister(ionic); ionic_lif_deinit(ionic->lif); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c index c3ae11a48024..59e5a9f21105 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_debugfs.c @@ -220,7 +220,7 @@ static int netdev_show(struct seq_file *seq, void *v) { struct net_device *netdev = seq->private; 
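/* netdev_name() falls back to "(unnamed net_device)" when the device has no name yet or the name is still a template like "eth%d", so the change below avoids printing a raw template string. */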
- seq_printf(seq, "%s\n", netdev->name); + seq_printf(seq, "%s\n", netdev_name(netdev)); return 0; } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index 874499337132..9e42d599840d 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -43,11 +43,99 @@ static void ionic_watchdog_cb(struct timer_list *t) work->type = IONIC_DW_TYPE_RX_MODE; netdev_dbg(lif->netdev, "deferred: rx_mode\n"); - ionic_lif_deferred_enqueue(&lif->deferred, work); + ionic_lif_deferred_enqueue(lif, work); } } -static void ionic_watchdog_init(struct ionic *ionic) +static void ionic_napi_schedule_do_softirq(struct napi_struct *napi) +{ + local_bh_disable(); + napi_schedule(napi); + local_bh_enable(); +} + +void ionic_doorbell_napi_work(struct work_struct *work) +{ + struct ionic_qcq *qcq = container_of(work, struct ionic_qcq, + doorbell_napi_work); + unsigned long now, then, dif; + + now = READ_ONCE(jiffies); + then = qcq->q.dbell_jiffies; + dif = now - then; + + if (dif > qcq->q.dbell_deadline) + ionic_napi_schedule_do_softirq(&qcq->napi); +} + +static int ionic_get_preferred_cpu(struct ionic *ionic, + struct ionic_intr_info *intr) +{ + int cpu; + + cpu = cpumask_first_and(*intr->affinity_mask, cpu_online_mask); + if (cpu >= nr_cpu_ids) + cpu = cpumask_local_spread(0, dev_to_node(ionic->dev)); + + return cpu; +} + +static void ionic_queue_dbell_napi_work(struct ionic *ionic, + struct ionic_qcq *qcq) +{ + int cpu; + + if (!(qcq->flags & IONIC_QCQ_F_INTR)) + return; + + cpu = ionic_get_preferred_cpu(ionic, &qcq->intr); + queue_work_on(cpu, ionic->wq, &qcq->doorbell_napi_work); +} + +static void ionic_doorbell_check_dwork(struct work_struct *work) +{ + struct ionic *ionic = container_of(work, struct ionic, + doorbell_check_dwork.work); + struct ionic_lif *lif = ionic->lif; + + mutex_lock(&lif->queue_lock); + + if (test_bit(IONIC_LIF_F_FW_STOPPING, lif->state) || + test_bit(IONIC_LIF_F_FW_RESET, lif->state)) { + mutex_unlock(&lif->queue_lock); + return; + } + + ionic_napi_schedule_do_softirq(&lif->adminqcq->napi); + + if (test_bit(IONIC_LIF_F_UP, lif->state)) { + int i; + + for (i = 0; i < lif->nxqs; i++) { + ionic_queue_dbell_napi_work(ionic, lif->txqcqs[i]); + ionic_queue_dbell_napi_work(ionic, lif->rxqcqs[i]); + } + + if (lif->hwstamp_txq && + lif->hwstamp_txq->flags & IONIC_QCQ_F_INTR) + ionic_napi_schedule_do_softirq(&lif->hwstamp_txq->napi); + if (lif->hwstamp_rxq && + lif->hwstamp_rxq->flags & IONIC_QCQ_F_INTR) + ionic_napi_schedule_do_softirq(&lif->hwstamp_rxq->napi); + } + mutex_unlock(&lif->queue_lock); + + ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE); +} + +bool ionic_doorbell_wa(struct ionic *ionic) +{ + u8 asic_type = ionic->idev.dev_info.asic_type; + + return !asic_type || asic_type == IONIC_ASIC_TYPE_ELBA; +} + +static int ionic_watchdog_init(struct ionic *ionic) { struct ionic_dev *idev = &ionic->idev; @@ -63,6 +151,31 @@ static void ionic_watchdog_init(struct ionic *ionic) idev->fw_status_ready = true; idev->fw_generation = IONIC_FW_STS_F_GENERATION & ioread8(&idev->dev_info_regs->fw_status); + + ionic->wq = alloc_workqueue("%s-wq", WQ_UNBOUND, 0, + dev_name(ionic->dev)); + if (!ionic->wq) { + dev_err(ionic->dev, "alloc_workqueue failed"); + return -ENOMEM; + } + + if (ionic_doorbell_wa(ionic)) + INIT_DELAYED_WORK(&ionic->doorbell_check_dwork, + ionic_doorbell_check_dwork); + + return 0; +} + +void ionic_queue_doorbell_check(struct ionic *ionic, int delay) +{ + int cpu; + 
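+ /* Only ASICs that need the doorbell workaround requeue this check; + * run it on a CPU from the adminq interrupt's affinity mask so the + * periodic poll stays close to where completions are processed. + */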
+ if (!ionic->lif->doorbell_wa) + return; + + cpu = ionic_get_preferred_cpu(ionic, &ionic->lif->adminqcq->intr); + queue_delayed_work_on(cpu, ionic->wq, &ionic->doorbell_check_dwork, + delay); } void ionic_init_devinfo(struct ionic *ionic) @@ -94,6 +207,7 @@ int ionic_dev_setup(struct ionic *ionic) struct device *dev = ionic->dev; int size; u32 sig; + int err; /* BAR0: dev_cmd and interrupts */ if (num_bars < 1) { @@ -129,7 +243,9 @@ int ionic_dev_setup(struct ionic *ionic) return -EFAULT; } - ionic_watchdog_init(ionic); + err = ionic_watchdog_init(ionic); + if (err) + return err; idev->db_pages = bar->vaddr; idev->phy_db_pages = bar->bus_addr; @@ -161,6 +277,7 @@ void ionic_dev_teardown(struct ionic *ionic) idev->phy_cmb_pages = 0; idev->cmb_npages = 0; + destroy_workqueue(ionic->wq); mutex_destroy(&idev->cmb_inuse_lock); } @@ -273,7 +390,7 @@ do_check_time: if (work) { work->type = IONIC_DW_TYPE_LIF_RESET; work->fw_status = fw_status_ready; - ionic_lif_deferred_enqueue(&lif->deferred, work); + ionic_lif_deferred_enqueue(lif, work); } } } @@ -703,10 +820,6 @@ void ionic_q_post(struct ionic_queue *q, bool ring_doorbell) q->dbval | q->head_idx); q->dbell_jiffies = jiffies; - - if (q_to_qcq(q)->napi_qcq) - mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline, - jiffies + IONIC_NAPI_DEADLINE); } } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index b6c01a88098d..c647033f3ad2 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -28,7 +28,7 @@ #define IONIC_DEV_INFO_REG_COUNT 32 #define IONIC_DEV_CMD_REG_COUNT 32 -#define IONIC_NAPI_DEADLINE (HZ / 200) /* 5ms */ +#define IONIC_NAPI_DEADLINE (HZ) /* 1 sec */ #define IONIC_ADMIN_DOORBELL_DEADLINE (HZ / 2) /* 500ms */ #define IONIC_TX_DOORBELL_DEADLINE (HZ / 100) /* 10ms */ #define IONIC_RX_MIN_DOORBELL_DEADLINE (HZ / 100) /* 10ms */ @@ -280,9 +280,9 @@ struct ionic_intr_info { u64 rearm_count; unsigned int index; unsigned int vector; - unsigned int cpu; u32 dim_coal_hw; - cpumask_t affinity_mask; + cpumask_var_t *affinity_mask; + struct irq_affinity_notify aff_notify; }; struct ionic_cq { @@ -388,6 +388,8 @@ bool ionic_q_is_posted(struct ionic_queue *q, unsigned int pos); int ionic_heartbeat_check(struct ionic *ionic); bool ionic_is_fw_running(struct ionic_dev *idev); +void ionic_doorbell_napi_work(struct work_struct *work); +void ionic_queue_doorbell_check(struct ionic *ionic, int delay); bool ionic_adminq_poke_doorbell(struct ionic_queue *q); bool ionic_txq_poke_doorbell(struct ionic_queue *q); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index 91183965a6b7..4619fd74f3e3 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -11,6 +11,8 @@ #include "ionic_ethtool.h" #include "ionic_stats.h" +#define IONIC_MAX_RX_COPYBREAK min(U16_MAX, IONIC_MAX_BUF_LEN) + static void ionic_get_stats_strings(struct ionic_lif *lif, u8 *buf) { u32 i; @@ -872,10 +874,17 @@ static int ionic_set_tunable(struct net_device *dev, const void *data) { struct ionic_lif *lif = netdev_priv(dev); + u32 rx_copybreak; switch (tuna->id) { case ETHTOOL_RX_COPYBREAK: - lif->rx_copybreak = *(u32 *)data; + rx_copybreak = *(u32 *)data; + if (rx_copybreak > IONIC_MAX_RX_COPYBREAK) { + netdev_err(dev, "Max supported rx_copybreak size: %u\n", + IONIC_MAX_RX_COPYBREAK); + return -EINVAL; + } + lif->rx_copybreak = 
(u16)rx_copybreak; break; default: return -EOPNOTSUPP; @@ -968,7 +977,7 @@ static int ionic_get_module_eeprom(struct net_device *netdev, } static int ionic_get_ts_info(struct net_device *netdev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct ionic_lif *lif = netdev_priv(netdev); struct ionic *ionic = lif->ionic; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h index 9a1825edf0d0..9c85c0706c6e 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_if.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h @@ -71,7 +71,7 @@ enum ionic_cmd_opcode { IONIC_CMD_FW_CONTROL_V1 = 255, }; -/** +/* * enum ionic_status_code - Device command return codes */ enum ionic_status_code { @@ -112,6 +112,7 @@ enum ionic_notifyq_opcode { /** * struct ionic_admin_cmd - General admin command format * @opcode: Opcode for the command + * @rsvd: reserved byte(s) * @lif_index: LIF index * @cmd_data: Opcode-specific command bytes */ @@ -125,6 +126,7 @@ struct ionic_admin_cmd { /** * struct ionic_admin_comp - General admin command completion format * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @comp_index: Index in the descriptor ring for which this is the completion * @cmd_data: Command-specific bytes * @color: Color bit (Always 0 for commands issued to the @@ -147,6 +149,7 @@ static inline u8 color_match(u8 color, u8 done_color) /** * struct ionic_nop_cmd - NOP command * @opcode: opcode + * @rsvd: reserved byte(s) */ struct ionic_nop_cmd { u8 opcode; @@ -156,6 +159,7 @@ struct ionic_nop_cmd { /** * struct ionic_nop_comp - NOP command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) */ struct ionic_nop_comp { u8 status; @@ -166,6 +170,7 @@ struct ionic_nop_comp { * struct ionic_dev_init_cmd - Device init command * @opcode: opcode * @type: Device type + * @rsvd: reserved byte(s) */ struct ionic_dev_init_cmd { u8 opcode; @@ -176,6 +181,7 @@ struct ionic_dev_init_cmd { /** * struct ionic_dev_init_comp - Device init command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) */ struct ionic_dev_init_comp { u8 status; @@ -185,6 +191,7 @@ struct ionic_dev_init_comp { /** * struct ionic_dev_reset_cmd - Device reset command * @opcode: opcode + * @rsvd: reserved byte(s) */ struct ionic_dev_reset_cmd { u8 opcode; @@ -194,6 +201,7 @@ struct ionic_dev_reset_cmd { /** * struct ionic_dev_reset_comp - Reset command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) */ struct ionic_dev_reset_comp { u8 status; @@ -207,6 +215,7 @@ struct ionic_dev_reset_comp { * struct ionic_dev_identify_cmd - Driver/device identify command * @opcode: opcode * @ver: Highest version of identify supported by driver + * @rsvd: reserved byte(s) */ struct ionic_dev_identify_cmd { u8 opcode; @@ -218,6 +227,7 @@ struct ionic_dev_identify_cmd { * struct ionic_dev_identify_comp - Driver/device identify command completion * @status: Status of the command (enum ionic_status_code) * @ver: Version of identify returned by device + * @rsvd: reserved byte(s) */ struct ionic_dev_identify_comp { u8 status; @@ -242,6 +252,7 @@ enum ionic_os_type { * @kernel_ver: Kernel version, numeric format * @kernel_ver_str: Kernel version, string format * @driver_ver_str: Driver version, string format + * @words: word access to struct contents */ union ionic_drv_identity { struct { @@ -267,7 +278,9 @@ enum 
ionic_dev_capability { * union ionic_dev_identity - device identity information * @version: Version of device identify * @type: Identify type (0 for now) + * @rsvd: reserved byte(s) * @nports: Number of ports provisioned + * @rsvd2: reserved byte(s) * @nlifs: Number of LIFs provisioned * @nintrs: Number of interrupts provisioned * @ndbpgs_per_lif: Number of doorbell pages per LIF @@ -284,6 +297,7 @@ enum ionic_dev_capability { * @hwstamp_mult: Hardware tick to nanosecond multiplier. * @hwstamp_shift: Hardware tick to nanosecond divisor (power of two). * @capabilities: Device capabilities + * @words: word access to struct contents */ union ionic_dev_identity { struct { @@ -317,6 +331,7 @@ enum ionic_lif_type { * @opcode: opcode * @type: LIF type (enum ionic_lif_type) * @ver: Version of identify returned by device + * @rsvd: reserved byte(s) */ struct ionic_lif_identify_cmd { u8 opcode; @@ -329,6 +344,7 @@ struct ionic_lif_identify_cmd { * struct ionic_lif_identify_comp - LIF identify command completion * @status: Status of the command (enum ionic_status_code) * @ver: Version of identify returned by device + * @rsvd2: reserved byte(s) */ struct ionic_lif_identify_comp { u8 status; @@ -416,7 +432,7 @@ enum ionic_txq_feature { }; /** - * struct ionic_hwstamp_bits - Hardware timestamp decoding bits + * enum ionic_hwstamp_bits - Hardware timestamp decoding bits * @IONIC_HWSTAMP_INVALID: Invalid hardware timestamp value * @IONIC_HWSTAMP_CQ_NEGOFFSET: Timestamp field negative offset * from the base cq descriptor. @@ -429,6 +445,7 @@ enum ionic_hwstamp_bits { /** * struct ionic_lif_logical_qtype - Descriptor of logical to HW queue type * @qtype: Hardware Queue Type + * @rsvd: reserved byte(s) * @qid_count: Number of Queue IDs of the logical type * @qid_base: Minimum Queue ID of the logical type */ @@ -454,12 +471,14 @@ enum ionic_lif_state { /** * union ionic_lif_config - LIF configuration * @state: LIF state (enum ionic_lif_state) + * @rsvd: reserved byte(s) * @name: LIF name * @mtu: MTU * @mac: Station MAC address * @vlan: Default Vlan ID * @features: Features (enum ionic_eth_hw_features) * @queue_count: Queue counts per queue-type + * @words: word access to struct contents */ union ionic_lif_config { struct { @@ -481,33 +500,39 @@ union ionic_lif_config { * @capabilities: LIF capabilities * * @eth: Ethernet identify structure - * @version: Ethernet identify structure version - * @max_ucast_filters: Number of perfect unicast addresses supported - * @max_mcast_filters: Number of perfect multicast addresses supported - * @min_frame_size: Minimum size of frames to be sent - * @max_frame_size: Maximum size of frames to be sent - * @hwstamp_tx_modes: Bitmask of BIT_ULL(enum ionic_txstamp_mode) - * @hwstamp_rx_filters: Bitmask of enum ionic_pkt_class - * @config: LIF config struct with features, mtu, mac, q counts + * @eth.version: Ethernet identify structure version + * @eth.rsvd: reserved byte(s) + * @eth.max_ucast_filters: Number of perfect unicast addresses supported + * @eth.max_mcast_filters: Number of perfect multicast addresses supported + * @eth.min_frame_size: Minimum size of frames to be sent + * @eth.max_frame_size: Maximum size of frames to be sent + * @eth.rsvd2: reserved byte(s) + * @eth.hwstamp_tx_modes: Bitmask of BIT_ULL(enum ionic_txstamp_mode) + * @eth.hwstamp_rx_filters: Bitmask of enum ionic_pkt_class + * @eth.rsvd3: reserved byte(s) + * @eth.config: LIF config struct with features, mtu, mac, q counts * * @rdma: RDMA identify structure - * @version: RDMA version of opcodes and 
queue descriptors - * @qp_opcodes: Number of RDMA queue pair opcodes supported - * @admin_opcodes: Number of RDMA admin opcodes supported - * @npts_per_lif: Page table size per LIF - * @nmrs_per_lif: Number of memory regions per LIF - * @nahs_per_lif: Number of address handles per LIF - * @max_stride: Max work request stride - * @cl_stride: Cache line stride - * @pte_stride: Page table entry stride - * @rrq_stride: Remote RQ work request stride - * @rsq_stride: Remote SQ work request stride - * @dcqcn_profiles: Number of DCQCN profiles - * @aq_qtype: RDMA Admin Qtype - * @sq_qtype: RDMA Send Qtype - * @rq_qtype: RDMA Receive Qtype - * @cq_qtype: RDMA Completion Qtype - * @eq_qtype: RDMA Event Qtype + * @rdma.version: RDMA version of opcodes and queue descriptors + * @rdma.qp_opcodes: Number of RDMA queue pair opcodes supported + * @rdma.admin_opcodes: Number of RDMA admin opcodes supported + * @rdma.rsvd: reserved byte(s) + * @rdma.npts_per_lif: Page table size per LIF + * @rdma.nmrs_per_lif: Number of memory regions per LIF + * @rdma.nahs_per_lif: Number of address handles per LIF + * @rdma.max_stride: Max work request stride + * @rdma.cl_stride: Cache line stride + * @rdma.pte_stride: Page table entry stride + * @rdma.rrq_stride: Remote RQ work request stride + * @rdma.rsq_stride: Remote SQ work request stride + * @rdma.dcqcn_profiles: Number of DCQCN profiles + * @rdma.rsvd_dimensions: reserved byte(s) + * @rdma.aq_qtype: RDMA Admin Qtype + * @rdma.sq_qtype: RDMA Send Qtype + * @rdma.rq_qtype: RDMA Receive Qtype + * @rdma.cq_qtype: RDMA Completion Qtype + * @rdma.eq_qtype: RDMA Event Qtype + * @words: word access to struct contents */ union ionic_lif_identity { struct { @@ -558,7 +583,9 @@ union ionic_lif_identity { * @opcode: Opcode * @type: LIF type (enum ionic_lif_type) * @index: LIF index + * @rsvd: reserved byte(s) * @info_pa: Destination address for LIF info (struct ionic_lif_info) + * @rsvd2: reserved byte(s) */ struct ionic_lif_init_cmd { u8 opcode; @@ -572,7 +599,9 @@ struct ionic_lif_init_cmd { /** * struct ionic_lif_init_comp - LIF init command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @hw_index: Hardware index of the initialized LIF + * @rsvd2: reserved byte(s) */ struct ionic_lif_init_comp { u8 status; @@ -584,9 +613,11 @@ struct ionic_lif_init_comp { /** * struct ionic_q_identify_cmd - queue identify command * @opcode: opcode + * @rsvd: reserved byte(s) * @lif_type: LIF type (enum ionic_lif_type) * @type: Logical queue type (enum ionic_logical_qtype) * @ver: Highest queue type version that the driver supports + * @rsvd2: reserved byte(s) */ struct ionic_q_identify_cmd { u8 opcode; @@ -600,8 +631,10 @@ struct ionic_q_identify_cmd { /** * struct ionic_q_identify_comp - queue identify command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @comp_index: Index in the descriptor ring for which this is the completion * @ver: Queue type version that can be used with FW + * @rsvd2: reserved byte(s) */ struct ionic_q_identify_comp { u8 status; @@ -615,12 +648,14 @@ struct ionic_q_identify_comp { * union ionic_q_identity - queue identity information * @version: Queue type version that can be used with FW * @supported: Bitfield of queue versions, first bit = ver 0 + * @rsvd: reserved byte(s) * @features: Queue features (enum ionic_q_feature, etc) * @desc_sz: Descriptor size * @comp_sz: Completion descriptor size * @sg_desc_sz: Scatter/Gather descriptor size * @max_sg_elems: 
Maximum number of Scatter/Gather elements * @sg_desc_stride: Number of Scatter/Gather elements per descriptor + * @words: word access to struct contents */ union ionic_q_identity { struct { @@ -640,8 +675,10 @@ union ionic_q_identity { /** * struct ionic_q_init_cmd - Queue init command * @opcode: opcode + * @rsvd: reserved byte(s) * @type: Logical queue type * @ver: Queue type version + * @rsvd1: reserved byte(s) * @lif_index: LIF index * @index: (LIF, qtype) relative admin queue index * @intr_index: Interrupt control register index, or Event queue index @@ -667,6 +704,7 @@ union ionic_q_identity { * @ring_base: Queue ring base address * @cq_ring_base: Completion queue ring base address * @sg_ring_base: Scatter/Gather ring base address + * @rsvd2: reserved byte(s) * @features: Mask of queue features to enable, if not in the flags above. */ struct ionic_q_init_cmd { @@ -698,9 +736,11 @@ struct ionic_q_init_cmd { /** * struct ionic_q_init_comp - Queue init command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @comp_index: Index in the descriptor ring for which this is the completion * @hw_index: Hardware Queue ID * @hw_type: Hardware Queue type + * @rsvd2: reserved byte(s) * @color: Color */ struct ionic_q_init_comp { @@ -800,7 +840,7 @@ enum ionic_txq_desc_opcode { * will set CWR flag in the first segment if * CWR is set in the template header, and * clear CWR in remaining segments. - * @flags: + * flags: * vlan: * Insert an L2 VLAN header using @vlan_tci * encap: @@ -813,13 +853,14 @@ enum ionic_txq_desc_opcode { * TSO start * tso_eot: * TSO end - * @num_sg_elems: Number of scatter-gather elements in SG + * num_sg_elems: Number of scatter-gather elements in SG * descriptor - * @addr: First data buffer's DMA address + * addr: First data buffer's DMA address * (Subsequent data buffers are on txq_sg_desc) * @len: First data buffer's length, in bytes * @vlan_tci: VLAN tag to insert in the packet (if requested * by @V-bit). Includes .1p and .1q tags + * @hword0: half word padding * @hdr_len: Length of packet headers, including * encapsulating outer header, if applicable * Valid for opcodes IONIC_TXQ_DESC_OPCODE_CALC_CSUM and @@ -830,10 +871,12 @@ enum ionic_txq_desc_opcode { * IONIC_TXQ_DESC_OPCODE_TSO, @hdr_len is up to * inner-most L4 payload, so inclusive of * inner-most L4 header. + * @hword1: half word padding * @mss: Desired MSS value for TSO; only applicable for * IONIC_TXQ_DESC_OPCODE_TSO * @csum_start: Offset from packet to first byte checked in L4 checksum * @csum_offset: Offset from csum_start to L4 checksum field + * @hword2: half word padding */ struct ionic_txq_desc { __le64 cmd; @@ -901,6 +944,7 @@ static inline void decode_txq_desc_cmd(u64 cmd, u8 *opcode, u8 *flags, * struct ionic_txq_sg_elem - Transmit scatter-gather (SG) descriptor element * @addr: DMA address of SG element data buffer * @len: Length of SG element data buffer, in bytes + * @rsvd: reserved byte(s) */ struct ionic_txq_sg_elem { __le64 addr; @@ -927,7 +971,9 @@ struct ionic_txq_sg_desc_v1 { /** * struct ionic_txq_comp - Ethernet transmit queue completion descriptor * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @comp_index: Index in the descriptor ring for which this is the completion + * @rsvd2: reserved byte(s) * @color: Color bit */ struct ionic_txq_comp { @@ -953,6 +999,7 @@ enum ionic_rxq_desc_opcode { * receive, including actual bytes received, * are recorded in Rx completion descriptor. 
* + * @rsvd: reserved byte(s) * @len: Data buffer's length, in bytes * @addr: Data buffer's DMA address */ @@ -967,6 +1014,7 @@ struct ionic_rxq_desc { * struct ionic_rxq_sg_elem - Receive scatter-gather (SG) descriptor element * @addr: DMA address of SG element data buffer * @len: Length of SG element data buffer, in bytes + * @rsvd: reserved byte(s) */ struct ionic_rxq_sg_elem { __le64 addr; @@ -1170,6 +1218,7 @@ enum ionic_pkt_class { * @lif_index: LIF index * @index: Queue index * @oper: Operation (enum ionic_q_control_oper) + * @rsvd: reserved byte(s) */ struct ionic_q_control_cmd { u8 opcode; @@ -1182,7 +1231,7 @@ struct ionic_q_control_cmd { typedef struct ionic_admin_comp ionic_q_control_comp; -enum q_control_oper { +enum ionic_q_control_oper { IONIC_Q_DISABLE = 0, IONIC_Q_ENABLE = 1, IONIC_Q_HANG_RESET = 2, @@ -1216,7 +1265,7 @@ enum ionic_xcvr_state { IONIC_XCVR_STATE_SPROM_READ_ERR = 4, }; -/** +/* * enum ionic_xcvr_pid - Supported link modes */ enum ionic_xcvr_pid { @@ -1351,6 +1400,7 @@ struct ionic_xcvr_status { * @fec_type: fec type (enum ionic_port_fec_type) * @pause_type: pause type (enum ionic_port_pause_type) * @loopback_mode: loopback mode (enum ionic_port_loopback_mode) + * @words: word access to struct contents */ union ionic_port_config { struct { @@ -1382,6 +1432,7 @@ union ionic_port_config { * @speed: link speed (in Mbps) * @link_down_count: number of times link went from up to down * @fec_type: fec type (enum ionic_port_fec_type) + * @rsvd: reserved byte(s) * @xcvr: transceiver status */ struct ionic_port_status { @@ -1399,6 +1450,7 @@ struct ionic_port_status { * @opcode: opcode * @index: port index * @ver: Highest version of identify supported by driver + * @rsvd: reserved byte(s) */ struct ionic_port_identify_cmd { u8 opcode; @@ -1411,6 +1463,7 @@ struct ionic_port_identify_cmd { * struct ionic_port_identify_comp - Port identify command completion * @status: Status of the command (enum ionic_status_code) * @ver: Version of identify returned by device + * @rsvd: reserved byte(s) */ struct ionic_port_identify_comp { u8 status; @@ -1422,7 +1475,9 @@ struct ionic_port_identify_comp { * struct ionic_port_init_cmd - Port initialization command * @opcode: opcode * @index: port index + * @rsvd: reserved byte(s) * @info_pa: destination address for port info (struct ionic_port_info) + * @rsvd2: reserved byte(s) */ struct ionic_port_init_cmd { u8 opcode; @@ -1435,6 +1490,7 @@ struct ionic_port_init_cmd { /** * struct ionic_port_init_comp - Port initialization command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) */ struct ionic_port_init_comp { u8 status; @@ -1445,6 +1501,7 @@ struct ionic_port_init_comp { * struct ionic_port_reset_cmd - Port reset command * @opcode: opcode * @index: port index + * @rsvd: reserved byte(s) */ struct ionic_port_reset_cmd { u8 opcode; @@ -1455,6 +1512,7 @@ struct ionic_port_reset_cmd { /** * struct ionic_port_reset_comp - Port reset command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) */ struct ionic_port_reset_comp { u8 status; @@ -1510,6 +1568,7 @@ enum ionic_port_attr { * @opcode: Opcode * @index: Port index * @attr: Attribute type (enum ionic_port_attr) + * @rsvd: reserved byte(s) * @state: Port state * @speed: Port speed * @mtu: Port MTU @@ -1518,6 +1577,7 @@ enum ionic_port_attr { * @pause_type: Port pause type setting * @loopback_mode: Port loopback mode * @stats_ctl: Port stats setting + * @rsvd2: reserved byte(s) */ struct 
ionic_port_setattr_cmd { u8 opcode; @@ -1540,6 +1600,7 @@ struct ionic_port_setattr_cmd { /** * struct ionic_port_setattr_comp - Port set attr command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @color: Color bit */ struct ionic_port_setattr_comp { @@ -1553,6 +1614,7 @@ struct ionic_port_setattr_comp { * @opcode: Opcode * @index: port index * @attr: Attribute type (enum ionic_port_attr) + * @rsvd: reserved byte(s) */ struct ionic_port_getattr_cmd { u8 opcode; @@ -1564,6 +1626,7 @@ struct ionic_port_getattr_cmd { /** * struct ionic_port_getattr_comp - Port get attr command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @state: Port state * @speed: Port speed * @mtu: Port MTU @@ -1571,6 +1634,7 @@ struct ionic_port_getattr_cmd { * @fec_type: Port FEC type setting * @pause_type: Port pause type setting * @loopback_mode: Port loopback mode + * @rsvd2: reserved byte(s) * @color: Color bit */ struct ionic_port_getattr_comp { @@ -1593,9 +1657,11 @@ struct ionic_port_getattr_comp { * struct ionic_lif_status - LIF status register * @eid: most recent NotifyQ event id * @port_num: port the LIF is connected to + * @rsvd: reserved byte(s) * @link_status: port status (enum ionic_port_oper_status) * @link_speed: speed of link in Mbps * @link_down_count: number of times link went from up to down + * @rsvd2: reserved byte(s) */ struct ionic_lif_status { __le64 eid; @@ -1610,7 +1676,9 @@ struct ionic_lif_status { /** * struct ionic_lif_reset_cmd - LIF reset command * @opcode: opcode + * @rsvd: reserved byte(s) * @index: LIF index + * @rsvd2: reserved byte(s) */ struct ionic_lif_reset_cmd { u8 opcode; @@ -1643,9 +1711,11 @@ enum ionic_dev_attr { * struct ionic_dev_setattr_cmd - Set Device attributes on the NIC * @opcode: Opcode * @attr: Attribute type (enum ionic_dev_attr) + * @rsvd: reserved byte(s) * @state: Device state (enum ionic_dev_state) * @name: The bus info, e.g. 
PCI slot-device-function, 0 terminated * @features: Device features + * @rsvd2: reserved byte(s) */ struct ionic_dev_setattr_cmd { u8 opcode; @@ -1662,7 +1732,9 @@ /** * struct ionic_dev_setattr_comp - Device set attr command completion + * @rsvd: reserved byte(s) * @features: Device features + * @rsvd2: reserved byte(s) * @color: Color bit */ struct ionic_dev_setattr_comp { @@ -1679,6 +1751,7 @@ * struct ionic_dev_getattr_cmd - Get Device attributes from the NIC * @opcode: opcode * @attr: Attribute type (enum ionic_dev_attr) + * @rsvd: reserved byte(s) */ struct ionic_dev_getattr_cmd { u8 opcode; @@ -1687,9 +1760,11 @@ }; /** - * struct ionic_dev_setattr_comp - Device set attr command completion + * struct ionic_dev_getattr_comp - Device get attr command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @features: Device features + * @rsvd2: reserved byte(s) * @color: Color bit */ struct ionic_dev_getattr_comp { @@ -1702,7 +1777,7 @@ struct ionic_dev_getattr_comp { u8 color; }; -/** +/* * RSS parameters */ #define IONIC_RSS_HASH_KEY_SIZE 40 @@ -1726,6 +1801,7 @@ enum ionic_rss_hash_types { * @IONIC_LIF_ATTR_RSS: LIF RSS attribute * @IONIC_LIF_ATTR_STATS_CTRL: LIF statistics control attribute * @IONIC_LIF_ATTR_TXSTAMP: LIF TX timestamping mode + * @IONIC_LIF_ATTR_MAX: maximum attribute value */ enum ionic_lif_attr { IONIC_LIF_ATTR_STATE = 0, @@ -1736,6 +1812,7 @@ enum ionic_lif_attr { IONIC_LIF_ATTR_RSS = 5, IONIC_LIF_ATTR_STATS_CTRL = 6, IONIC_LIF_ATTR_TXSTAMP = 7, + IONIC_LIF_ATTR_MAX = 255, }; /** @@ -1749,11 +1826,13 @@ enum ionic_lif_attr { * @mac: Station mac * @features: Features (enum ionic_eth_hw_features) * @rss: RSS properties - * @types: The hash types to enable (see rss_hash_types) - * @key: The hash secret key - * @addr: Address for the indirection table shared memory + * @rss.types: The hash types to enable (see rss_hash_types) + * @rss.key: The hash secret key + * @rss.rsvd: reserved byte(s) + * @rss.addr: Address for the indirection table shared memory * @stats_ctl: stats control commands (enum ionic_stats_ctl_cmd) - * @txstamp: TX Timestamping Mode (enum ionic_txstamp_mode) + * @txstamp_mode: TX Timestamping Mode (enum ionic_txstamp_mode) + * @rsvd: reserved byte(s) */ struct ionic_lif_setattr_cmd { u8 opcode; @@ -1772,7 +1851,7 @@ struct ionic_lif_setattr_cmd { __le64 addr; } rss; u8 stats_ctl; - __le16 txstamp_mode; + __le16 txstamp_mode; u8 rsvd[60]; } __packed; }; @@ -1780,8 +1859,10 @@ struct ionic_lif_setattr_cmd { /** * struct ionic_lif_setattr_comp - LIF set attr command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @comp_index: Index in the descriptor ring for which this is the completion * @features: features (enum ionic_eth_hw_features) + * @rsvd2: reserved byte(s) * @color: Color bit */ struct ionic_lif_setattr_comp { @@ -1800,6 +1881,7 @@ struct ionic_lif_setattr_comp { * @opcode: Opcode * @attr: Attribute type (enum ionic_lif_attr) * @index: LIF index + * @rsvd: reserved byte(s) */ struct ionic_lif_getattr_cmd { u8 opcode; @@ -1811,13 +1893,14 @@ struct ionic_lif_getattr_cmd { /** * struct ionic_lif_getattr_comp - LIF get attr command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @comp_index: Index in the descriptor ring for which this is the completion * 
@state: LIF state (enum ionic_lif_state) - * @name: The netdev name string, 0 terminated * @mtu: Mtu * @mac: Station mac * @features: Features (enum ionic_eth_hw_features) - * @txstamp: TX Timestamping Mode (enum ionic_txstamp_mode) + * @txstamp_mode: TX Timestamping Mode (enum ionic_txstamp_mode) + * @rsvd2: reserved byte(s) * @color: Color bit */ struct ionic_lif_getattr_comp { @@ -1838,12 +1921,15 @@ struct ionic_lif_getattr_comp { /** * struct ionic_lif_setphc_cmd - Set LIF PTP Hardware Clock * @opcode: Opcode + * @rsvd1: reserved byte(s) * @lif_index: LIF index + * @rsvd2: reserved byte(s) * @tick: Hardware stamp tick of an instant in time. * @nsec: Nanosecond stamp of the same instant. * @frac: Fractional nanoseconds at the same instant. * @mult: Cycle to nanosecond multiplier. * @shift: Cycle to nanosecond divisor (power of two). + * @rsvd3: reserved byte(s) */ struct ionic_lif_setphc_cmd { u8 opcode; @@ -1870,6 +1956,7 @@ enum ionic_rx_mode { /** * struct ionic_rx_mode_set_cmd - Set LIF's Rx mode command * @opcode: opcode + * @rsvd: reserved byte(s) * @lif_index: LIF index * @rx_mode: Rx mode flags: * IONIC_RX_MODE_F_UNICAST: Accept known unicast packets @@ -1878,6 +1965,7 @@ enum ionic_rx_mode { * IONIC_RX_MODE_F_PROMISC: Accept any packets * IONIC_RX_MODE_F_ALLMULTI: Accept any multicast packets * IONIC_RX_MODE_F_RDMA_SNIFFER: Sniff RDMA packets + * @rsvd2: reserved byte(s) */ struct ionic_rx_mode_set_cmd { u8 opcode; @@ -1904,13 +1992,14 @@ enum ionic_rx_filter_match_type { * @qid: Queue ID * @match: Rx filter match type (see IONIC_RX_FILTER_MATCH_xxx) * @vlan: VLAN filter - * @vlan: VLAN ID + * @vlan.vlan: VLAN ID * @mac: MAC filter - * @addr: MAC address (network-byte order) + * @mac.addr: MAC address (network-byte order) * @mac_vlan: MACVLAN filter - * @vlan: VLAN ID - * @addr: MAC address (network-byte order) + * @mac_vlan.vlan: VLAN ID + * @mac_vlan.addr: MAC address (network-byte order) * @pkt_class: Packet classification filter + * @rsvd: reserved byte(s) */ struct ionic_rx_filter_add_cmd { u8 opcode; @@ -1937,8 +2026,10 @@ struct ionic_rx_filter_add_cmd { /** * struct ionic_rx_filter_add_comp - Add LIF Rx filter command completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @comp_index: Index in the descriptor ring for which this is the completion * @filter_id: Filter ID + * @rsvd2: reserved byte(s) * @color: Color bit */ struct ionic_rx_filter_add_comp { @@ -1953,8 +2044,10 @@ struct ionic_rx_filter_add_comp { /** * struct ionic_rx_filter_del_cmd - Delete LIF Rx filter command * @opcode: opcode + * @rsvd: reserved byte(s) * @lif_index: LIF index * @filter_id: Filter ID + * @rsvd2: reserved byte(s) */ struct ionic_rx_filter_del_cmd { u8 opcode; @@ -2000,6 +2093,7 @@ enum ionic_vf_link_status { * @trust: enable VF trust * @linkstate: set link up or down * @stats_pa: set DMA address for VF stats + * @pad: reserved byte(s) */ struct ionic_vf_setattr_cmd { u8 opcode; @@ -2031,6 +2125,7 @@ struct ionic_vf_setattr_comp { * @opcode: Opcode * @attr: Attribute type (enum ionic_vf_attr) * @vf_index: VF index + * @rsvd: reserved byte(s) */ struct ionic_vf_getattr_cmd { u8 opcode; @@ -2064,8 +2159,8 @@ enum ionic_vf_ctrl_opcode { /** * struct ionic_vf_ctrl_cmd - VF control command * @opcode: Opcode for the command - * @vf_index: VF Index. It is unused if op START_ALL is used. * @ctrl_opcode: VF control operation type + * @vf_index: VF Index. It is unused if op START_ALL is used. 
*/ struct ionic_vf_ctrl_cmd { u8 opcode; @@ -2089,7 +2184,7 @@ struct ionic_vf_ctrl_comp { * struct ionic_qos_identify_cmd - QoS identify command * @opcode: opcode * @ver: Highest version of identify supported by driver - * + * @rsvd: reserved byte(s) */ struct ionic_qos_identify_cmd { u8 opcode; @@ -2101,6 +2196,7 @@ * struct ionic_qos_identify_comp - QoS identify command completion * @status: Status of the command (enum ionic_status_code) * @ver: Version of identify returned by device + * @rsvd: reserved byte(s) */ struct ionic_qos_identify_comp { u8 status; @@ -2118,7 +2214,7 @@ struct ionic_qos_identify_comp { #define IONIC_QOS_ALL_PCP 0xFF #define IONIC_DSCP_BLOCK_SIZE 8 -/** +/* * enum ionic_qos_class */ enum ionic_qos_class { @@ -2174,6 +2270,7 @@ enum ionic_qos_sched_type { * @dot1q_pcp: Dot1q pcp value * @ndscp: Number of valid dscp values in the ip_dscp field * @ip_dscp: IP dscp values + * @words: word access to struct contents */ union ionic_qos_config { struct { @@ -2219,8 +2316,9 @@ union ionic_qos_config { * union ionic_qos_identity - QoS identity structure * @version: Version of the identify structure * @type: QoS system type - * @nclasses: Number of usable QoS classes + * @rsvd: reserved byte(s) * @config: Current configuration of classes + * @words: word access to struct contents */ union ionic_qos_identity { struct { @@ -2236,7 +2334,9 @@ * struct ionic_qos_init_cmd - QoS config init command * @opcode: Opcode * @group: QoS class id + * @rsvd: reserved byte(s) * @info_pa: destination address for qos info + * @rsvd1: reserved byte(s) */ struct ionic_qos_init_cmd { u8 opcode; @@ -2252,6 +2352,7 @@ typedef struct ionic_admin_comp ionic_qos_init_comp; * struct ionic_qos_reset_cmd - QoS config reset command * @opcode: Opcode * @group: QoS class id + * @rsvd: reserved byte(s) */ struct ionic_qos_reset_cmd { u8 opcode; @@ -2260,8 +2361,10 @@ }; /** - * struct ionic_qos_clear_port_stats_cmd - Qos config reset command + * struct ionic_qos_clear_stats_cmd - QoS config reset command * @opcode: Opcode + * @group_bitmap: bitmap of groups to be cleared + * @rsvd: reserved byte(s) */ struct ionic_qos_clear_stats_cmd { u8 opcode; @@ -2274,6 +2377,7 @@ typedef struct ionic_admin_comp ionic_qos_reset_comp; /** * struct ionic_fw_download_cmd - Firmware download command * @opcode: opcode + * @rsvd: reserved byte(s) * @addr: dma address of the firmware buffer * @offset: offset of the firmware buffer within the full image * @length: number of valid bytes in the firmware buffer @@ -2297,6 +2401,7 @@ typedef struct ionic_admin_comp ionic_fw_download_comp; * @IONIC_FW_INSTALL_STATUS: Firmware installation status * @IONIC_FW_ACTIVATE_ASYNC: Activate firmware asynchronously * @IONIC_FW_ACTIVATE_STATUS: Firmware activate status + * @IONIC_FW_UPDATE_CLEANUP: Clean up after an interrupted fw update */ enum ionic_fw_control_oper { IONIC_FW_RESET = 0, @@ -2312,8 +2417,10 @@ /** * struct ionic_fw_control_cmd - Firmware control command * @opcode: opcode + * @rsvd: reserved byte(s) * @oper: firmware control operation (enum ionic_fw_control_oper) * @slot: slot to activate + * @rsvd1: reserved byte(s) */ struct ionic_fw_control_cmd { u8 opcode; @@ -2326,8 +2433,10 @@ /** * struct ionic_fw_control_comp - Firmware control completion * @status: Status of the command (enum ionic_status_code) + * @rsvd: reserved byte(s) * @comp_index: Index in the descriptor ring for 
which this is the completion * @slot: Slot where the firmware was installed + * @rsvd1: reserved byte(s) * @color: Color bit */ struct ionic_fw_control_comp { @@ -2346,7 +2455,9 @@ struct ionic_fw_control_comp { /** * struct ionic_rdma_reset_cmd - Reset RDMA LIF cmd * @opcode: opcode + * @rsvd: reserved byte(s) * @lif_index: LIF index + * @rsvd2: reserved byte(s) * * There is no RDMA specific dev command completion struct. Completion uses * the common struct ionic_admin_comp. Only the status is indicated. @@ -2362,6 +2473,7 @@ struct ionic_rdma_reset_cmd { /** * struct ionic_rdma_queue_cmd - Create RDMA Queue command * @opcode: opcode, 52, 53 + * @rsvd: reserved byte(s) * @lif_index: LIF index * @qid_ver: (qid | (RDMA version << 24)) * @cid: intr, eq_id, or cq_id @@ -2369,6 +2481,7 @@ struct ionic_rdma_reset_cmd { * @depth_log2: log base two of queue depth * @stride_log2: log base two of queue stride * @dma_addr: address of the queue memory + * @rsvd2: reserved byte(s) * * The same command struct is used to create an RDMA event queue, completion * queue, or RDMA admin queue. The cid is an interrupt number for an event @@ -2425,6 +2538,7 @@ struct ionic_notifyq_event { * @ecode: event code = IONIC_EVENT_LINK_CHANGE * @link_status: link up/down, with error bits (enum ionic_port_status) * @link_speed: speed of the network link + * @rsvd: reserved byte(s) * * Sent when the network link state changes between UP and DOWN */ @@ -2442,6 +2556,7 @@ struct ionic_link_change_event { * @ecode: event code = IONIC_EVENT_RESET * @reset_code: reset type * @state: 0=pending, 1=complete, 2=error + * @rsvd: reserved byte(s) * * Sent when the NIC or some subsystem is going to be or * has been reset. @@ -2458,6 +2573,7 @@ struct ionic_reset_event { * struct ionic_heartbeat_event - Sent periodically by NIC to indicate health * @eid: event number * @ecode: event code = IONIC_EVENT_HEARTBEAT + * @rsvd: reserved byte(s) */ struct ionic_heartbeat_event { __le64 eid; @@ -2481,6 +2597,7 @@ struct ionic_log_event { * struct ionic_xcvr_event - Transceiver change event * @eid: event number * @ecode: event code = IONIC_EVENT_XCVR + * @rsvd: reserved byte(s) */ struct ionic_xcvr_event { __le64 eid; @@ -2488,7 +2605,7 @@ struct ionic_xcvr_event { u8 rsvd[54]; }; -/** +/* * struct ionic_port_stats - Port statistics structure */ struct ionic_port_stats { @@ -2646,8 +2763,7 @@ enum ionic_oflow_drop_stats { IONIC_OFLOW_DROP_MAX, }; -/** - * struct port_pb_stats - packet buffers system stats +/* struct ionic_port_pb_stats - packet buffers system stats * uses ionic_pb_buffer_drop_stats for drop_counts[] */ struct ionic_port_pb_stats { @@ -2681,7 +2797,9 @@ struct ionic_port_pb_stats { * @pause_type: supported pause types * @loopback_mode: supported loopback mode * @speeds: supported speeds + * @rsvd2: reserved byte(s) * @config: current port configuration + * @words: word access to struct contents */ union ionic_port_identity { struct { @@ -2707,7 +2825,8 @@ union ionic_port_identity { * @status: Port status data * @stats: Port statistics data * @mgmt_stats: Port management statistics data - * @port_pb_drop_stats: uplink pb drop stats + * @rsvd: reserved byte(s) + * @pb_stats: uplink pb drop stats */ struct ionic_port_info { union ionic_port_config config; @@ -2721,7 +2840,7 @@ struct ionic_port_info { struct ionic_port_pb_stats pb_stats; }; -/** +/* * struct ionic_lif_stats - LIF statistics structure */ struct ionic_lif_stats { @@ -2983,8 +3102,10 @@ struct ionic_hwstamp_regs { * bit 4-7 - 4 bit generation number, changes on fw 
restart * @fw_heartbeat: Firmware heartbeat counter * @serial_num: Serial number + * @rsvd_pad1024: reserved byte(s) * @fw_version: Firmware version - * @hwstamp_regs: Hardware current timestamp registers + * @hwstamp: Hardware current timestamp registers + * @words: word access to struct contents */ union ionic_dev_info_regs { #define IONIC_DEVINFO_FWVERS_BUFLEN 32 @@ -3014,7 +3135,9 @@ union ionic_dev_info_regs { * @done: Done indicator, bit 0 == 1 when command is complete * @cmd: Opcode-specific command bytes * @comp: Opcode-specific response bytes + * @rsvd: reserved byte(s) * @data: Opcode-specific side-data + * @words: word access to struct contents */ union ionic_dev_cmd_regs { struct { @@ -3032,6 +3155,7 @@ union ionic_dev_cmd_regs { * union ionic_dev_regs - Device register format for bar 0 page 0 * @info: Device info registers * @devcmd: Device command registers + * @words: word access to struct contents */ union ionic_dev_regs { struct { @@ -3098,6 +3222,7 @@ union ionic_adminq_comp { * interrupts when armed. * @qid_lo: Queue destination for the producer index and flags (low bits) * @qid_hi: Queue destination for the producer index and flags (high bits) + * @rsvd2: reserved byte(s) */ struct ionic_doorbell { __le16 p_index; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 1837a30ba08a..aa0cc31dfe6e 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -126,13 +126,13 @@ static void ionic_lif_deferred_work(struct work_struct *work) } while (true); } -void ionic_lif_deferred_enqueue(struct ionic_deferred *def, +void ionic_lif_deferred_enqueue(struct ionic_lif *lif, struct ionic_deferred_work *work) { - spin_lock_bh(&def->lock); - list_add_tail(&work->list, &def->list); - spin_unlock_bh(&def->lock); - schedule_work(&def->work); + spin_lock_bh(&lif->deferred.lock); + list_add_tail(&work->list, &lif->deferred.list); + spin_unlock_bh(&lif->deferred.lock); + queue_work(lif->ionic->wq, &lif->deferred.work); } static void ionic_link_status_check(struct ionic_lif *lif) @@ -207,19 +207,12 @@ void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep) } work->type = IONIC_DW_TYPE_LINK_STATUS; - ionic_lif_deferred_enqueue(&lif->deferred, work); + ionic_lif_deferred_enqueue(lif, work); } else { ionic_link_status_check(lif); } } -static void ionic_napi_deadline(struct timer_list *timer) -{ - struct ionic_qcq *qcq = container_of(timer, struct ionic_qcq, napi_deadline); - - napi_schedule(&qcq->napi); -} - static irqreturn_t ionic_isr(int irq, void *data) { struct napi_struct *napi = data; @@ -237,12 +230,12 @@ static int ionic_request_irq(struct ionic_lif *lif, struct ionic_qcq *qcq) const char *name; if (lif->registered) - name = lif->netdev->name; + name = netdev_name(lif->netdev); else name = dev_name(dev); snprintf(intr->name, sizeof(intr->name), - "%s-%s-%s", IONIC_DRV_NAME, name, q->name); + "%.5s-%.16s-%.8s", IONIC_DRV_NAME, name, q->name); return devm_request_irq(dev, intr->vector, ionic_isr, 0, intr->name, &qcq->napi); @@ -272,6 +265,18 @@ static void ionic_intr_free(struct ionic *ionic, int index) clear_bit(index, ionic->intrs); } +static void ionic_irq_aff_notify(struct irq_affinity_notify *notify, + const cpumask_t *mask) +{ + struct ionic_intr_info *intr = container_of(notify, struct ionic_intr_info, aff_notify); + + cpumask_copy(*intr->affinity_mask, mask); +} + +static void ionic_irq_aff_release(struct kref __always_unused *ref) +{ +} + 
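/*
 * [Editor's aside, illustrative only, not part of the posted patch] The
 * empty release callback above is deliberate: the cpumask being copied
 * into lives in the ionic structures and outlives the notifier, so there
 * is nothing to free when the IRQ core drops its reference. A minimal
 * sketch of how such an affinity-notifier pair is typically wired up;
 * all foo_* names below are hypothetical:
 */
#if 0	/* sketch, not compiled as part of this patch */
#include <linux/interrupt.h>
#include <linux/cpumask.h>

struct foo_intr {
	struct irq_affinity_notify aff_notify;
	cpumask_var_t *affinity_mask;	/* points at a long-lived mask */
};

static void foo_irq_aff_notify(struct irq_affinity_notify *notify,
			       const cpumask_t *mask)
{
	struct foo_intr *fi = container_of(notify, struct foo_intr,
					   aff_notify);

	/* Remember where user space moved the IRQ so the affinity hint
	 * can be restored with the same mask after a queue restart.
	 */
	cpumask_copy(*fi->affinity_mask, mask);
}

static void foo_irq_aff_release(struct kref __always_unused *ref)
{
	/* The mask is owned elsewhere; nothing to free here */
}

static int foo_irq_start(struct foo_intr *fi, unsigned int vector)
{
	fi->aff_notify.notify = foo_irq_aff_notify;
	fi->aff_notify.release = foo_irq_aff_release;
	/* Unregister later with irq_set_affinity_notifier(vector, NULL) */
	return irq_set_affinity_notifier(vector, &fi->aff_notify);
}
#endif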
static int ionic_qcq_enable(struct ionic_qcq *qcq) { struct ionic_queue *q = &qcq->q; @@ -306,8 +311,10 @@ static int ionic_qcq_enable(struct ionic_qcq *qcq) if (qcq->flags & IONIC_QCQ_F_INTR) { napi_enable(&qcq->napi); + irq_set_affinity_notifier(qcq->intr.vector, + &qcq->intr.aff_notify); irq_set_affinity_hint(qcq->intr.vector, - &qcq->intr.affinity_mask); + *qcq->intr.affinity_mask); ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, IONIC_INTR_MASK_CLEAR); } @@ -337,13 +344,15 @@ static int ionic_qcq_disable(struct ionic_lif *lif, struct ionic_qcq *qcq, int f if (qcq->flags & IONIC_QCQ_F_INTR) { struct ionic_dev *idev = &lif->ionic->idev; + if (lif->doorbell_wa) + cancel_work_sync(&qcq->doorbell_napi_work); cancel_work_sync(&qcq->dim.work); ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, IONIC_INTR_MASK_SET); synchronize_irq(qcq->intr.vector); + irq_set_affinity_notifier(qcq->intr.vector, NULL); irq_set_affinity_hint(qcq->intr.vector, NULL); napi_disable(&qcq->napi); - del_timer_sync(&qcq->napi_deadline); } /* If there was a previous fw communication error, don't bother with @@ -478,11 +487,11 @@ static void ionic_link_qcq_interrupts(struct ionic_qcq *src_qcq, { n_qcq->intr.vector = src_qcq->intr.vector; n_qcq->intr.index = src_qcq->intr.index; - n_qcq->napi_qcq = src_qcq->napi_qcq; } static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qcq) { + cpumask_var_t *affinity_mask; int err; if (!(qcq->flags & IONIC_QCQ_F_INTR)) { @@ -514,10 +523,19 @@ static int ionic_alloc_qcq_interrupt(struct ionic_lif *lif, struct ionic_qcq *qc } /* try to get the irq on the local numa node first */ - qcq->intr.cpu = cpumask_local_spread(qcq->intr.index, - dev_to_node(lif->ionic->dev)); - if (qcq->intr.cpu != -1) - cpumask_set_cpu(qcq->intr.cpu, &qcq->intr.affinity_mask); + affinity_mask = &lif->ionic->affinity_masks[qcq->intr.index]; + if (cpumask_empty(*affinity_mask)) { + unsigned int cpu; + + cpu = cpumask_local_spread(qcq->intr.index, + dev_to_node(lif->ionic->dev)); + if (cpu != -1) + cpumask_set_cpu(cpu, *affinity_mask); + } + + qcq->intr.affinity_mask = affinity_mask; + qcq->intr.aff_notify.notify = ionic_irq_aff_notify; + qcq->intr.aff_notify.release = ionic_irq_aff_release; netdev_dbg(lif->netdev, "%s: Interrupt index %d\n", qcq->q.name, qcq->intr.index); return 0; @@ -674,6 +692,8 @@ static int ionic_qcq_alloc(struct ionic_lif *lif, unsigned int type, INIT_WORK(&new->dim.work, ionic_dim_work); new->dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_CQE; + if (lif->doorbell_wa) + INIT_WORK(&new->doorbell_napi_work, ionic_doorbell_napi_work); *qcq = new; @@ -832,11 +852,8 @@ static int ionic_lif_txq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) q->dbell_deadline = IONIC_TX_DOORBELL_DEADLINE; q->dbell_jiffies = jiffies; - if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) { + if (test_bit(IONIC_LIF_F_SPLIT_INTR, lif->state)) netif_napi_add(lif->netdev, &qcq->napi, ionic_tx_napi); - qcq->napi_qcq = qcq; - timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0); - } qcq->flags |= IONIC_QCQ_F_INITED; @@ -909,9 +926,6 @@ static int ionic_lif_rxq_init(struct ionic_lif *lif, struct ionic_qcq *qcq) else netif_napi_add(lif->netdev, &qcq->napi, ionic_txrx_napi); - qcq->napi_qcq = qcq; - timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0); - qcq->flags |= IONIC_QCQ_F_INITED; return 0; @@ -1166,7 +1180,6 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget) struct ionic_dev *idev = &lif->ionic->idev; unsigned long irqflags; unsigned int flags = 0; - bool resched = 
false; int rx_work = 0; int tx_work = 0; int n_work = 0; @@ -1182,6 +1195,7 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget) if (lif->adminqcq && lif->adminqcq->flags & IONIC_QCQ_F_INITED) a_work = ionic_cq_service(&lif->adminqcq->cq, budget, ionic_adminq_service, NULL, NULL); + spin_unlock_irqrestore(&lif->adminq_lock, irqflags); if (lif->hwstamp_rxq) @@ -1203,15 +1217,14 @@ static int ionic_adminq_napi(struct napi_struct *napi, int budget) ionic_intr_credits(idev->intr_ctrl, intr->index, credits, flags); } - if (!a_work && ionic_adminq_poke_doorbell(&lif->adminqcq->q)) - resched = true; - if (lif->hwstamp_rxq && !rx_work && ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q)) - resched = true; - if (lif->hwstamp_txq && !tx_work && ionic_txq_poke_doorbell(&lif->hwstamp_txq->q)) - resched = true; - if (resched) - mod_timer(&lif->adminqcq->napi_deadline, - jiffies + IONIC_NAPI_DEADLINE); + if (lif->doorbell_wa) { + if (!a_work) + ionic_adminq_poke_doorbell(&lif->adminqcq->q); + if (lif->hwstamp_rxq && !rx_work) + ionic_rxq_poke_doorbell(&lif->hwstamp_rxq->q); + if (lif->hwstamp_txq && !tx_work) + ionic_txq_poke_doorbell(&lif->hwstamp_txq->q); + } return work_done; } @@ -1383,7 +1396,7 @@ static void ionic_ndo_set_rx_mode(struct net_device *netdev) } work->type = IONIC_DW_TYPE_RX_MODE; netdev_dbg(lif->netdev, "deferred: rx_mode\n"); - ionic_lif_deferred_enqueue(&lif->deferred, work); + ionic_lif_deferred_enqueue(lif, work); } static __le64 ionic_netdev_features_to_nic(netdev_features_t features) @@ -3139,6 +3152,44 @@ err_out: return err; } +static int ionic_affinity_masks_alloc(struct ionic *ionic) +{ + cpumask_var_t *affinity_masks; + int nintrs = ionic->nintrs; + int i; + + affinity_masks = kcalloc(nintrs, sizeof(cpumask_var_t), GFP_KERNEL); + if (!affinity_masks) + return -ENOMEM; + + for (i = 0; i < nintrs; i++) { + if (!zalloc_cpumask_var_node(&affinity_masks[i], GFP_KERNEL, + dev_to_node(ionic->dev))) + goto err_out; + } + + ionic->affinity_masks = affinity_masks; + + return 0; + +err_out: + for (--i; i >= 0; i--) + free_cpumask_var(affinity_masks[i]); + kfree(affinity_masks); + + return -ENOMEM; +} + +static void ionic_affinity_masks_free(struct ionic *ionic) +{ + int i; + + for (i = 0; i < ionic->nintrs; i++) + free_cpumask_var(ionic->affinity_masks[i]); + kfree(ionic->affinity_masks); + ionic->affinity_masks = NULL; +} + int ionic_lif_alloc(struct ionic *ionic) { struct device *dev = ionic->dev; @@ -3230,11 +3281,15 @@ int ionic_lif_alloc(struct ionic *ionic) ionic_debugfs_add_lif(lif); + err = ionic_affinity_masks_alloc(ionic); + if (err) + goto err_out_free_lif_info; + /* allocate control queues and txrx queue arrays */ ionic_lif_queue_identify(lif); err = ionic_qcqs_alloc(lif); if (err) - goto err_out_free_lif_info; + goto err_out_free_affinity_masks; /* allocate rss indirection table */ tbl_sz = le16_to_cpu(lif->ionic->ident.lif.eth.rss_ind_tbl_sz); @@ -3256,6 +3311,8 @@ int ionic_lif_alloc(struct ionic *ionic) err_out_free_qcqs: ionic_qcqs_free(lif); +err_out_free_affinity_masks: + ionic_affinity_masks_free(lif->ionic); err_out_free_lif_info: dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); lif->info = NULL; @@ -3356,6 +3413,7 @@ int ionic_restart_lif(struct ionic_lif *lif) clear_bit(IONIC_LIF_F_FW_RESET, lif->state); ionic_link_status_check_request(lif, CAN_SLEEP); netif_device_attach(lif->netdev); + ionic_queue_doorbell_check(ionic, IONIC_NAPI_DEADLINE); return 0; @@ -3386,6 +3444,7 @@ static void ionic_lif_handle_fw_up(struct ionic_lif *lif) * 
just need to reanimate it. */ ionic_init_devinfo(ionic); + ionic_reset(ionic); err = ionic_identify(ionic); if (err) goto err_out; @@ -3428,6 +3487,8 @@ void ionic_lif_free(struct ionic_lif *lif) if (!test_bit(IONIC_LIF_F_FW_RESET, lif->state)) ionic_lif_reset(lif); + ionic_affinity_masks_free(lif->ionic); + /* free lif info */ kfree(lif->identity); dma_free_coherent(dev, lif->info_sz, lif->info, lif->info_pa); @@ -3501,14 +3562,11 @@ static int ionic_lif_adminq_init(struct ionic_lif *lif) netif_napi_add(lif->netdev, &qcq->napi, ionic_adminq_napi); - qcq->napi_qcq = qcq; - timer_setup(&qcq->napi_deadline, ionic_napi_deadline, 0); - napi_enable(&qcq->napi); if (qcq->flags & IONIC_QCQ_F_INTR) { irq_set_affinity_hint(qcq->intr.vector, - &qcq->intr.affinity_mask); + *qcq->intr.affinity_mask); ionic_intr_mask(idev->intr_ctrl, qcq->intr.index, IONIC_INTR_MASK_CLEAR); } @@ -3695,6 +3753,7 @@ int ionic_lif_init(struct ionic_lif *lif) goto err_out_notifyq_deinit; lif->rx_copybreak = IONIC_RX_COPYBREAK_DEFAULT; + lif->doorbell_wa = ionic_doorbell_wa(lif->ionic); set_bit(IONIC_LIF_F_INITED, lif->state); @@ -3729,7 +3788,7 @@ static void ionic_lif_set_netdev_info(struct ionic_lif *lif) }, }; - strscpy(ctx.cmd.lif_setattr.name, lif->netdev->name, + strscpy(ctx.cmd.lif_setattr.name, netdev_name(lif->netdev), sizeof(ctx.cmd.lif_setattr.name)); ionic_adminq_post_wait(lif, &ctx); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 08f4266fe2aa..3e1005293c4a 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -84,12 +84,11 @@ struct ionic_qcq { u32 cmb_pgid; u32 cmb_order; struct dim dim; - struct timer_list napi_deadline; struct ionic_queue q; struct ionic_cq cq; struct napi_struct napi; - struct ionic_qcq *napi_qcq; struct ionic_intr_info intr; + struct work_struct doorbell_napi_work; struct dentry *dentry; }; @@ -207,11 +206,12 @@ struct ionic_lif { unsigned int nxqs; unsigned int ntxq_descs; unsigned int nrxq_descs; - u32 rx_copybreak; u64 rxq_features; - u16 rx_mode; u64 hw_features; + u16 rx_copybreak; + u16 rx_mode; bool registered; + bool doorbell_wa; u16 lif_type; unsigned int link_down_count; unsigned int nmcast; @@ -226,11 +226,11 @@ struct ionic_lif { u32 info_sz; struct ionic_qtype_info qtype_info[IONIC_QTYPE_MAX]; - u16 rss_types; u8 rss_hash_key[IONIC_RSS_HASH_KEY_SIZE]; u8 *rss_ind_tbl; dma_addr_t rss_ind_tbl_pa; u32 rss_ind_tbl_sz; + u16 rss_types; struct ionic_rx_filters rx_filters; u32 rx_coalesce_usecs; /* what the user asked for */ @@ -333,7 +333,7 @@ static inline bool ionic_txq_hwstamp_enabled(struct ionic_queue *q) void ionic_link_status_check_request(struct ionic_lif *lif, bool can_sleep); void ionic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *ns); -void ionic_lif_deferred_enqueue(struct ionic_deferred *def, +void ionic_lif_deferred_enqueue(struct ionic_lif *lif, struct ionic_deferred_work *work); int ionic_lif_alloc(struct ionic *ionic); int ionic_lif_init(struct ionic_lif *lif); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index c1259324b0be..0f817c3f92d8 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -287,7 +287,7 @@ bool ionic_notifyq_service(struct ionic_cq *cq) clear_bit(IONIC_LIF_F_FW_STOPPING, lif->state); } else { work->type = IONIC_DW_TYPE_LIF_RESET; - 
ionic_lif_deferred_enqueue(&lif->deferred, work); + ionic_lif_deferred_enqueue(lif, work); } } break; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 9fdd7cd3ef19..fc79baad4561 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -518,7 +518,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, XDP_PACKET_HEADROOM, frag_len, false); dma_sync_single_range_for_cpu(rxq->dev, ionic_rx_buf_pa(buf_info), - XDP_PACKET_HEADROOM, len, + XDP_PACKET_HEADROOM, frag_len, DMA_FROM_DEVICE); prefetchw(&xdp_buf.data_hard_start); @@ -596,7 +596,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, buf_info->page_offset, true); __netif_tx_unlock(nq); - if (err) { + if (unlikely(err)) { netdev_dbg(netdev, "tx ionic_xdp_post_frame err %d\n", err); goto out_xdp_abort; } @@ -608,7 +608,7 @@ static bool ionic_run_xdp(struct ionic_rx_stats *stats, case XDP_REDIRECT: err = xdp_do_redirect(netdev, &xdp_buf, xdp_prog); - if (err) { + if (unlikely(err)) { netdev_dbg(netdev, "xdp_do_redirect err %d\n", err); goto out_xdp_abort; } @@ -878,9 +878,6 @@ void ionic_rx_fill(struct ionic_queue *q) q->dbell_deadline = IONIC_RX_MIN_DOORBELL_DEADLINE; q->dbell_jiffies = jiffies; - - mod_timer(&q_to_qcq(q)->napi_qcq->napi_deadline, - jiffies + IONIC_NAPI_DEADLINE); } void ionic_rx_empty(struct ionic_queue *q) @@ -963,8 +960,8 @@ int ionic_tx_napi(struct napi_struct *napi, int budget) work_done, flags); } - if (!work_done && ionic_txq_poke_doorbell(&qcq->q)) - mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE); + if (!work_done && cq->bound_q->lif->doorbell_wa) + ionic_txq_poke_doorbell(&qcq->q); return work_done; } @@ -1006,8 +1003,8 @@ int ionic_rx_napi(struct napi_struct *napi, int budget) work_done, flags); } - if (!work_done && ionic_rxq_poke_doorbell(&qcq->q)) - mod_timer(&qcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE); + if (!work_done && cq->bound_q->lif->doorbell_wa) + ionic_rxq_poke_doorbell(&qcq->q); return work_done; } @@ -1020,7 +1017,6 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) struct ionic_qcq *txqcq; struct ionic_lif *lif; struct ionic_cq *txcq; - bool resched = false; u32 rx_work_done = 0; u32 tx_work_done = 0; u32 flags = 0; @@ -1052,12 +1048,12 @@ int ionic_txrx_napi(struct napi_struct *napi, int budget) tx_work_done + rx_work_done, flags); } - if (!rx_work_done && ionic_rxq_poke_doorbell(&rxqcq->q)) - resched = true; - if (!tx_work_done && ionic_txq_poke_doorbell(&txqcq->q)) - resched = true; - if (resched) - mod_timer(&rxqcq->napi_deadline, jiffies + IONIC_NAPI_DEADLINE); + if (lif->doorbell_wa) { + if (!rx_work_done) + ionic_rxq_poke_doorbell(&rxqcq->q); + if (!tx_work_done) + ionic_txq_poke_doorbell(&txqcq->q); + } return rx_work_done; } @@ -1069,7 +1065,7 @@ static dma_addr_t ionic_tx_map_single(struct ionic_queue *q, dma_addr_t dma_addr; dma_addr = dma_map_single(dev, data, len, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma_addr)) { + if (unlikely(dma_mapping_error(dev, dma_addr))) { net_warn_ratelimited("%s: DMA single map failed on %s!\n", dev_name(dev), q->name); q_to_tx_stats(q)->dma_map_err++; @@ -1086,7 +1082,7 @@ static dma_addr_t ionic_tx_map_frag(struct ionic_queue *q, dma_addr_t dma_addr; dma_addr = skb_frag_dma_map(dev, frag, offset, len, DMA_TO_DEVICE); - if (dma_mapping_error(dev, dma_addr)) { + if (unlikely(dma_mapping_error(dev, dma_addr))) { net_warn_ratelimited("%s: DMA frag map failed on %s!\n", 
dev_name(dev), q->name); q_to_tx_stats(q)->dma_map_err++; @@ -1332,7 +1328,7 @@ static int ionic_tx_tcp_inner_pseudo_csum(struct sk_buff *skb) int err; err = skb_cow_head(skb, 0); - if (err) + if (unlikely(err)) return err; if (skb->protocol == cpu_to_be16(ETH_P_IP)) { @@ -1356,7 +1352,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb) int err; err = skb_cow_head(skb, 0); - if (err) + if (unlikely(err)) return err; if (skb->protocol == cpu_to_be16(ETH_P_IP)) { @@ -1373,7 +1369,7 @@ static int ionic_tx_tcp_pseudo_csum(struct sk_buff *skb) } static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q, - struct ionic_tx_desc_info *desc_info, + struct ionic_txq_desc *desc, struct sk_buff *skb, dma_addr_t addr, u8 nsge, u16 len, unsigned int hdrlen, unsigned int mss, @@ -1381,7 +1377,6 @@ static void ionic_tx_tso_post(struct net_device *netdev, struct ionic_queue *q, u16 vlan_tci, bool has_vlan, bool start, bool done) { - struct ionic_txq_desc *desc = &q->txq[q->head_idx]; u8 flags = 0; u64 cmd; @@ -1461,7 +1456,7 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q, err = ionic_tx_tcp_inner_pseudo_csum(skb); else err = ionic_tx_tcp_pseudo_csum(skb); - if (err) { + if (unlikely(err)) { /* clean up mapping from ionic_tx_map_skb */ ionic_tx_desc_unmap_bufs(q, desc_info); return err; @@ -1519,10 +1514,9 @@ static int ionic_tx_tso(struct net_device *netdev, struct ionic_queue *q, seg_rem = min(tso_rem, mss); done = (tso_rem == 0); /* post descriptor */ - ionic_tx_tso_post(netdev, q, desc_info, skb, - desc_addr, desc_nsge, desc_len, - hdrlen, mss, outer_csum, vlan_tci, has_vlan, - start, done); + ionic_tx_tso_post(netdev, q, desc, skb, desc_addr, desc_nsge, + desc_len, hdrlen, mss, outer_csum, vlan_tci, + has_vlan, start, done); start = false; /* Buffer information is stored with the first tso descriptor */ desc_info = &q->tx_info[q->head_idx]; @@ -1747,7 +1741,7 @@ static int ionic_tx_descs_needed(struct ionic_queue *q, struct sk_buff *skb) linearize: if (too_many_frags) { err = skb_linearize(skb); - if (err) + if (unlikely(err)) return err; q_to_tx_stats(q)->linearize++; } @@ -1781,7 +1775,7 @@ static netdev_tx_t ionic_start_hwstamp_xmit(struct sk_buff *skb, else err = ionic_tx(netdev, q, skb); - if (err) + if (unlikely(err)) goto err_out_drop; return NETDEV_TX_OK; @@ -1827,7 +1821,7 @@ netdev_tx_t ionic_start_xmit(struct sk_buff *skb, struct net_device *netdev) else err = ionic_tx(netdev, q, skb); - if (err) + if (unlikely(err)) goto err_out_drop; return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c index 2fcbcecb41d1..fef4b2b0b1f2 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c @@ -571,9 +571,6 @@ static u64 ctx_addr_sig_regs[][3] = { #define CRB_CTX_ADDR_REG_HI(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][2]) #define CRB_CTX_SIGNATURE_REG(FUNC_ID) (ctx_addr_sig_regs[FUNC_ID][1]) -#define lower32(x) ((u32)((x) & 0xffffffff)) -#define upper32(x) ((u32)(((u64)(x) >> 32) & 0xffffffff)) - static struct netxen_recv_crb recv_crb_registers[] = { /* Instance 0 */ { @@ -723,9 +720,9 @@ netxen_init_old_ctx(struct netxen_adapter *adapter) NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE; NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port), - lower32(recv_ctx->phys_addr)); + lower_32_bits(recv_ctx->phys_addr)); NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port), - upper32(recv_ctx->phys_addr)); + 
upper_32_bits(recv_ctx->phys_addr)); NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port), signature | port); return 0; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index f497f6ca1018..97b059be1041 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -1137,7 +1137,7 @@ static int qede_set_channels(struct net_device *dev, } static int qede_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct qede_dev *edev = netdev_priv(dev); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 747cc5e2bb78..63e3dac4d5f7 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -321,7 +321,7 @@ int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *ifr) sizeof(config)) ? -EFAULT : 0; } -int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info) +int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *info) { struct qede_ptp *ptp = edev->ptp; diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.h b/drivers/net/ethernet/qlogic/qede/qede_ptp.h index 1db0f021c645..adafc894797e 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.h +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.h @@ -17,7 +17,7 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb); int qede_ptp_hw_ts(struct qede_dev *edev, struct ifreq *req); void qede_ptp_disable(struct qede_dev *edev); int qede_ptp_enable(struct qede_dev *edev); -int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *ts); +int qede_ptp_get_ts_info(struct qede_dev *edev, struct kernel_ethtool_ts_info *ts); static inline void qede_ptp_record_rx_ts(struct qede_dev *edev, union eth_rx_cqe *cqe, diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 7b9e04884575..714d2e804694 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -1608,7 +1608,7 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts) if (!tp->dash_enabled) { rtl_set_d3_pll_down(tp, !wolopts); - tp->dev->wol_enabled = wolopts ? 1 : 0; + tp->dev->ethtool->wol_enabled = wolopts ? 1 : 0; } } @@ -2274,7 +2274,9 @@ static enum mac_version rtl8169_get_mac_version(u16 xid, bool gmii) /* 8168B family. */ { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 }, - { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 }, + /* This one is very old and rare, let's see if anybody complains. + * { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 }, + */ /* 8101 family. 
*/ { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 }, @@ -5086,12 +5088,10 @@ static void rtl_set_irq_mask(struct rtl8169_private *tp) tp->irq_mask = RxOK | RxErr | TxOK | TxErr | LinkChg; if (tp->mac_version <= RTL_GIGA_MAC_VER_06) - tp->irq_mask |= SYSErr | RxOverflow | RxFIFOOver; + tp->irq_mask |= SYSErr | RxFIFOOver; else if (tp->mac_version == RTL_GIGA_MAC_VER_11) /* special workaround needed */ tp->irq_mask |= RxFIFOOver; - else - tp->irq_mask |= RxOverflow; } static int rtl_alloc_irq(struct rtl8169_private *tp) @@ -5478,7 +5478,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rtl_set_d3_pll_down(tp, true); } else { rtl_set_d3_pll_down(tp, false); - dev->wol_enabled = 1; + dev->ethtool->wol_enabled = 1; } jumbo_max = rtl_jumbo_max(tp); diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index b03fae7a0f72..9b7559c88bee 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -33,6 +33,7 @@ config RAVB select CRC32 select MII select MDIO_BITBANG + select PAGE_POOL select PHYLIB select RESET_CONTROLLER help @@ -58,4 +59,14 @@ config RENESAS_GEN4_PTP help Renesas R-Car Gen4 gPTP device driver. +config RTSN + tristate "Renesas Ethernet-TSN support" + depends on ARCH_RENESAS || COMPILE_TEST + depends on PTP_1588_CLOCK + select CRC32 + select PHYLIB + select RENESAS_GEN4_PTP + help + Renesas Ethernet-TSN device driver. + endif # NET_VENDOR_RENESAS diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile index 9070acfd6aaf..f65fc76f8b4d 100644 --- a/drivers/net/ethernet/renesas/Makefile +++ b/drivers/net/ethernet/renesas/Makefile @@ -11,3 +11,5 @@ obj-$(CONFIG_RAVB) += ravb.o obj-$(CONFIG_RENESAS_ETHER_SWITCH) += rswitch.o obj-$(CONFIG_RENESAS_GEN4_PTP) += rcar_gen4_ptp.o + +obj-$(CONFIG_RTSN) += rtsn.o diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index b48935ec7e28..9893c91af105 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -19,6 +19,7 @@ #include #include #include +#include #define BE_TX_RING_SIZE 64 /* TX ring size for Best Effort */ #define BE_RX_RING_SIZE 1024 /* RX ring size for Best Effort */ @@ -257,6 +258,7 @@ enum APSR_BIT { APSR_CMSW = 0x00000010, APSR_RDM = 0x00002000, APSR_TDM = 0x00004000, + APSR_MIISELECT = 0x01000000, /* R-Car V4M only */ }; /* RCR */ @@ -1039,7 +1041,7 @@ struct ravb_ptp { }; struct ravb_hw_info { - bool (*receive)(struct net_device *ndev, int *quota, int q); + int (*receive)(struct net_device *ndev, int budget, int q); void (*set_rate)(struct net_device *ndev); int (*set_feature)(struct net_device *ndev, netdev_features_t features); int (*dmac_init)(struct net_device *ndev); @@ -1051,9 +1053,10 @@ struct ravb_hw_info { int stats_len; u32 tccr_mask; u32 rx_max_frame_size; - u32 rx_max_desc_use; + u32 rx_buffer_size; u32 rx_desc_size; unsigned aligned_tx: 1; + unsigned coalesce_irqs:1; /* Needs software IRQ coalescing */ /* hardware features */ unsigned internal_delay:1; /* AVB-DMAC has internal delays */ @@ -1070,6 +1073,11 @@ struct ravb_hw_info { unsigned half_duplex:1; /* E-MAC supports half duplex mode */ }; +struct ravb_rx_buffer { + struct page *page; + unsigned int offset; +}; + struct ravb_private { struct net_device *ndev; struct platform_device *pdev; @@ -1093,7 +1101,8 @@ struct ravb_private { struct ravb_tx_desc *tx_ring[NUM_TX_QUEUE]; void *tx_align[NUM_TX_QUEUE]; struct sk_buff *rx_1st_skb; - struct sk_buff 
**rx_skb[NUM_RX_QUEUE]; + struct page_pool *rx_pool[NUM_RX_QUEUE]; + struct ravb_rx_buffer *rx_buffers[NUM_RX_QUEUE]; struct sk_buff **tx_skb[NUM_TX_QUEUE]; u32 rx_over_errors; u32 rx_fifo_errors; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 4d100283c30f..c02fb296bf7d 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -30,6 +30,7 @@ #include #include #include +#include #include "ravb.h" @@ -113,25 +114,6 @@ static void ravb_set_rate_rcar(struct net_device *ndev) } } -static struct sk_buff * -ravb_alloc_skb(struct net_device *ndev, const struct ravb_hw_info *info, - gfp_t gfp_mask) -{ - struct sk_buff *skb; - u32 reserve; - - skb = __netdev_alloc_skb(ndev, info->rx_max_frame_size + RAVB_ALIGN - 1, - gfp_mask); - if (!skb) - return NULL; - - reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1); - if (reserve) - skb_reserve(skb, RAVB_ALIGN - reserve); - - return skb; -} - /* Get MAC address from the MAC address registers * * Ethernet AVB device doesn't have ROM for MAC address. @@ -257,21 +239,10 @@ static void ravb_rx_ring_free(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); unsigned int ring_size; - unsigned int i; if (!priv->rx_ring[q].raw) return; - for (i = 0; i < priv->num_rx_ring[q]; i++) { - struct ravb_rx_desc *desc = ravb_rx_get_desc(priv, q, i); - - if (!dma_mapping_error(ndev->dev.parent, - le32_to_cpu(desc->dptr))) - dma_unmap_single(ndev->dev.parent, - le32_to_cpu(desc->dptr), - priv->info->rx_max_frame_size, - DMA_FROM_DEVICE); - } ring_size = priv->info->rx_desc_size * (priv->num_rx_ring[q] + 1); dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q].raw, priv->rx_desc_dma[q]); @@ -298,13 +269,16 @@ static void ravb_ring_free(struct net_device *ndev, int q) priv->tx_ring[q] = NULL; } - /* Free RX skb ringbuffer */ - if (priv->rx_skb[q]) { - for (i = 0; i < priv->num_rx_ring[q]; i++) - dev_kfree_skb(priv->rx_skb[q][i]); + /* Free RX buffers */ + for (i = 0; i < priv->num_rx_ring[q]; i++) { + if (priv->rx_buffers[q][i].page) + page_pool_put_page(priv->rx_pool[q], + priv->rx_buffers[q][i].page, + 0, true); } - kfree(priv->rx_skb[q]); - priv->rx_skb[q] = NULL; + kfree(priv->rx_buffers[q]); + priv->rx_buffers[q] = NULL; + page_pool_destroy(priv->rx_pool[q]); /* Free aligned TX buffers */ kfree(priv->tx_align[q]); @@ -317,35 +291,64 @@ static void ravb_ring_free(struct net_device *ndev, int q) priv->tx_skb[q] = NULL; } -static void ravb_rx_ring_format(struct net_device *ndev, int q) +static int +ravb_alloc_rx_buffer(struct net_device *ndev, int q, u32 entry, gfp_t gfp_mask, + struct ravb_rx_desc *rx_desc) { struct ravb_private *priv = netdev_priv(ndev); - struct ravb_rx_desc *rx_desc; - unsigned int rx_ring_size; + const struct ravb_hw_info *info = priv->info; + struct ravb_rx_buffer *rx_buff; dma_addr_t dma_addr; - unsigned int i; + unsigned int size; - rx_ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q]; - memset(priv->rx_ring[q].raw, 0, rx_ring_size); - /* Build RX ring buffer */ - for (i = 0; i < priv->num_rx_ring[q]; i++) { - /* RX descriptor */ - rx_desc = ravb_rx_get_desc(priv, q, i); - rx_desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use); - dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data, - priv->info->rx_max_frame_size, - DMA_FROM_DEVICE); + rx_buff = &priv->rx_buffers[q][entry]; + size = info->rx_buffer_size; + rx_buff->page = page_pool_alloc(priv->rx_pool[q], &rx_buff->offset, + 
&size, gfp_mask); + if (unlikely(!rx_buff->page)) { /* We just set the data size to 0 for a failed mapping which * should prevent DMA from happening... */ - if (dma_mapping_error(ndev->dev.parent, dma_addr)) - rx_desc->ds_cc = cpu_to_le16(0); - rx_desc->dptr = cpu_to_le32(dma_addr); + rx_desc->ds_cc = cpu_to_le16(0); + return -ENOMEM; + } + + dma_addr = page_pool_get_dma_addr(rx_buff->page) + rx_buff->offset; + dma_sync_single_for_device(ndev->dev.parent, dma_addr, + info->rx_buffer_size, DMA_FROM_DEVICE); + rx_desc->dptr = cpu_to_le32(dma_addr); + + /* The end of the RX buffer is used to store skb shared data, so we need + * to ensure that the hardware leaves enough space for this. + */ + rx_desc->ds_cc = cpu_to_le16(info->rx_buffer_size - + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) - + ETH_FCS_LEN + sizeof(__sum16)); + return 0; +} + +static u32 +ravb_rx_ring_refill(struct net_device *ndev, int q, u32 count, gfp_t gfp_mask) +{ + struct ravb_private *priv = netdev_priv(ndev); + struct ravb_rx_desc *rx_desc; + u32 i, entry; + + for (i = 0; i < count; i++) { + entry = (priv->dirty_rx[q] + i) % priv->num_rx_ring[q]; + rx_desc = ravb_rx_get_desc(priv, q, entry); + + if (!priv->rx_buffers[q][entry].page) { + if (unlikely(ravb_alloc_rx_buffer(ndev, q, entry, + gfp_mask, rx_desc))) + break; + } + /* Descriptor type must be set after all the above writes */ + dma_wmb(); rx_desc->die_dt = DT_FEMPTY; } - rx_desc = ravb_rx_get_desc(priv, q, i); - rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); - rx_desc->die_dt = DT_LINKFIX; /* type */ + + return i; } /* Format skb and descriptor buffer for Ethernet AVB */ @@ -353,6 +356,7 @@ static void ravb_ring_format(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); unsigned int num_tx_desc = priv->num_tx_desc; + struct ravb_rx_desc *rx_desc; struct ravb_tx_desc *tx_desc; struct ravb_desc *desc; unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * @@ -364,7 +368,13 @@ static void ravb_ring_format(struct net_device *ndev, int q) priv->dirty_rx[q] = 0; priv->dirty_tx[q] = 0; - ravb_rx_ring_format(ndev, q); + /* Regular RX descriptors have already been initialized by + * ravb_rx_ring_refill(), we just need to initialize the final link + * descriptor. 
+ */ + rx_desc = ravb_rx_get_desc(priv, q, priv->num_rx_ring[q]); + rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); + rx_desc->die_dt = DT_LINKFIX; /* type */ memset(priv->tx_ring[q], 0, tx_ring_size); /* Build TX ring buffer */ @@ -408,26 +418,47 @@ static void *ravb_alloc_rx_desc(struct net_device *ndev, int q) static int ravb_ring_init(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); - const struct ravb_hw_info *info = priv->info; unsigned int num_tx_desc = priv->num_tx_desc; + struct page_pool_params params = { + .order = 0, + .flags = PP_FLAG_DMA_MAP, + .pool_size = priv->num_rx_ring[q], + .nid = NUMA_NO_NODE, + .dev = ndev->dev.parent, + .dma_dir = DMA_FROM_DEVICE, + }; unsigned int ring_size; - struct sk_buff *skb; - unsigned int i; + u32 num_filled; - /* Allocate RX and TX skb rings */ - priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], - sizeof(*priv->rx_skb[q]), GFP_KERNEL); - priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q], - sizeof(*priv->tx_skb[q]), GFP_KERNEL); - if (!priv->rx_skb[q] || !priv->tx_skb[q]) + /* Allocate RX page pool and buffers */ + priv->rx_pool[q] = page_pool_create(&params); + if (IS_ERR(priv->rx_pool[q])) goto error; - for (i = 0; i < priv->num_rx_ring[q]; i++) { - skb = ravb_alloc_skb(ndev, info, GFP_KERNEL); - if (!skb) - goto error; - priv->rx_skb[q][i] = skb; - } + /* Allocate RX buffers */ + priv->rx_buffers[q] = kcalloc(priv->num_rx_ring[q], + sizeof(*priv->rx_buffers[q]), GFP_KERNEL); + if (!priv->rx_buffers[q]) + goto error; + + /* Allocate TX skb rings */ + priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q], + sizeof(*priv->tx_skb[q]), GFP_KERNEL); + if (!priv->tx_skb[q]) + goto error; + + /* Allocate all RX descriptors. */ + if (!ravb_alloc_rx_desc(ndev, q)) + goto error; + + /* Populate RX ring buffer. */ + priv->dirty_rx[q] = 0; + ring_size = priv->info->rx_desc_size * priv->num_rx_ring[q]; + memset(priv->rx_ring[q].raw, 0, ring_size); + num_filled = ravb_rx_ring_refill(ndev, q, priv->num_rx_ring[q], + GFP_KERNEL); + if (num_filled != priv->num_rx_ring[q]) + goto error; if (num_tx_desc > 1) { /* Allocate rings for the aligned buffers */ @@ -437,12 +468,6 @@ static int ravb_ring_init(struct net_device *ndev, int q) goto error; } - /* Allocate all RX descriptors. */ - if (!ravb_alloc_rx_desc(ndev, q)) - goto error; - - priv->dirty_rx[q] = 0; - /* Allocate all TX descriptors. */ ring_size = sizeof(struct ravb_tx_desc) * (priv->num_tx_ring[q] * num_tx_desc + 1); @@ -554,6 +579,16 @@ static void ravb_emac_init_rcar(struct net_device *ndev) ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR); } +static void ravb_emac_init_rcar_gen4(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + bool mii = priv->phy_interface == PHY_INTERFACE_MODE_MII; + + ravb_modify(ndev, APSR, APSR_MIISELECT, mii ? 
APSR_MIISELECT : 0); + + ravb_emac_init_rcar(ndev); +} + /* E-MAC init function */ static void ravb_emac_init(struct net_device *ndev) { @@ -706,7 +741,9 @@ static void ravb_get_tx_tstamp(struct net_device *ndev) static void ravb_rx_csum_gbeth(struct sk_buff *skb) { + struct skb_shared_info *shinfo = skb_shinfo(skb); __wsum csum_ip_hdr, csum_proto; + skb_frag_t *last_frag; u8 *hw_csum; /* The hardware checksum status is contained in sizeof(__sum16) * 2 = 4 @@ -716,12 +753,24 @@ static void ravb_rx_csum_gbeth(struct sk_buff *skb) if (unlikely(skb->len < sizeof(__sum16) * 2)) return; - hw_csum = skb_tail_pointer(skb) - sizeof(__sum16); + if (skb_is_nonlinear(skb)) { + last_frag = &shinfo->frags[shinfo->nr_frags - 1]; + hw_csum = skb_frag_address(last_frag) + + skb_frag_size(last_frag); + } else { + hw_csum = skb_tail_pointer(skb); + } + + hw_csum -= sizeof(__sum16); csum_proto = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); hw_csum -= sizeof(__sum16); csum_ip_hdr = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); - skb_trim(skb, skb->len - 2 * sizeof(__sum16)); + + if (skb_is_nonlinear(skb)) + skb_frag_size_sub(last_frag, 2 * sizeof(__sum16)); + else + skb_trim(skb, skb->len - 2 * sizeof(__sum16)); /* TODO: IPV6 Rx checksum */ if (skb->protocol == htons(ETH_P_IP) && !csum_ip_hdr && !csum_proto) @@ -743,30 +792,14 @@ static void ravb_rx_csum(struct sk_buff *skb) skb_trim(skb, skb->len - sizeof(__sum16)); } -static struct sk_buff *ravb_get_skb_gbeth(struct net_device *ndev, int entry, - struct ravb_rx_desc *desc) -{ - struct ravb_private *priv = netdev_priv(ndev); - struct sk_buff *skb; - - skb = priv->rx_skb[RAVB_BE][entry]; - priv->rx_skb[RAVB_BE][entry] = NULL; - dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), - ALIGN(priv->info->rx_max_frame_size, 16), - DMA_FROM_DEVICE); - - return skb; -} - /* Packet receive function for Gigabit Ethernet */ -static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) +static int ravb_rx_gbeth(struct net_device *ndev, int budget, int q) { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; struct net_device_stats *stats; struct ravb_rx_desc *desc; struct sk_buff *skb; - dma_addr_t dma_addr; int rx_packets = 0; u8 desc_status; u16 desc_len; @@ -781,7 +814,7 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) for (i = 0; i < limit; i++, priv->cur_rx[q]++) { entry = priv->cur_rx[q] % priv->num_rx_ring[q]; desc = &priv->rx_ring[q].desc[entry]; - if (rx_packets == *quota || desc->die_dt == DT_FEMPTY) + if (rx_packets == budget || desc->die_dt == DT_FEMPTY) break; /* Descriptor type must be checked before all other reads */ @@ -807,87 +840,110 @@ static bool ravb_rx_gbeth(struct net_device *ndev, int *quota, int q) if (desc_status & MSC_CEEF) stats->rx_missed_errors++; } else { + struct ravb_rx_buffer *rx_buff; + void *rx_addr; + + rx_buff = &priv->rx_buffers[q][entry]; + rx_addr = page_address(rx_buff->page) + rx_buff->offset; die_dt = desc->die_dt & 0xF0; + dma_sync_single_for_cpu(ndev->dev.parent, + le32_to_cpu(desc->dptr), + desc_len, DMA_FROM_DEVICE); + switch (die_dt) { case DT_FSINGLE: - skb = ravb_get_skb_gbeth(ndev, entry, desc); + case DT_FSTART: + /* Start of packet: Set initial data length. 
*/ + skb = napi_build_skb(rx_addr, + info->rx_buffer_size); + if (unlikely(!skb)) { + stats->rx_errors++; + page_pool_put_page(priv->rx_pool[q], + rx_buff->page, 0, + true); + goto refill; + } + skb_mark_for_recycle(skb); skb_put(skb, desc_len); + + /* Save this skb if the packet spans multiple + * descriptors. + */ + if (die_dt == DT_FSTART) + priv->rx_1st_skb = skb; + break; + + case DT_FMID: + case DT_FEND: + /* Continuing a packet: Add this buffer as an RX + * frag. + */ + + /* rx_1st_skb will be NULL if napi_build_skb() + * failed for the first descriptor of a + * multi-descriptor packet. + */ + if (unlikely(!priv->rx_1st_skb)) { + stats->rx_errors++; + page_pool_put_page(priv->rx_pool[q], + rx_buff->page, 0, + true); + + /* We may find a DT_FSINGLE or DT_FSTART + * descriptor in the queue which we can + * process, so don't give up yet. + */ + continue; + } + skb_add_rx_frag(priv->rx_1st_skb, + skb_shinfo(priv->rx_1st_skb)->nr_frags, + rx_buff->page, rx_buff->offset, + desc_len, info->rx_buffer_size); + + /* Set skb to point at the whole packet so that + * we only need one code path for finishing a + * packet. + */ + skb = priv->rx_1st_skb; + } + + switch (die_dt) { + case DT_FSINGLE: + case DT_FEND: + /* Finishing a packet: Determine protocol & + * checksum, hand off to NAPI and update our + * stats. + */ skb->protocol = eth_type_trans(skb, ndev); if (ndev->features & NETIF_F_RXCSUM) ravb_rx_csum_gbeth(skb); + stats->rx_bytes += skb->len; napi_gro_receive(&priv->napi[q], skb); rx_packets++; - stats->rx_bytes += desc_len; - break; - case DT_FSTART: - priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc); - skb_put(priv->rx_1st_skb, desc_len); - break; - case DT_FMID: - skb = ravb_get_skb_gbeth(ndev, entry, desc); - skb_copy_to_linear_data_offset(priv->rx_1st_skb, - priv->rx_1st_skb->len, - skb->data, - desc_len); - skb_put(priv->rx_1st_skb, desc_len); - dev_kfree_skb(skb); - break; - case DT_FEND: - skb = ravb_get_skb_gbeth(ndev, entry, desc); - skb_copy_to_linear_data_offset(priv->rx_1st_skb, - priv->rx_1st_skb->len, - skb->data, - desc_len); - skb_put(priv->rx_1st_skb, desc_len); - dev_kfree_skb(skb); - priv->rx_1st_skb->protocol = - eth_type_trans(priv->rx_1st_skb, ndev); - if (ndev->features & NETIF_F_RXCSUM) - ravb_rx_csum_gbeth(priv->rx_1st_skb); - stats->rx_bytes += priv->rx_1st_skb->len; - napi_gro_receive(&priv->napi[q], - priv->rx_1st_skb); - rx_packets++; - break; + + /* Clear rx_1st_skb so that it will only be + * non-NULL when valid. + */ + priv->rx_1st_skb = NULL; } + + /* Mark this RX buffer as consumed. */ + rx_buff->page = NULL; } } +refill: /* Refill the RX ring buffers. */ - for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) { - entry = priv->dirty_rx[q] % priv->num_rx_ring[q]; - desc = &priv->rx_ring[q].desc[entry]; - desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use); - - if (!priv->rx_skb[q][entry]) { - skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC); - if (!skb) - break; - dma_addr = dma_map_single(ndev->dev.parent, - skb->data, - priv->info->rx_max_frame_size, - DMA_FROM_DEVICE); - skb_checksum_none_assert(skb); - /* We just set the data size to 0 for a failed mapping - * which should prevent DMA from happening... 
- */ - if (dma_mapping_error(ndev->dev.parent, dma_addr)) - desc->ds_cc = cpu_to_le16(0); - desc->dptr = cpu_to_le32(dma_addr); - priv->rx_skb[q][entry] = skb; - } - /* Descriptor type must be set after all the above writes */ - dma_wmb(); - desc->die_dt = DT_FEMPTY; - } + priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q, + priv->cur_rx[q] - priv->dirty_rx[q], + GFP_ATOMIC); stats->rx_packets += rx_packets; - *quota -= rx_packets; - return *quota == 0; + return rx_packets; } /* Packet receive function for Ethernet AVB */ -static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q) +static int ravb_rx_rcar(struct net_device *ndev, int budget, int q) { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; @@ -895,7 +951,6 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q) struct ravb_ex_rx_desc *desc; unsigned int limit, i; struct sk_buff *skb; - dma_addr_t dma_addr; struct timespec64 ts; int rx_packets = 0; u8 desc_status; @@ -906,7 +961,7 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q) for (i = 0; i < limit; i++, priv->cur_rx[q]++) { entry = priv->cur_rx[q] % priv->num_rx_ring[q]; desc = &priv->rx_ring[q].ex_desc[entry]; - if (rx_packets == *quota || desc->die_dt == DT_FEMPTY) + if (rx_packets == budget || desc->die_dt == DT_FEMPTY) break; /* Descriptor type must be checked before all other reads */ @@ -934,12 +989,23 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q) stats->rx_missed_errors++; } else { u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE; + struct ravb_rx_buffer *rx_buff; + void *rx_addr; - skb = priv->rx_skb[q][entry]; - priv->rx_skb[q][entry] = NULL; - dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), - priv->info->rx_max_frame_size, - DMA_FROM_DEVICE); + rx_buff = &priv->rx_buffers[q][entry]; + rx_addr = page_address(rx_buff->page) + rx_buff->offset; + dma_sync_single_for_cpu(ndev->dev.parent, + le32_to_cpu(desc->dptr), + pkt_len, DMA_FROM_DEVICE); + + skb = napi_build_skb(rx_addr, info->rx_buffer_size); + if (unlikely(!skb)) { + stats->rx_errors++; + page_pool_put_page(priv->rx_pool[q], + rx_buff->page, 0, true); + break; + } + skb_mark_for_recycle(skb); get_ts &= (q == RAVB_NC) ? RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; @@ -961,48 +1027,28 @@ static bool ravb_rx_rcar(struct net_device *ndev, int *quota, int q) napi_gro_receive(&priv->napi[q], skb); rx_packets++; stats->rx_bytes += pkt_len; + + /* Mark this RX buffer as consumed. */ + rx_buff->page = NULL; } } /* Refill the RX ring buffers. */ - for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) { - entry = priv->dirty_rx[q] % priv->num_rx_ring[q]; - desc = &priv->rx_ring[q].ex_desc[entry]; - desc->ds_cc = cpu_to_le16(priv->info->rx_max_desc_use); - - if (!priv->rx_skb[q][entry]) { - skb = ravb_alloc_skb(ndev, info, GFP_ATOMIC); - if (!skb) - break; /* Better luck next round. */ - dma_addr = dma_map_single(ndev->dev.parent, skb->data, - priv->info->rx_max_frame_size, - DMA_FROM_DEVICE); - skb_checksum_none_assert(skb); - /* We just set the data size to 0 for a failed mapping - * which should prevent DMA from happening... 
- */ - if (dma_mapping_error(ndev->dev.parent, dma_addr)) - desc->ds_cc = cpu_to_le16(0); - desc->dptr = cpu_to_le32(dma_addr); - priv->rx_skb[q][entry] = skb; - } - /* Descriptor type must be set after all the above writes */ - dma_wmb(); - desc->die_dt = DT_FEMPTY; - } + priv->dirty_rx[q] += ravb_rx_ring_refill(ndev, q, + priv->cur_rx[q] - priv->dirty_rx[q], + GFP_ATOMIC); stats->rx_packets += rx_packets; - *quota -= rx_packets; - return *quota == 0; + return rx_packets; } /* Packet receive function for Ethernet AVB */ -static bool ravb_rx(struct net_device *ndev, int *quota, int q) +static int ravb_rx(struct net_device *ndev, int budget, int q) { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *info = priv->info; - return info->receive(ndev, quota, q); + return info->receive(ndev, budget, q); } static void ravb_rcv_snd_disable(struct net_device *ndev) @@ -1319,13 +1365,12 @@ static int ravb_poll(struct napi_struct *napi, int budget) unsigned long flags; int q = napi - priv->napi; int mask = BIT(q); - int quota = budget; - bool unmask; + int work_done; /* Processing RX Descriptor Ring */ /* Clear RX interrupt */ ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0); - unmask = !ravb_rx(ndev, &quota, q); + work_done = ravb_rx(ndev, budget, q); /* Processing TX Descriptor Ring */ spin_lock_irqsave(&priv->lock, flags); @@ -1344,24 +1389,20 @@ static int ravb_poll(struct napi_struct *napi, int budget) if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; - if (!unmask) - goto out; - - napi_complete(napi); - - /* Re-enable RX/TX interrupts */ - spin_lock_irqsave(&priv->lock, flags); - if (!info->irq_en_dis) { - ravb_modify(ndev, RIC0, mask, mask); - ravb_modify(ndev, TIC, mask, mask); - } else { - ravb_write(ndev, mask, RIE0); - ravb_write(ndev, mask, TIE); + if (work_done < budget && napi_complete_done(napi, work_done)) { + /* Re-enable RX/TX interrupts */ + spin_lock_irqsave(&priv->lock, flags); + if (!info->irq_en_dis) { + ravb_modify(ndev, RIC0, mask, mask); + ravb_modify(ndev, TIC, mask, mask); + } else { + ravb_write(ndev, mask, RIE0); + ravb_write(ndev, mask, TIE); + } + spin_unlock_irqrestore(&priv->lock, flags); } - spin_unlock_irqrestore(&priv->lock, flags); -out: - return budget - quota; + return work_done; } static void ravb_set_duplex_gbeth(struct net_device *ndev) @@ -1696,7 +1737,7 @@ static int ravb_set_ringparam(struct net_device *ndev, } static int ravb_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct ravb_private *priv = netdev_priv(ndev); const struct ravb_hw_info *hw_info = priv->info; @@ -2621,30 +2662,6 @@ static int ravb_mdio_release(struct ravb_private *priv) return 0; } -static const struct ravb_hw_info ravb_gen3_hw_info = { - .receive = ravb_rx_rcar, - .set_rate = ravb_set_rate_rcar, - .set_feature = ravb_set_features_rcar, - .dmac_init = ravb_dmac_init_rcar, - .emac_init = ravb_emac_init_rcar, - .gstrings_stats = ravb_gstrings_stats, - .gstrings_size = sizeof(ravb_gstrings_stats), - .net_hw_features = NETIF_F_RXCSUM, - .net_features = NETIF_F_RXCSUM, - .stats_len = ARRAY_SIZE(ravb_gstrings_stats), - .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, - .rx_max_frame_size = SZ_2K, - .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16), - .rx_desc_size = sizeof(struct ravb_ex_rx_desc), - .internal_delay = 1, - .tx_counters = 1, - .multi_irqs = 1, - .irq_en_dis = 1, - .ccc_gac = 1, - .nc_queues = 1, - .magic_pkt = 1, 
-}; - static const struct ravb_hw_info ravb_gen2_hw_info = { .receive = ravb_rx_rcar, .set_rate = ravb_set_rate_rcar, @@ -2658,7 +2675,8 @@ static const struct ravb_hw_info ravb_gen2_hw_info = { .stats_len = ARRAY_SIZE(ravb_gstrings_stats), .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, .rx_max_frame_size = SZ_2K, - .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16), + .rx_buffer_size = SZ_2K + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), .rx_desc_size = sizeof(struct ravb_ex_rx_desc), .aligned_tx = 1, .gptp = 1, @@ -2666,6 +2684,56 @@ static const struct ravb_hw_info ravb_gen2_hw_info = { .magic_pkt = 1, }; +static const struct ravb_hw_info ravb_gen3_hw_info = { + .receive = ravb_rx_rcar, + .set_rate = ravb_set_rate_rcar, + .set_feature = ravb_set_features_rcar, + .dmac_init = ravb_dmac_init_rcar, + .emac_init = ravb_emac_init_rcar, + .gstrings_stats = ravb_gstrings_stats, + .gstrings_size = sizeof(ravb_gstrings_stats), + .net_hw_features = NETIF_F_RXCSUM, + .net_features = NETIF_F_RXCSUM, + .stats_len = ARRAY_SIZE(ravb_gstrings_stats), + .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, + .rx_max_frame_size = SZ_2K, + .rx_buffer_size = SZ_2K + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), + .rx_desc_size = sizeof(struct ravb_ex_rx_desc), + .internal_delay = 1, + .tx_counters = 1, + .multi_irqs = 1, + .irq_en_dis = 1, + .ccc_gac = 1, + .nc_queues = 1, + .magic_pkt = 1, +}; + +static const struct ravb_hw_info ravb_gen4_hw_info = { + .receive = ravb_rx_rcar, + .set_rate = ravb_set_rate_rcar, + .set_feature = ravb_set_features_rcar, + .dmac_init = ravb_dmac_init_rcar, + .emac_init = ravb_emac_init_rcar_gen4, + .gstrings_stats = ravb_gstrings_stats, + .gstrings_size = sizeof(ravb_gstrings_stats), + .net_hw_features = NETIF_F_RXCSUM, + .net_features = NETIF_F_RXCSUM, + .stats_len = ARRAY_SIZE(ravb_gstrings_stats), + .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, + .rx_max_frame_size = SZ_2K, + .rx_buffer_size = SZ_2K + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), + .rx_desc_size = sizeof(struct ravb_ex_rx_desc), + .internal_delay = 1, + .tx_counters = 1, + .multi_irqs = 1, + .irq_en_dis = 1, + .ccc_gac = 1, + .nc_queues = 1, + .magic_pkt = 1, +}; + static const struct ravb_hw_info ravb_rzv2m_hw_info = { .receive = ravb_rx_rcar, .set_rate = ravb_set_rate_rcar, @@ -2679,7 +2747,8 @@ static const struct ravb_hw_info ravb_rzv2m_hw_info = { .stats_len = ARRAY_SIZE(ravb_gstrings_stats), .tccr_mask = TCCR_TSRQ0 | TCCR_TSRQ1 | TCCR_TSRQ2 | TCCR_TSRQ3, .rx_max_frame_size = SZ_2K, - .rx_max_desc_use = SZ_2K - ETH_FCS_LEN + sizeof(__sum16), + .rx_buffer_size = SZ_2K + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)), .rx_desc_size = sizeof(struct ravb_ex_rx_desc), .multi_irqs = 1, .err_mgmt_irqs = 1, @@ -2702,9 +2771,10 @@ static const struct ravb_hw_info gbeth_hw_info = { .stats_len = ARRAY_SIZE(ravb_gstrings_stats_gbeth), .tccr_mask = TCCR_TSRQ0, .rx_max_frame_size = SZ_8K, - .rx_max_desc_use = 4080, + .rx_buffer_size = SZ_2K, .rx_desc_size = sizeof(struct ravb_rx_desc), .aligned_tx = 1, + .coalesce_irqs = 1, .tx_counters = 1, .carrier_counters = 1, .half_duplex = 1, @@ -2716,7 +2786,7 @@ static const struct of_device_id ravb_match_table[] = { { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info }, { .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info }, { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info }, - { .compatible = "renesas,etheravb-rcar-gen4", .data = 
&ravb_gen3_hw_info }, + { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen4_hw_info }, { .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info }, { .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info }, { } @@ -2981,6 +3051,12 @@ static int ravb_probe(struct platform_device *pdev) if (info->nc_queues) netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll); + if (info->coalesce_irqs) { + netdev_sw_irq_coalesce_default_on(ndev); + if (num_present_cpus() == 1) + dev_set_threaded(ndev, true); + } + /* Network device register */ error = register_netdev(ndev); if (error) diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index 24c90d8f5a44..ff50e20856ec 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -1809,7 +1809,7 @@ static const struct net_device_ops rswitch_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, }; -static int rswitch_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) +static int rswitch_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info) { struct rswitch_device *rdev = netdev_priv(ndev); diff --git a/drivers/net/ethernet/renesas/rtsn.c b/drivers/net/ethernet/renesas/rtsn.c new file mode 100644 index 000000000000..577227c007ab --- /dev/null +++ b/drivers/net/ethernet/renesas/rtsn.c @@ -0,0 +1,1391 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Renesas Ethernet-TSN device driver + * + * Copyright (C) 2022 Renesas Electronics Corporation + * Copyright (C) 2023 Niklas Söderlund + */ + +#include <linux/clk.h> +#include <linux/dma-mapping.h> +#include <linux/etherdevice.h> +#include <linux/ethtool.h> +#include <linux/module.h> +#include <linux/net_tstamp.h> +#include <linux/of.h> +#include <linux/of_mdio.h> +#include <linux/of_net.h> +#include <linux/phy.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/reset.h> +#include <linux/spinlock.h> + +#include "rtsn.h" +#include "rcar_gen4_ptp.h" + +struct rtsn_private { + struct net_device *ndev; + struct platform_device *pdev; + void __iomem *base; + struct rcar_gen4_ptp_private *ptp_priv; + struct clk *clk; + struct reset_control *reset; + + u32 num_tx_ring; + u32 num_rx_ring; + u32 tx_desc_bat_size; + dma_addr_t tx_desc_bat_dma; + struct rtsn_desc *tx_desc_bat; + u32 rx_desc_bat_size; + dma_addr_t rx_desc_bat_dma; + struct rtsn_desc *rx_desc_bat; + dma_addr_t tx_desc_dma; + dma_addr_t rx_desc_dma; + struct rtsn_ext_desc *tx_ring; + struct rtsn_ext_ts_desc *rx_ring; + struct sk_buff **tx_skb; + struct sk_buff **rx_skb; + spinlock_t lock; /* Register access lock */ + u32 cur_tx; + u32 dirty_tx; + u32 cur_rx; + u32 dirty_rx; + u8 ts_tag; + struct napi_struct napi; + struct rtnl_link_stats64 stats; + + struct mii_bus *mii; + phy_interface_t iface; + int link; + int speed; + + int tx_data_irq; + int rx_data_irq; +}; + +static u32 rtsn_read(struct rtsn_private *priv, enum rtsn_reg reg) +{ + return ioread32(priv->base + reg); +} + +static void rtsn_write(struct rtsn_private *priv, enum rtsn_reg reg, u32 data) +{ + iowrite32(data, priv->base + reg); +} + +static void rtsn_modify(struct rtsn_private *priv, enum rtsn_reg reg, + u32 clear, u32 set) +{ + rtsn_write(priv, reg, (rtsn_read(priv, reg) & ~clear) | set); +} + +static int rtsn_reg_wait(struct rtsn_private *priv, enum rtsn_reg reg, + u32 mask, u32 expected) +{ + u32 val; + + return readl_poll_timeout(priv->base + reg, val, + (val & mask) == expected, + RTSN_INTERVAL_US, RTSN_TIMEOUT_US); +} + +static void rtsn_ctrl_data_irq(struct rtsn_private *priv, bool enable) +{ + if (enable) { + rtsn_write(priv, TDIE0, TDIE_TDID_TDX(TX_CHAIN_IDX)); + rtsn_write(priv, RDIE0, RDIE_RDID_RDX(RX_CHAIN_IDX)); + } else { + rtsn_write(priv, TDID0, 
TDIE_TDID_TDX(TX_CHAIN_IDX)); + rtsn_write(priv, RDID0, RDIE_RDID_RDX(RX_CHAIN_IDX)); + } +} + +static void rtsn_get_timestamp(struct rtsn_private *priv, struct timespec64 *ts) +{ + struct rcar_gen4_ptp_private *ptp_priv = priv->ptp_priv; + + ptp_priv->info.gettime64(&ptp_priv->info, ts); +} + +static int rtsn_tx_free(struct net_device *ndev, bool free_txed_only) +{ + struct rtsn_private *priv = netdev_priv(ndev); + struct rtsn_ext_desc *desc; + struct sk_buff *skb; + int free_num = 0; + int entry, size; + + for (; priv->cur_tx - priv->dirty_tx > 0; priv->dirty_tx++) { + entry = priv->dirty_tx % priv->num_tx_ring; + desc = &priv->tx_ring[entry]; + if (free_txed_only && (desc->die_dt & DT_MASK) != DT_FEMPTY) + break; + + dma_rmb(); + size = le16_to_cpu(desc->info_ds) & TX_DS; + skb = priv->tx_skb[entry]; + if (skb) { + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + struct skb_shared_hwtstamps shhwtstamps; + struct timespec64 ts; + + rtsn_get_timestamp(priv, &ts); + memset(&shhwtstamps, 0, sizeof(shhwtstamps)); + shhwtstamps.hwtstamp = timespec64_to_ktime(ts); + skb_tstamp_tx(skb, &shhwtstamps); + } + dma_unmap_single(ndev->dev.parent, + le32_to_cpu(desc->dptr), + size, DMA_TO_DEVICE); + dev_kfree_skb_any(priv->tx_skb[entry]); + free_num++; + + priv->stats.tx_packets++; + priv->stats.tx_bytes += size; + } + + desc->die_dt = DT_EEMPTY; + } + + desc = &priv->tx_ring[priv->num_tx_ring]; + desc->die_dt = DT_LINK; + + return free_num; +} + +static int rtsn_rx(struct net_device *ndev, int budget) +{ + struct rtsn_private *priv = netdev_priv(ndev); + unsigned int ndescriptors; + unsigned int rx_packets; + unsigned int i; + bool get_ts; + + get_ts = priv->ptp_priv->tstamp_rx_ctrl & + RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; + + ndescriptors = priv->dirty_rx + priv->num_rx_ring - priv->cur_rx; + rx_packets = 0; + for (i = 0; i < ndescriptors; i++) { + const unsigned int entry = priv->cur_rx % priv->num_rx_ring; + struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry]; + struct sk_buff *skb; + dma_addr_t dma_addr; + u16 pkt_len; + + /* Stop processing descriptors if budget is consumed. */ + if (rx_packets >= budget) + break; + + /* Stop processing descriptors on first empty. */ + if ((desc->die_dt & DT_MASK) == DT_FEMPTY) + break; + + dma_rmb(); + pkt_len = le16_to_cpu(desc->info_ds) & RX_DS; + + skb = priv->rx_skb[entry]; + priv->rx_skb[entry] = NULL; + dma_addr = le32_to_cpu(desc->dptr); + dma_unmap_single(ndev->dev.parent, dma_addr, PKT_BUF_SZ, + DMA_FROM_DEVICE); + + /* Get timestamp if enabled. */ + if (get_ts) { + struct skb_shared_hwtstamps *shhwtstamps; + struct timespec64 ts; + + shhwtstamps = skb_hwtstamps(skb); + memset(shhwtstamps, 0, sizeof(*shhwtstamps)); + + ts.tv_sec = (u64)le32_to_cpu(desc->ts_sec); + ts.tv_nsec = le32_to_cpu(desc->ts_nsec & cpu_to_le32(0x3fffffff)); + + shhwtstamps->hwtstamp = timespec64_to_ktime(ts); + } + + skb_put(skb, pkt_len); + skb->protocol = eth_type_trans(skb, ndev); + napi_gro_receive(&priv->napi, skb); + + /* Update statistics. */ + priv->stats.rx_packets++; + priv->stats.rx_bytes += pkt_len; + + /* Update counters. 
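cur_rx advances the ring consumer index; rx_packets counts this frame against the NAPI budget.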
*/ + priv->cur_rx++; + rx_packets++; + } + + /* Refill the RX ring buffers */ + for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) { + const unsigned int entry = priv->dirty_rx % priv->num_rx_ring; + struct rtsn_ext_ts_desc *desc = &priv->rx_ring[entry]; + struct sk_buff *skb; + dma_addr_t dma_addr; + + desc->info_ds = cpu_to_le16(PKT_BUF_SZ); + + if (!priv->rx_skb[entry]) { + skb = napi_alloc_skb(&priv->napi, + PKT_BUF_SZ + RTSN_ALIGN - 1); + if (!skb) + break; + skb_reserve(skb, NET_IP_ALIGN); + dma_addr = dma_map_single(ndev->dev.parent, skb->data, + le16_to_cpu(desc->info_ds), + DMA_FROM_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) + desc->info_ds = cpu_to_le16(0); + desc->dptr = cpu_to_le32(dma_addr); + skb_checksum_none_assert(skb); + priv->rx_skb[entry] = skb; + } + + dma_wmb(); + desc->die_dt = DT_FEMPTY | D_DIE; + } + + priv->rx_ring[priv->num_rx_ring].die_dt = DT_LINK; + + return rx_packets; +} + +static int rtsn_poll(struct napi_struct *napi, int budget) +{ + struct rtsn_private *priv; + struct net_device *ndev; + unsigned long flags; + int work_done; + + ndev = napi->dev; + priv = netdev_priv(ndev); + + /* Processing RX Descriptor Ring */ + work_done = rtsn_rx(ndev, budget); + + /* Processing TX Descriptor Ring */ + spin_lock_irqsave(&priv->lock, flags); + rtsn_tx_free(ndev, true); + netif_wake_subqueue(ndev, 0); + spin_unlock_irqrestore(&priv->lock, flags); + + /* Re-enable TX/RX interrupts */ + if (work_done < budget && napi_complete_done(napi, work_done)) { + spin_lock_irqsave(&priv->lock, flags); + rtsn_ctrl_data_irq(priv, true); + spin_unlock_irqrestore(&priv->lock, flags); + } + + return work_done; +} + +static int rtsn_desc_alloc(struct rtsn_private *priv) +{ + struct device *dev = &priv->pdev->dev; + unsigned int i; + + priv->tx_desc_bat_size = sizeof(struct rtsn_desc) * TX_NUM_CHAINS; + priv->tx_desc_bat = dma_alloc_coherent(dev, priv->tx_desc_bat_size, + &priv->tx_desc_bat_dma, + GFP_KERNEL); + + if (!priv->tx_desc_bat) + return -ENOMEM; + + for (i = 0; i < TX_NUM_CHAINS; i++) + priv->tx_desc_bat[i].die_dt = DT_EOS; + + priv->rx_desc_bat_size = sizeof(struct rtsn_desc) * RX_NUM_CHAINS; + priv->rx_desc_bat = dma_alloc_coherent(dev, priv->rx_desc_bat_size, + &priv->rx_desc_bat_dma, + GFP_KERNEL); + + if (!priv->rx_desc_bat) + return -ENOMEM; + + for (i = 0; i < RX_NUM_CHAINS; i++) + priv->rx_desc_bat[i].die_dt = DT_EOS; + + return 0; +} + +static void rtsn_desc_free(struct rtsn_private *priv) +{ + if (priv->tx_desc_bat) + dma_free_coherent(&priv->pdev->dev, priv->tx_desc_bat_size, + priv->tx_desc_bat, priv->tx_desc_bat_dma); + priv->tx_desc_bat = NULL; + + if (priv->rx_desc_bat) + dma_free_coherent(&priv->pdev->dev, priv->rx_desc_bat_size, + priv->rx_desc_bat, priv->rx_desc_bat_dma); + priv->rx_desc_bat = NULL; +} + +static void rtsn_chain_free(struct rtsn_private *priv) +{ + struct device *dev = &priv->pdev->dev; + + dma_free_coherent(dev, + sizeof(struct rtsn_ext_desc) * (priv->num_tx_ring + 1), + priv->tx_ring, priv->tx_desc_dma); + priv->tx_ring = NULL; + + dma_free_coherent(dev, + sizeof(struct rtsn_ext_ts_desc) * (priv->num_rx_ring + 1), + priv->rx_ring, priv->rx_desc_dma); + priv->rx_ring = NULL; + + kfree(priv->tx_skb); + priv->tx_skb = NULL; + + kfree(priv->rx_skb); + priv->rx_skb = NULL; +} + +static int rtsn_chain_init(struct rtsn_private *priv, int tx_size, int rx_size) +{ + struct net_device *ndev = priv->ndev; + struct sk_buff *skb; + int i; + + priv->num_tx_ring = tx_size; + priv->num_rx_ring = rx_size; + + priv->tx_skb = 
kcalloc(tx_size, sizeof(*priv->tx_skb), GFP_KERNEL); + priv->rx_skb = kcalloc(rx_size, sizeof(*priv->rx_skb), GFP_KERNEL); + + if (!priv->rx_skb || !priv->tx_skb) + goto error; + + for (i = 0; i < rx_size; i++) { + skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RTSN_ALIGN - 1); + if (!skb) + goto error; + skb_reserve(skb, NET_IP_ALIGN); + priv->rx_skb[i] = skb; + } + + /* Allocate TX, RX descriptors */ + priv->tx_ring = dma_alloc_coherent(ndev->dev.parent, + sizeof(struct rtsn_ext_desc) * (tx_size + 1), + &priv->tx_desc_dma, GFP_KERNEL); + priv->rx_ring = dma_alloc_coherent(ndev->dev.parent, + sizeof(struct rtsn_ext_ts_desc) * (rx_size + 1), + &priv->rx_desc_dma, GFP_KERNEL); + + if (!priv->tx_ring || !priv->rx_ring) + goto error; + + return 0; +error: + rtsn_chain_free(priv); + + return -ENOMEM; +} + +static void rtsn_chain_format(struct rtsn_private *priv) +{ + struct net_device *ndev = priv->ndev; + struct rtsn_ext_ts_desc *rx_desc; + struct rtsn_ext_desc *tx_desc; + struct rtsn_desc *bat_desc; + dma_addr_t dma_addr; + unsigned int i; + + priv->cur_tx = 0; + priv->cur_rx = 0; + priv->dirty_rx = 0; + priv->dirty_tx = 0; + + /* TX */ + memset(priv->tx_ring, 0, sizeof(*tx_desc) * priv->num_tx_ring); + for (i = 0, tx_desc = priv->tx_ring; i < priv->num_tx_ring; i++, tx_desc++) + tx_desc->die_dt = DT_EEMPTY | D_DIE; + + tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma); + tx_desc->die_dt = DT_LINK; + + bat_desc = &priv->tx_desc_bat[TX_CHAIN_IDX]; + bat_desc->die_dt = DT_LINK; + bat_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma); + + /* RX */ + memset(priv->rx_ring, 0, sizeof(*rx_desc) * priv->num_rx_ring); + for (i = 0, rx_desc = priv->rx_ring; i < priv->num_rx_ring; i++, rx_desc++) { + dma_addr = dma_map_single(ndev->dev.parent, + priv->rx_skb[i]->data, PKT_BUF_SZ, + DMA_FROM_DEVICE); + if (!dma_mapping_error(ndev->dev.parent, dma_addr)) + rx_desc->info_ds = cpu_to_le16(PKT_BUF_SZ); + rx_desc->dptr = cpu_to_le32((u32)dma_addr); + rx_desc->die_dt = DT_FEMPTY | D_DIE; + } + rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma); + rx_desc->die_dt = DT_LINK; + + bat_desc = &priv->rx_desc_bat[RX_CHAIN_IDX]; + bat_desc->die_dt = DT_LINK; + bat_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma); +} + +static int rtsn_dmac_init(struct rtsn_private *priv) +{ + int ret; + + ret = rtsn_chain_init(priv, TX_CHAIN_SIZE, RX_CHAIN_SIZE); + if (ret) + return ret; + + rtsn_chain_format(priv); + + return 0; +} + +static enum rtsn_mode rtsn_read_mode(struct rtsn_private *priv) +{ + return (rtsn_read(priv, OSR) & OSR_OPS) >> 1; +} + +static int rtsn_wait_mode(struct rtsn_private *priv, enum rtsn_mode mode) +{ + unsigned int i; + + /* Need to busy loop as mode changes can happen in atomic context. 
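Poll the operating status every RTSN_INTERVAL_US and give up after RTSN_TIMEOUT_US.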
*/ + for (i = 0; i < RTSN_TIMEOUT_US / RTSN_INTERVAL_US; i++) { + if (rtsn_read_mode(priv) == mode) + return 0; + + udelay(RTSN_INTERVAL_US); + } + + return -ETIMEDOUT; +} + +static int rtsn_change_mode(struct rtsn_private *priv, enum rtsn_mode mode) +{ + int ret; + + rtsn_write(priv, OCR, mode); + ret = rtsn_wait_mode(priv, mode); + if (ret) + netdev_err(priv->ndev, "Failed to switch operation mode\n"); + return ret; +} + +static int rtsn_get_data_irq_status(struct rtsn_private *priv) +{ + u32 val; + + val = rtsn_read(priv, TDIS0) | TDIS_TDS(TX_CHAIN_IDX); + val |= rtsn_read(priv, RDIS0) | RDIS_RDS(RX_CHAIN_IDX); + + return val; +} + +static irqreturn_t rtsn_irq(int irq, void *dev_id) +{ + struct rtsn_private *priv = dev_id; + int ret = IRQ_NONE; + + spin_lock(&priv->lock); + + if (rtsn_get_data_irq_status(priv)) { + /* Clear TX/RX irq status */ + rtsn_write(priv, TDIS0, TDIS_TDS(TX_CHAIN_IDX)); + rtsn_write(priv, RDIS0, RDIS_RDS(RX_CHAIN_IDX)); + + if (napi_schedule_prep(&priv->napi)) { + /* Disable TX/RX interrupts */ + rtsn_ctrl_data_irq(priv, false); + + __napi_schedule(&priv->napi); + } + + ret = IRQ_HANDLED; + } + + spin_unlock(&priv->lock); + + return ret; +} + +static int rtsn_request_irq(unsigned int irq, irq_handler_t handler, + unsigned long flags, struct rtsn_private *priv, + const char *ch) +{ + char *name; + int ret; + + name = devm_kasprintf(&priv->pdev->dev, GFP_KERNEL, "%s:%s", + priv->ndev->name, ch); + if (!name) + return -ENOMEM; + + ret = request_irq(irq, handler, flags, name, priv); + if (ret) + netdev_err(priv->ndev, "Cannot request IRQ %s\n", name); + + return ret; +} + +static void rtsn_free_irqs(struct rtsn_private *priv) +{ + free_irq(priv->tx_data_irq, priv); + free_irq(priv->rx_data_irq, priv); +} + +static int rtsn_request_irqs(struct rtsn_private *priv) +{ + int ret; + + priv->rx_data_irq = platform_get_irq_byname(priv->pdev, "rx"); + if (priv->rx_data_irq < 0) + return priv->rx_data_irq; + + priv->tx_data_irq = platform_get_irq_byname(priv->pdev, "tx"); + if (priv->tx_data_irq < 0) + return priv->tx_data_irq; + + ret = rtsn_request_irq(priv->tx_data_irq, rtsn_irq, 0, priv, "tx"); + if (ret) + return ret; + + ret = rtsn_request_irq(priv->rx_data_irq, rtsn_irq, 0, priv, "rx"); + if (ret) { + free_irq(priv->tx_data_irq, priv); + return ret; + } + + return 0; +} + +static int rtsn_reset(struct rtsn_private *priv) +{ + reset_control_reset(priv->reset); + mdelay(1); + + return rtsn_wait_mode(priv, OCR_OPC_DISABLE); +} + +static int rtsn_axibmi_init(struct rtsn_private *priv) +{ + int ret; + + ret = rtsn_reg_wait(priv, RR, RR_RST, RR_RST_COMPLETE); + if (ret) + return ret; + + /* Set AXIWC */ + rtsn_write(priv, AXIWC, AXIWC_DEFAULT); + + /* Set AXIRC */ + rtsn_write(priv, AXIRC, AXIRC_DEFAULT); + + /* TX Descriptor chain setting */ + rtsn_write(priv, TATLS0, TATLS0_TEDE | TATLS0_TATEN(TX_CHAIN_IDX)); + rtsn_write(priv, TATLS1, priv->tx_desc_bat_dma + TX_CHAIN_ADDR_OFFSET); + rtsn_write(priv, TATLR, TATLR_TATL); + + ret = rtsn_reg_wait(priv, TATLR, TATLR_TATL, 0); + if (ret) + return ret; + + /* RX Descriptor chain setting */ + rtsn_write(priv, RATLS0, + RATLS0_RETS | RATLS0_REDE | RATLS0_RATEN(RX_CHAIN_IDX)); + rtsn_write(priv, RATLS1, priv->rx_desc_bat_dma + RX_CHAIN_ADDR_OFFSET); + rtsn_write(priv, RATLR, RATLR_RATL); + + ret = rtsn_reg_wait(priv, RATLR, RATLR_RATL, 0); + if (ret) + return ret; + + /* Enable TX/RX interrupts */ + rtsn_ctrl_data_irq(priv, true); + + return 0; +} + +static void rtsn_mhd_init(struct rtsn_private *priv) +{ + /* TX General setting */ 
+ rtsn_write(priv, TGC1, TGC1_STTV_DEFAULT | TGC1_TQTM_SFM); + rtsn_write(priv, TMS0, TMS_MFS_MAX); + + /* RX Filter IP */ + rtsn_write(priv, CFCR0, CFCR_SDID(RX_CHAIN_IDX)); + rtsn_write(priv, FMSCR, FMSCR_FMSIE(RX_CHAIN_IDX)); +} + +static int rtsn_get_phy_params(struct rtsn_private *priv) +{ + int ret; + + ret = of_get_phy_mode(priv->pdev->dev.of_node, &priv->iface); + if (ret) + return ret; + + switch (priv->iface) { + case PHY_INTERFACE_MODE_MII: + priv->speed = 100; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + priv->speed = 1000; + break; + default: + return -EOPNOTSUPP; + } + + return 0; +} + +static void rtsn_set_phy_interface(struct rtsn_private *priv) +{ + u32 val; + + switch (priv->iface) { + case PHY_INTERFACE_MODE_MII: + val = MPIC_PIS_MII; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + val = MPIC_PIS_GMII; + break; + default: + return; + } + + rtsn_modify(priv, MPIC, MPIC_PIS_MASK, val); +} + +static void rtsn_set_rate(struct rtsn_private *priv) +{ + u32 val; + + switch (priv->speed) { + case 10: + val = MPIC_LSC_10M; + break; + case 100: + val = MPIC_LSC_100M; + break; + case 1000: + val = MPIC_LSC_1G; + break; + default: + return; + } + + rtsn_modify(priv, MPIC, MPIC_LSC_MASK, val); +} + +static int rtsn_rmac_init(struct rtsn_private *priv) +{ + const u8 *mac_addr = priv->ndev->dev_addr; + int ret; + + /* Set MAC address */ + rtsn_write(priv, MRMAC0, (mac_addr[0] << 8) | mac_addr[1]); + rtsn_write(priv, MRMAC1, (mac_addr[2] << 24) | (mac_addr[3] << 16) | + (mac_addr[4] << 8) | mac_addr[5]); + + /* Set xMII type */ + rtsn_set_phy_interface(priv); + rtsn_set_rate(priv); + + /* Enable MII */ + rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK, + MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT); + + /* Link verification */ + rtsn_modify(priv, MLVC, MLVC_PLV, MLVC_PLV); + ret = rtsn_reg_wait(priv, MLVC, MLVC_PLV, 0); + if (ret) + return ret; + + return ret; +} + +static int rtsn_hw_init(struct rtsn_private *priv) +{ + int ret; + + ret = rtsn_reset(priv); + if (ret) + return ret; + + /* Change to CONFIG mode */ + ret = rtsn_change_mode(priv, OCR_OPC_CONFIG); + if (ret) + return ret; + + ret = rtsn_axibmi_init(priv); + if (ret) + return ret; + + rtsn_mhd_init(priv); + + ret = rtsn_rmac_init(priv); + if (ret) + return ret; + + ret = rtsn_change_mode(priv, OCR_OPC_DISABLE); + if (ret) + return ret; + + /* Change to OPERATION mode */ + ret = rtsn_change_mode(priv, OCR_OPC_OPERATION); + + return ret; +} + +static int rtsn_mii_access(struct mii_bus *bus, bool read, int phyad, + int regad, u16 data) +{ + struct rtsn_private *priv = bus->priv; + u32 val; + int ret; + + val = MPSM_PDA(phyad) | MPSM_PRA(regad) | MPSM_PSME; + + if (!read) + val |= MPSM_PSMAD | MPSM_PRD_SET(data); + + rtsn_write(priv, MPSM, val); + + ret = rtsn_reg_wait(priv, MPSM, MPSM_PSME, 0); + if (ret) + return ret; + + if (read) + ret = MPSM_PRD_GET(rtsn_read(priv, MPSM)); + + return ret; +} + +static int rtsn_mii_read(struct mii_bus *bus, int addr, int regnum) +{ + return rtsn_mii_access(bus, true, addr, regnum, 0); +} + +static int rtsn_mii_write(struct mii_bus *bus, int addr, int regnum, u16 val) +{ + return rtsn_mii_access(bus, false, addr, regnum, val); +} + +static int rtsn_mdio_alloc(struct rtsn_private *priv) +{ + struct platform_device *pdev = priv->pdev; + struct device *dev = &pdev->dev; + 
struct device_node *mdio_node; + struct mii_bus *mii; + int ret; + + mii = mdiobus_alloc(); + if (!mii) + return -ENOMEM; + + mdio_node = of_get_child_by_name(dev->of_node, "mdio"); + if (!mdio_node) { + ret = -ENODEV; + goto out_free_bus; + } + + /* Enter config mode before registering the MDIO bus */ + ret = rtsn_reset(priv); + if (ret) + goto out_free_bus; + + ret = rtsn_change_mode(priv, OCR_OPC_CONFIG); + if (ret) + goto out_free_bus; + + rtsn_modify(priv, MPIC, MPIC_PSMCS_MASK | MPIC_PSMHT_MASK, + MPIC_PSMCS_DEFAULT | MPIC_PSMHT_DEFAULT); + + /* Register the MDIO bus */ + mii->name = "rtsn_mii"; + snprintf(mii->id, MII_BUS_ID_SIZE, "%s-%x", + pdev->name, pdev->id); + mii->priv = priv; + mii->read = rtsn_mii_read; + mii->write = rtsn_mii_write; + mii->parent = dev; + + ret = of_mdiobus_register(mii, mdio_node); + of_node_put(mdio_node); + if (ret) + goto out_free_bus; + + priv->mii = mii; + + return 0; + +out_free_bus: + mdiobus_free(mii); + return ret; +} + +static void rtsn_mdio_free(struct rtsn_private *priv) +{ + mdiobus_unregister(priv->mii); + mdiobus_free(priv->mii); + priv->mii = NULL; +} + +static void rtsn_adjust_link(struct net_device *ndev) +{ + struct rtsn_private *priv = netdev_priv(ndev); + struct phy_device *phydev = ndev->phydev; + bool new_state = false; + unsigned long flags; + + spin_lock_irqsave(&priv->lock, flags); + + if (phydev->link) { + if (phydev->speed != priv->speed) { + new_state = true; + priv->speed = phydev->speed; + } + + if (!priv->link) { + new_state = true; + priv->link = phydev->link; + } + } else if (priv->link) { + new_state = true; + priv->link = 0; + priv->speed = 0; + } + + if (new_state) { + /* Need to transition to CONFIG mode before reconfiguring and + * then back to the original mode. Any state change to/from + * CONFIG or OPERATION must go over DISABLED to stop Rx/Tx. 
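+ * The transitions below therefore bounce through DISABLED in both directions.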
+ */ + enum rtsn_mode orgmode = rtsn_read_mode(priv); + + /* Transit to CONFIG */ + if (orgmode != OCR_OPC_CONFIG) { + if (orgmode != OCR_OPC_DISABLE && + rtsn_change_mode(priv, OCR_OPC_DISABLE)) + goto out; + if (rtsn_change_mode(priv, OCR_OPC_CONFIG)) + goto out; + } + + rtsn_set_rate(priv); + + /* Transition to original mode */ + if (orgmode != OCR_OPC_CONFIG) { + if (rtsn_change_mode(priv, OCR_OPC_DISABLE)) + goto out; + if (orgmode != OCR_OPC_DISABLE && + rtsn_change_mode(priv, orgmode)) + goto out; + } + } +out: + spin_unlock_irqrestore(&priv->lock, flags); + + if (new_state) + phy_print_status(phydev); +} + +static int rtsn_phy_init(struct rtsn_private *priv) +{ + struct device_node *np = priv->ndev->dev.parent->of_node; + struct phy_device *phydev; + struct device_node *phy; + + priv->link = 0; + + phy = of_parse_phandle(np, "phy-handle", 0); + if (!phy) + return -ENOENT; + + phydev = of_phy_connect(priv->ndev, phy, rtsn_adjust_link, 0, + priv->iface); + of_node_put(phy); + if (!phydev) + return -ENOENT; + + /* Only support full-duplex mode */ + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + + phy_attached_info(phydev); + + return 0; +} + +static void rtsn_phy_deinit(struct rtsn_private *priv) +{ + phy_disconnect(priv->ndev->phydev); + priv->ndev->phydev = NULL; +} + +static int rtsn_init(struct rtsn_private *priv) +{ + int ret; + + ret = rtsn_desc_alloc(priv); + if (ret) + return ret; + + ret = rtsn_dmac_init(priv); + if (ret) + goto error_free_desc; + + ret = rtsn_hw_init(priv); + if (ret) + goto error_free_chain; + + ret = rtsn_phy_init(priv); + if (ret) + goto error_free_chain; + + ret = rtsn_request_irqs(priv); + if (ret) + goto error_free_phy; + + return 0; +error_free_phy: + rtsn_phy_deinit(priv); +error_free_chain: + rtsn_chain_free(priv); +error_free_desc: + rtsn_desc_free(priv); + return ret; +} + +static void rtsn_deinit(struct rtsn_private *priv) +{ + rtsn_free_irqs(priv); + rtsn_phy_deinit(priv); + rtsn_chain_free(priv); + rtsn_desc_free(priv); +} + +static void rtsn_parse_mac_address(struct device_node *np, + struct net_device *ndev) +{ + struct rtsn_private *priv = netdev_priv(ndev); + u8 addr[ETH_ALEN]; + u32 mrmac0; + u32 mrmac1; + + /* Try to read address from Device Tree. */ + if (!of_get_mac_address(np, addr)) { + eth_hw_addr_set(ndev, addr); + return; + } + + /* Try to read address from device. 
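The boot loader may already have programmed a valid MAC into the MRMAC registers.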
*/ + mrmac0 = rtsn_read(priv, MRMAC0); + mrmac1 = rtsn_read(priv, MRMAC1); + + addr[0] = (mrmac0 >> 8) & 0xff; + addr[1] = (mrmac0 >> 0) & 0xff; + addr[2] = (mrmac1 >> 24) & 0xff; + addr[3] = (mrmac1 >> 16) & 0xff; + addr[4] = (mrmac1 >> 8) & 0xff; + addr[5] = (mrmac1 >> 0) & 0xff; + + if (is_valid_ether_addr(addr)) { + eth_hw_addr_set(ndev, addr); + return; + } + + /* Fallback to a random address */ + eth_hw_addr_random(ndev); +} + +static int rtsn_open(struct net_device *ndev) +{ + struct rtsn_private *priv = netdev_priv(ndev); + int ret; + + napi_enable(&priv->napi); + + ret = rtsn_init(priv); + if (ret) { + napi_disable(&priv->napi); + return ret; + } + + phy_start(ndev->phydev); + + netif_start_queue(ndev); + + return 0; +} + +static int rtsn_stop(struct net_device *ndev) +{ + struct rtsn_private *priv = netdev_priv(ndev); + + phy_stop(priv->ndev->phydev); + napi_disable(&priv->napi); + rtsn_change_mode(priv, OCR_OPC_DISABLE); + rtsn_deinit(priv); + + return 0; +} + +static netdev_tx_t rtsn_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct rtsn_private *priv = netdev_priv(ndev); + struct rtsn_ext_desc *desc; + int ret = NETDEV_TX_OK; + unsigned long flags; + dma_addr_t dma_addr; + int entry; + + spin_lock_irqsave(&priv->lock, flags); + + /* Drop packet if it won't fit in a single descriptor. */ + if (skb->len >= TX_DS) { + priv->stats.tx_dropped++; + priv->stats.tx_errors++; + goto out; + } + + if (priv->cur_tx - priv->dirty_tx > priv->num_tx_ring) { + netif_stop_subqueue(ndev, 0); + ret = NETDEV_TX_BUSY; + goto out; + } + + if (skb_put_padto(skb, ETH_ZLEN)) + goto out; + + dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(ndev->dev.parent, dma_addr)) { + dev_kfree_skb_any(skb); + goto out; + } + + entry = priv->cur_tx % priv->num_tx_ring; + priv->tx_skb[entry] = skb; + desc = &priv->tx_ring[entry]; + desc->dptr = cpu_to_le32(dma_addr); + desc->info_ds = cpu_to_le16(skb->len); + desc->info1 = cpu_to_le64(skb->len); + + if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; + priv->ts_tag++; + desc->info_ds |= cpu_to_le16(TXC); + desc->info = priv->ts_tag; + } + + skb_tx_timestamp(skb); + dma_wmb(); + + desc->die_dt = DT_FSINGLE | D_DIE; + priv->cur_tx++; + + /* Start xmit */ + rtsn_write(priv, TRCR0, BIT(TX_CHAIN_IDX)); +out: + spin_unlock_irqrestore(&priv->lock, flags); + return ret; +} + +static void rtsn_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *storage) +{ + struct rtsn_private *priv = netdev_priv(ndev); + *storage = priv->stats; +} + +static int rtsn_do_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) +{ + if (!netif_running(ndev)) + return -ENODEV; + + return phy_do_ioctl_running(ndev, ifr, cmd); +} + +static int rtsn_hwtstamp_get(struct net_device *ndev, + struct kernel_hwtstamp_config *config) +{ + struct rcar_gen4_ptp_private *ptp_priv; + struct rtsn_private *priv; + + if (!netif_running(ndev)) + return -ENODEV; + + priv = netdev_priv(ndev); + ptp_priv = priv->ptp_priv; + + config->flags = 0; + + config->tx_type = + ptp_priv->tstamp_tx_ctrl ? 
HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + + switch (ptp_priv->tstamp_rx_ctrl & RCAR_GEN4_RXTSTAMP_TYPE) { + case RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT: + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L2_EVENT; + break; + case RCAR_GEN4_RXTSTAMP_TYPE_ALL: + config->rx_filter = HWTSTAMP_FILTER_ALL; + break; + default: + config->rx_filter = HWTSTAMP_FILTER_NONE; + break; + } + + return 0; +} + +static int rtsn_hwtstamp_set(struct net_device *ndev, + struct kernel_hwtstamp_config *config, + struct netlink_ext_ack *extack) +{ + struct rcar_gen4_ptp_private *ptp_priv; + struct rtsn_private *priv; + u32 tstamp_rx_ctrl; + u32 tstamp_tx_ctrl; + + if (!netif_running(ndev)) + return -ENODEV; + + priv = netdev_priv(ndev); + ptp_priv = priv->ptp_priv; + + if (config->flags) + return -EINVAL; + + switch (config->tx_type) { + case HWTSTAMP_TX_OFF: + tstamp_tx_ctrl = 0; + break; + case HWTSTAMP_TX_ON: + tstamp_tx_ctrl = RCAR_GEN4_TXTSTAMP_ENABLED; + break; + default: + return -ERANGE; + } + + switch (config->rx_filter) { + case HWTSTAMP_FILTER_NONE: + tstamp_rx_ctrl = 0; + break; + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: + tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED | + RCAR_GEN4_RXTSTAMP_TYPE_V2_L2_EVENT; + break; + default: + config->rx_filter = HWTSTAMP_FILTER_ALL; + tstamp_rx_ctrl = RCAR_GEN4_RXTSTAMP_ENABLED | + RCAR_GEN4_RXTSTAMP_TYPE_ALL; + break; + } + + ptp_priv->tstamp_tx_ctrl = tstamp_tx_ctrl; + ptp_priv->tstamp_rx_ctrl = tstamp_rx_ctrl; + + return 0; +} + +static const struct net_device_ops rtsn_netdev_ops = { + .ndo_open = rtsn_open, + .ndo_stop = rtsn_stop, + .ndo_start_xmit = rtsn_start_xmit, + .ndo_get_stats64 = rtsn_get_stats64, + .ndo_eth_ioctl = rtsn_do_ioctl, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = eth_mac_addr, + .ndo_hwtstamp_set = rtsn_hwtstamp_set, + .ndo_hwtstamp_get = rtsn_hwtstamp_get, +}; + +static int rtsn_get_ts_info(struct net_device *ndev, + struct kernel_ethtool_ts_info *info) +{ + struct rtsn_private *priv = netdev_priv(ndev); + + info->phc_index = ptp_clock_index(priv->ptp_priv->clock); + info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | + SOF_TIMESTAMPING_RX_SOFTWARE | + SOF_TIMESTAMPING_SOFTWARE | + SOF_TIMESTAMPING_TX_HARDWARE | + SOF_TIMESTAMPING_RX_HARDWARE | + SOF_TIMESTAMPING_RAW_HARDWARE; + info->tx_types = BIT(HWTSTAMP_TX_OFF) | BIT(HWTSTAMP_TX_ON); + info->rx_filters = BIT(HWTSTAMP_FILTER_NONE) | BIT(HWTSTAMP_FILTER_ALL); + + return 0; +} + +static const struct ethtool_ops rtsn_ethtool_ops = { + .nway_reset = phy_ethtool_nway_reset, + .get_link = ethtool_op_get_link, + .get_ts_info = rtsn_get_ts_info, + .get_link_ksettings = phy_ethtool_get_link_ksettings, + .set_link_ksettings = phy_ethtool_set_link_ksettings, +}; + +static const struct of_device_id rtsn_match_table[] = { + { .compatible = "renesas,r8a779g0-ethertsn", }, + { /* Sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, rtsn_match_table); + +static int rtsn_probe(struct platform_device *pdev) +{ + struct rtsn_private *priv; + struct net_device *ndev; + struct resource *res; + int ret; + + ndev = alloc_etherdev_mqs(sizeof(struct rtsn_private), TX_NUM_CHAINS, + RX_NUM_CHAINS); + if (!ndev) + return -ENOMEM; + + priv = netdev_priv(ndev); + priv->pdev = pdev; + priv->ndev = ndev; + priv->ptp_priv = rcar_gen4_ptp_alloc(pdev); + + spin_lock_init(&priv->lock); + platform_set_drvdata(pdev, priv); + + priv->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(priv->clk)) { + ret = PTR_ERR(priv->clk); + goto error_free; + } + + priv->reset = devm_reset_control_get(&pdev->dev, NULL); + if 
(IS_ERR(priv->reset)) { + ret = PTR_ERR(priv->reset); + goto error_free; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "tsnes"); + if (!res) { + dev_err(&pdev->dev, "Can't find tsnes resource\n"); + ret = -EINVAL; + goto error_free; + } + + priv->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->base)) { + ret = PTR_ERR(priv->base); + goto error_free; + } + + SET_NETDEV_DEV(ndev, &pdev->dev); + + ndev->features = NETIF_F_RXCSUM; + ndev->hw_features = NETIF_F_RXCSUM; + ndev->base_addr = res->start; + ndev->netdev_ops = &rtsn_netdev_ops; + ndev->ethtool_ops = &rtsn_ethtool_ops; + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gptp"); + if (!res) { + dev_err(&pdev->dev, "Can't find gptp resource\n"); + ret = -EINVAL; + goto error_free; + } + + priv->ptp_priv->addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(priv->ptp_priv->addr)) { + ret = PTR_ERR(priv->ptp_priv->addr); + goto error_free; + } + + ret = rtsn_get_phy_params(priv); + if (ret) + goto error_free; + + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + + netif_napi_add(ndev, &priv->napi, rtsn_poll); + + rtsn_parse_mac_address(pdev->dev.of_node, ndev); + + dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + + device_set_wakeup_capable(&pdev->dev, 1); + + ret = rcar_gen4_ptp_register(priv->ptp_priv, RCAR_GEN4_PTP_REG_LAYOUT, + clk_get_rate(priv->clk)); + if (ret) + goto error_pm; + + ret = rtsn_mdio_alloc(priv); + if (ret) + goto error_ptp; + + ret = register_netdev(ndev); + if (ret) + goto error_mdio; + + netdev_info(ndev, "MAC address %pM\n", ndev->dev_addr); + + return 0; + +error_mdio: + rtsn_mdio_free(priv); +error_ptp: + rcar_gen4_ptp_unregister(priv->ptp_priv); +error_pm: + netif_napi_del(&priv->napi); + rtsn_change_mode(priv, OCR_OPC_DISABLE); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); +error_free: + free_netdev(ndev); + + return ret; +} + +static int rtsn_remove(struct platform_device *pdev) +{ + struct rtsn_private *priv = platform_get_drvdata(pdev); + + unregister_netdev(priv->ndev); + rtsn_mdio_free(priv); + rcar_gen4_ptp_unregister(priv->ptp_priv); + rtsn_change_mode(priv, OCR_OPC_DISABLE); + netif_napi_del(&priv->napi); + + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + + free_netdev(priv->ndev); + + return 0; +} + +static struct platform_driver rtsn_driver = { + .probe = rtsn_probe, + .remove = rtsn_remove, + .driver = { + .name = "rtsn", + .of_match_table = rtsn_match_table, + } +}; +module_platform_driver(rtsn_driver); + +MODULE_AUTHOR("Phong Hoang, Niklas Söderlund"); +MODULE_DESCRIPTION("Renesas Ethernet-TSN device driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/renesas/rtsn.h b/drivers/net/ethernet/renesas/rtsn.h new file mode 100644 index 000000000000..3183e80d7e6b --- /dev/null +++ b/drivers/net/ethernet/renesas/rtsn.h @@ -0,0 +1,464 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* Renesas Ethernet-TSN device driver + * + * Copyright (C) 2022 Renesas Electronics Corporation + * Copyright (C) 2023 Niklas Söderlund + */ + +#ifndef __RTSN_H__ +#define __RTSN_H__ + +#include <linux/types.h> + +#define AXIBMI 0x0000 +#define TSNMHD 0x1000 +#define RMSO 0x2000 +#define RMRO 0x3800 + +enum rtsn_reg { + AXIWC = AXIBMI + 0x0000, + AXIRC = AXIBMI + 0x0004, + TDPC0 = AXIBMI + 0x0010, + TFT = AXIBMI + 0x0090, + TATLS0 = AXIBMI + 0x00a0, + TATLS1 = AXIBMI + 0x00a4, + TATLR = AXIBMI + 0x00a8, + RATLS0 = AXIBMI + 0x00b0, + RATLS1 = AXIBMI + 0x00b4, + RATLR = AXIBMI + 0x00b8, + TSA0 = AXIBMI + 
0x00c0, + TSS0 = AXIBMI + 0x00c4, + TRCR0 = AXIBMI + 0x0140, + RIDAUAS0 = AXIBMI + 0x0180, + RR = AXIBMI + 0x0200, + TATS = AXIBMI + 0x0210, + TATSR0 = AXIBMI + 0x0214, + TATSR1 = AXIBMI + 0x0218, + TATSR2 = AXIBMI + 0x021c, + RATS = AXIBMI + 0x0220, + RATSR0 = AXIBMI + 0x0224, + RATSR1 = AXIBMI + 0x0228, + RATSR2 = AXIBMI + 0x022c, + RIDASM0 = AXIBMI + 0x0240, + RIDASAM0 = AXIBMI + 0x0244, + RIDACAM0 = AXIBMI + 0x0248, + EIS0 = AXIBMI + 0x0300, + EIE0 = AXIBMI + 0x0304, + EID0 = AXIBMI + 0x0308, + EIS1 = AXIBMI + 0x0310, + EIE1 = AXIBMI + 0x0314, + EID1 = AXIBMI + 0x0318, + TCEIS0 = AXIBMI + 0x0340, + TCEIE0 = AXIBMI + 0x0344, + TCEID0 = AXIBMI + 0x0348, + RFSEIS0 = AXIBMI + 0x04c0, + RFSEIE0 = AXIBMI + 0x04c4, + RFSEID0 = AXIBMI + 0x04c8, + RFEIS0 = AXIBMI + 0x0540, + RFEIE0 = AXIBMI + 0x0544, + RFEID0 = AXIBMI + 0x0548, + RCEIS0 = AXIBMI + 0x05c0, + RCEIE0 = AXIBMI + 0x05c4, + RCEID0 = AXIBMI + 0x05c8, + RIDAOIS = AXIBMI + 0x0640, + RIDAOIE = AXIBMI + 0x0644, + RIDAOID = AXIBMI + 0x0648, + TSFEIS = AXIBMI + 0x06c0, + TSFEIE = AXIBMI + 0x06c4, + TSFEID = AXIBMI + 0x06c8, + TSCEIS = AXIBMI + 0x06d0, + TSCEIE = AXIBMI + 0x06d4, + TSCEID = AXIBMI + 0x06d8, + DIS = AXIBMI + 0x0b00, + DIE = AXIBMI + 0x0b04, + DID = AXIBMI + 0x0b08, + TDIS0 = AXIBMI + 0x0b10, + TDIE0 = AXIBMI + 0x0b14, + TDID0 = AXIBMI + 0x0b18, + RDIS0 = AXIBMI + 0x0b90, + RDIE0 = AXIBMI + 0x0b94, + RDID0 = AXIBMI + 0x0b98, + TSDIS = AXIBMI + 0x0c10, + TSDIE = AXIBMI + 0x0c14, + TSDID = AXIBMI + 0x0c18, + GPOUT = AXIBMI + 0x6000, + + OCR = TSNMHD + 0x0000, + OSR = TSNMHD + 0x0004, + SWR = TSNMHD + 0x0008, + SIS = TSNMHD + 0x000c, + GIS = TSNMHD + 0x0010, + GIE = TSNMHD + 0x0014, + GID = TSNMHD + 0x0018, + TIS1 = TSNMHD + 0x0020, + TIE1 = TSNMHD + 0x0024, + TID1 = TSNMHD + 0x0028, + TIS2 = TSNMHD + 0x0030, + TIE2 = TSNMHD + 0x0034, + TID2 = TSNMHD + 0x0038, + RIS = TSNMHD + 0x0040, + RIE = TSNMHD + 0x0044, + RID = TSNMHD + 0x0048, + TGC1 = TSNMHD + 0x0050, + TGC2 = TSNMHD + 0x0054, + TFS0 = TSNMHD + 0x0060, + TCF0 = TSNMHD + 0x0070, + TCR1 = TSNMHD + 0x0080, + TCR2 = TSNMHD + 0x0084, + TCR3 = TSNMHD + 0x0088, + TCR4 = TSNMHD + 0x008c, + TMS0 = TSNMHD + 0x0090, + TSR1 = TSNMHD + 0x00b0, + TSR2 = TSNMHD + 0x00b4, + TSR3 = TSNMHD + 0x00b8, + TSR4 = TSNMHD + 0x00bc, + TSR5 = TSNMHD + 0x00c0, + RGC = TSNMHD + 0x00d0, + RDFCR = TSNMHD + 0x00d4, + RCFCR = TSNMHD + 0x00d8, + REFCNCR = TSNMHD + 0x00dc, + RSR1 = TSNMHD + 0x00e0, + RSR2 = TSNMHD + 0x00e4, + RSR3 = TSNMHD + 0x00e8, + TCIS = TSNMHD + 0x01e0, + TCIE = TSNMHD + 0x01e4, + TCID = TSNMHD + 0x01e8, + TPTPC = TSNMHD + 0x01f0, + TTML = TSNMHD + 0x01f4, + TTJ = TSNMHD + 0x01f8, + TCC = TSNMHD + 0x0200, + TCS = TSNMHD + 0x0204, + TGS = TSNMHD + 0x020c, + TACST0 = TSNMHD + 0x0210, + TACST1 = TSNMHD + 0x0214, + TACST2 = TSNMHD + 0x0218, + TALIT0 = TSNMHD + 0x0220, + TALIT1 = TSNMHD + 0x0224, + TALIT2 = TSNMHD + 0x0228, + TAEN0 = TSNMHD + 0x0230, + TAEN1 = TSNMHD + 0x0234, + TASFE = TSNMHD + 0x0240, + TACLL0 = TSNMHD + 0x0250, + TACLL1 = TSNMHD + 0x0254, + TACLL2 = TSNMHD + 0x0258, + CACC = TSNMHD + 0x0260, + CCS = TSNMHD + 0x0264, + CAIV0 = TSNMHD + 0x0270, + CAUL0 = TSNMHD + 0x0290, + TOCST0 = TSNMHD + 0x0300, + TOCST1 = TSNMHD + 0x0304, + TOCST2 = TSNMHD + 0x0308, + TOLIT0 = TSNMHD + 0x0310, + TOLIT1 = TSNMHD + 0x0314, + TOLIT2 = TSNMHD + 0x0318, + TOEN0 = TSNMHD + 0x0320, + TOEN1 = TSNMHD + 0x0324, + TOSFE = TSNMHD + 0x0330, + TCLR0 = TSNMHD + 0x0340, + TCLR1 = TSNMHD + 0x0344, + TCLR2 = TSNMHD + 0x0348, + TSMS = TSNMHD + 0x0350, + COCC = TSNMHD + 0x0360, + COIV0 = TSNMHD + 0x03b0, 
+ COUL0 = TSNMHD + 0x03d0, + QSTMACU0 = TSNMHD + 0x0400, + QSTMACD0 = TSNMHD + 0x0404, + QSTMAMU0 = TSNMHD + 0x0408, + QSTMAMD0 = TSNMHD + 0x040c, + QSFTVL0 = TSNMHD + 0x0410, + QSFTVLM0 = TSNMHD + 0x0414, + QSFTMSD0 = TSNMHD + 0x0418, + QSFTGMI0 = TSNMHD + 0x041c, + QSFTLS = TSNMHD + 0x0600, + QSFTLIS = TSNMHD + 0x0604, + QSFTLIE = TSNMHD + 0x0608, + QSFTLID = TSNMHD + 0x060c, + QSMSMC = TSNMHD + 0x0610, + QSGTMC = TSNMHD + 0x0614, + QSEIS = TSNMHD + 0x0618, + QSEIE = TSNMHD + 0x061c, + QSEID = TSNMHD + 0x0620, + QGACST0 = TSNMHD + 0x0630, + QGACST1 = TSNMHD + 0x0634, + QGACST2 = TSNMHD + 0x0638, + QGALIT1 = TSNMHD + 0x0640, + QGALIT2 = TSNMHD + 0x0644, + QGAEN0 = TSNMHD + 0x0648, + QGAEN1 = TSNMHD + 0x074c, + QGIGS = TSNMHD + 0x0650, + QGGC = TSNMHD + 0x0654, + QGATL0 = TSNMHD + 0x0664, + QGATL1 = TSNMHD + 0x0668, + QGATL2 = TSNMHD + 0x066c, + QGOCST0 = TSNMHD + 0x0670, + QGOCST1 = TSNMHD + 0x0674, + QGOCST2 = TSNMHD + 0x0678, + QGOLIT0 = TSNMHD + 0x067c, + QGOLIT1 = TSNMHD + 0x0680, + QGOLIT2 = TSNMHD + 0x0684, + QGOEN0 = TSNMHD + 0x0688, + QGOEN1 = TSNMHD + 0x068c, + QGTRO = TSNMHD + 0x0690, + QGTR1 = TSNMHD + 0x0694, + QGTR2 = TSNMHD + 0x0698, + QGFSMS = TSNMHD + 0x069c, + QTMIS = TSNMHD + 0x06e0, + QTMIE = TSNMHD + 0x06e4, + QTMID = TSNMHD + 0x06e8, + QMEC = TSNMHD + 0x0700, + QMMC = TSNMHD + 0x0704, + QRFDC = TSNMHD + 0x0708, + QYFDC = TSNMHD + 0x070c, + QVTCMC0 = TSNMHD + 0x0710, + QMCBSC0 = TSNMHD + 0x0750, + QMCIRC0 = TSNMHD + 0x0790, + QMEBSC0 = TSNMHD + 0x07d0, + QMEIRC0 = TSNMHD + 0x0710, + QMCFC = TSNMHD + 0x0850, + QMEIS = TSNMHD + 0x0860, + QMEIE = TSNMHD + 0x0864, + QMEID = TSNMHD + 0x086c, + QSMFC0 = TSNMHD + 0x0870, + QMSPPC0 = TSNMHD + 0x08b0, + QMSRPC0 = TSNMHD + 0x08f0, + QGPPC0 = TSNMHD + 0x0930, + QGRPC0 = TSNMHD + 0x0950, + QMDPC0 = TSNMHD + 0x0970, + QMGPC0 = TSNMHD + 0x09b0, + QMYPC0 = TSNMHD + 0x09f0, + QMRPC0 = TSNMHD + 0x0a30, + MQSTMACU = TSNMHD + 0x0a70, + MQSTMACD = TSNMHD + 0x0a74, + MQSTMAMU = TSNMHD + 0x0a78, + MQSTMAMD = TSNMHD + 0x0a7c, + MQSFTVL = TSNMHD + 0x0a80, + MQSFTVLM = TSNMHD + 0x0a84, + MQSFTMSD = TSNMHD + 0x0a88, + MQSFTGMI = TSNMHD + 0x0a8c, + + CFCR0 = RMSO + 0x0800, + FMSCR = RMSO + 0x0c10, + + MMC = RMRO + 0x0000, + MPSM = RMRO + 0x0010, + MPIC = RMRO + 0x0014, + MTFFC = RMRO + 0x0020, + MTPFC = RMRO + 0x0024, + MTATC0 = RMRO + 0x0040, + MRGC = RMRO + 0x0080, + MRMAC0 = RMRO + 0x0084, + MRMAC1 = RMRO + 0x0088, + MRAFC = RMRO + 0x008c, + MRSCE = RMRO + 0x0090, + MRSCP = RMRO + 0x0094, + MRSCC = RMRO + 0x0098, + MRFSCE = RMRO + 0x009c, + MRFSCP = RMRO + 0x00a0, + MTRC = RMRO + 0x00a4, + MPFC = RMRO + 0x0100, + MLVC = RMRO + 0x0340, + MEEEC = RMRO + 0x0350, + MLBC = RMRO + 0x0360, + MGMR = RMRO + 0x0400, + MMPFTCT = RMRO + 0x0410, + MAPFTCT = RMRO + 0x0414, + MPFRCT = RMRO + 0x0418, + MFCICT = RMRO + 0x041c, + MEEECT = RMRO + 0x0420, + MEIS = RMRO + 0x0500, + MEIE = RMRO + 0x0504, + MEID = RMRO + 0x0508, + MMIS0 = RMRO + 0x0510, + MMIE0 = RMRO + 0x0514, + MMID0 = RMRO + 0x0518, + MMIS1 = RMRO + 0x0520, + MMIE1 = RMRO + 0x0524, + MMID1 = RMRO + 0x0528, + MMIS2 = RMRO + 0x0530, + MMIE2 = RMRO + 0x0534, + MMID2 = RMRO + 0x0538, + MXMS = RMRO + 0x0600, + +}; + +/* AXIBMI */ +#define RR_RATRR BIT(0) +#define RR_TATRR BIT(1) +#define RR_RST (RR_RATRR | RR_TATRR) +#define RR_RST_COMPLETE 0x03 + +#define AXIWC_DEFAULT 0xffff +#define AXIRC_DEFAULT 0xffff + +#define TATLS0_TEDE BIT(1) +#define TATLS0_TATEN_SHIFT 24 +#define TATLS0_TATEN(n) ((n) << TATLS0_TATEN_SHIFT) +#define TATLR_TATL BIT(31) + +#define RATLS0_RETS BIT(2) +#define RATLS0_REDE 
BIT(3) +#define RATLS0_RATEN_SHIFT 24 +#define RATLS0_RATEN(n) ((n) << RATLS0_RATEN_SHIFT) +#define RATLR_RATL BIT(31) + +#define DIE_DID_TDICX(n) BIT((n)) +#define DIE_DID_RDICX(n) BIT((n) + 8) +#define TDIE_TDID_TDX(n) BIT(n) +#define RDIE_RDID_RDX(n) BIT(n) +#define TDIS_TDS(n) BIT(n) +#define RDIS_RDS(n) BIT(n) + +/* MHD */ +#define OSR_OPS 0x07 +#define SWR_SWR BIT(0) + +#define TGC1_TQTM_SFM 0xff00 +#define TGC1_STTV_DEFAULT 0x03 + +#define TMS_MFS_MAX 0x2800 + +/* RMAC System */ +#define CFCR_SDID(n) ((n) << 16) +#define FMSCR_FMSIE(n) ((n) << 0) + +/* RMAC */ +#define MPIC_PIS_MASK GENMASK(1, 0) +#define MPIC_PIS_MII 0 +#define MPIC_PIS_RMII 0x01 +#define MPIC_PIS_GMII 0x02 +#define MPIC_PIS_RGMII 0x03 +#define MPIC_LSC_SHIFT 2 +#define MPIC_LSC_MASK GENMASK(3, MPIC_LSC_SHIFT) +#define MPIC_LSC_10M (0 << MPIC_LSC_SHIFT) +#define MPIC_LSC_100M (0x01 << MPIC_LSC_SHIFT) +#define MPIC_LSC_1G (0x02 << MPIC_LSC_SHIFT) +#define MPIC_PSMCS_SHIFT 16 +#define MPIC_PSMCS_MASK GENMASK(21, MPIC_PSMCS_SHIFT) +#define MPIC_PSMCS_DEFAULT (0x0a << MPIC_PSMCS_SHIFT) +#define MPIC_PSMHT_SHIFT 24 +#define MPIC_PSMHT_MASK GENMASK(26, MPIC_PSMHT_SHIFT) +#define MPIC_PSMHT_DEFAULT (0x07 << MPIC_PSMHT_SHIFT) + +#define MLVC_PASE BIT(8) +#define MLVC_PSE BIT(16) +#define MLVC_PLV BIT(17) + +#define MPSM_PSME BIT(0) +#define MPSM_PSMAD BIT(1) +#define MPSM_PDA_SHIFT 3 +#define MPSM_PDA_MASK GENMASK(7, 3) +#define MPSM_PDA(n) (((n) << MPSM_PDA_SHIFT) & MPSM_PDA_MASK) +#define MPSM_PRA_SHIFT 8 +#define MPSM_PRA_MASK GENMASK(12, 8) +#define MPSM_PRA(n) (((n) << MPSM_PRA_SHIFT) & MPSM_PRA_MASK) +#define MPSM_PRD_SHIFT 16 +#define MPSM_PRD_SET(n) ((n) << MPSM_PRD_SHIFT) +#define MPSM_PRD_GET(n) ((n) >> MPSM_PRD_SHIFT) + +#define GPOUT_RDM BIT(13) +#define GPOUT_TDM BIT(14) + +/* RTSN */ +#define RTSN_INTERVAL_US 1000 +#define RTSN_TIMEOUT_US 1000000 + +#define TX_NUM_CHAINS 1 +#define RX_NUM_CHAINS 1 + +#define TX_CHAIN_SIZE 1024 +#define RX_CHAIN_SIZE 1024 + +#define TX_CHAIN_IDX 0 +#define RX_CHAIN_IDX 0 + +#define TX_CHAIN_ADDR_OFFSET (sizeof(struct rtsn_desc) * TX_CHAIN_IDX) +#define RX_CHAIN_ADDR_OFFSET (sizeof(struct rtsn_desc) * RX_CHAIN_IDX) + +#define PKT_BUF_SZ 1584 +#define RTSN_ALIGN 128 + +enum rtsn_mode { + OCR_OPC_DISABLE, + OCR_OPC_CONFIG, + OCR_OPC_OPERATION, +}; + +/* Descriptors */ +enum RX_DS_CC_BIT { + RX_DS = 0x0fff, /* Data size */ + RX_TR = 0x1000, /* Truncation indication */ + RX_EI = 0x2000, /* Error indication */ + RX_PS = 0xc000, /* Padding selection */ +}; + +enum TX_FS_TAGL_BIT { + TX_DS = 0x0fff, /* Data size */ + TX_TAGL = 0xf000, /* Frame tag LSBs */ +}; + +enum DIE_DT { + /* HW/SW arbitration */ + DT_FEMPTY_IS = 0x10, + DT_FEMPTY_IC = 0x20, + DT_FEMPTY_ND = 0x30, + DT_FEMPTY = 0x40, + DT_FEMPTY_START = 0x50, + DT_FEMPTY_MID = 0x60, + DT_FEMPTY_END = 0x70, + + /* Frame data */ + DT_FSINGLE = 0x80, + DT_FSTART = 0x90, + DT_FMID = 0xa0, + DT_FEND = 0xb0, + + /* Chain control */ + DT_LEMPTY = 0xc0, + DT_EEMPTY = 0xd0, + DT_LINK = 0xe0, + DT_EOS = 0xf0, + + DT_MASK = 0xf0, + D_DIE = 0x08, +}; + +struct rtsn_desc { + __le16 info_ds; + __u8 info; + u8 die_dt; + __le32 dptr; +} __packed; + +struct rtsn_ts_desc { + __le16 info_ds; + __u8 info; + u8 die_dt; + __le32 dptr; + __le32 ts_nsec; + __le32 ts_sec; +} __packed; + +struct rtsn_ext_desc { + __le16 info_ds; + __u8 info; + u8 die_dt; + __le32 dptr; + __le64 info1; +} __packed; + +struct rtsn_ext_ts_desc { + __le16 info_ds; + __u8 info; + u8 die_dt; + __le32 dptr; + __le64 info1; + __le32 ts_nsec; + __le32 ts_sec; +} __packed; + +enum 
EXT_INFO_DS_BIT { + TXC = 0x4000, +}; + +#endif diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 8fa6c0e9195b..7d69302ffa0a 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -1396,7 +1396,7 @@ static void efx_ef10_table_reset_mc_allocations(struct efx_nic *efx) efx_mcdi_filter_table_reset_mc_allocations(efx); nic_data->must_restore_piobufs = true; efx_ef10_forget_old_piobufs(efx); - efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; /* Driver-created vswitches and vports must be re-created */ nic_data->must_probe_vswitching = true; diff --git a/drivers/net/ethernet/sfc/ef100_ethtool.c b/drivers/net/ethernet/sfc/ef100_ethtool.c index cf55202b3a7b..896ffca4aee2 100644 --- a/drivers/net/ethernet/sfc/ef100_ethtool.c +++ b/drivers/net/ethernet/sfc/ef100_ethtool.c @@ -59,8 +59,12 @@ const struct ethtool_ops ef100_ethtool_ops = { .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, + .rxfh_priv_size = sizeof(struct efx_rss_context_priv), .get_rxfh = efx_ethtool_get_rxfh, .set_rxfh = efx_ethtool_set_rxfh, + .create_rxfh_context = efx_ethtool_create_rxfh_context, + .modify_rxfh_context = efx_ethtool_modify_rxfh_context, + .remove_rxfh_context = efx_ethtool_remove_rxfh_context, .get_module_info = efx_ethtool_get_module_info, .get_module_eeprom = efx_ethtool_get_module_eeprom, diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index e9d9de8e648a..6f1a01ded7d4 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -299,7 +299,7 @@ static int efx_probe_nic(struct efx_nic *efx) if (efx->n_channels > 1) netdev_rss_key_fill(efx->rss_context.rx_hash_key, sizeof(efx->rss_context.rx_hash_key)); - efx_set_default_rx_indir_table(efx, &efx->rss_context); + efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table); /* Initialise the interrupt moderation settings */ efx->irq_mod_step_us = DIV_ROUND_UP(efx->timer_quantum_ns, 1000); diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index 48d3623735ba..7a6cab883d66 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -158,7 +158,7 @@ static inline s32 efx_filter_get_rx_ids(struct efx_nic *efx, } /* RSS contexts */ -static inline bool efx_rss_active(struct efx_rss_context *ctx) +static inline bool efx_rss_active(struct efx_rss_context_priv *ctx) { return ctx->context_id != EFX_MCDI_RSS_CONTEXT_INVALID; } diff --git a/drivers/net/ethernet/sfc/efx_common.c b/drivers/net/ethernet/sfc/efx_common.c index 4ebd5ae23eca..13cf647051af 100644 --- a/drivers/net/ethernet/sfc/efx_common.c +++ b/drivers/net/ethernet/sfc/efx_common.c @@ -714,7 +714,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method) mutex_lock(&efx->mac_lock); down_write(&efx->filter_sem); - mutex_lock(&efx->rss_lock); + mutex_lock(&efx->net_dev->ethtool->rss_lock); efx->type->fini(efx); } @@ -777,7 +777,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) if (efx->type->rx_restore_rss_contexts) efx->type->rx_restore_rss_contexts(efx); - mutex_unlock(&efx->rss_lock); + mutex_unlock(&efx->net_dev->ethtool->rss_lock); efx->type->filter_table_restore(efx); up_write(&efx->filter_sem); @@ -793,7 +793,7 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok) fail: efx->port_initialized = false; - mutex_unlock(&efx->rss_lock); + 
mutex_unlock(&efx->net_dev->ethtool->rss_lock); up_write(&efx->filter_sem); mutex_unlock(&efx->mac_lock); @@ -1000,9 +1000,7 @@ int efx_init_struct(struct efx_nic *efx, struct pci_dev *pci_dev) efx->type->rx_hash_offset - efx->type->rx_prefix_size; efx->rx_packet_ts_offset = efx->type->rx_ts_offset - efx->type->rx_prefix_size; - INIT_LIST_HEAD(&efx->rss_context.list); - efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; - mutex_init(&efx->rss_lock); + efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; efx->vport_id = EVB_PORT_ID_ASSIGNED; spin_lock_init(&efx->stats_lock); efx->vi_stride = EFX_DEFAULT_VI_STRIDE; diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 37c69c8d90b1..7c887160e2ef 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -226,7 +226,7 @@ static void efx_ethtool_get_fec_stats(struct net_device *net_dev, } static int efx_ethtool_get_ts_info(struct net_device *net_dev, - struct ethtool_ts_info *ts_info) + struct kernel_ethtool_ts_info *ts_info) { struct efx_nic *efx = efx_netdev_priv(net_dev); @@ -268,8 +268,12 @@ const struct ethtool_ops efx_ethtool_ops = { .set_rxnfc = efx_ethtool_set_rxnfc, .get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size, .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size, + .rxfh_priv_size = sizeof(struct efx_rss_context_priv), .get_rxfh = efx_ethtool_get_rxfh, .set_rxfh = efx_ethtool_set_rxfh, + .create_rxfh_context = efx_ethtool_create_rxfh_context, + .modify_rxfh_context = efx_ethtool_modify_rxfh_context, + .remove_rxfh_context = efx_ethtool_remove_rxfh_context, .get_ts_info = efx_ethtool_get_ts_info, .get_module_info = efx_ethtool_get_module_info, .get_module_eeprom = efx_ethtool_get_module_eeprom, diff --git a/drivers/net/ethernet/sfc/ethtool_common.c b/drivers/net/ethernet/sfc/ethtool_common.c index 7d5e5db4eac5..6ded44b86052 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.c +++ b/drivers/net/ethernet/sfc/ethtool_common.c @@ -820,10 +820,10 @@ int efx_ethtool_get_rxnfc(struct net_device *net_dev, return 0; case ETHTOOL_GRXFH: { - struct efx_rss_context *ctx = &efx->rss_context; + struct efx_rss_context_priv *ctx = &efx->rss_context.priv; __u64 data; - mutex_lock(&efx->rss_lock); + mutex_lock(&net_dev->ethtool->rss_lock); if (info->flow_type & FLOW_RSS && info->rss_context) { ctx = efx_find_rss_context_entry(efx, info->rss_context); if (!ctx) { @@ -864,7 +864,7 @@ int efx_ethtool_get_rxnfc(struct net_device *net_dev, out_setdata_unlock: info->data = data; out_unlock: - mutex_unlock(&efx->rss_lock); + mutex_unlock(&net_dev->ethtool->rss_lock); return rc; } @@ -1163,46 +1163,14 @@ u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev) return efx->type->rx_hash_key_size; } -static int efx_ethtool_get_rxfh_context(struct net_device *net_dev, - struct ethtool_rxfh_param *rxfh) -{ - struct efx_nic *efx = efx_netdev_priv(net_dev); - struct efx_rss_context *ctx; - int rc = 0; - - if (!efx->type->rx_pull_rss_context_config) - return -EOPNOTSUPP; - - mutex_lock(&efx->rss_lock); - ctx = efx_find_rss_context_entry(efx, rxfh->rss_context); - if (!ctx) { - rc = -ENOENT; - goto out_unlock; - } - rc = efx->type->rx_pull_rss_context_config(efx, ctx); - if (rc) - goto out_unlock; - - rxfh->hfunc = ETH_RSS_HASH_TOP; - if (rxfh->indir) - memcpy(rxfh->indir, ctx->rx_indir_table, - sizeof(ctx->rx_indir_table)); - if (rxfh->key) - memcpy(rxfh->key, ctx->rx_hash_key, - efx->type->rx_hash_key_size); -out_unlock: - mutex_unlock(&efx->rss_lock); - return 
rc; -} - int efx_ethtool_get_rxfh(struct net_device *net_dev, struct ethtool_rxfh_param *rxfh) { struct efx_nic *efx = efx_netdev_priv(net_dev); int rc; - if (rxfh->rss_context) - return efx_ethtool_get_rxfh_context(net_dev, rxfh); + if (rxfh->rss_context) /* core should never call us for these */ + return -EINVAL; rc = efx->type->rx_pull_rss_config(efx); if (rc) @@ -1218,68 +1186,85 @@ int efx_ethtool_get_rxfh(struct net_device *net_dev, return 0; } -static int efx_ethtool_set_rxfh_context(struct net_device *net_dev, - struct ethtool_rxfh_param *rxfh, - struct netlink_ext_ack *extack) +int efx_ethtool_modify_rxfh_context(struct net_device *net_dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) { struct efx_nic *efx = efx_netdev_priv(net_dev); - u32 *rss_context = &rxfh->rss_context; - struct efx_rss_context *ctx; - u32 *indir = rxfh->indir; - bool allocated = false; - u8 *key = rxfh->key; - int rc; + struct efx_rss_context_priv *priv; + const u32 *indir = rxfh->indir; + const u8 *key = rxfh->key; - if (!efx->type->rx_push_rss_context_config) + if (!efx->type->rx_push_rss_context_config) { + NL_SET_ERR_MSG_MOD(extack, + "NIC type does not support custom contexts"); + return -EOPNOTSUPP; + } + /* Hash function is Toeplitz, cannot be changed */ + if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE && + rxfh->hfunc != ETH_RSS_HASH_TOP) { + NL_SET_ERR_MSG_MOD(extack, "Only Toeplitz hash is supported"); return -EOPNOTSUPP; - - mutex_lock(&efx->rss_lock); - - if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { - if (rxfh->rss_delete) { - /* alloc + delete == Nothing to do */ - rc = -EINVAL; - goto out_unlock; - } - ctx = efx_alloc_rss_context_entry(efx); - if (!ctx) { - rc = -ENOMEM; - goto out_unlock; - } - ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; - /* Initialise indir table and key to defaults */ - efx_set_default_rx_indir_table(efx, ctx); - netdev_rss_key_fill(ctx->rx_hash_key, sizeof(ctx->rx_hash_key)); - allocated = true; - } else { - ctx = efx_find_rss_context_entry(efx, *rss_context); - if (!ctx) { - rc = -ENOENT; - goto out_unlock; - } } - if (rxfh->rss_delete) { - /* delete this context */ - rc = efx->type->rx_push_rss_context_config(efx, ctx, NULL, NULL); - if (!rc) - efx_free_rss_context_entry(ctx); - goto out_unlock; - } + priv = ethtool_rxfh_context_priv(ctx); if (!key) - key = ctx->rx_hash_key; + key = ethtool_rxfh_context_key(ctx); if (!indir) - indir = ctx->rx_indir_table; + indir = ethtool_rxfh_context_indir(ctx); - rc = efx->type->rx_push_rss_context_config(efx, ctx, indir, key); - if (rc && allocated) - efx_free_rss_context_entry(ctx); - else - *rss_context = ctx->user_id; -out_unlock: - mutex_unlock(&efx->rss_lock); - return rc; + return efx->type->rx_push_rss_context_config(efx, priv, indir, key, + false); +} + +int efx_ethtool_create_rxfh_context(struct net_device *net_dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct efx_rss_context_priv *priv; + + priv = ethtool_rxfh_context_priv(ctx); + + priv->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + priv->rx_hash_udp_4tuple = false; + /* Generate default indir table and/or key if not specified. + * We use ctx as a place to store these; this is fine because + * we're doing a create, so if we fail then the ctx will just + * be deleted. 
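+	 * (On failure the ethtool core frees ctx again, so nothing we
+	 * store in it here is leaked.)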
+ */ + if (!rxfh->indir) + efx_set_default_rx_indir_table(efx, ethtool_rxfh_context_indir(ctx)); + if (!rxfh->key) + netdev_rss_key_fill(ethtool_rxfh_context_key(ctx), + ctx->key_size); + if (rxfh->hfunc == ETH_RSS_HASH_NO_CHANGE) + ctx->hfunc = ETH_RSS_HASH_TOP; + if (rxfh->input_xfrm == RXH_XFRM_NO_CHANGE) + ctx->input_xfrm = 0; + return efx_ethtool_modify_rxfh_context(net_dev, ctx, rxfh, extack); +} + +int efx_ethtool_remove_rxfh_context(struct net_device *net_dev, + struct ethtool_rxfh_context *ctx, + u32 rss_context, + struct netlink_ext_ack *extack) +{ + struct efx_nic *efx = efx_netdev_priv(net_dev); + struct efx_rss_context_priv *priv; + + if (!efx->type->rx_push_rss_context_config) { + NL_SET_ERR_MSG_MOD(extack, + "NIC type does not support custom contexts"); + return -EOPNOTSUPP; + } + + priv = ethtool_rxfh_context_priv(ctx); + return efx->type->rx_push_rss_context_config(efx, priv, NULL, NULL, + true); } int efx_ethtool_set_rxfh(struct net_device *net_dev, @@ -1295,8 +1280,9 @@ int efx_ethtool_set_rxfh(struct net_device *net_dev, rxfh->hfunc != ETH_RSS_HASH_TOP) return -EOPNOTSUPP; - if (rxfh->rss_context) - return efx_ethtool_set_rxfh_context(net_dev, rxfh, extack); + /* Custom contexts should use new API */ + if (WARN_ON_ONCE(rxfh->rss_context)) + return -EIO; if (!indir && !key) return 0; diff --git a/drivers/net/ethernet/sfc/ethtool_common.h b/drivers/net/ethernet/sfc/ethtool_common.h index a680e5980213..fc52e891637d 100644 --- a/drivers/net/ethernet/sfc/ethtool_common.h +++ b/drivers/net/ethernet/sfc/ethtool_common.h @@ -49,6 +49,18 @@ int efx_ethtool_get_rxfh(struct net_device *net_dev, int efx_ethtool_set_rxfh(struct net_device *net_dev, struct ethtool_rxfh_param *rxfh, struct netlink_ext_ack *extack); +int efx_ethtool_create_rxfh_context(struct net_device *net_dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack); +int efx_ethtool_modify_rxfh_context(struct net_device *net_dev, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack); +int efx_ethtool_remove_rxfh_context(struct net_device *net_dev, + struct ethtool_rxfh_context *ctx, + u32 rss_context, + struct netlink_ext_ack *extack); int efx_ethtool_reset(struct net_device *net_dev, u32 *flags); int efx_ethtool_get_module_eeprom(struct net_device *net_dev, struct ethtool_eeprom *ee, diff --git a/drivers/net/ethernet/sfc/falcon/falcon.c b/drivers/net/ethernet/sfc/falcon/falcon.c index 7a1c9337081b..36114ce88034 100644 --- a/drivers/net/ethernet/sfc/falcon/falcon.c +++ b/drivers/net/ethernet/sfc/falcon/falcon.c @@ -367,7 +367,7 @@ static const struct i2c_algo_bit_data falcon_i2c_bit_operations = { .getsda = falcon_getsda, .getscl = falcon_getscl, .udelay = 5, - /* Wait up to 50 ms for slave to let us pull SCL high */ + /* Wait up to 50 ms for target to let us pull SCL high */ .timeout = DIV_ROUND_UP(HZ, 20), }; diff --git a/drivers/net/ethernet/sfc/falcon/nic.h b/drivers/net/ethernet/sfc/falcon/nic.h index 9f413474bd9f..ada6e036fd97 100644 --- a/drivers/net/ethernet/sfc/falcon/nic.h +++ b/drivers/net/ethernet/sfc/falcon/nic.h @@ -297,7 +297,7 @@ static inline struct falcon_board *falcon_board(struct ef4_nic *efx) return &data->board; } -struct ethtool_ts_info; +struct kernel_ethtool_ts_info; extern const struct ef4_nic_type falcon_a1_nic_type; extern const struct ef4_nic_type falcon_b0_nic_type; diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c index 
4ff6586116ee..6ef96292909a 100644 --- a/drivers/net/ethernet/sfc/mcdi_filters.c +++ b/drivers/net/ethernet/sfc/mcdi_filters.c @@ -194,7 +194,7 @@ efx_mcdi_filter_push_prep_set_match_fields(struct efx_nic *efx, static void efx_mcdi_filter_push_prep(struct efx_nic *efx, const struct efx_filter_spec *spec, efx_dword_t *inbuf, u64 handle, - struct efx_rss_context *ctx, + struct efx_rss_context_priv *ctx, bool replacing) { u32 flags = spec->flags; @@ -245,7 +245,7 @@ static void efx_mcdi_filter_push_prep(struct efx_nic *efx, static int efx_mcdi_filter_push(struct efx_nic *efx, const struct efx_filter_spec *spec, u64 *handle, - struct efx_rss_context *ctx, bool replacing) + struct efx_rss_context_priv *ctx, bool replacing) { MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN); MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN); @@ -345,9 +345,9 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx, bool replace_equal) { DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); + struct efx_rss_context_priv *ctx = NULL; struct efx_mcdi_filter_table *table; struct efx_filter_spec *saved_spec; - struct efx_rss_context *ctx = NULL; unsigned int match_pri, hash; unsigned int priv_flags; bool rss_locked = false; @@ -380,12 +380,12 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx, bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); if (spec->flags & EFX_FILTER_FLAG_RX_RSS) { - mutex_lock(&efx->rss_lock); + mutex_lock(&efx->net_dev->ethtool->rss_lock); rss_locked = true; if (spec->rss_context) ctx = efx_find_rss_context_entry(efx, spec->rss_context); else - ctx = &efx->rss_context; + ctx = &efx->rss_context.priv; if (!ctx) { rc = -ENOENT; goto out_unlock; @@ -548,7 +548,7 @@ static s32 efx_mcdi_filter_insert_locked(struct efx_nic *efx, out_unlock: if (rss_locked) - mutex_unlock(&efx->rss_lock); + mutex_unlock(&efx->net_dev->ethtool->rss_lock); up_write(&table->lock); return rc; } @@ -611,13 +611,13 @@ static int efx_mcdi_filter_remove_internal(struct efx_nic *efx, new_spec.priority = EFX_FILTER_PRI_AUTO; new_spec.flags = (EFX_FILTER_FLAG_RX | - (efx_rss_active(&efx->rss_context) ? + (efx_rss_active(&efx->rss_context.priv) ? EFX_FILTER_FLAG_RX_RSS : 0)); new_spec.dmaq_id = 0; new_spec.rss_context = 0; rc = efx_mcdi_filter_push(efx, &new_spec, &table->entry[filter_idx].handle, - &efx->rss_context, + &efx->rss_context.priv, true); if (rc == 0) @@ -764,7 +764,7 @@ static int efx_mcdi_filter_insert_addr_list(struct efx_nic *efx, ids = vlan->uc; } - filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0; + filter_flags = efx_rss_active(&efx->rss_context.priv) ? EFX_FILTER_FLAG_RX_RSS : 0; /* Insert/renew filters */ for (i = 0; i < addr_count; i++) { @@ -833,7 +833,7 @@ static int efx_mcdi_filter_insert_def(struct efx_nic *efx, int rc; u16 *id; - filter_flags = efx_rss_active(&efx->rss_context) ? EFX_FILTER_FLAG_RX_RSS : 0; + filter_flags = efx_rss_active(&efx->rss_context.priv) ? 
EFX_FILTER_FLAG_RX_RSS : 0; efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0); @@ -1375,8 +1375,8 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx) struct efx_mcdi_filter_table *table = efx->filter_state; unsigned int invalid_filters = 0, failed = 0; struct efx_mcdi_filter_vlan *vlan; + struct efx_rss_context_priv *ctx; struct efx_filter_spec *spec; - struct efx_rss_context *ctx; unsigned int filter_idx; u32 mcdi_flags; int match_pri; @@ -1388,7 +1388,7 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx) return; down_write(&table->lock); - mutex_lock(&efx->rss_lock); + mutex_lock(&efx->net_dev->ethtool->rss_lock); for (filter_idx = 0; filter_idx < EFX_MCDI_FILTER_TBL_ROWS; filter_idx++) { spec = efx_mcdi_filter_entry_spec(table, filter_idx); @@ -1407,7 +1407,7 @@ void efx_mcdi_filter_table_restore(struct efx_nic *efx) if (spec->rss_context) ctx = efx_find_rss_context_entry(efx, spec->rss_context); else - ctx = &efx->rss_context; + ctx = &efx->rss_context.priv; if (spec->flags & EFX_FILTER_FLAG_RX_RSS) { if (!ctx) { netif_warn(efx, drv, efx->net_dev, @@ -1444,7 +1444,7 @@ not_restored: } } - mutex_unlock(&efx->rss_lock); + mutex_unlock(&efx->net_dev->ethtool->rss_lock); up_write(&table->lock); /* @@ -1861,7 +1861,8 @@ out_unlock: RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_UDP_IPV6_RSS_MODE_LBN |\ RSS_MODE_HASH_ADDRS << MC_CMD_RSS_CONTEXT_GET_FLAGS_OUT_OTHER_IPV6_RSS_MODE_LBN) -int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, u32 *flags) +static int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, + u32 *flags) { /* * Firmware had a bug (sfc bug 61952) where it would not actually @@ -1909,8 +1910,8 @@ int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, u32 *flags) * Defaults are 4-tuple for TCP and 2-tuple for UDP and other-IP, so we * just need to set the UDP ports flags (for both IP versions). 
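 * That is, when ctx->rx_hash_udp_4tuple is set, the UDP RSS mode for
 * both IPv4 and IPv6 is raised from 2-tuple to 4-tuple hashing.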
*/ -void efx_mcdi_set_rss_context_flags(struct efx_nic *efx, - struct efx_rss_context *ctx) +static void efx_mcdi_set_rss_context_flags(struct efx_nic *efx, + struct efx_rss_context_priv *ctx) { MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_SET_FLAGS_IN_LEN); u32 flags; @@ -1931,7 +1932,7 @@ void efx_mcdi_set_rss_context_flags(struct efx_nic *efx, } static int efx_mcdi_filter_alloc_rss_context(struct efx_nic *efx, bool exclusive, - struct efx_rss_context *ctx, + struct efx_rss_context_priv *ctx, unsigned *context_size) { MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN); @@ -2032,25 +2033,26 @@ void efx_mcdi_rx_free_indir_table(struct efx_nic *efx) { int rc; - if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) { - rc = efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id); + if (efx->rss_context.priv.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) { + rc = efx_mcdi_filter_free_rss_context(efx, efx->rss_context.priv.context_id); WARN_ON(rc != 0); } - efx->rss_context.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + efx->rss_context.priv.context_id = EFX_MCDI_RSS_CONTEXT_INVALID; } static int efx_mcdi_filter_rx_push_shared_rss_config(struct efx_nic *efx, unsigned *context_size) { struct efx_mcdi_filter_table *table = efx->filter_state; - int rc = efx_mcdi_filter_alloc_rss_context(efx, false, &efx->rss_context, - context_size); + int rc = efx_mcdi_filter_alloc_rss_context(efx, false, + &efx->rss_context.priv, + context_size); if (rc != 0) return rc; table->rx_rss_context_exclusive = false; - efx_set_default_rx_indir_table(efx, &efx->rss_context); + efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table); return 0; } @@ -2058,26 +2060,27 @@ static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx, const u32 *rx_indir_table, const u8 *key) { + u32 old_rx_rss_context = efx->rss_context.priv.context_id; struct efx_mcdi_filter_table *table = efx->filter_state; - u32 old_rx_rss_context = efx->rss_context.context_id; int rc; - if (efx->rss_context.context_id == EFX_MCDI_RSS_CONTEXT_INVALID || + if (efx->rss_context.priv.context_id == EFX_MCDI_RSS_CONTEXT_INVALID || !table->rx_rss_context_exclusive) { - rc = efx_mcdi_filter_alloc_rss_context(efx, true, &efx->rss_context, - NULL); + rc = efx_mcdi_filter_alloc_rss_context(efx, true, + &efx->rss_context.priv, + NULL); if (rc == -EOPNOTSUPP) return rc; else if (rc != 0) goto fail1; } - rc = efx_mcdi_filter_populate_rss_table(efx, efx->rss_context.context_id, - rx_indir_table, key); + rc = efx_mcdi_filter_populate_rss_table(efx, efx->rss_context.priv.context_id, + rx_indir_table, key); if (rc != 0) goto fail2; - if (efx->rss_context.context_id != old_rx_rss_context && + if (efx->rss_context.priv.context_id != old_rx_rss_context && old_rx_rss_context != EFX_MCDI_RSS_CONTEXT_INVALID) WARN_ON(efx_mcdi_filter_free_rss_context(efx, old_rx_rss_context) != 0); table->rx_rss_context_exclusive = true; @@ -2091,9 +2094,9 @@ static int efx_mcdi_filter_rx_push_exclusive_rss_config(struct efx_nic *efx, return 0; fail2: - if (old_rx_rss_context != efx->rss_context.context_id) { - WARN_ON(efx_mcdi_filter_free_rss_context(efx, efx->rss_context.context_id) != 0); - efx->rss_context.context_id = old_rx_rss_context; + if (old_rx_rss_context != efx->rss_context.priv.context_id) { + WARN_ON(efx_mcdi_filter_free_rss_context(efx, efx->rss_context.priv.context_id) != 0); + efx->rss_context.priv.context_id = old_rx_rss_context; } fail1: netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc); @@ -2101,33 +2104,28 
@@ fail1: } int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx, - struct efx_rss_context *ctx, + struct efx_rss_context_priv *ctx, const u32 *rx_indir_table, - const u8 *key) + const u8 *key, bool delete) { int rc; - WARN_ON(!mutex_is_locked(&efx->rss_lock)); + WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock)); if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) { + if (delete) + /* already wasn't in HW, nothing to do */ + return 0; rc = efx_mcdi_filter_alloc_rss_context(efx, true, ctx, NULL); if (rc) return rc; } - if (!rx_indir_table) /* Delete this context */ + if (delete) /* Delete this context */ return efx_mcdi_filter_free_rss_context(efx, ctx->context_id); - rc = efx_mcdi_filter_populate_rss_table(efx, ctx->context_id, - rx_indir_table, key); - if (rc) - return rc; - - memcpy(ctx->rx_indir_table, rx_indir_table, - sizeof(efx->rss_context.rx_indir_table)); - memcpy(ctx->rx_hash_key, key, efx->type->rx_hash_key_size); - - return 0; + return efx_mcdi_filter_populate_rss_table(efx, ctx->context_id, + rx_indir_table, key); } int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx, @@ -2139,16 +2137,16 @@ int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx, size_t outlen; int rc, i; - WARN_ON(!mutex_is_locked(&efx->rss_lock)); + WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock)); BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN != MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN); - if (ctx->context_id == EFX_MCDI_RSS_CONTEXT_INVALID) + if (ctx->priv.context_id == EFX_MCDI_RSS_CONTEXT_INVALID) return -ENOENT; MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID, - ctx->context_id); + ctx->priv.context_id); BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_indir_table) != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN); rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf), @@ -2164,7 +2162,7 @@ int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx, RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i]; MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID, - ctx->context_id); + ctx->priv.context_id); BUILD_BUG_ON(ARRAY_SIZE(ctx->rx_hash_key) != MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN); rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf), @@ -2186,35 +2184,42 @@ int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx) { int rc; - mutex_lock(&efx->rss_lock); + mutex_lock(&efx->net_dev->ethtool->rss_lock); rc = efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context); - mutex_unlock(&efx->rss_lock); + mutex_unlock(&efx->net_dev->ethtool->rss_lock); return rc; } void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx) { struct efx_mcdi_filter_table *table = efx->filter_state; - struct efx_rss_context *ctx; + struct ethtool_rxfh_context *ctx; + unsigned long context; int rc; - WARN_ON(!mutex_is_locked(&efx->rss_lock)); + WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock)); if (!table->must_restore_rss_contexts) return; - list_for_each_entry(ctx, &efx->rss_context.list, list) { + xa_for_each(&efx->net_dev->ethtool->rss_ctx, context, ctx) { + struct efx_rss_context_priv *priv; + u32 *indir; + u8 *key; + + priv = ethtool_rxfh_context_priv(ctx); /* previous NIC RSS context is gone */ - ctx->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; + priv->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; /* so try to allocate a new one */ - rc = efx_mcdi_rx_push_rss_context_config(efx, ctx, - ctx->rx_indir_table, - ctx->rx_hash_key); + indir = ethtool_rxfh_context_indir(ctx); + key = ethtool_rxfh_context_key(ctx); + rc = 
efx_mcdi_rx_push_rss_context_config(efx, priv, indir, key, + false); if (rc) netif_warn(efx, probe, efx->net_dev, - "failed to restore RSS context %u, rc=%d" + "failed to restore RSS context %lu, rc=%d" "; RSS filters may fail to be applied\n", - ctx->user_id, rc); + context, rc); } table->must_restore_rss_contexts = false; } @@ -2276,7 +2281,7 @@ int efx_mcdi_vf_rx_push_rss_config(struct efx_nic *efx, bool user, { if (user) return -EOPNOTSUPP; - if (efx->rss_context.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) + if (efx->rss_context.priv.context_id != EFX_MCDI_RSS_CONTEXT_INVALID) return 0; return efx_mcdi_filter_rx_push_shared_rss_config(efx, NULL); } @@ -2295,7 +2300,7 @@ int efx_mcdi_push_default_indir_table(struct efx_nic *efx, efx_mcdi_rx_free_indir_table(efx); if (rss_spread > 1) { - efx_set_default_rx_indir_table(efx, &efx->rss_context); + efx_set_default_rx_indir_table(efx, efx->rss_context.rx_indir_table); rc = efx->type->rx_push_rss_config(efx, false, efx->rss_context.rx_indir_table, NULL); } diff --git a/drivers/net/ethernet/sfc/mcdi_filters.h b/drivers/net/ethernet/sfc/mcdi_filters.h index c0d6558b9fd2..11b9f87ed9e1 100644 --- a/drivers/net/ethernet/sfc/mcdi_filters.h +++ b/drivers/net/ethernet/sfc/mcdi_filters.h @@ -145,9 +145,9 @@ void efx_mcdi_filter_del_vlan(struct efx_nic *efx, u16 vid); void efx_mcdi_rx_free_indir_table(struct efx_nic *efx); int efx_mcdi_rx_push_rss_context_config(struct efx_nic *efx, - struct efx_rss_context *ctx, + struct efx_rss_context_priv *ctx, const u32 *rx_indir_table, - const u8 *key); + const u8 *key, bool delete); int efx_mcdi_pf_rx_push_rss_config(struct efx_nic *efx, bool user, const u32 *rx_indir_table, const u8 *key); @@ -161,10 +161,6 @@ int efx_mcdi_push_default_indir_table(struct efx_nic *efx, int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx); int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx, struct efx_rss_context *ctx); -int efx_mcdi_get_rss_context_flags(struct efx_nic *efx, u32 context, - u32 *flags); -void efx_mcdi_set_rss_context_flags(struct efx_nic *efx, - struct efx_rss_context *ctx); void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx); static inline void efx_mcdi_update_rx_scatter(struct efx_nic *efx) diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index f2dd7feb0e0c..b85c51cbe7f9 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -737,21 +737,24 @@ struct vfdi_status; /* The reserved RSS context value */ #define EFX_MCDI_RSS_CONTEXT_INVALID 0xffffffff /** - * struct efx_rss_context - A user-defined RSS context for filtering - * @list: node of linked list on which this struct is stored + * struct efx_rss_context_priv - driver private data for an RSS context * @context_id: the RSS_CONTEXT_ID returned by MC firmware, or * %EFX_MCDI_RSS_CONTEXT_INVALID if this context is not present on the NIC. - * For Siena, 0 if RSS is active, else %EFX_MCDI_RSS_CONTEXT_INVALID. - * @user_id: the rss_context ID exposed to userspace over ethtool. 
* @rx_hash_udp_4tuple: UDP 4-tuple hashing enabled + */ +struct efx_rss_context_priv { + u32 context_id; + bool rx_hash_udp_4tuple; +}; + +/** + * struct efx_rss_context - an RSS context + * @priv: hardware-specific state * @rx_hash_key: Toeplitz hash key for this RSS context * @indir_table: Indirection table for this RSS context */ struct efx_rss_context { - struct list_head list; - u32 context_id; - u32 user_id; - bool rx_hash_udp_4tuple; + struct efx_rss_context_priv priv; u8 rx_hash_key[40]; u32 rx_indir_table[128]; }; @@ -883,9 +886,7 @@ struct efx_mae; * @rx_packet_ts_offset: Offset of timestamp from start of packet data * (valid only if channel->sync_timestamps_enabled; always negative) * @rx_scatter: Scatter mode enabled for receives - * @rss_context: Main RSS context. Its @list member is the head of the list of - * RSS contexts created by user requests - * @rss_lock: Protects custom RSS context software state in @rss_context.list + * @rss_context: Main RSS context. * @vport_id: The function's vport ID, only relevant for PFs * @int_error_count: Number of internal errors seen recently * @int_error_expire: Time at which error count will be expired @@ -1052,7 +1053,6 @@ struct efx_nic { int rx_packet_ts_offset; bool rx_scatter; struct efx_rss_context rss_context; - struct mutex rss_lock; u32 vport_id; unsigned int_error_count; @@ -1416,9 +1416,9 @@ struct efx_nic_type { const u32 *rx_indir_table, const u8 *key); int (*rx_pull_rss_config)(struct efx_nic *efx); int (*rx_push_rss_context_config)(struct efx_nic *efx, - struct efx_rss_context *ctx, + struct efx_rss_context_priv *ctx, const u32 *rx_indir_table, - const u8 *key); + const u8 *key, bool delete); int (*rx_pull_rss_context_config)(struct efx_nic *efx, struct efx_rss_context *ctx); void (*rx_restore_rss_contexts)(struct efx_nic *efx); diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c index c3bffbf0ba2b..6fd2fdbaa418 100644 --- a/drivers/net/ethernet/sfc/ptp.c +++ b/drivers/net/ethernet/sfc/ptp.c @@ -1864,7 +1864,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx, struct kernel_hwtstamp_config *i return 0; } -void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info) +void efx_ptp_get_ts_info(struct efx_nic *efx, struct kernel_ethtool_ts_info *ts_info) { struct efx_ptp_data *ptp = efx->ptp_data; struct efx_nic *primary = efx->primary; diff --git a/drivers/net/ethernet/sfc/ptp.h b/drivers/net/ethernet/sfc/ptp.h index 2f30dbb490d2..6946203499ef 100644 --- a/drivers/net/ethernet/sfc/ptp.h +++ b/drivers/net/ethernet/sfc/ptp.h @@ -12,7 +12,7 @@ #include #include "net_driver.h" -struct ethtool_ts_info; +struct kernel_ethtool_ts_info; int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel); void efx_ptp_defer_probe_with_channel(struct efx_nic *efx); struct efx_channel *efx_ptp_channel(struct efx_nic *efx); @@ -23,7 +23,8 @@ int efx_ptp_set_ts_config(struct efx_nic *efx, struct netlink_ext_ack *extack); int efx_ptp_get_ts_config(struct efx_nic *efx, struct kernel_hwtstamp_config *config); -void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info); +void efx_ptp_get_ts_info(struct efx_nic *efx, + struct kernel_ethtool_ts_info *ts_info); bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); int efx_ptp_get_mode(struct efx_nic *efx); int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index dcd901eccfc8..0b7dc75c40f9 100644 --- 
a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -557,69 +557,25 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, napi_gro_frags(napi); } -/* RSS contexts. We're using linked lists and crappy O(n) algorithms, because - * (a) this is an infrequent control-plane operation and (b) n is small (max 64) - */ -struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx) +struct efx_rss_context_priv *efx_find_rss_context_entry(struct efx_nic *efx, + u32 id) { - struct list_head *head = &efx->rss_context.list; - struct efx_rss_context *ctx, *new; - u32 id = 1; /* Don't use zero, that refers to the master RSS context */ + struct ethtool_rxfh_context *ctx; - WARN_ON(!mutex_is_locked(&efx->rss_lock)); + WARN_ON(!mutex_is_locked(&efx->net_dev->ethtool->rss_lock)); - /* Search for first gap in the numbering */ - list_for_each_entry(ctx, head, list) { - if (ctx->user_id != id) - break; - id++; - /* Check for wrap. If this happens, we have nearly 2^32 - * allocated RSS contexts, which seems unlikely. - */ - if (WARN_ON_ONCE(!id)) - return NULL; - } - - /* Create the new entry */ - new = kmalloc(sizeof(*new), GFP_KERNEL); - if (!new) + ctx = xa_load(&efx->net_dev->ethtool->rss_ctx, id); + if (!ctx) return NULL; - new->context_id = EFX_MCDI_RSS_CONTEXT_INVALID; - new->rx_hash_udp_4tuple = false; - - /* Insert the new entry into the gap */ - new->user_id = id; - list_add_tail(&new->list, &ctx->list); - return new; + return ethtool_rxfh_context_priv(ctx); } -struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id) -{ - struct list_head *head = &efx->rss_context.list; - struct efx_rss_context *ctx; - - WARN_ON(!mutex_is_locked(&efx->rss_lock)); - - list_for_each_entry(ctx, head, list) - if (ctx->user_id == id) - return ctx; - return NULL; -} - -void efx_free_rss_context_entry(struct efx_rss_context *ctx) -{ - list_del(&ctx->list); - kfree(ctx); -} - -void efx_set_default_rx_indir_table(struct efx_nic *efx, - struct efx_rss_context *ctx) +void efx_set_default_rx_indir_table(struct efx_nic *efx, u32 *indir) { size_t i; - for (i = 0; i < ARRAY_SIZE(ctx->rx_indir_table); i++) - ctx->rx_indir_table[i] = - ethtool_rxfh_indir_default(i, efx->rss_spread); + for (i = 0; i < ARRAY_SIZE(efx->rss_context.rx_indir_table); i++) + indir[i] = ethtool_rxfh_indir_default(i, efx->rss_spread); } /** diff --git a/drivers/net/ethernet/sfc/rx_common.h b/drivers/net/ethernet/sfc/rx_common.h index fbd2769307f9..75fa84192362 100644 --- a/drivers/net/ethernet/sfc/rx_common.h +++ b/drivers/net/ethernet/sfc/rx_common.h @@ -84,11 +84,9 @@ void efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf, unsigned int n_frags, u8 *eh, __wsum csum); -struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx); -struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id); -void efx_free_rss_context_entry(struct efx_rss_context *ctx); -void efx_set_default_rx_indir_table(struct efx_nic *efx, - struct efx_rss_context *ctx); +struct efx_rss_context_priv *efx_find_rss_context_entry(struct efx_nic *efx, + u32 id); +void efx_set_default_rx_indir_table(struct efx_nic *efx, u32 *indir); bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); bool efx_filter_spec_equal(const struct efx_filter_spec *left, diff --git a/drivers/net/ethernet/sfc/siena/ethtool.c b/drivers/net/ethernet/sfc/siena/ethtool.c index 14dd3893bdef..4c182d4edfc2 100644 --- a/drivers/net/ethernet/sfc/siena/ethtool.c +++ 
b/drivers/net/ethernet/sfc/siena/ethtool.c @@ -226,7 +226,7 @@ static void efx_ethtool_get_fec_stats(struct net_device *net_dev, } static int efx_ethtool_get_ts_info(struct net_device *net_dev, - struct ethtool_ts_info *ts_info) + struct kernel_ethtool_ts_info *ts_info) { struct efx_nic *efx = netdev_priv(net_dev); diff --git a/drivers/net/ethernet/sfc/siena/ptp.c b/drivers/net/ethernet/sfc/siena/ptp.c index 4b5e2f0ba350..c473a4b6dd44 100644 --- a/drivers/net/ethernet/sfc/siena/ptp.c +++ b/drivers/net/ethernet/sfc/siena/ptp.c @@ -1780,7 +1780,7 @@ static int efx_ptp_ts_init(struct efx_nic *efx, } void efx_siena_ptp_get_ts_info(struct efx_nic *efx, - struct ethtool_ts_info *ts_info) + struct kernel_ethtool_ts_info *ts_info) { struct efx_ptp_data *ptp = efx->ptp_data; struct efx_nic *primary = efx->primary; diff --git a/drivers/net/ethernet/sfc/siena/ptp.h b/drivers/net/ethernet/sfc/siena/ptp.h index 6352f84424f6..b6133e7c5608 100644 --- a/drivers/net/ethernet/sfc/siena/ptp.h +++ b/drivers/net/ethernet/sfc/siena/ptp.h @@ -12,7 +12,7 @@ #include #include "net_driver.h" -struct ethtool_ts_info; +struct kernel_ethtool_ts_info; void efx_siena_ptp_defer_probe_with_channel(struct efx_nic *efx); struct efx_channel *efx_siena_ptp_channel(struct efx_nic *efx); int efx_siena_ptp_set_ts_config(struct efx_nic *efx, @@ -21,7 +21,7 @@ int efx_siena_ptp_set_ts_config(struct efx_nic *efx, int efx_siena_ptp_get_ts_config(struct efx_nic *efx, struct kernel_hwtstamp_config *config); void efx_siena_ptp_get_ts_info(struct efx_nic *efx, - struct ethtool_ts_info *ts_info); + struct kernel_ethtool_ts_info *ts_info); bool efx_siena_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb); int efx_siena_ptp_get_mode(struct efx_nic *efx); int efx_siena_ptp_change_mode(struct efx_nic *efx, bool enable_wanted, diff --git a/drivers/net/ethernet/sfc/tc.c b/drivers/net/ethernet/sfc/tc.c index 9d140203e273..0d93164988fc 100644 --- a/drivers/net/ethernet/sfc/tc.c +++ b/drivers/net/ethernet/sfc/tc.c @@ -387,11 +387,8 @@ static int efx_tc_flower_parse_match(struct efx_nic *efx, struct flow_match_control fm; flow_rule_match_enc_control(rule, &fm); - if (fm.mask->flags) { - NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported match on enc_control.flags %#x", - fm.mask->flags); + if (flow_rule_has_enc_control_flags(fm.mask->flags, extack)) return -EOPNOTSUPP; - } if (!IS_ALL_ONES(fm.mask->addr_type)) { NL_SET_ERR_MSG_FMT_MOD(extack, "Unsupported enc addr_type mask %u (key %u)", fm.mask->addr_type, diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c index af661c65ffe2..e2e7b1c68563 100644 --- a/drivers/net/ethernet/smsc/smc9194.c +++ b/drivers/net/ethernet/smsc/smc9194.c @@ -1501,6 +1501,7 @@ static void smc_set_multicast_list(struct net_device *dev) #ifdef MODULE static struct net_device *devSMC9194; +MODULE_DESCRIPTION("SMC 9194 Ethernet driver"); MODULE_LICENSE("GPL"); module_param_hw(io, int, ioport, 0); diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 78ff3af7911a..907498848028 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -1574,12 +1574,8 @@ smc_ethtool_set_link_ksettings(struct net_device *dev, (cmd->base.port != PORT_TP && cmd->base.port != PORT_AUI)) return -EINVAL; -// lp->port = cmd->base.port; lp->ctl_rfduplx = cmd->base.duplex == DUPLEX_FULL; -// if (netif_running(dev)) -// smc_set_port(dev); - ret = 0; } diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h index 
45ef5ac0788a..38aa4374e813 100644 --- a/drivers/net/ethernet/smsc/smc91x.h +++ b/drivers/net/ethernet/smsc/smc91x.h @@ -142,14 +142,14 @@ static inline void _SMC_outw_align4(u16 val, void __iomem *ioaddr, int reg, #define SMC_CAN_USE_32BIT 0 #define SMC_NOWAIT 1 -static inline void mcf_insw(void *a, unsigned char *p, int l) +static inline void mcf_insw(void __iomem *a, unsigned char *p, int l) { u16 *wp = (u16 *) p; while (l-- > 0) *wp++ = readw(a); } -static inline void mcf_outsw(void *a, unsigned char *p, int l) +static inline void mcf_outsw(void __iomem *a, unsigned char *p, int l) { u16 *wp = (u16 *) p; while (l-- > 0) diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 9cd62b2110a1..cd36ff4da68c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -271,8 +271,6 @@ struct stmmac_safety_stats { /* PCS defines */ #define STMMAC_PCS_RGMII (1 << 0) #define STMMAC_PCS_SGMII (1 << 1) -#define STMMAC_PCS_TBI (1 << 2) -#define STMMAC_PCS_RTBI (1 << 3) #define SF_DMA_MODE 1 /* DMA STORE-AND-FORWARD Operation Mode */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index e73fa34237d3..83ad7c7935e3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -248,7 +248,7 @@ static void intel_speed_mode_2500(struct net_device *ndev, void *intel_data) dev_info(priv->device, "Link Speed Mode: 2.5Gbps\n"); priv->plat->max_speed = 2500; priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX; - priv->plat->mdio_bus_data->xpcs_an_inband = false; + priv->plat->mdio_bus_data->default_an_inband = false; } else { priv->plat->max_speed = 1000; } @@ -444,6 +444,16 @@ static void common_default_data(struct plat_stmmacenet_data *plat) plat->rx_queues_cfg[0].pkt_route = 0x0; } +static struct phylink_pcs *intel_mgbe_select_pcs(struct stmmac_priv *priv, + phy_interface_t interface) +{ + /* plat->mdio_bus_data->has_xpcs has been set true, so there + * should always be an XPCS. The original code would always + * return this if present. 
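+	 * Returning it unconditionally preserves that behaviour.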
+ */ + return &priv->hw->xpcs->pcs; +} + static int intel_mgbe_common_data(struct pci_dev *pdev, struct plat_stmmacenet_data *plat) { @@ -586,19 +596,9 @@ static int intel_mgbe_common_data(struct pci_dev *pdev, /* Intel mgbe SGMII interface uses pcs-xcps */ if (plat->phy_interface == PHY_INTERFACE_MODE_SGMII || plat->phy_interface == PHY_INTERFACE_MODE_1000BASEX) { - plat->mdio_bus_data->has_xpcs = true; - plat->mdio_bus_data->xpcs_an_inband = true; - } - - /* For fixed-link setup, we clear xpcs_an_inband */ - if (fwnode) { - struct fwnode_handle *fixed_node; - - fixed_node = fwnode_get_named_child_node(fwnode, "fixed-link"); - if (fixed_node) - plat->mdio_bus_data->xpcs_an_inband = false; - - fwnode_handle_put(fixed_node); + plat->mdio_bus_data->pcs_mask = BIT(INTEL_MGBE_XPCS_ADDR); + plat->mdio_bus_data->default_an_inband = true; + plat->select_pcs = intel_mgbe_select_pcs; } /* Ensure mdio bus scan skips intel serdes and pcs-xpcs */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c index 466c4002f00d..901a3c1959fa 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-qcom-ethqos.c @@ -21,6 +21,7 @@ #define RGMII_IO_MACRO_CONFIG2 0x1C #define RGMII_IO_MACRO_DEBUG1 0x20 #define EMAC_SYSTEM_LOW_POWER_DEBUG 0x28 +#define EMAC_WRAPPER_SGMII_PHY_CNTRL1 0xf4 /* RGMII_IO_MACRO_CONFIG fields */ #define RGMII_CONFIG_FUNC_CLK_EN BIT(30) @@ -79,6 +80,9 @@ #define ETHQOS_MAC_CTRL_SPEED_MODE BIT(14) #define ETHQOS_MAC_CTRL_PORT_SEL BIT(15) +/* EMAC_WRAPPER_SGMII_PHY_CNTRL1 bits */ +#define SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN BIT(3) + #define SGMII_10M_RX_CLK_DVDR 0x31 struct ethqos_emac_por { @@ -95,6 +99,7 @@ struct ethqos_emac_driver_data { bool has_integrated_pcs; u32 dma_addr_width; struct dwmac4_addrs dwmac4_addrs; + bool needs_sgmii_loopback; }; struct qcom_ethqos { @@ -114,6 +119,7 @@ struct qcom_ethqos { unsigned int num_por; bool rgmii_config_loopback_en; bool has_emac_ge_3; + bool needs_sgmii_loopback; }; static int rgmii_readl(struct qcom_ethqos *ethqos, unsigned int offset) @@ -191,8 +197,22 @@ ethqos_update_link_clk(struct qcom_ethqos *ethqos, unsigned int speed) clk_set_rate(ethqos->link_clk, ethqos->link_clk_rate); } +static void +qcom_ethqos_set_sgmii_loopback(struct qcom_ethqos *ethqos, bool enable) +{ + if (!ethqos->needs_sgmii_loopback || + ethqos->phy_mode != PHY_INTERFACE_MODE_2500BASEX) + return; + + rgmii_updatel(ethqos, + SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN, + enable ? SGMII_PHY_CNTRL1_SGMII_TX_TO_RX_LOOPBACK_EN : 0, + EMAC_WRAPPER_SGMII_PHY_CNTRL1); +} + static void ethqos_set_func_clk_en(struct qcom_ethqos *ethqos) { + qcom_ethqos_set_sgmii_loopback(ethqos, true); rgmii_updatel(ethqos, RGMII_CONFIG_FUNC_CLK_EN, RGMII_CONFIG_FUNC_CLK_EN, RGMII_IO_MACRO_CONFIG); } @@ -277,6 +297,7 @@ static const struct ethqos_emac_driver_data emac_v4_0_0_data = { .has_emac_ge_3 = true, .link_clk_name = "phyaux", .has_integrated_pcs = true, + .needs_sgmii_loopback = true, .dma_addr_width = 36, .dwmac4_addrs = { .dma_chan = 0x00008100, @@ -607,6 +628,14 @@ static int ethqos_configure_rgmii(struct qcom_ethqos *ethqos) return 0; } +static void ethqos_set_serdes_speed(struct qcom_ethqos *ethqos, int speed) +{ + if (ethqos->serdes_speed != speed) { + phy_set_speed(ethqos->serdes_phy, speed); + ethqos->serdes_speed = speed; + } +} + /* On interface toggle MAC registers gets reset. 
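 * (i.e. the ETHQOS_MAC_CTRL_* and RGMII io-macro bits programmed below).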
* Configure MAC block for SGMII on ethernet phy link up */ @@ -624,9 +653,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos) rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG, RGMII_CONFIG2_RGMII_CLK_SEL_CFG, RGMII_IO_MACRO_CONFIG2); - if (ethqos->serdes_speed != SPEED_2500) - phy_set_speed(ethqos->serdes_phy, SPEED_2500); - ethqos->serdes_speed = SPEED_2500; + ethqos_set_serdes_speed(ethqos, SPEED_2500); stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 0, 0, 0); break; case SPEED_1000: @@ -634,16 +661,12 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos) rgmii_updatel(ethqos, RGMII_CONFIG2_RGMII_CLK_SEL_CFG, RGMII_CONFIG2_RGMII_CLK_SEL_CFG, RGMII_IO_MACRO_CONFIG2); - if (ethqos->serdes_speed != SPEED_1000) - phy_set_speed(ethqos->serdes_phy, SPEED_1000); - ethqos->serdes_speed = SPEED_1000; + ethqos_set_serdes_speed(ethqos, SPEED_1000); stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0); break; case SPEED_100: val |= ETHQOS_MAC_CTRL_PORT_SEL | ETHQOS_MAC_CTRL_SPEED_MODE; - if (ethqos->serdes_speed != SPEED_1000) - phy_set_speed(ethqos->serdes_phy, SPEED_1000); - ethqos->serdes_speed = SPEED_1000; + ethqos_set_serdes_speed(ethqos, SPEED_1000); stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0); break; case SPEED_10: @@ -653,9 +676,7 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos) FIELD_PREP(RGMII_CONFIG_SGMII_CLK_DVDR, SGMII_10M_RX_CLK_DVDR), RGMII_IO_MACRO_CONFIG); - if (ethqos->serdes_speed != SPEED_1000) - phy_set_speed(ethqos->serdes_phy, ethqos->speed); - ethqos->serdes_speed = SPEED_1000; + ethqos_set_serdes_speed(ethqos, SPEED_1000); stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, 0, 0); break; } @@ -665,6 +686,14 @@ static int ethqos_configure_sgmii(struct qcom_ethqos *ethqos) return val; } +static void qcom_ethqos_speed_mode_2500(struct net_device *ndev, void *data) +{ + struct stmmac_priv *priv = netdev_priv(ndev); + + priv->plat->max_speed = 2500; + priv->plat->phy_interface = PHY_INTERFACE_MODE_2500BASEX; +} + static int ethqos_configure(struct qcom_ethqos *ethqos) { return ethqos->configure_func(ethqos); @@ -674,6 +703,7 @@ static void ethqos_fix_mac_speed(void *priv, unsigned int speed, unsigned int mo { struct qcom_ethqos *ethqos = priv; + qcom_ethqos_set_sgmii_loopback(ethqos, false); ethqos->speed = speed; ethqos_update_link_clk(ethqos, speed); ethqos_configure(ethqos); @@ -787,6 +817,9 @@ static int qcom_ethqos_probe(struct platform_device *pdev) case PHY_INTERFACE_MODE_RGMII_TXID: ethqos->configure_func = ethqos_configure_rgmii; break; + case PHY_INTERFACE_MODE_2500BASEX: + plat_dat->speed_mode_2500 = qcom_ethqos_speed_mode_2500; + fallthrough; case PHY_INTERFACE_MODE_SGMII: ethqos->configure_func = ethqos_configure_sgmii; break; @@ -809,6 +842,7 @@ static int qcom_ethqos_probe(struct platform_device *pdev) ethqos->num_por = data->num_por; ethqos->rgmii_config_loopback_en = data->rgmii_config_loopback_en; ethqos->has_emac_ge_3 = data->has_emac_ge_3; + ethqos->needs_sgmii_loopback = data->needs_sgmii_loopback; ethqos->link_clk = devm_clk_get(dev, data->link_clk_name ?: "rgmii"); if (IS_ERR(ethqos->link_clk)) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c index 848cf3c01f4a..59a7bd560f96 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rzn1.c @@ -39,6 +39,12 @@ static void rzn1_dwmac_pcs_exit(struct stmmac_priv *priv) miic_destroy(priv->hw->phylink_pcs); } +static struct phylink_pcs *rzn1_dwmac_select_pcs(struct 
stmmac_priv *priv, + phy_interface_t interface) +{ + return priv->hw->phylink_pcs; +} + static int rzn1_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; @@ -57,6 +63,7 @@ static int rzn1_dwmac_probe(struct platform_device *pdev) plat_dat->bsp_priv = plat_dat; plat_dat->pcs_init = rzn1_dwmac_pcs_init; plat_dat->pcs_exit = rzn1_dwmac_pcs_exit; + plat_dat->select_pcs = rzn1_dwmac_select_pcs; ret = stmmac_dvr_probe(dev, plat_dat, &stmmac_res); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c index b3d45f9dfb55..fdb4c773ec98 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c @@ -429,6 +429,12 @@ static void socfpga_dwmac_pcs_exit(struct stmmac_priv *priv) lynx_pcs_destroy(priv->hw->phylink_pcs); } +static struct phylink_pcs *socfpga_dwmac_select_pcs(struct stmmac_priv *priv, + phy_interface_t interface) +{ + return priv->hw->phylink_pcs; +} + static int socfpga_dwmac_probe(struct platform_device *pdev) { struct plat_stmmacenet_data *plat_dat; @@ -478,6 +484,7 @@ static int socfpga_dwmac_probe(struct platform_device *pdev) plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed; plat_dat->pcs_init = socfpga_dwmac_pcs_init; plat_dat->pcs_exit = socfpga_dwmac_pcs_exit; + plat_dat->select_pcs = socfpga_dwmac_select_pcs; ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); if (ret) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c index c92dfc4ecf57..c1732955a697 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-stm32.c @@ -53,12 +53,23 @@ #define SYSCFG_MCU_ETH_SEL_MII 0 #define SYSCFG_MCU_ETH_SEL_RMII 1 -/* STM32MP1 register definitions +/* STM32MP2 register definitions */ +#define SYSCFG_MP2_ETH_MASK GENMASK(31, 0) + +#define SYSCFG_ETHCR_ETH_PTP_CLK_SEL BIT(2) +#define SYSCFG_ETHCR_ETH_CLK_SEL BIT(1) +#define SYSCFG_ETHCR_ETH_REF_CLK_SEL BIT(0) + +#define SYSCFG_ETHCR_ETH_SEL_MII 0 +#define SYSCFG_ETHCR_ETH_SEL_RGMII BIT(4) +#define SYSCFG_ETHCR_ETH_SEL_RMII BIT(6) + +/* STM32MPx register definitions * * Below table summarizes the clock requirement and clock sources for * supported phy interface modes. 
* __________________________________________________________________________ - *|PHY_MODE | Normal | PHY wo crystal| PHY wo crystal |No 125Mhz from PHY| + *|PHY_MODE | Normal | PHY wo crystal| PHY wo crystal |No 125MHz from PHY| *| | | 25MHz | 50MHz | | * --------------------------------------------------------------------------- *| MII | - | eth-ck | n/a | n/a | @@ -90,6 +101,7 @@ struct stm32_dwmac { int eth_ref_clk_sel_reg; int irq_pwr_wakeup; u32 mode_reg; /* MAC glue-logic mode register */ + u32 mode_mask; struct regmap *regmap; u32 speed; const struct stm32_ops *ops; @@ -102,8 +114,9 @@ struct stm32_ops { void (*resume)(struct stm32_dwmac *dwmac); int (*parse_data)(struct stm32_dwmac *dwmac, struct device *dev); - u32 syscfg_eth_mask; bool clk_rx_enable_in_suspend; + bool is_mp13, is_mp2; + u32 syscfg_clr_off; }; static int stm32_dwmac_clk_enable(struct stm32_dwmac *dwmac, bool resume) @@ -157,65 +170,190 @@ static int stm32_dwmac_init(struct plat_stmmacenet_data *plat_dat, bool resume) return stm32_dwmac_clk_enable(dwmac, resume); } -static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) +static int stm32mp1_select_ethck_external(struct plat_stmmacenet_data *plat_dat) { struct stm32_dwmac *dwmac = plat_dat->bsp_priv; - u32 reg = dwmac->mode_reg, clk_rate; - int val; - clk_rate = clk_get_rate(dwmac->clk_eth_ck); - dwmac->enable_eth_ck = false; switch (plat_dat->mac_interface) { case PHY_INTERFACE_MODE_MII: - if (clk_rate == ETH_CK_F_25M && dwmac->ext_phyclk) - dwmac->enable_eth_ck = true; - val = SYSCFG_PMCR_ETH_SEL_MII; - pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n"); + dwmac->enable_eth_ck = dwmac->ext_phyclk; + return 0; + case PHY_INTERFACE_MODE_GMII: + dwmac->enable_eth_ck = dwmac->eth_clk_sel_reg || + dwmac->ext_phyclk; + return 0; + case PHY_INTERFACE_MODE_RMII: + dwmac->enable_eth_ck = dwmac->eth_ref_clk_sel_reg || + dwmac->ext_phyclk; + return 0; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + dwmac->enable_eth_ck = dwmac->eth_clk_sel_reg || + dwmac->ext_phyclk; + return 0; + default: + dwmac->enable_eth_ck = false; + dev_err(dwmac->dev, "Mode %s not supported", + phy_modes(plat_dat->mac_interface)); + return -EINVAL; + } +} + +static int stm32mp1_validate_ethck_rate(struct plat_stmmacenet_data *plat_dat) +{ + struct stm32_dwmac *dwmac = plat_dat->bsp_priv; + const u32 clk_rate = clk_get_rate(dwmac->clk_eth_ck); + + if (!dwmac->enable_eth_ck) + return 0; + + switch (plat_dat->mac_interface) { + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_GMII: + if (clk_rate == ETH_CK_F_25M) + return 0; + break; + case PHY_INTERFACE_MODE_RMII: + if (clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_50M) + return 0; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + if (clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_125M) + return 0; + break; + default: + break; + } + + dev_err(dwmac->dev, "Mode %s does not match eth-ck frequency %d Hz", + phy_modes(plat_dat->mac_interface), clk_rate); + return -EINVAL; +} + +static int stm32mp1_configure_pmcr(struct plat_stmmacenet_data *plat_dat) +{ + struct stm32_dwmac *dwmac = plat_dat->bsp_priv; + u32 reg = dwmac->mode_reg; + int val = 0; + + switch (plat_dat->mac_interface) { + case PHY_INTERFACE_MODE_MII: + /* + * STM32MP15xx supports both MII and GMII, STM32MP13xx MII only. 
+ * SYSCFG_PMCSETR ETH_SELMII is present only on STM32MP15xx and + * acts as a selector between 0:GMII and 1:MII. As STM32MP13xx + * supports only MII, ETH_SELMII is not present. + */ + if (!dwmac->ops->is_mp13) /* Select MII mode on STM32MP15xx */ + val |= SYSCFG_PMCR_ETH_SEL_MII; break; case PHY_INTERFACE_MODE_GMII: val = SYSCFG_PMCR_ETH_SEL_GMII; - if (clk_rate == ETH_CK_F_25M && - (dwmac->eth_clk_sel_reg || dwmac->ext_phyclk)) { - dwmac->enable_eth_ck = true; + if (dwmac->enable_eth_ck) val |= SYSCFG_PMCR_ETH_CLK_SEL; - } - pr_debug("SYSCFG init : PHY_INTERFACE_MODE_GMII\n"); break; case PHY_INTERFACE_MODE_RMII: val = SYSCFG_PMCR_ETH_SEL_RMII; - if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_50M) && - (dwmac->eth_ref_clk_sel_reg || dwmac->ext_phyclk)) { - dwmac->enable_eth_ck = true; + if (dwmac->enable_eth_ck) val |= SYSCFG_PMCR_ETH_REF_CLK_SEL; - } - pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n"); break; case PHY_INTERFACE_MODE_RGMII: case PHY_INTERFACE_MODE_RGMII_ID: case PHY_INTERFACE_MODE_RGMII_RXID: case PHY_INTERFACE_MODE_RGMII_TXID: val = SYSCFG_PMCR_ETH_SEL_RGMII; - if ((clk_rate == ETH_CK_F_25M || clk_rate == ETH_CK_F_125M) && - (dwmac->eth_clk_sel_reg || dwmac->ext_phyclk)) { - dwmac->enable_eth_ck = true; + if (dwmac->enable_eth_ck) val |= SYSCFG_PMCR_ETH_CLK_SEL; - } - pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RGMII\n"); break; default: - pr_debug("SYSCFG init : Do not manage %d interface\n", - plat_dat->mac_interface); + dev_err(dwmac->dev, "Mode %s not supported", + phy_modes(plat_dat->mac_interface)); /* Do not manage others interfaces */ return -EINVAL; } + dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface)); + + /* Shift value at correct ethernet MAC offset in SYSCFG_PMCSETR */ + val <<= ffs(dwmac->mode_mask) - ffs(SYSCFG_MP1_ETH_MASK); + /* Need to update PMCCLRR (clear register) */ - regmap_write(dwmac->regmap, reg + SYSCFG_PMCCLRR_OFFSET, - dwmac->ops->syscfg_eth_mask); + regmap_write(dwmac->regmap, dwmac->ops->syscfg_clr_off, + dwmac->mode_mask); /* Update PMCSETR (set register) */ return regmap_update_bits(dwmac->regmap, reg, - dwmac->ops->syscfg_eth_mask, val); + dwmac->mode_mask, val); +} + +static int stm32mp2_configure_syscfg(struct plat_stmmacenet_data *plat_dat) +{ + struct stm32_dwmac *dwmac = plat_dat->bsp_priv; + u32 reg = dwmac->mode_reg; + int val = 0; + + switch (plat_dat->mac_interface) { + case PHY_INTERFACE_MODE_MII: + /* ETH_REF_CLK_SEL bit in SYSCFG register is not applicable in MII mode */ + break; + case PHY_INTERFACE_MODE_RMII: + val = SYSCFG_ETHCR_ETH_SEL_RMII; + if (dwmac->enable_eth_ck) { + /* Internal clock ETH_CLK of 50MHz from RCC is used */ + val |= SYSCFG_ETHCR_ETH_REF_CLK_SEL; + } + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + val = SYSCFG_ETHCR_ETH_SEL_RGMII; + fallthrough; + case PHY_INTERFACE_MODE_GMII: + if (dwmac->enable_eth_ck) { + /* Internal clock ETH_CLK of 125MHz from RCC is used */ + val |= SYSCFG_ETHCR_ETH_CLK_SEL; + } + break; + default: + dev_err(dwmac->dev, "Mode %s not supported", + phy_modes(plat_dat->mac_interface)); + /* Do not manage others interfaces */ + return -EINVAL; + } + + dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface)); + + /* Select PTP (IEEE1588) clock selection from RCC (ck_ker_ethxptp) */ + val |= SYSCFG_ETHCR_ETH_PTP_CLK_SEL; + + /* Update ETHCR (set register) */ + return regmap_update_bits(dwmac->regmap, reg, + SYSCFG_MP2_ETH_MASK, val); +} + 
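+/*
+ * stm32mp1_set_mode() is shared by STM32MP1, STM32MP13 and STM32MP25:
+ * it first decides whether the external eth-ck clock is used, validates
+ * the clock rate against the PHY mode, and then programs either the
+ * MP1/MP13 PMCR or the MP2 SYSCFG ETHCR accordingly.
+ */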
+static int stm32mp1_set_mode(struct plat_stmmacenet_data *plat_dat) +{ + struct stm32_dwmac *dwmac = plat_dat->bsp_priv; + int ret; + + ret = stm32mp1_select_ethck_external(plat_dat); + if (ret) + return ret; + + ret = stm32mp1_validate_ethck_rate(plat_dat); + if (ret) + return ret; + + if (!dwmac->ops->is_mp2) + return stm32mp1_configure_pmcr(plat_dat); + else + return stm32mp2_configure_syscfg(plat_dat); } static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat) @@ -227,21 +365,21 @@ static int stm32mcu_set_mode(struct plat_stmmacenet_data *plat_dat) switch (plat_dat->mac_interface) { case PHY_INTERFACE_MODE_MII: val = SYSCFG_MCU_ETH_SEL_MII; - pr_debug("SYSCFG init : PHY_INTERFACE_MODE_MII\n"); break; case PHY_INTERFACE_MODE_RMII: val = SYSCFG_MCU_ETH_SEL_RMII; - pr_debug("SYSCFG init : PHY_INTERFACE_MODE_RMII\n"); break; default: - pr_debug("SYSCFG init : Do not manage %d interface\n", - plat_dat->mac_interface); + dev_err(dwmac->dev, "Mode %s not supported", + phy_modes(plat_dat->mac_interface)); /* Do not manage others interfaces */ return -EINVAL; } + dev_dbg(dwmac->dev, "Mode %s", phy_modes(plat_dat->mac_interface)); + return regmap_update_bits(dwmac->regmap, reg, - dwmac->ops->syscfg_eth_mask, val << 23); + SYSCFG_MCU_ETH_MASK, val << 23); } static void stm32_dwmac_clk_disable(struct stm32_dwmac *dwmac, bool suspend) @@ -286,8 +424,24 @@ static int stm32_dwmac_parse_data(struct stm32_dwmac *dwmac, return PTR_ERR(dwmac->regmap); err = of_property_read_u32_index(np, "st,syscon", 1, &dwmac->mode_reg); - if (err) + if (err) { dev_err(dev, "Can't get sysconfig mode offset (%d)\n", err); + return err; + } + + if (dwmac->ops->is_mp2) + return 0; + + dwmac->mode_mask = SYSCFG_MP1_ETH_MASK; + err = of_property_read_u32_index(np, "st,syscon", 2, &dwmac->mode_mask); + if (err) { + if (dwmac->ops->is_mp13) { + dev_err(dev, "Sysconfig register mask must be set (%d)\n", err); + } else { + dev_dbg(dev, "Warning sysconfig register mask not set\n"); + err = 0; + } + } return err; } @@ -305,7 +459,7 @@ static int stm32mp1_parse_data(struct stm32_dwmac *dwmac, /* Gigabit Ethernet 125MHz clock selection. 
*/ dwmac->eth_clk_sel_reg = of_property_read_bool(np, "st,eth-clk-sel"); - /* Ethernet 50Mhz RMII clock selection */ + /* Ethernet 50MHz RMII clock selection */ dwmac->eth_ref_clk_sel_reg = of_property_read_bool(np, "st,eth-ref-clk-sel"); @@ -478,8 +632,7 @@ static SIMPLE_DEV_PM_OPS(stm32_dwmac_pm_ops, stm32_dwmac_suspend, stm32_dwmac_resume); static struct stm32_ops stm32mcu_dwmac_data = { - .set_mode = stm32mcu_set_mode, - .syscfg_eth_mask = SYSCFG_MCU_ETH_MASK + .set_mode = stm32mcu_set_mode }; static struct stm32_ops stm32mp1_dwmac_data = { @@ -487,13 +640,35 @@ static struct stm32_ops stm32mp1_dwmac_data = { .suspend = stm32mp1_suspend, .resume = stm32mp1_resume, .parse_data = stm32mp1_parse_data, - .syscfg_eth_mask = SYSCFG_MP1_ETH_MASK, + .syscfg_clr_off = 0x44, + .is_mp13 = false, + .clk_rx_enable_in_suspend = true +}; + +static struct stm32_ops stm32mp13_dwmac_data = { + .set_mode = stm32mp1_set_mode, + .suspend = stm32mp1_suspend, + .resume = stm32mp1_resume, + .parse_data = stm32mp1_parse_data, + .syscfg_clr_off = 0x08, + .is_mp13 = true, + .clk_rx_enable_in_suspend = true +}; + +static struct stm32_ops stm32mp25_dwmac_data = { + .set_mode = stm32mp1_set_mode, + .suspend = stm32mp1_suspend, + .resume = stm32mp1_resume, + .parse_data = stm32mp1_parse_data, + .is_mp2 = true, .clk_rx_enable_in_suspend = true }; static const struct of_device_id stm32_dwmac_match[] = { { .compatible = "st,stm32-dwmac", .data = &stm32mcu_dwmac_data}, { .compatible = "st,stm32mp1-dwmac", .data = &stm32mp1_dwmac_data}, + { .compatible = "st,stm32mp13-dwmac", .data = &stm32mp13_dwmac_data}, + { .compatible = "st,stm32mp25-dwmac", .data = &stm32mp25_dwmac_data}, { } }; MODULE_DEVICE_TABLE(of, stm32_dwmac_match); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c index 8555299443f4..d413d76a8936 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c @@ -15,7 +15,7 @@ #include #include #include -#include +#include #include "stmmac.h" #include "stmmac_pcs.h" #include "dwmac1000.h" @@ -404,11 +404,6 @@ static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral, dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); } -static void dwmac1000_rane(void __iomem *ioaddr, bool restart) -{ - dwmac_rane(ioaddr, GMAC_PCS_BASE, restart); -} - static void dwmac1000_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv) { dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv); @@ -519,7 +514,6 @@ const struct stmmac_ops dwmac1000_ops = { .set_eee_pls = dwmac1000_set_eee_pls, .debug = dwmac1000_debug, .pcs_ctrl_ane = dwmac1000_ctrl_ane, - .pcs_rane = dwmac1000_rane, .pcs_get_adv_lp = dwmac1000_get_adv_lp, .set_mac_loopback = dwmac1000_set_mac_loopback, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c index daf79cdbd3ec..adccdd816ea9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_dma.c @@ -12,7 +12,7 @@ Author: Giuseppe Cavallaro *******************************************************************************/ -#include +#include #include "dwmac1000.h" #include "dwmac_dma.h" diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c index 7667d103cd0e..14e847c0e1a9 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c +++ 
b/drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c @@ -15,7 +15,7 @@ *******************************************************************************/ #include -#include +#include #include "stmmac.h" #include "dwmac100.h" diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c index dea270f60cc3..b402fb54f613 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c @@ -14,7 +14,7 @@ Author: Giuseppe Cavallaro *******************************************************************************/ -#include +#include #include "dwmac100.h" #include "dwmac_dma.h" diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c index b25774d69195..dbd9f93b2460 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c @@ -758,11 +758,6 @@ static void dwmac4_ctrl_ane(void __iomem *ioaddr, bool ane, bool srgmi_ral, dwmac_ctrl_ane(ioaddr, GMAC_PCS_BASE, ane, srgmi_ral, loopback); } -static void dwmac4_rane(void __iomem *ioaddr, bool restart) -{ - dwmac_rane(ioaddr, GMAC_PCS_BASE, restart); -} - static void dwmac4_get_adv_lp(void __iomem *ioaddr, struct rgmii_adv *adv) { dwmac_get_adv_lp(ioaddr, GMAC_PCS_BASE, adv); @@ -1215,7 +1210,6 @@ const struct stmmac_ops dwmac4_ops = { .set_eee_timer = dwmac4_set_eee_timer, .set_eee_pls = dwmac4_set_eee_pls, .pcs_ctrl_ane = dwmac4_ctrl_ane, - .pcs_rane = dwmac4_rane, .pcs_get_adv_lp = dwmac4_get_adv_lp, .debug = dwmac4_debug, .set_filter = dwmac4_set_filter, @@ -1260,7 +1254,6 @@ const struct stmmac_ops dwmac410_ops = { .set_eee_timer = dwmac4_set_eee_timer, .set_eee_pls = dwmac4_set_eee_pls, .pcs_ctrl_ane = dwmac4_ctrl_ane, - .pcs_rane = dwmac4_rane, .pcs_get_adv_lp = dwmac4_get_adv_lp, .debug = dwmac4_debug, .set_filter = dwmac4_set_filter, @@ -1309,7 +1302,6 @@ const struct stmmac_ops dwmac510_ops = { .set_eee_timer = dwmac4_set_eee_timer, .set_eee_pls = dwmac4_set_eee_pls, .pcs_ctrl_ane = dwmac4_ctrl_ane, - .pcs_rane = dwmac4_rane, .pcs_get_adv_lp = dwmac4_get_adv_lp, .debug = dwmac4_debug, .set_filter = dwmac4_set_filter, diff --git a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c index f8e7775bb633..6a987cf598e4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwxgmac2_core.c @@ -1554,9 +1554,6 @@ const struct stmmac_ops dwxgmac210_ops = { .reset_eee_mode = dwxgmac2_reset_eee_mode, .set_eee_timer = dwxgmac2_set_eee_timer, .set_eee_pls = dwxgmac2_set_eee_pls, - .pcs_ctrl_ane = NULL, - .pcs_rane = NULL, - .pcs_get_adv_lp = NULL, .debug = NULL, .set_filter = dwxgmac2_set_filter, .safety_feat_config = dwxgmac3_safety_feat_config, @@ -1614,9 +1611,6 @@ const struct stmmac_ops dwxlgmac2_ops = { .reset_eee_mode = dwxgmac2_reset_eee_mode, .set_eee_timer = dwxgmac2_set_eee_timer, .set_eee_pls = dwxgmac2_set_eee_pls, - .pcs_ctrl_ane = NULL, - .pcs_rane = NULL, - .pcs_get_adv_lp = NULL, .debug = NULL, .set_filter = dwxgmac2_set_filter, .safety_feat_config = dwxgmac3_safety_feat_config, diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h index 90384db228b5..97934ccba5b1 100644 --- a/drivers/net/ethernet/stmicro/stmmac/hwif.h +++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h @@ -370,7 +370,6 @@ struct stmmac_ops { /* PCS calls */ void (*pcs_ctrl_ane)(void __iomem *ioaddr, bool 
ane, bool srgmi_ral, bool loopback); - void (*pcs_rane)(void __iomem *ioaddr, bool restart); void (*pcs_get_adv_lp)(void __iomem *ioaddr, struct rgmii_adv *adv); /* Safety Features */ int (*safety_feat_config)(void __iomem *ioaddr, unsigned int asp, @@ -484,8 +483,6 @@ struct stmmac_ops { stmmac_do_void_callback(__priv, mac, debug, __priv, __args) #define stmmac_pcs_ctrl_ane(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, pcs_ctrl_ane, __args) -#define stmmac_pcs_rane(__priv, __args...) \ - stmmac_do_void_callback(__priv, mac, pcs_rane, __priv, __args) #define stmmac_pcs_get_adv_lp(__priv, __args...) \ stmmac_do_void_callback(__priv, mac, pcs_get_adv_lp, __args) #define stmmac_safety_feat_config(__priv, __args...) \ diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 542e2633a6f5..7008219fd88d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -11,10 +11,10 @@ #include #include #include +#include #include #include #include -#include #include "stmmac.h" #include "dwmac_dma.h" @@ -1199,7 +1199,7 @@ static int stmmac_set_channels(struct net_device *dev, } static int stmmac_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct stmmac_priv *priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c58782c41417..4b6a359e5a94 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -471,13 +471,6 @@ bool stmmac_eee_init(struct stmmac_priv *priv) { int eee_tw_timer = priv->eee_tw_timer; - /* Using PCS we cannot dial with the phy registers at this stage - * so we do not support extra feature like EEE. - */ - if (priv->hw->pcs == STMMAC_PCS_TBI || - priv->hw->pcs == STMMAC_PCS_RTBI) - return false; - /* Check if MAC core supports the EEE feature. */ if (!priv->dma_cap.eee) return false; @@ -956,11 +949,15 @@ static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config, phy_interface_t interface) { struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); + struct phylink_pcs *pcs; - if (priv->hw->xpcs) - return &priv->hw->xpcs->pcs; + if (priv->plat->select_pcs) { + pcs = priv->plat->select_pcs(priv, interface); + if (!IS_ERR(pcs)) + return pcs; + } - return priv->hw->phylink_pcs; + return NULL; } static void stmmac_mac_config(struct phylink_config *config, unsigned int mode, @@ -1228,8 +1225,8 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) mdio_bus_data = priv->plat->mdio_bus_data; if (mdio_bus_data) - priv->phylink_config.ovr_an_inband = - mdio_bus_data->xpcs_an_inband; + priv->phylink_config.default_an_inband = + mdio_bus_data->default_an_inband; /* Set the platform/firmware specified interface mode. Note, phylink * deals with the PHY interface mode, not the MAC interface mode. 
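The new select_pcs hook deserves a note: stmmac_mac_select_pcs() now delegates entirely to the platform, and an ERR_PTR return falls through to NULL (phylink then drives the PHY alone). A hedged sketch of what a glue driver might plug in; the foo_* names and the bsp-private pcs field are hypothetical, not part of the patch:

struct foo_bsp {
        struct phylink_pcs pcs;
        /* ... other glue state ... */
};

static struct phylink_pcs *foo_select_pcs(struct stmmac_priv *priv,
                                          phy_interface_t interface)
{
        struct foo_bsp *bsp = priv->plat->bsp_priv;

        /* Offer the integrated PCS only for in-band interface modes;
         * any ERR_PTR makes stmmac_mac_select_pcs() return NULL. */
        if (interface == PHY_INTERFACE_MODE_SGMII ||
            phy_interface_mode_is_8023z(interface))
                return &bsp->pcs;

        return ERR_PTR(-EOPNOTSUPP);
}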
@@ -3953,9 +3950,7 @@ static int __stmmac_open(struct net_device *dev, if (ret < 0) return ret; - if (priv->hw->pcs != STMMAC_PCS_TBI && - priv->hw->pcs != STMMAC_PCS_RTBI && - (!priv->hw->xpcs || + if ((!priv->hw->xpcs || xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) { ret = stmmac_init_phy(dev); if (ret) { @@ -4097,8 +4092,6 @@ static int stmmac_release(struct net_device *dev) if (priv->plat->serdes_powerdown) priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); - netif_carrier_off(dev); - stmmac_release_ptp(priv); pm_runtime_put(priv->device); @@ -4244,18 +4237,32 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) { struct dma_desc *desc, *first, *mss_desc = NULL; struct stmmac_priv *priv = netdev_priv(dev); - int nfrags = skb_shinfo(skb)->nr_frags; - u32 queue = skb_get_queue_mapping(skb); + int tmp_pay_len = 0, first_tx, nfrags; unsigned int first_entry, tx_packets; struct stmmac_txq_stats *txq_stats; - int tmp_pay_len = 0, first_tx; struct stmmac_tx_queue *tx_q; - bool has_vlan, set_ic; + u32 pay_len, mss, queue; u8 proto_hdr_len, hdr; - u32 pay_len, mss; dma_addr_t des; + bool set_ic; int i; + /* Always insert the VLAN tag into the SKB payload for TSO frames. + * + * Never insert the VLAN tag by HW, since segments split by the + * TSO engine will be un-tagged by mistake. + */ + if (skb_vlan_tag_present(skb)) { + skb = __vlan_hwaccel_push_inside(skb); + if (unlikely(!skb)) { + priv->xstats.tx_dropped++; + return NETDEV_TX_OK; + } + } + + nfrags = skb_shinfo(skb)->nr_frags; + queue = skb_get_queue_mapping(skb); + tx_q = &priv->dma_conf.tx_queue[queue]; txq_stats = &priv->xstats.txq_stats[queue]; first_tx = tx_q->cur_tx; @@ -4308,9 +4315,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) skb->data_len); } - /* Check if VLAN can be inserted by HW */ - has_vlan = stmmac_vlan_insert(priv, skb, tx_q); - first_entry = tx_q->cur_tx; WARN_ON(tx_q->tx_skbuff[first_entry]); @@ -4320,9 +4324,6 @@ static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev) desc = &tx_q->dma_tx[first_entry]; first = desc; - if (has_vlan) - stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT); - /* first descriptor: fill Headers on Buf1 */ des = dma_map_single(priv->device, skb->data, skb_headlen(skb), DMA_TO_DEVICE); @@ -7690,8 +7691,6 @@ int stmmac_dvr_probe(struct device *device, ndev->features |= NETIF_F_RXHASH; ndev->vlan_features |= ndev->features; - /* TSO doesn't work on VLANs yet */ - ndev->vlan_features &= ~NETIF_F_TSO; /* MTU range: 46 - hw-specific max */ ndev->min_mtu = ETH_ZLEN - ETH_HLEN; @@ -7740,16 +7739,12 @@ int stmmac_dvr_probe(struct device *device, if (!pm_runtime_enabled(device)) pm_runtime_enable(device); - if (priv->hw->pcs != STMMAC_PCS_TBI && - priv->hw->pcs != STMMAC_PCS_RTBI) { - /* MDIO bus Registration */ - ret = stmmac_mdio_register(ndev); - if (ret < 0) { - dev_err_probe(priv->device, ret, - "%s: MDIO bus (id: %d) registration failed\n", - __func__, priv->plat->bus_id); - goto error_mdio_register; - } + ret = stmmac_mdio_register(ndev); + if (ret < 0) { + dev_err_probe(priv->device, ret, + "MDIO bus (id: %d) registration failed\n", + priv->plat->bus_id); + goto error_mdio_register; } if (priv->plat->speed_mode_2500) @@ -7791,9 +7786,7 @@ error_netdev_register: error_phy_setup: stmmac_pcs_clean(ndev); error_pcs_setup: - if (priv->hw->pcs != STMMAC_PCS_TBI && - priv->hw->pcs != STMMAC_PCS_RTBI) - stmmac_mdio_unregister(ndev); + stmmac_mdio_unregister(ndev); error_mdio_register: stmmac_napi_del(ndev);
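The TSO hunk above is the behavioral core of this change: the VLAN tag is pushed into the payload in software before segmentation, because, per the comment, segments produced by the TSO engine would otherwise come out untagged. The reusable pattern, sketched on a hypothetical foo_xmit() with driver-specific bookkeeping trimmed:

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
        if (skb_vlan_tag_present(skb)) {
                /* Moves the accelerated tag into the Ethernet header.
                 * The skb is consumed on failure, so only the NULL
                 * result needs handling. */
                skb = __vlan_hwaccel_push_inside(skb);
                if (!skb)
                        return NETDEV_TX_OK;    /* account as a drop */
        }
        /* ... map buffers and build TSO descriptors as usual ... */
        return NETDEV_TX_OK;
}

With the tag guaranteed to be in the payload, the old has_vlan/stmmac_vlan_insert() descriptor path is dead code on the TSO path, which is why it is removed rather than kept as a fallback.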
error_hw_init: @@ -7822,7 +7815,6 @@ void stmmac_dvr_remove(struct device *dev) stmmac_stop_all_dma(priv); stmmac_mac_set(priv, priv->ioaddr, false); - netif_carrier_off(ndev); unregister_netdev(ndev); #ifdef CONFIG_DEBUG_FS @@ -7834,10 +7826,8 @@ void stmmac_dvr_remove(struct device *dev) reset_control_assert(priv->plat->stmmac_ahb_rst); stmmac_pcs_clean(ndev); + stmmac_mdio_unregister(ndev); - if (priv->hw->pcs != STMMAC_PCS_TBI && - priv->hw->pcs != STMMAC_PCS_RTBI) - stmmac_mdio_unregister(ndev); destroy_workqueue(priv->wq); mutex_destroy(&priv->lock); bitmap_free(priv->af_xdp_zc_qps); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c index aa43117134d3..03f90676b3ad 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c @@ -497,35 +497,33 @@ int stmmac_mdio_reset(struct mii_bus *bus) int stmmac_pcs_setup(struct net_device *ndev) { + struct fwnode_handle *devnode, *pcsnode; struct dw_xpcs *xpcs = NULL; struct stmmac_priv *priv; - int ret = -ENODEV; - int mode, addr; + int addr, mode, ret; priv = netdev_priv(ndev); mode = priv->plat->phy_interface; + devnode = priv->plat->port_node; if (priv->plat->pcs_init) { ret = priv->plat->pcs_init(priv); + } else if (fwnode_property_present(devnode, "pcs-handle")) { + pcsnode = fwnode_find_reference(devnode, "pcs-handle", 0); + xpcs = xpcs_create_fwnode(pcsnode, mode); + fwnode_handle_put(pcsnode); + ret = PTR_ERR_OR_ZERO(xpcs); } else if (priv->plat->mdio_bus_data && - priv->plat->mdio_bus_data->has_xpcs) { - /* Try to probe the XPCS by scanning all addresses */ - for (addr = 0; addr < PHY_MAX_ADDR; addr++) { - xpcs = xpcs_create_mdiodev(priv->mii, addr, mode); - if (IS_ERR(xpcs)) - continue; - - ret = 0; - break; - } + priv->plat->mdio_bus_data->pcs_mask) { + addr = ffs(priv->plat->mdio_bus_data->pcs_mask) - 1; + xpcs = xpcs_create_mdiodev(priv->mii, addr, mode); + ret = PTR_ERR_OR_ZERO(xpcs); } else { return 0; } - if (ret) { - dev_warn(priv->device, "No xPCS found\n"); - return ret; - } + if (ret) + return dev_err_probe(priv->device, ret, "No xPCS found\n"); priv->hw->xpcs = xpcs; @@ -610,7 +608,7 @@ int stmmac_mdio_register(struct net_device *ndev) snprintf(new_bus->id, MII_BUS_ID_SIZE, "%s-%x", new_bus->name, priv->plat->bus_id); new_bus->priv = ndev; - new_bus->phy_mask = mdio_bus_data->phy_mask; + new_bus->phy_mask = mdio_bus_data->phy_mask | mdio_bus_data->pcs_mask; new_bus->parent = priv->device; err = of_mdiobus_register(new_bus, mdio_node); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h index 13a30e6df4c1..1bdf87b237c4 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pcs.h @@ -74,23 +74,6 @@ static inline void dwmac_pcs_isr(void __iomem *ioaddr, u32 reg, } } -/** - * dwmac_rane - To restart ANE - * @ioaddr: IO registers pointer - * @reg: Base address of the AN Control Register. - * @restart: to restart ANE - * Description: this is to just restart the Auto-Negotiation. - */ -static inline void dwmac_rane(void __iomem *ioaddr, u32 reg, bool restart) -{ - u32 value = readl(ioaddr + GMAC_AN_CTRL(reg)); - - if (restart) - value |= GMAC_AN_CTRL_RAN; - - writel(value, ioaddr + GMAC_AN_CTRL(reg)); -} - /** * dwmac_ctrl_ane - To program the AN Control Register. 
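One detail of the stmmac_pcs_setup()/stmmac_mdio_register() rework above is easy to miss: pcs_mask is a bitmap of MDIO addresses reserved for the PCS, the lowest set bit selects the device address, and the same bits are OR'ed into the bus's phy_mask so phylib never probes those addresses as PHYs. A tiny standalone model of that bit arithmetic, with the mask value invented:

#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned int pcs_mask = 1u << 16;       /* PCS sits at MDIO address 16 */
        unsigned int phy_mask = 0;

        int addr = ffs(pcs_mask) - 1;           /* -> 16 */
        phy_mask |= pcs_mask;                   /* hide it from phylib */

        printf("PCS address %d, phy_mask 0x%x\n", addr, phy_mask);
        return 0;
}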
* @ioaddr: IO registers pointer diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index 54797edc9b38..ad868e8d195d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -764,8 +764,8 @@ EXPORT_SYMBOL_GPL(stmmac_get_platform_resources); * Description: Call the platform's init callback (if any) and propagate * the return value. */ -int stmmac_pltfr_init(struct platform_device *pdev, - struct plat_stmmacenet_data *plat) +static int stmmac_pltfr_init(struct platform_device *pdev, + struct plat_stmmacenet_data *plat) { int ret = 0; @@ -774,7 +774,6 @@ int stmmac_pltfr_init(struct platform_device *pdev, return ret; } -EXPORT_SYMBOL_GPL(stmmac_pltfr_init); /** * stmmac_pltfr_exit @@ -782,13 +781,12 @@ EXPORT_SYMBOL_GPL(stmmac_pltfr_init); * @plat: driver data platform structure * Description: Call the platform's exit callback (if any). */ -void stmmac_pltfr_exit(struct platform_device *pdev, - struct plat_stmmacenet_data *plat) +static void stmmac_pltfr_exit(struct platform_device *pdev, + struct plat_stmmacenet_data *plat) { if (plat->exit) plat->exit(pdev, plat->bsp_priv); } -EXPORT_SYMBOL_GPL(stmmac_pltfr_exit); /** * stmmac_pltfr_probe diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h index bb6fc7e59aed..72dc1a32e46d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h @@ -17,11 +17,6 @@ devm_stmmac_probe_config_dt(struct platform_device *pdev, u8 *mac); int stmmac_get_platform_resources(struct platform_device *pdev, struct stmmac_resources *stmmac_res); -int stmmac_pltfr_init(struct platform_device *pdev, - struct plat_stmmacenet_data *plat); -void stmmac_pltfr_exit(struct platform_device *pdev, - struct plat_stmmacenet_data *plat); - int stmmac_pltfr_probe(struct platform_device *pdev, struct plat_stmmacenet_data *plat, struct stmmac_resources *res); diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c index f8e133604146..131786aa4d5b 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-common.c @@ -21,8 +21,6 @@ #include "dwc-xlgmac.h" #include "dwc-xlgmac-reg.h" -MODULE_LICENSE("Dual BSD/GPL"); - static int debug = -1; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "DWC ethernet debug level (0=none,...,16=all)"); @@ -725,3 +723,8 @@ void xlgmac_print_all_hw_features(struct xlgmac_pdata *pdata) XLGMAC_PR("=====================================================\n"); XLGMAC_PR("\n"); } + +MODULE_DESCRIPTION(XLGMAC_DRV_DESC); +MODULE_VERSION(XLGMAC_DRV_VERSION); +MODULE_AUTHOR("Jie Deng "); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c index fa8604d7b797..36fe538e3332 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-pci.c @@ -71,8 +71,3 @@ static struct pci_driver xlgmac_pci_driver = { }; module_pci_driver(xlgmac_pci_driver); - -MODULE_DESCRIPTION(XLGMAC_DRV_DESC); -MODULE_VERSION(XLGMAC_DRV_VERSION); -MODULE_AUTHOR("Jie Deng "); -MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/ethernet/tehuti/Kconfig b/drivers/net/ethernet/tehuti/Kconfig index 8735633765a1..6db2c9817445 100644 --- a/drivers/net/ethernet/tehuti/Kconfig 
+++ b/drivers/net/ethernet/tehuti/Kconfig @@ -23,4 +23,19 @@ config TEHUTI help Tehuti Networks 10G Ethernet NIC +config TEHUTI_TN40 + tristate "Tehuti Networks TN40xx 10G Ethernet adapters" + depends on PCI + select PAGE_POOL + select FW_LOADER + select PHYLINK + help + This driver supports 10G Ethernet adapters using Tehuti Networks + TN40xx chips. Currently, adapters with Applied Micro Circuits + Corporation QT2025 are supported: Tehuti Networks TN9310, + DLink DXE-810S, ASUS XG-C100F, and Edimax EN-9320. + + To compile this driver as a module, choose M here: the module + will be called tn40xx. + endif # NET_VENDOR_TEHUTI diff --git a/drivers/net/ethernet/tehuti/Makefile b/drivers/net/ethernet/tehuti/Makefile index 13a0ddd62088..0d4f4d63a65c 100644 --- a/drivers/net/ethernet/tehuti/Makefile +++ b/drivers/net/ethernet/tehuti/Makefile @@ -4,3 +4,6 @@ # obj-$(CONFIG_TEHUTI) += tehuti.o + +tn40xx-y := tn40.o tn40_mdio.o tn40_phy.o +obj-$(CONFIG_TEHUTI_TN40) += tn40xx.o diff --git a/drivers/net/ethernet/tehuti/tn40.c b/drivers/net/ethernet/tehuti/tn40.c new file mode 100644 index 000000000000..259bdac24cf2 --- /dev/null +++ b/drivers/net/ethernet/tehuti/tn40.c @@ -0,0 +1,1850 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) Tehuti Networks Ltd. */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "tn40.h" + +#define TN40_SHORT_PACKET_SIZE 60 +#define TN40_FIRMWARE_NAME "tehuti/bdx.bin" + +static void tn40_enable_interrupts(struct tn40_priv *priv) +{ + tn40_write_reg(priv, TN40_REG_IMR, priv->isr_mask); +} + +static void tn40_disable_interrupts(struct tn40_priv *priv) +{ + tn40_write_reg(priv, TN40_REG_IMR, 0); +} + +static int tn40_fifo_alloc(struct tn40_priv *priv, struct tn40_fifo *f, + int fsz_type, + u16 reg_cfg0, u16 reg_cfg1, + u16 reg_rptr, u16 reg_wptr) +{ + u16 memsz = TN40_FIFO_SIZE * (1 << fsz_type); + u64 cfg_base; + + memset(f, 0, sizeof(struct tn40_fifo)); + /* 1K extra space is allocated at the end of the fifo to simplify + * processing of descriptors that wrap around the fifo's end.
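Before the allocation itself, note the sizing scheme: fsz_type is a power-of-two exponent applied to a fixed base, and the mapping is padded so a descriptor written across the end can spill past memsz. A runnable model of the arithmetic; the base and padding constants are assumed for illustration (the real ones are TN40_FIFO_SIZE and TN40_FIFO_EXTRA_SPACE in the driver header):

#include <stdio.h>

#define FIFO_SIZE       4096u   /* assumed base fifo size */
#define EXTRA_SPACE     1024u   /* room for one wrapped descriptor */

int main(void)
{
        for (int fsz_type = 0; fsz_type < 4; fsz_type++) {
                unsigned int memsz = FIFO_SIZE * (1u << fsz_type);

                /* size_mask only works because memsz is a power of two */
                printf("type %d: memsz %u, alloc %u, size_mask 0x%x\n",
                       fsz_type, memsz, memsz + EXTRA_SPACE, memsz - 1);
        }
        return 0;
}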
+ */ + f->va = dma_alloc_coherent(&priv->pdev->dev, + memsz + TN40_FIFO_EXTRA_SPACE, &f->da, + GFP_KERNEL); + if (!f->va) + return -ENOMEM; + + f->reg_cfg0 = reg_cfg0; + f->reg_cfg1 = reg_cfg1; + f->reg_rptr = reg_rptr; + f->reg_wptr = reg_wptr; + f->rptr = 0; + f->wptr = 0; + f->memsz = memsz; + f->size_mask = memsz - 1; + cfg_base = lower_32_bits((f->da & TN40_TX_RX_CFG0_BASE) | fsz_type); + tn40_write_reg(priv, reg_cfg0, cfg_base); + tn40_write_reg(priv, reg_cfg1, upper_32_bits(f->da)); + return 0; +} + +static void tn40_fifo_free(struct tn40_priv *priv, struct tn40_fifo *f) +{ + dma_free_coherent(&priv->pdev->dev, + f->memsz + TN40_FIFO_EXTRA_SPACE, f->va, f->da); +} + +static struct tn40_rxdb *tn40_rxdb_alloc(int nelem) +{ + size_t size = sizeof(struct tn40_rxdb) + (nelem * sizeof(int)) + + (nelem * sizeof(struct tn40_rx_map)); + struct tn40_rxdb *db; + int i; + + db = vzalloc(size); + if (db) { + db->stack = (int *)(db + 1); + db->elems = (void *)(db->stack + nelem); + db->nelem = nelem; + db->top = nelem; + /* make the first alloc close to db struct */ + for (i = 0; i < nelem; i++) + db->stack[i] = nelem - i - 1; + } + return db; +} + +static void tn40_rxdb_free(struct tn40_rxdb *db) +{ + vfree(db); +} + +static int tn40_rxdb_alloc_elem(struct tn40_rxdb *db) +{ + return db->stack[--db->top]; +} + +static void *tn40_rxdb_addr_elem(struct tn40_rxdb *db, unsigned int n) +{ + return db->elems + n; +} + +static int tn40_rxdb_available(struct tn40_rxdb *db) +{ + return db->top; +} + +static void tn40_rxdb_free_elem(struct tn40_rxdb *db, unsigned int n) +{ + db->stack[db->top++] = n; +} + +/** + * tn40_create_rx_ring - Initialize all RX-related HW and SW resources + * @priv: NIC private structure + * + * create_rx_ring creates rxf and rxd fifos, updates the relevant HW registers, + * preallocates skbs for rx. It assumes that Rx is disabled in HW. Functions are + * grouped for better cache usage. + * + * RxD fifo is smaller than RxF fifo by design. Upon high load, RxD will be + * filled and packets will be dropped by the NIC without getting into the host + * or generating interrupts. In this situation the host has no chance of + * processing all the packets. Dropping packets by the NIC is cheaper, since it + * takes 0 CPU cycles. + * + * Return: 0 on success and negative value on error.
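The rxdb above is a tiny O(1) index allocator: one vzalloc() carries the header, a stack of free indices and the element array, and alloc/free are a pop and a push. A self-contained userspace model of the same layout:

#include <stdio.h>
#include <stdlib.h>

struct rx_map { void *page; };

struct rxdb {
        int *stack;
        struct rx_map *elems;
        int nelem, top;
};

static struct rxdb *rxdb_alloc(int nelem)
{
        size_t size = sizeof(struct rxdb) + nelem * sizeof(int) +
                      nelem * sizeof(struct rx_map);
        struct rxdb *db = calloc(1, size);

        if (db) {
                db->stack = (int *)(db + 1);    /* stack right after header */
                db->elems = (struct rx_map *)(db->stack + nelem);
                db->nelem = nelem;
                db->top = nelem;
                /* low indices end up on top of the stack, so the first
                 * allocations stay close to the header's cache lines */
                for (int i = 0; i < nelem; i++)
                        db->stack[i] = nelem - i - 1;
        }
        return db;
}

int main(void)
{
        struct rxdb *db = rxdb_alloc(8);

        int idx = db->stack[--db->top];         /* alloc_elem -> 0 */
        printf("got %d, %d still free\n", idx, db->top);
        db->stack[db->top++] = idx;             /* free_elem */
        free(db);
        return 0;
}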
+ */ +static int tn40_create_rx_ring(struct tn40_priv *priv) +{ + struct page_pool_params pp = { + .dev = &priv->pdev->dev, + .napi = &priv->napi, + .dma_dir = DMA_FROM_DEVICE, + .netdev = priv->ndev, + .flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV, + .max_len = PAGE_SIZE, + }; + int ret, pkt_size, nr; + + priv->page_pool = page_pool_create(&pp); + if (IS_ERR(priv->page_pool)) + return PTR_ERR(priv->page_pool); + + ret = tn40_fifo_alloc(priv, &priv->rxd_fifo0.m, priv->rxd_size, + TN40_REG_RXD_CFG0_0, TN40_REG_RXD_CFG1_0, + TN40_REG_RXD_RPTR_0, TN40_REG_RXD_WPTR_0); + if (ret) + goto err_destroy_page_pool; + + ret = tn40_fifo_alloc(priv, &priv->rxf_fifo0.m, priv->rxf_size, + TN40_REG_RXF_CFG0_0, TN40_REG_RXF_CFG1_0, + TN40_REG_RXF_RPTR_0, TN40_REG_RXF_WPTR_0); + if (ret) + goto err_free_rxd; + + pkt_size = priv->ndev->mtu + VLAN_ETH_HLEN; + priv->rxf_fifo0.m.pktsz = pkt_size; + nr = priv->rxf_fifo0.m.memsz / sizeof(struct tn40_rxf_desc); + priv->rxdb0 = tn40_rxdb_alloc(nr); + if (!priv->rxdb0) { + ret = -ENOMEM; + goto err_free_rxf; + } + return 0; +err_free_rxf: + tn40_fifo_free(priv, &priv->rxf_fifo0.m); +err_free_rxd: + tn40_fifo_free(priv, &priv->rxd_fifo0.m); +err_destroy_page_pool: + page_pool_destroy(priv->page_pool); + return ret; +} + +static void tn40_rx_free_buffers(struct tn40_priv *priv) +{ + struct tn40_rxdb *db = priv->rxdb0; + struct tn40_rx_map *dm; + u16 i; + + netdev_dbg(priv->ndev, "total =%d free =%d busy =%d\n", db->nelem, + tn40_rxdb_available(db), + db->nelem - tn40_rxdb_available(db)); + + for (i = 0; i < db->nelem; i++) { + dm = tn40_rxdb_addr_elem(db, i); + if (dm->page) + page_pool_put_full_page(priv->page_pool, dm->page, + false); + } +} + +static void tn40_destroy_rx_ring(struct tn40_priv *priv) +{ + if (priv->rxdb0) { + tn40_rx_free_buffers(priv); + tn40_rxdb_free(priv->rxdb0); + priv->rxdb0 = NULL; + } + tn40_fifo_free(priv, &priv->rxf_fifo0.m); + tn40_fifo_free(priv, &priv->rxd_fifo0.m); + page_pool_destroy(priv->page_pool); +} + +static void tn40_set_rx_desc(struct tn40_priv *priv, int idx, u64 dma) +{ + struct tn40_rxf_fifo *f = &priv->rxf_fifo0; + struct tn40_rxf_desc *rxfd; + int delta; + + rxfd = (struct tn40_rxf_desc *)(f->m.va + f->m.wptr); + rxfd->info = cpu_to_le32(0x10003); /* INFO =1 BC =3 */ + rxfd->va_lo = cpu_to_le32(idx); + rxfd->pa_lo = cpu_to_le32(lower_32_bits(dma)); + rxfd->pa_hi = cpu_to_le32(upper_32_bits(dma)); + rxfd->len = cpu_to_le32(f->m.pktsz); + f->m.wptr += sizeof(struct tn40_rxf_desc); + delta = f->m.wptr - f->m.memsz; + if (unlikely(delta >= 0)) { + f->m.wptr = delta; + if (delta > 0) { + memcpy(f->m.va, f->m.va + f->m.memsz, delta); + netdev_dbg(priv->ndev, + "wrapped rxd descriptor\n"); + } + } +} + +/** + * tn40_rx_alloc_buffers - Fill rxf fifo with buffers. + * + * @priv: NIC's private structure + * + * rx_alloc_buffers allocates buffers via the page pool API, builds rxf descs + * and pushes them (rxf descr) into the rxf fifo. The pages are stored in rxdb. + * To calculate the free space, we use the cached values of RPTR and WPTR + * when needed. This function also updates RPTR and WPTR.
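tn40_set_rx_desc() above leans on the extra space documented earlier: a descriptor is always written contiguously, possibly spilling past memsz, and the spilled tail is then mirrored back to offset 0. A compact model with toy sizes:

#include <stdio.h>
#include <string.h>

#define MEMSZ   16
#define EXTRA   8

int main(void)
{
        char ring[MEMSZ + EXTRA];
        const char desc[8] = "ABCDEFGH";        /* one 8-byte descriptor */
        int wptr = 12;

        memcpy(ring + wptr, desc, sizeof(desc));        /* spills 4 bytes */
        wptr += sizeof(desc);

        int delta = wptr - MEMSZ;
        if (delta >= 0) {
                wptr = delta;
                if (delta > 0)  /* mirror the spilled tail to the start */
                        memcpy(ring, ring + MEMSZ, delta);
        }
        printf("new wptr %d, ring starts \"%.4s\"\n", wptr, ring); /* 4, EFGH */
        return 0;
}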
+ */ +static void tn40_rx_alloc_buffers(struct tn40_priv *priv) +{ + struct tn40_rxf_fifo *f = &priv->rxf_fifo0; + struct tn40_rxdb *db = priv->rxdb0; + struct tn40_rx_map *dm; + struct page *page; + int dno, i, idx; + + dno = tn40_rxdb_available(db) - 1; + for (i = dno; i > 0; i--) { + page = page_pool_dev_alloc_pages(priv->page_pool); + if (!page) + break; + + idx = tn40_rxdb_alloc_elem(db); + tn40_set_rx_desc(priv, idx, page_pool_get_dma_addr(page)); + dm = tn40_rxdb_addr_elem(db, idx); + dm->page = page; + } + if (i != dno) + tn40_write_reg(priv, f->m.reg_wptr, + f->m.wptr & TN40_TXF_WPTR_WR_PTR); + netdev_dbg(priv->ndev, "write_reg 0x%04x f->m.reg_wptr 0x%x\n", + f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR); + netdev_dbg(priv->ndev, "read_reg 0x%04x f->m.reg_rptr=0x%x\n", + f->m.reg_rptr, tn40_read_reg(priv, f->m.reg_rptr)); + netdev_dbg(priv->ndev, "write_reg 0x%04x f->m.reg_wptr=0x%x\n", + f->m.reg_wptr, tn40_read_reg(priv, f->m.reg_wptr)); +} + +static void tn40_recycle_rx_buffer(struct tn40_priv *priv, + struct tn40_rxd_desc *rxdd) +{ + struct tn40_rxf_fifo *f = &priv->rxf_fifo0; + struct tn40_rx_map *dm; + int idx; + + idx = le32_to_cpu(rxdd->va_lo); + dm = tn40_rxdb_addr_elem(priv->rxdb0, idx); + tn40_set_rx_desc(priv, idx, page_pool_get_dma_addr(dm->page)); + + tn40_write_reg(priv, f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR); +} + +static int tn40_rx_receive(struct tn40_priv *priv, int budget) +{ + struct tn40_rxd_fifo *f = &priv->rxd_fifo0; + u32 rxd_val1, rxd_err, pkt_id; + int tmp_len, size, done = 0; + struct tn40_rxdb *db = NULL; + struct tn40_rxd_desc *rxdd; + struct tn40_rx_map *dm; + struct sk_buff *skb; + u16 len, rxd_vlan; + int idx; + + f->m.wptr = tn40_read_reg(priv, f->m.reg_wptr) & TN40_TXF_WPTR_WR_PTR; + size = f->m.wptr - f->m.rptr; + if (size < 0) + size += f->m.memsz; /* Size is negative :-) */ + + while (size > 0) { + rxdd = (struct tn40_rxd_desc *)(f->m.va + f->m.rptr); + db = priv->rxdb0; + + /* We have a chicken and egg problem here. If the + * descriptor is wrapped we first need to copy the tail + * of the descriptor to the end of the buffer before + * extracting values from the descriptor. However in + * order to know if the descriptor is wrapped we need to + * obtain the length of the descriptor from (the + * wrapped) descriptor. Luckily the length is the first + * word of the descriptor. Descriptor lengths are + * multiples of 8 bytes so in case of a wrapped + * descriptor the first 8 bytes are guaranteed to appear + * before the end of the buffer. We first obtain the + * length, we then copy the rest of the descriptor if + * needed and then extract the rest of the values from + * the descriptor. + * + * Do not change the order of operations as it will + * break the code!!! + */ + rxd_val1 = le32_to_cpu(rxdd->rxd_val1); + tmp_len = TN40_GET_RXD_BC(rxd_val1) << 3; + pkt_id = TN40_GET_RXD_PKT_ID(rxd_val1); + size -= tmp_len; + /* CHECK FOR A PARTIALLY ARRIVED DESCRIPTOR */ + if (size < 0) { + netdev_dbg(priv->ndev, + "%s partially arrived desc tmp_len %d\n", + __func__, tmp_len); + break; + } + /* make sure that the descriptor has fully arrived + * before reading the rest of the descriptor. + */ + rmb(); + + /* A special treatment is given to non-contiguous + * descriptors that start near the end, wrap around + * and continue at the beginning. The second part is + * copied right after the first, and then the descriptor + * is interpreted as normal. The fifo has an extra + * space to allow such operations.
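The occupancy computation at the top of tn40_rx_receive() is the classic ring-distance idiom: the hardware write pointer may have wrapped while the software read pointer has not, making the signed difference negative. Modeled standalone, with an assumed fifo size:

#include <stdio.h>

#define MEMSZ   4096

static int ring_bytes_ready(int wptr, int rptr)
{
        int size = wptr - rptr;

        if (size < 0)           /* wptr wrapped, rptr not yet */
                size += MEMSZ;
        return size;
}

int main(void)
{
        printf("%d\n", ring_bytes_ready(300, 100));     /* 200 */
        printf("%d\n", ring_bytes_ready(100, 3900));    /* 296 */
        return 0;
}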
+ */ + + /* HAVE WE REACHED THE END OF THE QUEUE? */ + f->m.rptr += tmp_len; + tmp_len = f->m.rptr - f->m.memsz; + if (unlikely(tmp_len >= 0)) { + f->m.rptr = tmp_len; + if (tmp_len > 0) { + /* COPY PARTIAL DESCRIPTOR + * TO THE END OF THE QUEUE + */ + netdev_dbg(priv->ndev, + "wrapped desc rptr=%d tmp_len=%d\n", + f->m.rptr, tmp_len); + memcpy(f->m.va + f->m.memsz, f->m.va, tmp_len); + } + } + idx = le32_to_cpu(rxdd->va_lo); + dm = tn40_rxdb_addr_elem(db, idx); + prefetch(dm); + + len = le16_to_cpu(rxdd->len); + rxd_vlan = le16_to_cpu(rxdd->rxd_vlan); + /* CHECK FOR ERRORS */ + rxd_err = TN40_GET_RXD_ERR(rxd_val1); + if (unlikely(rxd_err)) { + u64_stats_update_begin(&priv->syncp); + priv->stats.rx_errors++; + u64_stats_update_end(&priv->syncp); + tn40_recycle_rx_buffer(priv, rxdd); + continue; + } + + skb = napi_build_skb(page_address(dm->page), PAGE_SIZE); + if (!skb) { + u64_stats_update_begin(&priv->syncp); + priv->stats.rx_dropped++; + priv->alloc_fail++; + u64_stats_update_end(&priv->syncp); + tn40_recycle_rx_buffer(priv, rxdd); + break; + } + skb_mark_for_recycle(skb); + skb_put(skb, len); + skb->protocol = eth_type_trans(skb, priv->ndev); + skb->ip_summed = + (pkt_id == 0) ? CHECKSUM_NONE : CHECKSUM_UNNECESSARY; + if (TN40_GET_RXD_VTAG(rxd_val1)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + TN40_GET_RXD_VLAN_TCI(rxd_vlan)); + + dm->page = NULL; + tn40_rxdb_free_elem(db, idx); + + napi_gro_receive(&priv->napi, skb); + + u64_stats_update_begin(&priv->syncp); + priv->stats.rx_bytes += len; + u64_stats_update_end(&priv->syncp); + + if (unlikely(++done >= budget)) + break; + } + u64_stats_update_begin(&priv->syncp); + priv->stats.rx_packets += done; + u64_stats_update_end(&priv->syncp); + /* FIXME: Do something to minimize pci accesses */ + tn40_write_reg(priv, f->m.reg_rptr, f->m.rptr & TN40_TXF_WPTR_WR_PTR); + tn40_rx_alloc_buffers(priv); + return done; +} + +/* TX HW/SW interaction overview + * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + * There are 2 types of TX communication channels between driver and NIC. + * 1) TX Free Fifo - TXF - Holds ack descriptors for sent packets. + * 2) TX Data Fifo - TXD - Holds descriptors of full buffers. + * + * Currently the NIC supports TSO, checksumming and gather DMA. + * UFO and IP fragmentation are on the way. + * + * TX SW Data Structures + * ~~~~~~~~~~~~~~~~~~~~~ + * TXDB is used to keep track of all skbs owned by SW and their DMA addresses. + * For TX case, ownership lasts from getting the packet via hard_xmit and + * until the HW acknowledges sending the packet by TXF descriptors. + * TXDB is implemented as a cyclic buffer. + * + * FIFO objects keep info about the fifo's size and location, relevant HW + * registers, usage and skb db. Each RXD and RXF fifo has its own fifo + * structure. Implemented as simple struct. + * + * TX SW Execution Flow + * ~~~~~~~~~~~~~~~~~~~~ + * OS calls the driver's hard_xmit method with a packet to send. The driver + * creates DMA mappings, builds TXD descriptors and kicks the HW by updating + * TXD WPTR. + * + * When a packet is sent, the HW writes a TXF descriptor and the SW + * frees the original skb. To prevent TXD fifo overflow without + * reading HW registers every time, the SW deploys the "tx level" + * technique. Upon startup, the tx level is initialized to TXD fifo + * length. For every sent packet, the SW gets its TXD descriptor size + * (from a pre-calculated array) and subtracts it from tx level. The + * size is also stored in txdb.
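The overview comment resumes below; in the meantime, here is a runnable model of the "tx level" scheme it describes. The fifo size and thresholds are invented; only the bookkeeping mirrors the driver:

#include <stdio.h>
#include <stdbool.h>

#define FIFO_BYTES      4096    /* assumed TXD fifo size */
#define MIN_LEVEL       256     /* assumed stop threshold */

static int tx_level = FIFO_BYTES;
static bool queue_stopped;

static void xmit(int desc_bytes)        /* hard_xmit side */
{
        tx_level -= desc_bytes;
        if (tx_level < MIN_LEVEL)
                queue_stopped = true;   /* netif_stop_queue() */
}

static void txf_ack(int desc_bytes)     /* completion side */
{
        tx_level += desc_bytes;
        if (queue_stopped && tx_level >= FIFO_BYTES / 2)
                queue_stopped = false;  /* netif_wake_queue() */
}

int main(void)
{
        for (int i = 0; i < 61; i++)
                xmit(64);
        printf("level %d, stopped %d\n", tx_level, queue_stopped);
        txf_ack(40 * 64);
        printf("level %d, stopped %d\n", tx_level, queue_stopped);
        return 0;
}

No RPTR/WPTR register read appears anywhere in this hot path, which is exactly the point of the technique.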
When a TXF ack arrives, the SW fetches + * the size of the original TXD descriptor from the txdb and adds it + * to the tx level. When the Tx level drops below some predefined + * threshold, the driver stops the TX queue. When the TX level rises + * above that level, the tx queue is enabled again. + * + * This technique avoids excessive reading of RPTR and WPTR registers. + * As our benchmarks show, it adds 1.5 Gbit/sec to NIC's throughput. + */ +static void tn40_do_tx_db_ptr_next(struct tn40_txdb *db, + struct tn40_tx_map **pptr) +{ + ++*pptr; + if (unlikely(*pptr == db->end)) + *pptr = db->start; +} + +static void tn40_tx_db_inc_rptr(struct tn40_txdb *db) +{ + tn40_do_tx_db_ptr_next(db, &db->rptr); +} + +static void tn40_tx_db_inc_wptr(struct tn40_txdb *db) +{ + tn40_do_tx_db_ptr_next(db, &db->wptr); +} + +static int tn40_tx_db_init(struct tn40_txdb *d, int sz_type) +{ + int memsz = TN40_FIFO_SIZE * (1 << (sz_type + 1)); + + d->start = vzalloc(memsz); + if (!d->start) + return -ENOMEM; + /* In order to differentiate between an empty db state and a full db + * state at least one element should always be empty in order to + * avoid rptr == wptr, which means that the db is empty. + */ + d->size = memsz / sizeof(struct tn40_tx_map) - 1; + d->end = d->start + d->size + 1; /* just after last element */ + + /* All dbs are created empty */ + d->rptr = d->start; + d->wptr = d->start; + return 0; +} + +static void tn40_tx_db_close(struct tn40_txdb *d) +{ + if (d->start) { + vfree(d->start); + d->start = NULL; + } +} + +/* Sizes of tx desc (including padding if needed) as a function of the SKB's + * frag number + * 7 - is number of lwords in txd with one phys buffer + * 3 - is number of lwords used for every additional phys buffer + * for (i = 0; i < TN40_MAX_PBL; i++) { + * lwords = 7 + (i * 3); + * if (lwords & 1) + * lwords++; pad it with 1 lword + * tn40_txd_sizes[i].bytes = lwords << 2; + * tn40_txd_sizes[i].qwords = lwords >> 1; + * } + */ +static struct { + u16 bytes; + u16 qwords; /* qword = 64 bit */ +} tn40_txd_sizes[] = { + {0x20, 0x04}, + {0x28, 0x05}, + {0x38, 0x07}, + {0x40, 0x08}, + {0x50, 0x0a}, + {0x58, 0x0b}, + {0x68, 0x0d}, + {0x70, 0x0e}, + {0x80, 0x10}, + {0x88, 0x11}, + {0x98, 0x13}, + {0xa0, 0x14}, + {0xb0, 0x16}, + {0xb8, 0x17}, + {0xc8, 0x19}, + {0xd0, 0x1a}, + {0xe0, 0x1c}, + {0xe8, 0x1d}, + {0xf8, 0x1f}, +}; + +static void tn40_pbl_set(struct tn40_pbl *pbl, dma_addr_t dma, int len) +{ + pbl->len = cpu_to_le32(len); + pbl->pa_lo = cpu_to_le32(lower_32_bits(dma)); + pbl->pa_hi = cpu_to_le32(upper_32_bits(dma)); +} + +static void tn40_txdb_set(struct tn40_txdb *db, dma_addr_t dma, int len) +{ + db->wptr->len = len; + db->wptr->addr.dma = dma; +} + +struct tn40_mapping_info { + dma_addr_t dma; + size_t size; +}; + +/** + * tn40_tx_map_skb - create and store DMA mappings for skb's data blocks + * @priv: NIC private structure + * @skb: socket buffer to map + * @txdd: pointer to tx descriptor to be updated + * @pkt_len: pointer to unsigned int value + * + * This function creates DMA mappings for skb's data blocks and writes them to + * PBL of a new tx descriptor. It also stores them in the tx db, so they can + * be unmapped after the data has been sent. It is the responsibility of the + * caller to make sure that there is enough space in the txdb. The last + * element holds a pointer to skb itself and is marked with a zero length. + * + * Return: 0 on success and negative value on error.
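The tn40_txd_sizes[] table is precomputed from the formula quoted in its own comment; this standalone check regenerates all 19 entries and can be diffed against the table:

#include <stdio.h>

int main(void)
{
        for (int i = 0; i < 19; i++) {
                int lwords = 7 + i * 3; /* 7 lwords + 3 per extra buffer */

                if (lwords & 1)
                        lwords++;       /* pad to a whole qword */
                printf("{0x%02x, 0x%02x},\n", lwords << 2, lwords >> 1);
        }
        return 0;
}

The first lines printed are {0x20, 0x04} and {0x28, 0x05}, matching the table above.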
+ */ +static int tn40_tx_map_skb(struct tn40_priv *priv, struct sk_buff *skb, + struct tn40_txd_desc *txdd, unsigned int *pkt_len) +{ + struct tn40_mapping_info info[TN40_MAX_PBL]; + int nr_frags = skb_shinfo(skb)->nr_frags; + struct tn40_pbl *pbl = &txdd->pbl[0]; + struct tn40_txdb *db = &priv->txdb; + unsigned int size; + int i, len, ret; + dma_addr_t dma; + + netdev_dbg(priv->ndev, "TX skb %p skbLen %d dataLen %d frags %d\n", skb, + skb->len, skb->data_len, nr_frags); + if (nr_frags > TN40_MAX_PBL - 1) { + ret = skb_linearize(skb); + if (ret) + return ret; + nr_frags = skb_shinfo(skb)->nr_frags; + } + /* initial skb */ + len = skb->len - skb->data_len; + dma = dma_map_single(&priv->pdev->dev, skb->data, len, + DMA_TO_DEVICE); + ret = dma_mapping_error(&priv->pdev->dev, dma); + if (ret) + return ret; + + tn40_txdb_set(db, dma, len); + tn40_pbl_set(pbl++, db->wptr->addr.dma, db->wptr->len); + *pkt_len = db->wptr->len; + + for (i = 0; i < nr_frags; i++) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + + size = skb_frag_size(frag); + dma = skb_frag_dma_map(&priv->pdev->dev, frag, 0, + size, DMA_TO_DEVICE); + + ret = dma_mapping_error(&priv->pdev->dev, dma); + if (ret) + goto mapping_error; + info[i].dma = dma; + info[i].size = size; + } + + for (i = 0; i < nr_frags; i++) { + tn40_tx_db_inc_wptr(db); + tn40_txdb_set(db, info[i].dma, info[i].size); + tn40_pbl_set(pbl++, db->wptr->addr.dma, db->wptr->len); + *pkt_len += db->wptr->len; + } + + /* SHORT_PKT_FIX */ + if (skb->len < TN40_SHORT_PACKET_SIZE) + ++nr_frags; + + /* Add skb clean up info. */ + tn40_tx_db_inc_wptr(db); + db->wptr->len = -tn40_txd_sizes[nr_frags].bytes; + db->wptr->addr.skb = skb; + tn40_tx_db_inc_wptr(db); + + return 0; + mapping_error: + dma_unmap_page(&priv->pdev->dev, db->wptr->addr.dma, db->wptr->len, + DMA_TO_DEVICE); + for (; i > 0; i--) + dma_unmap_page(&priv->pdev->dev, info[i - 1].dma, + info[i - 1].size, DMA_TO_DEVICE); + return -ENOMEM; +} + +static int tn40_create_tx_ring(struct tn40_priv *priv) +{ + int ret; + + ret = tn40_fifo_alloc(priv, &priv->txd_fifo0.m, priv->txd_size, + TN40_REG_TXD_CFG0_0, TN40_REG_TXD_CFG1_0, + TN40_REG_TXD_RPTR_0, TN40_REG_TXD_WPTR_0); + if (ret) + return ret; + + ret = tn40_fifo_alloc(priv, &priv->txf_fifo0.m, priv->txf_size, + TN40_REG_TXF_CFG0_0, TN40_REG_TXF_CFG1_0, + TN40_REG_TXF_RPTR_0, TN40_REG_TXF_WPTR_0); + if (ret) + goto err_free_txd; + + /* The TX db has to keep mappings for all packets sent (on + * TxD) and not yet reclaimed (on TxF). + */ + ret = tn40_tx_db_init(&priv->txdb, max(priv->txd_size, priv->txf_size)); + if (ret) + goto err_free_txf; + + /* SHORT_PKT_FIX */ + priv->b0_len = 64; + priv->b0_va = dma_alloc_coherent(&priv->pdev->dev, priv->b0_len, + &priv->b0_dma, GFP_KERNEL); + if (!priv->b0_va) + goto err_free_db; + + priv->tx_level = TN40_MAX_TX_LEVEL; + priv->tx_update_mark = priv->tx_level - 1024; + return 0; +err_free_db: + tn40_tx_db_close(&priv->txdb); +err_free_txf: + tn40_fifo_free(priv, &priv->txf_fifo0.m); +err_free_txd: + tn40_fifo_free(priv, &priv->txd_fifo0.m); + return -ENOMEM; +} + +/** + * tn40_tx_space - Calculate the available space in the TX fifo. 
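The function whose kernel-doc starts here computes free space as the forward distance from the software write pointer to the hardware read pointer, the mirror image of the RX occupancy math shown earlier. A standalone model with an assumed fifo size:

#include <stdio.h>

#define MEMSZ   4096

static int tx_space(int rptr, int wptr)
{
        int fsize = rptr - wptr;

        if (fsize <= 0)         /* rptr at or behind wptr: wrapped view */
                fsize += MEMSZ;
        return fsize;
}

int main(void)
{
        printf("%d\n", tx_space(1000, 200));    /* 800 */
        printf("%d\n", tx_space(200, 1000));    /* 3296 */
        return 0;
}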
+ * @priv: NIC private structure + * + * Return: available space in TX fifo in bytes + */ +static int tn40_tx_space(struct tn40_priv *priv) +{ + struct tn40_txd_fifo *f = &priv->txd_fifo0; + int fsize; + + f->m.rptr = tn40_read_reg(priv, f->m.reg_rptr) & TN40_TXF_WPTR_WR_PTR; + fsize = f->m.rptr - f->m.wptr; + if (fsize <= 0) + fsize = f->m.memsz + fsize; + return fsize; +} + +#define TN40_TXD_FULL_CHECKSUM 7 + +static netdev_tx_t tn40_start_xmit(struct sk_buff *skb, struct net_device *ndev) +{ + struct tn40_priv *priv = netdev_priv(ndev); + struct tn40_txd_fifo *f = &priv->txd_fifo0; + int txd_checksum = TN40_TXD_FULL_CHECKSUM; + struct tn40_txd_desc *txdd; + int nr_frags, len, err; + unsigned int pkt_len; + int txd_vlan_id = 0; + int txd_lgsnd = 0; + int txd_vtag = 0; + int txd_mss = 0; + + /* Build tx descriptor */ + txdd = (struct tn40_txd_desc *)(f->m.va + f->m.wptr); + err = tn40_tx_map_skb(priv, skb, txdd, &pkt_len); + if (err) { + u64_stats_update_begin(&priv->syncp); + priv->stats.tx_dropped++; + u64_stats_update_end(&priv->syncp); + dev_kfree_skb(skb); + return NETDEV_TX_OK; + } + nr_frags = skb_shinfo(skb)->nr_frags; + if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) + txd_checksum = 0; + + if (skb_shinfo(skb)->gso_size) { + txd_mss = skb_shinfo(skb)->gso_size; + txd_lgsnd = 1; + netdev_dbg(priv->ndev, "skb %p pkt len %d gso size = %d\n", skb, + pkt_len, txd_mss); + } + if (skb_vlan_tag_present(skb)) { + /* Don't cut VLAN ID to 12 bits */ + txd_vlan_id = skb_vlan_tag_get(skb); + txd_vtag = 1; + } + txdd->va_hi = 0; + txdd->va_lo = 0; + txdd->length = cpu_to_le16(pkt_len); + txdd->mss = cpu_to_le16(txd_mss); + txdd->txd_val1 = + cpu_to_le32(TN40_TXD_W1_VAL + (tn40_txd_sizes[nr_frags].qwords, txd_checksum, + txd_vtag, txd_lgsnd, txd_vlan_id)); + netdev_dbg(priv->ndev, "=== w1 qwords[%d] %d =====\n", nr_frags, + tn40_txd_sizes[nr_frags].qwords); + netdev_dbg(priv->ndev, "=== TxD desc =====================\n"); + netdev_dbg(priv->ndev, "=== w1: 0x%x ================\n", + txdd->txd_val1); + netdev_dbg(priv->ndev, "=== w2: mss 0x%x len 0x%x\n", txdd->mss, + txdd->length); + /* SHORT_PKT_FIX */ + if (pkt_len < TN40_SHORT_PACKET_SIZE) { + struct tn40_pbl *pbl = &txdd->pbl[++nr_frags]; + + txdd->length = cpu_to_le16(TN40_SHORT_PACKET_SIZE); + txdd->txd_val1 = + cpu_to_le32(TN40_TXD_W1_VAL + (tn40_txd_sizes[nr_frags].qwords, + txd_checksum, txd_vtag, txd_lgsnd, + txd_vlan_id)); + pbl->len = cpu_to_le32(TN40_SHORT_PACKET_SIZE - pkt_len); + pbl->pa_lo = cpu_to_le32(lower_32_bits(priv->b0_dma)); + pbl->pa_hi = cpu_to_le32(upper_32_bits(priv->b0_dma)); + netdev_dbg(priv->ndev, "=== SHORT_PKT_FIX ==============\n"); + netdev_dbg(priv->ndev, "=== nr_frags : %d ==============\n", + nr_frags); + } + + /* Increment TXD write pointer. In case of fifo wrapping copy + * reminder of the descriptor to the beginning. + */ + f->m.wptr += tn40_txd_sizes[nr_frags].bytes; + len = f->m.wptr - f->m.memsz; + if (unlikely(len >= 0)) { + f->m.wptr = len; + if (len > 0) + memcpy(f->m.va, f->m.va + f->m.memsz, len); + } + /* Force memory writes to complete before letting the HW know + * there are new descriptors to fetch. 
+ */ + wmb(); + + priv->tx_level -= tn40_txd_sizes[nr_frags].bytes; + if (priv->tx_level > priv->tx_update_mark) { + tn40_write_reg(priv, f->m.reg_wptr, + f->m.wptr & TN40_TXF_WPTR_WR_PTR); + } else { + if (priv->tx_noupd++ > TN40_NO_UPD_PACKETS) { + priv->tx_noupd = 0; + tn40_write_reg(priv, f->m.reg_wptr, + f->m.wptr & TN40_TXF_WPTR_WR_PTR); + } + } + + u64_stats_update_begin(&priv->syncp); + priv->stats.tx_packets++; + priv->stats.tx_bytes += pkt_len; + u64_stats_update_end(&priv->syncp); + if (priv->tx_level < TN40_MIN_TX_LEVEL) { + netdev_dbg(priv->ndev, "TX Q STOP level %d\n", priv->tx_level); + netif_stop_queue(ndev); + } + + return NETDEV_TX_OK; +} + +static void tn40_tx_cleanup(struct tn40_priv *priv) +{ + struct tn40_txf_fifo *f = &priv->txf_fifo0; + struct tn40_txdb *db = &priv->txdb; + int tx_level = 0; + + f->m.wptr = tn40_read_reg(priv, f->m.reg_wptr) & TN40_TXF_WPTR_MASK; + + netif_tx_lock(priv->ndev); + while (f->m.wptr != f->m.rptr) { + f->m.rptr += TN40_TXF_DESC_SZ; + f->m.rptr &= f->m.size_mask; + /* Unmap all fragments */ + /* First has to come tx_maps containing DMA */ + do { + dma_addr_t addr = db->rptr->addr.dma; + size_t size = db->rptr->len; + + netif_tx_unlock(priv->ndev); + dma_unmap_page(&priv->pdev->dev, addr, + size, DMA_TO_DEVICE); + netif_tx_lock(priv->ndev); + tn40_tx_db_inc_rptr(db); + } while (db->rptr->len > 0); + tx_level -= db->rptr->len; /* '-' Because the len is negative */ + + /* Now should come skb pointer - free it */ + dev_kfree_skb_any(db->rptr->addr.skb); + netdev_dbg(priv->ndev, "dev_kfree_skb_any %p %d\n", + db->rptr->addr.skb, -db->rptr->len); + tn40_tx_db_inc_rptr(db); + } + + /* Let the HW know which TXF descriptors were cleaned */ + tn40_write_reg(priv, f->m.reg_rptr, f->m.rptr & TN40_TXF_WPTR_WR_PTR); + + /* We reclaimed resources, so in case the Q is stopped by xmit + * callback, we resume the transmission and use tx_lock to + * synchronize with xmit. + */ + priv->tx_level += tx_level; + if (priv->tx_noupd) { + priv->tx_noupd = 0; + tn40_write_reg(priv, priv->txd_fifo0.m.reg_wptr, + priv->txd_fifo0.m.wptr & TN40_TXF_WPTR_WR_PTR); + } + if (unlikely(netif_queue_stopped(priv->ndev) && + netif_carrier_ok(priv->ndev) && + (priv->tx_level >= TN40_MAX_TX_LEVEL / 2))) { + netdev_dbg(priv->ndev, "TX Q WAKE level %d\n", priv->tx_level); + netif_wake_queue(priv->ndev); + } + netif_tx_unlock(priv->ndev); +} + +static void tn40_tx_free_skbs(struct tn40_priv *priv) +{ + struct tn40_txdb *db = &priv->txdb; + + while (db->rptr != db->wptr) { + if (likely(db->rptr->len)) + dma_unmap_page(&priv->pdev->dev, db->rptr->addr.dma, + db->rptr->len, DMA_TO_DEVICE); + else + dev_kfree_skb(db->rptr->addr.skb); + tn40_tx_db_inc_rptr(db); + } +} + +static void tn40_destroy_tx_ring(struct tn40_priv *priv) +{ + tn40_tx_free_skbs(priv); + tn40_fifo_free(priv, &priv->txd_fifo0.m); + tn40_fifo_free(priv, &priv->txf_fifo0.m); + tn40_tx_db_close(&priv->txdb); + /* SHORT_PKT_FIX */ + if (priv->b0_len) { + dma_free_coherent(&priv->pdev->dev, priv->b0_len, priv->b0_va, + priv->b0_dma); + priv->b0_len = 0; + } +} + +/** + * tn40_tx_push_desc - Push a descriptor to TxD fifo. + * + * @priv: NIC private structure + * @data: desc's data + * @size: desc's size + * + * This function pushes desc to TxD fifo and overlaps it if needed. + * + * This function does not check for available space, nor does it check + * that the data size is smaller than the fifo size. Checking for + * space is the responsibility of the caller. 
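Worth noting about the tn40_tx_cleanup() walk above: the txdb records use a sign convention, positive lengths are DMA mappings to unmap, and the entry closing a packet stores the whole descriptor size negated alongside the skb pointer. A toy model of one packet's records; the addresses and sizes are made up:

#include <stdio.h>

struct tx_map {
        union { unsigned long dma; void *skb; } addr;
        int len;        /* > 0: DMA mapping; < 0: -descriptor size, addr.skb valid */
};

int main(void)
{
        struct tx_map db[] = {
                { .addr.dma = 0x1000, .len = 1448 },            /* fragment */
                { .addr.dma = 0x2000, .len = 52 },              /* fragment */
                { .addr.skb = (void *)"skb", .len = -0x28 },    /* sentinel */
        };
        int tx_level = 0, i = 0;

        while (db[i].len > 0)   /* cleanup unmaps these */
                i++;
        tx_level -= db[i].len;  /* '-' because the stored size is negative */
        printf("gave %d bytes back to the tx level\n", tx_level); /* 40 */
        return 0;
}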
+ */ +static void tn40_tx_push_desc(struct tn40_priv *priv, void *data, int size) +{ + struct tn40_txd_fifo *f = &priv->txd_fifo0; + int i = f->m.memsz - f->m.wptr; + + if (size == 0) + return; + + if (i > size) { + memcpy(f->m.va + f->m.wptr, data, size); + f->m.wptr += size; + } else { + memcpy(f->m.va + f->m.wptr, data, i); + f->m.wptr = size - i; + memcpy(f->m.va, data + i, f->m.wptr); + } + tn40_write_reg(priv, f->m.reg_wptr, f->m.wptr & TN40_TXF_WPTR_WR_PTR); +} + +/** + * tn40_tx_push_desc_safe - push descriptor to TxD fifo in a safe way. + * + * @priv: NIC private structure + * @data: descriptor data + * @size: descriptor size + * + * This function does check for available space and, if necessary, + * waits for the NIC to read existing data before writing new data. + */ +static void tn40_tx_push_desc_safe(struct tn40_priv *priv, void *data, int size) +{ + int timer = 0; + + while (size > 0) { + /* We subtract 8 because when the fifo is full rptr == + * wptr, which also means that fifo is empty, we can + * understand the difference, but could the HW do the + * same ??? + */ + int avail = tn40_tx_space(priv) - 8; + + if (avail <= 0) { + if (timer++ > 300) /* Prevent endless loop */ + break; + /* Give the HW a chance to clean the fifo */ + usleep_range(50, 60); + continue; + } + avail = min(avail, size); + netdev_dbg(priv->ndev, + "about to push %d bytes starting %p size %d\n", + avail, data, size); + tn40_tx_push_desc(priv, data, avail); + size -= avail; + data += avail; + } +} + +int tn40_set_link_speed(struct tn40_priv *priv, u32 speed) +{ + u32 val; + int i; + + netdev_dbg(priv->ndev, "speed %d\n", speed); + switch (speed) { + case SPEED_10000: + case SPEED_5000: + case SPEED_2500: + netdev_dbg(priv->ndev, "link_speed %d\n", speed); + + tn40_write_reg(priv, 0x1010, 0x217); /*ETHSD.REFCLK_CONF */ + tn40_write_reg(priv, 0x104c, 0x4c); /*ETHSD.L0_RX_PCNT */ + tn40_write_reg(priv, 0x1050, 0x4c); /*ETHSD.L1_RX_PCNT */ + tn40_write_reg(priv, 0x1054, 0x4c); /*ETHSD.L2_RX_PCNT */ + tn40_write_reg(priv, 0x1058, 0x4c); /*ETHSD.L3_RX_PCNT */ + tn40_write_reg(priv, 0x102c, 0x434); /*ETHSD.L0_TX_PCNT */ + tn40_write_reg(priv, 0x1030, 0x434); /*ETHSD.L1_TX_PCNT */ + tn40_write_reg(priv, 0x1034, 0x434); /*ETHSD.L2_TX_PCNT */ + tn40_write_reg(priv, 0x1038, 0x434); /*ETHSD.L3_TX_PCNT */ + tn40_write_reg(priv, 0x6300, 0x0400); /*MAC.PCS_CTRL */ + + tn40_write_reg(priv, 0x1018, 0x00); /*Mike2 */ + udelay(5); + tn40_write_reg(priv, 0x1018, 0x04); /*Mike2 */ + udelay(5); + tn40_write_reg(priv, 0x1018, 0x06); /*Mike2 */ + udelay(5); + /*MikeFix1 */ + /*L0: 0x103c , L1: 0x1040 , L2: 0x1044 , L3: 0x1048 =0x81644 */ + tn40_write_reg(priv, 0x103c, 0x81644); /*ETHSD.L0_TX_DCNT */ + tn40_write_reg(priv, 0x1040, 0x81644); /*ETHSD.L1_TX_DCNT */ + tn40_write_reg(priv, 0x1044, 0x81644); /*ETHSD.L2_TX_DCNT */ + tn40_write_reg(priv, 0x1048, 0x81644); /*ETHSD.L3_TX_DCNT */ + tn40_write_reg(priv, 0x1014, 0x043); /*ETHSD.INIT_STAT */ + for (i = 1000; i; i--) { + usleep_range(50, 60); + /*ETHSD.INIT_STAT */ + val = tn40_read_reg(priv, 0x1014); + if (val & (1 << 9)) { + /*ETHSD.INIT_STAT */ + tn40_write_reg(priv, 0x1014, 0x3); + /*ETHSD.INIT_STAT */ + val = tn40_read_reg(priv, 0x1014); + + break; + } + } + if (!i) + netdev_err(priv->ndev, "MAC init timeout!\n"); + + tn40_write_reg(priv, 0x6350, 0x0); /*MAC.PCS_IF_MODE */ + tn40_write_reg(priv, TN40_REG_CTRLST, 0xC13); /*0x93//0x13 */ + tn40_write_reg(priv, 0x111c, 0x7ff); /*MAC.MAC_RST_CNT */ + usleep_range(2000, 2100); + + tn40_write_reg(priv, 0x111c, 0x0); 
/*MAC.MAC_RST_CNT */ + break; + + case SPEED_1000: + case SPEED_100: + tn40_write_reg(priv, 0x1010, 0x613); /*ETHSD.REFCLK_CONF */ + tn40_write_reg(priv, 0x104c, 0x4d); /*ETHSD.L0_RX_PCNT */ + tn40_write_reg(priv, 0x1050, 0x0); /*ETHSD.L1_RX_PCNT */ + tn40_write_reg(priv, 0x1054, 0x0); /*ETHSD.L2_RX_PCNT */ + tn40_write_reg(priv, 0x1058, 0x0); /*ETHSD.L3_RX_PCNT */ + tn40_write_reg(priv, 0x102c, 0x35); /*ETHSD.L0_TX_PCNT */ + tn40_write_reg(priv, 0x1030, 0x0); /*ETHSD.L1_TX_PCNT */ + tn40_write_reg(priv, 0x1034, 0x0); /*ETHSD.L2_TX_PCNT */ + tn40_write_reg(priv, 0x1038, 0x0); /*ETHSD.L3_TX_PCNT */ + tn40_write_reg(priv, 0x6300, 0x01140); /*MAC.PCS_CTRL */ + + tn40_write_reg(priv, 0x1014, 0x043); /*ETHSD.INIT_STAT */ + for (i = 1000; i; i--) { + usleep_range(50, 60); + val = tn40_read_reg(priv, 0x1014); /*ETHSD.INIT_STAT */ + if (val & (1 << 9)) { + /*ETHSD.INIT_STAT */ + tn40_write_reg(priv, 0x1014, 0x3); + /*ETHSD.INIT_STAT */ + val = tn40_read_reg(priv, 0x1014); + + break; + } + } + if (!i) + netdev_err(priv->ndev, "MAC init timeout!\n"); + + tn40_write_reg(priv, 0x6350, 0x2b); /*MAC.PCS_IF_MODE 1g */ + tn40_write_reg(priv, 0x6310, 0x9801); /*MAC.PCS_DEV_AB */ + + tn40_write_reg(priv, 0x6314, 0x1); /*MAC.PCS_PART_AB */ + tn40_write_reg(priv, 0x6348, 0xc8); /*MAC.PCS_LINK_LO */ + tn40_write_reg(priv, 0x634c, 0xc8); /*MAC.PCS_LINK_HI */ + usleep_range(50, 60); + tn40_write_reg(priv, TN40_REG_CTRLST, 0xC13); /*0x93//0x13 */ + tn40_write_reg(priv, 0x111c, 0x7ff); /*MAC.MAC_RST_CNT */ + usleep_range(2000, 2100); + + tn40_write_reg(priv, 0x111c, 0x0); /*MAC.MAC_RST_CNT */ + tn40_write_reg(priv, 0x6300, 0x1140); /*MAC.PCS_CTRL */ + break; + + case 0: /* Link down */ + tn40_write_reg(priv, 0x104c, 0x0); /*ETHSD.L0_RX_PCNT */ + tn40_write_reg(priv, 0x1050, 0x0); /*ETHSD.L1_RX_PCNT */ + tn40_write_reg(priv, 0x1054, 0x0); /*ETHSD.L2_RX_PCNT */ + tn40_write_reg(priv, 0x1058, 0x0); /*ETHSD.L3_RX_PCNT */ + tn40_write_reg(priv, 0x102c, 0x0); /*ETHSD.L0_TX_PCNT */ + tn40_write_reg(priv, 0x1030, 0x0); /*ETHSD.L1_TX_PCNT */ + tn40_write_reg(priv, 0x1034, 0x0); /*ETHSD.L2_TX_PCNT */ + tn40_write_reg(priv, 0x1038, 0x0); /*ETHSD.L3_TX_PCNT */ + + tn40_write_reg(priv, TN40_REG_CTRLST, 0x800); + tn40_write_reg(priv, 0x111c, 0x7ff); /*MAC.MAC_RST_CNT */ + usleep_range(2000, 2100); + + tn40_write_reg(priv, 0x111c, 0x0); /*MAC.MAC_RST_CNT */ + break; + + default: + netdev_err(priv->ndev, + "Link speed was not identified yet (%d)\n", speed); + speed = 0; + break; + } + return speed; +} + +static void tn40_link_changed(struct tn40_priv *priv) +{ + u32 link = tn40_read_reg(priv, + TN40_REG_MAC_LNK_STAT) & TN40_MAC_LINK_STAT; + + netdev_dbg(priv->ndev, "link changed %u\n", link); +} + +static void tn40_isr_extra(struct tn40_priv *priv, u32 isr) +{ + if (isr & (TN40_IR_LNKCHG0 | TN40_IR_LNKCHG1 | TN40_IR_TMR0)) { + netdev_dbg(priv->ndev, "isr = 0x%x\n", isr); + tn40_link_changed(priv); + } +} + +static irqreturn_t tn40_isr_napi(int irq, void *dev) +{ + struct tn40_priv *priv = netdev_priv((struct net_device *)dev); + u32 isr; + + isr = tn40_read_reg(priv, TN40_REG_ISR_MSK0); + + if (unlikely(!isr)) { + tn40_enable_interrupts(priv); + return IRQ_NONE; /* Not our interrupt */ + } + + if (isr & TN40_IR_EXTRA) + tn40_isr_extra(priv, isr); + + if (isr & (TN40_IR_RX_DESC_0 | TN40_IR_TX_FREE_0 | TN40_IR_TMR1)) { + if (likely(napi_schedule_prep(&priv->napi))) { + __napi_schedule(&priv->napi); + return IRQ_HANDLED; + } + /* We get here if an interrupt has slept into the + * small time window between these lines in + * 
tn40_poll: tn40_enable_interrupts(priv); return 0; + * + * Currently interrupts are disabled (since we read + * the ISR register) and we have failed to register + * the next poll. So we read the regs to trigger the + * chip and allow further interrupts. + */ + tn40_read_reg(priv, TN40_REG_TXF_WPTR_0); + tn40_read_reg(priv, TN40_REG_RXD_WPTR_0); + } + + tn40_enable_interrupts(priv); + return IRQ_HANDLED; +} + +static int tn40_poll(struct napi_struct *napi, int budget) +{ + struct tn40_priv *priv = container_of(napi, struct tn40_priv, napi); + int work_done; + + tn40_tx_cleanup(priv); + + if (!budget) + return 0; + + work_done = tn40_rx_receive(priv, budget); + if (work_done == budget) + return budget; + + if (napi_complete_done(napi, work_done)) + tn40_enable_interrupts(priv); + return work_done; +} + +static int tn40_fw_load(struct tn40_priv *priv) +{ + const struct firmware *fw = NULL; + int master, ret; + u32 val; + + ret = request_firmware(&fw, TN40_FIRMWARE_NAME, &priv->pdev->dev); + if (ret) + return ret; + + master = tn40_read_reg(priv, TN40_REG_INIT_SEMAPHORE); + if (!tn40_read_reg(priv, TN40_REG_INIT_STATUS) && master) { + netdev_dbg(priv->ndev, "Loading FW...\n"); + tn40_tx_push_desc_safe(priv, (void *)fw->data, fw->size); + msleep(100); + } + ret = read_poll_timeout(tn40_read_reg, val, val, 2000, 400000, false, + priv, TN40_REG_INIT_STATUS); + if (master) + tn40_write_reg(priv, TN40_REG_INIT_SEMAPHORE, 1); + + if (ret) { + netdev_err(priv->ndev, "firmware loading failed\n"); + netdev_dbg(priv->ndev, "VPC: 0x%x VIC: 0x%x STATUS: 0x%x\n", + tn40_read_reg(priv, TN40_REG_VPC), + tn40_read_reg(priv, TN40_REG_VIC), + tn40_read_reg(priv, TN40_REG_INIT_STATUS)); + ret = -EIO; + } else { + netdev_dbg(priv->ndev, "firmware loaded successfully\n"); + } + release_firmware(fw); + return ret; +} + +static void tn40_restore_mac(struct net_device *ndev, struct tn40_priv *priv) +{ + u32 val; + + netdev_dbg(priv->ndev, "mac0 =%x mac1 =%x mac2 =%x\n", + tn40_read_reg(priv, TN40_REG_UNC_MAC0_A), + tn40_read_reg(priv, TN40_REG_UNC_MAC1_A), + tn40_read_reg(priv, TN40_REG_UNC_MAC2_A)); + + val = (ndev->dev_addr[0] << 8) | (ndev->dev_addr[1]); + tn40_write_reg(priv, TN40_REG_UNC_MAC2_A, val); + val = (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]); + tn40_write_reg(priv, TN40_REG_UNC_MAC1_A, val); + val = (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]); + tn40_write_reg(priv, TN40_REG_UNC_MAC0_A, val); + + /* Also mirror the MAC address into the MAC_ADDR registers */ + tn40_write_reg(priv, TN40_REG_MAC_ADDR_0, + (ndev->dev_addr[3] << 24) | (ndev->dev_addr[2] << 16) | + (ndev->dev_addr[1] << 8) | (ndev->dev_addr[0])); + tn40_write_reg(priv, TN40_REG_MAC_ADDR_1, + (ndev->dev_addr[5] << 8) | (ndev->dev_addr[4])); + + netdev_dbg(priv->ndev, "mac0 =%x mac1 =%x mac2 =%x\n", + tn40_read_reg(priv, TN40_REG_UNC_MAC0_A), + tn40_read_reg(priv, TN40_REG_UNC_MAC1_A), + tn40_read_reg(priv, TN40_REG_UNC_MAC2_A)); +} + +static void tn40_hw_start(struct tn40_priv *priv) +{ + tn40_write_reg(priv, TN40_REG_FRM_LENGTH, 0x3FE0); + tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, 0x10fd); + /*MikeFix1 */ + /*L0: 0x103c , L1: 0x1040 , L2: 0x1044 , L3: 0x1048 =0x81644 */ + tn40_write_reg(priv, 0x103c, 0x81644); /*ETHSD.L0_TX_DCNT */ + tn40_write_reg(priv, 0x1040, 0x81644); /*ETHSD.L1_TX_DCNT */ + tn40_write_reg(priv, 0x1044, 0x81644); /*ETHSD.L2_TX_DCNT */ + tn40_write_reg(priv, 0x1048, 0x81644); /*ETHSD.L3_TX_DCNT */ + tn40_write_reg(priv, TN40_REG_RX_FIFO_SECTION, 0x10); + tn40_write_reg(priv, TN40_REG_TX_FIFO_SECTION, 0xE00010); + tn40_write_reg(priv, 
TN40_REG_RX_FULLNESS, 0); + tn40_write_reg(priv, TN40_REG_TX_FULLNESS, 0); + + tn40_write_reg(priv, TN40_REG_VGLB, 0); + tn40_write_reg(priv, TN40_REG_MAX_FRAME_A, + priv->rxf_fifo0.m.pktsz & TN40_MAX_FRAME_AB_VAL); + tn40_write_reg(priv, TN40_REG_RDINTCM0, priv->rdintcm); + tn40_write_reg(priv, TN40_REG_RDINTCM2, 0); + + /* old val = 0x300064 */ + /* Enable timer interrupt once in 2 secs. */ + tn40_write_reg(priv, TN40_REG_TDINTCM0, priv->tdintcm); + + tn40_restore_mac(priv->ndev, priv); + + /* Pause frame */ + tn40_write_reg(priv, 0x12E0, 0x28); + tn40_write_reg(priv, TN40_REG_PAUSE_QUANT, 0xFFFF); + tn40_write_reg(priv, 0x6064, 0xF); + + tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, + TN40_GMAC_RX_FILTER_OSEN | TN40_GMAC_RX_FILTER_TXFC | + TN40_GMAC_RX_FILTER_AM | TN40_GMAC_RX_FILTER_AB); + + tn40_enable_interrupts(priv); +} + +static int tn40_hw_reset(struct tn40_priv *priv) +{ + u32 val; + + /* Reset sequences: read, write 1, read, write 0 */ + val = tn40_read_reg(priv, TN40_REG_CLKPLL); + tn40_write_reg(priv, TN40_REG_CLKPLL, (val | TN40_CLKPLL_SFTRST) + 0x8); + usleep_range(50, 60); + val = tn40_read_reg(priv, TN40_REG_CLKPLL); + tn40_write_reg(priv, TN40_REG_CLKPLL, val & ~TN40_CLKPLL_SFTRST); + + /* Check that the PLLs are locked and reset ended */ + val = read_poll_timeout(tn40_read_reg, val, + (val & TN40_CLKPLL_LKD) == TN40_CLKPLL_LKD, + 10000, 700000, false, priv, TN40_REG_CLKPLL); + if (val) + return -EIO; + + usleep_range(50, 60); + /* Do any PCI-E read transaction */ + tn40_read_reg(priv, TN40_REG_RXD_CFG0_0); + return 0; +} + +static void tn40_sw_reset(struct tn40_priv *priv) +{ + int i, ret; + u32 val; + + /* 1. load MAC (obsolete) */ + /* 2. disable Rx (and Tx) */ + tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, 0); + msleep(100); + /* 3. Disable port */ + tn40_write_reg(priv, TN40_REG_DIS_PORT, 1); + /* 4. Disable queue */ + tn40_write_reg(priv, TN40_REG_DIS_QU, 1); + /* 5. Wait until hw is disabled */ + ret = read_poll_timeout(tn40_read_reg, val, val & 1, 10000, 500000, + false, priv, TN40_REG_RST_PORT); + if (ret) + netdev_err(priv->ndev, "SW reset timeout, continuing anyway\n"); + + /* 6. Disable interrupts */ + tn40_write_reg(priv, TN40_REG_RDINTCM0, 0); + tn40_write_reg(priv, TN40_REG_TDINTCM0, 0); + tn40_write_reg(priv, TN40_REG_IMR, 0); + tn40_read_reg(priv, TN40_REG_ISR); + + /* 7. Reset queue */ + tn40_write_reg(priv, TN40_REG_RST_QU, 1); + /* 8. Reset port */ + tn40_write_reg(priv, TN40_REG_RST_PORT, 1); + /* 9. Zero all read and write pointers */ + for (i = TN40_REG_TXD_WPTR_0; i <= TN40_REG_TXF_RPTR_3; i += 0x10) + tn40_write_reg(priv, i, 0); + /* 10. Unset port disable */ + tn40_write_reg(priv, TN40_REG_DIS_PORT, 0); + /* 11. Unset queue disable */ + tn40_write_reg(priv, TN40_REG_DIS_QU, 0); + /* 12. Unset queue reset */ + tn40_write_reg(priv, TN40_REG_RST_QU, 0); + /* 13. Unset port reset */ + tn40_write_reg(priv, TN40_REG_RST_PORT, 0); + /* 14. Enable Rx */ + /* Skipped. will be done later */ +}
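+ +/* Bring-up path: tn40_start() creates the tx and rx rings, pre-fills the rx + * fifo with buffers, requests the (shared) IRQ and only then starts the HW. + */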
+ +static int tn40_start(struct tn40_priv *priv) +{ + int ret; + + ret = tn40_create_tx_ring(priv); + if (ret) { + netdev_err(priv->ndev, "failed to init the tx ring: %d\n", ret); + return ret; + } + + ret = tn40_create_rx_ring(priv); + if (ret) { + netdev_err(priv->ndev, "failed to init the rx ring: %d\n", ret); + goto err_tx_ring; + } + + tn40_rx_alloc_buffers(priv); + if (tn40_rxdb_available(priv->rxdb0) != 1) { + ret = -ENOMEM; + netdev_err(priv->ndev, "failed to allocate rx buffers\n"); + goto err_rx_ring; + } + + ret = request_irq(priv->pdev->irq, &tn40_isr_napi, IRQF_SHARED, + priv->ndev->name, priv->ndev); + if (ret) { + netdev_err(priv->ndev, "failed to request irq %d\n", ret); + goto err_rx_ring; + } + + tn40_hw_start(priv); + return 0; +err_rx_ring: + tn40_destroy_rx_ring(priv); +err_tx_ring: + tn40_destroy_tx_ring(priv); + return ret; +} + +static void tn40_stop(struct tn40_priv *priv) +{ + tn40_disable_interrupts(priv); + free_irq(priv->pdev->irq, priv->ndev); + tn40_sw_reset(priv); + tn40_destroy_tx_ring(priv); + tn40_destroy_rx_ring(priv); +} + +static int tn40_close(struct net_device *ndev) +{ + struct tn40_priv *priv = netdev_priv(ndev); + + phylink_stop(priv->phylink); + phylink_disconnect_phy(priv->phylink); + + napi_disable(&priv->napi); + netif_napi_del(&priv->napi); + tn40_stop(priv); + return 0; +} + +static int tn40_open(struct net_device *dev) +{ + struct tn40_priv *priv = netdev_priv(dev); + int ret; + + ret = phylink_connect_phy(priv->phylink, priv->phydev); + if (ret) { + netdev_err(dev, "failed to connect to phy %d\n", ret); + return ret; + } + tn40_sw_reset(priv); + ret = tn40_start(priv); + if (ret) { + phylink_disconnect_phy(priv->phylink); + netdev_err(dev, "failed to start %d\n", ret); + return ret; + } + napi_enable(&priv->napi); + phylink_start(priv->phylink); + netif_start_queue(priv->ndev); + return 0; +} + +static void __tn40_vlan_rx_vid(struct net_device *ndev, uint16_t vid, + int enable) +{ + struct tn40_priv *priv = netdev_priv(ndev); + u32 reg, bit, val; + + netdev_dbg(priv->ndev, "vid=%d value=%d\n", (int)vid, enable); + reg = TN40_REG_VLAN_0 + (vid / 32) * 4; + bit = 1 << vid % 32; + val = tn40_read_reg(priv, reg); + netdev_dbg(priv->ndev, "reg=0x%x, val=0x%x, bit=%d\n", reg, val, bit); + if (enable) + val |= bit; + else + val &= ~bit; + netdev_dbg(priv->ndev, "new val 0x%x\n", val); + tn40_write_reg(priv, reg, val); +} + +static int tn40_vlan_rx_add_vid(struct net_device *ndev, + __always_unused __be16 proto, u16 vid) +{ + __tn40_vlan_rx_vid(ndev, vid, 1); + return 0; +} + +static int tn40_vlan_rx_kill_vid(struct net_device *ndev, + __always_unused __be16 proto, u16 vid) +{ + __tn40_vlan_rx_vid(ndev, vid, 0); + return 0; +} + +static void tn40_setmulti(struct net_device *ndev) +{ + u32 rxf_val = TN40_GMAC_RX_FILTER_AM | TN40_GMAC_RX_FILTER_AB | + TN40_GMAC_RX_FILTER_OSEN | TN40_GMAC_RX_FILTER_TXFC; + struct tn40_priv *priv = netdev_priv(ndev); + int i; + + /* IMF - imperfect (hash) rx multicast filter */ + /* PMF - perfect rx multicast filter */ + + /* FIXME: RXE(OFF) */ + if (ndev->flags & IFF_PROMISC) { + rxf_val |= TN40_GMAC_RX_FILTER_PRM; + } else if (ndev->flags & IFF_ALLMULTI) { + /* set IMF to accept all multicast frames */ + for (i = 0; i < TN40_MAC_MCST_HASH_NUM; i++) + tn40_write_reg(priv, + TN40_REG_RX_MCST_HASH0 + i * 4, ~0); + } else if (netdev_mc_count(ndev)) { + struct netdev_hw_addr *mclist; + u32 reg, val; + u8 hash; + + /* Set IMF to deny all multicast frames */ + for (i = 0; i < TN40_MAC_MCST_HASH_NUM; i++) + tn40_write_reg(priv, TN40_REG_RX_MCST_HASH0 + i * 4, 0);
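+ + /* IMF hash used below: XOR of all six address bytes; bits 7:5 of + * the hash select one of the 8 hash registers and bits 4:0 the bit + * within it. E.g. 01:00:5e:00:00:01 hashes to 0x5e: register 2, + * bit 30. + */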
+ + /* Set PMF to deny all multicast frames */ + for (i = 0; i < TN40_MAC_MCST_NUM; i++) { + tn40_write_reg(priv, + TN40_REG_RX_MAC_MCST0 + i * 8, 0); + tn40_write_reg(priv, + TN40_REG_RX_MAC_MCST1 + i * 8, 0); + } + /* Use PMF to accept first MAC_MCST_NUM (15) addresses */ + + /* TBD: Sort the addresses and write them in ascending + * order into the RX_MAC_MCST regs. We skip this phase for + * now and accept ALL multicast frames through the IMF, + * including the addresses that do not fit into the PMF. + */ + netdev_for_each_mc_addr(mclist, ndev) { + hash = 0; + for (i = 0; i < ETH_ALEN; i++) + hash ^= mclist->addr[i]; + + reg = TN40_REG_RX_MCST_HASH0 + ((hash >> 5) << 2); + val = tn40_read_reg(priv, reg); + val |= (1 << (hash % 32)); + tn40_write_reg(priv, reg, val); + } + } else { + rxf_val |= TN40_GMAC_RX_FILTER_AB; + } + tn40_write_reg(priv, TN40_REG_GMAC_RXF_A, rxf_val); + /* Enable RX */ + /* FIXME: RXE(ON) */ +} + +static int tn40_set_mac(struct net_device *ndev, void *p) +{ + struct tn40_priv *priv = netdev_priv(ndev); + struct sockaddr *addr = p; + + eth_hw_addr_set(ndev, addr->sa_data); + tn40_restore_mac(ndev, priv); + return 0; +} + +static void tn40_mac_init(struct tn40_priv *priv) +{ + u8 addr[ETH_ALEN]; + u64 val; + + val = (u64)tn40_read_reg(priv, TN40_REG_UNC_MAC0_A); + val |= (u64)tn40_read_reg(priv, TN40_REG_UNC_MAC1_A) << 16; + val |= (u64)tn40_read_reg(priv, TN40_REG_UNC_MAC2_A) << 32; + + u64_to_ether_addr(val, addr); + eth_hw_addr_set(priv->ndev, addr); +} + +static void tn40_get_stats(struct net_device *ndev, + struct rtnl_link_stats64 *stats) +{ + struct tn40_priv *priv = netdev_priv(ndev); + unsigned int start; + + do { + start = u64_stats_fetch_begin(&priv->syncp); + stats->tx_packets = priv->stats.tx_packets; + stats->tx_bytes = priv->stats.tx_bytes; + stats->tx_dropped = priv->stats.tx_dropped; + + stats->rx_packets = priv->stats.rx_packets; + stats->rx_bytes = priv->stats.rx_bytes; + stats->rx_dropped = priv->stats.rx_dropped; + stats->rx_errors = priv->stats.rx_errors; + } while (u64_stats_fetch_retry(&priv->syncp, start)); +} + +static const struct net_device_ops tn40_netdev_ops = { + .ndo_open = tn40_open, + .ndo_stop = tn40_close, + .ndo_start_xmit = tn40_start_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = tn40_setmulti, + .ndo_get_stats64 = tn40_get_stats, + .ndo_set_mac_address = tn40_set_mac, + .ndo_vlan_rx_add_vid = tn40_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = tn40_vlan_rx_kill_vid, +}; + +static int tn40_ethtool_get_link_ksettings(struct net_device *ndev, + struct ethtool_link_ksettings *cmd) +{ + struct tn40_priv *priv = netdev_priv(ndev); + + return phylink_ethtool_ksettings_get(priv->phylink, cmd); +} + +static const struct ethtool_ops tn40_ethtool_ops = { + .get_link = ethtool_op_get_link, + .get_link_ksettings = tn40_ethtool_get_link_ksettings, +}; + +static void tn40_get_queue_stats_rx(struct net_device *ndev, int idx, + struct netdev_queue_stats_rx *stats) +{ + struct tn40_priv *priv = netdev_priv(ndev); + unsigned int start; + + do { + start = u64_stats_fetch_begin(&priv->syncp); + + stats->packets = priv->stats.rx_packets; + stats->bytes = priv->stats.rx_bytes; + stats->alloc_fail = priv->alloc_fail; + } while (u64_stats_fetch_retry(&priv->syncp, start)); +} + +static void tn40_get_queue_stats_tx(struct net_device *ndev, int idx, + struct netdev_queue_stats_tx *stats) +{ + struct tn40_priv *priv = netdev_priv(ndev); + unsigned int start; + + do { + start = 
u64_stats_fetch_begin(&priv->syncp); + + stats->packets = priv->stats.tx_packets; + stats->bytes = priv->stats.tx_bytes; + } while (u64_stats_fetch_retry(&priv->syncp, start)); +} + +static void tn40_get_base_stats(struct net_device *ndev, + struct netdev_queue_stats_rx *rx, + struct netdev_queue_stats_tx *tx) +{ + rx->packets = 0; + rx->bytes = 0; + rx->alloc_fail = 0; + + tx->packets = 0; + tx->bytes = 0; +} + +static const struct netdev_stat_ops tn40_stat_ops = { + .get_queue_stats_rx = tn40_get_queue_stats_rx, + .get_queue_stats_tx = tn40_get_queue_stats_tx, + .get_base_stats = tn40_get_base_stats, +}; + +static int tn40_priv_init(struct tn40_priv *priv) +{ + int ret; + + tn40_set_link_speed(priv, 0); + + /* Set GPIO[9:0] to output 0 */ + tn40_write_reg(priv, 0x51E0, 0x30010006); /* GPIO_OE_ WR CMD */ + tn40_write_reg(priv, 0x51F0, 0x0); /* GPIO_OE_ DATA */ + tn40_write_reg(priv, TN40_REG_MDIO_CMD_STAT, 0x3ec8); + + /* We use tx descriptors to load the firmware. */ + ret = tn40_create_tx_ring(priv); + if (ret) + return ret; + ret = tn40_fw_load(priv); + tn40_destroy_tx_ring(priv); + return ret; +} + +static struct net_device *tn40_netdev_alloc(struct pci_dev *pdev) +{ + struct net_device *ndev; + + ndev = devm_alloc_etherdev(&pdev->dev, sizeof(struct tn40_priv)); + if (!ndev) + return NULL; + ndev->netdev_ops = &tn40_netdev_ops; + ndev->ethtool_ops = &tn40_ethtool_ops; + ndev->stat_ops = &tn40_stat_ops; + ndev->tx_queue_len = TN40_NDEV_TXQ_LEN; + ndev->mem_start = pci_resource_start(pdev, 0); + ndev->mem_end = pci_resource_end(pdev, 0); + ndev->min_mtu = ETH_ZLEN; + ndev->max_mtu = TN40_MAX_MTU; + + ndev->features = NETIF_F_IP_CSUM | + NETIF_F_SG | + NETIF_F_FRAGLIST | + NETIF_F_TSO | NETIF_F_GRO | + NETIF_F_RXCSUM | + NETIF_F_RXHASH | + NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | + NETIF_F_HW_VLAN_CTAG_FILTER; + ndev->vlan_features = NETIF_F_IP_CSUM | + NETIF_F_SG | + NETIF_F_TSO | NETIF_F_GRO | NETIF_F_RXHASH; + + if (dma_get_mask(&pdev->dev) == DMA_BIT_MASK(64)) { + ndev->features |= NETIF_F_HIGHDMA; + ndev->vlan_features |= NETIF_F_HIGHDMA; + } + ndev->hw_features |= ndev->features; + + SET_NETDEV_DEV(ndev, &pdev->dev); + netif_stop_queue(ndev); + return ndev; +} + +static int tn40_probe(struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct net_device *ndev; + struct tn40_priv *priv; + unsigned int nvec = 1; + void __iomem *regs; + int ret; + + ret = pci_enable_device(pdev); + if (ret) + return ret; + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_err(&pdev->dev, "failed to set DMA mask.\n"); + goto err_disable_device; + } + + ret = pci_request_regions(pdev, TN40_DRV_NAME); + if (ret) { + dev_err(&pdev->dev, "failed to request PCI regions.\n"); + goto err_disable_device; + } + + pci_set_master(pdev); + + regs = pci_iomap(pdev, 0, TN40_REGS_SIZE); + if (!regs) { + ret = -EIO; + dev_err(&pdev->dev, "failed to map PCI bar.\n"); + goto err_free_regions; + } + + ndev = tn40_netdev_alloc(pdev); + if (!ndev) { + ret = -ENOMEM; + dev_err(&pdev->dev, "failed to allocate netdev.\n"); + goto err_iounmap; + } + + priv = netdev_priv(ndev); + pci_set_drvdata(pdev, priv); + netif_napi_add(ndev, &priv->napi, tn40_poll); + + priv->regs = regs; + priv->pdev = pdev; + priv->ndev = ndev; + /* Initialize fifo sizes. */ + priv->txd_size = 3; + priv->txf_size = 3; + priv->rxd_size = 3; + priv->rxf_size = 3; + /* Set the initial interrupt coalescing register values. */
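+ /* TN40_INT_REG_VAL() packs the coalescing interval into bits 14:0, + * the coal_rc flag into bit 15, the rxf threshold into bits 19:16 + * and the packet threshold into bits 31:20 (see tn40.h). + */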
+ priv->rdintcm = TN40_INT_REG_VAL(0x20, 1, 4, 12); + priv->tdintcm = TN40_INT_REG_VAL(0x20, 1, 0, 12); + + ret = tn40_hw_reset(priv); + if (ret) { + dev_err(&pdev->dev, "failed to reset HW.\n"); + goto err_unset_drvdata; + } + + ret = pci_alloc_irq_vectors(pdev, 1, nvec, PCI_IRQ_MSI); + if (ret < 0) { + dev_err(&pdev->dev, "failed to allocate irq.\n"); + goto err_unset_drvdata; + } + + ret = tn40_mdiobus_init(priv); + if (ret) { + dev_err(&pdev->dev, "failed to initialize mdio bus.\n"); + goto err_free_irq; + } + + priv->stats_flag = + ((tn40_read_reg(priv, TN40_FPGA_VER) & 0xFFF) != 308); + u64_stats_init(&priv->syncp); + + priv->isr_mask = TN40_IR_RX_FREE_0 | TN40_IR_LNKCHG0 | TN40_IR_PSE | + TN40_IR_TMR0 | TN40_IR_RX_DESC_0 | TN40_IR_TX_FREE_0 | + TN40_IR_TMR1; + + tn40_mac_init(priv); + ret = tn40_phy_register(priv); + if (ret) { + dev_err(&pdev->dev, "failed to set up PHY.\n"); + goto err_free_irq; + } + + ret = tn40_priv_init(priv); + if (ret) { + dev_err(&pdev->dev, "failed to initialize tn40_priv.\n"); + goto err_unregister_phydev; + } + + ret = register_netdev(ndev); + if (ret) { + dev_err(&pdev->dev, "failed to register netdev.\n"); + goto err_unregister_phydev; + } + return 0; +err_unregister_phydev: + tn40_phy_unregister(priv); +err_free_irq: + pci_free_irq_vectors(pdev); +err_unset_drvdata: + pci_set_drvdata(pdev, NULL); +err_iounmap: + iounmap(regs); +err_free_regions: + pci_release_regions(pdev); +err_disable_device: + pci_disable_device(pdev); + return ret; +} + +static void tn40_remove(struct pci_dev *pdev) +{ + struct tn40_priv *priv = pci_get_drvdata(pdev); + struct net_device *ndev = priv->ndev; + + unregister_netdev(ndev); + + tn40_phy_unregister(priv); + pci_free_irq_vectors(priv->pdev); + pci_set_drvdata(pdev, NULL); + iounmap(priv->regs); + pci_release_regions(pdev); + pci_disable_device(pdev); +} + +static const struct pci_device_id tn40_id_table[] = { + { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022, + PCI_VENDOR_ID_TEHUTI, 0x3015) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022, + PCI_VENDOR_ID_DLINK, 0x4d00) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022, + PCI_VENDOR_ID_ASUSTEK, 0x8709) }, + { PCI_DEVICE_SUB(PCI_VENDOR_ID_TEHUTI, 0x4022, + PCI_VENDOR_ID_EDIMAX, 0x8103) }, + { } +}; + +static struct pci_driver tn40_driver = { + .name = TN40_DRV_NAME, + .id_table = tn40_id_table, + .probe = tn40_probe, + .remove = tn40_remove, +}; + +module_pci_driver(tn40_driver); + +MODULE_DEVICE_TABLE(pci, tn40_id_table); +MODULE_LICENSE("GPL"); +MODULE_FIRMWARE(TN40_FIRMWARE_NAME); +MODULE_DESCRIPTION("Tehuti Network TN40xx Driver"); diff --git a/drivers/net/ethernet/tehuti/tn40.h b/drivers/net/ethernet/tehuti/tn40.h new file mode 100644 index 000000000000..490781fe5120 --- /dev/null +++ b/drivers/net/ethernet/tehuti/tn40.h @@ -0,0 +1,233 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) Tehuti Networks Ltd. */ + +#ifndef _TN40_H_ +#define _TN40_H_ + +#include "tn40_regs.h" + +#define TN40_DRV_NAME "tn40xx" + +#define TN40_MDIO_SPEED_1MHZ (1) +#define TN40_MDIO_SPEED_6MHZ (6) + +/* netdev tx queue len for Luxor. The default value is 1000. + * Use 'ifconfig eth1 txqueuelen 3000' to change it at runtime. 
+ */ +#define TN40_NDEV_TXQ_LEN 1000 + +#define TN40_FIFO_SIZE 4096 +#define TN40_FIFO_EXTRA_SPACE 1024 + +#define TN40_TXF_DESC_SZ 16 +#define TN40_MAX_TX_LEVEL (priv->txd_fifo0.m.memsz - 16) +#define TN40_MIN_TX_LEVEL 256 +#define TN40_NO_UPD_PACKETS 40 +#define TN40_MAX_MTU BIT(14) + +#define TN40_PCK_TH_MULT 128 +#define TN40_INT_COAL_MULT 2 + +#define TN40_INT_REG_VAL(coal, coal_rc, rxf_th, pck_th) ( \ + FIELD_PREP(GENMASK(14, 0), (coal)) | \ + FIELD_PREP(BIT(15), (coal_rc)) | \ + FIELD_PREP(GENMASK(19, 16), (rxf_th)) | \ + FIELD_PREP(GENMASK(31, 20), (pck_th)) \ + ) + +struct tn40_fifo { + dma_addr_t da; /* Physical address of fifo (used by HW) */ + char *va; /* Virtual address of fifo (used by SW) */ + u32 rptr, wptr; + /* Cached values of RPTR and WPTR registers, + * they're 32 bits on both 32 and 64 archs. + */ + u16 reg_cfg0; + u16 reg_cfg1; + u16 reg_rptr; + u16 reg_wptr; + u16 memsz; /* Memory size allocated for fifo */ + u16 size_mask; + u16 pktsz; /* Skb packet size to allocate */ + u16 rcvno; /* Number of buffers that come from this RXF */ +}; + +struct tn40_txf_fifo { + struct tn40_fifo m; /* The minimal set of variables used by all fifos */ +}; + +struct tn40_txd_fifo { + struct tn40_fifo m; /* The minimal set of variables used by all fifos */ +}; + +struct tn40_rxf_fifo { + struct tn40_fifo m; /* The minimal set of variables used by all fifos */ +}; + +struct tn40_rxd_fifo { + struct tn40_fifo m; /* The minimal set of variables used by all fifos */ +}; + +struct tn40_rx_map { + struct page *page; +}; + +struct tn40_rxdb { + unsigned int *stack; + struct tn40_rx_map *elems; + unsigned int nelem; + unsigned int top; +}; + +union tn40_tx_dma_addr { + dma_addr_t dma; + struct sk_buff *skb; +}; + +/* Entry in the db. + * if len == 0 addr is dma + * if len != 0 addr is skb + */ +struct tn40_tx_map { + union tn40_tx_dma_addr addr; + int len; +}; + +/* tx database - implemented as circular fifo buffer */ +struct tn40_txdb { + struct tn40_tx_map *start; /* Points to the first element */ + struct tn40_tx_map *end; /* Points just AFTER the last element */ + struct tn40_tx_map *rptr; /* Points to the next element to read */ + struct tn40_tx_map *wptr; /* Points to the next element to write */ + int size; /* Number of elements in the db */ +}; + +struct tn40_priv { + struct net_device *ndev; + struct pci_dev *pdev; + + struct napi_struct napi; + /* RX FIFOs: 1 for data (full) descs, and 2 for free descs */ + struct tn40_rxd_fifo rxd_fifo0; + struct tn40_rxf_fifo rxf_fifo0; + struct tn40_rxdb *rxdb0; /* Rx dbs to store skb pointers */ + struct page_pool *page_pool; + + /* Tx FIFOs: 1 for data desc, 1 for empty (acks) desc */ + struct tn40_txd_fifo txd_fifo0; + struct tn40_txf_fifo txf_fifo0; + struct tn40_txdb txdb; + int tx_level; + int tx_update_mark; + int tx_noupd; + + int stats_flag; + struct rtnl_link_stats64 stats; + u64 alloc_fail; + struct u64_stats_sync syncp; + + u8 txd_size; + u8 txf_size; + u8 rxd_size; + u8 rxf_size; + u32 rdintcm; + u32 tdintcm; + + u32 isr_mask; + + void __iomem *regs; + + /* SHORT_PKT_FIX */ + u32 b0_len; + dma_addr_t b0_dma; /* Physical address of buffer */ + char *b0_va; /* Virtual address of buffer */ + + struct mii_bus *mdio; + struct phy_device *phydev; + struct phylink *phylink; + struct phylink_config phylink_config; +}; + +/* RX FREE descriptor - 64bit */ +struct tn40_rxf_desc { + __le32 info; /* Buffer Count + Info - described below */ + __le32 va_lo; /* VAdr[31:0] */ + __le32 va_hi; /* VAdr[63:32] */ + __le32 pa_lo; /* PAdr[31:0] */ + __le32 
pa_hi; /* PAdr[63:32] */ + __le32 len; /* Buffer Length */ +}; + +#define TN40_GET_RXD_BC(x) FIELD_GET(GENMASK(4, 0), (x)) +#define TN40_GET_RXD_ERR(x) FIELD_GET(GENMASK(26, 21), (x)) +#define TN40_GET_RXD_PKT_ID(x) FIELD_GET(GENMASK(30, 28), (x)) +#define TN40_GET_RXD_VTAG(x) FIELD_GET(BIT(31), (x)) +#define TN40_GET_RXD_VLAN_TCI(x) FIELD_GET(GENMASK(15, 0), (x)) + +struct tn40_rxd_desc { + __le32 rxd_val1; + __le16 len; + __le16 rxd_vlan; + __le32 va_lo; + __le32 va_hi; + __le32 rss_lo; + __le32 rss_hash; +}; + +#define TN40_MAX_PBL (19) +/* PBL describes each virtual buffer to be transmitted from the host. */ +struct tn40_pbl { + __le32 pa_lo; + __le32 pa_hi; + __le32 len; +}; + +/* First word for TXD descriptor. It encodes type = 3 for regular Tx packet, + * hw_csum = 7 for IP+UDP+TCP HW checksums. + */ +#define TN40_TXD_W1_VAL(bc, checksum, vtag, lgsnd, vlan_id) ( \ + GENMASK(17, 16) | \ + FIELD_PREP(GENMASK(4, 0), (bc)) | \ + FIELD_PREP(GENMASK(7, 5), (checksum)) | \ + FIELD_PREP(BIT(8), (vtag)) | \ + FIELD_PREP(GENMASK(12, 9), (lgsnd)) | \ + FIELD_PREP(GENMASK(15, 13), \ + FIELD_GET(GENMASK(15, 13), (vlan_id))) | \ + FIELD_PREP(GENMASK(31, 20), \ + FIELD_GET(GENMASK(11, 0), (vlan_id))) \ + ) + +struct tn40_txd_desc { + __le32 txd_val1; + __le16 mss; + __le16 length; + __le32 va_lo; + __le32 va_hi; + struct tn40_pbl pbl[]; /* Fragments */ +}; + +struct tn40_txf_desc { + u32 status; + u32 va_lo; /* VAdr[31:0] */ + u32 va_hi; /* VAdr[63:32] */ + u32 pad; +}; + +static inline u32 tn40_read_reg(struct tn40_priv *priv, u32 reg) +{ + return readl(priv->regs + reg); +} + +static inline void tn40_write_reg(struct tn40_priv *priv, u32 reg, u32 val) +{ + writel(val, priv->regs + reg); +} + +int tn40_set_link_speed(struct tn40_priv *priv, u32 speed); + +int tn40_mdiobus_init(struct tn40_priv *priv); + +int tn40_phy_register(struct tn40_priv *priv); +void tn40_phy_unregister(struct tn40_priv *priv); + +#endif /* _TN40_H_ */ diff --git a/drivers/net/ethernet/tehuti/tn40_mdio.c b/drivers/net/ethernet/tehuti/tn40_mdio.c new file mode 100644 index 000000000000..af18615d64a8 --- /dev/null +++ b/drivers/net/ethernet/tehuti/tn40_mdio.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) Tehuti Networks Ltd. */
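+ +/* Indirect MDIO access: the (port, device) pair is written to MDIO_CMD and + * the register number to MDIO_ADDR; a write then stores the value through + * MDIO_DATA, while a read sets the READ bit in MDIO_CMD and fetches the + * result from MDIO_DATA. The BUSY bit in MDIO_CMD_STAT is polled between + * steps. + */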
+ +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/iopoll.h> +#include <linux/pci.h> +#include <linux/phy.h> + +#include "tn40.h" + +#define TN40_MDIO_DEVAD_MASK GENMASK(4, 0) +#define TN40_MDIO_PRTAD_MASK GENMASK(9, 5) +#define TN40_MDIO_CMD_VAL(device, port) \ + (FIELD_PREP(TN40_MDIO_DEVAD_MASK, (device)) | \ + (FIELD_PREP(TN40_MDIO_PRTAD_MASK, (port)))) +#define TN40_MDIO_CMD_READ BIT(15) + +static void tn40_mdio_set_speed(struct tn40_priv *priv, u32 speed) +{ + void __iomem *regs = priv->regs; + int mdio_cfg; + + if (speed == TN40_MDIO_SPEED_1MHZ) + mdio_cfg = (0x7d << 7) | 0x08; /* 1MHz */ + else + mdio_cfg = 0xA08; /* 6MHz */ + mdio_cfg |= (1 << 6); + writel(mdio_cfg, regs + TN40_REG_MDIO_CMD_STAT); + msleep(100); +} + +static u32 tn40_mdio_stat(struct tn40_priv *priv) +{ + void __iomem *regs = priv->regs; + + return readl(regs + TN40_REG_MDIO_CMD_STAT); +} + +static int tn40_mdio_wait_nobusy(struct tn40_priv *priv, u32 *val) +{ + u32 stat; + int ret; + + ret = readx_poll_timeout_atomic(tn40_mdio_stat, priv, stat, + TN40_GET_MDIO_BUSY(stat) == 0, 10, + 10000); + if (val) + *val = stat; + return ret; +} + +static int tn40_mdio_read(struct tn40_priv *priv, int port, int device, + u16 regnum) +{ + void __iomem *regs = priv->regs; + u32 i; + + /* wait until MDIO is not busy */ + if (tn40_mdio_wait_nobusy(priv, NULL)) + return -EIO; + + i = TN40_MDIO_CMD_VAL(device, port); + writel(i, regs + TN40_REG_MDIO_CMD); + writel((u32)regnum, regs + TN40_REG_MDIO_ADDR); + if (tn40_mdio_wait_nobusy(priv, NULL)) + return -EIO; + + writel(TN40_MDIO_CMD_READ | i, regs + TN40_REG_MDIO_CMD); + /* read CMD_STAT until not busy */ + if (tn40_mdio_wait_nobusy(priv, NULL)) + return -EIO; + + return lower_16_bits(readl(regs + TN40_REG_MDIO_DATA)); +} + +static int tn40_mdio_write(struct tn40_priv *priv, int port, int device, + u16 regnum, u16 data) +{ + void __iomem *regs = priv->regs; + u32 tmp_reg = 0; + int ret; + + /* wait until MDIO is not busy */ + if (tn40_mdio_wait_nobusy(priv, NULL)) + return -EIO; + writel(TN40_MDIO_CMD_VAL(device, port), regs + TN40_REG_MDIO_CMD); + writel((u32)regnum, regs + TN40_REG_MDIO_ADDR); + if (tn40_mdio_wait_nobusy(priv, NULL)) + return -EIO; + writel((u32)data, regs + TN40_REG_MDIO_DATA); + /* read CMD_STAT until not busy */ + ret = tn40_mdio_wait_nobusy(priv, &tmp_reg); + if (ret) + return -EIO; + + if (TN40_GET_MDIO_RD_ERR(tmp_reg)) { + dev_err(&priv->pdev->dev, "MDIO error after write command\n"); + return -EIO; + } + return 0; +} + +static int tn40_mdio_read_c45(struct mii_bus *mii_bus, int addr, int devnum, + int regnum) +{ + return tn40_mdio_read(mii_bus->priv, addr, devnum, regnum); +} + +static int tn40_mdio_write_c45(struct mii_bus *mii_bus, int addr, int devnum, + int regnum, u16 val) +{ + return tn40_mdio_write(mii_bus->priv, addr, devnum, regnum, val); +} + +int tn40_mdiobus_init(struct tn40_priv *priv) +{ + struct pci_dev *pdev = priv->pdev; + struct mii_bus *bus; + int ret; + + bus = devm_mdiobus_alloc(&pdev->dev); + if (!bus) + return -ENOMEM; + + bus->name = TN40_DRV_NAME; + bus->parent = &pdev->dev; + snprintf(bus->id, MII_BUS_ID_SIZE, "tn40xx-%x-%x", + pci_domain_nr(pdev->bus), pci_dev_id(pdev)); + bus->priv = priv; + + bus->read_c45 = tn40_mdio_read_c45; + bus->write_c45 = tn40_mdio_write_c45; + + ret = devm_mdiobus_register(&pdev->dev, bus); + if (ret) { + dev_err(&pdev->dev, "failed to register mdiobus %d %u %u\n", + ret, bus->state, MDIOBUS_UNREGISTERED); + return ret; + } + tn40_mdio_set_speed(priv, TN40_MDIO_SPEED_6MHZ); + priv->mdio = bus; + return 0; +} diff --git a/drivers/net/ethernet/tehuti/tn40_phy.c 
b/drivers/net/ethernet/tehuti/tn40_phy.c new file mode 100644 index 000000000000..39eef7ca7958 --- /dev/null +++ b/drivers/net/ethernet/tehuti/tn40_phy.c @@ -0,0 +1,76 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) Tehuti Networks Ltd. */ + +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/phy.h> +#include <linux/phylink.h> + +#include "tn40.h" + +static struct tn40_priv *tn40_config_to_priv(struct phylink_config *config) +{ + return container_of(config, struct tn40_priv, phylink_config); +} + +static void tn40_link_up(struct phylink_config *config, struct phy_device *phy, + unsigned int mode, phy_interface_t interface, + int speed, int duplex, bool tx_pause, bool rx_pause) +{ + struct tn40_priv *priv = tn40_config_to_priv(config); + + tn40_set_link_speed(priv, speed); + netif_wake_queue(priv->ndev); +} + +static void tn40_link_down(struct phylink_config *config, unsigned int mode, + phy_interface_t interface) +{ + struct tn40_priv *priv = tn40_config_to_priv(config); + + netif_stop_queue(priv->ndev); + tn40_set_link_speed(priv, 0); +} + +static void tn40_mac_config(struct phylink_config *config, unsigned int mode, + const struct phylink_link_state *state) +{ +} + +static const struct phylink_mac_ops tn40_mac_ops = { + .mac_config = tn40_mac_config, + .mac_link_up = tn40_link_up, + .mac_link_down = tn40_link_down, +}; + +int tn40_phy_register(struct tn40_priv *priv) +{ + struct phylink_config *config; + struct phy_device *phydev; + struct phylink *phylink; + + phydev = phy_find_first(priv->mdio); + if (!phydev) { + dev_err(&priv->pdev->dev, "no PHY found\n"); + return -ENODEV; + } + + config = &priv->phylink_config; + config->dev = &priv->ndev->dev; + config->type = PHYLINK_NETDEV; + config->mac_capabilities = MAC_10000FD; + __set_bit(PHY_INTERFACE_MODE_XAUI, config->supported_interfaces); + + phylink = phylink_create(config, NULL, PHY_INTERFACE_MODE_XAUI, + &tn40_mac_ops); + if (IS_ERR(phylink)) + return PTR_ERR(phylink); + + priv->phydev = phydev; + priv->phylink = phylink; + return 0; +} + +void tn40_phy_unregister(struct tn40_priv *priv) +{ + phylink_destroy(priv->phylink); +} diff --git a/drivers/net/ethernet/tehuti/tn40_regs.h b/drivers/net/ethernet/tehuti/tn40_regs.h new file mode 100644 index 000000000000..95171aa57a9e --- /dev/null +++ b/drivers/net/ethernet/tehuti/tn40_regs.h @@ -0,0 +1,245 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) Tehuti Networks Ltd. */
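+ +/* Most FIFO registers below come in four per-queue copies (suffixes _0 to + * _3) spaced 4 bytes apart, with successive register groups 0x10 apart. + */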
+ +#ifndef _TN40_REGS_H_ +#define _TN40_REGS_H_ + +/* Register region size */ +#define TN40_REGS_SIZE 0x10000 + +/* Registers from 0x0000-0x00fc were remapped to 0x4000-0x40fc */ +#define TN40_REG_TXD_CFG1_0 0x4000 +#define TN40_REG_TXD_CFG1_1 0x4004 +#define TN40_REG_TXD_CFG1_2 0x4008 +#define TN40_REG_TXD_CFG1_3 0x400C + +#define TN40_REG_RXF_CFG1_0 0x4010 +#define TN40_REG_RXF_CFG1_1 0x4014 +#define TN40_REG_RXF_CFG1_2 0x4018 +#define TN40_REG_RXF_CFG1_3 0x401C + +#define TN40_REG_RXD_CFG1_0 0x4020 +#define TN40_REG_RXD_CFG1_1 0x4024 +#define TN40_REG_RXD_CFG1_2 0x4028 +#define TN40_REG_RXD_CFG1_3 0x402C + +#define TN40_REG_TXF_CFG1_0 0x4030 +#define TN40_REG_TXF_CFG1_1 0x4034 +#define TN40_REG_TXF_CFG1_2 0x4038 +#define TN40_REG_TXF_CFG1_3 0x403C + +#define TN40_REG_TXD_CFG0_0 0x4040 +#define TN40_REG_TXD_CFG0_1 0x4044 +#define TN40_REG_TXD_CFG0_2 0x4048 +#define TN40_REG_TXD_CFG0_3 0x404C + +#define TN40_REG_RXF_CFG0_0 0x4050 +#define TN40_REG_RXF_CFG0_1 0x4054 +#define TN40_REG_RXF_CFG0_2 0x4058 +#define TN40_REG_RXF_CFG0_3 0x405C + +#define TN40_REG_RXD_CFG0_0 0x4060 +#define TN40_REG_RXD_CFG0_1 0x4064 +#define TN40_REG_RXD_CFG0_2 0x4068 +#define TN40_REG_RXD_CFG0_3 0x406C + +#define TN40_REG_TXF_CFG0_0 0x4070 +#define TN40_REG_TXF_CFG0_1 0x4074 +#define TN40_REG_TXF_CFG0_2 0x4078 +#define TN40_REG_TXF_CFG0_3 0x407C + +#define TN40_REG_TXD_WPTR_0 0x4080 +#define TN40_REG_TXD_WPTR_1 0x4084 +#define TN40_REG_TXD_WPTR_2 0x4088 +#define TN40_REG_TXD_WPTR_3 0x408C + +#define TN40_REG_RXF_WPTR_0 0x4090 +#define TN40_REG_RXF_WPTR_1 0x4094 +#define TN40_REG_RXF_WPTR_2 0x4098 +#define TN40_REG_RXF_WPTR_3 0x409C + +#define TN40_REG_RXD_WPTR_0 0x40A0 +#define TN40_REG_RXD_WPTR_1 0x40A4 +#define TN40_REG_RXD_WPTR_2 0x40A8 +#define TN40_REG_RXD_WPTR_3 0x40AC + +#define TN40_REG_TXF_WPTR_0 0x40B0 +#define TN40_REG_TXF_WPTR_1 0x40B4 +#define TN40_REG_TXF_WPTR_2 0x40B8 +#define TN40_REG_TXF_WPTR_3 0x40BC + +#define TN40_REG_TXD_RPTR_0 0x40C0 +#define TN40_REG_TXD_RPTR_1 0x40C4 +#define TN40_REG_TXD_RPTR_2 0x40C8 +#define TN40_REG_TXD_RPTR_3 0x40CC + +#define TN40_REG_RXF_RPTR_0 0x40D0 +#define TN40_REG_RXF_RPTR_1 0x40D4 +#define TN40_REG_RXF_RPTR_2 0x40D8 +#define TN40_REG_RXF_RPTR_3 0x40DC + +#define TN40_REG_RXD_RPTR_0 0x40E0 +#define TN40_REG_RXD_RPTR_1 0x40E4 +#define TN40_REG_RXD_RPTR_2 0x40E8 +#define TN40_REG_RXD_RPTR_3 0x40EC + +#define TN40_REG_TXF_RPTR_0 0x40F0 +#define TN40_REG_TXF_RPTR_1 0x40F4 +#define TN40_REG_TXF_RPTR_2 0x40F8 +#define TN40_REG_TXF_RPTR_3 0x40FC + +/* Hardware versioning */ +#define TN40_FPGA_VER 0x5030 + +/* Registers from 0x0100-0x0150 were remapped to 0x5100-0x5150 */ +#define TN40_REG_ISR TN40_REG_ISR0 +#define TN40_REG_ISR0 0x5100 + +#define TN40_REG_IMR TN40_REG_IMR0 +#define TN40_REG_IMR0 0x5110 + +#define TN40_REG_RDINTCM0 0x5120 +#define TN40_REG_RDINTCM2 0x5128 + +#define TN40_REG_TDINTCM0 0x5130 + +#define TN40_REG_ISR_MSK0 0x5140 + +#define TN40_REG_INIT_SEMAPHORE 0x5170 +#define TN40_REG_INIT_STATUS 0x5180 + +#define TN40_REG_MAC_LNK_STAT 0x0200 +#define TN40_MAC_LINK_STAT 0x0004 /* Link state */ + +#define TN40_REG_BLNK_LED 0x0210 + +#define TN40_REG_GMAC_RXF_A 0x1240 + +#define TN40_REG_UNC_MAC0_A 0x1250 +#define TN40_REG_UNC_MAC1_A 0x1260 +#define TN40_REG_UNC_MAC2_A 0x1270 + +#define TN40_REG_VLAN_0 0x1800 + +#define TN40_REG_MAX_FRAME_A 0x12C0 + +#define TN40_REG_RX_MAC_MCST0 0x1A80 +#define TN40_REG_RX_MAC_MCST1 0x1A84 +#define TN40_MAC_MCST_NUM 15 +#define TN40_REG_RX_MCST_HASH0 0x1A00 +#define TN40_MAC_MCST_HASH_NUM 8 + +#define TN40_REG_VPC 
0x2300 +#define TN40_REG_VIC 0x2320 +#define TN40_REG_VGLB 0x2340 + +#define TN40_REG_CLKPLL 0x5000 + +/* MDIO interface */ + +#define TN40_REG_MDIO_CMD_STAT 0x6030 +#define TN40_REG_MDIO_CMD 0x6034 +#define TN40_REG_MDIO_DATA 0x6038 +#define TN40_REG_MDIO_ADDR 0x603C +#define TN40_GET_MDIO_BUSY(x) FIELD_GET(GENMASK(0, 0), (x)) +#define TN40_GET_MDIO_RD_ERR(x) FIELD_GET(GENMASK(1, 1), (x)) + +#define TN40_REG_REVISION 0x6000 +#define TN40_REG_SCRATCH 0x6004 +#define TN40_REG_CTRLST 0x6008 +#define TN40_REG_MAC_ADDR_0 0x600C +#define TN40_REG_MAC_ADDR_1 0x6010 +#define TN40_REG_FRM_LENGTH 0x6014 +#define TN40_REG_PAUSE_QUANT 0x6054 +#define TN40_REG_RX_FIFO_SECTION 0x601C +#define TN40_REG_TX_FIFO_SECTION 0x6020 +#define TN40_REG_RX_FULLNESS 0x6024 +#define TN40_REG_TX_FULLNESS 0x6028 +#define TN40_REG_HASHTABLE 0x602C + +#define TN40_REG_RST_PORT 0x7000 +#define TN40_REG_DIS_PORT 0x7010 +#define TN40_REG_RST_QU 0x7020 +#define TN40_REG_DIS_QU 0x7030 + +#define TN40_REG_CTRLST_TX_ENA 0x0001 +#define TN40_REG_CTRLST_RX_ENA 0x0002 +#define TN40_REG_CTRLST_PRM_ENA 0x0010 +#define TN40_REG_CTRLST_PAD_ENA 0x0020 + +#define TN40_REG_CTRLST_BASE (TN40_REG_CTRLST_PAD_ENA | TN40_REG_CTRLST_PRM_ENA) + +/* TXD TXF RXF RXD CONFIG 0x0000 --- 0x007c */ +#define TN40_TX_RX_CFG1_BASE 0xffffffff /*0-31 */ +#define TN40_TX_RX_CFG0_BASE 0xfffff000 /*31:12 */ +#define TN40_TX_RX_CFG0_RSVD 0x00000ffc /*11:2 */ +#define TN40_TX_RX_CFG0_SIZE 0x00000003 /*1:0 */ + +/* TXD TXF RXF RXD WRITE 0x0080 --- 0x00BC */ +#define TN40_TXF_WPTR_WR_PTR 0x00007ff8 /*14:3 */ + +/* TXD TXF RXF RXD READ 0x00CO --- 0x00FC */ +#define TN40_TXF_RPTR_RD_PTR 0x00007ff8 /*14:3 */ + +/* The last 4 bits are dropped; the size is rounded to 16 */ +#define TN40_TXF_WPTR_MASK 0x7ff0 + +/* regISR 0x0100 */ +/* regIMR 0x0110 */ +#define TN40_IMR_INPROG 0x80000000 /*31 */ +#define TN40_IR_LNKCHG1 0x10000000 /*28 */ +#define TN40_IR_LNKCHG0 0x08000000 /*27 */ +#define TN40_IR_GPIO 0x04000000 /*26 */ +#define TN40_IR_RFRSH 0x02000000 /*25 */ +#define TN40_IR_RSVD 0x01000000 /*24 */ +#define TN40_IR_SWI 0x00800000 /*23 */ +#define TN40_IR_RX_FREE_3 0x00400000 /*22 */ +#define TN40_IR_RX_FREE_2 0x00200000 /*21 */ +#define TN40_IR_RX_FREE_1 0x00100000 /*20 */ +#define TN40_IR_RX_FREE_0 0x00080000 /*19 */ +#define TN40_IR_TX_FREE_3 0x00040000 /*18 */ +#define TN40_IR_TX_FREE_2 0x00020000 /*17 */ +#define TN40_IR_TX_FREE_1 0x00010000 /*16 */ +#define TN40_IR_TX_FREE_0 0x00008000 /*15 */ +#define TN40_IR_RX_DESC_3 0x00004000 /*14 */ +#define TN40_IR_RX_DESC_2 0x00002000 /*13 */ +#define TN40_IR_RX_DESC_1 0x00001000 /*12 */ +#define TN40_IR_RX_DESC_0 0x00000800 /*11 */ +#define TN40_IR_PSE 0x00000400 /*10 */ +#define TN40_IR_TMR3 0x00000200 /* 9 */ +#define TN40_IR_TMR2 0x00000100 /* 8 */ +#define TN40_IR_TMR1 0x00000080 /* 7 */ +#define TN40_IR_TMR0 0x00000040 /* 6 */ +#define TN40_IR_VNT 0x00000020 /* 5 */ +#define TN40_IR_RxFL 0x00000010 /* 4 */ +#define TN40_IR_SDPERR 0x00000008 /* 3 */ +#define TN40_IR_TR 0x00000004 /* 2 */ +#define TN40_IR_PCIE_LINK 0x00000002 /* 1 */ +#define TN40_IR_PCIE_TOUT 0x00000001 /* 0 */ + +#define TN40_IR_EXTRA \ + (TN40_IR_RX_FREE_0 | TN40_IR_LNKCHG0 | TN40_IR_LNKCHG1 |\ + TN40_IR_PSE | TN40_IR_TMR0 | TN40_IR_PCIE_LINK | \ + TN40_IR_PCIE_TOUT) + +#define TN40_GMAC_RX_FILTER_OSEN 0x1000 /* shared OS enable */ +#define TN40_GMAC_RX_FILTER_TXFC 0x0400 /* Tx flow control */ +#define TN40_GMAC_RX_FILTER_RSV0 0x0200 /* reserved */ +#define TN40_GMAC_RX_FILTER_FDA 0x0100 /* filter out direct address */ +#define TN40_GMAC_RX_FILTER_AOF 
0x0080 /* accept overrun */ +#define TN40_GMAC_RX_FILTER_ACF 0x0040 /* accept control frames */ +#define TN40_GMAC_RX_FILTER_ARUNT 0x0020 /* accept underrun */ +#define TN40_GMAC_RX_FILTER_ACRC 0x0010 /* accept crc error */ +#define TN40_GMAC_RX_FILTER_AM 0x0008 /* accept multicast */ +#define TN40_GMAC_RX_FILTER_AB 0x0004 /* accept broadcast */ +#define TN40_GMAC_RX_FILTER_PRM 0x0001 /* [0:1] promiscuous mode */ + +#define TN40_MAX_FRAME_AB_VAL 0x3fff /* 13:0 */ + +#define TN40_CLKPLL_PLLLKD 0x0200 /* 9 */ +#define TN40_CLKPLL_RSTEND 0x0100 /* 8 */ +#define TN40_CLKPLL_SFTRST 0x0001 /* 0 */ + +#define TN40_CLKPLL_LKD (TN40_CLKPLL_PLLLKD | TN40_CLKPLL_RSTEND) + +#endif /* _TN40_REGS_H_ */ diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index 1729eb0e0b41..0d5a862cd78a 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -188,6 +188,7 @@ config TI_ICSSG_PRUETH select TI_ICSS_IEP select TI_K3_CPPI_DESC_POOL depends on PRU_REMOTEPROC + depends on NET_SWITCHDEV depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER depends on PTP_1588_CLOCK_OPTIONAL help @@ -204,6 +205,7 @@ config TI_ICSSG_PRUETH_SR1 select TI_ICSS_IEP select TI_K3_CPPI_DESC_POOL depends on PRU_REMOTEPROC + depends on NET_SWITCHDEV depends on ARCH_K3 && OF && TI_K3_UDMA_GLUE_LAYER help Support dual Gigabit Ethernet ports over the ICSSG PRU Subsystem. diff --git a/drivers/net/ethernet/ti/Makefile b/drivers/net/ethernet/ti/Makefile index 6e086b4c0384..cbcf44806924 100644 --- a/drivers/net/ethernet/ti/Makefile +++ b/drivers/net/ethernet/ti/Makefile @@ -31,21 +31,18 @@ ti-am65-cpsw-nuss-$(CONFIG_TI_AM65_CPSW_QOS) += am65-cpsw-qos.o ti-am65-cpsw-nuss-$(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV) += am65-cpsw-switchdev.o obj-$(CONFIG_TI_K3_AM65_CPTS) += am65-cpts.o -obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg-prueth.o -icssg-prueth-y := icssg/icssg_prueth.o \ - icssg/icssg_common.o \ - icssg/icssg_classifier.o \ - icssg/icssg_queues.o \ - icssg/icssg_config.o \ - icssg/icssg_mii_cfg.o \ - icssg/icssg_stats.o \ - icssg/icssg_ethtool.o -obj-$(CONFIG_TI_ICSSG_PRUETH_SR1) += icssg-prueth-sr1.o -icssg-prueth-sr1-y := icssg/icssg_prueth_sr1.o \ - icssg/icssg_common.o \ - icssg/icssg_classifier.o \ - icssg/icssg_config.o \ - icssg/icssg_mii_cfg.o \ - icssg/icssg_stats.o \ - icssg/icssg_ethtool.o +obj-$(CONFIG_TI_ICSSG_PRUETH) += icssg-prueth.o icssg.o +icssg-prueth-y := icssg/icssg_prueth.o icssg/icssg_switchdev.o + +obj-$(CONFIG_TI_ICSSG_PRUETH_SR1) += icssg-prueth-sr1.o icssg.o +icssg-prueth-sr1-y := icssg/icssg_prueth_sr1.o + +icssg-y := icssg/icssg_common.o \ + icssg/icssg_classifier.o \ + icssg/icssg_queues.o \ + icssg/icssg_config.o \ + icssg/icssg_mii_cfg.o \ + icssg/icssg_stats.o \ + icssg/icssg_ethtool.o + obj-$(CONFIG_TI_ICSS_IEP) += icssg/icss_iep.o diff --git a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c index a1d0935d1ebe..b60976947da5 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-ethtool.c +++ b/drivers/net/ethernet/ti/am65-cpsw-ethtool.c @@ -692,7 +692,7 @@ static void am65_cpsw_get_eth_mac_stats(struct net_device *ndev, }; static int am65_cpsw_get_ethtool_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct am65_cpsw_common *common = am65_ndev_to_common(ndev); unsigned int ptp_v2_filter; diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 4e50b3792888..81d9f21086ec 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ 
b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -896,7 +896,7 @@ static int am65_cpsw_nuss_ndo_slave_open(struct net_device *ndev) /* mac_sl should be configured via phy-link interface */ am65_cpsw_sl_ctl_reset(port); - ret = phylink_of_phy_connect(port->slave.phylink, port->slave.phy_node, 0); + ret = phylink_of_phy_connect(port->slave.phylink, port->slave.port_np, 0); if (ret) goto error_cleanup; @@ -2424,10 +2424,10 @@ static int am65_cpsw_nuss_init_rx_chns(struct am65_cpsw_common *common) rx_chn->irq = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i); - if (rx_chn->irq <= 0) { + if (rx_chn->irq < 0) { dev_err(dev, "Failed to get rx dma irq %d\n", rx_chn->irq); - ret = -ENXIO; + ret = rx_chn->irq; goto err; } } @@ -2611,7 +2611,7 @@ static int am65_cpsw_nuss_init_slave_ports(struct am65_cpsw_common *common) of_property_read_bool(port_np, "ti,mac-only"); /* get phy/link info */ - port->slave.phy_node = port_np; + port->slave.port_np = port_np; ret = of_get_phy_mode(port_np, &port->slave.phy_if); if (ret) { dev_err(dev, "%pOF read phy-mode err %d\n", @@ -2703,6 +2703,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) mutex_init(&ndev_priv->mm_lock); port->qos.link_speed = SPEED_UNKNOWN; SET_NETDEV_DEV(port->ndev, dev); + port->ndev->dev.of_node = port->slave.port_np; eth_hw_addr_set(port->ndev, port->slave.mac_addr); @@ -2760,7 +2761,7 @@ am65_cpsw_nuss_init_port_ndev(struct am65_cpsw_common *common, u32 port_idx) } phylink = phylink_create(&port->slave.phylink_config, - of_node_to_fwnode(port->slave.phy_node), + of_node_to_fwnode(port->slave.port_np), port->slave.phy_if, &am65_cpsw_phylink_mac_ops); if (IS_ERR(phylink)) diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h index d8ce88dc9c89..e2ce2be320bd 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h @@ -30,7 +30,7 @@ struct am65_cpts; struct am65_cpsw_slave_data { bool mac_only; struct cpsw_sl *mac_sl; - struct device_node *phy_node; + struct device_node *port_np; phy_interface_t phy_if; struct phy *ifphy; struct phy *serdes_phy; diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c index f7b283353ba2..53ed23d68722 100644 --- a/drivers/net/ethernet/ti/cpsw_ethtool.c +++ b/drivers/net/ethernet/ti/cpsw_ethtool.c @@ -717,7 +717,7 @@ err: } #if IS_ENABLED(CONFIG_TI_CPTS) -int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) +int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info) { struct cpsw_common *cpsw = ndev_to_cpsw(ndev); @@ -738,7 +738,7 @@ int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) return 0; } #else -int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info) +int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h index 7efa72502c86..1f448290b9f4 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.h +++ b/drivers/net/ethernet/ti/cpsw_priv.h @@ -510,6 +510,6 @@ int cpsw_set_ringparam(struct net_device *ndev, int cpsw_set_channels_common(struct net_device *ndev, struct ethtool_channels *chs, cpdma_handler_fn rx_handler); -int cpsw_get_ts_info(struct net_device *ndev, struct ethtool_ts_info *info); +int cpsw_get_ts_info(struct net_device *ndev, struct kernel_ethtool_ts_info *info); #endif /* 
DRIVERS_NET_ETHERNET_TI_CPSW_PRIV_H_ */ diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c index 3025e9c18970..75c294ce6fb6 100644 --- a/drivers/net/ethernet/ti/icssg/icss_iep.c +++ b/drivers/net/ethernet/ti/icssg/icss_iep.c @@ -17,6 +17,7 @@ #include #include #include +#include #include "icss_iep.h" @@ -94,7 +95,7 @@ enum { * @flags: Flags to represent IEP properties */ struct icss_iep_plat_data { - struct regmap_config *config; + const struct regmap_config *config; u32 reg_offs[ICSS_IEP_MAX_REGS]; u32 flags; }; @@ -110,7 +111,6 @@ struct icss_iep { struct ptp_clock_info ptp_info; struct ptp_clock *ptp_clock; struct mutex ptp_clk_mutex; /* PHC access serializer */ - spinlock_t irq_lock; /* CMP IRQ vs icss_iep_ptp_enable access */ u32 def_inc; s16 slow_cmp_inc; u32 slow_cmp_count; @@ -122,6 +122,7 @@ struct icss_iep { int cap_cmp_irq; u64 period; u32 latch_enable; + struct work_struct work; }; /** @@ -192,14 +193,11 @@ static void icss_iep_update_to_next_boundary(struct icss_iep *iep, u64 start_ns) */ static void icss_iep_settime(struct icss_iep *iep, u64 ns) { - unsigned long flags; - if (iep->ops && iep->ops->settime) { iep->ops->settime(iep->clockops_data, ns); return; } - spin_lock_irqsave(&iep->irq_lock, flags); if (iep->pps_enabled || iep->perout_enabled) writel(0, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]); @@ -210,7 +208,6 @@ static void icss_iep_settime(struct icss_iep *iep, u64 ns) writel(IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN, iep->base + iep->plat_data->reg_offs[ICSS_IEP_SYNC_CTRL_REG]); } - spin_unlock_irqrestore(&iep->irq_lock, flags); } /** @@ -546,7 +543,6 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep, static int icss_iep_perout_enable(struct icss_iep *iep, struct ptp_perout_request *req, int on) { - unsigned long flags; int ret = 0; mutex_lock(&iep->ptp_clk_mutex); @@ -559,11 +555,9 @@ static int icss_iep_perout_enable(struct icss_iep *iep, if (iep->perout_enabled == !!on) goto exit; - spin_lock_irqsave(&iep->irq_lock, flags); ret = icss_iep_perout_enable_hw(iep, req, on); if (!ret) iep->perout_enabled = !!on; - spin_unlock_irqrestore(&iep->irq_lock, flags); exit: mutex_unlock(&iep->ptp_clk_mutex); @@ -571,11 +565,61 @@ exit: return ret; } +static void icss_iep_cap_cmp_work(struct work_struct *work) +{ + struct icss_iep *iep = container_of(work, struct icss_iep, work); + const u32 *reg_offs = iep->plat_data->reg_offs; + struct ptp_clock_event pevent; + unsigned int val; + u64 ns, ns_next; + + mutex_lock(&iep->ptp_clk_mutex); + + ns = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG0]); + if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) { + val = readl(iep->base + reg_offs[ICSS_IEP_CMP1_REG1]); + ns |= (u64)val << 32; + } + /* set next event */ + ns_next = ns + iep->period; + writel(lower_32_bits(ns_next), + iep->base + reg_offs[ICSS_IEP_CMP1_REG0]); + if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT) + writel(upper_32_bits(ns_next), + iep->base + reg_offs[ICSS_IEP_CMP1_REG1]); + + pevent.pps_times.ts_real = ns_to_timespec64(ns); + pevent.type = PTP_CLOCK_PPSUSR; + pevent.index = 0; + ptp_clock_event(iep->ptp_clock, &pevent); + dev_dbg(iep->dev, "IEP:pps ts: %llu next:%llu:\n", ns, ns_next); + + mutex_unlock(&iep->ptp_clk_mutex); +} + +static irqreturn_t icss_iep_cap_cmp_irq(int irq, void *dev_id) +{ + struct icss_iep *iep = (struct icss_iep *)dev_id; + const u32 *reg_offs = iep->plat_data->reg_offs; + unsigned int val; + + val = readl(iep->base + 
reg_offs[ICSS_IEP_CMP_STAT_REG]); + /* The driver only enables CMP1 */ + if (val & BIT(1)) { + /* Clear the event */ + writel(BIT(1), iep->base + reg_offs[ICSS_IEP_CMP_STAT_REG]); + if (iep->pps_enabled || iep->perout_enabled) + schedule_work(&iep->work); + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + static int icss_iep_pps_enable(struct icss_iep *iep, int on) { struct ptp_clock_request rq; struct timespec64 ts; - unsigned long flags; int ret = 0; u64 ns; @@ -589,8 +633,6 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on) if (iep->pps_enabled == !!on) goto exit; - spin_lock_irqsave(&iep->irq_lock, flags); - rq.perout.index = 0; if (on) { ns = icss_iep_gettime(iep, NULL); @@ -602,13 +644,13 @@ static int icss_iep_pps_enable(struct icss_iep *iep, int on) ret = icss_iep_perout_enable_hw(iep, &rq.perout, on); } else { ret = icss_iep_perout_enable_hw(iep, &rq.perout, on); + if (iep->cap_cmp_irq) + cancel_work_sync(&iep->work); } if (!ret) iep->pps_enabled = !!on; - spin_unlock_irqrestore(&iep->irq_lock, flags); - exit: mutex_unlock(&iep->ptp_clk_mutex); @@ -777,6 +819,8 @@ int icss_iep_init(struct icss_iep *iep, const struct icss_iep_clockops *clkops, if (iep->ops && iep->ops->perout_enable) { iep->ptp_info.n_per_out = 1; iep->ptp_info.pps = 1; + } else if (iep->cap_cmp_irq) { + iep->ptp_info.pps = 1; } if (iep->ops && iep->ops->extts_enable) @@ -817,6 +861,7 @@ static int icss_iep_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct icss_iep *iep; struct clk *iep_clk; + int ret, irq; iep = devm_kzalloc(dev, sizeof(*iep), GFP_KERNEL); if (!iep) @@ -827,6 +872,22 @@ static int icss_iep_probe(struct platform_device *pdev) if (IS_ERR(iep->base)) return -ENODEV; + irq = platform_get_irq_byname_optional(pdev, "iep_cap_cmp"); + if (irq == -EPROBE_DEFER) + return irq; + + if (irq > 0) { + ret = devm_request_irq(dev, irq, icss_iep_cap_cmp_irq, + IRQF_TRIGGER_HIGH, "iep_cap_cmp", iep); + if (ret) { + dev_info(iep->dev, "cap_cmp irq request failed: %x\n", + ret); + } else { + iep->cap_cmp_irq = irq; + INIT_WORK(&iep->work, icss_iep_cap_cmp_work); + } + } + iep_clk = devm_clk_get(dev, NULL); if (IS_ERR(iep_clk)) return PTR_ERR(iep_clk); @@ -853,7 +914,6 @@ static int icss_iep_probe(struct platform_device *pdev) iep->ptp_info = icss_iep_ptp_info; mutex_init(&iep->ptp_clk_mutex); - spin_lock_init(&iep->irq_lock); dev_set_drvdata(dev, iep); icss_iep_disable(iep); @@ -892,7 +952,7 @@ static int icss_iep_regmap_read(void *context, unsigned int reg, return 0; } -static struct regmap_config am654_icss_iep_regmap_config = { +static const struct regmap_config am654_icss_iep_regmap_config = { .name = "icss iep", .reg_stride = 1, .reg_write = icss_iep_regmap_write, diff --git a/drivers/net/ethernet/ti/icssg/icssg_classifier.c b/drivers/net/ethernet/ti/icssg/icssg_classifier.c index f7d21da1a0fb..9ec504d976d6 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_classifier.c +++ b/drivers/net/ethernet/ti/icssg/icssg_classifier.c @@ -297,6 +297,7 @@ void icssg_class_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac) mac[2] << 16 | mac[3] << 24)); regmap_write(miig_rt, offs[slice].mac1, (u32)(mac[4] | mac[5] << 8)); } +EXPORT_SYMBOL_GPL(icssg_class_set_mac_addr); static void icssg_class_ft1_add_mcast(struct regmap *miig_rt, int slice, int slot, const u8 *addr, const u8 *mask) @@ -360,6 +361,7 @@ void icssg_class_disable(struct regmap *miig_rt, int slice) /* clear CFG2 */ regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0); } +EXPORT_SYMBOL_GPL(icssg_class_disable); void 
icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti, bool is_sr1) @@ -390,6 +392,7 @@ void icssg_class_default(struct regmap *miig_rt, int slice, bool allmulti, /* clear CFG2 */ regmap_write(miig_rt, offs[slice].rx_class_cfg2, 0); } +EXPORT_SYMBOL_GPL(icssg_class_default); void icssg_class_promiscuous_sr1(struct regmap *miig_rt, int slice) { @@ -408,6 +411,7 @@ void icssg_class_promiscuous_sr1(struct regmap *miig_rt, int slice) regmap_write(miig_rt, offset, data); } } +EXPORT_SYMBOL_GPL(icssg_class_promiscuous_sr1); void icssg_class_add_mcast_sr1(struct regmap *miig_rt, int slice, struct net_device *ndev) @@ -449,6 +453,7 @@ void icssg_class_add_mcast_sr1(struct regmap *miig_rt, int slice, slot++; } } +EXPORT_SYMBOL_GPL(icssg_class_add_mcast_sr1); /* required for SAV check */ void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr) @@ -460,3 +465,4 @@ void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr) rx_class_ft1_set_da_mask(miig_rt, slice, 0, mask_addr); rx_class_ft1_cfg_set_type(miig_rt, slice, 0, FT1_CFG_TYPE_EQ); } +EXPORT_SYMBOL_GPL(icssg_ft1_set_mac_addr); diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c index 088ab8076db4..b9d8a93d1680 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_common.c +++ b/drivers/net/ethernet/ti/icssg/icssg_common.c @@ -51,6 +51,7 @@ void prueth_cleanup_rx_chns(struct prueth_emac *emac, if (rx_chn->rx_chn) k3_udma_glue_release_rx_chn(rx_chn->rx_chn); } +EXPORT_SYMBOL_GPL(prueth_cleanup_rx_chns); void prueth_cleanup_tx_chns(struct prueth_emac *emac) { @@ -71,6 +72,7 @@ void prueth_cleanup_tx_chns(struct prueth_emac *emac) memset(tx_chn, 0, sizeof(*tx_chn)); } } +EXPORT_SYMBOL_GPL(prueth_cleanup_tx_chns); void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num) { @@ -84,6 +86,7 @@ void prueth_ndev_del_tx_napi(struct prueth_emac *emac, int num) netif_napi_del(&tx_chn->napi_tx); } } +EXPORT_SYMBOL_GPL(prueth_ndev_del_tx_napi); void prueth_xmit_free(struct prueth_tx_chn *tx_chn, struct cppi5_host_desc_t *desc) @@ -120,6 +123,7 @@ void prueth_xmit_free(struct prueth_tx_chn *tx_chn, k3_cppi_desc_pool_free(tx_chn->desc_pool, first_desc); } +EXPORT_SYMBOL_GPL(prueth_xmit_free); int emac_tx_complete_packets(struct prueth_emac *emac, int chn, int budget, bool *tdown) @@ -264,6 +268,7 @@ fail: prueth_ndev_del_tx_napi(emac, i); return ret; } +EXPORT_SYMBOL_GPL(prueth_ndev_add_tx_napi); int prueth_init_tx_chns(struct prueth_emac *emac) { @@ -344,6 +349,7 @@ fail: prueth_cleanup_tx_chns(emac); return ret; } +EXPORT_SYMBOL_GPL(prueth_init_tx_chns); int prueth_init_rx_chns(struct prueth_emac *emac, struct prueth_rx_chn *rx_chn, @@ -440,9 +446,7 @@ int prueth_init_rx_chns(struct prueth_emac *emac, fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(rx_chn->rx_chn, i); ret = k3_udma_glue_rx_get_irq(rx_chn->rx_chn, i); - if (ret <= 0) { - if (!ret) - ret = -ENXIO; + if (ret < 0) { netdev_err(ndev, "Failed to get rx dma irq"); goto fail; } @@ -455,6 +459,7 @@ fail: prueth_cleanup_rx_chns(emac, rx_chn, max_rflows); return ret; } +EXPORT_SYMBOL_GPL(prueth_init_rx_chns); int prueth_dma_rx_push(struct prueth_emac *emac, struct sk_buff *skb, @@ -492,6 +497,7 @@ int prueth_dma_rx_push(struct prueth_emac *emac, return k3_udma_glue_push_rx_chn(rx_chn->rx_chn, 0, desc_rx, desc_dma); } +EXPORT_SYMBOL_GPL(prueth_dma_rx_push); u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 cycle_time_ns) { @@ -507,6 +513,7 @@ u64 icssg_ts_to_ns(u32 hi_sw, u32 hi, u32 lo, u32 
cycle_time_ns) return ns; } +EXPORT_SYMBOL_GPL(icssg_ts_to_ns); void emac_rx_timestamp(struct prueth_emac *emac, struct sk_buff *skb, u32 *psdata) @@ -581,6 +588,8 @@ static int emac_rx_packet(struct prueth_emac *emac, u32 flow_id) } else { /* send the filled skb up the n/w stack */ skb_put(skb, pkt_len); + if (emac->prueth->is_switch_mode) + skb->offload_fwd_mark = emac->offload_fwd_mark; skb->protocol = eth_type_trans(skb, ndev); napi_gro_receive(&emac->napi_rx, skb); ndev->stats.rx_bytes += pkt_len; @@ -636,7 +645,7 @@ static int prueth_tx_ts_cookie_get(struct prueth_emac *emac) } /** - * emac_ndo_start_xmit - EMAC Transmit function + * icssg_ndo_start_xmit - EMAC Transmit function * @skb: SKB pointer * @ndev: EMAC network adapter * @@ -647,7 +656,7 @@ static int prueth_tx_ts_cookie_get(struct prueth_emac *emac) * * Return: enum netdev_tx */ -enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) +enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct cppi5_host_desc_t *first_desc, *next_desc, *cur_desc; struct prueth_emac *emac = netdev_priv(ndev); @@ -806,6 +815,7 @@ drop_stop_q_busy: netif_tx_stop_queue(netif_txq); return NETDEV_TX_BUSY; } +EXPORT_SYMBOL_GPL(icssg_ndo_start_xmit); static void prueth_tx_cleanup(void *data, dma_addr_t desc_dma) { @@ -831,6 +841,7 @@ irqreturn_t prueth_rx_irq(int irq, void *dev_id) return IRQ_HANDLED; } +EXPORT_SYMBOL_GPL(prueth_rx_irq); void prueth_emac_stop(struct prueth_emac *emac) { @@ -855,6 +866,7 @@ void prueth_emac_stop(struct prueth_emac *emac) rproc_shutdown(prueth->rtu[slice]); rproc_shutdown(prueth->pru[slice]); } +EXPORT_SYMBOL_GPL(prueth_emac_stop); void prueth_cleanup_tx_ts(struct prueth_emac *emac) { @@ -867,8 +879,9 @@ void prueth_cleanup_tx_ts(struct prueth_emac *emac) } } } +EXPORT_SYMBOL_GPL(prueth_cleanup_tx_ts); -int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget) +int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget) { struct prueth_emac *emac = prueth_napi_to_emac(napi_rx); int rx_flow = emac->is_sr1 ? 
@@ -905,6 +918,7 @@ int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget) return num_rx; } +EXPORT_SYMBOL_GPL(icssg_napi_rx_poll); int prueth_prepare_rx_chan(struct prueth_emac *emac, struct prueth_rx_chn *chn, @@ -930,6 +944,7 @@ int prueth_prepare_rx_chan(struct prueth_emac *emac, return 0; } +EXPORT_SYMBOL_GPL(prueth_prepare_rx_chan); void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num, bool free_skb) @@ -944,6 +959,7 @@ void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num, k3_udma_glue_disable_tx_chn(emac->tx_chns[i].tx_chn); } } +EXPORT_SYMBOL_GPL(prueth_reset_tx_chan); void prueth_reset_rx_chan(struct prueth_rx_chn *chn, int num_flows, bool disable) @@ -956,11 +972,13 @@ void prueth_reset_rx_chan(struct prueth_rx_chn *chn, if (disable) k3_udma_glue_disable_rx_chn(chn->rx_chn); } +EXPORT_SYMBOL_GPL(prueth_reset_rx_chan); -void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue) +void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue) { ndev->stats.tx_errors++; } +EXPORT_SYMBOL_GPL(icssg_ndo_tx_timeout); static int emac_set_ts_config(struct net_device *ndev, struct ifreq *ifr) { @@ -1024,7 +1042,7 @@ static int emac_get_ts_config(struct net_device *ndev, struct ifreq *ifr) -EFAULT : 0; } -int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) +int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) { switch (cmd) { case SIOCGHWTSTAMP: @@ -1037,9 +1055,10 @@ int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) return phy_do_ioctl(ndev, ifr, cmd); } +EXPORT_SYMBOL_GPL(icssg_ndo_ioctl); -void emac_ndo_get_stats64(struct net_device *ndev, - struct rtnl_link_stats64 *stats) +void icssg_ndo_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats) { struct prueth_emac *emac = netdev_priv(ndev); @@ -1058,9 +1077,10 @@ void emac_ndo_get_stats64(struct net_device *ndev, stats->tx_errors = ndev->stats.tx_errors; stats->tx_dropped = ndev->stats.tx_dropped; } +EXPORT_SYMBOL_GPL(icssg_ndo_get_stats64); -int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name, - size_t len) +int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name, + size_t len) { struct prueth_emac *emac = netdev_priv(ndev); int ret; @@ -1071,6 +1091,7 @@ int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name, return 0; } +EXPORT_SYMBOL_GPL(icssg_ndo_get_phys_port_name); /* get emac_port corresponding to eth_node name */ int prueth_node_port(struct device_node *eth_node) @@ -1089,6 +1110,7 @@ int prueth_node_port(struct device_node *eth_node) else return PRUETH_PORT_INVALID; } +EXPORT_SYMBOL_GPL(prueth_node_port); /* get MAC instance corresponding to eth_node name */ int prueth_node_mac(struct device_node *eth_node) @@ -1107,6 +1129,7 @@ int prueth_node_mac(struct device_node *eth_node) else return PRUETH_MAC_INVALID; } +EXPORT_SYMBOL_GPL(prueth_node_mac); void prueth_netdev_exit(struct prueth *prueth, struct device_node *eth_node) @@ -1132,6 +1155,7 @@ void prueth_netdev_exit(struct prueth *prueth, free_netdev(emac->ndev); prueth->emac[mac] = NULL; } +EXPORT_SYMBOL_GPL(prueth_netdev_exit); int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1) { @@ -1182,6 +1206,7 @@ int prueth_get_cores(struct prueth *prueth, int slice, bool is_sr1) return 0; } +EXPORT_SYMBOL_GPL(prueth_get_cores); void prueth_put_cores(struct prueth *prueth, int slice) { @@ -1194,6 +1219,7 @@ void prueth_put_cores(struct prueth *prueth, int slice) if (prueth->pru[slice]) 
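/* release the PRU core acquired by prueth_get_cores() */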
pru_rproc_put(prueth->pru[slice]); } +EXPORT_SYMBOL_GPL(prueth_put_cores); #ifdef CONFIG_PM_SLEEP static int prueth_suspend(struct device *dev) @@ -1250,3 +1276,9 @@ static int prueth_resume(struct device *dev) const struct dev_pm_ops prueth_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(prueth_suspend, prueth_resume) }; +EXPORT_SYMBOL_GPL(prueth_dev_pm_ops); + +MODULE_AUTHOR("Roger Quadros <rogerq@kernel.org>"); +MODULE_AUTHOR("Md Danish Anwar <danishanwar@ti.com>"); +MODULE_DESCRIPTION("PRUSS ICSSG Ethernet Driver Common Module"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.c b/drivers/net/ethernet/ti/icssg/icssg_config.c index 15f2235bf90f..dae52a83a378 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.c +++ b/drivers/net/ethernet/ti/icssg/icssg_config.c @@ -107,28 +107,49 @@ static const struct map hwq_map[2][ICSSG_NUM_OTHER_QUEUES] = { }, }; -static void icssg_config_mii_init(struct prueth_emac *emac) +static void icssg_config_mii_init_switch(struct prueth_emac *emac) { - u32 rxcfg, txcfg, rxcfg_reg, txcfg_reg, pcnt_reg; struct prueth *prueth = emac->prueth; - int slice = prueth_emac_slice(emac); + int mii = prueth_emac_slice(emac); + u32 txcfg_reg, pcnt_reg, txcfg; + struct regmap *mii_rt; + + mii_rt = prueth->mii_rt; + + txcfg_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 : + PRUSS_MII_RT_TXCFG1; + pcnt_reg = (mii == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 : + PRUSS_MII_RT_RX_PCNT1; + + txcfg = PRUSS_MII_RT_TXCFG_TX_ENABLE | + PRUSS_MII_RT_TXCFG_TX_AUTO_PREAMBLE | + PRUSS_MII_RT_TXCFG_TX_IPG_WIRE_CLK_EN; + + if (emac->phy_if == PHY_INTERFACE_MODE_MII && mii == ICSS_MII1) + txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL; + else if (emac->phy_if != PHY_INTERFACE_MODE_MII && mii == ICSS_MII0) + txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL; + + regmap_write(mii_rt, txcfg_reg, txcfg); + regmap_write(mii_rt, pcnt_reg, 0x1); +} + +static void icssg_config_mii_init(struct prueth_emac *emac) +{ + struct prueth *prueth = emac->prueth; + int slice = prueth_emac_slice(emac); + u32 txcfg, txcfg_reg, pcnt_reg; struct regmap *mii_rt; mii_rt = prueth->mii_rt; - rxcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RXCFG0 : - PRUSS_MII_RT_RXCFG1; txcfg_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_TXCFG0 : PRUSS_MII_RT_TXCFG1; pcnt_reg = (slice == ICSS_MII0) ? PRUSS_MII_RT_RX_PCNT0 : PRUSS_MII_RT_RX_PCNT1; - rxcfg = MII_RXCFG_DEFAULT; txcfg = MII_TXCFG_DEFAULT; - if (slice == ICSS_MII1) - rxcfg |= PRUSS_MII_RT_RXCFG_RX_MUX_SEL; - /* In MII mode TX lines swapped inside ICSSG, so TX_MUX_SEL cfg need * to be swapped also comparing to RGMII mode.
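* (Compare icssg_config_mii_init_switch() above, which applies the opposite per-slice TX_MUX_SEL selection for the switch firmware.)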
*/ @@ -137,7 +158,6 @@ static void icssg_config_mii_init(struct prueth_emac *emac) else if (emac->phy_if != PHY_INTERFACE_MODE_MII && slice == ICSS_MII1) txcfg |= PRUSS_MII_RT_TXCFG_TX_MUX_SEL; - regmap_write(mii_rt, rxcfg_reg, rxcfg); regmap_write(mii_rt, txcfg_reg, txcfg); regmap_write(mii_rt, pcnt_reg, 0x1); } @@ -228,6 +248,7 @@ void icssg_config_ipg(struct prueth_emac *emac) icssg_mii_update_ipg(prueth->mii_rt, slice, ipg); } +EXPORT_SYMBOL_GPL(icssg_config_ipg); static void emac_r30_cmd_init(struct prueth_emac *emac) { @@ -257,6 +278,66 @@ static int emac_r30_is_done(struct prueth_emac *emac) return 1; } +static int prueth_switch_buffer_setup(struct prueth_emac *emac) +{ + struct icssg_buffer_pool_cfg __iomem *bpool_cfg; + struct icssg_rxq_ctx __iomem *rxq_ctx; + struct prueth *prueth = emac->prueth; + int slice = prueth_emac_slice(emac); + u32 addr; + int i; + + addr = lower_32_bits(prueth->msmcram.pa); + if (slice) + addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; + + if (addr % SZ_64K) { + dev_warn(prueth->dev, "buffer pool needs to be 64KB aligned\n"); + return -EINVAL; + } + + bpool_cfg = emac->dram.va + BUFFER_POOL_0_ADDR_OFFSET; + /* workaround for f/w bug. bpool 0 needs to be initialized */ + for (i = 0; i < PRUETH_NUM_BUF_POOLS; i++) { + writel(addr, &bpool_cfg[i].addr); + writel(PRUETH_EMAC_BUF_POOL_SIZE, &bpool_cfg[i].len); + addr += PRUETH_EMAC_BUF_POOL_SIZE; + } + + if (!slice) + addr += PRUETH_NUM_BUF_POOLS * PRUETH_EMAC_BUF_POOL_SIZE; + else + addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST; + + for (i = PRUETH_NUM_BUF_POOLS; + i < 2 * PRUETH_SW_NUM_BUF_POOLS_HOST + PRUETH_NUM_BUF_POOLS; + i++) { + /* The driver only uses first 4 queues per PRU so only initialize them */ + if (i % PRUETH_SW_NUM_BUF_POOLS_HOST < PRUETH_SW_NUM_BUF_POOLS_PER_PRU) { + writel(addr, &bpool_cfg[i].addr); + writel(PRUETH_SW_BUF_POOL_SIZE_HOST, &bpool_cfg[i].len); + addr += PRUETH_SW_BUF_POOL_SIZE_HOST; + } else { + writel(0, &bpool_cfg[i].addr); + writel(0, &bpool_cfg[i].len); + } + } + + if (!slice) + addr += PRUETH_SW_NUM_BUF_POOLS_HOST * PRUETH_SW_BUF_POOL_SIZE_HOST; + else + addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; + + rxq_ctx = emac->dram.va + HOST_RX_Q_PRE_CONTEXT_OFFSET; + for (i = 0; i < 3; i++) + writel(addr, &rxq_ctx->start[i]); + + addr += PRUETH_EMAC_RX_CTX_BUF_SIZE; + writel(addr - SZ_2K, &rxq_ctx->end); + + return 0; +} + static int prueth_emac_buffer_setup(struct prueth_emac *emac) { struct icssg_buffer_pool_cfg __iomem *bpool_cfg; @@ -321,25 +402,63 @@ static void icssg_init_emac_mode(struct prueth *prueth) /* When the device is configured as a bridge and it is being brought * back to the emac mode, the host mac address has to be set as 0. 
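* The VLAN table base and the 1:1 VID-to-FID mapping below are still programmed, mirroring icssg_init_switch_mode(), so both modes share one table layout.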
*/ + u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET; + int i; u8 mac[ETH_ALEN] = { 0 }; if (prueth->emacs_initialized) return; - regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, - SMEM_VLAN_OFFSET_MASK, 0); - regmap_write(prueth->miig_rt, FDB_GEN_CFG2, 0); + /* Set VLAN TABLE address base */ + regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK, + addr << SMEM_VLAN_OFFSET); + /* Enable VLAN-aware mode and FDBs for all PRUs */ + regmap_write(prueth->miig_rt, FDB_GEN_CFG2, (FDB_PRU0_EN | FDB_PRU1_EN | FDB_HOST_EN)); + prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va + + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET); + for (i = 0; i < SZ_4K - 1; i++) { + prueth->vlan_tbl[i].fid = i; + prueth->vlan_tbl[i].fid_c1 = 0; + } /* Clear host MAC address */ icssg_class_set_host_mac_addr(prueth->miig_rt, mac); } +static void icssg_init_switch_mode(struct prueth *prueth) +{ + u32 addr = prueth->shram.pa + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET; + int i; + + if (prueth->emacs_initialized) + return; + + /* Set VLAN TABLE address base */ + regmap_update_bits(prueth->miig_rt, FDB_GEN_CFG1, SMEM_VLAN_OFFSET_MASK, + addr << SMEM_VLAN_OFFSET); + /* Enable VLAN-aware mode and FDBs for all PRUs */ + regmap_write(prueth->miig_rt, FDB_GEN_CFG2, FDB_EN_ALL); + prueth->vlan_tbl = (struct prueth_vlan_tbl __force *)(prueth->shram.va + + EMAC_ICSSG_SWITCH_DEFAULT_VLAN_TABLE_OFFSET); + for (i = 0; i < SZ_4K - 1; i++) { + prueth->vlan_tbl[i].fid = i; + prueth->vlan_tbl[i].fid_c1 = 0; + } + + if (prueth->hw_bridge_dev) + icssg_class_set_host_mac_addr(prueth->miig_rt, prueth->hw_bridge_dev->dev_addr); + icssg_set_pvid(prueth, prueth->default_vlan, PRUETH_PORT_HOST); +} + int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice) { void __iomem *config = emac->dram.va + ICSSG_CONFIG_OFFSET; struct icssg_flow_cfg __iomem *flow_cfg; int ret; - icssg_init_emac_mode(prueth); + if (prueth->is_switch_mode) + icssg_init_switch_mode(prueth); + else + icssg_init_emac_mode(prueth); memset_io(config, 0, TAS_GATE_MASK_LIST0); icssg_miig_queues_init(prueth, slice); @@ -353,7 +472,10 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice) regmap_update_bits(prueth->miig_rt, ICSSG_CFG_OFFSET, ICSSG_CFG_DEFAULT, ICSSG_CFG_DEFAULT); icssg_miig_set_interface_mode(prueth->miig_rt, slice, emac->phy_if); - icssg_config_mii_init(emac); + if (prueth->is_switch_mode) + icssg_config_mii_init_switch(emac); + else + icssg_config_mii_init(emac); icssg_config_ipg(emac); icssg_update_rgmii_cfg(prueth->miig_rt, emac); @@ -376,7 +498,10 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice) writeb(0, config + SPL_PKT_DEFAULT_PRIORITY); writeb(0, config + QUEUE_NUM_UNTAGGED); - ret = prueth_emac_buffer_setup(emac); + if (prueth->is_switch_mode) + ret = prueth_switch_buffer_setup(emac); + else + ret = prueth_emac_buffer_setup(emac); if (ret) return ret; @@ -384,6 +509,7 @@ int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice) return 0; } +EXPORT_SYMBOL_GPL(icssg_config); /* Bitmask for ICSSG r30 commands */ static const struct icssg_r30_cmd emac_r32_bitmask[] = { @@ -408,8 +534,8 @@ static const struct icssg_r30_cmd emac_r32_bitmask[] = { {{0xffef0000, EMAC_NONE, 0xffef0000, EMAC_NONE}} /* VLAN UNWARE*/ }; -int emac_set_port_state(struct prueth_emac *emac, - enum icssg_port_state_cmd cmd) +int icssg_set_port_state(struct prueth_emac *emac, + enum icssg_port_state_cmd cmd) { struct
icssg_r30_cmd __iomem *p; int ret = -ETIMEDOUT; @@ -440,6 +566,7 @@ int emac_set_port_state(struct prueth_emac *emac, return ret; } +EXPORT_SYMBOL_GPL(icssg_set_port_state); void icssg_config_half_duplex(struct prueth_emac *emac) { @@ -451,6 +578,7 @@ void icssg_config_half_duplex(struct prueth_emac *emac) val = get_random_u32(); writel(val, emac->dram.va + HD_RAND_SEED_OFFSET); } +EXPORT_SYMBOL_GPL(icssg_config_half_duplex); void icssg_config_set_speed(struct prueth_emac *emac) { @@ -477,3 +605,180 @@ void icssg_config_set_speed(struct prueth_emac *emac) writeb(fw_speed, emac->dram.va + PORT_LINK_SPEED_OFFSET); } +EXPORT_SYMBOL_GPL(icssg_config_set_speed); + +int icssg_send_fdb_msg(struct prueth_emac *emac, struct mgmt_cmd *cmd, + struct mgmt_cmd_rsp *rsp) +{ + struct prueth *prueth = emac->prueth; + int slice = prueth_emac_slice(emac); + int addr, ret; + + addr = icssg_queue_pop(prueth, slice == 0 ? + ICSSG_CMD_POP_SLICE0 : ICSSG_CMD_POP_SLICE1); + if (addr < 0) + return addr; + + /* First 4 bytes have FW owned buffer linking info which should + * not be touched + */ + memcpy_toio(prueth->shram.va + addr + 4, cmd, sizeof(*cmd)); + icssg_queue_push(prueth, slice == 0 ? + ICSSG_CMD_PUSH_SLICE0 : ICSSG_CMD_PUSH_SLICE1, addr); + ret = read_poll_timeout(icssg_queue_pop, addr, addr >= 0, + 2000, 20000000, false, prueth, slice == 0 ? + ICSSG_RSP_POP_SLICE0 : ICSSG_RSP_POP_SLICE1); + if (ret) { + netdev_err(emac->ndev, "Timed out sending HWQ message\n"); + return ret; + } + + memcpy_fromio(rsp, prueth->shram.va + addr, sizeof(*rsp)); + /* Return the buffer back to the pool */ + icssg_queue_push(prueth, slice == 0 ? + ICSSG_RSP_PUSH_SLICE0 : ICSSG_RSP_PUSH_SLICE1, addr); + + return 0; +} +EXPORT_SYMBOL_GPL(icssg_send_fdb_msg); + +static void icssg_fdb_setup(struct prueth_emac *emac, struct mgmt_cmd *fdb_cmd, + const unsigned char *addr, u8 fid, int cmd) +{ + int slice = prueth_emac_slice(emac); + u8 mac_fid[ETH_ALEN + 2]; + u16 fdb_slot; + + ether_addr_copy(mac_fid, addr); + + /* 1-1 VID-FID mapping is already set up */ + mac_fid[ETH_ALEN] = fid; + mac_fid[ETH_ALEN + 1] = 0; + + fdb_slot = bitrev32(crc32_le(0, mac_fid, 8)) & PRUETH_SWITCH_FDB_MASK; + + fdb_cmd->header = ICSSG_FW_MGMT_CMD_HEADER; + fdb_cmd->type = ICSSG_FW_MGMT_FDB_CMD_TYPE; + fdb_cmd->seqnum = ++(emac->prueth->icssg_hwcmdseq); + fdb_cmd->param = cmd; + fdb_cmd->param |= (slice << 4); + + memcpy(&fdb_cmd->cmd_args[0], addr, 4); + memcpy(&fdb_cmd->cmd_args[1], &addr[4], 2); + fdb_cmd->cmd_args[2] = fdb_slot; + + netdev_dbg(emac->ndev, "MAC %pM slot %X FID %X\n", addr, fdb_slot, fid); +} + +int icssg_fdb_add_del(struct prueth_emac *emac, const unsigned char *addr, + u8 vid, u8 fid_c2, bool add) +{ + struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 }; + struct mgmt_cmd fdb_cmd = { 0 }; + u8 fid = vid; + int ret; + + icssg_fdb_setup(emac, &fdb_cmd, addr, fid, add ?
ICSS_CMD_ADD_FDB : ICSS_CMD_DEL_FDB); + + fid_c2 |= ICSSG_FDB_ENTRY_VALID; + fdb_cmd.cmd_args[1] |= ((fid << 16) | (fid_c2 << 24)); + + ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp); + if (ret) + return ret; + + WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum); + if (fdb_cmd_rsp.status == 1) + return 0; + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(icssg_fdb_add_del); + +int icssg_fdb_lookup(struct prueth_emac *emac, const unsigned char *addr, + u8 vid) +{ + struct mgmt_cmd_rsp fdb_cmd_rsp = { 0 }; + struct mgmt_cmd fdb_cmd = { 0 }; + struct prueth_fdb_slot *slot; + u8 fid = vid; + int ret, i; + + icssg_fdb_setup(emac, &fdb_cmd, addr, fid, ICSS_CMD_GET_FDB_SLOT); + + fdb_cmd.cmd_args[1] |= fid << 16; + + ret = icssg_send_fdb_msg(emac, &fdb_cmd, &fdb_cmd_rsp); + if (ret) + return ret; + + WARN_ON(fdb_cmd.seqnum != fdb_cmd_rsp.seqnum); + + slot = (struct prueth_fdb_slot __force *)(emac->dram.va + FDB_CMD_BUFFER); + for (i = 0; i < 4; i++) { + if (ether_addr_equal(addr, slot->mac) && vid == slot->fid) + return (slot->fid_c2 & ~ICSSG_FDB_ENTRY_VALID); + slot++; + } + + return 0; +} +EXPORT_SYMBOL_GPL(icssg_fdb_lookup); + +void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask, + u8 untag_mask, bool add) +{ + struct prueth *prueth = emac->prueth; + struct prueth_vlan_tbl *tbl; + u8 fid_c1; + + tbl = prueth->vlan_tbl; + fid_c1 = tbl[vid].fid_c1; + + /* FID_C1: bit0..2 port membership mask, + * bit3..5 tagging mask for each port + * bit6 Stream VID (not handled currently) + * bit7 MC flood (not handled currently) + */ + if (add) { + fid_c1 |= (port_mask | port_mask << 3); + fid_c1 &= ~(untag_mask << 3); + } else { + fid_c1 &= ~(port_mask | port_mask << 3); + } + + tbl[vid].fid_c1 = fid_c1; +} +EXPORT_SYMBOL_GPL(icssg_vtbl_modify); + +u16 icssg_get_pvid(struct prueth_emac *emac) +{ + struct prueth *prueth = emac->prueth; + u32 pvid; + + if (emac->port_id == PRUETH_PORT_MII0) + pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET); + else + pvid = readl(prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET); + + pvid = pvid >> 24; + + return pvid; +} +EXPORT_SYMBOL_GPL(icssg_get_pvid); + +void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port) +{ + u32 pvid; + + /* only 256 VLANs are supported */ + pvid = (u32 __force)cpu_to_be32((ETH_P_8021Q << 16) | (vid & 0xff)); + + if (port == PRUETH_PORT_MII0) + writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT1_DEFAULT_VLAN_OFFSET); + else if (port == PRUETH_PORT_MII1) + writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT2_DEFAULT_VLAN_OFFSET); + else + writel(pvid, prueth->shram.va + EMAC_ICSSG_SWITCH_PORT0_DEFAULT_VLAN_OFFSET); +} +EXPORT_SYMBOL_GPL(icssg_set_pvid); diff --git a/drivers/net/ethernet/ti/icssg/icssg_config.h b/drivers/net/ethernet/ti/icssg/icssg_config.h index cf2ea4bd22a2..1ac60283923b 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_config.h +++ b/drivers/net/ethernet/ti/icssg/icssg_config.h @@ -35,6 +35,15 @@ struct icssg_flow_cfg { (2 * (PRUETH_EMAC_BUF_POOL_SIZE * PRUETH_NUM_BUF_POOLS + \ PRUETH_EMAC_RX_CTX_BUF_SIZE * 2)) +#define PRUETH_SW_BUF_POOL_SIZE_HOST SZ_4K +#define PRUETH_SW_NUM_BUF_POOLS_HOST 8 +#define PRUETH_SW_NUM_BUF_POOLS_PER_PRU 4 +#define MSMC_RAM_SIZE_SWITCH_MODE \ + (MSMC_RAM_SIZE + \ + (2 * PRUETH_SW_BUF_POOL_SIZE_HOST * PRUETH_SW_NUM_BUF_POOLS_HOST)) + +#define PRUETH_SWITCH_FDB_MASK ((SIZE_OF_FDB / NUMBER_OF_FDB_BUCKET_ENTRIES) - 1) + struct icssg_rxq_ctx { __le32 start[3]; __le32 end; @@ -202,6 +211,23 @@ struct icssg_setclock_desc { #define 
ICSSG_TS_PUSH_SLICE0 40 #define ICSSG_TS_PUSH_SLICE1 41 +struct mgmt_cmd { + u8 param; + u8 seqnum; + u8 type; + u8 header; + u32 cmd_args[3]; +}; + +struct mgmt_cmd_rsp { + u32 reserved; + u8 status; + u8 seqnum; + u8 type; + u8 header; + u32 cmd_args[3]; +}; + /* FDB FID_C2 flag definitions */ /* Indicates host port membership.*/ #define ICSSG_FDB_ENTRY_P0_MEMBERSHIP BIT(0) diff --git a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c index c8d0f45cc5b1..5688f054cec5 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_ethtool.c +++ b/drivers/net/ethernet/ti/icssg/icssg_ethtool.c @@ -110,7 +110,7 @@ static void emac_get_ethtool_stats(struct net_device *ndev, } static int emac_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct prueth_emac *emac = netdev_priv(ndev); @@ -312,3 +312,4 @@ const struct ethtool_ops icssg_ethtool_ops = { .nway_reset = emac_nway_reset, .get_rmon_stats = emac_get_rmon_stats, }; +EXPORT_SYMBOL_GPL(icssg_ethtool_ops); diff --git a/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c b/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c index 92718ae40d7e..b64955438bb2 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c +++ b/drivers/net/ethernet/ti/icssg/icssg_mii_cfg.c @@ -40,6 +40,7 @@ void icssg_mii_update_mtu(struct regmap *mii_rt, int mii, int mtu) (mtu - 1) << PRUSS_MII_RT_RX_FRMS_MAX_FRM_SHIFT); } } +EXPORT_SYMBOL_GPL(icssg_mii_update_mtu); void icssg_update_rgmii_cfg(struct regmap *miig_rt, struct prueth_emac *emac) { @@ -66,6 +67,7 @@ void icssg_update_rgmii_cfg(struct regmap *miig_rt, struct prueth_emac *emac) regmap_update_bits(miig_rt, RGMII_CFG_OFFSET, full_duplex_mask, full_duplex_val); } +EXPORT_SYMBOL_GPL(icssg_update_rgmii_cfg); void icssg_miig_set_interface_mode(struct regmap *miig_rt, int mii, phy_interface_t phy_if) { @@ -105,6 +107,7 @@ u32 icssg_rgmii_get_speed(struct regmap *miig_rt, int mii) return icssg_rgmii_cfg_get_bitfield(miig_rt, mask, shift); } +EXPORT_SYMBOL_GPL(icssg_rgmii_get_speed); u32 icssg_rgmii_get_fullduplex(struct regmap *miig_rt, int mii) { @@ -118,3 +121,4 @@ u32 icssg_rgmii_get_fullduplex(struct regmap *miig_rt, int mii) return icssg_rgmii_cfg_get_bitfield(miig_rt, mask, shift); } +EXPORT_SYMBOL_GPL(icssg_rgmii_get_fullduplex); diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.c b/drivers/net/ethernet/ti/icssg/icssg_prueth.c index 1ea3fbd5e954..3e51b3a9b0a5 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.c @@ -27,13 +27,19 @@ #include <linux/remoteproc/pruss.h> #include <linux/regmap.h> #include <linux/remoteproc.h> +#include <net/switchdev.h> #include "icssg_prueth.h" #include "icssg_mii_rt.h" +#include "icssg_switchdev.h" #include "../k3-cppi-desc-pool.h" #define PRUETH_MODULE_DESCRIPTION "PRUSS ICSSG Ethernet driver" +#define DEFAULT_VID 1 +#define DEFAULT_PORT_MASK 1 +#define DEFAULT_UNTAG_MASK 1 + /* CTRLMMR_ICSSG_RGMII_CTRL register bits */ #define ICSSG_CTRL_RGMII_ID_MODE BIT(24) @@ -112,6 +118,19 @@ static irqreturn_t prueth_tx_ts_irq(int irq, void *dev_id) return IRQ_HANDLED; } +static struct icssg_firmwares icssg_switch_firmwares[] = { + { + .pru = "ti-pruss/am65x-sr2-pru0-prusw-fw.elf", + .rtu = "ti-pruss/am65x-sr2-rtu0-prusw-fw.elf", + .txpru = "ti-pruss/am65x-sr2-txpru0-prusw-fw.elf", + }, + { + .pru = "ti-pruss/am65x-sr2-pru1-prusw-fw.elf", + .rtu = "ti-pruss/am65x-sr2-rtu1-prusw-fw.elf", + .txpru = "ti-pruss/am65x-sr2-txpru1-prusw-fw.elf", + } +}; + static struct icssg_firmwares icssg_emac_firmwares[] = { { .pru =
"ti-pruss/am65x-sr2-pru0-prueth-fw.elf", @@ -131,7 +150,10 @@ static int prueth_emac_start(struct prueth *prueth, struct prueth_emac *emac) struct device *dev = prueth->dev; int slice, ret; - firmwares = icssg_emac_firmwares; + if (prueth->is_switch_mode) + firmwares = icssg_switch_firmwares; + else + firmwares = icssg_emac_firmwares; slice = prueth_emac_slice(emac); if (slice < 0) { @@ -227,10 +249,10 @@ static void emac_adjust_link(struct net_device *ndev) icssg_config_ipg(emac); spin_unlock_irqrestore(&emac->lock, flags); icssg_config_set_speed(emac); - emac_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD); + icssg_set_port_state(emac, ICSSG_EMAC_PORT_FORWARD); } else { - emac_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE); + icssg_set_port_state(emac, ICSSG_EMAC_PORT_DISABLE); } } @@ -417,6 +439,37 @@ const struct icss_iep_clockops prueth_iep_clockops = { .perout_enable = prueth_perout_enable, }; +static int icssg_prueth_add_mcast(struct net_device *ndev, const u8 *addr) +{ + struct prueth_emac *emac = netdev_priv(ndev); + int port_mask = BIT(emac->port_id); + + port_mask |= icssg_fdb_lookup(emac, addr, 0); + icssg_fdb_add_del(emac, addr, 0, port_mask, true); + icssg_vtbl_modify(emac, 0, port_mask, port_mask, true); + + return 0; +} + +static int icssg_prueth_del_mcast(struct net_device *ndev, const u8 *addr) +{ + struct prueth_emac *emac = netdev_priv(ndev); + int port_mask = BIT(emac->port_id); + int other_port_mask; + + other_port_mask = port_mask ^ icssg_fdb_lookup(emac, addr, 0); + + icssg_fdb_add_del(emac, addr, 0, port_mask, false); + icssg_vtbl_modify(emac, 0, port_mask, port_mask, false); + + if (other_port_mask) { + icssg_fdb_add_del(emac, addr, 0, other_port_mask, true); + icssg_vtbl_modify(emac, 0, other_port_mask, other_port_mask, true); + } + + return 0; +} + /** * emac_ndo_open - EMAC device open * @ndev: network adapter device @@ -445,9 +498,8 @@ static int emac_ndo_open(struct net_device *ndev) ether_addr_copy(emac->mac_addr, ndev->dev_addr); icssg_class_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr); - icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr); - icssg_class_default(prueth->miig_rt, slice, 0, false); + icssg_ft1_set_mac_addr(prueth->miig_rt, slice, emac->mac_addr); /* Notify the stack of the actual queue counts. 
*/ ret = netif_set_real_num_tx_queues(ndev, num_data_chn); @@ -578,6 +630,8 @@ static int emac_ndo_stop(struct net_device *ndev) icssg_class_disable(prueth->miig_rt, prueth_emac_slice(emac)); + __dev_mc_unsync(ndev, icssg_prueth_del_mcast); + atomic_set(&emac->tdown_cnt, emac->tx_ch_num); /* ensure new tdown_cnt value is visible */ smp_mb__after_atomic(); @@ -640,24 +694,21 @@ static void emac_ndo_set_rx_mode_work(struct work_struct *work) promisc = ndev->flags & IFF_PROMISC; allmulti = ndev->flags & IFF_ALLMULTI; - emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE); - emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE); + icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_DISABLE); + icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_DISABLE); if (promisc) { - emac_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE); - emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE); + icssg_set_port_state(emac, ICSSG_EMAC_PORT_UC_FLOODING_ENABLE); + icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE); return; } if (allmulti) { - emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE); + icssg_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE); return; } - if (!netdev_mc_empty(ndev)) { - emac_set_port_state(emac, ICSSG_EMAC_PORT_MC_FLOODING_ENABLE); - return; - } + __dev_mc_sync(ndev, icssg_prueth_add_mcast, icssg_prueth_del_mcast); } /** @@ -677,14 +728,14 @@ static void emac_ndo_set_rx_mode(struct net_device *ndev) static const struct net_device_ops emac_netdev_ops = { .ndo_open = emac_ndo_open, .ndo_stop = emac_ndo_stop, - .ndo_start_xmit = emac_ndo_start_xmit, + .ndo_start_xmit = icssg_ndo_start_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_tx_timeout = emac_ndo_tx_timeout, + .ndo_tx_timeout = icssg_ndo_tx_timeout, .ndo_set_rx_mode = emac_ndo_set_rx_mode, - .ndo_eth_ioctl = emac_ndo_ioctl, - .ndo_get_stats64 = emac_ndo_get_stats64, - .ndo_get_phys_port_name = emac_ndo_get_phys_port_name, + .ndo_eth_ioctl = icssg_ndo_ioctl, + .ndo_get_stats64 = icssg_ndo_get_stats64, + .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name, }; static int prueth_netdev_init(struct prueth *prueth, @@ -720,7 +771,7 @@ static int prueth_netdev_init(struct prueth *prueth, } INIT_WORK(&emac->rx_mode_work, emac_ndo_set_rx_mode_work); - INIT_DELAYED_WORK(&emac->stats_work, emac_stats_work_handler); + INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler); ret = pruss_request_mem_region(prueth->pruss, port == PRUETH_PORT_MII0 ? 
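The reworked set_rx_mode path above no longer turns on MC flooding whenever the filter list is non-empty; it syncs each multicast address into the firmware FDB via icssg_prueth_add_mcast()/icssg_prueth_del_mcast() instead. A hedged sketch of the port-mask arithmetic those callbacks perform (helper names are illustrative; the usual host = 0, MII0 = 1, MII1 = 2 port numbering is assumed):

#include <linux/bits.h>

/* join: OR the joining port into whatever mask the FDB already holds */
static int example_join_mask(int stored_mask, int port_id)
{
	return stored_mask | BIT(port_id);
}

/* leave: XOR strips the leaving port from the stored mask; a non-zero
 * result means the entry must be re-added for the remaining members
 */
static int example_leave_mask(int stored_mask, int port_id)
{
	return BIT(port_id) ^ stored_mask;
}

With MII1 already subscribed (stored mask BIT(2)), MII0 joining stores BIT(1) | BIT(2); when MII0 later leaves, the leave mask comes out as BIT(2), so the entry is re-added for MII1 alone and that port keeps receiving the group.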
@@ -813,7 +864,7 @@ static int prueth_netdev_init(struct prueth *prueth, ndev->hw_features = NETIF_F_SG; ndev->features = ndev->hw_features; - netif_napi_add(ndev, &emac->napi_rx, emac_napi_rx_poll); + netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll); hrtimer_init(&emac->rx_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED); emac->rx_hrtimer.function = &emac_rx_timer_callback; @@ -833,6 +884,214 @@ free_ndev: return ret; } +bool prueth_dev_check(const struct net_device *ndev) +{ + if (ndev->netdev_ops == &emac_netdev_ops && netif_running(ndev)) { + struct prueth_emac *emac = netdev_priv(ndev); + + return emac->prueth->is_switch_mode; + } + + return false; +} + +static void prueth_offload_fwd_mark_update(struct prueth *prueth) +{ + int set_val = 0; + int i; + + if (prueth->br_members == (BIT(PRUETH_PORT_MII0) | BIT(PRUETH_PORT_MII1))) + set_val = 1; + + dev_dbg(prueth->dev, "set offload_fwd_mark %d\n", set_val); + + for (i = PRUETH_MAC0; i < PRUETH_NUM_MACS; i++) { + struct prueth_emac *emac = prueth->emac[i]; + + if (!emac || !emac->ndev) + continue; + + emac->offload_fwd_mark = set_val; + } +} + +static void prueth_emac_restart(struct prueth *prueth) +{ + struct prueth_emac *emac0 = prueth->emac[PRUETH_MAC0]; + struct prueth_emac *emac1 = prueth->emac[PRUETH_MAC1]; + + /* Detach the net_device for both PRUeth ports */ + if (netif_running(emac0->ndev)) + netif_device_detach(emac0->ndev); + if (netif_running(emac1->ndev)) + netif_device_detach(emac1->ndev); + + /* Disable both PRUeth ports */ + icssg_set_port_state(emac0, ICSSG_EMAC_PORT_DISABLE); + icssg_set_port_state(emac1, ICSSG_EMAC_PORT_DISABLE); + + /* Stop both pru cores for both PRUeth ports */ + prueth_emac_stop(emac0); + prueth->emacs_initialized--; + prueth_emac_stop(emac1); + prueth->emacs_initialized--; + + /* Start both pru cores for both PRUeth ports */ + prueth_emac_start(prueth, emac0); + prueth->emacs_initialized++; + prueth_emac_start(prueth, emac1); + prueth->emacs_initialized++; + + /* Enable forwarding for both PRUeth ports */ + icssg_set_port_state(emac0, ICSSG_EMAC_PORT_FORWARD); + icssg_set_port_state(emac1, ICSSG_EMAC_PORT_FORWARD); + + /* Attach net_device for both PRUeth ports */ + netif_device_attach(emac0->ndev); + netif_device_attach(emac1->ndev); +} + +static void icssg_enable_switch_mode(struct prueth *prueth) +{ + struct prueth_emac *emac; + int mac; + + prueth_emac_restart(prueth); + + for (mac = PRUETH_MAC0; mac < PRUETH_NUM_MACS; mac++) { + emac = prueth->emac[mac]; + if (netif_running(emac->ndev)) { + icssg_fdb_add_del(emac, eth_stp_addr, prueth->default_vlan, + ICSSG_FDB_ENTRY_P0_MEMBERSHIP | + ICSSG_FDB_ENTRY_P1_MEMBERSHIP | + ICSSG_FDB_ENTRY_P2_MEMBERSHIP | + ICSSG_FDB_ENTRY_BLOCK, + true); + icssg_vtbl_modify(emac, emac->port_vlan | DEFAULT_VID, + BIT(emac->port_id) | DEFAULT_PORT_MASK, + BIT(emac->port_id) | DEFAULT_UNTAG_MASK, + true); + icssg_set_pvid(prueth, emac->port_vlan, emac->port_id); + icssg_set_port_state(emac, ICSSG_EMAC_PORT_VLAN_AWARE_ENABLE); + } + } +} + +static int prueth_netdevice_port_link(struct net_device *ndev, + struct net_device *br_ndev, + struct netlink_ext_ack *extack) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth *prueth = emac->prueth; + int err; + + if (!prueth->br_members) { + prueth->hw_bridge_dev = br_ndev; + } else { + /* Adding the port to a second bridge is + * unsupported + */ + if (prueth->hw_bridge_dev != br_ndev) + return -EOPNOTSUPP; + } + + err = switchdev_bridge_port_offload(br_ndev, ndev, emac,
&prueth->prueth_switchdev_nb, + &prueth->prueth_switchdev_bl_nb, + false, extack); + if (err) + return err; + + prueth->br_members |= BIT(emac->port_id); + + if (!prueth->is_switch_mode) { + if (prueth->br_members & BIT(PRUETH_PORT_MII0) && + prueth->br_members & BIT(PRUETH_PORT_MII1)) { + prueth->is_switch_mode = true; + prueth->default_vlan = 1; + emac->port_vlan = prueth->default_vlan; + icssg_enable_switch_mode(prueth); + } + } + + prueth_offload_fwd_mark_update(prueth); + + return NOTIFY_DONE; +} + +static void prueth_netdevice_port_unlink(struct net_device *ndev) +{ + struct prueth_emac *emac = netdev_priv(ndev); + struct prueth *prueth = emac->prueth; + + prueth->br_members &= ~BIT(emac->port_id); + + if (prueth->is_switch_mode) { + prueth->is_switch_mode = false; + emac->port_vlan = 0; + prueth_emac_restart(prueth); + } + + prueth_offload_fwd_mark_update(prueth); + + if (!prueth->br_members) + prueth->hw_bridge_dev = NULL; +} + +/* netdev notifier */ +static int prueth_netdevice_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); + struct net_device *ndev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_changeupper_info *info; + int ret = NOTIFY_DONE; + + if (ndev->netdev_ops != &emac_netdev_ops) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_CHANGEUPPER: + info = ptr; + + if (netif_is_bridge_master(info->upper_dev)) { + if (info->linking) + ret = prueth_netdevice_port_link(ndev, info->upper_dev, extack); + else + prueth_netdevice_port_unlink(ndev); + } + break; + default: + return NOTIFY_DONE; + } + + return notifier_from_errno(ret); +} + +static int prueth_register_notifiers(struct prueth *prueth) +{ + int ret = 0; + + prueth->prueth_netdevice_nb.notifier_call = &prueth_netdevice_event; + ret = register_netdevice_notifier(&prueth->prueth_netdevice_nb); + if (ret) { + dev_err(prueth->dev, "can't register netdevice notifier\n"); + return ret; + } + + ret = prueth_switchdev_register_notifiers(prueth); + if (ret) + unregister_netdevice_notifier(&prueth->prueth_netdevice_nb); + + return ret; +} + +static void prueth_unregister_notifiers(struct prueth *prueth) +{ + prueth_switchdev_unregister_notifiers(prueth); + unregister_netdevice_notifier(&prueth->prueth_netdevice_nb); +} + static int prueth_probe(struct platform_device *pdev) { struct device_node *eth_node, *eth_ports_node; @@ -960,6 +1219,9 @@ static int prueth_probe(struct platform_device *pdev) } msmc_ram_size = MSMC_RAM_SIZE; + prueth->is_switchmode_supported = prueth->pdata.switch_mode; + if (prueth->is_switchmode_supported) + msmc_ram_size = MSMC_RAM_SIZE_SWITCH_MODE; /* NOTE: FW bug needs buffer base to be 64KB aligned */ prueth->msmcram.va = @@ -1065,6 +1327,14 @@ static int prueth_probe(struct platform_device *pdev) phy_attached_info(prueth->emac[PRUETH_MAC1]->ndev->phydev); } + if (prueth->is_switchmode_supported) { + ret = prueth_register_notifiers(prueth); + if (ret) + goto netdev_unregister; + + sprintf(prueth->switch_id, "%s", dev_name(dev)); + } + dev_info(dev, "TI PRU ethernet driver initialized: %s EMAC mode\n", (!eth0_node || !eth1_node) ? 
"single" : "dual"); @@ -1134,6 +1404,8 @@ static void prueth_remove(struct platform_device *pdev) struct device_node *eth_node; int i; + prueth_unregister_notifiers(prueth); + for (i = 0; i < PRUETH_NUM_MACS; i++) { if (!prueth->registered_netdevs[i]) continue; @@ -1175,10 +1447,12 @@ static void prueth_remove(struct platform_device *pdev) static const struct prueth_pdata am654_icssg_pdata = { .fdqring_mode = K3_RINGACC_RING_MODE_MESSAGE, .quirk_10m_link_issue = 1, + .switch_mode = 1, }; static const struct prueth_pdata am64x_icssg_pdata = { .fdqring_mode = K3_RINGACC_RING_MODE_RING, + .switch_mode = 1, }; static const struct of_device_id prueth_dt_match[] = { diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth.h b/drivers/net/ethernet/ti/icssg/icssg_prueth.h index a78c5eb75fb8..f678d656a3ed 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth.h +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth.h @@ -186,6 +186,9 @@ struct prueth_emac { struct pruss_mem_region dram; + bool offload_fwd_mark; + int port_vlan; + struct delayed_work stats_work; u64 stats[ICSSG_NUM_STATS]; @@ -198,10 +201,12 @@ struct prueth_emac { * struct prueth_pdata - PRUeth platform data * @fdqring_mode: Free desc queue mode * @quirk_10m_link_issue: 10M link detect errata + * @switch_mode: switch firmware support */ struct prueth_pdata { enum k3_ring_mode fdqring_mode; u32 quirk_10m_link_issue:1; + u32 switch_mode:1; }; struct icssg_firmwares { @@ -232,6 +237,16 @@ struct icssg_firmwares { * @emacs_initialized: num of EMACs/ext ports that are up/running * @iep0: pointer to IEP0 device * @iep1: pointer to IEP1 device + * @vlan_tbl: VLAN-FID table pointer + * @hw_bridge_dev: pointer to HW bridge net device + * @br_members: bitmask of bridge member ports + * @prueth_netdevice_nb: netdevice notifier block + * @prueth_switchdev_nb: switchdev notifier block + * @prueth_switchdev_bl_nb: switchdev blocking notifier block + * @is_switch_mode: flag to indicate if device is in Switch mode + * @is_switchmode_supported: indicates platform support for switch mode + * @switch_id: ID for mapping switch ports to bridge + * @default_vlan: Default VLAN for host */ struct prueth { struct device *dev; @@ -256,6 +271,17 @@ struct prueth { int emacs_initialized; struct icss_iep *iep0; struct icss_iep *iep1; + struct prueth_vlan_tbl *vlan_tbl; + + struct net_device *hw_bridge_dev; + u8 br_members; + struct notifier_block prueth_netdevice_nb; + struct notifier_block prueth_switchdev_nb; + struct notifier_block prueth_switchdev_bl_nb; + bool is_switch_mode; + bool is_switchmode_supported; + unsigned char switch_id[MAX_PHYS_ITEM_ID_LEN]; + int default_vlan; }; struct emac_tx_ts_response { @@ -303,8 +329,8 @@ void icssg_ft1_set_mac_addr(struct regmap *miig_rt, int slice, u8 *mac_addr); void icssg_config_ipg(struct prueth_emac *emac); int icssg_config(struct prueth *prueth, struct prueth_emac *emac, int slice); -int emac_set_port_state(struct prueth_emac *emac, - enum icssg_port_state_cmd state); +int icssg_set_port_state(struct prueth_emac *emac, + enum icssg_port_state_cmd state); void icssg_config_set_speed(struct prueth_emac *emac); void icssg_config_half_duplex(struct prueth_emac *emac); @@ -313,10 +339,20 @@ int icssg_queue_pop(struct prueth *prueth, u8 queue); void icssg_queue_push(struct prueth *prueth, int queue, u16 addr); u32 icssg_queue_level(struct prueth *prueth, int queue); +int icssg_send_fdb_msg(struct prueth_emac *emac, struct mgmt_cmd *cmd, + struct mgmt_cmd_rsp *rsp); +int icssg_fdb_add_del(struct prueth_emac *emac, const 
unsigned char *addr, + u8 vid, u8 fid_c2, bool add); +int icssg_fdb_lookup(struct prueth_emac *emac, const unsigned char *addr, + u8 vid); +void icssg_vtbl_modify(struct prueth_emac *emac, u8 vid, u8 port_mask, + u8 untag_mask, bool add); +u16 icssg_get_pvid(struct prueth_emac *emac); +void icssg_set_pvid(struct prueth *prueth, u8 vid, u8 port); #define prueth_napi_to_tx_chn(pnapi) \ container_of(pnapi, struct prueth_tx_chn, napi_tx) -void emac_stats_work_handler(struct work_struct *work); +void icssg_stats_work_handler(struct work_struct *work); void emac_update_hardware_stats(struct prueth_emac *emac); int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name); @@ -341,11 +377,11 @@ int prueth_dma_rx_push(struct prueth_emac *emac, struct prueth_rx_chn *rx_chn); void emac_rx_timestamp(struct prueth_emac *emac, struct sk_buff *skb, u32 *psdata); -enum netdev_tx emac_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev); +enum netdev_tx icssg_ndo_start_xmit(struct sk_buff *skb, struct net_device *ndev); irqreturn_t prueth_rx_irq(int irq, void *dev_id); void prueth_emac_stop(struct prueth_emac *emac); void prueth_cleanup_tx_ts(struct prueth_emac *emac); -int emac_napi_rx_poll(struct napi_struct *napi_rx, int budget); +int icssg_napi_rx_poll(struct napi_struct *napi_rx, int budget); int prueth_prepare_rx_chan(struct prueth_emac *emac, struct prueth_rx_chn *chn, int buf_size); @@ -353,12 +389,12 @@ void prueth_reset_tx_chan(struct prueth_emac *emac, int ch_num, bool free_skb); void prueth_reset_rx_chan(struct prueth_rx_chn *chn, int num_flows, bool disable); -void emac_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue); -int emac_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd); -void emac_ndo_get_stats64(struct net_device *ndev, - struct rtnl_link_stats64 *stats); -int emac_ndo_get_phys_port_name(struct net_device *ndev, char *name, - size_t len); +void icssg_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue); +int icssg_ndo_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd); +void icssg_ndo_get_stats64(struct net_device *ndev, + struct rtnl_link_stats64 *stats); +int icssg_ndo_get_phys_port_name(struct net_device *ndev, char *name, + size_t len); int prueth_node_port(struct device_node *eth_node); int prueth_node_mac(struct device_node *eth_node); void prueth_netdev_exit(struct prueth *prueth, diff --git a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c index 7b3304bbd7fc..e180c1166170 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c +++ b/drivers/net/ethernet/ti/icssg/icssg_prueth_sr1.c @@ -722,14 +722,14 @@ static void emac_ndo_set_rx_mode_sr1(struct net_device *ndev) static const struct net_device_ops emac_netdev_ops = { .ndo_open = emac_ndo_open, .ndo_stop = emac_ndo_stop, - .ndo_start_xmit = emac_ndo_start_xmit, + .ndo_start_xmit = icssg_ndo_start_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_tx_timeout = emac_ndo_tx_timeout, + .ndo_tx_timeout = icssg_ndo_tx_timeout, .ndo_set_rx_mode = emac_ndo_set_rx_mode_sr1, - .ndo_eth_ioctl = emac_ndo_ioctl, - .ndo_get_stats64 = emac_ndo_get_stats64, - .ndo_get_phys_port_name = emac_ndo_get_phys_port_name, + .ndo_eth_ioctl = icssg_ndo_ioctl, + .ndo_get_stats64 = icssg_ndo_get_stats64, + .ndo_get_phys_port_name = icssg_ndo_get_phys_port_name, }; static int prueth_netdev_init(struct prueth *prueth, @@ -767,7 +767,7 @@ static int prueth_netdev_init(struct prueth *prueth, goto 
free_ndev; } - INIT_DELAYED_WORK(&emac->stats_work, emac_stats_work_handler); + INIT_DELAYED_WORK(&emac->stats_work, icssg_stats_work_handler); ret = pruss_request_mem_region(prueth->pruss, port == PRUETH_PORT_MII0 ? @@ -854,7 +854,7 @@ static int prueth_netdev_init(struct prueth *prueth, ndev->hw_features = NETIF_F_SG; ndev->features = ndev->hw_features; - netif_napi_add(ndev, &emac->napi_rx, emac_napi_rx_poll); + netif_napi_add(ndev, &emac->napi_rx, icssg_napi_rx_poll); prueth->emac[mac] = emac; return 0; @@ -1011,16 +1011,44 @@ static int prueth_probe(struct platform_device *pdev) dev_dbg(dev, "sram: pa %llx va %p size %zx\n", prueth->msmcram.pa, prueth->msmcram.va, prueth->msmcram.size); + prueth->iep0 = icss_iep_get_idx(np, 0); + if (IS_ERR(prueth->iep0)) { + ret = dev_err_probe(dev, PTR_ERR(prueth->iep0), + "iep0 get failed\n"); + goto free_pool; + } + + prueth->iep1 = icss_iep_get_idx(np, 1); + if (IS_ERR(prueth->iep1)) { + ret = dev_err_probe(dev, PTR_ERR(prueth->iep1), + "iep1 get failed\n"); + goto put_iep0; + } + + ret = icss_iep_init(prueth->iep0, NULL, NULL, 0); + if (ret) { + dev_err_probe(dev, ret, "failed to init iep0\n"); + goto put_iep; + } + + ret = icss_iep_init(prueth->iep1, NULL, NULL, 0); + if (ret) { + dev_err_probe(dev, ret, "failed to init iep1\n"); + goto exit_iep0; + } + if (eth0_node) { ret = prueth_netdev_init(prueth, eth0_node); if (ret) { dev_err_probe(dev, ret, "netdev init %s failed\n", eth0_node->name); - goto free_pool; + goto exit_iep; } if (of_find_property(eth0_node, "ti,half-duplex-capable", NULL)) prueth->emac[PRUETH_MAC0]->half_duplex = 1; + + prueth->emac[PRUETH_MAC0]->iep = prueth->iep0; } if (eth1_node) { @@ -1033,6 +1061,8 @@ static int prueth_probe(struct platform_device *pdev) if (of_find_property(eth1_node, "ti,half-duplex-capable", NULL)) prueth->emac[PRUETH_MAC1]->half_duplex = 1; + + prueth->emac[PRUETH_MAC1]->iep = prueth->iep1; } /* register the network devices */ @@ -1091,6 +1121,19 @@ netdev_exit: prueth_netdev_exit(prueth, eth_node); } +exit_iep: + icss_iep_exit(prueth->iep1); +exit_iep0: + icss_iep_exit(prueth->iep0); + +put_iep: + icss_iep_put(prueth->iep1); + +put_iep0: + icss_iep_put(prueth->iep0); + prueth->iep0 = NULL; + prueth->iep1 = NULL; + free_pool: gen_pool_free(prueth->sram_pool, (unsigned long)prueth->msmcram.va, msmc_ram_size); @@ -1138,6 +1181,12 @@ static void prueth_remove(struct platform_device *pdev) prueth_netdev_exit(prueth, eth_node); } + icss_iep_exit(prueth->iep1); + icss_iep_exit(prueth->iep0); + + icss_iep_put(prueth->iep1); + icss_iep_put(prueth->iep0); + gen_pool_free(prueth->sram_pool, (unsigned long)prueth->msmcram.va, MSMC_RAM_SIZE_SR1); diff --git a/drivers/net/ethernet/ti/icssg/icssg_queues.c b/drivers/net/ethernet/ti/icssg/icssg_queues.c index 3c34f61ad40b..e5052d9e7807 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_queues.c +++ b/drivers/net/ethernet/ti/icssg/icssg_queues.c @@ -28,6 +28,7 @@ int icssg_queue_pop(struct prueth *prueth, u8 queue) return val; } +EXPORT_SYMBOL_GPL(icssg_queue_pop); void icssg_queue_push(struct prueth *prueth, int queue, u16 addr) { @@ -36,6 +37,7 @@ void icssg_queue_push(struct prueth *prueth, int queue, u16 addr) regmap_write(prueth->miig_rt, ICSSG_QUEUE_OFFSET + 4 * queue, addr); } +EXPORT_SYMBOL_GPL(icssg_queue_push); u32 icssg_queue_level(struct prueth *prueth, int queue) { diff --git a/drivers/net/ethernet/ti/icssg/icssg_stats.c b/drivers/net/ethernet/ti/icssg/icssg_stats.c index 3dbadddd7e35..2fb150c13078 100644 --- a/drivers/net/ethernet/ti/icssg/icssg_stats.c 
+++ b/drivers/net/ethernet/ti/icssg/icssg_stats.c @@ -42,7 +42,7 @@ void emac_update_hardware_stats(struct prueth_emac *emac) } } -void emac_stats_work_handler(struct work_struct *work) +void icssg_stats_work_handler(struct work_struct *work) { struct prueth_emac *emac = container_of(work, struct prueth_emac, stats_work.work); @@ -51,6 +51,7 @@ void emac_stats_work_handler(struct work_struct *work) queue_delayed_work(system_long_wq, &emac->stats_work, msecs_to_jiffies((STATS_TIME_LIMIT_1G_MS * 1000) / emac->speed)); } +EXPORT_SYMBOL_GPL(icssg_stats_work_handler); int emac_get_stat_by_name(struct prueth_emac *emac, char *stat_name) { diff --git a/drivers/net/ethernet/ti/icssg/icssg_switchdev.c b/drivers/net/ethernet/ti/icssg/icssg_switchdev.c new file mode 100644 index 000000000000..67e2927e176d --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icssg_switchdev.c @@ -0,0 +1,477 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Texas Instruments K3 ICSSG Ethernet Switchdev Driver + * + * Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/ + * + */ + +#include <linux/etherdevice.h> +#include <linux/if_bridge.h> +#include <linux/netdevice.h> +#include <linux/workqueue.h> +#include <net/switchdev.h> + +#include "icssg_prueth.h" +#include "icssg_switchdev.h" +#include "icssg_mii_rt.h" + +struct prueth_switchdev_event_work { + struct work_struct work; + struct switchdev_notifier_fdb_info fdb_info; + struct prueth_emac *emac; + unsigned long event; +}; + +static int prueth_switchdev_stp_state_set(struct prueth_emac *emac, + u8 state) +{ + enum icssg_port_state_cmd emac_state; + int ret = 0; + + switch (state) { + case BR_STATE_FORWARDING: + emac_state = ICSSG_EMAC_PORT_FORWARD; + break; + case BR_STATE_DISABLED: + emac_state = ICSSG_EMAC_PORT_DISABLE; + break; + case BR_STATE_LISTENING: + case BR_STATE_BLOCKING: + emac_state = ICSSG_EMAC_PORT_BLOCK; + break; + default: + return -EOPNOTSUPP; + } + + icssg_set_port_state(emac, emac_state); + netdev_dbg(emac->ndev, "STP state: %u\n", emac_state); + + return ret; +} + +static int prueth_switchdev_attr_br_flags_set(struct prueth_emac *emac, + struct net_device *orig_dev, + struct switchdev_brport_flags brport_flags) +{ + enum icssg_port_state_cmd emac_state; + + if (brport_flags.mask & BR_MCAST_FLOOD) + emac_state = ICSSG_EMAC_PORT_MC_FLOODING_ENABLE; + else + emac_state = ICSSG_EMAC_PORT_MC_FLOODING_DISABLE; + + netdev_dbg(emac->ndev, "BR_MCAST_FLOOD: %d port %u\n", + emac_state, emac->port_id); + + icssg_set_port_state(emac, emac_state); + + return 0; +} + +static int prueth_switchdev_attr_br_flags_pre_set(struct net_device *netdev, + struct switchdev_brport_flags brport_flags) +{ + if (brport_flags.mask & ~(BR_LEARNING | BR_MCAST_FLOOD)) + return -EINVAL; + + return 0; +} + +static int prueth_switchdev_attr_set(struct net_device *ndev, const void *ctx, + const struct switchdev_attr *attr, + struct netlink_ext_ack *extack) +{ + struct prueth_emac *emac = netdev_priv(ndev); + int ret; + + netdev_dbg(ndev, "attr: id %u port: %u\n", attr->id, emac->port_id); + + switch (attr->id) { + case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: + ret = prueth_switchdev_attr_br_flags_pre_set(ndev, + attr->u.brport_flags); + break; + case SWITCHDEV_ATTR_ID_PORT_STP_STATE: + ret = prueth_switchdev_stp_state_set(emac, + attr->u.stp_state); + netdev_dbg(ndev, "stp state: %u\n", attr->u.stp_state); + break; + case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: + ret = prueth_switchdev_attr_br_flags_set(emac, attr->orig_dev, + attr->u.brport_flags); + break; + default: + ret = -EOPNOTSUPP; + break; + } + + return ret; +} + +static void
prueth_switchdev_fdb_offload_notify(struct net_device *ndev, + struct switchdev_notifier_fdb_info *rcv) +{ + struct switchdev_notifier_fdb_info info; + + memset(&info, 0, sizeof(info)); + info.addr = rcv->addr; + info.vid = rcv->vid; + info.offloaded = true; + call_switchdev_notifiers(SWITCHDEV_FDB_OFFLOADED, + ndev, &info.info, NULL); +} + +static void prueth_switchdev_event_work(struct work_struct *work) +{ + struct prueth_switchdev_event_work *switchdev_work = + container_of(work, struct prueth_switchdev_event_work, work); + struct prueth_emac *emac = switchdev_work->emac; + struct switchdev_notifier_fdb_info *fdb; + int port_id = emac->port_id; + int ret; + + rtnl_lock(); + switch (switchdev_work->event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + fdb = &switchdev_work->fdb_info; + + netdev_dbg(emac->ndev, "prueth_fdb_add: MACID = %pM vid = %u flags = %u %u -- port %d\n", + fdb->addr, fdb->vid, fdb->added_by_user, + fdb->offloaded, port_id); + + if (!fdb->added_by_user) + break; + if (!ether_addr_equal(emac->mac_addr, fdb->addr)) + break; + + ret = icssg_fdb_add_del(emac, fdb->addr, fdb->vid, + BIT(port_id), true); + if (!ret) + prueth_switchdev_fdb_offload_notify(emac->ndev, fdb); + break; + case SWITCHDEV_FDB_DEL_TO_DEVICE: + fdb = &switchdev_work->fdb_info; + + netdev_dbg(emac->ndev, "prueth_fdb_del: MACID = %pM vid = %u flags = %u %u -- port %d\n", + fdb->addr, fdb->vid, fdb->added_by_user, + fdb->offloaded, port_id); + + if (!fdb->added_by_user) + break; + if (!ether_addr_equal(emac->mac_addr, fdb->addr)) + break; + icssg_fdb_add_del(emac, fdb->addr, fdb->vid, + BIT(port_id), false); + break; + default: + break; + } + rtnl_unlock(); + + kfree(switchdev_work->fdb_info.addr); + kfree(switchdev_work); + dev_put(emac->ndev); +} + +static int prueth_switchdev_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *ndev = switchdev_notifier_info_to_dev(ptr); + struct prueth_switchdev_event_work *switchdev_work; + struct switchdev_notifier_fdb_info *fdb_info = ptr; + struct prueth_emac *emac = netdev_priv(ndev); + int err; + + if (!prueth_dev_check(ndev)) + return NOTIFY_DONE; + + if (event == SWITCHDEV_PORT_ATTR_SET) { + err = switchdev_handle_port_attr_set(ndev, ptr, + prueth_dev_check, + prueth_switchdev_attr_set); + return notifier_from_errno(err); + } + + switchdev_work = kzalloc(sizeof(*switchdev_work), GFP_ATOMIC); + if (WARN_ON(!switchdev_work)) + return NOTIFY_BAD; + + INIT_WORK(&switchdev_work->work, prueth_switchdev_event_work); + switchdev_work->emac = emac; + switchdev_work->event = event; + + switch (event) { + case SWITCHDEV_FDB_ADD_TO_DEVICE: + case SWITCHDEV_FDB_DEL_TO_DEVICE: + memcpy(&switchdev_work->fdb_info, ptr, + sizeof(switchdev_work->fdb_info)); + switchdev_work->fdb_info.addr = kzalloc(ETH_ALEN, GFP_ATOMIC); + if (!switchdev_work->fdb_info.addr) + goto err_addr_alloc; + ether_addr_copy((u8 *)switchdev_work->fdb_info.addr, + fdb_info->addr); + dev_hold(ndev); + break; + default: + kfree(switchdev_work); + return NOTIFY_DONE; + } + + queue_work(system_long_wq, &switchdev_work->work); + + return NOTIFY_DONE; + +err_addr_alloc: + kfree(switchdev_work); + return NOTIFY_BAD; +} + +static int prueth_switchdev_vlan_add(struct prueth_emac *emac, bool untag, bool pvid, + u8 vid, struct net_device *orig_dev) +{ + bool cpu_port = netif_is_bridge_master(orig_dev); + int untag_mask = 0; + int port_mask; + int ret = 0; + + if (cpu_port) + port_mask = BIT(PRUETH_PORT_HOST); + else + port_mask = BIT(emac->port_id); + + if (untag) + untag_mask = 
port_mask; + + icssg_vtbl_modify(emac, vid, port_mask, untag_mask, true); + + netdev_dbg(emac->ndev, "VID add vid:%u port_mask:%X untag_mask %X PVID %d\n", + vid, port_mask, untag_mask, pvid); + + if (!pvid) + return ret; + + icssg_set_pvid(emac->prueth, vid, emac->port_id); + + return ret; +} + +static int prueth_switchdev_vlan_del(struct prueth_emac *emac, u16 vid, + struct net_device *orig_dev) +{ + bool cpu_port = netif_is_bridge_master(orig_dev); + int port_mask; + int ret = 0; + + if (cpu_port) + port_mask = BIT(PRUETH_PORT_HOST); + else + port_mask = BIT(emac->port_id); + + icssg_vtbl_modify(emac, vid, port_mask, 0, false); + + if (cpu_port) + icssg_fdb_add_del(emac, emac->mac_addr, vid, + BIT(PRUETH_PORT_HOST), false); + + if (vid == icssg_get_pvid(emac)) + icssg_set_pvid(emac->prueth, 0, emac->port_id); + + netdev_dbg(emac->ndev, "VID del vid:%u port_mask:%X\n", + vid, port_mask); + + return ret; +} + +static int prueth_switchdev_vlans_add(struct prueth_emac *emac, + const struct switchdev_obj_port_vlan *vlan) +{ + bool untag = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + struct net_device *orig_dev = vlan->obj.orig_dev; + bool cpu_port = netif_is_bridge_master(orig_dev); + bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + + netdev_dbg(emac->ndev, "VID add vid:%u flags:%X\n", + vlan->vid, vlan->flags); + + if (cpu_port && !(vlan->flags & BRIDGE_VLAN_INFO_BRENTRY)) + return 0; + + if (vlan->vid > 0xff) + return 0; + + return prueth_switchdev_vlan_add(emac, untag, pvid, vlan->vid, + orig_dev); +} + +static int prueth_switchdev_vlans_del(struct prueth_emac *emac, + const struct switchdev_obj_port_vlan *vlan) +{ + if (vlan->vid > 0xff) + return 0; + + return prueth_switchdev_vlan_del(emac, vlan->vid, + vlan->obj.orig_dev); +} + +static int prueth_switchdev_mdb_add(struct prueth_emac *emac, + struct switchdev_obj_port_mdb *mdb) +{ + struct net_device *orig_dev = mdb->obj.orig_dev; + u8 port_mask, fid_c2; + bool cpu_port; + int err; + + cpu_port = netif_is_bridge_master(orig_dev); + + if (cpu_port) + port_mask = BIT(PRUETH_PORT_HOST); + else + port_mask = BIT(emac->port_id); + + fid_c2 = icssg_fdb_lookup(emac, mdb->addr, mdb->vid); + + err = icssg_fdb_add_del(emac, mdb->addr, mdb->vid, fid_c2 | port_mask, true); + netdev_dbg(emac->ndev, "MDB add vid %u:%pM ports: %X\n", + mdb->vid, mdb->addr, port_mask); + + return err; +} + +static int prueth_switchdev_mdb_del(struct prueth_emac *emac, + struct switchdev_obj_port_mdb *mdb) +{ + struct net_device *orig_dev = mdb->obj.orig_dev; + int del_mask, ret, fid_c2; + bool cpu_port; + + cpu_port = netif_is_bridge_master(orig_dev); + + if (cpu_port) + del_mask = BIT(PRUETH_PORT_HOST); + else + del_mask = BIT(emac->port_id); + + fid_c2 = icssg_fdb_lookup(emac, mdb->addr, mdb->vid); + + if (fid_c2 & ~del_mask) + ret = icssg_fdb_add_del(emac, mdb->addr, mdb->vid, fid_c2 & ~del_mask, true); + else + ret = icssg_fdb_add_del(emac, mdb->addr, mdb->vid, 0, false); + + netdev_dbg(emac->ndev, "MDB del vid %u:%pM ports: %X\n", + mdb->vid, mdb->addr, del_mask); + + return ret; +} + +static int prueth_switchdev_obj_add(struct net_device *ndev, const void *ctx, + const struct switchdev_obj *obj, + struct netlink_ext_ack *extack) +{ + struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); + struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj); + struct prueth_emac *emac = netdev_priv(ndev); + int err = 0; + + netdev_dbg(ndev, "obj_add: id %u port: %u\n", obj->id, emac->port_id); + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + 
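/* the firmware VLAN table only holds 256 entries, so prueth_switchdev_vlans_add() above silently ignores VIDs > 0xff */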
err = prueth_switchdev_vlans_add(emac, vlan); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: + err = prueth_switchdev_mdb_add(emac, mdb); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int prueth_switchdev_obj_del(struct net_device *ndev, const void *ctx, + const struct switchdev_obj *obj) +{ + struct switchdev_obj_port_vlan *vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); + struct switchdev_obj_port_mdb *mdb = SWITCHDEV_OBJ_PORT_MDB(obj); + struct prueth_emac *emac = netdev_priv(ndev); + int err = 0; + + netdev_dbg(ndev, "obj_del: id %u port: %u\n", obj->id, emac->port_id); + + switch (obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = prueth_switchdev_vlans_del(emac, vlan); + break; + case SWITCHDEV_OBJ_ID_PORT_MDB: + case SWITCHDEV_OBJ_ID_HOST_MDB: + err = prueth_switchdev_mdb_del(emac, mdb); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int prueth_switchdev_blocking_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + err = switchdev_handle_port_obj_add(dev, ptr, + prueth_dev_check, + prueth_switchdev_obj_add); + return notifier_from_errno(err); + case SWITCHDEV_PORT_OBJ_DEL: + err = switchdev_handle_port_obj_del(dev, ptr, + prueth_dev_check, + prueth_switchdev_obj_del); + return notifier_from_errno(err); + case SWITCHDEV_PORT_ATTR_SET: + err = switchdev_handle_port_attr_set(dev, ptr, + prueth_dev_check, + prueth_switchdev_attr_set); + return notifier_from_errno(err); + default: + break; + } + + return NOTIFY_DONE; +} + +int prueth_switchdev_register_notifiers(struct prueth *prueth) +{ + int ret = 0; + + prueth->prueth_switchdev_nb.notifier_call = &prueth_switchdev_event; + ret = register_switchdev_notifier(&prueth->prueth_switchdev_nb); + if (ret) { + dev_err(prueth->dev, "register switchdev notifier fail ret:%d\n", + ret); + return ret; + } + + prueth->prueth_switchdev_bl_nb.notifier_call = &prueth_switchdev_blocking_event; + ret = register_switchdev_blocking_notifier(&prueth->prueth_switchdev_bl_nb); + if (ret) { + dev_err(prueth->dev, "register switchdev blocking notifier ret:%d\n", + ret); + unregister_switchdev_notifier(&prueth->prueth_switchdev_nb); + } + + return ret; +} + +void prueth_switchdev_unregister_notifiers(struct prueth *prueth) +{ + unregister_switchdev_blocking_notifier(&prueth->prueth_switchdev_bl_nb); + unregister_switchdev_notifier(&prueth->prueth_switchdev_nb); +} diff --git a/drivers/net/ethernet/ti/icssg/icssg_switchdev.h b/drivers/net/ethernet/ti/icssg/icssg_switchdev.h new file mode 100644 index 000000000000..0e64e7760a00 --- /dev/null +++ b/drivers/net/ethernet/ti/icssg/icssg_switchdev.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2021 Texas Instruments Incorporated - https://www.ti.com/ + */ +#ifndef __NET_TI_ICSSG_SWITCHDEV_H +#define __NET_TI_ICSSG_SWITCHDEV_H + +#include "icssg_prueth.h" + +int prueth_switchdev_register_notifiers(struct prueth *prueth); +void prueth_switchdev_unregister_notifiers(struct prueth *prueth); +bool prueth_dev_check(const struct net_device *ndev); + +#endif /* __NET_TI_ICSSG_SWITCHDEV_H */ diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 02cb6474f6dc..d286709ca3b9 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -1999,7 +1999,7 @@ static int 
keystone_set_link_ksettings(struct net_device *ndev, #if IS_ENABLED(CONFIG_TI_CPTS) static int keystone_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct netcp_intf *netcp = netdev_priv(ndev); struct gbe_intf *gbe_intf; @@ -2027,7 +2027,7 @@ static int keystone_get_ts_info(struct net_device *ndev, } #else static int keystone_get_ts_info(struct net_device *ndev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | diff --git a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c index cc3bec42ed8e..abe5921dde02 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_ethtool.c @@ -43,6 +43,11 @@ static const struct wx_stats wx_gstrings_stats[] = { WX_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), }; +static const struct wx_stats wx_gstrings_fdir_stats[] = { + WX_STAT("fdir_match", stats.fdirmatch), + WX_STAT("fdir_miss", stats.fdirmiss), +}; + /* drivers allocates num_tx_queues and num_rx_queues symmetrically so * we set the num_rx_queues to evaluate to num_tx_queues. This is * used because we do not have a good way to get the max number of @@ -55,13 +60,17 @@ static const struct wx_stats wx_gstrings_stats[] = { (WX_NUM_TX_QUEUES + WX_NUM_RX_QUEUES) * \ (sizeof(struct wx_queue_stats) / sizeof(u64))) #define WX_GLOBAL_STATS_LEN ARRAY_SIZE(wx_gstrings_stats) +#define WX_FDIR_STATS_LEN ARRAY_SIZE(wx_gstrings_fdir_stats) #define WX_STATS_LEN (WX_GLOBAL_STATS_LEN + WX_QUEUE_STATS_LEN) int wx_get_sset_count(struct net_device *netdev, int sset) { + struct wx *wx = netdev_priv(netdev); + switch (sset) { case ETH_SS_STATS: - return WX_STATS_LEN; + return (wx->mac.type == wx_mac_sp) ? + WX_STATS_LEN + WX_FDIR_STATS_LEN : WX_STATS_LEN; default: return -EOPNOTSUPP; } @@ -70,6 +79,7 @@ EXPORT_SYMBOL(wx_get_sset_count); void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { + struct wx *wx = netdev_priv(netdev); u8 *p = data; int i; @@ -77,6 +87,10 @@ void wx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) case ETH_SS_STATS: for (i = 0; i < WX_GLOBAL_STATS_LEN; i++) ethtool_puts(&p, wx_gstrings_stats[i].stat_string); + if (wx->mac.type == wx_mac_sp) { + for (i = 0; i < WX_FDIR_STATS_LEN; i++) + ethtool_puts(&p, wx_gstrings_fdir_stats[i].stat_string); + } for (i = 0; i < netdev->num_tx_queues; i++) { ethtool_sprintf(&p, "tx_queue_%u_packets", i); ethtool_sprintf(&p, "tx_queue_%u_bytes", i); @@ -96,7 +110,7 @@ void wx_get_ethtool_stats(struct net_device *netdev, struct wx *wx = netdev_priv(netdev); struct wx_ring *ring; unsigned int start; - int i, j; + int i, j, k; char *p; wx_update_stats(wx); @@ -107,6 +121,13 @@ void wx_get_ethtool_stats(struct net_device *netdev, sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } + if (wx->mac.type == wx_mac_sp) { + for (k = 0; k < WX_FDIR_STATS_LEN; k++) { + p = (char *)wx + wx_gstrings_fdir_stats[k].stat_offset; + data[i++] = *(u64 *)p; + } + } + for (j = 0; j < netdev->num_tx_queues; j++) { ring = wx->tx_ring[j]; if (!ring) { @@ -172,17 +193,21 @@ EXPORT_SYMBOL(wx_get_pause_stats); void wx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { + unsigned int stats_len = WX_STATS_LEN; struct wx *wx = netdev_priv(netdev); + if (wx->mac.type == wx_mac_sp) + stats_len += WX_FDIR_STATS_LEN; + strscpy(info->driver, wx->driver_name, sizeof(info->driver)); strscpy(info->fw_version, wx->eeprom_id, sizeof(info->fw_version)); strscpy(info->bus_info, pci_name(wx->pdev), sizeof(info->bus_info)); if (wx->num_tx_queues <= WX_NUM_TX_QUEUES) { - info->n_stats = WX_STATS_LEN - + info->n_stats = stats_len - (WX_NUM_TX_QUEUES - wx->num_tx_queues) * (sizeof(struct wx_queue_stats) / sizeof(u64)) * 2; } else { - info->n_stats = WX_STATS_LEN; + info->n_stats = stats_len; } } EXPORT_SYMBOL(wx_get_drvinfo); @@ -383,6 +408,9 @@ void wx_get_channels(struct net_device *dev, /* record RSS queues */ ch->combined_count = wx->ring_feature[RING_F_RSS].indices; + + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) + ch->combined_count = wx->ring_feature[RING_F_FDIR].indices; } EXPORT_SYMBOL(wx_get_channels); @@ -400,6 +428,9 @@ int wx_set_channels(struct net_device *dev, if (count > wx_max_channels(wx)) return -EINVAL; + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) + wx->ring_feature[RING_F_FDIR].limit = count; + wx->ring_feature[RING_F_RSS].limit = count; return 0; diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.c b/drivers/net/ethernet/wangxun/libwx/wx_hw.c index d1b682ce9c6d..1bf9c38e4125 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.c @@ -1147,8 +1147,15 @@ static void wx_enable_rx(struct wx *wx) static void wx_set_rxpba(struct wx *wx) { u32 rxpktsize, txpktsize, txpbthresh; + u32 pbsize = wx->mac.rx_pb_size; - rxpktsize = wx->mac.rx_pb_size << WX_RDB_PB_SZ_SHIFT; + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) { + if (test_bit(WX_FLAG_FDIR_HASH, wx->flags) || + test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)) + pbsize -= 64; /* Default 64KB */ + } + + rxpktsize = pbsize << WX_RDB_PB_SZ_SHIFT; wr32(wx, WX_RDB_PB_SZ(0), rxpktsize); /* Only support an equally distributed Tx packet buffer strategy. */ @@ -1261,7 +1268,7 @@ static void wx_configure_port(struct wx *wx) * Stops the receive data path and waits for the HW to internally empty * the Rx security block **/ -static int wx_disable_sec_rx_path(struct wx *wx) +int wx_disable_sec_rx_path(struct wx *wx) { u32 secrx; @@ -1271,6 +1278,7 @@ static int wx_disable_sec_rx_path(struct wx *wx) return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY, 1000, 40000, false, wx, WX_RSC_ST); } +EXPORT_SYMBOL(wx_disable_sec_rx_path); /** * wx_enable_sec_rx_path - Enables the receive data path @@ -1278,11 +1286,12 @@ static int wx_disable_sec_rx_path(struct wx *wx) * * Enables the receive data path. 
**/ -static void wx_enable_sec_rx_path(struct wx *wx) +void wx_enable_sec_rx_path(struct wx *wx) { wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0); WX_WRITE_FLUSH(wx); } +EXPORT_SYMBOL(wx_enable_sec_rx_path); static void wx_vlan_strip_control(struct wx *wx, bool enable) { @@ -1499,6 +1508,13 @@ static void wx_configure_tx_ring(struct wx *wx, txdctl |= ring->count / 128 << WX_PX_TR_CFG_TR_SIZE_SHIFT; txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT; + ring->atr_count = 0; + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) && + test_bit(WX_FLAG_FDIR_HASH, wx->flags)) + ring->atr_sample_rate = wx->atr_sample_rate; + else + ring->atr_sample_rate = 0; + /* reinitialize tx_buffer_info */ memset(ring->tx_buffer_info, 0, sizeof(struct wx_tx_buffer) * ring->count); @@ -1732,7 +1748,9 @@ void wx_configure(struct wx *wx) wx_set_rx_mode(wx->netdev); wx_restore_vlan(wx); - wx_enable_sec_rx_path(wx); + + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) + wx->configure_fdir(wx); wx_configure_tx(wx); wx_configure_rx(wx); @@ -1959,6 +1977,7 @@ int wx_sw_init(struct wx *wx) } bitmap_zero(wx->state, WX_STATE_NBITS); + bitmap_zero(wx->flags, WX_PF_FLAGS_NBITS); wx->misc_irq_domain = false; return 0; @@ -2334,6 +2353,11 @@ void wx_update_stats(struct wx *wx) hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT); hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT); + if (wx->mac.type == wx_mac_sp) { + hwstats->fdirmatch += rd32(wx, WX_RDB_FDIR_MATCH); + hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS); + } + for (i = 0; i < wx->mac.max_rx_queues; i++) hwstats->qmprc += rd32(wx, WX_PX_MPRC(i)); } diff --git a/drivers/net/ethernet/wangxun/libwx/wx_hw.h b/drivers/net/ethernet/wangxun/libwx/wx_hw.h index 9e219fa717a2..11fb33349482 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_hw.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_hw.h @@ -28,6 +28,8 @@ void wx_mac_set_default_filter(struct wx *wx, u8 *addr); void wx_flush_sw_mac_table(struct wx *wx); int wx_set_mac(struct net_device *netdev, void *p); void wx_disable_rx(struct wx *wx); +int wx_disable_sec_rx_path(struct wx *wx); +void wx_enable_sec_rx_path(struct wx *wx); void wx_set_rx_mode(struct net_device *netdev); int wx_change_mtu(struct net_device *netdev, int new_mtu); void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.c b/drivers/net/ethernet/wangxun/libwx/wx_lib.c index 81bedc8ee8d4..1eecba984f3b 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.c +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.c @@ -148,10 +148,11 @@ static struct wx_dec_ptype wx_ptype_lookup[256] = { [0xFD] = WX_PTT(IP, IPV6, IGMV, IPV6, SCTP, PAY4), }; -static struct wx_dec_ptype wx_decode_ptype(const u8 ptype) +struct wx_dec_ptype wx_decode_ptype(const u8 ptype) { return wx_ptype_lookup[ptype]; } +EXPORT_SYMBOL(wx_decode_ptype); /* wx_test_staterr - tests bits in Rx descriptor status and error fields */ static __le32 wx_test_staterr(union wx_rx_desc *rx_desc, @@ -1453,6 +1454,7 @@ static void wx_tx_csum(struct wx_ring *tx_ring, struct wx_tx_buffer *first, static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, struct wx_ring *tx_ring) { + struct wx *wx = netdev_priv(tx_ring->netdev); u16 count = TXD_USE_COUNT(skb_headlen(skb)); struct wx_tx_buffer *first; u8 hdr_len = 0, ptype; @@ -1498,6 +1500,10 @@ static netdev_tx_t wx_xmit_frame_ring(struct sk_buff *skb, goto out_drop; else if (!tso) wx_tx_csum(tx_ring, first, ptype); + + if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) && tx_ring->atr_sample_rate) + wx->atr(tx_ring, first, ptype); + 
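The two lines added to wx_xmit_frame_ring() just above are the only flow-director touchpoint in the generic transmit path: libwx stays hardware-agnostic and reaches the txgbe-specific sampler through the wx->atr function pointer, gated on the capability flag. A minimal userspace sketch of that indirection follows; the names, the simplified hook signature, and the plain-int flag are illustrative stand-ins for the driver's bitmap flags and real prototypes::

    #include <stdio.h>

    struct wx {
        int fdir_capable;            /* stand-in for WX_FLAG_FDIR_CAPABLE */
        void (*atr)(struct wx *wx);  /* filled in by the HW-specific driver */
    };

    /* Generic hot path: call the hook only when the hardware driver
     * registered one, mirroring the test_bit() gate in the hunk above.
     */
    static void xmit(struct wx *wx)
    {
        if (wx->fdir_capable && wx->atr)
            wx->atr(wx);
    }

    static void txgbe_atr_stub(struct wx *wx) { puts("sampled"); }

    int main(void)
    {
        struct wx wx = { .fdir_capable = 1, .atr = txgbe_atr_stub };

        xmit(&wx);
        return 0;
    }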
wx_tx_map(tx_ring, first, hdr_len); return NETDEV_TX_OK; @@ -1574,8 +1580,27 @@ static void wx_set_rss_queues(struct wx *wx) f = &wx->ring_feature[RING_F_RSS]; f->indices = f->limit; - wx->num_rx_queues = f->limit; - wx->num_tx_queues = f->limit; + if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))) + goto out; + + clear_bit(WX_FLAG_FDIR_HASH, wx->flags); + + /* Use Flow Director in addition to RSS to ensure the best + * distribution of flows across cores, even when an FDIR flow + * isn't matched. + */ + if (f->indices > 1) { + f = &wx->ring_feature[RING_F_FDIR]; + + f->indices = f->limit; + + if (!(test_bit(WX_FLAG_FDIR_PERFECT, wx->flags))) + set_bit(WX_FLAG_FDIR_HASH, wx->flags); + } + +out: + wx->num_rx_queues = f->indices; + wx->num_tx_queues = f->indices; } static void wx_set_num_queues(struct wx *wx) @@ -2684,6 +2709,7 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t changed = netdev->features ^ features; struct wx *wx = netdev_priv(netdev); + bool need_reset = false; if (features & NETIF_F_RXHASH) { wr32m(wx, WX_RDB_RA_CTL, WX_RDB_RA_CTL_RSS_EN, @@ -2701,6 +2727,36 @@ int wx_set_features(struct net_device *netdev, netdev_features_t features) else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER)) wx_set_rx_mode(netdev); + if (!(test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))) + return 0; + + /* Check if Flow Director n-tuple support was enabled or disabled. If + * the state changed, we need to reset. + */ + switch (features & NETIF_F_NTUPLE) { + case NETIF_F_NTUPLE: + /* turn off ATR, enable perfect filters and reset */ + if (!(test_and_set_bit(WX_FLAG_FDIR_PERFECT, wx->flags))) + need_reset = true; + + clear_bit(WX_FLAG_FDIR_HASH, wx->flags); + break; + default: + /* turn off perfect filters, enable ATR and reset */ + if (test_and_clear_bit(WX_FLAG_FDIR_PERFECT, wx->flags)) + need_reset = true; + + /* We cannot enable ATR if RSS is disabled */ + if (wx->ring_feature[RING_F_RSS].limit <= 1) + break; + + set_bit(WX_FLAG_FDIR_HASH, wx->flags); + break; + } + + if (need_reset) + wx->do_reset(netdev); + return 0; } EXPORT_SYMBOL(wx_set_features); diff --git a/drivers/net/ethernet/wangxun/libwx/wx_lib.h b/drivers/net/ethernet/wangxun/libwx/wx_lib.h index c41b29ea812f..fdeb0c315b75 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_lib.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_lib.h @@ -7,6 +7,7 @@ #ifndef _WX_LIB_H_ #define _WX_LIB_H_ +struct wx_dec_ptype wx_decode_ptype(const u8 ptype); void wx_alloc_rx_buffers(struct wx_ring *rx_ring, u16 cleaned_count); u16 wx_desc_unused(struct wx_ring *ring); netdev_tx_t wx_xmit_frame(struct sk_buff *skb, diff --git a/drivers/net/ethernet/wangxun/libwx/wx_type.h b/drivers/net/ethernet/wangxun/libwx/wx_type.h index 0df7f5712b6f..1d57b047817b 100644 --- a/drivers/net/ethernet/wangxun/libwx/wx_type.h +++ b/drivers/net/ethernet/wangxun/libwx/wx_type.h @@ -157,6 +157,8 @@ #define WX_RDB_RA_CTL_RSS_IPV6_TCP BIT(21) #define WX_RDB_RA_CTL_RSS_IPV4_UDP BIT(22) #define WX_RDB_RA_CTL_RSS_IPV6_UDP BIT(23) +#define WX_RDB_FDIR_MATCH 0x19558 +#define WX_RDB_FDIR_MISS 0x1955C /******************************* PSR Registers *******************************/ /* psr control */ @@ -503,6 +505,34 @@ enum WX_MSCA_CMD_value { #define WX_PTYPE_TYP_TCP 0x04 #define WX_PTYPE_TYP_SCTP 0x05 +/* Packet type non-ip values */ +enum wx_l2_ptypes { + WX_PTYPE_L2_ABORTED = (WX_PTYPE_PKT_MAC), + WX_PTYPE_L2_MAC = (WX_PTYPE_PKT_MAC | WX_PTYPE_TYP_MAC), + + WX_PTYPE_L2_IPV4_FRAG = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_IPFRAG), 
+ WX_PTYPE_L2_IPV4 = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_IP), + WX_PTYPE_L2_IPV4_UDP = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_UDP), + WX_PTYPE_L2_IPV4_TCP = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_TCP), + WX_PTYPE_L2_IPV4_SCTP = (WX_PTYPE_PKT_IP | WX_PTYPE_TYP_SCTP), + WX_PTYPE_L2_IPV6_FRAG = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 | + WX_PTYPE_TYP_IPFRAG), + WX_PTYPE_L2_IPV6 = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 | + WX_PTYPE_TYP_IP), + WX_PTYPE_L2_IPV6_UDP = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 | + WX_PTYPE_TYP_UDP), + WX_PTYPE_L2_IPV6_TCP = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 | + WX_PTYPE_TYP_TCP), + WX_PTYPE_L2_IPV6_SCTP = (WX_PTYPE_PKT_IP | WX_PTYPE_PKT_IPV6 | + WX_PTYPE_TYP_SCTP), + + WX_PTYPE_L2_TUN4_MAC = (WX_PTYPE_TUN_IPV4 | WX_PTYPE_PKT_IGM), + WX_PTYPE_L2_TUN6_MAC = (WX_PTYPE_TUN_IPV6 | WX_PTYPE_PKT_IGM), +}; + +#define WX_PTYPE_PKT(_pt) ((_pt) & 0x30) +#define WX_PTYPE_TYPL4(_pt) ((_pt) & 0x07) + #define WX_RXD_PKTTYPE(_rxd) \ ((le32_to_cpu((_rxd)->wb.lower.lo_dword.data) >> 9) & 0xFF) #define WX_RXD_IPV6EX(_rxd) \ @@ -552,6 +582,9 @@ enum wx_tx_flags { WX_TX_FLAGS_OUTER_IPV4 = 0x100, WX_TX_FLAGS_LINKSEC = 0x200, WX_TX_FLAGS_IPSEC = 0x400, + + /* software defined flags */ + WX_TX_FLAGS_SW_VLAN = 0x40, }; /* VLAN info */ @@ -900,7 +933,13 @@ struct wx_ring { */ u16 next_to_use; u16 next_to_clean; - u16 next_to_alloc; + union { + u16 next_to_alloc; + struct { + u8 atr_sample_rate; + u8 atr_count; + }; + }; struct wx_queue_stats stats; struct u64_stats_sync syncp; @@ -939,6 +978,7 @@ struct wx_ring_feature { enum wx_ring_f_enum { RING_F_NONE = 0, RING_F_RSS, + RING_F_FDIR, RING_F_ARRAY_SIZE /* must be last in enum set */ }; @@ -980,15 +1020,26 @@ struct wx_hw_stats { u64 crcerrs; u64 rlec; u64 qmprc; + u64 fdirmatch; + u64 fdirmiss; }; enum wx_state { WX_STATE_RESETTING, WX_STATE_NBITS, /* must be last */ }; + +enum wx_pf_flags { + WX_FLAG_FDIR_CAPABLE, + WX_FLAG_FDIR_HASH, + WX_FLAG_FDIR_PERFECT, + WX_PF_FLAGS_NBITS /* must be last */ +}; + struct wx { unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; DECLARE_BITMAP(state, WX_STATE_NBITS); + DECLARE_BITMAP(flags, WX_PF_FLAGS_NBITS); void *priv; u8 __iomem *hw_addr; @@ -1078,6 +1129,9 @@ struct wx { u64 hw_csum_rx_error; u64 alloc_rx_buff_failed; + u32 atr_sample_rate; + void (*atr)(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype); + void (*configure_fdir)(struct wx *wx); void (*do_reset)(struct net_device *netdev); }; diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c index 46a5a3e95202..e868f7ef4920 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_ethtool.c @@ -37,9 +37,9 @@ static int ngbe_set_wol(struct net_device *netdev, wx->wol = 0; if (wol->wolopts & WAKE_MAGIC) wx->wol = WX_PSR_WKUP_CTL_MAG; - netdev->wol_enabled = !!(wx->wol); + netdev->ethtool->wol_enabled = !!(wx->wol); wr32(wx, WX_PSR_WKUP_CTL, wx->wol); - device_set_wakeup_enable(&pdev->dev, netdev->wol_enabled); + device_set_wakeup_enable(&pdev->dev, netdev->ethtool->wol_enabled); return 0; } diff --git a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c index af30ca0312b8..53aeae2f884b 100644 --- a/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c +++ b/drivers/net/ethernet/wangxun/ngbe/ngbe_main.c @@ -652,7 +652,7 @@ static int ngbe_probe(struct pci_dev *pdev, if (wx->wol_hw_supported) wx->wol = NGBE_PSR_WKUP_CTL_MAG; - netdev->wol_enabled = !!(wx->wol); + netdev->ethtool->wol_enabled = !!(wx->wol); wr32(wx, 
NGBE_PSR_WKUP_CTL, wx->wol); device_set_wakeup_enable(&pdev->dev, wx->wol); diff --git a/drivers/net/ethernet/wangxun/txgbe/Makefile b/drivers/net/ethernet/wangxun/txgbe/Makefile index 42718875277c..f74576fe7062 100644 --- a/drivers/net/ethernet/wangxun/txgbe/Makefile +++ b/drivers/net/ethernet/wangxun/txgbe/Makefile @@ -10,4 +10,5 @@ txgbe-objs := txgbe_main.o \ txgbe_hw.o \ txgbe_phy.o \ txgbe_irq.o \ + txgbe_fdir.o \ txgbe_ethtool.o diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c index 31fde3fa7c6b..d98314b26c19 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_ethtool.c @@ -9,6 +9,7 @@ #include "../libwx/wx_type.h" #include "../libwx/wx_lib.h" #include "txgbe_type.h" +#include "txgbe_fdir.h" #include "txgbe_ethtool.h" static int txgbe_set_ringparam(struct net_device *netdev, @@ -79,6 +80,430 @@ static int txgbe_set_channels(struct net_device *dev, return txgbe_setup_tc(dev, netdev_get_num_tc(dev)); } +static int txgbe_get_ethtool_fdir_entry(struct txgbe *txgbe, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + union txgbe_atr_input *mask = &txgbe->fdir_mask; + struct txgbe_fdir_filter *rule = NULL; + struct hlist_node *node; + + /* report total rule count */ + cmd->data = (1024 << TXGBE_FDIR_PBALLOC_64K) - 2; + + hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list, + fdir_node) { + if (fsp->location <= rule->sw_idx) + break; + } + + if (!rule || fsp->location != rule->sw_idx) + return -EINVAL; + + /* set flow type field */ + switch (rule->filter.formatted.flow_type) { + case TXGBE_ATR_FLOW_TYPE_TCPV4: + fsp->flow_type = TCP_V4_FLOW; + break; + case TXGBE_ATR_FLOW_TYPE_UDPV4: + fsp->flow_type = UDP_V4_FLOW; + break; + case TXGBE_ATR_FLOW_TYPE_SCTPV4: + fsp->flow_type = SCTP_V4_FLOW; + break; + case TXGBE_ATR_FLOW_TYPE_IPV4: + fsp->flow_type = IP_USER_FLOW; + fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4; + fsp->h_u.usr_ip4_spec.proto = 0; + fsp->m_u.usr_ip4_spec.proto = 0; + break; + default: + return -EINVAL; + } + + fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port; + fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port; + fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port; + fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port; + fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0]; + fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0]; + fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0]; + fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes; + fsp->m_ext.vlan_etype = mask->formatted.flex_bytes; + fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool); + fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool); + fsp->flow_type |= FLOW_EXT; + + /* record action */ + if (rule->action == TXGBE_RDB_FDIR_DROP_QUEUE) + fsp->ring_cookie = RX_CLS_FLOW_DISC; + else + fsp->ring_cookie = rule->action; + + return 0; +} + +static int txgbe_get_ethtool_fdir_all(struct txgbe *txgbe, + struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct txgbe_fdir_filter *rule; + struct hlist_node *node; + int cnt = 0; + + /* report total rule count */ + cmd->data = (1024 << TXGBE_FDIR_PBALLOC_64K) - 2; + + hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list, + fdir_node) { + if (cnt == cmd->rule_cnt) + return -EMSGSIZE; + rule_locs[cnt] = rule->sw_idx; + cnt++; + } + + 
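The loop above encodes the ETHTOOL_GRXCLSRLALL contract: the handler fills rule_locs and returns -EMSGSIZE when the caller's rule_cnt is too small, so userspace is expected to query the count first (ETHTOOL_GRXCLSRLCNT) and size the array accordingly. A compilable model of that contract, with a hypothetical three-rule table standing in for the driver's filter list::

    #include <errno.h>
    #include <stdio.h>

    /* Pretend the driver currently holds three rules at these indices. */
    static const unsigned int rules[] = { 1, 3, 5 };
    #define NRULES (sizeof(rules) / sizeof(rules[0]))

    /* Mirrors txgbe_get_ethtool_fdir_all(): fill locs, bail out with
     * -EMSGSIZE as soon as the caller-provided space is exhausted.
     */
    static int get_all(unsigned int *locs, unsigned int space, unsigned int *cnt)
    {
        unsigned int n = 0;

        for (unsigned int i = 0; i < NRULES; i++) {
            if (n == space)
                return -EMSGSIZE;
            locs[n++] = rules[i];
        }
        *cnt = n;
        return 0;
    }

    int main(void)
    {
        unsigned int locs[NRULES], cnt;

        if (!get_all(locs, NRULES, &cnt))
            printf("%u rules, first at %u\n", cnt, locs[0]);
        return 0;
    }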
cmd->rule_cnt = cnt; + + return 0; +} + +static int txgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct wx *wx = netdev_priv(dev); + struct txgbe *txgbe = wx->priv; + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_GRXRINGS: + cmd->data = wx->num_rx_queues; + ret = 0; + break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = txgbe->fdir_filter_count; + ret = 0; + break; + case ETHTOOL_GRXCLSRULE: + ret = txgbe_get_ethtool_fdir_entry(txgbe, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + ret = txgbe_get_ethtool_fdir_all(txgbe, cmd, (u32 *)rule_locs); + break; + default: + break; + } + + return ret; +} + +static int txgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, + u8 *flow_type) +{ + switch (fsp->flow_type & ~FLOW_EXT) { + case TCP_V4_FLOW: + *flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case UDP_V4_FLOW: + *flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case SCTP_V4_FLOW: + *flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case IP_USER_FLOW: + switch (fsp->h_u.usr_ip4_spec.proto) { + case IPPROTO_TCP: + *flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4; + break; + case IPPROTO_UDP: + *flow_type = TXGBE_ATR_FLOW_TYPE_UDPV4; + break; + case IPPROTO_SCTP: + *flow_type = TXGBE_ATR_FLOW_TYPE_SCTPV4; + break; + case 0: + if (!fsp->m_u.usr_ip4_spec.proto) { + *flow_type = TXGBE_ATR_FLOW_TYPE_IPV4; + break; + } + fallthrough; + default: + return -EINVAL; + } + break; + default: + return -EINVAL; + } + + return 0; +} + +static bool txgbe_match_ethtool_fdir_entry(struct txgbe *txgbe, + struct txgbe_fdir_filter *input) +{ + struct txgbe_fdir_filter *rule = NULL; + struct hlist_node *node2; + + hlist_for_each_entry_safe(rule, node2, &txgbe->fdir_filter_list, + fdir_node) { + if (rule->filter.formatted.bkt_hash == + input->filter.formatted.bkt_hash && + rule->action == input->action) { + wx_dbg(txgbe->wx, "FDIR entry already exists\n"); + return true; + } + } + return false; +} + +static int txgbe_update_ethtool_fdir_entry(struct txgbe *txgbe, + struct txgbe_fdir_filter *input, + u16 sw_idx) +{ + struct hlist_node *node = NULL, *parent = NULL; + struct txgbe_fdir_filter *rule; + struct wx *wx = txgbe->wx; + bool deleted = false; + int err; + + hlist_for_each_entry_safe(rule, node, &txgbe->fdir_filter_list, + fdir_node) { + /* hash found, or no matching entry */ + if (rule->sw_idx >= sw_idx) + break; + parent = node; + } + + /* if there is an old rule occupying our place, remove it */ + if (rule && rule->sw_idx == sw_idx) { + /* hardware filters are only configured when interface is up, + * and we should not issue filter commands while the interface + * is down + */ + if (netif_running(wx->netdev) && + (!input || rule->filter.formatted.bkt_hash != + input->filter.formatted.bkt_hash)) { + err = txgbe_fdir_erase_perfect_filter(wx, + &rule->filter, + sw_idx); + if (err) + return -EINVAL; + } + + hlist_del(&rule->fdir_node); + kfree(rule); + txgbe->fdir_filter_count--; + deleted = true; + } + + /* If we weren't given an input, then this was a request to delete a + * filter. We should return -EINVAL if the filter wasn't found, but + * return 0 if the rule was successfully deleted. + */ + if (!input) + return deleted ? 0 : -EINVAL; + + /* initialize node and set software index */ + INIT_HLIST_NODE(&input->fdir_node); + + /* add filter to the list */ + if (parent) + hlist_add_behind(&input->fdir_node, parent); + else + hlist_add_head(&input->fdir_node, + &txgbe->fdir_filter_list); + + /* update counts */ + txgbe->fdir_filter_count++; + + return 0; +} +
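txgbe_update_ethtool_fdir_entry() keeps fdir_filter_list sorted by sw_idx, which is what lets both lookup paths stop at the first node whose sw_idx is greater than or equal to the target. The same bookkeeping in a self-contained userspace form, with a plain singly linked list standing in for the kernel hlist::

    #include <stdio.h>
    #include <stdlib.h>

    struct rule { unsigned int sw_idx; struct rule *next; };

    /* Insert keeping ascending sw_idx order, replacing any existing
     * rule with the same index (mirrors the parent/hlist_add_behind
     * walk above).
     */
    static void insert_sorted(struct rule **head, struct rule *in)
    {
        struct rule **pp = head;

        while (*pp && (*pp)->sw_idx < in->sw_idx)
            pp = &(*pp)->next;
        if (*pp && (*pp)->sw_idx == in->sw_idx) {
            struct rule *old = *pp; /* old rule occupies this slot */

            in->next = old->next;
            *pp = in;
            free(old);
            return;
        }
        in->next = *pp;
        *pp = in;
    }

    int main(void)
    {
        struct rule *head = NULL;
        unsigned int idx[] = { 5, 1, 3 };

        for (unsigned int i = 0; i < 3; i++) {
            struct rule *r = calloc(1, sizeof(*r));

            r->sw_idx = idx[i];
            insert_sorted(&head, r);
        }
        for (struct rule *r = head; r; r = r->next)
            printf("%u ", r->sw_idx); /* prints: 1 3 5 */
        printf("\n");
        return 0;
    }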
+static int txgbe_add_ethtool_fdir_entry(struct txgbe *txgbe, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct txgbe_fdir_filter *input; + union txgbe_atr_input mask; + struct wx *wx = txgbe->wx; + int err = -EINVAL; + u16 ptype = 0; + u8 queue; + + if (!(test_bit(WX_FLAG_FDIR_PERFECT, wx->flags))) + return -EOPNOTSUPP; + + /* ring_cookie is masked into a set of queues and txgbe pools, or + * we use the drop index + */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) { + queue = TXGBE_RDB_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); + + if (ring >= wx->num_rx_queues) + return -EINVAL; + + /* Map the ring onto the absolute queue index */ + queue = wx->rx_ring[ring]->reg_idx; + } + + /* Don't allow indexes to exist outside of available space */ + if (fsp->location >= ((1024 << TXGBE_FDIR_PBALLOC_64K) - 2)) { + wx_err(wx, "Location out of range\n"); + return -EINVAL; + } + + input = kzalloc(sizeof(*input), GFP_ATOMIC); + if (!input) + return -ENOMEM; + + memset(&mask, 0, sizeof(union txgbe_atr_input)); + + /* set SW index */ + input->sw_idx = fsp->location; + + /* record flow type */ + if (txgbe_flowspec_to_flow_type(fsp, + &input->filter.formatted.flow_type)) { + wx_err(wx, "Unrecognized flow type\n"); + goto err_out; + } + + mask.formatted.flow_type = TXGBE_ATR_L4TYPE_IPV6_MASK | + TXGBE_ATR_L4TYPE_MASK; + + if (input->filter.formatted.flow_type == TXGBE_ATR_FLOW_TYPE_IPV4) + mask.formatted.flow_type &= TXGBE_ATR_L4TYPE_IPV6_MASK; + + /* Copy input into formatted structures */ + input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src; + mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src; + input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst; + mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst; + input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc; + mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc; + input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst; + mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst; + + if (fsp->flow_type & FLOW_EXT) { + input->filter.formatted.vm_pool = + (unsigned char)ntohl(fsp->h_ext.data[1]); + mask.formatted.vm_pool = + (unsigned char)ntohl(fsp->m_ext.data[1]); + input->filter.formatted.flex_bytes = + fsp->h_ext.vlan_etype; + mask.formatted.flex_bytes = fsp->m_ext.vlan_etype; + } + + switch (input->filter.formatted.flow_type) { + case TXGBE_ATR_FLOW_TYPE_TCPV4: + ptype = WX_PTYPE_L2_IPV4_TCP; + break; + case TXGBE_ATR_FLOW_TYPE_UDPV4: + ptype = WX_PTYPE_L2_IPV4_UDP; + break; + case TXGBE_ATR_FLOW_TYPE_SCTPV4: + ptype = WX_PTYPE_L2_IPV4_SCTP; + break; + case TXGBE_ATR_FLOW_TYPE_IPV4: + ptype = WX_PTYPE_L2_IPV4; + break; + default: + break; + } + + input->filter.formatted.vlan_id = htons(ptype); + if (mask.formatted.flow_type & TXGBE_ATR_L4TYPE_MASK) + mask.formatted.vlan_id = htons(0xFFFF); + else + mask.formatted.vlan_id = htons(0xFFF8); + + /* determine if we need to drop or route the packet */ + if (fsp->ring_cookie == RX_CLS_FLOW_DISC) + input->action = TXGBE_RDB_FDIR_DROP_QUEUE; + else + input->action = fsp->ring_cookie; + + spin_lock(&txgbe->fdir_perfect_lock); +
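For reference, the flow specs parsed above arrive from userspace via commands along the lines of ``ethtool -N <dev> flow-type tcp4 dst-ip 192.0.2.1 dst-port 80 action 2``. A compilable sketch of how such a spec lands in the formatted filter; the struct is a deliberately simplified stand-in for union txgbe_atr_input, which keeps everything big-endian as modeled here::

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Just the fields a basic TCP/IPv4 dst-port rule touches. */
    struct filter {
        uint8_t  flow_type;
        uint32_t dst_ip;   /* stored big-endian */
        uint16_t dst_port; /* stored big-endian */
    };

    int main(void)
    {
        /* "flow-type tcp4 dst-ip 192.0.2.1 dst-port 80 action 2" */
        struct filter f = {
            .flow_type = 0x2,                    /* TXGBE_ATR_FLOW_TYPE_TCPV4 */
            .dst_ip    = inet_addr("192.0.2.1"), /* already network order */
            .dst_port  = htons(80),              /* convert to network order */
        };

        printf("type=%#x port=%u\n", (unsigned int)f.flow_type,
               (unsigned int)ntohs(f.dst_port));
        return 0;
    }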
+ if (hlist_empty(&txgbe->fdir_filter_list)) { + /* save mask and program input mask into HW */ + memcpy(&txgbe->fdir_mask, &mask, sizeof(mask)); + err = txgbe_fdir_set_input_mask(wx, &mask); + if (err) + goto err_unlock; + } else if (memcmp(&txgbe->fdir_mask, &mask, sizeof(mask))) { + wx_err(wx, "Hardware only supports one mask per port. To change the mask you must first delete all the rules.\n"); + goto err_unlock; + } + + /* apply mask and compute/store hash */ + txgbe_atr_compute_perfect_hash(&input->filter, &mask); + + /* check if new entry does not exist on filter list */ + if (txgbe_match_ethtool_fdir_entry(txgbe, input)) + goto err_unlock; + + /* only program filters to hardware if the net device is running, as + * we store the filters in the Rx buffer which is not allocated when + * the device is down + */ + if (netif_running(wx->netdev)) { + err = txgbe_fdir_write_perfect_filter(wx, &input->filter, + input->sw_idx, queue); + if (err) + goto err_unlock; + } + + txgbe_update_ethtool_fdir_entry(txgbe, input, input->sw_idx); + + spin_unlock(&txgbe->fdir_perfect_lock); + + return 0; +err_unlock: + spin_unlock(&txgbe->fdir_perfect_lock); +err_out: + kfree(input); + return err; +} + +static int txgbe_del_ethtool_fdir_entry(struct txgbe *txgbe, + struct ethtool_rxnfc *cmd) +{ + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + int err = 0; + + spin_lock(&txgbe->fdir_perfect_lock); + err = txgbe_update_ethtool_fdir_entry(txgbe, NULL, fsp->location); + spin_unlock(&txgbe->fdir_perfect_lock); + + return err; +} + +static int txgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) +{ + struct wx *wx = netdev_priv(dev); + struct txgbe *txgbe = wx->priv; + int ret = -EOPNOTSUPP; + + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + ret = txgbe_add_ethtool_fdir_entry(txgbe, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + ret = txgbe_del_ethtool_fdir_entry(txgbe, cmd); + break; + default: + break; + } + + return ret; +} + static const struct ethtool_ops txgbe_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, @@ -100,6 +525,8 @@ static const struct ethtool_ops txgbe_ethtool_ops = { .set_coalesce = wx_set_coalesce, .get_channels = wx_get_channels, .set_channels = txgbe_set_channels, + .get_rxnfc = txgbe_get_rxnfc, + .set_rxnfc = txgbe_set_rxnfc, .get_msglevel = wx_get_msglevel, .set_msglevel = wx_set_msglevel, }; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c new file mode 100644 index 000000000000..ef50efbaec0f --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.c @@ -0,0 +1,643 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. 
*/ + +#include +#include +#include + +#include "../libwx/wx_type.h" +#include "../libwx/wx_lib.h" +#include "../libwx/wx_hw.h" +#include "txgbe_type.h" +#include "txgbe_fdir.h" + +/* These defines allow us to quickly generate all of the necessary instructions + * in the function below by simply calling out TXGBE_COMPUTE_SIG_HASH_ITERATION + * for values 0 through 15 + */ +#define TXGBE_ATR_COMMON_HASH_KEY \ + (TXGBE_ATR_BUCKET_HASH_KEY & TXGBE_ATR_SIGNATURE_HASH_KEY) +#define TXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \ + common_hash ^= lo_hash_dword >> n; \ + else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \ + sig_hash ^= lo_hash_dword << (16 - n); \ + if (TXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \ + common_hash ^= hi_hash_dword >> n; \ + else if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ + else if (TXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \ + sig_hash ^= hi_hash_dword << (16 - n); \ +} while (0) + +/** + * txgbe_atr_compute_sig_hash - Compute the signature hash + * @input: input bitstream to compute the hash on + * @common: compressed common input dword + * @hash: pointer to the computed hash + * + * This function is almost identical to the function above but contains + * several optimizations such as unwinding all of the loops, letting the + * compiler work out all of the conditional ifs since the keys are static + * defines, and computing two keys at once since the hashed dword stream + * will be the same for both keys. + **/ +static void txgbe_atr_compute_sig_hash(union txgbe_atr_hash_dword input, + union txgbe_atr_hash_dword common, + u32 *hash) +{ + u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 i; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = ntohl(input.dword); + + /* generate common hash dword */ + hi_hash_dword = ntohl(common.dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + TXGBE_COMPUTE_SIG_HASH_ITERATION(0); + + /* apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the VLAN until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process the remaining 30 bits of the key */ + for (i = 1; i <= 15; i++) + TXGBE_COMPUTE_SIG_HASH_ITERATION(i); + + /* combine common_hash result with signature and bucket hashes */ + bucket_hash ^= common_hash; + bucket_hash &= TXGBE_ATR_HASH_MASK; + + sig_hash ^= common_hash << 16; + sig_hash &= TXGBE_ATR_HASH_MASK << 16; + + /* return completed signature hash */ + *hash = sig_hash ^ bucket_hash; +} + +#define TXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ +do { \ + u32 n = (_n); \ + if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ + bucket_hash ^= lo_hash_dword >> n; \ + if (TXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ + bucket_hash ^= hi_hash_dword >> n; \ +} while (0) +
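The unrolled iterations above are a per-bit fold of a 32-bit key: bit n of the key decides whether (lo_hash_dword >> n) contributes to the hash, and bit n+16 whether (hi_hash_dword >> n) does. A loop-form userspace equivalent for the bucket hash, using the driver's real key and hash mask; the bit-0/VLAN ordering subtlety the comments describe is ignored here for brevity::

    #include <stdint.h>
    #include <stdio.h>

    #define BUCKET_HASH_KEY 0x3DAD14E2u /* TXGBE_ATR_BUCKET_HASH_KEY */

    /* Loop-form equivalent of TXGBE_COMPUTE_BKT_HASH_ITERATION(0..15) */
    static uint32_t bucket_hash(uint32_t lo, uint32_t hi)
    {
        uint32_t hash = 0;

        for (int n = 0; n <= 15; n++) {
            if (BUCKET_HASH_KEY & (1u << n))
                hash ^= lo >> n;
            if (BUCKET_HASH_KEY & (1u << (n + 16)))
                hash ^= hi >> n;
        }
        return hash & 0x7FFF; /* TXGBE_ATR_HASH_MASK */
    }

    int main(void)
    {
        printf("%#x\n", bucket_hash(0x12345678, 0x9ABCDEF0));
        return 0;
    }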
+/** + * txgbe_atr_compute_perfect_hash - Compute the perfect filter hash + * @input: input bitstream to compute the hash on + * @input_mask: mask for the input bitstream + * + * This function serves two main purposes. First it applies the input_mask + * to the atr_input resulting in a cleaned up atr_input data stream. + * Secondly it computes the hash and stores it in the bkt_hash field at + * the end of the input byte stream. This way it will be available for + * future use without needing to recompute the hash. + **/ +void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input, + union txgbe_atr_input *input_mask) +{ + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 bucket_hash = 0; + __be32 hi_dword = 0; + u32 i = 0; + + /* Apply masks to input data */ + for (i = 0; i < 11; i++) + input->dword_stream[i] &= input_mask->dword_stream[i]; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ + flow_vm_vlan = ntohl(input->dword_stream[0]); + + /* generate common hash dword */ + for (i = 1; i <= 10; i++) + hi_dword ^= input->dword_stream[i]; + hi_hash_dword = ntohl(hi_dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); + + /* apply flow ID/VM pool/VLAN ID bits to hash words */ + hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); + + /* Process bits 0 and 16 */ + TXGBE_COMPUTE_BKT_HASH_ITERATION(0); + + /* apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed + * so we do not add the VLAN until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + + /* Process the remaining 30 bits of the key */ + for (i = 1; i <= 15; i++) + TXGBE_COMPUTE_BKT_HASH_ITERATION(i); + + /* Limit hash to 13 bits since max bucket count is 8K. + * Store result at the end of the input stream. + */ + input->formatted.bkt_hash = (__force __be16)(bucket_hash & 0x1FFF); +} + +static int txgbe_fdir_check_cmd_complete(struct wx *wx) +{ + u32 val; + + return read_poll_timeout_atomic(rd32, val, + !(val & TXGBE_RDB_FDIR_CMD_CMD_MASK), + 10, 100, false, + wx, TXGBE_RDB_FDIR_CMD); +} +
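The masking loop in txgbe_atr_compute_perfect_hash() walks exactly 11 dwords, so the correctness of the dword_stream alias depends on the formatted view packing into 44 bytes with no padding. A compile-time check one could keep next to a userspace mirror of the union; plain fixed-width integers stand in for the kernel's __be16/__be32 types::

    #include <assert.h>
    #include <stdint.h>

    /* Userspace mirror of union txgbe_atr_input. */
    union atr_input {
        struct {
            uint8_t  vm_pool;
            uint8_t  flow_type;
            uint16_t vlan_id;
            uint32_t dst_ip[4];
            uint32_t src_ip[4];
            uint16_t src_port;
            uint16_t dst_port;
            uint16_t flex_bytes;
            uint16_t bkt_hash;
        } formatted;
        uint32_t dword_stream[11];
    };

    /* The masking loop walks 11 dwords, so the two views must agree. */
    static_assert(sizeof(union atr_input) == 11 * sizeof(uint32_t),
                  "formatted view must pack into 11 dwords");

    int main(void) { return 0; }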
+/** + * txgbe_fdir_add_signature_filter - Adds a signature hash filter + * @wx: pointer to hardware structure + * @input: unique input dword + * @common: compressed common input dword + * @queue: queue index to direct traffic to + * + * @return: 0 on success and negative on failure + **/ +static int txgbe_fdir_add_signature_filter(struct wx *wx, + union txgbe_atr_hash_dword input, + union txgbe_atr_hash_dword common, + u8 queue) +{ + u32 fdirhashcmd, fdircmd; + u8 flow_type; + int err; + + /* Get the flow_type in order to program FDIRCMD properly + * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 + * fifth is FDIRCMD.TUNNEL_FILTER + */ + flow_type = input.formatted.flow_type; + switch (flow_type) { + case TXGBE_ATR_FLOW_TYPE_TCPV4: + case TXGBE_ATR_FLOW_TYPE_UDPV4: + case TXGBE_ATR_FLOW_TYPE_SCTPV4: + case TXGBE_ATR_FLOW_TYPE_TCPV6: + case TXGBE_ATR_FLOW_TYPE_UDPV6: + case TXGBE_ATR_FLOW_TYPE_SCTPV6: + break; + default: + wx_err(wx, "Error on flow type input\n"); + return -EINVAL; + } + + /* configure FDIRCMD register */ + fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW | + TXGBE_RDB_FDIR_CMD_FILTER_UPDATE | + TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN; + fdircmd |= TXGBE_RDB_FDIR_CMD_FLOW_TYPE(flow_type); + fdircmd |= TXGBE_RDB_FDIR_CMD_RX_QUEUE(queue); + + txgbe_atr_compute_sig_hash(input, common, &fdirhashcmd); + fdirhashcmd |= TXGBE_RDB_FDIR_HASH_BUCKET_VALID; + wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhashcmd); + wr32(wx, TXGBE_RDB_FDIR_CMD, fdircmd); + + wx_dbg(wx, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); + + err = txgbe_fdir_check_cmd_complete(wx); + if (err) + wx_err(wx, "Flow Director command did not complete!\n"); + + return err; +} + +void txgbe_atr(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype) +{ + union txgbe_atr_hash_dword common = { .dword = 0 }; + union txgbe_atr_hash_dword input = { .dword = 0 }; + struct wx_q_vector *q_vector = ring->q_vector; + struct wx_dec_ptype dptype; + union network_header { + struct ipv6hdr *ipv6; + struct iphdr *ipv4; + void *raw; + } hdr; + struct tcphdr *th; + + /* if ring doesn't have an interrupt vector, cannot perform ATR */ + if (!q_vector) + return; + + ring->atr_count++; + dptype = wx_decode_ptype(ptype); + if (dptype.etype) { + if (WX_PTYPE_TYPL4(ptype) != WX_PTYPE_TYP_TCP) + return; + hdr.raw = (void *)skb_inner_network_header(first->skb); + th = inner_tcp_hdr(first->skb); + } else { + if (WX_PTYPE_PKT(ptype) != WX_PTYPE_PKT_IP || + WX_PTYPE_TYPL4(ptype) != WX_PTYPE_TYP_TCP) + return; + hdr.raw = (void *)skb_network_header(first->skb); + th = tcp_hdr(first->skb); + } + + /* skip this packet since it is invalid or the socket is closing */ + if (!th || th->fin) + return; + + /* sample on all syn packets or once every atr sample count */ + if (!th->syn && ring->atr_count < ring->atr_sample_rate) + return; + + /* reset sample count */ + ring->atr_count = 0; + + /* src and dst are inverted, think how the receiver sees them + * + * The input is broken into two sections, a non-compressed section + * containing vm_pool, vlan_id, and flow_type. The rest of the data + * is XORed together and stored in the compressed dword. + */ + input.formatted.vlan_id = htons((u16)ptype); + + /* since src port and flex bytes occupy the same word, XOR them together + * and write the value to source port portion of compressed dword + */ + if (first->tx_flags & WX_TX_FLAGS_SW_VLAN) + common.port.src ^= th->dest ^ first->skb->protocol; + else if (first->tx_flags & WX_TX_FLAGS_HW_VLAN) + common.port.src ^= th->dest ^ first->skb->vlan_proto; + else + common.port.src ^= th->dest ^ first->protocol; + common.port.dst ^= th->source; + + if (WX_PTYPE_PKT_IPV6 & WX_PTYPE_PKT(ptype)) { + input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV6; + common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ + hdr.ipv6->saddr.s6_addr32[1] ^ + hdr.ipv6->saddr.s6_addr32[2] ^ + hdr.ipv6->saddr.s6_addr32[3] ^ + hdr.ipv6->daddr.s6_addr32[0] ^ + hdr.ipv6->daddr.s6_addr32[1] ^ + hdr.ipv6->daddr.s6_addr32[2] ^ + hdr.ipv6->daddr.s6_addr32[3]; + } else { + input.formatted.flow_type = TXGBE_ATR_FLOW_TYPE_TCPV4; + common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; + } + + /* This assumes the Rx queue and Tx queue are bound to the same CPU */ + txgbe_fdir_add_signature_filter(q_vector->wx, input, common, + ring->queue_index); +} +
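txgbe_atr() above rate-limits how often the hardware filter table is touched: every SYN is sampled, otherwise only one packet per atr_sample_rate per ring. The gate in isolation, runnable in userspace; 20 matches TXGBE_DEFAULT_ATR_SAMPLE_RATE from txgbe_type.h::

    #include <stdbool.h>
    #include <stdio.h>

    struct ring { unsigned int atr_count, atr_sample_rate; };

    /* Mirrors the gate in txgbe_atr(): sample on SYN, or once the
     * per-ring counter reaches the sample rate; reset on sample.
     */
    static bool should_sample(struct ring *r, bool syn)
    {
        r->atr_count++;
        if (!syn && r->atr_count < r->atr_sample_rate)
            return false;
        r->atr_count = 0;
        return true;
    }

    int main(void)
    {
        struct ring r = { .atr_sample_rate = 20 }; /* driver default */
        int sampled = 0;

        for (int i = 0; i < 100; i++)
            sampled += should_sample(&r, false);
        printf("sampled %d of 100\n", sampled); /* sampled 5 of 100 */
        return 0;
    }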
+int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask) +{ + u32 fdirm = 0, fdirtcpm = 0, flex = 0; + + /* Program the relevant mask registers. If src/dst_port or src/dst_addr + * are zero, then assume a full mask for that field. Also assume that + * a VLAN of 0 is unspecified, so mask that out as well. L4type + * cannot be masked out in this implementation. + * + * This also assumes IPv4 only. IPv6 masking isn't supported at this + * point in time. + */ + + /* verify bucket hash is cleared on hash generation */ + if (input_mask->formatted.bkt_hash) + wx_dbg(wx, "bucket hash should always be 0 in mask\n"); + + /* Program FDIRM and verify partial masks */ + switch (input_mask->formatted.vm_pool & 0x7F) { + case 0x0: + fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_POOL; + break; + case 0x7F: + break; + default: + wx_err(wx, "Error on vm pool mask\n"); + return -EINVAL; + } + + switch (input_mask->formatted.flow_type & TXGBE_ATR_L4TYPE_MASK) { + case 0x0: + fdirm |= TXGBE_RDB_FDIR_OTHER_MSK_L4P; + if (input_mask->formatted.dst_port || + input_mask->formatted.src_port) { + wx_err(wx, "Error on src/dst port mask\n"); + return -EINVAL; + } + break; + case TXGBE_ATR_L4TYPE_MASK: + break; + default: + wx_err(wx, "Error on flow type mask\n"); + return -EINVAL; + } + + /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ + wr32(wx, TXGBE_RDB_FDIR_OTHER_MSK, fdirm); + + flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0)); + flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0; + flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC | + TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)); + + switch ((__force u16)input_mask->formatted.flex_bytes & 0xFFFF) { + case 0x0000: + /* Mask Flex Bytes */ + flex |= TXGBE_RDB_FDIR_FLEX_CFG_MSK; + break; + case 0xFFFF: + break; + default: + wx_err(wx, "Error on flexible byte mask\n"); + return -EINVAL; + } + wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex); + + /* store the TCP/UDP port masks, bit reversed from port layout */ + fdirtcpm = ntohs(input_mask->formatted.dst_port); + fdirtcpm <<= TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT; + fdirtcpm |= ntohs(input_mask->formatted.src_port); + + /* write both the same so that UDP and TCP use the same mask */ + wr32(wx, TXGBE_RDB_FDIR_TCP_MSK, ~fdirtcpm); + wr32(wx, TXGBE_RDB_FDIR_UDP_MSK, ~fdirtcpm); + wr32(wx, TXGBE_RDB_FDIR_SCTP_MSK, ~fdirtcpm); + + /* store source and destination IP masks (little-endian) */ + wr32(wx, TXGBE_RDB_FDIR_SA4_MSK, + ntohl(~input_mask->formatted.src_ip[0])); + wr32(wx, TXGBE_RDB_FDIR_DA4_MSK, + ntohl(~input_mask->formatted.dst_ip[0])); + + return 0; +} +
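One detail in txgbe_fdir_set_input_mask() worth a worked example: the destination-port mask is packed into the high half of the register word and the bitwise complement of the whole word is what gets written, which suggests the hardware treats a set register bit as "don't compare". A self-contained sketch of the packing; DST_SHIFT is a local stand-in for TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT::

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DST_SHIFT 16 /* TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT */

    int main(void)
    {
        /* An exact-match rule carries all-ones port masks, stored
         * big-endian as in union txgbe_atr_input.
         */
        uint16_t src_mask = htons(0xFFFF);
        uint16_t dst_mask = htons(0xFFFF);
        uint32_t fdirtcpm = (uint32_t)ntohs(dst_mask) << DST_SHIFT |
                            ntohs(src_mask);

        /* Writing the complement turns an exact-match mask into an
         * all-zero register value.
         */
        printf("reg=0x%08x\n", ~fdirtcpm); /* prints reg=0x00000000 */
        return 0;
    }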
+int txgbe_fdir_write_perfect_filter(struct wx *wx, + union txgbe_atr_input *input, + u16 soft_id, u8 queue) +{ + u32 fdirport, fdirvlan, fdirhash, fdircmd; + int err = 0; + + /* currently IPv6 is not supported, must be programmed with 0 */ + wr32(wx, TXGBE_RDB_FDIR_IP6(2), ntohl(input->formatted.src_ip[0])); + wr32(wx, TXGBE_RDB_FDIR_IP6(1), ntohl(input->formatted.src_ip[1])); + wr32(wx, TXGBE_RDB_FDIR_IP6(0), ntohl(input->formatted.src_ip[2])); + + /* record the source address (little-endian) */ + wr32(wx, TXGBE_RDB_FDIR_SA, ntohl(input->formatted.src_ip[0])); + + /* record the first 32 bits of the destination address + * (little-endian) + */ + wr32(wx, TXGBE_RDB_FDIR_DA, ntohl(input->formatted.dst_ip[0])); + + /* record source and destination port (little-endian) */ + fdirport = ntohs(input->formatted.dst_port); + fdirport <<= TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT; + fdirport |= ntohs(input->formatted.src_port); + wr32(wx, TXGBE_RDB_FDIR_PORT, fdirport); + + /* record packet type and flex_bytes (little-endian) */ + fdirvlan = ntohs(input->formatted.flex_bytes); + fdirvlan <<= TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT; + fdirvlan |= ntohs(input->formatted.vlan_id); + wr32(wx, TXGBE_RDB_FDIR_FLEX, fdirvlan); + + /* configure FDIRHASH register */ + fdirhash = (__force u32)input->formatted.bkt_hash | + TXGBE_RDB_FDIR_HASH_BUCKET_VALID | + TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(soft_id); + wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash); + + /* flush all previous writes to make certain registers are + * programmed prior to issuing the command + */ + WX_WRITE_FLUSH(wx); + + /* configure FDIRCMD register */ + fdircmd = TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW | + TXGBE_RDB_FDIR_CMD_FILTER_UPDATE | + TXGBE_RDB_FDIR_CMD_LAST | TXGBE_RDB_FDIR_CMD_QUEUE_EN; + if (queue == TXGBE_RDB_FDIR_DROP_QUEUE) + fdircmd |= TXGBE_RDB_FDIR_CMD_DROP; + fdircmd |= TXGBE_RDB_FDIR_CMD_FLOW_TYPE(input->formatted.flow_type); + fdircmd |= TXGBE_RDB_FDIR_CMD_RX_QUEUE(queue); + fdircmd |= TXGBE_RDB_FDIR_CMD_VT_POOL(input->formatted.vm_pool); + + wr32(wx, TXGBE_RDB_FDIR_CMD, fdircmd); + err = txgbe_fdir_check_cmd_complete(wx); + if (err) + wx_err(wx, "Flow Director command did not complete!\n"); + + return err; +} + +int txgbe_fdir_erase_perfect_filter(struct wx *wx, + union txgbe_atr_input *input, + u16 soft_id) +{ + u32 fdirhash, fdircmd; + int err = 0; + + /* configure FDIRHASH register */ + fdirhash = (__force u32)input->formatted.bkt_hash; + fdirhash |= TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(soft_id); + wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash); + + /* flush hash to HW */ + WX_WRITE_FLUSH(wx); + + /* Query if filter is present */ + wr32(wx, TXGBE_RDB_FDIR_CMD, TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT); + + err = txgbe_fdir_check_cmd_complete(wx); + if (err) { + wx_err(wx, "Flow Director command did not complete!\n"); + return err; + } + + fdircmd = rd32(wx, TXGBE_RDB_FDIR_CMD); + /* if filter exists in hardware then remove it */ + if (fdircmd & TXGBE_RDB_FDIR_CMD_FILTER_VALID) { + wr32(wx, TXGBE_RDB_FDIR_HASH, fdirhash); + WX_WRITE_FLUSH(wx); + wr32(wx, TXGBE_RDB_FDIR_CMD, + TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW); + } + + return 0; +} + +/** + * txgbe_fdir_enable - Initialize Flow Director control registers + * @wx: pointer to hardware structure + * @fdirctrl: value to write to flow director control register + **/ +static void txgbe_fdir_enable(struct wx *wx, u32 fdirctrl) +{ + u32 val; + int ret; + + /* Prime the keys for hashing */ + wr32(wx, TXGBE_RDB_FDIR_HKEY, TXGBE_ATR_BUCKET_HASH_KEY); + wr32(wx, TXGBE_RDB_FDIR_SKEY, TXGBE_ATR_SIGNATURE_HASH_KEY); + + wr32(wx, TXGBE_RDB_FDIR_CTL, fdirctrl); + WX_WRITE_FLUSH(wx); + ret = read_poll_timeout(rd32, val, val & TXGBE_RDB_FDIR_CTL_INIT_DONE, + 1000, 10000, false, wx, TXGBE_RDB_FDIR_CTL); + + if (ret < 0) + wx_dbg(wx, "Flow Director poll time exceeded!\n"); +} + +/** + * txgbe_init_fdir_signature - Initialize Flow Director sig filters + * @wx: pointer to hardware structure + **/ +static void txgbe_init_fdir_signature(struct wx *wx) +{ + u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K; + u32 flex = 0; + + flex = rd32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0)); + flex &= ~TXGBE_RDB_FDIR_FLEX_CFG_FIELD0; + + flex |= (TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC | + TXGBE_RDB_FDIR_FLEX_CFG_OFST(0x6)); + wr32(wx, TXGBE_RDB_FDIR_FLEX_CFG(0), flex); + + /* Continue setup of fdirctrl register bits: + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 filters are left + */ + fdirctrl |= TXGBE_RDB_FDIR_CTL_HASH_BITS(0xF) | + TXGBE_RDB_FDIR_CTL_MAX_LENGTH(0xA) | + TXGBE_RDB_FDIR_CTL_FULL_THRESH(4); + + /* write hashes and fdirctrl register, poll for completion */ + txgbe_fdir_enable(wx, fdirctrl); +} + +/** + * txgbe_init_fdir_perfect - Initialize Flow Director perfect filters + * @wx: pointer to hardware structure + **/ +static void txgbe_init_fdir_perfect(struct wx *wx) +{ + u32 fdirctrl = TXGBE_FDIR_PBALLOC_64K; + + /* Continue setup of fdirctrl register bits: + * Turn perfect match filtering on + * Report
hash in RSS field of Rx wb descriptor + * Initialize the drop queue + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 (0x4 * 16) filters are left + */ + fdirctrl |= TXGBE_RDB_FDIR_CTL_PERFECT_MATCH | + TXGBE_RDB_FDIR_CTL_DROP_Q(TXGBE_RDB_FDIR_DROP_QUEUE) | + TXGBE_RDB_FDIR_CTL_HASH_BITS(0xF) | + TXGBE_RDB_FDIR_CTL_MAX_LENGTH(0xA) | + TXGBE_RDB_FDIR_CTL_FULL_THRESH(4); + + /* write hashes and fdirctrl register, poll for completion */ + txgbe_fdir_enable(wx, fdirctrl); +} + +static void txgbe_fdir_filter_restore(struct wx *wx) +{ + struct txgbe_fdir_filter *filter; + struct txgbe *txgbe = wx->priv; + struct hlist_node *node; + u8 queue = 0; + int ret = 0; + + spin_lock(&txgbe->fdir_perfect_lock); + + if (!hlist_empty(&txgbe->fdir_filter_list)) + ret = txgbe_fdir_set_input_mask(wx, &txgbe->fdir_mask); + + if (ret) + goto unlock; + + hlist_for_each_entry_safe(filter, node, + &txgbe->fdir_filter_list, fdir_node) { + if (filter->action == TXGBE_RDB_FDIR_DROP_QUEUE) { + queue = TXGBE_RDB_FDIR_DROP_QUEUE; + } else { + u32 ring = ethtool_get_flow_spec_ring(filter->action); + + if (ring >= wx->num_rx_queues) { + wx_err(wx, "FDIR restore failed, ring:%u\n", + ring); + continue; + } + + /* Map the ring onto the absolute queue index */ + queue = wx->rx_ring[ring]->reg_idx; + } + + ret = txgbe_fdir_write_perfect_filter(wx, + &filter->filter, + filter->sw_idx, + queue); + if (ret) + wx_err(wx, "FDIR restore failed, index:%u\n", + filter->sw_idx); + } + +unlock: + spin_unlock(&txgbe->fdir_perfect_lock); +} + +void txgbe_configure_fdir(struct wx *wx) +{ + wx_disable_sec_rx_path(wx); + + if (test_bit(WX_FLAG_FDIR_HASH, wx->flags)) { + txgbe_init_fdir_signature(wx); + } else if (test_bit(WX_FLAG_FDIR_PERFECT, wx->flags)) { + txgbe_init_fdir_perfect(wx); + txgbe_fdir_filter_restore(wx); + } + + wx_enable_sec_rx_path(wx); +} + +void txgbe_fdir_filter_exit(struct wx *wx) +{ + struct txgbe_fdir_filter *filter; + struct txgbe *txgbe = wx->priv; + struct hlist_node *node; + + spin_lock(&txgbe->fdir_perfect_lock); + + hlist_for_each_entry_safe(filter, node, + &txgbe->fdir_filter_list, fdir_node) { + hlist_del(&filter->fdir_node); + kfree(filter); + } + txgbe->fdir_filter_count = 0; + + spin_unlock(&txgbe->fdir_perfect_lock); +} diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h new file mode 100644 index 000000000000..1f44ce60becb --- /dev/null +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_fdir.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2015 - 2024 Beijing WangXun Technology Co., Ltd. 
*/ + +#ifndef _TXGBE_FDIR_H_ +#define _TXGBE_FDIR_H_ + +void txgbe_atr_compute_perfect_hash(union txgbe_atr_input *input, + union txgbe_atr_input *input_mask); +void txgbe_atr(struct wx_ring *ring, struct wx_tx_buffer *first, u8 ptype); +int txgbe_fdir_set_input_mask(struct wx *wx, union txgbe_atr_input *input_mask); +int txgbe_fdir_write_perfect_filter(struct wx *wx, + union txgbe_atr_input *input, + u16 soft_id, u8 queue); +int txgbe_fdir_erase_perfect_filter(struct wx *wx, + union txgbe_atr_input *input, + u16 soft_id); +void txgbe_configure_fdir(struct wx *wx); +void txgbe_fdir_filter_exit(struct wx *wx); + +#endif /* _TXGBE_FDIR_H_ */ diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c index ca74d9422065..93180225a6f1 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_main.c @@ -18,6 +18,7 @@ #include "txgbe_hw.h" #include "txgbe_phy.h" #include "txgbe_irq.h" +#include "txgbe_fdir.h" #include "txgbe_ethtool.h" char txgbe_driver_name[] = "txgbe"; @@ -257,6 +258,14 @@ static int txgbe_sw_init(struct wx *wx) num_online_cpus()); wx->rss_enabled = true; + wx->ring_feature[RING_F_FDIR].limit = min_t(int, TXGBE_MAX_FDIR_INDICES, + num_online_cpus()); + set_bit(WX_FLAG_FDIR_CAPABLE, wx->flags); + set_bit(WX_FLAG_FDIR_HASH, wx->flags); + wx->atr_sample_rate = TXGBE_DEFAULT_ATR_SAMPLE_RATE; + wx->atr = txgbe_atr; + wx->configure_fdir = txgbe_configure_fdir; + /* enable itr by default in dynamic mode */ wx->rx_itr_setting = 1; wx->tx_itr_setting = 1; @@ -274,6 +283,12 @@ static int txgbe_sw_init(struct wx *wx) return 0; } +static void txgbe_init_fdir(struct txgbe *txgbe) +{ + txgbe->fdir_filter_count = 0; + spin_lock_init(&txgbe->fdir_perfect_lock); +} + /** * txgbe_open - Called when a network interface is made active * @netdev: network interface device structure @@ -352,6 +367,7 @@ static int txgbe_close(struct net_device *netdev) txgbe_down(wx); wx_free_irq(wx); wx_free_resources(wx); + txgbe_fdir_filter_exit(wx); wx_control_hw(wx, false); return 0; @@ -660,6 +676,8 @@ static int txgbe_probe(struct pci_dev *pdev, txgbe->wx = wx; wx->priv = txgbe; + txgbe_init_fdir(txgbe); + err = txgbe_setup_misc_irq(txgbe); if (err) goto err_release_hw; diff --git a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h index f434a7865cb7..959102c4c379 100644 --- a/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h +++ b/drivers/net/ethernet/wangxun/txgbe/txgbe_type.h @@ -89,6 +89,55 @@ #define TXGBE_XPCS_IDA_ADDR 0x13000 #define TXGBE_XPCS_IDA_DATA 0x13004 +/********************************* Flow Director *****************************/ +#define TXGBE_RDB_FDIR_DROP_QUEUE 127 +#define TXGBE_RDB_FDIR_CTL 0x19500 +#define TXGBE_RDB_FDIR_CTL_INIT_DONE BIT(3) +#define TXGBE_RDB_FDIR_CTL_PERFECT_MATCH BIT(4) +#define TXGBE_RDB_FDIR_CTL_DROP_Q(v) FIELD_PREP(GENMASK(14, 8), v) +#define TXGBE_RDB_FDIR_CTL_HASH_BITS(v) FIELD_PREP(GENMASK(23, 20), v) +#define TXGBE_RDB_FDIR_CTL_MAX_LENGTH(v) FIELD_PREP(GENMASK(27, 24), v) +#define TXGBE_RDB_FDIR_CTL_FULL_THRESH(v) FIELD_PREP(GENMASK(31, 28), v) +#define TXGBE_RDB_FDIR_IP6(_i) (0x1950C + ((_i) * 4)) /* 0-2 */ +#define TXGBE_RDB_FDIR_SA 0x19518 +#define TXGBE_RDB_FDIR_DA 0x1951C +#define TXGBE_RDB_FDIR_PORT 0x19520 +#define TXGBE_RDB_FDIR_PORT_DESTINATION_SHIFT 16 +#define TXGBE_RDB_FDIR_FLEX 0x19524 +#define TXGBE_RDB_FDIR_FLEX_FLEX_SHIFT 16 +#define TXGBE_RDB_FDIR_HASH 0x19528 +#define 
TXGBE_RDB_FDIR_HASH_SIG_SW_INDEX(v) FIELD_PREP(GENMASK(31, 16), v) +#define TXGBE_RDB_FDIR_HASH_BUCKET_VALID BIT(15) +#define TXGBE_RDB_FDIR_CMD 0x1952C +#define TXGBE_RDB_FDIR_CMD_CMD_MASK GENMASK(1, 0) +#define TXGBE_RDB_FDIR_CMD_CMD(v) FIELD_PREP(GENMASK(1, 0), v) +#define TXGBE_RDB_FDIR_CMD_CMD_ADD_FLOW TXGBE_RDB_FDIR_CMD_CMD(1) +#define TXGBE_RDB_FDIR_CMD_CMD_REMOVE_FLOW TXGBE_RDB_FDIR_CMD_CMD(2) +#define TXGBE_RDB_FDIR_CMD_CMD_QUERY_REM_FILT TXGBE_RDB_FDIR_CMD_CMD(3) +#define TXGBE_RDB_FDIR_CMD_FILTER_VALID BIT(2) +#define TXGBE_RDB_FDIR_CMD_FILTER_UPDATE BIT(3) +#define TXGBE_RDB_FDIR_CMD_FLOW_TYPE(v) FIELD_PREP(GENMASK(6, 5), v) +#define TXGBE_RDB_FDIR_CMD_DROP BIT(9) +#define TXGBE_RDB_FDIR_CMD_LAST BIT(11) +#define TXGBE_RDB_FDIR_CMD_QUEUE_EN BIT(15) +#define TXGBE_RDB_FDIR_CMD_RX_QUEUE(v) FIELD_PREP(GENMASK(22, 16), v) +#define TXGBE_RDB_FDIR_CMD_VT_POOL(v) FIELD_PREP(GENMASK(29, 24), v) +#define TXGBE_RDB_FDIR_DA4_MSK 0x1953C +#define TXGBE_RDB_FDIR_SA4_MSK 0x19540 +#define TXGBE_RDB_FDIR_TCP_MSK 0x19544 +#define TXGBE_RDB_FDIR_UDP_MSK 0x19548 +#define TXGBE_RDB_FDIR_SCTP_MSK 0x19560 +#define TXGBE_RDB_FDIR_HKEY 0x19568 +#define TXGBE_RDB_FDIR_SKEY 0x1956C +#define TXGBE_RDB_FDIR_OTHER_MSK 0x19570 +#define TXGBE_RDB_FDIR_OTHER_MSK_POOL BIT(2) +#define TXGBE_RDB_FDIR_OTHER_MSK_L4P BIT(3) +#define TXGBE_RDB_FDIR_FLEX_CFG(_i) (0x19580 + ((_i) * 4)) +#define TXGBE_RDB_FDIR_FLEX_CFG_FIELD0 GENMASK(7, 0) +#define TXGBE_RDB_FDIR_FLEX_CFG_BASE_MAC FIELD_PREP(GENMASK(1, 0), 0) +#define TXGBE_RDB_FDIR_FLEX_CFG_MSK BIT(2) +#define TXGBE_RDB_FDIR_FLEX_CFG_OFST(v) FIELD_PREP(GENMASK(7, 3), v) + /* Checksum and EEPROM pointers */ #define TXGBE_EEPROM_LAST_WORD 0x800 #define TXGBE_EEPROM_CHECKSUM 0x2F @@ -112,6 +161,98 @@ #define TXGBE_SP_RX_PB_SIZE 512 #define TXGBE_SP_TDB_PB_SZ (160 * 1024) /* 160KB Packet Buffer */ +#define TXGBE_DEFAULT_ATR_SAMPLE_RATE 20 + +/* Software ATR hash keys */ +#define TXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +#define TXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 + +/* Software ATR input stream values and masks */ +#define TXGBE_ATR_HASH_MASK 0x7fff +#define TXGBE_ATR_L4TYPE_MASK 0x3 +#define TXGBE_ATR_L4TYPE_UDP 0x1 +#define TXGBE_ATR_L4TYPE_TCP 0x2 +#define TXGBE_ATR_L4TYPE_SCTP 0x3 +#define TXGBE_ATR_L4TYPE_IPV6_MASK 0x4 +#define TXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 + +enum txgbe_atr_flow_type { + TXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, + TXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, + TXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, + TXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, + TXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, + TXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, + TXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, + TXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, + TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10, + TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11, + TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12, + TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13, + TXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14, + TXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15, + TXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16, + TXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17, +}; + +/* Flow Director ATR input struct. 
*/ +union txgbe_atr_input { + /* Byte layout in order, all values with MSB first: + * + * vm_pool - 1 byte + * flow_type - 1 byte + * vlan_id - 2 bytes + * dst_ip - 16 bytes + * src_ip - 16 bytes + * src_port - 2 bytes + * dst_port - 2 bytes + * flex_bytes - 2 bytes + * bkt_hash - 2 bytes + */ + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + __be32 dst_ip[4]; + __be32 src_ip[4]; + __be16 src_port; + __be16 dst_port; + __be16 flex_bytes; + __be16 bkt_hash; + } formatted; + __be32 dword_stream[11]; +}; + +/* Flow Director compressed ATR hash input struct */ +union txgbe_atr_hash_dword { + struct { + u8 vm_pool; + u8 flow_type; + __be16 vlan_id; + } formatted; + __be32 ip; + struct { + __be16 src; + __be16 dst; + } port; + __be16 flex_bytes; + __be32 dword; +}; + +enum txgbe_fdir_pballoc_type { + TXGBE_FDIR_PBALLOC_NONE = 0, + TXGBE_FDIR_PBALLOC_64K = 1, + TXGBE_FDIR_PBALLOC_128K = 2, + TXGBE_FDIR_PBALLOC_256K = 3, +}; + +struct txgbe_fdir_filter { + struct hlist_node fdir_node; + union txgbe_atr_input filter; + u16 sw_idx; + u16 action; +}; + /* TX/RX descriptor defines */ #define TXGBE_DEFAULT_TXD 512 #define TXGBE_DEFAULT_TX_WORK 256 @@ -196,6 +337,12 @@ struct txgbe { struct gpio_chip *gpio; unsigned int gpio_irq; unsigned int link_irq; + + /* flow director */ + struct hlist_head fdir_filter_list; + union txgbe_atr_input fdir_mask; + int fdir_filter_count; + spinlock_t fdir_perfect_lock; /* spinlock for FDIR */ }; #endif /* _TXGBE_TYPE_H_ */ diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index c29809cd9201..e342f387c3dd 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1945,9 +1945,9 @@ axienet_ethtools_set_coalesce(struct net_device *ndev, struct axienet_local *lp = netdev_priv(ndev); if (netif_running(ndev)) { - netdev_err(ndev, - "Please stop netif before applying configuration\n"); - return -EFAULT; + NL_SET_ERR_MSG(extack, + "Please stop netif before applying configuration"); + return -EBUSY; } if (ecoalesce->rx_max_coalesced_frames) @@ -2254,7 +2254,6 @@ static int axienet_probe(struct platform_device *pdev) platform_set_drvdata(pdev, ndev); SET_NETDEV_DEV(ndev, &pdev->dev); - ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ ndev->features = NETIF_F_SG; ndev->ethtool_ops = &axienet_ethtool_ops; diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 8aff6a73ca0a..56df37f8d50a 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -1015,7 +1015,7 @@ static void ixp4xx_get_drvinfo(struct net_device *dev, } static int ixp4xx_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct port *port = netdev_priv(dev); diff --git a/drivers/net/fjes/fjes_trace.h b/drivers/net/fjes/fjes_trace.h index 166ef015262b..37c6071cb333 100644 --- a/drivers/net/fjes/fjes_trace.h +++ b/drivers/net/fjes/fjes_trace.h @@ -358,7 +358,7 @@ TRACE_EVENT(fjes_stop_req_irq_post, #undef TRACE_INCLUDE_PATH #undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_PATH ../../../drivers/net/fjes +#define TRACE_INCLUDE_PATH ../../drivers/net/fjes #define TRACE_INCLUDE_FILE fjes_trace /* This part must be outside protection */ diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 67b7ef2d463f..24298a33e0e9 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -1084,7 +1084,7 @@ static int 
macvlan_ethtool_get_link_ksettings(struct net_device *dev, } static int macvlan_ethtool_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct net_device *real_dev = macvlan_dev_real_dev(dev); diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c index b37a9e4bade4..4dc057c121f5 100644 --- a/drivers/net/mctp/mctp-i2c.c +++ b/drivers/net/mctp/mctp-i2c.c @@ -442,6 +442,42 @@ static void mctp_i2c_unlock_reset(struct mctp_i2c_dev *midev) i2c_unlock_bus(midev->adapter, I2C_LOCK_SEGMENT); } +static void mctp_i2c_invalidate_tx_flow(struct mctp_i2c_dev *midev, + struct sk_buff *skb) +{ + struct mctp_sk_key *key; + struct mctp_flow *flow; + unsigned long flags; + bool release; + + flow = skb_ext_find(skb, SKB_EXT_MCTP); + if (!flow) + return; + + key = flow->key; + if (!key) + return; + + spin_lock_irqsave(&key->lock, flags); + if (key->manual_alloc) { + /* we don't have control over lifetimes for manually-allocated + * keys, so cannot assume we can invalidate all future flows + * that would use this key. + */ + release = false; + } else { + release = key->dev_flow_state == MCTP_I2C_FLOW_STATE_ACTIVE; + key->dev_flow_state = MCTP_I2C_FLOW_STATE_INVALID; + } + spin_unlock_irqrestore(&key->lock, flags); + + /* if we have changed state from active, the flow held a reference on + * the lock; release that now. + */ + if (release) + mctp_i2c_unlock_nest(midev); +} + static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb) { struct net_device_stats *stats = &midev->ndev->stats; @@ -500,6 +536,11 @@ static void mctp_i2c_xmit(struct mctp_i2c_dev *midev, struct sk_buff *skb) case MCTP_I2C_TX_FLOW_EXISTING: /* existing flow: we already have the lock; just tx */ rc = __i2c_transfer(midev->adapter, &msg, 1); + + /* on tx errors, the flow can no longer be considered valid */ + if (rc) + mctp_i2c_invalidate_tx_flow(midev, skb); + break; case MCTP_I2C_TX_FLOW_INVALID: @@ -1042,8 +1083,8 @@ static struct notifier_block mctp_i2c_notifier = { }; static const struct i2c_device_id mctp_i2c_id[] = { - { "mctp-i2c-interface", 0 }, - {}, + { "mctp-i2c-interface" }, + {} }; MODULE_DEVICE_TABLE(i2c, mctp_i2c_id); diff --git a/drivers/net/mdio/mdio-mscc-miim.c b/drivers/net/mdio/mdio-mscc-miim.c index c29377c85307..62c47e0dd142 100644 --- a/drivers/net/mdio/mdio-mscc-miim.c +++ b/drivers/net/mdio/mdio-mscc-miim.c @@ -19,6 +19,7 @@ #include #include #include +#include #define MSCC_MIIM_REG_STATUS 0x0 #define MSCC_MIIM_STATUS_STAT_PENDING BIT(2) @@ -271,10 +272,17 @@ static int mscc_miim_probe(struct platform_device *pdev) struct device_node *np = pdev->dev.of_node; struct regmap *mii_regmap, *phy_regmap; struct device *dev = &pdev->dev; + struct reset_control *reset; struct mscc_miim_dev *miim; struct mii_bus *bus; int ret; + reset = devm_reset_control_get_optional_shared(dev, "switch"); + if (IS_ERR(reset)) + return dev_err_probe(dev, PTR_ERR(reset), "Failed to get reset\n"); + + reset_control_reset(reset); + mii_regmap = ocelot_regmap_from_resource(pdev, 0, &mscc_miim_regmap_config); if (IS_ERR(mii_regmap)) diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c index d7070dd4fe73..9c09293b5258 100644 --- a/drivers/net/netconsole.c +++ b/drivers/net/netconsole.c @@ -344,7 +344,7 @@ static ssize_t enabled_store(struct config_item *item, goto out_unlock; err = -EINVAL; - if ((bool)enabled == nt->enabled) { + if (enabled == nt->enabled) { pr_info("network logging has already %s\n", nt->enabled ? 
"started" : "stopped"); goto out_unlock; @@ -369,6 +369,7 @@ static ssize_t enabled_store(struct config_item *item, if (err) goto out_unlock; + nt->enabled = true; pr_info("network logging started\n"); } else { /* false */ /* We need to disable the netconsole before cleaning it up @@ -381,8 +382,6 @@ static ssize_t enabled_store(struct config_item *item, netpoll_cleanup(&nt->np); } - nt->enabled = enabled; - mutex_unlock(&dynamic_netconsole_mutex); return strnlen(buf, count); out_unlock: @@ -974,6 +973,7 @@ restart: /* rtnl_lock already held * we might sleep in __netpoll_cleanup() */ + nt->enabled = false; spin_unlock_irqrestore(&target_list_lock, flags); __netpoll_cleanup(&nt->np); @@ -981,7 +981,6 @@ restart: spin_lock_irqsave(&target_list_lock, flags); netdev_put(nt->np.dev, &nt->np.dev_tracker); nt->np.dev = NULL; - nt->enabled = false; stopped = true; netconsole_target_put(nt); goto restart; @@ -1262,6 +1261,8 @@ static int __init init_netconsole(void) while ((target_config = strsep(&input, ";"))) { nt = alloc_param_target(target_config, count); if (IS_ERR(nt)) { + if (IS_ENABLED(CONFIG_NETCONSOLE_DYNAMIC)) + continue; err = PTR_ERR(nt); goto fail; } diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c index 3f9c9327f149..1436905bc106 100644 --- a/drivers/net/netdevsim/ethtool.c +++ b/drivers/net/netdevsim/ethtool.c @@ -148,7 +148,7 @@ nsim_get_fec_stats(struct net_device *dev, struct ethtool_fec_stats *fec_stats) } static int nsim_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct netdevsim *ns = netdev_priv(dev); diff --git a/drivers/net/pcs/Kconfig b/drivers/net/pcs/Kconfig index 87cf308fc6d8..f6aa437473de 100644 --- a/drivers/net/pcs/Kconfig +++ b/drivers/net/pcs/Kconfig @@ -6,11 +6,11 @@ menu "PCS device drivers" config PCS_XPCS - tristate + tristate "Synopsys DesignWare Ethernet XPCS" select PHYLINK help - This module provides helper functions for Synopsys DesignWare XPCS - controllers. + This module provides a driver and helper functions for Synopsys + DesignWare XPCS controllers. 
config PCS_LYNX tristate diff --git a/drivers/net/pcs/Makefile b/drivers/net/pcs/Makefile index fb1694192ae6..4f7920618b90 100644 --- a/drivers/net/pcs/Makefile +++ b/drivers/net/pcs/Makefile @@ -1,7 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 # Makefile for Linux PCS drivers -pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-nxp.o pcs-xpcs-wx.o +pcs_xpcs-$(CONFIG_PCS_XPCS) := pcs-xpcs.o pcs-xpcs-plat.o \ + pcs-xpcs-nxp.o pcs-xpcs-wx.o obj-$(CONFIG_PCS_XPCS) += pcs_xpcs.o obj-$(CONFIG_PCS_LYNX) += pcs-lynx.o diff --git a/drivers/net/pcs/pcs-xpcs-plat.c b/drivers/net/pcs/pcs-xpcs-plat.c new file mode 100644 index 000000000000..629315f1e57c --- /dev/null +++ b/drivers/net/pcs/pcs-xpcs-plat.c @@ -0,0 +1,460 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Synopsys DesignWare XPCS platform device driver + * + * Copyright (C) 2024 Serge Semin + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "pcs-xpcs.h" + +/* Page select register for the indirect MMIO CSRs access */ +#define DW_VR_CSR_VIEWPORT 0xff + +struct dw_xpcs_plat { + struct platform_device *pdev; + struct mii_bus *bus; + bool reg_indir; + int reg_width; + void __iomem *reg_base; + struct clk *cclk; +}; + +static ptrdiff_t xpcs_mmio_addr_format(int dev, int reg) +{ + return FIELD_PREP(0x1f0000, dev) | FIELD_PREP(0xffff, reg); +} + +static u16 xpcs_mmio_addr_page(ptrdiff_t csr) +{ + return FIELD_GET(0x1fff00, csr); +} + +static ptrdiff_t xpcs_mmio_addr_offset(ptrdiff_t csr) +{ + return FIELD_GET(0xff, csr); +} + +static int xpcs_mmio_read_reg_indirect(struct dw_xpcs_plat *pxpcs, + int dev, int reg) +{ + ptrdiff_t csr, ofs; + u16 page; + int ret; + + csr = xpcs_mmio_addr_format(dev, reg); + page = xpcs_mmio_addr_page(csr); + ofs = xpcs_mmio_addr_offset(csr); + + ret = pm_runtime_resume_and_get(&pxpcs->pdev->dev); + if (ret) + return ret; + + switch (pxpcs->reg_width) { + case 4: + writel(page, pxpcs->reg_base + (DW_VR_CSR_VIEWPORT << 2)); + ret = readl(pxpcs->reg_base + (ofs << 2)); + break; + default: + writew(page, pxpcs->reg_base + (DW_VR_CSR_VIEWPORT << 1)); + ret = readw(pxpcs->reg_base + (ofs << 1)); + break; + } + + pm_runtime_put(&pxpcs->pdev->dev); + + return ret; +} + +static int xpcs_mmio_write_reg_indirect(struct dw_xpcs_plat *pxpcs, + int dev, int reg, u16 val) +{ + ptrdiff_t csr, ofs; + u16 page; + int ret; + + csr = xpcs_mmio_addr_format(dev, reg); + page = xpcs_mmio_addr_page(csr); + ofs = xpcs_mmio_addr_offset(csr); + + ret = pm_runtime_resume_and_get(&pxpcs->pdev->dev); + if (ret) + return ret; + + switch (pxpcs->reg_width) { + case 4: + writel(page, pxpcs->reg_base + (DW_VR_CSR_VIEWPORT << 2)); + writel(val, pxpcs->reg_base + (ofs << 2)); + break; + default: + writew(page, pxpcs->reg_base + (DW_VR_CSR_VIEWPORT << 1)); + writew(val, pxpcs->reg_base + (ofs << 1)); + break; + } + + pm_runtime_put(&pxpcs->pdev->dev); + + return 0; +} + +static int xpcs_mmio_read_reg_direct(struct dw_xpcs_plat *pxpcs, + int dev, int reg) +{ + ptrdiff_t csr; + int ret; + + csr = xpcs_mmio_addr_format(dev, reg); + + ret = pm_runtime_resume_and_get(&pxpcs->pdev->dev); + if (ret) + return ret; + + switch (pxpcs->reg_width) { + case 4: + ret = readl(pxpcs->reg_base + (csr << 2)); + break; + default: + ret = readw(pxpcs->reg_base + (csr << 1)); + break; + } + + pm_runtime_put(&pxpcs->pdev->dev); + + return ret; +} + +static int xpcs_mmio_write_reg_direct(struct dw_xpcs_plat *pxpcs, + int dev, int reg, u16 val) +{ + ptrdiff_t csr; + int ret; + + csr = 
xpcs_mmio_addr_format(dev, reg); + + ret = pm_runtime_resume_and_get(&pxpcs->pdev->dev); + if (ret) + return ret; + + switch (pxpcs->reg_width) { + case 4: + writel(val, pxpcs->reg_base + (csr << 2)); + break; + default: + writew(val, pxpcs->reg_base + (csr << 1)); + break; + } + + pm_runtime_put(&pxpcs->pdev->dev); + + return 0; +} + +static int xpcs_mmio_read_c22(struct mii_bus *bus, int addr, int reg) +{ + struct dw_xpcs_plat *pxpcs = bus->priv; + + if (addr != 0) + return -ENODEV; + + if (pxpcs->reg_indir) + return xpcs_mmio_read_reg_indirect(pxpcs, MDIO_MMD_VEND2, reg); + else + return xpcs_mmio_read_reg_direct(pxpcs, MDIO_MMD_VEND2, reg); +} + +static int xpcs_mmio_write_c22(struct mii_bus *bus, int addr, int reg, u16 val) +{ + struct dw_xpcs_plat *pxpcs = bus->priv; + + if (addr != 0) + return -ENODEV; + + if (pxpcs->reg_indir) + return xpcs_mmio_write_reg_indirect(pxpcs, MDIO_MMD_VEND2, reg, val); + else + return xpcs_mmio_write_reg_direct(pxpcs, MDIO_MMD_VEND2, reg, val); +} + +static int xpcs_mmio_read_c45(struct mii_bus *bus, int addr, int dev, int reg) +{ + struct dw_xpcs_plat *pxpcs = bus->priv; + + if (addr != 0) + return -ENODEV; + + if (pxpcs->reg_indir) + return xpcs_mmio_read_reg_indirect(pxpcs, dev, reg); + else + return xpcs_mmio_read_reg_direct(pxpcs, dev, reg); +} + +static int xpcs_mmio_write_c45(struct mii_bus *bus, int addr, int dev, + int reg, u16 val) +{ + struct dw_xpcs_plat *pxpcs = bus->priv; + + if (addr != 0) + return -ENODEV; + + if (pxpcs->reg_indir) + return xpcs_mmio_write_reg_indirect(pxpcs, dev, reg, val); + else + return xpcs_mmio_write_reg_direct(pxpcs, dev, reg, val); +} + +static struct dw_xpcs_plat *xpcs_plat_create_data(struct platform_device *pdev) +{ + struct dw_xpcs_plat *pxpcs; + + pxpcs = devm_kzalloc(&pdev->dev, sizeof(*pxpcs), GFP_KERNEL); + if (!pxpcs) + return ERR_PTR(-ENOMEM); + + pxpcs->pdev = pdev; + + dev_set_drvdata(&pdev->dev, pxpcs); + + return pxpcs; +} + +static int xpcs_plat_init_res(struct dw_xpcs_plat *pxpcs) +{ + struct platform_device *pdev = pxpcs->pdev; + struct device *dev = &pdev->dev; + resource_size_t spc_size; + struct resource *res; + + if (!device_property_read_u32(dev, "reg-io-width", &pxpcs->reg_width)) { + if (pxpcs->reg_width != 2 && pxpcs->reg_width != 4) { + dev_err(dev, "Invalid reg-space data width\n"); + return -EINVAL; + } + } else { + pxpcs->reg_width = 2; + } + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "direct") ?: + platform_get_resource_byname(pdev, IORESOURCE_MEM, "indirect"); + if (!res) { + dev_err(dev, "No reg-space found\n"); + return -EINVAL; + } + + if (!strcmp(res->name, "indirect")) + pxpcs->reg_indir = true; + + if (pxpcs->reg_indir) + spc_size = pxpcs->reg_width * SZ_256; + else + spc_size = pxpcs->reg_width * SZ_2M; + + if (resource_size(res) < spc_size) { + dev_err(dev, "Invalid reg-space size\n"); + return -EINVAL; + } + + pxpcs->reg_base = devm_ioremap_resource(dev, res); + if (IS_ERR(pxpcs->reg_base)) { + dev_err(dev, "Failed to map reg-space\n"); + return PTR_ERR(pxpcs->reg_base); + } + + return 0; +} + +static int xpcs_plat_init_clk(struct dw_xpcs_plat *pxpcs) +{ + struct device *dev = &pxpcs->pdev->dev; + int ret; + + pxpcs->cclk = devm_clk_get(dev, "csr"); + if (IS_ERR(pxpcs->cclk)) + return dev_err_probe(dev, PTR_ERR(pxpcs->cclk), + "Failed to get CSR clock\n"); + + pm_runtime_set_active(dev); + ret = devm_pm_runtime_enable(dev); + if (ret) { + dev_err(dev, "Failed to enable runtime-PM\n"); + return ret; + } + + return 0; +} + +static int xpcs_plat_init_bus(struct 
dw_xpcs_plat *pxpcs) +{ + struct device *dev = &pxpcs->pdev->dev; + static atomic_t id = ATOMIC_INIT(-1); + int ret; + + pxpcs->bus = devm_mdiobus_alloc_size(dev, 0); + if (!pxpcs->bus) + return -ENOMEM; + + pxpcs->bus->name = "DW XPCS MCI/APB3"; + pxpcs->bus->read = xpcs_mmio_read_c22; + pxpcs->bus->write = xpcs_mmio_write_c22; + pxpcs->bus->read_c45 = xpcs_mmio_read_c45; + pxpcs->bus->write_c45 = xpcs_mmio_write_c45; + pxpcs->bus->phy_mask = ~0; + pxpcs->bus->parent = dev; + pxpcs->bus->priv = pxpcs; + + snprintf(pxpcs->bus->id, MII_BUS_ID_SIZE, + "dwxpcs-%x", atomic_inc_return(&id)); + + /* The MDIO bus here serves just as a back-end engine abstracting out + * the MDIO and MCI/APB3 IO interfaces used to access the DW XPCS + * CSRs. + */ + ret = devm_mdiobus_register(dev, pxpcs->bus); + if (ret) { + dev_err(dev, "Failed to create MDIO bus\n"); + return ret; + } + + return 0; +} + +/* Note the next function needs no teardown counterpart because the MDIO-bus + * de-registration will effectively remove and destroy all the MDIO-devices + * registered on the bus. + */ +static int xpcs_plat_init_dev(struct dw_xpcs_plat *pxpcs) +{ + struct device *dev = &pxpcs->pdev->dev; + struct mdio_device *mdiodev; + int ret; + + /* There is a single memory-mapped DW XPCS device */ + mdiodev = mdio_device_create(pxpcs->bus, 0); + if (IS_ERR(mdiodev)) + return PTR_ERR(mdiodev); + + /* Associate the FW-node with the device structure so it can be looked + * up later. Make sure the driver core is aware of the OF-node being + * re-used. + */ + device_set_node(&mdiodev->dev, fwnode_handle_get(dev_fwnode(dev))); + mdiodev->dev.of_node_reused = true; + + /* Pass the data further so the DW XPCS driver core can use it */ + mdiodev->dev.platform_data = (void *)device_get_match_data(dev); + + ret = mdio_device_register(mdiodev); + if (ret) { + dev_err(dev, "Failed to register MDIO device\n"); + goto err_clean_data; + } + + return 0; + +err_clean_data: + mdiodev->dev.platform_data = NULL; + + fwnode_handle_put(dev_fwnode(&mdiodev->dev)); + device_set_node(&mdiodev->dev, NULL); + + mdio_device_free(mdiodev); + + return ret; +} + +static int xpcs_plat_probe(struct platform_device *pdev) +{ + struct dw_xpcs_plat *pxpcs; + int ret; + + pxpcs = xpcs_plat_create_data(pdev); + if (IS_ERR(pxpcs)) + return PTR_ERR(pxpcs); + + ret = xpcs_plat_init_res(pxpcs); + if (ret) + return ret; + + ret = xpcs_plat_init_clk(pxpcs); + if (ret) + return ret; + + ret = xpcs_plat_init_bus(pxpcs); + if (ret) + return ret; + + ret = xpcs_plat_init_dev(pxpcs); + if (ret) + return ret; + + return 0; +} + +static int __maybe_unused xpcs_plat_pm_runtime_suspend(struct device *dev) +{ + struct dw_xpcs_plat *pxpcs = dev_get_drvdata(dev); + + clk_disable_unprepare(pxpcs->cclk); + + return 0; +} + +static int __maybe_unused xpcs_plat_pm_runtime_resume(struct device *dev) +{ + struct dw_xpcs_plat *pxpcs = dev_get_drvdata(dev); + + return clk_prepare_enable(pxpcs->cclk); +} + +static const struct dev_pm_ops xpcs_plat_pm_ops = { + SET_RUNTIME_PM_OPS(xpcs_plat_pm_runtime_suspend, + xpcs_plat_pm_runtime_resume, + NULL) +}; + +DW_XPCS_INFO_DECLARE(xpcs_generic, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_ID_NATIVE); +DW_XPCS_INFO_DECLARE(xpcs_pma_gen1_3g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN1_3G_ID); +DW_XPCS_INFO_DECLARE(xpcs_pma_gen2_3g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN2_3G_ID); +DW_XPCS_INFO_DECLARE(xpcs_pma_gen2_6g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN2_6G_ID); +DW_XPCS_INFO_DECLARE(xpcs_pma_gen4_3g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN4_3G_ID);
+DW_XPCS_INFO_DECLARE(xpcs_pma_gen4_6g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN4_6G_ID); +DW_XPCS_INFO_DECLARE(xpcs_pma_gen5_10g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN5_10G_ID); +DW_XPCS_INFO_DECLARE(xpcs_pma_gen5_12g, DW_XPCS_ID_NATIVE, DW_XPCS_PMA_GEN5_12G_ID); + +static const struct of_device_id xpcs_of_ids[] = { + { .compatible = "snps,dw-xpcs", .data = &xpcs_generic }, + { .compatible = "snps,dw-xpcs-gen1-3g", .data = &xpcs_pma_gen1_3g }, + { .compatible = "snps,dw-xpcs-gen2-3g", .data = &xpcs_pma_gen2_3g }, + { .compatible = "snps,dw-xpcs-gen2-6g", .data = &xpcs_pma_gen2_6g }, + { .compatible = "snps,dw-xpcs-gen4-3g", .data = &xpcs_pma_gen4_3g }, + { .compatible = "snps,dw-xpcs-gen4-6g", .data = &xpcs_pma_gen4_6g }, + { .compatible = "snps,dw-xpcs-gen5-10g", .data = &xpcs_pma_gen5_10g }, + { .compatible = "snps,dw-xpcs-gen5-12g", .data = &xpcs_pma_gen5_12g }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, xpcs_of_ids); + +static struct platform_driver xpcs_plat_driver = { + .probe = xpcs_plat_probe, + .driver = { + .name = "dwxpcs", + .pm = &xpcs_plat_pm_ops, + .of_match_table = xpcs_of_ids, + }, +}; +module_platform_driver(xpcs_plat_driver); + +MODULE_DESCRIPTION("Synopsys DesignWare XPCS platform device driver"); +MODULE_AUTHOR("Serge Semin "); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/pcs/pcs-xpcs.c b/drivers/net/pcs/pcs-xpcs.c index 31525fe9c32e..82463f9d50c8 100644 --- a/drivers/net/pcs/pcs-xpcs.c +++ b/drivers/net/pcs/pcs-xpcs.c @@ -6,10 +6,13 @@ * Author: Jose Abreu */ +#include #include #include #include +#include #include +#include #include "pcs-xpcs.h" @@ -143,7 +146,7 @@ enum { DW_XPCS_INTERFACE_MAX, }; -struct xpcs_compat { +struct dw_xpcs_compat { const int *supported; const phy_interface_t *interface; int num_interfaces; @@ -151,19 +154,19 @@ struct xpcs_compat { int (*pma_config)(struct dw_xpcs *xpcs); }; -struct xpcs_id { +struct dw_xpcs_desc { u32 id; u32 mask; - const struct xpcs_compat *compat; + const struct dw_xpcs_compat *compat; }; -static const struct xpcs_compat *xpcs_find_compat(const struct xpcs_id *id, - phy_interface_t interface) +static const struct dw_xpcs_compat * +xpcs_find_compat(const struct dw_xpcs_desc *desc, phy_interface_t interface) { int i, j; for (i = 0; i < DW_XPCS_INTERFACE_MAX; i++) { - const struct xpcs_compat *compat = &id->compat[i]; + const struct dw_xpcs_compat *compat = &desc->compat[i]; for (j = 0; j < compat->num_interfaces; j++) if (compat->interface[j] == interface) @@ -175,9 +178,9 @@ static const struct xpcs_compat *xpcs_find_compat(const struct xpcs_id *id, int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface) { - const struct xpcs_compat *compat; + const struct dw_xpcs_compat *compat; - compat = xpcs_find_compat(xpcs->id, interface); + compat = xpcs_find_compat(xpcs->desc, interface); if (!compat) return -ENODEV; @@ -185,7 +188,7 @@ int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface) } EXPORT_SYMBOL_GPL(xpcs_get_an_mode); -static bool __xpcs_linkmode_supported(const struct xpcs_compat *compat, +static bool __xpcs_linkmode_supported(const struct dw_xpcs_compat *compat, enum ethtool_link_mode_bit_indices linkmode) { int i; @@ -237,29 +240,6 @@ int xpcs_write_vpcs(struct dw_xpcs *xpcs, int reg, u16 val) return xpcs_write_vendor(xpcs, MDIO_MMD_PCS, reg, val); } -static int xpcs_dev_flag(struct dw_xpcs *xpcs) -{ - int ret, oui; - - ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID1); - if (ret < 0) - return ret; - - oui = ret; - - ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, 
MDIO_DEVID2); - if (ret < 0) - return ret; - - ret = (ret >> 10) & 0x3F; - oui |= ret << 16; - - if (oui == DW_OUI_WX) - xpcs->dev_flag = DW_DEV_TXGBE; - - return 0; -} - static int xpcs_poll_reset(struct dw_xpcs *xpcs, int dev) { /* Poll until the reset bit clears (50ms per retry == 0.6 sec) */ @@ -277,7 +257,7 @@ static int xpcs_poll_reset(struct dw_xpcs *xpcs, int dev) } static int xpcs_soft_reset(struct dw_xpcs *xpcs, - const struct xpcs_compat *compat) + const struct dw_xpcs_compat *compat) { int ret, dev; @@ -418,7 +398,7 @@ out: } static int _xpcs_config_aneg_c73(struct dw_xpcs *xpcs, - const struct xpcs_compat *compat) + const struct dw_xpcs_compat *compat) { int ret, adv; @@ -463,7 +443,7 @@ static int _xpcs_config_aneg_c73(struct dw_xpcs *xpcs, } static int xpcs_config_aneg_c73(struct dw_xpcs *xpcs, - const struct xpcs_compat *compat) + const struct dw_xpcs_compat *compat) { int ret; @@ -482,7 +462,7 @@ static int xpcs_config_aneg_c73(struct dw_xpcs *xpcs, static int xpcs_aneg_done_c73(struct dw_xpcs *xpcs, struct phylink_link_state *state, - const struct xpcs_compat *compat, u16 an_stat1) + const struct dw_xpcs_compat *compat, u16 an_stat1) { int ret; @@ -607,12 +587,12 @@ static int xpcs_validate(struct phylink_pcs *pcs, unsigned long *supported, const struct phylink_link_state *state) { __ETHTOOL_DECLARE_LINK_MODE_MASK(xpcs_supported) = { 0, }; - const struct xpcs_compat *compat; + const struct dw_xpcs_compat *compat; struct dw_xpcs *xpcs; int i; xpcs = phylink_pcs_to_xpcs(pcs); - compat = xpcs_find_compat(xpcs->id, state->interface); + compat = xpcs_find_compat(xpcs->desc, state->interface); if (!compat) return -EINVAL; @@ -633,7 +613,7 @@ void xpcs_get_interfaces(struct dw_xpcs *xpcs, unsigned long *interfaces) int i, j; for (i = 0; i < DW_XPCS_INTERFACE_MAX; i++) { - const struct xpcs_compat *compat = &xpcs->id->compat[i]; + const struct dw_xpcs_compat *compat = &xpcs->desc->compat[i]; for (j = 0; j < compat->num_interfaces; j++) __set_bit(compat->interface[j], interfaces); @@ -684,7 +664,7 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, { int ret, mdio_ctrl, tx_conf; - if (xpcs->dev_flag == DW_DEV_TXGBE) + if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_DIG_CTRL1, DW_CL37_BP | DW_EN_VSMMD1); /* For AN for C37 SGMII mode, the settings are :- @@ -722,7 +702,7 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, ret |= (DW_VR_MII_PCS_MODE_C37_SGMII << DW_VR_MII_AN_CTRL_PCS_MODE_SHIFT & DW_VR_MII_PCS_MODE_MASK); - if (xpcs->dev_flag == DW_DEV_TXGBE) { + if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) { ret |= DW_VR_MII_AN_CTRL_8BIT; /* Hardware requires it to be PHY side SGMII */ tx_conf = DW_VR_MII_TX_CONFIG_PHY_SIDE_SGMII; @@ -744,7 +724,7 @@ static int xpcs_config_aneg_c37_sgmii(struct dw_xpcs *xpcs, else ret &= ~DW_VR_MII_DIG_CTRL1_MAC_AUTO_SW; - if (xpcs->dev_flag == DW_DEV_TXGBE) + if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) ret |= DW_VR_MII_DIG_CTRL1_PHY_MODE_CTRL; ret = xpcs_write(xpcs, MDIO_MMD_VEND2, DW_VR_MII_DIG_CTRL1, ret); @@ -766,7 +746,7 @@ static int xpcs_config_aneg_c37_1000basex(struct dw_xpcs *xpcs, int ret, mdio_ctrl, adv; bool changed = 0; - if (xpcs->dev_flag == DW_DEV_TXGBE) + if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) xpcs_write_vpcs(xpcs, DW_VR_XS_PCS_DIG_CTRL1, DW_CL37_BP | DW_EN_VSMMD1); /* According to Chap 7.12, to set 1000BASE-X C37 AN, AN must @@ -850,14 +830,14 @@ static int xpcs_config_2500basex(struct dw_xpcs *xpcs) int xpcs_do_config(struct dw_xpcs *xpcs, phy_interface_t 
interface, const unsigned long *advertising, unsigned int neg_mode) { - const struct xpcs_compat *compat; + const struct dw_xpcs_compat *compat; int ret; - compat = xpcs_find_compat(xpcs->id, interface); + compat = xpcs_find_compat(xpcs->desc, interface); if (!compat) return -ENODEV; - if (xpcs->dev_flag == DW_DEV_TXGBE) { + if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) { ret = txgbe_xpcs_switch_mode(xpcs, interface); if (ret) return ret; @@ -915,7 +895,7 @@ static int xpcs_config(struct phylink_pcs *pcs, unsigned int neg_mode, static int xpcs_get_state_c73(struct dw_xpcs *xpcs, struct phylink_link_state *state, - const struct xpcs_compat *compat) + const struct dw_xpcs_compat *compat) { bool an_enabled; int pcs_stat1; @@ -1115,10 +1095,10 @@ static void xpcs_get_state(struct phylink_pcs *pcs, struct phylink_link_state *state) { struct dw_xpcs *xpcs = phylink_pcs_to_xpcs(pcs); - const struct xpcs_compat *compat; + const struct dw_xpcs_compat *compat; int ret; - compat = xpcs_find_compat(xpcs->id, state->interface); + compat = xpcs_find_compat(xpcs->desc, state->interface); if (!compat) return; @@ -1229,47 +1209,73 @@ static void xpcs_an_restart(struct phylink_pcs *pcs) } } -static u32 xpcs_get_id(struct dw_xpcs *xpcs) +static int xpcs_get_id(struct dw_xpcs *xpcs) { int ret; u32 id; - /* First, search C73 PCS using PCS MMD */ + /* First, search C73 PCS using PCS MMD 3. Return -ENODEV if the + * communication failed, indicating that the device couldn't be reached. + */ ret = xpcs_read(xpcs, MDIO_MMD_PCS, MII_PHYSID1); if (ret < 0) - return 0xffffffff; + return -ENODEV; id = ret << 16; ret = xpcs_read(xpcs, MDIO_MMD_PCS, MII_PHYSID2); if (ret < 0) - return 0xffffffff; + return ret; - /* If Device IDs are not all zeros or all ones, - * we found C73 AN-type device + id |= ret; + + /* If the Device IDs are not all zeros or ones, then a 10GBase-X/R or + * C73 KR/KX4 PCS was found. Otherwise fall back to detecting a + * 1000Base-X or C37 PCS in MII MMD 31. */ - if ((id | ret) && (id | ret) != 0xffffffff) - return id | ret; + if (!id || id == 0xffffffff) { + ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID1); + if (ret < 0) + return ret; - /* Next, search C37 PCS using Vendor-Specific MII MMD */ - ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID1); + id = ret << 16; + + ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID2); + if (ret < 0) + return ret; + + id |= ret; + } + + /* Set the PCS ID if it hasn't been pre-initialized */ + if (xpcs->info.pcs == DW_XPCS_ID_NATIVE) + xpcs->info.pcs = id; + + /* Find out PMA/PMD ID from MMD 1 device ID registers */ + ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID1); if (ret < 0) - return 0xffffffff; + return ret; - id = ret << 16; + id = ret; - ret = xpcs_read(xpcs, MDIO_MMD_VEND2, MII_PHYSID2); + ret = xpcs_read(xpcs, MDIO_MMD_PMAPMD, MDIO_DEVID2); if (ret < 0) - return 0xffffffff; + return ret; - /* If Device IDs are not all zeros, we found C37 AN-type device */ - if (id | ret) - return id | ret; + /* Note the inverted dword order and masked out Model/Revision numbers + * with respect to what is done with the PCS ID... 
+ */ + ret = (ret >> 10) & 0x3F; + id |= ret << 16; - return 0xffffffff; + /* Set the PMA ID if it hasn't been pre-initialized */ + if (xpcs->info.pma == DW_XPCS_PMA_ID_NATIVE) + xpcs->info.pma = id; + + return 0; } -static const struct xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = { +static const struct dw_xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = { [DW_XPCS_USXGMII] = { .supported = xpcs_usxgmii_features, .interface = xpcs_usxgmii_interfaces, @@ -1314,7 +1320,7 @@ static const struct xpcs_compat synopsys_xpcs_compat[DW_XPCS_INTERFACE_MAX] = { }, }; -static const struct xpcs_compat nxp_sja1105_xpcs_compat[DW_XPCS_INTERFACE_MAX] = { +static const struct dw_xpcs_compat nxp_sja1105_xpcs_compat[DW_XPCS_INTERFACE_MAX] = { [DW_XPCS_SGMII] = { .supported = xpcs_sgmii_features, .interface = xpcs_sgmii_interfaces, @@ -1324,7 +1330,7 @@ static const struct xpcs_compat nxp_sja1105_xpcs_compat[DW_XPCS_INTERFACE_MAX] = }, }; -static const struct xpcs_compat nxp_sja1110_xpcs_compat[DW_XPCS_INTERFACE_MAX] = { +static const struct dw_xpcs_compat nxp_sja1110_xpcs_compat[DW_XPCS_INTERFACE_MAX] = { [DW_XPCS_SGMII] = { .supported = xpcs_sgmii_features, .interface = xpcs_sgmii_interfaces, @@ -1341,18 +1347,18 @@ static const struct xpcs_compat nxp_sja1110_xpcs_compat[DW_XPCS_INTERFACE_MAX] = }, }; -static const struct xpcs_id xpcs_id_list[] = { +static const struct dw_xpcs_desc xpcs_desc_list[] = { { - .id = SYNOPSYS_XPCS_ID, - .mask = SYNOPSYS_XPCS_MASK, + .id = DW_XPCS_ID, + .mask = DW_XPCS_ID_MASK, .compat = synopsys_xpcs_compat, }, { .id = NXP_SJA1105_XPCS_ID, - .mask = SYNOPSYS_XPCS_MASK, + .mask = DW_XPCS_ID_MASK, .compat = nxp_sja1105_xpcs_compat, }, { .id = NXP_SJA1110_XPCS_ID, - .mask = SYNOPSYS_XPCS_MASK, + .mask = DW_XPCS_ID_MASK, .compat = nxp_sja1110_xpcs_compat, }, }; @@ -1365,12 +1371,9 @@ static const struct phylink_pcs_ops xpcs_phylink_ops = { .pcs_link_up = xpcs_link_up, }; -static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev, - phy_interface_t interface) +static struct dw_xpcs *xpcs_create_data(struct mdio_device *mdiodev) { struct dw_xpcs *xpcs; - u32 xpcs_id; - int i, ret; xpcs = kzalloc(sizeof(*xpcs), GFP_KERNEL); if (!xpcs) @@ -1378,59 +1381,142 @@ static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev, mdio_device_get(mdiodev); xpcs->mdiodev = mdiodev; + xpcs->pcs.ops = &xpcs_phylink_ops; + xpcs->pcs.neg_mode = true; + xpcs->pcs.poll = true; - xpcs_id = xpcs_get_id(xpcs); + return xpcs; +} - for (i = 0; i < ARRAY_SIZE(xpcs_id_list); i++) { - const struct xpcs_id *entry = &xpcs_id_list[i]; - const struct xpcs_compat *compat; +static void xpcs_free_data(struct dw_xpcs *xpcs) +{ + mdio_device_put(xpcs->mdiodev); + kfree(xpcs); +} - if ((xpcs_id & entry->mask) != entry->id) - continue; +static int xpcs_init_clks(struct dw_xpcs *xpcs) +{ + static const char *ids[DW_XPCS_NUM_CLKS] = { + [DW_XPCS_CORE_CLK] = "core", + [DW_XPCS_PAD_CLK] = "pad", + }; + struct device *dev = &xpcs->mdiodev->dev; + int ret, i; - xpcs->id = entry; + for (i = 0; i < DW_XPCS_NUM_CLKS; ++i) + xpcs->clks[i].id = ids[i]; - compat = xpcs_find_compat(entry, interface); - if (!compat) { - ret = -ENODEV; - goto out; - } + ret = clk_bulk_get_optional(dev, DW_XPCS_NUM_CLKS, xpcs->clks); + if (ret) + return dev_err_probe(dev, ret, "Failed to get clocks\n"); - ret = xpcs_dev_flag(xpcs); - if (ret) - goto out; + ret = clk_bulk_prepare_enable(DW_XPCS_NUM_CLKS, xpcs->clks); + if (ret) + return dev_err_probe(dev, ret, "Failed to enable clocks\n"); - xpcs->pcs.ops = &xpcs_phylink_ops; 
- xpcs->pcs.neg_mode = true; + return 0; +} - if (xpcs->dev_flag != DW_DEV_TXGBE) { - xpcs->pcs.poll = true; +static void xpcs_clear_clks(struct dw_xpcs *xpcs) +{ + clk_bulk_disable_unprepare(DW_XPCS_NUM_CLKS, xpcs->clks); - ret = xpcs_soft_reset(xpcs, compat); - if (ret) - goto out; - } + clk_bulk_put(DW_XPCS_NUM_CLKS, xpcs->clks); +} - return xpcs; +static int xpcs_init_id(struct dw_xpcs *xpcs) +{ + const struct dw_xpcs_info *info; + int i, ret; + + info = dev_get_platdata(&xpcs->mdiodev->dev); + if (!info) { + xpcs->info.pcs = DW_XPCS_ID_NATIVE; + xpcs->info.pma = DW_XPCS_PMA_ID_NATIVE; + } else { + xpcs->info = *info; } - ret = -ENODEV; + ret = xpcs_get_id(xpcs); + if (ret < 0) + return ret; -out: - mdio_device_put(mdiodev); - kfree(xpcs); + for (i = 0; i < ARRAY_SIZE(xpcs_desc_list); i++) { + const struct dw_xpcs_desc *desc = &xpcs_desc_list[i]; + + if ((xpcs->info.pcs & desc->mask) != desc->id) + continue; + + xpcs->desc = desc; + + break; + } + + if (!xpcs->desc) + return -ENODEV; + + return 0; +} + +static int xpcs_init_iface(struct dw_xpcs *xpcs, phy_interface_t interface) +{ + const struct dw_xpcs_compat *compat; + + compat = xpcs_find_compat(xpcs->desc, interface); + if (!compat) + return -EINVAL; + + if (xpcs->info.pma == WX_TXGBE_XPCS_PMA_10G_ID) { + xpcs->pcs.poll = false; + return 0; + } + + return xpcs_soft_reset(xpcs, compat); +} + +static struct dw_xpcs *xpcs_create(struct mdio_device *mdiodev, + phy_interface_t interface) +{ + struct dw_xpcs *xpcs; + int ret; + + xpcs = xpcs_create_data(mdiodev); + if (IS_ERR(xpcs)) + return xpcs; + + ret = xpcs_init_clks(xpcs); + if (ret) + goto out_free_data; + + ret = xpcs_init_id(xpcs); + if (ret) + goto out_clear_clks; + + ret = xpcs_init_iface(xpcs, interface); + if (ret) + goto out_clear_clks; + + return xpcs; + +out_clear_clks: + xpcs_clear_clks(xpcs); + +out_free_data: + xpcs_free_data(xpcs); return ERR_PTR(ret); } -void xpcs_destroy(struct dw_xpcs *xpcs) -{ - if (xpcs) - mdio_device_put(xpcs->mdiodev); - kfree(xpcs); -} -EXPORT_SYMBOL_GPL(xpcs_destroy); - +/** + * xpcs_create_mdiodev() - create a DW xPCS instance with the MDIO @addr + * @bus: pointer to the MDIO-bus descriptor for the device to be looked at + * @addr: device MDIO-bus ID + * @interface: requested PHY interface + * + * Return: a pointer to the DW XPCS handle if successful, otherwise -ENODEV if + * the PCS device couldn't be found on the bus, or another negative errno + * related to the data allocation and MDIO-bus communications. + */ struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr, phy_interface_t interface) { @@ -1455,5 +1541,54 @@ struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr, } EXPORT_SYMBOL_GPL(xpcs_create_mdiodev); +/** + * xpcs_create_fwnode() - Create a DW xPCS instance from @fwnode + * @fwnode: fwnode handle pointing to the DW XPCS device + * @interface: requested PHY interface + * + * Return: a pointer to the DW XPCS handle if successful, otherwise -ENODEV if + * the fwnode device is unavailable or the PCS device couldn't be found on the + * bus, -EPROBE_DEFER if the respective MDIO-device instance couldn't be found, + * or another negative errno related to the data allocations and MDIO-bus + * communications. 
+ */ +struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode, + phy_interface_t interface) +{ + struct mdio_device *mdiodev; + struct dw_xpcs *xpcs; + + if (!fwnode_device_is_available(fwnode)) + return ERR_PTR(-ENODEV); + + mdiodev = fwnode_mdio_find_device(fwnode); + if (!mdiodev) + return ERR_PTR(-EPROBE_DEFER); + + xpcs = xpcs_create(mdiodev, interface); + + /* xpcs_create() has taken a refcount on the mdiodev if it was + * successful. If xpcs_create() fails, this will free the mdio + * device here. In any case, we don't need to hold our reference + * anymore, and putting it here will allow mdio_device_put() in + * xpcs_destroy() to automatically free the mdio device. + */ + mdio_device_put(mdiodev); + + return xpcs; +} +EXPORT_SYMBOL_GPL(xpcs_create_fwnode); + +void xpcs_destroy(struct dw_xpcs *xpcs) +{ + if (!xpcs) + return; + + xpcs_clear_clks(xpcs); + + xpcs_free_data(xpcs); +} +EXPORT_SYMBOL_GPL(xpcs_destroy); + MODULE_DESCRIPTION("Synopsys DesignWare XPCS library"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/pcs/pcs-xpcs.h b/drivers/net/pcs/pcs-xpcs.h index 96c36b32ca99..fa05adfae220 100644 --- a/drivers/net/pcs/pcs-xpcs.h +++ b/drivers/net/pcs/pcs-xpcs.h @@ -6,8 +6,8 @@ * Author: Jose Abreu */ -#define SYNOPSYS_XPCS_ID 0x7996ced0 -#define SYNOPSYS_XPCS_MASK 0xffffffff +#include +#include /* Vendor regs access */ #define DW_VENDOR BIT(15) @@ -120,6 +120,9 @@ /* VR MII EEE Control 1 defines */ #define DW_VR_MII_EEE_TRN_LPI BIT(0) /* Transparent Mode Enable */ +#define DW_XPCS_INFO_DECLARE(_name, _pcs, _pma) \ + static const struct dw_xpcs_info _name = { .pcs = _pcs, .pma = _pma } + int xpcs_read(struct dw_xpcs *xpcs, int dev, u32 reg); int xpcs_write(struct dw_xpcs *xpcs, int dev, u32 reg, u16 val); int xpcs_read_vpcs(struct dw_xpcs *xpcs, int reg); diff --git a/drivers/net/phy/aquantia/Makefile b/drivers/net/phy/aquantia/Makefile index aa77fb63c8ec..c6c4d494ee2a 100644 --- a/drivers/net/phy/aquantia/Makefile +++ b/drivers/net/phy/aquantia/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0 -aquantia-objs += aquantia_main.o aquantia_firmware.o +aquantia-objs += aquantia_main.o aquantia_firmware.o aquantia_leds.o ifdef CONFIG_HWMON aquantia-objs += aquantia_hwmon.o endif diff --git a/drivers/net/phy/aquantia/aquantia.h b/drivers/net/phy/aquantia/aquantia.h index 4830b25e6c7d..2465345081f8 100644 --- a/drivers/net/phy/aquantia/aquantia.h +++ b/drivers/net/phy/aquantia/aquantia.h @@ -66,6 +66,28 @@ #define VEND1_GLOBAL_CONTROL2_UP_RUN_STALL_OVD BIT(6) #define VEND1_GLOBAL_CONTROL2_UP_RUN_STALL BIT(0) +#define VEND1_GLOBAL_LED_PROV 0xc430 +#define AQR_LED_PROV(x) (VEND1_GLOBAL_LED_PROV + (x)) +#define VEND1_GLOBAL_LED_PROV_LINK2500 BIT(14) +#define VEND1_GLOBAL_LED_PROV_LINK5000 BIT(15) +#define VEND1_GLOBAL_LED_PROV_FORCE_ON BIT(8) +#define VEND1_GLOBAL_LED_PROV_LINK10000 BIT(7) +#define VEND1_GLOBAL_LED_PROV_LINK1000 BIT(6) +#define VEND1_GLOBAL_LED_PROV_LINK100 BIT(5) +#define VEND1_GLOBAL_LED_PROV_RX_ACT BIT(3) +#define VEND1_GLOBAL_LED_PROV_TX_ACT BIT(2) +#define VEND1_GLOBAL_LED_PROV_ACT_STRETCH GENMASK(1, 0) + +#define VEND1_GLOBAL_LED_PROV_LINK_MASK (VEND1_GLOBAL_LED_PROV_LINK100 | \ + VEND1_GLOBAL_LED_PROV_LINK1000 | \ + VEND1_GLOBAL_LED_PROV_LINK10000 | \ + VEND1_GLOBAL_LED_PROV_LINK5000 | \ + VEND1_GLOBAL_LED_PROV_LINK2500) + +#define VEND1_GLOBAL_LED_DRIVE 0xc438 +#define VEND1_GLOBAL_LED_DRIVE_VDD BIT(1) +#define AQR_LED_DRIVE(x) (VEND1_GLOBAL_LED_DRIVE + (x)) + #define VEND1_THERMAL_PROV_HIGH_TEMP_FAIL 0xc421 #define 
VEND1_THERMAL_PROV_LOW_TEMP_FAIL 0xc422 #define VEND1_THERMAL_PROV_HIGH_TEMP_WARN 0xc423 @@ -90,6 +112,18 @@ #define VEND1_GLOBAL_RSVD_STAT9_MODE GENMASK(7, 0) #define VEND1_GLOBAL_RSVD_STAT9_1000BT2 0x23 +/* MDIO_MMD_C22EXT */ +#define MDIO_C22EXT_STAT_SGMII_RX_GOOD_FRAMES 0xd292 +#define MDIO_C22EXT_STAT_SGMII_RX_BAD_FRAMES 0xd294 +#define MDIO_C22EXT_STAT_SGMII_RX_FALSE_CARRIER 0xd297 +#define MDIO_C22EXT_STAT_SGMII_TX_GOOD_FRAMES 0xd313 +#define MDIO_C22EXT_STAT_SGMII_TX_BAD_FRAMES 0xd315 +#define MDIO_C22EXT_STAT_SGMII_TX_FALSE_CARRIER 0xd317 +#define MDIO_C22EXT_STAT_SGMII_TX_COLLISIONS 0xd318 +#define MDIO_C22EXT_STAT_SGMII_TX_LINE_COLLISIONS 0xd319 +#define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR 0xd31a +#define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES 0xd31b + #define VEND1_GLOBAL_INT_STD_STATUS 0xfc00 #define VEND1_GLOBAL_INT_VEND_STATUS 0xfc01 @@ -116,6 +150,35 @@ #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL2 BIT(1) #define VEND1_GLOBAL_INT_VEND_MASK_GLOBAL3 BIT(0) +#define AQR_MAX_LEDS 3 + +struct aqr107_hw_stat { + const char *name; + int reg; + int size; +}; + +#define SGMII_STAT(n, r, s) { n, MDIO_C22EXT_STAT_SGMII_ ## r, s } +static const struct aqr107_hw_stat aqr107_hw_stats[] = { + SGMII_STAT("sgmii_rx_good_frames", RX_GOOD_FRAMES, 26), + SGMII_STAT("sgmii_rx_bad_frames", RX_BAD_FRAMES, 26), + SGMII_STAT("sgmii_rx_false_carrier_events", RX_FALSE_CARRIER, 8), + SGMII_STAT("sgmii_tx_good_frames", TX_GOOD_FRAMES, 26), + SGMII_STAT("sgmii_tx_bad_frames", TX_BAD_FRAMES, 26), + SGMII_STAT("sgmii_tx_false_carrier_events", TX_FALSE_CARRIER, 8), + SGMII_STAT("sgmii_tx_collisions", TX_COLLISIONS, 8), + SGMII_STAT("sgmii_tx_line_collisions", TX_LINE_COLLISIONS, 8), + SGMII_STAT("sgmii_tx_frame_alignment_err", TX_FRAME_ALIGN_ERR, 16), + SGMII_STAT("sgmii_tx_runt_frames", TX_RUNT_FRAMES, 22), +}; + +#define AQR107_SGMII_STAT_SZ ARRAY_SIZE(aqr107_hw_stats) + +struct aqr107_priv { + u64 sgmii_stats[AQR107_SGMII_STAT_SZ]; + unsigned long leds_active_low; +}; + #if IS_REACHABLE(CONFIG_HWMON) int aqr_hwmon_probe(struct phy_device *phydev); #else @@ -124,4 +187,20 @@ static inline int aqr_hwmon_probe(struct phy_device *phydev) { return 0; } int aqr_firmware_load(struct phy_device *phydev); +int aqr_phy_led_blink_set(struct phy_device *phydev, u8 index, + unsigned long *delay_on, + unsigned long *delay_off); +int aqr_phy_led_brightness_set(struct phy_device *phydev, + u8 index, enum led_brightness value); +int aqr_phy_led_hw_is_supported(struct phy_device *phydev, u8 index, + unsigned long rules); +int aqr_phy_led_hw_control_get(struct phy_device *phydev, u8 index, + unsigned long *rules); +int aqr_phy_led_hw_control_set(struct phy_device *phydev, u8 index, + unsigned long rules); +int aqr_phy_led_active_low_set(struct phy_device *phydev, int index, bool enable); +int aqr_phy_led_polarity_set(struct phy_device *phydev, int index, + unsigned long modes); +int aqr_wait_reset_complete(struct phy_device *phydev); + #endif /* AQUANTIA_H */ diff --git a/drivers/net/phy/aquantia/aquantia_firmware.c b/drivers/net/phy/aquantia/aquantia_firmware.c index 0c9640ef153b..524627a36c6f 100644 --- a/drivers/net/phy/aquantia/aquantia_firmware.c +++ b/drivers/net/phy/aquantia/aquantia_firmware.c @@ -353,6 +353,10 @@ int aqr_firmware_load(struct phy_device *phydev) { int ret; + ret = aqr_wait_reset_complete(phydev); + if (ret) + return ret; + /* Check if the firmware is not already loaded by polling * the current version returned by the PHY. If 0 is returned, * no firmware is loaded. 
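A note on the SGMII counters declared in aquantia.h above: their sizes run up to 26 bits, i.e. wider than a single 16-bit MMD register, so reading one statistic takes one or two reads from the C22EXT MMD. Below is a minimal sketch of that accumulation; the helper name is hypothetical, and it assumes (matching the sizes listed above) that the low word of a counter sits at stat->reg with any remaining bits in stat->reg + 1.

#include <linux/bits.h>
#include <linux/limits.h>
#include <linux/phy.h>

/* Sketch: read one aqr107_hw_stats counter; U64_MAX flags a failed read */
static u64 aqr107_read_hw_stat_sketch(struct phy_device *phydev,
				      const struct aqr107_hw_stat *stat)
{
	int len_l = min(stat->size, 16);	/* bits in the low register */
	int len_h = stat->size - len_l;		/* bits in the next register */
	u64 acc;
	int val;

	val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg);
	if (val < 0)
		return U64_MAX;
	acc = val & GENMASK(len_l - 1, 0);

	if (len_h > 0) {
		val = phy_read_mmd(phydev, MDIO_MMD_C22EXT, stat->reg + 1);
		if (val < 0)
			return U64_MAX;
		acc |= (u64)(val & GENMASK(len_h - 1, 0)) << 16;
	}

	return acc;
}

Accumulating such samples over time is what the sgmii_stats[] array of struct aqr107_priv is for; the driver's ethtool get_stats path adds each read into it.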
diff --git a/drivers/net/phy/aquantia/aquantia_leds.c b/drivers/net/phy/aquantia/aquantia_leds.c new file mode 100644 index 000000000000..0516ac02c3f8 --- /dev/null +++ b/drivers/net/phy/aquantia/aquantia_leds.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0 +/* LED driver for Aquantia PHY + * + * Author: Daniel Golle + */ + +#include + +#include "aquantia.h" + +int aqr_phy_led_brightness_set(struct phy_device *phydev, + u8 index, enum led_brightness value) +{ + if (index >= AQR_MAX_LEDS) + return -EINVAL; + + return phy_modify_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_PROV(index), + VEND1_GLOBAL_LED_PROV_LINK_MASK | + VEND1_GLOBAL_LED_PROV_FORCE_ON | + VEND1_GLOBAL_LED_PROV_RX_ACT | + VEND1_GLOBAL_LED_PROV_TX_ACT, + value ? VEND1_GLOBAL_LED_PROV_FORCE_ON : 0); +} + +static const unsigned long supported_triggers = (BIT(TRIGGER_NETDEV_LINK) | + BIT(TRIGGER_NETDEV_LINK_100) | + BIT(TRIGGER_NETDEV_LINK_1000) | + BIT(TRIGGER_NETDEV_LINK_2500) | + BIT(TRIGGER_NETDEV_LINK_5000) | + BIT(TRIGGER_NETDEV_LINK_10000) | + BIT(TRIGGER_NETDEV_RX) | + BIT(TRIGGER_NETDEV_TX)); + +int aqr_phy_led_hw_is_supported(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + if (index >= AQR_MAX_LEDS) + return -EINVAL; + + /* All combinations of the supported triggers are allowed */ + if (rules & ~supported_triggers) + return -EOPNOTSUPP; + + return 0; +} + +int aqr_phy_led_hw_control_get(struct phy_device *phydev, u8 index, + unsigned long *rules) +{ + int val; + + if (index >= AQR_MAX_LEDS) + return -EINVAL; + + val = phy_read_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_PROV(index)); + if (val < 0) + return val; + + *rules = 0; + if (val & VEND1_GLOBAL_LED_PROV_LINK100) + *rules |= BIT(TRIGGER_NETDEV_LINK_100); + + if (val & VEND1_GLOBAL_LED_PROV_LINK1000) + *rules |= BIT(TRIGGER_NETDEV_LINK_1000); + + if (val & VEND1_GLOBAL_LED_PROV_LINK2500) + *rules |= BIT(TRIGGER_NETDEV_LINK_2500); + + if (val & VEND1_GLOBAL_LED_PROV_LINK5000) + *rules |= BIT(TRIGGER_NETDEV_LINK_5000); + + if (val & VEND1_GLOBAL_LED_PROV_LINK10000) + *rules |= BIT(TRIGGER_NETDEV_LINK_10000); + + if (val & VEND1_GLOBAL_LED_PROV_RX_ACT) + *rules |= BIT(TRIGGER_NETDEV_RX); + + if (val & VEND1_GLOBAL_LED_PROV_TX_ACT) + *rules |= BIT(TRIGGER_NETDEV_TX); + + return 0; +} + +int aqr_phy_led_hw_control_set(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + u16 val = 0; + + if (index >= AQR_MAX_LEDS) + return -EINVAL; + + if (rules & (BIT(TRIGGER_NETDEV_LINK_100) | BIT(TRIGGER_NETDEV_LINK))) + val |= VEND1_GLOBAL_LED_PROV_LINK100; + + if (rules & (BIT(TRIGGER_NETDEV_LINK_1000) | BIT(TRIGGER_NETDEV_LINK))) + val |= VEND1_GLOBAL_LED_PROV_LINK1000; + + if (rules & (BIT(TRIGGER_NETDEV_LINK_2500) | BIT(TRIGGER_NETDEV_LINK))) + val |= VEND1_GLOBAL_LED_PROV_LINK2500; + + if (rules & (BIT(TRIGGER_NETDEV_LINK_5000) | BIT(TRIGGER_NETDEV_LINK))) + val |= VEND1_GLOBAL_LED_PROV_LINK5000; + + if (rules & (BIT(TRIGGER_NETDEV_LINK_10000) | BIT(TRIGGER_NETDEV_LINK))) + val |= VEND1_GLOBAL_LED_PROV_LINK10000; + + if (rules & BIT(TRIGGER_NETDEV_RX)) + val |= VEND1_GLOBAL_LED_PROV_RX_ACT; + + if (rules & BIT(TRIGGER_NETDEV_TX)) + val |= VEND1_GLOBAL_LED_PROV_TX_ACT; + + return phy_modify_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_PROV(index), + VEND1_GLOBAL_LED_PROV_LINK_MASK | + VEND1_GLOBAL_LED_PROV_FORCE_ON | + VEND1_GLOBAL_LED_PROV_RX_ACT | + VEND1_GLOBAL_LED_PROV_TX_ACT, val); +} + +int aqr_phy_led_active_low_set(struct phy_device *phydev, int index, bool enable) +{ + return phy_modify_mmd(phydev, MDIO_MMD_VEND1, AQR_LED_DRIVE(index), + 
VEND1_GLOBAL_LED_DRIVE_VDD, enable ? VEND1_GLOBAL_LED_DRIVE_VDD : 0); +} + +int aqr_phy_led_polarity_set(struct phy_device *phydev, int index, unsigned long modes) +{ + struct aqr107_priv *priv = phydev->priv; + bool active_low = false; + u32 mode; + + if (index >= AQR_MAX_LEDS) + return -EINVAL; + + for_each_set_bit(mode, &modes, __PHY_LED_MODES_NUM) { + switch (mode) { + case PHY_LED_ACTIVE_LOW: + active_low = true; + break; + default: + return -EINVAL; + } + } + + /* Save LED driver vdd state to restore on SW reset */ + if (active_low) + priv->leds_active_low |= BIT(index); + + return aqr_phy_led_active_low_set(phydev, index, active_low); +} diff --git a/drivers/net/phy/aquantia/aquantia_main.c b/drivers/net/phy/aquantia/aquantia_main.c index d34cdec47636..d12e35374231 100644 --- a/drivers/net/phy/aquantia/aquantia_main.c +++ b/drivers/net/phy/aquantia/aquantia_main.c @@ -29,6 +29,7 @@ #define PHY_ID_AQR113 0x31c31c40 #define PHY_ID_AQR113C 0x31c31c12 #define PHY_ID_AQR114C 0x31c31c22 +#define PHY_ID_AQR115C 0x31c31c33 #define PHY_ID_AQR813 0x31c31cb2 #define MDIO_PHYXS_VEND_IF_STATUS 0xe812 @@ -84,49 +85,12 @@ #define MDIO_AN_RX_VEND_STAT3 0xe832 #define MDIO_AN_RX_VEND_STAT3_AFR BIT(0) -/* MDIO_MMD_C22EXT */ -#define MDIO_C22EXT_STAT_SGMII_RX_GOOD_FRAMES 0xd292 -#define MDIO_C22EXT_STAT_SGMII_RX_BAD_FRAMES 0xd294 -#define MDIO_C22EXT_STAT_SGMII_RX_FALSE_CARRIER 0xd297 -#define MDIO_C22EXT_STAT_SGMII_TX_GOOD_FRAMES 0xd313 -#define MDIO_C22EXT_STAT_SGMII_TX_BAD_FRAMES 0xd315 -#define MDIO_C22EXT_STAT_SGMII_TX_FALSE_CARRIER 0xd317 -#define MDIO_C22EXT_STAT_SGMII_TX_COLLISIONS 0xd318 -#define MDIO_C22EXT_STAT_SGMII_TX_LINE_COLLISIONS 0xd319 -#define MDIO_C22EXT_STAT_SGMII_TX_FRAME_ALIGN_ERR 0xd31a -#define MDIO_C22EXT_STAT_SGMII_TX_RUNT_FRAMES 0xd31b - /* Sleep and timeout for checking if the Processor-Intensive * MDIO operation is finished */ #define AQR107_OP_IN_PROG_SLEEP 1000 #define AQR107_OP_IN_PROG_TIMEOUT 100000 -struct aqr107_hw_stat { - const char *name; - int reg; - int size; -}; - -#define SGMII_STAT(n, r, s) { n, MDIO_C22EXT_STAT_SGMII_ ## r, s } -static const struct aqr107_hw_stat aqr107_hw_stats[] = { - SGMII_STAT("sgmii_rx_good_frames", RX_GOOD_FRAMES, 26), - SGMII_STAT("sgmii_rx_bad_frames", RX_BAD_FRAMES, 26), - SGMII_STAT("sgmii_rx_false_carrier_events", RX_FALSE_CARRIER, 8), - SGMII_STAT("sgmii_tx_good_frames", TX_GOOD_FRAMES, 26), - SGMII_STAT("sgmii_tx_bad_frames", TX_BAD_FRAMES, 26), - SGMII_STAT("sgmii_tx_false_carrier_events", TX_FALSE_CARRIER, 8), - SGMII_STAT("sgmii_tx_collisions", TX_COLLISIONS, 8), - SGMII_STAT("sgmii_tx_line_collisions", TX_LINE_COLLISIONS, 8), - SGMII_STAT("sgmii_tx_frame_alignment_err", TX_FRAME_ALIGN_ERR, 16), - SGMII_STAT("sgmii_tx_runt_frames", TX_RUNT_FRAMES, 22), -}; -#define AQR107_SGMII_STAT_SZ ARRAY_SIZE(aqr107_hw_stats) - -struct aqr107_priv { - u64 sgmii_stats[AQR107_SGMII_STAT_SZ]; -}; - static int aqr107_get_sset_count(struct phy_device *phydev) { return AQR107_SGMII_STAT_SZ; @@ -478,7 +442,7 @@ static int aqr107_set_tunable(struct phy_device *phydev, * The chip also provides a "reset completed" bit, but it's cleared after * read. Therefore function would time out if called again. 
*/ -static int aqr107_wait_reset_complete(struct phy_device *phydev) +int aqr_wait_reset_complete(struct phy_device *phydev) { int val; @@ -512,7 +476,9 @@ static void aqr107_chip_info(struct phy_device *phydev) static int aqr107_config_init(struct phy_device *phydev) { - int ret; + struct aqr107_priv *priv = phydev->priv; + u32 index; + int ret; /* Check that the PHY interface type is compatible */ if (phydev->interface != PHY_INTERFACE_MODE_SGMII && @@ -529,11 +495,22 @@ static int aqr107_config_init(struct phy_device *phydev) WARN(phydev->interface == PHY_INTERFACE_MODE_XGMII, "Your devicetree is out of date, please update it. The AQR107 family doesn't support XGMII, maybe you mean USXGMII.\n"); - ret = aqr107_wait_reset_complete(phydev); + ret = aqr_wait_reset_complete(phydev); if (!ret) aqr107_chip_info(phydev); - return aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT); + ret = aqr107_set_downshift(phydev, MDIO_AN_VEND_PROV_DOWNSHIFT_DFLT); + if (ret) + return ret; + + /* Restore LED polarity state after reset */ + for_each_set_bit(index, &priv->leds_active_low, AQR_MAX_LEDS) { + ret = aqr_phy_led_active_low_set(phydev, index, true); + if (ret) + return ret; + } + + return 0; } static int aqcs109_config_init(struct phy_device *phydev) @@ -545,7 +523,7 @@ static int aqcs109_config_init(struct phy_device *phydev) phydev->interface != PHY_INTERFACE_MODE_2500BASEX) return -ENODEV; - ret = aqr107_wait_reset_complete(phydev); + ret = aqr_wait_reset_complete(phydev); if (!ret) aqr107_chip_info(phydev); @@ -675,7 +653,13 @@ static int aqr107_fill_interface_modes(struct phy_device *phydev) unsigned long *possible = phydev->possible_interfaces; unsigned int serdes_mode, rate_adapt; phy_interface_t interface; - int i, val; + int i, val, ret; + + ret = phy_read_mmd_poll_timeout(phydev, MDIO_MMD_VEND1, + VEND1_GLOBAL_CFG_10M, val, val != 0, + 1000, 100000, false); + if (ret) + return ret; /* Walk the media-speed configuration registers to determine which * host-side serdes modes may be used by the PHY depending on the @@ -823,6 +807,11 @@ static struct phy_driver aqr_driver[] = { .get_strings = aqr107_get_strings, .get_stats = aqr107_get_stats, .link_change_notify = aqr107_link_change_notify, + .led_brightness_set = aqr_phy_led_brightness_set, + .led_hw_is_supported = aqr_phy_led_hw_is_supported, + .led_hw_control_set = aqr_phy_led_hw_control_set, + .led_hw_control_get = aqr_phy_led_hw_control_get, + .led_polarity_set = aqr_phy_led_polarity_set, }, { PHY_ID_MATCH_MODEL(PHY_ID_AQCS109), @@ -842,6 +831,11 @@ static struct phy_driver aqr_driver[] = { .get_strings = aqr107_get_strings, .get_stats = aqr107_get_stats, .link_change_notify = aqr107_link_change_notify, + .led_brightness_set = aqr_phy_led_brightness_set, + .led_hw_is_supported = aqr_phy_led_hw_is_supported, + .led_hw_control_set = aqr_phy_led_hw_control_set, + .led_hw_control_get = aqr_phy_led_hw_control_get, + .led_polarity_set = aqr_phy_led_polarity_set, }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR111), @@ -861,6 +855,11 @@ static struct phy_driver aqr_driver[] = { .get_strings = aqr107_get_strings, .get_stats = aqr107_get_stats, .link_change_notify = aqr107_link_change_notify, + .led_brightness_set = aqr_phy_led_brightness_set, + .led_hw_is_supported = aqr_phy_led_hw_is_supported, + .led_hw_control_set = aqr_phy_led_hw_control_set, + .led_hw_control_get = aqr_phy_led_hw_control_get, + .led_polarity_set = aqr_phy_led_polarity_set, }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR111B0), @@ -880,6 
+879,11 @@ static struct phy_driver aqr_driver[] = { .get_strings = aqr107_get_strings, .get_stats = aqr107_get_stats, .link_change_notify = aqr107_link_change_notify, + .led_brightness_set = aqr_phy_led_brightness_set, + .led_hw_is_supported = aqr_phy_led_hw_is_supported, + .led_hw_control_set = aqr_phy_led_hw_control_set, + .led_hw_control_get = aqr_phy_led_hw_control_get, + .led_polarity_set = aqr_phy_led_polarity_set, }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR405), @@ -906,6 +910,11 @@ static struct phy_driver aqr_driver[] = { .get_strings = aqr107_get_strings, .get_stats = aqr107_get_stats, .link_change_notify = aqr107_link_change_notify, + .led_brightness_set = aqr_phy_led_brightness_set, + .led_hw_is_supported = aqr_phy_led_hw_is_supported, + .led_hw_control_set = aqr_phy_led_hw_control_set, + .led_hw_control_get = aqr_phy_led_hw_control_get, + .led_polarity_set = aqr_phy_led_polarity_set, }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR412), @@ -943,6 +952,11 @@ static struct phy_driver aqr_driver[] = { .get_strings = aqr107_get_strings, .get_stats = aqr107_get_stats, .link_change_notify = aqr107_link_change_notify, + .led_brightness_set = aqr_phy_led_brightness_set, + .led_hw_is_supported = aqr_phy_led_hw_is_supported, + .led_hw_control_set = aqr_phy_led_hw_control_set, + .led_hw_control_get = aqr_phy_led_hw_control_get, + .led_polarity_set = aqr_phy_led_polarity_set, }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR113C), @@ -962,6 +976,11 @@ static struct phy_driver aqr_driver[] = { .get_strings = aqr107_get_strings, .get_stats = aqr107_get_stats, .link_change_notify = aqr107_link_change_notify, + .led_brightness_set = aqr_phy_led_brightness_set, + .led_hw_is_supported = aqr_phy_led_hw_is_supported, + .led_hw_control_set = aqr_phy_led_hw_control_set, + .led_hw_control_get = aqr_phy_led_hw_control_get, + .led_polarity_set = aqr_phy_led_polarity_set, }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR114C), @@ -981,6 +1000,35 @@ static struct phy_driver aqr_driver[] = { .get_strings = aqr107_get_strings, .get_stats = aqr107_get_stats, .link_change_notify = aqr107_link_change_notify, + .led_brightness_set = aqr_phy_led_brightness_set, + .led_hw_is_supported = aqr_phy_led_hw_is_supported, + .led_hw_control_set = aqr_phy_led_hw_control_set, + .led_hw_control_get = aqr_phy_led_hw_control_get, + .led_polarity_set = aqr_phy_led_polarity_set, +}, +{ + PHY_ID_MATCH_MODEL(PHY_ID_AQR115C), + .name = "Aquantia AQR115C", + .probe = aqr107_probe, + .get_rate_matching = aqr107_get_rate_matching, + .config_init = aqr113c_config_init, + .config_aneg = aqr_config_aneg, + .config_intr = aqr_config_intr, + .handle_interrupt = aqr_handle_interrupt, + .read_status = aqr107_read_status, + .get_tunable = aqr107_get_tunable, + .set_tunable = aqr107_set_tunable, + .suspend = aqr107_suspend, + .resume = aqr107_resume, + .get_sset_count = aqr107_get_sset_count, + .get_strings = aqr107_get_strings, + .get_stats = aqr107_get_stats, + .link_change_notify = aqr107_link_change_notify, + .led_brightness_set = aqr_phy_led_brightness_set, + .led_hw_is_supported = aqr_phy_led_hw_is_supported, + .led_hw_control_set = aqr_phy_led_hw_control_set, + .led_hw_control_get = aqr_phy_led_hw_control_get, + .led_polarity_set = aqr_phy_led_polarity_set, }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR813), @@ -1000,6 +1048,11 @@ static struct phy_driver aqr_driver[] = { .get_strings = aqr107_get_strings, .get_stats = aqr107_get_stats, .link_change_notify = aqr107_link_change_notify, + .led_brightness_set = aqr_phy_led_brightness_set, + .led_hw_is_supported = aqr_phy_led_hw_is_supported, + 
.led_hw_control_set = aqr_phy_led_hw_control_set, + .led_hw_control_get = aqr_phy_led_hw_control_get, + .led_polarity_set = aqr_phy_led_polarity_set, }, }; @@ -1020,6 +1073,7 @@ static struct mdio_device_id __maybe_unused aqr_tbl[] = { { PHY_ID_MATCH_MODEL(PHY_ID_AQR113) }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR113C) }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR114C) }, + { PHY_ID_MATCH_MODEL(PHY_ID_AQR115C) }, { PHY_ID_MATCH_MODEL(PHY_ID_AQR813) }, { } }; diff --git a/drivers/net/phy/bcm-phy-lib.c b/drivers/net/phy/bcm-phy-lib.c index 876f28fd8256..6c52f7dda514 100644 --- a/drivers/net/phy/bcm-phy-lib.c +++ b/drivers/net/phy/bcm-phy-lib.c @@ -794,6 +794,49 @@ out: return ret; } +static int bcm_setup_lre_forced(struct phy_device *phydev) +{ + u16 ctl = 0; + + phydev->pause = 0; + phydev->asym_pause = 0; + + if (phydev->speed == SPEED_100) + ctl |= LRECR_SPEED100; + + if (phydev->duplex != DUPLEX_FULL) + return -EOPNOTSUPP; + + return phy_modify(phydev, MII_BCM54XX_LRECR, LRECR_SPEED100, ctl); +} + +/** + * bcm_linkmode_adv_to_lre_adv_t - translate linkmode advertisement to LDS + * @advertising: the linkmode advertisement settings + * Return: LDS Auto-Negotiation Advertised Ability register value + * + * A small helper function that translates linkmode advertisement + * settings to phy LDS autonegotiation advertisements for the + * MII_BCM54XX_LREANAA register of Broadcom PHYs capable of LDS + */ +static u32 bcm_linkmode_adv_to_lre_adv_t(unsigned long *advertising) +{ + u32 result = 0; + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT, + advertising)) + result |= LREANAA_10_1PAIR; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT, + advertising)) + result |= LREANAA_100_1PAIR; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertising)) + result |= LRELPA_PAUSE; + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertising)) + result |= LRELPA_PAUSE_ASYM; + + return result; +} + int bcm_phy_cable_test_start(struct phy_device *phydev) { return _bcm_phy_cable_test_start(phydev, false); @@ -1066,6 +1109,78 @@ int bcm_phy_led_brightness_set(struct phy_device *phydev, } EXPORT_SYMBOL_GPL(bcm_phy_led_brightness_set); +int bcm_setup_lre_master_slave(struct phy_device *phydev) +{ + u16 ctl = 0; + + switch (phydev->master_slave_set) { + case MASTER_SLAVE_CFG_MASTER_PREFERRED: + case MASTER_SLAVE_CFG_MASTER_FORCE: + ctl = LRECR_MASTER; + break; + case MASTER_SLAVE_CFG_SLAVE_PREFERRED: + case MASTER_SLAVE_CFG_SLAVE_FORCE: + break; + case MASTER_SLAVE_CFG_UNKNOWN: + case MASTER_SLAVE_CFG_UNSUPPORTED: + return 0; + default: + phydev_warn(phydev, "Unsupported Master/Slave mode\n"); + return -EOPNOTSUPP; + } + + return phy_modify_changed(phydev, MII_BCM54XX_LRECR, LRECR_MASTER, ctl); +} +EXPORT_SYMBOL_GPL(bcm_setup_lre_master_slave); + +int bcm_config_lre_aneg(struct phy_device *phydev, bool changed) +{ + int err; + + if (genphy_config_eee_advert(phydev)) + changed = true; + + err = bcm_setup_lre_master_slave(phydev); + if (err < 0) + return err; + else if (err) + changed = true; + + if (phydev->autoneg != AUTONEG_ENABLE) + return bcm_setup_lre_forced(phydev); + + err = bcm_config_lre_advert(phydev); + if (err < 0) + return err; + else if (err) + changed = true; + + return genphy_check_and_restart_aneg(phydev, changed); +} +EXPORT_SYMBOL_GPL(bcm_config_lre_aneg); + +/** + * bcm_config_lre_advert - sanitize and advertise Long-Distance Signaling + * auto-negotiation parameters + * @phydev: target phy_device struct + * Return: 0 if the PHY's advertisement hasn't changed, < 0 on error, + 
* > 0 if it has changed + * + * Writes MII_BCM54XX_LREANAA with the appropriate values. The values are to be + * sanitized before, to make sure we only advertise what is supported. + * The sanitization is done already in phy_ethtool_ksettings_set() + */ +int bcm_config_lre_advert(struct phy_device *phydev) +{ + u32 adv = bcm_linkmode_adv_to_lre_adv_t(phydev->advertising); + + /* Setup BroadR-Reach mode advertisement */ + return phy_modify_changed(phydev, MII_BCM54XX_LREANAA, + LRE_ADVERTISE_ALL | LREANAA_PAUSE | + LREANAA_PAUSE_ASYM, adv); +} +EXPORT_SYMBOL_GPL(bcm_config_lre_advert); + MODULE_DESCRIPTION("Broadcom PHY Library"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Broadcom Corporation"); diff --git a/drivers/net/phy/bcm-phy-lib.h b/drivers/net/phy/bcm-phy-lib.h index b52189e45a84..bceddbc860eb 100644 --- a/drivers/net/phy/bcm-phy-lib.h +++ b/drivers/net/phy/bcm-phy-lib.h @@ -121,4 +121,8 @@ irqreturn_t bcm_phy_wol_isr(int irq, void *dev_id); int bcm_phy_led_brightness_set(struct phy_device *phydev, u8 index, enum led_brightness value); +int bcm_setup_lre_master_slave(struct phy_device *phydev); +int bcm_config_lre_aneg(struct phy_device *phydev, bool changed); +int bcm_config_lre_advert(struct phy_device *phydev); + #endif /* _LINUX_BCM_PHY_LIB_H */ diff --git a/drivers/net/phy/bcm-phy-ptp.c b/drivers/net/phy/bcm-phy-ptp.c index 617d384d4551..874a1b64b115 100644 --- a/drivers/net/phy/bcm-phy-ptp.c +++ b/drivers/net/phy/bcm-phy-ptp.c @@ -841,7 +841,7 @@ static int bcm_ptp_hwtstamp(struct mii_timestamper *mii_ts, } static int bcm_ptp_ts_info(struct mii_timestamper *mii_ts, - struct ethtool_ts_info *ts_info) + struct kernel_ethtool_ts_info *ts_info) { struct bcm_ptp_private *priv = mii2priv(mii_ts); @@ -931,6 +931,9 @@ struct bcm_ptp_private *bcm_ptp_probe(struct phy_device *phydev) return ERR_CAST(clock); priv->ptp_clock = clock; + /* Timestamp selected by default to keep legacy API */ + phydev->default_timestamp = true; + priv->phydev = phydev; bcm_ptp_init(priv); diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 370e4ed45098..ddded162c44c 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -5,6 +5,8 @@ * Broadcom BCM5411, BCM5421 and BCM5461 Gigabit Ethernet * transceivers. * + * Broadcom BCM54810, BCM54811 BroadR-Reach transceivers. + * * Copyright (c) 2006 Maciej W. Rozycki * * Inspired by code written by Amy Fong. 
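A note on the return convention used by the LRE helpers added above: like phy_modify_changed(), bcm_config_lre_advert() returns a negative errno on failure, 0 when the register already held the requested advertisement, and a positive value when it was rewritten. A minimal sketch of a caller honouring that convention (example_lre_aneg is a hypothetical name; the real logic lives in bcm_config_lre_aneg() above):

static int example_lre_aneg(struct phy_device *phydev)
{
	bool changed = false;
	int err;

	/* < 0: bus error, 0: advertisement unchanged, > 0: rewritten */
	err = bcm_config_lre_advert(phydev);
	if (err < 0)
		return err;
	if (err > 0)
		changed = true;

	/* Only restart auto-negotiation when something actually changed */
	return genphy_check_and_restart_aneg(phydev, changed);
}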
@@ -36,6 +38,29 @@ struct bcm54xx_phy_priv { struct bcm_ptp_private *ptp; int wake_irq; bool wake_irq_enabled; + bool brr_mode; +}; + +/* Link modes for BCM58411 PHY */ +static const int bcm54811_linkmodes[] = { + ETHTOOL_LINK_MODE_100baseT1_Full_BIT, + ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT, + ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + ETHTOOL_LINK_MODE_100baseT_Full_BIT, + ETHTOOL_LINK_MODE_100baseT_Half_BIT, + ETHTOOL_LINK_MODE_10baseT_Full_BIT, + ETHTOOL_LINK_MODE_10baseT_Half_BIT +}; + +/* Long-Distance Signaling (BroadR-Reach mode aneg) relevant linkmode bits */ +static const int lds_br_bits[] = { + ETHTOOL_LINK_MODE_Autoneg_BIT, + ETHTOOL_LINK_MODE_Pause_BIT, + ETHTOOL_LINK_MODE_Asym_Pause_BIT, + ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT, + ETHTOOL_LINK_MODE_100baseT1_Full_BIT }; static bool bcm54xx_phy_can_wakeup(struct phy_device *phydev) @@ -347,6 +372,61 @@ static void bcm54xx_ptp_config_init(struct phy_device *phydev) bcm_ptp_config_init(phydev); } +static int bcm5481x_set_brrmode(struct phy_device *phydev, bool on) +{ + int reg; + int err; + u16 val; + + reg = bcm_phy_read_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL); + + if (reg < 0) + return reg; + + if (on) + reg |= BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN; + else + reg &= ~BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN; + + err = bcm_phy_write_exp(phydev, + BCM54810_EXP_BROADREACH_LRE_MISC_CTL, reg); + if (err) + return err; + + /* Ensure LRE or IEEE register set is accessed according to the brr + * on/off, thus set the override + */ + val = BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_EN; + if (!on) + val |= BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_OVERRIDE_VAL; + + return bcm_phy_write_exp(phydev, + BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL, val); +} + +static int bcm54811_config_init(struct phy_device *phydev) +{ + struct bcm54xx_phy_priv *priv = phydev->priv; + int err, reg; + + /* Enable CLK125 MUX on LED4 if ref clock is enabled. */ + if (!(phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED)) { + reg = bcm_phy_read_exp(phydev, BCM54612E_EXP_SPARE0); + if (reg < 0) + return reg; + err = bcm_phy_write_exp(phydev, BCM54612E_EXP_SPARE0, + BCM54612E_LED4_CLK125OUT_EN | reg); + if (err < 0) + return err; + } + + /* With BCM54811, BroadR-Reach implies no autoneg */ + if (priv->brr_mode) + phydev->autoneg = 0; + + return bcm5481x_set_brrmode(phydev, priv->brr_mode); +} + static int bcm54xx_config_init(struct phy_device *phydev) { int reg, err, val; @@ -399,6 +479,9 @@ static int bcm54xx_config_init(struct phy_device *phydev) BCM54810_EXP_BROADREACH_LRE_MISC_CTL, val); break; + case PHY_ID_BCM54811: + err = bcm54811_config_init(phydev); + break; } if (err) return err; @@ -553,52 +636,117 @@ static int bcm54810_write_mmd(struct phy_device *phydev, int devnum, u16 regnum, return -EOPNOTSUPP; } -static int bcm54811_config_init(struct phy_device *phydev) -{ - int err, reg; - /* Disable BroadR-Reach function. */ - reg = bcm_phy_read_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL); - reg &= ~BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN; - err = bcm_phy_write_exp(phydev, BCM54810_EXP_BROADREACH_LRE_MISC_CTL, - reg); - if (err < 0) - return err; - - err = bcm54xx_config_init(phydev); - - /* Enable CLK125 MUX on LED4 if ref clock is enabled. 
*/ - if (!(phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED)) { - reg = bcm_phy_read_exp(phydev, BCM54612E_EXP_SPARE0); - err = bcm_phy_write_exp(phydev, BCM54612E_EXP_SPARE0, - BCM54612E_LED4_CLK125OUT_EN | reg); - if (err < 0) - return err; - } - - return err; -} - -static int bcm5481_config_aneg(struct phy_device *phydev) +/** + * bcm5481x_read_abilities - read PHY abilities from LRESR or Clause 22 + * (BMSR) registers, based on whether the PHY is in BroadR-Reach or IEEE mode + * @phydev: target phy_device struct + * + * Description: Reads the PHY's abilities and populates phydev->supported + * accordingly. The register to read the abilities from is determined by + * the brr mode setting of the PHY as read from the device tree. + * Note that the LRE and IEEE sets of abilities are disjunct, in other words, + * not only the link modes differ, but also the auto-negotiation and + * master-slave setup is controlled differently. + * + * Returns: 0 on success, < 0 on failure + */ +static int bcm5481x_read_abilities(struct phy_device *phydev) { struct device_node *np = phydev->mdio.dev.of_node; - int ret; + struct bcm54xx_phy_priv *priv = phydev->priv; + int i, val, err; - /* Aneg firstly. */ - ret = genphy_config_aneg(phydev); + for (i = 0; i < ARRAY_SIZE(bcm54811_linkmodes); i++) + linkmode_clear_bit(bcm54811_linkmodes[i], phydev->supported); - /* Then we can set up the delay. */ + priv->brr_mode = of_property_read_bool(np, "brr-mode"); + + /* Set BroadR-Reach mode as configured in the DT. */ + err = bcm5481x_set_brrmode(phydev, priv->brr_mode); + if (err) + return err; + + if (priv->brr_mode) { + linkmode_set_bit_array(phy_basic_ports_array, + ARRAY_SIZE(phy_basic_ports_array), + phydev->supported); + + val = phy_read(phydev, MII_BCM54XX_LRESR); + if (val < 0) + return val; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->supported, + val & LRESR_LDSABILITY); + linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT, + phydev->supported, + val & LRESR_100_1PAIR); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT, + phydev->supported, + val & LRESR_10_1PAIR); + return 0; + } + + return genphy_read_abilities(phydev); +} + +static int bcm5481x_config_delay_swap(struct phy_device *phydev) +{ + struct device_node *np = phydev->mdio.dev.of_node; + + /* Set up the delay. */ bcm54xx_config_clock_delay(phydev); if (of_property_read_bool(np, "enet-phy-lane-swap")) { /* Lane Swap - Undocumented register...magic! */ - ret = bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_SEL_ER + 0x9, - 0x11B); + int ret = bcm_phy_write_exp(phydev, + MII_BCM54XX_EXP_SEL_ER + 0x9, + 0x11B); if (ret < 0) return ret; } - return ret; + return 0; +} + +static int bcm5481_config_aneg(struct phy_device *phydev) +{ + struct bcm54xx_phy_priv *priv = phydev->priv; + int ret; + + /* Aneg firstly. */ + if (priv->brr_mode) + ret = bcm_config_lre_aneg(phydev, false); + else + ret = genphy_config_aneg(phydev); + + if (ret) + return ret; + + /* Then we can set up the delay and swap. */ + return bcm5481x_config_delay_swap(phydev); +} + +static int bcm54811_config_aneg(struct phy_device *phydev) +{ + struct bcm54xx_phy_priv *priv = phydev->priv; + int ret; + + /* Aneg firstly. */ + if (priv->brr_mode) { + /* BCM54811 is only capable of autonegotiation in IEEE mode */ + phydev->autoneg = 0; + ret = bcm_config_lre_aneg(phydev, false); + } else { + ret = genphy_config_aneg(phydev); + } + + if (ret) + return ret; + + /* Then we can set up the delay and swap. 
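+	 * The delay and lane-swap programming is shared with
+	 * bcm5481_config_aneg() through bcm5481x_config_delay_swap(), so the
+	 * IEEE and BroadR-Reach paths cannot drift apart.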
*/ + return bcm5481x_config_delay_swap(phydev); } struct bcm54616s_phy_priv { @@ -1062,6 +1210,203 @@ static void bcm54xx_link_change_notify(struct phy_device *phydev) bcm_phy_write_exp(phydev, MII_BCM54XX_EXP_EXP08, ret); } +static int lre_read_master_slave(struct phy_device *phydev) +{ + int cfg = MASTER_SLAVE_CFG_UNKNOWN, state; + int val; + + /* In BroadR-Reach mode we are always capable of master-slave + * and there is no preferred master or slave configuration + */ + phydev->master_slave_get = MASTER_SLAVE_CFG_UNKNOWN; + phydev->master_slave_state = MASTER_SLAVE_STATE_UNKNOWN; + + val = phy_read(phydev, MII_BCM54XX_LRECR); + if (val < 0) + return val; + + if ((val & LRECR_LDSEN) == 0) { + if (val & LRECR_MASTER) + cfg = MASTER_SLAVE_CFG_MASTER_FORCE; + else + cfg = MASTER_SLAVE_CFG_SLAVE_FORCE; + } + + val = phy_read(phydev, MII_BCM54XX_LRELDSE); + if (val < 0) + return val; + + if (val & LDSE_MASTER) + state = MASTER_SLAVE_STATE_MASTER; + else + state = MASTER_SLAVE_STATE_SLAVE; + + phydev->master_slave_get = cfg; + phydev->master_slave_state = state; + + return 0; +} + +/* Read LDS Link Partner Ability in BroadR-Reach mode */ +static int lre_read_lpa(struct phy_device *phydev) +{ + int i, lrelpa; + + if (phydev->autoneg != AUTONEG_ENABLE) { + if (!phydev->autoneg_complete) { + /* aneg not yet done, reset all relevant bits */ + for (i = 0; i < ARRAY_SIZE(lds_br_bits); i++) + linkmode_clear_bit(lds_br_bits[i], + phydev->lp_advertising); + + return 0; + } + + /* Long-Distance Signaling Link Partner Ability */ + lrelpa = phy_read(phydev, MII_BCM54XX_LRELPA); + if (lrelpa < 0) + return lrelpa; + + linkmode_mod_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->lp_advertising, + lrelpa & LRELPA_PAUSE_ASYM); + linkmode_mod_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->lp_advertising, + lrelpa & LRELPA_PAUSE); + linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT1_Full_BIT, + phydev->lp_advertising, + lrelpa & LRELPA_100_1PAIR); + linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT, + phydev->lp_advertising, + lrelpa & LRELPA_10_1PAIR); + } else { + linkmode_zero(phydev->lp_advertising); + } + + return 0; +} + +static int lre_read_status_fixed(struct phy_device *phydev) +{ + int lrecr = phy_read(phydev, MII_BCM54XX_LRECR); + + if (lrecr < 0) + return lrecr; + + phydev->duplex = DUPLEX_FULL; + + if (lrecr & LRECR_SPEED100) + phydev->speed = SPEED_100; + else + phydev->speed = SPEED_10; + + return 0; +} + +/** + * lre_update_link - update link status in @phydev + * @phydev: target phy_device struct + * Return: 0 on success, < 0 on error + * + * Description: Update the value in phydev->link to reflect the + * current link value. In order to do this, we need to read + * the status register twice, keeping the second value. + * This is a genphy_update_link modified to work on LRE registers + * of BroadR-Reach PHY + */ +static int lre_update_link(struct phy_device *phydev) +{ + int status = 0, lrecr; + + lrecr = phy_read(phydev, MII_BCM54XX_LRECR); + if (lrecr < 0) + return lrecr; + + /* Autoneg is being started, therefore disregard BMSR value and + * report link as down. + */ + if (lrecr & BMCR_ANRESTART) + goto done; + + /* The link state is latched low so that momentary link + * drops can be detected. Do not double-read the status + * in polling mode to detect such short link drops except + * the link was already down. 
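+	 * In interrupt mode, or when the link was already down, the first
+	 * read consumes the latched value; if it still reports link up we
+	 * use it directly, otherwise the second read below returns the live
+	 * state.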
+ */ + if (!phy_polling_mode(phydev) || !phydev->link) { + status = phy_read(phydev, MII_BCM54XX_LRESR); + if (status < 0) + return status; + else if (status & LRESR_LSTATUS) + goto done; + } + + /* Read link and autonegotiation status */ + status = phy_read(phydev, MII_BCM54XX_LRESR); + if (status < 0) + return status; +done: + phydev->link = status & LRESR_LSTATUS ? 1 : 0; + phydev->autoneg_complete = status & LRESR_LDSCOMPLETE ? 1 : 0; + + /* Consider the case that autoneg was started and "aneg complete" + * bit has been reset, but "link up" bit not yet. + */ + if (phydev->autoneg == AUTONEG_ENABLE && !phydev->autoneg_complete) + phydev->link = 0; + + return 0; +} + +/* Get the status in BroadRReach mode just like genphy_read_status does +* in normal mode +*/ +static int bcm54811_lre_read_status(struct phy_device *phydev) +{ + int err, old_link = phydev->link; + + /* Update the link, but return if there was an error */ + err = lre_update_link(phydev); + if (err) + return err; + + /* why bother the PHY if nothing can have changed */ + if (phydev->autoneg == + AUTONEG_ENABLE && old_link && phydev->link) + return 0; + + phydev->speed = SPEED_UNKNOWN; + phydev->duplex = DUPLEX_UNKNOWN; + phydev->pause = 0; + phydev->asym_pause = 0; + + err = lre_read_master_slave(phydev); + if (err < 0) + return err; + + /* Read LDS Link Partner Ability */ + err = lre_read_lpa(phydev); + if (err < 0) + return err; + + if (phydev->autoneg == AUTONEG_ENABLE && phydev->autoneg_complete) + phy_resolve_aneg_linkmode(phydev); + else if (phydev->autoneg == AUTONEG_DISABLE) + err = lre_read_status_fixed(phydev); + + return err; +} + +static int bcm54811_read_status(struct phy_device *phydev) +{ + struct bcm54xx_phy_priv *priv = phydev->priv; + + if (priv->brr_mode) + return bcm54811_lre_read_status(phydev); + + return genphy_read_status(phydev); +} + static struct phy_driver broadcom_drivers[] = { { .phy_id = PHY_ID_BCM5411, @@ -1211,10 +1556,12 @@ static struct phy_driver broadcom_drivers[] = { .get_strings = bcm_phy_get_strings, .get_stats = bcm54xx_get_stats, .probe = bcm54xx_phy_probe, - .config_init = bcm54811_config_init, - .config_aneg = bcm5481_config_aneg, + .config_init = bcm54xx_config_init, + .config_aneg = bcm54811_config_aneg, .config_intr = bcm_phy_config_intr, .handle_interrupt = bcm_phy_handle_interrupt, + .read_status = bcm54811_read_status, + .get_features = bcm5481x_read_abilities, .suspend = bcm54xx_suspend, .resume = bcm54xx_resume, .link_change_notify = bcm54xx_link_change_notify, diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index 5c42c47dc564..075d2beea716 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1395,7 +1395,7 @@ static void dp83640_txtstamp(struct mii_timestamper *mii_ts, } static int dp83640_ts_info(struct mii_timestamper *mii_ts, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct dp83640_private *dp83640 = container_of(mii_ts, struct dp83640_private, mii_ts); @@ -1447,6 +1447,8 @@ static int dp83640_probe(struct phy_device *phydev) for (i = 0; i < MAX_RXTS; i++) list_add(&dp83640->rx_pool_data[i].list, &dp83640->rxpool); + /* Timestamp selected by default to keep legacy API */ + phydev->default_timestamp = true; phydev->mii_ts = &dp83640->mii_ts; phydev->priv = dp83640; diff --git a/drivers/net/phy/dp83td510.c b/drivers/net/phy/dp83td510.c index d7616b13c594..551e37786c2d 100644 --- a/drivers/net/phy/dp83td510.c +++ b/drivers/net/phy/dp83td510.c @@ -4,6 +4,7 @@ */ #include +#include #include #include 
#include @@ -29,6 +30,10 @@ #define DP83TD510E_INT1_LINK BIT(13) #define DP83TD510E_INT1_LINK_EN BIT(5) +#define DP83TD510E_CTRL 0x1f +#define DP83TD510E_CTRL_HW_RESET BIT(15) +#define DP83TD510E_CTRL_SW_RESET BIT(14) + #define DP83TD510E_AN_STAT_1 0x60c #define DP83TD510E_MASTER_SLAVE_RESOL_FAIL BIT(15) @@ -53,6 +58,117 @@ static const u16 dp83td510_mse_sqi_map[] = { 0x0000 /* 24dB =< SNR */ }; +/* Time Domain Reflectometry (TDR) Functionality of DP83TD510 PHY + * + * I assume that this PHY is using a variation of Spread Spectrum Time Domain + * Reflectometry (SSTDR) rather than the commonly used TDR found in many PHYs. + * Here are the following observations which likely confirm this: + * - The DP83TD510 PHY transmits a modulated signal of configurable length + * (default 16000 µs) instead of a single pulse pattern, which is typical + * for traditional TDR. + * - The pulse observed on the wire, triggered by the HW RESET register, is not + * part of the cable testing process. + * + * I assume that SSTDR seems to be a logical choice for the 10BaseT1L + * environment due to improved noise resistance, making it suitable for + * environments with significant electrical noise, such as long 10BaseT1L cable + * runs. + * + * Configuration Variables: + * The SSTDR variation used in this PHY involves more configuration variables + * that can dramatically affect the functionality and precision of cable + * testing. Since most of these configuration options are either not well + * documented or documented with minimal details, the following sections + * describe my understanding and observations of these variables and their + * impact on TDR functionality. + * + * Timeline: + * ,<--cfg_pre_silence_time + * | ,<-SSTDR Modulated Transmission + * | | ,<--cfg_post_silence_time + * | | | ,<--Force Link Mode + * |<--'-->|<-------'------->|<--'-->|<--------'------->| + * + * - cfg_pre_silence_time: Optional silence time before TDR transmission starts. + * - SSTDR Modulated Transmission: Transmission duration configured by + * cfg_tdr_tx_duration and amplitude configured by cfg_tdr_tx_type. + * - cfg_post_silence_time: Silence time after TDR transmission. + * - Force Link Mode: If nothing is configured after cfg_post_silence_time, + * the PHY continues in force link mode without autonegotiation. + */ + +#define DP83TD510E_TDR_CFG 0x1e +#define DP83TD510E_TDR_START BIT(15) +#define DP83TD510E_TDR_DONE BIT(1) +#define DP83TD510E_TDR_FAIL BIT(0) + +#define DP83TD510E_TDR_CFG1 0x300 +/* cfg_tdr_tx_type: Transmit voltage level for TDR. + * 0 = 1V, 1 = 2.4V + * Note: Using different voltage levels may not work + * in all configuration variations. For example, setting + * 2.4V may give different cable length measurements. + * Other settings may be needed to make it work properly. + */ +#define DP83TD510E_TDR_TX_TYPE BIT(12) +#define DP83TD510E_TDR_TX_TYPE_1V 0 +#define DP83TD510E_TDR_TX_TYPE_2_4V 1 +/* cfg_post_silence_time: Time after the TDR sequence. Since we force master mode + * for the TDR will proceed with forced link state after this time. For Linux + * it is better to set max value to avoid false link state detection. + */ +#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME GENMASK(3, 2) +#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_0MS 0 +#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_10MS 1 +#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_100MS 2 +#define DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_1000MS 3 +/* cfg_pre_silence_time: Time before the TDR sequence. 
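+ * (the line is kept silent during this window).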
It should be enough to + * settle down all pulses and reflections. Since for 10BASE-T1L we have + * maximum 2000m cable length, we can set it to 1ms. + */ +#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME GENMASK(1, 0) +#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_0MS 0 +#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_10MS 1 +#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_100MS 2 +#define DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_1000MS 3 + +#define DP83TD510E_TDR_CFG2 0x301 +#define DP83TD510E_TDR_END_TAP_INDEX_1 GENMASK(14, 8) +#define DP83TD510E_TDR_END_TAP_INDEX_1_DEF 36 +#define DP83TD510E_TDR_START_TAP_INDEX_1 GENMASK(6, 0) +#define DP83TD510E_TDR_START_TAP_INDEX_1_DEF 4 + +#define DP83TD510E_TDR_CFG3 0x302 +/* cfg_tdr_tx_duration: Duration of the TDR transmission in microseconds. + * This value sets the duration of the modulated signal used for TDR + * measurements. + * - Default: 16000 µs + * - Observation: A minimum duration of 6000 µs is recommended to ensure + * accurate detection of cable faults. Durations shorter than 6000 µs may + * result in incomplete data, especially for shorter cables (e.g., 20 meters), + * leading to false "OK" results. Longer durations (e.g., 6000 µs or more) + * provide better accuracy, particularly for detecting open circuits. + */ +#define DP83TD510E_TDR_TX_DURATION_US GENMASK(15, 0) +#define DP83TD510E_TDR_TX_DURATION_US_DEF 16000 + +#define DP83TD510E_TDR_FAULT_CFG1 0x303 +#define DP83TD510E_TDR_FLT_LOC_OFFSET_1 GENMASK(14, 8) +#define DP83TD510E_TDR_FLT_LOC_OFFSET_1_DEF 4 +#define DP83TD510E_TDR_FLT_INIT_1 GENMASK(7, 0) +#define DP83TD510E_TDR_FLT_INIT_1_DEF 62 + +#define DP83TD510E_TDR_FAULT_STAT 0x30c +#define DP83TD510E_TDR_PEAK_DETECT BIT(11) +#define DP83TD510E_TDR_PEAK_SIGN BIT(10) +#define DP83TD510E_TDR_PEAK_LOCATION GENMASK(9, 0) + +/* Not documented registers and values but recommended according to + * "DP83TD510E Cable Diagnostics Toolkit revC" + */ +#define DP83TD510E_UNKN_030E 0x30e +#define DP83TD510E_030E_VAL 0x2520 + static int dp83td510_config_intr(struct phy_device *phydev) { int ret; @@ -198,6 +314,151 @@ static int dp83td510_get_sqi_max(struct phy_device *phydev) return DP83TD510_SQI_MAX; } +/** + * dp83td510_cable_test_start - Start the cable test for the DP83TD510 PHY. + * @phydev: Pointer to the phy_device structure. + * + * This sequence is implemented according to the "Application Note DP83TD510E + * Cable Diagnostics Toolkit revC". + * + * Returns: 0 on success, a negative error code on failure. + */ +static int dp83td510_cable_test_start(struct phy_device *phydev) +{ + int ret; + + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_CTRL, + DP83TD510E_CTRL_HW_RESET); + if (ret) + return ret; + + ret = genphy_c45_an_disable_aneg(phydev); + if (ret) + return ret; + + /* Force master mode */ + ret = phy_set_bits_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_PMD_BT1_CTRL, + MDIO_PMA_PMD_BT1_CTRL_CFG_MST); + if (ret) + return ret; + + /* There is no official recommendation for this register, but it is + * better to use 1V for TDR since other values seems to be optimized + * for this amplitude. Except of amplitude, it is better to configure + * pre TDR silence time to 10ms to avoid false reflections (value 0 + * seems to be too short, otherwise we need to implement own silence + * time). Also, post TDR silence time should be set to 1000ms to avoid + * false link state detection, it fits to the polling time of the + * PHY framework. 
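+ * (The phylib state machine polls roughly once per second, so a 1000 ms
+ * window is long enough for the next poll to land inside it.)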
The idea is to wait until + * dp83td510_cable_test_get_status() will be called and reconfigure + * the PHY to the default state within the post silence time window. + */ + ret = phy_modify_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG1, + DP83TD510E_TDR_TX_TYPE | + DP83TD510E_TDR_CFG1_POST_SILENCE_TIME | + DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME, + DP83TD510E_TDR_TX_TYPE_1V | + DP83TD510E_TDR_CFG1_PRE_SILENCE_TIME_10MS | + DP83TD510E_TDR_CFG1_POST_SILENCE_TIME_1000MS); + if (ret) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG2, + FIELD_PREP(DP83TD510E_TDR_END_TAP_INDEX_1, + DP83TD510E_TDR_END_TAP_INDEX_1_DEF) | + FIELD_PREP(DP83TD510E_TDR_START_TAP_INDEX_1, + DP83TD510E_TDR_START_TAP_INDEX_1_DEF)); + if (ret) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_FAULT_CFG1, + FIELD_PREP(DP83TD510E_TDR_FLT_LOC_OFFSET_1, + DP83TD510E_TDR_FLT_LOC_OFFSET_1_DEF) | + FIELD_PREP(DP83TD510E_TDR_FLT_INIT_1, + DP83TD510E_TDR_FLT_INIT_1_DEF)); + if (ret) + return ret; + + /* Undocumented register, from the "Application Note DP83TD510E Cable + * Diagnostics Toolkit revC". + */ + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_UNKN_030E, + DP83TD510E_030E_VAL); + if (ret) + return ret; + + ret = phy_write_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG3, + DP83TD510E_TDR_TX_DURATION_US_DEF); + if (ret) + return ret; + + ret = phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_CTRL, + DP83TD510E_CTRL_SW_RESET); + if (ret) + return ret; + + return phy_set_bits_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG, + DP83TD510E_TDR_START); +} + +/** + * dp83td510_cable_test_get_status - Get the status of the cable test for the + * DP83TD510 PHY. + * @phydev: Pointer to the phy_device structure. + * @finished: Pointer to a boolean that indicates whether the test is finished. + * + * The function sets the @finished flag to true if the test is complete. + * + * Returns: 0 on success or a negative error code on failure. + */ +static int dp83td510_cable_test_get_status(struct phy_device *phydev, + bool *finished) +{ + int ret, stat; + + *finished = false; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, DP83TD510E_TDR_CFG); + if (ret < 0) + return ret; + + if (!(ret & DP83TD510E_TDR_DONE)) + return 0; + + if (!(ret & DP83TD510E_TDR_FAIL)) { + int location; + + ret = phy_read_mmd(phydev, MDIO_MMD_VEND2, + DP83TD510E_TDR_FAULT_STAT); + if (ret < 0) + return ret; + + if (ret & DP83TD510E_TDR_PEAK_DETECT) { + if (ret & DP83TD510E_TDR_PEAK_SIGN) + stat = ETHTOOL_A_CABLE_RESULT_CODE_OPEN; + else + stat = ETHTOOL_A_CABLE_RESULT_CODE_SAME_SHORT; + + location = FIELD_GET(DP83TD510E_TDR_PEAK_LOCATION, + ret) * 100; + ethnl_cable_test_fault_length(phydev, + ETHTOOL_A_CABLE_PAIR_A, + location); + } else { + stat = ETHTOOL_A_CABLE_RESULT_CODE_OK; + } + } else { + /* Most probably we have active link partner */ + stat = ETHTOOL_A_CABLE_RESULT_CODE_UNSPEC; + } + + *finished = true; + + ethnl_cable_test_result(phydev, ETHTOOL_A_CABLE_PAIR_A, stat); + + return phy_init_hw(phydev); +} + static int dp83td510_get_features(struct phy_device *phydev) { /* This PHY can't respond on MDIO bus if no RMII clock is enabled. 
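As a brief aside on the fault mapping in dp83td510_cable_test_get_status() above: the peak sign separates the two fault classes (consistent with classic TDR, where an open reflects the pulse in phase and a short inverts it), and each DP83TD510E_TDR_PEAK_LOCATION unit is multiplied by 100 before being handed to ethnl_cable_test_fault_length(), which takes centimetres. One location unit therefore corresponds to one metre; a reported peak location of 250, for example, places the fault at 250 * 100 cm = 250 m.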
@@ -221,6 +482,7 @@ static struct phy_driver dp83td510_driver[] = { PHY_ID_MATCH_MODEL(DP83TD510E_PHY_ID), .name = "TI DP83TD510E", + .flags = PHY_POLL_CABLE_TEST, .config_aneg = dp83td510_config_aneg, .read_status = dp83td510_read_status, .get_features = dp83td510_get_features, @@ -228,6 +490,8 @@ static struct phy_driver dp83td510_driver[] = { .handle_interrupt = dp83td510_handle_interrupt, .get_sqi = dp83td510_get_sqi, .get_sqi_max = dp83td510_get_sqi_max, + .cable_test_start = dp83td510_cable_test_start, + .cable_test_get_status = dp83td510_cable_test_get_status, .suspend = genphy_suspend, .resume = genphy_resume, diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index ebafedde0ab7..dd519805deee 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -2552,7 +2552,7 @@ static void lan8814_ptp_tx_ts_get(struct phy_device *phydev, *seq_id = lanphy_read_page_reg(phydev, 5, PTP_TX_MSG_HEADER2); } -static int lan8814_ts_info(struct mii_timestamper *mii_ts, struct ethtool_ts_info *info) +static int lan8814_ts_info(struct mii_timestamper *mii_ts, struct kernel_ethtool_ts_info *info) { struct kszphy_ptp_priv *ptp_priv = container_of(mii_ts, struct kszphy_ptp_priv, mii_ts); struct phy_device *phydev = ptp_priv->phydev; @@ -3781,6 +3781,9 @@ static void lan8814_ptp_init(struct phy_device *phydev) ptp_priv->mii_ts.ts_info = lan8814_ts_info; phydev->mii_ts = &ptp_priv->mii_ts; + + /* Timestamp selected by default to keep legacy API */ + phydev->default_timestamp = true; } static int lan8814_ptp_probe_once(struct phy_device *phydev) @@ -4314,7 +4317,7 @@ static irqreturn_t lan8841_handle_interrupt(struct phy_device *phydev) } static int lan8841_ts_info(struct mii_timestamper *mii_ts, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct kszphy_ptp_priv *ptp_priv; @@ -5279,6 +5282,9 @@ static int lan8841_probe(struct phy_device *phydev) phydev->mii_ts = &ptp_priv->mii_ts; + /* Timestamp selected by default to keep legacy API */ + phydev->default_timestamp = true; + return 0; } diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index 0b88635f4fbc..d3273bc0da4a 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -12,8 +12,14 @@ #include #include +#define PHY_ID_LAN937X_TX 0x0007c190 + +#define LAN937X_MODE_CTRL_STATUS_REG 0x11 +#define LAN937X_AUTOMDIX_EN BIT(7) +#define LAN937X_MDI_MODE BIT(6) + #define DRIVER_AUTHOR "WOOJUNG HUH " -#define DRIVER_DESC "Microchip LAN88XX PHY driver" +#define DRIVER_DESC "Microchip LAN88XX/LAN937X TX PHY driver" struct lan88xx_priv { int chip_id; @@ -373,6 +379,115 @@ static void lan88xx_link_change_notify(struct phy_device *phydev) } } +/** + * lan937x_tx_read_mdix_status - Read the MDIX status for the LAN937x TX PHY. + * @phydev: Pointer to the phy_device structure. + * + * This function reads the MDIX status of the LAN937x TX PHY and sets the + * mdix_ctrl and mdix fields of the phy_device structure accordingly. + * Note that MDIX status is not supported in AUTO mode, and will be set + * to invalid in such cases. + * + * Return: 0 on success, a negative error code on failure. 
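+ * (In auto-MDIX mode the hardware does not report which pairing was
+ * resolved, hence ETH_TP_MDI_INVALID rather than a guess.)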
+ */ +static int lan937x_tx_read_mdix_status(struct phy_device *phydev) +{ + int ret; + + ret = phy_read(phydev, LAN937X_MODE_CTRL_STATUS_REG); + if (ret < 0) + return ret; + + if (ret & LAN937X_AUTOMDIX_EN) { + phydev->mdix_ctrl = ETH_TP_MDI_AUTO; + /* MDI/MDIX status is unknown */ + phydev->mdix = ETH_TP_MDI_INVALID; + } else if (ret & LAN937X_MDI_MODE) { + phydev->mdix_ctrl = ETH_TP_MDI_X; + phydev->mdix = ETH_TP_MDI_X; + } else { + phydev->mdix_ctrl = ETH_TP_MDI; + phydev->mdix = ETH_TP_MDI; + } + + return 0; +} + +/** + * lan937x_tx_read_status - Read the status for the LAN937x TX PHY. + * @phydev: Pointer to the phy_device structure. + * + * This function reads the status of the LAN937x TX PHY and updates the + * phy_device structure accordingly. + * + * Return: 0 on success, a negative error code on failure. + */ +static int lan937x_tx_read_status(struct phy_device *phydev) +{ + int ret; + + ret = genphy_read_status(phydev); + if (ret < 0) + return ret; + + return lan937x_tx_read_mdix_status(phydev); +} + +/** + * lan937x_tx_set_mdix - Set the MDIX mode for the LAN937x TX PHY. + * @phydev: Pointer to the phy_device structure. + * + * This function configures the MDIX mode of the LAN937x TX PHY based on the + * mdix_ctrl field of the phy_device structure. The MDIX mode can be set to + * MDI (straight-through), MDIX (crossover), or AUTO (auto-MDIX). If the mode + * is not recognized, it returns 0 without making any changes. + * + * Return: 0 on success, a negative error code on failure. + */ +static int lan937x_tx_set_mdix(struct phy_device *phydev) +{ + u16 val; + + switch (phydev->mdix_ctrl) { + case ETH_TP_MDI: + val = 0; + break; + case ETH_TP_MDI_X: + val = LAN937X_MDI_MODE; + break; + case ETH_TP_MDI_AUTO: + val = LAN937X_AUTOMDIX_EN; + break; + default: + return 0; + } + + return phy_modify(phydev, LAN937X_MODE_CTRL_STATUS_REG, + LAN937X_AUTOMDIX_EN | LAN937X_MDI_MODE, val); +} + +/** + * lan937x_tx_config_aneg - Configure auto-negotiation and fixed modes for the + * LAN937x TX PHY. + * @phydev: Pointer to the phy_device structure. + * + * This function configures the MDIX mode for the LAN937x TX PHY and then + * proceeds to configure the auto-negotiation or fixed mode settings + * based on the phy_device structure. + * + * Return: 0 on success, a negative error code on failure. 
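+ * (User space reaches this path through ethtool's MDI-X control, e.g.
+ * "ethtool -s ethX mdix auto", which phylib turns into phydev->mdix_ctrl
+ * before re-running auto-negotiation.)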
+ */ +static int lan937x_tx_config_aneg(struct phy_device *phydev) +{ + int ret; + + ret = lan937x_tx_set_mdix(phydev); + if (ret < 0) + return ret; + + return genphy_config_aneg(phydev); +} + static struct phy_driver microchip_phy_driver[] = { { .phy_id = 0x0007c132, @@ -400,12 +515,21 @@ static struct phy_driver microchip_phy_driver[] = { .set_wol = lan88xx_set_wol, .read_page = lan88xx_read_page, .write_page = lan88xx_write_page, +}, +{ + PHY_ID_MATCH_MODEL(PHY_ID_LAN937X_TX), + .name = "Microchip LAN937x TX", + .suspend = genphy_suspend, + .resume = genphy_resume, + .config_aneg = lan937x_tx_config_aneg, + .read_status = lan937x_tx_read_status, } }; module_phy_driver(microchip_phy_driver); static struct mdio_device_id __maybe_unused microchip_tbl[] = { { 0x0007c132, 0xfffffff2 }, + { PHY_ID_MATCH_MODEL(PHY_ID_LAN937X_TX) }, { } }; diff --git a/drivers/net/phy/mscc/mscc_ptp.c b/drivers/net/phy/mscc/mscc_ptp.c index eb0b032cb613..c1ddae36a2ae 100644 --- a/drivers/net/phy/mscc/mscc_ptp.c +++ b/drivers/net/phy/mscc/mscc_ptp.c @@ -1134,7 +1134,7 @@ static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts, } static int vsc85xx_ts_info(struct mii_timestamper *mii_ts, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct vsc8531_private *vsc8531 = container_of(mii_ts, struct vsc8531_private, mii_ts); @@ -1570,6 +1570,9 @@ int vsc8584_ptp_probe(struct phy_device *phydev) return PTR_ERR(vsc8531->load_save); } + /* Timestamp selected by default to keep legacy API */ + phydev->default_timestamp = true; + vsc8531->ptp->phydev = phydev; return 0; diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c index 3cf614b4cd52..5af5ade4fc64 100644 --- a/drivers/net/phy/nxp-c45-tja11xx.c +++ b/drivers/net/phy/nxp-c45-tja11xx.c @@ -1058,7 +1058,7 @@ nxp_c45_no_ptp_irq: } static int nxp_c45_ts_info(struct mii_timestamper *mii_ts, - struct ethtool_ts_info *ts_info) + struct kernel_ethtool_ts_info *ts_info) { struct nxp_c45_phy *priv = container_of(mii_ts, struct nxp_c45_phy, mii_ts); @@ -1660,6 +1660,9 @@ static int nxp_c45_probe(struct phy_device *phydev) priv->mii_ts.ts_info = nxp_c45_ts_info; phydev->mii_ts = &priv->mii_ts; ret = nxp_c45_init_ptp_clock(priv); + + /* Timestamp selected by default to keep legacy API */ + phydev->default_timestamp = true; } else { phydev_dbg(phydev, "PTP support not enabled even if the phy supports it"); } diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index 15f349e5995a..1f98b6a96c15 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -13,7 +13,7 @@ */ const char *phy_speed_to_str(int speed) { - BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 102, + BUILD_BUG_ON_MSG(__ETHTOOL_LINK_MODE_MASK_NBITS != 103, "Enum ethtool_link_mode_bit_indices and phylib are out of sync. 
" "If a speed or mode has been added please update phy_speed_to_str " "and the PHY settings array.\n"); @@ -141,6 +141,7 @@ int phy_interface_num_ports(phy_interface_t interface) return 1; case PHY_INTERFACE_MODE_QSGMII: case PHY_INTERFACE_MODE_QUSGMII: + case PHY_INTERFACE_MODE_10G_QXGMII: return 4; case PHY_INTERFACE_MODE_PSGMII: return 5; @@ -265,6 +266,7 @@ static const struct phy_setting settings[] = { PHY_SETTING( 10, FULL, 10baseT1S_Full ), PHY_SETTING( 10, HALF, 10baseT1S_Half ), PHY_SETTING( 10, HALF, 10baseT1S_P2MP_Half ), + PHY_SETTING( 10, FULL, 10baseT1BRR_Full ), }; #undef PHY_SETTING diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index c4236564c1cd..785182fa5fe0 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -1309,7 +1309,7 @@ static irqreturn_t phy_interrupt(int irq, void *phy_dat) if (netdev) { struct device *parent = netdev->dev.parent; - if (netdev->wol_enabled) + if (netdev->ethtool->wol_enabled) pm_system_wakeup(); else if (device_may_wakeup(&netdev->dev)) pm_wakeup_dev_event(&netdev->dev, 0, true); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 6c6ec9475709..70b07e621fb2 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -296,7 +296,7 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) if (!netdev) goto out; - if (netdev->wol_enabled) + if (netdev->ethtool->wol_enabled) return false; /* As long as not all affected network drivers support the @@ -1980,16 +1980,17 @@ int phy_suspend(struct phy_device *phydev) const struct phy_driver *phydrv = phydev->drv; int ret; - if (phydev->suspended) + if (phydev->suspended || !phydrv) return 0; phy_ethtool_get_wol(phydev, &wol); - phydev->wol_enabled = wol.wolopts || (netdev && netdev->wol_enabled); + phydev->wol_enabled = wol.wolopts || + (netdev && netdev->ethtool->wol_enabled); /* If the device has WOL enabled, we cannot suspend the PHY */ if (phydev->wol_enabled && !(phydrv->flags & PHY_ALWAYS_CALL_SUSPEND)) return -EBUSY; - if (!phydrv || !phydrv->suspend) + if (!phydrv->suspend) return 0; ret = phydrv->suspend(phydev); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 994471fad833..51c526d227fa 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -231,6 +231,7 @@ static int phylink_interface_max_speed(phy_interface_t interface) return SPEED_1000; case PHY_INTERFACE_MODE_2500BASEX: + case PHY_INTERFACE_MODE_10G_QXGMII: return SPEED_2500; case PHY_INTERFACE_MODE_5GBASER: @@ -500,7 +501,11 @@ static unsigned long phylink_get_capabilities(phy_interface_t interface, switch (interface) { case PHY_INTERFACE_MODE_USXGMII: - caps |= MAC_10000FD | MAC_5000FD | MAC_2500FD; + caps |= MAC_10000FD | MAC_5000FD; + fallthrough; + + case PHY_INTERFACE_MODE_10G_QXGMII: + caps |= MAC_2500FD; fallthrough; case PHY_INTERFACE_MODE_RGMII_TXID: @@ -885,26 +890,31 @@ static int phylink_parse_mode(struct phylink *pl, const char *managed; unsigned long caps; + if (pl->config->default_an_inband) + pl->cfg_link_an_mode = MLO_AN_INBAND; + dn = fwnode_get_named_child_node(fwnode, "fixed-link"); if (dn || fwnode_property_present(fwnode, "fixed-link")) pl->cfg_link_an_mode = MLO_AN_FIXED; fwnode_handle_put(dn); if ((fwnode_property_read_string(fwnode, "managed", &managed) == 0 && - strcmp(managed, "in-band-status") == 0) || - pl->config->ovr_an_inband) { + strcmp(managed, "in-band-status") == 0)) { if (pl->cfg_link_an_mode == MLO_AN_FIXED) { phylink_err(pl, "can't use both fixed-link and in-band-status\n"); 
return -EINVAL; } + pl->cfg_link_an_mode = MLO_AN_INBAND; + } + + if (pl->cfg_link_an_mode == MLO_AN_INBAND) { linkmode_zero(pl->supported); phylink_set(pl->supported, MII); phylink_set(pl->supported, Autoneg); phylink_set(pl->supported, Asym_Pause); phylink_set(pl->supported, Pause); - pl->cfg_link_an_mode = MLO_AN_INBAND; switch (pl->link_config.interface) { case PHY_INTERFACE_MODE_SGMII: @@ -921,6 +931,7 @@ static int phylink_parse_mode(struct phylink *pl, case PHY_INTERFACE_MODE_5GBASER: case PHY_INTERFACE_MODE_25GBASER: case PHY_INTERFACE_MODE_USXGMII: + case PHY_INTERFACE_MODE_10G_QXGMII: case PHY_INTERFACE_MODE_10GKR: case PHY_INTERFACE_MODE_10GBASER: case PHY_INTERFACE_MODE_XLGMII: @@ -1119,6 +1130,7 @@ static unsigned int phylink_pcs_neg_mode(unsigned int mode, case PHY_INTERFACE_MODE_QSGMII: case PHY_INTERFACE_MODE_QUSGMII: case PHY_INTERFACE_MODE_USXGMII: + case PHY_INTERFACE_MODE_10G_QXGMII: /* These protocols are designed for use with a PHY which * communicates its negotiation result back to the MAC via * inband communication. Note: there exist PHYs that run @@ -2270,7 +2282,7 @@ void phylink_suspend(struct phylink *pl, bool mac_wol) { ASSERT_RTNL(); - if (mac_wol && (!pl->netdev || pl->netdev->wol_enabled)) { + if (mac_wol && (!pl->netdev || pl->netdev->ethtool->wol_enabled)) { /* Wake-on-Lan enabled, MAC handling */ mutex_lock(&pl->state_mutex); diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 7ab41f95dae5..bed839237fb5 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -32,6 +32,15 @@ #define RTL8211F_PHYCR2 0x19 #define RTL8211F_INSR 0x1d +#define RTL8211F_LEDCR 0x10 +#define RTL8211F_LEDCR_MODE BIT(15) +#define RTL8211F_LEDCR_ACT_TXRX BIT(4) +#define RTL8211F_LEDCR_LINK_1000 BIT(3) +#define RTL8211F_LEDCR_LINK_100 BIT(1) +#define RTL8211F_LEDCR_LINK_10 BIT(0) +#define RTL8211F_LEDCR_MASK GENMASK(4, 0) +#define RTL8211F_LEDCR_SHIFT 5 + #define RTL8211F_TX_DELAY BIT(8) #define RTL8211F_RX_DELAY BIT(3) @@ -87,6 +96,8 @@ #define RTL_8221B_VN_CG 0x001cc84a #define RTL_8251B 0x001cc862 +#define RTL8211F_LED_COUNT 3 + MODULE_DESCRIPTION("Realtek PHY driver"); MODULE_AUTHOR("Johnson Leung"); MODULE_LICENSE("GPL"); @@ -476,6 +487,98 @@ static int rtl821x_resume(struct phy_device *phydev) return 0; } +static int rtl8211f_led_hw_is_supported(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + const unsigned long mask = BIT(TRIGGER_NETDEV_LINK_10) | + BIT(TRIGGER_NETDEV_LINK_100) | + BIT(TRIGGER_NETDEV_LINK_1000) | + BIT(TRIGGER_NETDEV_RX) | + BIT(TRIGGER_NETDEV_TX); + + /* The RTL8211F PHY supports these LED settings on up to three LEDs: + * - Link: Configurable subset of 10/100/1000 link rates + * - Active: Blink on activity, RX or TX is not differentiated + * The Active option has two modes, A and B: + * - A: Link and Active indication at configurable, but matching, + * subset of 10/100/1000 link rates + * - B: Link indication at configurable subset of 10/100/1000 link + * rates and Active indication always at all three 10+100+1000 + * link rates. + * This code currently uses mode B only. + */ + + if (index >= RTL8211F_LED_COUNT) + return -EINVAL; + + /* Filter out any other unsupported triggers. */ + if (rules & ~mask) + return -EOPNOTSUPP; + + /* RX and TX are not differentiated, either both are set or not set. 
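+	 * The XOR of the two negated tests below is true exactly when one of
+	 * RX/TX is requested without the other, which is the combination we
+	 * reject.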
*/ + if (!(rules & BIT(TRIGGER_NETDEV_RX)) ^ !(rules & BIT(TRIGGER_NETDEV_TX))) + return -EOPNOTSUPP; + + return 0; +} + +static int rtl8211f_led_hw_control_get(struct phy_device *phydev, u8 index, + unsigned long *rules) +{ + int val; + + val = phy_read_paged(phydev, 0xd04, RTL8211F_LEDCR); + if (val < 0) + return val; + + val >>= RTL8211F_LEDCR_SHIFT * index; + val &= RTL8211F_LEDCR_MASK; + + if (val & RTL8211F_LEDCR_LINK_10) + set_bit(TRIGGER_NETDEV_LINK_10, rules); + + if (val & RTL8211F_LEDCR_LINK_100) + set_bit(TRIGGER_NETDEV_LINK_100, rules); + + if (val & RTL8211F_LEDCR_LINK_1000) + set_bit(TRIGGER_NETDEV_LINK_1000, rules); + + if (val & RTL8211F_LEDCR_ACT_TXRX) { + set_bit(TRIGGER_NETDEV_RX, rules); + set_bit(TRIGGER_NETDEV_TX, rules); + } + + return 0; +} + +static int rtl8211f_led_hw_control_set(struct phy_device *phydev, u8 index, + unsigned long rules) +{ + const u16 mask = RTL8211F_LEDCR_MASK << (RTL8211F_LEDCR_SHIFT * index); + u16 reg = RTL8211F_LEDCR_MODE; /* Mode B */ + + if (index >= RTL8211F_LED_COUNT) + return -EINVAL; + + if (test_bit(TRIGGER_NETDEV_LINK_10, &rules)) + reg |= RTL8211F_LEDCR_LINK_10; + + if (test_bit(TRIGGER_NETDEV_LINK_100, &rules)) + reg |= RTL8211F_LEDCR_LINK_100; + + if (test_bit(TRIGGER_NETDEV_LINK_1000, &rules)) + reg |= RTL8211F_LEDCR_LINK_1000; + + if (test_bit(TRIGGER_NETDEV_RX, &rules) || + test_bit(TRIGGER_NETDEV_TX, &rules)) { + reg |= RTL8211F_LEDCR_ACT_TXRX; + } + + reg <<= RTL8211F_LEDCR_SHIFT * index; + + return phy_modify_paged(phydev, 0xd04, RTL8211F_LEDCR, mask, reg); +} + static int rtl8211e_config_init(struct phy_device *phydev) { int ret = 0, oldpage; @@ -1192,6 +1295,9 @@ static struct phy_driver realtek_drvs[] = { .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, .flags = PHY_ALWAYS_CALL_SUSPEND, + .led_hw_is_supported = rtl8211f_led_hw_is_supported, + .led_hw_control_get = rtl8211f_led_hw_control_get, + .led_hw_control_set = rtl8211f_led_hw_control_set, }, { PHY_ID_MATCH_EXACT(RTL_8211FVD_PHYID), .name = "RTL8211F-VD Gigabit Ethernet", @@ -1317,6 +1423,14 @@ static struct phy_driver realtek_drvs[] = { .resume = rtlgen_resume, .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, + }, { + PHY_ID_MATCH_EXACT(0x001ccad0), + .name = "RTL8224 2.5Gbps PHY", + .get_features = rtl822x_c45_get_features, + .config_aneg = rtl822x_c45_config_aneg, + .read_status = rtl822x_c45_read_status, + .suspend = genphy_c45_pma_suspend, + .resume = rtlgen_c45_resume, }, { PHY_ID_MATCH_EXACT(0x001cc961), .name = "RTL8366RB Gigabit Ethernet", diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c index 7b1bc5fcef9b..7c51daecf18e 100644 --- a/drivers/net/phy/xilinx_gmii2rgmii.c +++ b/drivers/net/phy/xilinx_gmii2rgmii.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #define XILINX_GMII2RGMII_REG 0x10 @@ -85,11 +86,17 @@ static int xgmiitorgmii_probe(struct mdio_device *mdiodev) struct device *dev = &mdiodev->dev; struct device_node *np = dev->of_node, *phy_node; struct gmii2rgmii *priv; + struct clk *clkin; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; + clkin = devm_clk_get_optional_enabled(dev, NULL); + if (IS_ERR(clkin)) + return dev_err_probe(dev, PTR_ERR(clkin), + "Failed to get and enable clock from Device Tree\n"); + phy_node = of_parse_phandle(np, "phy-handle", 0); if (!phy_node) { dev_err(dev, "Couldn't parse phy-handle\n"); diff --git a/drivers/net/pse-pd/pd692x0.c b/drivers/net/pse-pd/pd692x0.c index 6488b941703c..0af7db80b2f8 100644 
--- a/drivers/net/pse-pd/pd692x0.c +++ b/drivers/net/pse-pd/pd692x0.c @@ -73,6 +73,9 @@ enum { PD692X0_MSG_SET_PORT_PARAM, PD692X0_MSG_GET_PORT_STATUS, PD692X0_MSG_DOWNLOAD_CMD, + PD692X0_MSG_GET_PORT_CLASS, + PD692X0_MSG_GET_PORT_MEAS, + PD692X0_MSG_GET_PORT_PARAM, /* add new message above here */ PD692X0_MSG_CNT @@ -134,7 +137,7 @@ static const struct pd692x0_msg pd692x0_msg_template_list[PD692X0_MSG_CNT] = { [PD692X0_MSG_SET_PORT_PARAM] = { .key = PD692X0_KEY_CMD, .sub = {0x05, 0xc0}, - .data = { 0, 0xff, 0xff, 0xff, + .data = { 0xf, 0xff, 0xff, 0xff, 0x4e, 0x4e, 0x4e, 0x4e}, }, [PD692X0_MSG_GET_PORT_STATUS] = { @@ -149,6 +152,24 @@ static const struct pd692x0_msg pd692x0_msg_template_list[PD692X0_MSG_CNT] = { .data = {0x16, 0x16, 0x99, 0x4e, 0x4e, 0x4e, 0x4e, 0x4e}, }, + [PD692X0_MSG_GET_PORT_CLASS] = { + .key = PD692X0_KEY_REQ, + .sub = {0x05, 0xc4}, + .data = {0x4e, 0x4e, 0x4e, 0x4e, + 0x4e, 0x4e, 0x4e, 0x4e}, + }, + [PD692X0_MSG_GET_PORT_MEAS] = { + .key = PD692X0_KEY_REQ, + .sub = {0x05, 0xc5}, + .data = {0x4e, 0x4e, 0x4e, 0x4e, + 0x4e, 0x4e, 0x4e, 0x4e}, + }, + [PD692X0_MSG_GET_PORT_PARAM] = { + .key = PD692X0_KEY_REQ, + .sub = {0x05, 0xc0}, + .data = {0x4e, 0x4e, 0x4e, 0x4e, + 0x4e, 0x4e, 0x4e, 0x4e}, + }, }; static u8 pd692x0_build_msg(struct pd692x0_msg *msg, u8 echo) @@ -435,6 +456,184 @@ static int pd692x0_pi_is_enabled(struct pse_controller_dev *pcdev, int id) } } +struct pd692x0_pse_ext_state_mapping { + u32 status_code; + enum ethtool_c33_pse_ext_state pse_ext_state; + u32 pse_ext_substate; +}; + +static const struct pd692x0_pse_ext_state_mapping +pd692x0_pse_ext_state_map[] = { + {0x06, ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_HIGH_VOLTAGE}, + {0x07, ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_LOW_VOLTAGE}, + {0x08, ETHTOOL_C33_PSE_EXT_STATE_MR_PSE_ENABLE, + ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_PSE_ENABLE_DISABLE_PIN_ACTIVE}, + {0x0C, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_NON_EXISTING_PORT}, + {0x11, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNDEFINED_PORT}, + {0x12, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT}, + {0x1B, ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_DET_IN_PROCESS}, + {0x1C, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS}, + {0x1E, ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID, + ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_DETECTED_UNDERLOAD}, + {0x1F, ETHTOOL_C33_PSE_EXT_STATE_OVLD_DETECTED, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OVLD_DETECTED_OVERLOAD}, + {0x20, ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_BUDGET_EXCEEDED}, + {0x21, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT}, + {0x22, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_CONFIG_CHANGE}, + {0x24, ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_VOLTAGE_INJECTION}, + {0x25, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS}, + {0x34, ETHTOOL_C33_PSE_EXT_STATE_SHORT_DETECTED, + ETHTOOL_C33_PSE_EXT_SUBSTATE_SHORT_DETECTED_SHORT_CONDITION}, + {0x35, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION, + 
ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP}, + {0x36, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP}, + {0x37, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS}, + {0x3C, ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PORT_PW_LIMIT_EXCEEDS_CONTROLLER_BUDGET}, + {0x3D, ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PD_REQUEST_EXCEEDS_PORT_LIMIT}, + {0x41, ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_HW_PW_LIMIT}, + {0x43, ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS}, + {0xA7, ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_CONNECTION_CHECK_ERROR}, + {0xA8, ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID, + ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_CONNECTION_OPEN}, + { /* sentinel */ } +}; + +static void +pd692x0_get_ext_state(struct ethtool_c33_pse_ext_state_info *c33_ext_state_info, + u32 status_code) +{ + const struct pd692x0_pse_ext_state_mapping *ext_state_map; + + ext_state_map = pd692x0_pse_ext_state_map; + while (ext_state_map->status_code) { + if (ext_state_map->status_code == status_code) { + c33_ext_state_info->c33_pse_ext_state = ext_state_map->pse_ext_state; + c33_ext_state_info->__c33_pse_ext_substate = ext_state_map->pse_ext_substate; + return; + } + ext_state_map++; + } +} + +struct pd692x0_class_pw { + int class; + int class_cfg_value; + int class_pw; + int max_added_class_pw; +}; + +#define PD692X0_CLASS_PW_TABLE_SIZE 4 +/* 4/2 pairs class configuration power table in compliance mode. + * Need to be arranged in ascending order of power support. + */ +static const struct pd692x0_class_pw +pd692x0_class_pw_table[PD692X0_CLASS_PW_TABLE_SIZE] = { + {.class = 3, .class_cfg_value = 0x3, .class_pw = 15000, .max_added_class_pw = 3100}, + {.class = 4, .class_cfg_value = 0x2, .class_pw = 30000, .max_added_class_pw = 8000}, + {.class = 6, .class_cfg_value = 0x1, .class_pw = 60000, .max_added_class_pw = 5000}, + {.class = 8, .class_cfg_value = 0x0, .class_pw = 90000, .max_added_class_pw = 7500}, +}; + +static int pd692x0_pi_get_pw_from_table(int op_mode, int added_pw) +{ + const struct pd692x0_class_pw *pw_table; + int i; + + pw_table = pd692x0_class_pw_table; + for (i = 0; i < PD692X0_CLASS_PW_TABLE_SIZE; i++, pw_table++) { + if (pw_table->class_cfg_value == op_mode) + return pw_table->class_pw + added_pw * 100; + } + + return -ERANGE; +} + +static int pd692x0_pi_set_pw_from_table(struct device *dev, + struct pd692x0_msg *msg, int pw) +{ + const struct pd692x0_class_pw *pw_table; + int i; + + pw_table = pd692x0_class_pw_table; + if (pw < pw_table->class_pw) { + dev_err(dev, + "Power limit %dmW not supported. Ranges minimal available: [%d-%d]\n", + pw, + pw_table->class_pw, + pw_table->class_pw + pw_table->max_added_class_pw); + return -ERANGE; + } + + for (i = 0; i < PD692X0_CLASS_PW_TABLE_SIZE; i++, pw_table++) { + if (pw > (pw_table->class_pw + pw_table->max_added_class_pw)) + continue; + + if (pw < pw_table->class_pw) { + dev_err(dev, + "Power limit %dmW not supported. 
Ranges available: [%d-%d] or [%d-%d]\n", + pw, + (pw_table - 1)->class_pw, + (pw_table - 1)->class_pw + (pw_table - 1)->max_added_class_pw, + pw_table->class_pw, + pw_table->class_pw + pw_table->max_added_class_pw); + return -ERANGE; + } + + msg->data[2] = pw_table->class_cfg_value; + msg->data[3] = (pw - pw_table->class_pw) / 100; + return 0; + } + + pw_table--; + dev_warn(dev, + "Power limit %dmW not supported. Set to highest power limit %dmW\n", + pw, pw_table->class_pw + pw_table->max_added_class_pw); + msg->data[2] = pw_table->class_cfg_value; + msg->data[3] = pw_table->max_added_class_pw / 100; + return 0; +} + +static int +pd692x0_pi_get_pw_ranges(struct pse_control_status *st) +{ + const struct pd692x0_class_pw *pw_table; + int i; + + pw_table = pd692x0_class_pw_table; + st->c33_pw_limit_ranges = kcalloc(PD692X0_CLASS_PW_TABLE_SIZE, + sizeof(struct ethtool_c33_pse_pw_limit_range), + GFP_KERNEL); + if (!st->c33_pw_limit_ranges) + return -ENOMEM; + + for (i = 0; i < PD692X0_CLASS_PW_TABLE_SIZE; i++, pw_table++) { + st->c33_pw_limit_ranges[i].min = pw_table->class_pw; + st->c33_pw_limit_ranges[i].max = pw_table->class_pw + pw_table->max_added_class_pw; + } + + st->c33_pw_limit_nb_ranges = i; + return 0; +} + static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev, unsigned long id, struct netlink_ext_ack *extack, @@ -442,6 +641,7 @@ static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev, { struct pd692x0_priv *priv = to_pd692x0_priv(pcdev); struct pd692x0_msg msg, buf = {0}; + u32 class; int ret; ret = pd692x0_fw_unavailable(priv); @@ -471,6 +671,36 @@ static int pd692x0_ethtool_get_status(struct pse_controller_dev *pcdev, priv->admin_state[id] = status->c33_admin_state; + pd692x0_get_ext_state(&status->c33_ext_state_info, buf.sub[0]); + status->c33_actual_pw = (buf.data[0] << 4 | buf.data[1]) * 100; + + msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM]; + msg.sub[2] = id; + memset(&buf, 0, sizeof(buf)); + ret = pd692x0_sendrecv_msg(priv, &msg, &buf); + if (ret < 0) + return ret; + + ret = pd692x0_pi_get_pw_from_table(buf.data[0], buf.data[1]); + if (ret < 0) + return ret; + status->c33_avail_pw_limit = ret; + + memset(&buf, 0, sizeof(buf)); + msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_CLASS]; + msg.sub[2] = id; + ret = pd692x0_sendrecv_msg(priv, &msg, &buf); + if (ret < 0) + return ret; + + class = buf.data[3] >> 4; + if (class <= 8) + status->c33_pw_class = class; + + ret = pd692x0_pi_get_pw_ranges(status); + if (ret < 0) + return ret; + return 0; } @@ -749,12 +979,97 @@ out: return ret; } +static int pd692x0_pi_get_voltage(struct pse_controller_dev *pcdev, int id) +{ + struct pd692x0_priv *priv = to_pd692x0_priv(pcdev); + struct pd692x0_msg msg, buf = {0}; + int ret; + + ret = pd692x0_fw_unavailable(priv); + if (ret) + return ret; + + msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_MEAS]; + msg.sub[2] = id; + ret = pd692x0_sendrecv_msg(priv, &msg, &buf); + if (ret < 0) + return ret; + + /* Convert 0.1V unit to uV */ + return (buf.sub[0] << 8 | buf.sub[1]) * 100000; +} + +static int pd692x0_pi_get_current_limit(struct pse_controller_dev *pcdev, + int id) +{ + struct pd692x0_priv *priv = to_pd692x0_priv(pcdev); + struct pd692x0_msg msg, buf = {0}; + int mW, uV, uA, ret; + s64 tmp_64; + + msg = pd692x0_msg_template_list[PD692X0_MSG_GET_PORT_PARAM]; + msg.sub[2] = id; + ret = pd692x0_sendrecv_msg(priv, &msg, &buf); + if (ret < 0) + return ret; + + ret = pd692x0_pi_get_pw_from_table(buf.data[2], buf.data[3]); + if (ret < 0) + 
return ret; + mW = ret; + + ret = pd692x0_pi_get_voltage(pcdev, id); + if (ret < 0) + return ret; + uV = ret; + + tmp_64 = mW; + tmp_64 *= 1000000000ull; + /* uA = mW * 1000000000 / uV */ + uA = DIV_ROUND_CLOSEST_ULL(tmp_64, uV); + return uA; +} + +static int pd692x0_pi_set_current_limit(struct pse_controller_dev *pcdev, + int id, int max_uA) +{ + struct pd692x0_priv *priv = to_pd692x0_priv(pcdev); + struct device *dev = &priv->client->dev; + struct pd692x0_msg msg, buf = {0}; + int uV, ret, mW; + s64 tmp_64; + + ret = pd692x0_fw_unavailable(priv); + if (ret) + return ret; + + ret = pd692x0_pi_get_voltage(pcdev, id); + if (ret < 0) + return ret; + uV = ret; + + msg = pd692x0_msg_template_list[PD692X0_MSG_SET_PORT_PARAM]; + msg.sub[2] = id; + tmp_64 = uV; + tmp_64 *= max_uA; + /* mW = uV * uA / 1000000000 */ + mW = DIV_ROUND_CLOSEST_ULL(tmp_64, 1000000000); + ret = pd692x0_pi_set_pw_from_table(dev, &msg, mW); + if (ret) + return ret; + + return pd692x0_sendrecv_msg(priv, &msg, &buf); +} + static const struct pse_controller_ops pd692x0_ops = { .setup_pi_matrix = pd692x0_setup_pi_matrix, .ethtool_get_status = pd692x0_ethtool_get_status, .pi_enable = pd692x0_pi_enable, .pi_disable = pd692x0_pi_disable, .pi_is_enabled = pd692x0_pi_is_enabled, + .pi_get_voltage = pd692x0_pi_get_voltage, + .pi_get_current_limit = pd692x0_pi_get_current_limit, + .pi_set_current_limit = pd692x0_pi_set_current_limit, }; #define PD692X0_FW_LINE_MAX_SZ 0xff @@ -1194,8 +1509,8 @@ static void pd692x0_i2c_remove(struct i2c_client *client) } static const struct i2c_device_id pd692x0_id[] = { - { PD692X0_PSE_NAME, 0 }, - { }, + { PD692X0_PSE_NAME }, + { } }; MODULE_DEVICE_TABLE(i2c, pd692x0_id); diff --git a/drivers/net/pse-pd/pse_core.c b/drivers/net/pse-pd/pse_core.c index 795ab264eaf2..ec20953e0f82 100644 --- a/drivers/net/pse-pd/pse_core.c +++ b/drivers/net/pse-pd/pse_core.c @@ -265,10 +265,113 @@ static int pse_pi_disable(struct regulator_dev *rdev) return ret; } +static int _pse_pi_get_voltage(struct regulator_dev *rdev) +{ + struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev); + const struct pse_controller_ops *ops; + int id; + + ops = pcdev->ops; + if (!ops->pi_get_voltage) + return -EOPNOTSUPP; + + id = rdev_get_id(rdev); + return ops->pi_get_voltage(pcdev, id); +} + +static int pse_pi_get_voltage(struct regulator_dev *rdev) +{ + struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev); + int ret; + + mutex_lock(&pcdev->lock); + ret = _pse_pi_get_voltage(rdev); + mutex_unlock(&pcdev->lock); + + return ret; +} + +static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev, + int id, + struct netlink_ext_ack *extack, + struct pse_control_status *status); + +static int pse_pi_get_current_limit(struct regulator_dev *rdev) +{ + struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev); + const struct pse_controller_ops *ops; + struct netlink_ext_ack extack = {}; + struct pse_control_status st = {}; + int id, uV, ret; + s64 tmp_64; + + ops = pcdev->ops; + id = rdev_get_id(rdev); + mutex_lock(&pcdev->lock); + if (ops->pi_get_current_limit) { + ret = ops->pi_get_current_limit(pcdev, id); + goto out; + } + + /* If pi_get_current_limit() callback not populated get voltage + * from pi_get_voltage() and power limit from ethtool_get_status() + * to calculate current limit. 
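+	 * The conversion is plain I = P / V in fixed point: with the limit in
+	 * mW and the voltage in uV, uA = mW * 10^9 / uV. For example, a
+	 * 30000 mW limit at 50 V (50000000 uV) comes out as 600000 uA,
+	 * i.e. 0.6 A.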
+ */ + ret = _pse_pi_get_voltage(rdev); + if (!ret) { + dev_err(pcdev->dev, "Voltage null\n"); + ret = -ERANGE; + goto out; + } + if (ret < 0) + goto out; + uV = ret; + + ret = _pse_ethtool_get_status(pcdev, id, &extack, &st); + if (ret) + goto out; + + if (!st.c33_avail_pw_limit) { + ret = -ENODATA; + goto out; + } + + tmp_64 = st.c33_avail_pw_limit; + tmp_64 *= 1000000000ull; + /* uA = mW * 1000000000 / uV */ + ret = DIV_ROUND_CLOSEST_ULL(tmp_64, uV); + +out: + mutex_unlock(&pcdev->lock); + return ret; +} + +static int pse_pi_set_current_limit(struct regulator_dev *rdev, int min_uA, + int max_uA) +{ + struct pse_controller_dev *pcdev = rdev_get_drvdata(rdev); + const struct pse_controller_ops *ops; + int id, ret; + + ops = pcdev->ops; + if (!ops->pi_set_current_limit) + return -EOPNOTSUPP; + + id = rdev_get_id(rdev); + mutex_lock(&pcdev->lock); + ret = ops->pi_set_current_limit(pcdev, id, max_uA); + mutex_unlock(&pcdev->lock); + + return ret; +} + static const struct regulator_ops pse_pi_ops = { .is_enabled = pse_pi_is_enabled, .enable = pse_pi_enable, .disable = pse_pi_disable, + .get_voltage = pse_pi_get_voltage, + .get_current_limit = pse_pi_get_current_limit, + .set_current_limit = pse_pi_set_current_limit, }; static int @@ -298,7 +401,9 @@ devm_pse_pi_regulator_register(struct pse_controller_dev *pcdev, rdesc->ops = &pse_pi_ops; rdesc->owner = pcdev->owner; - rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS; + rinit_data->constraints.valid_ops_mask = REGULATOR_CHANGE_STATUS | + REGULATOR_CHANGE_CURRENT; + rinit_data->constraints.max_uA = MAX_PI_CURRENT; rinit_data->supply_regulator = "vpwr"; rconfig.dev = pcdev->dev; @@ -626,6 +731,23 @@ out: } EXPORT_SYMBOL_GPL(of_pse_control_get); +static int _pse_ethtool_get_status(struct pse_controller_dev *pcdev, + int id, + struct netlink_ext_ack *extack, + struct pse_control_status *status) +{ + const struct pse_controller_ops *ops; + + ops = pcdev->ops; + if (!ops->ethtool_get_status) { + NL_SET_ERR_MSG(extack, + "PSE driver does not support status report"); + return -EOPNOTSUPP; + } + + return ops->ethtool_get_status(pcdev, id, extack, status); +} + /** * pse_ethtool_get_status - get status of PSE control * @psec: PSE control pointer @@ -638,19 +760,10 @@ int pse_ethtool_get_status(struct pse_control *psec, struct netlink_ext_ack *extack, struct pse_control_status *status) { - const struct pse_controller_ops *ops; int err; - ops = psec->pcdev->ops; - - if (!ops->ethtool_get_status) { - NL_SET_ERR_MSG(extack, - "PSE driver does not support status report"); - return -EOPNOTSUPP; - } - mutex_lock(&psec->pcdev->lock); - err = ops->ethtool_get_status(psec->pcdev, psec->id, extack, status); + err = _pse_ethtool_get_status(psec->pcdev, psec->id, extack, status); mutex_unlock(&psec->pcdev->lock); return err; @@ -719,19 +832,56 @@ int pse_ethtool_set_config(struct pse_control *psec, { int err = 0; - if (pse_has_c33(psec)) { + if (pse_has_c33(psec) && config->c33_admin_control) { err = pse_ethtool_c33_set_config(psec, config); if (err) return err; } - if (pse_has_podl(psec)) + if (pse_has_podl(psec) && config->podl_admin_control) err = pse_ethtool_podl_set_config(psec, config); return err; } EXPORT_SYMBOL_GPL(pse_ethtool_set_config); +/** + * pse_ethtool_set_pw_limit - set PSE control power limit + * @psec: PSE control pointer + * @extack: extack for reporting useful error messages + * @pw_limit: power limit value in mW + * + * Return: 0 on success and failure value on error + */ +int pse_ethtool_set_pw_limit(struct pse_control *psec, 
+ struct netlink_ext_ack *extack, + const unsigned int pw_limit) +{ + int uV, uA, ret; + s64 tmp_64; + + ret = regulator_get_voltage(psec->ps); + if (!ret) { + NL_SET_ERR_MSG(extack, + "Can't calculate the current, PSE voltage read is 0"); + return -ERANGE; + } + if (ret < 0) { + NL_SET_ERR_MSG(extack, + "Error reading PSE voltage"); + return ret; + } + uV = ret; + + tmp_64 = pw_limit; + tmp_64 *= 1000000000ull; + /* uA = mW * 1000000000 / uV */ + uA = DIV_ROUND_CLOSEST_ULL(tmp_64, uV); + + return regulator_set_current_limit(psec->ps, 0, uA); +} +EXPORT_SYMBOL_GPL(pse_ethtool_set_pw_limit); + bool pse_has_podl(struct pse_control *psec) { return psec->pcdev->types & ETHTOOL_PSE_PODL; diff --git a/drivers/net/pse-pd/tps23881.c b/drivers/net/pse-pd/tps23881.c index 98ffbb1bbf13..61f6ad9c1934 100644 --- a/drivers/net/pse-pd/tps23881.c +++ b/drivers/net/pse-pd/tps23881.c @@ -794,8 +794,8 @@ static int tps23881_i2c_probe(struct i2c_client *client) } static const struct i2c_device_id tps23881_id[] = { - { "tps23881", 0 }, - { }, + { "tps23881" }, + { } }; MODULE_DEVICE_TABLE(i2c, tps23881_id); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 9254bca2813d..9b24861464bc 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1661,6 +1661,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, int len, int *skb_xdp) { struct page_frag *alloc_frag = &current->task_frag; + struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; struct bpf_prog *xdp_prog; int buflen = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); char *buf; @@ -1700,6 +1701,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, local_bh_disable(); rcu_read_lock(); + bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) { struct xdp_buff xdp; @@ -1728,12 +1730,14 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun, pad = xdp.data - xdp.data_hard_start; len = xdp.data_end - xdp.data; } + bpf_net_ctx_clear(bpf_net_ctx); rcu_read_unlock(); local_bh_enable(); return __tun_build_skb(tfile, alloc_frag, buf, buflen, len, pad); out: + bpf_net_ctx_clear(bpf_net_ctx); rcu_read_unlock(); local_bh_enable(); return NULL; @@ -2566,6 +2570,7 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) if (m->msg_controllen == sizeof(struct tun_msg_ctl) && ctl && ctl->type == TUN_MSG_PTR) { + struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; struct tun_page tpage; int n = ctl->num; int flush = 0, queued = 0; @@ -2574,6 +2579,7 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) local_bh_disable(); rcu_read_lock(); + bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); for (i = 0; i < n; i++) { xdp = &((struct xdp_buff *)ctl->ptr)[i]; @@ -2588,6 +2594,7 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) if (tfile->napi_enabled && queued > 0) napi_schedule(&tfile->napi); + bpf_net_ctx_clear(bpf_net_ctx); rcu_read_unlock(); local_bh_enable(); diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c index bf76ecccc2e6..d5c47a2a62dc 100644 --- a/drivers/net/usb/cdc_ncm.c +++ b/drivers/net/usb/cdc_ncm.c @@ -933,7 +933,8 @@ int cdc_ncm_bind_common(struct usbnet *dev, struct usb_interface *intf, u8 data_ cdc_ncm_find_endpoints(dev, ctx->data); cdc_ncm_find_endpoints(dev, ctx->control); - if (!dev->in || !dev->out || !dev->status) { + if (!dev->in || !dev->out || + (!dev->status && dev->driver_info->flags & FLAG_LINK_INTR)) { dev_dbg(&intf->dev, "failed to collect endpoints\n"); goto
error2; } @@ -1925,6 +1926,34 @@ static const struct driver_info cdc_ncm_zlp_info = { .set_rx_mode = usbnet_cdc_update_filter, }; +/* Same as cdc_ncm_info, but with FLAG_SEND_ZLP */ +static const struct driver_info apple_tethering_interface_info = { + .description = "CDC NCM (Apple Tethering)", + .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET + | FLAG_LINK_INTR | FLAG_ETHER | FLAG_SEND_ZLP, + .bind = cdc_ncm_bind, + .unbind = cdc_ncm_unbind, + .manage_power = usbnet_manage_power, + .status = cdc_ncm_status, + .rx_fixup = cdc_ncm_rx_fixup, + .tx_fixup = cdc_ncm_tx_fixup, + .set_rx_mode = usbnet_cdc_update_filter, +}; + +/* Same as apple_tethering_interface_info, but without FLAG_LINK_INTR */ +static const struct driver_info apple_private_interface_info = { + .description = "CDC NCM (Apple Private)", + .flags = FLAG_POINTTOPOINT | FLAG_NO_SETINT | FLAG_MULTI_PACKET + | FLAG_ETHER | FLAG_SEND_ZLP, + .bind = cdc_ncm_bind, + .unbind = cdc_ncm_unbind, + .manage_power = usbnet_manage_power, + .status = cdc_ncm_status, + .rx_fixup = cdc_ncm_rx_fixup, + .tx_fixup = cdc_ncm_tx_fixup, + .set_rx_mode = usbnet_cdc_update_filter, +}; + /* Same as cdc_ncm_info, but with FLAG_WWAN */ static const struct driver_info wwan_info = { .description = "Mobile Broadband Network Device", @@ -1954,6 +1983,22 @@ static const struct driver_info wwan_noarp_info = { }; static const struct usb_device_id cdc_devs[] = { + /* iPhone */ + { USB_DEVICE_INTERFACE_NUMBER(0x05ac, 0x12a8, 2), + .driver_info = (unsigned long)&apple_tethering_interface_info, + }, + { USB_DEVICE_INTERFACE_NUMBER(0x05ac, 0x12a8, 4), + .driver_info = (unsigned long)&apple_private_interface_info, + }, + + /* iPad */ + { USB_DEVICE_INTERFACE_NUMBER(0x05ac, 0x12ab, 2), + .driver_info = (unsigned long)&apple_tethering_interface_info, + }, + { USB_DEVICE_INTERFACE_NUMBER(0x05ac, 0x12ab, 4), + .driver_info = (unsigned long)&apple_private_interface_info, + }, + /* Ericsson MBM devices like F5521gw */ { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_VENDOR, diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 5a2c38b63012..8adf77e3557e 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -380,11 +380,6 @@ struct skb_data { /* skb->cb is one of these */ int num_of_packet; }; -struct usb_context { - struct usb_ctrlrequest req; - struct lan78xx_net *dev; -}; - #define EVENT_TX_HALT 0 #define EVENT_RX_HALT 1 #define EVENT_RX_MEMORY 2 @@ -2946,6 +2941,8 @@ static int lan78xx_reset(struct lan78xx_net *dev) return ret; buf |= HW_CFG_MEF_; + buf |= HW_CFG_CLK125_EN_; + buf |= HW_CFG_REFCLK25_EN_; ret = lan78xx_write_reg(dev, HW_CFG, buf); if (ret < 0) @@ -3034,8 +3031,11 @@ static int lan78xx_reset(struct lan78xx_net *dev) return ret; /* LAN7801 only has RGMII mode */ - if (dev->chipid == ID_REV_CHIP_ID_7801_) + if (dev->chipid == ID_REV_CHIP_ID_7801_) { buf &= ~MAC_CR_GMII_EN_; + /* Enable Auto Duplex and Auto speed */ + buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_; + } if (dev->chipid == ID_REV_CHIP_ID_7800_ || dev->chipid == ID_REV_CHIP_ID_7850_) { diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 19df1cd9f072..15e12f46d0ea 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1774,6 +1774,7 @@ static int vendor_mac_passthru_addr_read(struct r8152 *tp, struct sockaddr *sa) goto amacout; } memcpy(sa->sa_data, buf, 6); + tp->netdev->addr_assign_type = NET_ADDR_STOLEN; netif_info(tp, probe, tp->netdev, "Using pass-thru MAC addr %pM\n", sa->sa_data); @@ 
-8554,6 +8555,19 @@ static int rtl8152_system_resume(struct r8152 *tp) usb_submit_urb(tp->intr_urb, GFP_NOIO); } + /* If the device is RTL8152_INACCESSIBLE here then we should do a + * reset. This is important because the usb_lock_device_for_reset() + * that happens as a result of usb_queue_reset_device() will silently + * fail if the device was suspended or if too much time passed. + * + * NOTE: The device is locked here so we can directly do the reset. + * We don't need usb_lock_device_for_reset() because that's just a + * wrapper over device_lock() and device_resume() (which calls us) + * does that for us. + */ + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) + usb_reset_device(tp->udev); + return 0; } @@ -8634,6 +8648,13 @@ static int rtl8152_system_suspend(struct r8152 *tp) tasklet_enable(&tp->tx_tl); } + /* If we're inaccessible here then some of the work that we did to + * get the adapter ready for suspend didn't work. Queue up a wakeup + * event so we can try again. + */ + if (test_bit(RTL8152_INACCESSIBLE, &tp->flags)) + pm_wakeup_event(&tp->udev->dev, 0); + return 0; } diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 0726e18bee6f..78c821349f48 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -61,11 +61,6 @@ struct smsc75xx_priv { u8 suspend_flags; }; -struct usb_context { - struct usb_ctrlrequest req; - struct usbnet *dev; -}; - static bool turbo_mode = true; module_param(turbo_mode, bool, 0644); MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction"); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index ea10db9a09fa..af474cc191d0 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -25,6 +25,7 @@ #include #include #include +#include static int napi_weight = NAPI_POLL_WEIGHT; module_param(napi_weight, int, 0444); @@ -40,14 +41,12 @@ module_param(napi_tx, bool, 0644); #define VIRTNET_RX_PAD (NET_IP_ALIGN + NET_SKB_PAD) -/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */ -#define VIRTIO_XDP_HEADROOM 256 - /* Separating two types of XDP xmit */ #define VIRTIO_XDP_TX BIT(0) #define VIRTIO_XDP_REDIR BIT(1) -#define VIRTIO_XDP_FLAG BIT(0) +#define VIRTIO_XDP_FLAG BIT(0) +#define VIRTIO_ORPHAN_FLAG BIT(1) /* RX packet size EWMA. The average packet size is used to determine the packet * buffer size when refilling RX rings. As the entire RX ring may be refilled @@ -85,6 +84,8 @@ struct virtnet_stat_desc { struct virtnet_sq_free_stats { u64 packets; u64 bytes; + u64 napi_packets; + u64 napi_bytes; }; struct virtnet_sq_stats { @@ -348,6 +349,13 @@ struct receive_queue { /* Record the last dma info to free after new pages is allocated. 
*/ struct virtnet_rq_dma *last_dma; + + struct xsk_buff_pool *xsk_pool; + + /* xdp rxq used by xsk */ + struct xdp_rxq_info xsk_rxq_info; + + struct xdp_buff **xsk_buffs; }; /* This structure can contain rss message with maximum settings for indirection table and keysize @@ -490,6 +498,16 @@ struct virtio_net_common_hdr { }; static void virtnet_sq_free_unused_buf(struct virtqueue *vq, void *buf); +static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp, + struct net_device *dev, + unsigned int *xdp_xmit, + struct virtnet_rq_stats *stats); +static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq, + struct sk_buff *skb, u8 flags); +static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb, + struct sk_buff *curr_skb, + struct page *page, void *buf, + int len, int truesize); static bool is_xdp_frame(void *ptr) { @@ -506,29 +524,50 @@ static struct xdp_frame *ptr_to_xdp(void *ptr) return (struct xdp_frame *)((unsigned long)ptr & ~VIRTIO_XDP_FLAG); } -static void __free_old_xmit(struct send_queue *sq, bool in_napi, - struct virtnet_sq_free_stats *stats) +static bool is_orphan_skb(void *ptr) +{ + return (unsigned long)ptr & VIRTIO_ORPHAN_FLAG; +} + +static void *skb_to_ptr(struct sk_buff *skb, bool orphan) +{ + return (void *)((unsigned long)skb | (orphan ? VIRTIO_ORPHAN_FLAG : 0)); +} + +static struct sk_buff *ptr_to_skb(void *ptr) +{ + return (struct sk_buff *)((unsigned long)ptr & ~VIRTIO_ORPHAN_FLAG); +} + +static void __free_old_xmit(struct send_queue *sq, struct netdev_queue *txq, + bool in_napi, struct virtnet_sq_free_stats *stats) { unsigned int len; void *ptr; while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { - ++stats->packets; - if (!is_xdp_frame(ptr)) { - struct sk_buff *skb = ptr; + struct sk_buff *skb = ptr_to_skb(ptr); pr_debug("Sent skb %p\n", skb); - stats->bytes += skb->len; + if (is_orphan_skb(ptr)) { + stats->packets++; + stats->bytes += skb->len; + } else { + stats->napi_packets++; + stats->napi_bytes += skb->len; + } napi_consume_skb(skb, in_napi); } else { struct xdp_frame *frame = ptr_to_xdp(ptr); + stats->packets++; stats->bytes += xdp_get_frame_len(frame); xdp_return_frame(frame); } } + netdev_tx_completed_queue(txq, stats->napi_packets, stats->napi_bytes); } /* Converting between virtqueue no. and kernel tx/rx queue no. @@ -949,27 +988,33 @@ static void virtnet_rq_unmap_free_buf(struct virtqueue *vq, void *buf) rq = &vi->rq[i]; + if (rq->xsk_pool) { + xsk_buff_free((struct xdp_buff *)buf); + return; + } + if (!vi->big_packets || vi->mergeable_rx_bufs) virtnet_rq_unmap(rq, buf, 0); virtnet_rq_free_buf(vi, rq, buf); } -static void free_old_xmit(struct send_queue *sq, bool in_napi) +static void free_old_xmit(struct send_queue *sq, struct netdev_queue *txq, + bool in_napi) { struct virtnet_sq_free_stats stats = {0}; - __free_old_xmit(sq, in_napi, &stats); + __free_old_xmit(sq, txq, in_napi, &stats); /* Avoid overhead when no packets have been processed * happens when called speculatively from start_xmit. 
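* Note that completions are now split between orphaned skbs and * napi-tracked skbs, so both counters have to be checked before * bailing out, and the queue stats below account for the sum of the two.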
*/ - if (!stats.packets) + if (!stats.packets && !stats.napi_packets) return; u64_stats_update_begin(&sq->stats.syncp); - u64_stats_add(&sq->stats.bytes, stats.bytes); - u64_stats_add(&sq->stats.packets, stats.packets); + u64_stats_add(&sq->stats.bytes, stats.bytes + stats.napi_bytes); + u64_stats_add(&sq->stats.packets, stats.packets + stats.napi_packets); u64_stats_update_end(&sq->stats.syncp); } @@ -1003,7 +1048,9 @@ static void check_sq_full_and_disable(struct virtnet_info *vi, * early means 16 slots are typically wasted. */ if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { - netif_stop_subqueue(dev, qnum); + struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); + + netif_tx_stop_queue(txq); u64_stats_update_begin(&sq->stats.syncp); u64_stats_inc(&sq->stats.stop); u64_stats_update_end(&sq->stats.syncp); @@ -1012,7 +1059,7 @@ static void check_sq_full_and_disable(struct virtnet_info *vi, virtqueue_napi_schedule(&sq->napi, sq->vq); } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { /* More just got used, free them then recheck. */ - free_old_xmit(sq, false); + free_old_xmit(sq, txq, false); if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { netif_start_subqueue(dev, qnum); u64_stats_update_begin(&sq->stats.syncp); @@ -1024,6 +1071,329 @@ static void check_sq_full_and_disable(struct virtnet_info *vi, } } +static void sg_fill_dma(struct scatterlist *sg, dma_addr_t addr, u32 len) +{ + sg->dma_address = addr; + sg->length = len; +} + +static struct xdp_buff *buf_to_xdp(struct virtnet_info *vi, + struct receive_queue *rq, void *buf, u32 len) +{ + struct xdp_buff *xdp; + u32 bufsize; + + xdp = (struct xdp_buff *)buf; + + bufsize = xsk_pool_get_rx_frame_size(rq->xsk_pool) + vi->hdr_len; + + if (unlikely(len > bufsize)) { + pr_debug("%s: rx error: len %u exceeds truesize %u\n", + vi->dev->name, len, bufsize); + DEV_STATS_INC(vi->dev, rx_length_errors); + xsk_buff_free(xdp); + return NULL; + } + + xsk_buff_set_size(xdp, len); + xsk_buff_dma_sync_for_cpu(xdp); + + return xdp; +} + +static struct sk_buff *xsk_construct_skb(struct receive_queue *rq, + struct xdp_buff *xdp) +{ + unsigned int metasize = xdp->data - xdp->data_meta; + struct sk_buff *skb; + unsigned int size; + + size = xdp->data_end - xdp->data_hard_start; + skb = napi_alloc_skb(&rq->napi, size); + if (unlikely(!skb)) { + xsk_buff_free(xdp); + return NULL; + } + + skb_reserve(skb, xdp->data_meta - xdp->data_hard_start); + + size = xdp->data_end - xdp->data_meta; + memcpy(__skb_put(skb, size), xdp->data_meta, size); + + if (metasize) { + __skb_pull(skb, metasize); + skb_metadata_set(skb, metasize); + } + + xsk_buff_free(xdp); + + return skb; +} + +static struct sk_buff *virtnet_receive_xsk_small(struct net_device *dev, struct virtnet_info *vi, + struct receive_queue *rq, struct xdp_buff *xdp, + unsigned int *xdp_xmit, + struct virtnet_rq_stats *stats) +{ + struct bpf_prog *prog; + u32 ret; + + ret = XDP_PASS; + rcu_read_lock(); + prog = rcu_dereference(rq->xdp_prog); + if (prog) + ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats); + rcu_read_unlock(); + + switch (ret) { + case XDP_PASS: + return xsk_construct_skb(rq, xdp); + + case XDP_TX: + case XDP_REDIRECT: + return NULL; + + default: + /* drop packet */ + xsk_buff_free(xdp); + u64_stats_inc(&stats->drops); + return NULL; + } +} + +static void xsk_drop_follow_bufs(struct net_device *dev, + struct receive_queue *rq, + u32 num_buf, + struct virtnet_rq_stats *stats) +{ + struct xdp_buff *xdp; + u32 len; + + while (num_buf-- > 1) { + xdp = virtqueue_get_buf(rq->vq, &len); + if 
(unlikely(!xdp)) { + pr_debug("%s: rx error: %d buffers missing\n", + dev->name, num_buf); + DEV_STATS_INC(dev, rx_length_errors); + break; + } + u64_stats_add(&stats->bytes, len); + xsk_buff_free(xdp); + } +} + +static int xsk_append_merge_buffer(struct virtnet_info *vi, + struct receive_queue *rq, + struct sk_buff *head_skb, + u32 num_buf, + struct virtio_net_hdr_mrg_rxbuf *hdr, + struct virtnet_rq_stats *stats) +{ + struct sk_buff *curr_skb; + struct xdp_buff *xdp; + u32 len, truesize; + struct page *page; + void *buf; + + curr_skb = head_skb; + + while (--num_buf) { + buf = virtqueue_get_buf(rq->vq, &len); + if (unlikely(!buf)) { + pr_debug("%s: rx error: %d buffers out of %d missing\n", + vi->dev->name, num_buf, + virtio16_to_cpu(vi->vdev, + hdr->num_buffers)); + DEV_STATS_INC(vi->dev, rx_length_errors); + return -EINVAL; + } + + u64_stats_add(&stats->bytes, len); + + xdp = buf_to_xdp(vi, rq, buf, len); + if (!xdp) + goto err; + + buf = napi_alloc_frag(len); + if (!buf) { + xsk_buff_free(xdp); + goto err; + } + + memcpy(buf, xdp->data - vi->hdr_len, len); + + xsk_buff_free(xdp); + + page = virt_to_page(buf); + + truesize = len; + + curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page, + buf, len, truesize); + if (!curr_skb) { + put_page(page); + goto err; + } + } + + return 0; + +err: + xsk_drop_follow_bufs(vi->dev, rq, num_buf, stats); + return -EINVAL; +} + +static struct sk_buff *virtnet_receive_xsk_merge(struct net_device *dev, struct virtnet_info *vi, + struct receive_queue *rq, struct xdp_buff *xdp, + unsigned int *xdp_xmit, + struct virtnet_rq_stats *stats) +{ + struct virtio_net_hdr_mrg_rxbuf *hdr; + struct bpf_prog *prog; + struct sk_buff *skb; + u32 ret, num_buf; + + hdr = xdp->data - vi->hdr_len; + num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers); + + ret = XDP_PASS; + rcu_read_lock(); + prog = rcu_dereference(rq->xdp_prog); + /* TODO: support multi buffer. 
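+ * Until then, the XDP program is only run when the packet fits in a + * single buffer (num_buf == 1); multi-buffer packets bypass the + * program and are handled as XDP_PASS below.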
*/ + if (prog && num_buf == 1) + ret = virtnet_xdp_handler(prog, xdp, dev, xdp_xmit, stats); + rcu_read_unlock(); + + switch (ret) { + case XDP_PASS: + skb = xsk_construct_skb(rq, xdp); + if (!skb) + goto drop_bufs; + + if (xsk_append_merge_buffer(vi, rq, skb, num_buf, hdr, stats)) { + dev_kfree_skb(skb); + goto drop; + } + + return skb; + + case XDP_TX: + case XDP_REDIRECT: + return NULL; + + default: + /* drop packet */ + xsk_buff_free(xdp); + } + +drop_bufs: + xsk_drop_follow_bufs(dev, rq, num_buf, stats); + +drop: + u64_stats_inc(&stats->drops); + return NULL; +} + +static void virtnet_receive_xsk_buf(struct virtnet_info *vi, struct receive_queue *rq, + void *buf, u32 len, + unsigned int *xdp_xmit, + struct virtnet_rq_stats *stats) +{ + struct net_device *dev = vi->dev; + struct sk_buff *skb = NULL; + struct xdp_buff *xdp; + u8 flags; + + len -= vi->hdr_len; + + u64_stats_add(&stats->bytes, len); + + xdp = buf_to_xdp(vi, rq, buf, len); + if (!xdp) + return; + + if (unlikely(len < ETH_HLEN)) { + pr_debug("%s: short packet %i\n", dev->name, len); + DEV_STATS_INC(dev, rx_length_errors); + xsk_buff_free(xdp); + return; + } + + flags = ((struct virtio_net_common_hdr *)(xdp->data - vi->hdr_len))->hdr.flags; + + if (!vi->mergeable_rx_bufs) + skb = virtnet_receive_xsk_small(dev, vi, rq, xdp, xdp_xmit, stats); + else + skb = virtnet_receive_xsk_merge(dev, vi, rq, xdp, xdp_xmit, stats); + + if (skb) + virtnet_receive_done(vi, rq, skb, flags); +} + +static int virtnet_add_recvbuf_xsk(struct virtnet_info *vi, struct receive_queue *rq, + struct xsk_buff_pool *pool, gfp_t gfp) +{ + struct xdp_buff **xsk_buffs; + dma_addr_t addr; + int err = 0; + u32 len, i; + int num; + + xsk_buffs = rq->xsk_buffs; + + num = xsk_buff_alloc_batch(pool, xsk_buffs, rq->vq->num_free); + if (!num) + return -ENOMEM; + + len = xsk_pool_get_rx_frame_size(pool) + vi->hdr_len; + + for (i = 0; i < num; ++i) { + /* Use the part of XDP_PACKET_HEADROOM as the virtnet hdr space. + * We assume XDP_PACKET_HEADROOM is larger than hdr->len. + * (see function virtnet_xsk_pool_enable) + */ + addr = xsk_buff_xdp_get_dma(xsk_buffs[i]) - vi->hdr_len; + + sg_init_table(rq->sg, 1); + sg_fill_dma(rq->sg, addr, len); + + err = virtqueue_add_inbuf(rq->vq, rq->sg, 1, xsk_buffs[i], gfp); + if (err) + goto err; + } + + return num; + +err: + for (; i < num; ++i) + xsk_buff_free(xsk_buffs[i]); + + return err; +} + +static int virtnet_xsk_wakeup(struct net_device *dev, u32 qid, u32 flag) +{ + struct virtnet_info *vi = netdev_priv(dev); + struct send_queue *sq; + + if (!netif_running(dev)) + return -ENETDOWN; + + if (qid >= vi->curr_queue_pairs) + return -EINVAL; + + sq = &vi->sq[qid]; + + if (napi_if_scheduled_mark_missed(&sq->napi)) + return 0; + + local_bh_disable(); + virtqueue_napi_schedule(&sq->napi, sq->vq); + local_bh_enable(); + + return 0; +} + static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, struct send_queue *sq, struct xdp_frame *xdpf) @@ -1138,7 +1508,8 @@ static int virtnet_xdp_xmit(struct net_device *dev, } /* Free up any pending old buffers before queueing new ones. */ - __free_old_xmit(sq, false, &stats); + __free_old_xmit(sq, netdev_get_tx_queue(dev, sq - vi->sq), + false, &stats); for (i = 0; i < n; i++) { struct xdp_frame *xdpf = frames[i]; @@ -1240,7 +1611,7 @@ static int virtnet_xdp_handler(struct bpf_prog *xdp_prog, struct xdp_buff *xdp, static unsigned int virtnet_get_headroom(struct virtnet_info *vi) { - return vi->xdp_enabled ? VIRTIO_XDP_HEADROOM : 0; + return vi->xdp_enabled ? 
XDP_PACKET_HEADROOM : 0; } /* We copy the packet for XDP in the following cases: @@ -1304,7 +1675,7 @@ static struct page *xdp_linearize_page(struct receive_queue *rq, } /* Headroom does not contribute to packet length */ - *len = page_off - VIRTIO_XDP_HEADROOM; + *len = page_off - XDP_PACKET_HEADROOM; return page; err_buf: __free_pages(page, 0); @@ -1591,8 +1962,8 @@ static int virtnet_build_xdp_buff_mrg(struct net_device *dev, void *ctx; xdp_init_buff(xdp, frame_sz, &rq->xdp_rxq); - xdp_prepare_buff(xdp, buf - VIRTIO_XDP_HEADROOM, - VIRTIO_XDP_HEADROOM + vi->hdr_len, len - vi->hdr_len, true); + xdp_prepare_buff(xdp, buf - XDP_PACKET_HEADROOM, + XDP_PACKET_HEADROOM + vi->hdr_len, len - vi->hdr_len, true); if (!*num_buf) return 0; @@ -1709,12 +2080,12 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi, /* linearize data for XDP */ xdp_page = xdp_linearize_page(rq, num_buf, *page, offset, - VIRTIO_XDP_HEADROOM, + XDP_PACKET_HEADROOM, len); if (!xdp_page) return NULL; } else { - xdp_room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM + + xdp_room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM + sizeof(struct skb_shared_info)); if (*len + xdp_room > PAGE_SIZE) return NULL; @@ -1723,7 +2094,7 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi, if (!xdp_page) return NULL; - memcpy(page_address(xdp_page) + VIRTIO_XDP_HEADROOM, + memcpy(page_address(xdp_page) + XDP_PACKET_HEADROOM, page_address(*page) + offset, *len); } @@ -1733,7 +2104,7 @@ static void *mergeable_xdp_get_buf(struct virtnet_info *vi, *page = xdp_page; - return page_address(*page) + VIRTIO_XDP_HEADROOM; + return page_address(*page) + XDP_PACKET_HEADROOM; } static struct sk_buff *receive_mergeable_xdp(struct net_device *dev, @@ -1796,6 +2167,49 @@ err_xdp: return NULL; } +static struct sk_buff *virtnet_skb_append_frag(struct sk_buff *head_skb, + struct sk_buff *curr_skb, + struct page *page, void *buf, + int len, int truesize) +{ + int num_skb_frags; + int offset; + + num_skb_frags = skb_shinfo(curr_skb)->nr_frags; + if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { + struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); + + if (unlikely(!nskb)) + return NULL; + + if (curr_skb == head_skb) + skb_shinfo(curr_skb)->frag_list = nskb; + else + curr_skb->next = nskb; + curr_skb = nskb; + head_skb->truesize += nskb->truesize; + num_skb_frags = 0; + } + + if (curr_skb != head_skb) { + head_skb->data_len += len; + head_skb->len += len; + head_skb->truesize += truesize; + } + + offset = buf - page_address(page); + if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { + put_page(page); + skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, + len, truesize); + } else { + skb_add_rx_frag(curr_skb, num_skb_frags, page, + offset, len, truesize); + } + + return curr_skb; +} + static struct sk_buff *receive_mergeable(struct net_device *dev, struct virtnet_info *vi, struct receive_queue *rq, @@ -1845,8 +2259,6 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, if (unlikely(!curr_skb)) goto err_skb; while (--num_buf) { - int num_skb_frags; - buf = virtnet_rq_get_buf(rq, &len, &ctx); if (unlikely(!buf)) { pr_debug("%s: rx error: %d buffers out of %d missing\n", @@ -1871,34 +2283,10 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, goto err_skb; } - num_skb_frags = skb_shinfo(curr_skb)->nr_frags; - if (unlikely(num_skb_frags == MAX_SKB_FRAGS)) { - struct sk_buff *nskb = alloc_skb(0, GFP_ATOMIC); - - if (unlikely(!nskb)) - goto err_skb; - if (curr_skb == head_skb) - skb_shinfo(curr_skb)->frag_list = nskb; - else - 
curr_skb->next = nskb; - curr_skb = nskb; - head_skb->truesize += nskb->truesize; - num_skb_frags = 0; - } - if (curr_skb != head_skb) { - head_skb->data_len += len; - head_skb->len += len; - head_skb->truesize += truesize; - } - offset = buf - page_address(page); - if (skb_can_coalesce(curr_skb, num_skb_frags, page, offset)) { - put_page(page); - skb_coalesce_rx_frag(curr_skb, num_skb_frags - 1, - len, truesize); - } else { - skb_add_rx_frag(curr_skb, num_skb_frags, page, - offset, len, truesize); - } + curr_skb = virtnet_skb_append_frag(head_skb, curr_skb, page, + buf, len, truesize); + if (!curr_skb) + goto err_skb; } ewma_pkt_len_add(&rq->mrg_avg_pkt_len, head_skb->len); @@ -1943,6 +2331,40 @@ static void virtio_skb_set_hash(const struct virtio_net_hdr_v1_hash *hdr_hash, skb_set_hash(skb, __le32_to_cpu(hdr_hash->hash_value), rss_hash_type); } +static void virtnet_receive_done(struct virtnet_info *vi, struct receive_queue *rq, + struct sk_buff *skb, u8 flags) +{ + struct virtio_net_common_hdr *hdr; + struct net_device *dev = vi->dev; + + hdr = skb_vnet_common_hdr(skb); + if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) + virtio_skb_set_hash(&hdr->hash_v1_hdr, skb); + + if (flags & VIRTIO_NET_HDR_F_DATA_VALID) + skb->ip_summed = CHECKSUM_UNNECESSARY; + + if (virtio_net_hdr_to_skb(skb, &hdr->hdr, + virtio_is_little_endian(vi->vdev))) { + net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n", + dev->name, hdr->hdr.gso_type, + hdr->hdr.gso_size); + goto frame_err; + } + + skb_record_rx_queue(skb, vq2rxq(rq->vq)); + skb->protocol = eth_type_trans(skb, dev); + pr_debug("Receiving skb proto 0x%04x len %i type %i\n", + ntohs(skb->protocol), skb->len, skb->pkt_type); + + napi_gro_receive(&rq->napi, skb); + return; + +frame_err: + DEV_STATS_INC(dev, rx_frame_errors); + dev_kfree_skb(skb); +} + static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, void *buf, unsigned int len, void **ctx, unsigned int *xdp_xmit, @@ -1950,7 +2372,6 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, { struct net_device *dev = vi->dev; struct sk_buff *skb; - struct virtio_net_common_hdr *hdr; u8 flags; if (unlikely(len < vi->hdr_len + ETH_HLEN)) { @@ -1980,32 +2401,7 @@ static void receive_buf(struct virtnet_info *vi, struct receive_queue *rq, if (unlikely(!skb)) return; - hdr = skb_vnet_common_hdr(skb); - if (dev->features & NETIF_F_RXHASH && vi->has_rss_hash_report) - virtio_skb_set_hash(&hdr->hash_v1_hdr, skb); - - if (flags & VIRTIO_NET_HDR_F_DATA_VALID) - skb->ip_summed = CHECKSUM_UNNECESSARY; - - if (virtio_net_hdr_to_skb(skb, &hdr->hdr, - virtio_is_little_endian(vi->vdev))) { - net_warn_ratelimited("%s: bad gso: type: %u, size: %u\n", - dev->name, hdr->hdr.gso_type, - hdr->hdr.gso_size); - goto frame_err; - } - - skb_record_rx_queue(skb, vq2rxq(rq->vq)); - skb->protocol = eth_type_trans(skb, dev); - pr_debug("Receiving skb proto 0x%04x len %i type %i\n", - ntohs(skb->protocol), skb->len, skb->pkt_type); - - napi_gro_receive(&rq->napi, skb); - return; - -frame_err: - DEV_STATS_INC(dev, rx_frame_errors); - dev_kfree_skb(skb); + virtnet_receive_done(vi, rq, skb, flags); } /* Unlike mergeable buffers, all buffers are allocated to the @@ -2166,7 +2562,11 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, gfp_t gfp) { int err; - bool oom; + + if (rq->xsk_pool) { + err = virtnet_add_recvbuf_xsk(vi, rq, rq->xsk_pool, gfp); + goto kick; + } do { if (vi->mergeable_rx_bufs) @@ -2176,10 +2576,11 @@ static bool try_fill_recv(struct 
virtnet_info *vi, struct receive_queue *rq, else err = add_recvbuf_small(vi, rq, gfp); - oom = err == -ENOMEM; if (err) break; } while (rq->vq->num_free); + +kick: if (virtqueue_kick_prepare(rq->vq) && virtqueue_notify(rq->vq)) { unsigned long flags; @@ -2188,7 +2589,7 @@ static bool try_fill_recv(struct virtnet_info *vi, struct receive_queue *rq, u64_stats_update_end_irqrestore(&rq->stats.syncp, flags); } - return !oom; + return err != -ENOMEM; } static void skb_recv_done(struct virtqueue *rvq) @@ -2259,32 +2660,68 @@ static void refill_work(struct work_struct *work) } } -static int virtnet_receive(struct receive_queue *rq, int budget, - unsigned int *xdp_xmit) +static int virtnet_receive_xsk_bufs(struct virtnet_info *vi, + struct receive_queue *rq, + int budget, + unsigned int *xdp_xmit, + struct virtnet_rq_stats *stats) +{ + unsigned int len; + int packets = 0; + void *buf; + + while (packets < budget) { + buf = virtqueue_get_buf(rq->vq, &len); + if (!buf) + break; + + virtnet_receive_xsk_buf(vi, rq, buf, len, xdp_xmit, stats); + packets++; + } + + return packets; +} + +static int virtnet_receive_packets(struct virtnet_info *vi, + struct receive_queue *rq, + int budget, + unsigned int *xdp_xmit, + struct virtnet_rq_stats *stats) { - struct virtnet_info *vi = rq->vq->vdev->priv; - struct virtnet_rq_stats stats = {}; unsigned int len; int packets = 0; void *buf; - int i; if (!vi->big_packets || vi->mergeable_rx_bufs) { void *ctx; - while (packets < budget && (buf = virtnet_rq_get_buf(rq, &len, &ctx))) { - receive_buf(vi, rq, buf, len, ctx, xdp_xmit, &stats); + receive_buf(vi, rq, buf, len, ctx, xdp_xmit, stats); packets++; } } else { while (packets < budget && (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) { - receive_buf(vi, rq, buf, len, NULL, xdp_xmit, &stats); + receive_buf(vi, rq, buf, len, NULL, xdp_xmit, stats); packets++; } } + return packets; +} + +static int virtnet_receive(struct receive_queue *rq, int budget, + unsigned int *xdp_xmit) +{ + struct virtnet_info *vi = rq->vq->vdev->priv; + struct virtnet_rq_stats stats = {}; + int i, packets; + + if (rq->xsk_pool) + packets = virtnet_receive_xsk_bufs(vi, rq, budget, xdp_xmit, &stats); + else + packets = virtnet_receive_packets(vi, rq, budget, xdp_xmit, &stats); + if (rq->vq->num_free > min((unsigned int)budget, virtqueue_get_vring_size(rq->vq)) / 2) { if (!try_fill_recv(vi, rq, GFP_ATOMIC)) { spin_lock(&vi->refill_lock); @@ -2313,7 +2750,7 @@ static int virtnet_receive(struct receive_queue *rq, int budget, return packets; } -static void virtnet_poll_cleantx(struct receive_queue *rq) +static void virtnet_poll_cleantx(struct receive_queue *rq, int budget) { struct virtnet_info *vi = rq->vq->vdev->priv; unsigned int index = vq2rxq(rq->vq); @@ -2331,7 +2768,7 @@ static void virtnet_poll_cleantx(struct receive_queue *rq) do { virtqueue_disable_cb(sq->vq); - free_old_xmit(sq, true); + free_old_xmit(sq, txq, !!budget); } while (unlikely(!virtqueue_enable_cb_delayed(sq->vq))); if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) { @@ -2354,12 +2791,13 @@ static void virtnet_rx_dim_update(struct virtnet_info *vi, struct receive_queue if (!rq->packets_in_napi) return; - u64_stats_update_begin(&rq->stats.syncp); + /* Don't need protection when fetching stats, since fetcher and + * updater of the stats are in same context + */ dim_update_sample(rq->calls, u64_stats_read(&rq->stats.packets), u64_stats_read(&rq->stats.bytes), &cur_sample); - u64_stats_update_end(&rq->stats.syncp); net_dim(&rq->dim, cur_sample); rq->packets_in_napi = 0; @@ -2375,7 
+2813,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) unsigned int xdp_xmit = 0; bool napi_complete; - virtnet_poll_cleantx(rq); + virtnet_poll_cleantx(rq, budget); received = virtnet_receive(rq, budget, &xdp_xmit); rq->packets_in_napi += received; @@ -2430,6 +2868,7 @@ static int virtnet_enable_queue_pair(struct virtnet_info *vi, int qp_index) goto err_xdp_reg_mem_model; virtnet_napi_enable(vi->rq[qp_index].vq, &vi->rq[qp_index].napi); + netdev_tx_reset_queue(netdev_get_tx_queue(vi->dev, qp_index)); virtnet_napi_tx_enable(vi, vi->sq[qp_index].vq, &vi->sq[qp_index].napi); return 0; @@ -2439,6 +2878,13 @@ err_xdp_reg_mem_model: return err; } +static void virtnet_cancel_dim(struct virtnet_info *vi, struct dim *dim) +{ + if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) + return; + net_dim_work_cancel(dim); +} + static int virtnet_open(struct net_device *dev) { struct virtnet_info *vi = netdev_priv(dev); @@ -2465,7 +2911,7 @@ err_enable_qp: for (i--; i >= 0; i--) { virtnet_disable_queue_pair(vi, i); - cancel_work_sync(&vi->rq[i].dim.work); + virtnet_cancel_dim(vi, &vi->rq[i].dim); } return err; @@ -2489,7 +2935,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) txq = netdev_get_tx_queue(vi->dev, index); __netif_tx_lock(txq, raw_smp_processor_id()); virtqueue_disable_cb(sq->vq); - free_old_xmit(sq, true); + free_old_xmit(sq, txq, !!budget); if (sq->vq->num_free >= 2 + MAX_SKB_FRAGS) { if (netif_tx_queue_stopped(txq)) { @@ -2523,7 +2969,7 @@ static int virtnet_poll_tx(struct napi_struct *napi, int budget) return 0; } -static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) +static int xmit_skb(struct send_queue *sq, struct sk_buff *skb, bool orphan) { struct virtio_net_hdr_mrg_rxbuf *hdr; const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest; @@ -2567,7 +3013,8 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb) return num_sg; num_sg++; } - return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, skb, GFP_ATOMIC); + return virtqueue_add_outbuf(sq->vq, sq->sg, num_sg, + skb_to_ptr(skb, orphan), GFP_ATOMIC); } static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) @@ -2577,24 +3024,25 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) struct send_queue *sq = &vi->sq[qnum]; int err; struct netdev_queue *txq = netdev_get_tx_queue(dev, qnum); - bool kick = !netdev_xmit_more(); + bool xmit_more = netdev_xmit_more(); bool use_napi = sq->napi.weight; + bool kick; /* Free up any pending old buffers before queueing new ones. */ do { if (use_napi) virtqueue_disable_cb(sq->vq); - free_old_xmit(sq, false); + free_old_xmit(sq, txq, false); - } while (use_napi && kick && + } while (use_napi && !xmit_more && unlikely(!virtqueue_enable_cb_delayed(sq->vq))); /* timestamp packet in software */ skb_tx_timestamp(skb); /* Try to transmit */ - err = xmit_skb(sq, skb); + err = xmit_skb(sq, skb, !use_napi); /* This should not happen! */ if (unlikely(err)) { @@ -2616,7 +3064,9 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) check_sq_full_and_disable(vi, dev, sq); - if (kick || netif_xmit_stopped(txq)) { + kick = use_napi ? 
__netdev_tx_sent_queue(txq, skb->len, xmit_more) : + !xmit_more || netif_xmit_stopped(txq); + if (kick) { if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { u64_stats_update_begin(&sq->stats.syncp); u64_stats_inc(&sq->stats.kicks); @@ -2627,37 +3077,49 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) return NETDEV_TX_OK; } -static int virtnet_rx_resize(struct virtnet_info *vi, - struct receive_queue *rq, u32 ring_num) +static void virtnet_rx_pause(struct virtnet_info *vi, struct receive_queue *rq) { bool running = netif_running(vi->dev); - int err, qindex; - - qindex = rq - vi->rq; if (running) { napi_disable(&rq->napi); - cancel_work_sync(&rq->dim.work); + virtnet_cancel_dim(vi, &rq->dim); } +} - err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf); - if (err) - netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); +static void virtnet_rx_resume(struct virtnet_info *vi, struct receive_queue *rq) +{ + bool running = netif_running(vi->dev); if (!try_fill_recv(vi, rq, GFP_KERNEL)) schedule_delayed_work(&vi->refill, 0); if (running) virtnet_napi_enable(rq->vq, &rq->napi); +} + +static int virtnet_rx_resize(struct virtnet_info *vi, + struct receive_queue *rq, u32 ring_num) +{ + int err, qindex; + + qindex = rq - vi->rq; + + virtnet_rx_pause(vi, rq); + + err = virtqueue_resize(rq->vq, ring_num, virtnet_rq_unmap_free_buf); + if (err) + netdev_err(vi->dev, "resize rx fail: rx queue index: %d err: %d\n", qindex, err); + + virtnet_rx_resume(vi, rq); return err; } -static int virtnet_tx_resize(struct virtnet_info *vi, - struct send_queue *sq, u32 ring_num) +static void virtnet_tx_pause(struct virtnet_info *vi, struct send_queue *sq) { bool running = netif_running(vi->dev); struct netdev_queue *txq; - int err, qindex; + int qindex; qindex = sq - vi->sq; @@ -2678,10 +3140,17 @@ static int virtnet_tx_resize(struct virtnet_info *vi, netif_stop_subqueue(vi->dev, qindex); __netif_tx_unlock_bh(txq); +} - err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf); - if (err) - netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); +static void virtnet_tx_resume(struct virtnet_info *vi, struct send_queue *sq) +{ + bool running = netif_running(vi->dev); + struct netdev_queue *txq; + int qindex; + + qindex = sq - vi->sq; + + txq = netdev_get_tx_queue(vi->dev, qindex); __netif_tx_lock_bh(txq); sq->reset = false; @@ -2690,6 +3159,23 @@ static int virtnet_tx_resize(struct virtnet_info *vi, if (running) virtnet_napi_tx_enable(vi, sq->vq, &sq->napi); +} + +static int virtnet_tx_resize(struct virtnet_info *vi, struct send_queue *sq, + u32 ring_num) +{ + int qindex, err; + + qindex = sq - vi->sq; + + virtnet_tx_pause(vi, sq); + + err = virtqueue_resize(sq->vq, ring_num, virtnet_sq_free_unused_buf); + if (err) + netdev_err(vi->dev, "resize tx fail: tx queue index: %d err: %d\n", qindex, err); + + virtnet_tx_resume(vi, sq); + return err; } @@ -2898,7 +3384,7 @@ static int virtnet_close(struct net_device *dev) for (i = 0; i < vi->max_queue_pairs; i++) { virtnet_disable_queue_pair(vi, i); - cancel_work_sync(&vi->rq[i].dim.work); + virtnet_cancel_dim(vi, &vi->rq[i].dim); } return 0; @@ -4424,7 +4910,7 @@ static void virtnet_rx_dim_work(struct work_struct *work) if (!rq->dim_enabled) goto out; - update_moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + update_moder = net_dim_get_rx_irq_moder(dev, dim); if (update_moder.usec != rq->intr_coal.max_usecs || update_moder.pkts != 
rq->intr_coal.max_packets) { err = virtnet_send_rx_ctrl_coal_vq_cmd(vi, qnum, @@ -4927,10 +5413,144 @@ static int virtnet_restore_guest_offloads(struct virtnet_info *vi) return virtnet_set_guest_offloads(vi, offloads); } +static int virtnet_rq_bind_xsk_pool(struct virtnet_info *vi, struct receive_queue *rq, + struct xsk_buff_pool *pool) +{ + int err, qindex; + + qindex = rq - vi->rq; + + if (pool) { + err = xdp_rxq_info_reg(&rq->xsk_rxq_info, vi->dev, qindex, rq->napi.napi_id); + if (err < 0) + return err; + + err = xdp_rxq_info_reg_mem_model(&rq->xsk_rxq_info, + MEM_TYPE_XSK_BUFF_POOL, NULL); + if (err < 0) + goto unreg; + + xsk_pool_set_rxq_info(pool, &rq->xsk_rxq_info); + } + + virtnet_rx_pause(vi, rq); + + err = virtqueue_reset(rq->vq, virtnet_rq_unmap_free_buf); + if (err) { + netdev_err(vi->dev, "reset rx fail: rx queue index: %d err: %d\n", qindex, err); + + pool = NULL; + } + + rq->xsk_pool = pool; + + virtnet_rx_resume(vi, rq); + + if (pool) + return 0; + +unreg: + xdp_rxq_info_unreg(&rq->xsk_rxq_info); + return err; +} + +static int virtnet_xsk_pool_enable(struct net_device *dev, + struct xsk_buff_pool *pool, + u16 qid) +{ + struct virtnet_info *vi = netdev_priv(dev); + struct receive_queue *rq; + struct device *dma_dev; + struct send_queue *sq; + int err, size; + + if (vi->hdr_len > xsk_pool_get_headroom(pool)) + return -EINVAL; + + /* In big_packets mode, xdp cannot work, so there is no need to + * initialize the xsk state of the rq. + */ + if (vi->big_packets && !vi->mergeable_rx_bufs) + return -ENOENT; + + if (qid >= vi->curr_queue_pairs) + return -EINVAL; + + sq = &vi->sq[qid]; + rq = &vi->rq[qid]; + + /* xsk assumes that tx and rx must share the same dma device: AF-XDP + * may receive a packet into a buffer on the rx side and reuse that + * same buffer to transmit on the tx side, so the dma dev of the sq + * and rq must be the same one. + * + * But vq->dma_dev allows every vq to have its own dma dev, so verify + * that the dma devs of the rq and sq really are the same device.
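+ * (If they differ, the binding is rejected with -EINVAL just below.)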
+ */ + if (virtqueue_dma_dev(rq->vq) != virtqueue_dma_dev(sq->vq)) + return -EINVAL; + + dma_dev = virtqueue_dma_dev(rq->vq); + if (!dma_dev) + return -EINVAL; + + size = virtqueue_get_vring_size(rq->vq); + + rq->xsk_buffs = kvcalloc(size, sizeof(*rq->xsk_buffs), GFP_KERNEL); + if (!rq->xsk_buffs) + return -ENOMEM; + + err = xsk_pool_dma_map(pool, dma_dev, 0); + if (err) + goto err_xsk_map; + + err = virtnet_rq_bind_xsk_pool(vi, rq, pool); + if (err) + goto err_rq; + + return 0; + +err_rq: + xsk_pool_dma_unmap(pool, 0); +err_xsk_map: + return err; +} + +static int virtnet_xsk_pool_disable(struct net_device *dev, u16 qid) +{ + struct virtnet_info *vi = netdev_priv(dev); + struct xsk_buff_pool *pool; + struct receive_queue *rq; + int err; + + if (qid >= vi->curr_queue_pairs) + return -EINVAL; + + rq = &vi->rq[qid]; + + pool = rq->xsk_pool; + + err = virtnet_rq_bind_xsk_pool(vi, rq, NULL); + + xsk_pool_dma_unmap(pool, 0); + + kvfree(rq->xsk_buffs); + + return err; +} + +static int virtnet_xsk_pool_setup(struct net_device *dev, struct netdev_bpf *xdp) +{ + if (xdp->xsk.pool) + return virtnet_xsk_pool_enable(dev, xdp->xsk.pool, + xdp->xsk.queue_id); + else + return virtnet_xsk_pool_disable(dev, xdp->xsk.queue_id); +} + static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog, struct netlink_ext_ack *extack) { - unsigned int room = SKB_DATA_ALIGN(VIRTIO_XDP_HEADROOM + + unsigned int room = SKB_DATA_ALIGN(XDP_PACKET_HEADROOM + sizeof(struct skb_shared_info)); unsigned int max_sz = PAGE_SIZE - room - ETH_HLEN; struct virtnet_info *vi = netdev_priv(dev); @@ -5052,6 +5672,8 @@ static int virtnet_xdp(struct net_device *dev, struct netdev_bpf *xdp) switch (xdp->command) { case XDP_SETUP_PROG: return virtnet_xdp_set(dev, xdp->prog, xdp->extack); + case XDP_SETUP_XSK_POOL: + return virtnet_xsk_pool_setup(dev, xdp); default: return -EINVAL; } @@ -5124,6 +5746,36 @@ static void virtnet_tx_timeout(struct net_device *dev, unsigned int txqueue) jiffies_to_usecs(jiffies - READ_ONCE(txq->trans_start))); } +static int virtnet_init_irq_moder(struct virtnet_info *vi) +{ + u8 profile_flags = 0, coal_flags = 0; + int ret, i; + + profile_flags |= DIM_PROFILE_RX; + coal_flags |= DIM_COALESCE_USEC | DIM_COALESCE_PKTS; + ret = net_dim_init_irq_moder(vi->dev, profile_flags, coal_flags, + DIM_CQ_PERIOD_MODE_START_FROM_EQE, + 0, virtnet_rx_dim_work, NULL); + + if (ret) + return ret; + + for (i = 0; i < vi->max_queue_pairs; i++) + net_dim_setting(vi->dev, &vi->rq[i].dim, false); + + return 0; +} + +static void virtnet_free_irq_moder(struct virtnet_info *vi) +{ + if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_VQ_NOTF_COAL)) + return; + + rtnl_lock(); + net_dim_free_irq_moder(vi->dev); + rtnl_unlock(); +} + static const struct net_device_ops virtnet_netdev = { .ndo_open = virtnet_open, .ndo_stop = virtnet_close, @@ -5136,6 +5788,7 @@ static const struct net_device_ops virtnet_netdev = { .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid, .ndo_bpf = virtnet_xdp, .ndo_xdp_xmit = virtnet_xdp_xmit, + .ndo_xsk_wakeup = virtnet_xsk_wakeup, .ndo_features_check = passthru_features_check, .ndo_get_phys_port_name = virtnet_get_phys_port_name, .ndo_set_features = virtnet_set_features, @@ -5403,9 +6056,6 @@ static int virtnet_alloc_queues(struct virtnet_info *vi) virtnet_poll_tx, napi_tx ? 
napi_weight : 0); - INIT_WORK(&vi->rq[i].dim.work, virtnet_rx_dim_work); - vi->rq[i].dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; - sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg)); ewma_pkt_len_init(&vi->rq[i].mrg_avg_pkt_len); sg_init_table(vi->sq[i].sg, ARRAY_SIZE(vi->sq[i].sg)); @@ -5834,6 +6484,10 @@ static int virtnet_probe(struct virtio_device *vdev) for (i = 0; i < vi->max_queue_pairs; i++) if (vi->sq[i].napi.weight) vi->sq[i].intr_coal.max_packets = 1; + + err = virtnet_init_irq_moder(vi); + if (err) + goto free; } #ifdef CONFIG_SYSFS @@ -5985,6 +6639,8 @@ static void virtnet_remove(struct virtio_device *vdev) disable_rx_mode_work(vi); flush_work(&vi->rx_mode_work); + virtnet_free_irq_moder(vi); + unregister_netdev(vi->dev); net_failover_destroy(vi->failover); diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile index f82870c10205..59ef494ce2e0 100644 --- a/drivers/net/vmxnet3/Makefile +++ b/drivers/net/vmxnet3/Makefile @@ -2,7 +2,7 @@ # # Linux driver for VMware's vmxnet3 ethernet NIC. # -# Copyright (C) 2007-2022, VMware, Inc. All Rights Reserved. +# Copyright (C) 2007-2024, VMware, Inc. All Rights Reserved. # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h index 41d6767283a6..5c5148768039 100644 --- a/drivers/net/vmxnet3/vmxnet3_defs.h +++ b/drivers/net/vmxnet3/vmxnet3_defs.h @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -80,6 +80,8 @@ enum { #define VMXNET3_IO_TYPE(addr) ((addr) >> 24) #define VMXNET3_IO_REG(addr) ((addr) & 0xFFFFFF) +#define VMXNET3_PMC_PSEUDO_TSC 0x10003 + enum { VMXNET3_CMD_FIRST_SET = 0xCAFE0000, VMXNET3_CMD_ACTIVATE_DEV = VMXNET3_CMD_FIRST_SET, @@ -123,6 +125,8 @@ enum { VMXNET3_CMD_GET_RESERVED4, VMXNET3_CMD_GET_MAX_CAPABILITIES, VMXNET3_CMD_GET_DCR0_REG, + VMXNET3_CMD_GET_TSRING_DESC_SIZE, + VMXNET3_CMD_GET_DISABLED_OFFLOADS, }; /* @@ -254,6 +258,24 @@ struct Vmxnet3_RxDesc { #define VMXNET3_RCD_HDR_INNER_SHIFT 13 +struct Vmxnet3TSInfo { + u64 tsData:56; + u64 tsType:4; + u64 tsi:1; //bit to indicate to set ts + u64 pad:3; + u64 pad2; +}; + +struct Vmxnet3_TxTSDesc { + struct Vmxnet3TSInfo ts; + u64 pad[14]; +}; + +struct Vmxnet3_RxTSDesc { + struct Vmxnet3TSInfo ts; + u64 pad[14]; +}; + struct Vmxnet3_RxCompDesc { #ifdef __BIG_ENDIAN_BITFIELD u32 ext2:1; @@ -427,6 +449,13 @@ union Vmxnet3_GenericDesc { #define VMXNET3_RXDATA_DESC_SIZE_ALIGN 64 #define VMXNET3_RXDATA_DESC_SIZE_MASK (VMXNET3_RXDATA_DESC_SIZE_ALIGN - 1) +/* Rx TS Ring buffer size must be a multiple of 64 bytes */ +#define VMXNET3_RXTS_DESC_SIZE_ALIGN 64 +#define VMXNET3_RXTS_DESC_SIZE_MASK (VMXNET3_RXTS_DESC_SIZE_ALIGN - 1) +/* Tx TS Ring buffer size must be a multiple of 64 bytes */ +#define VMXNET3_TXTS_DESC_SIZE_ALIGN 64 +#define VMXNET3_TXTS_DESC_SIZE_MASK (VMXNET3_TXTS_DESC_SIZE_ALIGN - 1) + /* Max ring size */ #define VMXNET3_TX_RING_MAX_SIZE 4096 #define VMXNET3_TC_RING_MAX_SIZE 4096 @@ -439,6 +468,9 @@ union Vmxnet3_GenericDesc { #define VMXNET3_RXDATA_DESC_MAX_SIZE 2048 +#define VMXNET3_TXTS_DESC_MAX_SIZE 256 +#define VMXNET3_RXTS_DESC_MAX_SIZE 256 + /* a list of reasons for 
queue stop */ enum { @@ -546,6 +578,24 @@ struct Vmxnet3_RxQueueConf { }; +struct Vmxnet3_LatencyConf { + u16 sampleRate; + u16 pad; +}; + +struct Vmxnet3_TxQueueTSConf { + __le64 txTSRingBasePA; + __le16 txTSRingDescSize; /* size of tx timestamp ring buffer */ + u16 pad; + struct Vmxnet3_LatencyConf latencyConf; +}; + +struct Vmxnet3_RxQueueTSConf { + __le64 rxTSRingBasePA; + __le16 rxTSRingDescSize; /* size of rx timestamp ring buffer */ + u16 pad[3]; +}; + enum vmxnet3_intr_mask_mode { VMXNET3_IMM_AUTO = 0, VMXNET3_IMM_ACTIVE = 1, @@ -679,7 +729,8 @@ struct Vmxnet3_TxQueueDesc { /* Driver read after a GET command */ struct Vmxnet3_QueueStatus status; struct UPT1_TxStats stats; - u8 _pad[88]; /* 128 aligned */ + struct Vmxnet3_TxQueueTSConf tsConf; + u8 _pad[72]; /* 128 aligned */ }; @@ -689,7 +740,8 @@ struct Vmxnet3_RxQueueDesc { /* Driver read after a GET commad */ struct Vmxnet3_QueueStatus status; struct UPT1_RxStats stats; - u8 __pad[88]; /* 128 aligned */ + struct Vmxnet3_RxQueueTSConf tsConf; + u8 __pad[72]; /* 128 aligned */ }; struct Vmxnet3_SetPolling { @@ -861,4 +913,7 @@ struct Vmxnet3_DriverShared { /* when new capability is introduced, update VMXNET3_CAP_MAX */ #define VMXNET3_CAP_MAX VMXNET3_CAP_VERSION_7_MAX +#define VMXNET3_OFFLOAD_TSO BIT(0) +#define VMXNET3_OFFLOAD_LRO BIT(1) + #endif /* _VMXNET3_DEFS_H_ */ diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 63822d454c00..b70654c7ad34 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -143,6 +143,32 @@ vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue)); } +static u64 +vmxnet3_get_cycles(int pmc) +{ +#ifdef CONFIG_X86 + return native_read_pmc(pmc); +#else + return 0; +#endif +} + +static bool +vmxnet3_apply_timestamp(struct vmxnet3_tx_queue *tq, u16 rate) +{ +#ifdef CONFIG_X86 + if (rate > 0) { + if (tq->tsPktCount == 1) { + if (rate != 1) + tq->tsPktCount = rate; + return true; + } + tq->tsPktCount--; + } +#endif + return false; +} + /* Check if capability is supported by UPT device or * UPT is even requested */ @@ -498,6 +524,12 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, tq->data_ring.base, tq->data_ring.basePA); tq->data_ring.base = NULL; } + if (tq->ts_ring.base) { + dma_free_coherent(&adapter->pdev->dev, + tq->tx_ring.size * tq->tx_ts_desc_size, + tq->ts_ring.base, tq->ts_ring.basePA); + tq->ts_ring.base = NULL; + } if (tq->comp_ring.base) { dma_free_coherent(&adapter->pdev->dev, tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc), @@ -535,6 +567,10 @@ vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, memset(tq->data_ring.base, 0, tq->data_ring.size * tq->txdata_desc_size); + if (tq->ts_ring.base) + memset(tq->ts_ring.base, 0, + tq->tx_ring.size * tq->tx_ts_desc_size); + /* reset the tx comp ring contents to 0 and reset comp ring states */ memset(tq->comp_ring.base, 0, tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc)); @@ -573,6 +609,18 @@ vmxnet3_tq_create(struct vmxnet3_tx_queue *tq, goto err; } + if (tq->tx_ts_desc_size != 0) { + tq->ts_ring.base = dma_alloc_coherent(&adapter->pdev->dev, + 
tq->tx_ring.size * tq->tx_ts_desc_size, + &tq->ts_ring.basePA, GFP_KERNEL); + if (!tq->ts_ring.base) { + netdev_err(adapter->netdev, "failed to allocate tx ts ring\n"); + tq->tx_ts_desc_size = 0; + } + } else { + tq->ts_ring.base = NULL; + } + tq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, tq->comp_ring.size * sizeof(struct Vmxnet3_TxCompDesc), &tq->comp_ring.basePA, GFP_KERNEL); @@ -861,6 +909,11 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx, /* set the last buf_info for the pkt */ tbi->skb = skb; tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base; + if (tq->tx_ts_desc_size != 0) { + ctx->ts_txd = (struct Vmxnet3_TxTSDesc *)((u8 *)tq->ts_ring.base + + tbi->sop_idx * tq->tx_ts_desc_size); + ctx->ts_txd->ts.tsi = 0; + } return 0; } @@ -968,7 +1021,7 @@ vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, skb_headlen(skb)); } - if (skb->len <= VMXNET3_HDR_COPY_SIZE) + if (skb->len <= tq->txdata_desc_size) ctx->copy_size = skb->len; /* make sure headers are accessible directly */ @@ -1259,6 +1312,14 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq, gdesc->txd.tci = skb_vlan_tag_get(skb); } + if (tq->tx_ts_desc_size != 0 && + adapter->latencyConf->sampleRate != 0) { + if (vmxnet3_apply_timestamp(tq, adapter->latencyConf->sampleRate)) { + ctx.ts_txd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC); + ctx.ts_txd->ts.tsi = 1; + } + } + /* Ensure that the write to (&gdesc->txd)->gen will be observed after * all other writes to &gdesc->txd. */ @@ -1608,6 +1669,15 @@ skip_xdp: skip_page_frags = false; ctx->skb = rbi->skb; + if (rq->rx_ts_desc_size != 0 && rcd->ext2) { + struct Vmxnet3_RxTSDesc *ts_rxd; + + ts_rxd = (struct Vmxnet3_RxTSDesc *)((u8 *)rq->ts_ring.base + + idx * rq->rx_ts_desc_size); + ts_rxd->ts.tsData = vmxnet3_get_cycles(VMXNET3_PMC_PSEUDO_TSC); + ts_rxd->ts.tsi = 1; + } + rxDataRingUsed = VMXNET3_RX_DATA_RING(adapter, rcd->rqID); len = rxDataRingUsed ? 
rcd->len : rbi->len; @@ -2007,6 +2077,13 @@ static void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, rq->data_ring.base = NULL; } + if (rq->ts_ring.base) { + dma_free_coherent(&adapter->pdev->dev, + rq->rx_ring[0].size * rq->rx_ts_desc_size, + rq->ts_ring.base, rq->ts_ring.basePA); + rq->ts_ring.base = NULL; + } + if (rq->comp_ring.base) { dma_free_coherent(&adapter->pdev->dev, rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc), @@ -2090,6 +2167,10 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq, } vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter); + if (rq->ts_ring.base) + memset(rq->ts_ring.base, 0, + rq->rx_ring[0].size * rq->rx_ts_desc_size); + /* reset the comp ring */ rq->comp_ring.next2proc = 0; memset(rq->comp_ring.base, 0, rq->comp_ring.size * @@ -2160,6 +2241,21 @@ vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) rq->data_ring.desc_size = 0; } + if (rq->rx_ts_desc_size != 0) { + sz = rq->rx_ring[0].size * rq->rx_ts_desc_size; + rq->ts_ring.base = + dma_alloc_coherent(&adapter->pdev->dev, sz, + &rq->ts_ring.basePA, + GFP_KERNEL); + if (!rq->ts_ring.base) { + netdev_err(adapter->netdev, + "rx ts ring will be disabled\n"); + rq->rx_ts_desc_size = 0; + } + } else { + rq->ts_ring.base = NULL; + } + sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc); rq->comp_ring.base = dma_alloc_coherent(&adapter->pdev->dev, sz, &rq->comp_ring.basePA, @@ -2759,6 +2855,8 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) struct Vmxnet3_DSDevReadExt *devReadExt = &shared->devReadExt; struct Vmxnet3_TxQueueConf *tqc; struct Vmxnet3_RxQueueConf *rqc; + struct Vmxnet3_TxQueueTSConf *tqtsc; + struct Vmxnet3_RxQueueTSConf *rqtsc; int i; memset(shared, 0, sizeof(*shared)); @@ -2815,6 +2913,11 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) tqc->compRingSize = cpu_to_le32(tq->comp_ring.size); tqc->ddLen = cpu_to_le32(0); tqc->intrIdx = tq->comp_ring.intr_idx; + if (VMXNET3_VERSION_GE_9(adapter)) { + tqtsc = &adapter->tqd_start[i].tsConf; + tqtsc->txTSRingBasePA = cpu_to_le64(tq->ts_ring.basePA); + tqtsc->txTSRingDescSize = cpu_to_le16(tq->tx_ts_desc_size); + } } /* rx queue settings */ @@ -2837,6 +2940,11 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter) rqc->rxDataRingDescSize = cpu_to_le16(rq->data_ring.desc_size); } + if (VMXNET3_VERSION_GE_9(adapter)) { + rqtsc = &adapter->rqd_start[i].tsConf; + rqtsc->rxTSRingBasePA = cpu_to_le64(rq->ts_ring.basePA); + rqtsc->rxTSRingDescSize = cpu_to_le16(rq->rx_ts_desc_size); + } } #ifdef VMXNET3_RSS @@ -3299,6 +3407,8 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, tq->stopped = true; tq->adapter = adapter; tq->qid = i; + tq->tx_ts_desc_size = adapter->tx_ts_desc_size; + tq->tsPktCount = 1; err = vmxnet3_tq_create(tq, adapter); /* * Too late to change num_tx_queues. 
We cannot do away with @@ -3320,6 +3430,7 @@ vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, rq->shared = &adapter->rqd_start[i].ctrl; rq->adapter = adapter; rq->data_ring.desc_size = rxdata_desc_size; + rq->rx_ts_desc_size = adapter->rx_ts_desc_size; err = vmxnet3_rq_create(rq, adapter); if (err) { if (i == 0) { @@ -3361,14 +3472,15 @@ vmxnet3_open(struct net_device *netdev) if (VMXNET3_VERSION_GE_3(adapter)) { unsigned long flags; u16 txdata_desc_size; + u32 ret; spin_lock_irqsave(&adapter->cmd_lock, flags); VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_TXDATA_DESC_SIZE); - txdata_desc_size = VMXNET3_READ_BAR1_REG(adapter, - VMXNET3_REG_CMD); + ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); spin_unlock_irqrestore(&adapter->cmd_lock, flags); + txdata_desc_size = ret & 0xffff; if ((txdata_desc_size < VMXNET3_TXDATA_DESC_MIN_SIZE) || (txdata_desc_size > VMXNET3_TXDATA_DESC_MAX_SIZE) || (txdata_desc_size & VMXNET3_TXDATA_DESC_SIZE_MASK)) { @@ -3377,10 +3489,40 @@ vmxnet3_open(struct net_device *netdev) } else { adapter->txdata_desc_size = txdata_desc_size; } + if (VMXNET3_VERSION_GE_9(adapter)) + adapter->rxdata_desc_size = (ret >> 16) & 0xffff; } else { adapter->txdata_desc_size = sizeof(struct Vmxnet3_TxDataDesc); } + if (VMXNET3_VERSION_GE_9(adapter)) { + unsigned long flags; + u16 tx_ts_desc_size = 0; + u16 rx_ts_desc_size = 0; + u32 ret; + + spin_lock_irqsave(&adapter->cmd_lock, flags); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_GET_TSRING_DESC_SIZE); + ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); + if (ret > 0) { + tx_ts_desc_size = (ret & 0xff); + rx_ts_desc_size = ((ret >> 16) & 0xff); + } + if (tx_ts_desc_size > VMXNET3_TXTS_DESC_MAX_SIZE || + tx_ts_desc_size & VMXNET3_TXTS_DESC_SIZE_MASK) + tx_ts_desc_size = 0; + if (rx_ts_desc_size > VMXNET3_RXTS_DESC_MAX_SIZE || + rx_ts_desc_size & VMXNET3_RXTS_DESC_SIZE_MASK) + rx_ts_desc_size = 0; + adapter->tx_ts_desc_size = tx_ts_desc_size; + adapter->rx_ts_desc_size = rx_ts_desc_size; + } else { + adapter->tx_ts_desc_size = 0; + adapter->rx_ts_desc_size = 0; + } + err = vmxnet3_create_queues(adapter, adapter->tx_ring_size, adapter->rx_ring_size, @@ -3503,6 +3645,15 @@ static void vmxnet3_declare_features(struct vmxnet3_adapter *adapter) { struct net_device *netdev = adapter->netdev; + unsigned long flags; + + if (VMXNET3_VERSION_GE_9(adapter)) { + spin_lock_irqsave(&adapter->cmd_lock, flags); + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, + VMXNET3_CMD_GET_DISABLED_OFFLOADS); + adapter->disabledOffloads = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); + spin_unlock_irqrestore(&adapter->cmd_lock, flags); + } netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_CTAG_TX | @@ -3520,6 +3671,16 @@ vmxnet3_declare_features(struct vmxnet3_adapter *adapter) NETIF_F_GSO_UDP_TUNNEL_CSUM; } + if (adapter->disabledOffloads & VMXNET3_OFFLOAD_TSO) { + netdev->hw_features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + netdev->hw_enc_features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + } + + if (adapter->disabledOffloads & VMXNET3_OFFLOAD_LRO) { + netdev->hw_features &= ~(NETIF_F_LRO); + netdev->hw_enc_features &= ~(NETIF_F_LRO); + } + if (VMXNET3_VERSION_GE_7(adapter)) { unsigned long flags; @@ -3790,7 +3951,7 @@ vmxnet3_probe_device(struct pci_dev *pdev, struct net_device *netdev; struct vmxnet3_adapter *adapter; u8 mac[ETH_ALEN]; - int size; + int size, i; int num_tx_queues; int num_rx_queues; int 
queues; @@ -3857,42 +4018,14 @@ vmxnet3_probe_device(struct pci_dev *pdev, goto err_alloc_pci; ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS); - if (ver & (1 << VMXNET3_REV_7)) { - VMXNET3_WRITE_BAR1_REG(adapter, - VMXNET3_REG_VRRS, - 1 << VMXNET3_REV_7); - adapter->version = VMXNET3_REV_7 + 1; - } else if (ver & (1 << VMXNET3_REV_6)) { - VMXNET3_WRITE_BAR1_REG(adapter, - VMXNET3_REG_VRRS, - 1 << VMXNET3_REV_6); - adapter->version = VMXNET3_REV_6 + 1; - } else if (ver & (1 << VMXNET3_REV_5)) { - VMXNET3_WRITE_BAR1_REG(adapter, - VMXNET3_REG_VRRS, - 1 << VMXNET3_REV_5); - adapter->version = VMXNET3_REV_5 + 1; - } else if (ver & (1 << VMXNET3_REV_4)) { - VMXNET3_WRITE_BAR1_REG(adapter, - VMXNET3_REG_VRRS, - 1 << VMXNET3_REV_4); - adapter->version = VMXNET3_REV_4 + 1; - } else if (ver & (1 << VMXNET3_REV_3)) { - VMXNET3_WRITE_BAR1_REG(adapter, - VMXNET3_REG_VRRS, - 1 << VMXNET3_REV_3); - adapter->version = VMXNET3_REV_3 + 1; - } else if (ver & (1 << VMXNET3_REV_2)) { - VMXNET3_WRITE_BAR1_REG(adapter, - VMXNET3_REG_VRRS, - 1 << VMXNET3_REV_2); - adapter->version = VMXNET3_REV_2 + 1; - } else if (ver & (1 << VMXNET3_REV_1)) { - VMXNET3_WRITE_BAR1_REG(adapter, - VMXNET3_REG_VRRS, - 1 << VMXNET3_REV_1); - adapter->version = VMXNET3_REV_1 + 1; - } else { + for (i = VMXNET3_REV_9; i >= VMXNET3_REV_1; i--) { + if (ver & (1 << i)) { + VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1 << i); + adapter->version = i + 1; + break; + } + } + if (i < VMXNET3_REV_1) { dev_err(&pdev->dev, "Incompatible h/w version (0x%x) for adapter\n", ver); err = -EBUSY; @@ -3992,6 +4125,8 @@ vmxnet3_probe_device(struct pci_dev *pdev, } adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start + adapter->num_tx_queues); + if (VMXNET3_VERSION_GE_9(adapter)) + adapter->latencyConf = &adapter->tqd_start->tsConf.latencyConf; adapter->pm_conf = dma_alloc_coherent(&adapter->pdev->dev, sizeof(struct Vmxnet3_PMConf), diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c index 7e8008d5378a..471f91c4204a 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethtool.c +++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h index 915aaf18c409..9f24d66dbb27 100644 --- a/drivers/net/vmxnet3/vmxnet3_int.h +++ b/drivers/net/vmxnet3/vmxnet3_int.h @@ -1,7 +1,7 @@ /* * Linux driver for VMware's vmxnet3 ethernet NIC. * - * Copyright (C) 2008-2022, VMware, Inc. All Rights Reserved. + * Copyright (C) 2008-2024, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the @@ -72,18 +72,20 @@ /* * Version numbers */ -#define VMXNET3_DRIVER_VERSION_STRING "1.7.0.0-k" +#define VMXNET3_DRIVER_VERSION_STRING "1.9.0.0-k" /* Each byte of this 32-bit integer encodes a version number in * VMXNET3_DRIVER_VERSION_STRING. */ -#define VMXNET3_DRIVER_VERSION_NUM 0x01070000 +#define VMXNET3_DRIVER_VERSION_NUM 0x01090000 #if defined(CONFIG_PCI_MSI) /* RSS only makes sense if MSI-X is supported. */ #define VMXNET3_RSS #endif +#define VMXNET3_REV_9 8 /* Vmxnet3 Rev. 
9 */ +#define VMXNET3_REV_8 7 /* Vmxnet3 Rev. 8 */ #define VMXNET3_REV_7 6 /* Vmxnet3 Rev. 7 */ #define VMXNET3_REV_6 5 /* Vmxnet3 Rev. 6 */ #define VMXNET3_REV_5 4 /* Vmxnet3 Rev. 5 */ @@ -191,6 +193,11 @@ struct vmxnet3_tx_data_ring { dma_addr_t basePA; }; +struct vmxnet3_tx_ts_ring { + struct Vmxnet3_TxTSDesc *base; + dma_addr_t basePA; +}; + #define VMXNET3_MAP_NONE 0 #define VMXNET3_MAP_SINGLE BIT(0) #define VMXNET3_MAP_PAGE BIT(1) @@ -243,6 +250,7 @@ struct vmxnet3_tx_ctx { u32 copy_size; /* # of bytes copied into the data ring */ union Vmxnet3_GenericDesc *sop_txd; union Vmxnet3_GenericDesc *eop_txd; + struct Vmxnet3_TxTSDesc *ts_txd; }; struct vmxnet3_tx_queue { @@ -252,6 +260,7 @@ struct vmxnet3_tx_queue { struct vmxnet3_cmd_ring tx_ring; struct vmxnet3_tx_buf_info *buf_info; struct vmxnet3_tx_data_ring data_ring; + struct vmxnet3_tx_ts_ring ts_ring; struct vmxnet3_comp_ring comp_ring; struct Vmxnet3_TxQueueCtrl *shared; struct vmxnet3_tq_driver_stats stats; @@ -260,6 +269,8 @@ struct vmxnet3_tx_queue { * stopped */ int qid; u16 txdata_desc_size; + u16 tx_ts_desc_size; + u16 tsPktCount; } ____cacheline_aligned; enum vmxnet3_rx_buf_type { @@ -307,6 +318,11 @@ struct vmxnet3_rx_data_ring { u16 desc_size; }; +struct vmxnet3_rx_ts_ring { + struct Vmxnet3_RxTSDesc *base; + dma_addr_t basePA; +}; + struct vmxnet3_rx_queue { char name[IFNAMSIZ + 8]; /* To identify interrupt */ struct vmxnet3_adapter *adapter; @@ -314,6 +330,7 @@ struct vmxnet3_rx_queue { struct vmxnet3_cmd_ring rx_ring[2]; struct vmxnet3_rx_data_ring data_ring; struct vmxnet3_comp_ring comp_ring; + struct vmxnet3_rx_ts_ring ts_ring; struct vmxnet3_rx_ctx rx_ctx; u32 qid; /* rqID in RCD for buffer from 1st ring */ u32 qid2; /* rqID in RCD for buffer from 2nd ring */ @@ -323,6 +340,7 @@ struct vmxnet3_rx_queue { struct vmxnet3_rq_driver_stats stats; struct page_pool *page_pool; struct xdp_rxq_info xdp_rxq; + u16 rx_ts_desc_size; } ____cacheline_aligned; #define VMXNET3_DEVICE_MAX_TX_QUEUES 32 @@ -432,6 +450,11 @@ struct vmxnet3_adapter { u16 rx_prod_offset; u16 rx_prod2_offset; struct bpf_prog __rcu *xdp_bpf_prog; + struct Vmxnet3_LatencyConf *latencyConf; + /* Size of buffer in the ts ring */ + u16 tx_ts_desc_size; + u16 rx_ts_desc_size; + u32 disabledOffloads; }; #define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \ @@ -463,6 +486,10 @@ struct vmxnet3_adapter { (adapter->version >= VMXNET3_REV_6 + 1) #define VMXNET3_VERSION_GE_7(adapter) \ (adapter->version >= VMXNET3_REV_7 + 1) +#define VMXNET3_VERSION_GE_8(adapter) \ + (adapter->version >= VMXNET3_REV_8 + 1) +#define VMXNET3_VERSION_GE_9(adapter) \ + (adapter->version >= VMXNET3_REV_9 + 1) /* must be a multiple of VMXNET3_RING_SIZE_ALIGN */ #define VMXNET3_DEF_TX_RING_SIZE 512 diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 3a252ac5dd28..9af316cdd8b3 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -126,8 +126,8 @@ static void vrf_rx_stats(struct net_device *dev, int len) struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); u64_stats_update_begin(&dstats->syncp); - dstats->rx_packets++; - dstats->rx_bytes += len; + u64_stats_inc(&dstats->rx_packets); + u64_stats_add(&dstats->rx_bytes, len); u64_stats_update_end(&dstats->syncp); } @@ -137,33 +137,6 @@ static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb) kfree_skb(skb); } -static void vrf_get_stats64(struct net_device *dev, - struct rtnl_link_stats64 *stats) -{ - int i; - - for_each_possible_cpu(i) { - const struct pcpu_dstats *dstats; - u64 tbytes, tpkts, tdrops, rbytes, rpkts; - 
unsigned int start; - - dstats = per_cpu_ptr(dev->dstats, i); - do { - start = u64_stats_fetch_begin(&dstats->syncp); - tbytes = dstats->tx_bytes; - tpkts = dstats->tx_packets; - tdrops = dstats->tx_drops; - rbytes = dstats->rx_bytes; - rpkts = dstats->rx_packets; - } while (u64_stats_fetch_retry(&dstats->syncp, start)); - stats->tx_bytes += tbytes; - stats->tx_packets += tpkts; - stats->tx_dropped += tdrops; - stats->rx_bytes += rbytes; - stats->rx_packets += rpkts; - } -} - static struct vrf_map *netns_vrf_map(struct net *net) { struct netns_vrf *nn_vrf = net_generic(net, vrf_net_id); @@ -408,10 +381,15 @@ static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev, skb->protocol = eth_type_trans(skb, dev); - if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) + if (likely(__netif_rx(skb) == NET_RX_SUCCESS)) { vrf_rx_stats(dev, len); - else - this_cpu_inc(dev->dstats->rx_drops); + } else { + struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); + + u64_stats_update_begin(&dstats->syncp); + u64_stats_inc(&dstats->rx_drops); + u64_stats_update_end(&dstats->syncp); + } return NETDEV_TX_OK; } @@ -599,19 +577,20 @@ static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev) static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev) { + struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); + int len = skb->len; netdev_tx_t ret = is_ip_tx_frame(skb, dev); + u64_stats_update_begin(&dstats->syncp); if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { - struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats); - u64_stats_update_begin(&dstats->syncp); - dstats->tx_packets++; - dstats->tx_bytes += len; - u64_stats_update_end(&dstats->syncp); + u64_stats_inc(&dstats->tx_packets); + u64_stats_add(&dstats->tx_bytes, len); } else { - this_cpu_inc(dev->dstats->tx_drops); + u64_stats_inc(&dstats->tx_drops); } + u64_stats_update_end(&dstats->syncp); return ret; } @@ -1195,7 +1174,6 @@ static const struct net_device_ops vrf_netdev_ops = { .ndo_uninit = vrf_dev_uninit, .ndo_start_xmit = vrf_xmit, .ndo_set_mac_address = eth_mac_addr, - .ndo_get_stats64 = vrf_get_stats64, .ndo_add_slave = vrf_add_slave, .ndo_del_slave = vrf_del_slave, }; diff --git a/drivers/net/wireless/admtek/adm8211.c b/drivers/net/wireless/admtek/adm8211.c index e3fd48dd3909..a2d87c3ad196 100644 --- a/drivers/net/wireless/admtek/adm8211.c +++ b/drivers/net/wireless/admtek/adm8211.c @@ -1550,7 +1550,7 @@ fail: return retval; } -static void adm8211_stop(struct ieee80211_hw *dev) +static void adm8211_stop(struct ieee80211_hw *dev, bool suspend) { struct adm8211_priv *priv = dev->priv; diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c index 5a55db349cb5..156f3650c006 100644 --- a/drivers/net/wireless/ath/ar5523/ar5523.c +++ b/drivers/net/wireless/ath/ar5523/ar5523.c @@ -1061,7 +1061,7 @@ err: return error; } -static void ar5523_stop(struct ieee80211_hw *hw) +static void ar5523_stop(struct ieee80211_hw *hw, bool suspend) { struct ar5523 *ar = hw->priv; diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index 4f385f4a8cef..876aed765833 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -68,6 +68,12 @@ config ATH10K_DEBUGFS If unsure, say Y to make it easier to debug problems. 
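As context for the vrf hunks above: they move every counter update under the per-CPU dstats syncp using the u64_stats helpers and drop the driver's private vrf_get_stats64() aggregator, since once the updates are done consistently the core's generic per-CPU dstats collection can report the totals. Below is a minimal sketch of the writer/reader discipline those helpers assume; the structure and function names are illustrative, not taken from the patch.

#include <linux/u64_stats_sync.h>

struct demo_pcpu_stats {
	u64_stats_t rx_packets;
	u64_stats_t rx_bytes;
	struct u64_stats_sync syncp;
};

/* Writer side: every update sits inside a begin/end section so that
 * 32-bit readers observe a consistent packet/byte pair.
 */
static void demo_rx_account(struct demo_pcpu_stats *s, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	u64_stats_inc(&s->rx_packets);
	u64_stats_add(&s->rx_bytes, len);
	u64_stats_update_end(&s->syncp);
}

/* Reader side: a retry loop instead of a lock on the hot path. */
static void demo_rx_snapshot(const struct demo_pcpu_stats *s,
			     u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = u64_stats_read(&s->rx_packets);
		*bytes = u64_stats_read(&s->rx_bytes);
	} while (u64_stats_fetch_retry(&s->syncp, start));
}

This is the same pattern the reworked vrf_xmit() follows: a single begin/end section wraps either the success counters or tx_drops.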
+config ATH10K_LEDS + bool + depends on ATH10K + depends on LEDS_CLASS=y || LEDS_CLASS=MAC80211 + default y + config ATH10K_SPECTRAL bool "Atheros ath10k spectral scan support" depends on ATH10K_DEBUGFS diff --git a/drivers/net/wireless/ath/ath10k/Makefile b/drivers/net/wireless/ath/ath10k/Makefile index 142c777b287f..02bf9b629038 100644 --- a/drivers/net/wireless/ath/ath10k/Makefile +++ b/drivers/net/wireless/ath/ath10k/Makefile @@ -19,6 +19,7 @@ ath10k_core-$(CONFIG_ATH10K_SPECTRAL) += spectral.o ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o ath10k_core-$(CONFIG_THERMAL) += thermal.o +ath10k_core-$(CONFIG_ATH10K_LEDS) += leds.o ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o ath10k_core-$(CONFIG_PM) += wow.o ath10k_core-$(CONFIG_DEV_COREDUMP) += coredump.o diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index bdf0552cd1c3..b3294287bce1 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -27,6 +27,7 @@ #include "testmode.h" #include "wmi-ops.h" #include "coredump.h" +#include "leds.h" unsigned int ath10k_debug_mask; EXPORT_SYMBOL(ath10k_debug_mask); @@ -68,6 +69,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca988x hw2.0", .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, + .led_pin = 1, .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, @@ -108,6 +110,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca988x hw2.0 ubiquiti", .patch_load_addr = QCA988X_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, + .led_pin = 0, .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, @@ -149,6 +152,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca9887 hw1.0", .patch_load_addr = QCA9887_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 7, + .led_pin = 1, .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_ALL, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, @@ -190,6 +194,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca6174 hw3.2 sdio", .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, .uart_pin = 19, + .led_pin = 0, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, @@ -226,6 +231,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca6164 hw2.1", .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, .uart_pin = 6, + .led_pin = 0, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, @@ -266,6 +272,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca6174 hw2.1", .patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR, .uart_pin = 6, + .led_pin = 0, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, @@ -306,6 +313,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca6174 hw3.0", .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, .uart_pin = 6, + .led_pin = 0, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, @@ -346,6 +354,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca6174 hw3.2", .patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR, .uart_pin = 6, + .led_pin = 0, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, @@ -390,6 +399,7 @@ 
static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca99x0 hw2.0", .patch_load_addr = QCA99X0_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, + .led_pin = 17, .otp_exe_param = 0x00000700, .continuous_frag_desc = true, .cck_rate_map_rev2 = true, @@ -436,6 +446,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca9984/qca9994 hw1.0", .patch_load_addr = QCA9984_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 7, + .led_pin = 17, .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH, .otp_exe_param = 0x00000700, .continuous_frag_desc = true, @@ -488,6 +499,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca9888 hw2.0", .patch_load_addr = QCA9888_HW_2_0_PATCH_LOAD_ADDR, .uart_pin = 7, + .led_pin = 17, .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH, .otp_exe_param = 0x00000700, .continuous_frag_desc = true, @@ -538,6 +550,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca9377 hw1.0", .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 6, + .led_pin = 0, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, @@ -578,6 +591,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca9377 hw1.1", .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 6, + .led_pin = 0, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, @@ -620,6 +634,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca9377 hw1.1 sdio", .patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 19, + .led_pin = 0, .otp_exe_param = 0, .channel_counters_freq_hz = 88000, .max_probe_resp_desc_thres = 0, @@ -653,6 +668,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .name = "qca4019 hw1.0", .patch_load_addr = QCA4019_HW_1_0_PATCH_LOAD_ADDR, .uart_pin = 7, + .led_pin = 0, .cc_wraparound_type = ATH10K_HW_CC_WRAP_SHIFTED_EACH, .otp_exe_param = 0x0010000, .continuous_frag_desc = true, @@ -698,6 +714,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .dev_id = 0, .bus = ATH10K_BUS_SNOC, .name = "wcn3990 hw1.0", + .led_pin = 0, .continuous_frag_desc = true, .tx_chain_mask = 0x7, .rx_chain_mask = 0x7, @@ -3224,6 +3241,10 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode, goto err_hif_stop; } + status = ath10k_leds_start(ar); + if (status) + goto err_hif_stop; + return 0; err_hif_stop: @@ -3482,9 +3503,18 @@ static void ath10k_core_register_work(struct work_struct *work) goto err_spectral_destroy; } + status = ath10k_leds_register(ar); + if (status) { + ath10k_err(ar, "could not register leds: %d\n", + status); + goto err_thermal_unregister; + } + set_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags); return; +err_thermal_unregister: + ath10k_thermal_unregister(ar); err_spectral_destroy: ath10k_spectral_destroy(ar); err_debug_destroy: @@ -3520,6 +3550,8 @@ void ath10k_core_unregister(struct ath10k *ar) if (!test_bit(ATH10K_FLAG_CORE_REGISTERED, &ar->dev_flags)) return; + ath10k_leds_unregister(ar); + ath10k_thermal_unregister(ar); /* Stop spectral before unregistering from mac80211 to remove the * relayfs debugfs file cleanly. 
Otherwise the parent debugfs tree diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index b00099f0b24e..446dca74f06a 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -15,6 +15,7 @@ #include #include #include +#include <linux/leds.h> #include "htt.h" #include "htc.h" @@ -1258,6 +1259,13 @@ struct ath10k { bool utf_monitor; } testmode; + struct { + struct gpio_led wifi_led; + struct led_classdev cdev; + char label[48]; + u32 gpio_state_pin; + } leds; + struct { /* protected by data_lock */ u32 rx_crc_err_drop; diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 48897e5eca06..442091c6dfd2 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -512,6 +512,7 @@ struct ath10k_hw_params { const char *name; u32 patch_load_addr; int uart_pin; + int led_pin; u32 otp_exe_param; /* Type of hw cycle counter wraparound logic, for more info diff --git a/drivers/net/wireless/ath/ath10k/leds.c b/drivers/net/wireless/ath/ath10k/leds.c new file mode 100644 index 000000000000..9b1d04eb4265 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/leds.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: ISC +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018 Sebastian Gottschall + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + */ + +#include <linux/leds.h> + +#include "core.h" +#include "wmi.h" +#include "wmi-ops.h" + +#include "leds.h" + +static int ath10k_leds_set_brightness_blocking(struct led_classdev *led_cdev, + enum led_brightness brightness) +{ + struct ath10k *ar = container_of(led_cdev, struct ath10k, + leds.cdev); + struct gpio_led *led = &ar->leds.wifi_led; + + mutex_lock(&ar->conf_mutex); + + if (ar->state != ATH10K_STATE_ON) + goto out; + + ar->leds.gpio_state_pin = (brightness != LED_OFF) ^ led->active_low; + ath10k_wmi_gpio_output(ar, led->gpio, ar->leds.gpio_state_pin); + +out: + mutex_unlock(&ar->conf_mutex); + + return 0; +} + +int ath10k_leds_start(struct ath10k *ar) +{ + if (ar->hw_params.led_pin == 0) + /* leds not supported */ + return 0; + + /* under some circumstances, the gpio pin gets reconfigured + * to default state by the firmware, so we need to + * reconfigure it. This behaviour has only been seen on + * QCA9984 and QCA99XX devices so far. + */ + ath10k_wmi_gpio_config(ar, ar->hw_params.led_pin, 0, + WMI_GPIO_PULL_NONE, WMI_GPIO_INTTYPE_DISABLE); + ath10k_wmi_gpio_output(ar, ar->hw_params.led_pin, 1); + + return 0; +} + +int ath10k_leds_register(struct ath10k *ar) +{ + int ret; + + if (ar->hw_params.led_pin == 0) + /* leds not supported */ + return 0; + + snprintf(ar->leds.label, sizeof(ar->leds.label), "ath10k-%s", + wiphy_name(ar->hw->wiphy)); + ar->leds.wifi_led.active_low = 1; + ar->leds.wifi_led.gpio = ar->hw_params.led_pin; + ar->leds.wifi_led.name = ar->leds.label; + ar->leds.wifi_led.default_state = LEDS_GPIO_DEFSTATE_KEEP; + + ar->leds.cdev.name = ar->leds.label; + ar->leds.cdev.brightness_set_blocking = ath10k_leds_set_brightness_blocking; + ar->leds.cdev.default_trigger = ar->leds.wifi_led.default_trigger; + + ret = led_classdev_register(wiphy_dev(ar->hw->wiphy), &ar->leds.cdev); + if (ret) + return ret; + + return 0; +} + +void ath10k_leds_unregister(struct ath10k *ar) +{ + if (ar->hw_params.led_pin == 0) + /* leds not supported */ + return; + + led_classdev_unregister(&ar->leds.cdev); +} + diff --git
a/drivers/net/wireless/ath/ath10k/leds.h b/drivers/net/wireless/ath/ath10k/leds.h new file mode 100644 index 000000000000..56325b0875e5 --- /dev/null +++ b/drivers/net/wireless/ath/ath10k/leds.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: ISC */ +/* + * Copyright (c) 2005-2011 Atheros Communications Inc. + * Copyright (c) 2011-2017 Qualcomm Atheros, Inc. + * Copyright (c) 2018 Sebastian Gottschall + * Copyright (c) 2018 The Linux Foundation. All rights reserved. + */ + +#ifndef _LEDS_H_ +#define _LEDS_H_ + +#include "core.h" + +#ifdef CONFIG_ATH10K_LEDS +void ath10k_leds_unregister(struct ath10k *ar); +int ath10k_leds_start(struct ath10k *ar); +int ath10k_leds_register(struct ath10k *ar); +#else +static inline void ath10k_leds_unregister(struct ath10k *ar) +{ +} + +static inline int ath10k_leds_start(struct ath10k *ar) +{ + return 0; +} + +static inline int ath10k_leds_register(struct ath10k *ar) +{ + return 0; +} + +#endif +#endif /* _LEDS_H_ */ diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index e322b528baaf..a5da32e87106 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -25,6 +25,7 @@ #include "wmi-tlv.h" #include "wmi-ops.h" #include "wow.h" +#include "leds.h" /*********/ /* Rates */ @@ -5362,7 +5363,7 @@ err: return ret; } -static void ath10k_stop(struct ieee80211_hw *hw) +static void ath10k_stop(struct ieee80211_hw *hw, bool suspend) { struct ath10k *ar = hw->priv; u32 opt; diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c index 38e939f572a9..f1f33af0170a 100644 --- a/drivers/net/wireless/ath/ath10k/qmi.c +++ b/drivers/net/wireless/ath/ath10k/qmi.c @@ -1040,6 +1040,10 @@ static void ath10k_qmi_driver_event_work(struct work_struct *work) switch (event->type) { case ATH10K_QMI_EVENT_SERVER_ARRIVE: ath10k_qmi_event_server_arrive(qmi); + if (qmi->no_msa_ready_indicator) { + ath10k_info(ar, "qmi not waiting for msa_ready indicator"); + ath10k_qmi_event_msa_ready(qmi); + } break; case ATH10K_QMI_EVENT_SERVER_EXIT: ath10k_qmi_event_server_exit(qmi); @@ -1048,6 +1052,10 @@ static void ath10k_qmi_driver_event_work(struct work_struct *work) ath10k_qmi_event_fw_ready_ind(qmi); break; case ATH10K_QMI_EVENT_MSA_READY_IND: + if (qmi->no_msa_ready_indicator) { + ath10k_warn(ar, "qmi unexpected msa_ready indicator"); + break; + } ath10k_qmi_event_msa_ready(qmi); break; default: @@ -1077,6 +1085,9 @@ int ath10k_qmi_init(struct ath10k *ar, u32 msa_size) if (of_property_read_bool(dev->of_node, "qcom,msa-fixed-perm")) qmi->msa_fixed_perm = true; + if (of_property_read_bool(dev->of_node, "qcom,no-msa-ready-indicator")) + qmi->no_msa_ready_indicator = true; + ret = qmi_handle_init(&qmi->qmi_hdl, WLFW_BDF_DOWNLOAD_REQ_MSG_V01_MAX_MSG_LEN, &ath10k_qmi_ops, qmi_msg_handler); diff --git a/drivers/net/wireless/ath/ath10k/qmi.h b/drivers/net/wireless/ath/ath10k/qmi.h index 89464239fe96..0816eb4e4a18 100644 --- a/drivers/net/wireless/ath/ath10k/qmi.h +++ b/drivers/net/wireless/ath/ath10k/qmi.h @@ -107,6 +107,7 @@ struct ath10k_qmi { char fw_build_timestamp[MAX_TIMESTAMP_LEN + 1]; struct ath10k_qmi_cal_data cal_data[MAX_NUM_CAL_V01]; bool msa_fixed_perm; + bool no_msa_ready_indicator; enum ath10k_qmi_state state; }; diff --git a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index aa57d807491c..f3f6b5954b27 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -226,7 +226,10 @@ struct 
wmi_ops { const struct wmi_bb_timing_cfg_arg *arg); struct sk_buff *(*gen_per_peer_per_tid_cfg)(struct ath10k *ar, const struct wmi_per_peer_per_tid_cfg_arg *arg); + struct sk_buff *(*gen_gpio_config)(struct ath10k *ar, u32 gpio_num, + u32 input, u32 pull_type, u32 intr_mode); + + struct sk_buff *(*gen_gpio_output)(struct ath10k *ar, u32 gpio_num, u32 set); }; int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id); @@ -1122,6 +1125,35 @@ ath10k_wmi_force_fw_hang(struct ath10k *ar, return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid); } +static inline int ath10k_wmi_gpio_config(struct ath10k *ar, u32 gpio_num, + u32 input, u32 pull_type, u32 intr_mode) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_gpio_config) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_gpio_config(ar, gpio_num, input, pull_type, intr_mode); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->gpio_config_cmdid); +} + +static inline int ath10k_wmi_gpio_output(struct ath10k *ar, u32 gpio_num, u32 set) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_gpio_output) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_gpio_output(ar, gpio_num, set); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->gpio_output_cmdid); +} + static inline int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u64 module_enable, u32 log_level) { diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index aed97fd121ba..dbaf26d6a7a6 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -4606,6 +4606,8 @@ static const struct wmi_ops wmi_tlv_ops = { .gen_echo = ath10k_wmi_tlv_op_gen_echo, .gen_vdev_spectral_conf = ath10k_wmi_tlv_op_gen_vdev_spectral_conf, .gen_vdev_spectral_enable = ath10k_wmi_tlv_op_gen_vdev_spectral_enable, + /* .gen_gpio_config not implemented */ + /* .gen_gpio_output not implemented */ }; static const struct wmi_peer_flags_map wmi_tlv_peer_flags_map = { diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 80d255aaff1b..fe2344598364 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -7493,6 +7493,49 @@ ath10k_wmi_op_gen_peer_set_param(struct ath10k *ar, u32 vdev_id, return skb; } +static struct sk_buff *ath10k_wmi_op_gen_gpio_config(struct ath10k *ar, + u32 gpio_num, u32 input, + u32 pull_type, u32 intr_mode) +{ + struct wmi_gpio_config_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_gpio_config_cmd *)skb->data; + cmd->pull_type = __cpu_to_le32(pull_type); + cmd->gpio_num = __cpu_to_le32(gpio_num); + cmd->input = __cpu_to_le32(input); + cmd->intr_mode = __cpu_to_le32(intr_mode); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_config gpio_num 0x%08x input 0x%08x pull_type 0x%08x intr_mode 0x%08x\n", + gpio_num, input, pull_type, intr_mode); + + return skb; +} + +static struct sk_buff *ath10k_wmi_op_gen_gpio_output(struct ath10k *ar, + u32 gpio_num, u32 set) +{ + struct wmi_gpio_output_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_gpio_output_cmd *)skb->data; + cmd->gpio_num = __cpu_to_le32(gpio_num); + cmd->set = __cpu_to_le32(set); + + ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi gpio_output gpio_num 0x%08x set 0x%08x\n", + gpio_num, set); + 
return skb; +} + static struct sk_buff * ath10k_wmi_op_gen_set_psmode(struct ath10k *ar, u32 vdev_id, enum wmi_sta_ps_mode psmode) @@ -9157,6 +9200,9 @@ static const struct wmi_ops wmi_ops = { .fw_stats_fill = ath10k_wmi_main_op_fw_stats_fill, .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, .gen_echo = ath10k_wmi_op_gen_echo, + .gen_gpio_config = ath10k_wmi_op_gen_gpio_config, + .gen_gpio_output = ath10k_wmi_op_gen_gpio_output, + /* .gen_bcn_tmpl not implemented */ /* .gen_prb_tmpl not implemented */ /* .gen_p2p_go_bcn_ie not implemented */ @@ -9227,6 +9273,8 @@ static const struct wmi_ops wmi_10_1_ops = { .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, .gen_echo = ath10k_wmi_op_gen_echo, + .gen_gpio_config = ath10k_wmi_op_gen_gpio_config, + .gen_gpio_output = ath10k_wmi_op_gen_gpio_output, /* .gen_bcn_tmpl not implemented */ /* .gen_prb_tmpl not implemented */ /* .gen_p2p_go_bcn_ie not implemented */ @@ -9299,6 +9347,8 @@ static const struct wmi_ops wmi_10_2_ops = { .gen_delba_send = ath10k_wmi_op_gen_delba_send, .fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill, .get_vdev_subtype = ath10k_wmi_op_get_vdev_subtype, + .gen_gpio_config = ath10k_wmi_op_gen_gpio_config, + .gen_gpio_output = ath10k_wmi_op_gen_gpio_output, /* .gen_pdev_enable_adaptive_cca not implemented */ }; @@ -9370,6 +9420,8 @@ static const struct wmi_ops wmi_10_2_4_ops = { ath10k_wmi_op_gen_pdev_enable_adaptive_cca, .get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype, .gen_bb_timing = ath10k_wmi_10_2_4_op_gen_bb_timing, + .gen_gpio_config = ath10k_wmi_op_gen_gpio_config, + .gen_gpio_output = ath10k_wmi_op_gen_gpio_output, /* .gen_bcn_tmpl not implemented */ /* .gen_prb_tmpl not implemented */ /* .gen_p2p_go_bcn_ie not implemented */ @@ -9451,6 +9503,8 @@ static const struct wmi_ops wmi_10_4_ops = { .gen_pdev_bss_chan_info_req = ath10k_wmi_10_2_op_gen_pdev_bss_chan_info, .gen_echo = ath10k_wmi_op_gen_echo, .gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config, + .gen_gpio_config = ath10k_wmi_op_gen_gpio_config, + .gen_gpio_output = ath10k_wmi_op_gen_gpio_output, }; int ath10k_wmi_attach(struct ath10k *ar) diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index 2379501225a4..0faefc0a9a40 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -3034,6 +3034,41 @@ enum wmi_10_4_feature_mask { }; +/* WMI_GPIO_CONFIG_CMDID */ +enum { + WMI_GPIO_PULL_NONE, + WMI_GPIO_PULL_UP, + WMI_GPIO_PULL_DOWN, +}; + +enum { + WMI_GPIO_INTTYPE_DISABLE, + WMI_GPIO_INTTYPE_RISING_EDGE, + WMI_GPIO_INTTYPE_FALLING_EDGE, + WMI_GPIO_INTTYPE_BOTH_EDGE, + WMI_GPIO_INTTYPE_LEVEL_LOW, + WMI_GPIO_INTTYPE_LEVEL_HIGH +}; + +/* WMI_GPIO_CONFIG_CMDID */ +struct wmi_gpio_config_cmd { + __le32 gpio_num; /* GPIO number to be setup */ + __le32 input; /* 0 - Output/ 1 - Input */ + __le32 pull_type; /* Pull type defined above */ + __le32 intr_mode; /* Interrupt mode defined above (Input) */ +} __packed; + +/* WMI_GPIO_OUTPUT_CMDID */ +struct wmi_gpio_output_cmd { + __le32 gpio_num; /* GPIO number to be setup */ + __le32 set; /* Set the GPIO pin*/ +} __packed; + +/* WMI_GPIO_INPUT_EVENTID */ +struct wmi_gpio_input_event { + __le32 gpio_num; /* GPIO number which changed state */ +} __packed; + struct wmi_ext_resource_config_10_4_cmd { /* contains enum wmi_host_platform_type */ __le32 host_platform_config; diff --git a/drivers/net/wireless/ath/ath11k/ahb.c b/drivers/net/wireless/ath/ath11k/ahb.c index 
ca0f17ddebba..e3ff4786c714 100644 --- a/drivers/net/wireless/ath/ath11k/ahb.c +++ b/drivers/net/wireless/ath/ath11k/ahb.c @@ -954,6 +954,36 @@ static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab) return 0; } +static int ath11k_ahb_ce_remap(struct ath11k_base *ab) +{ + const struct ce_remap *ce_remap = ab->hw_params.ce_remap; + struct platform_device *pdev = ab->pdev; + + if (!ce_remap) { + /* no separate CE register space */ + ab->mem_ce = ab->mem; + return 0; + } + + /* ce register space is moved out of wcss unlike ipq8074 or ipq6018 + * and the space is not contiguous, hence remapping the CE registers + * to a new space for accessing them. + */ + ab->mem_ce = ioremap(ce_remap->base, ce_remap->size); + if (!ab->mem_ce) { + dev_err(&pdev->dev, "ce ioremap error\n"); + return -ENOMEM; + } + + return 0; +} + +static void ath11k_ahb_ce_unmap(struct ath11k_base *ab) +{ + if (ab->hw_params.ce_remap) + iounmap(ab->mem_ce); +} + static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab) { struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); @@ -1146,25 +1176,13 @@ static int ath11k_ahb_probe(struct platform_device *pdev) if (ret) goto err_core_free; - ab->mem_ce = ab->mem; - - if (ab->hw_params.ce_remap) { - const struct ce_remap *ce_remap = ab->hw_params.ce_remap; - /* ce register space is moved out of wcss unlike ipq8074 or ipq6018 - * and the space is not contiguous, hence remapping the CE registers - * to a new space for accessing them. - */ - ab->mem_ce = ioremap(ce_remap->base, ce_remap->size); - if (!ab->mem_ce) { - dev_err(&pdev->dev, "ce ioremap error\n"); - ret = -ENOMEM; - goto err_core_free; - } - } + ret = ath11k_ahb_ce_remap(ab); + if (ret) + goto err_core_free; ret = ath11k_ahb_fw_resources_init(ab); if (ret) - goto err_core_free; + goto err_ce_unmap; ret = ath11k_ahb_setup_smp2p_handle(ab); if (ret) @@ -1216,6 +1234,9 @@ err_release_smp2p_handle: err_fw_deinit: ath11k_ahb_fw_resource_deinit(ab); +err_ce_unmap: + ath11k_ahb_ce_unmap(ab); + err_core_free: ath11k_core_free(ab); platform_set_drvdata(pdev, NULL); @@ -1248,9 +1269,7 @@ static void ath11k_ahb_free_resources(struct ath11k_base *ab) ath11k_ahb_release_smp2p_handle(ab); ath11k_ahb_fw_resource_deinit(ab); ath11k_ce_free_pipes(ab); - - if (ab->hw_params.ce_remap) - iounmap(ab->mem_ce); + ath11k_ahb_ce_unmap(ab); ath11k_core_free(ab); platform_set_drvdata(pdev, NULL); diff --git a/drivers/net/wireless/ath/ath11k/ce.h b/drivers/net/wireless/ath/ath11k/ce.h index 69946fc70077..bcde2fcf02cf 100644 --- a/drivers/net/wireless/ath/ath11k/ce.h +++ b/drivers/net/wireless/ath/ath11k/ce.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH11K_CE_H @@ -146,7 +146,7 @@ struct ath11k_ce_ring { /* Host address space */ void *base_addr_owner_space_unaligned; /* CE address space */ - u32 base_addr_ce_space_unaligned; + dma_addr_t base_addr_ce_space_unaligned; /* Actual start of descriptors. * Aligned to descriptor-size boundary. 
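The ce.h hunk above, together with its companion hunk just below, widens the cached CE ring base addresses from u32 to dma_addr_t. The likely rationale: dma_addr_t can be 64 bits wide even on a 32-bit kernel (for example ARM LPAE, or any 64-bit host), so keeping a coherent allocation's bus address in a u32 silently drops the upper bits once the buffer lands above 4 GiB. A hedged sketch of the failure mode, with hypothetical names not from the patch:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical helper: allocate a descriptor ring and report its bus
 * address through the proper type.
 */
static void *demo_alloc_ring(struct device *dev, size_t size,
			     dma_addr_t *paddr)
{
	return dma_alloc_coherent(dev, size, paddr, GFP_KERNEL);
}

/* What the old u32 fields effectively did:
 *
 *	dma_addr_t paddr;
 *	void *ring = demo_alloc_ring(dev, sz, &paddr);
 *	u32 base = paddr;	// truncates above 4 GiB
 *
 * Any register write derived from 'base' would then point the
 * hardware at the wrong physical location.
 */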
@@ -156,7 +156,7 @@ struct ath11k_ce_ring { void *base_addr_owner_space; /* CE address space */ - u32 base_addr_ce_space; + dma_addr_t base_addr_ce_space; /* HAL ring id */ u32 hal_ring_id; diff --git a/drivers/net/wireless/ath/ath11k/core.c b/drivers/net/wireless/ath/ath11k/core.c index b82e8fb28541..03187df26000 100644 --- a/drivers/net/wireless/ath/ath11k/core.c +++ b/drivers/net/wireless/ath/ath11k/core.c @@ -62,7 +62,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = false, .rxdma1_enable = true, - .num_rxmda_per_pdev = 1, + .num_rxdma_per_pdev = 1, .rx_mac_buf_ring = false, .vdev_start_delay = false, .htt_peer_map_v2 = true, @@ -148,7 +148,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = false, .rxdma1_enable = true, - .num_rxmda_per_pdev = 1, + .num_rxdma_per_pdev = 1, .rx_mac_buf_ring = false, .vdev_start_delay = false, .htt_peer_map_v2 = true, @@ -232,7 +232,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = true, .rxdma1_enable = false, - .num_rxmda_per_pdev = 2, + .num_rxdma_per_pdev = 2, .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, @@ -320,7 +320,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .svc_to_ce_map_len = 18, .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .rxdma1_enable = true, - .num_rxmda_per_pdev = 1, + .num_rxdma_per_pdev = 1, .rx_mac_buf_ring = false, .vdev_start_delay = false, .htt_peer_map_v2 = true, @@ -404,7 +404,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = true, .rxdma1_enable = false, - .num_rxmda_per_pdev = 2, + .num_rxdma_per_pdev = 2, .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, @@ -492,7 +492,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .svc_to_ce_map_len = 14, .single_pdev_only = true, .rxdma1_enable = false, - .num_rxmda_per_pdev = 2, + .num_rxdma_per_pdev = 2, .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, @@ -580,7 +580,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = true, .rxdma1_enable = false, - .num_rxmda_per_pdev = 1, + .num_rxdma_per_pdev = 1, .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, @@ -673,7 +673,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .ce_ie_addr = &ath11k_ce_ie_addr_ipq5018, .ce_remap = &ath11k_ce_remap_ipq5018, .rxdma1_enable = true, - .num_rxmda_per_pdev = RXDMA_PER_PDEV_5018, + .num_rxdma_per_pdev = RXDMA_PER_PDEV_5018, .rx_mac_buf_ring = false, .vdev_start_delay = false, .htt_peer_map_v2 = true, @@ -744,7 +744,7 @@ static const struct ath11k_hw_params ath11k_hw_params[] = { .ce_ie_addr = &ath11k_ce_ie_addr_ipq8074, .single_pdev_only = true, .rxdma1_enable = false, - .num_rxmda_per_pdev = 2, + .num_rxdma_per_pdev = 2, .rx_mac_buf_ring = true, .vdev_start_delay = true, .htt_peer_map_v2 = false, @@ -1009,6 +1009,16 @@ int ath11k_core_resume(struct ath11k_base *ab) return -ETIMEDOUT; } + if (ab->hw_params.current_cc_support && + ar->alpha2[0] != 0 && ar->alpha2[1] != 0) { + ret = ath11k_reg_set_cc(ar); + if (ret) { + ath11k_warn(ab, "failed to set country code during resume: %d\n", + ret); + return ret; + } + } + ret = ath11k_dp_rx_pktlog_start(ab); if (ret) ath11k_warn(ab, 
"failed to start rx pktlog during resume: %d\n", @@ -1801,7 +1811,7 @@ static int ath11k_core_start(struct ath11k_base *ab) } /* put hardware to DBS mode */ - if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1) { + if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxdma_per_pdev > 1) { ret = ath11k_wmi_set_hw_mode(ab, WMI_HOST_HW_MODE_DBS); if (ret) { ath11k_err(ab, "failed to send dbs mode: %d\n", ret); @@ -1978,23 +1988,20 @@ static void ath11k_update_11d(struct work_struct *work) struct ath11k_base *ab = container_of(work, struct ath11k_base, update_11d_work); struct ath11k *ar; struct ath11k_pdev *pdev; - struct wmi_set_current_country_params set_current_param = {}; int ret, i; - spin_lock_bh(&ab->base_lock); - memcpy(&set_current_param.alpha2, &ab->new_alpha2, 2); - spin_unlock_bh(&ab->base_lock); - - ath11k_dbg(ab, ATH11K_DBG_WMI, "update 11d new cc %c%c\n", - set_current_param.alpha2[0], - set_current_param.alpha2[1]); - for (i = 0; i < ab->num_radios; i++) { pdev = &ab->pdevs[i]; ar = pdev->ar; - memcpy(&ar->alpha2, &set_current_param.alpha2, 2); - ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param); + spin_lock_bh(&ab->base_lock); + memcpy(&ar->alpha2, &ab->new_alpha2, 2); + spin_unlock_bh(&ab->base_lock); + + ath11k_dbg(ab, ATH11K_DBG_WMI, "update 11d new cc %c%c for pdev %d\n", + ar->alpha2[0], ar->alpha2[1], i); + + ret = ath11k_reg_set_cc(ar); if (ret) ath11k_warn(ar->ab, "pdev id %d failed set current country code: %d\n", diff --git a/drivers/net/wireless/ath/ath11k/core.h b/drivers/net/wireless/ath/ath11k/core.h index 205f40ee6b66..df24f0e409af 100644 --- a/drivers/net/wireless/ath/ath11k/core.h +++ b/drivers/net/wireless/ath/ath11k/core.h @@ -330,6 +330,9 @@ struct ath11k_chan_power_info { s8 tx_power; }; +/* ath11k only deals with 160 MHz, so 8 subchannels */ +#define ATH11K_NUM_PWR_LEVELS 8 + /** * struct ath11k_reg_tpc_power_info - regulatory TPC power info * @is_psd_power: is PSD power or not @@ -346,10 +349,10 @@ struct ath11k_reg_tpc_power_info { u8 eirp_power; enum wmi_reg_6ghz_ap_type ap_power_type; u8 num_pwr_levels; - u8 reg_max[IEEE80211_MAX_NUM_PWR_LEVEL]; + u8 reg_max[ATH11K_NUM_PWR_LEVELS]; u8 ap_constraint_power; - s8 tpe[IEEE80211_MAX_NUM_PWR_LEVEL]; - struct ath11k_chan_power_info chan_power_info[IEEE80211_MAX_NUM_PWR_LEVEL]; + s8 tpe[ATH11K_NUM_PWR_LEVELS]; + struct ath11k_chan_power_info chan_power_info[ATH11K_NUM_PWR_LEVELS]; }; struct ath11k_vif { diff --git a/drivers/net/wireless/ath/ath11k/debugfs.c b/drivers/net/wireless/ath/ath11k/debugfs.c index 414a5ce279f7..57281a135dd7 100644 --- a/drivers/net/wireless/ath/ath11k/debugfs.c +++ b/drivers/net/wireless/ath/ath11k/debugfs.c @@ -668,7 +668,7 @@ static ssize_t ath11k_write_extd_rx_stats(struct file *file, ar->debug.rx_filter = tlv_filter.rx_filter; - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id, HAL_RXDMA_MONITOR_STATUS, @@ -1112,7 +1112,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file, } /* Clear rx filter set for monitor mode and rx status */ - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id, HAL_RXDMA_MONITOR_STATUS, @@ 
-1171,7 +1171,7 @@ static ssize_t ath11k_write_pktlog_filter(struct file *file, HTT_RX_FP_DATA_FILTER_FLASG3; } - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id, ar->dp.mac_id + i, diff --git a/drivers/net/wireless/ath/ath11k/dp.c b/drivers/net/wireless/ath/ath11k/dp.c index 1a62407e5a9f..fbf666d0ecf1 100644 --- a/drivers/net/wireless/ath/ath11k/dp.c +++ b/drivers/net/wireless/ath/ath11k/dp.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -830,8 +830,8 @@ int ath11k_dp_service_srng(struct ath11k_base *ab, if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) { for (i = 0; i < ab->num_radios; i++) { - for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) { - int id = i * ab->hw_params.num_rxmda_per_pdev + j; + for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) { + int id = i * ab->hw_params.num_rxdma_per_pdev + j; if (ab->hw_params.ring_mask->rx_mon_status[grp_id] & BIT(id)) { @@ -853,8 +853,8 @@ int ath11k_dp_service_srng(struct ath11k_base *ab, ath11k_dp_process_reo_status(ab); for (i = 0; i < ab->num_radios; i++) { - for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) { - int id = i * ab->hw_params.num_rxmda_per_pdev + j; + for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) { + int id = i * ab->hw_params.num_rxdma_per_pdev + j; if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) { work_done = ath11k_dp_process_rxdma_err(ab, id, budget); @@ -913,7 +913,7 @@ void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab) spin_lock_init(&dp->rx_refill_buf_ring.idr_lock); atomic_set(&dp->num_tx_pending, 0); init_waitqueue_head(&dp->tx_empty_waitq); - for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) { + for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) { idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr); spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock); } diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.c b/drivers/net/wireless/ath/ath11k/dp_rx.c index afd481f5858f..86485580dd89 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.c +++ b/drivers/net/wireless/ath/ath11k/dp_rx.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #include @@ -311,7 +311,7 @@ static void ath11k_dp_service_mon_ring(struct timer_list *t) struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer); int i; - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) + for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET); mod_timer(&ab->mon_reap_timer, jiffies + @@ -324,7 +324,7 @@ static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab) unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS); do { - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) + for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) reaped += ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET); @@ -468,7 +468,7 @@ static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar) rx_ring = &dp->rxdma_mon_buf_ring; ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { rx_ring = &dp->rx_mon_status_refill_ring[i]; ath11k_dp_rxdma_buf_ring_free(ar, rx_ring); } @@ -506,7 +506,7 @@ static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar) ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF); } - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { rx_ring = &dp->rx_mon_status_refill_ring[i]; ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS); } @@ -522,7 +522,7 @@ static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar) ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring); - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { if (ab->hw_params.rx_mac_buf_ring) ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]); @@ -585,7 +585,7 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar) } if (ar->ab->hw_params.rx_mac_buf_ring) { - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ret = ath11k_dp_srng_setup(ar->ab, &dp->rx_mac_buf_ring[i], HAL_RXDMA_BUF, 1, @@ -598,7 +598,7 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar) } } - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i], HAL_RXDMA_DST, 0, dp->mac_id + i, DP_RXDMA_ERR_DST_RING_SIZE); @@ -608,7 +608,7 @@ static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar) } } - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring; ret = ath11k_dp_srng_setup(ar->ab, srng, @@ -1877,8 +1877,7 @@ static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu) CHECKSUM_NONE : CHECKSUM_UNNECESSARY; } -static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, - enum hal_encrypt_type enctype) +int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype) { switch (enctype) { case HAL_ENCRYPT_TYPE_OPEN: @@ -2990,11 +2989,52 @@ ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon, } } +static enum dp_mon_status_buf_state +ath11k_dp_rx_mon_buf_done(struct ath11k_base *ab, struct hal_srng *srng, + struct dp_rxdma_ring *rx_ring) +{ + struct ath11k_skb_rxcb *rxcb; + struct hal_tlv_hdr *tlv; + struct sk_buff *skb; + void *status_desc; + dma_addr_t paddr; + u32 cookie; + int buf_id; 
+ u8 rbm; + + status_desc = ath11k_hal_srng_src_next_peek(ab, srng); + if (!status_desc) + return DP_MON_STATUS_NO_DMA; + + ath11k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm); + + buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie); + + spin_lock_bh(&rx_ring->idr_lock); + skb = idr_find(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + + if (!skb) + return DP_MON_STATUS_NO_DMA; + + rxcb = ATH11K_SKB_RXCB(skb); + dma_sync_single_for_cpu(ab->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + + tlv = (struct hal_tlv_hdr *)skb->data; + if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_STATUS_BUFFER_DONE) + return DP_MON_STATUS_NO_DMA; + + return DP_MON_STATUS_REPLINISH; +} + static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, int *budget, struct sk_buff_head *skb_list) { struct ath11k *ar; const struct ath11k_hw_hal_params *hal_params; + enum dp_mon_status_buf_state reap_status; struct ath11k_pdev_dp *dp; struct dp_rxdma_ring *rx_ring; struct ath11k_mon_data *pmon; @@ -3057,15 +3097,38 @@ static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id, ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n", FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl), buf_id); - /* If done status is missing, hold onto status - * ring until status is done for this status - * ring buffer. - * Keep HP in mon_status_ring unchanged, - * and break from here. - * Check status for same buffer for next time + /* RxDMA status done bit might not be set even + * though tp is moved by HW. */ - pmon->buf_state = DP_MON_STATUS_NO_DMA; - break; + + /* If done status is missing: + * 1. As per the MAC team's suggestion, + * when the HP + 1 entry is peeked and its DMA + * is not done, but the HP + 2 entry's DMA done + * is set, skip the HP + 1 entry and + * start processing it in the next interrupt. + * 2. If the HP + 2 entry's DMA done is not set either, + * poll on the HP + 1 entry until its DMA done is set.
+ * Check status for same buffer for next time + * dp_rx_mon_status_srng_process + */ + + reap_status = ath11k_dp_rx_mon_buf_done(ab, srng, + rx_ring); + if (reap_status == DP_MON_STATUS_NO_DMA) + continue; + + spin_lock_bh(&rx_ring->idr_lock); + idr_remove(&rx_ring->bufs_idr, buf_id); + spin_unlock_bh(&rx_ring->idr_lock); + + dma_unmap_single(ab->dev, rxcb->paddr, + skb->len + skb_tailroom(skb), + DMA_FROM_DEVICE); + + dev_kfree_skb_any(skb); + pmon->buf_state = DP_MON_STATUS_REPLINISH; + goto move_next; } spin_lock_bh(&rx_ring->idr_lock); @@ -4391,7 +4454,7 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id) } if (ab->hw_params.rx_mac_buf_ring) { - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = dp->rx_mac_buf_ring[i].ring_id; ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, HAL_RXDMA_BUF); @@ -4403,7 +4466,7 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id) } } - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = dp->rxdma_err_dst_ring[i].ring_id; ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, HAL_RXDMA_DST); @@ -4443,7 +4506,7 @@ int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id) } config_refill_ring: - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, HAL_RXDMA_MONITOR_STATUS); diff --git a/drivers/net/wireless/ath/ath11k/dp_rx.h b/drivers/net/wireless/ath/ath11k/dp_rx.h index 623da3bf9dc8..c322e30caa96 100644 --- a/drivers/net/wireless/ath/ath11k/dp_rx.h +++ b/drivers/net/wireless/ath/ath11k/dp_rx.h @@ -1,6 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH11K_DP_RX_H #define ATH11K_DP_RX_H @@ -95,4 +96,6 @@ int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab); int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer); +int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype); + #endif /* ATH11K_DP_RX_H */ diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.c b/drivers/net/wireless/ath/ath11k/dp_tx.c index 272b1c35f98d..8522c67baabf 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.c +++ b/drivers/net/wireless/ath/ath11k/dp_tx.c @@ -353,8 +353,12 @@ ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab, if (ts->acked) { if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { info->flags |= IEEE80211_TX_STAT_ACK; - info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR + - ts->ack_rssi; + info->status.ack_signal = ts->ack_rssi; + + if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, + ab->wmi_ab.svc_map)) + info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR; + info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; } else { @@ -584,8 +588,12 @@ static void ath11k_dp_tx_complete_msdu(struct ath11k *ar, if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED && !(info->flags & IEEE80211_TX_CTL_NO_ACK)) { info->flags |= IEEE80211_TX_STAT_ACK; - info->status.ack_signal = ATH11K_DEFAULT_NOISE_FLOOR + - ts->ack_rssi; + info->status.ack_signal = ts->ack_rssi; + + if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, + ab->wmi_ab.svc_map)) + info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR; + info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; } @@ -1035,7 +1043,7 @@ int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask) int ret; int i; - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { skb = ath11k_htc_alloc_skb(ab, len); if (!skb) return -ENOMEM; @@ -1218,7 +1226,7 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset) &tlv_filter); } else if (!reset) { /* set in monitor mode only */ - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = dp->rx_mac_buf_ring[i].ring_id; ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id + i, @@ -1231,7 +1239,7 @@ int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset) if (ret) return ret; - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; if (!reset) { tlv_filter.rx_filter = diff --git a/drivers/net/wireless/ath/ath11k/dp_tx.h b/drivers/net/wireless/ath/ath11k/dp_tx.h index 61be2265e09f..795fe3b8fa0d 100644 --- a/drivers/net/wireless/ath/ath11k/dp_tx.h +++ b/drivers/net/wireless/ath/ath11k/dp_tx.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021, 2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021, 2023-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
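Both tx-completion paths above now apply the same guard, so the conversion can be read as one rule: firmware advertising WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT already reports ack_rssi in dBm, otherwise the value is relative to the noise floor. A hedged sketch of that rule as a helper; the function name is hypothetical and not part of the patch:

	/* Hypothetical helper capturing the shared pattern above. */
	static inline s32 ath11k_ack_rssi_dbm(struct ath11k_base *ab, s8 ack_rssi)
	{
		s32 signal = ack_rssi;

		if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
			      ab->wmi_ab.svc_map))
			signal += ATH11K_DEFAULT_NOISE_FLOOR;	/* dB -> dBm */

		return signal;
	}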
*/ #ifndef ATH11K_DP_TX_H @@ -13,7 +13,7 @@ struct ath11k_dp_htt_wbm_tx_status { u32 msdu_id; bool acked; - int ack_rssi; + s8 ack_rssi; u16 peer_id; }; diff --git a/drivers/net/wireless/ath/ath11k/hal.c b/drivers/net/wireless/ath/ath11k/hal.c index f3d04568c221..f02599bd1c36 100644 --- a/drivers/net/wireless/ath/ath11k/hal.c +++ b/drivers/net/wireless/ath/ath11k/hal.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include #include "hal_tx.h" @@ -796,6 +796,20 @@ u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab, return desc; } +u32 *ath11k_hal_srng_src_next_peek(struct ath11k_base *ab, struct hal_srng *srng) +{ + u32 next_hp; + + lockdep_assert_held(&srng->lock); + + next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size; + + if (next_hp != srng->u.src_ring.cached_tp) + return srng->ring_base_vaddr + next_hp; + + return NULL; +} + u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng) { lockdep_assert_held(&srng->lock); diff --git a/drivers/net/wireless/ath/ath11k/hal.h b/drivers/net/wireless/ath/ath11k/hal.h index e453c137385e..dc8bbe073017 100644 --- a/drivers/net/wireless/ath/ath11k/hal.h +++ b/drivers/net/wireless/ath/ath11k/hal.h @@ -947,6 +947,8 @@ u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng); int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng, bool sync_hw_ptr); u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng); +u32 *ath11k_hal_srng_src_next_peek(struct ath11k_base *ab, + struct hal_srng *srng); u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab, struct hal_srng *srng); u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab, diff --git a/drivers/net/wireless/ath/ath11k/hal_tx.h b/drivers/net/wireless/ath/ath11k/hal_tx.h index c5e88364afe5..46d17abd808b 100644 --- a/drivers/net/wireless/ath/ath11k/hal_tx.h +++ b/drivers/net/wireless/ath/ath11k/hal_tx.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH11K_HAL_TX_H @@ -54,7 +54,7 @@ struct hal_tx_info { struct hal_tx_status { enum hal_wbm_rel_src_module buf_rel_source; enum hal_wbm_tqm_rel_reason status; - u8 ack_rssi; + s8 ack_rssi; u32 flags; /* %HAL_TX_STATUS_FLAGS_ */ u32 ppdu_id; u8 try_cnt; diff --git a/drivers/net/wireless/ath/ath11k/hw.h b/drivers/net/wireless/ath/ath11k/hw.h index 14ef4eb48f80..300322535766 100644 --- a/drivers/net/wireless/ath/ath11k/hw.h +++ b/drivers/net/wireless/ath/ath11k/hw.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
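The new ath11k_hal_srng_src_next_peek() above inspects one entry past the head pointer without advancing it. A worked example of the wraparound arithmetic, with illustrative numbers (units are 32-bit words, as elsewhere in the SRNG code):

	/* Illustrative values only: ring_size = 128, entry_size = 16, hp = 112.
	 * next_hp = (112 + 16) % 128 = 0, so the peek wraps to the ring start.
	 * The descriptor is returned only while next_hp has not caught up with
	 * the cached tail pointer, i.e. the peeked slot is a valid entry.
	 */
	next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
	if (next_hp != srng->u.src_ring.cached_tp)
		desc = srng->ring_base_vaddr + next_hp;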
*/ #ifndef ATH11K_HW_H @@ -167,7 +167,7 @@ struct ath11k_hw_params { bool single_pdev_only; bool rxdma1_enable; - int num_rxmda_per_pdev; + int num_rxdma_per_pdev; bool rx_mac_buf_ring; bool vdev_start_delay; bool htt_peer_map_v2; diff --git a/drivers/net/wireless/ath/ath11k/mac.c b/drivers/net/wireless/ath/ath11k/mac.c index 9b96dbb21d83..ba910ae2c676 100644 --- a/drivers/net/wireless/ath/ath11k/mac.c +++ b/drivers/net/wireless/ath/ath11k/mac.c @@ -4229,6 +4229,7 @@ static int ath11k_install_key(struct ath11k_vif *arvif, switch (key->cipher) { case WLAN_CIPHER_SUITE_CCMP: + case WLAN_CIPHER_SUITE_CCMP_256: arg.key_cipher = WMI_CIPHER_AES_CCM; /* TODO: Re-check if flag is valid */ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; @@ -4238,12 +4239,10 @@ static int ath11k_install_key(struct ath11k_vif *arvif, arg.key_txmic_len = 8; arg.key_rxmic_len = 8; break; - case WLAN_CIPHER_SUITE_CCMP_256: - arg.key_cipher = WMI_CIPHER_AES_CCM; - break; case WLAN_CIPHER_SUITE_GCMP: case WLAN_CIPHER_SUITE_GCMP_256: arg.key_cipher = WMI_CIPHER_AES_GCM; + key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; break; default: ath11k_warn(ar->ab, "cipher %d is not supported\n", key->cipher); @@ -5903,7 +5902,10 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif, { struct ath11k_base *ab = ar->ab; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb); struct ieee80211_tx_info *info; + enum hal_encrypt_type enctype; + unsigned int mic_len; dma_addr_t paddr; int buf_id; int ret; @@ -5927,7 +5929,12 @@ static int ath11k_mac_mgmt_tx_wmi(struct ath11k *ar, struct ath11k_vif *arvif, ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && ieee80211_has_protected(hdr->frame_control)) { - skb_put(skb, IEEE80211_CCMP_MIC_LEN); + if (!(skb_cb->flags & ATH11K_SKB_CIPHER_SET)) + ath11k_warn(ab, "WMI management tx frame without ATH11K_SKB_CIPHER_SET"); + + enctype = ath11k_dp_tx_get_encrypt_type(skb_cb->cipher); + mic_len = ath11k_dp_rx_crypto_mic_len(ar, enctype); + skb_put(skb, mic_len); } } @@ -6108,7 +6115,7 @@ static int ath11k_mac_config_mon_status_default(struct ath11k *ar, bool enable) tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar); } - for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) { ring_id = ar->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id; ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, ar->dp.mac_id + i, @@ -6278,7 +6285,7 @@ err: return ret; } -static void ath11k_mac_op_stop(struct ieee80211_hw *hw) +static void ath11k_mac_op_stop(struct ieee80211_hw *hw, bool suspend) { struct ath11k *ar = hw->priv; struct htt_ppdu_stats_info *ppdu_stats, *tmp; @@ -7507,32 +7514,6 @@ static int ath11k_mac_stop_vdev_early(struct ieee80211_hw *hw, return 0; } -static u8 ath11k_mac_get_tpe_count(u8 txpwr_intrprt, u8 txpwr_cnt) -{ - switch (txpwr_intrprt) { - /* Refer "Table 9-276-Meaning of Maximum Transmit Power Count subfield - * if the Maximum Transmit Power Interpretation subfield is 0 or 2" of - * "IEEE Std 802.11ax 2021". - */ - case IEEE80211_TPE_LOCAL_EIRP: - case IEEE80211_TPE_REG_CLIENT_EIRP: - txpwr_cnt = txpwr_cnt <= 3 ? txpwr_cnt : 3; - txpwr_cnt = txpwr_cnt + 1; - break; - /* Refer "Table 9-277-Meaning of Maximum Transmit Power Count subfield - * if Maximum Transmit Power Interpretation subfield is 1 or 3" of - * "IEEE Std 802.11ax 2021". 
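With the change above, protected deauth/disassoc frames reserve tail room according to the negotiated cipher instead of always assuming CCMP-128. The body of ath11k_dp_rx_crypto_mic_len() is not part of this hunk, so the mapping below is an assumption about what such a helper typically returns, using mac80211's MIC length constants:

	/* Assumed cipher -> MIC length mapping; not taken from this patch. */
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_CCMP_128:
		mic_len = IEEE80211_CCMP_MIC_LEN;	/* 8 bytes */
		break;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		mic_len = IEEE80211_CCMP_256_MIC_LEN;	/* 16 bytes */
		break;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		mic_len = IEEE80211_GCMP_MIC_LEN;	/* 16 bytes */
		break;
	default:
		mic_len = IEEE80211_CCMP_MIC_LEN;
		break;
	}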
- */ - case IEEE80211_TPE_LOCAL_EIRP_PSD: - case IEEE80211_TPE_REG_CLIENT_EIRP_PSD: - txpwr_cnt = txpwr_cnt <= 4 ? txpwr_cnt : 4; - txpwr_cnt = txpwr_cnt ? (BIT(txpwr_cnt - 1)) : 1; - break; - } - - return txpwr_cnt; -} - static u8 ath11k_mac_get_num_pwr_levels(struct cfg80211_chan_def *chan_def) { if (chan_def->chan->flags & IEEE80211_CHAN_PSD) { @@ -7688,7 +7669,7 @@ void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar, struct ieee80211_channel *chan, *temp_chan; u8 pwr_lvl_idx, num_pwr_levels, pwr_reduction; bool is_psd_power = false, is_tpe_present = false; - s8 max_tx_power[IEEE80211_MAX_NUM_PWR_LEVEL], + s8 max_tx_power[ATH11K_NUM_PWR_LEVELS], psd_power, tx_power; s8 eirp_power = 0; u16 start_freq, center_freq; @@ -7701,7 +7682,8 @@ void ath11k_mac_fill_reg_tpc_info(struct ath11k *ar, is_tpe_present = true; num_pwr_levels = arvif->reg_tpc_info.num_pwr_levels; } else { - num_pwr_levels = ath11k_mac_get_num_pwr_levels(&ctx->def); + num_pwr_levels = + ath11k_mac_get_num_pwr_levels(&bss_conf->chanreq.oper); } for (pwr_lvl_idx = 0; pwr_lvl_idx < num_pwr_levels; pwr_lvl_idx++) { @@ -7858,33 +7840,23 @@ static void ath11k_mac_parse_tx_pwr_env(struct ath11k *ar, struct ath11k_base *ab = ar->ab; struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; - struct ieee80211_tx_pwr_env *single_tpe; + struct ieee80211_parsed_tpe_eirp *non_psd = NULL; + struct ieee80211_parsed_tpe_psd *psd = NULL; enum wmi_reg_6ghz_client_type client_type; struct cur_regulatory_info *reg_info; + u8 local_tpe_count, reg_tpe_count; + bool use_local_tpe; int i; - u8 pwr_count, pwr_interpret, pwr_category; - u8 psd_index = 0, non_psd_index = 0, local_tpe_count = 0, reg_tpe_count = 0; - bool use_local_tpe, non_psd_set = false, psd_set = false; reg_info = &ab->reg_info_store[ar->pdev_idx]; client_type = reg_info->client_type; - for (i = 0; i < bss_conf->tx_pwr_env_num; i++) { - single_tpe = &bss_conf->tx_pwr_env[i]; - pwr_category = u8_get_bits(single_tpe->tx_power_info, - IEEE80211_TX_PWR_ENV_INFO_CATEGORY); - pwr_interpret = u8_get_bits(single_tpe->tx_power_info, - IEEE80211_TX_PWR_ENV_INFO_INTERPRET); - - if (pwr_category == client_type) { - if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP || - pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD) - local_tpe_count++; - else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP || - pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD) - reg_tpe_count++; - } - } + local_tpe_count = + bss_conf->tpe.max_local[client_type].valid + + bss_conf->tpe.psd_local[client_type].valid; + reg_tpe_count = + bss_conf->tpe.max_reg_client[client_type].valid + + bss_conf->tpe.psd_reg_client[client_type].valid; if (!reg_tpe_count && !local_tpe_count) { ath11k_warn(ab, @@ -7897,83 +7869,44 @@ static void ath11k_mac_parse_tx_pwr_env(struct ath11k *ar, use_local_tpe = false; } - for (i = 0; i < bss_conf->tx_pwr_env_num; i++) { - single_tpe = &bss_conf->tx_pwr_env[i]; - pwr_category = u8_get_bits(single_tpe->tx_power_info, - IEEE80211_TX_PWR_ENV_INFO_CATEGORY); - pwr_interpret = u8_get_bits(single_tpe->tx_power_info, - IEEE80211_TX_PWR_ENV_INFO_INTERPRET); - - if (pwr_category != client_type) - continue; - - /* get local transmit power envelope */ - if (use_local_tpe) { - if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP) { - non_psd_index = i; - non_psd_set = true; - } else if (pwr_interpret == IEEE80211_TPE_LOCAL_EIRP_PSD) { - psd_index = i; - psd_set = true; - } - /* get regulatory transmit power envelope */ - } else { - if (pwr_interpret == 
IEEE80211_TPE_REG_CLIENT_EIRP) { - non_psd_index = i; - non_psd_set = true; - } else if (pwr_interpret == IEEE80211_TPE_REG_CLIENT_EIRP_PSD) { - psd_index = i; - psd_set = true; - } - } + if (use_local_tpe) { + psd = &bss_conf->tpe.psd_local[client_type]; + if (!psd->valid) + psd = NULL; + non_psd = &bss_conf->tpe.max_local[client_type]; + if (!non_psd->valid) + non_psd = NULL; + } else { + psd = &bss_conf->tpe.psd_reg_client[client_type]; + if (!psd->valid) + psd = NULL; + non_psd = &bss_conf->tpe.max_reg_client[client_type]; + if (!non_psd->valid) + non_psd = NULL; } - if (non_psd_set && !psd_set) { - single_tpe = &bss_conf->tx_pwr_env[non_psd_index]; - pwr_count = u8_get_bits(single_tpe->tx_power_info, - IEEE80211_TX_PWR_ENV_INFO_COUNT); - pwr_interpret = u8_get_bits(single_tpe->tx_power_info, - IEEE80211_TX_PWR_ENV_INFO_INTERPRET); + if (non_psd && !psd) { arvif->reg_tpc_info.is_psd_power = false; arvif->reg_tpc_info.eirp_power = 0; - arvif->reg_tpc_info.num_pwr_levels = - ath11k_mac_get_tpe_count(pwr_interpret, pwr_count); + arvif->reg_tpc_info.num_pwr_levels = non_psd->count; for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) { ath11k_dbg(ab, ATH11K_DBG_MAC, "non PSD power[%d] : %d\n", - i, single_tpe->tx_power[i]); - arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2; + i, non_psd->power[i]); + arvif->reg_tpc_info.tpe[i] = non_psd->power[i] / 2; } } - if (psd_set) { - single_tpe = &bss_conf->tx_pwr_env[psd_index]; - pwr_count = u8_get_bits(single_tpe->tx_power_info, - IEEE80211_TX_PWR_ENV_INFO_COUNT); - pwr_interpret = u8_get_bits(single_tpe->tx_power_info, - IEEE80211_TX_PWR_ENV_INFO_INTERPRET); - arvif->reg_tpc_info.is_psd_power = true; + if (psd) { + arvif->reg_tpc_info.num_pwr_levels = psd->count; - if (pwr_count == 0) { + for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) { ath11k_dbg(ab, ATH11K_DBG_MAC, - "TPE PSD power : %d\n", single_tpe->tx_power[0]); - arvif->reg_tpc_info.num_pwr_levels = - ath11k_mac_get_num_pwr_levels(&ctx->def); - - for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) - arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[0] / 2; - } else { - arvif->reg_tpc_info.num_pwr_levels = - ath11k_mac_get_tpe_count(pwr_interpret, pwr_count); - - for (i = 0; i < arvif->reg_tpc_info.num_pwr_levels; i++) { - ath11k_dbg(ab, ATH11K_DBG_MAC, - "TPE PSD power[%d] : %d\n", - i, single_tpe->tx_power[i]); - arvif->reg_tpc_info.tpe[i] = single_tpe->tx_power[i] / 2; - } + "TPE PSD power[%d] : %d\n", + i, psd->power[i]); + arvif->reg_tpc_info.tpe[i] = psd->power[i] / 2; } } } @@ -8851,12 +8784,8 @@ ath11k_mac_op_reconfig_complete(struct ieee80211_hw *hw, ieee80211_wake_queues(ar->hw); if (ar->ab->hw_params.current_cc_support && - ar->alpha2[0] != 0 && ar->alpha2[1] != 0) { - struct wmi_set_current_country_params set_current_param = {}; - - memcpy(&set_current_param.alpha2, ar->alpha2, 2); - ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param); - } + ar->alpha2[0] != 0 && ar->alpha2[1] != 0) + ath11k_reg_set_cc(ar); if (ab->is_reset) { recovery_count = atomic_inc_return(&ab->recovery_count); @@ -9055,8 +8984,11 @@ static void ath11k_mac_op_sta_statistics(struct ieee80211_hw *hw, sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); } - sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi) + - ATH11K_DEFAULT_NOISE_FLOOR; + sinfo->signal_avg = ewma_avg_rssi_read(&arsta->avg_rssi); + + if (!db2dbm) + sinfo->signal_avg += ATH11K_DEFAULT_NOISE_FLOOR; + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } @@ -9091,7 +9023,6 @@ static void 
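The parsed-TPE arrays handed over by mac80211 keep the on-air encoding, in which transmit power is carried in steps of 0.5 dBm; that is why every value above is divided by two before being stored in reg_tpc_info. A one-line worked example:

	s8 raw = 34;		/* e.g. psd->power[i] or non_psd->power[i] */
	s8 dbm = raw / 2;	/* 34 half-dBm steps == 17 dBm */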
ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw, struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif); struct inet6_ifaddr *ifa6; struct ifacaddr6 *ifaca6; - struct list_head *p; u32 count, scope; ath11k_dbg(ar->ab, ATH11K_DBG_MAC, "op ipv6 changed\n"); @@ -9099,7 +9030,12 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw, offload = &arvif->arp_ns_offload; count = 0; - /* Note: read_lock_bh() calls rcu_read_lock() */ + /* The _ipv6_changed() is called with RCU lock already held in + * atomic_notifier_call_chain(), so we don't need to call + * rcu_read_lock() again here. But note that with CONFIG_PREEMPT_RT + * enabled, read_lock_bh() also calls rcu_read_lock(). This is OK + * because RCU read critical section is allowed to get nested. + */ read_lock_bh(&idev->lock); memset(offload->ipv6_addr, 0, sizeof(offload->ipv6_addr)); @@ -9107,11 +9043,10 @@ static void ath11k_mac_op_ipv6_changed(struct ieee80211_hw *hw, memcpy(offload->mac_addr, vif->addr, ETH_ALEN); /* get unicast address */ - list_for_each(p, &idev->addr_list) { + list_for_each_entry(ifa6, &idev->addr_list, if_list) { if (count >= ATH11K_IPV6_MAX_COUNT) goto generate; - ifa6 = list_entry(p, struct inet6_ifaddr, if_list); if (ifa6->flags & IFA_F_DADFAILED) continue; scope = ipv6_addr_src_scope(&ifa6->addr); @@ -10325,11 +10260,8 @@ static int __ath11k_mac_register(struct ath11k *ar) } if (ab->hw_params.current_cc_support && ab->new_alpha2[0]) { - struct wmi_set_current_country_params set_current_param = {}; - - memcpy(&set_current_param.alpha2, ab->new_alpha2, 2); memcpy(&ar->alpha2, ab->new_alpha2, 2); - ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param); + ret = ath11k_reg_set_cc(ar); if (ret) ath11k_warn(ar->ab, "failed set cc code for mac register: %d\n", ret); diff --git a/drivers/net/wireless/ath/ath11k/qmi.c b/drivers/net/wireless/ath/ath11k/qmi.c index d4a243b64f6c..1bc648920ab6 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.c +++ b/drivers/net/wireless/ath/ath11k/qmi.c @@ -2293,7 +2293,7 @@ static int ath11k_qmi_load_file_target_mem(struct ath11k_base *ab, struct qmi_txn txn; const u8 *temp = data; void __iomem *bdf_addr = NULL; - int ret; + int ret = 0; u32 remaining = len; req = kzalloc(sizeof(*req), GFP_KERNEL); @@ -2859,7 +2859,7 @@ int ath11k_qmi_firmware_start(struct ath11k_base *ab, int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab) { - int timeout; + long time_left; if (!ath11k_core_coldboot_cal_support(ab) || ab->hw_params.cbcal_restart_fw == 0) @@ -2867,11 +2867,11 @@ int ath11k_qmi_fwreset_from_cold_boot(struct ath11k_base *ab) ath11k_dbg(ab, ATH11K_DBG_QMI, "wait for cold boot done\n"); - timeout = wait_event_timeout(ab->qmi.cold_boot_waitq, - (ab->qmi.cal_done == 1), - ATH11K_COLD_BOOT_FW_RESET_DELAY); + time_left = wait_event_timeout(ab->qmi.cold_boot_waitq, + (ab->qmi.cal_done == 1), + ATH11K_COLD_BOOT_FW_RESET_DELAY); - if (timeout <= 0) { + if (time_left <= 0) { ath11k_warn(ab, "Coldboot Calibration timed out\n"); return -ETIMEDOUT; } @@ -2886,7 +2886,7 @@ EXPORT_SYMBOL(ath11k_qmi_fwreset_from_cold_boot); static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab) { - int timeout; + long time_left; int ret; ret = ath11k_qmi_wlanfw_mode_send(ab, ATH11K_FIRMWARE_MODE_COLD_BOOT); @@ -2897,10 +2897,10 @@ static int ath11k_qmi_process_coldboot_calibration(struct ath11k_base *ab) ath11k_dbg(ab, ATH11K_DBG_QMI, "Coldboot calibration wait started\n"); - timeout = wait_event_timeout(ab->qmi.cold_boot_waitq, - (ab->qmi.cal_done == 1), - 
ATH11K_COLD_BOOT_FW_RESET_DELAY); - if (timeout <= 0) { + time_left = wait_event_timeout(ab->qmi.cold_boot_waitq, + (ab->qmi.cal_done == 1), + ATH11K_COLD_BOOT_FW_RESET_DELAY); + if (time_left <= 0) { ath11k_warn(ab, "coldboot calibration timed out\n"); return 0; } diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c index 737fcd450d4b..b0f289784dd3 100644 --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: BSD-3-Clause-Clear /* * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include @@ -49,7 +49,6 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) { struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); struct wmi_init_country_params init_country_param; - struct wmi_set_current_country_params set_current_param = {}; struct ath11k *ar = hw->priv; int ret; @@ -83,9 +82,8 @@ ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) * reg info */ if (ar->ab->hw_params.current_cc_support) { - memcpy(&set_current_param.alpha2, request->alpha2, 2); - memcpy(&ar->alpha2, &set_current_param.alpha2, 2); - ret = ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param); + memcpy(&ar->alpha2, request->alpha2, 2); + ret = ath11k_reg_set_cc(ar); if (ret) ath11k_warn(ar->ab, "failed set current country code: %d\n", ret); @@ -878,7 +876,7 @@ int ath11k_reg_handle_chan_list(struct ath11k_base *ab, ath11k_reg_reset_info(reg_info); if (ab->hw_params.single_pdev_only && - pdev_idx < ab->hw_params.num_rxmda_per_pdev) + pdev_idx < ab->hw_params.num_rxdma_per_pdev) return 0; goto fallback; } @@ -1017,3 +1015,11 @@ void ath11k_reg_free(struct ath11k_base *ab) kfree(ab->new_regd[i]); } } + +int ath11k_reg_set_cc(struct ath11k *ar) +{ + struct wmi_set_current_country_params set_current_param = {}; + + memcpy(&set_current_param.alpha2, ar->alpha2, 2); + return ath11k_wmi_send_set_current_country_cmd(ar, &set_current_param); +} diff --git a/drivers/net/wireless/ath/ath11k/reg.h b/drivers/net/wireless/ath/ath11k/reg.h index 64edb794260a..263ea9061948 100644 --- a/drivers/net/wireless/ath/ath11k/reg.h +++ b/drivers/net/wireless/ath/ath11k/reg.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2019 The Linux Foundation. All rights reserved. - * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
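The two cold-boot waits above now keep the result of wait_event_timeout() in a long, matching the macro's actual return type and avoiding truncation for large timeouts. A minimal sketch of the idiom; wq, cond and timeout_jiffies are placeholders:

	long time_left;

	/* Returns remaining jiffies (>= 1) if cond became true, 0 on timeout. */
	time_left = wait_event_timeout(wq, cond, timeout_jiffies);
	if (!time_left)
		return -ETIMEDOUT;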
*/ #ifndef ATH11K_REG_H @@ -45,5 +45,5 @@ ath11k_reg_ap_pwr_convert(enum ieee80211_ap_reg_power power_type); int ath11k_reg_handle_chan_list(struct ath11k_base *ab, struct cur_regulatory_info *reg_info, enum ieee80211_ap_reg_power power_type); - +int ath11k_reg_set_cc(struct ath11k *ar); #endif diff --git a/drivers/net/wireless/ath/ath11k/wmi.c b/drivers/net/wireless/ath/ath11k/wmi.c index 6ff01c45f165..38f175dd1557 100644 --- a/drivers/net/wireless/ath/ath11k/wmi.c +++ b/drivers/net/wireless/ath/ath11k/wmi.c @@ -9082,7 +9082,7 @@ int ath11k_wmi_attach(struct ath11k_base *ab) ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_MAX; /* It's overwritten when service_ext_ready is handled */ - if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxmda_per_pdev > 1) + if (ab->hw_params.single_pdev_only && ab->hw_params.num_rxdma_per_pdev > 1) ab->wmi_ab.preferred_hw_mode = WMI_HOST_HW_MODE_SINGLE; /* TODO: Init remaining wmi soc resources required */ diff --git a/drivers/net/wireless/ath/ath12k/Makefile b/drivers/net/wireless/ath/ath12k/Makefile index d42480db7463..5a1ed20d730e 100644 --- a/drivers/net/wireless/ath/ath12k/Makefile +++ b/drivers/net/wireless/ath/ath12k/Makefile @@ -23,9 +23,10 @@ ath12k-y += core.o \ fw.o \ p2p.o -ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o +ath12k-$(CONFIG_ATH12K_DEBUGFS) += debugfs.o debugfs_htt_stats.o ath12k-$(CONFIG_ACPI) += acpi.o ath12k-$(CONFIG_ATH12K_TRACING) += trace.o +ath12k-$(CONFIG_PM) += wow.o # for tracing framework to find trace.h CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/ath/ath12k/acpi.c b/drivers/net/wireless/ath/ath12k/acpi.c index 443ba12e01f3..0555d35aab47 100644 --- a/drivers/net/wireless/ath/ath12k/acpi.c +++ b/drivers/net/wireless/ath/ath12k/acpi.c @@ -391,4 +391,6 @@ void ath12k_acpi_stop(struct ath12k_base *ab) acpi_remove_notify_handler(ACPI_HANDLE(ab->dev), ACPI_DEVICE_NOTIFY, ath12k_acpi_dsm_notify); + + memset(&ab->acpi, 0, sizeof(ab->acpi)); } diff --git a/drivers/net/wireless/ath/ath12k/ce.h b/drivers/net/wireless/ath/ath12k/ce.h index 79af3b6159f1..857bc5f9e946 100644 --- a/drivers/net/wireless/ath/ath12k/ce.h +++ b/drivers/net/wireless/ath/ath12k/ce.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #ifndef ATH12K_CE_H @@ -119,7 +119,7 @@ struct ath12k_ce_ring { /* Host address space */ void *base_addr_owner_space_unaligned; /* CE address space */ - u32 base_addr_ce_space_unaligned; + dma_addr_t base_addr_ce_space_unaligned; /* Actual start of descriptors. * Aligned to descriptor-size boundary. @@ -129,7 +129,7 @@ struct ath12k_ce_ring { void *base_addr_owner_space; /* CE address space */ - u32 base_addr_ce_space; + dma_addr_t base_addr_ce_space; /* HAL ring id */ u32 hal_ring_id; diff --git a/drivers/net/wireless/ath/ath12k/core.c b/drivers/net/wireless/ath/ath12k/core.c index 6663f4e1792d..51252e8bc1ae 100644 --- a/drivers/net/wireless/ath/ath12k/core.c +++ b/drivers/net/wireless/ath/ath12k/core.c @@ -16,6 +16,7 @@ #include "hif.h" #include "fw.h" #include "debugfs.h" +#include "wow.h" unsigned int ath12k_debug_mask; module_param_named(debug_mask, ath12k_debug_mask, uint, 0644); @@ -42,27 +43,48 @@ static int ath12k_core_rfkill_config(struct ath12k_base *ab) return ret; } +/* Check if we need to continue with suspend/resume operation. 
+ * Return: + * a negative value: error happens and don't continue. + * 0: no error but don't continue. + * positive value: no error and do continue. + */ +static int ath12k_core_continue_suspend_resume(struct ath12k_base *ab) +{ + struct ath12k *ar; + + if (!ab->hw_params->supports_suspend) + return -EOPNOTSUPP; + + /* so far single_pdev_only chips have supports_suspend as true + * so pass 0 as a dummy pdev_id here. + */ + ar = ab->pdevs[0].ar; + if (!ar || !ar->ah || ar->ah->state != ATH12K_HW_STATE_OFF) + return 0; + + return 1; +} + int ath12k_core_suspend(struct ath12k_base *ab) { struct ath12k *ar; int ret, i; - if (!ab->hw_params->supports_suspend) - return -EOPNOTSUPP; + ret = ath12k_core_continue_suspend_resume(ab); + if (ret <= 0) + return ret; - rcu_read_lock(); for (i = 0; i < ab->num_radios; i++) { - ar = ath12k_mac_get_ar_by_pdev_id(ab, i); + ar = ab->pdevs[i].ar; if (!ar) continue; ret = ath12k_mac_wait_tx_complete(ar); if (ret) { ath12k_warn(ab, "failed to wait tx complete: %d\n", ret); - rcu_read_unlock(); return ret; } } - rcu_read_unlock(); /* PM framework skips suspend_late/resume_early callbacks * if other devices report errors in their suspend callbacks. @@ -83,8 +105,13 @@ EXPORT_SYMBOL(ath12k_core_suspend); int ath12k_core_suspend_late(struct ath12k_base *ab) { - if (!ab->hw_params->supports_suspend) - return -EOPNOTSUPP; + int ret; + + ret = ath12k_core_continue_suspend_resume(ab); + if (ret <= 0) + return ret; + + ath12k_acpi_stop(ab); ath12k_hif_irq_disable(ab); ath12k_hif_ce_irq_disable(ab); @@ -99,8 +126,9 @@ int ath12k_core_resume_early(struct ath12k_base *ab) { int ret; - if (!ab->hw_params->supports_suspend) - return -EOPNOTSUPP; + ret = ath12k_core_continue_suspend_resume(ab); + if (ret <= 0) + return ret; reinit_completion(&ab->restart_completed); ret = ath12k_hif_power_up(ab); @@ -114,9 +142,11 @@ EXPORT_SYMBOL(ath12k_core_resume_early); int ath12k_core_resume(struct ath12k_base *ab) { long time_left; + int ret; - if (!ab->hw_params->supports_suspend) - return -EOPNOTSUPP; + ret = ath12k_core_continue_suspend_resume(ab); + if (ret <= 0) + return ret; time_left = wait_for_completion_timeout(&ab->restart_completed, ATH12K_RESET_TIMEOUT_HZ); @@ -994,9 +1024,8 @@ void ath12k_core_halt(struct ath12k *ar) static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab) { struct ath12k *ar; - struct ath12k_pdev *pdev; struct ath12k_hw *ah; - int i; + int i, j; spin_lock_bh(&ab->base_lock); ab->stats.fw_crash_counter++; @@ -1006,35 +1035,32 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab) set_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags); for (i = 0; i < ab->num_hw; i++) { - if (!ab->ah[i]) - continue; - ah = ab->ah[i]; - ieee80211_stop_queues(ah->hw); - } - - for (i = 0; i < ab->num_radios; i++) { - pdev = &ab->pdevs[i]; - ar = pdev->ar; - if (!ar || ar->state == ATH12K_STATE_OFF) + if (!ah || ah->state == ATH12K_HW_STATE_OFF) continue; - ath12k_mac_drain_tx(ar); - complete(&ar->scan.started); - complete(&ar->scan.completed); - complete(&ar->scan.on_channel); - complete(&ar->peer_assoc_done); - complete(&ar->peer_delete_done); - complete(&ar->install_key_done); - complete(&ar->vdev_setup_done); - complete(&ar->vdev_delete_done); - complete(&ar->bss_survey_done); + ieee80211_stop_queues(ah->hw); - wake_up(&ar->dp.tx_empty_waitq); - idr_for_each(&ar->txmgmt_idr, - ath12k_mac_tx_mgmt_pending_free, ar); - idr_destroy(&ar->txmgmt_idr); - wake_up(&ar->txmgmt_empty_waitq); + for (j = 0; j < ah->num_radio; j++) { + ar = &ah->radio[j]; + 
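One subtlety of the tri-state convention above: 0 is deliberately propagated as success, so a device whose hardware state is still ATH12K_HW_STATE_OFF quietly skips the suspend/resume work instead of failing the PM transition. Hence every caller uses the same guard; a sketch mirroring the hunks above:

	ret = ath12k_core_continue_suspend_resume(ab);
	if (ret <= 0)
		return ret;	/* < 0: real error, 0: nothing to do */
	/* > 0: proceed with the actual suspend/resume work */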
+ ath12k_mac_drain_tx(ar); + complete(&ar->scan.started); + complete(&ar->scan.completed); + complete(&ar->scan.on_channel); + complete(&ar->peer_assoc_done); + complete(&ar->peer_delete_done); + complete(&ar->install_key_done); + complete(&ar->vdev_setup_done); + complete(&ar->vdev_delete_done); + complete(&ar->bss_survey_done); + + wake_up(&ar->dp.tx_empty_waitq); + idr_for_each(&ar->txmgmt_idr, + ath12k_mac_tx_mgmt_pending_free, ar); + idr_destroy(&ar->txmgmt_idr); + wake_up(&ar->txmgmt_empty_waitq); + } } wake_up(&ab->wmi_ab.tx_credits_wq); @@ -1043,48 +1069,57 @@ static void ath12k_core_pre_reconfigure_recovery(struct ath12k_base *ab) static void ath12k_core_post_reconfigure_recovery(struct ath12k_base *ab) { + struct ath12k_hw *ah; struct ath12k *ar; - struct ath12k_pdev *pdev; - int i; + int i, j; - for (i = 0; i < ab->num_radios; i++) { - pdev = &ab->pdevs[i]; - ar = pdev->ar; - if (!ar || ar->state == ATH12K_STATE_OFF) + for (i = 0; i < ab->num_hw; i++) { + ah = ab->ah[i]; + if (!ah || ah->state == ATH12K_HW_STATE_OFF) continue; - mutex_lock(&ar->conf_mutex); + mutex_lock(&ah->hw_mutex); + + switch (ah->state) { + case ATH12K_HW_STATE_ON: + ah->state = ATH12K_HW_STATE_RESTARTING; + + for (j = 0; j < ah->num_radio; j++) { + ar = &ah->radio[j]; + + mutex_lock(&ar->conf_mutex); + ath12k_core_halt(ar); + mutex_unlock(&ar->conf_mutex); + } - switch (ar->state) { - case ATH12K_STATE_ON: - ar->state = ATH12K_STATE_RESTARTING; - ath12k_core_halt(ar); - ieee80211_restart_hw(ath12k_ar_to_hw(ar)); break; - case ATH12K_STATE_OFF: + case ATH12K_HW_STATE_OFF: ath12k_warn(ab, - "cannot restart radio %d that hasn't been started\n", + "cannot restart hw %d that hasn't been started\n", i); break; - case ATH12K_STATE_RESTARTING: + case ATH12K_HW_STATE_RESTARTING: break; - case ATH12K_STATE_RESTARTED: - ar->state = ATH12K_STATE_WEDGED; + case ATH12K_HW_STATE_RESTARTED: + ah->state = ATH12K_HW_STATE_WEDGED; fallthrough; - case ATH12K_STATE_WEDGED: + case ATH12K_HW_STATE_WEDGED: ath12k_warn(ab, - "device is wedged, will not restart radio %d\n", i); + "device is wedged, will not restart hw %d\n", i); break; } - mutex_unlock(&ar->conf_mutex); + + mutex_unlock(&ah->hw_mutex); } + complete(&ab->driver_recovery); } static void ath12k_core_restart(struct work_struct *work) { struct ath12k_base *ab = container_of(work, struct ath12k_base, restart_work); - int ret; + struct ath12k_hw *ah; + int ret, i; ret = ath12k_core_reconfigure_on_crash(ab); if (ret) { @@ -1092,8 +1127,12 @@ static void ath12k_core_restart(struct work_struct *work) return; } - if (ab->is_reset) - complete_all(&ab->reconfigure_complete); + if (ab->is_reset) { + for (i = 0; i < ab->num_hw; i++) { + ah = ab->ah[i]; + ieee80211_restart_hw(ah->hw); + } + } complete(&ab->restart_completed); } @@ -1147,20 +1186,14 @@ static void ath12k_core_reset(struct work_struct *work) ath12k_dbg(ab, ATH12K_DBG_BOOT, "reset starting\n"); ab->is_reset = true; - atomic_set(&ab->recovery_start_count, 0); - reinit_completion(&ab->recovery_start); atomic_set(&ab->recovery_count, 0); ath12k_core_pre_reconfigure_recovery(ab); - reinit_completion(&ab->reconfigure_complete); ath12k_core_post_reconfigure_recovery(ab); ath12k_dbg(ab, ATH12K_DBG_BOOT, "waiting recovery start...\n"); - time_left = wait_for_completion_timeout(&ab->recovery_start, - ATH12K_RECOVER_START_TIMEOUT_HZ); - ath12k_hif_irq_disable(ab); ath12k_hif_ce_irq_disable(ab); @@ -1185,6 +1218,29 @@ int ath12k_core_pre_init(struct ath12k_base *ab) return 0; } +static int ath12k_core_panic_handler(struct 
notifier_block *nb, + unsigned long action, void *data) +{ + struct ath12k_base *ab = container_of(nb, struct ath12k_base, + panic_nb); + + return ath12k_hif_panic_handler(ab); +} + +static int ath12k_core_panic_notifier_register(struct ath12k_base *ab) +{ + ab->panic_nb.notifier_call = ath12k_core_panic_handler; + + return atomic_notifier_chain_register(&panic_notifier_list, + &ab->panic_nb); +} + +static void ath12k_core_panic_notifier_unregister(struct ath12k_base *ab) +{ + atomic_notifier_chain_unregister(&panic_notifier_list, + &ab->panic_nb); +} + int ath12k_core_init(struct ath12k_base *ab) { int ret; @@ -1195,11 +1251,17 @@ int ath12k_core_init(struct ath12k_base *ab) return ret; } + ret = ath12k_core_panic_notifier_register(ab); + if (ret) + ath12k_warn(ab, "failed to register panic handler: %d\n", ret); + return 0; } void ath12k_core_deinit(struct ath12k_base *ab) { + ath12k_core_panic_notifier_unregister(ab); + mutex_lock(&ab->core_lock); ath12k_core_pdev_destroy(ab); @@ -1243,8 +1305,6 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size, mutex_init(&ab->core_lock); spin_lock_init(&ab->base_lock); init_completion(&ab->reset_complete); - init_completion(&ab->reconfigure_complete); - init_completion(&ab->recovery_start); INIT_LIST_HEAD(&ab->peers); init_waitqueue_head(&ab->peer_mapping_wq); @@ -1256,12 +1316,23 @@ struct ath12k_base *ath12k_core_alloc(struct device *dev, size_t priv_size, timer_setup(&ab->rx_replenish_retry, ath12k_ce_rx_replenish_retry, 0); init_completion(&ab->htc_suspend); init_completion(&ab->restart_completed); + init_completion(&ab->wow.wakeup_completed); ab->dev = dev; ab->hif.bus = bus; ab->qmi.num_radios = U8_MAX; ab->mlo_capable_flags = ATH12K_INTRA_DEVICE_MLO_SUPPORT; + /* Device index used to identify the devices in a group. + * + * In Intra-device MLO, only one device present in a group, + * so it is always zero. + * + * In Inter-device MLO, Multiple device present in a group, + * expect non-zero value. 
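The panic notifier added above runs in atomic panic context; registration is a plain atomic_notifier_chain_register() against the global panic_notifier_list. A self-contained sketch of the same pattern; all names here are illustrative, not the driver's:

	#include <linux/notifier.h>
	#include <linux/panic_notifier.h>

	static int my_panic_handler(struct notifier_block *nb,
				    unsigned long action, void *data)
	{
		/* Runs on panic: must not sleep or take blocking locks. */
		return NOTIFY_DONE;
	}

	static struct notifier_block my_panic_nb = {
		.notifier_call = my_panic_handler,
	};

	/* Register at init and unregister at teardown, as the hunk does
	 * in ath12k_core_init()/ath12k_core_deinit().
	 */
	static int __init my_init(void)
	{
		return atomic_notifier_chain_register(&panic_notifier_list,
						      &my_panic_nb);
	}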
+ */ + ab->device_id = 0; + return ab; err_free_wq: diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h index 47dde4401210..cdfd43a7321a 100644 --- a/drivers/net/wireless/ath/ath12k/core.h +++ b/drivers/net/wireless/ath/ath12k/core.h @@ -14,6 +14,7 @@ #include #include #include +#include #include "qmi.h" #include "htc.h" #include "wmi.h" @@ -27,6 +28,8 @@ #include "dbring.h" #include "fw.h" #include "acpi.h" +#include "wow.h" +#include "debugfs_htt_stats.h" #define SM(_v, _f) (((_v) << _f##_LSB) & _f##_MASK) @@ -146,7 +149,7 @@ struct ath12k_ext_irq_grp { u32 grp_id; u64 timestamp; struct napi_struct napi; - struct net_device napi_ndev; + struct net_device *napi_ndev; }; struct ath12k_smbios_bdf { @@ -180,8 +183,6 @@ struct ath12k_he { u32 heop_param; }; -#define MAX_RADIOS 3 - enum { WMI_HOST_TP_SCALE_MAX = 0, WMI_HOST_TP_SCALE_50 = 1, @@ -212,10 +213,6 @@ enum ath12k_dev_flags { ATH12K_FLAG_EXT_IRQ_ENABLED, }; -enum ath12k_monitor_flags { - ATH12K_FLAG_MONITOR_ENABLED, -}; - struct ath12k_tx_conf { bool changed; u16 ac; @@ -234,6 +231,13 @@ struct ath12k_vif_cache { u32 bss_conf_changed; }; +struct ath12k_rekey_data { + u8 kck[NL80211_KCK_LEN]; + u8 kek[NL80211_KCK_LEN]; + u64 replay_ctr; + bool enable_offload; +}; + struct ath12k_vif { u32 vdev_id; enum wmi_vdev_type vdev_type; @@ -290,6 +294,7 @@ struct ath12k_vif { u32 punct_bitmap; bool ps; struct ath12k_vif_cache *cache; + struct ath12k_rekey_data rekey_data; }; struct ath12k_vif_iter { @@ -454,15 +459,15 @@ struct ath12k_sta { #define ATH12K_MIN_5G_FREQ 4150 #define ATH12K_MIN_6G_FREQ 5925 #define ATH12K_MAX_6G_FREQ 7115 -#define ATH12K_NUM_CHANS 100 +#define ATH12K_NUM_CHANS 101 #define ATH12K_MAX_5G_CHAN 173 -enum ath12k_state { - ATH12K_STATE_OFF, - ATH12K_STATE_ON, - ATH12K_STATE_RESTARTING, - ATH12K_STATE_RESTARTED, - ATH12K_STATE_WEDGED, +enum ath12k_hw_state { + ATH12K_HW_STATE_OFF, + ATH12K_HW_STATE_ON, + ATH12K_HW_STATE_RESTARTING, + ATH12K_HW_STATE_RESTARTED, + ATH12K_HW_STATE_WEDGED, /* Add other states as required */ }; @@ -477,8 +482,17 @@ struct ath12k_fw_stats { struct list_head bcn; }; +struct ath12k_dbg_htt_stats { + enum ath12k_dbg_htt_ext_stats_type type; + u32 cfg_param[4]; + u8 reset; + struct debug_htt_stats_req *stats_req; +}; + struct ath12k_debug { struct dentry *debugfs_pdev; + struct dentry *debugfs_pdev_symlink; + struct ath12k_dbg_htt_stats htt_stats; }; struct ath12k_per_peer_tx_stats { @@ -511,7 +525,6 @@ struct ath12k { u32 ht_cap_info; u32 vht_cap_info; struct ath12k_he ar_he; - enum ath12k_state state; bool supports_6ghz; struct { struct completion started; @@ -533,7 +546,6 @@ struct ath12k { unsigned long dev_flags; unsigned int filter_flags; - unsigned long monitor_flags; u32 min_tx_power; u32 max_tx_power; u32 txpower_limit_2g; @@ -613,6 +625,9 @@ struct ath12k { struct work_struct wmi_mgmt_tx_work; struct sk_buff_head wmi_mgmt_tx_queue; + struct ath12k_wow wow; + struct completion target_suspend; + bool target_suspend_ack; struct ath12k_per_peer_tx_stats peer_tx_stats; struct list_head ppdu_stats_info; u32 ppdu_stat_list_depth; @@ -632,14 +647,22 @@ struct ath12k { u32 freq_low; u32 freq_high; + + bool nlo_enabled; }; struct ath12k_hw { struct ieee80211_hw *hw; + /* Protect the write operation of the hardware state ath12k_hw::state + * between hardware start<=>reconfigure<=>stop transitions. 
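The comment above pins down what hw_mutex protects: writes to ath12k_hw::state across start, reconfigure and stop. Reduced to its skeleton, every transition in the recovery path follows this shape:

	mutex_lock(&ah->hw_mutex);
	if (ah->state == ATH12K_HW_STATE_ON)
		ah->state = ATH12K_HW_STATE_RESTARTING;
	mutex_unlock(&ah->hw_mutex);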
+ */ + struct mutex hw_mutex; + enum ath12k_hw_state state; bool regd_updated; bool use_6ghz_regd; - u8 num_radio; + + /* Keep last */ struct ath12k radio[] __aligned(sizeof(void *)); }; @@ -689,6 +712,7 @@ struct mlo_timestamp { struct ath12k_pdev { struct ath12k *ar; u32 pdev_id; + u32 hw_link_id; struct ath12k_pdev_cap cap; u8 mac_addr[ETH_ALEN]; struct mlo_timestamp timestamp; @@ -747,6 +771,7 @@ struct ath12k_base { struct ath12k_qmi qmi; struct ath12k_wmi_base wmi_ab; struct completion fw_ready; + u8 device_id; int num_radios; /* HW channel counters frequency value in hertz common to all MACs */ u32 cc_freq_hz; @@ -763,6 +788,11 @@ struct ath12k_base { const struct ath12k_hif_ops *ops; } hif; + struct { + struct completion wakeup_completed; + u32 wmi_conf_rx_decap_mode; + } wow; + struct ath12k_ce ce; struct timer_list rx_replenish_retry; struct ath12k_hal hal; @@ -845,11 +875,8 @@ struct ath12k_base { struct work_struct reset_work; atomic_t reset_count; atomic_t recovery_count; - atomic_t recovery_start_count; bool is_reset; struct completion reset_complete; - struct completion reconfigure_complete; - struct completion recovery_start; /* continuous recovery fail count */ atomic_t fail_cont_count; unsigned long reset_fail_timeout; @@ -923,6 +950,8 @@ struct ath12k_base { #endif /* CONFIG_ACPI */ + struct notifier_block panic_nb; + /* must be last */ u8 drv_priv[] __aligned(sizeof(void *)); }; @@ -1037,6 +1066,11 @@ static inline struct ath12k *ath12k_ah_to_ar(struct ath12k_hw *ah, u8 hw_link_id return &ah->radio[hw_link_id]; } +static inline struct ath12k_hw *ath12k_ar_to_ah(struct ath12k *ar) +{ + return ar->ah; +} + static inline struct ieee80211_hw *ath12k_ar_to_hw(struct ath12k *ar) { return ar->ah->hw; diff --git a/drivers/net/wireless/ath/ath12k/debug.h b/drivers/net/wireless/ath/ath12k/debug.h index aa685295f8a4..f7005917362c 100644 --- a/drivers/net/wireless/ath/ath12k/debug.h +++ b/drivers/net/wireless/ath/ath12k/debug.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef _ATH12K_DEBUG_H_ @@ -25,6 +25,7 @@ enum ath12k_debug_mask { ATH12K_DBG_PCI = 0x00001000, ATH12K_DBG_DP_TX = 0x00002000, ATH12K_DBG_DP_RX = 0x00004000, + ATH12K_DBG_WOW = 0x00008000, ATH12K_DBG_ANY = 0xffffffff, }; diff --git a/drivers/net/wireless/ath/ath12k/debugfs.c b/drivers/net/wireless/ath/ath12k/debugfs.c index 8d8ba951093b..2a977c36af00 100644 --- a/drivers/net/wireless/ath/ath12k/debugfs.c +++ b/drivers/net/wireless/ath/ath12k/debugfs.c @@ -6,6 +6,7 @@ #include "core.h" #include "debugfs.h" +#include "debugfs_htt_stats.h" static ssize_t ath12k_write_simulate_radar(struct file *file, const char __user *user_buf, @@ -80,11 +81,27 @@ void ath12k_debugfs_register(struct ath12k *ar) /* Create a symlink under ieee80211/phy* */ scnprintf(buf, sizeof(buf), "../../ath12k/%pd2", ar->debug.debugfs_pdev); - debugfs_create_symlink("ath12k", hw->wiphy->debugfsdir, buf); + ar->debug.debugfs_pdev_symlink = debugfs_create_symlink("ath12k", + hw->wiphy->debugfsdir, + buf); if (ar->mac.sbands[NL80211_BAND_5GHZ].channels) { debugfs_create_file("dfs_simulate_radar", 0200, ar->debug.debugfs_pdev, ar, &fops_simulate_radar); } + + ath12k_debugfs_htt_stats_register(ar); +} + +void ath12k_debugfs_unregister(struct ath12k *ar) +{ + if (!ar->debug.debugfs_pdev) + return; + + /* Remove symlink under ieee80211/phy* */ + debugfs_remove(ar->debug.debugfs_pdev_symlink); + debugfs_remove_recursive(ar->debug.debugfs_pdev); + ar->debug.debugfs_pdev_symlink = NULL; + ar->debug.debugfs_pdev = NULL; } diff --git a/drivers/net/wireless/ath/ath12k/debugfs.h b/drivers/net/wireless/ath/ath12k/debugfs.h index a62f2a550b23..8d64ba03aa9a 100644 --- a/drivers/net/wireless/ath/ath12k/debugfs.h +++ b/drivers/net/wireless/ath/ath12k/debugfs.h @@ -11,7 +11,7 @@ void ath12k_debugfs_soc_create(struct ath12k_base *ab); void ath12k_debugfs_soc_destroy(struct ath12k_base *ab); void ath12k_debugfs_register(struct ath12k *ar); - +void ath12k_debugfs_unregister(struct ath12k *ar); #else static inline void ath12k_debugfs_soc_create(struct ath12k_base *ab) { @@ -25,6 +25,10 @@ static inline void ath12k_debugfs_register(struct ath12k *ar) { } +static inline void ath12k_debugfs_unregister(struct ath12k *ar) +{ +} + #endif /* CONFIG_ATH12K_DEBUGFS */ #endif /* _ATH12K_DEBUGFS_H_ */ diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c new file mode 100644 index 000000000000..ce80e7b5175b --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.c @@ -0,0 +1,1540 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
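Everything in the new debugfs_htt_stats.c below is built with one buffer-accumulation idiom: scnprintf() returns the number of characters actually written, never more than the space it was given, so the running offset can be advanced blindly without overrunning the buffer. A minimal sketch; buf, buf_len and value are placeholders:

	u32 len = 0;

	len += scnprintf(buf + len, buf_len - len, "HEADER:\n");
	len += scnprintf(buf + len, buf_len - len, "field = %u\n", value);
	/* once the buffer fills up, further calls write 0 bytes */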
+ */ + +#include +#include "core.h" +#include "debug.h" +#include "debugfs_htt_stats.h" +#include "dp_tx.h" +#include "dp_rx.h" + +static u32 +print_array_to_buf(u8 *buf, u32 offset, const char *header, + const __le32 *array, u32 array_len, const char *footer) +{ + int index = 0; + u8 i; + + if (header) { + index += scnprintf(buf + offset, + ATH12K_HTT_STATS_BUF_SIZE - offset, + "%s = ", header); + } + for (i = 0; i < array_len; i++) { + index += scnprintf(buf + offset + index, + (ATH12K_HTT_STATS_BUF_SIZE - offset) - index, + " %u:%u,", i, le32_to_cpu(array[i])); + } + /* To overwrite the last trailing comma */ + index--; + *(buf + offset + index) = '\0'; + + if (footer) { + index += scnprintf(buf + offset + index, + (ATH12K_HTT_STATS_BUF_SIZE - offset) - index, + "%s", footer); + } + return index; +} + +static void +htt_print_tx_pdev_stats_cmn_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_stats_cmn_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_PDEV_STATS_CMN_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "comp_delivered = %u\n", + le32_to_cpu(htt_stats_buf->comp_delivered)); + len += scnprintf(buf + len, buf_len - len, "self_triggers = %u\n", + le32_to_cpu(htt_stats_buf->self_triggers)); + len += scnprintf(buf + len, buf_len - len, "hw_queued = %u\n", + le32_to_cpu(htt_stats_buf->hw_queued)); + len += scnprintf(buf + len, buf_len - len, "hw_reaped = %u\n", + le32_to_cpu(htt_stats_buf->hw_reaped)); + len += scnprintf(buf + len, buf_len - len, "underrun = %u\n", + le32_to_cpu(htt_stats_buf->underrun)); + len += scnprintf(buf + len, buf_len - len, "hw_paused = %u\n", + le32_to_cpu(htt_stats_buf->hw_paused)); + len += scnprintf(buf + len, buf_len - len, "hw_flush = %u\n", + le32_to_cpu(htt_stats_buf->hw_flush)); + len += scnprintf(buf + len, buf_len - len, "hw_filt = %u\n", + le32_to_cpu(htt_stats_buf->hw_filt)); + len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n", + le32_to_cpu(htt_stats_buf->tx_abort)); + len += scnprintf(buf + len, buf_len - len, "ppdu_ok = %u\n", + le32_to_cpu(htt_stats_buf->ppdu_ok)); + len += scnprintf(buf + len, buf_len - len, "mpdu_requeued = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_requed)); + len += scnprintf(buf + len, buf_len - len, "tx_xretry = %u\n", + le32_to_cpu(htt_stats_buf->tx_xretry)); + len += scnprintf(buf + len, buf_len - len, "data_rc = %u\n", + le32_to_cpu(htt_stats_buf->data_rc)); + len += scnprintf(buf + len, buf_len - len, "mpdu_dropped_xretry = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_dropped_xretry)); + len += scnprintf(buf + len, buf_len - len, "illegal_rate_phy_err = %u\n", + le32_to_cpu(htt_stats_buf->illgl_rate_phy_err)); + len += scnprintf(buf + len, buf_len - len, "cont_xretry = %u\n", + le32_to_cpu(htt_stats_buf->cont_xretry)); + len += scnprintf(buf + len, buf_len - len, "tx_timeout = %u\n", + le32_to_cpu(htt_stats_buf->tx_timeout)); + len += scnprintf(buf + len, buf_len - len, "tx_time_dur_data = %u\n", + le32_to_cpu(htt_stats_buf->tx_time_dur_data)); + len += scnprintf(buf + len, buf_len - len, "pdev_resets = %u\n", + le32_to_cpu(htt_stats_buf->pdev_resets)); + len 
+= scnprintf(buf + len, buf_len - len, "phy_underrun = %u\n", + le32_to_cpu(htt_stats_buf->phy_underrun)); + len += scnprintf(buf + len, buf_len - len, "txop_ovf = %u\n", + le32_to_cpu(htt_stats_buf->txop_ovf)); + len += scnprintf(buf + len, buf_len - len, "seq_posted = %u\n", + le32_to_cpu(htt_stats_buf->seq_posted)); + len += scnprintf(buf + len, buf_len - len, "seq_failed_queueing = %u\n", + le32_to_cpu(htt_stats_buf->seq_failed_queueing)); + len += scnprintf(buf + len, buf_len - len, "seq_completed = %u\n", + le32_to_cpu(htt_stats_buf->seq_completed)); + len += scnprintf(buf + len, buf_len - len, "seq_restarted = %u\n", + le32_to_cpu(htt_stats_buf->seq_restarted)); + len += scnprintf(buf + len, buf_len - len, "seq_txop_repost_stop = %u\n", + le32_to_cpu(htt_stats_buf->seq_txop_repost_stop)); + len += scnprintf(buf + len, buf_len - len, "next_seq_cancel = %u\n", + le32_to_cpu(htt_stats_buf->next_seq_cancel)); + len += scnprintf(buf + len, buf_len - len, "dl_mu_mimo_seq_posted = %u\n", + le32_to_cpu(htt_stats_buf->mu_seq_posted)); + len += scnprintf(buf + len, buf_len - len, "dl_mu_ofdma_seq_posted = %u\n", + le32_to_cpu(htt_stats_buf->mu_ofdma_seq_posted)); + len += scnprintf(buf + len, buf_len - len, "ul_mu_mimo_seq_posted = %u\n", + le32_to_cpu(htt_stats_buf->ul_mumimo_seq_posted)); + len += scnprintf(buf + len, buf_len - len, "ul_mu_ofdma_seq_posted = %u\n", + le32_to_cpu(htt_stats_buf->ul_ofdma_seq_posted)); + len += scnprintf(buf + len, buf_len - len, "mu_mimo_peer_blacklisted = %u\n", + le32_to_cpu(htt_stats_buf->num_mu_peer_blacklisted)); + len += scnprintf(buf + len, buf_len - len, "seq_qdepth_repost_stop = %u\n", + le32_to_cpu(htt_stats_buf->seq_qdepth_repost_stop)); + len += scnprintf(buf + len, buf_len - len, "seq_min_msdu_repost_stop = %u\n", + le32_to_cpu(htt_stats_buf->seq_min_msdu_repost_stop)); + len += scnprintf(buf + len, buf_len - len, "mu_seq_min_msdu_repost_stop = %u\n", + le32_to_cpu(htt_stats_buf->mu_seq_min_msdu_repost_stop)); + len += scnprintf(buf + len, buf_len - len, "seq_switch_hw_paused = %u\n", + le32_to_cpu(htt_stats_buf->seq_switch_hw_paused)); + len += scnprintf(buf + len, buf_len - len, "next_seq_posted_dsr = %u\n", + le32_to_cpu(htt_stats_buf->next_seq_posted_dsr)); + len += scnprintf(buf + len, buf_len - len, "seq_posted_isr = %u\n", + le32_to_cpu(htt_stats_buf->seq_posted_isr)); + len += scnprintf(buf + len, buf_len - len, "seq_ctrl_cached = %u\n", + le32_to_cpu(htt_stats_buf->seq_ctrl_cached)); + len += scnprintf(buf + len, buf_len - len, "mpdu_count_tqm = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_count_tqm)); + len += scnprintf(buf + len, buf_len - len, "msdu_count_tqm = %u\n", + le32_to_cpu(htt_stats_buf->msdu_count_tqm)); + len += scnprintf(buf + len, buf_len - len, "mpdu_removed_tqm = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_removed_tqm)); + len += scnprintf(buf + len, buf_len - len, "msdu_removed_tqm = %u\n", + le32_to_cpu(htt_stats_buf->msdu_removed_tqm)); + len += scnprintf(buf + len, buf_len - len, "remove_mpdus_max_retries = %u\n", + le32_to_cpu(htt_stats_buf->remove_mpdus_max_retries)); + len += scnprintf(buf + len, buf_len - len, "mpdus_sw_flush = %u\n", + le32_to_cpu(htt_stats_buf->mpdus_sw_flush)); + len += scnprintf(buf + len, buf_len - len, "mpdus_hw_filter = %u\n", + le32_to_cpu(htt_stats_buf->mpdus_hw_filter)); + len += scnprintf(buf + len, buf_len - len, "mpdus_truncated = %u\n", + le32_to_cpu(htt_stats_buf->mpdus_truncated)); + len += scnprintf(buf + len, buf_len - len, "mpdus_ack_failed = %u\n", + 
le32_to_cpu(htt_stats_buf->mpdus_ack_failed)); + len += scnprintf(buf + len, buf_len - len, "mpdus_expired = %u\n", + le32_to_cpu(htt_stats_buf->mpdus_expired)); + len += scnprintf(buf + len, buf_len - len, "mpdus_seq_hw_retry = %u\n", + le32_to_cpu(htt_stats_buf->mpdus_seq_hw_retry)); + len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n", + le32_to_cpu(htt_stats_buf->ack_tlv_proc)); + len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt_valid = %u\n", + le32_to_cpu(htt_stats_buf->coex_abort_mpdu_cnt_valid)); + len += scnprintf(buf + len, buf_len - len, "coex_abort_mpdu_cnt = %u\n", + le32_to_cpu(htt_stats_buf->coex_abort_mpdu_cnt)); + len += scnprintf(buf + len, buf_len - len, "num_total_ppdus_tried_ota = %u\n", + le32_to_cpu(htt_stats_buf->num_total_ppdus_tried_ota)); + len += scnprintf(buf + len, buf_len - len, "num_data_ppdus_tried_ota = %u\n", + le32_to_cpu(htt_stats_buf->num_data_ppdus_tried_ota)); + len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_enqued = %u\n", + le32_to_cpu(htt_stats_buf->local_ctrl_mgmt_enqued)); + len += scnprintf(buf + len, buf_len - len, "local_ctrl_mgmt_freed = %u\n", + le32_to_cpu(htt_stats_buf->local_ctrl_mgmt_freed)); + len += scnprintf(buf + len, buf_len - len, "local_data_enqued = %u\n", + le32_to_cpu(htt_stats_buf->local_data_enqued)); + len += scnprintf(buf + len, buf_len - len, "local_data_freed = %u\n", + le32_to_cpu(htt_stats_buf->local_data_freed)); + len += scnprintf(buf + len, buf_len - len, "mpdu_tried = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_tried)); + len += scnprintf(buf + len, buf_len - len, "isr_wait_seq_posted = %u\n", + le32_to_cpu(htt_stats_buf->isr_wait_seq_posted)); + len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_low = %u\n", + le32_to_cpu(htt_stats_buf->tx_active_dur_us_low)); + len += scnprintf(buf + len, buf_len - len, "tx_active_dur_us_high = %u\n", + le32_to_cpu(htt_stats_buf->tx_active_dur_us_high)); + len += scnprintf(buf + len, buf_len - len, "fes_offsets_err_cnt = %u\n\n", + le32_to_cpu(htt_stats_buf->fes_offsets_err_cnt)); + + stats_req->buf_len = len; +} + +static void +htt_print_tx_pdev_stats_urrn_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_stats_urrn_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + HTT_TX_PDEV_MAX_URRN_STATS); + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_STATS_URRN_TLV:\n"); + + len += print_array_to_buf(buf, len, "urrn_stats", htt_stats_buf->urrn_stats, + num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +htt_print_tx_pdev_stats_flush_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_stats_flush_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + ATH12K_HTT_TX_PDEV_MAX_FLUSH_REASON_STATS); + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_STATS_FLUSH_TLV:\n"); + + len += print_array_to_buf(buf, len, "flush_errs", htt_stats_buf->flush_errs, + num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +htt_print_tx_pdev_stats_sifs_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_stats_sifs_tlv *htt_stats_buf = tag_buf; + u8 *buf = 
stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_STATS); + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_STATS_SIFS_TLV:\n"); + + len += print_array_to_buf(buf, len, "sifs_status", htt_stats_buf->sifs_status, + num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +htt_print_tx_pdev_mu_ppdu_dist_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv *htt_stats_buf = tag_buf; + char *mode; + u8 j, hw_mode, i, str_buf_len; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 stats_value; + u8 max_ppdu = ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST; + u8 max_sched = ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS; + char str_buf[ATH12K_HTT_MAX_STRING_LEN]; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + hw_mode = le32_to_cpu(htt_stats_buf->hw_mode); + + switch (hw_mode) { + case ATH12K_HTT_STATS_HWMODE_AC: + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_AC_MU_PPDU_DISTRIBUTION_STATS:\n"); + mode = "ac"; + break; + case ATH12K_HTT_STATS_HWMODE_AX: + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_AX_MU_PPDU_DISTRIBUTION_STATS:\n"); + mode = "ax"; + break; + case ATH12K_HTT_STATS_HWMODE_BE: + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_BE_MU_PPDU_DISTRIBUTION_STATS:\n"); + mode = "be"; + break; + default: + return; + } + + for (i = 0; i < ATH12K_HTT_STATS_NUM_NR_BINS ; i++) { + len += scnprintf(buf + len, buf_len - len, + "%s_mu_mimo_num_seq_posted_nr%u = %u\n", mode, + ((i + 1) * 4), htt_stats_buf->num_seq_posted[i]); + str_buf_len = 0; + memset(str_buf, 0x0, sizeof(str_buf)); + for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST ; j++) { + stats_value = le32_to_cpu(htt_stats_buf->num_ppdu_posted_per_burst + [i * max_ppdu + j]); + str_buf_len += scnprintf(&str_buf[str_buf_len], + ATH12K_HTT_MAX_STRING_LEN - str_buf_len, + " %u:%u,", j, stats_value); + } + /* To overwrite the last trailing comma */ + str_buf[str_buf_len - 1] = '\0'; + len += scnprintf(buf + len, buf_len - len, + "%s_mu_mimo_num_ppdu_posted_per_burst_nr%u = %s\n", + mode, ((i + 1) * 4), str_buf); + str_buf_len = 0; + memset(str_buf, 0x0, sizeof(str_buf)); + for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST ; j++) { + stats_value = le32_to_cpu(htt_stats_buf->num_ppdu_cmpl_per_burst + [i * max_ppdu + j]); + str_buf_len += scnprintf(&str_buf[str_buf_len], + ATH12K_HTT_MAX_STRING_LEN - str_buf_len, + " %u:%u,", j, stats_value); + } + /* To overwrite the last trailing comma */ + str_buf[str_buf_len - 1] = '\0'; + len += scnprintf(buf + len, buf_len - len, + "%s_mu_mimo_num_ppdu_completed_per_burst_nr%u = %s\n", + mode, ((i + 1) * 4), str_buf); + str_buf_len = 0; + memset(str_buf, 0x0, sizeof(str_buf)); + for (j = 0; j < ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS ; j++) { + stats_value = le32_to_cpu(htt_stats_buf->num_seq_term_status + [i * max_sched + j]); + str_buf_len += scnprintf(&str_buf[str_buf_len], + ATH12K_HTT_MAX_STRING_LEN - str_buf_len, + " %u:%u,", j, stats_value); + } + /* To overwrite the last trailing comma */ + str_buf[str_buf_len - 1] = '\0'; + len += scnprintf(buf + len, buf_len - len, + "%s_mu_mimo_num_seq_term_status_nr%u = %s\n\n", + mode, ((i + 1) * 4), str_buf); + } + + stats_req->buf_len = len; +} + +static void +htt_print_tx_pdev_stats_sifs_hist_tlv(const void *tag_buf, + u16 
tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_stats_sifs_hist_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS); + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_STATS_SIFS_HIST_TLV:\n"); + + len += print_array_to_buf(buf, len, "sifs_hist_status", + htt_stats_buf->sifs_hist_status, num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +htt_print_pdev_ctrl_path_tx_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_pdev_ctrl_path_tx_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_STATS_CTRL_PATH_TX_STATS:\n"); + len += print_array_to_buf(buf, len, "fw_tx_mgmt_subtype", + htt_stats_buf->fw_tx_mgmt_subtype, + ATH12K_HTT_STATS_SUBTYPE_MAX, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_stats_tx_sched_cmn_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "current_timestamp = %u\n\n", + le32_to_cpu(htt_stats_buf->current_timestamp)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_pdev_stats_sched_per_txq_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, + "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, + ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "txq_id = %u\n", + u32_get_bits(mac_id_word, + ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID)); + len += scnprintf(buf + len, buf_len - len, "sched_policy = %u\n", + le32_to_cpu(htt_stats_buf->sched_policy)); + len += scnprintf(buf + len, buf_len - len, + "last_sched_cmd_posted_timestamp = %u\n", + le32_to_cpu(htt_stats_buf->last_sched_cmd_posted_timestamp)); + len += scnprintf(buf + len, buf_len - len, + "last_sched_cmd_compl_timestamp = %u\n", + le32_to_cpu(htt_stats_buf->last_sched_cmd_compl_timestamp)); + len += scnprintf(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u\n", + le32_to_cpu(htt_stats_buf->sched_2_tac_lwm_count)); + len += scnprintf(buf + len, buf_len - len, "sched_2_tac_ring_full = %u\n", + le32_to_cpu(htt_stats_buf->sched_2_tac_ring_full)); + len += scnprintf(buf + len, buf_len - len,
"sched_cmd_post_failure = %u\n", + le32_to_cpu(htt_stats_buf->sched_cmd_post_failure)); + len += scnprintf(buf + len, buf_len - len, "num_active_tids = %u\n", + le32_to_cpu(htt_stats_buf->num_active_tids)); + len += scnprintf(buf + len, buf_len - len, "num_ps_schedules = %u\n", + le32_to_cpu(htt_stats_buf->num_ps_schedules)); + len += scnprintf(buf + len, buf_len - len, "sched_cmds_pending = %u\n", + le32_to_cpu(htt_stats_buf->sched_cmds_pending)); + len += scnprintf(buf + len, buf_len - len, "num_tid_register = %u\n", + le32_to_cpu(htt_stats_buf->num_tid_register)); + len += scnprintf(buf + len, buf_len - len, "num_tid_unregister = %u\n", + le32_to_cpu(htt_stats_buf->num_tid_unregister)); + len += scnprintf(buf + len, buf_len - len, "num_qstats_queried = %u\n", + le32_to_cpu(htt_stats_buf->num_qstats_queried)); + len += scnprintf(buf + len, buf_len - len, "qstats_update_pending = %u\n", + le32_to_cpu(htt_stats_buf->qstats_update_pending)); + len += scnprintf(buf + len, buf_len - len, "last_qstats_query_timestamp = %u\n", + le32_to_cpu(htt_stats_buf->last_qstats_query_timestamp)); + len += scnprintf(buf + len, buf_len - len, "num_tqm_cmdq_full = %u\n", + le32_to_cpu(htt_stats_buf->num_tqm_cmdq_full)); + len += scnprintf(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u\n", + le32_to_cpu(htt_stats_buf->num_de_sched_algo_trigger)); + len += scnprintf(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u\n", + le32_to_cpu(htt_stats_buf->num_rt_sched_algo_trigger)); + len += scnprintf(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u\n", + le32_to_cpu(htt_stats_buf->num_tqm_sched_algo_trigger)); + len += scnprintf(buf + len, buf_len - len, "notify_sched = %u\n", + le32_to_cpu(htt_stats_buf->notify_sched)); + len += scnprintf(buf + len, buf_len - len, "dur_based_sendn_term = %u\n", + le32_to_cpu(htt_stats_buf->dur_based_sendn_term)); + len += scnprintf(buf + len, buf_len - len, "su_notify2_sched = %u\n", + le32_to_cpu(htt_stats_buf->su_notify2_sched)); + len += scnprintf(buf + len, buf_len - len, "su_optimal_queued_msdus_sched = %u\n", + le32_to_cpu(htt_stats_buf->su_optimal_queued_msdus_sched)); + len += scnprintf(buf + len, buf_len - len, "su_delay_timeout_sched = %u\n", + le32_to_cpu(htt_stats_buf->su_delay_timeout_sched)); + len += scnprintf(buf + len, buf_len - len, "su_min_txtime_sched_delay = %u\n", + le32_to_cpu(htt_stats_buf->su_min_txtime_sched_delay)); + len += scnprintf(buf + len, buf_len - len, "su_no_delay = %u\n", + le32_to_cpu(htt_stats_buf->su_no_delay)); + len += scnprintf(buf + len, buf_len - len, "num_supercycles = %u\n", + le32_to_cpu(htt_stats_buf->num_supercycles)); + len += scnprintf(buf + len, buf_len - len, "num_subcycles_with_sort = %u\n", + le32_to_cpu(htt_stats_buf->num_subcycles_with_sort)); + len += scnprintf(buf + len, buf_len - len, "num_subcycles_no_sort = %u\n\n", + le32_to_cpu(htt_stats_buf->num_subcycles_no_sort)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sched_txq_cmd_posted_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sched_txq_cmd_posted_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elements = tag_len >> 2; + + len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV:\n"); + len += print_array_to_buf(buf, len, "sched_cmd_posted", + htt_stats_buf->sched_cmd_posted, num_elements, "\n\n"); + + stats_req->buf_len = len; +} + +static void 
+ath12k_htt_print_sched_txq_cmd_reaped_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sched_txq_cmd_reaped_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elements = tag_len >> 2; + + len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV:\n"); + len += print_array_to_buf(buf, len, "sched_cmd_reaped", + htt_stats_buf->sched_cmd_reaped, num_elements, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sched_txq_sched_order_su_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sched_txq_sched_order_su_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 sched_order_su_num_entries = min_t(u32, (tag_len >> 2), + ATH12K_HTT_TX_PDEV_NUM_SCHED_ORDER_LOG); + + len += scnprintf(buf + len, buf_len - len, + "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV:\n"); + len += print_array_to_buf(buf, len, "sched_order_su", + htt_stats_buf->sched_order_su, + sched_order_su_num_entries, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sched_txq_sched_ineligibility_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sched_txq_sched_ineligibility_tlv *htt_stats_buf = + tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 sched_ineligibility_num_entries = tag_len >> 2; + + len += scnprintf(buf + len, buf_len - len, + "HTT_SCHED_TXQ_SCHED_INELIGIBILITY:\n"); + len += print_array_to_buf(buf, len, "sched_ineligibility", + htt_stats_buf->sched_ineligibility, + sched_ineligibility_num_entries, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_sched_txq_supercycle_trigger_tlv(const void *tag_buf, + u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_sched_txq_supercycle_triggers_tlv *htt_stats_buf = + tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX); + + len += scnprintf(buf + len, buf_len - len, + "HTT_SCHED_TXQ_SUPERCYCLE_TRIGGER:\n"); + len += print_array_to_buf(buf, len, "supercycle_triggers", + htt_stats_buf->supercycle_triggers, num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_hw_stats_pdev_errs_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_hw_stats_pdev_errs_tlv *htt_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_buf)) + return; + + mac_id_word = le32_to_cpu(htt_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_PDEV_ERRS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "tx_abort = %u\n", + le32_to_cpu(htt_buf->tx_abort)); + len += scnprintf(buf + len, buf_len - len, "tx_abort_fail_count = %u\n", + le32_to_cpu(htt_buf->tx_abort_fail_count)); + len += scnprintf(buf + len, buf_len - len, "rx_abort = %u\n", + le32_to_cpu(htt_buf->rx_abort)); + len += scnprintf(buf + len, 
buf_len - len, "rx_abort_fail_count = %u\n", + le32_to_cpu(htt_buf->rx_abort_fail_count)); + len += scnprintf(buf + len, buf_len - len, "rx_flush_cnt = %u\n", + le32_to_cpu(htt_buf->rx_flush_cnt)); + len += scnprintf(buf + len, buf_len - len, "warm_reset = %u\n", + le32_to_cpu(htt_buf->warm_reset)); + len += scnprintf(buf + len, buf_len - len, "cold_reset = %u\n", + le32_to_cpu(htt_buf->cold_reset)); + len += scnprintf(buf + len, buf_len - len, "mac_cold_reset_restore_cal = %u\n", + le32_to_cpu(htt_buf->mac_cold_reset_restore_cal)); + len += scnprintf(buf + len, buf_len - len, "mac_cold_reset = %u\n", + le32_to_cpu(htt_buf->mac_cold_reset)); + len += scnprintf(buf + len, buf_len - len, "mac_warm_reset = %u\n", + le32_to_cpu(htt_buf->mac_warm_reset)); + len += scnprintf(buf + len, buf_len - len, "mac_only_reset = %u\n", + le32_to_cpu(htt_buf->mac_only_reset)); + len += scnprintf(buf + len, buf_len - len, "phy_warm_reset = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset)); + len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_ucode_trig = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_ucode_trig)); + len += scnprintf(buf + len, buf_len - len, "mac_warm_reset_restore_cal = %u\n", + le32_to_cpu(htt_buf->mac_warm_reset_restore_cal)); + len += scnprintf(buf + len, buf_len - len, "mac_sfm_reset = %u\n", + le32_to_cpu(htt_buf->mac_sfm_reset)); + len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_m3_ssr = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_m3_ssr)); + len += scnprintf(buf + len, buf_len - len, "fw_rx_rings_reset = %u\n", + le32_to_cpu(htt_buf->fw_rx_rings_reset)); + len += scnprintf(buf + len, buf_len - len, "tx_flush = %u\n", + le32_to_cpu(htt_buf->tx_flush)); + len += scnprintf(buf + len, buf_len - len, "tx_glb_reset = %u\n", + le32_to_cpu(htt_buf->tx_glb_reset)); + len += scnprintf(buf + len, buf_len - len, "tx_txq_reset = %u\n", + le32_to_cpu(htt_buf->tx_txq_reset)); + len += scnprintf(buf + len, buf_len - len, "rx_timeout_reset = %u\n\n", + le32_to_cpu(htt_buf->rx_timeout_reset)); + + len += scnprintf(buf + len, buf_len - len, "PDEV_PHY_WARM_RESET_REASONS:\n"); + len += scnprintf(buf + len, buf_len - len, "phy_warm_reset_reason_phy_m3 = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_phy_m3)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_tx_hw_stuck = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_hw_stuck)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_num_cca_rx_frame_stuck = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_num_rx_frame_stuck)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_wal_rx_recovery_rst_rx_busy = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_wal_rx_rec_rx_busy)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_wal_rx_recovery_rst_mac_hang = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_wal_rx_rec_mac_hng)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_mac_reset_converted_phy_reset = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_mac_conv_phy_reset)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_tx_lifetime_expiry_cca_stuck = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_exp_cca_stuck)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_tx_consecutive_flush9_war = %u\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_consec_flsh_war)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_tx_hwsch_reset_war = %u\n", + 
le32_to_cpu(htt_buf->phy_warm_reset_reason_tx_hwsch_reset_war)); + len += scnprintf(buf + len, buf_len - len, + "phy_warm_reset_reason_hwsch_wdog_or_cca_wdog_war = %u\n\n", + le32_to_cpu(htt_buf->phy_warm_reset_reason_hwsch_cca_wdog_war)); + + len += scnprintf(buf + len, buf_len - len, "WAL_RX_RECOVERY_STATS:\n"); + len += scnprintf(buf + len, buf_len - len, + "wal_rx_recovery_rst_mac_hang_count = %u\n", + le32_to_cpu(htt_buf->wal_rx_recovery_rst_mac_hang_cnt)); + len += scnprintf(buf + len, buf_len - len, + "wal_rx_recovery_rst_known_sig_count = %u\n", + le32_to_cpu(htt_buf->wal_rx_recovery_rst_known_sig_cnt)); + len += scnprintf(buf + len, buf_len - len, + "wal_rx_recovery_rst_no_rx_count = %u\n", + le32_to_cpu(htt_buf->wal_rx_recovery_rst_no_rx_cnt)); + len += scnprintf(buf + len, buf_len - len, + "wal_rx_recovery_rst_no_rx_consecutive_count = %u\n", + le32_to_cpu(htt_buf->wal_rx_recovery_rst_no_rx_consec_cnt)); + len += scnprintf(buf + len, buf_len - len, + "wal_rx_recovery_rst_rx_busy_count = %u\n", + le32_to_cpu(htt_buf->wal_rx_recovery_rst_rx_busy_cnt)); + len += scnprintf(buf + len, buf_len - len, + "wal_rx_recovery_rst_phy_mac_hang_count = %u\n\n", + le32_to_cpu(htt_buf->wal_rx_recovery_rst_phy_mac_hang_cnt)); + + len += scnprintf(buf + len, buf_len - len, "HTT_RX_DEST_DRAIN_STATS:\n"); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rx_descs_leak_prevention_done = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rx_descs_leak_prevented)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rx_descs_saved_cnt = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rx_descs_saved_cnt)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rxdma2reo_leak_detected = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rxdma2reo_leak_detected)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rxdma2fw_leak_detected = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rxdma2fw_leak_detected)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rxdma2wbm_leak_detected = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rxdma2wbm_leak_detected)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rxdma1_2sw_leak_detected = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rxdma1_2sw_leak_detected)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_rx_drain_ok_mac_idle = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_rx_drain_ok_mac_idle)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_ok_mac_not_idle = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_ok_mac_not_idle)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_prerequisite_invld = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_prerequisite_invld)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_skip_for_non_lmac_reset = %u\n", + le32_to_cpu(htt_buf->rx_dest_drain_skip_non_lmac_reset)); + len += scnprintf(buf + len, buf_len - len, + "rx_dest_drain_hw_fifo_not_empty_post_drain_wait = %u\n\n", + le32_to_cpu(htt_buf->rx_dest_drain_hw_fifo_notempty_post_wait)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_hw_stats_intr_misc_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_hw_stats_intr_misc_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_INTR_MISC_TLV:\n"); + len += scnprintf(buf + len, 
buf_len - len, "hw_intr_name = %s\n", + htt_stats_buf->hw_intr_name); + len += scnprintf(buf + len, buf_len - len, "mask = %u\n", + le32_to_cpu(htt_stats_buf->mask)); + len += scnprintf(buf + len, buf_len - len, "count = %u\n\n", + le32_to_cpu(htt_stats_buf->count)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_hw_stats_whal_tx_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_hw_stats_whal_tx_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_HW_STATS_WHAL_TX_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "last_unpause_ppdu_id = %u\n", + le32_to_cpu(htt_stats_buf->last_unpause_ppdu_id)); + len += scnprintf(buf + len, buf_len - len, "hwsch_unpause_wait_tqm_write = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_unpause_wait_tqm_write)); + len += scnprintf(buf + len, buf_len - len, "hwsch_dummy_tlv_skipped = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_dummy_tlv_skipped)); + len += scnprintf(buf + len, buf_len - len, + "hwsch_misaligned_offset_received = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_misaligned_offset_received)); + len += scnprintf(buf + len, buf_len - len, "hwsch_reset_count = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_reset_count)); + len += scnprintf(buf + len, buf_len - len, "hwsch_dev_reset_war = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_dev_reset_war)); + len += scnprintf(buf + len, buf_len - len, "hwsch_delayed_pause = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_delayed_pause)); + len += scnprintf(buf + len, buf_len - len, "hwsch_long_delayed_pause = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_long_delayed_pause)); + len += scnprintf(buf + len, buf_len - len, "sch_rx_ppdu_no_response = %u\n", + le32_to_cpu(htt_stats_buf->sch_rx_ppdu_no_response)); + len += scnprintf(buf + len, buf_len - len, "sch_selfgen_response = %u\n", + le32_to_cpu(htt_stats_buf->sch_selfgen_response)); + len += scnprintf(buf + len, buf_len - len, "sch_rx_sifs_resp_trigger= %u\n\n", + le32_to_cpu(htt_stats_buf->sch_rx_sifs_resp_trigger)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_hw_war_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_hw_war_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 fixed_len, array_len; + u8 i, array_words; + u32 mac_id; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id = __le32_to_cpu(htt_stats_buf->mac_id__word); + fixed_len = sizeof(*htt_stats_buf); + array_len = tag_len - fixed_len; + array_words = array_len >> 2; + + len += scnprintf(buf + len, buf_len - len, "HTT_HW_WAR_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id, ATH12K_HTT_STATS_MAC_ID)); + + for (i = 0; i < array_words; i++) { + len += scnprintf(buf + len, buf_len - len, "hw_war %u = %u\n\n", + i, le32_to_cpu(htt_stats_buf->hw_wars[i])); + } + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_tqm_cmn_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct 
ath12k_htt_tx_tqm_cmn_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u32 mac_id_word; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word); + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_CMN_STATS_TLV:\n"); + len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n", + u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID)); + len += scnprintf(buf + len, buf_len - len, "max_cmdq_id = %u\n", + le32_to_cpu(htt_stats_buf->max_cmdq_id)); + len += scnprintf(buf + len, buf_len - len, "list_mpdu_cnt_hist_intvl = %u\n", + le32_to_cpu(htt_stats_buf->list_mpdu_cnt_hist_intvl)); + len += scnprintf(buf + len, buf_len - len, "add_msdu = %u\n", + le32_to_cpu(htt_stats_buf->add_msdu)); + len += scnprintf(buf + len, buf_len - len, "q_empty = %u\n", + le32_to_cpu(htt_stats_buf->q_empty)); + len += scnprintf(buf + len, buf_len - len, "q_not_empty = %u\n", + le32_to_cpu(htt_stats_buf->q_not_empty)); + len += scnprintf(buf + len, buf_len - len, "drop_notification = %u\n", + le32_to_cpu(htt_stats_buf->drop_notification)); + len += scnprintf(buf + len, buf_len - len, "desc_threshold = %u\n", + le32_to_cpu(htt_stats_buf->desc_threshold)); + len += scnprintf(buf + len, buf_len - len, "hwsch_tqm_invalid_status = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_tqm_invalid_status)); + len += scnprintf(buf + len, buf_len - len, "missed_tqm_gen_mpdus = %u\n", + le32_to_cpu(htt_stats_buf->missed_tqm_gen_mpdus)); + len += scnprintf(buf + len, buf_len - len, + "total_msduq_timestamp_updates = %u\n", + le32_to_cpu(htt_stats_buf->msduq_timestamp_updates)); + len += scnprintf(buf + len, buf_len - len, + "total_msduq_timestamp_updates_by_get_mpdu_head_info_cmd = %u\n", + le32_to_cpu(htt_stats_buf->msduq_updates_mpdu_head_info_cmd)); + len += scnprintf(buf + len, buf_len - len, + "total_msduq_timestamp_updates_by_emp_to_nonemp_status = %u\n", + le32_to_cpu(htt_stats_buf->msduq_updates_emp_to_nonemp_status)); + len += scnprintf(buf + len, buf_len - len, + "total_get_mpdu_head_info_cmds_by_sched_algo_la_query = %u\n", + le32_to_cpu(htt_stats_buf->get_mpdu_head_info_cmds_by_query)); + len += scnprintf(buf + len, buf_len - len, + "total_get_mpdu_head_info_cmds_by_tac = %u\n", + le32_to_cpu(htt_stats_buf->get_mpdu_head_info_cmds_by_tac)); + len += scnprintf(buf + len, buf_len - len, + "total_gen_mpdu_cmds_by_sched_algo_la_query = %u\n", + le32_to_cpu(htt_stats_buf->gen_mpdu_cmds_by_query)); + len += scnprintf(buf + len, buf_len - len, "active_tqm_tids = %u\n", + le32_to_cpu(htt_stats_buf->tqm_active_tids)); + len += scnprintf(buf + len, buf_len - len, "inactive_tqm_tids = %u\n", + le32_to_cpu(htt_stats_buf->tqm_inactive_tids)); + len += scnprintf(buf + len, buf_len - len, "tqm_active_msduq_flows = %u\n", + le32_to_cpu(htt_stats_buf->tqm_active_msduq_flows)); + len += scnprintf(buf + len, buf_len - len, "hi_prio_q_not_empty = %u\n\n", + le32_to_cpu(htt_stats_buf->high_prio_q_not_empty)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_tqm_error_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_tqm_error_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_ERROR_STATS_TLV:\n"); + len += 
scnprintf(buf + len, buf_len - len, "q_empty_failure = %u\n", + le32_to_cpu(htt_stats_buf->q_empty_failure)); + len += scnprintf(buf + len, buf_len - len, "q_not_empty_failure = %u\n", + le32_to_cpu(htt_stats_buf->q_not_empty_failure)); + len += scnprintf(buf + len, buf_len - len, "add_msdu_failure = %u\n\n", + le32_to_cpu(htt_stats_buf->add_msdu_failure)); + + len += scnprintf(buf + len, buf_len - len, "TQM_ERROR_RESET_STATS:\n"); + len += scnprintf(buf + len, buf_len - len, "tqm_cache_ctl_err = %u\n", + le32_to_cpu(htt_stats_buf->tqm_cache_ctl_err)); + len += scnprintf(buf + len, buf_len - len, "tqm_soft_reset = %u\n", + le32_to_cpu(htt_stats_buf->tqm_soft_reset)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_total_num_in_use_link_descs = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_link_descs)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_worst_case_num_lost_link_descs = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_num_lost_link_descs)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_worst_case_num_lost_host_tx_bufs_count = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_num_lost_host_tx_buf_cnt)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_num_in_use_link_descs_internal_tqm = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_internal_tqm)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_num_in_use_link_descs_wbm_idle_link_ring = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_num_in_use_idle_link_rng)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_time_to_tqm_hang_delta_ms = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_time_to_tqm_hang_delta_ms)); + len += scnprintf(buf + len, buf_len - len, "tqm_reset_recovery_time_ms = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_recovery_time_ms)); + len += scnprintf(buf + len, buf_len - len, "tqm_reset_num_peers_hdl = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_num_peers_hdl)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_cumm_dirty_hw_mpduq_proc_cnt = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_cumm_dirty_hw_mpduq_cnt)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_cumm_dirty_hw_msduq_proc = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_cumm_dirty_hw_msduq_proc)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_flush_cache_cmd_su_cnt = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_su_cnt)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_flush_cache_cmd_other_cnt = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_other_cnt)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_flush_cache_cmd_trig_type = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_trig_type)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_flush_cache_cmd_trig_cfg = %u\n", + le32_to_cpu(htt_stats_buf->tqm_reset_flush_cache_cmd_trig_cfg)); + len += scnprintf(buf + len, buf_len - len, + "tqm_reset_flush_cache_cmd_skip_cmd_status_null = %u\n\n", + le32_to_cpu(htt_stats_buf->tqm_reset_flush_cmd_skp_status_null)); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_tqm_gen_mpdu_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_tqm_gen_mpdu_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elements = tag_len >> 2; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_GEN_MPDU_STATS_TLV:\n"); + len 
+= print_array_to_buf(buf, len, "gen_mpdu_end_reason", + htt_stats_buf->gen_mpdu_end_reason, num_elements, + "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_tqm_list_mpdu_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_tqm_list_mpdu_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_END_REASON); + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_STATS_TLV:\n"); + len += print_array_to_buf(buf, len, "list_mpdu_end_reason", + htt_stats_buf->list_mpdu_end_reason, num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_tqm_list_mpdu_cnt_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_tqm_list_mpdu_cnt_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + u16 num_elems = min_t(u16, (tag_len >> 2), + ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS); + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_LIST_MPDU_CNT_TLV_V:\n"); + len += print_array_to_buf(buf, len, "list_mpdu_cnt_hist", + htt_stats_buf->list_mpdu_cnt_hist, num_elems, "\n\n"); + + stats_req->buf_len = len; +} + +static void +ath12k_htt_print_tx_tqm_pdev_stats_tlv(const void *tag_buf, u16 tag_len, + struct debug_htt_stats_req *stats_req) +{ + const struct ath12k_htt_tx_tqm_pdev_stats_tlv *htt_stats_buf = tag_buf; + u8 *buf = stats_req->buf; + u32 len = stats_req->buf_len; + u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE; + + if (tag_len < sizeof(*htt_stats_buf)) + return; + + len += scnprintf(buf + len, buf_len - len, "HTT_TX_TQM_PDEV_STATS_TLV_V:\n"); + len += scnprintf(buf + len, buf_len - len, "msdu_count = %u\n", + le32_to_cpu(htt_stats_buf->msdu_count)); + len += scnprintf(buf + len, buf_len - len, "mpdu_count = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_count)); + len += scnprintf(buf + len, buf_len - len, "remove_msdu = %u\n", + le32_to_cpu(htt_stats_buf->remove_msdu)); + len += scnprintf(buf + len, buf_len - len, "remove_mpdu = %u\n", + le32_to_cpu(htt_stats_buf->remove_mpdu)); + len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl = %u\n", + le32_to_cpu(htt_stats_buf->remove_msdu_ttl)); + len += scnprintf(buf + len, buf_len - len, "send_bar = %u\n", + le32_to_cpu(htt_stats_buf->send_bar)); + len += scnprintf(buf + len, buf_len - len, "bar_sync = %u\n", + le32_to_cpu(htt_stats_buf->bar_sync)); + len += scnprintf(buf + len, buf_len - len, "notify_mpdu = %u\n", + le32_to_cpu(htt_stats_buf->notify_mpdu)); + len += scnprintf(buf + len, buf_len - len, "sync_cmd = %u\n", + le32_to_cpu(htt_stats_buf->sync_cmd)); + len += scnprintf(buf + len, buf_len - len, "write_cmd = %u\n", + le32_to_cpu(htt_stats_buf->write_cmd)); + len += scnprintf(buf + len, buf_len - len, "hwsch_trigger = %u\n", + le32_to_cpu(htt_stats_buf->hwsch_trigger)); + len += scnprintf(buf + len, buf_len - len, "ack_tlv_proc = %u\n", + le32_to_cpu(htt_stats_buf->ack_tlv_proc)); + len += scnprintf(buf + len, buf_len - len, "gen_mpdu_cmd = %u\n", + le32_to_cpu(htt_stats_buf->gen_mpdu_cmd)); + len += scnprintf(buf + len, buf_len - len, "gen_list_cmd = %u\n", + le32_to_cpu(htt_stats_buf->gen_list_cmd)); + len += scnprintf(buf + len, buf_len - len, "remove_mpdu_cmd = %u\n", + 
le32_to_cpu(htt_stats_buf->remove_mpdu_cmd)); + len += scnprintf(buf + len, buf_len - len, "remove_mpdu_tried_cmd = %u\n", + le32_to_cpu(htt_stats_buf->remove_mpdu_tried_cmd)); + len += scnprintf(buf + len, buf_len - len, "mpdu_queue_stats_cmd = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_queue_stats_cmd)); + len += scnprintf(buf + len, buf_len - len, "mpdu_head_info_cmd = %u\n", + le32_to_cpu(htt_stats_buf->mpdu_head_info_cmd)); + len += scnprintf(buf + len, buf_len - len, "msdu_flow_stats_cmd = %u\n", + le32_to_cpu(htt_stats_buf->msdu_flow_stats_cmd)); + len += scnprintf(buf + len, buf_len - len, "remove_msdu_cmd = %u\n", + le32_to_cpu(htt_stats_buf->remove_msdu_cmd)); + len += scnprintf(buf + len, buf_len - len, "remove_msdu_ttl_cmd = %u\n", + le32_to_cpu(htt_stats_buf->remove_msdu_ttl_cmd)); + len += scnprintf(buf + len, buf_len - len, "flush_cache_cmd = %u\n", + le32_to_cpu(htt_stats_buf->flush_cache_cmd)); + len += scnprintf(buf + len, buf_len - len, "update_mpduq_cmd = %u\n", + le32_to_cpu(htt_stats_buf->update_mpduq_cmd)); + len += scnprintf(buf + len, buf_len - len, "enqueue = %u\n", + le32_to_cpu(htt_stats_buf->enqueue)); + len += scnprintf(buf + len, buf_len - len, "enqueue_notify = %u\n", + le32_to_cpu(htt_stats_buf->enqueue_notify)); + len += scnprintf(buf + len, buf_len - len, "notify_mpdu_at_head = %u\n", + le32_to_cpu(htt_stats_buf->notify_mpdu_at_head)); + len += scnprintf(buf + len, buf_len - len, "notify_mpdu_state_valid = %u\n", + le32_to_cpu(htt_stats_buf->notify_mpdu_state_valid)); + len += scnprintf(buf + len, buf_len - len, "sched_udp_notify1 = %u\n", + le32_to_cpu(htt_stats_buf->sched_udp_notify1)); + len += scnprintf(buf + len, buf_len - len, "sched_udp_notify2 = %u\n", + le32_to_cpu(htt_stats_buf->sched_udp_notify2)); + len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify1 = %u\n", + le32_to_cpu(htt_stats_buf->sched_nonudp_notify1)); + len += scnprintf(buf + len, buf_len - len, "sched_nonudp_notify2 = %u\n\n", + le32_to_cpu(htt_stats_buf->sched_nonudp_notify2)); + + stats_req->buf_len = len; +} + +static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab, + u16 tag, u16 len, const void *tag_buf, + void *user_data) +{ + struct debug_htt_stats_req *stats_req = user_data; + + switch (tag) { + case HTT_STATS_TX_PDEV_CMN_TAG: + htt_print_tx_pdev_stats_cmn_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_UNDERRUN_TAG: + htt_print_tx_pdev_stats_urrn_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_SIFS_TAG: + htt_print_tx_pdev_stats_sifs_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_FLUSH_TAG: + htt_print_tx_pdev_stats_flush_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_SIFS_HIST_TAG: + htt_print_tx_pdev_stats_sifs_hist_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG: + htt_print_pdev_ctrl_path_tx_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_MU_PPDU_DIST_TAG: + htt_print_tx_pdev_mu_ppdu_dist_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_SCHED_CMN_TAG: + ath12k_htt_print_stats_tx_sched_cmn_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG: + ath12k_htt_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG: + ath12k_htt_print_sched_txq_cmd_posted_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG: + ath12k_htt_print_sched_txq_cmd_reaped_tlv(tag_buf, len, stats_req); + break; + case 
HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG: + ath12k_htt_print_sched_txq_sched_order_su_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG: + ath12k_htt_print_sched_txq_sched_ineligibility_tlv(tag_buf, len, + stats_req); + break; + case HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG: + ath12k_htt_print_sched_txq_supercycle_trigger_tlv(tag_buf, len, + stats_req); + break; + case HTT_STATS_HW_PDEV_ERRS_TAG: + ath12k_htt_print_hw_stats_pdev_errs_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_HW_INTR_MISC_TAG: + ath12k_htt_print_hw_stats_intr_misc_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_WHAL_TX_TAG: + ath12k_htt_print_hw_stats_whal_tx_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_HW_WAR_TAG: + ath12k_htt_print_hw_war_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_TQM_CMN_TAG: + ath12k_htt_print_tx_tqm_cmn_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_TQM_ERROR_STATS_TAG: + ath12k_htt_print_tx_tqm_error_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_TQM_GEN_MPDU_TAG: + ath12k_htt_print_tx_tqm_gen_mpdu_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_TQM_LIST_MPDU_TAG: + ath12k_htt_print_tx_tqm_list_mpdu_stats_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG: + ath12k_htt_print_tx_tqm_list_mpdu_cnt_tlv(tag_buf, len, stats_req); + break; + case HTT_STATS_TX_TQM_PDEV_TAG: + ath12k_htt_print_tx_tqm_pdev_stats_tlv(tag_buf, len, stats_req); + break; + default: + break; + } + + return 0; +} + +void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab, + struct sk_buff *skb) +{ + struct ath12k_htt_extd_stats_msg *msg; + struct debug_htt_stats_req *stats_req; + struct ath12k *ar; + u32 len, pdev_id, stats_info; + u64 cookie; + int ret; + bool send_completion = false; + + msg = (struct ath12k_htt_extd_stats_msg *)skb->data; + cookie = le64_to_cpu(msg->cookie); + + if (u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_MSB) != + ATH12K_HTT_STATS_MAGIC_VALUE) { + ath12k_warn(ab, "received invalid htt ext stats event\n"); + return; + } + + pdev_id = u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_LSB); + rcu_read_lock(); + ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id); + if (!ar) { + ath12k_warn(ab, "failed to get ar for pdev_id %d\n", pdev_id); + goto exit; + } + + stats_req = ar->debug.htt_stats.stats_req; + if (!stats_req) + goto exit; + + spin_lock_bh(&ar->data_lock); + + stats_info = le32_to_cpu(msg->info1); + stats_req->done = u32_get_bits(stats_info, ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE); + if (stats_req->done) + send_completion = true; + + spin_unlock_bh(&ar->data_lock); + + len = u32_get_bits(stats_info, ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH); + if (len > skb->len) { + ath12k_warn(ab, "invalid length %d for HTT stats", len); + goto exit; + } + + ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len, + ath12k_dbg_htt_ext_stats_parse, + stats_req); + if (ret) + ath12k_warn(ab, "Failed to parse tlv %d\n", ret); + + if (send_completion) + complete(&stats_req->htt_stats_rcvd); +exit: + rcu_read_unlock(); +} + +static ssize_t ath12k_read_htt_stats_type(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath12k *ar = file->private_data; + enum ath12k_dbg_htt_ext_stats_type type; + char buf[32]; + size_t len; + + mutex_lock(&ar->conf_mutex); + type = ar->debug.htt_stats.type; + mutex_unlock(&ar->conf_mutex); + + len = scnprintf(buf, sizeof(buf), "%u\n", type); + + return simple_read_from_buffer(user_buf, count, ppos, 
buf, len); +} + +static ssize_t ath12k_write_htt_stats_type(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath12k *ar = file->private_data; + enum ath12k_dbg_htt_ext_stats_type type; + unsigned int cfg_param[4] = {0}; + const int size = 32; + int num_args; + + char *buf __free(kfree) = kzalloc(size, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + if (copy_from_user(buf, user_buf, min_t(size_t, count, size - 1))) + return -EFAULT; + + num_args = sscanf(buf, "%u %u %u %u %u\n", &type, &cfg_param[0], + &cfg_param[1], &cfg_param[2], &cfg_param[3]); + if (num_args < 1 || num_args > 5) + return -EINVAL; + + if (type == ATH12K_DBG_HTT_EXT_STATS_RESET || + type >= ATH12K_DBG_HTT_NUM_EXT_STATS) + return -EINVAL; + + mutex_lock(&ar->conf_mutex); + + ar->debug.htt_stats.type = type; + ar->debug.htt_stats.cfg_param[0] = cfg_param[0]; + ar->debug.htt_stats.cfg_param[1] = cfg_param[1]; + ar->debug.htt_stats.cfg_param[2] = cfg_param[2]; + ar->debug.htt_stats.cfg_param[3] = cfg_param[3]; + + mutex_unlock(&ar->conf_mutex); + + return count; +} + +static const struct file_operations fops_htt_stats_type = { + .read = ath12k_read_htt_stats_type, + .write = ath12k_write_htt_stats_type, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static int ath12k_debugfs_htt_stats_req(struct ath12k *ar) +{ + struct debug_htt_stats_req *stats_req = ar->debug.htt_stats.stats_req; + enum ath12k_dbg_htt_ext_stats_type type = stats_req->type; + u64 cookie; + int ret, pdev_id; + struct htt_ext_stats_cfg_params cfg_params = { 0 }; + + lockdep_assert_held(&ar->conf_mutex); + + init_completion(&stats_req->htt_stats_rcvd); + + pdev_id = ath12k_mac_get_target_pdev_id(ar); + stats_req->done = false; + stats_req->pdev_id = pdev_id; + + cookie = u64_encode_bits(ATH12K_HTT_STATS_MAGIC_VALUE, + ATH12K_HTT_STATS_COOKIE_MSB); + cookie |= u64_encode_bits(pdev_id, ATH12K_HTT_STATS_COOKIE_LSB); + + if (stats_req->override_cfg_param) { + cfg_params.cfg0 = stats_req->cfg_param[0]; + cfg_params.cfg1 = stats_req->cfg_param[1]; + cfg_params.cfg2 = stats_req->cfg_param[2]; + cfg_params.cfg3 = stats_req->cfg_param[3]; + } + + ret = ath12k_dp_tx_htt_h2t_ext_stats_req(ar, type, &cfg_params, cookie); + if (ret) { + ath12k_warn(ar->ab, "failed to send htt stats request: %d\n", ret); + return ret; + } + if (!wait_for_completion_timeout(&stats_req->htt_stats_rcvd, 3 * HZ)) { + spin_lock_bh(&ar->data_lock); + if (!stats_req->done) { + stats_req->done = true; + spin_unlock_bh(&ar->data_lock); + ath12k_warn(ar->ab, "stats request timed out\n"); + return -ETIMEDOUT; + } + spin_unlock_bh(&ar->data_lock); + } + + return 0; +} + +static int ath12k_open_htt_stats(struct inode *inode, + struct file *file) +{ + struct ath12k *ar = inode->i_private; + struct debug_htt_stats_req *stats_req; + enum ath12k_dbg_htt_ext_stats_type type = ar->debug.htt_stats.type; + struct ath12k_hw *ah = ath12k_ar_to_ah(ar); + int ret; + + if (type == ATH12K_DBG_HTT_EXT_STATS_RESET) + return -EPERM; + + mutex_lock(&ar->conf_mutex); + + if (ah->state != ATH12K_HW_STATE_ON) { + ret = -ENETDOWN; + goto err_unlock; + } + + if (ar->debug.htt_stats.stats_req) { + ret = -EAGAIN; + goto err_unlock; + } + + stats_req = kzalloc(sizeof(*stats_req) + ATH12K_HTT_STATS_BUF_SIZE, GFP_KERNEL); + if (!stats_req) { + ret = -ENOMEM; + goto err_unlock; + } + + ar->debug.htt_stats.stats_req = stats_req; + stats_req->type = type; + stats_req->cfg_param[0] = ar->debug.htt_stats.cfg_param[0]; + stats_req->cfg_param[1] = ar->debug.htt_stats.cfg_param[1]; +
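/*
 * Editorial sketch of the intended debugfs flow, assuming the default
 * debugfs mount point; the exact pdev directory path is driver
 * specific and elided here:
 *
 *	# request pdev TX stats (type 1), no config overrides
 *	echo "1 0 0 0 0" > .../htt_stats_type
 *	# opening htt_stats sends the request and waits up to three
 *	# seconds for the firmware response before the dump can be read
 *	cat .../htt_stats
 *
 * Any non-zero cfg value written above sets override_cfg_param just
 * below, which forwards the four values via htt_ext_stats_cfg_params
 * in ath12k_debugfs_htt_stats_req().
 */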
stats_req->cfg_param[2] = ar->debug.htt_stats.cfg_param[2]; + stats_req->cfg_param[3] = ar->debug.htt_stats.cfg_param[3]; + stats_req->override_cfg_param = !!stats_req->cfg_param[0] || + !!stats_req->cfg_param[1] || + !!stats_req->cfg_param[2] || + !!stats_req->cfg_param[3]; + + ret = ath12k_debugfs_htt_stats_req(ar); + if (ret < 0) + goto out; + + file->private_data = stats_req; + + mutex_unlock(&ar->conf_mutex); + + return 0; +out: + kfree(stats_req); + ar->debug.htt_stats.stats_req = NULL; +err_unlock: + mutex_unlock(&ar->conf_mutex); + + return ret; +} + +static int ath12k_release_htt_stats(struct inode *inode, + struct file *file) +{ + struct ath12k *ar = inode->i_private; + + mutex_lock(&ar->conf_mutex); + kfree(file->private_data); + ar->debug.htt_stats.stats_req = NULL; + mutex_unlock(&ar->conf_mutex); + + return 0; +} + +static ssize_t ath12k_read_htt_stats(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct debug_htt_stats_req *stats_req = file->private_data; + char *buf; + u32 length; + + buf = stats_req->buf; + length = min_t(u32, stats_req->buf_len, ATH12K_HTT_STATS_BUF_SIZE); + return simple_read_from_buffer(user_buf, count, ppos, buf, length); +} + +static const struct file_operations fops_dump_htt_stats = { + .open = ath12k_open_htt_stats, + .release = ath12k_release_htt_stats, + .read = ath12k_read_htt_stats, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +static ssize_t ath12k_write_htt_stats_reset(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ath12k *ar = file->private_data; + enum ath12k_dbg_htt_ext_stats_type type; + struct htt_ext_stats_cfg_params cfg_params = { 0 }; + u8 param_pos; + int ret; + + ret = kstrtou32_from_user(user_buf, count, 0, &type); + if (ret) + return ret; + + if (type >= ATH12K_DBG_HTT_NUM_EXT_STATS || + type == ATH12K_DBG_HTT_EXT_STATS_RESET) + return -E2BIG; + + mutex_lock(&ar->conf_mutex); + cfg_params.cfg0 = HTT_STAT_DEFAULT_RESET_START_OFFSET; + param_pos = (type >> 5) + 1; + + switch (param_pos) { + case ATH12K_HTT_STATS_RESET_PARAM_CFG_32_BYTES: + cfg_params.cfg1 = 1 << (cfg_params.cfg0 + type); + break; + case ATH12K_HTT_STATS_RESET_PARAM_CFG_64_BYTES: + cfg_params.cfg2 = ATH12K_HTT_STATS_RESET_BITMAP32_BIT(cfg_params.cfg0 + + type); + break; + case ATH12K_HTT_STATS_RESET_PARAM_CFG_128_BYTES: + cfg_params.cfg3 = ATH12K_HTT_STATS_RESET_BITMAP64_BIT(cfg_params.cfg0 + + type); + break; + default: + break; + } + + ret = ath12k_dp_tx_htt_h2t_ext_stats_req(ar, + ATH12K_DBG_HTT_EXT_STATS_RESET, + &cfg_params, + 0ULL); + if (ret) { + ath12k_warn(ar->ab, "failed to send htt stats request: %d\n", ret); + mutex_unlock(&ar->conf_mutex); + return ret; + } + + ar->debug.htt_stats.reset = type; + mutex_unlock(&ar->conf_mutex); + + return count; +} + +static const struct file_operations fops_htt_stats_reset = { + .write = ath12k_write_htt_stats_reset, + .open = simple_open, + .owner = THIS_MODULE, + .llseek = default_llseek, +}; + +void ath12k_debugfs_htt_stats_register(struct ath12k *ar) +{ + debugfs_create_file("htt_stats_type", 0600, ar->debug.debugfs_pdev, + ar, &fops_htt_stats_type); + debugfs_create_file("htt_stats", 0400, ar->debug.debugfs_pdev, + ar, &fops_dump_htt_stats); + debugfs_create_file("htt_stats_reset", 0200, ar->debug.debugfs_pdev, + ar, &fops_htt_stats_reset); +} diff --git a/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h new file mode 100644 index 000000000000..6294a082cf8a --- /dev/null 
+++ b/drivers/net/wireless/ath/ath12k/debugfs_htt_stats.h @@ -0,0 +1,567 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. + * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef DEBUG_HTT_STATS_H +#define DEBUG_HTT_STATS_H + +#define ATH12K_HTT_STATS_BUF_SIZE (1024 * 512) +#define ATH12K_HTT_STATS_COOKIE_LSB GENMASK_ULL(31, 0) +#define ATH12K_HTT_STATS_COOKIE_MSB GENMASK_ULL(63, 32) +#define ATH12K_HTT_STATS_MAGIC_VALUE 0xF0F0F0F0 +#define ATH12K_HTT_STATS_SUBTYPE_MAX 16 +#define ATH12K_HTT_MAX_STRING_LEN 256 + +#define ATH12K_HTT_STATS_RESET_BITMAP32_OFFSET(_idx) ((_idx) & 0x1f) +#define ATH12K_HTT_STATS_RESET_BITMAP64_OFFSET(_idx) ((_idx) & 0x3f) +#define ATH12K_HTT_STATS_RESET_BITMAP32_BIT(_idx) (1 << \ + ATH12K_HTT_STATS_RESET_BITMAP32_OFFSET(_idx)) +#define ATH12K_HTT_STATS_RESET_BITMAP64_BIT(_idx) (1 << \ + ATH12K_HTT_STATS_RESET_BITMAP64_OFFSET(_idx)) + +void ath12k_debugfs_htt_stats_register(struct ath12k *ar); + +#ifdef CONFIG_ATH12K_DEBUGFS +void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab, + struct sk_buff *skb); +#else /* CONFIG_ATH12K_DEBUGFS */ +static inline void ath12k_debugfs_htt_ext_stats_handler(struct ath12k_base *ab, + struct sk_buff *skb) +{ +} +#endif + +/** + * DOC: target -> host extended statistics upload + * + * The following field definitions describe the format of the HTT + * target to host stats upload confirmation message. + * The message contains a cookie echoed from the HTT host->target stats + * upload request, which identifies which request the confirmation is + * for. A single stats request can span multiple HTT stats indications + * due to the HTT message size limitation, so every HTT ext stats + * indication carries tag-length-value stats information elements. + * The tag-length header for each HTT stats IND message also includes a + * status field, to indicate whether the request for the stat type in + * question was fully met, partially met, unable to be met, or invalid + * (if the stat type in question is disabled in the target). + * A Done bit set to 1 indicates the end of the stats info elements. + * + * + * |31 16|15 12|11|10 8|7 5|4 0| + * |--------------------------------------------------------------| + * | reserved | msg type | + * |--------------------------------------------------------------| + * | cookie LSBs | + * |--------------------------------------------------------------| + * | cookie MSBs | + * |--------------------------------------------------------------| + * | stats entry length | rsvd | D| S | stat type | + * |--------------------------------------------------------------| + * | type-specific stats info | + * | (see debugfs_htt_stats.h) | + * |--------------------------------------------------------------| + * Header fields: + * - MSG_TYPE + * Bits 7:0 + * Purpose: Identifies this as an extended statistics upload confirmation + * message. + * Value: 0x1c + * - COOKIE_LSBS + * Bits 31:0 + * Purpose: Provide a mechanism to match a target->host stats confirmation + * message with its preceding host->target stats request message. + * Value: LSBs of the opaque cookie specified by the host-side requestor + * - COOKIE_MSBS + * Bits 31:0 + * Purpose: Provide a mechanism to match a target->host stats confirmation + * message with its preceding host->target stats request message. + * Value: MSBs of the opaque cookie specified by the host-side requestor + * + * Stats Information Element tag-length header fields: + * - STAT_TYPE + * Bits 7:0 + * Purpose: identifies the type of statistics info held in the + * following information element + * Value: ath12k_dbg_htt_ext_stats_type + * - STATUS + * Bits 10:8 + * Purpose: indicate whether the requested stats are present + * Value: + * 0 -> The requested stats have been delivered in full + * 1 -> The requested stats have been delivered in part + * 2 -> The requested stats could not be delivered (error case) + * 3 -> The requested stat type is either not recognized or disabled + * (invalid) + * - DONE + * Bit 11 + * Purpose: + * Indicates the completion of the stats entry; this will be the last + * stats conf HTT segment for the requested stats type. + * Value: + * 0 -> the stats retrieval is ongoing + * 1 -> the stats retrieval is complete + * - LENGTH + * Bits 31:16 + * Purpose: indicate the stats information size + * Value: This field specifies the number of bytes of stats information + * that follows the element tag-length header. + * It is expected but not required that this length is a multiple of + * 4 bytes. + */
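/*
 * Illustrative decode of the confirmation header described above,
 * mirroring ath12k_debugfs_htt_ext_stats_handler(); a sketch, not a
 * separate API:
 *
 *	const struct ath12k_htt_extd_stats_msg *msg = (const void *)skb->data;
 *	u64 cookie = le64_to_cpu(msg->cookie);
 *	u32 info1 = le32_to_cpu(msg->info1);
 *
 *	if (u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_MSB) !=
 *	    ATH12K_HTT_STATS_MAGIC_VALUE)
 *		return;				// not a cookie we issued
 *	pdev_id = u64_get_bits(cookie, ATH12K_HTT_STATS_COOKIE_LSB);
 *	done = u32_get_bits(info1, ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE);
 *	length = u32_get_bits(info1, ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH);
 */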
+ +#define ATH12K_HTT_T2H_EXT_STATS_INFO1_DONE BIT(11) +#define ATH12K_HTT_T2H_EXT_STATS_INFO1_LENGTH GENMASK(31, 16) + +struct ath12k_htt_extd_stats_msg { + __le32 info0; + __le64 cookie; + __le32 info1; + u8 data[]; +} __packed; + +/* htt_dbg_ext_stats_type */ +enum ath12k_dbg_htt_ext_stats_type { + ATH12K_DBG_HTT_EXT_STATS_RESET = 0, + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1, + ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4, + ATH12K_DBG_HTT_EXT_STATS_PDEV_ERROR = 5, + ATH12K_DBG_HTT_EXT_STATS_PDEV_TQM = 6, + + /* keep this last */ + ATH12K_DBG_HTT_NUM_EXT_STATS, +}; + +enum ath12k_dbg_htt_tlv_tag { + HTT_STATS_TX_PDEV_CMN_TAG = 0, + HTT_STATS_TX_PDEV_UNDERRUN_TAG = 1, + HTT_STATS_TX_PDEV_SIFS_TAG = 2, + HTT_STATS_TX_PDEV_FLUSH_TAG = 3, + HTT_STATS_TX_TQM_GEN_MPDU_TAG = 11, + HTT_STATS_TX_TQM_LIST_MPDU_TAG = 12, + HTT_STATS_TX_TQM_LIST_MPDU_CNT_TAG = 13, + HTT_STATS_TX_TQM_CMN_TAG = 14, + HTT_STATS_TX_TQM_PDEV_TAG = 15, + HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36, + HTT_STATS_TX_SCHED_CMN_TAG = 37, + HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39, + HTT_STATS_TX_TQM_ERROR_STATS_TAG = 43, + HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG = 44, + HTT_STATS_HW_INTR_MISC_TAG = 54, + HTT_STATS_HW_PDEV_ERRS_TAG = 56, + HTT_STATS_WHAL_TX_TAG = 66, + HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67, + HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86, + HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87, + HTT_STATS_HW_WAR_TAG = 89, + HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG = 100, + HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102, + HTT_STATS_MU_PPDU_DIST_TAG = 129, + + HTT_STATS_MAX_TAG, +}; + +#define ATH12K_HTT_STATS_MAC_ID GENMASK(7, 0) + +#define ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_STATS 9 +#define ATH12K_HTT_TX_PDEV_MAX_FLUSH_REASON_STATS 150 + +/* MU MIMO distribution stats is a 2-dimensional array + * with dimension one denoting stats for nr4[0] or nr8[1] + */ +#define ATH12K_HTT_STATS_NUM_NR_BINS 2 +#define ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST 10 +#define ATH12K_HTT_TX_PDEV_MAX_SIFS_BURST_HIST_STATS 10 +#define ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS 9 +#define ATH12K_HTT_STATS_NUM_SCHED_STATUS_WORDS \ + (ATH12K_HTT_STATS_NUM_NR_BINS * ATH12K_HTT_STATS_MAX_NUM_SCHED_STATUS) +#define ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS \ + (ATH12K_HTT_STATS_NUM_NR_BINS * ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST) + +enum ath12k_htt_tx_pdev_underrun_enum {
HTT_STATS_TX_PDEV_NO_DATA_UNDERRUN = 0, + HTT_STATS_TX_PDEV_DATA_UNDERRUN_BETWEEN_MPDU = 1, + HTT_STATS_TX_PDEV_DATA_UNDERRUN_WITHIN_MPDU = 2, + HTT_TX_PDEV_MAX_URRN_STATS = 3, +}; + +enum ath12k_htt_stats_reset_cfg_param_alloc_pos { + ATH12K_HTT_STATS_RESET_PARAM_CFG_32_BYTES = 1, + ATH12K_HTT_STATS_RESET_PARAM_CFG_64_BYTES, + ATH12K_HTT_STATS_RESET_PARAM_CFG_128_BYTES, +}; + +struct debug_htt_stats_req { + bool done; + bool override_cfg_param; + u8 pdev_id; + enum ath12k_dbg_htt_ext_stats_type type; + u32 cfg_param[4]; + u8 peer_addr[ETH_ALEN]; + struct completion htt_stats_rcvd; + u32 buf_len; + u8 buf[]; +}; + +struct ath12k_htt_tx_pdev_stats_cmn_tlv { + __le32 mac_id__word; + __le32 hw_queued; + __le32 hw_reaped; + __le32 underrun; + __le32 hw_paused; + __le32 hw_flush; + __le32 hw_filt; + __le32 tx_abort; + __le32 mpdu_requed; + __le32 tx_xretry; + __le32 data_rc; + __le32 mpdu_dropped_xretry; + __le32 illgl_rate_phy_err; + __le32 cont_xretry; + __le32 tx_timeout; + __le32 pdev_resets; + __le32 phy_underrun; + __le32 txop_ovf; + __le32 seq_posted; + __le32 seq_failed_queueing; + __le32 seq_completed; + __le32 seq_restarted; + __le32 mu_seq_posted; + __le32 seq_switch_hw_paused; + __le32 next_seq_posted_dsr; + __le32 seq_posted_isr; + __le32 seq_ctrl_cached; + __le32 mpdu_count_tqm; + __le32 msdu_count_tqm; + __le32 mpdu_removed_tqm; + __le32 msdu_removed_tqm; + __le32 mpdus_sw_flush; + __le32 mpdus_hw_filter; + __le32 mpdus_truncated; + __le32 mpdus_ack_failed; + __le32 mpdus_expired; + __le32 mpdus_seq_hw_retry; + __le32 ack_tlv_proc; + __le32 coex_abort_mpdu_cnt_valid; + __le32 coex_abort_mpdu_cnt; + __le32 num_total_ppdus_tried_ota; + __le32 num_data_ppdus_tried_ota; + __le32 local_ctrl_mgmt_enqued; + __le32 local_ctrl_mgmt_freed; + __le32 local_data_enqued; + __le32 local_data_freed; + __le32 mpdu_tried; + __le32 isr_wait_seq_posted; + + __le32 tx_active_dur_us_low; + __le32 tx_active_dur_us_high; + __le32 remove_mpdus_max_retries; + __le32 comp_delivered; + __le32 ppdu_ok; + __le32 self_triggers; + __le32 tx_time_dur_data; + __le32 seq_qdepth_repost_stop; + __le32 mu_seq_min_msdu_repost_stop; + __le32 seq_min_msdu_repost_stop; + __le32 seq_txop_repost_stop; + __le32 next_seq_cancel; + __le32 fes_offsets_err_cnt; + __le32 num_mu_peer_blacklisted; + __le32 mu_ofdma_seq_posted; + __le32 ul_mumimo_seq_posted; + __le32 ul_ofdma_seq_posted; + + __le32 thermal_suspend_cnt; + __le32 dfs_suspend_cnt; + __le32 tx_abort_suspend_cnt; + __le32 tgt_specific_opaque_txq_suspend_info; + __le32 last_suspend_reason; +} __packed; + +struct ath12k_htt_tx_pdev_stats_urrn_tlv { + DECLARE_FLEX_ARRAY(__le32, urrn_stats); +} __packed; + +struct ath12k_htt_tx_pdev_stats_flush_tlv { + DECLARE_FLEX_ARRAY(__le32, flush_errs); +} __packed; + +struct ath12k_htt_tx_pdev_stats_phy_err_tlv { + DECLARE_FLEX_ARRAY(__le32, phy_errs); +} __packed; + +struct ath12k_htt_tx_pdev_stats_sifs_tlv { + DECLARE_FLEX_ARRAY(__le32, sifs_status); +} __packed; + +struct ath12k_htt_pdev_ctrl_path_tx_stats_tlv { + __le32 fw_tx_mgmt_subtype[ATH12K_HTT_STATS_SUBTYPE_MAX]; +} __packed; + +struct ath12k_htt_tx_pdev_stats_sifs_hist_tlv { + DECLARE_FLEX_ARRAY(__le32, sifs_hist_status); +} __packed; + +enum ath12k_htt_stats_hw_mode { + ATH12K_HTT_STATS_HWMODE_AC = 0, + ATH12K_HTT_STATS_HWMODE_AX = 1, + ATH12K_HTT_STATS_HWMODE_BE = 2, +}; + +struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv { + __le32 hw_mode; + __le32 num_seq_term_status[ATH12K_HTT_STATS_NUM_SCHED_STATUS_WORDS]; + __le32 
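/*
 * Editorial note: the *_WORDS arrays in this TLV are flattened
 * [ATH12K_HTT_STATS_NUM_NR_BINS][row] matrices, so the entry for NR
 * bin i and column j lives at index i * row_len + j.  A sketch
 * mirroring htt_print_tx_pdev_mu_ppdu_dist_stats_tlv():
 *
 *	val = le32_to_cpu(htt_stats_buf->num_ppdu_cmpl_per_burst
 *			  [i * ATH12K_HTT_STATS_MAX_NUM_MU_PPDU_PER_BURST + j]);
 */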
num_ppdu_cmpl_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS]; + __le32 num_seq_posted[ATH12K_HTT_STATS_NUM_NR_BINS]; + __le32 num_ppdu_posted_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS]; +} __packed; + +#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID GENMASK(7, 0) +#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID GENMASK(15, 8) + +#define ATH12K_HTT_TX_PDEV_NUM_SCHED_ORDER_LOG 20 + +struct ath12k_htt_stats_tx_sched_cmn_tlv { + __le32 mac_id__word; + __le32 current_timestamp; +} __packed; + +struct ath12k_htt_tx_pdev_stats_sched_per_txq_tlv { + __le32 mac_id__word; + __le32 sched_policy; + __le32 last_sched_cmd_posted_timestamp; + __le32 last_sched_cmd_compl_timestamp; + __le32 sched_2_tac_lwm_count; + __le32 sched_2_tac_ring_full; + __le32 sched_cmd_post_failure; + __le32 num_active_tids; + __le32 num_ps_schedules; + __le32 sched_cmds_pending; + __le32 num_tid_register; + __le32 num_tid_unregister; + __le32 num_qstats_queried; + __le32 qstats_update_pending; + __le32 last_qstats_query_timestamp; + __le32 num_tqm_cmdq_full; + __le32 num_de_sched_algo_trigger; + __le32 num_rt_sched_algo_trigger; + __le32 num_tqm_sched_algo_trigger; + __le32 notify_sched; + __le32 dur_based_sendn_term; + __le32 su_notify2_sched; + __le32 su_optimal_queued_msdus_sched; + __le32 su_delay_timeout_sched; + __le32 su_min_txtime_sched_delay; + __le32 su_no_delay; + __le32 num_supercycles; + __le32 num_subcycles_with_sort; + __le32 num_subcycles_no_sort; +} __packed; + +struct ath12k_htt_sched_txq_cmd_posted_tlv { + DECLARE_FLEX_ARRAY(__le32, sched_cmd_posted); +} __packed; + +struct ath12k_htt_sched_txq_cmd_reaped_tlv { + DECLARE_FLEX_ARRAY(__le32, sched_cmd_reaped); +} __packed; + +struct ath12k_htt_sched_txq_sched_order_su_tlv { + DECLARE_FLEX_ARRAY(__le32, sched_order_su); +} __packed; + +struct ath12k_htt_sched_txq_sched_ineligibility_tlv { + DECLARE_FLEX_ARRAY(__le32, sched_ineligibility); +} __packed; + +enum ath12k_htt_sched_txq_supercycle_triggers_tlv_enum { + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_NONE = 0, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_FORCED, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_LESS_NUM_TIDQ_ENTRIES, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_LESS_NUM_ACTIVE_TIDS, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX_ITR_REACHED, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_DUR_THRESHOLD_REACHED, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_TWT_TRIGGER, + ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX, +}; + +struct ath12k_htt_sched_txq_supercycle_triggers_tlv { + DECLARE_FLEX_ARRAY(__le32, supercycle_triggers); +} __packed; + +struct ath12k_htt_hw_stats_pdev_errs_tlv { + __le32 mac_id__word; + __le32 tx_abort; + __le32 tx_abort_fail_count; + __le32 rx_abort; + __le32 rx_abort_fail_count; + __le32 warm_reset; + __le32 cold_reset; + __le32 tx_flush; + __le32 tx_glb_reset; + __le32 tx_txq_reset; + __le32 rx_timeout_reset; + __le32 mac_cold_reset_restore_cal; + __le32 mac_cold_reset; + __le32 mac_warm_reset; + __le32 mac_only_reset; + __le32 phy_warm_reset; + __le32 phy_warm_reset_ucode_trig; + __le32 mac_warm_reset_restore_cal; + __le32 mac_sfm_reset; + __le32 phy_warm_reset_m3_ssr; + __le32 phy_warm_reset_reason_phy_m3; + __le32 phy_warm_reset_reason_tx_hw_stuck; + __le32 phy_warm_reset_reason_num_rx_frame_stuck; + __le32 phy_warm_reset_reason_wal_rx_rec_rx_busy; + __le32 phy_warm_reset_reason_wal_rx_rec_mac_hng; + __le32 phy_warm_reset_reason_mac_conv_phy_reset; + __le32 wal_rx_recovery_rst_mac_hang_cnt; + __le32 wal_rx_recovery_rst_known_sig_cnt; + __le32 wal_rx_recovery_rst_no_rx_cnt; + __le32 
wal_rx_recovery_rst_no_rx_consec_cnt; + __le32 wal_rx_recovery_rst_rx_busy_cnt; + __le32 wal_rx_recovery_rst_phy_mac_hang_cnt; + __le32 rx_flush_cnt; + __le32 phy_warm_reset_reason_tx_exp_cca_stuck; + __le32 phy_warm_reset_reason_tx_consec_flsh_war; + __le32 phy_warm_reset_reason_tx_hwsch_reset_war; + __le32 phy_warm_reset_reason_hwsch_cca_wdog_war; + __le32 fw_rx_rings_reset; + __le32 rx_dest_drain_rx_descs_leak_prevented; + __le32 rx_dest_drain_rx_descs_saved_cnt; + __le32 rx_dest_drain_rxdma2reo_leak_detected; + __le32 rx_dest_drain_rxdma2fw_leak_detected; + __le32 rx_dest_drain_rxdma2wbm_leak_detected; + __le32 rx_dest_drain_rxdma1_2sw_leak_detected; + __le32 rx_dest_drain_rx_drain_ok_mac_idle; + __le32 rx_dest_drain_ok_mac_not_idle; + __le32 rx_dest_drain_prerequisite_invld; + __le32 rx_dest_drain_skip_non_lmac_reset; + __le32 rx_dest_drain_hw_fifo_notempty_post_wait; +} __packed; + +#define ATH12K_HTT_STATS_MAX_HW_INTR_NAME_LEN 8 +struct ath12k_htt_hw_stats_intr_misc_tlv { + u8 hw_intr_name[ATH12K_HTT_STATS_MAX_HW_INTR_NAME_LEN]; + __le32 mask; + __le32 count; +} __packed; + +struct ath12k_htt_hw_stats_whal_tx_tlv { + __le32 mac_id__word; + __le32 last_unpause_ppdu_id; + __le32 hwsch_unpause_wait_tqm_write; + __le32 hwsch_dummy_tlv_skipped; + __le32 hwsch_misaligned_offset_received; + __le32 hwsch_reset_count; + __le32 hwsch_dev_reset_war; + __le32 hwsch_delayed_pause; + __le32 hwsch_long_delayed_pause; + __le32 sch_rx_ppdu_no_response; + __le32 sch_selfgen_response; + __le32 sch_rx_sifs_resp_trigger; +} __packed; + +struct ath12k_htt_hw_war_stats_tlv { + __le32 mac_id__word; + DECLARE_FLEX_ARRAY(__le32, hw_wars); +} __packed; + +struct ath12k_htt_tx_tqm_cmn_stats_tlv { + __le32 mac_id__word; + __le32 max_cmdq_id; + __le32 list_mpdu_cnt_hist_intvl; + __le32 add_msdu; + __le32 q_empty; + __le32 q_not_empty; + __le32 drop_notification; + __le32 desc_threshold; + __le32 hwsch_tqm_invalid_status; + __le32 missed_tqm_gen_mpdus; + __le32 tqm_active_tids; + __le32 tqm_inactive_tids; + __le32 tqm_active_msduq_flows; + __le32 msduq_timestamp_updates; + __le32 msduq_updates_mpdu_head_info_cmd; + __le32 msduq_updates_emp_to_nonemp_status; + __le32 get_mpdu_head_info_cmds_by_query; + __le32 get_mpdu_head_info_cmds_by_tac; + __le32 gen_mpdu_cmds_by_query; + __le32 high_prio_q_not_empty; +} __packed; + +struct ath12k_htt_tx_tqm_error_stats_tlv { + __le32 q_empty_failure; + __le32 q_not_empty_failure; + __le32 add_msdu_failure; + __le32 tqm_cache_ctl_err; + __le32 tqm_soft_reset; + __le32 tqm_reset_num_in_use_link_descs; + __le32 tqm_reset_num_lost_link_descs; + __le32 tqm_reset_num_lost_host_tx_buf_cnt; + __le32 tqm_reset_num_in_use_internal_tqm; + __le32 tqm_reset_num_in_use_idle_link_rng; + __le32 tqm_reset_time_to_tqm_hang_delta_ms; + __le32 tqm_reset_recovery_time_ms; + __le32 tqm_reset_num_peers_hdl; + __le32 tqm_reset_cumm_dirty_hw_mpduq_cnt; + __le32 tqm_reset_cumm_dirty_hw_msduq_proc; + __le32 tqm_reset_flush_cache_cmd_su_cnt; + __le32 tqm_reset_flush_cache_cmd_other_cnt; + __le32 tqm_reset_flush_cache_cmd_trig_type; + __le32 tqm_reset_flush_cache_cmd_trig_cfg; + __le32 tqm_reset_flush_cmd_skp_status_null; +} __packed; + +struct ath12k_htt_tx_tqm_gen_mpdu_stats_tlv { + DECLARE_FLEX_ARRAY(__le32, gen_mpdu_end_reason); +} __packed; + +#define ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_END_REASON 16 +#define ATH12K_HTT_TX_TQM_MAX_LIST_MPDU_CNT_HISTOGRAM_BINS 16 + +struct ath12k_htt_tx_tqm_list_mpdu_stats_tlv { + DECLARE_FLEX_ARRAY(__le32, list_mpdu_end_reason); +} __packed; + +struct 
ath12k_htt_tx_tqm_list_mpdu_cnt_tlv { + DECLARE_FLEX_ARRAY(__le32, list_mpdu_cnt_hist); +} __packed; + +struct ath12k_htt_tx_tqm_pdev_stats_tlv { + __le32 msdu_count; + __le32 mpdu_count; + __le32 remove_msdu; + __le32 remove_mpdu; + __le32 remove_msdu_ttl; + __le32 send_bar; + __le32 bar_sync; + __le32 notify_mpdu; + __le32 sync_cmd; + __le32 write_cmd; + __le32 hwsch_trigger; + __le32 ack_tlv_proc; + __le32 gen_mpdu_cmd; + __le32 gen_list_cmd; + __le32 remove_mpdu_cmd; + __le32 remove_mpdu_tried_cmd; + __le32 mpdu_queue_stats_cmd; + __le32 mpdu_head_info_cmd; + __le32 msdu_flow_stats_cmd; + __le32 remove_msdu_cmd; + __le32 remove_msdu_ttl_cmd; + __le32 flush_cache_cmd; + __le32 update_mpduq_cmd; + __le32 enqueue; + __le32 enqueue_notify; + __le32 notify_mpdu_at_head; + __le32 notify_mpdu_state_valid; + __le32 sched_udp_notify1; + __le32 sched_udp_notify2; + __le32 sched_nonudp_notify1; + __le32 sched_nonudp_notify2; +} __packed; + +#endif diff --git a/drivers/net/wireless/ath/ath12k/dp.c b/drivers/net/wireless/ath/ath12k/dp.c index 7843c76a82c1..61aa78d8bd8c 100644 --- a/drivers/net/wireless/ath/ath12k/dp.c +++ b/drivers/net/wireless/ath/ath12k/dp.c @@ -132,7 +132,9 @@ static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask) static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab, enum hal_ring_type type, int ring_num) { + const struct ath12k_hal_tcl_to_wbm_rbm_map *map; const u8 *grp_mask; + int i; switch (type) { case HAL_WBM2SW_RELEASE: @@ -140,6 +142,14 @@ static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab, grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0]; ring_num = 0; } else { + map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map; + for (i = 0; i < ab->hw_params->max_tx_ring; i++) { + if (ring_num == map[i].wbm_ring_num) { + ring_num = i; + break; + } + } + grp_mask = &ab->hw_params->ring_mask->tx[0]; } break; @@ -457,8 +467,6 @@ static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab) ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring); ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring); } - ath12k_dp_srng_cleanup(ab, &dp->tcl_status_ring); - ath12k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring); ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring); } @@ -479,20 +487,6 @@ static int ath12k_dp_srng_common_setup(struct ath12k_base *ab) goto err; } - ret = ath12k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0, - DP_TCL_CMD_RING_SIZE); - if (ret) { - ath12k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret); - goto err; - } - - ret = ath12k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS, - 0, 0, DP_TCL_STATUS_RING_SIZE); - if (ret) { - ath12k_warn(ab, "failed to set up tcl_status ring :%d\n", ret); - goto err; - } - for (i = 0; i < ab->hw_params->max_tx_ring; i++) { map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map; tx_comp_ring_num = map[i].wbm_ring_num; @@ -616,6 +610,7 @@ static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab, int i; int ret = 0; u32 end_offset, cookie; + enum hal_rx_buf_return_buf_manager rbm = dp->idle_link_rbm; n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE / ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK); @@ -646,7 +641,8 @@ static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab, paddr = link_desc_banks[i].paddr; while (n_entries) { cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i); - ath12k_hal_set_link_desc_addr(scatter_buf, cookie, paddr); + ath12k_hal_set_link_desc_addr(scatter_buf, cookie, + paddr, rbm); n_entries--; paddr += 
HAL_LINK_DESC_SIZE; if (rem_entries) { @@ -790,6 +786,7 @@ int ath12k_dp_link_desc_setup(struct ath12k_base *ab, u32 paddr; int i, ret; u32 cookie; + enum hal_rx_buf_return_buf_manager rbm = ab->dp.idle_link_rbm; tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE; tot_mem_sz += HAL_LINK_DESC_ALIGN; @@ -850,8 +847,7 @@ int ath12k_dp_link_desc_setup(struct ath12k_base *ab, while (n_entries && (desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) { cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i); - ath12k_hal_set_link_desc_addr(desc, - cookie, paddr); + ath12k_hal_set_link_desc_addr(desc, cookie, paddr, rbm); n_entries--; paddr += HAL_LINK_DESC_SIZE; } @@ -881,11 +877,9 @@ int ath12k_dp_service_srng(struct ath12k_base *ab, enum dp_monitor_mode monitor_mode; u8 ring_mask; - while (i < ab->hw_params->max_tx_ring) { - if (ab->hw_params->ring_mask->tx[grp_id] & - BIT(ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].wbm_ring_num)) - ath12k_dp_tx_completion_handler(ab, i); - i++; + if (ab->hw_params->ring_mask->tx[grp_id]) { + i = fls(ab->hw_params->ring_mask->tx[grp_id]) - 1; + ath12k_dp_tx_completion_handler(ab, i); } if (ab->hw_params->ring_mask->rx_err[grp_id]) { @@ -921,8 +915,8 @@ int ath12k_dp_service_srng(struct ath12k_base *ab, monitor_mode = ATH12K_DP_RX_MONITOR_MODE; ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id]; for (i = 0; i < ab->num_radios; i++) { - for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) { - int id = i * ab->hw_params->num_rxmda_per_pdev + j; + for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) { + int id = i * ab->hw_params->num_rxdma_per_pdev + j; if (ring_mask & BIT(id)) { work_done = @@ -942,8 +936,8 @@ int ath12k_dp_service_srng(struct ath12k_base *ab, monitor_mode = ATH12K_DP_TX_MONITOR_MODE; ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id]; for (i = 0; i < ab->num_radios; i++) { - for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) { - int id = i * ab->hw_params->num_rxmda_per_pdev + j; + for (j = 0; j < ab->hw_params->num_rxdma_per_pdev; j++) { + int id = i * ab->hw_params->num_rxdma_per_pdev + j; if (ring_mask & BIT(id)) { work_done = @@ -1031,7 +1025,7 @@ static void ath12k_dp_service_mon_ring(struct timer_list *t) struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer); int i; - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET, ATH12K_DP_RX_MONITOR_MODE); @@ -1355,13 +1349,14 @@ static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab, struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab, u32 cookie) { + struct ath12k_dp *dp = &ab->dp; struct ath12k_rx_desc_info **desc_addr_ptr; u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx; ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT); spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT); - start_ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET; + start_ppt_idx = dp->rx_ppt_base + ATH12K_RX_SPT_PAGE_OFFSET; end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES; if (ppt_idx < start_ppt_idx || @@ -1369,6 +1364,7 @@ struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab, spt_idx > ATH12K_MAX_SPT_ENTRIES) return NULL; + ppt_idx = ppt_idx - dp->rx_ppt_base; desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx); return *desc_addr_ptr; @@ -1403,7 +1399,7 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab) struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr; struct ath12k_tx_desc_info *tx_descs, 
**tx_desc_addr; u32 i, j, pool_id, tx_spt_page; - u32 ppt_idx; + u32 ppt_idx, cookie_ppt_idx; spin_lock_bh(&dp->rx_desc_lock); @@ -1418,10 +1414,11 @@ static int ath12k_dp_cc_desc_init(struct ath12k_base *ab) } ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i; + cookie_ppt_idx = dp->rx_ppt_base + ppt_idx; dp->spt_info->rxbaddr[i] = &rx_descs[0]; for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) { - rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(ppt_idx, j); + rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(cookie_ppt_idx, j); rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC; list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list); @@ -1482,6 +1479,7 @@ static int ath12k_dp_cmem_init(struct ath12k_base *ab, end = start + ATH12K_NUM_TX_SPT_PAGES; break; case ATH12K_DP_RX_DESC: + cmem_base += ATH12K_PPT_ADDR_OFFSET(dp->rx_ppt_base); start = ATH12K_RX_SPT_PAGE_OFFSET; end = start + ATH12K_NUM_RX_SPT_PAGES; break; @@ -1524,6 +1522,8 @@ static int ath12k_dp_cc_init(struct ath12k_base *ab) return -ENOMEM; } + dp->rx_ppt_base = ab->device_id * ATH12K_NUM_RX_SPT_PAGES; + for (i = 0; i < dp->num_spt_pages; i++) { dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev, ATH12K_PAGE_SIZE, @@ -1587,6 +1587,24 @@ static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab) return 0; } +static enum hal_rx_buf_return_buf_manager +ath12k_dp_get_idle_link_rbm(struct ath12k_base *ab) +{ + switch (ab->device_id) { + case 0: + return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST; + case 1: + return HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST; + case 2: + return HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST; + default: + ath12k_warn(ab, "invalid %d device id, so choose default rbm\n", + ab->device_id); + WARN_ON(1); + return HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST; + } +} + int ath12k_dp_alloc(struct ath12k_base *ab) { struct ath12k_dp *dp = &ab->dp; @@ -1603,6 +1621,7 @@ int ath12k_dp_alloc(struct ath12k_base *ab) spin_lock_init(&dp->reo_cmd_lock); dp->reo_cmd_cache_flush_count = 0; + dp->idle_link_rbm = ath12k_dp_get_idle_link_rbm(ab); ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc); if (ret) { diff --git a/drivers/net/wireless/ath/ath12k/dp.h b/drivers/net/wireless/ath/ath12k/dp.h index 5cf0d21ef184..b77497c14ac4 100644 --- a/drivers/net/wireless/ath/ath12k/dp.h +++ b/drivers/net/wireless/ath/ath12k/dp.h @@ -325,15 +325,15 @@ struct ath12k_dp { u8 htt_tgt_ver_major; u8 htt_tgt_ver_minor; struct dp_link_desc_bank link_desc_banks[DP_LINK_DESC_BANKS_MAX]; + enum hal_rx_buf_return_buf_manager idle_link_rbm; struct dp_srng wbm_idle_ring; struct dp_srng wbm_desc_rel_ring; - struct dp_srng tcl_cmd_ring; - struct dp_srng tcl_status_ring; struct dp_srng reo_reinject_ring; struct dp_srng rx_rel_ring; struct dp_srng reo_except_ring; struct dp_srng reo_cmd_ring; struct dp_srng reo_status_ring; + enum ath12k_peer_metadata_version peer_metadata_ver; struct dp_srng reo_dst_ring[DP_REO_DST_RING_MAX]; struct dp_tx_ring tx_ring[DP_TCL_NUM_RING_MAX]; struct hal_wbm_idle_scatter_list scatter_list[DP_IDLE_SCATTER_BUFS_MAX]; @@ -351,6 +351,7 @@ struct ath12k_dp { struct ath12k_hp_update_timer tx_ring_timer[DP_TCL_NUM_RING_MAX]; struct ath12k_spt_info *spt_info; u32 num_spt_pages; + u32 rx_ppt_base; struct list_head rx_desc_free_list; /* protects the free desc list */ spinlock_t rx_desc_lock; diff --git a/drivers/net/wireless/ath/ath12k/dp_mon.c b/drivers/net/wireless/ath/ath12k/dp_mon.c index 6b0b72477540..5c6749bc4039 100644 --- a/drivers/net/wireless/ath/ath12k/dp_mon.c +++ b/drivers/net/wireless/ath/ath12k/dp_mon.c @@ -1903,43 +1903,6 @@ 
ath12k_dp_mon_tx_parse_status_tlv(struct ath12k_base *ab, break; } - case HAL_MON_BUF_ADDR: { - struct dp_rxdma_mon_ring *buf_ring = &ab->dp.tx_mon_buf_ring; - struct dp_mon_packet_info *packet_info = - (struct dp_mon_packet_info *)tlv_data; - int buf_id = u32_get_bits(packet_info->cookie, - DP_RXDMA_BUF_COOKIE_BUF_ID); - struct sk_buff *msdu; - struct dp_mon_mpdu *mon_mpdu = tx_ppdu_info->tx_mon_mpdu; - struct ath12k_skb_rxcb *rxcb; - - spin_lock_bh(&buf_ring->idr_lock); - msdu = idr_remove(&buf_ring->bufs_idr, buf_id); - spin_unlock_bh(&buf_ring->idr_lock); - - if (unlikely(!msdu)) { - ath12k_warn(ab, "monitor destination with invalid buf_id %d\n", - buf_id); - return DP_MON_TX_STATUS_PPDU_NOT_DONE; - } - - rxcb = ATH12K_SKB_RXCB(msdu); - dma_unmap_single(ab->dev, rxcb->paddr, - msdu->len + skb_tailroom(msdu), - DMA_FROM_DEVICE); - - if (!mon_mpdu->head) - mon_mpdu->head = msdu; - else if (mon_mpdu->tail) - mon_mpdu->tail->next = msdu; - - mon_mpdu->tail = msdu; - - ath12k_dp_mon_buf_replenish(ab, buf_ring, 1); - status = DP_MON_TX_BUFFER_ADDR; - break; - } - case HAL_TX_MPDU_END: list_add_tail(&tx_ppdu_info->tx_mon_mpdu->list, &tx_ppdu_info->dp_tx_mon_mpdu_list); @@ -2088,8 +2051,7 @@ int ath12k_dp_mon_srng_process(struct ath12k *ar, int mac_id, int *budget, mon_dst_ring = &pdev_dp->rxdma_mon_dst_ring[srng_id]; buf_ring = &dp->rxdma_mon_buf_ring; } else { - mon_dst_ring = &pdev_dp->tx_mon_dst_ring[srng_id]; - buf_ring = &dp->tx_mon_buf_ring; + return 0; } srng = &ab->hal.srng_list[mon_dst_ring->ring_id]; diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.c b/drivers/net/wireless/ath/ath12k/dp_rx.c index 75df622f25d8..14236d0a0c89 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.c +++ b/drivers/net/wireless/ath/ath12k/dp_rx.c @@ -17,6 +17,7 @@ #include "dp_tx.h" #include "peer.h" #include "dp_mon.h" +#include "debugfs_htt_stats.h" #define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ) @@ -422,8 +423,6 @@ static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab) ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring); - ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->tx_mon_buf_ring); - return 0; } @@ -476,15 +475,6 @@ static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab) "failed to setup HAL_RXDMA_MONITOR_BUF\n"); return ret; } - - ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, - &dp->tx_mon_buf_ring, - HAL_TX_MONITOR_BUF); - if (ret) { - ath12k_warn(ab, - "failed to setup HAL_TX_MONITOR_BUF\n"); - return ret; - } } return 0; @@ -496,10 +486,8 @@ static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar) struct ath12k_base *ab = ar->ab; int i; - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]); - ath12k_dp_srng_cleanup(ab, &dp->tx_mon_dst_ring[i]); - } } void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab) @@ -543,7 +531,7 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar) int ret; u32 mac_id = dp->mac_id; - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { ret = ath12k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring[i], HAL_RXDMA_MONITOR_DST, @@ -554,17 +542,6 @@ static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar) "failed to setup HAL_RXDMA_MONITOR_DST\n"); return ret; } - - ret = ath12k_dp_srng_setup(ar->ab, - &dp->tx_mon_dst_ring[i], - HAL_TX_MONITOR_DST, - 0, mac_id + i, - DP_TX_MONITOR_DEST_RING_SIZE); - if (ret) { - ath12k_warn(ar->ab, - "failed to setup 
HAL_TX_MONITOR_DST\n"); - return ret; - } } return 0; @@ -1292,10 +1269,10 @@ static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab, return 0; } -static int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len, - int (*iter)(struct ath12k_base *ar, u16 tag, u16 len, - const void *ptr, void *data), - void *data) +int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len, + int (*iter)(struct ath12k_base *ar, u16 tag, u16 len, + const void *ptr, void *data), + void *data) { const struct htt_tlv *tlv; const void *begin = ptr; @@ -1765,6 +1742,7 @@ void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab, ath12k_htt_pull_ppdu_stats(ab, skb); break; case HTT_T2H_MSG_TYPE_EXT_STATS_CONF: + ath12k_debugfs_htt_ext_stats_handler(ab, skb); break; case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND: ath12k_htt_mlo_offset_event_handler(ab, skb); @@ -2383,8 +2361,10 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc, channel_num = meta_data; center_freq = meta_data >> 16; - if (center_freq >= 5935 && center_freq <= 7105) { + if (center_freq >= ATH12K_MIN_6G_FREQ && + center_freq <= ATH12K_MAX_6G_FREQ) { rx_status->band = NL80211_BAND_6GHZ; + rx_status->freq = center_freq; } else if (channel_num >= 1 && channel_num <= 14) { rx_status->band = NL80211_BAND_2GHZ; } else if (channel_num >= 36 && channel_num <= 173) { @@ -2402,8 +2382,9 @@ void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct hal_rx_desc *rx_desc, rx_desc, sizeof(*rx_desc)); } - rx_status->freq = ieee80211_channel_to_frequency(channel_num, - rx_status->band); + if (rx_status->band != NL80211_BAND_6GHZ) + rx_status->freq = ieee80211_channel_to_frequency(channel_num, + rx_status->band); ath12k_dp_rx_h_rate(ar, rx_desc, rx_status); } @@ -2604,6 +2585,29 @@ static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab, rcu_read_unlock(); } +static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab, + enum ath12k_peer_metadata_version ver, + __le32 peer_metadata) +{ + switch (ver) { + default: + ath12k_warn(ab, "Unknown peer metadata version: %d", ver); + fallthrough; + case ATH12K_PEER_METADATA_V0: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V0_PEER_ID); + case ATH12K_PEER_METADATA_V1: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V1_PEER_ID); + case ATH12K_PEER_METADATA_V1A: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V1A_PEER_ID); + case ATH12K_PEER_METADATA_V1B: + return le32_get_bits(peer_metadata, + RX_MPDU_DESC_META_DATA_V1B_PEER_ID); + } +} + int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id, struct napi_struct *napi, int budget) { @@ -2632,6 +2636,8 @@ try_again: ath12k_hal_srng_access_begin(ab, srng); while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { + struct rx_mpdu_desc *mpdu_info; + struct rx_msdu_desc *msdu_info; enum hal_reo_dest_ring_push_reason push_reason; u32 cookie; @@ -2649,7 +2655,8 @@ try_again: if (!desc_info) { desc_info = ath12k_dp_get_rx_desc(ab, cookie); if (!desc_info) { - ath12k_warn(ab, "Invalid cookie in manual desc retrieval"); + ath12k_warn(ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n", + cookie); continue; } } @@ -2678,16 +2685,19 @@ try_again: continue; } - rxcb->is_first_msdu = !!(le32_to_cpu(desc->rx_msdu_info.info0) & + msdu_info = &desc->rx_msdu_info; + mpdu_info = &desc->rx_mpdu_info; + + rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) & RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU); - rxcb->is_last_msdu = 
!!(le32_to_cpu(desc->rx_msdu_info.info0) & + rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) & RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU); - rxcb->is_continuation = !!(le32_to_cpu(desc->rx_msdu_info.info0) & + rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION); rxcb->mac_id = mac_id; - rxcb->peer_id = le32_get_bits(desc->rx_mpdu_info.peer_meta_data, - RX_MPDU_DESC_META_DATA_PEER_ID); - rxcb->tid = le32_get_bits(desc->rx_mpdu_info.info0, + rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver, + mpdu_info->peer_meta_data); + rxcb->tid = le32_get_bits(mpdu_info->info0, RX_MPDU_DESC_INFO0_TID); __skb_queue_tail(&msdu_list, msdu); @@ -2762,6 +2772,7 @@ int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev peer = ath12k_peer_find(ab, vdev_id, peer_mac); if (!peer) { spin_unlock_bh(&ab->base_lock); + crypto_free_shash(tfm); ath12k_warn(ab, "failed to find the peer to set up fragment info\n"); return -ENOENT; } @@ -2991,9 +3002,10 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, struct hal_srng *srng; dma_addr_t link_paddr, buf_paddr; u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info; - u32 cookie, hal_rx_desc_sz, dest_ring_info0; + u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi; int ret; struct ath12k_rx_desc_info *desc_info; + enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm; u8 dst_ind; hal_rx_desc_sz = ab->hal.hal_desc_sz; @@ -3027,7 +3039,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, buf_paddr = dma_map_single(ab->dev, defrag_skb->data, defrag_skb->len + skb_tailroom(defrag_skb), - DMA_FROM_DEVICE); + DMA_TO_DEVICE); if (dma_mapping_error(ab->dev, buf_paddr)) return -ENOMEM; @@ -3071,7 +3083,7 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr, cookie, - HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST); + idle_link_rbm); mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) | u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) | @@ -3083,13 +3095,11 @@ static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar, reo_ent_ring->rx_mpdu_info.peer_meta_data = reo_dest_ring->rx_mpdu_info.peer_meta_data; - /* Firmware expects physical address to be filled in queue_addr_lo in - * the MLO scenario and in case of non MLO peer meta data needs to be - * filled. - * TODO: Need to handle for MLO scenario. 
- */ - reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data; - reo_ent_ring->info0 = le32_encode_bits(dst_ind, + reo_ent_ring->queue_addr_lo = cpu_to_le32(lower_32_bits(rx_tid->paddr)); + queue_addr_hi = upper_32_bits(rx_tid->paddr); + reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi, + HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) | + le32_encode_bits(dst_ind, HAL_REO_ENTR_RING_INFO0_DEST_IND); reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn, @@ -3113,7 +3123,7 @@ err_free_desc: spin_unlock_bh(&dp->rx_desc_lock); err_unmap_dma: dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb), - DMA_FROM_DEVICE); + DMA_TO_DEVICE); return ret; } @@ -3346,7 +3356,8 @@ ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc, if (!desc_info) { desc_info = ath12k_dp_get_rx_desc(ab, cookie); if (!desc_info) { - ath12k_warn(ab, "Invalid cookie in manual desc retrieval"); + ath12k_warn(ab, "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n", + cookie); return -EINVAL; } } @@ -3421,7 +3432,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, struct ath12k *ar; dma_addr_t paddr; bool is_frag; - bool drop = false; + bool drop; int pdev_id; tot_n_bufs_reaped = 0; @@ -3439,7 +3450,9 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, while (budget && (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) { + drop = false; ab->soc_stats.err_ring_pkts++; + ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr, &desc_bank); if (ret) { @@ -3451,7 +3464,7 @@ int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi, (paddr - link_desc_banks[desc_bank].paddr); ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies, &rbm); - if (rbm != HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST && + if (rbm != dp->idle_link_rbm && rbm != HAL_RX_BUF_RBM_SW3_BM && rbm != ab->hw_params->hal_params->rx_buf_rbm) { ab->soc_stats.invalid_rbm++; @@ -3765,7 +3778,8 @@ int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab, if (!desc_info) { desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie); if (!desc_info) { - ath12k_warn(ab, "Invalid cookie in manual desc retrieval"); + ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n", + err_info.cookie); continue; } } @@ -3961,7 +3975,7 @@ void ath12k_dp_rx_free(struct ath12k_base *ab) ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring); - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { if (ab->hw_params->rx_mac_buf_ring) ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]); } @@ -3970,7 +3984,6 @@ void ath12k_dp_rx_free(struct ath12k_base *ab) ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]); ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring); - ath12k_dp_srng_cleanup(ab, &dp->tx_mon_buf_ring.refill_buf_ring); ath12k_dp_rxdma_buf_free(ab); } @@ -4028,7 +4041,7 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab) struct ath12k_dp *dp = &ab->dp; struct htt_rx_ring_tlv_filter tlv_filter = {0}; u32 ring_id; - int ret; + int ret = 0; u32 hal_rx_desc_sz = ab->hal.hal_desc_sz; int i; @@ -4054,7 +4067,7 @@ int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab) * and modify the rx_desc struct */ - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { ring_id = dp->rx_mac_buf_ring[i].ring_id; ret = 
ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i, HAL_RXDMA_BUF, @@ -4081,7 +4094,7 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab) } if (ab->hw_params->rx_mac_buf_ring) { - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { ring_id = dp->rx_mac_buf_ring[i].ring_id; ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, i, HAL_RXDMA_BUF); @@ -4113,15 +4126,6 @@ int ath12k_dp_rx_htt_setup(struct ath12k_base *ab) ret); return ret; } - - ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id; - ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, - 0, HAL_TX_MONITOR_BUF); - if (ret) { - ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n", - ret); - return ret; - } } ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab); @@ -4141,9 +4145,6 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab) idr_init(&dp->rxdma_mon_buf_ring.bufs_idr); spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock); - idr_init(&dp->tx_mon_buf_ring.bufs_idr); - spin_lock_init(&dp->tx_mon_buf_ring.idr_lock); - ret = ath12k_dp_srng_setup(ab, &dp->rx_refill_buf_ring.refill_buf_ring, HAL_RXDMA_BUF, 0, 0, @@ -4154,7 +4155,7 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab) } if (ab->hw_params->rx_mac_buf_ring) { - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { ret = ath12k_dp_srng_setup(ab, &dp->rx_mac_buf_ring[i], HAL_RXDMA_BUF, 1, @@ -4186,15 +4187,6 @@ int ath12k_dp_rx_alloc(struct ath12k_base *ab) ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n"); return ret; } - - ret = ath12k_dp_srng_setup(ab, - &dp->tx_mon_buf_ring.refill_buf_ring, - HAL_TX_MONITOR_BUF, 0, 0, - DP_TX_MONITOR_BUF_RING_SIZE); - if (ret) { - ath12k_warn(ab, "failed to setup DP_TX_MONITOR_BUF_RING_SIZE\n"); - return ret; - } } ret = ath12k_dp_rxdma_buf_setup(ab); @@ -4223,7 +4215,7 @@ int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id) return ret; } - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { ring_id = dp->rxdma_mon_dst_ring[i].ring_id; ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i, @@ -4234,17 +4226,6 @@ int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id) i, ret); return ret; } - - ring_id = dp->tx_mon_dst_ring[i].ring_id; - ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, - mac_id + i, - HAL_TX_MONITOR_DST); - if (ret) { - ath12k_warn(ab, - "failed to configure tx_mon_dst_ring %d %d\n", - i, ret); - return ret; - } } out: return 0; diff --git a/drivers/net/wireless/ath/ath12k/dp_rx.h b/drivers/net/wireless/ath/ath12k/dp_rx.h index 2ff421160181..eb1f92559179 100644 --- a/drivers/net/wireless/ath/ath12k/dp_rx.h +++ b/drivers/net/wireless/ath/ath12k/dp_rx.h @@ -139,4 +139,8 @@ ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu); int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab); int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab); +int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len, + int (*iter)(struct ath12k_base *ar, u16 tag, u16 len, + const void *ptr, void *data), + void *data); #endif /* ATH12K_DP_RX_H */ diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.c b/drivers/net/wireless/ath/ath12k/dp_tx.c index 9b6d7d72f57c..d08c04343e90 100644 --- a/drivers/net/wireless/ath/ath12k/dp_tx.c +++ b/drivers/net/wireless/ath/ath12k/dp_tx.c @@ -124,6 +124,44 @@ static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab, 
HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE); } +#define HTT_META_DATA_ALIGNMENT 0x8 + +static void *ath12k_dp_metadata_align_skb(struct sk_buff *skb, u8 tail_len) +{ + struct sk_buff *tail; + void *metadata; + + if (unlikely(skb_cow_data(skb, tail_len, &tail) < 0)) + return NULL; + + metadata = pskb_put(skb, tail, tail_len); + memset(metadata, 0, tail_len); + return metadata; +} + +/* Prepare HTT metadata when used with an ext MSDU */ +static int ath12k_dp_prepare_htt_metadata(struct sk_buff *skb) +{ + struct hal_tx_msdu_metadata *desc_ext; + u8 htt_desc_size; + /* Size rounded to a multiple of 8 bytes */ + u8 htt_desc_size_aligned; + + htt_desc_size = sizeof(struct hal_tx_msdu_metadata); + htt_desc_size_aligned = ALIGN(htt_desc_size, HTT_META_DATA_ALIGNMENT); + + desc_ext = ath12k_dp_metadata_align_skb(skb, htt_desc_size_aligned); + if (!desc_ext) + return -ENOMEM; + + desc_ext->info0 = le32_encode_bits(1, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_FLAG) | + le32_encode_bits(0, HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_TYPE) | + le32_encode_bits(1, + HAL_TX_MSDU_METADATA_INFO0_HOST_TX_DESC_POOL); + + return 0; +} + int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif, struct sk_buff *skb) { @@ -145,6 +183,7 @@ int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif, u8 ring_selector, ring_map = 0; bool tcl_ring_retry; bool msdu_ext_desc = false; + bool add_htt_metadata = false; if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)) return -ESHUTDOWN; @@ -248,6 +287,18 @@ tcl_ring_sel: goto fail_remove_tx_buf; } + if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) && + !(skb_cb->flags & ATH12K_SKB_HW_80211_ENCAP) && + !(skb_cb->flags & ATH12K_SKB_CIPHER_SET) && + ieee80211_has_protected(hdr->frame_control)) { + /* Add metadata for sw encrypted vlan group traffic */ + add_htt_metadata = true; + msdu_ext_desc = true; + ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TO_FW); + ti.encap_type = HAL_TCL_ENCAP_TYPE_RAW; + ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN; + } + tx_desc->skb = skb; tx_desc->mac_id = ar->pdev_idx; ti.desc_id = tx_desc->desc_id; @@ -269,6 +320,15 @@ tcl_ring_sel: msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data; ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti); + if (add_htt_metadata) { + ret = ath12k_dp_prepare_htt_metadata(skb_ext_desc); + if (ret < 0) { + ath12k_dbg(ab, ATH12K_DBG_DP_TX, + "Failed to add HTT meta data, dropping packet\n"); + goto fail_unmap_dma; + } + } + ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data, skb_ext_desc->len, DMA_TO_DEVICE); ret = dma_mapping_error(ab->dev, ti.paddr); @@ -352,15 +412,15 @@ static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab, u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id); skb_cb = ATH12K_SKB_CB(msdu); + ar = ab->pdevs[pdev_id].ar; dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); if (skb_cb->paddr_ext_desc) dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc, sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE); - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ar->ah->hw, msdu); - ar = ab->pdevs[pdev_id].ar; if (atomic_dec_and_test(&ar->dp.num_tx_pending)) wake_up(&ar->dp.tx_empty_waitq); } @@ -393,8 +453,12 @@ ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab, if (ts->acked) { if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { info->flags |= IEEE80211_TX_STAT_ACK; - info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR + - ts->ack_rssi; + info->status.ack_signal = ts->ack_rssi; + + if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, +
ab->wmi_ab.svc_map)) + info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR; + + info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; } else { info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; @@ -448,6 +512,7 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar, struct hal_tx_status *ts) { struct ath12k_base *ab = ar->ab; + struct ath12k_hw *ah = ar->ah; struct ieee80211_tx_info *info; struct ath12k_skb_cb *skb_cb; @@ -466,12 +531,12 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar, rcu_read_lock(); if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) { - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ah->hw, msdu); goto exit; } if (!skb_cb->vif) { - dev_kfree_skb_any(msdu); + ieee80211_free_txskb(ah->hw, msdu); goto exit; } @@ -481,17 +546,39 @@ static void ath12k_dp_tx_complete_msdu(struct ath12k *ar, /* skip tx rate update from ieee80211_status*/ info->status.rates[0].idx = -1; - if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED && - !(info->flags & IEEE80211_TX_CTL_NO_ACK)) { - info->flags |= IEEE80211_TX_STAT_ACK; - info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR + - ts->ack_rssi; - info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; - } + switch (ts->status) { + case HAL_WBM_TQM_REL_REASON_FRAME_ACKED: + if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { + info->flags |= IEEE80211_TX_STAT_ACK; + info->status.ack_signal = ts->ack_rssi; - if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX && - (info->flags & IEEE80211_TX_CTL_NO_ACK)) - info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, + ab->wmi_ab.svc_map)) + info->status.ack_signal += ATH12K_DEFAULT_NOISE_FLOOR; + + info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID; + } + break; + case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX: + if (info->flags & IEEE80211_TX_CTL_NO_ACK) { + info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; + break; + } + fallthrough; + case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_MPDU: + case HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD: + case HAL_WBM_TQM_REL_REASON_CMD_REMOVE_AGED_FRAMES: + /* The failure status is due to an internal firmware tx failure, + * hence drop the frame; do not update the frame status to + * the upper layer + */ + ieee80211_free_txskb(ah->hw, msdu); + goto exit; + default: + ath12k_dbg(ab, ATH12K_DBG_DP_TX, "tx frame is not acked status %d\n", + ts->status); + break; + } /* NOTE: Tx rate status reporting. Tx completion status does not have * necessary information (for example nss) to build the tx rate. 
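
Both completion paths above now apply the same ack-signal rule: when the firmware advertises WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT, ts->ack_rssi is already a signed dBm value, otherwise the host offsets the raw dB figure by the default noise floor. A minimal standalone sketch of that conversion follows; the -95 dBm noise floor and the example_* names are assumptions for illustration, not part of the patch.

#include <stdbool.h>

/* Assumed stand-in for ATH12K_DEFAULT_NOISE_FLOOR (illustration only). */
#define EXAMPLE_NOISE_FLOOR_DBM (-95)

/* Models the ack_signal fixup done in ath12k_dp_tx_complete_msdu() and
 * ath12k_dp_tx_htt_tx_complete_buf(): with hardware dB-to-dBm conversion
 * the reported value is used as-is; without it, ack_rssi is dB above the
 * noise floor and must be offset by the host.
 */
static int example_ack_signal_dbm(signed char ack_rssi, bool hw_db2dbm)
{
	if (hw_db2dbm)
		return ack_rssi;

	return EXAMPLE_NOISE_FLOOR_DBM + ack_rssi;
}

A negative result is expected either way, which is why this series also widens ack_rssi from u8 to s8 in struct hal_tx_status and from int to s8 in struct ath12k_dp_htt_wbm_tx_status.
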
@@ -669,14 +756,6 @@ ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab, *htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING; *htt_ring_type = HTT_SW_TO_HW_RING; break; - case HAL_TX_MONITOR_BUF: - *htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING; - *htt_ring_type = HTT_SW_TO_HW_RING; - break; - case HAL_TX_MONITOR_DST: - *htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING; - *htt_ring_type = HTT_HW_TO_SW_RING; - break; default: ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type); ret = -EINVAL; @@ -854,7 +933,7 @@ int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask) int ret; int i; - for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) { + for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) { skb = ath12k_htc_alloc_skb(ab, len); if (!skb) return -ENOMEM; @@ -1007,6 +1086,7 @@ ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type, struct htt_ext_stats_cfg_cmd *cmd; int len = sizeof(*cmd); int ret; + u32 pdev_id; skb = ath12k_htc_alloc_skb(ab, len); if (!skb) @@ -1018,7 +1098,8 @@ ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type, memset(cmd, 0, sizeof(*cmd)); cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG; - cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id; + pdev_id = ath12k_mac_get_target_pdev_id(ar); + cmd->hdr.pdev_mask = 1 << pdev_id; cmd->hdr.stats_type = type; cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0); @@ -1044,13 +1125,7 @@ int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset) struct ath12k_base *ab = ar->ab; int ret; - ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset); - if (ret) { - ath12k_err(ab, "failed to setup tx monitor filter %d\n", ret); - return ret; - } - - ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset); + ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset); if (ret) { ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret); return ret; @@ -1209,31 +1284,3 @@ err_free: dev_kfree_skb_any(skb); return ret; } - -int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset) -{ - struct ath12k_base *ab = ar->ab; - struct ath12k_dp *dp = &ab->dp; - struct htt_tx_ring_tlv_filter tlv_filter = {0}; - int ret, ring_id; - - ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id; - - /* TODO: Need to set upstream/downstream tlv filters - * here - */ - - if (ab->hw_params->rxdma1_enable) { - ret = ath12k_dp_tx_htt_tx_filter_setup(ar->ab, ring_id, 0, - HAL_TX_MONITOR_BUF, - DP_RXDMA_REFILL_RING_SIZE, - &tlv_filter); - if (ret) { - ath12k_err(ab, - "failed to setup filter for monitor buf %d\n", ret); - return ret; - } - } - - return 0; -} diff --git a/drivers/net/wireless/ath/ath12k/dp_tx.h b/drivers/net/wireless/ath/ath12k/dp_tx.h index 436d77e5e9ee..55ff8cc721e3 100644 --- a/drivers/net/wireless/ath/ath12k/dp_tx.h +++ b/drivers/net/wireless/ath/ath12k/dp_tx.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH12K_DP_TX_H @@ -12,7 +12,7 @@ struct ath12k_dp_htt_wbm_tx_status { bool acked; - int ack_rssi; + s8 ack_rssi; }; int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab); @@ -36,6 +36,5 @@ int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id, int mac_id, enum hal_ring_type ring_type, int tx_buf_size, struct htt_tx_ring_tlv_filter *htt_tlv_filter); -int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset); int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset); #endif diff --git a/drivers/net/wireless/ath/ath12k/hal.c b/drivers/net/wireless/ath/ath12k/hal.c index 78310da8cfe8..ca04bfae8bdc 100644 --- a/drivers/net/wireless/ath/ath12k/hal.c +++ b/drivers/net/wireless/ath/ath12k/hal.c @@ -1969,14 +1969,15 @@ u32 ath12k_hal_ce_dst_status_get_length(struct hal_ce_srng_dst_status_desc *desc } void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie, - dma_addr_t paddr) + dma_addr_t paddr, + enum hal_rx_buf_return_buf_manager rbm) { desc->buf_addr_info.info0 = le32_encode_bits((paddr & HAL_ADDR_LSB_REG_MASK), BUFFER_ADDR_INFO0_ADDR); desc->buf_addr_info.info1 = le32_encode_bits(((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT), BUFFER_ADDR_INFO1_ADDR) | - le32_encode_bits(1, BUFFER_ADDR_INFO1_RET_BUF_MGR) | + le32_encode_bits(rbm, BUFFER_ADDR_INFO1_RET_BUF_MGR) | le32_encode_bits(cookie, BUFFER_ADDR_INFO1_SW_COOKIE); } diff --git a/drivers/net/wireless/ath/ath12k/hal.h b/drivers/net/wireless/ath/ath12k/hal.h index dbb9205bfa10..8a78bb9a10bc 100644 --- a/drivers/net/wireless/ath/ath12k/hal.h +++ b/drivers/net/wireless/ath/ath12k/hal.h @@ -770,12 +770,12 @@ struct hal_srng_config { * enum hal_rx_buf_return_buf_manager - manager for returned rx buffers * * @HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST: Buffer returned to WBM idle buffer list - * @HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST: Descriptor returned to WBM idle - * descriptor list, where the chip 0 WBM is chosen in case of a multi-chip config - * @HAL_RX_BUF_RBM_WBM_CHIP1_IDLE_DESC_LIST: Descriptor returned to WBM idle - * descriptor list, where the chip 1 WBM is chosen in case of a multi-chip config - * @HAL_RX_BUF_RBM_WBM_CHIP2_IDLE_DESC_LIST: Descriptor returned to WBM idle - * descriptor list, where the chip 2 WBM is chosen in case of a multi-chip config + * @HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST: Descriptor returned to WBM idle + * descriptor list, where the device 0 WBM is chosen in case of a multi-device config + * @HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST: Descriptor returned to WBM idle + * descriptor list, where the device 1 WBM is chosen in case of a multi-device config + * @HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST: Descriptor returned to WBM idle + * descriptor list, where the device 2 WBM is chosen in case of a multi-device config * @HAL_RX_BUF_RBM_FW_BM: Buffer returned to FW * @HAL_RX_BUF_RBM_SW0_BM: For ring 0 -- returned to host * @HAL_RX_BUF_RBM_SW1_BM: For ring 1 -- returned to host @@ -788,9 +788,9 @@ struct hal_srng_config { enum hal_rx_buf_return_buf_manager { HAL_RX_BUF_RBM_WBM_IDLE_BUF_LIST, - HAL_RX_BUF_RBM_WBM_CHIP0_IDLE_DESC_LIST, - HAL_RX_BUF_RBM_WBM_CHIP1_IDLE_DESC_LIST, - HAL_RX_BUF_RBM_WBM_CHIP2_IDLE_DESC_LIST, + HAL_RX_BUF_RBM_WBM_DEV0_IDLE_DESC_LIST, + HAL_RX_BUF_RBM_WBM_DEV1_IDLE_DESC_LIST, + HAL_RX_BUF_RBM_WBM_DEV2_IDLE_DESC_LIST, HAL_RX_BUF_RBM_FW_BM, HAL_RX_BUF_RBM_SW0_BM, HAL_RX_BUF_RBM_SW1_BM, @@ -1113,7 +1113,8 @@ dma_addr_t ath12k_hal_srng_get_tp_addr(struct ath12k_base *ab, dma_addr_t ath12k_hal_srng_get_hp_addr(struct 
ath12k_base *ab, struct hal_srng *srng); void ath12k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie, - dma_addr_t paddr); + dma_addr_t paddr, + enum hal_rx_buf_return_buf_manager rbm); u32 ath12k_hal_ce_get_desc_size(enum hal_ce_desc type); void ath12k_hal_ce_src_set_desc(struct hal_ce_srng_src_desc *desc, dma_addr_t paddr, u32 len, u32 id, u8 byte_swap_data); diff --git a/drivers/net/wireless/ath/ath12k/hal_desc.h b/drivers/net/wireless/ath/ath12k/hal_desc.h index 63340256d3f6..739f73370015 100644 --- a/drivers/net/wireless/ath/ath12k/hal_desc.h +++ b/drivers/net/wireless/ath/ath12k/hal_desc.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. */ #include "core.h" @@ -597,8 +597,30 @@ struct hal_tlv_64_hdr { #define RX_MPDU_DESC_INFO0_MPDU_QOS_CTRL_VALID BIT(27) #define RX_MPDU_DESC_INFO0_TID GENMASK(31, 28) -/* TODO revisit after meta data is concluded */ -#define RX_MPDU_DESC_META_DATA_PEER_ID GENMASK(15, 0) +/* Peer Metadata classification */ + +/* Version 0 */ +#define RX_MPDU_DESC_META_DATA_V0_PEER_ID GENMASK(15, 0) +#define RX_MPDU_DESC_META_DATA_V0_VDEV_ID GENMASK(23, 16) + +/* Version 1 */ +#define RX_MPDU_DESC_META_DATA_V1_PEER_ID GENMASK(13, 0) +#define RX_MPDU_DESC_META_DATA_V1_LOGICAL_LINK_ID GENMASK(15, 14) +#define RX_MPDU_DESC_META_DATA_V1_VDEV_ID GENMASK(23, 16) +#define RX_MPDU_DESC_META_DATA_V1_LMAC_ID GENMASK(25, 24) +#define RX_MPDU_DESC_META_DATA_V1_DEVICE_ID GENMASK(28, 26) + +/* Version 1A */ +#define RX_MPDU_DESC_META_DATA_V1A_PEER_ID GENMASK(13, 0) +#define RX_MPDU_DESC_META_DATA_V1A_VDEV_ID GENMASK(21, 14) +#define RX_MPDU_DESC_META_DATA_V1A_LOGICAL_LINK_ID GENMASK(25, 22) +#define RX_MPDU_DESC_META_DATA_V1A_DEVICE_ID GENMASK(28, 26) + +/* Version 1B */ +#define RX_MPDU_DESC_META_DATA_V1B_PEER_ID GENMASK(13, 0) +#define RX_MPDU_DESC_META_DATA_V1B_VDEV_ID GENMASK(21, 14) +#define RX_MPDU_DESC_META_DATA_V1B_HW_LINK_ID GENMASK(25, 22) +#define RX_MPDU_DESC_META_DATA_V1B_DEVICE_ID GENMASK(28, 26) struct rx_mpdu_desc { __le32 info0; /* %RX_MPDU_DESC_INFO */ @@ -2048,6 +2070,19 @@ struct hal_wbm_release_ring { * fw with fw_reason2. * @HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3: Remove command initiated by * fw with fw_reason3. + * @HAL_WBM_TQM_REL_REASON_CMD_DISABLE_QUEUE: Remove command initiated by + * fw with disable queue. + * @HAL_WBM_TQM_REL_REASON_CMD_TILL_NONMATCHING: Remove command initiated by + * fw to remove all mpdu until 1st non-match. 
+ * @HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD: Dropped due to drop threshold + * criteria + * @HAL_WBM_TQM_REL_REASON_DROP_LINK_DESC_UNAVAIL: Dropped due to link desc + * not available + * @HAL_WBM_TQM_REL_REASON_DROP_OR_INVALID_MSDU: Dropped due to drop bit set or + * null flow + * @HAL_WBM_TQM_REL_REASON_MULTICAST_DROP: Dropped due to mcast drop set for VDEV + * @HAL_WBM_TQM_REL_REASON_VDEV_MISMATCH_DROP: Dropped due to being set with + * 'TCL_drop_reason' */ enum hal_wbm_tqm_rel_reason { HAL_WBM_TQM_REL_REASON_FRAME_ACKED, @@ -2058,6 +2093,13 @@ enum hal_wbm_tqm_rel_reason { HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON1, HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON2, HAL_WBM_TQM_REL_REASON_CMD_REMOVE_RESEAON3, + HAL_WBM_TQM_REL_REASON_CMD_DISABLE_QUEUE, + HAL_WBM_TQM_REL_REASON_CMD_TILL_NONMATCHING, + HAL_WBM_TQM_REL_REASON_DROP_THRESHOLD, + HAL_WBM_TQM_REL_REASON_DROP_LINK_DESC_UNAVAIL, + HAL_WBM_TQM_REL_REASON_DROP_OR_INVALID_MSDU, + HAL_WBM_TQM_REL_REASON_MULTICAST_DROP, + HAL_WBM_TQM_REL_REASON_VDEV_MISMATCH_DROP, }; struct hal_wbm_buffer_ring { @@ -2964,4 +3006,29 @@ struct hal_mon_dest_desc { * updated by SRNG. */ +#define HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_FLAG BIT(8) +#define HAL_TX_MSDU_METADATA_INFO0_ENCRYPT_TYPE GENMASK(16, 15) +#define HAL_TX_MSDU_METADATA_INFO0_HOST_TX_DESC_POOL BIT(31) + +struct hal_tx_msdu_metadata { + __le32 info0; + __le32 rsvd0[6]; +} __packed; + +/* hal_tx_msdu_metadata + * valid_encrypt_type + * if set, encrypt type is valid + * encrypt_type + * 0 = NO_ENCRYPT, + * 1 = ENCRYPT, + * 2 ~ 3 - Reserved + * host_tx_desc_pool + * If set, Firmware allocates tx_descriptors + * in WAL_BUFFERID_TX_HOST_DATA_EXP, instead + * of WAL_BUFFERID_TX_TCL_DATA_EXP. + * Use cases: + * Any time firmware uses TQM-BYPASS for Data + * TID, firmware expects the host to set this bit. + */ + #endif /* ATH12K_HAL_DESC_H */ diff --git a/drivers/net/wireless/ath/ath12k/hal_tx.h b/drivers/net/wireless/ath/ath12k/hal_tx.h index 7c837094a6f7..3cf5973771d7 100644 --- a/drivers/net/wireless/ath/ath12k/hal_tx.h +++ b/drivers/net/wireless/ath/ath12k/hal_tx.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: BSD-3-Clause-Clear */ /* * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved. - * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2021-2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
*/ #ifndef ATH12K_HAL_TX_H @@ -57,7 +57,7 @@ struct hal_tx_info { struct hal_tx_status { enum hal_wbm_rel_src_module buf_rel_source; enum hal_wbm_tqm_rel_reason status; - u8 ack_rssi; + s8 ack_rssi; u32 flags; /* %HAL_TX_STATUS_FLAGS_ */ u32 ppdu_id; u8 try_cnt; diff --git a/drivers/net/wireless/ath/ath12k/hif.h b/drivers/net/wireless/ath/ath12k/hif.h index 7f0926fe751d..0e53ec269fa4 100644 --- a/drivers/net/wireless/ath/ath12k/hif.h +++ b/drivers/net/wireless/ath/ath12k/hif.h @@ -30,6 +30,7 @@ struct ath12k_hif_ops { void (*ce_irq_enable)(struct ath12k_base *ab); void (*ce_irq_disable)(struct ath12k_base *ab); void (*get_ce_msi_idx)(struct ath12k_base *ab, u32 ce_id, u32 *msi_idx); + int (*panic_handler)(struct ath12k_base *ab); }; static inline int ath12k_hif_map_service_to_pipe(struct ath12k_base *ab, u16 service_id, @@ -147,4 +148,12 @@ static inline void ath12k_hif_power_down(struct ath12k_base *ab, bool is_suspend ab->hif.ops->power_down(ab, is_suspend); } +static inline int ath12k_hif_panic_handler(struct ath12k_base *ab) +{ + if (!ab->hif.ops->panic_handler) + return NOTIFY_DONE; + + return ab->hif.ops->panic_handler(ab); +} + #endif /* ATH12K_HIF_H */ diff --git a/drivers/net/wireless/ath/ath12k/htc.c b/drivers/net/wireless/ath/ath12k/htc.c index 2f2230f565bb..d13616bf07f4 100644 --- a/drivers/net/wireless/ath/ath12k/htc.c +++ b/drivers/net/wireless/ath/ath12k/htc.c @@ -244,6 +244,11 @@ static void ath12k_htc_suspend_complete(struct ath12k_base *ab, bool ack) complete(&ab->htc_suspend); } +static void ath12k_htc_wakeup_from_suspend(struct ath12k_base *ab) +{ + ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot wakeup from suspend is received\n"); +} + void ath12k_htc_rx_completion_handler(struct ath12k_base *ab, struct sk_buff *skb) { @@ -349,6 +354,7 @@ void ath12k_htc_rx_completion_handler(struct ath12k_base *ab, ath12k_htc_suspend_complete(ab, false); break; case ATH12K_HTC_MSG_WAKEUP_FROM_SUSPEND_ID: + ath12k_htc_wakeup_from_suspend(ab); break; default: ath12k_warn(ab, "ignoring unsolicited htc ep0 event %u\n", diff --git a/drivers/net/wireless/ath/ath12k/hw.c b/drivers/net/wireless/ath/ath12k/hw.c index f4c827015821..2e11ea763574 100644 --- a/drivers/net/wireless/ath/ath12k/hw.c +++ b/drivers/net/wireless/ath/ath12k/hw.c @@ -544,9 +544,6 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = { }, .rx_mon_dest = { 0, 0, 0, - ATH12K_RX_MON_RING_MASK_0, - ATH12K_RX_MON_RING_MASK_1, - ATH12K_RX_MON_RING_MASK_2, }, .rx = { 0, 0, 0, 0, @@ -572,16 +569,15 @@ static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_qcn9274 = { ATH12K_HOST2RXDMA_RING_MASK_0, }, .tx_mon_dest = { - ATH12K_TX_MON_RING_MASK_0, - ATH12K_TX_MON_RING_MASK_1, + 0, 0, 0, }, }; static const struct ath12k_hw_ring_mask ath12k_hw_ring_mask_wcn7850 = { .tx = { ATH12K_TX_RING_MASK_0, + ATH12K_TX_RING_MASK_1, ATH12K_TX_RING_MASK_2, - ATH12K_TX_RING_MASK_4, }, .rx_mon_dest = { }, @@ -884,14 +880,15 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .hal_params = &ath12k_hw_hal_params_qcn9274, .rxdma1_enable = false, - .num_rxmda_per_pdev = 1, + .num_rxdma_per_pdev = 1, .num_rxdma_dst_ring = 0, .rx_mac_buf_ring = false, .vdev_start_delay = false, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_MESH_POINT), + BIT(NL80211_IFTYPE_MESH_POINT) | + BIT(NL80211_IFTYPE_AP_VLAN), .supports_monitor = false, .idle_ps = false, @@ -926,6 +923,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .supports_sta_ps = false, .acpi_guid = NULL, + 
.supports_dynamic_smps_6ghz = true, }, { .name = "wcn7850 hw2.0", @@ -956,7 +954,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .hal_params = &ath12k_hw_hal_params_wcn7850, .rxdma1_enable = false, - .num_rxmda_per_pdev = 2, + .num_rxdma_per_pdev = 2, .num_rxdma_dst_ring = 1, .rx_mac_buf_ring = true, .vdev_start_delay = true, @@ -1001,6 +999,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .supports_sta_ps = true, .acpi_guid = &wcn7850_uuid, + .supports_dynamic_smps_6ghz = false, }, { .name = "qcn9274 hw2.0", @@ -1029,14 +1028,15 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .hal_params = &ath12k_hw_hal_params_qcn9274, .rxdma1_enable = false, - .num_rxmda_per_pdev = 1, + .num_rxdma_per_pdev = 1, .num_rxdma_dst_ring = 0, .rx_mac_buf_ring = false, .vdev_start_delay = false, .interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_MESH_POINT), + BIT(NL80211_IFTYPE_MESH_POINT) | + BIT(NL80211_IFTYPE_AP_VLAN), .supports_monitor = false, .idle_ps = false, @@ -1071,6 +1071,7 @@ static const struct ath12k_hw_params ath12k_hw_params[] = { .supports_sta_ps = false, .acpi_guid = NULL, + .supports_dynamic_smps_6ghz = true, }, }; diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h index 3f450ee93f34..e792eb6b249b 100644 --- a/drivers/net/wireless/ath/ath12k/hw.h +++ b/drivers/net/wireless/ath/ath12k/hw.h @@ -78,8 +78,7 @@ #define TARGET_NUM_WDS_ENTRIES 32 #define TARGET_DMA_BURST_SIZE 1 #define TARGET_RX_BATCHMODE 1 -#define TARGET_RX_PEER_METADATA_VER_V1A 2 -#define TARGET_RX_PEER_METADATA_VER_V1B 3 +#define TARGET_EMA_MAX_PROFILE_PERIOD 8 #define ATH12K_HW_DEFAULT_QUEUE 0 #define ATH12K_HW_MAX_QUEUES 4 @@ -174,7 +173,7 @@ struct ath12k_hw_params { const struct ath12k_hw_hal_params *hal_params; bool rxdma1_enable:1; - int num_rxmda_per_pdev; + int num_rxdma_per_pdev; int num_rxdma_dst_ring; bool rx_mac_buf_ring:1; bool vdev_start_delay:1; @@ -215,6 +214,7 @@ struct ath12k_hw_params { bool supports_sta_ps; const guid_t *acpi_guid; + bool supports_dynamic_smps_6ghz; }; struct ath12k_hw_ops { diff --git a/drivers/net/wireless/ath/ath12k/mac.c b/drivers/net/wireless/ath/ath12k/mac.c index 805cb084484a..8106297f0bc1 100644 --- a/drivers/net/wireless/ath/ath12k/mac.c +++ b/drivers/net/wireless/ath/ath12k/mac.c @@ -6,6 +6,7 @@ #include #include + #include "mac.h" #include "core.h" #include "debug.h" @@ -15,6 +16,8 @@ #include "dp_rx.h" #include "peer.h" #include "debugfs.h" +#include "hif.h" +#include "wow.h" #define CHAN2G(_channel, _freq, _flags) { \ .band = NL80211_BAND_2GHZ, \ @@ -91,6 +94,10 @@ static const struct ieee80211_channel ath12k_5ghz_channels[] = { }; static const struct ieee80211_channel ath12k_6ghz_channels[] = { + /* Operating Class 136 */ + CHAN6G(2, 5935, 0), + + /* Operating Classes 131-135 */ CHAN6G(1, 5955, 0), CHAN6G(5, 5975, 0), CHAN6G(9, 5995, 0), @@ -666,6 +673,82 @@ static struct ath12k *ath12k_get_ar_by_vif(struct ieee80211_hw *hw, return NULL; } +static struct ath12k_vif *ath12k_mac_get_vif_up(struct ath12k *ar) +{ + struct ath12k_vif *arvif; + + lockdep_assert_held(&ar->conf_mutex); + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->is_up) + return arvif; + } + + return NULL; +} + +static bool ath12k_mac_band_match(enum nl80211_band band1, enum WMI_HOST_WLAN_BAND band2) +{ + switch (band1) { + case NL80211_BAND_2GHZ: + if (band2 & WMI_HOST_WLAN_2G_CAP) + return true; + break; + case NL80211_BAND_5GHZ: + case NL80211_BAND_6GHZ: + if (band2 & 
WMI_HOST_WLAN_5G_CAP) + return true; + break; + default: + return false; + } + + return false; +} + +static u8 ath12k_mac_get_target_pdev_id_from_vif(struct ath12k_vif *arvif) +{ + struct ath12k *ar = arvif->ar; + struct ath12k_base *ab = ar->ab; + struct ieee80211_vif *vif = arvif->vif; + struct cfg80211_chan_def def; + enum nl80211_band band; + u8 pdev_id = ab->fw_pdev[0].pdev_id; + int i; + + if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + return pdev_id; + + band = def.chan->band; + + for (i = 0; i < ab->fw_pdev_count; i++) { + if (ath12k_mac_band_match(band, ab->fw_pdev[i].supported_bands)) + return ab->fw_pdev[i].pdev_id; + } + + return pdev_id; +} + +u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar) +{ + struct ath12k_vif *arvif; + struct ath12k_base *ab = ar->ab; + + if (!ab->hw_params->single_pdev_only) + return ar->pdev->pdev_id; + + arvif = ath12k_mac_get_vif_up(ar); + + /* fw_pdev array has pdev ids derived from phy capability + * service ready event (pdev_and_hw_link_ids). + * If no vif is active, return default first index. + */ + if (!arvif) + return ar->ab->fw_pdev[0].pdev_id; + + /* If active vif is found, return the pdev id matching chandef band */ + return ath12k_mac_get_target_pdev_id_from_vif(arvif); +} + static void ath12k_pdev_caps_update(struct ath12k *ar) { struct ath12k_base *ab = ar->ab; @@ -863,9 +946,12 @@ static int ath12k_mac_vdev_setup_sync(struct ath12k *ar) static int ath12k_monitor_vdev_up(struct ath12k *ar, int vdev_id) { + struct ath12k_wmi_vdev_up_params params = {}; int ret; - ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); + params.vdev_id = vdev_id; + params.bssid = ar->mac_addr; + ret = ath12k_wmi_vdev_up(ar, ¶ms); if (ret) { ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n", vdev_id, ret); @@ -882,6 +968,7 @@ static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id, { struct ieee80211_channel *channel; struct wmi_vdev_start_req_arg arg = {}; + struct ath12k_wmi_vdev_up_params params = {}; int ret; lockdep_assert_held(&ar->conf_mutex); @@ -922,7 +1009,9 @@ static int ath12k_mac_monitor_vdev_start(struct ath12k *ar, int vdev_id, return ret; } - ret = ath12k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr); + params.vdev_id = vdev_id; + params.bssid = ar->mac_addr; + ret = ath12k_wmi_vdev_up(ar, ¶ms); if (ret) { ath12k_warn(ar->ab, "failed to put up monitor vdev %i: %d\n", vdev_id, ret); @@ -1289,37 +1378,188 @@ static int ath12k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui, return 0; } +static void ath12k_mac_set_arvif_ies(struct ath12k_vif *arvif, struct sk_buff *bcn, + u8 bssid_index, bool *nontx_profile_found) +{ + struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)bcn->data; + const struct element *elem, *nontx, *index, *nie; + const u8 *start, *tail; + u16 rem_len; + u8 i; + + start = bcn->data + ieee80211_get_hdrlen_from_skb(bcn) + sizeof(mgmt->u.beacon); + tail = skb_tail_pointer(bcn); + rem_len = tail - start; + + arvif->rsnie_present = false; + arvif->wpaie_present = false; + + if (cfg80211_find_ie(WLAN_EID_RSN, start, rem_len)) + arvif->rsnie_present = true; + if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, + start, rem_len)) + arvif->wpaie_present = true; + + /* Return from here for the transmitted profile */ + if (!bssid_index) + return; + + /* Initial rsnie_present for the nontransmitted profile is set to be same as that + * of the transmitted profile. It will be changed if security configurations are + * different. 
+ */ + *nontx_profile_found = false; + for_each_element_id(elem, WLAN_EID_MULTIPLE_BSSID, start, rem_len) { + /* Fixed minimum MBSSID element length with at least one + * nontransmitted BSSID profile is 12 bytes as given below: + * 1 (max BSSID indicator) + + * 2 (Nontransmitted BSSID profile: Subelement ID + length) + + * 4 (Nontransmitted BSSID Capabilities: tag + length + info) + + * 2 (Nontransmitted BSSID SSID: tag + length) + + * 3 (Nontransmitted BSSID Index: tag + length + BSSID index) + */ + if (elem->datalen < 12 || elem->data[0] < 1) + continue; /* Max BSSID indicator must be >=1 */ + + for_each_element(nontx, elem->data + 1, elem->datalen - 1) { + start = nontx->data; + + if (nontx->id != 0 || nontx->datalen < 4) + continue; /* Invalid nontransmitted profile */ + + if (nontx->data[0] != WLAN_EID_NON_TX_BSSID_CAP || + nontx->data[1] != 2) { + continue; /* Missing nontransmitted BSS capabilities */ + } + + if (nontx->data[4] != WLAN_EID_SSID) + continue; /* Missing SSID for nontransmitted BSS */ + + index = cfg80211_find_elem(WLAN_EID_MULTI_BSSID_IDX, + start, nontx->datalen); + if (!index || index->datalen < 1 || index->data[0] == 0) + continue; /* Invalid MBSSID Index element */ + + if (index->data[0] == bssid_index) { + *nontx_profile_found = true; + if (cfg80211_find_ie(WLAN_EID_RSN, + nontx->data, + nontx->datalen)) { + arvif->rsnie_present = true; + return; + } else if (!arvif->rsnie_present) { + return; /* Both tx and nontx BSS are open */ + } + + nie = cfg80211_find_ext_elem(WLAN_EID_EXT_NON_INHERITANCE, + nontx->data, + nontx->datalen); + if (!nie || nie->datalen < 2) + return; /* Invalid non-inheritance element */ + + for (i = 1; i < nie->datalen - 1; i++) { + if (nie->data[i] == WLAN_EID_RSN) { + arvif->rsnie_present = false; + break; + } + } + + return; + } + } + } +} + +static int ath12k_mac_setup_bcn_tmpl_ema(struct ath12k_vif *arvif) +{ + struct ieee80211_bss_conf *bss_conf = &arvif->vif->bss_conf; + struct ath12k_wmi_bcn_tmpl_ema_arg ema_args; + struct ieee80211_ema_beacons *beacons; + struct ath12k_vif *tx_arvif; + bool nontx_profile_found = false; + int ret = 0; + u8 i; + + tx_arvif = ath12k_vif_to_arvif(arvif->vif->mbssid_tx_vif); + beacons = ieee80211_beacon_get_template_ema_list(ath12k_ar_to_hw(tx_arvif->ar), + tx_arvif->vif, 0); + if (!beacons || !beacons->cnt) { + ath12k_warn(arvif->ar->ab, + "failed to get ema beacon templates from mac80211\n"); + return -EPERM; + } + + if (tx_arvif == arvif) + ath12k_mac_set_arvif_ies(arvif, beacons->bcn[0].skb, 0, NULL); + + for (i = 0; i < beacons->cnt; i++) { + if (tx_arvif != arvif && !nontx_profile_found) + ath12k_mac_set_arvif_ies(arvif, beacons->bcn[i].skb, + bss_conf->bssid_index, + &nontx_profile_found); + + ema_args.bcn_cnt = beacons->cnt; + ema_args.bcn_index = i; + ret = ath12k_wmi_bcn_tmpl(tx_arvif->ar, tx_arvif->vdev_id, + &beacons->bcn[i].offs, + beacons->bcn[i].skb, &ema_args); + if (ret) { + ath12k_warn(tx_arvif->ar->ab, + "failed to set ema beacon template id %i error %d\n", + i, ret); + break; + } + } + + if (tx_arvif != arvif && !nontx_profile_found) + ath12k_warn(arvif->ar->ab, + "nontransmitted bssid index %u not found in beacon template\n", + bss_conf->bssid_index); + + ieee80211_beacon_free_ema_list(beacons); + return ret; +} + static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif) { + struct ath12k_vif *tx_arvif = arvif; struct ath12k *ar = arvif->ar; struct ath12k_base *ab = ar->ab; - struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); struct ieee80211_vif *vif = arvif->vif; struct
ieee80211_mutable_offsets offs = {}; + bool nontx_profile_found = false; struct sk_buff *bcn; - struct ieee80211_mgmt *mgmt; - u8 *ies; int ret; if (arvif->vdev_type != WMI_VDEV_TYPE_AP) return 0; - bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0); + if (vif->mbssid_tx_vif) { + tx_arvif = ath12k_vif_to_arvif(vif->mbssid_tx_vif); + if (tx_arvif != arvif && arvif->is_up) + return 0; + + if (vif->bss_conf.ema_ap) + return ath12k_mac_setup_bcn_tmpl_ema(arvif); + } + + bcn = ieee80211_beacon_get_template(ath12k_ar_to_hw(tx_arvif->ar), tx_arvif->vif, + &offs, 0); if (!bcn) { ath12k_warn(ab, "failed to get beacon template from mac80211\n"); return -EPERM; } - ies = bcn->data + ieee80211_get_hdrlen_from_skb(bcn); - ies += sizeof(mgmt->u.beacon); - - if (cfg80211_find_ie(WLAN_EID_RSN, ies, (skb_tail_pointer(bcn) - ies))) - arvif->rsnie_present = true; - - if (cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, - WLAN_OUI_TYPE_MICROSOFT_WPA, - ies, (skb_tail_pointer(bcn) - ies))) - arvif->wpaie_present = true; + if (tx_arvif == arvif) { + ath12k_mac_set_arvif_ies(arvif, bcn, 0, NULL); + } else { + ath12k_mac_set_arvif_ies(arvif, bcn, + arvif->vif->bss_conf.bssid_index, + &nontx_profile_found); + if (!nontx_profile_found) + ath12k_warn(ab, + "nontransmitted profile not found in beacon template\n"); + } if (arvif->vif->type == NL80211_IFTYPE_AP && arvif->vif->p2p) { ret = ath12k_mac_setup_bcn_p2p_ie(arvif, bcn); @@ -1344,7 +1584,7 @@ static int ath12k_mac_setup_bcn_tmpl(struct ath12k_vif *arvif) } } - ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn); + ret = ath12k_wmi_bcn_tmpl(ar, arvif->vdev_id, &offs, bcn, NULL); if (ret) ath12k_warn(ab, "failed to submit beacon template command: %d\n", @@ -1358,6 +1598,7 @@ free_bcn_skb: static void ath12k_control_beaconing(struct ath12k_vif *arvif, struct ieee80211_bss_conf *info) { + struct ath12k_wmi_vdev_up_params params = {}; struct ath12k *ar = arvif->ar; int ret; @@ -1385,8 +1626,15 @@ static void ath12k_control_beaconing(struct ath12k_vif *arvif, ether_addr_copy(arvif->bssid, info->bssid); - ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, - arvif->bssid); + params.vdev_id = arvif->vdev_id; + params.aid = arvif->aid; + params.bssid = arvif->bssid; + if (arvif->vif->mbssid_tx_vif) { + params.tx_bssid = ath12k_vif_to_arvif(arvif->vif->mbssid_tx_vif)->bssid; + params.nontx_profile_idx = info->bssid_index; + params.nontx_profile_cnt = 1 << info->bssid_indicator; + } + ret = ath12k_wmi_vdev_up(arvif->ar, &params); if (ret) { ath12k_warn(ar->ab, "failed to bring up vdev %d: %i\n", arvif->vdev_id, ret); @@ -1881,7 +2129,9 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar, { const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; int i; - u8 ampdu_factor, rx_mcs_80, rx_mcs_160, max_nss; + u8 ampdu_factor, max_nss; + u8 rx_mcs_80 = IEEE80211_HE_MCS_NOT_SUPPORTED; + u8 rx_mcs_160 = IEEE80211_HE_MCS_NOT_SUPPORTED; u16 mcs_160_map, mcs_80_map; bool support_160; u16 v; @@ -2028,17 +2278,88 @@ static void ath12k_peer_assoc_h_he(struct ath12k *ar, } } +static void ath12k_peer_assoc_h_he_6ghz(struct ath12k *ar, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + struct ath12k_wmi_peer_assoc_arg *arg) +{ + const struct ieee80211_sta_he_cap *he_cap = &sta->deflink.he_cap; + struct cfg80211_chan_def def; + enum nl80211_band band; + u8 ampdu_factor, mpdu_density; + + if (WARN_ON(ath12k_mac_vif_chan(vif, &def))) + return; + + band = def.chan->band; + + if (!arg->he_flag || band != NL80211_BAND_6GHZ || !sta->deflink.he_6ghz_capa.capa) +
return; + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40) + arg->bw_40 = true; + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80) + arg->bw_80 = true; + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) + arg->bw_160 = true; + + if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320) + arg->bw_320 = true; + + arg->peer_he_caps_6ghz = le16_to_cpu(sta->deflink.he_6ghz_capa.capa); + + mpdu_density = u32_get_bits(arg->peer_he_caps_6ghz, + IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); + arg->peer_mpdu_density = ath12k_parse_mpdudensity(mpdu_density); + + /* From IEEE Std 802.11ax-2021 - Section 10.12.2: An HE STA shall be capable of + * receiving A-MPDU where the A-MPDU pre-EOF padding length is up to the value + * indicated by the Maximum A-MPDU Length Exponent Extension field in the HE + * Capabilities element and the Maximum A-MPDU Length Exponent field in HE 6 GHz + * Band Capabilities element in the 6 GHz band. + * + * Here, we are extracting the Max A-MPDU Exponent Extension from HE caps and + * factor is the Maximum A-MPDU Length Exponent from HE 6 GHZ Band capability. + */ + ampdu_factor = u8_get_bits(he_cap->he_cap_elem.mac_cap_info[3], + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK) + + u32_get_bits(arg->peer_he_caps_6ghz, + IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); + + arg->peer_max_mpdu = (1u << (IEEE80211_HE_6GHZ_MAX_AMPDU_FACTOR + + ampdu_factor)) - 1; +} + +static int ath12k_get_smps_from_capa(const struct ieee80211_sta_ht_cap *ht_cap, + const struct ieee80211_he_6ghz_capa *he_6ghz_capa, + int *smps) +{ + if (ht_cap->ht_supported) + *smps = u16_get_bits(ht_cap->cap, IEEE80211_HT_CAP_SM_PS); + else + *smps = le16_get_bits(he_6ghz_capa->capa, + IEEE80211_HE_6GHZ_CAP_SM_PS); + + if (*smps >= ARRAY_SIZE(ath12k_smps_map)) + return -EINVAL; + + return 0; +} + static void ath12k_peer_assoc_h_smps(struct ieee80211_sta *sta, struct ath12k_wmi_peer_assoc_arg *arg) { + const struct ieee80211_he_6ghz_capa *he_6ghz_capa = &sta->deflink.he_6ghz_capa; const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap; int smps; - if (!ht_cap->ht_supported) + if (!ht_cap->ht_supported && !he_6ghz_capa->capa) return; - smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; - smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; + if (ath12k_get_smps_from_capa(ht_cap, he_6ghz_capa, &smps)) + return; switch (smps) { case WLAN_HT_CAP_SM_PS_STATIC: @@ -2500,6 +2821,7 @@ static void ath12k_peer_assoc_prepare(struct ath12k *ar, ath12k_peer_assoc_h_ht(ar, vif, sta, arg); ath12k_peer_assoc_h_vht(ar, vif, sta, arg); ath12k_peer_assoc_h_he(ar, vif, sta, arg); + ath12k_peer_assoc_h_he_6ghz(ar, vif, sta, arg); ath12k_peer_assoc_h_eht(ar, vif, sta, arg); ath12k_peer_assoc_h_qos(ar, vif, sta, arg); ath12k_peer_assoc_h_phymode(ar, vif, sta, arg); @@ -2510,18 +2832,17 @@ static void ath12k_peer_assoc_prepare(struct ath12k *ar, static int ath12k_setup_peer_smps(struct ath12k *ar, struct ath12k_vif *arvif, const u8 *addr, - const struct ieee80211_sta_ht_cap *ht_cap) + const struct ieee80211_sta_ht_cap *ht_cap, + const struct ieee80211_he_6ghz_capa *he_6ghz_capa) { - int smps; + int smps, ret = 0; - if (!ht_cap->ht_supported) + if (!ht_cap->ht_supported && !he_6ghz_capa) return 0; - smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; - smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; - - if (smps >= ARRAY_SIZE(ath12k_smps_map)) - return -EINVAL; + ret = ath12k_get_smps_from_capa(ht_cap, he_6ghz_capa, &smps); + if (ret < 0) + return ret; return ath12k_wmi_set_peer_param(ar, addr, arvif->vdev_id, WMI_PEER_MIMO_PS_STATE, @@ -2533,6 +2854,7 @@ 
static void ath12k_bss_assoc(struct ath12k *ar, struct ieee80211_bss_conf *bss_conf) { struct ieee80211_vif *vif = arvif->vif; + struct ath12k_wmi_vdev_up_params params = {}; struct ath12k_wmi_peer_assoc_arg peer_arg; struct ieee80211_sta *ap_sta; struct ath12k_peer *peer; @@ -2572,7 +2894,8 @@ } ret = ath12k_setup_peer_smps(ar, arvif, bss_conf->bssid, - &ap_sta->deflink.ht_cap); + &ap_sta->deflink.ht_cap, + &ap_sta->deflink.he_6ghz_capa); if (ret) { ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", arvif->vdev_id, ret); @@ -2584,7 +2907,10 @@ static void ath12k_bss_assoc(struct ath12k *ar, arvif->aid = vif->cfg.aid; ether_addr_copy(arvif->bssid, bss_conf->bssid); - ret = ath12k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); + params.vdev_id = arvif->vdev_id; + params.aid = arvif->aid; + params.bssid = arvif->bssid; + ret = ath12k_wmi_vdev_up(ar, &params); if (ret) { ath12k_warn(ar->ab, "failed to set vdev %d up: %d\n", arvif->vdev_id, ret); @@ -2592,6 +2918,7 @@ static void ath12k_bss_assoc(struct ath12k *ar, } arvif->is_up = true; + arvif->rekey_data.enable_offload = false; ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac vdev %d up (associated) bssid %pM aid %d\n", @@ -2639,6 +2966,8 @@ static void ath12k_bss_disassoc(struct ath12k *ar, arvif->is_up = false; + memset(&arvif->rekey_data, 0, sizeof(arvif->rekey_data)); + cancel_delayed_work(&arvif->connection_loss_work); } @@ -3130,7 +3459,7 @@ static void ath12k_mac_op_bss_info_changed(struct ieee80211_hw *hw, static struct ath12k* ath12k_mac_select_scan_device(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_scan_request *req) + u32 center_freq) { struct ath12k_hw *ah = hw->priv; enum nl80211_band band; @@ -3147,9 +3476,9 @@ ath12k_mac_select_scan_device(struct ieee80211_hw *hw, * split the hw request and perform multiple scans */ - if (req->req.channels[0]->center_freq < ATH12K_MIN_5G_FREQ) + if (center_freq < ATH12K_MIN_5G_FREQ) band = NL80211_BAND_2GHZ; - else if (req->req.channels[0]->center_freq < ATH12K_MIN_6G_FREQ) + else if (center_freq < ATH12K_MIN_6G_FREQ) band = NL80211_BAND_5GHZ; else band = NL80211_BAND_6GHZ; @@ -3349,7 +3678,7 @@ static int ath12k_mac_op_hw_scan(struct ieee80211_hw *hw, /* Since the targeted scan device could depend on the frequency * requested in the hw_req, select the corresponding radio */ - ar = ath12k_mac_select_scan_device(hw, vif, hw_req); + ar = ath12k_mac_select_scan_device(hw, vif, hw_req->req.channels[0]->center_freq); if (!ar) return -EINVAL; @@ -3845,6 +4174,11 @@ static int ath12k_station_assoc(struct ath12k *ar, ath12k_peer_assoc_prepare(ar, vif, sta, &peer_arg, reassoc); + if (peer_arg.peer_nss < 1) { + ath12k_warn(ar->ab, + "invalid peer NSS %d\n", peer_arg.peer_nss); + return -EINVAL; + } ret = ath12k_wmi_send_peer_assoc_cmd(ar, &peer_arg); if (ret) { ath12k_warn(ar->ab, "failed to run peer assoc for STA %pM vdev %i: %d\n", @@ -3879,7 +4213,8 @@ static int ath12k_station_assoc(struct ath12k *ar, return 0; ret = ath12k_setup_peer_smps(ar, arvif, sta->addr, - &sta->deflink.ht_cap); + &sta->deflink.ht_cap, + &sta->deflink.he_6ghz_capa); if (ret) { ath12k_warn(ar->ab, "failed to setup peer SMPS for vdev %d: %d\n", arvif->vdev_id, ret); @@ -5269,6 +5604,7 @@ static void ath12k_mac_setup_sband_iftype_data(struct ath12k *ar, static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant) { + struct ath12k_hw *ah = ath12k_ar_to_ah(ar); int ret; lockdep_assert_held(&ar->conf_mutex); @@ -5289,8 +5625,8
@@ static int __ath12k_set_antenna(struct ath12k *ar, u32 tx_ant, u32 rx_ant) ar->cfg_tx_chainmask = tx_ant; ar->cfg_rx_chainmask = rx_ant; - if (ar->state != ATH12K_STATE_ON && - ar->state != ATH12K_STATE_RESTARTED) + if (ah->state != ATH12K_HW_STATE_ON && + ah->state != ATH12K_HW_STATE_RESTARTED) return 0; ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_TX_CHAIN_MASK, @@ -5590,51 +5926,16 @@ static int ath12k_mac_config_mon_status_default(struct ath12k *ar, bool enable) /* TODO: Need to support new monitor mode */ } -static void ath12k_mac_wait_reconfigure(struct ath12k_base *ab) -{ - int recovery_start_count; - - if (!ab->is_reset) - return; - - recovery_start_count = atomic_inc_return(&ab->recovery_start_count); - - ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery start count %d\n", recovery_start_count); - - if (recovery_start_count == ab->num_radios) { - complete(&ab->recovery_start); - ath12k_dbg(ab, ATH12K_DBG_MAC, "recovery started success\n"); - } - - ath12k_dbg(ab, ATH12K_DBG_MAC, "waiting reconfigure...\n"); - - wait_for_completion_timeout(&ab->reconfigure_complete, - ATH12K_RECONFIGURE_TIMEOUT_HZ); -} - static int ath12k_mac_start(struct ath12k *ar) { + struct ath12k_hw *ah = ar->ah; struct ath12k_base *ab = ar->ab; struct ath12k_pdev *pdev = ar->pdev; int ret; - mutex_lock(&ar->conf_mutex); + lockdep_assert_held(&ah->hw_mutex); - switch (ar->state) { - case ATH12K_STATE_OFF: - ar->state = ATH12K_STATE_ON; - break; - case ATH12K_STATE_RESTARTING: - ar->state = ATH12K_STATE_RESTARTED; - ath12k_mac_wait_reconfigure(ab); - break; - case ATH12K_STATE_RESTARTED: - case ATH12K_STATE_WEDGED: - case ATH12K_STATE_ON: - WARN_ON(1); - ret = -EINVAL; - goto err; - } + mutex_lock(&ar->conf_mutex); ret = ath12k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1, pdev->pdev_id); @@ -5726,7 +6027,6 @@ static int ath12k_mac_start(struct ath12k *ar) return 0; err: - ar->state = ATH12K_STATE_OFF; mutex_unlock(&ar->conf_mutex); return ret; @@ -5749,9 +6049,29 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw) ath12k_drain_tx(ah); + guard(mutex)(&ah->hw_mutex); + + switch (ah->state) { + case ATH12K_HW_STATE_OFF: + ah->state = ATH12K_HW_STATE_ON; + break; + case ATH12K_HW_STATE_RESTARTING: + ah->state = ATH12K_HW_STATE_RESTARTED; + break; + case ATH12K_HW_STATE_RESTARTED: + case ATH12K_HW_STATE_WEDGED: + case ATH12K_HW_STATE_ON: + ah->state = ATH12K_HW_STATE_OFF; + + WARN_ON(1); + return -EINVAL; + } + for_each_ar(ah, ar, i) { ret = ath12k_mac_start(ar); if (ret) { + ah->state = ATH12K_HW_STATE_OFF; + ath12k_err(ar->ab, "fail to start mac operations in pdev idx %d ret %d\n", ar->pdev_idx, ret); goto fail_start; @@ -5759,11 +6079,13 @@ static int ath12k_mac_op_start(struct ieee80211_hw *hw) } return 0; + fail_start: for (; i > 0; i--) { ar = ath12k_ah_to_ar(ah, i - 1); ath12k_mac_stop(ar); } + return ret; } @@ -5826,9 +6148,12 @@ int ath12k_mac_rfkill_enable_radio(struct ath12k *ar, bool enable) static void ath12k_mac_stop(struct ath12k *ar) { + struct ath12k_hw *ah = ar->ah; struct htt_ppdu_stats_info *ppdu_stats, *tmp; int ret; + lockdep_assert_held(&ah->hw_mutex); + mutex_lock(&ar->conf_mutex); ret = ath12k_mac_config_mon_status_default(ar, false); if (ret && (ret != -EOPNOTSUPP)) @@ -5836,7 +6161,6 @@ static void ath12k_mac_stop(struct ath12k *ar) ret); clear_bit(ATH12K_CAC_RUNNING, &ar->dev_flags); - ar->state = ATH12K_STATE_OFF; mutex_unlock(&ar->conf_mutex); cancel_delayed_work_sync(&ar->scan.timeout); @@ -5857,7 +6181,7 @@ static void ath12k_mac_stop(struct ath12k *ar) 
atomic_set(&ar->num_pending_mgmt_tx, 0); } -static void ath12k_mac_op_stop(struct ieee80211_hw *hw) +static void ath12k_mac_op_stop(struct ieee80211_hw *hw, bool suspend) { struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k *ar; @@ -5865,8 +6189,14 @@ static void ath12k_mac_op_stop(struct ieee80211_hw *hw) ath12k_drain_tx(ah); + mutex_lock(&ah->hw_mutex); + + ah->state = ATH12K_HW_STATE_OFF; + for_each_ar(ah, ar, i) ath12k_mac_stop(ar); + + mutex_unlock(&ah->hw_mutex); } static u8 @@ -5892,17 +6222,59 @@ ath12k_mac_get_vdev_stats_id(struct ath12k_vif *arvif) return vdev_stats_id; } -static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif, - struct ath12k_wmi_vdev_create_arg *arg) +static int ath12k_mac_setup_vdev_params_mbssid(struct ath12k_vif *arvif, + u32 *flags, u32 *tx_vdev_id) +{ + struct ieee80211_vif *tx_vif = arvif->vif->mbssid_tx_vif; + struct ath12k *ar = arvif->ar; + struct ath12k_vif *tx_arvif; + + if (!tx_vif) + return 0; + + tx_arvif = ath12k_vif_to_arvif(tx_vif); + + if (arvif->vif->bss_conf.nontransmitted) { + if (ar->ah->hw->wiphy != ieee80211_vif_to_wdev(tx_vif)->wiphy) + return -EINVAL; + + *flags = WMI_VDEV_MBSSID_FLAGS_NON_TRANSMIT_AP; + *tx_vdev_id = tx_arvif->vdev_id; + } else if (tx_arvif == arvif) { + *flags = WMI_VDEV_MBSSID_FLAGS_TRANSMIT_AP; + } else { + return -EINVAL; + } + + if (arvif->vif->bss_conf.ema_ap) + *flags |= WMI_VDEV_MBSSID_FLAGS_EMA_MODE; + + return 0; +} + +static int ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif, + struct ath12k_wmi_vdev_create_arg *arg) { struct ath12k *ar = arvif->ar; struct ath12k_pdev *pdev = ar->pdev; + int ret; arg->if_id = arvif->vdev_id; arg->type = arvif->vdev_type; arg->subtype = arvif->vdev_subtype; arg->pdev_id = pdev->pdev_id; + arg->mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP; + arg->mbssid_tx_vdev_id = 0; + if (!test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT, + ar->ab->wmi_ab.svc_map)) { + ret = ath12k_mac_setup_vdev_params_mbssid(arvif, + &arg->mbssid_flags, + &arg->mbssid_tx_vdev_id); + if (ret) + return ret; + } + if (pdev->cap.supported_bands & WMI_HOST_WLAN_2G_CAP) { arg->chains[NL80211_BAND_2GHZ].tx = ar->num_tx_chains; arg->chains[NL80211_BAND_2GHZ].rx = ar->num_rx_chains; @@ -5918,6 +6290,7 @@ static void ath12k_mac_setup_vdev_create_arg(struct ath12k_vif *arvif, } arg->if_stats_id = ath12k_mac_get_vdev_stats_id(arvif); + return 0; } static u32 @@ -6099,7 +6472,12 @@ static int ath12k_mac_vdev_create(struct ath12k *ar, struct ieee80211_vif *vif) for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) vif->hw_queue[i] = i % (ATH12K_HW_MAX_QUEUES - 1); - ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg); + ret = ath12k_mac_setup_vdev_create_arg(arvif, &vdev_arg); + if (ret) { + ath12k_warn(ab, "failed to create vdev parameters %d: %d\n", + arvif->vdev_id, ret); + goto err; + } ret = ath12k_wmi_vdev_create(ar, vif->addr, &vdev_arg); if (ret) { @@ -6492,7 +6870,6 @@ err_vdev_del: /* Recalc txpower for remaining vdev */ ath12k_mac_txpower_recalc(ar); - clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); /* TODO: recal traffic pause state based on the available vdevs */ arvif->is_created = false; @@ -6563,15 +6940,9 @@ static void ath12k_mac_configure_filter(struct ath12k *ar, reset_flag = !(ar->filter_flags & FIF_BCN_PRBRESP_PROMISC); ret = ath12k_dp_tx_htt_monitor_mode_ring_config(ar, reset_flag); - if (!ret) { - if (!reset_flag) - set_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); - else - clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); - 
} else { + if (ret) ath12k_warn(ar->ab, "fail to set monitor filter: %d\n", ret); - } ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "total_flags:0x%x, reset_flag:%d\n", @@ -6848,10 +7219,16 @@ ath12k_mac_vdev_start_restart(struct ath12k_vif *arvif, arg.pref_tx_streams = ar->num_tx_chains; arg.pref_rx_streams = ar->num_rx_chains; - /* Fill the MBSSID flags to indicate AP is non MBSSID by default - * Corresponding flags would be updated with MBSSID support. - */ arg.mbssid_flags = WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP; + arg.mbssid_tx_vdev_id = 0; + if (test_bit(WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT, + ar->ab->wmi_ab.svc_map)) { + ret = ath12k_mac_setup_vdev_params_mbssid(arvif, + &arg.mbssid_flags, + &arg.mbssid_tx_vdev_id); + if (ret) + return ret; + } if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { arg.ssid = arvif->u.ap.ssid; @@ -7045,7 +7422,9 @@ ath12k_mac_update_vif_chan(struct ath12k *ar, struct ieee80211_vif_chanctx_switch *vifs, int n_vifs) { + struct ath12k_wmi_vdev_up_params params = {}; struct ath12k_base *ab = ar->ab; + struct ieee80211_vif *vif; struct ath12k_vif *arvif; int ret; int i; @@ -7054,9 +7433,10 @@ ath12k_mac_update_vif_chan(struct ath12k *ar, lockdep_assert_held(&ar->conf_mutex); for (i = 0; i < n_vifs; i++) { - arvif = ath12k_vif_to_arvif(vifs[i].vif); + vif = vifs[i].vif; + arvif = ath12k_vif_to_arvif(vif); - if (vifs[i].vif->type == NL80211_IFTYPE_MONITOR) + if (vif->type == NL80211_IFTYPE_MONITOR) monitor_vif = true; ath12k_dbg(ab, ATH12K_DBG_MAC, @@ -7067,29 +7447,6 @@ ath12k_mac_update_vif_chan(struct ath12k *ar, vifs[i].old_ctx->def.width, vifs[i].new_ctx->def.width); - if (WARN_ON(!arvif->is_started)) - continue; - - if (WARN_ON(!arvif->is_up)) - continue; - - ret = ath12k_wmi_vdev_down(ar, arvif->vdev_id); - if (ret) { - ath12k_warn(ab, "failed to down vdev %d: %d\n", - arvif->vdev_id, ret); - continue; - } - } - - /* All relevant vdevs are downed and associated channel resources - * should be available for the channel switch now. 
- */ - - /* TODO: Update ar->rx_channel */ - - for (i = 0; i < n_vifs; i++) { - arvif = ath12k_vif_to_arvif(vifs[i].vif); - if (WARN_ON(!arvif->is_started)) continue; @@ -7125,8 +7482,16 @@ ath12k_mac_update_vif_chan(struct ath12k *ar, ath12k_warn(ab, "failed to update bcn tmpl during csa: %d\n", ret); - ret = ath12k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, - arvif->bssid); + memset(&params, 0, sizeof(params)); + params.vdev_id = arvif->vdev_id; + params.aid = arvif->aid; + params.bssid = arvif->bssid; + if (vif->mbssid_tx_vif) { + params.tx_bssid = ath12k_vif_to_arvif(vif->mbssid_tx_vif)->bssid; + params.nontx_profile_idx = vif->bss_conf.bssid_index; + params.nontx_profile_cnt = 1 << vif->bss_conf.bssid_indicator; + } + ret = ath12k_wmi_vdev_up(arvif->ar, &params); if (ret) { ath12k_warn(ab, "failed to bring vdev up %d: %d\n", arvif->vdev_id, ret); @@ -7259,7 +7624,6 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, struct ath12k_base *ab; struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); int ret; - struct ath12k_wmi_peer_create_arg param; /* For multi radio wiphy, the vdev was not created during add_interface * create now since we have a channel ctx now to assign to a specific ar/fw @@ -7295,21 +7659,6 @@ ath12k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, goto out; } - if (ab->hw_params->vdev_start_delay && - arvif->vdev_type != WMI_VDEV_TYPE_AP && - arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) { - param.vdev_id = arvif->vdev_id; - param.peer_type = WMI_PEER_TYPE_DEFAULT; - param.peer_addr = ar->mac_addr; - - ret = ath12k_peer_create(ar, arvif, NULL, &param); - if (ret) { - ath12k_warn(ab, "failed to create peer after vdev start delay: %d", - ret); - goto out; - } - } - if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ret = ath12k_mac_monitor_start(ar); if (ret) @@ -7371,11 +7720,6 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, WARN_ON(!arvif->is_started); - if (ab->hw_params->vdev_start_delay && - arvif->vdev_type == WMI_VDEV_TYPE_MONITOR && - ath12k_peer_find_by_addr(ab, ar->mac_addr)) - ath12k_peer_delete(ar, arvif->vdev_id, ar->mac_addr); - if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) { ret = ath12k_mac_monitor_stop(ar); if (ret) { @@ -7386,7 +7730,8 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, arvif->is_started = false; } - if (arvif->vdev_type != WMI_VDEV_TYPE_STA) { + if (arvif->vdev_type != WMI_VDEV_TYPE_STA && + arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) { ath12k_bss_disassoc(ar, arvif); ret = ath12k_mac_vdev_stop(arvif); if (ret) @@ -7395,10 +7740,6 @@ ath12k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, } arvif->is_started = false; - if (ab->hw_params->vdev_start_delay && - arvif->vdev_type == WMI_VDEV_TYPE_MONITOR) - ath12k_wmi_vdev_down(ar, arvif->vdev_id); - if (arvif->vdev_type != WMI_VDEV_TYPE_MONITOR && ar->num_started_vdevs == 1 && ar->monitor_vdev_created) ath12k_mac_monitor_stop(ar); @@ -7920,26 +8261,33 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw, struct ath12k *ar; struct ath12k_base *ab; struct ath12k_vif *arvif; - int recovery_count; + int recovery_count, i; if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) return; - ar = ath12k_ah_to_ar(ah, 0); - ab = ar->ab; + guard(mutex)(&ah->hw_mutex); - mutex_lock(&ar->conf_mutex); + if (ah->state != ATH12K_HW_STATE_RESTARTED) + return; + + ah->state = ATH12K_HW_STATE_ON; + ieee80211_wake_queues(hw); + + for_each_ar(ah, ar, i) { + mutex_lock(&ar->conf_mutex); + + ab = ar->ab; - if (ar->state == ATH12K_STATE_RESTARTED) { ath12k_warn(ar->ab, "pdev
%d successfully recovered\n", ar->pdev->pdev_id); - ar->state = ATH12K_STATE_ON; - ieee80211_wake_queues(hw); if (ab->is_reset) { recovery_count = atomic_inc_return(&ab->recovery_count); + ath12k_dbg(ab, ATH12K_DBG_BOOT, "recovery count %d\n", recovery_count); + /* When there are multiple radios in an SOC, * the recovery has to be done for each radio */ @@ -7958,6 +8306,7 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw, arvif->key_cipher, arvif->is_up, arvif->vdev_type); + /* After trigger disconnect, then upper layer will * trigger connect again, then the PN number of * upper layer will be reset to keep up with AP @@ -7967,13 +8316,14 @@ ath12k_mac_op_reconfig_complete(struct ieee80211_hw *hw, arvif->vdev_type == WMI_VDEV_TYPE_STA && arvif->vdev_subtype == WMI_VDEV_SUBTYPE_NONE) { ieee80211_hw_restart_disconnect(arvif->vif); + ath12k_dbg(ab, ATH12K_DBG_BOOT, "restart disconnect\n"); } } - } - mutex_unlock(&ar->conf_mutex); + mutex_unlock(&ar->conf_mutex); + } } static void @@ -8026,6 +8376,13 @@ static int ath12k_mac_op_get_survey(struct ieee80211_hw *hw, int idx, if (!sband) sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; + if (sband && idx >= sband->n_channels) { + idx -= sband->n_channels; + sband = NULL; + } + + if (!sband) + sband = hw->wiphy->bands[NL80211_BAND_6GHZ]; if (!sband || idx >= sband->n_channels) return -ENOENT; @@ -8124,12 +8481,68 @@ static int ath12k_mac_op_remain_on_channel(struct ieee80211_hw *hw, struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); struct ath12k_hw *ah = ath12k_hw_to_ah(hw); struct ath12k_wmi_scan_req_arg arg; - struct ath12k *ar; + struct ath12k *ar, *prev_ar; u32 scan_time_msec; + bool create = true; int ret; - ar = ath12k_ah_to_ar(ah, 0); + if (ah->num_radio == 1) { + WARN_ON(!arvif->is_created); + ar = ath12k_ah_to_ar(ah, 0); + goto scan; + } + ar = ath12k_mac_select_scan_device(hw, vif, chan->center_freq); + if (!ar) + return -EINVAL; + + /* If the vif is already assigned to a specific vdev of an ar, + * check whether its already started, vdev which is started + * are not allowed to switch to a new radio. + * If the vdev is not started, but was earlier created on a + * different ar, delete that vdev and create a new one. 
We don't + delete at the scan stop as an optimization to avoid redundant + delete-create vdevs for the same ar, in case the request is + always on the same band for the vif. + */ + if (arvif->is_created) { + if (WARN_ON(!arvif->ar)) + return -EINVAL; + + if (ar != arvif->ar && arvif->is_started) + return -EBUSY; + + if (ar != arvif->ar) { + /* backup the previously used ar ptr, since the vdev delete + * would assign the arvif->ar to NULL after the call + */ + prev_ar = arvif->ar; + mutex_lock(&prev_ar->conf_mutex); + ret = ath12k_mac_vdev_delete(prev_ar, vif); + mutex_unlock(&prev_ar->conf_mutex); + if (ret) { + ath12k_warn(prev_ar->ab, + "unable to delete scan vdev for roc: %d\n", + ret); + return ret; + } + } else { + create = false; + } + } + + if (create) { + mutex_lock(&ar->conf_mutex); + ret = ath12k_mac_vdev_create(ar, vif); + mutex_unlock(&ar->conf_mutex); + if (ret) { + ath12k_warn(ar->ab, "unable to create scan vdev for roc: %d\n", + ret); + return -EINVAL; + } + } + +scan: mutex_lock(&ar->conf_mutex); spin_lock_bh(&ar->data_lock); @@ -8211,6 +8624,40 @@ exit: return ret; } +static void ath12k_mac_op_set_rekey_data(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct cfg80211_gtk_rekey_data *data) +{ + struct ath12k_vif *arvif = ath12k_vif_to_arvif(vif); + struct ath12k_rekey_data *rekey_data = &arvif->rekey_data; + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar = ath12k_ah_to_ar(ah, 0); + + ath12k_dbg(ar->ab, ATH12K_DBG_MAC, "mac set rekey data vdev %d\n", + arvif->vdev_id); + + mutex_lock(&ar->conf_mutex); + + memcpy(rekey_data->kck, data->kck, NL80211_KCK_LEN); + memcpy(rekey_data->kek, data->kek, NL80211_KEK_LEN); + + /* The supplicant works on big-endian, the firmware expects it on + * little endian. + */ + rekey_data->replay_ctr = get_unaligned_be64(data->replay_ctr); + + arvif->rekey_data.enable_offload = true; + + ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "kck", NULL, + rekey_data->kck, NL80211_KCK_LEN); + ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "kek", NULL, + rekey_data->kek, NL80211_KEK_LEN); + ath12k_dbg_dump(ar->ab, ATH12K_DBG_MAC, "replay ctr", NULL, + &rekey_data->replay_ctr, sizeof(rekey_data->replay_ctr)); + + mutex_unlock(&ar->conf_mutex); +} + static const struct ieee80211_ops ath12k_ops = { .tx = ath12k_mac_op_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, @@ -8226,6 +8673,7 @@ static const struct ieee80211_ops ath12k_ops = { .hw_scan = ath12k_mac_op_hw_scan, .cancel_hw_scan = ath12k_mac_op_cancel_hw_scan, .set_key = ath12k_mac_op_set_key, + .set_rekey_data = ath12k_mac_op_set_rekey_data, .sta_state = ath12k_mac_op_sta_state, .sta_set_txpwr = ath12k_mac_op_sta_set_txpwr, .sta_rc_update = ath12k_mac_op_sta_rc_update, @@ -8247,6 +8695,12 @@ static const struct ieee80211_ops ath12k_ops = { .sta_statistics = ath12k_mac_op_sta_statistics, .remain_on_channel = ath12k_mac_op_remain_on_channel, .cancel_remain_on_channel = ath12k_mac_op_cancel_remain_on_channel, + +#ifdef CONFIG_PM + .suspend = ath12k_wow_op_suspend, + .resume = ath12k_wow_op_resume, + .set_wakeup = ath12k_wow_op_set_wakeup, +#endif }; static void ath12k_mac_update_ch_list(struct ath12k *ar, @@ -8488,19 +8942,23 @@ static int ath12k_mac_setup_iface_combinations(struct ath12k_hw *ah) static const u8 ath12k_if_types_ext_capa[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, }; static const u8 ath12k_if_types_ext_capa_sta[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [2] =
WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT, }; static const u8 ath12k_if_types_ext_capa_ap[] = { [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, + [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, [9] = WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT, + [10] = WLAN_EXT_CAPA11_EMA_SUPPORT, }; static const struct wiphy_iftype_ext_capab ath12k_iftypes_ext_capa[] = { @@ -8540,8 +8998,10 @@ static void ath12k_mac_hw_unregister(struct ath12k_hw *ah) struct ath12k *ar; int i; - for_each_ar(ah, ar, i) + for_each_ar(ah, ar, i) { cancel_work_sync(&ar->regd_update_work); + ath12k_debugfs_unregister(ar); + } ieee80211_unregister_hw(hw); @@ -8605,6 +9065,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) u32 ht_cap = U32_MAX, antennas_rx = 0, antennas_tx = 0; bool is_6ghz = false, is_raw_mode = false, is_monitor_disable = false; u8 *mac_addr = NULL; + u8 mbssid_max_interfaces = 0; wiphy->max_ap_assoc_sta = 0; @@ -8648,6 +9109,8 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) mac_addr = ar->mac_addr; else mac_addr = ab->mac_addr; + + mbssid_max_interfaces += TARGET_NUM_VDEVS; } wiphy->available_antennas_rx = antennas_rx; @@ -8685,7 +9148,7 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) ieee80211_hw_set(hw, SUPPORTS_TX_FRAG); ieee80211_hw_set(hw, REPORTS_LOW_ACK); - if (ht_cap & WMI_HT_CAP_ENABLED) { + if ((ht_cap & WMI_HT_CAP_ENABLED) || ar->supports_6ghz) { ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); @@ -8700,7 +9163,8 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) * for each band for a dual band capable radio. It will be tricky to * handle it when the ht capability different for each band. 
*/ - if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS) + if (ht_cap & WMI_HT_CAP_DYNAMIC_SMPS || + (ar->supports_6ghz && ab->hw_params->supports_dynamic_smps_6ghz)) wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; @@ -8739,6 +9203,9 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) wiphy->iftype_ext_capab = ath12k_iftypes_ext_capa; wiphy->num_iftype_ext_capab = ARRAY_SIZE(ath12k_iftypes_ext_capa); + wiphy->mbssid_max_interfaces = mbssid_max_interfaces; + wiphy->ema_max_profile_periodicity = TARGET_EMA_MAX_PROFILE_PERIOD; + if (is_6ghz) { wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_FILS_DISCOVERY); @@ -8756,6 +9223,24 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); } + if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) { + wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS; + wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS; + wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH; + wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS; + wiphy->max_sched_scan_plan_interval = + WMI_PNO_MAX_SCHED_SCAN_PLAN_INT; + wiphy->max_sched_scan_plan_iterations = + WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS; + wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR; + } + + ret = ath12k_wow_init(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to init wow: %d\n", ret); + goto err_free_if_combs; + } + ret = ieee80211_register_hw(hw); if (ret) { ath12k_err(ab, "ieee80211 registration failed: %d\n", ret); @@ -8777,13 +9262,16 @@ static int ath12k_mac_hw_register(struct ath12k_hw *ah) ath12k_err(ar->ab, "ath12k regd update failed: %d\n", ret); goto err_unregister_hw; } - } - ath12k_debugfs_register(ar); + ath12k_debugfs_register(ar); + } return 0; err_unregister_hw: + for_each_ar(ah, ar, i) + ath12k_debugfs_unregister(ar); + ieee80211_unregister_hw(hw); err_free_if_combs: @@ -8842,7 +9330,6 @@ static void ath12k_mac_setup(struct ath12k *ar) INIT_WORK(&ar->wmi_mgmt_tx_work, ath12k_mgmt_over_wmi_tx_work); skb_queue_head_init(&ar->wmi_mgmt_tx_queue); - clear_bit(ATH12K_FLAG_MONITOR_ENABLED, &ar->monitor_flags); } int ath12k_mac_register(struct ath12k_base *ab) @@ -8919,6 +9406,8 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab, ah->hw = hw; ah->num_radio = num_pdev_map; + mutex_init(&ah->hw_mutex); + for (i = 0; i < num_pdev_map; i++) { ab = pdev_map[i].ab; pdev_idx = pdev_map[i].pdev_idx; @@ -8927,7 +9416,7 @@ static struct ath12k_hw *ath12k_mac_hw_allocate(struct ath12k_base *ab, ar = ath12k_ah_to_ar(ah, i); ar->ah = ah; ar->ab = ab; - ar->hw_link_id = i; + ar->hw_link_id = pdev->hw_link_id; ar->pdev = pdev; ar->pdev_idx = pdev_idx; pdev->ar = ar; @@ -9005,3 +9494,34 @@ err: return ret; } + +int ath12k_mac_vif_set_keepalive(struct ath12k_vif *arvif, + enum wmi_sta_keepalive_method method, + u32 interval) +{ + struct wmi_sta_keepalive_arg arg = {}; + struct ath12k *ar = arvif->ar; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + return 0; + + if (!test_bit(WMI_TLV_SERVICE_STA_KEEP_ALIVE, ar->ab->wmi_ab.svc_map)) + return 0; + + arg.vdev_id = arvif->vdev_id; + arg.enabled = 1; + arg.method = method; + arg.interval = interval; + + ret = ath12k_wmi_sta_keepalive(ar, &arg); + if (ret) { + ath12k_warn(ar->ab, "failed to set keepalive on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + + return 0; +} diff --git a/drivers/net/wireless/ath/ath12k/mac.h b/drivers/net/wireless/ath/ath12k/mac.h index 
69fd282b9dd3..5efbb6822628 100644 --- a/drivers/net/wireless/ath/ath12k/mac.h +++ b/drivers/net/wireless/ath/ath12k/mac.h @@ -9,6 +9,7 @@ #include #include +#include "wmi.h" struct ath12k; struct ath12k_base; @@ -81,5 +82,9 @@ int ath12k_mac_rfkill_config(struct ath12k *ar); int ath12k_mac_wait_tx_complete(struct ath12k *ar); void ath12k_mac_handle_beacon(struct ath12k *ar, struct sk_buff *skb); void ath12k_mac_handle_beacon_miss(struct ath12k *ar, u32 vdev_id); +int ath12k_mac_vif_set_keepalive(struct ath12k_vif *arvif, + enum wmi_sta_keepalive_method method, + u32 interval); +u8 ath12k_mac_get_target_pdev_id(struct ath12k *ar); #endif diff --git a/drivers/net/wireless/ath/ath12k/mhi.c b/drivers/net/wireless/ath/ath12k/mhi.c index fef2f7622033..df96b0f91f54 100644 --- a/drivers/net/wireless/ath/ath12k/mhi.c +++ b/drivers/net/wireless/ath/ath12k/mhi.c @@ -16,6 +16,7 @@ #define MHI_TIMEOUT_DEFAULT_MS 90000 #define OTP_INVALID_BOARD_ID 0xFFFF #define OTP_VALID_DUALMAC_BOARD_ID_MASK 0x1000 +#define MHI_CB_INVALID 0xff static const struct mhi_channel_config ath12k_mhi_channels_qcn9274[] = { { @@ -268,6 +269,7 @@ static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl, enum mhi_callback cb) { struct ath12k_base *ab = dev_get_drvdata(mhi_cntrl->cntrl_dev); + struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); ath12k_dbg(ab, ATH12K_DBG_BOOT, "mhi notify status reason %s\n", ath12k_mhi_op_callback_to_str(cb)); @@ -277,12 +279,20 @@ static void ath12k_mhi_op_status_cb(struct mhi_controller *mhi_cntrl, ath12k_warn(ab, "firmware crashed: MHI_CB_SYS_ERROR\n"); break; case MHI_CB_EE_RDDM: + if (ab_pci->mhi_pre_cb == MHI_CB_EE_RDDM) { + ath12k_dbg(ab, ATH12K_DBG_BOOT, + "do not queue again for consecutive RDDM event\n"); + break; + } + if (!(test_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags))) queue_work(ab->workqueue_aux, &ab->reset_work); break; default: break; } + + ab_pci->mhi_pre_cb = cb; } static int ath12k_mhi_op_read_reg(struct mhi_controller *mhi_cntrl, @@ -313,6 +323,7 @@ int ath12k_mhi_register(struct ath12k_pci *ab_pci) if (!mhi_ctrl) return -ENOMEM; + ab_pci->mhi_pre_cb = MHI_CB_INVALID; ab_pci->mhi_ctrl = mhi_ctrl; mhi_ctrl->cntrl_dev = ab->dev; mhi_ctrl->regs = ab->mem; diff --git a/drivers/net/wireless/ath/ath12k/pci.c b/drivers/net/wireless/ath/ath12k/pci.c index 16af046c33d9..876c029f58f6 100644 --- a/drivers/net/wireless/ath/ath12k/pci.c +++ b/drivers/net/wireless/ath/ath12k/pci.c @@ -350,6 +350,7 @@ static void ath12k_pci_free_ext_irq(struct ath12k_base *ab) free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); netif_napi_del(&irq_grp->napi); + free_netdev(irq_grp->napi_ndev); } } @@ -560,8 +561,9 @@ static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg) static int ath12k_pci_ext_irq_config(struct ath12k_base *ab) { struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); - int i, j, ret, num_vectors = 0; + int i, j, n, ret, num_vectors = 0; u32 user_base_data = 0, base_vector = 0, base_idx; + struct ath12k_ext_irq_grp *irq_grp; base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX; ret = ath12k_pci_get_user_msi_assignment(ab, "DP", @@ -572,13 +574,18 @@ static int ath12k_pci_ext_irq_config(struct ath12k_base *ab) return ret; for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { - struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + irq_grp = &ab->ext_irq_grp[i]; u32 num_irq = 0; irq_grp->ab = ab; irq_grp->grp_id = i; - init_dummy_netdev(&irq_grp->napi_ndev); - netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi, + irq_grp->napi_ndev = alloc_netdev_dummy(0); + if 
(!irq_grp->napi_ndev) { + ret = -ENOMEM; + goto fail_allocate; + } + + netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi, ath12k_pci_ext_grp_napi_poll); if (ab->hw_params->ring_mask->tx[i] || @@ -611,13 +618,23 @@ static int ath12k_pci_ext_irq_config(struct ath12k_base *ab) if (ret) { ath12k_err(ab, "failed request irq %d: %d\n", vector, ret); - return ret; + goto fail_request; } } ath12k_pci_ext_grp_disable(irq_grp); } return 0; + +fail_request: + /* The i-th group's napi_ndev was allocated successfully, so free it as well */ + i += 1; +fail_allocate: + for (n = 0; n < i; n++) { + irq_grp = &ab->ext_irq_grp[n]; + free_netdev(irq_grp->napi_ndev); + } + return ret; } static int ath12k_pci_set_irq_affinity_hint(struct ath12k_pci *ab_pci, @@ -1090,14 +1107,14 @@ void ath12k_pci_ext_irq_enable(struct ath12k_base *ab) { int i; - set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); - for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; napi_enable(&irq_grp->napi); ath12k_pci_ext_grp_enable(irq_grp); } + + set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); } void ath12k_pci_ext_irq_disable(struct ath12k_base *ab) @@ -1285,6 +1302,13 @@ void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend) ath12k_pci_sw_reset(ab_pci->ab, false); } +static int ath12k_pci_panic_handler(struct ath12k_base *ab) +{ + ath12k_pci_sw_reset(ab, false); + + return NOTIFY_OK; +} + static const struct ath12k_hif_ops ath12k_pci_hif_ops = { .start = ath12k_pci_start, .stop = ath12k_pci_stop, @@ -1302,6 +1326,7 @@ static const struct ath12k_hif_ops ath12k_pci_hif_ops = { .ce_irq_enable = ath12k_pci_hif_ce_irq_enable, .ce_irq_disable = ath12k_pci_hif_ce_irq_disable, .get_ce_msi_idx = ath12k_pci_get_ce_msi_idx, + .panic_handler = ath12k_pci_panic_handler, }; static diff --git a/drivers/net/wireless/ath/ath12k/pci.h b/drivers/net/wireless/ath/ath12k/pci.h index 6186a78038cf..31584a7ad80e 100644 --- a/drivers/net/wireless/ath/ath12k/pci.h +++ b/drivers/net/wireless/ath/ath12k/pci.h @@ -104,6 +104,7 @@ struct ath12k_pci { struct mhi_controller *mhi_ctrl; const struct ath12k_msi_config *msi_config; unsigned long mhi_state; + enum mhi_callback mhi_pre_cb; u32 register_window; /* protects register_window above */ diff --git a/drivers/net/wireless/ath/ath12k/qmi.c b/drivers/net/wireless/ath/ath12k/qmi.c index 5484112859a6..b93ce9f87f61 100644 --- a/drivers/net/wireless/ath/ath12k/qmi.c +++ b/drivers/net/wireless/ath/ath12k/qmi.c @@ -2041,7 +2041,7 @@ static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab, req->mlo_capable_valid = 1; req->mlo_capable = 1; req->mlo_chip_id_valid = 1; - req->mlo_chip_id = 0; + req->mlo_chip_id = ab->device_id; req->mlo_group_id_valid = 1; req->mlo_group_id = 0; req->max_mlo_peer_valid = 1; @@ -2053,7 +2053,7 @@ static void ath12k_host_cap_parse_mlo(struct ath12k_base *ab, req->mlo_num_chips = 1; info = &req->mlo_chip_info[0]; - info->chip_id = 0; + info->chip_id = ab->device_id; info->num_local_links = ab->qmi.num_radios; for (i = 0; i < info->num_local_links; i++) { @@ -2503,7 +2503,7 @@ static int ath12k_qmi_request_target_cap(struct ath12k_base *ab) ab->qmi.dev_mem[i].size = resp.dev_mem[i].size; ath12k_dbg(ab, ATH12K_DBG_QMI, - "devmem [%d] start ox%llx size %llu\n", i, + "devmem [%d] start 0x%llx size %llu\n", i, ab->qmi.dev_mem[i].start, ab->qmi.dev_mem[i].size); } @@ -2538,7 +2538,7 @@ static int ath12k_qmi_load_file_target_mem(struct ath12k_base *ab, struct qmi_wlanfw_bdf_download_resp_msg_v01 resp = {}; struct qmi_txn txn; const u8 *temp =
data; - int ret; + int ret = 0; u32 remaining = len; req = kzalloc(sizeof(*req), GFP_KERNEL); diff --git a/drivers/net/wireless/ath/ath12k/reg.c b/drivers/net/wireless/ath/ath12k/reg.c index fbf38044938c..439d61f284d8 100644 --- a/drivers/net/wireless/ath/ath12k/reg.c +++ b/drivers/net/wireless/ath/ath12k/reg.c @@ -206,9 +206,9 @@ static void ath12k_copy_regd(struct ieee80211_regdomain *regd_orig, int ath12k_regd_update(struct ath12k *ar, bool init) { - struct ieee80211_hw *hw = ath12k_ar_to_hw(ar); + struct ath12k_hw *ah = ath12k_ar_to_ah(ar); + struct ieee80211_hw *hw = ah->hw; struct ieee80211_regdomain *regd, *regd_copy = NULL; - struct ath12k_hw *ah = ar->ah; int ret, regd_len, pdev_id; struct ath12k_base *ab; int i; @@ -286,19 +286,20 @@ int ath12k_regd_update(struct ath12k *ar, bool init) if (ret) goto err; + if (ah->state != ATH12K_HW_STATE_ON) + goto skip; + ah->regd_updated = true; /* Apply the new regd to all the radios, this is expected to be received only once * since we check for ah->regd_updated and allow here only once. */ for_each_ar(ah, ar, i) { - if (ar->state == ATH12K_STATE_ON) { - ab = ar->ab; - ret = ath12k_reg_update_chan_list(ar); - if (ret) - goto err; - } + ab = ar->ab; + ret = ath12k_reg_update_chan_list(ar); + if (ret) + goto err; } - +skip: return 0; err: ath12k_warn(ab, "failed to perform regd update : %d\n", ret); diff --git a/drivers/net/wireless/ath/ath12k/wmi.c b/drivers/net/wireless/ath/ath12k/wmi.c index 7a52d2082b79..9f6be557365e 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.c +++ b/drivers/net/wireless/ath/ath12k/wmi.c @@ -228,9 +228,12 @@ void ath12k_wmi_init_qcn9274(struct ath12k_base *ab, config->peer_map_unmap_version = 0x32; config->twt_ap_pdev_count = ab->num_radios; config->twt_ap_sta_count = 1000; + config->ema_max_vap_cnt = ab->num_radios; + config->ema_max_profile_period = TARGET_EMA_MAX_PROFILE_PERIOD; + config->beacon_tx_offload_max_vdev += config->ema_max_vap_cnt; if (test_bit(WMI_TLV_SERVICE_PEER_METADATA_V1A_V1B_SUPPORT, ab->wmi_ab.svc_map)) - config->dp_peer_meta_data_ver = TARGET_RX_PEER_METADATA_VER_V1B; + config->peer_metadata_ver = ATH12K_PEER_METADATA_V1B; } void ath12k_wmi_init_wcn7850(struct ath12k_base *ab, @@ -497,6 +500,7 @@ ath12k_pull_mac_phy_cap_svc_ready_ext(struct ath12k_wmi_pdev *wmi_handle, mac_caps = wmi_mac_phy_caps + phy_idx; pdev->pdev_id = ath12k_wmi_mac_phy_get_pdev_id(mac_caps); + pdev->hw_link_id = ath12k_wmi_mac_phy_get_hw_link_id(mac_caps); pdev_cap->supported_bands |= le32_to_cpu(mac_caps->supported_bands); pdev_cap->ampdu_density = le32_to_cpu(mac_caps->ampdu_density); @@ -841,6 +845,8 @@ int ath12k_wmi_vdev_create(struct ath12k *ar, u8 *macaddr, cmd->vdev_subtype = cpu_to_le32(args->subtype); cmd->num_cfg_txrx_streams = cpu_to_le32(WMI_NUM_SUPPORTED_BAND_MAX); cmd->pdev_id = cpu_to_le32(args->pdev_id); + cmd->mbssid_flags = cpu_to_le32(args->mbssid_flags); + cmd->mbssid_tx_vdev_id = cpu_to_le32(args->mbssid_tx_vdev_id); cmd->vdev_stats_id = cpu_to_le32(args->if_stats_id); ether_addr_copy(cmd->vdev_macaddr.addr, macaddr); @@ -1046,6 +1052,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg, cmd->he_ops = cpu_to_le32(arg->he_ops); cmd->punct_bitmap = cpu_to_le32(arg->punct_bitmap); cmd->mbssid_flags = cpu_to_le32(arg->mbssid_flags); + cmd->mbssid_tx_vdev_id = cpu_to_le32(arg->mbssid_tx_vdev_id); if (!restart) { if (arg->ssid) { @@ -1097,7 +1104,7 @@ int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg, return ret; } -int ath12k_wmi_vdev_up(struct 
ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid) +int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params) { struct ath12k_wmi_pdev *wmi = ar->wmi; struct wmi_vdev_up_cmd *cmd; @@ -1112,14 +1119,20 @@ int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, const u8 *bssid) cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_VDEV_UP_CMD, sizeof(*cmd)); - cmd->vdev_id = cpu_to_le32(vdev_id); - cmd->vdev_assoc_id = cpu_to_le32(aid); + cmd->vdev_id = cpu_to_le32(params->vdev_id); + cmd->vdev_assoc_id = cpu_to_le32(params->aid); - ether_addr_copy(cmd->vdev_bssid.addr, bssid); + ether_addr_copy(cmd->vdev_bssid.addr, params->bssid); + + if (params->tx_bssid) { + ether_addr_copy(cmd->tx_vdev_bssid.addr, params->tx_bssid); + cmd->nontx_profile_idx = cpu_to_le32(params->nontx_profile_idx); + cmd->nontx_profile_cnt = cpu_to_le32(params->nontx_profile_cnt); + } ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "WMI mgmt vdev up id 0x%x assoc id %d bssid %pM\n", - vdev_id, aid, bssid); + params->vdev_id, params->aid, params->bssid); ret = ath12k_wmi_cmd_send(wmi, skb, WMI_VDEV_UP_CMDID); if (ret) { @@ -1776,13 +1789,15 @@ int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id, int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id, struct ieee80211_mutable_offsets *offs, - struct sk_buff *bcn) + struct sk_buff *bcn, + struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args) { struct ath12k_wmi_pdev *wmi = ar->wmi; struct wmi_bcn_tmpl_cmd *cmd; struct ath12k_wmi_bcn_prb_info_params *bcn_prb_info; struct wmi_tlv *tlv; struct sk_buff *skb; + u32 ema_params = 0; void *ptr; int ret, len; size_t aligned_len = roundup(bcn->len, 4); @@ -1801,6 +1816,16 @@ int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id, cmd->csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[0]); cmd->ext_csa_switch_count_offset = cpu_to_le32(offs->cntdwn_counter_offs[1]); cmd->buf_len = cpu_to_le32(bcn->len); + cmd->mbssid_ie_offset = cpu_to_le32(offs->mbssid_off); + if (ema_args) { + u32p_replace_bits(&ema_params, ema_args->bcn_cnt, WMI_EMA_BEACON_CNT); + u32p_replace_bits(&ema_params, ema_args->bcn_index, WMI_EMA_BEACON_IDX); + if (ema_args->bcn_index == 0) + u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_FIRST); + if (ema_args->bcn_index + 1 == ema_args->bcn_cnt) + u32p_replace_bits(&ema_params, 1, WMI_EMA_BEACON_LAST); + cmd->ema_params = cpu_to_le32(ema_params); + } ptr = skb->data + sizeof(*cmd); @@ -3473,11 +3498,13 @@ ath12k_wmi_copy_resource_config(struct ath12k_wmi_resource_config_params *wmi_cf wmi_cfg->sched_params = cpu_to_le32(tg_cfg->sched_params); wmi_cfg->twt_ap_pdev_count = cpu_to_le32(tg_cfg->twt_ap_pdev_count); wmi_cfg->twt_ap_sta_count = cpu_to_le32(tg_cfg->twt_ap_sta_count); - wmi_cfg->flags2 = le32_encode_bits(tg_cfg->dp_peer_meta_data_ver, + wmi_cfg->flags2 = le32_encode_bits(tg_cfg->peer_metadata_ver, WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION); - wmi_cfg->host_service_flags = cpu_to_le32(tg_cfg->is_reg_cc_ext_event_supported << WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT); + wmi_cfg->ema_max_vap_cnt = cpu_to_le32(tg_cfg->ema_max_vap_cnt); + wmi_cfg->ema_max_profile_period = cpu_to_le32(tg_cfg->ema_max_profile_period); + wmi_cfg->flags2 |= cpu_to_le32(WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET); } static int ath12k_init_cmd_send(struct ath12k_wmi_pdev *wmi, @@ -3690,6 +3717,7 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab) arg.res_cfg.is_reg_cc_ext_event_supported = true; ab->hw_params->wmi_init(ab, &arg.res_cfg); + ab->wow.wmi_conf_rx_decap_mode = 
arg.res_cfg.rx_decap_mode; arg.num_mem_chunks = wmi_ab->num_mem_chunks; arg.hw_mode_id = wmi_ab->preferred_hw_mode; @@ -3701,6 +3729,8 @@ int ath12k_wmi_cmd_init(struct ath12k_base *ab) arg.num_band_to_mac = ab->num_radios; ath12k_fill_band_to_mac_param(ab, arg.band_to_mac); + ab->dp.peer_metadata_ver = arg.res_cfg.peer_metadata_ver; + return ath12k_init_cmd_send(&wmi_ab->wmi[0], &arg); } @@ -3808,7 +3838,7 @@ int ath12k_wmi_pdev_dma_ring_cfg(struct ath12k *ar, cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_DMA_RING_CFG_REQ, sizeof(*cmd)); - cmd->pdev_id = cpu_to_le32(DP_SW2HW_MACID(arg->pdev_id)); + cmd->pdev_id = cpu_to_le32(arg->pdev_id); cmd->module_id = cpu_to_le32(arg->module_id); cmd->base_paddr_lo = cpu_to_le32(arg->base_paddr_lo); cmd->base_paddr_hi = cpu_to_le32(arg->base_paddr_hi); @@ -5693,7 +5723,7 @@ static int ath12k_reg_chan_list_event(struct ath12k_base *ab, struct sk_buff *sk * event. Otherwise, it goes to fallback. */ if (ab->hw_params->single_pdev_only && - pdev_idx < ab->hw_params->num_rxmda_per_pdev) + pdev_idx < ab->hw_params->num_rxdma_per_pdev) goto mem_free; else goto fallback; @@ -6022,8 +6052,10 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb) if (rx_ev.status & WMI_RX_STATUS_ERR_MIC) status->flag |= RX_FLAG_MMIC_ERROR; - if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ) { + if (rx_ev.chan_freq >= ATH12K_MIN_6G_FREQ && + rx_ev.chan_freq <= ATH12K_MAX_6G_FREQ) { status->band = NL80211_BAND_6GHZ; + status->freq = rx_ev.chan_freq; } else if (rx_ev.channel >= 1 && rx_ev.channel <= 14) { status->band = NL80211_BAND_2GHZ; } else if (rx_ev.channel >= 36 && rx_ev.channel <= ATH12K_MAX_5G_CHAN) { @@ -6044,8 +6076,10 @@ static void ath12k_mgmt_rx_event(struct ath12k_base *ab, struct sk_buff *skb) sband = &ar->mac.sbands[status->band]; - status->freq = ieee80211_channel_to_frequency(rx_ev.channel, - status->band); + if (status->band != NL80211_BAND_6GHZ) + status->freq = ieee80211_channel_to_frequency(rx_ev.channel, + status->band); + status->signal = rx_ev.snr + ATH12K_DEFAULT_NOISE_FLOOR; status->rate_idx = ath12k_mac_bitrate_to_idx(sband, rx_ev.rate / 100); @@ -6999,6 +7033,116 @@ exit: kfree(tb); } +static int ath12k_wmi_wow_wakeup_host_parse(struct ath12k_base *ab, + u16 tag, u16 len, + const void *ptr, void *data) +{ + const struct wmi_wow_ev_pg_fault_param *pf_param; + const struct wmi_wow_ev_param *param; + struct wmi_wow_ev_arg *arg = data; + int pf_len; + + switch (tag) { + case WMI_TAG_WOW_EVENT_INFO: + param = ptr; + arg->wake_reason = le32_to_cpu(param->wake_reason); + ath12k_dbg(ab, ATH12K_DBG_WMI, "wow wakeup host reason %d %s\n", + arg->wake_reason, wow_reason(arg->wake_reason)); + break; + + case WMI_TAG_ARRAY_BYTE: + if (arg && arg->wake_reason == WOW_REASON_PAGE_FAULT) { + pf_param = ptr; + pf_len = le32_to_cpu(pf_param->len); + if (pf_len > len - sizeof(pf_len) || + pf_len < 0) { + ath12k_warn(ab, "invalid wow reason page fault buffer len %d\n", + pf_len); + return -EINVAL; + } + ath12k_dbg(ab, ATH12K_DBG_WMI, "wow_reason_page_fault len %d\n", + pf_len); + ath12k_dbg_dump(ab, ATH12K_DBG_WMI, + "wow_reason_page_fault packet present", + "wow_pg_fault ", + pf_param->data, + pf_len); + } + break; + default: + break; + } + + return 0; +} + +static void ath12k_wmi_event_wow_wakeup_host(struct ath12k_base *ab, struct sk_buff *skb) +{ + struct wmi_wow_ev_arg arg = { }; + int ret; + + ret = ath12k_wmi_tlv_iter(ab, skb->data, skb->len, + ath12k_wmi_wow_wakeup_host_parse, + &arg); + if (ret) { + ath12k_warn(ab, "failed to parse wmi wow
wakeup host event tlv: %d\n", + ret); + return; + } + + complete(&ab->wow.wakeup_completed); +} + +static void ath12k_wmi_gtk_offload_status_event(struct ath12k_base *ab, + struct sk_buff *skb) +{ + const struct wmi_gtk_offload_status_event *ev; + struct ath12k_vif *arvif; + __be64 replay_ctr_be; + u64 replay_ctr; + const void **tb; + int ret; + + tb = ath12k_wmi_tlv_parse_alloc(ab, skb, GFP_ATOMIC); + if (IS_ERR(tb)) { + ret = PTR_ERR(tb); + ath12k_warn(ab, "failed to parse tlv: %d\n", ret); + return; + } + + ev = tb[WMI_TAG_GTK_OFFLOAD_STATUS_EVENT]; + if (!ev) { + ath12k_warn(ab, "failed to fetch gtk offload status ev"); + kfree(tb); + return; + } + + rcu_read_lock(); + arvif = ath12k_mac_get_arvif_by_vdev_id(ab, le32_to_cpu(ev->vdev_id)); + if (!arvif) { + rcu_read_unlock(); + ath12k_warn(ab, "failed to get arvif for vdev_id:%d\n", + le32_to_cpu(ev->vdev_id)); + kfree(tb); + return; + } + + replay_ctr = le64_to_cpu(ev->replay_ctr); + arvif->rekey_data.replay_ctr = replay_ctr; + ath12k_dbg(ab, ATH12K_DBG_WMI, "wmi gtk offload event refresh_cnt %d replay_ctr %llu\n", + le32_to_cpu(ev->refresh_cnt), replay_ctr); + + /* supplicant expects big-endian replay counter */ + replay_ctr_be = cpu_to_be64(replay_ctr); + + ieee80211_gtk_rekey_notify(arvif->vif, arvif->bssid, + (void *)&replay_ctr_be, GFP_ATOMIC); + + rcu_read_unlock(); + + kfree(tb); +} + static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) { struct wmi_cmd_hdr *cmd_hdr; @@ -7119,6 +7263,12 @@ static void ath12k_wmi_op_rx(struct ath12k_base *ab, struct sk_buff *skb) case WMI_DIAG_EVENTID: ath12k_wmi_diag_event(ab, skb); break; + case WMI_WOW_WAKEUP_HOST_EVENTID: + ath12k_wmi_event_wow_wakeup_host(ab, skb); + break; + case WMI_GTK_OFFLOAD_STATUS_EVENTID: + ath12k_wmi_gtk_offload_status_event(ab, skb); + break; /* TODO: Add remaining events */ default: ath12k_dbg(ab, ATH12K_DBG_WMI, "Unknown eventid: 0x%x\n", id); @@ -7328,3 +7478,608 @@ void ath12k_wmi_detach(struct ath12k_base *ab) ath12k_wmi_free_dbring_caps(ab); } + +int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, struct wmi_hw_data_filter_arg *arg) +{ + struct wmi_hw_data_filter_cmd *cmd; + struct sk_buff *skb; + int len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_hw_data_filter_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_HW_DATA_FILTER_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(arg->vdev_id); + cmd->enable = cpu_to_le32(arg->enable ? 
1 : 0); + + /* Set all modes in case of disable */ + if (arg->enable) + cmd->hw_filter_bitmap = cpu_to_le32(arg->hw_filter_bitmap); + else + cmd->hw_filter_bitmap = cpu_to_le32((u32)~0U); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi hw data filter enable %d filter_bitmap 0x%x\n", + arg->enable, arg->hw_filter_bitmap); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_HW_DATA_FILTER_CMDID); +} + +int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar) +{ + struct wmi_wow_host_wakeup_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_wow_host_wakeup_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_HOSTWAKEUP_FROM_SLEEP_CMD, + sizeof(*cmd)); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow host wakeup ind\n"); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID); +} + +int ath12k_wmi_wow_enable(struct ath12k *ar) +{ + struct wmi_wow_enable_cmd *cmd; + struct sk_buff *skb; + int len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_wow_enable_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ENABLE_CMD, + sizeof(*cmd)); + + cmd->enable = cpu_to_le32(1); + cmd->pause_iface_config = cpu_to_le32(WOW_IFACE_PAUSE_ENABLED); + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow enable\n"); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_CMDID); +} + +int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id, + enum wmi_wow_wakeup_event event, + u32 enable) +{ + struct wmi_wow_add_del_event_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_wow_add_del_event_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_DEL_EVT_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->is_add = cpu_to_le32(enable); + cmd->event_bitmap = cpu_to_le32((1 << event)); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add wakeup event %s enable %d vdev_id %d\n", + wow_wakeup_event(event), enable, vdev_id); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID); +} + +int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id, + const u8 *pattern, const u8 *mask, + int pattern_len, int pattern_offset) +{ + struct wmi_wow_add_pattern_cmd *cmd; + struct wmi_wow_bitmap_pattern_params *bitmap; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + size_t len; + + len = sizeof(*cmd) + + sizeof(*tlv) + /* array struct */ + sizeof(*bitmap) + /* bitmap */ + sizeof(*tlv) + /* empty ipv4 sync */ + sizeof(*tlv) + /* empty ipv6 sync */ + sizeof(*tlv) + /* empty magic */ + sizeof(*tlv) + /* empty info timeout */ + sizeof(*tlv) + sizeof(u32); /* ratelimit interval */ + + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + /* cmd */ + ptr = skb->data; + cmd = ptr; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_ADD_PATTERN_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->pattern_id = cpu_to_le32(pattern_id); + cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN); + + ptr += sizeof(*cmd); + + /* bitmap */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, sizeof(*bitmap)); + + ptr += sizeof(*tlv); + + bitmap = ptr; + bitmap->tlv_header = 
ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_BITMAP_PATTERN_T, + sizeof(*bitmap)); + memcpy(bitmap->patternbuf, pattern, pattern_len); + memcpy(bitmap->bitmaskbuf, mask, pattern_len); + bitmap->pattern_offset = cpu_to_le32(pattern_offset); + bitmap->pattern_len = cpu_to_le32(pattern_len); + bitmap->bitmask_len = cpu_to_le32(pattern_len); + bitmap->pattern_id = cpu_to_le32(pattern_id); + + ptr += sizeof(*bitmap); + + /* ipv4 sync */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); + + ptr += sizeof(*tlv); + + /* ipv6 sync */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); + + ptr += sizeof(*tlv); + + /* magic */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, 0); + + ptr += sizeof(*tlv); + + /* pattern info timeout */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, 0); + + ptr += sizeof(*tlv); + + /* ratelimit interval */ + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, sizeof(u32)); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow add pattern vdev_id %d pattern_id %d pattern_offset %d pattern_len %d\n", + vdev_id, pattern_id, pattern_offset, pattern_len); + + ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow pattern: ", + bitmap->patternbuf, pattern_len); + ath12k_dbg_dump(ar->ab, ATH12K_DBG_WMI, NULL, "wow bitmask: ", + bitmap->bitmaskbuf, pattern_len); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_ADD_WAKE_PATTERN_CMDID); +} + +int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id) +{ + struct wmi_wow_del_pattern_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_wow_del_pattern_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_WOW_DEL_PATTERN_CMD, + sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->pattern_id = cpu_to_le32(pattern_id); + cmd->pattern_type = cpu_to_le32(WOW_BITMAP_PATTERN); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv wow del pattern vdev_id %d pattern_id %d\n", + vdev_id, pattern_id); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_WOW_DEL_WAKE_PATTERN_CMDID); +} + +static struct sk_buff * +ath12k_wmi_op_gen_config_pno_start(struct ath12k *ar, u32 vdev_id, + struct wmi_pno_scan_req_arg *pno) +{ + struct nlo_configured_params *nlo_list; + size_t len, nlo_list_len, channel_list_len; + struct wmi_wow_nlo_config_cmd *cmd; + __le32 *channel_list; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *ptr; + u32 i; + + len = sizeof(*cmd) + + sizeof(*tlv) + + /* TLV place holder for array of structures + * nlo_configured_params(nlo_list) + */ + sizeof(*tlv); + /* TLV place holder for array of uint32 channel_list */ + + channel_list_len = sizeof(u32) * pno->a_networks[0].channel_count; + len += channel_list_len; + + nlo_list_len = sizeof(*nlo_list) * pno->uc_networks_count; + len += nlo_list_len; + + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + ptr = skb->data; + cmd = ptr; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, sizeof(*cmd)); + + cmd->vdev_id = cpu_to_le32(pno->vdev_id); + cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_START | WMI_NLO_CONFIG_SSID_HIDE_EN); + + /* current FW does not support min-max range for dwell time */ + cmd->active_dwell_time = cpu_to_le32(pno->active_max_time); + cmd->passive_dwell_time = cpu_to_le32(pno->passive_max_time); + + if (pno->do_passive_scan) + cmd->flags |= 
cpu_to_le32(WMI_NLO_CONFIG_SCAN_PASSIVE); + + cmd->fast_scan_period = cpu_to_le32(pno->fast_scan_period); + cmd->slow_scan_period = cpu_to_le32(pno->slow_scan_period); + cmd->fast_scan_max_cycles = cpu_to_le32(pno->fast_scan_max_cycles); + cmd->delay_start_time = cpu_to_le32(pno->delay_start_time); + + if (pno->enable_pno_scan_randomization) { + cmd->flags |= cpu_to_le32(WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ | + WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ); + ether_addr_copy(cmd->mac_addr.addr, pno->mac_addr); + ether_addr_copy(cmd->mac_mask.addr, pno->mac_addr_mask); + } + + ptr += sizeof(*cmd); + + /* nlo_configured_params(nlo_list) */ + cmd->no_of_ssids = cpu_to_le32(pno->uc_networks_count); + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, nlo_list_len); + + ptr += sizeof(*tlv); + nlo_list = ptr; + for (i = 0; i < pno->uc_networks_count; i++) { + tlv = (struct wmi_tlv *)(&nlo_list[i].tlv_header); + tlv->header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARRAY_BYTE, + sizeof(*nlo_list)); + + nlo_list[i].ssid.valid = cpu_to_le32(1); + nlo_list[i].ssid.ssid.ssid_len = + cpu_to_le32(pno->a_networks[i].ssid.ssid_len); + memcpy(nlo_list[i].ssid.ssid.ssid, + pno->a_networks[i].ssid.ssid, + le32_to_cpu(nlo_list[i].ssid.ssid.ssid_len)); + + if (pno->a_networks[i].rssi_threshold && + pno->a_networks[i].rssi_threshold > -300) { + nlo_list[i].rssi_cond.valid = cpu_to_le32(1); + nlo_list[i].rssi_cond.rssi = + cpu_to_le32(pno->a_networks[i].rssi_threshold); + } + + nlo_list[i].bcast_nw_type.valid = cpu_to_le32(1); + nlo_list[i].bcast_nw_type.bcast_nw_type = + cpu_to_le32(pno->a_networks[i].bcast_nw_type); + } + + ptr += nlo_list_len; + cmd->num_of_channels = cpu_to_le32(pno->a_networks[0].channel_count); + tlv = ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_UINT32, channel_list_len); + ptr += sizeof(*tlv); + channel_list = ptr; + + for (i = 0; i < pno->a_networks[0].channel_count; i++) + channel_list[i] = cpu_to_le32(pno->a_networks[0].channels[i]); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi tlv start pno config vdev_id %d\n", + vdev_id); + + return skb; +} + +static struct sk_buff *ath12k_wmi_op_gen_config_pno_stop(struct ath12k *ar, + u32 vdev_id) +{ + struct wmi_wow_nlo_config_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_wow_nlo_config_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NLO_CONFIG_CMD, len); + + cmd->vdev_id = cpu_to_le32(vdev_id); + cmd->flags = cpu_to_le32(WMI_NLO_CONFIG_STOP); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi tlv stop pno config vdev_id %d\n", vdev_id); + return skb; +} + +int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id, + struct wmi_pno_scan_req_arg *pno_scan) +{ + struct sk_buff *skb; + + if (pno_scan->enable) + skb = ath12k_wmi_op_gen_config_pno_start(ar, vdev_id, pno_scan); + else + skb = ath12k_wmi_op_gen_config_pno_stop(ar, vdev_id); + + if (IS_ERR_OR_NULL(skb)) + return -ENOMEM; + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID); +} + +static void ath12k_wmi_fill_ns_offload(struct ath12k *ar, + struct wmi_arp_ns_offload_arg *offload, + void **ptr, + bool enable, + bool ext) +{ + struct wmi_ns_offload_params *ns; + struct wmi_tlv *tlv; + void *buf_ptr = *ptr; + u32 ns_cnt, ns_ext_tuples; + int i, max_offloads; + + ns_cnt = offload->ipv6_count; + + tlv = buf_ptr; + + if (ext) { + ns_ext_tuples = offload->ipv6_count - WMI_MAX_NS_OFFLOADS; + 
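+ /* Example: with ipv6_count == 5 and WMI_MAX_NS_OFFLOADS == 2, the
+ * non-ext pass below fills tuples 0..1 and this ext pass fills the
+ * remaining tuples 2..4.
+ */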
tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, + ns_ext_tuples * sizeof(*ns)); + i = WMI_MAX_NS_OFFLOADS; + max_offloads = offload->ipv6_count; + } else { + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, + WMI_MAX_NS_OFFLOADS * sizeof(*ns)); + i = 0; + max_offloads = WMI_MAX_NS_OFFLOADS; + } + + buf_ptr += sizeof(*tlv); + + for (; i < max_offloads; i++) { + ns = buf_ptr; + ns->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_NS_OFFLOAD_TUPLE, + sizeof(*ns)); + + if (enable) { + if (i < ns_cnt) + ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_VALID); + + memcpy(ns->target_ipaddr[0], offload->ipv6_addr[i], 16); + memcpy(ns->solicitation_ipaddr, offload->self_ipv6_addr[i], 16); + + if (offload->ipv6_type[i]) + ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_IS_IPV6_ANYCAST); + + memcpy(ns->target_mac.addr, offload->mac_addr, ETH_ALEN); + + if (!is_zero_ether_addr(ns->target_mac.addr)) + ns->flags |= cpu_to_le32(WMI_NSOL_FLAGS_MAC_VALID); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi index %d ns_solicited %pI6 target %pI6", + i, ns->solicitation_ipaddr, + ns->target_ipaddr[0]); + } + + buf_ptr += sizeof(*ns); + } + + *ptr = buf_ptr; +} + +static void ath12k_wmi_fill_arp_offload(struct ath12k *ar, + struct wmi_arp_ns_offload_arg *offload, + void **ptr, + bool enable) +{ + struct wmi_arp_offload_params *arp; + struct wmi_tlv *tlv; + void *buf_ptr = *ptr; + int i; + + /* fill arp tuple */ + tlv = buf_ptr; + tlv->header = ath12k_wmi_tlv_hdr(WMI_TAG_ARRAY_STRUCT, + WMI_MAX_ARP_OFFLOADS * sizeof(*arp)); + buf_ptr += sizeof(*tlv); + + for (i = 0; i < WMI_MAX_ARP_OFFLOADS; i++) { + arp = buf_ptr; + arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_ARP_OFFLOAD_TUPLE, + sizeof(*arp)); + + if (enable && i < offload->ipv4_count) { + /* Copy the target ip addr and flags */ + arp->flags = cpu_to_le32(WMI_ARPOL_FLAGS_VALID); + memcpy(arp->target_ipaddr, offload->ipv4_addr[i], 4); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "wmi arp offload address %pI4", + arp->target_ipaddr); + } + + buf_ptr += sizeof(*arp); + } + + *ptr = buf_ptr; +} + +int ath12k_wmi_arp_ns_offload(struct ath12k *ar, + struct ath12k_vif *arvif, + struct wmi_arp_ns_offload_arg *offload, + bool enable) +{ + struct wmi_set_arp_ns_offload_cmd *cmd; + struct wmi_tlv *tlv; + struct sk_buff *skb; + void *buf_ptr; + size_t len; + u8 ns_cnt, ns_ext_tuples = 0; + + ns_cnt = offload->ipv6_count; + + len = sizeof(*cmd) + + sizeof(*tlv) + + WMI_MAX_NS_OFFLOADS * sizeof(struct wmi_ns_offload_params) + + sizeof(*tlv) + + WMI_MAX_ARP_OFFLOADS * sizeof(struct wmi_arp_offload_params); + + if (ns_cnt > WMI_MAX_NS_OFFLOADS) { + ns_ext_tuples = ns_cnt - WMI_MAX_NS_OFFLOADS; + len += sizeof(*tlv) + + ns_ext_tuples * sizeof(struct wmi_ns_offload_params); + } + + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + buf_ptr = skb->data; + cmd = buf_ptr; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_SET_ARP_NS_OFFLOAD_CMD, + sizeof(*cmd)); + cmd->flags = cpu_to_le32(0); + cmd->vdev_id = cpu_to_le32(arvif->vdev_id); + cmd->num_ns_ext_tuples = cpu_to_le32(ns_ext_tuples); + + buf_ptr += sizeof(*cmd); + + ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 0); + ath12k_wmi_fill_arp_offload(ar, offload, &buf_ptr, enable); + + if (ns_ext_tuples) + ath12k_wmi_fill_ns_offload(ar, offload, &buf_ptr, enable, 1); + + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_SET_ARP_NS_OFFLOAD_CMDID); +} + +int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar, + struct ath12k_vif *arvif, bool enable) +{ + struct ath12k_rekey_data *rekey_data = 
&arvif->rekey_data; + struct wmi_gtk_rekey_offload_cmd *cmd; + struct sk_buff *skb; + __le64 replay_ctr; + int len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(arvif->vdev_id); + + if (enable) { + cmd->flags = cpu_to_le32(GTK_OFFLOAD_ENABLE_OPCODE); + + /* the length in rekey_data and cmd is equal */ + memcpy(cmd->kck, rekey_data->kck, sizeof(cmd->kck)); + memcpy(cmd->kek, rekey_data->kek, sizeof(cmd->kek)); + + replay_ctr = cpu_to_le64(rekey_data->replay_ctr); + memcpy(cmd->replay_ctr, &replay_ctr, + sizeof(replay_ctr)); + } else { + cmd->flags = cpu_to_le32(GTK_OFFLOAD_DISABLE_OPCODE); + } + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "offload gtk rekey vdev: %d %d\n", + arvif->vdev_id, enable); + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID); +} + +int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar, + struct ath12k_vif *arvif) +{ + struct wmi_gtk_rekey_offload_cmd *cmd; + struct sk_buff *skb; + int len; + + len = sizeof(*cmd); + skb = ath12k_wmi_alloc_skb(ar->wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_gtk_rekey_offload_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_GTK_OFFLOAD_CMD, sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(arvif->vdev_id); + cmd->flags = cpu_to_le32(GTK_OFFLOAD_REQUEST_STATUS_OPCODE); + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, "get gtk rekey vdev_id: %d\n", + arvif->vdev_id); + return ath12k_wmi_cmd_send(ar->wmi, skb, WMI_GTK_OFFLOAD_CMDID); +} + +int ath12k_wmi_sta_keepalive(struct ath12k *ar, + const struct wmi_sta_keepalive_arg *arg) +{ + struct wmi_sta_keepalive_arp_resp_params *arp; + struct ath12k_wmi_pdev *wmi = ar->wmi; + struct wmi_sta_keepalive_cmd *cmd; + struct sk_buff *skb; + size_t len; + + len = sizeof(*cmd) + sizeof(*arp); + skb = ath12k_wmi_alloc_skb(wmi->wmi_ab, len); + if (!skb) + return -ENOMEM; + + cmd = (struct wmi_sta_keepalive_cmd *)skb->data; + cmd->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALIVE_CMD, sizeof(*cmd)); + cmd->vdev_id = cpu_to_le32(arg->vdev_id); + cmd->enabled = cpu_to_le32(arg->enabled); + cmd->interval = cpu_to_le32(arg->interval); + cmd->method = cpu_to_le32(arg->method); + + arp = (struct wmi_sta_keepalive_arp_resp_params *)(cmd + 1); + arp->tlv_header = ath12k_wmi_tlv_cmd_hdr(WMI_TAG_STA_KEEPALVE_ARP_RESPONSE, + sizeof(*arp)); + if (arg->method == WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE || + arg->method == WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST) { + arp->src_ip4_addr = cpu_to_le32(arg->src_ip4_addr); + arp->dest_ip4_addr = cpu_to_le32(arg->dest_ip4_addr); + ether_addr_copy(arp->dest_mac_addr.addr, arg->dest_mac_addr); + } + + ath12k_dbg(ar->ab, ATH12K_DBG_WMI, + "wmi sta keepalive vdev %d enabled %d method %d interval %d\n", + arg->vdev_id, arg->enabled, arg->method, arg->interval); + + return ath12k_wmi_cmd_send(wmi, skb, WMI_STA_KEEPALIVE_CMDID); +} diff --git a/drivers/net/wireless/ath/ath12k/wmi.h b/drivers/net/wireless/ath/ath12k/wmi.h index 496866673aea..f1f52175a52b 100644 --- a/drivers/net/wireless/ath/ath12k/wmi.h +++ b/drivers/net/wireless/ath/ath12k/wmi.h @@ -24,6 +24,7 @@ struct ath12k_base; struct ath12k; +struct ath12k_vif; /* There is no signed version of __le32, so for a temporary solution come * up with our own version. The idea is from fs/ntfs/endian.h. 
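As a usage illustration (not part of the patch), a caller holding a valid vdev could drive the new keepalive command through the argument struct declared below. This is a minimal sketch with a hypothetical helper name:

	static int example_sta_keepalive_enable(struct ath12k *ar, u32 vdev_id)
	{
		struct wmi_sta_keepalive_arg arg = {
			.vdev_id = vdev_id,
			.enabled = 1,
			.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
			.interval = WMI_STA_KEEPALIVE_INTERVAL_DEFAULT,
		};

		/* NULL-frame keepalive needs no ARP fields; the ARP response
		 * TLV is still appended by ath12k_wmi_sta_keepalive().
		 */
		return ath12k_wmi_sta_keepalive(ar, &arg);
	}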
@@ -2154,6 +2155,7 @@ enum wmi_tlv_service { WMI_TLV_SERVICE_PER_PEER_HTT_STATS_RESET = 213, WMI_TLV_SERVICE_FREQINFO_IN_METADATA = 219, WMI_TLV_SERVICE_EXT2_MSG = 220, + WMI_TLV_SERVICE_MBSS_PARAM_IN_VDEV_START_SUPPORT = 253, WMI_MAX_EXT_SERVICE = 256, @@ -2292,6 +2294,13 @@ struct ath12k_wmi_host_mem_chunk_arg { u32 req_id; }; +enum ath12k_peer_metadata_version { + ATH12K_PEER_METADATA_V0, + ATH12K_PEER_METADATA_V1, + ATH12K_PEER_METADATA_V1A, + ATH12K_PEER_METADATA_V1B +}; + struct ath12k_wmi_resource_config_arg { u32 num_vdevs; u32 num_peers; @@ -2354,8 +2363,10 @@ struct ath12k_wmi_resource_config_arg { u32 sched_params; u32 twt_ap_pdev_count; u32 twt_ap_sta_count; + enum ath12k_peer_metadata_version peer_metadata_ver; + u32 ema_max_vap_cnt; + u32 ema_max_profile_period; bool is_reg_cc_ext_event_supported; - u8 dp_peer_meta_data_ver; }; struct ath12k_wmi_init_cmd_arg { @@ -2410,6 +2421,7 @@ struct wmi_init_cmd { #define WMI_RSRC_CFG_HOST_SVC_FLAG_REG_CC_EXT_SUPPORT_BIT 4 #define WMI_RSRC_CFG_FLAGS2_RX_PEER_METADATA_VERSION GENMASK(5, 4) #define WMI_RSRC_CFG_FLAG1_BSS_CHANNEL_INFO_64 BIT(5) +#define WMI_RSRC_CFG_FLAGS2_CALC_NEXT_DTIM_COUNT_SET BIT(9) struct ath12k_wmi_resource_config_params { __le32 tlv_header; @@ -2726,6 +2738,8 @@ struct ath12k_wmi_vdev_create_arg { } chains[NUM_NL80211_BANDS]; u32 pdev_id; u8 if_stats_id; + u32 mbssid_flags; + u32 mbssid_tx_vdev_id; }; #define ATH12K_MAX_VDEV_STATS_ID 0x30 @@ -2757,14 +2771,23 @@ struct wmi_vdev_delete_cmd { __le32 vdev_id; } __packed; +struct ath12k_wmi_vdev_up_params { + u32 vdev_id; + u32 aid; + const u8 *bssid; + const u8 *tx_bssid; + u32 nontx_profile_idx; + u32 nontx_profile_cnt; +}; + struct wmi_vdev_up_cmd { __le32 tlv_header; __le32 vdev_id; __le32 vdev_assoc_id; struct ath12k_wmi_mac_addr_params vdev_bssid; - struct ath12k_wmi_mac_addr_params trans_bssid; - __le32 profile_idx; - __le32 profile_num; + struct ath12k_wmi_mac_addr_params tx_vdev_bssid; + __le32 nontx_profile_idx; + __le32 nontx_profile_cnt; } __packed; struct wmi_vdev_stop_cmd { @@ -2792,6 +2815,10 @@ struct ath12k_wmi_ssid_params { enum wmi_vdev_mbssid_flags { WMI_VDEV_MBSSID_FLAGS_NON_MBSSID_AP = BIT(0), + WMI_VDEV_MBSSID_FLAGS_TRANSMIT_AP = BIT(1), + WMI_VDEV_MBSSID_FLAGS_NON_TRANSMIT_AP = BIT(2), + WMI_VDEV_MBSSID_FLAGS_EMA_MODE = BIT(3), + WMI_VDEV_MBSSID_FLAGS_SCAN_MODE_VAP = BIT(4), }; struct wmi_vdev_start_request_cmd { @@ -3514,6 +3541,16 @@ struct ath12k_wmi_p2p_noa_info { #define WMI_BEACON_TX_BUFFER_SIZE 512 +#define WMI_EMA_BEACON_CNT GENMASK(7, 0) +#define WMI_EMA_BEACON_IDX GENMASK(15, 8) +#define WMI_EMA_BEACON_FIRST GENMASK(23, 16) +#define WMI_EMA_BEACON_LAST GENMASK(31, 24) + +struct ath12k_wmi_bcn_tmpl_ema_arg { + u8 bcn_cnt; + u8 bcn_index; +}; + struct wmi_bcn_tmpl_cmd { __le32 tlv_header; __le32 vdev_id; @@ -3524,6 +3561,11 @@ struct wmi_bcn_tmpl_cmd { __le32 csa_event_bitmap; __le32 mbssid_ie_offset; __le32 esp_ie_offset; + __le32 csc_switch_count_offset; + __le32 csc_event_bitmap; + __le32 mu_edca_ie_offset; + __le32 feature_enable_bitmap; + __le32 ema_params; } __packed; struct wmi_p2p_go_set_beacon_ie_cmd { @@ -4770,7 +4812,7 @@ struct wmi_probe_tmpl_cmd { __le32 buf_len; } __packed; -#define MAX_RADIOS 3 +#define MAX_RADIOS 2 #define WMI_SERVICE_READY_TIMEOUT_HZ (5 * HZ) #define WMI_SEND_TIMEOUT_HZ (3 * HZ) @@ -4868,6 +4910,556 @@ struct wmi_twt_disable_event { __le32 status; } __packed; +/* WOW structures */ +enum wmi_wow_wakeup_event { + WOW_BMISS_EVENT = 0, + WOW_BETTER_AP_EVENT, + WOW_DEAUTH_RECVD_EVENT, + 
WOW_MAGIC_PKT_RECVD_EVENT, + WOW_GTK_ERR_EVENT, + WOW_FOURWAY_HSHAKE_EVENT, + WOW_EAPOL_RECVD_EVENT, + WOW_NLO_DETECTED_EVENT, + WOW_DISASSOC_RECVD_EVENT, + WOW_PATTERN_MATCH_EVENT, + WOW_CSA_IE_EVENT, + WOW_PROBE_REQ_WPS_IE_EVENT, + WOW_AUTH_REQ_EVENT, + WOW_ASSOC_REQ_EVENT, + WOW_HTT_EVENT, + WOW_RA_MATCH_EVENT, + WOW_HOST_AUTO_SHUTDOWN_EVENT, + WOW_IOAC_MAGIC_EVENT, + WOW_IOAC_SHORT_EVENT, + WOW_IOAC_EXTEND_EVENT, + WOW_IOAC_TIMER_EVENT, + WOW_DFS_PHYERR_RADAR_EVENT, + WOW_BEACON_EVENT, + WOW_CLIENT_KICKOUT_EVENT, + WOW_EVENT_MAX, +}; + +enum wmi_wow_interface_cfg { + WOW_IFACE_PAUSE_ENABLED, + WOW_IFACE_PAUSE_DISABLED +}; + +#define C2S(x) case x: return #x + +static inline const char *wow_wakeup_event(enum wmi_wow_wakeup_event ev) +{ + switch (ev) { + C2S(WOW_BMISS_EVENT); + C2S(WOW_BETTER_AP_EVENT); + C2S(WOW_DEAUTH_RECVD_EVENT); + C2S(WOW_MAGIC_PKT_RECVD_EVENT); + C2S(WOW_GTK_ERR_EVENT); + C2S(WOW_FOURWAY_HSHAKE_EVENT); + C2S(WOW_EAPOL_RECVD_EVENT); + C2S(WOW_NLO_DETECTED_EVENT); + C2S(WOW_DISASSOC_RECVD_EVENT); + C2S(WOW_PATTERN_MATCH_EVENT); + C2S(WOW_CSA_IE_EVENT); + C2S(WOW_PROBE_REQ_WPS_IE_EVENT); + C2S(WOW_AUTH_REQ_EVENT); + C2S(WOW_ASSOC_REQ_EVENT); + C2S(WOW_HTT_EVENT); + C2S(WOW_RA_MATCH_EVENT); + C2S(WOW_HOST_AUTO_SHUTDOWN_EVENT); + C2S(WOW_IOAC_MAGIC_EVENT); + C2S(WOW_IOAC_SHORT_EVENT); + C2S(WOW_IOAC_EXTEND_EVENT); + C2S(WOW_IOAC_TIMER_EVENT); + C2S(WOW_DFS_PHYERR_RADAR_EVENT); + C2S(WOW_BEACON_EVENT); + C2S(WOW_CLIENT_KICKOUT_EVENT); + C2S(WOW_EVENT_MAX); + default: + return NULL; + } +} + +enum wmi_wow_wake_reason { + WOW_REASON_UNSPECIFIED = -1, + WOW_REASON_NLOD = 0, + WOW_REASON_AP_ASSOC_LOST, + WOW_REASON_LOW_RSSI, + WOW_REASON_DEAUTH_RECVD, + WOW_REASON_DISASSOC_RECVD, + WOW_REASON_GTK_HS_ERR, + WOW_REASON_EAP_REQ, + WOW_REASON_FOURWAY_HS_RECV, + WOW_REASON_TIMER_INTR_RECV, + WOW_REASON_PATTERN_MATCH_FOUND, + WOW_REASON_RECV_MAGIC_PATTERN, + WOW_REASON_P2P_DISC, + WOW_REASON_WLAN_HB, + WOW_REASON_CSA_EVENT, + WOW_REASON_PROBE_REQ_WPS_IE_RECV, + WOW_REASON_AUTH_REQ_RECV, + WOW_REASON_ASSOC_REQ_RECV, + WOW_REASON_HTT_EVENT, + WOW_REASON_RA_MATCH, + WOW_REASON_HOST_AUTO_SHUTDOWN, + WOW_REASON_IOAC_MAGIC_EVENT, + WOW_REASON_IOAC_SHORT_EVENT, + WOW_REASON_IOAC_EXTEND_EVENT, + WOW_REASON_IOAC_TIMER_EVENT, + WOW_REASON_ROAM_HO, + WOW_REASON_DFS_PHYERR_RADADR_EVENT, + WOW_REASON_BEACON_RECV, + WOW_REASON_CLIENT_KICKOUT_EVENT, + WOW_REASON_PAGE_FAULT = 0x3a, + WOW_REASON_DEBUG_TEST = 0xFF, +}; + +static inline const char *wow_reason(enum wmi_wow_wake_reason reason) +{ + switch (reason) { + C2S(WOW_REASON_UNSPECIFIED); + C2S(WOW_REASON_NLOD); + C2S(WOW_REASON_AP_ASSOC_LOST); + C2S(WOW_REASON_LOW_RSSI); + C2S(WOW_REASON_DEAUTH_RECVD); + C2S(WOW_REASON_DISASSOC_RECVD); + C2S(WOW_REASON_GTK_HS_ERR); + C2S(WOW_REASON_EAP_REQ); + C2S(WOW_REASON_FOURWAY_HS_RECV); + C2S(WOW_REASON_TIMER_INTR_RECV); + C2S(WOW_REASON_PATTERN_MATCH_FOUND); + C2S(WOW_REASON_RECV_MAGIC_PATTERN); + C2S(WOW_REASON_P2P_DISC); + C2S(WOW_REASON_WLAN_HB); + C2S(WOW_REASON_CSA_EVENT); + C2S(WOW_REASON_PROBE_REQ_WPS_IE_RECV); + C2S(WOW_REASON_AUTH_REQ_RECV); + C2S(WOW_REASON_ASSOC_REQ_RECV); + C2S(WOW_REASON_HTT_EVENT); + C2S(WOW_REASON_RA_MATCH); + C2S(WOW_REASON_HOST_AUTO_SHUTDOWN); + C2S(WOW_REASON_IOAC_MAGIC_EVENT); + C2S(WOW_REASON_IOAC_SHORT_EVENT); + C2S(WOW_REASON_IOAC_EXTEND_EVENT); + C2S(WOW_REASON_IOAC_TIMER_EVENT); + C2S(WOW_REASON_ROAM_HO); + C2S(WOW_REASON_DFS_PHYERR_RADADR_EVENT); + C2S(WOW_REASON_BEACON_RECV); + C2S(WOW_REASON_CLIENT_KICKOUT_EVENT); + C2S(WOW_REASON_PAGE_FAULT); + 
C2S(WOW_REASON_DEBUG_TEST); + default: + return NULL; + } +} + +#undef C2S + +#define WOW_DEFAULT_BITMAP_PATTERN_SIZE 148 +#define WOW_DEFAULT_BITMASK_SIZE 148 + +#define WOW_MIN_PATTERN_SIZE 1 +#define WOW_MAX_PATTERN_SIZE 148 +#define WOW_MAX_PKT_OFFSET 128 +#define WOW_HDR_LEN (sizeof(struct ieee80211_hdr_3addr) + \ + sizeof(struct rfc1042_hdr)) +#define WOW_MAX_REDUCE (WOW_HDR_LEN - sizeof(struct ethhdr) - \ + offsetof(struct ieee80211_hdr_3addr, addr1)) + +struct wmi_wow_bitmap_pattern_params { + __le32 tlv_header; + u8 patternbuf[WOW_DEFAULT_BITMAP_PATTERN_SIZE]; + u8 bitmaskbuf[WOW_DEFAULT_BITMASK_SIZE]; + __le32 pattern_offset; + __le32 pattern_len; + __le32 bitmask_len; + __le32 pattern_id; +} __packed; + +struct wmi_wow_add_pattern_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 pattern_id; + __le32 pattern_type; +} __packed; + +struct wmi_wow_del_pattern_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 pattern_id; + __le32 pattern_type; +} __packed; + +enum wmi_tlv_pattern_type { + WOW_PATTERN_MIN = 0, + WOW_BITMAP_PATTERN = WOW_PATTERN_MIN, + WOW_IPV4_SYNC_PATTERN, + WOW_IPV6_SYNC_PATTERN, + WOW_WILD_CARD_PATTERN, + WOW_TIMER_PATTERN, + WOW_MAGIC_PATTERN, + WOW_IPV6_RA_PATTERN, + WOW_IOAC_PKT_PATTERN, + WOW_IOAC_TMR_PATTERN, + WOW_PATTERN_MAX +}; + +struct wmi_wow_add_del_event_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 is_add; + __le32 event_bitmap; +} __packed; + +struct wmi_wow_enable_cmd { + __le32 tlv_header; + __le32 enable; + __le32 pause_iface_config; + __le32 flags; +} __packed; + +struct wmi_wow_host_wakeup_cmd { + __le32 tlv_header; + __le32 reserved; +} __packed; + +struct wmi_wow_ev_param { + __le32 vdev_id; + __le32 flag; + __le32 wake_reason; + __le32 data_len; +} __packed; + +struct wmi_wow_ev_pg_fault_param { + __le32 len; + u8 data[]; +} __packed; + +struct wmi_wow_ev_arg { + enum wmi_wow_wake_reason wake_reason; +}; + +#define WMI_PNO_MAX_SCHED_SCAN_PLANS 2 +#define WMI_PNO_MAX_SCHED_SCAN_PLAN_INT 7200 +#define WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS 100 +#define WMI_PNO_MAX_NETW_CHANNELS 26 +#define WMI_PNO_MAX_NETW_CHANNELS_EX 60 +#define WMI_PNO_MAX_SUPP_NETWORKS WLAN_SCAN_PARAMS_MAX_SSID +#define WMI_PNO_MAX_IE_LENGTH WLAN_SCAN_PARAMS_MAX_IE_LEN + +/* size based on dot11 declaration without extra IEs as we will not carry those for PNO */ +#define WMI_PNO_MAX_PB_REQ_SIZE 450 + +#define WMI_PNO_24GHZ_DEFAULT_CH 1 +#define WMI_PNO_5GHZ_DEFAULT_CH 36 + +#define WMI_ACTIVE_MAX_CHANNEL_TIME 40 +#define WMI_PASSIVE_MAX_CHANNEL_TIME 110 + +/* SSID broadcast type */ +enum wmi_ssid_bcast_type { + BCAST_UNKNOWN = 0, + BCAST_NORMAL = 1, + BCAST_HIDDEN = 2, +}; + +#define WMI_NLO_MAX_SSIDS 16 +#define WMI_NLO_MAX_CHAN 48 + +#define WMI_NLO_CONFIG_STOP BIT(0) +#define WMI_NLO_CONFIG_START BIT(1) +#define WMI_NLO_CONFIG_RESET BIT(2) +#define WMI_NLO_CONFIG_SLOW_SCAN BIT(4) +#define WMI_NLO_CONFIG_FAST_SCAN BIT(5) +#define WMI_NLO_CONFIG_SSID_HIDE_EN BIT(6) + +/* This bit is used to indicate if EPNO or supplicant PNO is enabled. 
+ * Only one of them can be enabled at a given time + */ +#define WMI_NLO_CONFIG_ENLO BIT(7) +#define WMI_NLO_CONFIG_SCAN_PASSIVE BIT(8) +#define WMI_NLO_CONFIG_ENLO_RESET BIT(9) +#define WMI_NLO_CONFIG_SPOOFED_MAC_IN_PROBE_REQ BIT(10) +#define WMI_NLO_CONFIG_RANDOM_SEQ_NO_IN_PROBE_REQ BIT(11) +#define WMI_NLO_CONFIG_ENABLE_IE_WHITELIST_IN_PROBE_REQ BIT(12) +#define WMI_NLO_CONFIG_ENABLE_CNLO_RSSI_CONFIG BIT(13) + +struct wmi_nlo_ssid_params { + __le32 valid; + struct ath12k_wmi_ssid_params ssid; +} __packed; + +struct wmi_nlo_enc_params { + __le32 valid; + __le32 enc_type; +} __packed; + +struct wmi_nlo_auth_params { + __le32 valid; + __le32 auth_type; +} __packed; + +struct wmi_nlo_bcast_nw_params { + __le32 valid; + __le32 bcast_nw_type; +} __packed; + +struct wmi_nlo_rssi_params { + __le32 valid; + __le32 rssi; +} __packed; + +struct nlo_configured_params { + /* TLV tag and len;*/ + __le32 tlv_header; + struct wmi_nlo_ssid_params ssid; + struct wmi_nlo_enc_params enc_type; + struct wmi_nlo_auth_params auth_type; + struct wmi_nlo_rssi_params rssi_cond; + + /* indicates if the SSID is hidden or not */ + struct wmi_nlo_bcast_nw_params bcast_nw_type; +} __packed; + +struct wmi_network_type_arg { + struct cfg80211_ssid ssid; + u32 authentication; + u32 encryption; + u32 bcast_nw_type; + u8 channel_count; + u16 channels[WMI_PNO_MAX_NETW_CHANNELS_EX]; + s32 rssi_threshold; +}; + +struct wmi_pno_scan_req_arg { + u8 enable; + u8 vdev_id; + u8 uc_networks_count; + struct wmi_network_type_arg a_networks[WMI_PNO_MAX_SUPP_NETWORKS]; + u32 fast_scan_period; + u32 slow_scan_period; + u8 fast_scan_max_cycles; + + bool do_passive_scan; + + u32 delay_start_time; + u32 active_min_time; + u32 active_max_time; + u32 passive_min_time; + u32 passive_max_time; + + /* mac address randomization attributes */ + u32 enable_pno_scan_randomization; + u8 mac_addr[ETH_ALEN]; + u8 mac_addr_mask[ETH_ALEN]; +}; + +struct wmi_wow_nlo_config_cmd { + __le32 tlv_header; + __le32 flags; + __le32 vdev_id; + __le32 fast_scan_max_cycles; + __le32 active_dwell_time; + __le32 passive_dwell_time; + __le32 probe_bundle_size; + + /* ART = IRT */ + __le32 rest_time; + + /* max value that can be reached after scan_backoff_multiplier */ + __le32 max_rest_time; + + __le32 scan_backoff_multiplier; + __le32 fast_scan_period; + + /* specific to windows */ + __le32 slow_scan_period; + + __le32 no_of_ssids; + + __le32 num_of_channels; + + /* NLO scan start delay time in milliseconds */ + __le32 delay_start_time; + + /* MAC Address to use in Probe Req as SA */ + struct ath12k_wmi_mac_addr_params mac_addr; + + /* Mask on which MAC has to be randomized */ + struct ath12k_wmi_mac_addr_params mac_mask; + + /* IE bitmap to use in Probe Req */ + __le32 ie_bitmap[8]; + + /* Number of vendor OUIs. In the TLV vendor_oui[] */ + __le32 num_vendor_oui; + + /* Number of connected NLO band preferences */ + __le32 num_cnlo_band_pref; + + /* The TLVs will follow. 
+ * nlo_configured_params nlo_list[]; + * u32 channel_list[num_of_channels]; + */ +} __packed; + +/* Definition of HW data filtering */ +enum hw_data_filter_type { + WMI_HW_DATA_FILTER_DROP_NON_ARP_BC = BIT(0), + WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC = BIT(1), +}; + +struct wmi_hw_data_filter_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 enable; + __le32 hw_filter_bitmap; +} __packed; + +struct wmi_hw_data_filter_arg { + u32 vdev_id; + bool enable; + u32 hw_filter_bitmap; +}; + +#define WMI_IPV6_UC_TYPE 0 +#define WMI_IPV6_AC_TYPE 1 + +#define WMI_IPV6_MAX_COUNT 16 +#define WMI_IPV4_MAX_COUNT 2 + +struct wmi_arp_ns_offload_arg { + u8 ipv4_addr[WMI_IPV4_MAX_COUNT][4]; + u32 ipv4_count; + u32 ipv6_count; + u8 ipv6_addr[WMI_IPV6_MAX_COUNT][16]; + u8 self_ipv6_addr[WMI_IPV6_MAX_COUNT][16]; + u8 ipv6_type[WMI_IPV6_MAX_COUNT]; + bool ipv6_valid[WMI_IPV6_MAX_COUNT]; + u8 mac_addr[ETH_ALEN]; +}; + +#define WMI_MAX_NS_OFFLOADS 2 +#define WMI_MAX_ARP_OFFLOADS 2 + +#define WMI_ARPOL_FLAGS_VALID BIT(0) +#define WMI_ARPOL_FLAGS_MAC_VALID BIT(1) +#define WMI_ARPOL_FLAGS_REMOTE_IP_VALID BIT(2) + +struct wmi_arp_offload_params { + __le32 tlv_header; + __le32 flags; + u8 target_ipaddr[4]; + u8 remote_ipaddr[4]; + struct ath12k_wmi_mac_addr_params target_mac; +} __packed; + +#define WMI_NSOL_FLAGS_VALID BIT(0) +#define WMI_NSOL_FLAGS_MAC_VALID BIT(1) +#define WMI_NSOL_FLAGS_REMOTE_IP_VALID BIT(2) +#define WMI_NSOL_FLAGS_IS_IPV6_ANYCAST BIT(3) + +#define WMI_NSOL_MAX_TARGET_IPS 2 + +struct wmi_ns_offload_params { + __le32 tlv_header; + __le32 flags; + u8 target_ipaddr[WMI_NSOL_MAX_TARGET_IPS][16]; + u8 solicitation_ipaddr[16]; + u8 remote_ipaddr[16]; + struct ath12k_wmi_mac_addr_params target_mac; +} __packed; + +struct wmi_set_arp_ns_offload_cmd { + __le32 tlv_header; + __le32 flags; + __le32 vdev_id; + __le32 num_ns_ext_tuples; + /* The TLVs follow: + * wmi_ns_offload_params ns[WMI_MAX_NS_OFFLOADS]; + * wmi_arp_offload_params arp[WMI_MAX_ARP_OFFLOADS]; + * wmi_ns_offload_params ns_ext[num_ns_ext_tuples]; + */ +} __packed; + +#define GTK_OFFLOAD_OPCODE_MASK 0xFF000000 +#define GTK_OFFLOAD_ENABLE_OPCODE 0x01000000 +#define GTK_OFFLOAD_DISABLE_OPCODE 0x02000000 +#define GTK_OFFLOAD_REQUEST_STATUS_OPCODE 0x04000000 + +#define GTK_OFFLOAD_KEK_BYTES 16 +#define GTK_OFFLOAD_KCK_BYTES 16 +#define GTK_REPLAY_COUNTER_BYTES 8 +#define WMI_MAX_KEY_LEN 32 +#define IGTK_PN_SIZE 6 + +struct wmi_gtk_offload_status_event { + __le32 vdev_id; + __le32 flags; + __le32 refresh_cnt; + __le64 replay_ctr; + u8 igtk_key_index; + u8 igtk_key_length; + u8 igtk_key_rsc[IGTK_PN_SIZE]; + u8 igtk_key[WMI_MAX_KEY_LEN]; + u8 gtk_key_index; + u8 gtk_key_length; + u8 gtk_key_rsc[GTK_REPLAY_COUNTER_BYTES]; + u8 gtk_key[WMI_MAX_KEY_LEN]; +} __packed; + +struct wmi_gtk_rekey_offload_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 flags; + u8 kek[GTK_OFFLOAD_KEK_BYTES]; + u8 kck[GTK_OFFLOAD_KCK_BYTES]; + u8 replay_ctr[GTK_REPLAY_COUNTER_BYTES]; +} __packed; + +struct wmi_sta_keepalive_cmd { + __le32 tlv_header; + __le32 vdev_id; + __le32 enabled; + + /* WMI_STA_KEEPALIVE_METHOD_ */ + __le32 method; + + /* in seconds */ + __le32 interval; + + /* following this structure is the TLV for struct + * wmi_sta_keepalive_arp_resp_params + */ +} __packed; + +struct wmi_sta_keepalive_arp_resp_params { + __le32 tlv_header; + __le32 src_ip4_addr; + __le32 dest_ip4_addr; + struct ath12k_wmi_mac_addr_params dest_mac_addr; +} __packed; + +struct wmi_sta_keepalive_arg { + u32 vdev_id; + u32 enabled; + u32 method; + u32 interval; + u32 
src_ip4_addr; + u32 dest_ip4_addr; + const u8 dest_mac_addr[ETH_ALEN]; +}; + +enum wmi_sta_keepalive_method { + WMI_STA_KEEPALIVE_METHOD_NULL_FRAME = 1, + WMI_STA_KEEPALIVE_METHOD_UNSOLICITED_ARP_RESPONSE = 2, + WMI_STA_KEEPALIVE_METHOD_ETHERNET_LOOPBACK = 3, + WMI_STA_KEEPALIVE_METHOD_GRATUITOUS_ARP_REQUEST = 4, + WMI_STA_KEEPALIVE_METHOD_MGMT_VENDOR_ACTION = 5, +}; + +#define WMI_STA_KEEPALIVE_INTERVAL_DEFAULT 30 +#define WMI_STA_KEEPALIVE_INTERVAL_DISABLE 0 + void ath12k_wmi_init_qcn9274(struct ath12k_base *ab, struct ath12k_wmi_resource_config_arg *config); void ath12k_wmi_init_wcn7850(struct ath12k_base *ab, @@ -4881,10 +5473,10 @@ int ath12k_wmi_p2p_go_bcn_ie(struct ath12k *ar, u32 vdev_id, const u8 *p2p_ie); int ath12k_wmi_bcn_tmpl(struct ath12k *ar, u32 vdev_id, struct ieee80211_mutable_offsets *offs, - struct sk_buff *bcn); + struct sk_buff *bcn, + struct ath12k_wmi_bcn_tmpl_ema_arg *ema_args); int ath12k_wmi_vdev_down(struct ath12k *ar, u8 vdev_id); -int ath12k_wmi_vdev_up(struct ath12k *ar, u32 vdev_id, u32 aid, - const u8 *bssid); +int ath12k_wmi_vdev_up(struct ath12k *ar, struct ath12k_wmi_vdev_up_params *params); int ath12k_wmi_vdev_stop(struct ath12k *ar, u8 vdev_id); int ath12k_wmi_vdev_start(struct ath12k *ar, struct wmi_vdev_start_req_arg *arg, bool restart); @@ -5020,4 +5612,28 @@ ath12k_wmi_mac_phy_get_hw_link_id(const struct ath12k_wmi_mac_phy_caps_params *p WMI_CAPS_PARAMS_HW_LINK_ID); } +int ath12k_wmi_wow_host_wakeup_ind(struct ath12k *ar); +int ath12k_wmi_wow_enable(struct ath12k *ar); +int ath12k_wmi_wow_del_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id); +int ath12k_wmi_wow_add_pattern(struct ath12k *ar, u32 vdev_id, u32 pattern_id, + const u8 *pattern, const u8 *mask, + int pattern_len, int pattern_offset); +int ath12k_wmi_wow_add_wakeup_event(struct ath12k *ar, u32 vdev_id, + enum wmi_wow_wakeup_event event, + u32 enable); +int ath12k_wmi_wow_config_pno(struct ath12k *ar, u32 vdev_id, + struct wmi_pno_scan_req_arg *pno_scan); +int ath12k_wmi_hw_data_filter_cmd(struct ath12k *ar, + struct wmi_hw_data_filter_arg *arg); +int ath12k_wmi_arp_ns_offload(struct ath12k *ar, + struct ath12k_vif *arvif, + struct wmi_arp_ns_offload_arg *offload, + bool enable); +int ath12k_wmi_gtk_rekey_offload(struct ath12k *ar, + struct ath12k_vif *arvif, bool enable); +int ath12k_wmi_gtk_rekey_getinfo(struct ath12k *ar, + struct ath12k_vif *arvif); +int ath12k_wmi_sta_keepalive(struct ath12k *ar, + const struct wmi_sta_keepalive_arg *arg); + #endif diff --git a/drivers/net/wireless/ath/ath12k/wow.c b/drivers/net/wireless/ath/ath12k/wow.c new file mode 100644 index 000000000000..bead19db2c9a --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/wow.c @@ -0,0 +1,1026 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#include <linux/delay.h> +#include <linux/inetdevice.h> +#include <net/if_inet6.h> +#include <net/ipv6.h> +#include <net/addrconf.h> + +#include "mac.h" + +#include <net/mac80211.h> +#include "core.h" +#include "hif.h" +#include "debug.h" +#include "wmi.h" +#include "wow.h" + +static const struct wiphy_wowlan_support ath12k_wowlan_support = { + .flags = WIPHY_WOWLAN_DISCONNECT | + WIPHY_WOWLAN_MAGIC_PKT | + WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | + WIPHY_WOWLAN_GTK_REKEY_FAILURE, + .pattern_min_len = WOW_MIN_PATTERN_SIZE, + .pattern_max_len = WOW_MAX_PATTERN_SIZE, + .max_pkt_offset = WOW_MAX_PKT_OFFSET, +}; + +static inline bool ath12k_wow_is_p2p_vdev(struct ath12k_vif *arvif) +{ + return (arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_DEVICE || + arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_CLIENT || + arvif->vdev_subtype == WMI_VDEV_SUBTYPE_P2P_GO); +} + +int ath12k_wow_enable(struct ath12k *ar) +{ + struct ath12k_base *ab = ar->ab; + int i, ret; + + clear_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags); + + /* The firmware might be busy and it cannot enter WoW immediately. + * In that case firmware notifies host with + * ATH12K_HTC_MSG_NACK_SUSPEND message, asking host to try again + * later. Per the firmware team there could be up to 10 loops. + */ + for (i = 0; i < ATH12K_WOW_RETRY_NUM; i++) { + reinit_completion(&ab->htc_suspend); + + ret = ath12k_wmi_wow_enable(ar); + if (ret) { + ath12k_warn(ab, "failed to issue wow enable: %d\n", ret); + return ret; + } + + ret = wait_for_completion_timeout(&ab->htc_suspend, 3 * HZ); + if (ret == 0) { + ath12k_warn(ab, + "timed out while waiting for htc suspend completion\n"); + return -ETIMEDOUT; + } + + if (test_bit(ATH12K_FLAG_HTC_SUSPEND_COMPLETE, &ab->dev_flags)) + /* success, suspend complete received */ + return 0; + + ath12k_warn(ab, "htc suspend not complete, retrying (try %d)\n", + i); + msleep(ATH12K_WOW_RETRY_WAIT_MS); + } + + ath12k_warn(ab, "htc suspend not complete, failing after %d tries\n", i); + + return -ETIMEDOUT; +} + +int ath12k_wow_wakeup(struct ath12k *ar) +{ + struct ath12k_base *ab = ar->ab; + int ret; + + reinit_completion(&ab->wow.wakeup_completed); + + ret = ath12k_wmi_wow_host_wakeup_ind(ar); + if (ret) { + ath12k_warn(ab, "failed to send wow wakeup indication: %d\n", + ret); + return ret; + } + + ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ); + if (ret == 0) { + ath12k_warn(ab, "timed out while waiting for wow wakeup completion\n"); + return -ETIMEDOUT; + } + + return 0; +} + +static int ath12k_wow_vif_cleanup(struct ath12k_vif *arvif) +{ + struct ath12k *ar = arvif->ar; + int i, ret; + + for (i = 0; i < WOW_EVENT_MAX; i++) { + ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 0); + if (ret) { + ath12k_warn(ar->ab, "failed to issue wow wakeup for event %s on vdev %i: %d\n", + wow_wakeup_event(i), arvif->vdev_id, ret); + return ret; + } + } + + for (i = 0; i < ar->wow.max_num_patterns; i++) { + ret = ath12k_wmi_wow_del_pattern(ar, arvif->vdev_id, i); + if (ret) { + ath12k_warn(ar->ab, "failed to delete wow pattern %d for vdev %i: %d\n", + i, arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath12k_wow_cleanup(struct ath12k *ar) +{ + struct ath12k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + ret = ath12k_wow_vif_cleanup(arvif); + if (ret) { + ath12k_warn(ar->ab, "failed to clean wow wakeups on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +/* Convert an 802.3 format to an 802.11 format. 
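+ * The 14-byte 802.3 header is replaced by a 24-byte 802.11 3-addr header
+ * plus an 8-byte RFC 1042 LLC/SNAP header whose last two bytes carry the
+ * ethertype, so pattern bytes and the packet offset are rebased as below: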
+ * +------------+-----------+--------+----------------+ + * 802.3: |dest mac(6B)|src mac(6B)|type(2B)| body... | + * +------------+-----------+--------+----------------+ + * |__ |_______ |____________ |________ + * | | | | + * +--+------------+----+-----------+---------------+-----------+ + * 802.11: |4B|dest mac(6B)| 6B |src mac(6B)| 8B |type(2B)| body... | + * +--+------------+----+-----------+---------------+-----------+ + */ +static void +ath12k_wow_convert_8023_to_80211(struct ath12k *ar, + const struct cfg80211_pkt_pattern *eth_pattern, + struct ath12k_pkt_pattern *i80211_pattern) +{ + size_t r1042_eth_ofs = offsetof(struct rfc1042_hdr, eth_type); + size_t a1_ofs = offsetof(struct ieee80211_hdr_3addr, addr1); + size_t a3_ofs = offsetof(struct ieee80211_hdr_3addr, addr3); + size_t i80211_hdr_len = sizeof(struct ieee80211_hdr_3addr); + size_t prot_ofs = offsetof(struct ethhdr, h_proto); + size_t src_ofs = offsetof(struct ethhdr, h_source); + u8 eth_bytemask[WOW_MAX_PATTERN_SIZE] = {}; + const u8 *eth_pat = eth_pattern->pattern; + size_t eth_pat_len = eth_pattern->pattern_len; + size_t eth_pkt_ofs = eth_pattern->pkt_offset; + u8 *bytemask = i80211_pattern->bytemask; + u8 *pat = i80211_pattern->pattern; + size_t pat_len = 0; + size_t pkt_ofs = 0; + size_t delta; + int i; + + /* convert bitmask to bytemask */ + for (i = 0; i < eth_pat_len; i++) + if (eth_pattern->mask[i / 8] & BIT(i % 8)) + eth_bytemask[i] = 0xff; + + if (eth_pkt_ofs < ETH_ALEN) { + pkt_ofs = eth_pkt_ofs + a1_ofs; + + if (size_add(eth_pkt_ofs, eth_pat_len) < ETH_ALEN) { + memcpy(pat, eth_pat, eth_pat_len); + memcpy(bytemask, eth_bytemask, eth_pat_len); + + pat_len = eth_pat_len; + } else if (eth_pkt_ofs + eth_pat_len < prot_ofs) { + memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs); + memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs); + + delta = eth_pkt_ofs + eth_pat_len - src_ofs; + memcpy(pat + a3_ofs - pkt_ofs, + eth_pat + ETH_ALEN - eth_pkt_ofs, + delta); + memcpy(bytemask + a3_ofs - pkt_ofs, + eth_bytemask + ETH_ALEN - eth_pkt_ofs, + delta); + + pat_len = a3_ofs - pkt_ofs + delta; + } else { + memcpy(pat, eth_pat, ETH_ALEN - eth_pkt_ofs); + memcpy(bytemask, eth_bytemask, ETH_ALEN - eth_pkt_ofs); + + memcpy(pat + a3_ofs - pkt_ofs, + eth_pat + ETH_ALEN - eth_pkt_ofs, + ETH_ALEN); + memcpy(bytemask + a3_ofs - pkt_ofs, + eth_bytemask + ETH_ALEN - eth_pkt_ofs, + ETH_ALEN); + + delta = eth_pkt_ofs + eth_pat_len - prot_ofs; + memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs, + eth_pat + prot_ofs - eth_pkt_ofs, + delta); + memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs, + eth_bytemask + prot_ofs - eth_pkt_ofs, + delta); + + pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta; + } + } else if (eth_pkt_ofs < prot_ofs) { + pkt_ofs = eth_pkt_ofs - ETH_ALEN + a3_ofs; + + if (size_add(eth_pkt_ofs, eth_pat_len) < prot_ofs) { + memcpy(pat, eth_pat, eth_pat_len); + memcpy(bytemask, eth_bytemask, eth_pat_len); + + pat_len = eth_pat_len; + } else { + memcpy(pat, eth_pat, prot_ofs - eth_pkt_ofs); + memcpy(bytemask, eth_bytemask, prot_ofs - eth_pkt_ofs); + + delta = eth_pkt_ofs + eth_pat_len - prot_ofs; + memcpy(pat + i80211_hdr_len + r1042_eth_ofs - pkt_ofs, + eth_pat + prot_ofs - eth_pkt_ofs, + delta); + memcpy(bytemask + i80211_hdr_len + r1042_eth_ofs - pkt_ofs, + eth_bytemask + prot_ofs - eth_pkt_ofs, + delta); + + pat_len = i80211_hdr_len + r1042_eth_ofs - pkt_ofs + delta; + } + } else { + pkt_ofs = eth_pkt_ofs - prot_ofs + i80211_hdr_len + r1042_eth_ofs; + + memcpy(pat, eth_pat, eth_pat_len); + 
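+ /* The pattern sits at or beyond the ethertype, so the bytes and the
+ * mask copy through unchanged; only pkt_ofs was rebased above, past
+ * the 802.11 header and the LLC/SNAP prefix.
+ */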
memcpy(bytemask, eth_bytemask, eth_pat_len); + + pat_len = eth_pat_len; + } + + i80211_pattern->pattern_len = pat_len; + i80211_pattern->pkt_offset = pkt_ofs; +} + +static int +ath12k_wow_pno_check_and_convert(struct ath12k *ar, u32 vdev_id, + const struct cfg80211_sched_scan_request *nd_config, + struct wmi_pno_scan_req_arg *pno) +{ + int i, j; + u8 ssid_len; + + pno->enable = 1; + pno->vdev_id = vdev_id; + pno->uc_networks_count = nd_config->n_match_sets; + + if (!pno->uc_networks_count || + pno->uc_networks_count > WMI_PNO_MAX_SUPP_NETWORKS) + return -EINVAL; + + if (nd_config->n_channels > WMI_PNO_MAX_NETW_CHANNELS_EX) + return -EINVAL; + + /* Filling per profile params */ + for (i = 0; i < pno->uc_networks_count; i++) { + ssid_len = nd_config->match_sets[i].ssid.ssid_len; + + if (ssid_len == 0 || ssid_len > 32) + return -EINVAL; + + pno->a_networks[i].ssid.ssid_len = ssid_len; + + memcpy(pno->a_networks[i].ssid.ssid, + nd_config->match_sets[i].ssid.ssid, + ssid_len); + pno->a_networks[i].authentication = 0; + pno->a_networks[i].encryption = 0; + pno->a_networks[i].bcast_nw_type = 0; + + /* Copying list of valid channel into request */ + pno->a_networks[i].channel_count = nd_config->n_channels; + pno->a_networks[i].rssi_threshold = nd_config->match_sets[i].rssi_thold; + + for (j = 0; j < nd_config->n_channels; j++) { + pno->a_networks[i].channels[j] = + nd_config->channels[j]->center_freq; + } + } + + /* set scan to passive if no SSIDs are specified in the request */ + if (nd_config->n_ssids == 0) + pno->do_passive_scan = true; + else + pno->do_passive_scan = false; + + for (i = 0; i < nd_config->n_ssids; i++) { + for (j = 0; j < pno->uc_networks_count; j++) { + if (pno->a_networks[j].ssid.ssid_len == + nd_config->ssids[i].ssid_len && + !memcmp(pno->a_networks[j].ssid.ssid, + nd_config->ssids[i].ssid, + pno->a_networks[j].ssid.ssid_len)) { + pno->a_networks[j].bcast_nw_type = BCAST_HIDDEN; + break; + } + } + } + + if (nd_config->n_scan_plans == 2) { + pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC; + pno->fast_scan_max_cycles = nd_config->scan_plans[0].iterations; + pno->slow_scan_period = + nd_config->scan_plans[1].interval * MSEC_PER_SEC; + } else if (nd_config->n_scan_plans == 1) { + pno->fast_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC; + pno->fast_scan_max_cycles = 1; + pno->slow_scan_period = nd_config->scan_plans[0].interval * MSEC_PER_SEC; + } else { + ath12k_warn(ar->ab, "Invalid number of PNO scan plans: %d", + nd_config->n_scan_plans); + } + + if (nd_config->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { + /* enable mac randomization */ + pno->enable_pno_scan_randomization = 1; + memcpy(pno->mac_addr, nd_config->mac_addr, ETH_ALEN); + memcpy(pno->mac_addr_mask, nd_config->mac_addr_mask, ETH_ALEN); + } + + pno->delay_start_time = nd_config->delay; + + /* Current FW does not support min-max range for dwell time */ + pno->active_max_time = WMI_ACTIVE_MAX_CHANNEL_TIME; + pno->passive_max_time = WMI_PASSIVE_MAX_CHANNEL_TIME; + + return 0; +} + +static int ath12k_wow_vif_set_wakeups(struct ath12k_vif *arvif, + struct cfg80211_wowlan *wowlan) +{ + const struct cfg80211_pkt_pattern *patterns = wowlan->patterns; + struct ath12k *ar = arvif->ar; + unsigned long wow_mask = 0; + int pattern_id = 0; + int ret, i; + + /* Setup requested WOW features */ + switch (arvif->vdev_type) { + case WMI_VDEV_TYPE_IBSS: + __set_bit(WOW_BEACON_EVENT, &wow_mask); + fallthrough; + case WMI_VDEV_TYPE_AP: + __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask); + 
__set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask); + __set_bit(WOW_PROBE_REQ_WPS_IE_EVENT, &wow_mask); + __set_bit(WOW_AUTH_REQ_EVENT, &wow_mask); + __set_bit(WOW_ASSOC_REQ_EVENT, &wow_mask); + __set_bit(WOW_HTT_EVENT, &wow_mask); + __set_bit(WOW_RA_MATCH_EVENT, &wow_mask); + break; + case WMI_VDEV_TYPE_STA: + if (wowlan->disconnect) { + __set_bit(WOW_DEAUTH_RECVD_EVENT, &wow_mask); + __set_bit(WOW_DISASSOC_RECVD_EVENT, &wow_mask); + __set_bit(WOW_BMISS_EVENT, &wow_mask); + __set_bit(WOW_CSA_IE_EVENT, &wow_mask); + } + + if (wowlan->magic_pkt) + __set_bit(WOW_MAGIC_PKT_RECVD_EVENT, &wow_mask); + + if (wowlan->nd_config) { + struct wmi_pno_scan_req_arg *pno; + int ret; + + pno = kzalloc(sizeof(*pno), GFP_KERNEL); + if (!pno) + return -ENOMEM; + + ar->nlo_enabled = true; + + ret = ath12k_wow_pno_check_and_convert(ar, arvif->vdev_id, + wowlan->nd_config, pno); + if (!ret) { + ath12k_wmi_wow_config_pno(ar, arvif->vdev_id, pno); + __set_bit(WOW_NLO_DETECTED_EVENT, &wow_mask); + } + + kfree(pno); + } + break; + default: + break; + } + + for (i = 0; i < wowlan->n_patterns; i++) { + const struct cfg80211_pkt_pattern *eth_pattern = &patterns[i]; + struct ath12k_pkt_pattern new_pattern = {}; + + if (WARN_ON(eth_pattern->pattern_len > WOW_MAX_PATTERN_SIZE)) + return -EINVAL; + + if (ar->ab->wow.wmi_conf_rx_decap_mode == + ATH12K_HW_TXRX_NATIVE_WIFI) { + ath12k_wow_convert_8023_to_80211(ar, eth_pattern, + &new_pattern); + + if (WARN_ON(new_pattern.pattern_len > WOW_MAX_PATTERN_SIZE)) + return -EINVAL; + } else { + memcpy(new_pattern.pattern, eth_pattern->pattern, + eth_pattern->pattern_len); + + /* convert bitmask to bytemask; use a dedicated index so the + * outer pattern loop counter is not clobbered + */ + for (int j = 0; j < eth_pattern->pattern_len; j++) + if (eth_pattern->mask[j / 8] & BIT(j % 8)) + new_pattern.bytemask[j] = 0xff; + + new_pattern.pattern_len = eth_pattern->pattern_len; + new_pattern.pkt_offset = eth_pattern->pkt_offset; + } + + ret = ath12k_wmi_wow_add_pattern(ar, arvif->vdev_id, + pattern_id, + new_pattern.pattern, + new_pattern.bytemask, + new_pattern.pattern_len, + new_pattern.pkt_offset); + if (ret) { + ath12k_warn(ar->ab, "failed to add pattern %i to vdev %i: %d\n", + pattern_id, + arvif->vdev_id, ret); + return ret; + } + + pattern_id++; + __set_bit(WOW_PATTERN_MATCH_EVENT, &wow_mask); + } + + for (i = 0; i < WOW_EVENT_MAX; i++) { + if (!test_bit(i, &wow_mask)) + continue; + ret = ath12k_wmi_wow_add_wakeup_event(ar, arvif->vdev_id, i, 1); + if (ret) { + ath12k_warn(ar->ab, "failed to enable wakeup event %s on vdev %i: %d\n", + wow_wakeup_event(i), arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath12k_wow_set_wakeups(struct ath12k *ar, + struct cfg80211_wowlan *wowlan) +{ + struct ath12k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (ath12k_wow_is_p2p_vdev(arvif)) + continue; + ret = ath12k_wow_vif_set_wakeups(arvif, wowlan); + if (ret) { + ath12k_warn(ar->ab, "failed to set wow wakeups on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath12k_wow_vdev_clean_nlo(struct ath12k *ar, u32 vdev_id) +{ + struct wmi_pno_scan_req_arg *pno; + int ret; + + if (!ar->nlo_enabled) + return 0; + + pno = kzalloc(sizeof(*pno), GFP_KERNEL); + if (!pno) + return -ENOMEM; + + pno->enable = 0; + ret = ath12k_wmi_wow_config_pno(ar, vdev_id, pno); + if (ret) { + ath12k_warn(ar->ab, "failed to disable PNO: %d", ret); + goto out; + } + + ar->nlo_enabled = false; + +out: + kfree(pno); + return ret; +} + +static int 
ath12k_wow_vif_clean_nlo(struct ath12k_vif *arvif) +{ + struct ath12k *ar = arvif->ar; + + switch (arvif->vdev_type) { + case WMI_VDEV_TYPE_STA: + return ath12k_wow_vdev_clean_nlo(ar, arvif->vdev_id); + default: + return 0; + } +} + +static int ath12k_wow_nlo_cleanup(struct ath12k *ar) +{ + struct ath12k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (ath12k_wow_is_p2p_vdev(arvif)) + continue; + + ret = ath12k_wow_vif_clean_nlo(arvif); + if (ret) { + ath12k_warn(ar->ab, "failed to clean nlo settings on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath12k_wow_set_hw_filter(struct ath12k *ar) +{ + struct wmi_hw_data_filter_arg arg; + struct ath12k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + continue; + + arg.vdev_id = arvif->vdev_id; + arg.enable = true; + arg.hw_filter_bitmap = WMI_HW_DATA_FILTER_DROP_NON_ICMPV6_MC; + ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg); + if (ret) { + ath12k_warn(ar->ab, "failed to set hw data filter on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static int ath12k_wow_clear_hw_filter(struct ath12k *ar) +{ + struct wmi_hw_data_filter_arg arg; + struct ath12k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + continue; + + arg.vdev_id = arvif->vdev_id; + arg.enable = false; + arg.hw_filter_bitmap = 0; + ret = ath12k_wmi_hw_data_filter_cmd(ar, &arg); + + if (ret) { + ath12k_warn(ar->ab, "failed to clear hw data filter on vdev %i: %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + return 0; +} + +static void ath12k_wow_generate_ns_mc_addr(struct ath12k_base *ab, + struct wmi_arp_ns_offload_arg *offload) +{ + int i; + + for (i = 0; i < offload->ipv6_count; i++) { + offload->self_ipv6_addr[i][0] = 0xff; + offload->self_ipv6_addr[i][1] = 0x02; + offload->self_ipv6_addr[i][11] = 0x01; + offload->self_ipv6_addr[i][12] = 0xff; + offload->self_ipv6_addr[i][13] = + offload->ipv6_addr[i][13]; + offload->self_ipv6_addr[i][14] = + offload->ipv6_addr[i][14]; + offload->self_ipv6_addr[i][15] = + offload->ipv6_addr[i][15]; + ath12k_dbg(ab, ATH12K_DBG_WOW, "NS solicited addr %pI6\n", + offload->self_ipv6_addr[i]); + } +} + +static void ath12k_wow_prepare_ns_offload(struct ath12k_vif *arvif, + struct wmi_arp_ns_offload_arg *offload) +{ + struct net_device *ndev = ieee80211_vif_to_wdev(arvif->vif)->netdev; + struct ath12k_base *ab = arvif->ar->ab; + struct inet6_ifaddr *ifa6; + struct ifacaddr6 *ifaca6; + struct inet6_dev *idev; + u32 count = 0, scope; + + if (!ndev) + return; + + idev = in6_dev_get(ndev); + if (!idev) + return; + + ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare ns offload\n"); + + read_lock_bh(&idev->lock); + + /* get unicast address */ + list_for_each_entry(ifa6, &idev->addr_list, if_list) { + if (count >= WMI_IPV6_MAX_COUNT) + goto unlock; + + if (ifa6->flags & IFA_F_DADFAILED) + continue; + + scope = ipv6_addr_src_scope(&ifa6->addr); + if (scope != IPV6_ADDR_SCOPE_LINKLOCAL && + scope != IPV6_ADDR_SCOPE_GLOBAL) { + ath12k_dbg(ab, ATH12K_DBG_WOW, + "Unsupported ipv6 scope: %d\n", scope); + continue; + } + + memcpy(offload->ipv6_addr[count], &ifa6->addr.s6_addr, + sizeof(ifa6->addr.s6_addr)); + offload->ipv6_type[count] = WMI_IPV6_UC_TYPE; + ath12k_dbg(ab, ATH12K_DBG_WOW, "mac 
count %d ipv6 uc %pI6 scope %d\n", + count, offload->ipv6_addr[count], + scope); + count++; + } + + /* get anycast address */ + rcu_read_lock(); + + for (ifaca6 = rcu_dereference(idev->ac_list); ifaca6; + ifaca6 = rcu_dereference(ifaca6->aca_next)) { + if (count >= WMI_IPV6_MAX_COUNT) { + rcu_read_unlock(); + goto unlock; + } + + scope = ipv6_addr_src_scope(&ifaca6->aca_addr); + if (scope != IPV6_ADDR_SCOPE_LINKLOCAL && + scope != IPV6_ADDR_SCOPE_GLOBAL) { + ath12k_dbg(ab, ATH12K_DBG_WOW, + "Unsupported ipv6 scope: %d\n", scope); + continue; + } + + memcpy(offload->ipv6_addr[count], &ifaca6->aca_addr, + sizeof(ifaca6->aca_addr)); + offload->ipv6_type[count] = WMI_IPV6_AC_TYPE; + ath12k_dbg(ab, ATH12K_DBG_WOW, "mac count %d ipv6 ac %pI6 scope %d\n", + count, offload->ipv6_addr[count], + scope); + count++; + } + + rcu_read_unlock(); + +unlock: + read_unlock_bh(&idev->lock); + + in6_dev_put(idev); + + offload->ipv6_count = count; + ath12k_wow_generate_ns_mc_addr(ab, offload); +} + +static void ath12k_wow_prepare_arp_offload(struct ath12k_vif *arvif, + struct wmi_arp_ns_offload_arg *offload) +{ + struct ieee80211_vif *vif = arvif->vif; + struct ieee80211_vif_cfg vif_cfg = vif->cfg; + struct ath12k_base *ab = arvif->ar->ab; + u32 ipv4_cnt; + + ath12k_dbg(ab, ATH12K_DBG_WOW, "wow prepare arp offload\n"); + + ipv4_cnt = min(vif_cfg.arp_addr_cnt, WMI_IPV4_MAX_COUNT); + memcpy(offload->ipv4_addr, vif_cfg.arp_addr_list, ipv4_cnt * sizeof(u32)); + offload->ipv4_count = ipv4_cnt; + + ath12k_dbg(ab, ATH12K_DBG_WOW, + "wow arp_addr_cnt %d vif->addr %pM, offload_addr %pI4\n", + vif_cfg.arp_addr_cnt, vif->addr, offload->ipv4_addr); +} + +static int ath12k_wow_arp_ns_offload(struct ath12k *ar, bool enable) +{ + struct wmi_arp_ns_offload_arg *offload; + struct ath12k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + offload = kmalloc(sizeof(*offload), GFP_KERNEL); + if (!offload) + return -ENOMEM; + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->vdev_type != WMI_VDEV_TYPE_STA) + continue; + + memset(offload, 0, sizeof(*offload)); + + memcpy(offload->mac_addr, arvif->vif->addr, ETH_ALEN); + ath12k_wow_prepare_ns_offload(arvif, offload); + ath12k_wow_prepare_arp_offload(arvif, offload); + + ret = ath12k_wmi_arp_ns_offload(ar, arvif, offload, enable); + if (ret) { + ath12k_warn(ar->ab, "failed to set arp ns offload vdev %i: enable %d, ret %d\n", + arvif->vdev_id, enable, ret); + kfree(offload); + return ret; + } + } + + kfree(offload); + + return 0; +} + +static int ath12k_gtk_rekey_offload(struct ath12k *ar, bool enable) +{ + struct ath12k_vif *arvif; + int ret; + + lockdep_assert_held(&ar->conf_mutex); + + list_for_each_entry(arvif, &ar->arvifs, list) { + if (arvif->vdev_type != WMI_VDEV_TYPE_STA || + !arvif->is_up || + !arvif->rekey_data.enable_offload) + continue; + + /* get rekey info before disabling rekey offload */ + if (!enable) { + ret = ath12k_wmi_gtk_rekey_getinfo(ar, arvif); + if (ret) { + ath12k_warn(ar->ab, "failed to request rekey info vdev %i, ret %d\n", + arvif->vdev_id, ret); + return ret; + } + } + + ret = ath12k_wmi_gtk_rekey_offload(ar, arvif, enable); + + if (ret) { + ath12k_warn(ar->ab, "failed to offload gtk rekey vdev %i: enable %d, ret %d\n", + arvif->vdev_id, enable, ret); + return ret; + } + } + + return 0; +} + +static int ath12k_wow_protocol_offload(struct ath12k *ar, bool enable) +{ + int ret; + + ret = ath12k_wow_arp_ns_offload(ar, enable); + if (ret) { + ath12k_warn(ar->ab, "failed to offload ARP and NS %d %d\n", + enable, ret); + return ret; + } + + ret = 
+static int ath12k_wow_protocol_offload(struct ath12k *ar, bool enable)
+{
+	int ret;
+
+	ret = ath12k_wow_arp_ns_offload(ar, enable);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to offload ARP and NS %d %d\n",
+			    enable, ret);
+		return ret;
+	}
+
+	ret = ath12k_gtk_rekey_offload(ar, enable);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to offload gtk rekey %d %d\n",
+			    enable, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int ath12k_wow_set_keepalive(struct ath12k *ar,
+				    enum wmi_sta_keepalive_method method,
+				    u32 interval)
+{
+	struct ath12k_vif *arvif;
+	int ret;
+
+	lockdep_assert_held(&ar->conf_mutex);
+
+	list_for_each_entry(arvif, &ar->arvifs, list) {
+		ret = ath12k_mac_vif_set_keepalive(arvif, method, interval);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+int ath12k_wow_op_suspend(struct ieee80211_hw *hw,
+			  struct cfg80211_wowlan *wowlan)
+{
+	struct ath12k_hw *ah = ath12k_hw_to_ah(hw);
+	struct ath12k *ar = ath12k_ah_to_ar(ah, 0);
+	int ret;
+
+	mutex_lock(&ar->conf_mutex);
+
+	ret = ath12k_wow_cleanup(ar);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to clear wow wakeup events: %d\n",
+			    ret);
+		goto exit;
+	}
+
+	ret = ath12k_wow_set_wakeups(ar, wowlan);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set wow wakeup events: %d\n",
+			    ret);
+		goto cleanup;
+	}
+
+	ret = ath12k_wow_protocol_offload(ar, true);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set wow protocol offload events: %d\n",
+			    ret);
+		goto cleanup;
+	}
+
+	ret = ath12k_mac_wait_tx_complete(ar);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to wait tx complete: %d\n", ret);
+		goto cleanup;
+	}
+
+	ret = ath12k_wow_set_hw_filter(ar);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to set hw filter: %d\n",
+			    ret);
+		goto cleanup;
+	}
+
+	ret = ath12k_wow_set_keepalive(ar,
+				       WMI_STA_KEEPALIVE_METHOD_NULL_FRAME,
+				       WMI_STA_KEEPALIVE_INTERVAL_DEFAULT);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to enable wow keepalive: %d\n", ret);
+		goto cleanup;
+	}
+
+	ret = ath12k_wow_enable(ar);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to start wow: %d\n", ret);
+		goto cleanup;
+	}
+
+	ath12k_hif_irq_disable(ar->ab);
+	ath12k_hif_ce_irq_disable(ar->ab);
+
+	ret = ath12k_hif_suspend(ar->ab);
+	if (ret) {
+		ath12k_warn(ar->ab, "failed to suspend hif: %d\n", ret);
+		goto wakeup;
+	}
+
+	goto exit;
+
+wakeup:
+	ath12k_wow_wakeup(ar);
+
+cleanup:
+	ath12k_wow_cleanup(ar);
+
+exit:
+	mutex_unlock(&ar->conf_mutex);
+	return ret ?
1 : 0; +} + +void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled) +{ + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar = ath12k_ah_to_ar(ah, 0); + + mutex_lock(&ar->conf_mutex); + device_set_wakeup_enable(ar->ab->dev, enabled); + mutex_unlock(&ar->conf_mutex); +} + +int ath12k_wow_op_resume(struct ieee80211_hw *hw) +{ + struct ath12k_hw *ah = ath12k_hw_to_ah(hw); + struct ath12k *ar = ath12k_ah_to_ar(ah, 0); + int ret; + + mutex_lock(&ar->conf_mutex); + + ret = ath12k_hif_resume(ar->ab); + if (ret) { + ath12k_warn(ar->ab, "failed to resume hif: %d\n", ret); + goto exit; + } + + ath12k_hif_ce_irq_enable(ar->ab); + ath12k_hif_irq_enable(ar->ab); + + ret = ath12k_wow_wakeup(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to wakeup from wow: %d\n", ret); + goto exit; + } + + ret = ath12k_wow_nlo_cleanup(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to cleanup nlo: %d\n", ret); + goto exit; + } + + ret = ath12k_wow_clear_hw_filter(ar); + if (ret) { + ath12k_warn(ar->ab, "failed to clear hw filter: %d\n", ret); + goto exit; + } + + ret = ath12k_wow_protocol_offload(ar, false); + if (ret) { + ath12k_warn(ar->ab, "failed to clear wow protocol offload events: %d\n", + ret); + goto exit; + } + + ret = ath12k_wow_set_keepalive(ar, + WMI_STA_KEEPALIVE_METHOD_NULL_FRAME, + WMI_STA_KEEPALIVE_INTERVAL_DISABLE); + if (ret) { + ath12k_warn(ar->ab, "failed to disable wow keepalive: %d\n", ret); + goto exit; + } + +exit: + if (ret) { + switch (ah->state) { + case ATH12K_HW_STATE_ON: + ah->state = ATH12K_HW_STATE_RESTARTING; + ret = 1; + break; + case ATH12K_HW_STATE_OFF: + case ATH12K_HW_STATE_RESTARTING: + case ATH12K_HW_STATE_RESTARTED: + case ATH12K_HW_STATE_WEDGED: + ath12k_warn(ar->ab, "encountered unexpected device state %d on resume, cannot recover\n", + ah->state); + ret = -EIO; + break; + } + } + + mutex_unlock(&ar->conf_mutex); + return ret; +} + +int ath12k_wow_init(struct ath12k *ar) +{ + if (!test_bit(WMI_TLV_SERVICE_WOW, ar->wmi->wmi_ab->svc_map)) + return 0; + + ar->wow.wowlan_support = ath12k_wowlan_support; + + if (ar->ab->wow.wmi_conf_rx_decap_mode == ATH12K_HW_TXRX_NATIVE_WIFI) { + ar->wow.wowlan_support.pattern_max_len -= WOW_MAX_REDUCE; + ar->wow.wowlan_support.max_pkt_offset -= WOW_MAX_REDUCE; + } + + if (test_bit(WMI_TLV_SERVICE_NLO, ar->wmi->wmi_ab->svc_map)) { + ar->wow.wowlan_support.flags |= WIPHY_WOWLAN_NET_DETECT; + ar->wow.wowlan_support.max_nd_match_sets = WMI_PNO_MAX_SUPP_NETWORKS; + } + + ar->wow.max_num_patterns = ATH12K_WOW_PATTERNS; + ar->wow.wowlan_support.n_patterns = ar->wow.max_num_patterns; + ar->ah->hw->wiphy->wowlan = &ar->wow.wowlan_support; + + device_set_wakeup_capable(ar->ab->dev, true); + + return 0; +} diff --git a/drivers/net/wireless/ath/ath12k/wow.h b/drivers/net/wireless/ath/ath12k/wow.h new file mode 100644 index 000000000000..af9be5fadcc3 --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/wow.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: BSD-3-Clause-Clear */ +/* + * Copyright (c) 2020 The Linux Foundation. All rights reserved. + * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved. 
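Before the new wow.h header continues, note the error convention both WoW handlers above rely on: mac80211 treats a return value of 1 from a suspend/resume callback as a request to restart the hardware, while a negative errno fails the operation outright. A minimal sketch, with invented foo_* helpers standing in for driver internals:

int foo_wow_resume(struct ieee80211_hw *hw)
{
	int ret = foo_fast_resume(hw);	/* hypothetical helper */

	if (!ret)
		return 0;	/* resumed in place */

	/* 1 asks mac80211 to reconfigure the device from scratch,
	 * which is why ath12k_wow_op_resume() flips the hw state to
	 * ATH12K_HW_STATE_RESTARTING before returning it */
	return 1;
}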
+ */ + +#ifndef ATH12K_WOW_H +#define ATH12K_WOW_H + +#define ATH12K_WOW_RETRY_NUM 10 +#define ATH12K_WOW_RETRY_WAIT_MS 200 +#define ATH12K_WOW_PATTERNS 22 + +struct ath12k_wow { + u32 max_num_patterns; + struct completion wakeup_completed; + struct wiphy_wowlan_support wowlan_support; +}; + +struct ath12k_pkt_pattern { + u8 pattern[WOW_MAX_PATTERN_SIZE]; + u8 bytemask[WOW_MAX_PATTERN_SIZE]; + int pattern_len; + int pkt_offset; +}; + +struct rfc1042_hdr { + u8 llc_dsap; + u8 llc_ssap; + u8 llc_ctrl; + u8 snap_oui[3]; + __be16 eth_type; +} __packed; + +#ifdef CONFIG_PM + +int ath12k_wow_init(struct ath12k *ar); +int ath12k_wow_op_suspend(struct ieee80211_hw *hw, + struct cfg80211_wowlan *wowlan); +int ath12k_wow_op_resume(struct ieee80211_hw *hw); +void ath12k_wow_op_set_wakeup(struct ieee80211_hw *hw, bool enabled); +int ath12k_wow_enable(struct ath12k *ar); +int ath12k_wow_wakeup(struct ath12k *ar); + +#else + +static inline int ath12k_wow_init(struct ath12k *ar) +{ + return 0; +} + +static inline int ath12k_wow_enable(struct ath12k *ar) +{ + return 0; +} + +static inline int ath12k_wow_wakeup(struct ath12k *ar) +{ + return 0; +} +#endif /* CONFIG_PM */ +#endif /* ATH12K_WOW_H */ diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c index 9f534ed2fbb3..abe41330fb69 100644 --- a/drivers/net/wireless/ath/ath5k/base.c +++ b/drivers/net/wireless/ath/ath5k/base.c @@ -2847,7 +2847,7 @@ static void ath5k_stop_tasklets(struct ath5k_hw *ah) * if another thread does a system call and the thread doing the * stop is preempted). */ -void ath5k_stop(struct ieee80211_hw *hw) +void ath5k_stop(struct ieee80211_hw *hw, bool suspend) { struct ath5k_hw *ah = hw->priv; int ret; diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h index 97469d0fbad7..594e5b945cb7 100644 --- a/drivers/net/wireless/ath/ath5k/base.h +++ b/drivers/net/wireless/ath/ath5k/base.h @@ -92,7 +92,7 @@ void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif); bool ath5k_any_vif_assoc(struct ath5k_hw *ah); int ath5k_start(struct ieee80211_hw *hw); -void ath5k_stop(struct ieee80211_hw *hw); +void ath5k_stop(struct ieee80211_hw *hw, bool suspend); void ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf); int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif); diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c index b389e19381c4..8a03bcc2789e 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c @@ -973,7 +973,7 @@ static int ath9k_htc_start(struct ieee80211_hw *hw) return ret; } -static void ath9k_htc_stop(struct ieee80211_hw *hw) +static void ath9k_htc_stop(struct ieee80211_hw *hw, bool suspend) { struct ath9k_htc_priv *priv = hw->priv; struct ath_hw *ah = priv->ah; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 01173aac3045..b92c89dad8de 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -895,7 +895,7 @@ static void ath9k_pending_key_del(struct ath_softc *sc, u8 keyix) ath_key_delete(common, keyix); } -static void ath9k_stop(struct ieee80211_hw *hw) +static void ath9k_stop(struct ieee80211_hw *hw, bool suspend) { struct ath_softc *sc = hw->priv; struct ath_hw *ah = sc->sc_ah; diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c index 7e7797bf44b7..755c068e4197 100644 --- 
a/drivers/net/wireless/ath/carl9170/main.c +++ b/drivers/net/wireless/ath/carl9170/main.c @@ -439,7 +439,7 @@ static void carl9170_cancel_worker(struct ar9170 *ar) cancel_work_sync(&ar->ampdu_work); } -static void carl9170_op_stop(struct ieee80211_hw *hw) +static void carl9170_op_stop(struct ieee80211_hw *hw, bool suspend) { struct ar9170 *ar = hw->priv; diff --git a/drivers/net/wireless/ath/wcn36xx/main.c b/drivers/net/wireless/ath/wcn36xx/main.c index e760d8002e09..408776562a7e 100644 --- a/drivers/net/wireless/ath/wcn36xx/main.c +++ b/drivers/net/wireless/ath/wcn36xx/main.c @@ -278,7 +278,7 @@ out_err: return ret; } -static void wcn36xx_stop(struct ieee80211_hw *hw) +static void wcn36xx_stop(struct ieee80211_hw *hw, bool suspend) { struct wcn36xx *wcn = hw->priv; diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index ee7d7e9c2718..d5d364683c0e 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -453,16 +453,21 @@ int wil_if_add(struct wil6210_priv *wil) return rc; } - init_dummy_netdev(&wil->napi_ndev); + wil->napi_ndev = alloc_netdev_dummy(0); + if (!wil->napi_ndev) { + wil_err(wil, "failed to allocate dummy netdev"); + rc = -ENOMEM; + goto out_wiphy; + } if (wil->use_enhanced_dma_hw) { - netif_napi_add(&wil->napi_ndev, &wil->napi_rx, + netif_napi_add(wil->napi_ndev, &wil->napi_rx, wil6210_netdev_poll_rx_edma); - netif_napi_add_tx(&wil->napi_ndev, + netif_napi_add_tx(wil->napi_ndev, &wil->napi_tx, wil6210_netdev_poll_tx_edma); } else { - netif_napi_add(&wil->napi_ndev, &wil->napi_rx, + netif_napi_add(wil->napi_ndev, &wil->napi_rx, wil6210_netdev_poll_rx); - netif_napi_add_tx(&wil->napi_ndev, + netif_napi_add_tx(wil->napi_ndev, &wil->napi_tx, wil6210_netdev_poll_tx); } @@ -474,10 +479,12 @@ int wil_if_add(struct wil6210_priv *wil) wiphy_unlock(wiphy); rtnl_unlock(); if (rc < 0) - goto out_wiphy; + goto free_dummy; return 0; +free_dummy: + free_netdev(wil->napi_ndev); out_wiphy: wiphy_unregister(wiphy); return rc; @@ -554,5 +561,7 @@ void wil_if_remove(struct wil6210_priv *wil) netif_napi_del(&wil->napi_tx); netif_napi_del(&wil->napi_rx); + free_netdev(wil->napi_ndev); + wiphy_unregister(wiphy); } diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index 22a6eb3e12b7..9bd1286d2857 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -983,7 +983,7 @@ struct wil6210_priv { spinlock_t eap_lock; /* guarding access to eap rekey fields */ struct napi_struct napi_rx; struct napi_struct napi_tx; - struct net_device napi_ndev; /* dummy net_device serving all VIFs */ + struct net_device *napi_ndev; /* dummy net_device serving all VIFs */ /* DMA related */ struct wil_ring ring_rx; diff --git a/drivers/net/wireless/atmel/at76c50x-usb.c b/drivers/net/wireless/atmel/at76c50x-usb.c index 0b55a272bfd6..504e05ea30f2 100644 --- a/drivers/net/wireless/atmel/at76c50x-usb.c +++ b/drivers/net/wireless/atmel/at76c50x-usb.c @@ -332,7 +332,7 @@ static int at76_dfu_get_status(struct usb_device *udev, ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), DFU_GETSTATUS, USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE, - 0, 0, status, sizeof(struct dfu_status), + 0, 0, status, sizeof(*status), USB_CTRL_GET_TIMEOUT); return ret; } @@ -366,7 +366,7 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size, u32 dfu_timeout = 0; int bsize = 0; int blockno = 0; - struct dfu_status *dfu_stat_buf 
= NULL; + struct dfu_status *dfu_stat_buf; u8 *dfu_state = NULL; u8 *block = NULL; @@ -378,7 +378,7 @@ static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size, return -EINVAL; } - dfu_stat_buf = kmalloc(sizeof(struct dfu_status), GFP_KERNEL); + dfu_stat_buf = kmalloc(sizeof(*dfu_stat_buf), GFP_KERNEL); if (!dfu_stat_buf) { ret = -ENOMEM; goto exit; @@ -721,9 +721,11 @@ static int at76_set_card_command(struct usb_device *udev, u8 cmd, void *buf, int buf_size) { int ret; - struct at76_command *cmd_buf = kmalloc(sizeof(struct at76_command) + - buf_size, GFP_KERNEL); + size_t total_size; + struct at76_command *cmd_buf; + total_size = struct_size(cmd_buf, data, buf_size); + cmd_buf = kmalloc(total_size, GFP_KERNEL); if (!cmd_buf) return -ENOMEM; @@ -732,15 +734,13 @@ static int at76_set_card_command(struct usb_device *udev, u8 cmd, void *buf, cmd_buf->size = cpu_to_le16(buf_size); memcpy(cmd_buf->data, buf, buf_size); - at76_dbg_dump(DBG_CMD, cmd_buf, sizeof(struct at76_command) + buf_size, + at76_dbg_dump(DBG_CMD, cmd_buf, total_size, "issuing command %s (0x%02x)", at76_get_cmd_string(cmd), cmd); ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x0e, USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE, - 0, 0, cmd_buf, - sizeof(struct at76_command) + buf_size, - USB_CTRL_GET_TIMEOUT); + 0, 0, cmd_buf, total_size, USB_CTRL_GET_TIMEOUT); kfree(cmd_buf); return ret; } @@ -931,14 +931,12 @@ static void at76_dump_mib_mac_addr(struct at76_priv *priv) { int i; int ret; - struct mib_mac_addr *m = kmalloc(sizeof(struct mib_mac_addr), - GFP_KERNEL); + struct mib_mac_addr *m = kmalloc(sizeof(*m), GFP_KERNEL); if (!m) return; - ret = at76_get_mib(priv->udev, MIB_MAC_ADDR, m, - sizeof(struct mib_mac_addr)); + ret = at76_get_mib(priv->udev, MIB_MAC_ADDR, m, sizeof(*m)); if (ret < 0) { wiphy_err(priv->hw->wiphy, "at76_get_mib (MAC_ADDR) failed: %d\n", ret); @@ -961,13 +959,12 @@ static void at76_dump_mib_mac_wep(struct at76_priv *priv) int i; int ret; int key_len; - struct mib_mac_wep *m = kmalloc(sizeof(struct mib_mac_wep), GFP_KERNEL); + struct mib_mac_wep *m = kmalloc(sizeof(*m), GFP_KERNEL); if (!m) return; - ret = at76_get_mib(priv->udev, MIB_MAC_WEP, m, - sizeof(struct mib_mac_wep)); + ret = at76_get_mib(priv->udev, MIB_MAC_WEP, m, sizeof(*m)); if (ret < 0) { wiphy_err(priv->hw->wiphy, "at76_get_mib (MAC_WEP) failed: %d\n", ret); @@ -997,14 +994,12 @@ exit: static void at76_dump_mib_mac_mgmt(struct at76_priv *priv) { int ret; - struct mib_mac_mgmt *m = kmalloc(sizeof(struct mib_mac_mgmt), - GFP_KERNEL); + struct mib_mac_mgmt *m = kmalloc(sizeof(*m), GFP_KERNEL); if (!m) return; - ret = at76_get_mib(priv->udev, MIB_MAC_MGMT, m, - sizeof(struct mib_mac_mgmt)); + ret = at76_get_mib(priv->udev, MIB_MAC_MGMT, m, sizeof(*m)); if (ret < 0) { wiphy_err(priv->hw->wiphy, "at76_get_mib (MAC_MGMT) failed: %d\n", ret); @@ -1035,12 +1030,12 @@ exit: static void at76_dump_mib_mac(struct at76_priv *priv) { int ret; - struct mib_mac *m = kmalloc(sizeof(struct mib_mac), GFP_KERNEL); + struct mib_mac *m = kmalloc(sizeof(*m), GFP_KERNEL); if (!m) return; - ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac)); + ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(*m)); if (ret < 0) { wiphy_err(priv->hw->wiphy, "at76_get_mib (MAC) failed: %d\n", ret); @@ -1072,12 +1067,12 @@ exit: static void at76_dump_mib_phy(struct at76_priv *priv) { int ret; - struct mib_phy *m = kmalloc(sizeof(struct mib_phy), GFP_KERNEL); + struct mib_phy *m = kmalloc(sizeof(*m), GFP_KERNEL); if (!m) return; - ret = 
at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy)); + ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(*m)); if (ret < 0) { wiphy_err(priv->hw->wiphy, "at76_get_mib (PHY) failed: %d\n", ret); @@ -1130,13 +1125,12 @@ exit: static void at76_dump_mib_mdomain(struct at76_priv *priv) { int ret; - struct mib_mdomain *m = kmalloc(sizeof(struct mib_mdomain), GFP_KERNEL); + struct mib_mdomain *m = kmalloc(sizeof(*m), GFP_KERNEL); if (!m) return; - ret = at76_get_mib(priv->udev, MIB_MDOMAIN, m, - sizeof(struct mib_mdomain)); + ret = at76_get_mib(priv->udev, MIB_MDOMAIN, m, sizeof(*m)); if (ret < 0) { wiphy_err(priv->hw->wiphy, "at76_get_mib (MDOMAIN) failed: %d\n", ret); @@ -1375,7 +1369,7 @@ static int at76_startup_device(struct at76_priv *priv) priv->scan_min_time, priv->scan_max_time, priv->scan_mode == SCAN_TYPE_ACTIVE ? "active" : "passive"); - memset(ccfg, 0, sizeof(struct at76_card_config)); + memset(ccfg, 0, sizeof(*ccfg)); ccfg->promiscuous_mode = 0; ccfg->short_retry_limit = priv->short_retry_limit; @@ -1411,7 +1405,7 @@ static int at76_startup_device(struct at76_priv *priv) ccfg->beacon_period = cpu_to_le16(priv->beacon_period); ret = at76_set_card_command(priv->udev, CMD_STARTUP, &priv->card_config, - sizeof(struct at76_card_config)); + sizeof(*ccfg)); if (ret < 0) { wiphy_err(priv->hw->wiphy, "at76_set_card_command failed: %d\n", ret); @@ -1856,7 +1850,7 @@ error: return 0; } -static void at76_mac80211_stop(struct ieee80211_hw *hw) +static void at76_mac80211_stop(struct ieee80211_hw *hw, bool suspend) { struct at76_priv *priv = hw->priv; @@ -2443,7 +2437,7 @@ static int at76_probe(struct usb_interface *interface, struct usb_device *udev; int op_mode; int need_ext_fw = 0; - struct mib_fw_version *fwv = NULL; + struct mib_fw_version *fwv; int board_type = (int)id->driver_info; udev = usb_get_dev(interface_to_usbdev(interface)); @@ -2531,7 +2525,7 @@ static int at76_probe(struct usb_interface *interface, usb_set_intfdata(interface, priv); - memcpy(&priv->fw_version, fwv, sizeof(struct mib_fw_version)); + memcpy(&priv->fw_version, fwv, sizeof(*fwv)); priv->board_type = board_type; ret = at76_init_new_device(priv, interface); diff --git a/drivers/net/wireless/atmel/at76c50x-usb.h b/drivers/net/wireless/atmel/at76c50x-usb.h index 746e64dfd8aa..843146a0de64 100644 --- a/drivers/net/wireless/atmel/at76c50x-usb.h +++ b/drivers/net/wireless/atmel/at76c50x-usb.h @@ -151,7 +151,7 @@ struct at76_command { u8 cmd; u8 reserved; __le16 size; - u8 data[]; + u8 data[] __counted_by_le(size); } __packed; /* Length of Atmel-specific Rx header before 802.11 frame */ diff --git a/drivers/net/wireless/broadcom/b43/main.c b/drivers/net/wireless/broadcom/b43/main.c index badb2f494035..8e56dcf9309d 100644 --- a/drivers/net/wireless/broadcom/b43/main.c +++ b/drivers/net/wireless/broadcom/b43/main.c @@ -5078,7 +5078,7 @@ static int b43_op_start(struct ieee80211_hw *hw) return err; } -static void b43_op_stop(struct ieee80211_hw *hw) +static void b43_op_stop(struct ieee80211_hw *hw, bool suspend) { struct b43_wl *wl = hw_to_b43_wl(hw); struct b43_wldev *dev = wl->current_dev; diff --git a/drivers/net/wireless/broadcom/b43legacy/main.c b/drivers/net/wireless/broadcom/b43legacy/main.c index 18eb610f600a..441d6440671b 100644 --- a/drivers/net/wireless/broadcom/b43legacy/main.c +++ b/drivers/net/wireless/broadcom/b43legacy/main.c @@ -3485,7 +3485,7 @@ out_mutex_unlock: return err; } -static void b43legacy_op_stop(struct ieee80211_hw *hw) +static void b43legacy_op_stop(struct ieee80211_hw *hw, bool suspend) { 
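All of the stop-callback churn in this series follows from a single mac80211 interface change: ieee80211_ops::stop grows a suspend flag. A minimal sketch of the new shape, using an invented foo driver (foo_priv and foo_halt_hardware() are illustrative only):

static void foo_op_stop(struct ieee80211_hw *hw, bool suspend)
{
	struct foo_priv *priv = hw->priv;

	/* 'suspend' distinguishes a system-suspend stop from a plain
	 * interface-down, so a driver could keep firmware loaded for
	 * a faster resume; the conversions in this series ignore it */
	foo_halt_hardware(priv);
}

static const struct ieee80211_ops foo_ops = {
	.stop = foo_op_stop,
	/* ... */
};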
struct b43legacy_wl *wl = hw_to_b43legacy_wl(hw); struct b43legacy_wldev *dev = wl->current_dev; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 13391c2d82aa..d35262335eaf 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -1061,10 +1061,10 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func, if (func->num != 2) return -ENODEV; - bus_if = kzalloc(sizeof(struct brcmf_bus), GFP_KERNEL); + bus_if = kzalloc(sizeof(*bus_if), GFP_KERNEL); if (!bus_if) return -ENOMEM; - sdiodev = kzalloc(sizeof(struct brcmf_sdio_dev), GFP_KERNEL); + sdiodev = kzalloc(sizeof(*sdiodev), GFP_KERNEL); if (!sdiodev) { kfree(bus_if); return -ENOMEM; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c index 7ea2631b8069..0c3d119d1219 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/btcoex.c @@ -358,10 +358,10 @@ idle: */ int brcmf_btcoex_attach(struct brcmf_cfg80211_info *cfg) { - struct brcmf_btcoex_info *btci = NULL; + struct brcmf_btcoex_info *btci; brcmf_dbg(TRACE, "enter\n"); - btci = kmalloc(sizeof(struct brcmf_btcoex_info), GFP_KERNEL); + btci = kmalloc(sizeof(*btci), GFP_KERNEL); if (!btci) return -ENOMEM; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 5fe0e671ecb3..1585a5653ee4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -4071,7 +4071,7 @@ static void brcmf_report_wowl_wakeind(struct wiphy *wiphy, struct brcmf_if *ifp) struct cfg80211_wowlan_wakeup *wakeup; u32 wakeind; s32 err; - int timeout; + long time_left; err = brcmf_fil_iovar_data_get(ifp, "wowl_wakeind", &wake_ind_le, sizeof(wake_ind_le)); @@ -4113,10 +4113,10 @@ static void brcmf_report_wowl_wakeind(struct wiphy *wiphy, struct brcmf_if *ifp) } if (wakeind & BRCMF_WOWL_PFN_FOUND) { brcmf_dbg(INFO, "WOWL Wake indicator: BRCMF_WOWL_PFN_FOUND\n"); - timeout = wait_event_timeout(cfg->wowl.nd_data_wait, - cfg->wowl.nd_data_completed, - BRCMF_ND_INFO_TIMEOUT); - if (!timeout) + time_left = wait_event_timeout(cfg->wowl.nd_data_wait, + cfg->wowl.nd_data_completed, + BRCMF_ND_INFO_TIMEOUT); + if (!time_left) bphy_err(drvr, "No result for wowl net detect\n"); else wakeup_data.net_detect = cfg->wowl.nd_info; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c index e406e11481a6..fe4f65756105 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c @@ -70,6 +70,7 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type, { struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio; struct device_node *root, *np = dev->of_node; + struct of_phandle_args oirq; const char *prop; int irq; int err; @@ -129,10 +130,10 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type, sdio->drive_strength = val; /* make sure there are interrupts defined in the node */ - if (!of_property_present(np, "interrupts")) + if (of_irq_parse_one(np, 0, &oirq)) return; - irq = irq_of_parse_and_map(np, 0); + irq = irq_create_of_mapping(&oirq); if (!irq) { brcmf_err("interrupt could not be mapped\n"); return; diff 
--git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 06698a714b52..ce482a3877e9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -313,11 +313,6 @@ struct brcmf_pcie_shared_info { u8 version; }; -struct brcmf_pcie_core_info { - u32 base; - u32 wrapbase; -}; - #define BRCMF_OTP_MAX_PARAM_LEN 16 struct brcmf_otp_params { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index 6b38d9de71af..1461dc453ac2 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -4450,7 +4450,7 @@ struct brcmf_sdio *brcmf_sdio_probe(struct brcmf_sdio_dev *sdiodev) brcmf_dbg(TRACE, "Enter\n"); /* Allocate private bus interface state */ - bus = kzalloc(sizeof(struct brcmf_sdio), GFP_ATOMIC); + bus = kzalloc(sizeof(*bus), GFP_ATOMIC); if (!bus) goto fail; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c index 9a105e6debe1..8afbf529c745 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/usb.c @@ -1236,8 +1236,8 @@ brcmf_usb_prepare_fw_request(struct brcmf_usbdev_info *devinfo) static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo, enum brcmf_fwvendor fwvid) { - struct brcmf_bus *bus = NULL; - struct brcmf_usbdev *bus_pub = NULL; + struct brcmf_bus *bus; + struct brcmf_usbdev *bus_pub; struct device *dev = devinfo->dev; struct brcmf_fw_request *fwreq; int ret; @@ -1247,7 +1247,7 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo, if (!bus_pub) return -ENODEV; - bus = kzalloc(sizeof(struct brcmf_bus), GFP_ATOMIC); + bus = kzalloc(sizeof(*bus), GFP_ATOMIC); if (!bus) { ret = -ENOMEM; goto fail; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c index 2084b506a450..50d817485cf9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/aiutils.c @@ -512,7 +512,7 @@ ai_attach(struct bcma_bus *pbus) struct si_info *sii; /* alloc struct si_info */ - sii = kzalloc(sizeof(struct si_info), GFP_ATOMIC); + sii = kzalloc(sizeof(*sii), GFP_ATOMIC); if (sii == NULL) return NULL; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c index c3376f887114..33d17b779201 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/ampdu.c @@ -219,7 +219,7 @@ struct ampdu_info *brcms_c_ampdu_attach(struct brcms_c_info *wlc) struct ampdu_info *ampdu; int i; - ampdu = kzalloc(sizeof(struct ampdu_info), GFP_ATOMIC); + ampdu = kzalloc(sizeof(*ampdu), GFP_ATOMIC); if (!ampdu) return NULL; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c index 54c616919590..f411bc6d795d 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/antsel.c @@ -111,7 +111,7 @@ struct antsel_info *brcms_c_antsel_attach(struct brcms_c_info *wlc) struct antsel_info *asi; struct ssb_sprom *sprom = &wlc->hw->d11core->bus->sprom; - asi = kzalloc(sizeof(struct 
antsel_info), GFP_ATOMIC); + asi = kzalloc(sizeof(*asi), GFP_ATOMIC); if (!asi) return NULL; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c index f6962e558d7c..d1b9a18d0374 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/channel.c @@ -331,7 +331,7 @@ struct brcms_cm_info *brcms_c_channel_mgr_attach(struct brcms_c_info *wlc) const char *ccode = sprom->alpha2; int ccode_len = sizeof(sprom->alpha2); - wlc_cm = kzalloc(sizeof(struct brcms_cm_info), GFP_ATOMIC); + wlc_cm = kzalloc(sizeof(*wlc_cm), GFP_ATOMIC); if (wlc_cm == NULL) return NULL; wlc_cm->pub = pub; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c index 3d5c1ef8f7f2..bd480239368a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/dma.c @@ -558,7 +558,7 @@ struct dma_pub *dma_attach(char *name, struct brcms_c_info *wlc, struct si_info *sii = container_of(sih, struct si_info, pub); /* allocate private info structure */ - di = kzalloc(sizeof(struct dma_info), GFP_ATOMIC); + di = kzalloc(sizeof(*di), GFP_ATOMIC); if (di == NULL) return NULL; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c index 92860dc0a92e..d86f28b8bc60 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c @@ -457,7 +457,7 @@ static int brcms_ops_start(struct ieee80211_hw *hw) return err; } -static void brcms_ops_stop(struct ieee80211_hw *hw) +static void brcms_ops_stop(struct ieee80211_hw *hw, bool suspend) { struct brcms_info *wl = hw->priv; int status; @@ -1090,6 +1090,7 @@ static int ieee_hw_init(struct ieee80211_hw *hw) ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); + ieee80211_hw_set(hw, MFP_CAPABLE); hw->extra_tx_headroom = brcms_c_get_header_len(); hw->queues = N_TX_QUEUES; @@ -1496,7 +1497,7 @@ struct brcms_timer *brcms_init_timer(struct brcms_info *wl, { struct brcms_timer *t; - t = kzalloc(sizeof(struct brcms_timer), GFP_ATOMIC); + t = kzalloc(sizeof(*t), GFP_ATOMIC); if (!t) return NULL; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c index 34460b5815d0..2738d4d6c60a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/main.c @@ -234,12 +234,6 @@ /* max # tx status to process in wlc_txstatus() */ #define TXSBND 8 -/* brcmu_format_flags() bit description structure */ -struct brcms_c_bit_desc { - u32 bit; - const char *name; -}; - /* * The following table lists the buffer memory allocated to xmt fifos in HW. 
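The kzalloc(sizeof(*p)) conversions in these brcmfmac/brcmsmac hunks, and the struct_size()/__counted_by_le() change to at76_command earlier, apply the same hygiene to allocation sizes: derive the size from the object, not from a separately spelled type name. A sketch combining both idioms (foo_cmd and foo_cmd_alloc() are invented):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/string.h>

struct foo_cmd {
	u8 cmd;
	u8 reserved;
	__le16 size;
	u8 data[] __counted_by_le(size);
} __packed;

static struct foo_cmd *foo_cmd_alloc(const void *payload, u16 payload_len)
{
	/* struct_size() = header plus payload_len trailing bytes,
	 * with integer-overflow checking folded in */
	struct foo_cmd *cmd = kzalloc(struct_size(cmd, data, payload_len),
				      GFP_KERNEL);

	if (!cmd)
		return NULL;

	/* write the counter before touching data[] so fortified
	 * accesses can bound the flexible array by the le16 field */
	cmd->size = cpu_to_le16(payload_len);
	memcpy(cmd->data, payload, payload_len);
	return cmd;
}

For plain structs the one-liner is simply p = kzalloc(sizeof(*p), GFP_KERNEL): if p is ever retyped, sizeof(*p) follows automatically, whereas sizeof(struct foo) silently goes stale.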
* the size is in units of 256bytes(one block), total size is HW dependent @@ -463,11 +457,11 @@ static struct brcms_bss_cfg *brcms_c_bsscfg_malloc(uint unit) { struct brcms_bss_cfg *cfg; - cfg = kzalloc(sizeof(struct brcms_bss_cfg), GFP_ATOMIC); + cfg = kzalloc(sizeof(*cfg), GFP_ATOMIC); if (cfg == NULL) goto fail; - cfg->current_bss = kzalloc(sizeof(struct brcms_bss_info), GFP_ATOMIC); + cfg->current_bss = kzalloc(sizeof(*cfg->current_bss), GFP_ATOMIC); if (cfg->current_bss == NULL) goto fail; @@ -483,14 +477,14 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid) { struct brcms_c_info *wlc; - wlc = kzalloc(sizeof(struct brcms_c_info), GFP_ATOMIC); + wlc = kzalloc(sizeof(*wlc), GFP_ATOMIC); if (wlc == NULL) { *err = 1002; goto fail; } /* allocate struct brcms_c_pub state structure */ - wlc->pub = kzalloc(sizeof(struct brcms_pub), GFP_ATOMIC); + wlc->pub = kzalloc(sizeof(*wlc->pub), GFP_ATOMIC); if (wlc->pub == NULL) { *err = 1003; goto fail; @@ -499,7 +493,7 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid) /* allocate struct brcms_hardware state structure */ - wlc->hw = kzalloc(sizeof(struct brcms_hardware), GFP_ATOMIC); + wlc->hw = kzalloc(sizeof(*wlc->hw), GFP_ATOMIC); if (wlc->hw == NULL) { *err = 1005; goto fail; @@ -528,7 +522,7 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid) goto fail; } - wlc->default_bss = kzalloc(sizeof(struct brcms_bss_info), GFP_ATOMIC); + wlc->default_bss = kzalloc(sizeof(*wlc->default_bss), GFP_ATOMIC); if (wlc->default_bss == NULL) { *err = 1010; goto fail; @@ -540,21 +534,20 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid) goto fail; } - wlc->protection = kzalloc(sizeof(struct brcms_protection), - GFP_ATOMIC); + wlc->protection = kzalloc(sizeof(*wlc->protection), GFP_ATOMIC); if (wlc->protection == NULL) { *err = 1016; goto fail; } - wlc->stf = kzalloc(sizeof(struct brcms_stf), GFP_ATOMIC); + wlc->stf = kzalloc(sizeof(*wlc->stf), GFP_ATOMIC); if (wlc->stf == NULL) { *err = 1017; goto fail; } wlc->bandstate[0] = - kcalloc(MAXBANDS, sizeof(struct brcms_band), GFP_ATOMIC); + kcalloc(MAXBANDS, sizeof(*wlc->bandstate[0]), GFP_ATOMIC); if (wlc->bandstate[0] == NULL) { *err = 1025; goto fail; @@ -567,14 +560,14 @@ brcms_c_attach_malloc(uint unit, uint *err, uint devid) + (sizeof(struct brcms_band)*i)); } - wlc->corestate = kzalloc(sizeof(struct brcms_core), GFP_ATOMIC); + wlc->corestate = kzalloc(sizeof(*wlc->corestate), GFP_ATOMIC); if (wlc->corestate == NULL) { *err = 1026; goto fail; } wlc->corestate->macstat_snapshot = - kzalloc(sizeof(struct macstat), GFP_ATOMIC); + kzalloc(sizeof(*wlc->corestate->macstat_snapshot), GFP_ATOMIC); if (wlc->corestate->macstat_snapshot == NULL) { *err = 1027; goto fail; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c index a27d6f0b8819..c3d7aa570b4e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_cmn.c @@ -355,7 +355,7 @@ struct shared_phy *wlc_phy_shared_attach(struct shared_phy_params *shp) { struct shared_phy *sh; - sh = kzalloc(sizeof(struct shared_phy), GFP_ATOMIC); + sh = kzalloc(sizeof(*sh), GFP_ATOMIC); if (sh == NULL) return NULL; @@ -442,7 +442,7 @@ wlc_phy_attach(struct shared_phy *sh, struct bcma_device *d11core, return &pi->pubpi_ro; } - pi = kzalloc(sizeof(struct brcms_phy), GFP_ATOMIC); + pi = kzalloc(sizeof(*pi), GFP_ATOMIC); if (pi == NULL) return NULL; pi->wiphy = wiphy; diff --git 
a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c index aae2cf95fe95..d0faba240561 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c @@ -2567,7 +2567,6 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi, struct lcnphy_txgains cal_gains, temp_gains; u16 hash; - u8 band_idx; int j; u16 ncorr_override[5]; u16 syst_coeffs[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, @@ -2599,6 +2598,9 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi, u16 *values_to_save; struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy; + if (WARN_ON(CHSPEC_IS5G(pi->radio_chanspec))) + return; + values_to_save = kmalloc_array(20, sizeof(u16), GFP_ATOMIC); if (NULL == values_to_save) return; @@ -2662,20 +2664,18 @@ wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi, hash = (target_gains->gm_gain << 8) | (target_gains->pga_gain << 4) | (target_gains->pad_gain); - band_idx = (CHSPEC_IS5G(pi->radio_chanspec) ? 1 : 0); - cal_gains = *target_gains; memset(ncorr_override, 0, sizeof(ncorr_override)); - for (j = 0; j < iqcal_gainparams_numgains_lcnphy[band_idx]; j++) { - if (hash == tbl_iqcal_gainparams_lcnphy[band_idx][j][0]) { + for (j = 0; j < iqcal_gainparams_numgains_lcnphy[0]; j++) { + if (hash == tbl_iqcal_gainparams_lcnphy[0][j][0]) { cal_gains.gm_gain = - tbl_iqcal_gainparams_lcnphy[band_idx][j][1]; + tbl_iqcal_gainparams_lcnphy[0][j][1]; cal_gains.pga_gain = - tbl_iqcal_gainparams_lcnphy[band_idx][j][2]; + tbl_iqcal_gainparams_lcnphy[0][j][2]; cal_gains.pad_gain = - tbl_iqcal_gainparams_lcnphy[band_idx][j][3]; + tbl_iqcal_gainparams_lcnphy[0][j][3]; memcpy(ncorr_override, - &tbl_iqcal_gainparams_lcnphy[band_idx][j][3], + &tbl_iqcal_gainparams_lcnphy[0][j][3], sizeof(ncorr_override)); break; } @@ -4968,11 +4968,11 @@ bool wlc_phy_attach_lcnphy(struct brcms_phy *pi) { struct brcms_phy_lcnphy *pi_lcn; - pi->u.pi_lcnphy = kzalloc(sizeof(struct brcms_phy_lcnphy), GFP_ATOMIC); - if (pi->u.pi_lcnphy == NULL) + pi_lcn = kzalloc(sizeof(*pi_lcn), GFP_ATOMIC); + if (!pi_lcn) return false; - pi_lcn = pi->u.pi_lcnphy; + pi->u.pi_lcnphy = pi_lcn; if (0 == (pi->sh->boardflags & BFL_NOPA)) { pi->hwpwrctrl = true; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c index b72381791536..8b852581c4e4 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy_shim.c @@ -38,9 +38,9 @@ struct phy_shim_info { struct phy_shim_info *wlc_phy_shim_attach(struct brcms_hardware *wlc_hw, struct brcms_info *wl, struct brcms_c_info *wlc) { - struct phy_shim_info *physhim = NULL; + struct phy_shim_info *physhim; - physhim = kzalloc(sizeof(struct phy_shim_info), GFP_ATOMIC); + physhim = kzalloc(sizeof(*physhim), GFP_ATOMIC); if (!physhim) return NULL; diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c index 4aec1fce1ae2..e22a6732a4c3 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw_tx.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_tx.c @@ -180,11 +180,10 @@ static struct libipw_txb *libipw_alloc_txb(int nr_frags, int txb_size, struct libipw_txb *txb; int i; - txb = kmalloc(struct_size(txb, fragments, nr_frags), gfp_mask); + txb = kzalloc(struct_size(txb, fragments, nr_frags), gfp_mask); if (!txb) return NULL; - memset(txb, 0, sizeof(struct libipw_txb)); txb->nr_frags = 
nr_frags; txb->frag_size = txb_size; diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 075b705a8d7b..74fc76c00ebc 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -2813,7 +2813,7 @@ out_release_irq: } static void -il3945_mac_stop(struct ieee80211_hw *hw) +il3945_mac_stop(struct ieee80211_hw *hw, bool suspend) { struct il_priv *il = hw->priv; diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c index a773939b8c2a..1fab7849f56d 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945.c +++ b/drivers/net/wireless/intel/iwlegacy/3945.c @@ -566,7 +566,7 @@ il3945_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) if (!(rx_end->status & RX_RES_STATUS_NO_CRC32_ERROR) || !(rx_end->status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { D_RX("Bad CRC or FIFO: 0x%08X.\n", rx_end->status); - return; + rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; } /* Convert 3945's rssi indicator to dBm */ diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 4beb7be6d51d..1600c344edbb 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -664,7 +664,7 @@ il4965_hdl_rx(struct il_priv *il, struct il_rx_buf *rxb) if (!(rx_pkt_status & RX_RES_STATUS_NO_CRC32_ERROR) || !(rx_pkt_status & RX_RES_STATUS_NO_RXE_OVERFLOW)) { D_RX("Bad CRC or FIFO: 0x%08X.\n", le32_to_cpu(rx_pkt_status)); - return; + rx_status.flag |= RX_FLAG_FAILED_FCS_CRC; } /* This will be used in several places later */ @@ -5820,7 +5820,7 @@ out: } void -il4965_mac_stop(struct ieee80211_hw *hw) +il4965_mac_stop(struct ieee80211_hw *hw, bool suspend) { struct il_priv *il = hw->priv; diff --git a/drivers/net/wireless/intel/iwlegacy/4965.h b/drivers/net/wireless/intel/iwlegacy/4965.h index 863e3792d153..951f2245fefb 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965.h +++ b/drivers/net/wireless/intel/iwlegacy/4965.h @@ -151,7 +151,7 @@ void il4965_mac_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb); int il4965_mac_start(struct ieee80211_hw *hw); -void il4965_mac_stop(struct ieee80211_hw *hw); +void il4965_mac_stop(struct ieee80211_hw *hw, bool suspend); void il4965_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 multicast); diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index 8bb94a4c12cd..64c123314245 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -4,7 +4,7 @@ obj-$(CONFIG_IWLWIFI) += iwlwifi.o iwlwifi-objs += iwl-io.o iwlwifi-objs += iwl-drv.o iwlwifi-objs += iwl-debug.o -iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o +iwlwifi-objs += iwl-nvm-utils.o iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o @@ -14,7 +14,6 @@ iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o iwlwifi-$(CONFIG_IWLMVM) += cfg/ax210.o cfg/bz.o cfg/sc.o iwlwifi-objs += iwl-dbg-tlv.o iwlwifi-objs += iwl-trans.o -iwlwifi-objs += queue/tx.o iwlwifi-objs += fw/img.o fw/notif-wait.o fw/rs.o iwlwifi-objs += fw/dbg.o fw/pnvm.o fw/dump.o diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index 
d594694206b3..2e2fcb3807ef 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
  * Copyright (C) 2015-2017 Intel Deutschland GmbH
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
  */
 #include
 #include
@@ -13,7 +13,7 @@
 #define IWL_22000_UCODE_API_MAX	77
 
 /* Lowest firmware API version supported */
-#define IWL_22000_UCODE_API_MIN	50
+#define IWL_22000_UCODE_API_MIN	77
 
 /* NVM versions */
 #define IWL_22000_NVM_VERSION		0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
index 25952d0bea99..975e8aed1526 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/ax210.c
@@ -13,7 +13,7 @@
 #define IWL_AX210_UCODE_API_MAX	89
 
 /* Lowest firmware API version supported */
-#define IWL_AX210_UCODE_API_MIN	59
+#define IWL_AX210_UCODE_API_MIN	77
 
 /* NVM versions */
 #define IWL_AX210_NVM_VERSION	0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
index bc98b87cf2a1..3b6b8b410be5 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/bz.c
@@ -10,10 +10,10 @@
 #include "fw/api/txq.h"
 
 /* Highest firmware API version supported */
-#define IWL_BZ_UCODE_API_MAX	90
+#define IWL_BZ_UCODE_API_MAX	92
 
 /* Lowest firmware API version supported */
-#define IWL_BZ_UCODE_API_MIN	80
+#define IWL_BZ_UCODE_API_MIN	90
 
 /* NVM versions */
 #define IWL_BZ_NVM_VERSION	0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
index 9b79279fd76c..4ccb0b7bdc20 100644
--- a/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
+++ b/drivers/net/wireless/intel/iwlwifi/cfg/sc.c
@@ -10,10 +10,10 @@
 #include "fw/api/txq.h"
 
 /* Highest firmware API version supported */
-#define IWL_SC_UCODE_API_MAX	90
+#define IWL_SC_UCODE_API_MAX	92
 
 /* Lowest firmware API version supported */
-#define IWL_SC_UCODE_API_MIN	82
+#define IWL_SC_UCODE_API_MIN	90
 
 /* NVM versions */
 #define IWL_SC_NVM_VERSION	0x0a1d
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
index 6109d64006db..abcf8aeb010d 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/Makefile
@@ -2,7 +2,7 @@
 # DVM
 obj-$(CONFIG_IWLDVM)	+= iwldvm.o
 iwldvm-objs		+= main.o rs.o mac80211.o ucode.o tx.o
-iwldvm-objs		+= lib.o calib.o tt.o sta.o rx.o
+iwldvm-objs		+= lib.o calib.o tt.o sta.o rx.o eeprom.o
 iwldvm-objs		+= power.o
 iwldvm-objs		+= scan.o
diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
index fefaa414272b..a13add556a7b 100644
--- a/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
+++ b/drivers/net/wireless/intel/iwlwifi/dvm/agn.h
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
 /*
- * Copyright (C) 2005-2014, 2021 Intel Corporation
+ * Copyright (C) 2005-2014, 2021, 2024 Intel Corporation
  */
 #ifndef __iwl_agn_h__
 #define __iwl_agn_h__
@@ -385,6 +385,25 @@ static inline void iwl_dvm_set_pmi(struct iwl_priv *priv, bool state)
 	iwl_trans_set_pmi(priv->trans, state);
 }
 
+/**
+ * iwl_parse_eeprom_data - parse EEPROM data and return values
+ *
+ * @trans: transport we're parsing for, for debug only
+ * @cfg: device configuration for parsing and overrides
+ * @eeprom: the EEPROM data
+ * @eeprom_size:
length of the EEPROM data + * + * This function parses all EEPROM values we need and then + * returns a (newly allocated) struct containing all the + * relevant values for driver use. The struct must be freed + * later with iwl_free_nvm_data(). + */ +struct iwl_nvm_data * +iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, + const u8 *eeprom, size_t eeprom_size); + +int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size); + #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_dbgfs_register(struct iwl_priv *priv, struct dentry *dbgfs_dir); #else diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h index 04864d3fda63..3f49c0bccb28 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/commands.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2023 Intel Corporation + * Copyright (C) 2005-2014, 2023-2024 Intel Corporation */ /* * Please use this file (commands.h) only for uCode API definitions. @@ -177,7 +177,7 @@ enum { * *****************************************************************************/ -/** +/* * iwlagn rate_n_flags bit fields * * rate_n_flags format is used in following iwlagn commands: @@ -251,7 +251,7 @@ enum { #define RATE_MCS_SGI_POS 13 #define RATE_MCS_SGI_MSK 0x2000 -/** +/* * rate_n_flags Tx antenna masks * bit14:16 */ @@ -2767,7 +2767,7 @@ struct iwl_missed_beacon_notif { * *****************************************************************************/ -/** +/* * SENSITIVITY_CMD = 0xa8 (command, has simple generic response) * * This command sets up the Rx signal detector for a sensitivity level that diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h index 25283e4b849f..4ac8b862ad41 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/dev.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/dev.h @@ -19,7 +19,7 @@ #include #include "fw/img.h" -#include "iwl-eeprom-parse.h" +#include "iwl-nvm-utils.h" #include "iwl-csr.h" #include "iwl-debug.h" #include "iwl-agn-hw.h" diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c index 39e40901fa46..48a8349680fc 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/devices.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/devices.c @@ -12,7 +12,7 @@ */ #include "iwl-io.h" #include "iwl-prph.h" -#include "iwl-eeprom-parse.h" +#include "iwl-nvm-utils.h" #include "agn.h" #include "dev.h" diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c similarity index 69% rename from drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c rename to drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c index 2b290fab1ef2..931aa3f5798d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/eeprom.c @@ -1,16 +1,18 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation - * Copyright (C) 2015 Intel Mobile Communications GmbH + * Copyright (C) 2005-2014, 2018-2019, 2021, 2024 Intel Corporation */ #include #include #include -#include "iwl-drv.h" -#include "iwl-modparams.h" -#include "iwl-eeprom-parse.h" -#if IS_ENABLED(CONFIG_IWLDVM) +#include "iwl-drv.h" +#include "iwl-debug.h" +#include "iwl-io.h" +#include "iwl-prph.h" +#include "iwl-csr.h" +#include "agn.h" + /* EEPROM 
offset definitions */ /* indirect access definitions */ @@ -79,7 +81,6 @@ enum eeprom_sku_bits { #define EEPROM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */ #define EEPROM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */ - /* * EEPROM bands * These are the channel numbers from each band in the order @@ -257,7 +258,6 @@ struct iwl_eeprom_channel { s8 max_power_avg; } __packed; - enum iwl_eeprom_enhanced_txpwr_flags { IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0), IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1), @@ -648,114 +648,385 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg, return n_channels; } -#endif +/* + * EEPROM access time values: + * + * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG. + * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1). + * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec. + * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG. + */ +#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ -int iwl_init_sband_channels(struct iwl_nvm_data *data, - struct ieee80211_supported_band *sband, - int n_channels, enum nl80211_band band) +/* + * The device's EEPROM semaphore prevents conflicts between driver and uCode + * when accessing the EEPROM; each access is a series of pulses to/from the + * EEPROM chip, not a single event, so even reads could conflict if they + * weren't arbitrated by the semaphore. + */ +#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */ +#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ + + +static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans) { - struct ieee80211_channel *chan = &data->channels[0]; - int n = 0, idx = 0; + u16 count; + int ret; - while (idx < n_channels && chan->band != band) - chan = &data->channels[++idx]; + for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) { + /* Request semaphore */ + iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); - sband->channels = &data->channels[idx]; - - while (idx < n_channels && chan->band == band) { - chan = &data->channels[++idx]; - n++; + /* See if we got it */ + ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, + IWL_EEPROM_SEM_TIMEOUT); + if (ret >= 0) { + IWL_DEBUG_EEPROM(trans->dev, + "Acquired semaphore after %d tries.\n", + count+1); + return ret; + } } - sband->n_channels = n; - - return n; + return ret; } -#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ -#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ - -void iwl_init_ht_hw_capab(struct iwl_trans *trans, - struct iwl_nvm_data *data, - struct ieee80211_sta_ht_cap *ht_info, - enum nl80211_band band, - u8 tx_chains, u8 rx_chains) +static void iwl_eeprom_release_semaphore(struct iwl_trans *trans) { - const struct iwl_cfg *cfg = trans->cfg; - int max_bit_rate = 0; + iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG, + CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); +} - tx_chains = hweight8(tx_chains); - if (cfg->rx_with_siso_diversity) - rx_chains = 1; - else - rx_chains = hweight8(rx_chains); +static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp) +{ + u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; - if (!(data->sku_cap_11n_enable) || - (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) || - !cfg->ht_params) { - ht_info->ht_supported = false; - return; - } + IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp); - if 
(data->sku_cap_mimo_disabled) - rx_chains = 1; - - ht_info->ht_supported = true; - ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40; - - if (cfg->ht_params->stbc) { - ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); - - if (tx_chains > 1) - ht_info->cap |= IEEE80211_HT_CAP_TX_STBC; - } - - if (cfg->ht_params->ldpc) - ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING; - - if (trans->trans_cfg->mq_rx_supported || - iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K) - ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; - - ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; - ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4; - - ht_info->mcs.rx_mask[0] = 0xFF; - ht_info->mcs.rx_mask[1] = 0x00; - ht_info->mcs.rx_mask[2] = 0x00; - - if (rx_chains >= 2) - ht_info->mcs.rx_mask[1] = 0xFF; - if (rx_chains >= 3) - ht_info->mcs.rx_mask[2] = 0xFF; - - if (cfg->ht_params->ht_greenfield_support) - ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; - ht_info->cap |= IEEE80211_HT_CAP_SGI_20; - - max_bit_rate = MAX_BIT_RATE_20_MHZ; - - if (cfg->ht_params->ht40_bands & BIT(band)) { - ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; - ht_info->cap |= IEEE80211_HT_CAP_SGI_40; - max_bit_rate = MAX_BIT_RATE_40_MHZ; - } - - /* Highest supported Rx data rate */ - max_bit_rate *= rx_chains; - WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); - ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); - - /* Tx MCS capabilities */ - ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; - if (tx_chains != rx_chains) { - ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; - ht_info->mcs.tx_params |= ((tx_chains - 1) << - IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); + switch (gp) { + case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP: + if (!nvm_is_otp) { + IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n", + gp); + return -ENOENT; + } + return 0; + case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: + case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: + if (nvm_is_otp) { + IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp); + return -ENOENT; + } + return 0; + case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP: + default: + IWL_ERR(trans, + "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n", + nvm_is_otp ? 
"OTP" : "EEPROM", gp); + return -ENOENT; } } -#if IS_ENABLED(CONFIG_IWLDVM) +/****************************************************************************** + * + * OTP related functions + * +******************************************************************************/ + +static void iwl_set_otp_access_absolute(struct iwl_trans *trans) +{ + iwl_read32(trans, CSR_OTP_GP_REG); + + iwl_clear_bit(trans, CSR_OTP_GP_REG, + CSR_OTP_GP_REG_OTP_ACCESS_MODE); +} + +static int iwl_nvm_is_otp(struct iwl_trans *trans) +{ + u32 otpgp; + + /* OTP only valid for CP/PP and after */ + switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) { + case CSR_HW_REV_TYPE_NONE: + IWL_ERR(trans, "Unknown hardware type\n"); + return -EIO; + case CSR_HW_REV_TYPE_5300: + case CSR_HW_REV_TYPE_5350: + case CSR_HW_REV_TYPE_5100: + case CSR_HW_REV_TYPE_5150: + return 0; + default: + otpgp = iwl_read32(trans, CSR_OTP_GP_REG); + if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT) + return 1; + return 0; + } +} + +static int iwl_init_otp_access(struct iwl_trans *trans) +{ + int ret; + + ret = iwl_finish_nic_init(trans); + if (ret) + return ret; + + iwl_set_bits_prph(trans, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + udelay(5); + iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG, + APMG_PS_CTRL_VAL_RESET_REQ); + + /* + * CSR auto clock gate disable bit - + * this is only applicable for HW with OTP shadow RAM + */ + if (trans->trans_cfg->base_params->shadow_ram_support) + iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, + CSR_RESET_LINK_PWR_MGMT_DISABLED); + + return 0; +} + +static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr, + __le16 *eeprom_data) +{ + int ret = 0; + u32 r; + u32 otpgp; + + iwl_write32(trans, CSR_EEPROM_REG, + CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); + ret = iwl_poll_bit(trans, CSR_EEPROM_REG, + CSR_EEPROM_REG_READ_VALID_MSK, + CSR_EEPROM_REG_READ_VALID_MSK, + IWL_EEPROM_ACCESS_TIMEOUT); + if (ret < 0) { + IWL_ERR(trans, "Time out reading OTP[%d]\n", addr); + return ret; + } + r = iwl_read32(trans, CSR_EEPROM_REG); + /* check for ECC errors: */ + otpgp = iwl_read32(trans, CSR_OTP_GP_REG); + if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) { + /* stop in this case */ + /* set the uncorrectable OTP ECC bit for acknowledgment */ + iwl_set_bit(trans, CSR_OTP_GP_REG, + CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); + IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n"); + return -EINVAL; + } + if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) { + /* continue in this case */ + /* set the correctable OTP ECC bit for acknowledgment */ + iwl_set_bit(trans, CSR_OTP_GP_REG, + CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK); + IWL_ERR(trans, "Correctable OTP ECC error, continue read\n"); + } + *eeprom_data = cpu_to_le16(r >> 16); + return 0; +} + +/* + * iwl_is_otp_empty: check for empty OTP + */ +static bool iwl_is_otp_empty(struct iwl_trans *trans) +{ + u16 next_link_addr = 0; + __le16 link_value; + bool is_empty = false; + + /* locate the beginning of OTP link list */ + if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) { + if (!link_value) { + IWL_ERR(trans, "OTP is empty\n"); + is_empty = true; + } + } else { + IWL_ERR(trans, "Unable to read first block of OTP list.\n"); + is_empty = true; + } + + return is_empty; +} + + +/* + * iwl_find_otp_image: find EEPROM image in OTP + * finding the OTP block that contains the EEPROM image. + * the last valid block on the link list (the block _before_ the last block) + * is the block we should read and used to configure the device. 
+ * If all the available OTP blocks are full, the last block will be the block + * we should read and used to configure the device. + * only perform this operation if shadow RAM is disabled + */ +static int iwl_find_otp_image(struct iwl_trans *trans, + u16 *validblockaddr) +{ + u16 next_link_addr = 0, valid_addr; + __le16 link_value = 0; + int usedblocks = 0; + + /* set addressing mode to absolute to traverse the link list */ + iwl_set_otp_access_absolute(trans); + + /* checking for empty OTP or error */ + if (iwl_is_otp_empty(trans)) + return -EINVAL; + + /* + * start traverse link list + * until reach the max number of OTP blocks + * different devices have different number of OTP blocks + */ + do { + /* save current valid block address + * check for more block on the link list + */ + valid_addr = next_link_addr; + next_link_addr = le16_to_cpu(link_value) * sizeof(u16); + IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n", + usedblocks, next_link_addr); + if (iwl_read_otp_word(trans, next_link_addr, &link_value)) + return -EINVAL; + if (!link_value) { + /* + * reach the end of link list, return success and + * set address point to the starting address + * of the image + */ + *validblockaddr = valid_addr; + /* skip first 2 bytes (link list pointer) */ + *validblockaddr += 2; + return 0; + } + /* more in the link list, continue */ + usedblocks++; + } while (usedblocks <= trans->trans_cfg->base_params->max_ll_items); + + /* OTP has no valid blocks */ + IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n"); + return -EINVAL; +} + +/* + * iwl_read_eeprom - read EEPROM contents + * + * Load the EEPROM contents from adapter and return it + * and its size. + * + * NOTE: This routine uses the non-debug IO access functions. + */ +int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size) +{ + __le16 *e; + u32 gp = iwl_read32(trans, CSR_EEPROM_GP); + int sz; + int ret; + u16 addr; + u16 validblockaddr = 0; + u16 cache_addr = 0; + int nvm_is_otp; + + if (!eeprom || !eeprom_size) + return -EINVAL; + + nvm_is_otp = iwl_nvm_is_otp(trans); + if (nvm_is_otp < 0) + return nvm_is_otp; + + sz = trans->trans_cfg->base_params->eeprom_size; + IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz); + + e = kmalloc(sz, GFP_KERNEL); + if (!e) + return -ENOMEM; + + ret = iwl_eeprom_verify_signature(trans, nvm_is_otp); + if (ret < 0) { + IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); + goto err_free; + } + + /* Make sure driver (instead of uCode) is allowed to read EEPROM */ + ret = iwl_eeprom_acquire_semaphore(trans); + if (ret < 0) { + IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n"); + goto err_free; + } + + if (nvm_is_otp) { + ret = iwl_init_otp_access(trans); + if (ret) { + IWL_ERR(trans, "Failed to initialize OTP access.\n"); + goto err_unlock; + } + + iwl_write32(trans, CSR_EEPROM_GP, + iwl_read32(trans, CSR_EEPROM_GP) & + ~CSR_EEPROM_GP_IF_OWNER_MSK); + + iwl_set_bit(trans, CSR_OTP_GP_REG, + CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | + CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); + /* traversing the linked list if no shadow ram supported */ + if (!trans->trans_cfg->base_params->shadow_ram_support) { + ret = iwl_find_otp_image(trans, &validblockaddr); + if (ret) + goto err_unlock; + } + for (addr = validblockaddr; addr < validblockaddr + sz; + addr += sizeof(u16)) { + __le16 eeprom_data; + + ret = iwl_read_otp_word(trans, addr, &eeprom_data); + if (ret) + goto err_unlock; + e[cache_addr / 2] = eeprom_data; + cache_addr += sizeof(u16); + } + } else { + /* eeprom is an array 
of 16bit values */ + for (addr = 0; addr < sz; addr += sizeof(u16)) { + u32 r; + + iwl_write32(trans, CSR_EEPROM_REG, + CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); + + ret = iwl_poll_bit(trans, CSR_EEPROM_REG, + CSR_EEPROM_REG_READ_VALID_MSK, + CSR_EEPROM_REG_READ_VALID_MSK, + IWL_EEPROM_ACCESS_TIMEOUT); + if (ret < 0) { + IWL_ERR(trans, + "Time out reading EEPROM[%d]\n", addr); + goto err_unlock; + } + r = iwl_read32(trans, CSR_EEPROM_REG); + e[addr / 2] = cpu_to_le16(r >> 16); + } + } + + IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n", + nvm_is_otp ? "OTP" : "EEPROM"); + + iwl_eeprom_release_semaphore(trans); + + *eeprom_size = sz; + *eeprom = (u8 *)e; + return 0; + + err_unlock: + iwl_eeprom_release_semaphore(trans); + err_free: + kfree(e); + + return ret; +} + static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const u8 *eeprom, size_t eeprom_size) @@ -790,7 +1061,6 @@ static void iwl_init_sbands(struct iwl_trans *trans, const struct iwl_cfg *cfg, } /* EEPROM data functions */ - struct iwl_nvm_data * iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, const u8 *eeprom, size_t eeprom_size) @@ -837,8 +1107,8 @@ iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, data->kelvin_temperature = *(__le16 *)tmp; data->kelvin_voltage = *((__le16 *)tmp + 1); - radio_cfg = iwl_eeprom_query16(eeprom, eeprom_size, - EEPROM_RADIO_CONFIG); + radio_cfg = + iwl_eeprom_query16(eeprom, eeprom_size, EEPROM_RADIO_CONFIG); data->radio_cfg_dash = EEPROM_RF_CFG_DASH_MSK(radio_cfg); data->radio_cfg_pnum = EEPROM_RF_CFG_PNUM_MSK(radio_cfg); data->radio_cfg_step = EEPROM_RF_CFG_STEP_MSK(radio_cfg); @@ -878,5 +1148,3 @@ iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, kfree(data); return NULL; } -IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data); -#endif diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c index 52b008ce53bd..74d163e56511 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/mac80211.c @@ -2,7 +2,7 @@ /****************************************************************************** * * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. - * Copyright(C) 2018 - 2019, 2022 - 2023 Intel Corporation + * Copyright(C) 2018 - 2019, 2022 - 2024 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. 
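Taken together, iwl_read_eeprom() and iwl_parse_eeprom_data() above give the dvm op-mode a two-step NVM load: read the raw EEPROM/OTP image, then parse it into a struct iwl_nvm_data. A minimal sketch of that calling sequence follows; the wrapper name example_load_nvm is hypothetical and not part of this patch, and error handling is reduced to the essentials.

/* Illustrative only: how a caller might consume iwl_read_eeprom() and
 * iwl_parse_eeprom_data(). Assumes an initialized trans and cfg.
 */
static struct iwl_nvm_data *example_load_nvm(struct iwl_trans *trans,
					     const struct iwl_cfg *cfg)
{
	struct iwl_nvm_data *data;
	size_t eeprom_size;
	u8 *eeprom;
	int ret;

	/* reads the full EEPROM/OTP image into a kmalloc'ed buffer */
	ret = iwl_read_eeprom(trans, &eeprom, &eeprom_size);
	if (ret)
		return NULL;

	/* parses the raw image; returns NULL on failure */
	data = iwl_parse_eeprom_data(trans, cfg, eeprom, eeprom_size);

	/* the raw image is no longer needed once parsed */
	kfree(eeprom);
	return data;
}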
@@ -145,8 +145,6 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv, #ifdef CONFIG_PM_SLEEP if (priv->fw->img[IWL_UCODE_WOWLAN].num_sec && - priv->trans->ops->d3_suspend && - priv->trans->ops->d3_resume && device_can_wakeup(priv->trans->dev)) { priv->wowlan_support.flags = WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | @@ -302,7 +300,7 @@ static int iwlagn_mac_start(struct ieee80211_hw *hw) return ret; } -static void iwlagn_mac_stop(struct ieee80211_hw *hw) +static void iwlagn_mac_stop(struct ieee80211_hw *hw, bool suspend) { struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); @@ -730,8 +728,6 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw, ret = iwl_sta_rx_agg_stop(priv, sta, tid); break; case IEEE80211_AMPDU_TX_START: - if (!priv->trans->ops->txq_enable) - break; if (!iwl_enable_tx_ampdu()) break; IWL_DEBUG_HT(priv, "start Tx\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index 8774dd7b921e..65b7c68e5ca7 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0-only /****************************************************************************** * - * Copyright(c) 2003 - 2014, 2018 - 2022 Intel Corporation. All rights reserved. + * Copyright(c) 2003 - 2014, 2018 - 2022 Intel Corporation. All rights reserved. + * Copyright(c) 2024 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Deutschland GmbH * * Portions of this file are derived from the ipw3945 project, as well @@ -25,8 +26,7 @@ #include -#include "iwl-eeprom-read.h" -#include "iwl-eeprom-parse.h" +#include "iwl-nvm-utils.h" #include "iwl-io.h" #include "iwl-trans.h" #include "iwl-op-mode.h" diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c index f4a6f76cf193..8879e668ef0d 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/rs.c @@ -2673,20 +2673,16 @@ static void rs_get_rate(void *priv_r, struct ieee80211_sta *sta, void *priv_sta, IWL_DEBUG_RATE_LIMIT(priv, "rate scale calculate new rate for skb\n"); /* Get max rate if user set max rate */ - if (lq_sta) { - lq_sta->max_rate_idx = fls(txrc->rate_idx_mask) - 1; - if ((sband->band == NL80211_BAND_5GHZ) && - (lq_sta->max_rate_idx != -1)) - lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE; - if ((lq_sta->max_rate_idx < 0) || - (lq_sta->max_rate_idx >= IWL_RATE_COUNT)) - lq_sta->max_rate_idx = -1; - } + lq_sta->max_rate_idx = fls(txrc->rate_idx_mask) - 1; + if (sband->band == NL80211_BAND_5GHZ && lq_sta->max_rate_idx != -1) + lq_sta->max_rate_idx += IWL_FIRST_OFDM_RATE; + if (lq_sta->max_rate_idx < 0 || lq_sta->max_rate_idx >= IWL_RATE_COUNT) + lq_sta->max_rate_idx = -1; - /* Treat uninitialized rate scaling data same as non-existing. 
*/ - if (lq_sta && !lq_sta->drv) { + if (!lq_sta->drv) { IWL_DEBUG_RATE(priv, "Rate scaling not initialized yet.\n"); - priv_sta = NULL; + /* mac80211 already set up the data for using low rates */ + return; } rate_idx = lq_sta->last_txrate_idx; @@ -2756,7 +2752,6 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i lq_sta = &sta_priv->lq_sta; sband = hw->wiphy->bands[conf->chandef.chan->band]; - lq_sta->lq.sta_id = sta_id; for (j = 0; j < LQ_SIZE; j++) diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h index 23dfcda0dd86..4af792d41dce 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/tt.h +++ b/drivers/net/wireless/intel/iwlwifi/dvm/tt.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /****************************************************************************** * - * Copyright(c) 2007 - 2014, 2023 Intel Corporation. All rights reserved. + * Copyright(c) 2007 - 2014, 2023-2024 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -64,7 +64,7 @@ struct iwl_tt_trans { }; /** - * struct iwl_tt_mgnt - Thermal Throttling Management structure + * struct iwl_tt_mgmt - Thermal Throttling Management structure * @advanced_tt: advanced thermal throttle required * @state: current Thermal Throttling state * @tt_power_mode: Thermal Throttling power mode index diff --git a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c index fa339791223b..79774c8c7ff4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/acpi.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/acpi.c @@ -27,6 +27,7 @@ static const size_t acpi_dsm_size[DSM_FUNC_NUM_FUNCS] = { [DSM_FUNC_FORCE_DISABLE_CHANNELS] = sizeof(u32), [DSM_FUNC_ENERGY_DETECTION_THRESHOLD] = sizeof(u32), [DSM_FUNC_RFI_CONFIG] = sizeof(u32), + [DSM_FUNC_ENABLE_11BE] = sizeof(u32), }; static int iwl_acpi_get_handle(struct device *dev, acpi_string method, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h index e00ab21e7358..ebe85fdf08d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2020-2021 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2020-2021, 2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -113,7 +113,7 @@ struct iwl_alive_ntf_v6 { } __packed; /* UCODE_ALIVE_NTFY_API_S_VER_6 */ /** - * enum iwl_extended_cfg_flag - commands driver may send before + * enum iwl_extended_cfg_flags - commands driver may send before * finishing init flow * @IWL_INIT_DEBUG_CFG: driver is going to send debug config command * @IWL_INIT_NVM: driver is going to send NVM_ACCESS commands @@ -126,7 +126,7 @@ enum iwl_extended_cfg_flags { }; /** - * struct iwl_extended_cfg_cmd - mark what commands ucode should wait for + * struct iwl_init_extended_cfg_cmd - mark what commands ucode should wait for * before finishing init flows * @init_flags: values from iwl_extended_cfg_flags */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h index d9044ada6a43..2397fdc37fc5 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h +++ 
b/drivers/net/wireless/intel/iwlwifi/fw/api/binding.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2020, 2022 Intel Corporation + * Copyright (C) 2012-2014, 2020, 2022, 2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -77,7 +77,7 @@ struct iwl_time_quota_data_v1 { } __packed; /* TIME_QUOTA_DATA_API_S_VER_1 */ /** - * struct iwl_time_quota_cmd - configuration of time quota between bindings + * struct iwl_time_quota_cmd_v1 - configuration of time quota between bindings * ( TIME_QUOTA_CMD = 0x2c ) * @quotas: allocations per binding * Note: on non-CDB the fourth one is the auxilary mac and is diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h index bc27e15488f5..b97a43353779 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/coex.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2023 Intel Corporation + * Copyright (C) 2023-2024 Intel Corporation * Copyright (C) 2013-2014, 2018-2019 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH @@ -77,73 +77,6 @@ struct iwl_bt_coex_ci_cmd { __le32 secondary_ch_phy_id; } __packed; /* BT_CI_MSG_API_S_VER_2 */ -#define BT_MBOX(n_dw, _msg, _pos, _nbits) \ - BT_MBOX##n_dw##_##_msg##_POS = (_pos), \ - BT_MBOX##n_dw##_##_msg = BITS(_nbits) << BT_MBOX##n_dw##_##_msg##_POS - -enum iwl_bt_mxbox_dw0 { - BT_MBOX(0, LE_SLAVE_LAT, 0, 3), - BT_MBOX(0, LE_PROF1, 3, 1), - BT_MBOX(0, LE_PROF2, 4, 1), - BT_MBOX(0, LE_PROF_OTHER, 5, 1), - BT_MBOX(0, CHL_SEQ_N, 8, 4), - BT_MBOX(0, INBAND_S, 13, 1), - BT_MBOX(0, LE_MIN_RSSI, 16, 4), - BT_MBOX(0, LE_SCAN, 20, 1), - BT_MBOX(0, LE_ADV, 21, 1), - BT_MBOX(0, LE_MAX_TX_POWER, 24, 4), - BT_MBOX(0, OPEN_CON_1, 28, 2), -}; - -enum iwl_bt_mxbox_dw1 { - BT_MBOX(1, BR_MAX_TX_POWER, 0, 4), - BT_MBOX(1, IP_SR, 4, 1), - BT_MBOX(1, LE_MSTR, 5, 1), - BT_MBOX(1, AGGR_TRFC_LD, 8, 6), - BT_MBOX(1, MSG_TYPE, 16, 3), - BT_MBOX(1, SSN, 19, 2), -}; - -enum iwl_bt_mxbox_dw2 { - BT_MBOX(2, SNIFF_ACT, 0, 3), - BT_MBOX(2, PAG, 3, 1), - BT_MBOX(2, INQUIRY, 4, 1), - BT_MBOX(2, CONN, 5, 1), - BT_MBOX(2, SNIFF_INTERVAL, 8, 5), - BT_MBOX(2, DISC, 13, 1), - BT_MBOX(2, SCO_TX_ACT, 16, 2), - BT_MBOX(2, SCO_RX_ACT, 18, 2), - BT_MBOX(2, ESCO_RE_TX, 20, 2), - BT_MBOX(2, SCO_DURATION, 24, 6), -}; - -enum iwl_bt_mxbox_dw3 { - BT_MBOX(3, SCO_STATE, 0, 1), - BT_MBOX(3, SNIFF_STATE, 1, 1), - BT_MBOX(3, A2DP_STATE, 2, 1), - BT_MBOX(3, ACL_STATE, 3, 1), - BT_MBOX(3, MSTR_STATE, 4, 1), - BT_MBOX(3, OBX_STATE, 5, 1), - BT_MBOX(3, A2DP_SRC, 6, 1), - BT_MBOX(3, OPEN_CON_2, 8, 2), - BT_MBOX(3, TRAFFIC_LOAD, 10, 2), - BT_MBOX(3, CHL_SEQN_LSB, 12, 1), - BT_MBOX(3, INBAND_P, 13, 1), - BT_MBOX(3, MSG_TYPE_2, 16, 3), - BT_MBOX(3, SSN_2, 19, 2), - BT_MBOX(3, UPDATE_REQUEST, 21, 1), -}; - -#define BT_MBOX_MSG(_notif, _num, _field) \ - ((le32_to_cpu((_notif)->mbox_msg[(_num)]) & BT_MBOX##_num##_##_field)\ - >> BT_MBOX##_num##_##_field##_POS) - -#define BT_MBOX_PRINT(_num, _field, _end) \ - pos += scnprintf(buf + pos, bufsz - pos, \ - "\t%s: %d%s", \ - #_field, \ - BT_MBOX_MSG(notif, _num, _field), \ - true ? 
"\n" : ", ") enum iwl_bt_activity_grading { BT_OFF = 0, BT_ON_NO_CONNECTION = 1, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h index 4419631604b4..1fc65469990e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2019, 2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2023-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -76,7 +76,7 @@ struct iwl_phy_specific_cfg { } __packed; /* PHY_SPECIFIC_CONFIGURATION_API_VER_1*/ /** - * struct iwl_phy_cfg_cmd - Phy configuration command + * struct iwl_phy_cfg_cmd_v1 - Phy configuration command * * @phy_cfg: PHY configuration value, uses &enum iwl_fw_phy_cfg * @calib_control: calibration control data diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h index bbaaf3c73115..ffee7927cf26 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/d3.h @@ -42,7 +42,7 @@ struct iwl_d3_manager_config { /* TODO: OFFLOADS_QUERY_API_S_VER_1 */ /** - * enum iwl_d3_proto_offloads - enabled protocol offloads + * enum iwl_proto_offloads - enabled protocol offloads * @IWL_D3_PROTO_OFFLOAD_ARP: ARP data is enabled * @IWL_D3_PROTO_OFFLOAD_NS: NS (Neighbor Solicitation) is enabled * @IWL_D3_PROTO_IPV4_VALID: IPv4 data is valid @@ -195,7 +195,7 @@ struct iwl_wowlan_pattern_v1 { #define IWL_WOWLAN_MAX_PATTERNS 20 /** - * struct iwl_wowlan_patterns_cmd - WoWLAN wakeup patterns + * struct iwl_wowlan_patterns_cmd_v1 - WoWLAN wakeup patterns */ struct iwl_wowlan_patterns_cmd_v1 { /** diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index f272b6a4e72e..2ab38eaeb290 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -231,28 +231,33 @@ struct iwl_synced_time_rsp { #define PTP_CTX_MAX_DATA_SIZE 128 /** - * struct iwl_time_msmt_ptp_ctx - Vendor specific information element + * struct iwl_time_msmt_ptp_ctx - Vendor specific element * to allow a space for flexibility for the userspace App * - * @element_id: element id of vendor specific ie - * @length: length of vendor specific ie - * @reserved: for alignment - * @data: vendor specific data blob + * @ftm: FTM specific vendor element + * @ftm.element_id: element id of vendor specific ie + * @ftm.length: length of vendor specific ie + * @ftm.reserved: for alignment + * @ftm.data: vendor specific data blob + * @tm: TM specific vendor element + * @tm.element_id: element id of vendor specific ie + * @tm.length: length of vendor specific ie + * @tm.data: vendor specific data blob */ struct iwl_time_msmt_ptp_ctx { - /* Differentiate between FTM and TM specific Vendor IEs */ + /* Differentiate between FTM and TM specific Vendor elements */ union { struct { u8 element_id; u8 length; __le16 reserved; u8 data[PTP_CTX_MAX_DATA_SIZE]; - } ftm; /* FTM specific vendor IE */ + } ftm; struct { u8 element_id; u8 length; u8 data[PTP_CTX_MAX_DATA_SIZE]; - } tm; /* TM specific vendor IE */ + } tm; }; } __packed /* PTP_CTX_VER_1 */; @@ -531,6 +536,10 @@ struct iwl_rx_baid_cfg_cmd_remove { /** * struct iwl_rx_baid_cfg_cmd - BAID allocation/config command * 
@action: the action, from &enum iwl_rx_baid_action + * @alloc: allocation data + * @modify: modify data + * @remove_v1: remove data (version 1) + * @remove: remove data */ struct iwl_rx_baid_cfg_cmd { __le32 action; @@ -565,6 +574,7 @@ enum iwl_scd_queue_cfg_operation { /** * struct iwl_scd_queue_cfg_cmd - scheduler queue allocation command * @operation: the operation, see &enum iwl_scd_queue_cfg_operation + * @u: union depending on command usage * @u.add.sta_mask: station mask * @u.add.tid: TID * @u.add.reserved: reserved @@ -634,6 +644,7 @@ enum iwl_sec_key_flags { /** * struct iwl_sec_key_cmd - security key command * @action: action from &enum iwl_ctxt_action + * @u: union depending on command type * @u.add.sta_mask: station mask for the new key * @u.add.key_id: key ID (0-7) for the new key * @u.add.key_flags: key flags per &enum iwl_sec_key_flags diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h index 47c914de2992..855cd13a181e 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -147,32 +147,34 @@ struct iwl_fw_ini_region_internal_buffer { * Configures parameters for region data collection * * @hdr: debug header - * @id: region id. Max id is &IWL_FW_INI_MAX_REGION_ID + * @id: region id. Max id is %IWL_FW_INI_MAX_REGION_ID * @type: region type. One of &enum iwl_fw_ini_region_type * @sub_type: region sub type * @sub_type_ver: region sub type version * @reserved: not in use * @name: region name * @dev_addr: device address configuration. Used by - * &IWL_FW_INI_REGION_DEVICE_MEMORY, &IWL_FW_INI_REGION_PERIPHERY_MAC, - * &IWL_FW_INI_REGION_PERIPHERY_PHY, &IWL_FW_INI_REGION_PERIPHERY_AUX, - * &IWL_FW_INI_REGION_PAGING, &IWL_FW_INI_REGION_CSR, - * &IWL_FW_INI_REGION_DRAM_IMR and &IWL_FW_INI_REGION_PCI_IOSF_CONFIG - * &IWL_FW_INI_REGION_DBGI_SRAM, &FW_TLV_DEBUG_REGION_TYPE_DBGI_SRAM, - * &IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP, + * %IWL_FW_INI_REGION_DEVICE_MEMORY, %IWL_FW_INI_REGION_PERIPHERY_MAC, + * %IWL_FW_INI_REGION_PERIPHERY_PHY, %IWL_FW_INI_REGION_PERIPHERY_AUX, + * %IWL_FW_INI_REGION_PAGING, %IWL_FW_INI_REGION_CSR, + * %IWL_FW_INI_REGION_DRAM_IMR and %IWL_FW_INI_REGION_PCI_IOSF_CONFIG + * %IWL_FW_INI_REGION_DBGI_SRAM, %FW_TLV_DEBUG_REGION_TYPE_DBGI_SRAM, + * %IWL_FW_INI_REGION_PERIPHERY_SNPS_DPHYIP, * @dev_addr_range: device address range configuration. Used by - * &IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE and - * &IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE - * @fifos: fifos configuration. Used by &IWL_FW_INI_REGION_TXF and - * &IWL_FW_INI_REGION_RXF + * %IWL_FW_INI_REGION_PERIPHERY_MAC_RANGE and + * %IWL_FW_INI_REGION_PERIPHERY_PHY_RANGE + * @fifos: fifos configuration. Used by %IWL_FW_INI_REGION_TXF and + * %IWL_FW_INI_REGION_RXF * @err_table: error table configuration. Used by - * IWL_FW_INI_REGION_LMAC_ERROR_TABLE and - * IWL_FW_INI_REGION_UMAC_ERROR_TABLE + * %IWL_FW_INI_REGION_LMAC_ERROR_TABLE and + * %IWL_FW_INI_REGION_UMAC_ERROR_TABLE * @internal_buffer: internal monitor buffer configuration. Used by - * &IWL_FW_INI_REGION_INTERNAL_BUFFER + * %IWL_FW_INI_REGION_INTERNAL_BUFFER + * @special_mem: special device memory region, used by + * %IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY * @dram_alloc_id: dram allocation id. One of &enum iwl_fw_ini_allocation_id. - * Used by &IWL_FW_INI_REGION_DRAM_BUFFER - * @tlv_mask: tlv collection mask. Used by &IWL_FW_INI_REGION_TLV + * Used by %IWL_FW_INI_REGION_DRAM_BUFFER + * @tlv_mask: tlv collection mask. 
Used by %IWL_FW_INI_REGION_TLV * @addrs: array of addresses attached to the end of the region tlv */ struct iwl_fw_ini_region_tlv { @@ -291,7 +293,7 @@ struct iwl_fw_ini_addr_val { } __packed; /* FW_TLV_DEBUG_ADDR_VALUE_VER_1 */ /** - * struct iwl_fw_ini_conf_tlv - configuration TLV to set register/memory. + * struct iwl_fw_ini_conf_set_tlv - configuration TLV to set register/memory. * * @hdr: debug header * @time_point: time point to apply config. One of &enum iwl_fw_ini_time_point @@ -470,6 +472,10 @@ enum iwl_fw_ini_region_device_memory_subtype { * @IWL_FW_INI_TIME_POINT_EAPOL_FAILED: EAPOL failed * @IWL_FW_INI_TIME_POINT_FAKE_TX: fake Tx * @IWL_FW_INI_TIME_POINT_DEASSOC: de association + * @IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_EXT_REQ: request to override preset + * @IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_START: start handling override preset + * request + * @IWL_FW_INI_TIME_SCAN_FAILURE: failed scan channel list * @IWL_FW_INI_TIME_POINT_NUM: number of time points */ enum iwl_fw_ini_time_point { @@ -500,6 +506,9 @@ enum iwl_fw_ini_time_point { IWL_FW_INI_TIME_POINT_EAPOL_FAILED, IWL_FW_INI_TIME_POINT_FAKE_TX, IWL_FW_INI_TIME_POINT_DEASSOC, + IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_EXT_REQ, + IWL_FW_INI_TIME_POINT_PRESET_OVERRIDE_START, + IWL_FW_INI_TIME_SCAN_FAILURE, IWL_FW_INI_TIME_POINT_NUM, }; /* FW_TLV_DEBUG_TIME_POINT_API_E */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h index b31ae6889bd0..bea0f4668cc8 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/debug.h @@ -1,11 +1,12 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2023 Intel Corporation + * Copyright (C) 2005-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_debug_h__ #define __iwl_fw_api_debug_h__ +#include "dbg-tlv.h" /** * enum iwl_debug_cmds - debug commands diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h index 25530a29317e..30a54c7fa001 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/location.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/location.h @@ -2,6 +2,7 @@ /* * Copyright (C) 2015-2017 Intel Deutschland GmbH * Copyright (C) 2018-2022 Intel Corporation + * Copyright (C) 2024 Intel Corporation */ #ifndef __iwl_fw_api_location_h__ #define __iwl_fw_api_location_h__ @@ -390,10 +391,62 @@ struct iwl_tof_responder_config_cmd_v9 { __le16 max_time_between_msr; } __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_8 */ +/** + * struct iwl_tof_responder_config_cmd - ToF AP mode + * @cmd_valid_fields: &iwl_tof_responder_cmd_valid_field + * @responder_cfg_flags: &iwl_tof_responder_cfg_flags + * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. + * bits 4 - 7: &enum iwl_location_bw. 
+ * @bss_color: current AP bss_color + * @channel_num: current AP Channel + * @ctrl_ch_position: coding of the control channel position relative to + * the center frequency, see iwl_mvm_get_ctrl_pos() + * @sta_id: index of the AP STA when in AP mode + * @band: current AP band + * @toa_offset: Artificial addition [pSec] for the ToA - to be used for debug + * purposes, simulating station movement by adding various values + * to this field + * @common_calib: XVT: common calibration value + * @specific_calib: XVT: specific calibration value + * @bssid: Current AP BSSID + * @r2i_ndp_params: parameters for R2I NDP. + * bits 0 - 2: max number of LTF repetitions + * bits 3 - 5: max number of spatial streams (supported values are < 2) + * bits 6 - 7: max number of total LTFs see + * &enum ieee80211_range_params_max_total_ltf + * @i2r_ndp_params: parameters for I2R NDP. + * bits 0 - 2: max number of LTF repetitions + * bits 3 - 5: max number of spatial streams + * bits 6 - 7: max number of total LTFs see + * &enum ieee80211_range_params_max_total_ltf + * @min_time_between_msr: for non trigger based NDP ranging, minimum time + * between measurements in milliseconds. + * @max_time_between_msr: for non trigger based NDP ranging, maximum time + * between measurements in milliseconds. + */ +struct iwl_tof_responder_config_cmd { + __le32 cmd_valid_fields; + __le32 responder_cfg_flags; + u8 format_bw; + u8 bss_color; + u8 channel_num; + u8 ctrl_ch_position; + u8 sta_id; + u8 band; + __le16 toa_offset; + __le16 common_calib; + __le16 specific_calib; + u8 bssid[ETH_ALEN]; + u8 r2i_ndp_params; + u8 i2r_ndp_params; + __le16 min_time_between_msr; + __le16 max_time_between_msr; +} __packed; /* TOF_RESPONDER_CONFIG_CMD_API_S_VER_10 */ + #define IWL_LCI_CIVIC_IE_MAX_SIZE 400 /** - * struct iwl_tof_responder_dyn_config_cmd - Dynamic responder settings + * struct iwl_tof_responder_dyn_config_cmd_v2 - Dynamic responder settings * @lci_len: The length of the 1st (LCI) part in the @lci_civic buffer * @civic_len: The length of the 2nd (CIVIC) part in the @lci_civic buffer * @lci_civic: The LCI/CIVIC buffer. LCI data (if exists) comes first, then, if @@ -561,6 +614,8 @@ struct iwl_tof_range_req_ap_entry_v2 { * the responder asked for LMR feedback although the initiator did not set * the LMR feedback bit in the FTM request. If not set, the initiator will * continue with the session and will provide the LMR feedback. + * @IWL_INITIATOR_AP_FLAGS_TEST_INCORRECT_SAC: send an incorrect SAC in the + * first NDP exchange. This is used for testing. */ enum iwl_initiator_ap_flags { IWL_INITIATOR_AP_FLAGS_ASAP = BIT(1), @@ -577,6 +632,7 @@ enum iwl_initiator_ap_flags { IWL_INITIATOR_AP_FLAGS_USE_CALIB = BIT(13), IWL_INITIATOR_AP_FLAGS_PMF = BIT(14), IWL_INITIATOR_AP_FLAGS_TERMINATE_ON_LMR_FEEDBACK = BIT(15), + IWL_INITIATOR_AP_FLAGS_TEST_INCORRECT_SAC = BIT(16), }; /** @@ -797,6 +853,7 @@ struct iwl_tof_range_req_ap_entry_v7 { } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_7 */ #define IWL_LOCATION_MAX_STS_POS 3 +#define IWL_LOCATION_TOTAL_LTF_POS 6 /** * struct iwl_tof_range_req_ap_entry_v8 - AP configuration parameters @@ -953,6 +1010,78 @@ struct iwl_tof_range_req_ap_entry_v9 { __le16 min_time_between_msr; } __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */ +/** + * struct iwl_tof_range_req_ap_entry_v10 - AP configuration parameters + * @initiator_ap_flags: see &enum iwl_initiator_ap_flags. 
+ * @band: 0 for 5.2 GHz, 1 for 2.4 GHz, 2 for 6GHz + * @channel_num: AP Channel number + * @format_bw: bits 0 - 3: &enum iwl_location_frame_format. + * bits 4 - 7: &enum iwl_location_bw. + * @ctrl_ch_position: Coding of the control channel position relative to the + * center frequency, see iwl_mvm_get_ctrl_pos(). + * @bssid: AP's BSSID + * @burst_period: For EDCA based ranging: Recommended value to be sent to the + * AP. Measurement periodicity In units of 100ms. ignored if + * num_of_bursts_exp = 0. + * For non trigger based NDP ranging, the maximum time between + * measurements in units of milliseconds. + * @samples_per_burst: the number of FTMs pairs in single Burst (1-31); + * @num_of_bursts: Recommended value to be sent to the AP. 2s Exponent of + * the number of measurement iterations (min 2^0 = 1, max 2^14) + * @sta_id: the station id of the AP. Only relevant when associated to the AP, + * otherwise should be set to &IWL_MVM_INVALID_STA. + * @cipher: pairwise cipher suite for secured measurement. + * &enum iwl_location_cipher. + * @hltk: HLTK to be used for secured 11az measurement + * @tk: TK to be used for secured 11az measurement + * @calib: An array of calibration values per FTM rx bandwidth. + * If &IWL_INITIATOR_AP_FLAGS_USE_CALIB is set, the fw will use the + * calibration value that corresponds to the rx bandwidth of the FTM + * frame. + * @beacon_interval: beacon interval of the AP in TUs. Only required if + * &IWL_INITIATOR_AP_FLAGS_TB is set. + * @rx_pn: the next expected PN for protected management frames Rx. LE byte + * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id + * is set to &IWL_MVM_INVALID_STA. + * @tx_pn: the next PN to use for protected management frames Tx. LE byte + * order. Only valid if &IWL_INITIATOR_AP_FLAGS_SECURED is set and sta_id + * is set to &IWL_MVM_INVALID_STA. + * @r2i_ndp_params: parameters for R2I NDP ranging negotiation. + * bits 0 - 2: max LTF repetitions + * bits 3 - 5: max number of spatial streams + * bits 6 - 7: max total LTFs. One of + * &enum ieee80211_range_params_max_total_ltf. + * @i2r_ndp_params: parameters for I2R NDP ranging negotiation. + * bits 0 - 2: max LTF repetitions + * bits 3 - 5: max number of spatial streams (supported values are < 2) + * bits 6 - 7: max total LTFs. One of + * &enum ieee80211_range_params_max_total_ltf. + * @min_time_between_msr: For non trigger based NDP ranging, the minimum time + * between measurements in units of milliseconds + */ +struct iwl_tof_range_req_ap_entry_v10 { + __le32 initiator_ap_flags; + u8 band; + u8 channel_num; + u8 format_bw; + u8 ctrl_ch_position; + u8 bssid[ETH_ALEN]; + __le16 burst_period; + u8 samples_per_burst; + u8 num_of_bursts; + u8 sta_id; + u8 cipher; + u8 hltk[HLTK_11AZ_LEN]; + u8 tk[TK_11AZ_LEN]; + __le16 calib[IWL_TOF_BW_NUM]; + __le16 beacon_interval; + u8 rx_pn[IEEE80211_CCMP_PN_LEN]; + u8 tx_pn[IEEE80211_CCMP_PN_LEN]; + u8 r2i_ndp_params; + u8 i2r_ndp_params; + __le16 min_time_between_msr; +} __packed; /* LOCATION_RANGE_REQ_AP_ENTRY_CMD_API_S_VER_9 */ + /** * enum iwl_tof_response_mode * @IWL_MVM_TOF_RESPONSE_ASAP: report each AP measurement separately as soon as @@ -1230,6 +1359,34 @@ struct iwl_tof_range_req_cmd_v13 { struct iwl_tof_range_req_ap_entry_v9 ap[IWL_MVM_TOF_MAX_APS]; } __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */ +/** + * struct iwl_tof_range_req_cmd_v14 - start measurement cmd + * @initiator_flags: see flags @ iwl_tof_initiator_flags + * @request_id: A Token incremented per request. 
The same Token will be + * sent back in the range response + * @num_of_ap: Number of APs to measure (error if > IWL_MVM_TOF_MAX_APS) + * @range_req_bssid: ranging request BSSID + * @macaddr_mask: Bits set to 0 shall be copied from the MAC address template. + * Bits set to 1 shall be randomized by the UMAC + * @macaddr_template: MAC address template to use for non-randomized bits + * @req_timeout_ms: Requested timeout of the response in units of milliseconds. + * This is the session time for completing the measurement. + * @tsf_mac_id: report the measurement start time for each ap in terms of the + * TSF of this mac id. 0xff to disable TSF reporting. + * @ap: per-AP request data, see &struct iwl_tof_range_req_ap_entry_v10. + */ +struct iwl_tof_range_req_cmd_v14 { + __le32 initiator_flags; + u8 request_id; + u8 num_of_ap; + u8 range_req_bssid[ETH_ALEN]; + u8 macaddr_mask[ETH_ALEN]; + u8 macaddr_template[ETH_ALEN]; + __le32 req_timeout_ms; + __le32 tsf_mac_id; + struct iwl_tof_range_req_ap_entry_v10 ap[IWL_MVM_TOF_MAX_APS]; +} __packed; /* LOCATION_RANGE_REQ_CMD_API_S_VER_13 */ + /* * enum iwl_tof_range_request_status - status of the sent request * @IWL_TOF_RANGE_REQUEST_STATUS_SUCCESSFUL - FW successfully received the diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h index 754c5d655ad0..ca6fa66d1917 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac-cfg.h @@ -144,7 +144,7 @@ struct iwl_missed_vap_notif { } __packed; /* MISSED_VAP_NTFY_API_S_VER_1 */ /** - * struct iwl_channel_switch_start_notif - Channel switch start notification + * struct iwl_channel_switch_start_notif_v1 - Channel switch start notification * * @id_and_color: ID and color of the MAC */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index 545826973a80..bcbbf8c4a297 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -310,6 +310,13 @@ struct iwl_ac_qos { * @filter_flags: combination of &enum iwl_mac_filter_flags * @qos_flags: from &enum iwl_mac_qos_flags * @ac: one iwl_mac_qos configuration for each AC + * @ap: AP specific config data, see &struct iwl_mac_data_ap + * @go: GO specific config data, see &struct iwl_mac_data_go + * @sta: BSS client specific config data, see &struct iwl_mac_data_sta + * @p2p_sta: P2P client specific config data, see &struct iwl_mac_data_p2p_sta + * @p2p_dev: P2P-device specific config data, see &struct iwl_mac_data_p2p_dev + * @pibss: Pseudo-IBSS specific data, unused; see struct iwl_mac_data_pibss + * @ibss: IBSS specific config data, see &struct iwl_mac_data_ibss */ struct iwl_mac_ctx_cmd { /* COMMON_INDEX_HDR_API_S_VER_1 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index a08497a04733..d424d0126367 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -23,7 +23,8 @@ enum iwl_regulatory_and_nvm_subcmd_ids { * &struct iwl_lari_config_change_cmd_v4, * &struct iwl_lari_config_change_cmd_v5, * &struct iwl_lari_config_change_cmd_v6, - * &struct iwl_lari_config_change_cmd_v7 or + * &struct iwl_lari_config_change_cmd_v7, + * &struct iwl_lari_config_change_cmd_v10 or * &struct iwl_lari_config_change_cmd */ LARI_CONFIG_CHANGE = 0x1, @@ -119,7 +120,7 @@ struct iwl_nvm_access_cmd { } __packed; /* 
NVM_ACCESS_CMD_API_S_VER_2 */ /** - * struct iwl_nvm_access_resp_ver2 - response to NVM_ACCESS_CMD + * struct iwl_nvm_access_resp - response to NVM_ACCESS_CMD * @offset: offset in bytes into the section * @length: in bytes, either how much was written or read * @type: NVM_SECTION_TYPE_* @@ -211,7 +212,7 @@ struct iwl_nvm_get_info_phy { #define IWL_NUM_CHANNELS 110 /** - * struct iwl_nvm_get_info_regulatory - regulatory information + * struct iwl_nvm_get_info_regulatory_v1 - regulatory information * @lar_enabled: is LAR enabled * @channel_profile: regulatory data of this channel * @reserved: reserved @@ -648,7 +649,7 @@ struct iwl_lari_config_change_cmd_v7 { /* LARI_CHANGE_CONF_CMD_S_VER_9 */ /** - * struct iwl_lari_config_change_cmd - change LARI configuration + * struct iwl_lari_config_change_cmd_v10 - change LARI configuration * @config_bitmap: Bitmap of the config commands. Each bit will trigger a * different predefined FW config operation. * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets. @@ -674,7 +675,7 @@ struct iwl_lari_config_change_cmd_v7 { * bit1: enable 320Mhz in South Korea. * bit 2 - 31: reserved. */ -struct iwl_lari_config_change_cmd { +struct iwl_lari_config_change_cmd_v10 { __le32 config_bitmap; __le32 oem_uhb_allow_bitmap; __le32 oem_11ax_allow_bitmap; @@ -686,8 +687,57 @@ struct iwl_lari_config_change_cmd { } __packed; /* LARI_CHANGE_CONF_CMD_S_VER_10 */ +/** + * struct iwl_lari_config_change_cmd - change LARI configuration + * @config_bitmap: Bitmap of the config commands. Each bit will trigger a + * different predefined FW config operation. + * @oem_uhb_allow_bitmap: Bitmap of UHB enabled MCC sets. + * @oem_11ax_allow_bitmap: Bitmap of 11ax allowed MCCs. There are two bits + * per country, one to indicate whether to override and the other to + * indicate the value to use. + * @oem_unii4_allow_bitmap: Bitmap of unii4 allowed MCCs.There are two bits + * per country, one to indicate whether to override and the other to + * indicate allow/disallow unii4 channels. + * For LARI cmd version 11 - bits 0:5 are supported. + * @chan_state_active_bitmap: Bitmap to enable different bands per country + * or region. + * Each bit represents a country or region, and a band to activate + * according to the BIOS definitions. + * For LARI cmd version 11 - bits 0:4 are supported. + * For LARI cmd version 12 - bits 0:6 are supported and bits 7:31 are + * reserved. No need to mask out the reserved bits. + * @force_disable_channels_bitmap: Bitmap of disabled bands/channels. + * Each bit represents a set of channels in a specific band that should be + * disabled + * @edt_bitmap: Bitmap of energy detection threshold table. + * Disable/enable the EDT optimization method for different band. + * @oem_320mhz_allow_bitmap: 320Mhz bandwidth enablement bitmap per MCC. + * bit0: enable 320Mhz in Japan. + * bit1: enable 320Mhz in South Korea. + * bit 2 - 31: reserved. + * @oem_11be_allow_bitmap: Bitmap of 11be allowed MCCs. No need to mask out the + * unsupported bits + * bit0: enable 11be in China(CB/CN). + * bit1: enable 11be in South Korea. + * bit 2 - 31: reserved. 
+ */ +struct iwl_lari_config_change_cmd { + __le32 config_bitmap; + __le32 oem_uhb_allow_bitmap; + __le32 oem_11ax_allow_bitmap; + __le32 oem_unii4_allow_bitmap; + __le32 chan_state_active_bitmap; + __le32 force_disable_channels_bitmap; + __le32 edt_bitmap; + __le32 oem_320mhz_allow_bitmap; + __le32 oem_11be_allow_bitmap; +} __packed; +/* LARI_CHANGE_CONF_CMD_S_VER_11 */ +/* LARI_CHANGE_CONF_CMD_S_VER_12 */ + /* Activate UNII-1 (5.2GHz) for World Wide */ -#define ACTIVATE_5G2_IN_WW_MASK BIT(4) +#define ACTIVATE_5G2_IN_WW_MASK BIT(4) +#define CHAN_STATE_ACTIVE_BITMAP_CMD_V11 0x1F /** * struct iwl_pnvm_init_complete_ntfy - PNVM initialization complete diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h index 2ed7acc09e5a..6a7bbfd6b2b7 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/offload.h @@ -60,7 +60,7 @@ struct iwl_stored_beacon_notif_common { } __packed; /** - * struct iwl_stored_beacon_notif - Stored beacon notification + * struct iwl_stored_beacon_notif_v2 - Stored beacon notification * * @common: fields common for all versions * @data: beacon data, length in @byte_count diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h index 08a2c416ce60..4d8a12799c4d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy-ctxt.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018, 2020-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018, 2020-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -113,7 +113,7 @@ struct iwl_phy_context_cmd_tail { } __packed; /** - * struct iwl_phy_context_cmd - config of the PHY context + * struct iwl_phy_context_cmd_v1 - config of the PHY context * ( PHY_CONTEXT_CMD = 0x8 ) * @id_and_color: ID and color of the relevant Binding * @action: action to perform, see &enum iwl_ctxt_action @@ -144,6 +144,7 @@ struct iwl_phy_context_cmd_v1 { * @rxchain_info: ??? * @sbb_bandwidth: 0 disabled, 1 - 40Mhz ... 
4 - 320MHz * @sbb_ctrl_channel_loc: location of the control channel + * @puncture_mask: bitmap of punctured subchannels * @dsp_cfg_flags: set to 0 * @reserved: reserved to align to 64 bit */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h index 92e4b62c119f..c73d4d597857 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/phy.h @@ -195,7 +195,7 @@ struct ct_kill_notif { } __packed; /* CT_KILL_NOTIFICATION_API_S_VER_1, CT_KILL_NOTIFICATION_API_S_VER_2 */ /** -* enum ctdp_cmd_operation - CTDP command operations +* enum iwl_mvm_ctdp_cmd_operation - CTDP command operations * @CTDP_CMD_OPERATION_START: update the current budget * @CTDP_CMD_OPERATION_STOP: stop ctdp * @CTDP_CMD_OPERATION_REPORT: get the average budget diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h index 532d5cfa9162..6e6a92d173cc 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/power.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/power.h @@ -462,7 +462,7 @@ struct iwl_per_chain_offset { } __packed; /* PER_CHAIN_LIMIT_OFFSET_PER_CHAIN_S_VER_1 */ /** - * struct iwl_geo_tx_power_profile_cmd_v1 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. + * struct iwl_geo_tx_power_profiles_cmd_v1 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. */ @@ -472,7 +472,7 @@ struct iwl_geo_tx_power_profiles_cmd_v1 { } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_1 */ /** - * struct iwl_geo_tx_power_profile_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. + * struct iwl_geo_tx_power_profiles_cmd_v2 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) @@ -484,7 +484,7 @@ struct iwl_geo_tx_power_profiles_cmd_v2 { } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_2 */ /** - * struct iwl_geo_tx_power_profile_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. + * struct iwl_geo_tx_power_profiles_cmd_v3 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) @@ -496,7 +496,7 @@ struct iwl_geo_tx_power_profiles_cmd_v3 { } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_3 */ /** - * struct iwl_geo_tx_power_profile_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. + * struct iwl_geo_tx_power_profiles_cmd_v4 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. * @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) @@ -508,7 +508,7 @@ struct iwl_geo_tx_power_profiles_cmd_v4 { } __packed; /* PER_CHAIN_LIMIT_OFFSET_CMD_VER_4 */ /** - * struct iwl_geo_tx_power_profile_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. + * struct iwl_geo_tx_power_profiles_cmd_v5 - struct for PER_CHAIN_LIMIT_OFFSET_CMD cmd. * @ops: operations, value from &enum iwl_geo_per_chain_offset_operation * @table: offset profile per band. 
* @table_revision: 0 for not-South Korea, 1 for South Korea (the name is misleading) @@ -569,9 +569,12 @@ enum iwl_ppag_flags { * @v2: version 2 * version 3, 4, 5 and 6 are the same structure as v2, * but has a different format of the flags bitmap - * @flags: values from &enum iwl_ppag_flags - * @gain: table of antenna gain values per chain and sub-band - * @reserved: reserved + * @v1.flags: values from &enum iwl_ppag_flags + * @v1.gain: table of antenna gain values per chain and sub-band + * @v1.reserved: reserved + * @v2.flags: values from &enum iwl_ppag_flags + * @v2.gain: table of antenna gain values per chain and sub-band + * @v2.reserved: reserved */ union iwl_ppag_table_cmd { struct { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index a1a272433b09..1a60f0cdf972 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2022 Intel Corporation + * Copyright (C) 2012-2014, 2018-2022, 2024 Intel Corporation * Copyright (C) 2017 Intel Deutschland GmbH */ #ifndef __iwl_fw_api_rs_h__ @@ -9,7 +9,7 @@ #include "mac.h" /** - * enum iwl_tlc_mng_cfg_flags_enum - options for TLC config flags + * enum iwl_tlc_mng_cfg_flags - options for TLC config flags * @IWL_TLC_MNG_CFG_FLAGS_STBC_MSK: enable STBC. For HE this enables STBC for * bandwidths <= 80MHz * @IWL_TLC_MNG_CFG_FLAGS_LDPC_MSK: enable LDPC diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index e71f29d0c694..691c879cb90d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2015-2017 Intel Deutschland GmbH */ @@ -710,7 +710,15 @@ struct iwl_rx_mpdu_desc { __le32 reorder_data; union { + /** + * @v1: version 1 of the remaining RX descriptor, + * see &struct iwl_rx_mpdu_desc_v1 + */ struct iwl_rx_mpdu_desc_v1 v1; + /** + * @v3: version 3 of the remaining RX descriptor, + * see &struct iwl_rx_mpdu_desc_v3 + */ struct iwl_rx_mpdu_desc_v3 v3; }; } __packed; /* RX_MPDU_RES_START_API_S_VER_3, @@ -976,7 +984,7 @@ struct iwl_ba_window_status_notif { } __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */ /** - * struct iwl_rfh_queue_config - RX queue configuration + * struct iwl_rfh_queue_data - RX queue configuration * @q_num: Q num * @enable: enable queue * @reserved: alignment diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h index 6684506f4fc4..8598031567bb 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/scan.h @@ -149,7 +149,7 @@ struct iwl_scan_offload_profile_cfg_data { } __packed; /** - * struct iwl_scan_offload_profile_cfg + * struct iwl_scan_offload_profile_cfg_v1 - scan offload profile config * @profiles: profiles to search for match * @data: the rest of the data for profile_cfg */ @@ -423,7 +423,7 @@ struct iwl_lmac_scan_complete_notif { } __packed; /** - * struct iwl_scan_offload_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2 + * struct iwl_periodic_scan_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2 * 
@last_schedule_line: last schedule line executed (fast or regular) * @last_schedule_iteration: last scan iteration executed before scan abort * @status: &enum iwl_scan_offload_complete_status @@ -443,10 +443,10 @@ struct iwl_periodic_scan_complete { /* UMAC Scan API */ /* The maximum of either of these cannot exceed 8, because we use an - * 8-bit mask (see IWL_MVM_SCAN_MASK in mvm.h). + * 8-bit mask (see enum iwl_scan_status). */ -#define IWL_MVM_MAX_UMAC_SCANS 4 -#define IWL_MVM_MAX_LMAC_SCANS 1 +#define IWL_MAX_UMAC_SCANS 4 +#define IWL_MAX_LMAC_SCANS 1 enum scan_config_flags { SCAN_CONFIG_FLAG_ACTIVATE = BIT(0), @@ -789,7 +789,7 @@ struct iwl_scan_req_umac_tail_v1 { } __packed; /** - * struct iwl_scan_req_umac_tail - the rest of the UMAC scan request command + * struct iwl_scan_req_umac_tail_v2 - the rest of the UMAC scan request command * parameters following channels configuration array. * @schedule: two scheduling plans. * @delay: delay in TUs before starting the first scan iteration @@ -1085,7 +1085,7 @@ struct iwl_scan_req_params_v12 { } __packed; /* SCAN_REQUEST_PARAMS_API_S_VER_12 */ /** - * struct iwl_scan_req_params_v16 + * struct iwl_scan_req_params_v17 - scan request parameters (v17) * @general_params: &struct iwl_scan_general_params_v11 * @channel_params: &struct iwl_scan_channel_params_v7 * @periodic_params: &struct iwl_scan_periodic_parms_v1 @@ -1111,7 +1111,7 @@ struct iwl_scan_req_umac_v12 { } __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_12 */ /** - * struct iwl_scan_req_umac_v16 + * struct iwl_scan_req_umac_v17 - scan request command (v17) * @uid: scan id, &enum iwl_umac_scan_uid_offsets * @ooc_priority: out of channel priority - &enum iwl_scan_priority * @scan_params: scan parameters diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h index 2e15be71c957..f4b827b58bd3 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/time-event.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2012-2014, 2018-2020, 2022-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2020, 2022-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -340,11 +340,13 @@ struct iwl_hs20_roc_res { * @ROC_ACTIVITY_HOTSPOT: ROC for hs20 activity * @ROC_ACTIVITY_P2P_DISC: ROC for p2p discoverability activity * @ROC_ACTIVITY_P2P_TXRX: ROC for p2p action frames activity + * @ROC_ACTIVITY_P2P_NEG: ROC for p2p negotiation (used also for TX) */ enum iwl_roc_activity { ROC_ACTIVITY_HOTSPOT, ROC_ACTIVITY_P2P_DISC, ROC_ACTIVITY_P2P_TXRX, + ROC_ACTIVITY_P2P_NEG, ROC_NUM_ACTIVITIES }; /* ROC_ACTIVITY_API_E_VER_1 */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index bbd176d88820..c5277e2f8cd4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -698,6 +698,7 @@ enum iwl_mvm_ba_resp_flags { * @query_frame_cnt: SCD query frame count * @txed: number of frames sent in the aggregation (all-TIDs) * @done: number of frames that were Acked by the BA (all-TIDs) + * @rts_retry_cnt: RTS retry count * @reserved: reserved (for alignment) * @wireless_time: Wireless-media time * @tx_rate: the rate the aggregation was sent at @@ -718,7 +719,8 @@ struct iwl_mvm_compressed_ba_notif { __le16 query_frame_cnt; __le16 txed; __le16 done; - __le16 
reserved; + u8 rts_retry_cnt; + u8 reserved; __le32 wireless_time; __le32 tx_rate; __le16 tfd_cnt; @@ -864,7 +866,7 @@ enum iwl_dump_control { }; /** - * struct iwl_tx_path_flush_cmd -- queue/FIFO flush command + * struct iwl_tx_path_flush_cmd_v1 -- queue/FIFO flush command * @queues_ctl: bitmap of queues to flush * @flush_ctl: control flags * @reserved: reserved diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index 945ffc083d25..fa57df336785 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -1168,17 +1168,13 @@ static int iwl_dump_ini_config_iter(struct iwl_fw_runtime *fwrt, le32_to_cpu(reg->dev_addr.offset); int i; - /* we shouldn't get here if the trans doesn't have read_config32 */ - if (WARN_ON_ONCE(!trans->ops->read_config32)) - return -EOPNOTSUPP; - range->internal_base_addr = cpu_to_le32(addr); range->range_data_size = reg->dev_addr.size; for (i = 0; i < le32_to_cpu(reg->dev_addr.size); i += 4) { int ret; u32 tmp; - ret = trans->ops->read_config32(trans, addr + i, &tmp); + ret = iwl_trans_read_config32(trans, addr + i, &tmp); if (ret < 0) return ret; diff --git a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c index 751a125a1566..893b21fcaf87 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/debugfs.c @@ -230,8 +230,7 @@ static ssize_t iwl_dbgfs_send_hcmd_write(struct iwl_fw_runtime *fwrt, char *buf, .data = { NULL, }, }; - if (fwrt->ops && fwrt->ops->fw_running && - !fwrt->ops->fw_running(fwrt->ops_ctx)) + if (!iwl_trans_fw_running(fwrt->trans)) return -EIO; if (count < header_size + 1 || count > 1024 * 4) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index 5c76e3b94968..e63b08b7d336 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -248,7 +248,7 @@ struct iwl_fw_error_dump_mem { #define IWL_INI_DUMP_NAME_TYPE (BIT(31) | BIT(24)) /** - * struct iwl_fw_error_dump_data - data for one type + * struct iwl_fw_ini_error_dump_data - data for one type * @type: &enum iwl_fw_ini_region_type * @sub_type: sub type id * @sub_type_ver: sub type version @@ -278,7 +278,7 @@ struct iwl_fw_ini_dump_entry { } __packed; /** - * struct iwl_fw_error_dump_file - header of dump file + * struct iwl_fw_ini_dump_file_hdr - header of dump file * @barker: must be %IWL_FW_INI_ERROR_DUMP_BARKER * @file_len: the length of all the file including the header */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/init.c b/drivers/net/wireless/intel/iwlwifi/fw/init.c index 135bd48bfe9f..d8b083be5b6b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/init.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/init.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2019-2021 Intel Corporation + * Copyright (C) 2019-2021, 2024 Intel Corporation */ #include "iwl-drv.h" #include "runtime.h" @@ -135,7 +135,9 @@ int iwl_configure_rxq(struct iwl_fw_runtime *fwrt) struct iwl_trans_rxq_dma_data data; cmd->data[i].q_num = i + 1; - iwl_trans_get_rxq_dma_data(fwrt->trans, i + 1, &data); + ret = iwl_trans_get_rxq_dma_data(fwrt->trans, i + 1, &data); + if (ret) + goto out; cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb); cmd->data[i].urbd_stts_wrptr = @@ -149,6 +151,7 @@ int 
iwl_configure_rxq(struct iwl_fw_runtime *fwrt) ret = iwl_trans_send_cmd(fwrt->trans, &hcmd); +out: kfree(cmd); if (ret) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c index b9bb3636e88f..560a91998cc4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.c @@ -497,9 +497,13 @@ static size_t iwl_get_lari_config_cmd_size(u8 cmd_ver) size_t cmd_size; switch (cmd_ver) { - case 10: + case 12: + case 11: cmd_size = sizeof(struct iwl_lari_config_change_cmd); break; + case 10: + cmd_size = sizeof(struct iwl_lari_config_change_cmd_v10); + break; case 9: case 8: case 7: @@ -560,6 +564,9 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt, if (!ret) { if (cmd_ver < 8) value &= ~ACTIVATE_5G2_IN_WW_MASK; + if (cmd_ver < 12) + value &= CHAN_STATE_ACTIVE_BITMAP_CMD_V11; + cmd->chan_state_active_bitmap = cpu_to_le32(value); } @@ -580,6 +587,10 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt, if (!ret) cmd->oem_320mhz_allow_bitmap = cpu_to_le32(value); + ret = iwl_bios_get_dsm(fwrt, DSM_FUNC_ENABLE_11BE, &value); + if (!ret) + cmd->oem_11be_allow_bitmap = cpu_to_le32(value); + if (cmd->config_bitmap || cmd->oem_uhb_allow_bitmap || cmd->oem_11ax_allow_bitmap || @@ -587,7 +598,8 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt, cmd->chan_state_active_bitmap || cmd->force_disable_channels_bitmap || cmd->edt_bitmap || - cmd->oem_320mhz_allow_bitmap) { + cmd->oem_320mhz_allow_bitmap || + cmd->oem_11be_allow_bitmap) { IWL_DEBUG_RADIO(fwrt, "sending LARI_CONFIG_CHANGE, config_bitmap=0x%x, oem_11ax_allow_bitmap=0x%x\n", le32_to_cpu(cmd->config_bitmap), @@ -605,6 +617,9 @@ int iwl_fill_lari_config(struct iwl_fw_runtime *fwrt, "sending LARI_CONFIG_CHANGE, edt_bitmap=0x%x, oem_320mhz_allow_bitmap=0x%x\n", le32_to_cpu(cmd->edt_bitmap), le32_to_cpu(cmd->oem_320mhz_allow_bitmap)); + IWL_DEBUG_RADIO(fwrt, + "sending LARI_CONFIG_CHANGE, oem_11be_allow_bitmap=0x%x\n", + le32_to_cpu(cmd->oem_11be_allow_bitmap)); } else { return 1; } diff --git a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h index 633c9ad9af84..e2c056f483c1 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/regulatory.h @@ -115,7 +115,8 @@ enum iwl_dsm_funcs { DSM_FUNC_FORCE_DISABLE_CHANNELS = 9, DSM_FUNC_ENERGY_DETECTION_THRESHOLD = 10, DSM_FUNC_RFI_CONFIG = 11, - DSM_FUNC_NUM_FUNCS = 12, + DSM_FUNC_ENABLE_11BE = 12, + DSM_FUNC_NUM_FUNCS = 13, }; enum iwl_dsm_values_srd { diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 9122f9a1260a..048877fa7c71 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -12,14 +12,13 @@ #include "fw/api/debug.h" #include "fw/api/paging.h" #include "fw/api/power.h" -#include "iwl-eeprom-parse.h" +#include "iwl-nvm-utils.h" #include "fw/acpi.h" #include "fw/regulatory.h" struct iwl_fw_runtime_ops { void (*dump_start)(void *ctx); void (*dump_end)(void *ctx); - bool (*fw_running)(void *ctx); int (*send_hcmd)(void *ctx, struct iwl_host_cmd *host_cmd); bool (*d3_debug_enable)(void *ctx); }; @@ -104,7 +103,6 @@ struct iwl_txf_iter_data { * @cur_fw_img: current firmware image, must be maintained by * the driver by calling &iwl_fw_set_current_image() * @dump: debug dump data - * @uats_enabled: VLP or AFC AP is enabled * @uats_table: AP type 
table * @uefi_tables_lock_status: The status of the WIFI GUID UEFI variables lock: * 0: Unlocked, 1 and 2: Locked. @@ -184,7 +182,6 @@ struct iwl_fw_runtime { bool sgom_enabled; struct iwl_mcc_allowed_ap_type_cmd uats_table; u8 uefi_tables_lock_status; - bool uats_enabled; }; void iwl_fw_runtime_init(struct iwl_fw_runtime *fwrt, struct iwl_trans *trans, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 732889f96ca2..b2abd4fd1944 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -241,7 +241,7 @@ enum iwl_cfg_trans_ltr_delay { }; /** - * struct iwl_cfg_trans - information needed to start the trans + * struct iwl_cfg_trans_params - information needed to start the trans * * These values are specific to the device ID and do not change when * multiple configs are used for a single device ID. They values are @@ -258,6 +258,7 @@ enum iwl_cfg_trans_ltr_delay { * @mq_rx_supported: multi-queue rx support * @integrated: discrete or integrated * @low_latency_xtal: use the low latency xtal if supported + * @bisr_workaround: BISR hardware workaround (for 22260 series devices) * @ltr_delay: LTR delay parameter, &enum iwl_cfg_trans_ltr_delay. * @imr_enabled: use the IMR if supported. */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index 4511d7fb2279..98563757ce2c 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2023 Intel Corporation + * Copyright (C) 2005-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2016 Intel Deutschland GmbH */ @@ -304,9 +304,7 @@ #define CSR_HW_RFID_IS_CDB(_val) (((_val) & 0x10000000) >> 28) #define CSR_HW_RFID_IS_JACKET(_val) (((_val) & 0x20000000) >> 29) -/** - * hw_rev values - */ +/* hw_rev values */ enum { SILICON_A_STEP = 0, SILICON_B_STEP, diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c index 561d0c261123..08d990ba8a79 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -223,12 +223,6 @@ static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans, return -EINVAL; } - if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG && - !trans->ops->read_config32) { - IWL_ERR(trans, "WRT: Unsupported region type %u\n", type); - return -EOPNOTSUPP; - } - if (type == IWL_FW_INI_REGION_INTERNAL_BUFFER) { trans->dbg.imr_data.sram_addr = le32_to_cpu(reg->internal_buffer.base_addr); @@ -1246,12 +1240,6 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync, } fwrt->trans->dbg.restart_required = false; - IWL_DEBUG_FW(fwrt, "WRT: tp %d, reset_fw %d\n", - tp, dump_data.trig->reset_fw); - IWL_DEBUG_FW(fwrt, - "WRT: restart_required %d, last_tp_resetfw %d\n", - fwrt->trans->dbg.restart_required, - fwrt->trans->dbg.last_tp_resetfw); if (fwrt->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_9000) { @@ -1261,22 +1249,17 @@ iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt, bool sync, IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) { fwrt->trans->dbg.restart_required = false; fwrt->trans->dbg.last_tp_resetfw = 0xFF; - IWL_DEBUG_FW(fwrt, "WRT: FW_ASSERT due to reset_fw_mode-no restart\n"); } else if (le32_to_cpu(dump_data.trig->reset_fw) == 
IWL_FW_INI_RESET_FW_MODE_STOP_AND_RELOAD_FW) { - IWL_DEBUG_FW(fwrt, "WRT: stop and reload firmware\n"); fwrt->trans->dbg.restart_required = true; } else if (le32_to_cpu(dump_data.trig->reset_fw) == IWL_FW_INI_RESET_FW_MODE_STOP_FW_ONLY) { - IWL_DEBUG_FW(fwrt, - "WRT: stop only and no reload firmware\n"); fwrt->trans->dbg.restart_required = false; fwrt->trans->dbg.last_tp_resetfw = le32_to_cpu(dump_data.trig->reset_fw); } else if (le32_to_cpu(dump_data.trig->reset_fw) == IWL_FW_INI_RESET_FW_MODE_NOTHING) { - IWL_DEBUG_FW(fwrt, - "WRT: nothing need to be done after debug collection\n"); + /* nothing */ } else { IWL_ERR(fwrt, "WRT: wrong resetfw %d\n", le32_to_cpu(dump_data.trig->reset_fw)); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h index 2c280a2fe3df..0d4a0896a2c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-data.h @@ -3,7 +3,7 @@ * * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Deutschland GmbH - * Copyright(c) 2018 - 2019, 2023 Intel Corporation + * Copyright(c) 2018 - 2019, 2023-2024 Intel Corporation *****************************************************************************/ #if !defined(__IWLWIFI_DEVICE_TRACE_DATA) || defined(TRACE_HEADER_MULTI_READ) @@ -28,7 +28,7 @@ TRACE_EVENT(iwlwifi_dev_tx_tb, TP_fast_assign( DEV_ASSIGN; __entry->phys = phys; - if (iwl_trace_data(skb)) + if (__get_dynamic_array_len(data)) memcpy(__get_dynamic_array(data), data_src, data_len); ), TP_printk("[%s] TX frame data", __get_str(dev)) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h index e656bf6bc003..ead72c3d33bd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-devtrace-iwlwifi.h @@ -4,7 +4,7 @@ * Copyright(c) 2009 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH - * Copyright(c) 2018, 2023 Intel Corporation + * Copyright(c) 2018, 2023-2024 Intel Corporation *****************************************************************************/ #if !defined(__IWLWIFI_DEVICE_TRACE_IWLWIFI) || defined(TRACE_HEADER_MULTI_READ) @@ -88,8 +88,8 @@ TRACE_EVENT(iwlwifi_dev_tx, * for the possible padding). */ __dynamic_array(u8, buf0, buf0_len) - __dynamic_array(u8, buf1, hdr_len > 0 && iwl_trace_data(skb) ? - 0 : skb->len - hdr_len) + __dynamic_array(u8, buf1, hdr_len > 0 && !iwl_trace_data(skb) ? 
+ skb->len - hdr_len : 0) ), TP_fast_assign( DEV_ASSIGN; @@ -99,7 +99,7 @@ TRACE_EVENT(iwlwifi_dev_tx, __entry->framelen += skb->len - hdr_len; memcpy(__get_dynamic_array(tfd), tfd, tfdlen); memcpy(__get_dynamic_array(buf0), buf0, buf0_len); - if (hdr_len > 0 && !iwl_trace_data(skb)) + if (__get_dynamic_array_len(buf1)) skb_copy_bits(skb, hdr_len, __get_dynamic_array(buf1), skb->len - hdr_len); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index d156a9c64194..aaaabd67f959 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -982,16 +982,10 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, minor = le32_to_cpup(ptr++); local_comp = le32_to_cpup(ptr); - if (major >= 35) - snprintf(drv->fw.fw_version, - sizeof(drv->fw.fw_version), - "%u.%08x.%u %s", major, minor, - local_comp, iwl_reduced_fw_name(drv)); - else - snprintf(drv->fw.fw_version, - sizeof(drv->fw.fw_version), - "%u.%u.%u %s", major, minor, - local_comp, iwl_reduced_fw_name(drv)); + snprintf(drv->fw.fw_version, + sizeof(drv->fw.fw_version), + "%u.%08x.%u %s", major, minor, + local_comp, iwl_reduced_fw_name(drv)); break; } case IWL_UCODE_TLV_FW_DBG_DEST: { @@ -1842,7 +1836,7 @@ void iwl_drv_stop(struct iwl_drv *drv) mutex_unlock(&iwlwifi_opmode_table_mtx); #ifdef CONFIG_IWLWIFI_DEBUGFS - drv->trans->ops->debugfs_cleanup(drv->trans); + iwl_trans_debugfs_cleanup(drv->trans); debugfs_remove_recursive(drv->dbgfs_drv); #endif diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c deleted file mode 100644 index 5f386bb1a353..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.c +++ /dev/null @@ -1,394 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause -/* - * Copyright (C) 2005-2014, 2018-2019, 2021 Intel Corporation - */ -#include -#include -#include - -#include "iwl-drv.h" -#include "iwl-debug.h" -#include "iwl-eeprom-read.h" -#include "iwl-io.h" -#include "iwl-prph.h" -#include "iwl-csr.h" - -/* - * EEPROM access time values: - * - * Driver initiates EEPROM read by writing byte address << 1 to CSR_EEPROM_REG. - * Driver then polls CSR_EEPROM_REG for CSR_EEPROM_REG_READ_VALID_MSK (0x1). - * When polling, wait 10 uSec between polling loops, up to a maximum 5000 uSec. - * Driver reads 16-bit value from bits 31-16 of CSR_EEPROM_REG. - */ -#define IWL_EEPROM_ACCESS_TIMEOUT 5000 /* uSec */ - -/* - * The device's EEPROM semaphore prevents conflicts between driver and uCode - * when accessing the EEPROM; each access is a series of pulses to/from the - * EEPROM chip, not a single event, so even reads could conflict if they - * weren't arbitrated by the semaphore. 
- */ -#define IWL_EEPROM_SEM_TIMEOUT 10 /* microseconds */ -#define IWL_EEPROM_SEM_RETRY_LIMIT 1000 /* number of attempts (not time) */ - - -static int iwl_eeprom_acquire_semaphore(struct iwl_trans *trans) -{ - u16 count; - int ret; - - for (count = 0; count < IWL_EEPROM_SEM_RETRY_LIMIT; count++) { - /* Request semaphore */ - iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); - - /* See if we got it */ - ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, - IWL_EEPROM_SEM_TIMEOUT); - if (ret >= 0) { - IWL_DEBUG_EEPROM(trans->dev, - "Acquired semaphore after %d tries.\n", - count+1); - return ret; - } - } - - return ret; -} - -static void iwl_eeprom_release_semaphore(struct iwl_trans *trans) -{ - iwl_clear_bit(trans, CSR_HW_IF_CONFIG_REG, - CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); -} - -static int iwl_eeprom_verify_signature(struct iwl_trans *trans, bool nvm_is_otp) -{ - u32 gp = iwl_read32(trans, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; - - IWL_DEBUG_EEPROM(trans->dev, "EEPROM signature=0x%08x\n", gp); - - switch (gp) { - case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP: - if (!nvm_is_otp) { - IWL_ERR(trans, "EEPROM with bad signature: 0x%08x\n", - gp); - return -ENOENT; - } - return 0; - case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K: - case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K: - if (nvm_is_otp) { - IWL_ERR(trans, "OTP with bad signature: 0x%08x\n", gp); - return -ENOENT; - } - return 0; - case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP: - default: - IWL_ERR(trans, - "bad EEPROM/OTP signature, type=%s, EEPROM_GP=0x%08x\n", - nvm_is_otp ? "OTP" : "EEPROM", gp); - return -ENOENT; - } -} - -/****************************************************************************** - * - * OTP related functions - * -******************************************************************************/ - -static void iwl_set_otp_access_absolute(struct iwl_trans *trans) -{ - iwl_read32(trans, CSR_OTP_GP_REG); - - iwl_clear_bit(trans, CSR_OTP_GP_REG, - CSR_OTP_GP_REG_OTP_ACCESS_MODE); -} - -static int iwl_nvm_is_otp(struct iwl_trans *trans) -{ - u32 otpgp; - - /* OTP only valid for CP/PP and after */ - switch (trans->hw_rev & CSR_HW_REV_TYPE_MSK) { - case CSR_HW_REV_TYPE_NONE: - IWL_ERR(trans, "Unknown hardware type\n"); - return -EIO; - case CSR_HW_REV_TYPE_5300: - case CSR_HW_REV_TYPE_5350: - case CSR_HW_REV_TYPE_5100: - case CSR_HW_REV_TYPE_5150: - return 0; - default: - otpgp = iwl_read32(trans, CSR_OTP_GP_REG); - if (otpgp & CSR_OTP_GP_REG_DEVICE_SELECT) - return 1; - return 0; - } -} - -static int iwl_init_otp_access(struct iwl_trans *trans) -{ - int ret; - - ret = iwl_finish_nic_init(trans); - if (ret) - return ret; - - iwl_set_bits_prph(trans, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_RESET_REQ); - udelay(5); - iwl_clear_bits_prph(trans, APMG_PS_CTRL_REG, - APMG_PS_CTRL_VAL_RESET_REQ); - - /* - * CSR auto clock gate disable bit - - * this is only applicable for HW with OTP shadow RAM - */ - if (trans->trans_cfg->base_params->shadow_ram_support) - iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG, - CSR_RESET_LINK_PWR_MGMT_DISABLED); - - return 0; -} - -static int iwl_read_otp_word(struct iwl_trans *trans, u16 addr, - __le16 *eeprom_data) -{ - int ret = 0; - u32 r; - u32 otpgp; - - iwl_write32(trans, CSR_EEPROM_REG, - CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); - ret = iwl_poll_bit(trans, CSR_EEPROM_REG, - CSR_EEPROM_REG_READ_VALID_MSK, - CSR_EEPROM_REG_READ_VALID_MSK, - IWL_EEPROM_ACCESS_TIMEOUT); - if 
(ret < 0) { - IWL_ERR(trans, "Time out reading OTP[%d]\n", addr); - return ret; - } - r = iwl_read32(trans, CSR_EEPROM_REG); - /* check for ECC errors: */ - otpgp = iwl_read32(trans, CSR_OTP_GP_REG); - if (otpgp & CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK) { - /* stop in this case */ - /* set the uncorrectable OTP ECC bit for acknowledgment */ - iwl_set_bit(trans, CSR_OTP_GP_REG, - CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); - IWL_ERR(trans, "Uncorrectable OTP ECC error, abort OTP read\n"); - return -EINVAL; - } - if (otpgp & CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK) { - /* continue in this case */ - /* set the correctable OTP ECC bit for acknowledgment */ - iwl_set_bit(trans, CSR_OTP_GP_REG, - CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK); - IWL_ERR(trans, "Correctable OTP ECC error, continue read\n"); - } - *eeprom_data = cpu_to_le16(r >> 16); - return 0; -} - -/* - * iwl_is_otp_empty: check for empty OTP - */ -static bool iwl_is_otp_empty(struct iwl_trans *trans) -{ - u16 next_link_addr = 0; - __le16 link_value; - bool is_empty = false; - - /* locate the beginning of OTP link list */ - if (!iwl_read_otp_word(trans, next_link_addr, &link_value)) { - if (!link_value) { - IWL_ERR(trans, "OTP is empty\n"); - is_empty = true; - } - } else { - IWL_ERR(trans, "Unable to read first block of OTP list.\n"); - is_empty = true; - } - - return is_empty; -} - - -/* - * iwl_find_otp_image: find EEPROM image in OTP - * finding the OTP block that contains the EEPROM image. - * the last valid block on the link list (the block _before_ the last block) - * is the block we should read and used to configure the device. - * If all the available OTP blocks are full, the last block will be the block - * we should read and used to configure the device. - * only perform this operation if shadow RAM is disabled - */ -static int iwl_find_otp_image(struct iwl_trans *trans, - u16 *validblockaddr) -{ - u16 next_link_addr = 0, valid_addr; - __le16 link_value = 0; - int usedblocks = 0; - - /* set addressing mode to absolute to traverse the link list */ - iwl_set_otp_access_absolute(trans); - - /* checking for empty OTP or error */ - if (iwl_is_otp_empty(trans)) - return -EINVAL; - - /* - * start traverse link list - * until reach the max number of OTP blocks - * different devices have different number of OTP blocks - */ - do { - /* save current valid block address - * check for more block on the link list - */ - valid_addr = next_link_addr; - next_link_addr = le16_to_cpu(link_value) * sizeof(u16); - IWL_DEBUG_EEPROM(trans->dev, "OTP blocks %d addr 0x%x\n", - usedblocks, next_link_addr); - if (iwl_read_otp_word(trans, next_link_addr, &link_value)) - return -EINVAL; - if (!link_value) { - /* - * reach the end of link list, return success and - * set address point to the starting address - * of the image - */ - *validblockaddr = valid_addr; - /* skip first 2 bytes (link list pointer) */ - *validblockaddr += 2; - return 0; - } - /* more in the link list, continue */ - usedblocks++; - } while (usedblocks <= trans->trans_cfg->base_params->max_ll_items); - - /* OTP has no valid blocks */ - IWL_DEBUG_EEPROM(trans->dev, "OTP has no valid blocks\n"); - return -EINVAL; -} - -/* - * iwl_read_eeprom - read EEPROM contents - * - * Load the EEPROM contents from adapter and return it - * and its size. - * - * NOTE: This routine uses the non-debug IO access functions. 
- */ -int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size) -{ - __le16 *e; - u32 gp = iwl_read32(trans, CSR_EEPROM_GP); - int sz; - int ret; - u16 addr; - u16 validblockaddr = 0; - u16 cache_addr = 0; - int nvm_is_otp; - - if (!eeprom || !eeprom_size) - return -EINVAL; - - nvm_is_otp = iwl_nvm_is_otp(trans); - if (nvm_is_otp < 0) - return nvm_is_otp; - - sz = trans->trans_cfg->base_params->eeprom_size; - IWL_DEBUG_EEPROM(trans->dev, "NVM size = %d\n", sz); - - e = kmalloc(sz, GFP_KERNEL); - if (!e) - return -ENOMEM; - - ret = iwl_eeprom_verify_signature(trans, nvm_is_otp); - if (ret < 0) { - IWL_ERR(trans, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); - goto err_free; - } - - /* Make sure driver (instead of uCode) is allowed to read EEPROM */ - ret = iwl_eeprom_acquire_semaphore(trans); - if (ret < 0) { - IWL_ERR(trans, "Failed to acquire EEPROM semaphore.\n"); - goto err_free; - } - - if (nvm_is_otp) { - ret = iwl_init_otp_access(trans); - if (ret) { - IWL_ERR(trans, "Failed to initialize OTP access.\n"); - goto err_unlock; - } - - iwl_write32(trans, CSR_EEPROM_GP, - iwl_read32(trans, CSR_EEPROM_GP) & - ~CSR_EEPROM_GP_IF_OWNER_MSK); - - iwl_set_bit(trans, CSR_OTP_GP_REG, - CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK | - CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK); - /* traversing the linked list if no shadow ram supported */ - if (!trans->trans_cfg->base_params->shadow_ram_support) { - ret = iwl_find_otp_image(trans, &validblockaddr); - if (ret) - goto err_unlock; - } - for (addr = validblockaddr; addr < validblockaddr + sz; - addr += sizeof(u16)) { - __le16 eeprom_data; - - ret = iwl_read_otp_word(trans, addr, &eeprom_data); - if (ret) - goto err_unlock; - e[cache_addr / 2] = eeprom_data; - cache_addr += sizeof(u16); - } - } else { - /* eeprom is an array of 16bit values */ - for (addr = 0; addr < sz; addr += sizeof(u16)) { - u32 r; - - iwl_write32(trans, CSR_EEPROM_REG, - CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); - - ret = iwl_poll_bit(trans, CSR_EEPROM_REG, - CSR_EEPROM_REG_READ_VALID_MSK, - CSR_EEPROM_REG_READ_VALID_MSK, - IWL_EEPROM_ACCESS_TIMEOUT); - if (ret < 0) { - IWL_ERR(trans, - "Time out reading EEPROM[%d]\n", addr); - goto err_unlock; - } - r = iwl_read32(trans, CSR_EEPROM_REG); - e[addr / 2] = cpu_to_le16(r >> 16); - } - } - - IWL_DEBUG_EEPROM(trans->dev, "NVM Type: %s\n", - nvm_is_otp ? 
"OTP" : "EEPROM"); - - iwl_eeprom_release_semaphore(trans); - - *eeprom_size = sz; - *eeprom = (u8 *)e; - return 0; - - err_unlock: - iwl_eeprom_release_semaphore(trans); - err_free: - kfree(e); - - return ret; -} -IWL_EXPORT_SYMBOL(iwl_read_eeprom); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h deleted file mode 100644 index 63b8e6c6659b..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-read.h +++ /dev/null @@ -1,12 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ -/* - * Copyright (C) 2005-2014 Intel Corporation - */ -#ifndef __iwl_eeprom_h__ -#define __iwl_eeprom_h__ - -#include "iwl-trans.h" - -int iwl_read_eeprom(struct iwl_trans *trans, u8 **eeprom, size_t *eeprom_size); - -#endif /* __iwl_eeprom_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index 6ba374efaacb..5c8f1868db64 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -15,7 +15,7 @@ /* Flow Handler Definitions */ /****************************/ -/** +/* * This I/O area is directly read/writable by driver (e.g. Linux uses writel()) * Addresses are offsets from device's PCI hardware base address. */ @@ -24,7 +24,7 @@ #define FH_MEM_LOWER_BOUND_GEN2 (0xa06000) #define FH_MEM_UPPER_BOUND_GEN2 (0xa08000) -/** +/* * Keep-Warm (KW) buffer base address. * * Driver must allocate a 4KByte buffer that is for keeping the @@ -44,7 +44,7 @@ #define FH_KW_MEM_ADDR_REG (FH_MEM_LOWER_BOUND + 0x97C) -/** +/* * TFD Circular Buffers Base (CBBC) addresses * * Device has 16 base pointer registers, one for each of 16 host-DRAM-resident @@ -143,7 +143,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, */ #define TFH_SRV_DMA_CHNL0_BC (0x1F70) -/** +/* * Rx SRAM Control and Status Registers (RSCSR) * * These registers provide handshake between driver and device for the Rx queue @@ -216,21 +216,21 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, #define FH_MEM_RSCSR_UPPER_BOUND (FH_MEM_LOWER_BOUND + 0xC00) #define FH_MEM_RSCSR_CHNL0 (FH_MEM_RSCSR_LOWER_BOUND) -/** +/* * Physical base address of 8-byte Rx Status buffer. * Bit fields: * 31-0: Rx status buffer physical base address [35:4], must 16-byte aligned. */ #define FH_RSCSR_CHNL0_STTS_WPTR_REG (FH_MEM_RSCSR_CHNL0) -/** +/* * Physical base address of Rx Buffer Descriptor Circular Buffer. * Bit fields: * 27-0: RBD CD physical base address [35:8], must be 256-byte aligned. */ #define FH_RSCSR_CHNL0_RBDCB_BASE_REG (FH_MEM_RSCSR_CHNL0 + 0x004) -/** +/* * Rx write pointer (index, really!). * Bit fields: * 11-0: Index of driver's most recent prepared-to-be-filled RBD, + 1. 
@@ -242,7 +242,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, #define FW_RSCSR_CHNL0_RXDCB_RDPTR_REG (FH_MEM_RSCSR_CHNL0 + 0x00c) #define FH_RSCSR_CHNL0_RDPTR FW_RSCSR_CHNL0_RXDCB_RDPTR_REG -/** +/* * Rx Config/Status Registers (RCSR) * Rx Config Reg for channel 0 (only channel used) * @@ -300,7 +300,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, #define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_NO_INT_VAL (0x00000000) #define FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL (0x00001000) -/** +/* * Rx Shared Status Registers (RSSR) * * After stopping Rx DMA channel (writing 0 to @@ -356,7 +356,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, #define RFH_RBDBUF_RBD0_LSB 0xA08300 #define RFH_RBDBUF_RBD_LSB(q) (RFH_RBDBUF_RBD0_LSB + (q) * 8) -/** +/* * RFH Status Register * * Bit fields: @@ -440,7 +440,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, #define FH_TFDIB_CTRL0_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl)) #define FH_TFDIB_CTRL1_REG(_chnl) (FH_TFDIB_LOWER_BOUND + 0x8 * (_chnl) + 0x4) -/** +/* * Transmit DMA Channel Control/Status Registers (TCSR) * * Device has one configuration register for each of 8 Tx DMA/FIFO channels @@ -501,7 +501,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, #define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM (20) #define FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX (12) -/** +/* * Tx Shared Status Registers (TSSR) * * After stopping Tx DMA channel (writing 0 to @@ -518,7 +518,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, #define FH_TSSR_TX_STATUS_REG (FH_TSSR_LOWER_BOUND + 0x010) -/** +/* * Bit fields for TSSR(Tx Shared Status & Control) error status register: * 31: Indicates an address error when accessed to internal memory * uCode/driver must write "1" in order to clear this flag @@ -634,7 +634,7 @@ enum iwl_tfd_tb_hi_n_len { }; /** - * struct iwl_tfd_tb transmit buffer descriptor within transmit frame descriptor + * struct iwl_tfd_tb - transmit buffer descriptor within transmit frame descriptor * * This structure contains dma address and length of transmission address * @@ -648,7 +648,7 @@ struct iwl_tfd_tb { } __packed; /** - * struct iwl_tfh_tb transmit buffer descriptor within transmit frame descriptor + * struct iwl_tfh_tb - transmit buffer descriptor within transmit frame descriptor * * This structure contains dma address and length of transmission address * @@ -717,7 +717,7 @@ struct iwl_tfh_tfd { /* Fixed (non-configurable) rx data from phy */ /** - * struct iwlagn_schedq_bc_tbl scheduler byte count table + * struct iwlagn_scd_bc_tbl - scheduler byte count table * base physical address provided by SCD_DRAM_BASE_ADDR * For devices up to 22000: * @tfd_offset: @@ -734,7 +734,7 @@ struct iwlagn_scd_bc_tbl { } __packed; /** - * struct iwl_gen3_bc_tbl_entry scheduler byte count table entry gen3 + * struct iwl_gen3_bc_tbl_entry - scheduler byte count table entry gen3 * For AX210 and on: * @tfd_offset: 0-12 - tx command byte count * 12-13 - number of 64 byte chunks diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-io.c b/drivers/net/wireless/intel/iwlwifi/iwl-io.c index c60f9466c5fd..060becfd64f3 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-io.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-io.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2022 Intel Corporation + * Copyright (C) 2003-2014, 2018-2022, 2024 Intel Corporation * Copyright (C) 
2015-2016 Intel Deutschland GmbH */ #include @@ -460,7 +460,7 @@ int iwl_finish_nic_init(struct iwl_trans *trans) */ if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_BZ) { iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY | + CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ | CSR_GP_CNTRL_REG_FLAG_MAC_INIT); poll_ready = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS; } else { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index 1cf26ab4f488..21eabfc3ffc8 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2022 Intel Corporation + * Copyright (C) 2005-2014, 2018-2022, 2024 Intel Corporation */ #ifndef __iwl_modparams_h__ #define __iwl_modparams_h__ @@ -106,4 +106,23 @@ static inline bool iwl_enable_tx_ampdu(void) return true; } +/* Verify amsdu_size module parameter and convert it to a rxb size */ +static inline enum iwl_amsdu_size +iwl_amsdu_size_to_rxb_size(void) +{ + switch (iwlwifi_mod_params.amsdu_size) { + case IWL_AMSDU_8K: + return IWL_AMSDU_8K; + case IWL_AMSDU_12K: + return IWL_AMSDU_12K; + default: + pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME, + iwlwifi_mod_params.amsdu_size); + fallthrough; + case IWL_AMSDU_DEF: + case IWL_AMSDU_4K: + return IWL_AMSDU_4K; + } +} + #endif /* #__iwl_modparams_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 149903f52567..d902121da009 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -38,16 +38,13 @@ enum nvm_offsets { N_HW_ADDRS = 3, NVM_CHANNELS = 0x1E0 - NVM_SW_SECTION, - /* NVM calibration section offset (in words) definitions */ - NVM_CALIB_SECTION = 0x2B8, - XTAL_CALIB = 0x316 - NVM_CALIB_SECTION, - /* NVM REGULATORY -Section offset (in words) definitions */ NVM_CHANNELS_SDP = 0, }; enum ext_nvm_offsets { /* NVM HW-Section offset (in words) definitions */ + MAC_ADDRESS_OVERRIDE_EXT_NVM = 1, /* NVM SW-Section offset (in words) definitions */ @@ -373,7 +370,9 @@ static u32 iwl_get_channel_flags(u8 ch_num, int ch_idx, enum nl80211_band band, flags |= IEEE80211_CHAN_IR_CONCURRENT; /* Set the AP type for the UHB case. */ - if (!(nvm_flags & NVM_CHANNEL_VLP)) + if (nvm_flags & NVM_CHANNEL_VLP) + flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP; + else flags |= IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT; if (!(nvm_flags & NVM_CHANNEL_AFC)) flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT; @@ -1574,9 +1573,6 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, ®ulatory[NVM_CHANNELS_SDP] : &nvm_sw[NVM_CHANNELS]; - /* in family 8000 Xtal calibration values moved to OTP */ - data->xtal_calib[0] = *(nvm_calib + XTAL_CALIB); - data->xtal_calib[1] = *(nvm_calib + XTAL_CALIB + 1); lar_enabled = true; } else { u16 lar_offset = data->nvm_version < 0xE39 ? 
@@ -1614,8 +1610,7 @@ IWL_EXPORT_SYMBOL(iwl_parse_nvm_data); static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, int ch_idx, u16 nvm_flags, struct iwl_reg_capa reg_capa, - const struct iwl_cfg *cfg, - bool uats_enabled) + const struct iwl_cfg *cfg) { u32 flags = NL80211_RRF_NO_HT40; @@ -1625,11 +1620,15 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, flags &= ~NL80211_RRF_NO_HT40PLUS; if (nvm_chan[ch_idx] >= FIRST_2GHZ_HT_MINUS) flags &= ~NL80211_RRF_NO_HT40MINUS; - } else if (nvm_flags & NVM_CHANNEL_40MHZ) { + } else if (ch_idx < NUM_2GHZ_CHANNELS + NUM_5GHZ_CHANNELS && + nvm_flags & NVM_CHANNEL_40MHZ) { if ((ch_idx - NUM_2GHZ_CHANNELS) % 2 == 0) flags &= ~NL80211_RRF_NO_HT40PLUS; else flags &= ~NL80211_RRF_NO_HT40MINUS; + } else if (nvm_flags & NVM_CHANNEL_40MHZ) { + flags &= ~NL80211_RRF_NO_HT40PLUS; + flags &= ~NL80211_RRF_NO_HT40MINUS; } if (!(nvm_flags & NVM_CHANNEL_80MHZ)) @@ -1662,13 +1661,13 @@ static u32 iwl_nvm_get_regdom_bw_flags(const u16 *nvm_chan, } /* Set the AP type for the UHB case. */ - if (uats_enabled) { - if (!(nvm_flags & NVM_CHANNEL_VLP)) - flags |= NL80211_RRF_NO_6GHZ_VLP_CLIENT; + if (nvm_flags & NVM_CHANNEL_VLP) + flags |= NL80211_RRF_ALLOW_6GHZ_VLP_AP; + else + flags |= NL80211_RRF_NO_6GHZ_VLP_CLIENT; - if (!(nvm_flags & NVM_CHANNEL_AFC)) - flags |= NL80211_RRF_NO_6GHZ_AFC_CLIENT; - } + if (!(nvm_flags & NVM_CHANNEL_AFC)) + flags |= NL80211_RRF_NO_6GHZ_AFC_CLIENT; /* * reg_capa is per regulatory domain so apply it for every channel @@ -1724,7 +1723,7 @@ static struct iwl_reg_capa iwl_get_reg_capa(u32 flags, u8 resp_ver) struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, - u16 geo_info, u32 cap, u8 resp_ver, bool uats_enabled) + u16 geo_info, u32 cap, u8 resp_ver) { int ch_idx; u16 ch_flags; @@ -1732,7 +1731,6 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, const u16 *nvm_chan; struct ieee80211_regdomain *regd, *copy_rd; struct ieee80211_reg_rule *rule; - enum nl80211_band band; int center_freq, prev_center_freq = 0; int valid_rules = 0; bool new_rule; @@ -1776,8 +1774,10 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, reg_capa = iwl_get_reg_capa(cap, resp_ver); for (ch_idx = 0; ch_idx < num_of_ch; ch_idx++) { + enum nl80211_band band = + iwl_nl80211_band_from_channel_idx(ch_idx); + ch_flags = (u16)__le32_to_cpup(channels + ch_idx); - band = iwl_nl80211_band_from_channel_idx(ch_idx); center_freq = ieee80211_channel_to_frequency(nvm_chan[ch_idx], band); new_rule = false; @@ -1790,7 +1790,7 @@ iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, reg_rule_flags = iwl_nvm_get_regdom_bw_flags(nvm_chan, ch_idx, ch_flags, reg_capa, - cfg, uats_enabled); + cfg); /* we can't continue the same rule */ if (ch_idx == 0 || prev_reg_rule_flags != reg_rule_flags || diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h index fd9c3bed9407..0c6c3fb8c6dd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.h @@ -1,13 +1,13 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2015, 2018-2023 Intel Corporation + * Copyright (C) 2005-2015, 2018-2024 Intel Corporation * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #ifndef __iwl_nvm_parse_h__ #define __iwl_nvm_parse_h__ #include -#include "iwl-eeprom-parse.h" +#include "iwl-nvm-utils.h" #include 
"mei/iwl-mei.h" /** @@ -38,7 +38,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, u8 tx_chains, u8 rx_chains); /** - * iwl_parse_mcc_info - parse MCC (mobile country code) info coming from FW + * iwl_parse_nvm_mcc_info - parse MCC (mobile country code) info coming from FW * * This function parses the regulatory channel data received as a * MCC_UPDATE_CMD command. It returns a newly allocation regulatory domain, @@ -50,7 +50,7 @@ iwl_parse_nvm_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, struct ieee80211_regdomain * iwl_parse_nvm_mcc_info(struct device *dev, const struct iwl_cfg *cfg, int num_of_ch, __le32 *channels, u16 fw_mcc, - u16 geo_info, u32 cap, u8 resp_ver, bool uats_enabled); + u16 geo_info, u32 cap, u8 resp_ver); /** * struct iwl_nvm_section - describes an NVM section in memory. diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c new file mode 100644 index 000000000000..b3c25acd3691 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.c @@ -0,0 +1,118 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* + * Copyright (C) 2005-2014, 2018-2021, 2023 Intel Corporation + * Copyright (C) 2015 Intel Mobile Communications GmbH + */ +#include +#include +#include +#include "iwl-drv.h" +#include "iwl-modparams.h" +#include "iwl-nvm-utils.h" + +int iwl_init_sband_channels(struct iwl_nvm_data *data, + struct ieee80211_supported_band *sband, + int n_channels, enum nl80211_band band) +{ + struct ieee80211_channel *chan = &data->channels[0]; + int n = 0, idx = 0; + + while (idx < n_channels && chan->band != band) + chan = &data->channels[++idx]; + + sband->channels = &data->channels[idx]; + + while (idx < n_channels && chan->band == band) { + chan = &data->channels[++idx]; + n++; + } + + sband->n_channels = n; + + return n; +} +IWL_EXPORT_SYMBOL(iwl_init_sband_channels); + +#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ +#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ + +void iwl_init_ht_hw_capab(struct iwl_trans *trans, + struct iwl_nvm_data *data, + struct ieee80211_sta_ht_cap *ht_info, + enum nl80211_band band, + u8 tx_chains, u8 rx_chains) +{ + const struct iwl_cfg *cfg = trans->cfg; + int max_bit_rate = 0; + + tx_chains = hweight8(tx_chains); + if (cfg->rx_with_siso_diversity) + rx_chains = 1; + else + rx_chains = hweight8(rx_chains); + + if (!(data->sku_cap_11n_enable) || + (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_ALL) || + !cfg->ht_params) { + ht_info->ht_supported = false; + return; + } + + if (data->sku_cap_mimo_disabled) + rx_chains = 1; + + ht_info->ht_supported = true; + ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40; + + if (cfg->ht_params->stbc) { + ht_info->cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); + + if (tx_chains > 1) + ht_info->cap |= IEEE80211_HT_CAP_TX_STBC; + } + + if (cfg->ht_params->ldpc) + ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING; + + if (trans->trans_cfg->mq_rx_supported || + iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K) + ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; + + ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; + ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_4; + + ht_info->mcs.rx_mask[0] = 0xFF; + ht_info->mcs.rx_mask[1] = 0x00; + ht_info->mcs.rx_mask[2] = 0x00; + + if (rx_chains >= 2) + ht_info->mcs.rx_mask[1] = 0xFF; + if (rx_chains >= 3) + ht_info->mcs.rx_mask[2] = 0xFF; + + if (cfg->ht_params->ht_greenfield_support) + ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; + ht_info->cap |= IEEE80211_HT_CAP_SGI_20; + + max_bit_rate = 
MAX_BIT_RATE_20_MHZ; + + if (cfg->ht_params->ht40_bands & BIT(band)) { + ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; + ht_info->cap |= IEEE80211_HT_CAP_SGI_40; + max_bit_rate = MAX_BIT_RATE_40_MHZ; + } + + /* Highest supported Rx data rate */ + max_bit_rate *= rx_chains; + WARN_ON(max_bit_rate & ~IEEE80211_HT_MCS_RX_HIGHEST_MASK); + ht_info->mcs.rx_highest = cpu_to_le16(max_bit_rate); + + /* Tx MCS capabilities */ + ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; + if (tx_chains != rx_chains) { + ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; + ht_info->mcs.tx_params |= ((tx_chains - 1) << + IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); + } +} +IWL_EXPORT_SYMBOL(iwl_init_ht_hw_capab); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.h similarity index 73% rename from drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h rename to drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.h index 34a178a2eb5d..ac0a29a1c31f 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-utils.h @@ -58,23 +58,6 @@ struct iwl_nvm_data { struct ieee80211_channel channels[]; }; -/** - * iwl_parse_eeprom_data - parse EEPROM data and return values - * - * @trans: ransport we're parsing for, for debug only - * @cfg: device configuration for parsing and overrides - * @eeprom: the EEPROM data - * @eeprom_size: length of the EEPROM data - * - * This function parses all EEPROM values we need and then - * returns a (newly allocated) struct containing all the - * relevant values for driver use. The struct must be freed - * later with iwl_free_nvm_data(). - */ -struct iwl_nvm_data * -iwl_parse_eeprom_data(struct iwl_trans *trans, const struct iwl_cfg *cfg, - const u8 *eeprom, size_t eeprom_size); - int iwl_init_sband_channels(struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, int n_channels, enum nl80211_band band); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h index 1ca82f3e4ebf..595fa6ddf084 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-op-mode.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2005-2014, 2018-2021 Intel Corporation + * Copyright (C) 2005-2014, 2018-2021, 2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 Intel Deutschland GmbH */ @@ -185,7 +185,8 @@ static inline void iwl_op_mode_cmd_queue_full(struct iwl_op_mode *op_mode) static inline void iwl_op_mode_nic_config(struct iwl_op_mode *op_mode) { might_sleep(); - op_mode->ops->nic_config(op_mode); + if (op_mode->ops->nic_config) + op_mode->ops->nic_config(op_mode); } static inline void iwl_op_mode_wimax_active(struct iwl_op_mode *op_mode) diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 898e22e0d1ab..dc171c29eb7b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -96,7 +96,7 @@ #define DTSC_PTAT_AVG (0x00a10650) -/** +/* * Tx Scheduler * * The Tx Scheduler selects the next frame to be transmitted, choosing TFDs @@ -169,7 +169,7 @@ */ #define SCD_MEM_LOWER_BOUND (0x0000) -/** +/* * Max Tx window size is the max number of contiguous TFDs that the scheduler * can keep track of at one time when creating block-ack chains of frames. 
* Note that "64" matches the number of ack bits in a block-ack packet. diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c index f95098c21c7d..3c9d91496c82 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.c @@ -2,7 +2,7 @@ /* * Copyright (C) 2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH - * Copyright (C) 2019-2021, 2023 Intel Corporation + * Copyright (C) 2019-2021, 2023-2024 Intel Corporation */ #include #include @@ -11,13 +11,13 @@ #include "iwl-trans.h" #include "iwl-drv.h" #include "iwl-fh.h" -#include "queue/tx.h" #include #include "fw/api/commands.h" +#include "pcie/internal.h" +#include "iwl-context-info-gen3.h" struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, - const struct iwl_trans_ops *ops, const struct iwl_cfg_trans_params *cfg_trans) { struct iwl_trans *trans; @@ -37,22 +37,8 @@ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, #endif trans->dev = dev; - trans->ops = ops; trans->num_rx_queues = 1; - WARN_ON(!ops->wait_txq_empty && !ops->wait_tx_queues_empty); - - if (trans->trans_cfg->gen2) { - trans->txqs.tfd.addr_size = 64; - trans->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS; - trans->txqs.tfd.size = sizeof(struct iwl_tfh_tfd); - } else { - trans->txqs.tfd.addr_size = 36; - trans->txqs.tfd.max_tbs = IWL_NUM_OF_TBS; - trans->txqs.tfd.size = sizeof(struct iwl_tfd); - } - trans->max_skb_frags = IWL_TRANS_MAX_FRAGS(trans); - return trans; } @@ -78,31 +64,6 @@ int iwl_trans_init(struct iwl_trans *trans) if (WARN_ON(trans->trans_cfg->gen2 && txcmd_size >= txcmd_align)) return -EINVAL; - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) - trans->txqs.bc_tbl_size = - sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ; - else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) - trans->txqs.bc_tbl_size = - sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210; - else - trans->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl); - /* - * For gen2 devices, we use a single allocation for each byte-count - * table, but they're pretty small (1k) so use a DMA pool that we - * allocate here. - */ - if (trans->trans_cfg->gen2) { - trans->txqs.bc_pool = dmam_pool_create("iwlwifi:bc", trans->dev, - trans->txqs.bc_tbl_size, - 256, 0); - if (!trans->txqs.bc_pool) - return -ENOMEM; - } - - /* Some things must not change even if the config does */ - WARN_ON(trans->txqs.tfd.addr_size != - (trans->trans_cfg->gen2 ? 
64 : 36)); - snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name), "iwl_cmd_pool:%s", dev_name(trans->dev)); trans->dev_cmd_pool = @@ -112,12 +73,6 @@ int iwl_trans_init(struct iwl_trans *trans) if (!trans->dev_cmd_pool) return -ENOMEM; - trans->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); - if (!trans->txqs.tso_hdr_page) { - kmem_cache_destroy(trans->dev_cmd_pool); - return -ENOMEM; - } - /* Initialize the wait queue for commands */ init_waitqueue_head(&trans->wait_command_queue); @@ -126,20 +81,6 @@ int iwl_trans_init(struct iwl_trans *trans) void iwl_trans_free(struct iwl_trans *trans) { - int i; - - if (trans->txqs.tso_hdr_page) { - for_each_possible_cpu(i) { - struct iwl_tso_hdr_page *p = - per_cpu_ptr(trans->txqs.tso_hdr_page, i); - - if (p && p->page) - __free_page(p->page); - } - - free_percpu(trans->txqs.tso_hdr_page); - } - kmem_cache_destroy(trans->dev_cmd_pool); } @@ -167,10 +108,9 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) return -EIO; - if (unlikely(trans->state != IWL_TRANS_FW_ALIVE)) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, + "bad state = %d\n", trans->state)) return -EIO; - } if (!(cmd->flags & CMD_ASYNC)) lock_map_acquire_read(&trans->sync_cmd_lockdep_map); @@ -180,7 +120,7 @@ int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) cmd->id = DEF_ID(cmd->id); } - ret = iwl_trans_txq_send_hcmd(trans, cmd); + ret = iwl_trans_pcie_send_hcmd(trans, cmd); if (!(cmd->flags & CMD_ASYNC)) lock_map_release(&trans->sync_cmd_lockdep_map); @@ -247,3 +187,379 @@ int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans) return 0; } IWL_EXPORT_SYMBOL(iwl_cmd_groups_verify_sorted); + +void iwl_trans_configure(struct iwl_trans *trans, + const struct iwl_trans_config *trans_cfg) +{ + trans->op_mode = trans_cfg->op_mode; + + iwl_trans_pcie_configure(trans, trans_cfg); + WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg)); +} +IWL_EXPORT_SYMBOL(iwl_trans_configure); + +int iwl_trans_start_hw(struct iwl_trans *trans) +{ + might_sleep(); + + return iwl_trans_pcie_start_hw(trans); +} +IWL_EXPORT_SYMBOL(iwl_trans_start_hw); + +void iwl_trans_op_mode_leave(struct iwl_trans *trans) +{ + might_sleep(); + + iwl_trans_pcie_op_mode_leave(trans); + + trans->op_mode = NULL; + + trans->state = IWL_TRANS_NO_FW; +} +IWL_EXPORT_SYMBOL(iwl_trans_op_mode_leave); + +void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val) +{ + iwl_trans_pcie_write8(trans, ofs, val); +} +IWL_EXPORT_SYMBOL(iwl_trans_write8); + +void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val) +{ + iwl_trans_pcie_write32(trans, ofs, val); +} +IWL_EXPORT_SYMBOL(iwl_trans_write32); + +u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs) +{ + return iwl_trans_pcie_read32(trans, ofs); +} +IWL_EXPORT_SYMBOL(iwl_trans_read32); + +u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs) +{ + return iwl_trans_pcie_read_prph(trans, ofs); +} +IWL_EXPORT_SYMBOL(iwl_trans_read_prph); + +void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val) +{ + return iwl_trans_pcie_write_prph(trans, ofs, val); +} +IWL_EXPORT_SYMBOL(iwl_trans_write_prph); + +int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr, + void *buf, int dwords) +{ + return iwl_trans_pcie_read_mem(trans, addr, buf, dwords); +} +IWL_EXPORT_SYMBOL(iwl_trans_read_mem); + +int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr, + const 
void *buf, int dwords) +{ + return iwl_trans_pcie_write_mem(trans, addr, buf, dwords); +} +IWL_EXPORT_SYMBOL(iwl_trans_write_mem); + +void iwl_trans_set_pmi(struct iwl_trans *trans, bool state) +{ + if (state) + set_bit(STATUS_TPOWER_PMI, &trans->status); + else + clear_bit(STATUS_TPOWER_PMI, &trans->status); +} +IWL_EXPORT_SYMBOL(iwl_trans_set_pmi); + +int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership) +{ + return iwl_trans_pcie_sw_reset(trans, retake_ownership); +} +IWL_EXPORT_SYMBOL(iwl_trans_sw_reset); + +struct iwl_trans_dump_data * +iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask, + const struct iwl_dump_sanitize_ops *sanitize_ops, + void *sanitize_ctx) +{ + return iwl_trans_pcie_dump_data(trans, dump_mask, + sanitize_ops, sanitize_ctx); +} +IWL_EXPORT_SYMBOL(iwl_trans_dump_data); + +int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset) +{ + might_sleep(); + + return iwl_trans_pcie_d3_suspend(trans, test, reset); +} +IWL_EXPORT_SYMBOL(iwl_trans_d3_suspend); + +int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status, + bool test, bool reset) +{ + might_sleep(); + + return iwl_trans_pcie_d3_resume(trans, status, test, reset); +} +IWL_EXPORT_SYMBOL(iwl_trans_d3_resume); + +void iwl_trans_interrupts(struct iwl_trans *trans, bool enable) +{ + iwl_trans_pci_interrupts(trans, enable); +} +IWL_EXPORT_SYMBOL(iwl_trans_interrupts); + +void iwl_trans_sync_nmi(struct iwl_trans *trans) +{ + iwl_trans_pcie_sync_nmi(trans); +} +IWL_EXPORT_SYMBOL(iwl_trans_sync_nmi); + +int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr, + u64 src_addr, u32 byte_cnt) +{ + return iwl_trans_pcie_copy_imr(trans, dst_addr, src_addr, byte_cnt); +} +IWL_EXPORT_SYMBOL(iwl_trans_write_imr_mem); + +void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, + u32 mask, u32 value) +{ + iwl_trans_pcie_set_bits_mask(trans, reg, mask, value); +} +IWL_EXPORT_SYMBOL(iwl_trans_set_bits_mask); + +int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs, + u32 *val) +{ + return iwl_trans_pcie_read_config32(trans, ofs, val); +} +IWL_EXPORT_SYMBOL(iwl_trans_read_config32); + +bool _iwl_trans_grab_nic_access(struct iwl_trans *trans) +{ + return iwl_trans_pcie_grab_nic_access(trans); +} +IWL_EXPORT_SYMBOL(_iwl_trans_grab_nic_access); + +void __releases(nic_access) +iwl_trans_release_nic_access(struct iwl_trans *trans) +{ + iwl_trans_pcie_release_nic_access(trans); + __release(nic_access); +} +IWL_EXPORT_SYMBOL(iwl_trans_release_nic_access); + +void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr) +{ + might_sleep(); + + trans->state = IWL_TRANS_FW_ALIVE; + + if (trans->trans_cfg->gen2) + iwl_trans_pcie_gen2_fw_alive(trans); + else + iwl_trans_pcie_fw_alive(trans, scd_addr); +} +IWL_EXPORT_SYMBOL(iwl_trans_fw_alive); + +int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw, + bool run_in_rfkill) +{ + int ret; + + might_sleep(); + + WARN_ON_ONCE(!trans->rx_mpdu_cmd); + + clear_bit(STATUS_FW_ERROR, &trans->status); + + if (trans->trans_cfg->gen2) + ret = iwl_trans_pcie_gen2_start_fw(trans, fw, run_in_rfkill); + else + ret = iwl_trans_pcie_start_fw(trans, fw, run_in_rfkill); + + if (ret == 0) + trans->state = IWL_TRANS_FW_STARTED; + + return ret; +} +IWL_EXPORT_SYMBOL(iwl_trans_start_fw); + +void iwl_trans_stop_device(struct iwl_trans *trans) +{ + might_sleep(); + + if (trans->trans_cfg->gen2) + iwl_trans_pcie_gen2_stop_device(trans); + else + iwl_trans_pcie_stop_device(trans); + + trans->state = IWL_TRANS_NO_FW; +} 
+IWL_EXPORT_SYMBOL(iwl_trans_stop_device); + +int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_tx_cmd *dev_cmd, int queue) +{ + if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) + return -EIO; + + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, + "bad state = %d\n", trans->state)) + return -EIO; + + if (trans->trans_cfg->gen2) + return iwl_txq_gen2_tx(trans, skb, dev_cmd, queue); + + return iwl_trans_pcie_tx(trans, skb, dev_cmd, queue); +} +IWL_EXPORT_SYMBOL(iwl_trans_tx); + +void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn, + struct sk_buff_head *skbs, bool is_flush) +{ + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, + "bad state = %d\n", trans->state)) + return; + + iwl_pcie_reclaim(trans, queue, ssn, skbs, is_flush); +} +IWL_EXPORT_SYMBOL(iwl_trans_reclaim); + +void iwl_trans_txq_disable(struct iwl_trans *trans, int queue, + bool configure_scd) +{ + iwl_trans_pcie_txq_disable(trans, queue, configure_scd); +} +IWL_EXPORT_SYMBOL(iwl_trans_txq_disable); + +bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg, + unsigned int queue_wdg_timeout) +{ + might_sleep(); + + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, + "bad state = %d\n", trans->state)) + return false; + + return iwl_trans_pcie_txq_enable(trans, queue, ssn, + cfg, queue_wdg_timeout); +} +IWL_EXPORT_SYMBOL(iwl_trans_txq_enable_cfg); + +int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue) +{ + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, + "bad state = %d\n", trans->state)) + return -EIO; + + return iwl_trans_pcie_wait_txq_empty(trans, queue); +} +IWL_EXPORT_SYMBOL(iwl_trans_wait_txq_empty); + +int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs) +{ + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, + "bad state = %d\n", trans->state)) + return -EIO; + + return iwl_trans_pcie_wait_txqs_empty(trans, txqs); +} +IWL_EXPORT_SYMBOL(iwl_trans_wait_tx_queues_empty); + +void iwl_trans_freeze_txq_timer(struct iwl_trans *trans, + unsigned long txqs, bool freeze) +{ + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, + "bad state = %d\n", trans->state)) + return; + + iwl_pcie_freeze_txq_timer(trans, txqs, freeze); +} +IWL_EXPORT_SYMBOL(iwl_trans_freeze_txq_timer); + +void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans, + int txq_id, bool shared_mode) +{ + iwl_trans_pcie_txq_set_shared_mode(trans, txq_id, shared_mode); +} +IWL_EXPORT_SYMBOL(iwl_trans_txq_set_shared_mode); + +#ifdef CONFIG_IWLWIFI_DEBUGFS +void iwl_trans_debugfs_cleanup(struct iwl_trans *trans) +{ + iwl_trans_pcie_debugfs_cleanup(trans); +} +IWL_EXPORT_SYMBOL(iwl_trans_debugfs_cleanup); +#endif + +void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr) +{ + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, + "bad state = %d\n", trans->state)) + return; + + iwl_pcie_set_q_ptrs(trans, queue, ptr); +} +IWL_EXPORT_SYMBOL(iwl_trans_set_q_ptrs); + +int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask, + u8 tid, int size, unsigned int wdg_timeout) +{ + might_sleep(); + + if (WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE, + "bad state = %d\n", trans->state)) + return -EIO; + + return iwl_txq_dyn_alloc(trans, flags, sta_mask, tid, + size, wdg_timeout); +} +IWL_EXPORT_SYMBOL(iwl_trans_txq_alloc); + +void iwl_trans_txq_free(struct iwl_trans *trans, int queue) +{ + iwl_txq_dyn_free(trans, queue); +} +IWL_EXPORT_SYMBOL(iwl_trans_txq_free); + +int iwl_trans_get_rxq_dma_data(struct 
iwl_trans *trans, int queue, + struct iwl_trans_rxq_dma_data *data) +{ + return iwl_trans_pcie_rxq_dma_data(trans, queue, data); +} +IWL_EXPORT_SYMBOL(iwl_trans_get_rxq_dma_data); + +int iwl_trans_load_pnvm(struct iwl_trans *trans, + const struct iwl_pnvm_image *pnvm_data, + const struct iwl_ucode_capabilities *capa) +{ + return iwl_trans_pcie_ctx_info_gen3_load_pnvm(trans, pnvm_data, capa); +} +IWL_EXPORT_SYMBOL(iwl_trans_load_pnvm); + +void iwl_trans_set_pnvm(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa) +{ + iwl_trans_pcie_ctx_info_gen3_set_pnvm(trans, capa); +} +IWL_EXPORT_SYMBOL(iwl_trans_set_pnvm); + +int iwl_trans_load_reduce_power(struct iwl_trans *trans, + const struct iwl_pnvm_image *payloads, + const struct iwl_ucode_capabilities *capa) +{ + return iwl_trans_pcie_ctx_info_gen3_load_reduce_power(trans, payloads, + capa); +} +IWL_EXPORT_SYMBOL(iwl_trans_load_reduce_power); + +void iwl_trans_set_reduce_power(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa) +{ + iwl_trans_pcie_ctx_info_gen3_set_reduce_power(trans, capa); +} +IWL_EXPORT_SYMBOL(iwl_trans_set_reduce_power); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index b93cef7b2330..6148acbac6af 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -26,11 +26,9 @@ * DOC: Transport layer - what is it ? * * The transport layer is the layer that deals with the HW directly. It provides - * an abstraction of the underlying HW to the upper layer. The transport layer - * doesn't provide any policy, algorithm or anything of this kind, but only - * mechanisms to make the HW do something. It is not completely stateless but - * close to it. - * We will have an implementation for each different supported bus. + * the PCIe access to the underlying hardware. The transport layer doesn't + * provide any policy, algorithm or anything of this kind, but only mechanisms + * to make the HW do something. It is not completely stateless but close to it. */ /** @@ -122,6 +120,7 @@ enum CMD_MODE { CMD_BLOCK_TXQS = BIT(3), CMD_SEND_IN_D3 = BIT(4), }; +#define CMD_MODE_BITS 5 #define DEF_CMD_PAYLOAD_SIZE 320 @@ -131,6 +130,11 @@ enum CMD_MODE { * For allocation of the command and tx queues, this establishes the overall * size of the largest command we send to uCode, except for commands that * aren't fully copied and use other TFD space. + * + * @hdr: command header + * @payload: payload for the command + * @hdr_wide: wide command header + * @payload_wide: payload for the wide command */ struct iwl_device_cmd { union { @@ -167,12 +171,6 @@ struct iwl_device_tx_cmd { */ #define IWL_MAX_CMD_TBS_PER_TFD 2 -/* We need 2 entries for the TX command and header, and another one might - * be needed for potential data in the SKB's head. The remaining ones can - * be used for frags.
- */ -#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3) - /** * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command * @@ -281,7 +279,7 @@ static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r) #define IWL_9000_MAX_RX_HW_QUEUES 1 /** - * enum iwl_wowlan_status - WoWLAN image/device status + * enum iwl_d3_status - WoWLAN image/device status * @IWL_D3_STATUS_ALIVE: firmware is still running after resume * @IWL_D3_STATUS_RESET: device was reset while suspended */ @@ -299,9 +297,6 @@ enum iwl_d3_status { * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode * @STATUS_FW_ERROR: the fw is in error state - * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands - * are sent - * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once, * e.g. for testing @@ -314,8 +309,6 @@ enum iwl_trans_status { STATUS_RFKILL_HW, STATUS_RFKILL_OPMODE, STATUS_FW_ERROR, - STATUS_TRANS_GOING_IDLE, - STATUS_TRANS_IDLE, STATUS_TRANS_DEAD, STATUS_SUPPRESS_CMD_ERROR_ONCE, }; @@ -481,183 +474,6 @@ struct iwl_pnvm_image { u32 version; }; -/** - * struct iwl_trans_ops - transport specific operations - * - * All the handlers MUST be implemented - * - * @start_hw: starts the HW. From that point on, the HW can send interrupts. - * May sleep. - * @op_mode_leave: Turn off the HW RF kill indication if on - * May sleep - * @start_fw: allocates and inits all the resources for the transport - * layer. Also kick a fw image. - * May sleep - * @fw_alive: called when the fw sends alive notification. If the fw provides - * the SCD base address in SRAM, then provide it here, or 0 otherwise. - * May sleep - * @stop_device: stops the whole device (embedded CPU put to reset) and stops - * the HW. From that point on, the HW will be stopped but will still issue - * an interrupt if the HW RF kill switch is triggered. - * This callback must do the right thing and not crash even if %start_hw() - * was called but not &start_fw(). May sleep. - * @d3_suspend: put the device into the correct mode for WoWLAN during - * suspend. This is optional, if not implemented WoWLAN will not be - * supported. This callback may sleep. - * @d3_resume: resume the device after WoWLAN, enabling the opmode to - * talk to the WoWLAN image to get its status. This is optional, if not - * implemented WoWLAN will not be supported. This callback may sleep. - * @send_cmd:send a host command. Must return -ERFKILL if RFkill is asserted. - * If RFkill is asserted in the middle of a SYNC host command, it must - * return -ERFKILL straight away. - * May sleep only if CMD_ASYNC is not set - * @tx: send an skb. The transport relies on the op_mode to zero the - * the ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all - * the CSUM will be taken care of (TCP CSUM and IP header in case of - * IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP - * header if it is IPv4. - * Must be atomic - * @reclaim: free packet until ssn. Returns a list of freed packets. - * Must be atomic - * @set_q_ptrs: set queue pointers internally, after D3 when HW state changed - * @txq_enable: setup a queue. To setup an AC queue, use the - * iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before - * this one. The op_mode must not configure the HCMD queue. 
The scheduler - * configuration may be %NULL, in which case the hardware will not be - * configured. If true is returned, the operation mode needs to increment - * the sequence number of the packets routed to this queue because of a - * hardware scheduler bug. May sleep. - * @txq_disable: de-configure a Tx queue to send AMPDUs - * Must be atomic - * @txq_alloc: Allocate a new TX queue, may sleep. - * @txq_free: Free a previously allocated TX queue. - * @txq_set_shared_mode: change Tx queue shared/unshared marking - * @wait_tx_queues_empty: wait until tx queues are empty. May sleep. - * @wait_txq_empty: wait until specific tx queue is empty. May sleep. - * @freeze_txq_timer: prevents the timer of the queue from firing until the - * queue is set to awake. Must be atomic. - * @write8: write a u8 to a register at offset ofs from the BAR - * @write32: write a u32 to a register at offset ofs from the BAR - * @read32: read a u32 register at offset ofs from the BAR - * @read_prph: read a DWORD from a periphery register - * @write_prph: write a DWORD to a periphery register - * @read_mem: read device's SRAM in DWORD - * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory - * will be zeroed. - * @read_config32: read a u32 value from the device's config space at - * the given offset. - * @configure: configure parameters required by the transport layer from - * the op_mode. May be called several times before start_fw, can't be - * called after that. - * @set_pmi: set the power pmi state - * @sw_reset: trigger software reset of the NIC - * @grab_nic_access: wake the NIC to be able to access non-HBUS regs. - * Sleeping is not allowed between grab_nic_access and - * release_nic_access. - * @release_nic_access: let the NIC go to sleep. The "flags" parameter - * must be the same one that was sent before to the grab_nic_access. - * @set_bits_mask: set SRAM register according to value and mask. - * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last - * TX'ed commands and similar. The buffer will be vfree'd by the caller. - * Note that the transport must fill in the proper file headers. - * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup - * of the trans debugfs - * @sync_nmi: trigger a firmware NMI and wait for it to complete - * @load_pnvm: save the pnvm data in DRAM - * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the - * context info. 
- * @load_reduce_power: copy reduce power table to the corresponding DRAM memory - * @set_reduce_power: set reduce power table addresses in the sratch buffer - * @interrupts: disable/enable interrupts to transport - * @imr_dma_data: set up IMR DMA - * @rxq_dma_data: retrieve RX queue DMA data, see @struct iwl_trans_rxq_dma_data - */ -struct iwl_trans_ops { - - int (*start_hw)(struct iwl_trans *iwl_trans); - void (*op_mode_leave)(struct iwl_trans *iwl_trans); - int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw, - bool run_in_rfkill); - void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr); - void (*stop_device)(struct iwl_trans *trans); - - int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset); - int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status, - bool test, bool reset); - - int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd); - - int (*tx)(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_tx_cmd *dev_cmd, int queue); - void (*reclaim)(struct iwl_trans *trans, int queue, int ssn, - struct sk_buff_head *skbs, bool is_flush); - - void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr); - - bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn, - const struct iwl_trans_txq_scd_cfg *cfg, - unsigned int queue_wdg_timeout); - void (*txq_disable)(struct iwl_trans *trans, int queue, - bool configure_scd); - /* 22000 functions */ - int (*txq_alloc)(struct iwl_trans *trans, u32 flags, - u32 sta_mask, u8 tid, - int size, unsigned int queue_wdg_timeout); - void (*txq_free)(struct iwl_trans *trans, int queue); - int (*rxq_dma_data)(struct iwl_trans *trans, int queue, - struct iwl_trans_rxq_dma_data *data); - - void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id, - bool shared); - - int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm); - int (*wait_txq_empty)(struct iwl_trans *trans, int queue); - void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs, - bool freeze); - - void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val); - void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val); - u32 (*read32)(struct iwl_trans *trans, u32 ofs); - u32 (*read_prph)(struct iwl_trans *trans, u32 ofs); - void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val); - int (*read_mem)(struct iwl_trans *trans, u32 addr, - void *buf, int dwords); - int (*write_mem)(struct iwl_trans *trans, u32 addr, - const void *buf, int dwords); - int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val); - void (*configure)(struct iwl_trans *trans, - const struct iwl_trans_config *trans_cfg); - void (*set_pmi)(struct iwl_trans *trans, bool state); - int (*sw_reset)(struct iwl_trans *trans, bool retake_ownership); - bool (*grab_nic_access)(struct iwl_trans *trans); - void (*release_nic_access)(struct iwl_trans *trans); - void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask, - u32 value); - - struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, - u32 dump_mask, - const struct iwl_dump_sanitize_ops *sanitize_ops, - void *sanitize_ctx); - void (*debugfs_cleanup)(struct iwl_trans *trans); - void (*sync_nmi)(struct iwl_trans *trans); - int (*load_pnvm)(struct iwl_trans *trans, - const struct iwl_pnvm_image *pnvm_payloads, - const struct iwl_ucode_capabilities *capa); - void (*set_pnvm)(struct iwl_trans *trans, - const struct iwl_ucode_capabilities *capa); - int (*load_reduce_power)(struct iwl_trans *trans, - const struct iwl_pnvm_image *payloads, - const struct 
iwl_ucode_capabilities *capa); - void (*set_reduce_power)(struct iwl_trans *trans, - const struct iwl_ucode_capabilities *capa); - - void (*interrupts)(struct iwl_trans *trans, bool enable); - int (*imr_dma_data)(struct iwl_trans *trans, - u32 dst_addr, u64 src_addr, - u32 byte_cnt); - -}; - /** * enum iwl_trans_state - state of the transport layer * @@ -897,7 +713,9 @@ struct iwl_dma_ptr { struct iwl_cmd_meta { /* only for SYNC commands, iff the reply skb is wanted */ struct iwl_host_cmd *source; - u32 flags; + u32 flags: CMD_MODE_BITS; + /* sg_offset is valid if it is non-zero */ + u32 sg_offset: PAGE_SHIFT; u32 tbs; }; @@ -934,6 +752,7 @@ struct iwl_pcie_first_tb_buf { * @first_tb_dma: DMA address for the first_tb_bufs start * @entries: transmit entries (driver state) * @lock: queue lock + * @reclaim_lock: reclaim lock * @stuck_timer: timer that fires if queue gets stuck * @trans: pointer back to transport (for timer) * @need_update: indicates need to update read/write index @@ -976,6 +795,8 @@ struct iwl_txq { struct iwl_pcie_txq_entry *entries; /* lock for syncing changes on the queue */ spinlock_t lock; + /* lock to prevent concurrent reclaim */ + spinlock_t reclaim_lock; unsigned long frozen_expiry_remainder; struct timer_list stuck_timer; struct iwl_trans *trans; @@ -998,59 +819,10 @@ struct iwl_txq { bool overflow_tx; }; -/** - * struct iwl_trans_txqs - transport tx queues data - * - * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) - * @page_offs: offset from skb->cb to mac header page pointer - * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer - * @queue_used: bit mask of used queues - * @queue_stopped: bit mask of stopped queues - * @txq: array of TXQ data structures representing the TXQs - * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler - * @queue_alloc_cmd_ver: queue allocation command version - * @bc_pool: bytecount DMA allocations pool - * @bc_tbl_size: bytecount table size - * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO - * (and similar usage) - * @tfd: TFD data - * @tfd.max_tbs: max number of buffers per TFD - * @tfd.size: TFD size - * @tfd.addr_size: TFD/TB address size - */ -struct iwl_trans_txqs { - unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; - unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; - struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES]; - struct dma_pool *bc_pool; - size_t bc_tbl_size; - bool bc_table_dword; - u8 page_offs; - u8 dev_cmd_offs; - struct iwl_tso_hdr_page __percpu *tso_hdr_page; - - struct { - u8 fifo; - u8 q_id; - unsigned int wdg_timeout; - } cmd; - - struct { - u8 max_tbs; - u16 size; - u8 addr_size; - } tfd; - - struct iwl_dma_ptr scd_bc_tbls; - - u8 queue_alloc_cmd_ver; -}; - /** * struct iwl_trans - transport common data * * @csme_own: true if we couldn't get ownership on the device - * @ops: pointer to iwl_trans_ops * @op_mode: pointer to the op_mode * @trans_cfg: the trans-specific configuration part * @cfg: pointer to the configuration @@ -1099,7 +871,6 @@ struct iwl_trans_txqs { * This mode is set dynamically, depending on the WoWLAN values * configured from the userspace at runtime. * @name: the device name - * @txqs: transport tx queues data. 
* @mbx_addr_0_step: step address data 0 * @mbx_addr_1_step: step address data 1 * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*), @@ -1112,7 +883,6 @@ struct iwl_trans_txqs { */ struct iwl_trans { bool csme_own; - const struct iwl_trans_ops *ops; struct iwl_op_mode *op_mode; const struct iwl_cfg_trans_params *trans_cfg; const struct iwl_cfg *cfg; @@ -1169,7 +939,6 @@ struct iwl_trans { enum iwl_plat_pm_mode system_pm_mode; const char *name; - struct iwl_trans_txqs txqs; u32 mbx_addr_0_step; u32 mbx_addr_1_step; @@ -1185,101 +954,29 @@ struct iwl_trans { const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id); int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans); -static inline void iwl_trans_configure(struct iwl_trans *trans, - const struct iwl_trans_config *trans_cfg) -{ - trans->op_mode = trans_cfg->op_mode; +void iwl_trans_configure(struct iwl_trans *trans, + const struct iwl_trans_config *trans_cfg); - trans->ops->configure(trans, trans_cfg); - WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg)); -} +int iwl_trans_start_hw(struct iwl_trans *trans); -static inline int iwl_trans_start_hw(struct iwl_trans *trans) -{ - might_sleep(); +void iwl_trans_op_mode_leave(struct iwl_trans *trans); - return trans->ops->start_hw(trans); -} +void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr); -static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans) -{ - might_sleep(); +int iwl_trans_start_fw(struct iwl_trans *trans, const struct fw_img *fw, + bool run_in_rfkill); - if (trans->ops->op_mode_leave) - trans->ops->op_mode_leave(trans); +void iwl_trans_stop_device(struct iwl_trans *trans); - trans->op_mode = NULL; +int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, bool reset); - trans->state = IWL_TRANS_NO_FW; -} +int iwl_trans_d3_resume(struct iwl_trans *trans, enum iwl_d3_status *status, + bool test, bool reset); -static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr) -{ - might_sleep(); - - trans->state = IWL_TRANS_FW_ALIVE; - - trans->ops->fw_alive(trans, scd_addr); -} - -static inline int iwl_trans_start_fw(struct iwl_trans *trans, - const struct fw_img *fw, - bool run_in_rfkill) -{ - int ret; - - might_sleep(); - - WARN_ON_ONCE(!trans->rx_mpdu_cmd); - - clear_bit(STATUS_FW_ERROR, &trans->status); - ret = trans->ops->start_fw(trans, fw, run_in_rfkill); - if (ret == 0) - trans->state = IWL_TRANS_FW_STARTED; - - return ret; -} - -static inline void iwl_trans_stop_device(struct iwl_trans *trans) -{ - might_sleep(); - - trans->ops->stop_device(trans); - - trans->state = IWL_TRANS_NO_FW; -} - -static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test, - bool reset) -{ - might_sleep(); - if (!trans->ops->d3_suspend) - return -EOPNOTSUPP; - - return trans->ops->d3_suspend(trans, test, reset); -} - -static inline int iwl_trans_d3_resume(struct iwl_trans *trans, - enum iwl_d3_status *status, - bool test, bool reset) -{ - might_sleep(); - if (!trans->ops->d3_resume) - return -EOPNOTSUPP; - - return trans->ops->d3_resume(trans, status, test, reset); -} - -static inline struct iwl_trans_dump_data * +struct iwl_trans_dump_data * iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask, const struct iwl_dump_sanitize_ops *sanitize_ops, - void *sanitize_ctx) -{ - if (!trans->ops->dump_data) - return NULL; - return trans->ops->dump_data(trans, dump_mask, - sanitize_ops, sanitize_ctx); -} + void *sanitize_ctx); static inline struct iwl_device_tx_cmd * iwl_trans_alloc_tx_cmd(struct iwl_trans *trans) @@ 
-1295,109 +992,31 @@ static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans, kmem_cache_free(trans->dev_cmd_pool, dev_cmd); } -static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_tx_cmd *dev_cmd, int queue) -{ - if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status))) - return -EIO; +int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_tx_cmd *dev_cmd, int queue); - if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - return -EIO; - } +void iwl_trans_reclaim(struct iwl_trans *trans, int queue, int ssn, + struct sk_buff_head *skbs, bool is_flush); - return trans->ops->tx(trans, skb, dev_cmd, queue); -} +void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, int ptr); -static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue, - int ssn, struct sk_buff_head *skbs, - bool is_flush) -{ - if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - return; - } +void iwl_trans_txq_disable(struct iwl_trans *trans, int queue, + bool configure_scd); - trans->ops->reclaim(trans, queue, ssn, skbs, is_flush); -} +bool iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, + const struct iwl_trans_txq_scd_cfg *cfg, + unsigned int queue_wdg_timeout); -static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue, - int ptr) -{ - if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - return; - } +int iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue, + struct iwl_trans_rxq_dma_data *data); - trans->ops->set_q_ptrs(trans, queue, ptr); -} +void iwl_trans_txq_free(struct iwl_trans *trans, int queue); -static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue, - bool configure_scd) -{ - trans->ops->txq_disable(trans, queue, configure_scd); -} +int iwl_trans_txq_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask, + u8 tid, int size, unsigned int wdg_timeout); -static inline bool -iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, - const struct iwl_trans_txq_scd_cfg *cfg, - unsigned int queue_wdg_timeout) -{ - might_sleep(); - - if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - return false; - } - - return trans->ops->txq_enable(trans, queue, ssn, - cfg, queue_wdg_timeout); -} - -static inline int -iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue, - struct iwl_trans_rxq_dma_data *data) -{ - if (WARN_ON_ONCE(!trans->ops->rxq_dma_data)) - return -EOPNOTSUPP; - - return trans->ops->rxq_dma_data(trans, queue, data); -} - -static inline void -iwl_trans_txq_free(struct iwl_trans *trans, int queue) -{ - if (WARN_ON_ONCE(!trans->ops->txq_free)) - return; - - trans->ops->txq_free(trans, queue); -} - -static inline int -iwl_trans_txq_alloc(struct iwl_trans *trans, - u32 flags, u32 sta_mask, u8 tid, - int size, unsigned int wdg_timeout) -{ - might_sleep(); - - if (WARN_ON_ONCE(!trans->ops->txq_alloc)) - return -EOPNOTSUPP; - - if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - return -EIO; - } - - return trans->ops->txq_alloc(trans, flags, sta_mask, tid, - size, wdg_timeout); -} - -static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans, - int queue, bool shared_mode) 
-{ - if (trans->ops->txq_set_shared_mode) - trans->ops->txq_set_shared_mode(trans, queue, shared_mode); -} +void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans, + int txq_id, bool shared_mode); static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue, int fifo, int sta_id, int tid, @@ -1430,78 +1049,32 @@ void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo, iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout); } -static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans, - unsigned long txqs, - bool freeze) -{ - if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - return; - } +void iwl_trans_freeze_txq_timer(struct iwl_trans *trans, + unsigned long txqs, bool freeze); - if (trans->ops->freeze_txq_timer) - trans->ops->freeze_txq_timer(trans, txqs, freeze); -} +int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, u32 txqs); -static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans, - u32 txqs) -{ - if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty)) - return -EOPNOTSUPP; +int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue); - /* No need to wait if the firmware is not alive */ - if (trans->state != IWL_TRANS_FW_ALIVE) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - return -EIO; - } +void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val); - return trans->ops->wait_tx_queues_empty(trans, txqs); -} +void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val); -static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue) -{ - if (WARN_ON_ONCE(!trans->ops->wait_txq_empty)) - return -EOPNOTSUPP; +u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs); - if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) { - IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state); - return -EIO; - } +u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs); - return trans->ops->wait_txq_empty(trans, queue); -} +void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, u32 val); -static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val) -{ - trans->ops->write8(trans, ofs, val); -} +int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr, + void *buf, int dwords); -static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val) -{ - trans->ops->write32(trans, ofs, val); -} +int iwl_trans_read_config32(struct iwl_trans *trans, u32 ofs, + u32 *val); -static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs) -{ - return trans->ops->read32(trans, ofs); -} - -static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs) -{ - return trans->ops->read_prph(trans, ofs); -} - -static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs, - u32 val) -{ - return trans->ops->write_prph(trans, ofs, val); -} - -static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr, - void *buf, int dwords) -{ - return trans->ops->read_mem(trans, addr, buf, dwords); -} +#ifdef CONFIG_IWLWIFI_DEBUGFS +void iwl_trans_debugfs_cleanup(struct iwl_trans *trans); +#endif #define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize) \ do { \ @@ -1510,14 +1083,8 @@ static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr, iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\ } while (0) -static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans, - u32 dst_addr, u64 src_addr, - u32 byte_cnt) -{ - if 
(trans->ops->imr_dma_data) - return trans->ops->imr_dma_data(trans, dst_addr, src_addr, byte_cnt); - return 0; -} +int iwl_trans_write_imr_mem(struct iwl_trans *trans, u32 dst_addr, + u64 src_addr, u32 byte_cnt); static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr) { @@ -1529,11 +1096,8 @@ static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr) return value; } -static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr, - const void *buf, int dwords) -{ - return trans->ops->write_mem(trans, addr, buf, dwords); -} +int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr, + const void *buf, int dwords); static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr, u32 val) @@ -1541,36 +1105,21 @@ static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr, return iwl_trans_write_mem(trans, addr, &val, 1); } -static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state) -{ - if (trans->ops->set_pmi) - trans->ops->set_pmi(trans, state); -} +void iwl_trans_set_pmi(struct iwl_trans *trans, bool state); -static inline int iwl_trans_sw_reset(struct iwl_trans *trans, - bool retake_ownership) -{ - if (trans->ops->sw_reset) - return trans->ops->sw_reset(trans, retake_ownership); - return 0; -} +int iwl_trans_sw_reset(struct iwl_trans *trans, bool retake_ownership); -static inline void -iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value) -{ - trans->ops->set_bits_mask(trans, reg, mask, value); -} +void iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, + u32 mask, u32 value); + +bool _iwl_trans_grab_nic_access(struct iwl_trans *trans); #define iwl_trans_grab_nic_access(trans) \ __cond_lock(nic_access, \ - likely((trans)->ops->grab_nic_access(trans))) + likely(_iwl_trans_grab_nic_access(trans))) -static inline void __releases(nic_access) -iwl_trans_release_nic_access(struct iwl_trans *trans) -{ - trans->ops->release_nic_access(trans); - __release(nic_access); -} +void __releases(nic_access) +iwl_trans_release_nic_access(struct iwl_trans *trans); static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync) { @@ -1589,44 +1138,24 @@ static inline bool iwl_trans_fw_running(struct iwl_trans *trans) return trans->state == IWL_TRANS_FW_ALIVE; } -static inline void iwl_trans_sync_nmi(struct iwl_trans *trans) -{ - if (trans->ops->sync_nmi) - trans->ops->sync_nmi(trans); -} +void iwl_trans_sync_nmi(struct iwl_trans *trans); void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr, u32 sw_err_bit); -static inline int iwl_trans_load_pnvm(struct iwl_trans *trans, - const struct iwl_pnvm_image *pnvm_data, - const struct iwl_ucode_capabilities *capa) -{ - return trans->ops->load_pnvm(trans, pnvm_data, capa); -} +int iwl_trans_load_pnvm(struct iwl_trans *trans, + const struct iwl_pnvm_image *pnvm_data, + const struct iwl_ucode_capabilities *capa); -static inline void iwl_trans_set_pnvm(struct iwl_trans *trans, - const struct iwl_ucode_capabilities *capa) -{ - if (trans->ops->set_pnvm) - trans->ops->set_pnvm(trans, capa); -} +void iwl_trans_set_pnvm(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa); -static inline int iwl_trans_load_reduce_power - (struct iwl_trans *trans, - const struct iwl_pnvm_image *payloads, - const struct iwl_ucode_capabilities *capa) -{ - return trans->ops->load_reduce_power(trans, payloads, capa); -} +int iwl_trans_load_reduce_power(struct iwl_trans *trans, + const struct iwl_pnvm_image *payloads, + const struct 
iwl_ucode_capabilities *capa); -static inline void -iwl_trans_set_reduce_power(struct iwl_trans *trans, - const struct iwl_ucode_capabilities *capa) -{ - if (trans->ops->set_reduce_power) - trans->ops->set_reduce_power(trans, capa); -} +void iwl_trans_set_reduce_power(struct iwl_trans *trans, + const struct iwl_ucode_capabilities *capa); static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans) { @@ -1634,18 +1163,13 @@ static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans) trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED; } -static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable) -{ - if (trans->ops->interrupts) - trans->ops->interrupts(trans, enable); -} +void iwl_trans_interrupts(struct iwl_trans *trans, bool enable); /***************************************************** * transport helper functions *****************************************************/ struct iwl_trans *iwl_trans_alloc(unsigned int priv_size, struct device *dev, - const struct iwl_trans_ops *ops, const struct iwl_cfg_trans_params *cfg_trans); int iwl_trans_init(struct iwl_trans *trans); void iwl_trans_free(struct iwl_trans *trans); @@ -1656,10 +1180,13 @@ static inline bool iwl_trans_is_hw_error_value(u32 val) } /***************************************************** -* driver (transport) register/unregister functions -******************************************************/ + * PCIe handling + *****************************************************/ int __must_check iwl_pci_register_driver(void); void iwl_pci_unregister_driver(void); void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan); +int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, + struct iwl_host_cmd *cmd); + #endif /* __iwl_trans_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h index 1f3c885aeb65..4900de3cc0d3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h +++ b/drivers/net/wireless/intel/iwlwifi/mei/iwl-mei.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * Copyright (C) 2021-2023 Intel Corporation + * Copyright (C) 2021-2024 Intel Corporation */ #ifndef __iwl_mei_h__ @@ -456,8 +456,11 @@ void iwl_mei_device_state(bool up); /** * iwl_mei_pldr_req() - must be called before loading the fw * - * Return: 0 if the PLDR flow was successful and the fw can be loaded, negative - * value otherwise. + * Requests that the ME release its potential bus access to + * the WiFi NIC so that the device can safely undergo product reset.
+ * + * Return: 0 if the request was successful and the device can be + * reset, a negative error value otherwise */ int iwl_mei_pldr_req(void); @@ -488,7 +491,7 @@ static inline void iwl_mei_set_nic_info(const u8 *mac_address, const u8 *nvm_add static inline void iwl_mei_set_country_code(u16 mcc) {} -static inline void iwl_mei_set_power_limit(__le16 *power_limit) +static inline void iwl_mei_set_power_limit(const __le16 *power_limit) {} static inline int iwl_mei_register(void *priv, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h index 3cbeaddf4358..c4c1e67b9ac7 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/constants.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/constants.h @@ -23,7 +23,7 @@ #define IWL_MVM_WOWLAN_PS_RX_DATA_TIMEOUT (10 * USEC_PER_MSEC) #define IWL_MVM_SHORT_PS_TX_DATA_TIMEOUT (2 * 1024) /* defined in TU */ #define IWL_MVM_SHORT_PS_RX_DATA_TIMEOUT (40 * 1024) /* defined in TU */ -#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 0 +#define IWL_MVM_P2P_LOWLATENCY_PS_ENABLE 1 #define IWL_MVM_UAPSD_RX_DATA_TIMEOUT (50 * USEC_PER_MSEC) #define IWL_MVM_UAPSD_TX_DATA_TIMEOUT (50 * USEC_PER_MSEC) #define IWL_MVM_UAPSD_QUEUES (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\ @@ -56,7 +56,6 @@ #define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK 1 #define IWL_MVM_TOF_IS_RESPONDER 0 #define IWL_MVM_HW_CSUM_DISABLE 0 -#define IWL_MVM_PARSE_NVM 0 #define IWL_MVM_ADWELL_ENABLE 1 #define IWL_MVM_ADWELL_MAX_BUDGET 0 #define IWL_MVM_TCM_LOAD_MEDIUM_THRESH 10 /* percentage */ @@ -100,6 +99,7 @@ #define IWL_MVM_FTM_INITIATOR_ALGO IWL_TOF_ALGO_TYPE_MAX_LIKE #define IWL_MVM_FTM_INITIATOR_DYNACK true #define IWL_MVM_FTM_LMR_FEEDBACK_TERMINATE false +#define IWL_MVM_FTM_TEST_INCORRECT_SAC false #define IWL_MVM_FTM_R2I_MAX_REP 7 #define IWL_MVM_FTM_I2R_MAX_REP 7 #define IWL_MVM_FTM_R2I_MAX_STS 1 @@ -114,7 +114,6 @@ #define IWL_MVM_D3_DEBUG false #define IWL_MVM_USE_TWT true #define IWL_MVM_AMPDU_CONSEC_DROPS_DELBA 20 -#define IWL_MVM_USE_NSSN_SYNC 0 #define IWL_MVM_FTM_INITIATOR_ENABLE_SMOOTH false #define IWL_MVM_FTM_INITIATOR_SMOOTH_ALPHA 40 /* 20016 pSec is 6 meter RTT, meaning 3 meter range */ @@ -124,6 +123,7 @@ #define IWL_MVM_DISABLE_AP_FILS false #define IWL_MVM_6GHZ_PASSIVE_SCAN_TIMEOUT 3000 /* in seconds */ #define IWL_MVM_6GHZ_PASSIVE_SCAN_ASSOC_TIMEOUT 60 /* in seconds */ +#define IWL_MVM_MIN_BEACON_INTERVAL_TU 16 #define IWL_MVM_AUTO_EML_ENABLE true #define IWL_MVM_MISSED_BEACONS_EXIT_ESR_THRESH 7 diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 54f4acbbd05b..b4d650583ac2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -2493,6 +2493,9 @@ static void iwl_mvm_parse_wowlan_info_notif(struct iwl_mvm *mvm, return; } + if (mvm->fast_resume) + return; + iwl_mvm_convert_key_counters_v5(status, &data->gtk[0].sc); iwl_mvm_convert_gtk_v3(status, data->gtk); iwl_mvm_convert_igtk(status, &data->igtk[0]); @@ -3049,7 +3052,7 @@ static bool iwl_mvm_check_rt_status(struct iwl_mvm *mvm, if (iwl_mvm_rt_status(mvm->trans, mvm->trans->dbg.lmac_error_event_table[0], &err_id)) { - if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN) { + if (err_id == RF_KILL_INDICATOR_FOR_WOWLAN && vif) { struct cfg80211_wowlan_wakeup wakeup = { .rfkill_release = true, }; @@ -3366,7 +3369,7 @@ static int iwl_mvm_resume_firmware(struct iwl_mvm *mvm, bool test) return ret; } -#define IWL_MVM_D3_NOTIF_TIMEOUT (HZ / 5) +#define IWL_MVM_D3_NOTIF_TIMEOUT (HZ / 3) static int 
iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm, struct iwl_d3_data *d3_data) @@ -3377,12 +3380,22 @@ static int iwl_mvm_d3_notif_wait(struct iwl_mvm *mvm, WIDE_ID(SCAN_GROUP, OFFLOAD_MATCH_INFO_NOTIF), WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION) }; + static const u16 d3_fast_resume_notif[] = { + WIDE_ID(PROT_OFFLOAD_GROUP, D3_END_NOTIFICATION) + }; struct iwl_notification_wait wait_d3_notif; int ret; - iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif, - d3_resume_notif, ARRAY_SIZE(d3_resume_notif), - iwl_mvm_wait_d3_notif, d3_data); + if (mvm->fast_resume) + iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif, + d3_fast_resume_notif, + ARRAY_SIZE(d3_fast_resume_notif), + iwl_mvm_wait_d3_notif, d3_data); + else + iwl_init_notification_wait(&mvm->notif_wait, &wait_d3_notif, + d3_resume_notif, + ARRAY_SIZE(d3_resume_notif), + iwl_mvm_wait_d3_notif, d3_data); ret = iwl_mvm_resume_firmware(mvm, d3_data->test); if (ret) { @@ -3567,6 +3580,68 @@ void iwl_mvm_set_wakeup(struct ieee80211_hw *hw, bool enabled) device_set_wakeup_enable(mvm->trans->dev, enabled); } +void iwl_mvm_fast_suspend(struct iwl_mvm *mvm) +{ + struct iwl_d3_manager_config d3_cfg_cmd_data = {}; + int ret; + + lockdep_assert_held(&mvm->mutex); + + IWL_DEBUG_WOWLAN(mvm, "Starting fast suspend flow\n"); + + mvm->fast_resume = true; + set_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); + + WARN_ON(iwl_mvm_power_update_device(mvm)); + mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_D3; + ret = iwl_mvm_send_cmd_pdu(mvm, D3_CONFIG_CMD, CMD_SEND_IN_D3, + sizeof(d3_cfg_cmd_data), &d3_cfg_cmd_data); + if (ret) + IWL_ERR(mvm, + "fast suspend: couldn't send D3_CONFIG_CMD %d\n", ret); + + WARN_ON(iwl_mvm_power_update_mac(mvm)); + + ret = iwl_trans_d3_suspend(mvm->trans, false, false); + if (ret) + IWL_ERR(mvm, "fast suspend: trans_d3_suspend failed %d\n", ret); +} + +int iwl_mvm_fast_resume(struct iwl_mvm *mvm) +{ + struct iwl_d3_data d3_data = { + .notif_expected = + IWL_D3_NOTIF_D3_END_NOTIF, + }; + int ret; + + lockdep_assert_held(&mvm->mutex); + + IWL_DEBUG_WOWLAN(mvm, "Starting the fast resume flow\n"); + + mvm->last_reset_or_resume_time_jiffies = jiffies; + iwl_fw_dbg_read_d3_debug_data(&mvm->fwrt); + + if (iwl_mvm_check_rt_status(mvm, NULL)) { + set_bit(STATUS_FW_ERROR, &mvm->trans->status); + iwl_mvm_dump_nic_error_log(mvm); + iwl_dbg_tlv_time_point(&mvm->fwrt, + IWL_FW_INI_TIME_POINT_FW_ASSERT, NULL); + iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, + false, 0); + return -ENODEV; + } + ret = iwl_mvm_d3_notif_wait(mvm, &d3_data); + clear_bit(IWL_MVM_STATUS_IN_D3, &mvm->status); + mvm->trans->system_pm_mode = IWL_PLAT_PM_MODE_DISABLED; + mvm->fast_resume = false; + + if (ret) + IWL_ERR(mvm, "Couldn't get the d3 notif %d\n", ret); + + return ret; +} + #ifdef CONFIG_IWLWIFI_DEBUGFS static int iwl_mvm_d3_test_open(struct inode *inode, struct file *file) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index 17c97dfbc62a..25f07e00db42 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -692,6 +692,42 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file, return simple_read_from_buffer(user_buf, count, ppos, buf, len); } +static ssize_t iwl_dbgfs_max_tx_op_write(struct ieee80211_vif *vif, char *buf, + size_t count, loff_t *ppos) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + u16 value; + int ret; + + ret = 
kstrtou16(buf, 0, &value); + if (ret) + return ret; + + mutex_lock(&mvm->mutex); + mvmvif->max_tx_op = value; + mutex_unlock(&mvm->mutex); + + return count; +} + +static ssize_t iwl_dbgfs_max_tx_op_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct ieee80211_vif *vif = file->private_data; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm *mvm = mvmvif->mvm; + char buf[10]; + int len; + + mutex_lock(&mvm->mutex); + len = scnprintf(buf, sizeof(buf), "%hu\n", mvmvif->max_tx_op); + mutex_unlock(&mvm->mutex); + + return simple_read_from_buffer(user_buf, count, ppos, buf, len); +} + static ssize_t iwl_dbgfs_int_mlo_scan_write(struct ieee80211_vif *vif, char *buf, size_t count, loff_t *ppos) @@ -801,6 +837,7 @@ MVM_DEBUGFS_READ_WRITE_FILE_OPS(uapsd_misbehaving, 20); MVM_DEBUGFS_READ_WRITE_FILE_OPS(rx_phyinfo, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(quota_min, 32); MVM_DEBUGFS_READ_FILE_OPS(os_device_timediff); +MVM_DEBUGFS_READ_WRITE_FILE_OPS(max_tx_op, 10); MVM_DEBUGFS_WRITE_FILE_OPS(int_mlo_scan, 32); MVM_DEBUGFS_READ_WRITE_FILE_OPS(esr_disable_reason, 32); @@ -830,6 +867,7 @@ void iwl_mvm_vif_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif) MVM_DEBUGFS_ADD_FILE_VIF(rx_phyinfo, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(quota_min, mvmvif->dbgfs_dir, 0600); MVM_DEBUGFS_ADD_FILE_VIF(os_device_timediff, mvmvif->dbgfs_dir, 0400); + MVM_DEBUGFS_ADD_FILE_VIF(max_tx_op, mvmvif->dbgfs_dir, 0600); debugfs_create_bool("ftm_unprotected", 0200, mvmvif->dbgfs_dir, &mvmvif->ftm_unprotected); MVM_DEBUGFS_ADD_FILE_VIF(int_mlo_scan, mvmvif->dbgfs_dir, 0200); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 8101ecbb478b..91ca830a7b60 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -151,37 +151,6 @@ static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf, return ret; } -static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm_sta *mvmsta; - int sta_id, drain, ret; - - if (!iwl_mvm_firmware_running(mvm) || - mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) - return -EIO; - - if (sscanf(buf, "%d %d", &sta_id, &drain) != 2) - return -EINVAL; - if (sta_id < 0 || sta_id >= mvm->fw->ucode_capa.num_stations) - return -EINVAL; - if (drain < 0 || drain > 1) - return -EINVAL; - - mutex_lock(&mvm->mutex); - - mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id); - - if (!mvmsta) - ret = -ENOENT; - else - ret = iwl_mvm_drain_sta(mvm, mvmsta, drain) ? 
: count; - - mutex_unlock(&mvm->mutex); - - return ret; -} - static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { @@ -568,193 +537,12 @@ static ssize_t iwl_dbgfs_disable_power_off_write(struct iwl_mvm *mvm, char *buf, return ret ?: count; } -static -int iwl_mvm_coex_dump_mbox(struct iwl_bt_coex_profile_notif *notif, char *buf, - int pos, int bufsz) -{ - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw0:\n"); - - BT_MBOX_PRINT(0, LE_SLAVE_LAT, false); - BT_MBOX_PRINT(0, LE_PROF1, false); - BT_MBOX_PRINT(0, LE_PROF2, false); - BT_MBOX_PRINT(0, LE_PROF_OTHER, false); - BT_MBOX_PRINT(0, CHL_SEQ_N, false); - BT_MBOX_PRINT(0, INBAND_S, false); - BT_MBOX_PRINT(0, LE_MIN_RSSI, false); - BT_MBOX_PRINT(0, LE_SCAN, false); - BT_MBOX_PRINT(0, LE_ADV, false); - BT_MBOX_PRINT(0, LE_MAX_TX_POWER, false); - BT_MBOX_PRINT(0, OPEN_CON_1, true); - - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw1:\n"); - - BT_MBOX_PRINT(1, BR_MAX_TX_POWER, false); - BT_MBOX_PRINT(1, IP_SR, false); - BT_MBOX_PRINT(1, LE_MSTR, false); - BT_MBOX_PRINT(1, AGGR_TRFC_LD, false); - BT_MBOX_PRINT(1, MSG_TYPE, false); - BT_MBOX_PRINT(1, SSN, true); - - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw2:\n"); - - BT_MBOX_PRINT(2, SNIFF_ACT, false); - BT_MBOX_PRINT(2, PAG, false); - BT_MBOX_PRINT(2, INQUIRY, false); - BT_MBOX_PRINT(2, CONN, false); - BT_MBOX_PRINT(2, SNIFF_INTERVAL, false); - BT_MBOX_PRINT(2, DISC, false); - BT_MBOX_PRINT(2, SCO_TX_ACT, false); - BT_MBOX_PRINT(2, SCO_RX_ACT, false); - BT_MBOX_PRINT(2, ESCO_RE_TX, false); - BT_MBOX_PRINT(2, SCO_DURATION, true); - - pos += scnprintf(buf+pos, bufsz-pos, "MBOX dw3:\n"); - - BT_MBOX_PRINT(3, SCO_STATE, false); - BT_MBOX_PRINT(3, SNIFF_STATE, false); - BT_MBOX_PRINT(3, A2DP_STATE, false); - BT_MBOX_PRINT(3, A2DP_SRC, false); - BT_MBOX_PRINT(3, ACL_STATE, false); - BT_MBOX_PRINT(3, MSTR_STATE, false); - BT_MBOX_PRINT(3, OBX_STATE, false); - BT_MBOX_PRINT(3, OPEN_CON_2, false); - BT_MBOX_PRINT(3, TRAFFIC_LOAD, false); - BT_MBOX_PRINT(3, CHL_SEQN_LSB, false); - BT_MBOX_PRINT(3, INBAND_P, false); - BT_MBOX_PRINT(3, MSG_TYPE_2, false); - BT_MBOX_PRINT(3, SSN_2, false); - BT_MBOX_PRINT(3, UPDATE_REQUEST, true); - - return pos; -} - -static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - struct iwl_bt_coex_profile_notif *notif = &mvm->last_bt_notif; - char *buf; - int ret, pos = 0, bufsz = sizeof(char) * 1024; - - buf = kmalloc(bufsz, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - mutex_lock(&mvm->mutex); - - pos += iwl_mvm_coex_dump_mbox(notif, buf, pos, bufsz); - - pos += scnprintf(buf + pos, bufsz - pos, "bt_ci_compliance = %d\n", - notif->bt_ci_compliance); - pos += scnprintf(buf + pos, bufsz - pos, "primary_ch_lut = %d\n", - le32_to_cpu(notif->primary_ch_lut)); - pos += scnprintf(buf + pos, bufsz - pos, "secondary_ch_lut = %d\n", - le32_to_cpu(notif->secondary_ch_lut)); - pos += scnprintf(buf + pos, - bufsz - pos, "bt_activity_grading = %d\n", - le32_to_cpu(notif->bt_activity_grading)); - pos += scnprintf(buf + pos, bufsz - pos, "bt_rrc = %d\n", - notif->rrc_status & 0xF); - pos += scnprintf(buf + pos, bufsz - pos, "bt_ttc = %d\n", - notif->ttc_status & 0xF); - - pos += scnprintf(buf + pos, bufsz - pos, "sync_sco = %d\n", - IWL_MVM_BT_COEX_SYNC2SCO); - pos += scnprintf(buf + pos, bufsz - pos, "mplut = %d\n", - IWL_MVM_BT_COEX_MPLUT); - - mutex_unlock(&mvm->mutex); - - ret = simple_read_from_buffer(user_buf, count, ppos, buf, 
pos); - kfree(buf); - - return ret; -} -#undef BT_MBOX_PRINT - -static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf, - size_t count, loff_t *ppos) -{ - struct iwl_mvm *mvm = file->private_data; - struct iwl_bt_coex_ci_cmd *cmd = &mvm->last_bt_ci_cmd; - char buf[256]; - int bufsz = sizeof(buf); - int pos = 0; - - mutex_lock(&mvm->mutex); - - pos += scnprintf(buf + pos, bufsz - pos, "Channel inhibition CMD\n"); - pos += scnprintf(buf + pos, bufsz - pos, - "\tPrimary Channel Bitmap 0x%016llx\n", - le64_to_cpu(cmd->bt_primary_ci)); - pos += scnprintf(buf + pos, bufsz - pos, - "\tSecondary Channel Bitmap 0x%016llx\n", - le64_to_cpu(cmd->bt_secondary_ci)); - - mutex_unlock(&mvm->mutex); - - return simple_read_from_buffer(user_buf, count, ppos, buf, pos); -} - -static ssize_t -iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - u32 bt_tx_prio; - - if (sscanf(buf, "%u", &bt_tx_prio) != 1) - return -EINVAL; - if (bt_tx_prio > 4) - return -EINVAL; - - mvm->bt_tx_prio = bt_tx_prio; - - return count; -} - -static ssize_t -iwl_dbgfs_bt_force_ant_write(struct iwl_mvm *mvm, char *buf, - size_t count, loff_t *ppos) -{ - static const char * const modes_str[BT_FORCE_ANT_MAX] = { - [BT_FORCE_ANT_DIS] = "dis", - [BT_FORCE_ANT_AUTO] = "auto", - [BT_FORCE_ANT_BT] = "bt", - [BT_FORCE_ANT_WIFI] = "wifi", - }; - int ret, bt_force_ant_mode; - - ret = match_string(modes_str, ARRAY_SIZE(modes_str), buf); - if (ret < 0) - return ret; - - bt_force_ant_mode = ret; - ret = 0; - mutex_lock(&mvm->mutex); - if (mvm->bt_force_ant_mode == bt_force_ant_mode) - goto out; - - mvm->bt_force_ant_mode = bt_force_ant_mode; - IWL_DEBUG_COEX(mvm, "Force mode: %s\n", - modes_str[mvm->bt_force_ant_mode]); - - if (iwl_mvm_firmware_running(mvm)) - ret = iwl_mvm_send_bt_init_conf(mvm); - else - ret = 0; - -out: - mutex_unlock(&mvm->mutex); - return ret ?: count; -} - static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct iwl_mvm *mvm = file->private_data; char *buff, *pos, *endpos; static const size_t bufsz = 1024; - char _fw_name_pre[FW_NAME_PRE_BUFSIZE]; int ret; buff = kmalloc(bufsz, GFP_KERNEL); @@ -764,8 +552,8 @@ static ssize_t iwl_dbgfs_fw_ver_read(struct file *file, char __user *user_buf, pos = buff; endpos = pos + bufsz; - pos += scnprintf(pos, endpos - pos, "FW prefix: %s\n", - iwl_drv_get_fwname_pre(mvm->trans, _fw_name_pre)); + pos += scnprintf(pos, endpos - pos, "FW id: %s\n", + mvm->fwrt.fw->fw_version); pos += scnprintf(pos, endpos - pos, "FW: %s\n", mvm->fwrt.fw->human_readable); pos += scnprintf(pos, endpos - pos, "Device: %s\n", @@ -1396,6 +1184,8 @@ static ssize_t iwl_dbgfs_fw_nmi_write(struct iwl_mvm *mvm, char *buf, if (!iwl_mvm_firmware_running(mvm)) return -EIO; + IWL_ERR(mvm, "Triggering an NMI from debugfs\n"); + if (count == 6 && !strcmp(buf, "nolog\n")) set_bit(IWL_MVM_STATUS_SUPPRESS_ERROR_LOG_ONCE, &mvm->status); @@ -2164,15 +1954,12 @@ MVM_DEBUGFS_WRITE_FILE_OPS(stop_ctdp, 8); MVM_DEBUGFS_WRITE_FILE_OPS(start_ctdp, 8); MVM_DEBUGFS_WRITE_FILE_OPS(force_ctkill, 8); MVM_DEBUGFS_WRITE_FILE_OPS(tx_flush, 16); -MVM_DEBUGFS_WRITE_FILE_OPS(sta_drain, 8); MVM_DEBUGFS_WRITE_FILE_OPS(send_echo_cmd, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(sram, 64); MVM_DEBUGFS_READ_WRITE_FILE_OPS(set_nic_temperature, 64); MVM_DEBUGFS_READ_FILE_OPS(nic_temp); MVM_DEBUGFS_READ_FILE_OPS(stations); MVM_DEBUGFS_READ_LINK_STA_FILE_OPS(rs_data); -MVM_DEBUGFS_READ_FILE_OPS(bt_notif); -MVM_DEBUGFS_READ_FILE_OPS(bt_cmd); 
MVM_DEBUGFS_READ_WRITE_FILE_OPS(disable_power_off, 64); MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats); MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats); @@ -2182,8 +1969,6 @@ MVM_DEBUGFS_READ_FILE_OPS(phy_integration_ver); MVM_DEBUGFS_READ_FILE_OPS(tas_get_status); MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10); MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10); -MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10); -MVM_DEBUGFS_WRITE_FILE_OPS(bt_force_ant, 10); MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8); MVM_DEBUGFS_READ_WRITE_FILE_OPS(fw_dbg_conf, 8); MVM_DEBUGFS_WRITE_FILE_OPS(fw_dbg_collect, 64); @@ -2370,7 +2155,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) spin_lock_init(&mvm->drv_stats_lock); MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, 0200); - MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(set_nic_temperature, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(nic_temp, mvm->debugfs_dir, 0400); @@ -2379,8 +2163,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) MVM_DEBUGFS_ADD_FILE(start_ctdp, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(force_ctkill, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(stations, mvm->debugfs_dir, 0400); - MVM_DEBUGFS_ADD_FILE(bt_notif, mvm->debugfs_dir, 0400); - MVM_DEBUGFS_ADD_FILE(bt_cmd, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(disable_power_off, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_ver, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(fw_rx_stats, mvm->debugfs_dir, 0400); @@ -2388,8 +2170,6 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) MVM_DEBUGFS_ADD_FILE(fw_system_stats, mvm->debugfs_dir, 0400); MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, 0200); - MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, 0200); - MVM_DEBUGFS_ADD_FILE(bt_force_ant, mvm->debugfs_dir, 0200); MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, 0600); MVM_DEBUGFS_ADD_FILE(fw_dbg_conf, mvm->debugfs_dir, 0600); @@ -2448,6 +2228,9 @@ void iwl_mvm_dbgfs_register(struct iwl_mvm *mvm) debugfs_create_file("mem", 0600, mvm->debugfs_dir, mvm, &iwl_dbgfs_mem_ops); + debugfs_create_bool("rx_ts_ptp", 0600, mvm->debugfs_dir, + &mvm->rx_ts_ptp); + /* * Create a symlink with mac80211. It will be removed when mac80211 * exists (before the opmode exists which removes the target.) 
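The debugfs.c changes above drop several hand-rolled read/write handlers (sta_drain, bt_notif, bt_cmd, bt_tx_prio, bt_force_ant) and add the new rx_ts_ptp knob through debugfs_create_bool(), which binds a bool directly to a debugfs file so no custom file_operations are needed. Below is a minimal stand-alone sketch of that pattern; the "demo" directory and module names are hypothetical and only illustrate the helper the patch uses for mvm->rx_ts_ptp:

/*
 * Sketch only (not part of the patch): expose a driver flag via
 * debugfs_create_bool(). Reads return Y or N, writes accept 0/1 or
 * y/n, and the bool is updated without any custom fops.
 */
#include <linux/debugfs.h>
#include <linux/module.h>

static struct dentry *demo_dir;
static bool rx_ts_ptp;	/* stands in for mvm->rx_ts_ptp */

static int __init demo_init(void)
{
	demo_dir = debugfs_create_dir("demo", NULL);
	/* 0600 as in the patch: only root may read or write the flag */
	debugfs_create_bool("rx_ts_ptp", 0600, demo_dir, &rx_ts_ptp);
	return 0;
}

static void __exit demo_exit(void)
{
	debugfs_remove_recursive(demo_dir);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With debugfs mounted, "cat /sys/kernel/debug/demo/rx_ts_ptp" reads the flag and "echo 1 > /sys/kernel/debug/demo/rx_ts_ptp" sets it, which is why the patch can expose mvm->rx_ts_ptp in a single call instead of a dedicated handler pair.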
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c index 72a3d71f46f0..afd90a52d4ec 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c @@ -40,6 +40,12 @@ struct iwl_mvm_ftm_pasn_entry { u32 flags; }; +struct iwl_mvm_ftm_iter_data { + u8 *cipher; + u8 *bssid; + u8 *tk; +}; + int iwl_mvm_ftm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, u8 *addr, u32 cipher, u8 *tk, u32 tk_len, u8 *hltk, u32 hltk_len) @@ -431,9 +437,43 @@ iwl_mvm_ftm_put_target_v2(struct iwl_mvm *mvm, return 0; } -#define FTM_PUT_FLAG(flag) (target->initiator_ap_flags |= \ +#define FTM_SET_FLAG(flag) (*flags |= \ cpu_to_le32(IWL_INITIATOR_AP_FLAGS_##flag)) +static void +iwl_mvm_ftm_set_target_flags(struct iwl_mvm *mvm, + struct cfg80211_pmsr_request_peer *peer, + __le32 *flags) +{ + *flags = cpu_to_le32(0); + + if (peer->ftm.asap) + FTM_SET_FLAG(ASAP); + + if (peer->ftm.request_lci) + FTM_SET_FLAG(LCI_REQUEST); + + if (peer->ftm.request_civicloc) + FTM_SET_FLAG(CIVIC_REQUEST); + + if (IWL_MVM_FTM_INITIATOR_DYNACK) + FTM_SET_FLAG(DYN_ACK); + + if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG) + FTM_SET_FLAG(ALGO_LR); + else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT) + FTM_SET_FLAG(ALGO_FFT); + + if (peer->ftm.trigger_based) + FTM_SET_FLAG(TB); + else if (peer->ftm.non_trigger_based) + FTM_SET_FLAG(NON_TB); + + if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) && + peer->ftm.lmr_feedback) + FTM_SET_FLAG(LMR_FEEDBACK); +} + static void iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm, struct cfg80211_pmsr_request_peer *peer, @@ -445,33 +485,7 @@ iwl_mvm_ftm_put_target_common(struct iwl_mvm *mvm, target->samples_per_burst = peer->ftm.ftms_per_burst; target->num_of_bursts = peer->ftm.num_bursts_exp; target->ftmr_max_retries = peer->ftm.ftmr_retries; - target->initiator_ap_flags = cpu_to_le32(0); - - if (peer->ftm.asap) - FTM_PUT_FLAG(ASAP); - - if (peer->ftm.request_lci) - FTM_PUT_FLAG(LCI_REQUEST); - - if (peer->ftm.request_civicloc) - FTM_PUT_FLAG(CIVIC_REQUEST); - - if (IWL_MVM_FTM_INITIATOR_DYNACK) - FTM_PUT_FLAG(DYN_ACK); - - if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_LINEAR_REG) - FTM_PUT_FLAG(ALGO_LR); - else if (IWL_MVM_FTM_INITIATOR_ALGO == IWL_TOF_ALGO_TYPE_FFT) - FTM_PUT_FLAG(ALGO_FFT); - - if (peer->ftm.trigger_based) - FTM_PUT_FLAG(TB); - else if (peer->ftm.non_trigger_based) - FTM_PUT_FLAG(NON_TB); - - if ((peer->ftm.trigger_based || peer->ftm.non_trigger_based) && - peer->ftm.lmr_feedback) - FTM_PUT_FLAG(LMR_FEEDBACK); + iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags); } static int @@ -514,6 +528,48 @@ iwl_mvm_ftm_put_target_v4(struct iwl_mvm *mvm, return 0; } +static int iwl_mvm_ftm_set_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request_peer *peer, + u8 *sta_id, __le32 *flags) +{ + if (vif->cfg.assoc) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct ieee80211_sta *sta; + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; + + rcu_read_lock(); + for_each_vif_active_link(vif, link_conf, link_id) { + if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN)) + continue; + + *sta_id = mvmvif->link[link_id]->ap_sta_id; + sta = rcu_dereference(mvm->fw_id_to_mac_id[*sta_id]); + if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { + rcu_read_unlock(); + return PTR_ERR_OR_ZERO(sta); + } + + if (sta->mfp && (peer->ftm.trigger_based || + 
peer->ftm.non_trigger_based)) + FTM_SET_FLAG(PMF); + break; + } + rcu_read_unlock(); + +#ifdef CONFIG_IWLWIFI_DEBUGFS + if (mvmvif->ftm_unprotected) { + *sta_id = IWL_MVM_INVALID_STA; + *flags &= ~cpu_to_le32(IWL_INITIATOR_AP_FLAGS_PMF); + } +#endif + } else { + *sta_id = IWL_MVM_INVALID_STA; + } + + return 0; +} + static int iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request_peer *peer, @@ -529,42 +585,8 @@ iwl_mvm_ftm_put_target(struct iwl_mvm *mvm, struct ieee80211_vif *vif, iwl_mvm_ftm_put_target_common(mvm, peer, target); - if (vif->cfg.assoc) { - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - struct ieee80211_sta *sta; - struct ieee80211_bss_conf *link_conf; - unsigned int link_id; - - rcu_read_lock(); - for_each_vif_active_link(vif, link_conf, link_id) { - if (memcmp(peer->addr, link_conf->bssid, ETH_ALEN)) - continue; - - target->sta_id = mvmvif->link[link_id]->ap_sta_id; - sta = rcu_dereference(mvm->fw_id_to_mac_id[target->sta_id]); - if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { - rcu_read_unlock(); - return PTR_ERR_OR_ZERO(sta); - } - - if (sta->mfp && (peer->ftm.trigger_based || - peer->ftm.non_trigger_based)) - FTM_PUT_FLAG(PMF); - break; - } - rcu_read_unlock(); - -#ifdef CONFIG_IWLWIFI_DEBUGFS - if (mvmvif->ftm_unprotected) { - target->sta_id = IWL_MVM_INVALID_STA; - target->initiator_ap_flags &= - ~cpu_to_le32(IWL_INITIATOR_AP_FLAGS_PMF); - } - -#endif - } else { - target->sta_id = IWL_MVM_INVALID_STA; - } + iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id, + &target->initiator_ap_flags); /* * TODO: Beacon interval is currently unknown, so use the common value @@ -703,27 +725,24 @@ static void iter(struct ieee80211_hw *hw, struct ieee80211_key_conf *key, void *data) { - struct iwl_tof_range_req_ap_entry_v6 *target = data; + struct iwl_mvm_ftm_iter_data *target = data; if (!sta || memcmp(sta->addr, target->bssid, ETH_ALEN)) return; WARN_ON(!sta->mfp); - if (WARN_ON(key->keylen > sizeof(target->tk))) - return; - - memcpy(target->tk, key->key, key->keylen); - target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher); - WARN_ON(target->cipher == IWL_LOCATION_CIPHER_INVALID); + target->tk = key->key; + *target->cipher = iwl_mvm_cipher_to_location_cipher(key->cipher); + WARN_ON(*target->cipher == IWL_LOCATION_CIPHER_INVALID); } static void iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct iwl_tof_range_req_ap_entry_v7 *target) + u8 *bssid, u8 *cipher, u8 *hltk, u8 *tk, + u8 *rx_pn, u8 *tx_pn, __le32 *flags) { struct iwl_mvm_ftm_pasn_entry *entry; - u32 flags = le32_to_cpu(target->initiator_ap_flags); #ifdef CONFIG_IWLWIFI_DEBUGFS struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -731,35 +750,37 @@ iwl_mvm_ftm_set_secured_ranging(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return; #endif - if (!(flags & (IWL_INITIATOR_AP_FLAGS_NON_TB | + if (!(le32_to_cpu(*flags) & (IWL_INITIATOR_AP_FLAGS_NON_TB | IWL_INITIATOR_AP_FLAGS_TB))) return; lockdep_assert_held(&mvm->mutex); list_for_each_entry(entry, &mvm->ftm_initiator.pasn_list, list) { - if (memcmp(entry->addr, target->bssid, sizeof(entry->addr))) + if (memcmp(entry->addr, bssid, sizeof(entry->addr))) continue; - target->cipher = entry->cipher; + *cipher = entry->cipher; if (entry->flags & IWL_MVM_PASN_FLAG_HAS_HLTK) - memcpy(target->hltk, entry->hltk, sizeof(target->hltk)); + memcpy(hltk, entry->hltk, sizeof(entry->hltk)); else - memset(target->hltk, 0, sizeof(target->hltk)); + memset(hltk, 0, 
sizeof(entry->hltk)); if (vif->cfg.assoc && - !memcmp(vif->bss_conf.bssid, target->bssid, - sizeof(target->bssid))) - ieee80211_iter_keys(mvm->hw, vif, iter, target); - else - memcpy(target->tk, entry->tk, sizeof(target->tk)); + !memcmp(vif->bss_conf.bssid, bssid, ETH_ALEN)) { + struct iwl_mvm_ftm_iter_data target; - memcpy(target->rx_pn, entry->rx_pn, sizeof(target->rx_pn)); - memcpy(target->tx_pn, entry->tx_pn, sizeof(target->tx_pn)); + target.bssid = bssid; + ieee80211_iter_keys(mvm->hw, vif, iter, &target); + } else { + memcpy(tk, entry->tk, sizeof(entry->tk)); + } - target->initiator_ap_flags |= - cpu_to_le32(IWL_INITIATOR_AP_FLAGS_SECURED); + memcpy(rx_pn, entry->rx_pn, sizeof(entry->rx_pn)); + memcpy(tx_pn, entry->tx_pn, sizeof(entry->tx_pn)); + + FTM_SET_FLAG(SECURED); return; } } @@ -773,7 +794,11 @@ iwl_mvm_ftm_put_target_v7(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (err) return err; - iwl_mvm_ftm_set_secured_ranging(mvm, vif, target); + iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid, + &target->cipher, target->hltk, + target->tk, target->rx_pn, + target->tx_pn, + &target->initiator_ap_flags); return err; } @@ -920,6 +945,105 @@ static int iwl_mvm_ftm_start_v13(struct iwl_mvm *mvm, return iwl_mvm_ftm_send_cmd(mvm, &hcmd); } +static int +iwl_mvm_ftm_put_target_v10(struct iwl_mvm *mvm, struct ieee80211_vif *vif, + struct cfg80211_pmsr_request_peer *peer, + struct iwl_tof_range_req_ap_entry_v10 *target) +{ + u32 i2r_max_sts, flags; + int ret; + + ret = iwl_mvm_ftm_target_chandef_v2(mvm, peer, &target->channel_num, + &target->format_bw, + &target->ctrl_ch_position); + if (ret) + return ret; + + memcpy(target->bssid, peer->addr, ETH_ALEN); + target->burst_period = + cpu_to_le16(peer->ftm.burst_period); + target->samples_per_burst = peer->ftm.ftms_per_burst; + target->num_of_bursts = peer->ftm.num_bursts_exp; + iwl_mvm_ftm_set_target_flags(mvm, peer, &target->initiator_ap_flags); + iwl_mvm_ftm_set_sta(mvm, vif, peer, &target->sta_id, + &target->initiator_ap_flags); + iwl_mvm_ftm_set_secured_ranging(mvm, vif, target->bssid, + &target->cipher, target->hltk, + target->tk, target->rx_pn, + target->tx_pn, + &target->initiator_ap_flags); + + i2r_max_sts = IWL_MVM_FTM_I2R_MAX_STS > 1 ? 1 : + IWL_MVM_FTM_I2R_MAX_STS; + + target->r2i_ndp_params = IWL_MVM_FTM_R2I_MAX_REP | + (IWL_MVM_FTM_R2I_MAX_STS << IWL_LOCATION_MAX_STS_POS) | + (IWL_MVM_FTM_R2I_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS); + target->i2r_ndp_params = IWL_MVM_FTM_I2R_MAX_REP | + (i2r_max_sts << IWL_LOCATION_MAX_STS_POS) | + (IWL_MVM_FTM_I2R_MAX_TOTAL_LTF << IWL_LOCATION_TOTAL_LTF_POS); + + if (peer->ftm.non_trigger_based) { + target->min_time_between_msr = + cpu_to_le16(IWL_MVM_FTM_NON_TB_MIN_TIME_BETWEEN_MSR); + target->burst_period = + cpu_to_le16(IWL_MVM_FTM_NON_TB_MAX_TIME_BETWEEN_MSR); + } else { + target->min_time_between_msr = cpu_to_le16(0); + } + + target->band = + iwl_mvm_phy_band_from_nl80211(peer->chandef.chan->band); + + /* + * TODO: Beacon interval is currently unknown, so use the common value + * of 100 TUs. 
+ */ + target->beacon_interval = cpu_to_le16(100); + + /* + * If secure LTF is turned off, replace the flag with PMF only + */ + flags = le32_to_cpu(target->initiator_ap_flags); + if (flags & IWL_INITIATOR_AP_FLAGS_SECURED) { + if (!IWL_MVM_FTM_INITIATOR_SECURE_LTF) + flags &= ~IWL_INITIATOR_AP_FLAGS_SECURED; + + flags |= IWL_INITIATOR_AP_FLAGS_PMF; + target->initiator_ap_flags = cpu_to_le32(flags); + } + + return 0; +} + +static int iwl_mvm_ftm_start_v14(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct cfg80211_pmsr_request *req) +{ + struct iwl_tof_range_req_cmd_v14 cmd; + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(LOCATION_GROUP, TOF_RANGE_REQ_CMD), + .dataflags[0] = IWL_HCMD_DFL_DUP, + .data[0] = &cmd, + .len[0] = sizeof(cmd), + }; + u8 i; + int err; + + iwl_mvm_ftm_cmd_common(mvm, vif, (void *)&cmd, req); + + for (i = 0; i < cmd.num_of_ap; i++) { + struct cfg80211_pmsr_request_peer *peer = &req->peers[i]; + struct iwl_tof_range_req_ap_entry_v10 *target = &cmd.ap[i]; + + err = iwl_mvm_ftm_put_target_v10(mvm, vif, peer, target); + if (err) + return err; + } + + return iwl_mvm_ftm_send_cmd(mvm, &hcmd); +} + int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *req) { @@ -938,6 +1062,9 @@ int iwl_mvm_ftm_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_FW_CMD_VER_UNKNOWN); switch (cmd_ver) { + case 14: + err = iwl_mvm_ftm_start_v14(mvm, vif, req); + break; case 13: err = iwl_mvm_ftm_start_v13(mvm, vif, req); break; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c index 8e760300a1ab..e4caa362f597 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ftm-responder.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2015-2017 Intel Deutschland GmbH - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ #include #include @@ -88,7 +88,7 @@ static int iwl_mvm_ftm_responder_set_bw_v2(struct cfg80211_chan_def *chandef, static void iwl_mvm_ftm_responder_set_ndp(struct iwl_mvm *mvm, - struct iwl_tof_responder_config_cmd_v9 *cmd) + struct iwl_tof_responder_config_cmd *cmd) { /* Up to 2 R2I STS are allowed on the responder */ u32 r2i_max_sts = IWL_MVM_FTM_R2I_MAX_STS < 2 ? @@ -117,7 +117,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, * field interpretation is different), so the same struct can be use * for all cases. 
*/ - struct iwl_tof_responder_config_cmd_v9 cmd = { + struct iwl_tof_responder_config_cmd cmd = { .channel_num = chandef->chan->hw_value, .cmd_valid_fields = cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_CHAN_INFO | @@ -131,8 +131,13 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, lockdep_assert_held(&mvm->mutex); + if (cmd_ver == 10) { + cmd.band = + iwl_mvm_phy_band_from_nl80211(chandef->chan->band); + } + /* Use a default of bss_color=1 for now */ - if (cmd_ver == 9) { + if (cmd_ver >= 9) { cmd.cmd_valid_fields |= cpu_to_le32(IWL_TOF_RESPONDER_CMD_VALID_BSS_COLOR | IWL_TOF_RESPONDER_CMD_VALID_MIN_MAX_TIME_BETWEEN_MSR); @@ -148,7 +153,7 @@ iwl_mvm_ftm_responder_cmd(struct iwl_mvm *mvm, } if (cmd_ver >= 8) - iwl_mvm_ftm_responder_set_ndp(mvm, &cmd); + iwl_mvm_ftm_responder_set_ndp(mvm, (void *)&cmd); if (cmd_ver >= 7) err = iwl_mvm_ftm_responder_set_bw_v2(chandef, &cmd.format_bw, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index f4937a100cbe..08c4898c8f1a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -28,9 +28,6 @@ #define MVM_UCODE_ALIVE_TIMEOUT (2 * HZ) #define MVM_UCODE_CALIB_TIMEOUT (2 * HZ) -#define IWL_UATS_VLP_AP_SUPPORTED BIT(29) -#define IWL_UATS_AFC_AP_SUPPORTED BIT(30) - struct iwl_mvm_alive_data { bool valid; u32 scd_base_addr; @@ -408,7 +405,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, UREG_LMAC2_CURRENT_PC)); } - if (ret == -ETIMEDOUT && !mvm->pldr_sync) + if (ret == -ETIMEDOUT && !mvm->fw_product_reset) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_ALIVE_TIMEOUT); @@ -460,12 +457,14 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, #endif /* + * For pre-MLD API (MLD API doesn't use the timestamps): * All the BSSes in the BSS table include the GP2 in the system * at the beacon Rx time, this is of course no longer relevant * since we are resetting the firmware. * Purge all the BSS table. 
*/ - cfg80211_bss_flush(mvm->hw->wiphy); + if (!mvm->mld_api_is_used) + cfg80211_bss_flush(mvm->hw->wiphy); return 0; } @@ -491,17 +490,11 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm) .dataflags[0] = IWL_HCMD_DFL_NOCOPY, }; - if (!(mvm->trans->trans_cfg->device_family >= - IWL_DEVICE_FAMILY_AX210)) { + if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) { IWL_DEBUG_RADIO(mvm, "UATS feature is not supported\n"); return; } - if (!mvm->fwrt.uats_enabled) { - IWL_DEBUG_RADIO(mvm, "UATS feature is disabled\n"); - return; - } - cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id, IWL_FW_CMD_VER_UNKNOWN); if (cmd_ver != 1) { @@ -513,7 +506,7 @@ static void iwl_mvm_uats_init(struct iwl_mvm *mvm) ret = iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt); if (ret < 0) { - IWL_ERR(mvm, "failed to read UATS table (%d)\n", ret); + IWL_DEBUG_FW(mvm, "failed to read UATS table (%d)\n", ret); return; } @@ -627,8 +620,8 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) { sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG); /* if needed, we'll reset this on our way out later */ - mvm->pldr_sync = sb_cfg == SB_CFG_RESIDES_IN_ROM; - if (mvm->pldr_sync && iwl_mei_pldr_req()) + mvm->fw_product_reset = sb_cfg == SB_CFG_RESIDES_IN_ROM; + if (mvm->fw_product_reset && iwl_mei_pldr_req()) return -EBUSY; } @@ -647,7 +640,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); /* if we needed reset then fail here, but notify and remove */ - if (mvm->pldr_sync) { + if (mvm->fw_product_reset) { iwl_mei_alive_notif(false); iwl_trans_pcie_remove(mvm->trans, true); } @@ -686,14 +679,6 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) goto error; } - if (IWL_MVM_PARSE_NVM && !mvm->nvm_data) { - ret = iwl_nvm_init(mvm); - if (ret) { - IWL_ERR(mvm, "Failed to read NVM: %d\n", ret); - goto error; - } - } - ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP, NVM_ACCESS_COMPLETE), CMD_SEND_IN_RFKILL, @@ -718,7 +703,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm) return ret; /* Read the NVM only at driver load time, no need to do this twice */ - if (!IWL_MVM_PARSE_NVM && !mvm->nvm_data) { + if (!mvm->nvm_data) { mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw, mvm->set_tx_ant, mvm->set_rx_ant); if (IS_ERR(mvm->nvm_data)) { @@ -843,7 +828,7 @@ remove_notif: iwl_remove_notification(&mvm->notif_wait, &calib_wait); out: mvm->rfkill_safe_init_done = false; - if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) { + if (!mvm->nvm_data) { /* we want to debug INIT and we have no NVM - fake */ mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) + sizeof(struct ieee80211_channel) + @@ -1231,10 +1216,6 @@ static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm) "Failed to send LARI_CONFIG_CHANGE (%d)\n", ret); } - - if (le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_VLP_AP_SUPPORTED || - le32_to_cpu(cmd.oem_uhb_allow_bitmap) & IWL_UATS_AFC_AP_SUPPORTED) - mvm->fwrt.uats_enabled = true; } void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm) @@ -1376,9 +1357,6 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) if (ret) { IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); - - if (iwlmvm_mod_params.init_dbg) - return 0; return ret; } @@ -1415,14 +1393,14 @@ int iwl_mvm_up(struct iwl_mvm *mvm) ret = iwl_mvm_load_rt_fw(mvm); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); - if (ret != -ERFKILL && !mvm->pldr_sync) + if (ret != -ERFKILL 
&& !mvm->fw_product_reset) iwl_fw_dbg_error_collect(&mvm->fwrt, FW_DBG_TRIGGER_DRIVER); goto error; } /* FW loaded successfully */ - mvm->pldr_sync = false; + mvm->fw_product_reset = false; iwl_fw_disable_dbg_asserts(&mvm->fwrt); iwl_get_shared_mem_conf(&mvm->fwrt); @@ -1489,8 +1467,6 @@ int iwl_mvm_up(struct iwl_mvm *mvm) for (i = 0; i < IWL_MVM_FW_MAX_LINK_ID + 1; i++) RCU_INIT_POINTER(mvm->link_id_to_link_conf[i], NULL); - memset(&mvm->fw_link_ids_map, 0, sizeof(mvm->fw_link_ids_map)); - mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; /* reset quota debouncing buffer - 0xff will yield invalid data */ @@ -1619,8 +1595,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) IWL_DEBUG_INFO(mvm, "RT uCode started.\n"); return 0; error: - if (!iwlmvm_mod_params.init_dbg || !ret) - iwl_mvm_stop_device(mvm); + iwl_mvm_stop_device(mvm); return ret; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/link.c b/drivers/net/wireless/intel/iwlwifi/mvm/link.c index 6ec9a8e21a34..a9929aa49913 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/link.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/link.c @@ -11,6 +11,7 @@ HOW(BLOCKED_TPT) \ HOW(BLOCKED_FW) \ HOW(BLOCKED_NON_BSS) \ + HOW(BLOCKED_ROC) \ HOW(EXIT_MISSED_BEACON) \ HOW(EXIT_LOW_RSSI) \ HOW(EXIT_COEX) \ @@ -50,26 +51,15 @@ static void iwl_mvm_print_esr_state(struct iwl_mvm *mvm, u32 mask) static u32 iwl_mvm_get_free_fw_link_id(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvm_vif) { - u32 link_id; + u32 i; lockdep_assert_held(&mvm->mutex); - link_id = ffz(mvm->fw_link_ids_map); + for (i = 0; i < ARRAY_SIZE(mvm->link_id_to_link_conf); i++) + if (!rcu_access_pointer(mvm->link_id_to_link_conf[i])) + return i; - /* this case can happen if there're deactivated but not removed links */ - if (link_id > IWL_MVM_FW_MAX_LINK_ID) - return IWL_MVM_FW_LINK_ID_INVALID; - - mvm->fw_link_ids_map |= BIT(link_id); - return link_id; -} - -static void iwl_mvm_release_fw_link_id(struct iwl_mvm *mvm, u32 link_id) -{ - lockdep_assert_held(&mvm->mutex); - - if (!WARN_ON(link_id > IWL_MVM_FW_MAX_LINK_ID)) - mvm->fw_link_ids_map &= ~BIT(link_id); + return IWL_MVM_FW_LINK_ID_INVALID; } static int iwl_mvm_link_cmd_send(struct iwl_mvm *mvm, @@ -380,7 +370,6 @@ int iwl_mvm_unset_link_mapping(struct iwl_mvm *mvm, struct ieee80211_vif *vif, RCU_INIT_POINTER(mvm->link_id_to_link_conf[link_info->fw_link_id], NULL); - iwl_mvm_release_fw_link_id(mvm, link_info->fw_link_id); return 0; } @@ -504,17 +493,27 @@ iwl_mvm_get_puncturing_factor(const struct ieee80211_bss_conf *link_conf) static unsigned int iwl_mvm_get_chan_load(struct ieee80211_bss_conf *link_conf) { + struct ieee80211_vif *vif = link_conf->vif; struct iwl_mvm_vif_link_info *mvm_link = iwl_mvm_vif_from_mac80211(link_conf->vif)->link[link_conf->link_id]; const struct element *bss_load_elem; const struct ieee80211_bss_load_elem *bss_load; enum nl80211_band band = link_conf->chanreq.oper.chan->band; + const struct cfg80211_bss_ies *ies; unsigned int chan_load; u32 chan_load_by_us; rcu_read_lock(); - bss_load_elem = ieee80211_bss_get_elem(link_conf->bss, - WLAN_EID_QBSS_LOAD); + if (ieee80211_vif_link_active(vif, link_conf->link_id)) + ies = rcu_dereference(link_conf->bss->beacon_ies); + else + ies = rcu_dereference(link_conf->bss->ies); + + if (ies) + bss_load_elem = cfg80211_find_elem(WLAN_EID_QBSS_LOAD, + ies->data, ies->len); + else + bss_load_elem = NULL; /* If there isn't BSS Load element, take the defaults */ if (!bss_load_elem || @@ -978,6 +977,9 @@ void iwl_mvm_exit_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 
lockdep_assert_held(&mvm->mutex); + if (!IWL_MVM_AUTO_EML_ENABLE) + return; + /* Nothing to do */ if (!mvmvif->esr_active) return; @@ -1025,19 +1027,24 @@ void iwl_mvm_block_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, lockdep_assert_held(&mvm->mutex); + if (!IWL_MVM_AUTO_EML_ENABLE) + return; + /* This should be called only with disable reasons */ if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS))) return; - if (!(mvmvif->esr_disable_reason & reason)) { - IWL_DEBUG_INFO(mvm, - "Blocking EMLSR mode. reason = %s (0x%x)\n", - iwl_get_esr_state_string(reason), reason); - iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason); - } + if (mvmvif->esr_disable_reason & reason) + return; + + IWL_DEBUG_INFO(mvm, + "Blocking EMLSR mode. reason = %s (0x%x)\n", + iwl_get_esr_state_string(reason), reason); mvmvif->esr_disable_reason |= reason; + iwl_mvm_print_esr_state(mvm, mvmvif->esr_disable_reason); + iwl_mvm_exit_esr(mvm, vif, reason, link_to_keep); } @@ -1082,6 +1089,15 @@ static void iwl_mvm_esr_unblocked(struct iwl_mvm *mvm, IWL_DEBUG_INFO(mvm, "EMLSR is unblocked\n"); + /* If we exited due to an EXIT reason, and the exit was in less than + * 30 seconds, then a MLO scan was scheduled already. + */ + if (!need_new_sel && + !(mvmvif->last_esr_exit.reason & IWL_MVM_BLOCK_ESR_REASONS)) { + IWL_DEBUG_INFO(mvm, "Wait for MLO scan\n"); + return; + } + /* * If EMLSR was blocked for more than 30 seconds, or the last link * selection decided to not enter EMLSR, trigger a new scan. @@ -1111,6 +1127,9 @@ void iwl_mvm_unblock_esr(struct iwl_mvm *mvm, struct ieee80211_vif *vif, lockdep_assert_held(&mvm->mutex); + if (!IWL_MVM_AUTO_EML_ENABLE) + return; + /* This should be called only with disable reasons */ if (WARN_ON(!(reason & IWL_MVM_BLOCK_ESR_REASONS))) return; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 5144fa0f96b0..dfcc96f18b4f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -296,6 +296,7 @@ int iwl_mvm_mac_ctxt_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif) INIT_LIST_HEAD(&mvmvif->time_event_data.list); mvmvif->time_event_data.id = TE_MAX; + mvmvif->roc_activity = ROC_NUM_ACTIVITIES; mvmvif->deflink.bcast_sta.sta_id = IWL_MVM_INVALID_STA; mvmvif->deflink.mcast_sta.sta_id = IWL_MVM_INVALID_STA; @@ -1010,12 +1011,13 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, tx->tx_flags = cpu_to_le32(tx_flags); if (!fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) + IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) { iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); - tx->rate_n_flags = - cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << - RATE_MCS_ANT_POS); + tx->rate_n_flags = + cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << + RATE_MCS_ANT_POS); + } rate = iwl_mvm_mac_ctxt_get_beacon_rate(mvm, info, vif); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index dac6155ae1bd..835a05b91833 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -22,7 +22,7 @@ #include "mvm.h" #include "sta.h" #include "time-event.h" -#include "iwl-eeprom-parse.h" +#include "iwl-nvm-utils.h" #include "iwl-phy-db.h" #include "testmode.h" #include "fw/error-dump.h" @@ -30,21 +30,28 @@ #include "iwl-nvm-parse.h" #include "time-sync.h" +#define IWL_MVM_LIMITS(ap) \ + { \ + .max = 1, \ + .types = 
BIT(NL80211_IFTYPE_STATION), \ + }, \ + { \ + .max = 1, \ + .types = ap | \ + BIT(NL80211_IFTYPE_P2P_CLIENT) | \ + BIT(NL80211_IFTYPE_P2P_GO), \ + }, \ + { \ + .max = 1, \ + .types = BIT(NL80211_IFTYPE_P2P_DEVICE), \ + } + static const struct ieee80211_iface_limit iwl_mvm_limits[] = { - { - .max = 1, - .types = BIT(NL80211_IFTYPE_STATION), - }, - { - .max = 1, - .types = BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_CLIENT) | - BIT(NL80211_IFTYPE_P2P_GO), - }, - { - .max = 1, - .types = BIT(NL80211_IFTYPE_P2P_DEVICE), - }, + IWL_MVM_LIMITS(0) +}; + +static const struct ieee80211_iface_limit iwl_mvm_limits_ap[] = { + IWL_MVM_LIMITS(BIT(NL80211_IFTYPE_AP)) }; static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { @@ -54,6 +61,12 @@ static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { .limits = iwl_mvm_limits, .n_limits = ARRAY_SIZE(iwl_mvm_limits), }, + { + .num_different_channels = 1, + .max_interfaces = 3, + .limits = iwl_mvm_limits_ap, + .n_limits = ARRAY_SIZE(iwl_mvm_limits_ap), + }, }; static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = { @@ -138,8 +151,7 @@ struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, resp->channels, __le16_to_cpu(resp->mcc), __le16_to_cpu(resp->geo_info), - le32_to_cpu(resp->cap), resp_ver, - mvm->fwrt.uats_enabled); + le32_to_cpu(resp->cap), resp_ver); /* Store the return source id */ src_id = resp->source_id; if (IS_ERR_OR_NULL(regd)) { @@ -360,7 +372,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) if (mvm->mld_api_is_used && mvm->nvm_data->sku_cap_11be_enable && !iwlwifi_mod_params.disable_11ax && !iwlwifi_mod_params.disable_11be) { - hw->wiphy->flags |= WIPHY_FLAG_DISABLE_WEXT; + hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; /* we handle this already earlier, but need it for MLO */ ieee80211_hw_set(hw, HANDLES_QUIET_CSA); } @@ -371,12 +383,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) if (!mvm->mld_api_is_used) ieee80211_hw_set(hw, TIMING_BEACON_ONLY); - /* We should probably have this, but mac80211 - * currently doesn't support it for MLO. 
- */ - if (!(hw->wiphy->flags & WIPHY_FLAG_SUPPORTS_MLO)) - ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP); - /* * On older devices, enabling TX A-MSDU occasionally leads to * something getting messed up, the command read from the FIFO @@ -579,13 +585,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK); - BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) || - IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK)); + BUILD_BUG_ON(IWL_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) || + IWL_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK)); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) - mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS; + mvm->max_scans = IWL_MAX_UMAC_SCANS; else - mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS; + mvm->max_scans = IWL_MAX_LMAC_SCANS; if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels) hw->wiphy->bands[NL80211_BAND_2GHZ] = @@ -727,8 +733,6 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) #ifdef CONFIG_PM_SLEEP if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) && - mvm->trans->ops->d3_suspend && - mvm->trans->ops->d3_resume && device_can_wakeup(mvm->trans->dev)) { mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT | WIPHY_WOWLAN_DISCONNECT | @@ -823,7 +827,7 @@ void iwl_mvm_mac_tx(struct ieee80211_hw *hw, } if (offchannel && - !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && + !test_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status) && !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) goto drop; @@ -1104,6 +1108,8 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); spin_unlock_bh(&mvm->time_event_lock); + mvmvif->roc_activity = ROC_NUM_ACTIVITIES; + mvmvif->bf_enabled = false; mvmvif->ba_enabled = false; mvmvif->ap_sta = NULL; @@ -1209,6 +1215,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) int __iwl_mvm_mac_start(struct iwl_mvm *mvm) { + bool fast_resume = false; int ret; lockdep_assert_held(&mvm->mutex); @@ -1234,6 +1241,30 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) mvm->nvm_data = NULL; } +#ifdef CONFIG_PM + /* fast_resume will be cleared by iwl_mvm_fast_resume */ + fast_resume = mvm->fast_resume; + + if (fast_resume) { + ret = iwl_mvm_fast_resume(mvm); + if (ret) { + iwl_mvm_stop_device(mvm); + /* iwl_mvm_up() will be called further down */ + } else { + /* + * We clear IWL_MVM_STATUS_FIRMWARE_RUNNING upon + * mac_down() so that debugfs will stop honoring + * requests after we flush all the workers. + * Set the IWL_MVM_STATUS_FIRMWARE_RUNNING bit again + * now that we are back. This is a bit abusing the + * flag since the firmware wasn't really ever stopped, + * but this still serves the purpose. 
+ */ + set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); + } + } +#endif /* CONFIG_PM */ + if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { /* * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART @@ -1244,7 +1275,10 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) /* Clean up some internal and mac80211 state on restart */ iwl_mvm_restart_cleanup(mvm); } - ret = iwl_mvm_up(mvm); + + /* we also want to load the firmware if fast_resume failed */ + if (!fast_resume || ret) + ret = iwl_mvm_up(mvm); iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT, NULL); @@ -1295,7 +1329,7 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) { int ret; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); @@ -1311,8 +1345,6 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) * of packets the FW sent out, so we must reconnect. */ iwl_mvm_teardown_tdls_peers(mvm); - - mutex_unlock(&mvm->mutex); } void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, @@ -1329,7 +1361,7 @@ void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, } } -void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) +void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend) { lockdep_assert_held(&mvm->mutex); @@ -1345,7 +1377,11 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) if (!iwl_mvm_has_new_station_api(mvm->fw)) iwl_mvm_rm_aux_sta(mvm); - iwl_mvm_stop_device(mvm); + if (suspend && + mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + iwl_mvm_fast_suspend(mvm); + else + iwl_mvm_stop_device(mvm); iwl_mvm_async_handlers_purge(mvm); /* async_handlers_list is empty and will stay empty: HW is stopped */ @@ -1378,7 +1414,7 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) } } -void iwl_mvm_mac_stop(struct ieee80211_hw *hw) +void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); @@ -1414,7 +1450,7 @@ void iwl_mvm_mac_stop(struct ieee80211_hw *hw) iwl_mvm_mei_set_sw_rfkill_state(mvm); mutex_lock(&mvm->mutex); - __iwl_mvm_mac_stop(mvm); + __iwl_mvm_mac_stop(mvm, suspend); mutex_unlock(&mvm->mutex); /* @@ -1659,9 +1695,8 @@ static void iwl_mvm_prevent_esr_done_wk(struct wiphy *wiphy, struct ieee80211_vif *vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_PREVENTION); - mutex_unlock(&mvm->mutex); } static void iwl_mvm_mlo_int_scan_wk(struct wiphy *wiphy, struct wiphy_work *wk) @@ -1671,11 +1706,8 @@ static void iwl_mvm_mlo_int_scan_wk(struct wiphy *wiphy, struct wiphy_work *wk) struct ieee80211_vif *vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); - mutex_lock(&mvmvif->mvm->mutex); - + guard(mvm)(mvmvif->mvm); iwl_mvm_int_mlo_scan(mvmvif->mvm, vif); - - mutex_unlock(&mvmvif->mvm->mutex); } static void iwl_mvm_unblock_esr_tpt(struct wiphy *wiphy, struct wiphy_work *wk) @@ -1686,9 +1718,8 @@ static void iwl_mvm_unblock_esr_tpt(struct wiphy *wiphy, struct wiphy_work *wk) struct ieee80211_vif *vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_unblock_esr(mvm, vif, IWL_MVM_ESR_BLOCKED_TPT); - mutex_unlock(&mvm->mutex); } void iwl_mvm_mac_init_mvmvif(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif) @@ -1863,12 +1894,8 @@ void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, cancel_delayed_work_sync(&mvmvif->csa_work); } -/* This function is doing the common part of removing 
the interface for - * both - MLD and non-MLD modes. Returns true if removing the interface - * is done - */ -static bool iwl_mvm_mac_remove_interface_common(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) +static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -1916,21 +1943,10 @@ static bool iwl_mvm_mac_remove_interface_common(struct ieee80211_hw *hw, mvm->noa_duration = 0; } #endif - return true; + goto out; } iwl_mvm_power_update_mac(mvm); - return false; -} - -static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); - - if (iwl_mvm_mac_remove_interface_common(hw, vif)) - goto out; /* Before the interface removal, mac80211 would cancel the ROC, and the * ROC worker would be scheduled if needed. The worker would be flushed @@ -2078,7 +2094,7 @@ void iwl_mvm_configure_filter(struct ieee80211_hw *hw, struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); /* replace previous configuration */ kfree(mvm->mcast_filter_cmd); @@ -2095,7 +2111,6 @@ void iwl_mvm_configure_filter(struct ieee80211_hw *hw, iwl_mvm_recalc_multicast(mvm); out: - mutex_unlock(&mvm->mutex); *total_flags = 0; } @@ -2115,9 +2130,8 @@ static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw, !vif->p2p) return; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - mutex_unlock(&mvm->mutex); } int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, struct ieee80211_vif *vif) @@ -2779,6 +2793,13 @@ iwl_mvm_bss_info_changed_station_common(struct iwl_mvm *mvm, if (changes & BSS_CHANGED_BANDWIDTH) iwl_mvm_update_link_smps(vif, link_conf); + + if (changes & BSS_CHANGED_TPE) { + IWL_DEBUG_CALIB(mvm, "Changing TPE\n"); + iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif, + link_conf, + false); + } } static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, @@ -2828,6 +2849,8 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, if (changes & BSS_CHANGED_ASSOC) { if (vif->cfg.assoc) { + mvmvif->session_prot_connection_loss = false; + /* clear statistics to get clean beacon counter */ iwl_mvm_request_statistics(mvm, true); for_each_mvm_vif_valid_link(mvmvif, i) @@ -3162,7 +3185,7 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_stop_ap_ibss_common(mvm, vif); @@ -3192,8 +3215,6 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_power_update_mac(mvm); iwl_mvm_mac_ctxt_remove(mvm, vif); - - mutex_unlock(&mvm->mutex); } static void iwl_mvm_stop_ap(struct ieee80211_hw *hw, @@ -3248,7 +3269,7 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle) iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); @@ -3275,25 +3296,19 @@ static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, bss_conf->txpower); iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); } - - mutex_unlock(&mvm->mutex); } int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct 
ieee80211_scan_request *hw_req) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; if (hw_req->req.n_channels == 0 || hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels) return -EINVAL; - mutex_lock(&mvm->mutex); - ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies); - mutex_unlock(&mvm->mutex); - - return ret; + guard(mvm)(mvm); + return iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies); } void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, @@ -3301,7 +3316,7 @@ void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); /* Due to a race condition, it's possible that mac80211 asks * us to stop a hw_scan when it's already stopped. This can @@ -3312,8 +3327,6 @@ void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, */ if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); - - mutex_unlock(&mvm->mutex); } void @@ -3482,7 +3495,7 @@ void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, * Since there's mvm->mutex here, no need to have RCU lock for * mvm_sta->link access. */ - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); for (link_id = 0; link_id < ARRAY_SIZE(mvm_sta->link); link_id++) { struct iwl_mvm_link_sta *link_sta; u32 sta_id; @@ -3499,7 +3512,6 @@ void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, RCU_INIT_POINTER(mvm->fw_id_to_link_sta[sta_id], NULL); } } - mutex_unlock(&mvm->mutex); } static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, @@ -3775,8 +3787,6 @@ static void iwl_mvm_rs_rate_init_all_links(struct iwl_mvm *mvm, } } -#define IWL_MVM_MIN_BEACON_INTERVAL_TU 16 - static bool iwl_mvm_vif_conf_from_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -4246,12 +4256,8 @@ static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, * The exception is P2P_DEVICE interface which needs immediate update. 
*/ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - int ret; - - mutex_lock(&mvm->mutex); - ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); - mutex_unlock(&mvm->mutex); - return ret; + guard(mvm)(mvm); + return iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); } return 0; } @@ -4260,11 +4266,14 @@ void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_prep_tx_info *info) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + if (info->was_assoc && !mvmvif->session_prot_connection_loss) + return; + + guard(mvm)(mvm); iwl_mvm_protect_assoc(mvm, vif, info->duration, info->link_id); - mutex_unlock(&mvm->mutex); } void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw, @@ -4277,9 +4286,8 @@ void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw, if (info->success) return; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_stop_session_protection(mvm, vif); - mutex_unlock(&mvm->mutex); } int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, @@ -4289,20 +4297,12 @@ int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; + guard(mvm)(mvm); - mutex_lock(&mvm->mutex); + if (!vif->cfg.idle) + return -EBUSY; - if (!vif->cfg.idle) { - ret = -EBUSY; - goto out; - } - - ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED); - -out: - mutex_unlock(&mvm->mutex); - return ret; + return iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED); } int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, @@ -4580,13 +4580,9 @@ int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_key_conf *key) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - mutex_lock(&mvm->mutex); - ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key); - mutex_unlock(&mvm->mutex); - - return ret; + guard(mvm)(mvm); + return __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key); } void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, @@ -4808,6 +4804,37 @@ static int iwl_mvm_roc_station(struct iwl_mvm *mvm, return ret; } +static int iwl_mvm_roc_p2p(struct iwl_mvm *mvm, + struct ieee80211_channel *channel, + struct ieee80211_vif *vif, + int duration, + enum ieee80211_roc_type type) +{ + enum iwl_roc_activity activity; + int ret; + + lockdep_assert_held(&mvm->mutex); + + switch (type) { + case IEEE80211_ROC_TYPE_NORMAL: + activity = ROC_ACTIVITY_P2P_DISC; + break; + case IEEE80211_ROC_TYPE_MGMT_TX: + activity = ROC_ACTIVITY_P2P_NEG; + break; + default: + WARN_ONCE(1, "Got an invalid P2P ROC type\n"); + return -EINVAL; + } + + ret = iwl_mvm_mld_add_aux_sta(mvm, + iwl_mvm_get_lmac_id(mvm, channel->band)); + if (ret) + return ret; + + return iwl_mvm_roc_add_cmd(mvm, channel, vif, duration, activity); +} + static int iwl_mvm_p2p_find_phy_ctxt(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_channel *channel) @@ -4861,6 +4888,7 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct iwl_mvm_roc_ops *ops) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); + struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm); u32 lmac_id; int ret; @@ -4873,11 +4901,14 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, */ flush_work(&mvm->roc_done_wk); - ret = iwl_mvm_esr_non_bss_link(mvm, vif, 0, true); - if (ret) - return ret; + if (!IS_ERR_OR_NULL(bss_vif)) { + ret = iwl_mvm_block_esr_sync(mvm, bss_vif, + 
IWL_MVM_ESR_BLOCKED_ROC); + if (ret) + return ret; + } - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); switch (vif->type) { case NL80211_IFTYPE_STATION: @@ -4887,30 +4918,29 @@ int iwl_mvm_roc_common(struct ieee80211_hw *hw, struct ieee80211_vif *vif, ret = ops->add_aux_sta_for_hs20(mvm, lmac_id); if (!ret) ret = iwl_mvm_roc_station(mvm, channel, vif, duration); - goto out_unlock; + return ret; case NL80211_IFTYPE_P2P_DEVICE: /* handle below */ break; default: IWL_ERR(mvm, "ROC: Invalid vif type=%u\n", vif->type); - ret = -EINVAL; - goto out_unlock; + return -EINVAL; } + if (iwl_mvm_has_p2p_over_aux(mvm)) { + ret = iwl_mvm_roc_p2p(mvm, channel, vif, duration, type); + return ret; + } ret = iwl_mvm_p2p_find_phy_ctxt(mvm, vif, channel); if (ret) - goto out_unlock; + return ret; ret = ops->link(mvm, vif); if (ret) - goto out_unlock; + return ret; - ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); -out_unlock: - mutex_unlock(&mvm->mutex); - IWL_DEBUG_MAC80211(mvm, "leave\n"); - return ret; + return iwl_mvm_start_p2p_roc(mvm, vif, duration, type); } int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, @@ -4991,13 +5021,9 @@ int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - mutex_lock(&mvm->mutex); - ret = __iwl_mvm_add_chanctx(mvm, ctx); - mutex_unlock(&mvm->mutex); - - return ret; + guard(mvm)(mvm); + return __iwl_mvm_add_chanctx(mvm, ctx); } static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm, @@ -5016,9 +5042,8 @@ void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); __iwl_mvm_remove_chanctx(mvm, ctx); - mutex_unlock(&mvm->mutex); } void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, @@ -5038,26 +5063,23 @@ void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, phy_ctxt->ref, changed)) return; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); /* we are only changing the min_width, may be a noop */ if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) { if (phy_ctxt->width == def->width) - goto out_unlock; + return; /* we are just toggling between 20_NOHT and 20 */ if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 && def->width <= NL80211_CHAN_WIDTH_20) - goto out_unlock; + return; } iwl_mvm_bt_coex_vif_change(mvm); iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, &ctx->ap, ctx->rx_chains_static, ctx->rx_chains_dynamic); - -out_unlock: - mutex_unlock(&mvm->mutex); } /* @@ -5177,6 +5199,10 @@ static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, } iwl_mvm_update_quotas(mvm, false, NULL); + + iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif, + link_conf, + false); } goto out; @@ -5196,13 +5222,9 @@ static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - mutex_lock(&mvm->mutex); - ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, link_conf, ctx, false); - mutex_unlock(&mvm->mutex); - - return ret; + guard(mvm)(mvm); + return __iwl_mvm_assign_vif_chanctx(mvm, vif, link_conf, ctx, false); } /* @@ -5290,9 +5312,8 @@ static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); __iwl_mvm_unassign_vif_chanctx(mvm, vif, link_conf, ctx, false); - mutex_unlock(&mvm->mutex); } static int @@ -5302,7 +5323,7 @@ iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, { int ret; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); 
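/*
 * A note on the guard(mvm)(mvm) statements used throughout these
 * conversions: they rely on the scope-based cleanup helpers from
 * <linux/cleanup.h>, through the DEFINE_GUARD() this patch adds to
 * mvm.h below. The guard takes mvm->mutex at its declaration and drops
 * it automatically when the enclosing scope ends, so every early return
 * unlocks correctly. As a minimal sketch (f() and do_work() are
 * hypothetical, not part of this patch):
 *
 *	static int f(struct iwl_mvm *mvm)
 *	{
 *		guard(mvm)(mvm);
 *		return do_work(mvm);	// mutex held here
 *	}				// mutex released on scope exit
 *
 * behaves like the open-coded form:
 *
 *	static int f(struct iwl_mvm *mvm)
 *	{
 *		int ret;
 *
 *		mutex_lock(&mvm->mutex);
 *		ret = do_work(mvm);
 *		mutex_unlock(&mvm->mutex);
 *		return ret;
 *	}
 */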
ops->__unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf, vifs[0].old_ctx, true); __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx); @@ -5325,7 +5346,7 @@ iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, if (iwl_mvm_phy_ctx_count(mvm) > 1) iwl_mvm_teardown_tdls_peers(mvm); - goto out; + return 0; out_remove: __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx); @@ -5342,15 +5363,11 @@ out_reassign: goto out_restart; } - goto out; + return ret; out_restart: /* things keep failing, better restart the hw */ iwl_mvm_nic_restart(mvm, false); - -out: - mutex_unlock(&mvm->mutex); - return ret; } @@ -5361,7 +5378,7 @@ iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, { int ret; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); ops->__unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf, vifs[0].old_ctx, true); @@ -5373,7 +5390,7 @@ iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, goto out_reassign; } - goto out; + return 0; out_reassign: if (ops->__assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].link_conf, @@ -5382,15 +5399,11 @@ out_reassign: goto out_restart; } - goto out; + return ret; out_restart: /* things keep failing, better restart the hw */ iwl_mvm_nic_restart(mvm, false); - -out: - mutex_unlock(&mvm->mutex); - return ret; } @@ -5517,13 +5530,9 @@ int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, void *data, int len) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int err; - mutex_lock(&mvm->mutex); - err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len); - mutex_unlock(&mvm->mutex); - - return err; + guard(mvm)(mvm); + return __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len); } #endif @@ -5740,13 +5749,9 @@ static int iwl_mvm_mac_pre_channel_switch(struct ieee80211_hw *hw, struct ieee80211_channel_switch *chsw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - mutex_lock(&mvm->mutex); - ret = iwl_mvm_pre_channel_switch(mvm, vif, chsw); - mutex_unlock(&mvm->mutex); - - return ret; + guard(mvm)(mvm); + return iwl_mvm_pre_channel_switch(mvm, vif, chsw); } void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, @@ -5798,16 +5803,14 @@ void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, } mvmvif->csa_count = chsw->count; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (mvmvif->csa_failed) - goto out_unlock; + return; WARN_ON(iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, CHANNEL_SWITCH_TIME_EVENT_CMD), 0, sizeof(cmd), &cmd)); -out_unlock: - mutex_unlock(&mvm->mutex); } static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) @@ -5816,17 +5819,16 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) if (!iwl_mvm_has_new_tx_api(mvm)) { if (drop) { - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_flush_tx_path(mvm, iwl_mvm_flushable_queues(mvm) & queues); - mutex_unlock(&mvm->mutex); } else { iwl_trans_wait_tx_queues_empty(mvm->trans, queues); } return; } - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { struct ieee80211_sta *sta; @@ -5841,7 +5843,6 @@ static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) iwl_mvm_wait_sta_queues_empty(mvm, iwl_mvm_sta_from_mac80211(sta)); } - mutex_unlock(&mvm->mutex); } void iwl_mvm_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -5924,7 +5925,7 @@ void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_link_sta *link_sta; int link_id; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); for_each_sta_active_link(vif, sta, link_sta, 
link_id) { mvm_link_sta = rcu_dereference_protected(mvmsta->link[link_id], lockdep_is_held(&mvm->mutex)); @@ -5935,7 +5936,6 @@ void iwl_mvm_mac_flush_sta(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mvmsta->tfd_queue_msk)) IWL_ERR(mvm, "flush request fail\n"); } - mutex_unlock(&mvm->mutex); } static int iwl_mvm_mac_get_acs_survey(struct iwl_mvm *mvm, int idx, @@ -6001,7 +6001,6 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret = 0; u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, WIDE_ID(SYSTEM_GROUP, SYSTEM_STATISTICS_CMD), @@ -6021,12 +6020,13 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, if (idx > 0) return iwl_mvm_mac_get_acs_survey(mvm, idx - 1, survey); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (iwl_mvm_firmware_running(mvm)) { - ret = iwl_mvm_request_statistics(mvm, false); + int ret = iwl_mvm_request_statistics(mvm, false); + if (ret) - goto out; + return ret; } survey->filled = SURVEY_INFO_TIME_RX | @@ -6042,7 +6042,7 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, /* the new fw api doesn't support the following fields */ if (cmd_ver != IWL_FW_CMD_VER_UNKNOWN) - goto out; + return 0; survey->filled |= SURVEY_INFO_TIME | SURVEY_INFO_TIME_SCAN; @@ -6054,9 +6054,7 @@ int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, mvm->radio_stats.on_time_scan; do_div(survey->time_scan, USEC_PER_MSEC); - out: - mutex_unlock(&mvm->mutex); - return ret; + return 0; } static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) @@ -6223,13 +6221,13 @@ void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, if (!vif->cfg.assoc) return; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (mvmvif->deflink.ap_sta_id != mvmsta->deflink.sta_id) - goto unlock; + return; if (iwl_mvm_request_statistics(mvm, false)) - goto unlock; + return; sinfo->rx_beacon = 0; for_each_mvm_vif_valid_link(mvmvif, i) @@ -6243,8 +6241,6 @@ void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, mvmvif->deflink.beacon_stats.avg_signal; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); } - unlock: - mutex_unlock(&mvm->mutex); } static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm *mvm, @@ -6430,9 +6426,8 @@ void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0); - mutex_unlock(&mvm->mutex); } int @@ -6468,13 +6463,9 @@ int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *request) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - mutex_lock(&mvm->mutex); - ret = iwl_mvm_ftm_start(mvm, vif, request); - mutex_unlock(&mvm->mutex); - - return ret; + guard(mvm)(mvm); + return iwl_mvm_ftm_start(mvm, vif, request); } void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -6482,9 +6473,8 @@ void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_ftm_abort(mvm, request); - mutex_unlock(&mvm->mutex); } static bool iwl_mvm_can_hw_csum(struct sk_buff *skb) @@ -6519,7 +6509,6 @@ int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); u32 protocols = 0; - int ret; /* HW timestamping is only supported for a specific station */ if 
(!hwts->macaddr) @@ -6529,11 +6518,8 @@ int iwl_mvm_set_hw_timestamp(struct ieee80211_hw *hw, protocols = IWL_TIME_SYNC_PROTOCOL_TM | IWL_TIME_SYNC_PROTOCOL_FTM; - mutex_lock(&mvm->mutex); - ret = iwl_mvm_time_sync_config(mvm, hwts->macaddr, protocols); - mutex_unlock(&mvm->mutex); - - return ret; + guard(mvm)(mvm); + return iwl_mvm_time_sync_config(mvm, hwts->macaddr, protocols); } const struct ieee80211_ops iwl_mvm_hw_ops = { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c index fcfd2dd7568e..3c99396ad369 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-mac80211.c @@ -12,7 +12,7 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, int ret; int i; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_mac_init_mvmvif(mvm, mvmvif); @@ -32,7 +32,7 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, /* Allocate resources for the MAC context, and add it to the fw */ ret = iwl_mvm_mac_ctxt_init(mvm, vif); if (ret) - goto out_unlock; + return ret; rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif); @@ -46,7 +46,7 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, ret = iwl_mvm_mld_mac_ctxt_add(mvm, vif); if (ret) - goto out_unlock; + return ret; /* beacon filtering */ ret = iwl_mvm_disable_beacon_filter(mvm, vif); @@ -95,7 +95,7 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, if (vif->p2p || iwl_fw_lookup_cmd_ver(mvm->fw, PHY_CONTEXT_CMD, 1) < 5) vif->driver_flags |= IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW; - goto out_unlock; + return 0; out_free_bf: if (mvm->bf_allowed_vif == mvmvif) { @@ -106,9 +106,6 @@ static int iwl_mvm_mld_mac_add_interface(struct ieee80211_hw *hw, out_remove_mac: mvmvif->link[0] = NULL; iwl_mvm_mld_mac_ctxt_remove(mvm, vif); - out_unlock: - mutex_unlock(&mvm->mutex); - return ret; } @@ -125,7 +122,7 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw, vif->type == NL80211_IFTYPE_ADHOC)) iwl_mvm_tcm_rm_vif(mvm, vif); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (vif == mvm->csme_vif) { iwl_mei_set_netdev(NULL); @@ -188,8 +185,6 @@ static void iwl_mvm_mld_mac_remove_interface(struct ieee80211_hw *hw, mvm->monitor_on = false; __clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, mvm->hw->flags); } - - mutex_unlock(&mvm->mutex); } static unsigned int iwl_mvm_mld_count_active_links(struct iwl_mvm_vif *mvmvif) @@ -227,6 +222,8 @@ static void iwl_mvm_restart_mpdu_count(struct iwl_mvm *mvm, mvmsta->mpdu_counters[q].window_start = jiffies; spin_unlock_bh(&mvmsta->mpdu_counters[q].lock); } + + IWL_DEBUG_STATS(mvm, "MPDU counters are cleared\n"); } static int iwl_mvm_esr_mode_active(struct iwl_mvm *mvm, @@ -350,6 +347,11 @@ __iwl_mvm_mld_assign_vif_chanctx(struct iwl_mvm *mvm, rcu_read_unlock(); } + if (vif->type == NL80211_IFTYPE_STATION) + iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif, + link_conf, + false); + /* then activate */ ret = iwl_mvm_link_changed(mvm, vif, link_conf, LINK_CONTEXT_MODIFY_ACTIVE | @@ -387,10 +389,11 @@ static int iwl_mvm_mld_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; /* update EMLSR mode */ if (ieee80211_vif_type_p2p(vif) != NL80211_IFTYPE_STATION) { + int ret; + ret = iwl_mvm_esr_non_bss_link(mvm, vif, link_conf->link_id, true); /* @@ -401,11 +404,8 @@ static int iwl_mvm_mld_assign_vif_chanctx(struct ieee80211_hw *hw, return ret; } - 
mutex_lock(&mvm->mutex); - ret = __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false); - mutex_unlock(&mvm->mutex); - - return ret; + guard(mvm)(mvm); + return __iwl_mvm_mld_assign_vif_chanctx(mvm, vif, link_conf, ctx, false); } static int iwl_mvm_esr_mode_inactive(struct iwl_mvm *mvm, @@ -531,9 +531,37 @@ static void iwl_mvm_mld_unassign_vif_chanctx(struct ieee80211_hw *hw, } static void +iwl_mvm_tpe_sta_cmd_data(struct iwl_txpower_constraints_cmd *cmd, + const struct ieee80211_bss_conf *bss_info) +{ + u8 i; + + /* + * NOTE: the 0 here is IEEE80211_TPE_CAT_6GHZ_DEFAULT, + * we fully ignore IEEE80211_TPE_CAT_6GHZ_SUBORDINATE + */ + + BUILD_BUG_ON(ARRAY_SIZE(cmd->psd_pwr) != + ARRAY_SIZE(bss_info->tpe.psd_local[0].power)); + + /* if not valid, mac80211 puts default (max value) */ + for (i = 0; i < ARRAY_SIZE(cmd->psd_pwr); i++) + cmd->psd_pwr[i] = min(bss_info->tpe.psd_local[0].power[i], + bss_info->tpe.psd_reg_client[0].power[i]); + + BUILD_BUG_ON(ARRAY_SIZE(cmd->eirp_pwr) != + ARRAY_SIZE(bss_info->tpe.max_local[0].power)); + + for (i = 0; i < ARRAY_SIZE(cmd->eirp_pwr); i++) + cmd->eirp_pwr[i] = min(bss_info->tpe.max_local[0].power[i], + bss_info->tpe.max_reg_client[0].power[i]); +} + +void iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_bss_conf *bss_conf) + struct ieee80211_bss_conf *bss_conf, + bool is_ap) { struct iwl_txpower_constraints_cmd cmd = {}; struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); @@ -553,19 +581,22 @@ iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm, link_info->fw_link_id == IWL_MVM_FW_LINK_ID_INVALID) return; - if (bss_conf->chanreq.oper.chan->band != NL80211_BAND_6GHZ || - bss_conf->chanreq.oper.chan->flags & - IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT) + if (bss_conf->chanreq.oper.chan->band != NL80211_BAND_6GHZ) return; cmd.link_id = cpu_to_le16(link_info->fw_link_id); - /* - * Currently supporting VLP Soft AP only. - */ - cmd.ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP); memset(cmd.psd_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.psd_pwr)); memset(cmd.eirp_pwr, DEFAULT_TPE_TX_POWER, sizeof(cmd.eirp_pwr)); + if (is_ap) { + cmd.ap_type = cpu_to_le16(IWL_6GHZ_AP_TYPE_VLP); + } else if (bss_conf->power_type == IEEE80211_REG_UNSET_AP) { + return; + } else { + cmd.ap_type = cpu_to_le16(bss_conf->power_type - 1); + iwl_mvm_tpe_sta_cmd_data(&cmd, bss_conf); + } + ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP, AP_TX_POWER_CONSTRAINTS_CMD), @@ -584,15 +615,16 @@ static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw, struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); int ret; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (vif->type == NL80211_IFTYPE_AP) - iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif, link_conf); + iwl_mvm_send_ap_tx_power_constraint_cmd(mvm, vif, + link_conf, true); /* Send the beacon template */ ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif, link_conf); if (ret) - goto out_unlock; + return ret; /* the link should be already activated when assigning chan context */ ret = iwl_mvm_link_changed(mvm, vif, link_conf, @@ -600,11 +632,11 @@ static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw, ~LINK_CONTEXT_MODIFY_ACTIVE, true); if (ret) - goto out_unlock; + return ret; ret = iwl_mvm_mld_add_mcast_sta(mvm, vif, link_conf); if (ret) - goto out_unlock; + return ret; /* Send the bcast station. 
At this stage the TBTT and DTIM time * events are added and applied to the scheduler @@ -628,7 +660,7 @@ static int iwl_mvm_mld_start_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_ftm_restart_responder(mvm, vif, link_conf); - goto out_unlock; + return 0; out_failed: iwl_mvm_power_update_mac(mvm); @@ -636,8 +668,6 @@ out_failed: iwl_mvm_mld_rm_bcast_sta(mvm, vif, link_conf); out_rm_mcast: iwl_mvm_mld_rm_mcast_sta(mvm, vif, link_conf); -out_unlock: - mutex_unlock(&mvm->mutex); return ret; } @@ -660,7 +690,7 @@ static void iwl_mvm_mld_stop_ap_ibss(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_stop_ap_ibss_common(mvm, vif); @@ -674,7 +704,6 @@ static void iwl_mvm_mld_stop_ap_ibss(struct ieee80211_hw *hw, iwl_mvm_mld_rm_mcast_sta(mvm, vif, link_conf); iwl_mvm_power_update_mac(mvm); - mutex_unlock(&mvm->mutex); } static void iwl_mvm_mld_stop_ap(struct ieee80211_hw *hw, @@ -844,6 +873,8 @@ static void iwl_mvm_mld_vif_cfg_changed_station(struct iwl_mvm *mvm, if (changes & BSS_CHANGED_ASSOC) { if (vif->cfg.assoc) { + mvmvif->session_prot_connection_loss = false; + /* clear statistics to get clean beacon counter */ iwl_mvm_request_statistics(mvm, true); iwl_mvm_sf_update(mvm, vif, false); @@ -977,7 +1008,7 @@ static void iwl_mvm_mld_link_info_changed(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); switch (vif->type) { case NL80211_IFTYPE_STATION: @@ -1003,8 +1034,6 @@ static void iwl_mvm_mld_link_info_changed(struct ieee80211_hw *hw, link_conf->txpower); iwl_mvm_set_tx_power(mvm, vif, link_conf->txpower); } - - mutex_unlock(&mvm->mutex); } static void iwl_mvm_mld_vif_cfg_changed(struct ieee80211_hw *hw, @@ -1013,15 +1042,13 @@ static void iwl_mvm_mld_vif_cfg_changed(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (changes & BSS_CHANGED_IDLE && !vif->cfg.idle) iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); if (vif->type == NL80211_IFTYPE_STATION) iwl_mvm_mld_vif_cfg_changed_station(mvm, vif, changes); - - mutex_unlock(&mvm->mutex); } static int @@ -1054,9 +1081,8 @@ static void iwl_mvm_mld_config_iface_filter(struct ieee80211_hw *hw, !vif->p2p) return; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); iwl_mvm_mld_mac_ctxt_changed(mvm, vif, false); - mutex_unlock(&mvm->mutex); } static int @@ -1078,14 +1104,10 @@ iwl_mvm_mld_mac_conf_tx(struct ieee80211_hw *hw, * The exception is P2P_DEVICE interface which needs immediate update. 
*/ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { - int ret; - - mutex_lock(&mvm->mutex); - ret = iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, - LINK_CONTEXT_MODIFY_QOS_PARAMS, - true); - mutex_unlock(&mvm->mutex); - return ret; + guard(mvm)(mvm); + return iwl_mvm_link_changed(mvm, vif, &vif->bss_conf, + LINK_CONTEXT_MODIFY_QOS_PARAMS, + true); } return 0; } @@ -1230,13 +1252,9 @@ iwl_mvm_mld_change_sta_links(struct ieee80211_hw *hw, u16 old_links, u16 new_links) { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); - int ret; - mutex_lock(&mvm->mutex); - ret = iwl_mvm_mld_update_sta_links(mvm, vif, sta, old_links, new_links); - mutex_unlock(&mvm->mutex); - - return ret; + guard(mvm)(mvm); + return iwl_mvm_mld_update_sta_links(mvm, vif, sta, old_links, new_links); } bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif) @@ -1264,26 +1282,19 @@ static bool iwl_mvm_mld_can_activate_links(struct ieee80211_hw *hw, { struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); int n_links = hweight16(desired_links); - bool ret = true; if (n_links <= 1) return true; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); /* Check if HW supports the wanted number of links */ - if (n_links > iwl_mvm_max_active_links(mvm, vif)) { - ret = false; - goto unlock; - } + if (n_links > iwl_mvm_max_active_links(mvm, vif)) + return false; /* If it is an eSR device, check that we can enter eSR */ - ret = iwl_mvm_is_esr_supported(mvm->fwrt.trans) && - iwl_mvm_vif_has_esr_cap(mvm, vif); - -unlock: - mutex_unlock(&mvm->mutex); - return ret; + return iwl_mvm_is_esr_supported(mvm->fwrt.trans) && + iwl_mvm_vif_has_esr_cap(mvm, vif); } static enum ieee80211_neg_ttlm_res diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c index 9d139b56e152..d5a204e52076 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mld-sta.c @@ -241,7 +241,7 @@ int iwl_mvm_mld_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_MAX_TID_COUNT, &wdg_timeout); } -/* Allocate a new station entry for the broadcast station to the given vif, +/* Allocate a new station entry for the multicast station to the given vif, * and send it to the FW. * Note that each AP/GO mac should have its own multicast station. */ @@ -470,7 +470,7 @@ static int iwl_mvm_mld_cfg_sta(struct iwl_mvm *mvm, struct ieee80211_sta *sta, break; } - switch (sta->deflink.smps_mode) { + switch (link_sta->smps_mode) { case IEEE80211_SMPS_AUTOMATIC: case IEEE80211_SMPS_NUM_MODES: WARN_ON(1); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 0a1959bd4079..22f48b66d79c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -9,6 +9,7 @@ #include #include +#include #include #include @@ -23,7 +24,7 @@ #include "iwl-op-mode.h" #include "iwl-trans.h" #include "fw/notif-wait.h" -#include "iwl-eeprom-parse.h" +#include "iwl-nvm-utils.h" #include "fw/file.h" #include "iwl-config.h" #include "sta.h" @@ -82,14 +83,9 @@ extern const struct ieee80211_ops iwl_mvm_mld_hw_ops; /** * struct iwl_mvm_mod_params - module parameters for iwlmvm - * @init_dbg: if true, then the NIC won't be stopped if the INIT fw asserted. - * We will register to mac80211 to have testmode working. The NIC must not - * be up'ed after the INIT fw asserted. This is useful to be able to use - * proprietary tools over testmode to debug the INIT fw. 
* @power_scheme: one of enum iwl_power_scheme */ struct iwl_mvm_mod_params { - bool init_dbg; int power_scheme; }; extern struct iwl_mvm_mod_params iwlmvm_mod_params; @@ -360,7 +356,9 @@ struct iwl_mvm_vif_link_info { * @IWL_MVM_ESR_BLOCKED_WOWLAN: WOWLAN is preventing the enablement of EMLSR * @IWL_MVM_ESR_BLOCKED_TPT: block EMLSR when there is not enough traffic * @IWL_MVM_ESR_BLOCKED_FW: FW didn't recommend EMLSR, or forced an exit from it - * @IWL_MVM_ESR_BLOCKED_NON_BSS: An active non-bssid link's preventing EMLSR + * @IWL_MVM_ESR_BLOCKED_NON_BSS: An active non-BSS interface's link is + * preventing EMLSR + * @IWL_MVM_ESR_BLOCKED_ROC: remain-on-channel is preventing EMLSR * @IWL_MVM_ESR_EXIT_MISSED_BEACON: exited EMLSR due to missed beacons * @IWL_MVM_ESR_EXIT_LOW_RSSI: link is deactivated/not allowed for EMLSR * due to low RSSI. @@ -377,6 +375,7 @@ enum iwl_mvm_esr_state { IWL_MVM_ESR_BLOCKED_TPT = 0x4, IWL_MVM_ESR_BLOCKED_FW = 0x8, IWL_MVM_ESR_BLOCKED_NON_BSS = 0x10, + IWL_MVM_ESR_BLOCKED_ROC = 0x20, IWL_MVM_ESR_EXIT_MISSED_BEACON = 0x10000, IWL_MVM_ESR_EXIT_LOW_RSSI = 0x20000, IWL_MVM_ESR_EXIT_COEX = 0x40000, @@ -426,6 +425,7 @@ struct iwl_mvm_esr_exit { * @csa_bcn_pending: indicates that we are waiting for a beacon on a new channel * @csa_blocks_tx: CSA is blocking TX * @features: hw features active for this vif + * @max_tx_op: max TXOP in usecs for all ACs, zero for no limit. * @ap_beacon_time: AP beacon time for synchronisation (on older FW) * @bf_enabled: indicates if beacon filtering is enabled * @ba_enabled: indicates if beacon abort is enabled @@ -448,6 +448,40 @@ struct iwl_mvm_esr_exit { * @prevent_esr_done_wk: work that should be done when esr prevention ends. * @mlo_int_scan_wk: work for the internal MLO scan. * @unblock_esr_tpt_wk: work for unblocking EMLSR when tpt is high enough. + * @roc_activity: currently running ROC activity for this vif (or + * ROC_NUM_ACTIVITIES if no activity is running). + * @session_prot_connection_loss: the connection was lost due to session + * protection ending without receiving a beacon, so we now need to + * protect the deauth separately + * @ap_early_keys: The firmware cannot install keys before stations etc., + * but higher layers work differently, so we store the keys here for + * later installation.
+ * @ap_sta: pointer to the AP STA data structure + * @csa_count: CSA counter (old CSA implementation w/o firmware) + * @csa_misbehave: CSA AP misbehaviour flag (old implementation) + * @csa_target_freq: CSA target channel frequency (old implementation) + * @csa_work: CSA work (old implementation) + * @dbgfs_bf: beacon filtering debugfs data + * @dbgfs_dir: debugfs directory for this vif + * @dbgfs_pm: power management debugfs data + * @dbgfs_quota_min: debugfs value for minimal quota + * @dbgfs_slink: debugfs symlink for this interface + * @ftm_unprotected: unprotected FTM debugfs override + * @hs_time_event_data: hotspot/AUX ROC time event data + * @mac_pwr_cmd: debugfs override for MAC power command + * @target_ipv6_addrs: IPv6 addresses on this interface for offload + * @num_target_ipv6_addrs: number of @target_ipv6_addrs + * @tentative_addrs: bitmap of tentative IPv6 addresses in @target_ipv6_addrs + * @rekey_data: rekeying data for WoWLAN GTK rekey offload + * @seqno: storage for seqno for older firmware D0/D3 transition + * @seqno_valid: indicates @seqno is valid + * @time_event_data: session protection time event data + * @tsf_id: the TSF resource ID assigned in firmware (for firmware needing that) + * @tx_key_idx: WEP transmit key index for D3 + * @uapsd_misbehaving_ap_addr: MLD address/BSSID of U-APSD misbehaving AP, to + * not use U-APSD on reconnection + * @uapsd_nonagg_detected_wk: worker for handling detection of no aggregation + * in U-APSD */ struct iwl_mvm_vif { struct iwl_mvm *mvm; @@ -461,6 +495,7 @@ struct iwl_mvm_vif { bool pm_enabled; bool monitor_active; bool esr_active; + bool session_prot_connection_loss; u8 low_latency: 6; u8 low_latency_actual: 1; @@ -525,6 +560,7 @@ struct iwl_mvm_vif { struct iwl_mvm_time_event_data time_event_data; struct iwl_mvm_time_event_data hs_time_event_data; + enum iwl_roc_activity roc_activity; /* TCP Checksum Offload */ netdev_features_t features; @@ -538,6 +574,8 @@ struct iwl_mvm_vif { struct ieee80211_key_conf __rcu *keys[2]; } bcn_prot; + u16 max_tx_op; + u16 link_selection_res; u8 link_selection_primary; u8 primary_link; @@ -607,7 +645,7 @@ enum iwl_mvm_sched_scan_pass_all_states { }; /** - * struct iwl_mvm_tt_mgnt - Thermal Throttling Management structure + * struct iwl_mvm_tt_mgmt - Thermal Throttling Management structure * @ct_kill_exit: worker to exit thermal kill * @dynamic_smps: Is thermal throttling enabled dynamic_smps? * @tx_backoff: The current thermal throttling tx backoff in uSec.
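/*
 * The @roc_activity field documented above uses ROC_NUM_ACTIVITIES as an
 * "idle" sentinel: iwl_mvm_mac_ctxt_init() and the restart cleanup
 * iterator (earlier in this patch) both reset the field to that value,
 * so "no ROC running" is the out-of-range value rather than a separate
 * flag. A minimal sketch of the intended test (hypothetical helper, not
 * part of this patch):
 *
 *	static bool iwl_mvm_vif_roc_is_running(struct iwl_mvm_vif *mvmvif)
 *	{
 *		return mvmvif->roc_activity != ROC_NUM_ACTIVITIES;
 *	}
 */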
@@ -730,24 +768,20 @@ struct iwl_mvm_tcm { * struct iwl_mvm_reorder_buffer - per ra/tid/queue reorder buffer * @head_sn: reorder window head sn * @num_stored: number of mpdus stored in the buffer - * @buf_size: the reorder buffer size as set by the last addba request * @queue: queue of this reorder buffer * @last_amsdu: track last ASMDU SN for duplication detection * @last_sub_index: track ASMDU sub frame index for duplication detection * @valid: reordering is valid for this queue * @lock: protect reorder buffer internal state - * @mvm: mvm pointer, needed for frame timer context */ struct iwl_mvm_reorder_buffer { u16 head_sn; u16 num_stored; - u16 buf_size; int queue; u16 last_amsdu; u8 last_sub_index; bool valid; spinlock_t lock; - struct iwl_mvm *mvm; } ____cacheline_aligned_in_smp; /** @@ -769,6 +803,7 @@ __aligned(roundup_pow_of_two(sizeof(struct sk_buff_head))) * @tid: tid of the session * @baid: baid of the session * @timeout: the timeout set in the addba request + * @buf_size: the reorder buffer size as set by the last addba request * @entries_per_queue: # of buffers per queue, this actually gets * aligned up to avoid cache line sharing between queues * @last_rx: last rx jiffies, updated only if timeout passed from last update @@ -785,13 +820,14 @@ struct iwl_mvm_baid_data { u8 tid; u8 baid; u16 timeout; + u16 buf_size; u16 entries_per_queue; unsigned long last_rx; struct timer_list session_timer; struct iwl_mvm_baid_data __rcu **rcu_ptr; struct iwl_mvm *mvm; struct iwl_mvm_reorder_buffer reorder_buf[IWL_MAX_RX_HW_QUEUES]; - struct iwl_mvm_reorder_buf_entry entries[]; + struct iwl_mvm_reorder_buf_entry entries[] ____cacheline_aligned_in_smp; }; static inline struct iwl_mvm_baid_data * @@ -1040,7 +1076,6 @@ struct iwl_mvm { struct iwl_rx_phy_info last_phy_info; struct ieee80211_sta __rcu *fw_id_to_mac_id[IWL_MVM_STATION_COUNT_MAX]; struct ieee80211_link_sta __rcu *fw_id_to_link_sta[IWL_MVM_STATION_COUNT_MAX]; - unsigned long fw_link_ids_map; u8 rx_ba_sessions; /* configured by mac80211 */ @@ -1062,7 +1097,7 @@ struct iwl_mvm { unsigned int max_scans; /* UMAC scan tracking */ - u32 scan_uid_status[IWL_MVM_MAX_UMAC_SCANS]; + u32 scan_uid_status[IWL_MAX_UMAC_SCANS]; /* start time of last scan in TSF of the mac that requested the scan */ u64 scan_start; @@ -1152,6 +1187,7 @@ struct iwl_mvm { struct ieee80211_channel **nd_channels; int n_nd_channels; bool net_detect; + bool fast_resume; u8 offload_tid; #ifdef CONFIG_IWLWIFI_DEBUGFS bool d3_wake_sysassert; @@ -1306,13 +1342,21 @@ struct iwl_mvm { struct iwl_phy_specific_cfg phy_filters; #endif + /* report rx timestamp in ptp clock time */ + bool rx_ts_ptp; + unsigned long last_6ghz_passive_scan_jiffies; unsigned long last_reset_or_resume_time_jiffies; bool sta_remove_requires_queue_remove; bool mld_api_is_used; - bool pldr_sync; + /* + * Indicates that firmware will do a product reset (and therefore + * fail to load) when we start it (due to OTP burn); + * if so, don't dump errors etc. since this is expected.
+ */ + bool fw_product_reset; struct iwl_time_sync_data time_sync; @@ -1330,11 +1374,14 @@ struct iwl_mvm { #define IWL_MAC80211_GET_MVM(_hw) \ IWL_OP_MODE_GET_MVM((struct iwl_op_mode *)((_hw)->priv)) +DEFINE_GUARD(mvm, struct iwl_mvm *, mutex_lock(&_T->mutex), mutex_unlock(&_T->mutex)) + /** * enum iwl_mvm_status - MVM status bits * @IWL_MVM_STATUS_HW_RFKILL: HW RF-kill is asserted * @IWL_MVM_STATUS_HW_CTKILL: CT-kill is active - * @IWL_MVM_STATUS_ROC_RUNNING: remain-on-channel is running + * @IWL_MVM_STATUS_ROC_P2P_RUNNING: remain-on-channel on P2P is running (when + * P2P is not over AUX) * @IWL_MVM_STATUS_HW_RESTART_REQUESTED: HW restart was requested * @IWL_MVM_STATUS_IN_HW_RESTART: HW restart is active * @IWL_MVM_STATUS_ROC_AUX_RUNNING: AUX remain-on-channel is running @@ -1348,7 +1395,7 @@ struct iwl_mvm { enum iwl_mvm_status { IWL_MVM_STATUS_HW_RFKILL, IWL_MVM_STATUS_HW_CTKILL, - IWL_MVM_STATUS_ROC_RUNNING, + IWL_MVM_STATUS_ROC_P2P_RUNNING, IWL_MVM_STATUS_HW_RESTART_REQUESTED, IWL_MVM_STATUS_IN_HW_RESTART, IWL_MVM_STATUS_ROC_AUX_RUNNING, @@ -1439,7 +1486,8 @@ iwl_mvm_rcu_dereference_vif_id(struct iwl_mvm *mvm, u8 vif_id, bool rcu) static inline struct ieee80211_bss_conf * iwl_mvm_rcu_fw_link_id_to_link_conf(struct iwl_mvm *mvm, u8 link_id, bool rcu) { - if (WARN_ON(link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf))) + if (IWL_FW_CHECK(mvm, link_id >= ARRAY_SIZE(mvm->link_id_to_link_conf), + "erroneous FW link ID: %d\n", link_id)) return NULL; if (rcu) @@ -1724,7 +1772,7 @@ struct iwl_rate_info { u8 ieee; /* MAC header: IWL_RATE_6M_IEEE, etc. */ }; -void __iwl_mvm_mac_stop(struct iwl_mvm *mvm); +void __iwl_mvm_mac_stop(struct iwl_mvm *mvm, bool suspend); int __iwl_mvm_mac_start(struct iwl_mvm *mvm); /****************** @@ -1860,10 +1908,10 @@ static inline u8 iwl_mvm_get_valid_tx_ant(struct iwl_mvm *mvm) static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm) { - u8 rx_ant = mvm->fw->valid_tx_ant; + u8 rx_ant = mvm->fw->valid_rx_ant; if (mvm->nvm_data && mvm->nvm_data->valid_rx_ant) - rx_ant &= mvm->nvm_data->valid_tx_ant; + rx_ant &= mvm->nvm_data->valid_rx_ant; if (mvm->set_rx_ant) rx_ant &= mvm->set_rx_ant; @@ -2246,11 +2294,22 @@ extern const struct file_operations iwl_dbgfs_d3_test_ops; #ifdef CONFIG_PM void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif); +void iwl_mvm_fast_suspend(struct iwl_mvm *mvm); +int iwl_mvm_fast_resume(struct iwl_mvm *mvm); #else static inline void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { } + +static inline void iwl_mvm_fast_suspend(struct iwl_mvm *mvm) +{ +} + +static inline int iwl_mvm_fast_resume(struct iwl_mvm *mvm) +{ + return 0; +} #endif void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta, struct iwl_wowlan_config_cmd *cmd); @@ -2762,6 +2821,13 @@ static inline void iwl_mvm_mei_set_sw_rfkill_state(struct iwl_mvm *mvm) sw_rfkill); } +static inline bool iwl_mvm_has_p2p_over_aux(struct iwl_mvm *mvm) +{ + u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD); + + return iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0) >= 4; +} + static inline bool iwl_mvm_mei_filter_scan(struct iwl_mvm *mvm, struct sk_buff *skb) { @@ -2796,7 +2862,7 @@ int iwl_mvm_op_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant); int iwl_mvm_mac_start(struct ieee80211_hw *hw); void iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, enum ieee80211_reconfig_type reconfig_type); -void iwl_mvm_mac_stop(struct ieee80211_hw *hw); +void iwl_mvm_mac_stop(struct ieee80211_hw *hw, bool suspend); static 
inline int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) { return 0; @@ -2927,7 +2993,7 @@ void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif, int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm, struct ieee80211_channel *channel, struct ieee80211_vif *vif, - int duration, u32 activity); + int duration, enum iwl_roc_activity activity); /* EMLSR */ bool iwl_mvm_vif_has_esr_cap(struct iwl_mvm *mvm, struct ieee80211_vif *vif); @@ -2954,4 +3020,10 @@ iwl_mvm_bt_coex_calculate_esr_mode(struct iwl_mvm *mvm, bool primary); int iwl_mvm_esr_non_bss_link(struct iwl_mvm *mvm, struct ieee80211_vif *vif, unsigned int link_id, bool active); + +void +iwl_mvm_send_ap_tx_power_constraint_cmd(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, + bool is_ap); #endif /* __IWL_MVM_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c index ae8177222881..836ca22597bc 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/nvm.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2018-2019, 2021-2023 Intel Corporation + * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -9,8 +9,7 @@ #include "iwl-trans.h" #include "iwl-csr.h" #include "mvm.h" -#include "iwl-eeprom-parse.h" -#include "iwl-eeprom-read.h" +#include "iwl-nvm-utils.h" #include "iwl-nvm-parse.h" #include "iwl-prph.h" #include "fw/acpi.h" diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index d343432474db..b7dcae76a05d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -18,7 +18,7 @@ #include "iwl-modparams.h" #include "mvm.h" #include "iwl-phy-db.h" -#include "iwl-eeprom-parse.h" +#include "iwl-nvm-utils.h" #include "iwl-csr.h" #include "iwl-io.h" #include "iwl-prph.h" @@ -41,12 +41,8 @@ static const struct iwl_op_mode_ops iwl_mvm_ops_mq; struct iwl_mvm_mod_params iwlmvm_mod_params = { .power_scheme = IWL_POWER_SCHEME_BPS, - /* rest of fields are 0 by default */ }; -module_param_named(init_dbg, iwlmvm_mod_params.init_dbg, bool, 0444); -MODULE_PARM_DESC(init_dbg, - "set to true to debug an ASSERT in INIT fw (default: false"); module_param_named(power_scheme, iwlmvm_mod_params.power_scheme, int, 0444); MODULE_PARM_DESC(power_scheme, "power management scheme: 1-active, 2-balanced, 3-low power, default: 2"); @@ -471,7 +467,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = { iwl_mvm_time_sync_msmt_confirm_event, RX_HANDLER_SYNC, struct iwl_time_msmt_cfm_notify), RX_HANDLER_GRP(MAC_CONF_GROUP, ROC_NOTIF, - iwl_mvm_rx_roc_notif, RX_HANDLER_SYNC, + iwl_mvm_rx_roc_notif, RX_HANDLER_ASYNC_LOCKED, struct iwl_roc_notif), RX_HANDLER_GRP(SCAN_GROUP, CHANNEL_SURVEY_NOTIF, iwl_mvm_rx_channel_survey_notif, RX_HANDLER_ASYNC_LOCKED, @@ -572,6 +568,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { HCMD_NAME(D0I3_END_CMD), HCMD_NAME(LTR_CONFIG), HCMD_NAME(LDBG_CONFIG_CMD), + HCMD_NAME(DEBUG_LOG_MSG), }; /* Please keep this array *SORTED* by hex value. 
@@ -579,6 +576,7 @@ static const struct iwl_hcmd_names iwl_mvm_legacy_names[] = { */ static const struct iwl_hcmd_names iwl_mvm_system_names[] = { HCMD_NAME(SHARED_MEM_CFG_CMD), + HCMD_NAME(SOC_CONFIGURATION_CMD), HCMD_NAME(INIT_EXTENDED_CFG_CMD), HCMD_NAME(FW_ERROR_RECOVERY_CMD), HCMD_NAME(RFI_CONFIG_CMD), @@ -593,8 +591,10 @@ static const struct iwl_hcmd_names iwl_mvm_system_names[] = { * Access is done through binary search */ static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = { + HCMD_NAME(LOW_LATENCY_CMD), HCMD_NAME(CHANNEL_SWITCH_TIME_EVENT_CMD), HCMD_NAME(SESSION_PROTECTION_CMD), + HCMD_NAME(CANCEL_CHANNEL_SWITCH_CMD), HCMD_NAME(MAC_CONFIG_CMD), HCMD_NAME(LINK_CONFIG_CMD), HCMD_NAME(STA_CONFIG_CMD), @@ -603,7 +603,10 @@ static const struct iwl_hcmd_names iwl_mvm_mac_conf_names[] = { HCMD_NAME(STA_DISABLE_TX_CMD), HCMD_NAME(ROC_CMD), HCMD_NAME(ROC_NOTIF), + HCMD_NAME(CHANNEL_SWITCH_ERROR_NOTIF), + HCMD_NAME(MISSED_VAP_NOTIF), HCMD_NAME(SESSION_PROTECTION_NOTIF), + HCMD_NAME(PROBE_RESPONSE_DATA_NOTIF), HCMD_NAME(CHANNEL_SWITCH_START_NOTIF), }; @@ -627,6 +630,8 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(DQA_ENABLE_CMD), HCMD_NAME(UPDATE_MU_GROUPS_CMD), HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), + HCMD_NAME(WNM_PLATFORM_PTM_REQUEST_CMD), + HCMD_NAME(WNM_80211V_TIMING_MEASUREMENT_CONFIG_CMD), HCMD_NAME(STA_HE_CTXT_CMD), HCMD_NAME(RLC_CONFIG_CMD), HCMD_NAME(RFH_QUEUE_CONFIG_CMD), @@ -650,6 +655,21 @@ static const struct iwl_hcmd_names iwl_mvm_statistics_names[] = { HCMD_NAME(STATISTICS_OPER_PART1_NOTIF), }; +/* Please keep this array *SORTED* by hex value. + * Access is done through binary search + */ +static const struct iwl_hcmd_names iwl_mvm_debug_names[] = { + HCMD_NAME(LMAC_RD_WR), + HCMD_NAME(UMAC_RD_WR), + HCMD_NAME(HOST_EVENT_CFG), + HCMD_NAME(DBGC_SUSPEND_RESUME), + HCMD_NAME(BUFFER_ALLOCATION), + HCMD_NAME(GET_TAS_STATUS), + HCMD_NAME(FW_DUMP_COMPLETE_CMD), + HCMD_NAME(FW_CLEAR_BUFFER), + HCMD_NAME(MFU_ASSERT_DUMP_NTF), +}; + /* Please keep this array *SORTED* by hex value. 
* Access is done through binary search */ @@ -705,6 +725,7 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = { [PROT_OFFLOAD_GROUP] = HCMD_ARR(iwl_mvm_prot_offload_names), [REGULATORY_AND_NVM_GROUP] = HCMD_ARR(iwl_mvm_regulatory_and_nvm_names), + [DEBUG_GROUP] = HCMD_ARR(iwl_mvm_debug_names), [STATISTICS_GROUP] = HCMD_ARR(iwl_mvm_statistics_names), }; @@ -740,20 +761,18 @@ static void iwl_mvm_tx_unblock_dwork(struct work_struct *work) struct ieee80211_vif *tx_blocked_vif; struct iwl_mvm_vif *mvmvif; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); tx_blocked_vif = rcu_dereference_protected(mvm->csa_tx_blocked_vif, lockdep_is_held(&mvm->mutex)); if (!tx_blocked_vif) - goto unlock; + return; mvmvif = iwl_mvm_vif_from_mac80211(tx_blocked_vif); iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, false); RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); -unlock: - mutex_unlock(&mvm->mutex); } static void iwl_mvm_fwrt_dump_start(void *ctx) @@ -770,21 +789,12 @@ static void iwl_mvm_fwrt_dump_end(void *ctx) mutex_unlock(&mvm->mutex); } -static bool iwl_mvm_fwrt_fw_running(void *ctx) -{ - return iwl_mvm_firmware_running(ctx); -} - static int iwl_mvm_fwrt_send_hcmd(void *ctx, struct iwl_host_cmd *host_cmd) { struct iwl_mvm *mvm = (struct iwl_mvm *)ctx; - int ret; - mutex_lock(&mvm->mutex); - ret = iwl_mvm_send_cmd(mvm, host_cmd); - mutex_unlock(&mvm->mutex); - - return ret; + guard(mvm)(mvm); + return iwl_mvm_send_cmd(mvm, host_cmd); } static bool iwl_mvm_d3_debug_enable(void *ctx) @@ -795,7 +805,6 @@ static bool iwl_mvm_d3_debug_enable(void *ctx) static const struct iwl_fw_runtime_ops iwl_mvm_fwrt_ops = { .dump_start = iwl_mvm_fwrt_dump_start, .dump_end = iwl_mvm_fwrt_dump_end, - .fw_running = iwl_mvm_fwrt_fw_running, .send_hcmd = iwl_mvm_fwrt_send_hcmd, .d3_debug_enable = iwl_mvm_d3_debug_enable, }; @@ -851,8 +860,7 @@ get_nvm_from_fw: ret = iwl_mvm_init_mcc(mvm); } - if (!iwlmvm_mod_params.init_dbg || !ret) - iwl_mvm_stop_device(mvm); + iwl_mvm_stop_device(mvm); mutex_unlock(&mvm->mutex); wiphy_unlock(mvm->hw->wiphy); @@ -862,7 +870,7 @@ get_nvm_from_fw: IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret); /* no longer need this regardless of failure or not */ - mvm->pldr_sync = false; + mvm->fw_product_reset = false; return ret; } @@ -1360,24 +1368,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, trans_cfg.no_reclaim_cmds = no_reclaim_cmds; trans_cfg.n_no_reclaim_cmds = ARRAY_SIZE(no_reclaim_cmds); - switch (iwlwifi_mod_params.amsdu_size) { - case IWL_AMSDU_DEF: - trans_cfg.rx_buf_size = IWL_AMSDU_4K; - break; - case IWL_AMSDU_4K: - trans_cfg.rx_buf_size = IWL_AMSDU_4K; - break; - case IWL_AMSDU_8K: - trans_cfg.rx_buf_size = IWL_AMSDU_8K; - break; - case IWL_AMSDU_12K: - trans_cfg.rx_buf_size = IWL_AMSDU_12K; - break; - default: - pr_err("%s: Unsupported amsdu_size: %d\n", KBUILD_MODNAME, - iwlwifi_mod_params.amsdu_size); - trans_cfg.rx_buf_size = IWL_AMSDU_4K; - } + trans_cfg.rx_buf_size = iwl_amsdu_size_to_rxb_size(); trans->wide_cmd_header = true; trans_cfg.bc_table_dword = @@ -1437,9 +1428,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, goto out_free; } - IWL_INFO(mvm, "Detected %s, REV=0x%X\n", - mvm->trans->name, mvm->trans->hw_rev); - if (iwlwifi_mod_params.nvm_file) mvm->nvm_file_name = iwlwifi_mod_params.nvm_file; else @@ -1507,8 +1495,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_fw_flush_dumps(&mvm->fwrt); iwl_fw_runtime_free(&mvm->fwrt); - if (iwlmvm_mod_params.init_dbg) - return op_mode; 
iwl_phy_db_free(mvm->phy_db); kfree(mvm->scan_cmd); iwl_trans_op_mode_leave(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/power.c b/drivers/net/wireless/intel/iwlwifi/mvm/power.c index 568f53c56199..bc363e8427e4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c @@ -211,19 +211,37 @@ static void iwl_mvm_power_configure_uapsd(struct iwl_mvm *mvm, IWL_MVM_PS_HEAVY_RX_THLD_PERCENT; } -static void iwl_mvm_p2p_standalone_iterator(void *_data, u8 *mac, - struct ieee80211_vif *vif) -{ - bool *is_p2p_standalone = _data; +struct iwl_allow_uapsd_iface_iterator_data { + struct ieee80211_vif *current_vif; + bool allow_uapsd; +}; - switch (ieee80211_vif_type_p2p(vif)) { - case NL80211_IFTYPE_P2P_GO: +static void iwl_mvm_allow_uapsd_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_allow_uapsd_iface_iterator_data *data = _data; + struct iwl_mvm_vif *other_mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_vif *curr_mvmvif = + iwl_mvm_vif_from_mac80211(data->current_vif); + + /* exclude the given vif */ + if (vif == data->current_vif) + return; + + switch (vif->type) { case NL80211_IFTYPE_AP: - *is_p2p_standalone = false; + case NL80211_IFTYPE_ADHOC: + case NL80211_IFTYPE_NAN: + data->allow_uapsd = false; break; case NL80211_IFTYPE_STATION: - if (vif->cfg.assoc) - *is_p2p_standalone = false; + /* allow UAPSD if P2P interface and BSS station interface share + * the same channel. + */ + if (vif->cfg.assoc && other_mvmvif->deflink.phy_ctxt && + curr_mvmvif->deflink.phy_ctxt && + other_mvmvif->deflink.phy_ctxt->id != curr_mvmvif->deflink.phy_ctxt->id) + data->allow_uapsd = false; break; default: @@ -235,6 +253,10 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_allow_uapsd_iface_iterator_data data = { + .current_vif = vif, + .allow_uapsd = true, + }; if (ether_addr_equal(mvmvif->uapsd_misbehaving_ap_addr, vif->cfg.ap_addr)) @@ -249,88 +271,75 @@ static bool iwl_mvm_power_allow_uapsd(struct iwl_mvm *mvm, IEEE80211_P2P_OPPPS_ENABLE_BIT)) return false; - /* - * Avoid using uAPSD if client is in DCM - - * low latency issue in Miracast - */ - if (iwl_mvm_phy_ctx_count(mvm) >= 2) + if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) return false; - if (vif->p2p) { - /* Allow U-APSD only if p2p is stand alone */ - bool is_p2p_standalone = true; + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_allow_uapsd_iterator, + &data); - if (!iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) - return false; - - ieee80211_iterate_active_interfaces_atomic(mvm->hw, - IEEE80211_IFACE_ITER_NORMAL, - iwl_mvm_p2p_standalone_iterator, - &is_p2p_standalone); - - if (!is_p2p_standalone) - return false; - } - - return true; + return data.allow_uapsd; } -static bool iwl_mvm_power_is_radar(struct ieee80211_vif *vif) +static bool iwl_mvm_power_is_radar(struct ieee80211_bss_conf *link_conf) { struct ieee80211_chanctx_conf *chanctx_conf; - struct ieee80211_bss_conf *link_conf; - bool radar_detect = false; - unsigned int link_id; - rcu_read_lock(); - for_each_vif_active_link(vif, link_conf, link_id) { - chanctx_conf = rcu_dereference(link_conf->chanctx_conf); - /* this happens on link switching, just ignore inactive ones */ - if (!chanctx_conf) - continue; + chanctx_conf = rcu_dereference(link_conf->chanctx_conf); - radar_detect = !!(chanctx_conf->def.chan->flags & - 
IEEE80211_CHAN_RADAR); - if (radar_detect) - goto out; - } + /* this happens on link switching, just ignore inactive ones */ + if (!chanctx_conf) + return false; -out: - rcu_read_unlock(); - return radar_detect; + return chanctx_conf->def.chan->flags & IEEE80211_CHAN_RADAR; } static void iwl_mvm_power_config_skip_dtim(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct iwl_mac_power_cmd *cmd) { - int dtimper = vif->bss_conf.dtim_period ?: 1; - int skip; + struct ieee80211_bss_conf *link_conf; + unsigned int min_link_skip = ~0; + unsigned int link_id; /* disable, in case we're supposed to override */ cmd->skip_dtim_periods = 0; cmd->flags &= ~cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); - if (iwl_mvm_power_is_radar(vif)) - return; - - if (dtimper >= 10) - return; - if (!test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status)) { if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_LP) return; - skip = 2; - } else { - int dtimper_tu = dtimper * vif->bss_conf.beacon_int; - - if (WARN_ON(!dtimper_tu)) - return; - /* configure skip over dtim up to 900 TU DTIM interval */ - skip = max_t(u8, 1, 900 / dtimper_tu); + cmd->skip_dtim_periods = 2; + cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); + return; } - cmd->skip_dtim_periods = skip; + rcu_read_lock(); + for_each_vif_active_link(vif, link_conf, link_id) { + unsigned int dtimper = link_conf->dtim_period ?: 1; + unsigned int dtimper_tu = dtimper * link_conf->beacon_int; + unsigned int skip; + + if (dtimper >= 10 || iwl_mvm_power_is_radar(link_conf)) { + rcu_read_unlock(); + return; + } + + if (WARN_ON(!dtimper_tu)) + continue; + + /* configure skip over dtim up to 900 TU DTIM interval */ + skip = max_t(int, 1, 900 / dtimper_tu); + min_link_skip = min(min_link_skip, skip); + } + rcu_read_unlock(); + + /* no WARN_ON, can only happen with WARN_ON above */ + if (min_link_skip == ~0) + return; + + cmd->skip_dtim_periods = min_link_skip; cmd->flags |= cpu_to_le16(POWER_FLAGS_SKIP_OVER_DTIM_MSK); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 3ba62fb2c85e..05715e5af6ab 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -514,6 +514,8 @@ void iwl_mvm_tlc_update_notif(struct iwl_mvm *mvm, link_sta->agg.max_tid_amsdu_len[i] = 1; } + ieee80211_sta_recalc_aggregates(sta); + IWL_DEBUG_RATE(mvm, "AMSDU update. 
AMSDU size: %d, AMSDU selected size: %d, AMSDU TID bitmap 0x%X\n", le32_to_cpu(notif->amsdu_size), size, @@ -609,6 +611,7 @@ void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm, cpu_to_le16(max_amsdu_len) : 0, }; unsigned int link_id = link_conf->link_id; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvmsta->vif); int cmd_ver; int ret; @@ -652,7 +655,10 @@ void iwl_mvm_rs_fw_rate_init(struct iwl_mvm *mvm, * since TLC offload works with one mode we can assume * that only vht/ht is used and also set it as station max amsdu */ - sta->deflink.agg.max_amsdu_len = max_amsdu_len; + link_sta->agg.max_amsdu_len = max_amsdu_len; + ieee80211_sta_recalc_aggregates(sta); + + cfg_cmd.max_tx_op = cpu_to_le16(mvmvif->max_tx_op); cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 0); IWL_DEBUG_RATE(mvm, "TLC CONFIG CMD, sta_id=%d, max_ch_width=%d, mode=%d\n", diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index 6cd4ec4d8f34..ea81cb236d5c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -3,7 +3,7 @@ * * Copyright(c) 2015 Intel Mobile Communications GmbH * Copyright(c) 2017 Intel Deutschland GmbH - * Copyright (C) 2003 - 2014, 2018 - 2023 Intel Corporation + * Copyright (C) 2003 - 2014, 2018 - 2024 Intel Corporation *****************************************************************************/ #ifndef __rs_h__ @@ -198,11 +198,12 @@ struct rs_rate { /** * struct iwl_lq_sta_rs_fw - rate and related statistics for RS in FW * @last_rate_n_flags: last rate reported by FW + * @pers: persistent fields * @pers.sta_id: the id of the station - * @chains: bitmask of chains reported in %chain_signal - * @chain_signal: per chain signal strength - * @last_rssi: last rssi reported - * @drv: pointer back to the driver data + * @pers.chains: bitmask of chains reported in %chain_signal + * @pers.chain_signal: per chain signal strength + * @pers.last_rssi: last rssi reported + * @pers.drv: pointer back to the driver data */ struct iwl_lq_sta_rs_fw { /* last tx rate_n_flags */ @@ -213,11 +214,11 @@ struct iwl_lq_sta_rs_fw { u32 sta_id; #ifdef CONFIG_MAC80211_DEBUGFS /** - * @dbg_fixed_rate: for debug, use fixed rate if not 0 + * @pers.dbg_fixed_rate: for debug, use fixed rate if not 0 */ u32 dbg_fixed_rate; /** - * @dbg_agg_frame_count_lim: for debug, max number of + * @pers.dbg_agg_frame_count_lim: for debug, max number of * frames in A-MPDU */ u16 dbg_agg_frame_count_lim; @@ -402,7 +403,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int tid, struct ieee80211_tx_info *info, bool ndp); /** - * iwl_rate_control_register - Register the rate control algorithm callbacks + * iwl_mvm_rate_control_register - Register the rate control algorithm callbacks * * Since the rate control algorithm is hardware specific, there is no need * or reason to place it as a stand alone module. The driver can call @@ -414,7 +415,7 @@ void iwl_mvm_rs_tx_status(struct iwl_mvm *mvm, struct ieee80211_sta *sta, int iwl_mvm_rate_control_register(void); /** - * iwl_rate_control_unregister - Unregister the rate control callbacks + * iwl_mvm_rate_control_unregister - Unregister the rate control callbacks * * This should be called after calling ieee80211_unregister_hw, but before * the driver is unloaded. 
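Several hunks in this series (ops.c above, and tdls.c, tt.c and utils.c further down) convert open-coded mutex_lock()/mutex_unlock() pairs to the guard(mvm)(mvm) form declared by the DEFINE_GUARD(mvm, ...) line added to mvm.h earlier in the patch. As a rough, self-contained sketch of what that buys: the unlock is tied to the guard variable's scope, so early returns no longer need goto unlock labels. This is an illustration only, not the kernel implementation (which lives in <linux/cleanup.h>); struct mvm and the guard_mvm macro below are hypothetical stand-ins modelled with a plain GCC/Clang cleanup attribute.

#include <pthread.h>
#include <stdio.h>

struct mvm {
	pthread_mutex_t mutex;		/* stands in for mvm->mutex */
};

/* called automatically when the guarded variable goes out of scope */
static void mvm_unlock(struct mvm **m)
{
	pthread_mutex_unlock(&(*m)->mutex);
	printf("unlocked on scope exit\n");
}

/* rough model of guard(mvm)(m): lock now, unlock at end of scope */
#define guard_mvm(m)							\
	struct mvm *_guard __attribute__((cleanup(mvm_unlock))) = (m);	\
	pthread_mutex_lock(&_guard->mutex)

static int do_work(struct mvm *m, int fail)
{
	guard_mvm(m);

	if (fail)
		return -1;	/* early return: unlock still runs */

	return 0;		/* unlock also runs on the normal path */
}

int main(void)
{
	struct mvm m = { .mutex = PTHREAD_MUTEX_INITIALIZER };

	do_work(&m, 0);
	do_work(&m, 1);	/* both calls print "unlocked on scope exit" */
	return 0;
}

This is the reason conversions such as iwl_mvm_tx_unblock_dwork() and the tdls.c handlers in this patch can simply return instead of jumping to an unlock label.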
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index 6e933907f985..151289e13308 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -1009,6 +1009,9 @@ static void iwl_mvm_update_esr_mode_tpt(struct iwl_mvm *mvm) spin_unlock_bh(&mvmsta->mpdu_counters[q].lock); } + IWL_DEBUG_STATS(mvm, "total Tx MPDUs: %ld. total Rx MPDUs: %ld\n", + total_tx, total_rx); + /* If we don't have enough MPDUs - exit EMLSR */ if (total_tx < IWL_MVM_ENTER_ESR_TPT_THRESH && total_rx < IWL_MVM_ENTER_ESR_TPT_THRESH) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 489cfb0a4ab1..1a210d0c22b3 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -566,7 +566,7 @@ static void iwl_mvm_release_frames(struct iwl_mvm *mvm, lockdep_assert_held(&reorder_buf->lock); while (ieee80211_sn_less(ssn, nssn)) { - int index = ssn % reorder_buf->buf_size; + int index = ssn % baid_data->buf_size; struct sk_buff_head *skb_list = &entries[index].frames; struct sk_buff *skb; @@ -617,7 +617,7 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue, spin_lock_bh(&reorder_buf->lock); iwl_mvm_release_frames(mvm, sta, NULL, ba_data, reorder_buf, ieee80211_sn_add(reorder_buf->head_sn, - reorder_buf->buf_size)); + ba_data->buf_size)); spin_unlock_bh(&reorder_buf->lock); out: @@ -839,7 +839,7 @@ static bool iwl_mvm_reorder(struct iwl_mvm *mvm, } /* put in reorder buffer */ - index = sn % buffer->buf_size; + index = sn % baid_data->buf_size; __skb_queue_tail(&entries[index].frames, skb); buffer->num_stored++; @@ -1954,6 +1954,16 @@ static void iwl_mvm_rx_fill_status(struct iwl_mvm *mvm, iwl_mvm_decode_lsig(skb, phy_data); rx_status->device_timestamp = phy_data->gp2_on_air_rise; + + if (mvm->rx_ts_ptp && mvm->monitor_on) { + u64 adj_time = + iwl_mvm_ptp_get_adj_time(mvm, phy_data->gp2_on_air_rise * NSEC_PER_USEC); + + rx_status->mactime = div64_u64(adj_time, NSEC_PER_USEC); + rx_status->flag |= RX_FLAG_MACTIME_IS_RTAP_TS64; + rx_status->flag &= ~RX_FLAG_MACTIME; + } + rx_status->freq = ieee80211_channel_to_frequency(phy_data->channel, rx_status->band); iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, @@ -2032,7 +2042,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, u32 len; u32 pkt_len = iwl_rx_packet_payload_len(pkt); struct ieee80211_sta *sta = NULL; - struct ieee80211_link_sta *link_sta = NULL; struct sk_buff *skb; u8 crypt_len = 0; u8 sta_id = le32_get_bits(desc->status, IWL_RX_MPDU_STATUS_STA_ID); @@ -2185,6 +2194,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_SRC_STA_FOUND)) { if (!WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations)) { + struct ieee80211_link_sta *link_sta; + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); if (IS_ERR(sta)) sta = NULL; @@ -2360,7 +2371,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_no_data_ver_3 *desc = (void *)pkt->data; u32 rssi; - u32 info_type; struct ieee80211_sta *sta = NULL; struct sk_buff *skb; struct iwl_mvm_rx_phy_data phy_data; @@ -2373,7 +2383,6 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, return; rssi = le32_to_cpu(desc->rssi); - info_type = le32_to_cpu(desc->info) & RX_NO_DATA_INFO_TYPE_MSK; phy_data.d0 = 
desc->phy_info[0]; phy_data.d1 = desc->phy_info[1]; phy_data.phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD; @@ -2425,7 +2434,12 @@ void iwl_mvm_rx_monitor_no_data(struct iwl_mvm *mvm, struct napi_struct *napi, /* 0-length PSDU */ rx_status->flag |= RX_FLAG_NO_PSDU; - switch (info_type) { + /* mark as failed PLCP on any errors to skip checks in mac80211 */ + if (le32_get_bits(desc->info, RX_NO_DATA_INFO_ERR_MSK) != + RX_NO_DATA_INFO_ERR_NONE) + rx_status->flag |= RX_FLAG_FAILED_PLCP_CRC; + + switch (le32_get_bits(desc->info, RX_NO_DATA_INFO_TYPE_MSK)) { case RX_NO_DATA_INFO_TYPE_NDP: rx_status->zero_length_psdu_type = IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index e975f5ff17b5..8e0df31f1b3e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -208,7 +208,7 @@ static void iwl_mvm_scan_iterator(void *_data, u8 *mac, curr_mvmvif = iwl_mvm_vif_from_mac80211(data->current_vif); - if (vif->type == NL80211_IFTYPE_AP && vif->p2p && + if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO && mvmvif->deflink.phy_ctxt && curr_mvmvif->deflink.phy_ctxt && mvmvif->deflink.phy_ctxt->id != curr_mvmvif->deflink.phy_ctxt->id) data->is_dcm_with_p2p_go = true; @@ -2878,7 +2878,7 @@ static void iwl_mvm_scan_respect_p2p_go_iter(void *_data, u8 *mac, if (vif == data->current_vif) return; - if (vif->type == NL80211_IFTYPE_AP && vif->p2p) { + if (ieee80211_vif_type_p2p(vif) == NL80211_IFTYPE_P2P_GO) { u32 link_id; for (link_id = 0; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index cc79fe991c26..15e64d94d6ea 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -857,12 +857,6 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, size = iwl_mvm_get_queue_size(sta); } - /* take the min with bc tbl entries allowed */ - size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16)); - - /* size needs to be power of 2 values for calculating read/write pointers */ - size = rounddown_pow_of_two(size); - if (sta) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); struct ieee80211_link_sta *link_sta; @@ -887,22 +881,13 @@ int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm, if (!sta_mask) return -EINVAL; - do { - queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask, - tid, size, timeout); + queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask, + tid, size, timeout); - if (queue < 0) - IWL_DEBUG_TX_QUEUES(mvm, - "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %d\n", - size, sta_mask, tid, queue); - size /= 2; - } while (queue < 0 && size >= 16); - - if (queue < 0) - return queue; - - IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta mask 0x%x tid %d\n", - queue, sta_mask, tid); + if (queue >= 0) + IWL_DEBUG_TX_QUEUES(mvm, + "Enabling TXQ #%d for sta mask 0x%x tid %d\n", + queue, sta_mask, tid); return queue; } @@ -2758,7 +2743,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, */ WARN_ON(1); - for (j = 0; j < reorder_buf->buf_size; j++) + for (j = 0; j < data->buf_size; j++) __skb_queue_purge(&entries[j].frames); spin_unlock_bh(&reorder_buf->lock); @@ -2767,7 +2752,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, struct iwl_mvm_baid_data *data, - u16 ssn, u16 buf_size) + u16 ssn) { int i; @@ -2780,12 +2765,10 @@ static void 
iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, reorder_buf->num_stored = 0; reorder_buf->head_sn = ssn; - reorder_buf->buf_size = buf_size; spin_lock_init(&reorder_buf->lock); - reorder_buf->mvm = mvm; reorder_buf->queue = i; reorder_buf->valid = false; - for (j = 0; j < reorder_buf->buf_size; j++) + for (j = 0; j < data->buf_size; j++) __skb_queue_head_init(&entries[j].frames); } } @@ -2994,13 +2977,14 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, baid_data->mvm = mvm; baid_data->tid = tid; baid_data->sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1); + baid_data->buf_size = buf_size; mvm_sta->tid_to_baid[tid] = baid; if (timeout) mod_timer(&baid_data->session_timer, TU_TO_EXP_TIME(timeout * 2)); - iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size); + iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn); /* * protect the BA data with RCU to cover a case where our * internal RX sync mechanism will timeout (not that it's @@ -4433,6 +4417,7 @@ void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count, bool tx, int queue) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvm_sta->vif); + struct iwl_mvm *mvm = mvmvif->mvm; struct iwl_mvm_tpt_counter *queue_counter; struct iwl_mvm_mpdu_counter *link_counter; u32 total_mpdus = 0; @@ -4469,6 +4454,8 @@ void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count, memset(queue_counter->per_link, 0, sizeof(queue_counter->per_link)); queue_counter->window_start = jiffies; + + IWL_DEBUG_STATS(mvm, "MPDU counters are cleared\n"); } for (int i = 0; i < IWL_MVM_FW_MAX_LINK_ID; i++) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 754a05a8c189..0dc83d6afb3c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -478,7 +478,7 @@ struct iwl_mvm_int_sta { }; /** - * Send the STA info to the FW. + * iwl_mvm_sta_send_to_fw - Send the STA info to the FW. 
* * @mvm: the iwl_mvm* to use * @sta: the STA diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c index e7d5f4ebeb25..3d25ff5cd7e8 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tdls.c @@ -2,7 +2,7 @@ /* * Copyright (C) 2014 Intel Mobile Communications GmbH * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020, 2022-2023 Intel Corporation + * Copyright (C) 2018-2020, 2022-2024 Intel Corporation */ #include #include "mvm.h" @@ -151,7 +151,7 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, u32 duration = 2 * vif->bss_conf.dtim_period * vif->bss_conf.beacon_int; /* Protect the session to hear the TDLS setup response on the channel */ - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) iwl_mvm_schedule_session_protection(mvm, vif, duration, @@ -159,7 +159,6 @@ void iwl_mvm_mac_mgd_protect_tdls_discover(struct ieee80211_hw *hw, else iwl_mvm_protect_session(mvm, vif, duration, duration, 100, true); - mutex_unlock(&mvm->mutex); } static const char * @@ -460,21 +459,21 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work) int ret; mvm = container_of(work, struct iwl_mvm, tdls_cs.dwork.work); - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); /* called after an active channel switch has finished or timed-out */ iwl_mvm_tdls_update_cs_state(mvm, IWL_MVM_TDLS_SW_IDLE); /* station might be gone, in that case do nothing */ if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) - goto out; + return; sta = rcu_dereference_protected( mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id], lockdep_is_held(&mvm->mutex)); /* the station may not be here, but if it is, it must be a TDLS peer */ if (!sta || IS_ERR(sta) || WARN_ON(!sta->tdls)) - goto out; + return; mvmsta = iwl_mvm_sta_from_mac80211(sta); vif = mvmsta->vif; @@ -493,8 +492,6 @@ void iwl_mvm_tdls_ch_switch_work(struct work_struct *work) /* retry after a DTIM if we failed sending now */ delay = TU_TO_MS(vif->bss_conf.dtim_period * vif->bss_conf.beacon_int); schedule_delayed_work(&mvm->tdls_cs.dwork, msecs_to_jiffies(delay)); -out: - mutex_unlock(&mvm->mutex); } int @@ -509,7 +506,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, unsigned int delay; int ret; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); IWL_DEBUG_TDLS(mvm, "TDLS channel switch with %pM ch %d width %d\n", sta->addr, chandef->chan->center_freq, chandef->width); @@ -519,8 +516,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, IWL_DEBUG_TDLS(mvm, "Existing peer. Can't start switch with %pM\n", sta->addr); - ret = -EBUSY; - goto out; + return -EBUSY; } ret = iwl_mvm_tdls_config_channel_switch(mvm, vif, @@ -529,17 +525,15 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, oper_class, chandef, 0, 0, 0, tmpl_skb, ch_sw_tm_ie); if (ret) - goto out; + return ret; /* * Mark the peer as "in tdls switch" for this vif. We only allow a * single such peer per vif. 
*/ mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL); - if (!mvm->tdls_cs.peer.skb) { - ret = -ENOMEM; - goto out; - } + if (!mvm->tdls_cs.peer.skb) + return -ENOMEM; mvmsta = iwl_mvm_sta_from_mac80211(sta); mvm->tdls_cs.peer.sta_id = mvmsta->deflink.sta_id; @@ -556,10 +550,7 @@ iwl_mvm_tdls_channel_switch(struct ieee80211_hw *hw, vif->bss_conf.beacon_int); mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, msecs_to_jiffies(delay)); - -out: - mutex_unlock(&mvm->mutex); - return ret; + return 0; } void iwl_mvm_tdls_cancel_channel_switch(struct ieee80211_hw *hw, @@ -626,7 +617,7 @@ iwl_mvm_tdls_recv_channel_switch(struct ieee80211_hw *hw, params->action_code == WLAN_TDLS_CHANNEL_SWITCH_REQUEST ? "REQ" : "RESP"; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); IWL_DEBUG_TDLS(mvm, "Received TDLS ch switch action %s from %pM status %d\n", @@ -670,5 +661,4 @@ retry: 1024 / 1000; mod_delayed_work(system_wq, &mvm->tdls_cs.dwork, msecs_to_jiffies(delay)); - mutex_unlock(&mvm->mutex); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c index f49e3c98b1ba..47b8e7b64ead 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tests/links.c @@ -208,6 +208,7 @@ static void setup_link_conf(struct kunit *test) bss_load->channel_util = params->channel_util; rcu_assign_pointer(bss.ies, ies); + rcu_assign_pointer(bss.beacon_ies, ies); } static void test_link_grading(struct kunit *test) @@ -393,9 +394,6 @@ static void test_valid_link_pair(struct kunit *test) chandef_a.width = params->cw_a ?: NL80211_CHAN_WIDTH_20; chandef_b.width = params->cw_b ?: NL80211_CHAN_WIDTH_20; -#ifdef CONFIG_IWLWIFI_SUPPORT_DEBUG_OVERRIDES - trans->dbg_cfg = default_dbg_config; -#endif mvm.trans = trans; mvm.last_bt_notif.wifi_loss_low_rssi = params->bt; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c index 31bc80cdcb7d..a8c42ce3b630 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/time-event.c @@ -47,12 +47,13 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm, static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm) { + struct ieee80211_vif *bss_vif = iwl_mvm_get_bss_vif(mvm); struct ieee80211_vif *vif = mvm->p2p_device_vif; lockdep_assert_held(&mvm->mutex); /* - * Clear the ROC_RUNNING status bit. + * Clear the ROC_P2P_RUNNING status bit. * This will cause the TX path to drop offchannel transmissions. * That would also be done by mac80211, but it is racy, in particular * in the case that the time event actually completed in the firmware. @@ -62,7 +63,7 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm) * won't get stuck on the queue and be transmitted in the next * time event. */ - if (test_and_clear_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) { + if (test_and_clear_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status)) { struct iwl_mvm_vif *mvmvif; synchronize_net(); @@ -99,7 +100,14 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm) } } - /* Do the same for AUX ROC */ + /* + * P2P AUX ROC and HS2.0 ROC do not run simultaneously. + * Clear the ROC_AUX_RUNNING status bit. + * This will cause the TX path to drop offchannel transmissions. + * That would also be done by mac80211, but it is racy, in particular + * in the case that the time event actually completed in the firmware + * (which is handled in iwl_mvm_te_handle_notif). 
+ */ if (test_and_clear_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) { synchronize_net(); @@ -119,9 +127,9 @@ static void iwl_mvm_cleanup_roc(struct iwl_mvm *mvm) iwl_mvm_rm_aux_sta(mvm); } + if (!IS_ERR_OR_NULL(bss_vif)) + iwl_mvm_unblock_esr(mvm, bss_vif, IWL_MVM_ESR_BLOCKED_ROC); mutex_unlock(&mvm->mutex); - if (vif) - iwl_mvm_esr_non_bss_link(mvm, vif, 0, false); } void iwl_mvm_roc_done_wk(struct work_struct *wk) @@ -214,6 +222,8 @@ static bool iwl_mvm_te_check_disconnect(struct iwl_mvm *mvm, iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_ASSOC_FAILED, NULL); + + mvmvif->session_prot_connection_loss = true; } iwl_mvm_connection_loss(mvm, vif, errmsg); @@ -378,7 +388,7 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, te_data->end_jiffies = TU_TO_EXP_TIME(te_data->duration); if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) { - set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); + set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status); ieee80211_ready_on_channel(mvm->hw); } else if (te_data->id == TE_CHANNEL_SWITCH_PERIOD) { iwl_mvm_te_handle_notify_csa(mvm, te_data, notif); @@ -388,14 +398,51 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm, } } +struct iwl_mvm_rx_roc_iterator_data { + u32 activity; + bool end_activity; + bool found; +}; + +static void iwl_mvm_rx_roc_iterator(void *_data, u8 *mac, + struct ieee80211_vif *vif) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_mvm_rx_roc_iterator_data *data = _data; + + if (mvmvif->roc_activity == data->activity) { + data->found = true; + if (data->end_activity) + mvmvif->roc_activity = ROC_NUM_ACTIVITIES; + } +} + void iwl_mvm_rx_roc_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) { struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_roc_notif *notif = (void *)pkt->data; + u32 activity = le32_to_cpu(notif->activity); + bool started = le32_to_cpu(notif->success) && + le32_to_cpu(notif->started); + struct iwl_mvm_rx_roc_iterator_data data = { + .activity = activity, + .end_activity = !started, + }; - if (le32_to_cpu(notif->success) && le32_to_cpu(notif->started) && - le32_to_cpu(notif->activity) == ROC_ACTIVITY_HOTSPOT) { + /* Clear vif roc_activity if done (set to ROC_NUM_ACTIVITIES) */ + ieee80211_iterate_active_interfaces_atomic(mvm->hw, + IEEE80211_IFACE_ITER_NORMAL, + iwl_mvm_rx_roc_iterator, + &data); + /* + * It is possible that the ROC was canceled + * but the notification was already fired. 
+ */ + if (!data.found) + return; + + if (started) { set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); ieee80211_ready_on_channel(mvm->hw); } else { @@ -724,6 +771,21 @@ static void iwl_mvm_cancel_session_protection(struct iwl_mvm *mvm, "Couldn't send the SESSION_PROTECTION_CMD: %d\n", ret); } +static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity) +{ + struct iwl_roc_req roc_cmd = { + .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), + .activity = cpu_to_le32(activity), + }; + int ret; + + lockdep_assert_held(&mvm->mutex); + ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0, + sizeof(roc_cmd), &roc_cmd); + if (ret) + IWL_ERR(mvm, "Couldn't send the ROC_CMD: %d\n", ret); +} + static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, struct iwl_mvm_time_event_data *te_data, u32 *uid) @@ -733,6 +795,9 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, struct iwl_mvm_vif *mvmvif; enum nl80211_iftype iftype; s8 link_id; + bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm); + u8 roc_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0); if (!vif) return false; @@ -757,14 +822,22 @@ static bool __iwl_mvm_remove_time_event(struct iwl_mvm *mvm, iwl_mvm_te_clear_data(mvm, te_data); spin_unlock_bh(&mvm->time_event_lock); - /* When session protection is used, the te_data->id field - * is reused to save session protection's configuration. - * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id field is set - * to HOT_SPOT_CMD. - */ - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) && - id != HOT_SPOT_CMD) { + if ((p2p_aux && iftype == NL80211_IFTYPE_P2P_DEVICE) || + (roc_ver >= 3 && mvmvif->roc_activity == ROC_ACTIVITY_HOTSPOT)) { + if (mvmvif->roc_activity < ROC_NUM_ACTIVITIES) { + iwl_mvm_roc_rm_cmd(mvm, mvmvif->roc_activity); + mvmvif->roc_activity = ROC_NUM_ACTIVITIES; + iwl_mvm_roc_finished(mvm); + } + return false; + } else if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD) && + id != HOT_SPOT_CMD) { + /* When session protection is used, the te_data->id field + * is reused to save session protection's configuration. + * For AUX ROC, HOT_SPOT_CMD is used and the te_data->id + * field is set to HOT_SPOT_CMD. + */ if (mvmvif && id < SESSION_PROTECT_CONF_MAX_ID) { /* Session protection is still ongoing. 
Cancel it */ iwl_mvm_cancel_session_protection(mvm, vif, id, @@ -965,7 +1038,7 @@ void iwl_mvm_rx_session_protect_notif(struct iwl_mvm *mvm, if (WARN_ON(mvmvif->time_event_data.id != le32_to_cpu(notif->conf_id))) goto out_unlock; - set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status); + set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status); ieee80211_ready_on_channel(mvm->hw); /* Start TE */ } @@ -984,12 +1057,21 @@ void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif, u32 *duration_tu, u32 *delay) { - u32 dtim_interval = vif->bss_conf.dtim_period * - vif->bss_conf.beacon_int; + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; + u32 dtim_interval = 0; *delay = AUX_ROC_MIN_DELAY; *duration_tu = MSEC_TO_TU(duration_ms); + rcu_read_lock(); + for_each_vif_active_link(vif, link_conf, link_id) { + dtim_interval = + max_t(u32, dtim_interval, + link_conf->dtim_period * link_conf->beacon_int); + } + rcu_read_unlock(); + /* * If we are associated we want the delay time to be at least one * dtim interval so that the FW can wait until after the DTIM and @@ -998,8 +1080,10 @@ void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif, * Since we want to use almost a whole dtim interval we would also * like the delay to be for 2-3 dtim intervals, in case there are * other time events with higher priority. + * dtim_interval should never be 0, it can be 1 if we don't know it + * (we haven't heard any beacon yet). */ - if (vif->cfg.assoc) { + if (vif->cfg.assoc && !WARN_ON(!dtim_interval)) { *delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); /* We cannot remain off-channel longer than the DTIM interval */ if (dtim_interval <= *duration_tu) { @@ -1014,7 +1098,7 @@ void iwl_mvm_roc_duration_and_delay(struct ieee80211_vif *vif, int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm, struct ieee80211_channel *channel, struct ieee80211_vif *vif, - int duration, u32 activity) + int duration, enum iwl_roc_activity activity) { int res; u32 duration_tu, delay; @@ -1023,9 +1107,13 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm, .activity = cpu_to_le32(activity), .sta_id = cpu_to_le32(mvm->aux_sta.sta_id), }; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); lockdep_assert_held(&mvm->mutex); + if (WARN_ON(mvmvif->roc_activity != ROC_NUM_ACTIVITIES)) + return -EBUSY; + /* Set the channel info data */ iwl_mvm_set_chan_info(mvm, &roc_req.channel_info, channel->hw_value, @@ -1041,14 +1129,16 @@ int iwl_mvm_roc_add_cmd(struct iwl_mvm *mvm, "\t(requested = %ums, max_delay = %ums)\n", duration, delay); IWL_DEBUG_TE(mvm, - "Requesting to remain on channel %u for %utu\n", - channel->hw_value, duration_tu); + "Requesting to remain on channel %u for %utu. 
activity %u\n", + channel->hw_value, duration_tu, activity); /* Set the node address */ memcpy(roc_req.node_addr, vif->addr, ETH_ALEN); res = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0, sizeof(roc_req), &roc_req); + if (!res) + mvmvif->roc_activity = activity; return res; } @@ -1191,62 +1281,40 @@ void iwl_mvm_cleanup_roc_te(struct iwl_mvm *mvm) __iwl_mvm_remove_time_event(mvm, te_data, &uid); } -static void iwl_mvm_roc_rm_cmd(struct iwl_mvm *mvm, u32 activity) -{ - int ret; - struct iwl_roc_req roc_cmd = { - .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), - .activity = cpu_to_le32(activity), - }; - - lockdep_assert_held(&mvm->mutex); - ret = iwl_mvm_send_cmd_pdu(mvm, - WIDE_ID(MAC_CONF_GROUP, ROC_CMD), - 0, sizeof(roc_cmd), &roc_cmd); - WARN_ON(ret); -} - -static void iwl_mvm_roc_station_remove(struct iwl_mvm *mvm, - struct iwl_mvm_vif *mvmvif) -{ - u32 cmd_id = WIDE_ID(MAC_CONF_GROUP, ROC_CMD); - u8 fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, - IWL_FW_CMD_VER_UNKNOWN); - - if (fw_ver == IWL_FW_CMD_VER_UNKNOWN) - iwl_mvm_remove_aux_roc_te(mvm, mvmvif, - &mvmvif->hs_time_event_data); - else if (fw_ver == 3) - iwl_mvm_roc_rm_cmd(mvm, ROC_ACTIVITY_HOTSPOT); - else - IWL_ERR(mvm, "ROC command version %d mismatch!\n", fw_ver); -} - void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) { - struct iwl_mvm_vif *mvmvif; + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); struct iwl_mvm_time_event_data *te_data; + bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm); + u8 roc_ver = iwl_fw_lookup_cmd_ver(mvm->fw, + WIDE_ID(MAC_CONF_GROUP, ROC_CMD), 0); + int iftype = vif->type; mutex_lock(&mvm->mutex); - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { - mvmvif = iwl_mvm_vif_from_mac80211(vif); + if (p2p_aux || (roc_ver >= 3 && iftype != NL80211_IFTYPE_P2P_DEVICE)) { + if (mvmvif->roc_activity < ROC_NUM_ACTIVITIES) { + iwl_mvm_roc_rm_cmd(mvm, mvmvif->roc_activity); + mvmvif->roc_activity = ROC_NUM_ACTIVITIES; + } + goto cleanup_roc; + } else if (fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { te_data = &mvmvif->time_event_data; - if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { + if (iftype == NL80211_IFTYPE_P2P_DEVICE) { if (te_data->id >= SESSION_PROTECT_CONF_MAX_ID) { IWL_DEBUG_TE(mvm, "No remain on channel event\n"); mutex_unlock(&mvm->mutex); return; } - iwl_mvm_cancel_session_protection(mvm, vif, te_data->id, te_data->link_id); } else { - iwl_mvm_roc_station_remove(mvm, mvmvif); + iwl_mvm_remove_aux_roc_te(mvm, mvmvif, + &mvmvif->hs_time_event_data); } goto cleanup_roc; } @@ -1259,8 +1327,8 @@ void iwl_mvm_stop_roc(struct iwl_mvm *mvm, struct ieee80211_vif *vif) } mvmvif = iwl_mvm_vif_from_mac80211(te_data->vif); - - if (te_data->vif->type == NL80211_IFTYPE_P2P_DEVICE) + iftype = te_data->vif->type; + if (iftype == NL80211_IFTYPE_P2P_DEVICE) iwl_mvm_remove_time_event(mvm, mvmvif, te_data); else iwl_mvm_remove_aux_roc_te(mvm, mvmvif, te_data); @@ -1271,9 +1339,10 @@ cleanup_roc: * (so the status bit isn't set) set it here so iwl_mvm_cleanup_roc will * cleanup things properly */ - set_bit(vif->type == NL80211_IFTYPE_P2P_DEVICE ? 
- IWL_MVM_STATUS_ROC_RUNNING : IWL_MVM_STATUS_ROC_AUX_RUNNING, - &mvm->status); + if (p2p_aux || iftype != NL80211_IFTYPE_P2P_DEVICE) + set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); + else + set_bit(IWL_MVM_STATUS_ROC_P2P_RUNNING, &mvm->status); /* Mutex is released inside this function */ iwl_mvm_cleanup_roc(mvm); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c index f8b08f98daa0..ed0796aff722 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tt.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2012-2014, 2019-2022 Intel Corporation + * Copyright (C) 2012-2014, 2019-2022, 2024 Intel Corporation * Copyright (C) 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015-2016 Intel Deutschland GmbH */ @@ -299,7 +299,7 @@ static void check_exit_ctkill(struct work_struct *work) ret = iwl_mvm_get_temp(mvm, &temp); - __iwl_mvm_mac_stop(mvm); + __iwl_mvm_mac_stop(mvm, false); if (ret) goto reschedule; @@ -618,48 +618,35 @@ static int iwl_mvm_tzone_get_temp(struct thermal_zone_device *device, int ret; int temp; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (!iwl_mvm_firmware_running(mvm) || - mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { - ret = -ENODATA; - goto out; - } + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) + return -ENODATA; ret = iwl_mvm_get_temp(mvm, &temp); if (ret) - goto out; + return ret; *temperature = temp * 1000; - -out: - mutex_unlock(&mvm->mutex); - return ret; + return 0; } static int iwl_mvm_tzone_set_trip_temp(struct thermal_zone_device *device, const struct thermal_trip *trip, int temp) { struct iwl_mvm *mvm = thermal_zone_device_priv(device); - int ret; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (!iwl_mvm_firmware_running(mvm) || - mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { - ret = -EIO; - goto out; - } + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) + return -EIO; - if ((temp / 1000) > S16_MAX) { - ret = -EINVAL; - goto out; - } + if ((temp / 1000) > S16_MAX) + return -EINVAL; - ret = iwl_mvm_send_temp_report_ths_cmd(mvm); -out: - mutex_unlock(&mvm->mutex); - return ret; + return iwl_mvm_send_temp_report_ths_cmd(mvm); } static struct thermal_zone_device_ops tzone_ops = { @@ -733,27 +720,18 @@ static int iwl_mvm_tcool_set_cur_state(struct thermal_cooling_device *cdev, unsigned long new_state) { struct iwl_mvm *mvm = (struct iwl_mvm *)(cdev->devdata); - int ret; - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (!iwl_mvm_firmware_running(mvm) || - mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) { - ret = -EIO; - goto unlock; - } + mvm->fwrt.cur_fw_img != IWL_UCODE_REGULAR) + return -EIO; - if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) { - ret = -EINVAL; - goto unlock; - } + if (new_state >= ARRAY_SIZE(iwl_mvm_cdev_budgets)) + return -EINVAL; - ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, - new_state); - -unlock: - mutex_unlock(&mvm->mutex); - return ret; + return iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START, + new_state); } static const struct thermal_cooling_device_ops tcooling_ops = { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index 1d695ece93e9..7ff5ea5e7aca 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -12,7 +12,7 @@ #include #include "iwl-trans.h" -#include "iwl-eeprom-parse.h" +#include "iwl-nvm-utils.h" #include "mvm.h" #include "sta.h" #include "time-sync.h" @@ 
-802,10 +802,30 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) if (info.control.vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info.control.vif); + bool p2p_aux = iwl_mvm_has_p2p_over_aux(mvm); - if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE || - info.control.vif->type == NL80211_IFTYPE_AP || - info.control.vif->type == NL80211_IFTYPE_ADHOC) { + if ((info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE && + p2p_aux) || + (info.control.vif->type == NL80211_IFTYPE_STATION && + offchannel)) { + /* + * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets + * that can be used in 2 different types of vifs, P2P + * Device and STATION. + * P2P Device uses the offchannel queue. + * STATION (HS2.0) uses the auxiliary context of the FW, + * and hence needs to be sent on the aux queue. + * If P2P_DEV_OVER_AUX is supported (p2p_aux = true) + * also P2P Device uses the aux queue. + */ + sta_id = mvm->aux_sta.sta_id; + queue = mvm->aux_queue; + if (WARN_ON(queue == IWL_MVM_INVALID_QUEUE)) + return -1; + } else if (info.control.vif->type == + NL80211_IFTYPE_P2P_DEVICE || + info.control.vif->type == NL80211_IFTYPE_AP || + info.control.vif->type == NL80211_IFTYPE_ADHOC) { u32 link_id = u32_get_bits(info.control.flags, IEEE80211_TX_CTRL_MLO_LINK); struct iwl_mvm_vif_link_info *link; @@ -831,18 +851,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { queue = mvm->snif_queue; sta_id = mvm->snif_sta.sta_id; - } else if (info.control.vif->type == NL80211_IFTYPE_STATION && - offchannel) { - /* - * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets - * that can be used in 2 different types of vifs, P2P & - * STATION. - * P2P uses the offchannel queue. - * STATION (HS2.0) uses the auxiliary context of the FW, - * and hence needs to be sent on the aux queue. 
- */ - sta_id = mvm->aux_sta.sta_id; - queue = mvm->aux_queue; } } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 47283a358ffd..0e5fa8374103 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -892,7 +892,7 @@ static void iwl_mvm_tcm_iter(void *_data, u8 *mac, struct ieee80211_vif *vif) static void iwl_mvm_tcm_results(struct iwl_mvm *mvm) { - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); ieee80211_iterate_active_interfaces( mvm->hw, IEEE80211_IFACE_ITER_NORMAL, @@ -900,8 +900,6 @@ static void iwl_mvm_tcm_results(struct iwl_mvm *mvm) if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) iwl_mvm_config_scan(mvm); - - mutex_unlock(&mvm->mutex); } static void iwl_mvm_tcm_uapsd_nonagg_detected_wk(struct work_struct *wk) @@ -1130,10 +1128,9 @@ void iwl_mvm_recalc_tcm(struct iwl_mvm *mvm) spin_unlock(&mvm->tcm.lock); if (handle_uapsd && iwl_mvm_has_new_rx_api(mvm)) { - mutex_lock(&mvm->mutex); + guard(mvm)(mvm); if (iwl_mvm_request_statistics(mvm, true)) handle_uapsd = false; - mutex_unlock(&mvm->mutex); } spin_lock(&mvm->tcm.lock); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c index ebf11f276b20..e63efbf809f0 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -216,7 +216,7 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, ctxt_info_gen3->cr_tail_idx_arr_base_addr = cpu_to_le64(trans_pcie->prph_info_dma_addr + 3 * PAGE_SIZE / 4); ctxt_info_gen3->mtr_base_addr = - cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr); + cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr); ctxt_info_gen3->mcr_base_addr = cpu_to_le64(trans_pcie->rxq->used_bd_dma); ctxt_info_gen3->mtr_size = diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index 0fa92704cd14..344e4d5a1c6e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ #include "iwl-trans.h" #include "iwl-fh.h" @@ -218,7 +218,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, /* initialize TX command queue */ ctxt_info->hcmd_cfg.cmd_queue_addr = - cpu_to_le64(trans->txqs.txq[trans->txqs.cmd.q_id]->dma_addr); + cpu_to_le64(trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]->dma_addr); ctxt_info->hcmd_cfg.cmd_queue_size = TFD_QUEUE_CB_SIZE(IWL_CMD_QUEUE_SIZE); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index fed2754be680..9ad43464b702 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -503,7 +503,37 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x2727, PCI_ANY_ID, iwl_bz_trans_cfg)}, {IWL_PCI_DEVICE(0x272D, PCI_ANY_ID, iwl_bz_trans_cfg)}, {IWL_PCI_DEVICE(0x272b, PCI_ANY_ID, iwl_bz_trans_cfg)}, - {IWL_PCI_DEVICE(0xA840, PCI_ANY_ID, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0000, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0090, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0094, 
iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0098, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x009C, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00C0, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00C4, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00E0, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00E4, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00E8, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x00EC, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0100, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0110, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0114, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0118, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x011C, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0310, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0314, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0510, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x0A10, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1671, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1672, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1771, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1772, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1791, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x1792, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x4090, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x40C4, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x40E0, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x4110, iwl_bz_trans_cfg)}, + {IWL_PCI_DEVICE(0xA840, 0x4314, iwl_bz_trans_cfg)}, {IWL_PCI_DEVICE(0x7740, PCI_ANY_ID, iwl_bz_trans_cfg)}, {IWL_PCI_DEVICE(0x4D40, PCI_ANY_ID, iwl_bz_trans_cfg)}, @@ -997,32 +1027,6 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_160, IWL_CFG_ANY, IWL_CFG_CDB, iwlax411_2ax_cfg_so_gf4_a0, iwl_ax411_name), -/* Bz */ -/* FIXME: need to change the naming according to the actual CRF */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_bz, iwl_fm_name), - - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, - iwl_cfg_bz, iwl_fm_name), - -/* Ga (Gl) */ - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_gl, iwl_gl_name), - _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, - IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, - IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB, - iwl_cfg_gl, iwl_mtp_name), - /* SoF with JF2 */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SOF, IWL_CFG_ANY, @@ -1103,6 +1107,32 @@ VISIBLE_IF_IWLWIFI_KUNIT const struct iwl_dev_info iwl_dev_info_table[] = { IWL_CFG_NO_160, IWL_CFG_CORES_BT, IWL_CFG_NO_CDB, iwlax210_2ax_cfg_so_jf_b0, iwl9462_name), +/* Bz */ +/* FIXME: need to change the naming according to the actual CRF */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_bz, iwl_fm_name), + + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_BZ_W, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_ANY, + iwl_cfg_bz, iwl_fm_name), + +/* Ga (Gl) */ + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + 
IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_320, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_gl, iwl_gl_name), + _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_MAC_TYPE_GL, IWL_CFG_ANY, + IWL_CFG_RF_TYPE_FM, IWL_CFG_ANY, IWL_CFG_ANY, + IWL_CFG_NO_320, IWL_CFG_ANY, IWL_CFG_NO_CDB, + iwl_cfg_gl, iwl_mtp_name), + /* Sc */ _IWL_DEV_INFO(IWL_CFG_ANY, IWL_CFG_ANY, IWL_CFG_MAC_TYPE_SC, IWL_CFG_ANY, @@ -1476,6 +1506,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (!iwl_trans->name) iwl_trans->name = iwl_trans->cfg->name; + IWL_INFO(iwl_trans, "Detected %s\n", iwl_trans->name); + if (iwl_trans->trans_cfg->mq_rx_supported) { if (WARN_ON(!iwl_trans->cfg->num_rbds)) { ret = -EINVAL; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index a7eebe400b5b..b59de4f80b4b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ /* - * Copyright (C) 2003-2015, 2018-2023 Intel Corporation + * Copyright (C) 2003-2015, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -22,7 +22,6 @@ #include "iwl-io.h" #include "iwl-op-mode.h" #include "iwl-drv.h" -#include "queue/tx.h" #include "iwl-context-info.h" /* @@ -273,7 +272,7 @@ enum iwl_pcie_fw_reset_state { }; /** - * enum wl_pcie_imr_status - imr dma transfer state + * enum iwl_pcie_imr_status - imr dma transfer state * @IMR_D2S_IDLE: default value of the dma transfer * @IMR_D2S_REQUESTED: dma transfer requested * @IMR_D2S_COMPLETED: dma transfer completed @@ -286,6 +285,58 @@ enum iwl_pcie_imr_status { IMR_D2S_ERROR, }; +/** + * struct iwl_pcie_txqs - TX queues data + * + * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes) + * @page_offs: offset from skb->cb to mac header page pointer + * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer + * @queue_used: bit mask of used queues + * @queue_stopped: bit mask of stopped queues + * @txq: array of TXQ data structures representing the TXQs + * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler + * @queue_alloc_cmd_ver: queue allocation command version + * @bc_pool: bytecount DMA allocations pool + * @bc_tbl_size: bytecount table size + * @tso_hdr_page: page allocated (per CPU) for A-MSDU headers when doing TSO + * (and similar usage) + * @cmd: command queue data + * @cmd.fifo: FIFO number + * @cmd.q_id: queue ID + * @cmd.wdg_timeout: watchdog timeout + * @tfd: TFD data + * @tfd.max_tbs: max number of buffers per TFD + * @tfd.size: TFD size + * @tfd.addr_size: TFD/TB address size + */ +struct iwl_pcie_txqs { + unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; + unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)]; + struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES]; + struct dma_pool *bc_pool; + size_t bc_tbl_size; + bool bc_table_dword; + u8 page_offs; + u8 dev_cmd_offs; + struct iwl_tso_hdr_page __percpu *tso_hdr_page; + + struct { + u8 fifo; + u8 q_id; + unsigned int wdg_timeout; + } cmd; + + struct { + u8 max_tbs; + u16 size; + u8 addr_size; + } tfd; + + struct iwl_dma_ptr scd_bc_tbls; + + u8 queue_alloc_cmd_ver; +}; + /** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data @@ -367,6 +418,7 @@ enum iwl_pcie_imr_status { * @is_down: indicates the NIC is down * @isr_stats: interrupt 
statistics * @napi_dev: (fake) netdev for NAPI registration + * @txqs: transport tx queues data. */ struct iwl_trans_pcie { struct iwl_rxq *rxq; @@ -464,6 +516,8 @@ struct iwl_trans_pcie { enum iwl_pcie_imr_status imr_status; wait_queue_head_t imr_waitq; char rf_name[32]; + + struct iwl_pcie_txqs txqs; }; static inline struct iwl_trans_pcie * @@ -538,6 +592,33 @@ void iwl_pcie_disable_ict(struct iwl_trans *trans); /***************************************************** * TX / HCMD ******************************************************/ +/* We need 2 entries for the TX command and header, and another one might + * be needed for potential data in the SKB's head. The remaining ones can + * be used for frags. + */ +#define IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) ((trans_pcie)->txqs.tfd.max_tbs - 3) + +struct iwl_tso_hdr_page { + struct page *page; + u8 *pos; +}; + +/* + * Note that we put this struct *last* in the page. By doing that, we ensure + * that no TB referencing this page can trigger the 32-bit boundary hardware + * bug. + */ +struct iwl_tso_page_info { + dma_addr_t dma_addr; + struct page *next; + refcount_t use_count; +}; + +#define IWL_TSO_PAGE_DATA_SIZE (PAGE_SIZE - sizeof(struct iwl_tso_page_info)) +#define IWL_TSO_PAGE_INFO(addr) \ + ((struct iwl_tso_page_info *)(((unsigned long)addr & PAGE_MASK) + \ + IWL_TSO_PAGE_DATA_SIZE)) + int iwl_pcie_tx_init(struct iwl_trans *trans); void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr); int iwl_pcie_tx_stop(struct iwl_trans *trans); @@ -552,10 +633,170 @@ void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd *dev_cmd, int txq_id); void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans); -int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_pcie_hcmd_complete(struct iwl_trans *trans, struct iwl_rx_cmd_buffer *rxb); void iwl_trans_pcie_tx_reset(struct iwl_trans *trans); +int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, + int slots_num, bool cmd_queue); + +dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, void *addr); +struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_cmd_meta *cmd_meta, + u8 **hdr, unsigned int hdr_room); + +void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_cmd_meta *cmd_meta); + +static inline dma_addr_t iwl_pcie_get_tso_page_phys(void *addr) +{ + dma_addr_t res; + + res = IWL_TSO_PAGE_INFO(addr)->dma_addr; + res += (unsigned long)addr & ~PAGE_MASK; + + return res; +} + +static inline dma_addr_t +iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx) +{ + return txq->first_tb_dma + + sizeof(struct iwl_pcie_first_tb_buf) * idx; +} + +static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index) +{ + return index & (q->n_window - 1); +} + +static inline void *iwl_txq_get_tfd(struct iwl_trans *trans, + struct iwl_txq *txq, int idx) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (trans->trans_cfg->gen2) + idx = iwl_txq_get_cmd_index(txq, idx); + + return (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * idx; +} + +/* + * We need this inline in case dma_addr_t is only 32-bits - since the + * hardware is always 64-bit, the issue can still occur in that case, + * so use u64 for 'phys' here to force the addition in 64-bit. 
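+ * As an illustration (the numbers are hypothetical): phys = 0xfffff000
+ * with len = 0x2000 sums to 0x100001000 in u64, so upper_32_bits()
+ * changes from 0 to 1 and the crossing is detected, whereas 32-bit
+ * arithmetic would have silently wrapped the sum to 0x1000.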
+ */ +static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len) +{ + return upper_32_bits(phys) != upper_32_bits(phys + len); +} + +int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q); + +static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (!test_and_set_bit(txq->id, trans_pcie->txqs.queue_stopped)) { + iwl_op_mode_queue_full(trans->op_mode, txq->id); + IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id); + } else { + IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n", + txq->id); + } +} + +/** + * iwl_txq_inc_wrap - increment queue index, wrap back to beginning + * @trans: the transport (for configuration data) + * @index: current index + */ +static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index) +{ + return ++index & + (trans->trans_cfg->base_params->max_tfd_queue_size - 1); +} + +/** + * iwl_txq_dec_wrap - decrement queue index, wrap back to end + * @trans: the transport (for configuration data) + * @index: current index + */ +static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index) +{ + return --index & + (trans->trans_cfg->base_params->max_tfd_queue_size - 1); +} + +void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq); + +static inline void +iwl_trans_pcie_wake_queue(struct iwl_trans *trans, struct iwl_txq *txq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (test_and_clear_bit(txq->id, trans_pcie->txqs.queue_stopped)) { + IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id); + iwl_op_mode_queue_not_full(trans->op_mode, txq->id); + } +} + +int iwl_txq_gen2_set_tb(struct iwl_trans *trans, + struct iwl_tfh_tfd *tfd, dma_addr_t addr, + u16 len); + +static inline void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans, + struct iwl_tfh_tfd *tfd) +{ + tfd->num_tbs = 0; + + iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma, + trans->invalid_tx_cmd.size); +} + +void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, + struct iwl_cmd_meta *meta, + struct iwl_tfh_tfd *tfd); + +int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, + u32 sta_mask, u8 tid, + int size, unsigned int timeout); + +int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_tx_cmd *dev_cmd, int txq_id); + +void iwl_txq_dyn_free(struct iwl_trans *trans, int queue); +void iwl_txq_gen2_tx_free(struct iwl_trans *trans); +int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, + int slots_num, bool cmd_queue); +int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, + int queue_size); + +static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans, + void *_tfd, u8 idx) +{ + struct iwl_tfd *tfd; + struct iwl_tfd_tb *tb; + + if (trans->trans_cfg->gen2) { + struct iwl_tfh_tfd *tfh_tfd = _tfd; + struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx]; + + return le16_to_cpu(tfh_tb->tb_len); + } + + tfd = (struct iwl_tfd *)_tfd; + tb = &tfd->tbs[idx]; + + return le16_to_cpu(tb->hi_n_len) >> 4; +} + +void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, + struct sk_buff_head *skbs, bool is_flush); +void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr); +void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans, + unsigned long txqs, bool freeze); +int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx); +int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm); 
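The wrap helpers above rely on max_tfd_queue_size being a power of two, so wrapping reduces to a single mask. A minimal standalone sketch of the same arithmetic (compiled outside the driver, with a hypothetical queue size of 256 standing in for max_tfd_queue_size):

	#include <assert.h>

	#define QUEUE_SIZE 256	/* hypothetical stand-in for max_tfd_queue_size */

	static int inc_wrap(int index)
	{
		return ++index & (QUEUE_SIZE - 1);	/* same mask as iwl_txq_inc_wrap() */
	}

	static int dec_wrap(int index)
	{
		return --index & (QUEUE_SIZE - 1);	/* same mask as iwl_txq_dec_wrap() */
	}

	int main(void)
	{
		assert(inc_wrap(255) == 0);	/* forward wrap back to the start */
		assert(dec_wrap(0) == 255);	/* backward wrap to the end */
		return 0;
	}

Both directions wrap without a branch, which is why the TFD queue sizes are constrained to powers of two.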
/***************************************************** * Error handling @@ -822,12 +1063,51 @@ void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); #ifdef CONFIG_IWLWIFI_DEBUGFS void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans); +void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans); #else static inline void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { } #endif void iwl_pcie_rx_allocator_work(struct work_struct *data); +/* common trans ops for all generations transports */ +void iwl_trans_pcie_configure(struct iwl_trans *trans, + const struct iwl_trans_config *trans_cfg); +int iwl_trans_pcie_start_hw(struct iwl_trans *trans); +void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans); +void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val); +void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val); +u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs); +u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg); +void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val); +int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, + void *buf, int dwords); +int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, + const void *buf, int dwords); +int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership); +struct iwl_trans_dump_data * +iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask, + const struct iwl_dump_sanitize_ops *sanitize_ops, + void *sanitize_ctx); +int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, + enum iwl_d3_status *status, + bool test, bool reset); +int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset); +void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable); +void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans); +void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, + u32 mask, u32 value); +int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, + u32 *val); +bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans); +void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans); + +/* transport gen 1 exported functions */ +void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr); +int iwl_trans_pcie_start_fw(struct iwl_trans *trans, + const struct fw_img *fw, bool run_in_rfkill); +void iwl_trans_pcie_stop_device(struct iwl_trans *trans); + /* common functions that are used by gen2 transport */ int iwl_pcie_gen2_apm_init(struct iwl_trans *trans); void iwl_pcie_apm_config(struct iwl_trans *trans); @@ -849,7 +1129,7 @@ void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power); /* transport gen 2 exported functions */ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill); -void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr); +void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans); int iwl_trans_pcie_gen2_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans); @@ -864,5 +1144,7 @@ void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans, u32 dst_addr, u64 src_addr, u32 byte_cnt); int iwl_trans_pcie_copy_imr(struct iwl_trans *trans, u32 dst_addr, u64 src_addr, u32 byte_cnt); +int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, + struct iwl_trans_rxq_dma_data *data); #endif /* __iwl_trans_int_pcie_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c 
b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index 984d7bcd381f..afb88eab8174 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2023 Intel Corporation + * Copyright (C) 2003-2014, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -1301,7 +1301,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, int i) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; + struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; bool page_stolen = false; int max_len = trans_pcie->rx_buf_bytes; u32 offset = 0; @@ -1678,6 +1678,7 @@ irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id) */ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; /* W/A for WiFi/WiMAX coex and WiMAX own the RF */ @@ -1694,9 +1695,9 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans) } for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { - if (!trans->txqs.txq[i]) + if (!trans_pcie->txqs.txq[i]) continue; - del_timer(&trans->txqs.txq[i]->stuck_timer); + del_timer(&trans_pcie->txqs.txq[i]->stuck_timer); } /* The STATUS_FW_ERROR bit is set in this function. This must happen diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index a4a4772330cf..18dda89b7985 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2023 Intel Corporation + * Copyright (C) 2018-2024 Intel Corporation */ #include "iwl-trans.h" #include "iwl-prph.h" @@ -247,7 +247,7 @@ static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans) return -ENOMEM; /* Allocate or reset and init all Tx and Command queues */ - if (iwl_txq_gen2_init(trans, trans->txqs.cmd.q_id, queue_size)) + if (iwl_txq_gen2_init(trans, trans_pcie->txqs.cmd.q_id, queue_size)) return -ENOMEM; /* enable shadow regs in HW */ @@ -339,16 +339,17 @@ static void iwl_pcie_get_rf_name(struct iwl_trans *trans) pos += scnprintf(buf + pos, buflen - pos, "\n"); } -void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr) +void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); iwl_pcie_reset_ict(trans); /* make sure all queue are not stopped/used */ - memset(trans->txqs.queue_stopped, 0, - sizeof(trans->txqs.queue_stopped)); - memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); + memset(trans_pcie->txqs.queue_stopped, 0, + sizeof(trans_pcie->txqs.queue_stopped)); + memset(trans_pcie->txqs.queue_used, 0, + sizeof(trans_pcie->txqs.queue_used)); /* now that we got alive we can free the fw image & the context info. 
* paging memory cannot be freed included since FW will still use it diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index d5a887b3a4bb..719ddc4b72c5 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2007-2015, 2018-2023 Intel Corporation + * Copyright (C) 2007-2015, 2018-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ @@ -127,8 +127,7 @@ out: kfree(buf); } -static int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, - bool retake_ownership) +int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership) { /* Reset entire device - do controller reset (results in SHRD_HW_RST) */ if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { @@ -1336,8 +1335,8 @@ void iwl_pcie_synchronize_irqs(struct iwl_trans *trans) } } -static int iwl_trans_pcie_start_fw(struct iwl_trans *trans, - const struct fw_img *fw, bool run_in_rfkill) +int iwl_trans_pcie_start_fw(struct iwl_trans *trans, + const struct fw_img *fw, bool run_in_rfkill) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool hw_rfkill; @@ -1423,7 +1422,7 @@ out: return ret; } -static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) +void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr) { iwl_pcie_reset_ict(trans); iwl_pcie_tx_start(trans, scd_addr); @@ -1458,7 +1457,7 @@ void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, iwl_trans_pcie_rf_kill(trans, hw_rfkill, false); } -static void iwl_trans_pcie_stop_device(struct iwl_trans *trans) +void iwl_trans_pcie_stop_device(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); bool was_in_rfkill; @@ -1505,9 +1504,17 @@ void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans, iwl_pcie_synchronize_irqs(trans); - iwl_clear_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); - iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) { + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_INIT); + } else { + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + iwl_clear_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_INIT_DONE); + } if (reset) { /* @@ -1552,8 +1559,7 @@ static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend) return 0; } -static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, - bool reset) +int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset) { int ret; @@ -1571,9 +1577,9 @@ static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, return 0; } -static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, - enum iwl_d3_status *status, - bool test, bool reset) +int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, + enum iwl_d3_status *status, + bool test, bool reset) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 val; @@ -1586,8 +1592,12 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, goto out; } - iwl_set_bit(trans, CSR_GP_CNTRL, - CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + 
iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ); + else + iwl_set_bit(trans, CSR_GP_CNTRL, + CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); ret = iwl_finish_nic_init(trans); if (ret) @@ -1874,7 +1884,7 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans) return 0; } -static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) +int iwl_trans_pcie_start_hw(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int ret; @@ -1886,7 +1896,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans) return ret; } -static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) +void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -1906,17 +1916,17 @@ static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans) iwl_pcie_synchronize_irqs(trans); } -static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) +void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val) { writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } -static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) +void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val) { writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } -static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) +u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs) { return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs); } @@ -1929,7 +1939,7 @@ static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans) return 0x000FFFFF; } -static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) +u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) { u32 mask = iwl_trans_pcie_prph_msk(trans); @@ -1938,8 +1948,7 @@ static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg) return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT); } -static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, - u32 val) +void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val) { u32 mask = iwl_trans_pcie_prph_msk(trans); @@ -1948,20 +1957,20 @@ static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val); } -static void iwl_trans_pcie_configure(struct iwl_trans *trans, - const struct iwl_trans_config *trans_cfg) +void iwl_trans_pcie_configure(struct iwl_trans *trans, + const struct iwl_trans_config *trans_cfg) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); /* free all first - we might be reconfigured for a different size */ iwl_pcie_free_rbs_pool(trans); - trans->txqs.cmd.q_id = trans_cfg->cmd_queue; - trans->txqs.cmd.fifo = trans_cfg->cmd_fifo; - trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout; - trans->txqs.page_offs = trans_cfg->cb_data_offs; - trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); - trans->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver; + trans_pcie->txqs.cmd.q_id = trans_cfg->cmd_queue; + trans_pcie->txqs.cmd.fifo = trans_cfg->cmd_fifo; + trans_pcie->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout; + trans_pcie->txqs.page_offs = trans_cfg->cb_data_offs; + trans_pcie->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *); + trans_pcie->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver; if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS)) trans_pcie->n_no_reclaim_cmds = 0; @@ -1980,7 +1989,7 @@ 
static void iwl_trans_pcie_configure(struct iwl_trans *trans, if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) trans_pcie->supported_dma_mask = DMA_BIT_MASK(11); - trans->txqs.bc_table_dword = trans_cfg->bc_table_dword; + trans_pcie->txqs.bc_table_dword = trans_cfg->bc_table_dword; trans_pcie->scd_set_active = trans_cfg->scd_set_active; trans->command_groups = trans_cfg->command_groups; @@ -2079,15 +2088,20 @@ void iwl_trans_pcie_free(struct iwl_trans *trans) trans->dev); mutex_destroy(&trans_pcie->mutex); - iwl_trans_free(trans); -} -static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state) -{ - if (state) - set_bit(STATUS_TPOWER_PMI, &trans->status); - else - clear_bit(STATUS_TPOWER_PMI, &trans->status); + if (trans_pcie->txqs.tso_hdr_page) { + for_each_possible_cpu(i) { + struct iwl_tso_hdr_page *p = + per_cpu_ptr(trans_pcie->txqs.tso_hdr_page, i); + + if (p && p->page) + __free_page(p->page); + } + + free_percpu(trans_pcie->txqs.tso_hdr_page); + } + + iwl_trans_free(trans); } struct iwl_trans_pcie_removal { @@ -2253,7 +2267,7 @@ out: return true; } -static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) +bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) { bool ret; @@ -2267,7 +2281,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans) return false; } -static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) +void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -2297,8 +2311,8 @@ out: spin_unlock_bh(&trans_pcie->reg_lock); } -static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, - void *buf, int dwords) +int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, + void *buf, int dwords) { #define IWL_MAX_HW_ERRS 5 unsigned int num_consec_hw_errors = 0; @@ -2347,8 +2361,8 @@ static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr, return 0; } -static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, - const void *buf, int dwords) +int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, + const void *buf, int dwords) { int offs, ret = 0; const u32 *vals = buf; @@ -2365,8 +2379,8 @@ static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr, return ret; } -static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, - u32 *val) +int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, + u32 *val) { return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev, ofs, val); @@ -2374,8 +2388,8 @@ static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs, #define IWL_FLUSH_WAIT_MS 2000 -static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, - struct iwl_trans_rxq_dma_data *data) +int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, + struct iwl_trans_rxq_dma_data *data) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -2390,8 +2404,9 @@ static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, return 0; } -static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) +int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq; unsigned long now = jiffies; bool overflow_tx; @@ -2401,11 +2416,11 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) if (test_bit(STATUS_TRANS_DEAD, 
&trans->status)) return -ENODEV; - if (!test_bit(txq_idx, trans->txqs.queue_used)) + if (!test_bit(txq_idx, trans_pcie->txqs.queue_used)) return -EINVAL; IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx); - txq = trans->txqs.txq[txq_idx]; + txq = trans_pcie->txqs.txq[txq_idx]; spin_lock_bh(&txq->lock); overflow_tx = txq->overflow_tx || @@ -2451,8 +2466,9 @@ static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) return 0; } -static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) +int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int cnt; int ret = 0; @@ -2461,9 +2477,9 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) cnt < trans->trans_cfg->base_params->num_of_queues; cnt++) { - if (cnt == trans->txqs.cmd.q_id) + if (cnt == trans_pcie->txqs.cmd.q_id) continue; - if (!test_bit(cnt, trans->txqs.queue_used)) + if (!test_bit(cnt, trans_pcie->txqs.queue_used)) continue; if (!(BIT(cnt) & txq_bm)) continue; @@ -2476,8 +2492,8 @@ static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm) return ret; } -static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, - u32 mask, u32 value) +void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg, + u32 mask, u32 value) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -2636,12 +2652,13 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v) struct iwl_dbgfs_tx_queue_priv *priv = seq->private; struct iwl_dbgfs_tx_queue_state *state = v; struct iwl_trans *trans = priv->trans; - struct iwl_txq *txq = trans->txqs.txq[state->pos]; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txqs.txq[state->pos]; seq_printf(seq, "hwq %.3u: used=%d stopped=%d ", (unsigned int)state->pos, - !!test_bit(state->pos, trans->txqs.queue_used), - !!test_bit(state->pos, trans->txqs.queue_stopped)); + !!test_bit(state->pos, trans_pcie->txqs.queue_used), + !!test_bit(state->pos, trans_pcie->txqs.queue_stopped)); if (txq) seq_printf(seq, "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d", @@ -2651,7 +2668,7 @@ static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v) else seq_puts(seq, "(unallocated)"); - if (state->pos == trans->txqs.cmd.q_id) + if (state->pos == trans_pcie->txqs.cmd.q_id) seq_puts(seq, " (HCMD)"); seq_puts(seq, "\n"); @@ -3055,7 +3072,7 @@ void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) DEBUGFS_ADD_FILE(rf, dir, 0400); } -static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) +void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct cont_rec *data = &trans_pcie->fw_mon_data; @@ -3068,10 +3085,11 @@ static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 cmdlen = 0; int i; - for (i = 0; i < trans->txqs.tfd.max_tbs; i++) + for (i = 0; i < trans_pcie->txqs.tfd.max_tbs; i++) cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i); return cmdlen; @@ -3332,15 +3350,14 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len) return 0; } -static struct iwl_trans_dump_data * -iwl_trans_pcie_dump_data(struct iwl_trans *trans, - u32 dump_mask, 
+struct iwl_trans_dump_data * +iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask, const struct iwl_dump_sanitize_ops *sanitize_ops, void *sanitize_ctx) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_fw_error_dump_data *data; - struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id]; + struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; struct iwl_fw_error_dump_txcmd *txcmd; struct iwl_trans_dump_data *dump_data; u32 len, num_rbs = 0, monitor_len = 0; @@ -3407,7 +3424,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, data = (void *)dump_data->data; if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) { - u16 tfd_size = trans->txqs.tfd.size; + u16 tfd_size = trans_pcie->txqs.tfd.size; data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); txcmd = (void *)data->data; @@ -3483,7 +3500,7 @@ iwl_trans_pcie_dump_data(struct iwl_trans *trans, return dump_data; } -static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable) +void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable) { if (enable) iwl_enable_interrupts(trans); @@ -3491,7 +3508,7 @@ static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable) iwl_disable_interrupts(trans); } -static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) +void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) { u32 inta_addr, sw_err_bit; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -3510,81 +3527,6 @@ static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans) iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit); } -#define IWL_TRANS_COMMON_OPS \ - .op_mode_leave = iwl_trans_pcie_op_mode_leave, \ - .write8 = iwl_trans_pcie_write8, \ - .write32 = iwl_trans_pcie_write32, \ - .read32 = iwl_trans_pcie_read32, \ - .read_prph = iwl_trans_pcie_read_prph, \ - .write_prph = iwl_trans_pcie_write_prph, \ - .read_mem = iwl_trans_pcie_read_mem, \ - .write_mem = iwl_trans_pcie_write_mem, \ - .read_config32 = iwl_trans_pcie_read_config32, \ - .configure = iwl_trans_pcie_configure, \ - .set_pmi = iwl_trans_pcie_set_pmi, \ - .sw_reset = iwl_trans_pcie_sw_reset, \ - .grab_nic_access = iwl_trans_pcie_grab_nic_access, \ - .release_nic_access = iwl_trans_pcie_release_nic_access, \ - .set_bits_mask = iwl_trans_pcie_set_bits_mask, \ - .dump_data = iwl_trans_pcie_dump_data, \ - .d3_suspend = iwl_trans_pcie_d3_suspend, \ - .d3_resume = iwl_trans_pcie_d3_resume, \ - .interrupts = iwl_trans_pci_interrupts, \ - .sync_nmi = iwl_trans_pcie_sync_nmi, \ - .imr_dma_data = iwl_trans_pcie_copy_imr \ - -static const struct iwl_trans_ops trans_ops_pcie = { - IWL_TRANS_COMMON_OPS, - .start_hw = iwl_trans_pcie_start_hw, - .fw_alive = iwl_trans_pcie_fw_alive, - .start_fw = iwl_trans_pcie_start_fw, - .stop_device = iwl_trans_pcie_stop_device, - - .send_cmd = iwl_pcie_enqueue_hcmd, - - .tx = iwl_trans_pcie_tx, - .reclaim = iwl_txq_reclaim, - - .txq_disable = iwl_trans_pcie_txq_disable, - .txq_enable = iwl_trans_pcie_txq_enable, - - .txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode, - - .wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty, - - .freeze_txq_timer = iwl_trans_txq_freeze_timer, -#ifdef CONFIG_IWLWIFI_DEBUGFS - .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, -#endif -}; - -static const struct iwl_trans_ops trans_ops_pcie_gen2 = { - IWL_TRANS_COMMON_OPS, - .start_hw = iwl_trans_pcie_start_hw, - .fw_alive = iwl_trans_pcie_gen2_fw_alive, - .start_fw = iwl_trans_pcie_gen2_start_fw, - .stop_device = iwl_trans_pcie_gen2_stop_device, 
- - .send_cmd = iwl_pcie_gen2_enqueue_hcmd, - - .tx = iwl_txq_gen2_tx, - .reclaim = iwl_txq_reclaim, - - .set_q_ptrs = iwl_txq_set_q_ptrs, - - .txq_alloc = iwl_txq_dyn_alloc, - .txq_free = iwl_txq_dyn_free, - .wait_txq_empty = iwl_trans_pcie_wait_txq_empty, - .rxq_dma_data = iwl_trans_pcie_rxq_dma_data, - .load_pnvm = iwl_trans_pcie_ctx_info_gen3_load_pnvm, - .set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm, - .load_reduce_power = iwl_trans_pcie_ctx_info_gen3_load_reduce_power, - .set_reduce_power = iwl_trans_pcie_ctx_info_gen3_set_reduce_power, -#ifdef CONFIG_IWLWIFI_DEBUGFS - .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, -#endif -}; - struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, const struct pci_device_id *ent, const struct iwl_cfg_trans_params *cfg_trans) @@ -3592,13 +3534,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, struct iwl_trans_pcie *trans_pcie, **priv; struct iwl_trans *trans; int ret, addr_size; - const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2; void __iomem * const *table; u32 bar0; - if (!cfg_trans->gen2) - ops = &trans_ops_pcie; - /* reassign our BAR 0 if invalid due to possible runtime PM races */ pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &bar0); if (bar0 == PCI_BASE_ADDRESS_MEM_TYPE_64) { @@ -3611,20 +3549,65 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, if (ret) return ERR_PTR(ret); - trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops, + trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, cfg_trans); if (!trans) return ERR_PTR(-ENOMEM); trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + if (trans->trans_cfg->gen2) { + trans_pcie->txqs.tfd.addr_size = 64; + trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS; + trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfh_tfd); + } else { + trans_pcie->txqs.tfd.addr_size = 36; + trans_pcie->txqs.tfd.max_tbs = IWL_NUM_OF_TBS; + trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd); + } + trans->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie); + + trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page); + if (!trans_pcie->txqs.tso_hdr_page) { + ret = -ENOMEM; + goto out_free_trans; + } + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) + trans_pcie->txqs.bc_tbl_size = + sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ; + else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) + trans_pcie->txqs.bc_tbl_size = + sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210; + else + trans_pcie->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl); + /* + * For gen2 devices, we use a single allocation for each byte-count + * table, but they're pretty small (1k) so use a DMA pool that we + * allocate here. + */ + if (trans->trans_cfg->gen2) { + trans_pcie->txqs.bc_pool = + dmam_pool_create("iwlwifi:bc", trans->dev, + trans_pcie->txqs.bc_tbl_size, + 256, 0); + if (!trans_pcie->txqs.bc_pool) { + ret = -ENOMEM; + goto out_free_tso; + } + } + + /* Some things must not change even if the config does */ + WARN_ON(trans_pcie->txqs.tfd.addr_size != + (trans->trans_cfg->gen2 ? 64 : 36)); + /* Initialize NAPI here - it should be before registering to mac80211 * in the opmode but after the HW struct is allocated. 
*/ trans_pcie->napi_dev = alloc_netdev_dummy(sizeof(struct iwl_trans_pcie *)); if (!trans_pcie->napi_dev) { ret = -ENOMEM; - goto out_free_trans; + goto out_free_tso; } /* The private struct in netdev is a pointer to struct iwl_trans_pcie */ priv = netdev_priv(trans_pcie->napi_dev); @@ -3663,7 +3646,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, pci_set_master(pdev); - addr_size = trans->txqs.tfd.addr_size; + addr_size = trans_pcie->txqs.tfd.addr_size; ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size)); if (ret) { ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); @@ -3766,6 +3749,8 @@ out_no_pci: destroy_workqueue(trans_pcie->rba.alloc_wq); out_free_ndev: free_netdev(trans_pcie->napi_dev); +out_free_tso: + free_percpu(trans_pcie->txqs.tso_hdr_page); out_free_trans: iwl_trans_free(trans); return ERR_PTR(ret); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index aabbef114bc2..2e780fb2da42 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* * Copyright (C) 2017 Intel Deutschland GmbH - * Copyright (C) 2018-2020, 2023 Intel Corporation + * Copyright (C) 2018-2020, 2023-2024 Intel Corporation */ #include <net/tso.h> #include <linux/tcp.h> @@ -11,7 +11,1180 @@ #include "iwl-io.h" #include "internal.h" #include "fw/api/tx.h" -#include "queue/tx.h" +#include "fw/api/commands.h" +#include "fw/api/datapath.h" +#include "iwl-scd.h" + +static struct page *get_workaround_page(struct iwl_trans *trans, + struct sk_buff *skb) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_tso_page_info *info; + struct page **page_ptr; + struct page *ret; + dma_addr_t phys; + + page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs); + + ret = alloc_page(GFP_ATOMIC); + if (!ret) + return NULL; + + info = IWL_TSO_PAGE_INFO(page_address(ret)); + + /* Create a DMA mapping for the page */ + phys = dma_map_page_attrs(trans->dev, ret, 0, PAGE_SIZE, + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + if (unlikely(dma_mapping_error(trans->dev, phys))) { + __free_page(ret); + return NULL; + } + + /* Store physical address and set use count */ + info->dma_addr = phys; + refcount_set(&info->use_count, 1); + + /* set the chaining pointer to the previous page if there is one */ + info->next = *page_ptr; + *page_ptr = ret; + + return ret; +} + +/* + * Add a TB and if needed apply the FH HW bug workaround; + * meta != NULL indicates that it's a page mapping and we + * need to dma_unmap_page() and set the meta->tbs bit in + * this case. + */ +static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, + struct sk_buff *skb, + struct iwl_tfh_tfd *tfd, + dma_addr_t phys, void *virt, + u16 len, struct iwl_cmd_meta *meta, + bool unmap) +{ + dma_addr_t oldphys = phys; + struct page *page; + int ret; + + if (unlikely(dma_mapping_error(trans->dev, phys))) + return -ENOMEM; + + if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) { + ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); + + if (ret < 0) + goto unmap; + + if (meta) + meta->tbs |= BIT(ret); + + ret = 0; + goto trace; + } + + /* + * Work around a hardware bug. If (as expressed in the + * condition above) the TB ends on a 32-bit boundary, + * then the next TB may be accessed with the wrong + * address. + * To work around it, copy the data elsewhere and make + * a new mapping for it so the device will not fail.
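+ * The copy lands in a page obtained from get_workaround_page() above;
+ * such pages are chained off skb->cb (via page_offs), so they can be
+ * freed again when the frame is reclaimed.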
+ */ + + if (WARN_ON(len > IWL_TSO_PAGE_DATA_SIZE)) { + ret = -ENOBUFS; + goto unmap; + } + + page = get_workaround_page(trans, skb); + if (!page) { + ret = -ENOMEM; + goto unmap; + } + + memcpy(page_address(page), virt, len); + + /* + * This is a bit odd, but performance does not matter here, what + * matters are the expectations of the calling code and TB cleanup + * function. + * + * As such, if unmap is set, then create another mapping for the TB + * entry as it will be unmapped later. On the other hand, if it is not + * set, then the TB entry will not be unmapped and instead we simply + * reference and sync the mapping that get_workaround_page() created. + */ + if (unmap) { + phys = dma_map_single(trans->dev, page_address(page), len, + DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, phys))) + return -ENOMEM; + } else { + phys = iwl_pcie_get_tso_page_phys(page_address(page)); + dma_sync_single_for_device(trans->dev, phys, len, + DMA_TO_DEVICE); + } + + ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); + if (ret < 0) { + /* unmap the new allocation as single */ + oldphys = phys; + meta = NULL; + goto unmap; + } + + IWL_DEBUG_TX(trans, + "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n", + len, (unsigned long long)oldphys, + (unsigned long long)phys); + + ret = 0; +unmap: + if (!unmap) + goto trace; + + if (meta) + dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); + else + dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE); +trace: + trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len); + + return ret; +} + +static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, + struct sk_buff *skb, + struct iwl_tfh_tfd *tfd, + struct iwl_cmd_meta *out_meta, + int start_len, + u8 hdr_len, + struct iwl_device_tx_cmd *dev_cmd) +{ +#ifdef CONFIG_INET + struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; + struct ieee80211_hdr *hdr = (void *)skb->data; + unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; + unsigned int mss = skb_shinfo(skb)->gso_size; + dma_addr_t start_hdr_phys; + u16 length, amsdu_pad; + u8 *start_hdr; + struct sg_table *sgt; + struct tso_t tso; + + trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), + &dev_cmd->hdr, start_len, 0); + + ip_hdrlen = skb_network_header_len(skb); + snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); + total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len; + amsdu_pad = 0; + + /* total amount of header we may need for this A-MSDU */ + hdr_room = DIV_ROUND_UP(total_len, mss) * + (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); + + /* Our device supports 9 segments at most, so it will fit in one page */ + sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room); + if (!sgt) + return -ENOMEM; + + start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr); + + /* + * Pull the ieee80211 header to be able to use TSO core; + * we will restore it for the tx_status flow. + */ + skb_pull(skb, hdr_len); + + /* + * Remove the length of all the headers that we don't actually + * have in the MPDU by themselves, but that we duplicate into + * all the different MSDUs inside the A-MSDU.
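+ * The subtraction just below is balanced inside the subframe loop,
+ * where le16_add_cpu(&tx_cmd->len, tb_len) re-adds each duplicated
+ * header as it is actually built.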
+ */ + le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); + + tso_start(skb, &tso); + + while (total_len) { + /* this is the data left for this subframe */ + unsigned int data_left = min_t(unsigned int, mss, total_len); + unsigned int tb_len; + dma_addr_t tb_phys; + u8 *pos_hdr = start_hdr; + + total_len -= data_left; + + memset(pos_hdr, 0, amsdu_pad); + pos_hdr += amsdu_pad; + amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + + data_left)) & 0x3; + ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr)); + pos_hdr += ETH_ALEN; + ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr)); + pos_hdr += ETH_ALEN; + + length = snap_ip_tcp_hdrlen + data_left; + *((__be16 *)pos_hdr) = cpu_to_be16(length); + pos_hdr += sizeof(length); + + /* + * This will copy the SNAP as well which will be considered + * as MAC header. + */ + tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len); + + pos_hdr += snap_ip_tcp_hdrlen; + + tb_len = pos_hdr - start_hdr; + tb_phys = iwl_pcie_get_tso_page_phys(start_hdr); + + /* + * No need for _with_wa, this is from the TSO page and + * we leave some space at the end of it so it can't hit + * the buggy scenario. + */ + iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len); + trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, + tb_phys, tb_len); + /* add this subframe's headers' length to the tx_cmd */ + le16_add_cpu(&tx_cmd->len, tb_len); + + /* prepare the start_hdr for the next subframe */ + start_hdr = pos_hdr; + + /* put the payload */ + while (data_left) { + int ret; + + tb_len = min_t(unsigned int, tso.size, data_left); + tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data); + /* Not a real mapping error, use direct comparison */ + if (unlikely(tb_phys == DMA_MAPPING_ERROR)) + goto out_err; + + ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, + tb_phys, tso.data, + tb_len, NULL, false); + if (ret) + goto out_err; + + data_left -= tb_len; + tso_build_data(skb, &tso, tb_len); + } + } + + dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room, + DMA_TO_DEVICE); + + /* re-add the WiFi header */ + skb_push(skb, hdr_len); + + return 0; + +out_err: +#endif + return -EINVAL; +} + +static struct +iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_device_tx_cmd *dev_cmd, + struct sk_buff *skb, + struct iwl_cmd_meta *out_meta, + int hdr_len, + int tx_cmd_len) +{ + int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); + struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); + dma_addr_t tb_phys; + int len; + void *tb1_addr; + + tb_phys = iwl_txq_get_first_tb_dma(txq, idx); + + /* + * No need for _with_wa, the first TB allocation is aligned up + * to a 64-byte boundary and thus can't be at the end or cross + * a page boundary (much less a 2^32 boundary).
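+ * (Each entry of first_tb_bufs occupies its own 64-byte-aligned slot,
+ * and IWL_FIRST_TB_SIZE fits well within a single slot.)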
+ */ + iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); + + /* + * The second TB (tb1) points to the remainder of the TX command + * and the 802.11 header - dword aligned size + * (This calculation modifies the TX command, so do it before the + * setup of the first TB) + */ + len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - + IWL_FIRST_TB_SIZE; + + /* do not align A-MSDU to dword as the subframe header aligns it */ + + /* map the data for TB1 */ + tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; + tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + goto out_err; + /* + * No need for _with_wa(), we ensure (via alignment) that the data + * here can never cross or end at a page boundary. + */ + iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len); + + if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, out_meta, + len + IWL_FIRST_TB_SIZE, hdr_len, dev_cmd)) + goto out_err; + + /* building the A-MSDU might have changed this data, memcpy it now */ + memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); + return tfd; + +out_err: + iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); + return NULL; +} + +static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans, + struct sk_buff *skb, + struct iwl_tfh_tfd *tfd, + struct iwl_cmd_meta *out_meta) +{ + int i; + + for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { + const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + dma_addr_t tb_phys; + unsigned int fragsz = skb_frag_size(frag); + int ret; + + if (!fragsz) + continue; + + tb_phys = skb_frag_dma_map(trans->dev, frag, 0, + fragsz, DMA_TO_DEVICE); + ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, + skb_frag_address(frag), + fragsz, out_meta, true); + if (ret) + return ret; + } + + return 0; +} + +static struct +iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_device_tx_cmd *dev_cmd, + struct sk_buff *skb, + struct iwl_cmd_meta *out_meta, + int hdr_len, + int tx_cmd_len, + bool pad) +{ + int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); + struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); + dma_addr_t tb_phys; + int len, tb1_len, tb2_len; + void *tb1_addr; + struct sk_buff *frag; + + tb_phys = iwl_txq_get_first_tb_dma(txq, idx); + + /* The first TB points to bi-directional DMA data */ + memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); + + /* + * No need for _with_wa, the first TB allocation is aligned up + * to a 64-byte boundary and thus can't be at the end or cross + * a page boundary (much less a 2^32 boundary). + */ + iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); + + /* + * The second TB (tb1) points to the remainder of the TX command + * and the 802.11 header - dword aligned size + * (This calculation modifies the TX command, so do it before the + * setup of the first TB) + */ + len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - + IWL_FIRST_TB_SIZE; + + if (pad) + tb1_len = ALIGN(len, 4); + else + tb1_len = len; + + /* map the data for TB1 */ + tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; + tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + goto out_err; + /* + * No need for _with_wa(), we ensure (via alignment) that the data + * here can never cross or end at a page boundary. 
+ */ + iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len); + trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, + IWL_FIRST_TB_SIZE + tb1_len, hdr_len); + + /* set up TFD's third entry to point to remainder of skb's head */ + tb2_len = skb_headlen(skb) - hdr_len; + + if (tb2_len > 0) { + int ret; + + tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, + tb2_len, DMA_TO_DEVICE); + ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, + skb->data + hdr_len, tb2_len, + NULL, true); + if (ret) + goto out_err; + } + + if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta)) + goto out_err; + + skb_walk_frags(skb, frag) { + int ret; + + tb_phys = dma_map_single(trans->dev, frag->data, + skb_headlen(frag), DMA_TO_DEVICE); + ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, + frag->data, + skb_headlen(frag), NULL, + true); + if (ret) + goto out_err; + if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta)) + goto out_err; + } + + return tfd; + +out_err: + iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); + return NULL; +} + +static +struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_device_tx_cmd *dev_cmd, + struct sk_buff *skb, + struct iwl_cmd_meta *out_meta) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); + struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); + int len, hdr_len; + bool amsdu; + + /* There must be data left over for TB1 or this code must be changed */ + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); + BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + + offsetofend(struct iwl_tx_cmd_gen2, dram_info) > + IWL_FIRST_TB_SIZE); + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE); + BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + + offsetofend(struct iwl_tx_cmd_gen3, dram_info) > + IWL_FIRST_TB_SIZE); + + memset(tfd, 0, sizeof(*tfd)); + + if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) + len = sizeof(struct iwl_tx_cmd_gen2); + else + len = sizeof(struct iwl_tx_cmd_gen3); + + amsdu = ieee80211_is_data_qos(hdr->frame_control) && + (*ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CTL_A_MSDU_PRESENT); + + hdr_len = ieee80211_hdrlen(hdr->frame_control); + + /* + * Only build A-MSDUs here if doing so by GSO, otherwise it may be + * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been + * built in the higher layers already. + */ + if (amsdu && skb_shinfo(skb)->gso_size) + return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, + out_meta, hdr_len, len); + return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, + hdr_len, len, !amsdu); +} + +int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q) +{ + unsigned int max; + unsigned int used; + + /* + * To avoid ambiguity between empty and completely full queues, there + * should always be less than max_tfd_queue_size elements in the queue. + * If q->n_window is smaller than max_tfd_queue_size, there is no need + * to reserve any queue entries for this purpose. + */ + if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size) + max = q->n_window; + else + max = trans->trans_cfg->base_params->max_tfd_queue_size - 1; + + /* + * max_tfd_queue_size is a power of 2, so the following is equivalent to + * modulo by max_tfd_queue_size and is well defined. 
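+ * For example, with max_tfd_queue_size = 256, write_ptr = 3 and
+ * read_ptr = 250, (3 - 250) & 255 = 9: the correct number of
+ * in-flight entries even though the subtraction wrapped.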
+ */ + used = (q->write_ptr - q->read_ptr) & + (trans->trans_cfg->base_params->max_tfd_queue_size - 1); + + if (WARN_ON(used > max)) + return 0; + + return max - used; +} + +/* + * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array + */ +static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans, + struct iwl_txq *txq, u16 byte_cnt, + int num_tbs) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); + u8 filled_tfd_size, num_fetch_chunks; + u16 len = byte_cnt; + __le16 bc_ent; + + if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window)) + return; + + filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + + num_tbs * sizeof(struct iwl_tfh_tb); + /* + * filled_tfd_size contains the number of filled bytes in the TFD. + * Dividing it by 64 will give the number of chunks to fetch + * to SRAM - 0 for one chunk, 1 for two, and so on. + * If, for example, the TFD contains only 3 TBs, then 32 bytes + * of the TFD are used, and only one chunk of 64 bytes should + * be fetched. + */ + num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr; + + /* Starting from AX210, the HW expects bytes */ + WARN_ON(trans_pcie->txqs.bc_table_dword); + WARN_ON(len > 0x3FFF); + bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14)); + scd_bc_tbl_gen3[idx].tfd_offset = bc_ent; + } else { + struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; + + /* Before AX210, the HW expects DW */ + WARN_ON(!trans_pcie->txqs.bc_table_dword); + len = DIV_ROUND_UP(len, 4); + WARN_ON(len > 0xFFF); + bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); + scd_bc_tbl->tfd_offset[idx] = bc_ent; + } +} + +static u8 iwl_txq_gen2_get_num_tbs(struct iwl_tfh_tfd *tfd) +{ + return le16_to_cpu(tfd->num_tbs) & 0x1f; +} + +int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd, + dma_addr_t addr, u16 len) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int idx = iwl_txq_gen2_get_num_tbs(tfd); + struct iwl_tfh_tb *tb; + + /* Only WARN here so we know about the issue, but we mess up our + * unmap path because not every place currently checks for errors + * returned from this function - it can only return an error if + * there's no more space, and so when we know there is enough we + * don't always check ... + */ + WARN(iwl_txq_crosses_4g_boundary(addr, len), + "possible DMA problem with iova:0x%llx, len:%d\n", + (unsigned long long)addr, len); + + if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) + return -EINVAL; + tb = &tfd->tbs[idx]; + + /* Each TFD can point to at most max_tbs Tx buffers */ + if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->txqs.tfd.max_tbs) { + IWL_ERR(trans, "Error can not send more than %d chunks\n", + trans_pcie->txqs.tfd.max_tbs); + return -EINVAL; + } + + put_unaligned_le64(addr, &tb->addr); + tb->tb_len = cpu_to_le16(len); + + tfd->num_tbs = cpu_to_le16(idx + 1); + + return idx; +} + +void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, + struct iwl_cmd_meta *meta, + struct iwl_tfh_tfd *tfd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i, num_tbs; + + /* Sanity check on number of chunks */ + num_tbs = iwl_txq_gen2_get_num_tbs(tfd); + + if (num_tbs > trans_pcie->txqs.tfd.max_tbs) { + IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); + return; + } + + /* TB1 is mapped directly, the rest is the TSO page and SG list.
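+ * With sg_offset set, only TB1 below still needs an individual unmap;
+ * the TSO/SG pages are unmapped and freed separately through
+ * iwl_pcie_free_tso_pages().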
*/ + if (meta->sg_offset) + num_tbs = 2; + + /* first TB is never freed - it's the bidirectional DMA data */ + for (i = 1; i < num_tbs; i++) { + if (meta->tbs & BIT(i)) + dma_unmap_page(trans->dev, + le64_to_cpu(tfd->tbs[i].addr), + le16_to_cpu(tfd->tbs[i].tb_len), + DMA_TO_DEVICE); + else + dma_unmap_single(trans->dev, + le64_to_cpu(tfd->tbs[i].addr), + le16_to_cpu(tfd->tbs[i].tb_len), + DMA_TO_DEVICE); + } + + iwl_txq_set_tfd_invalid_gen2(trans, tfd); +} + +static void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) +{ + /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and + * idx is bounded by n_window + */ + int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); + struct sk_buff *skb; + + lockdep_assert_held(&txq->lock); + + if (!txq->entries) + return; + + iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, + iwl_txq_get_tfd(trans, txq, idx)); + + skb = txq->entries[idx].skb; + + /* Can be called from irqs-disabled context + * If skb is not NULL, it means that the whole queue is being + * freed and that the queue is not empty - free the skb + */ + if (skb) { + iwl_op_mode_free_skb(trans->op_mode, skb); + txq->entries[idx].skb = NULL; + } +} + +/* + * iwl_txq_inc_wr_ptr - Send new write index to hardware + */ +static void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) +{ + lockdep_assert_held(&txq->lock); + + IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); + + /* + * if not in power-save mode, uCode will never sleep when we're + * trying to tx (during RFKILL, we're not trying to tx). + */ + iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); +} + +int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_device_tx_cmd *dev_cmd, int txq_id) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_cmd_meta *out_meta; + struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; + u16 cmd_len; + int idx; + void *tfd; + + if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, + "queue %d out of range", txq_id)) + return -EINVAL; + + if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used), + "TX on unused queue %d\n", txq_id)) + return -EINVAL; + + if (skb_is_nonlinear(skb) && + skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) && + __skb_linearize(skb)) + return -ENOMEM; + + spin_lock(&txq->lock); + + if (iwl_txq_space(trans, txq) < txq->high_mark) { + iwl_txq_stop(trans, txq); + + /* don't put the packet on the ring, if there is no room */ + if (unlikely(iwl_txq_space(trans, txq) < 3)) { + struct iwl_device_tx_cmd **dev_cmd_ptr; + + dev_cmd_ptr = (void *)((u8 *)skb->cb + + trans_pcie->txqs.dev_cmd_offs); + + *dev_cmd_ptr = dev_cmd; + __skb_queue_tail(&txq->overflow_q, skb); + spin_unlock(&txq->lock); + return 0; + } + } + + idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); + + /* Set up driver data for this TFD */ + txq->entries[idx].skb = skb; + txq->entries[idx].cmd = dev_cmd; + + dev_cmd->hdr.sequence = + cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | + INDEX_TO_SEQ(idx))); + + /* Set up first empty entry in queue's array of Tx/cmd buffers */ + out_meta = &txq->entries[idx].meta; + memset(out_meta, 0, sizeof(*out_meta)); + + tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); + if (!tfd) { + spin_unlock(&txq->lock); + return -1; + } + + if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { + struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = + (void *)dev_cmd->payload; + + cmd_len = le16_to_cpu(tx_cmd_gen3->len); + } else { + struct iwl_tx_cmd_gen2 
*tx_cmd_gen2 =
+ (void *)dev_cmd->payload;
+
+ cmd_len = le16_to_cpu(tx_cmd_gen2->len);
+ }
+
+ /* Set up entry for this TFD in Tx byte-count array */
+ iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
+ iwl_txq_gen2_get_num_tbs(tfd));
+
+ /* start timer if queue currently empty */
+ if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
+ mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
+
+ /* Tell device the write index *just past* this latest filled TFD */
+ txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
+ iwl_txq_inc_wr_ptr(trans, txq);
+ /*
+ * At this point the frame is "transmitted" successfully
+ * and we will get a TX status notification eventually.
+ */
+ spin_unlock(&txq->lock);
+ return 0;
+}
+
+/*************** HOST COMMAND QUEUE FUNCTIONS *****/
+
+/*
+ * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
+ */
+static void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
+
+ spin_lock_bh(&txq->reclaim_lock);
+ spin_lock(&txq->lock);
+ while (txq->write_ptr != txq->read_ptr) {
+ IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
+ txq_id, txq->read_ptr);
+
+ if (txq_id != trans_pcie->txqs.cmd.q_id) {
+ int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
+ struct iwl_cmd_meta *cmd_meta = &txq->entries[idx].meta;
+ struct sk_buff *skb = txq->entries[idx].skb;
+
+ if (!WARN_ON_ONCE(!skb))
+ iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
+ }
+ iwl_txq_gen2_free_tfd(trans, txq);
+ txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
+ }
+
+ while (!skb_queue_empty(&txq->overflow_q)) {
+ struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
+
+ iwl_op_mode_free_skb(trans->op_mode, skb);
+ }
+
+ spin_unlock(&txq->lock);
+ spin_unlock_bh(&txq->reclaim_lock);
+
+ /* just in case - this queue may have been stopped */
+ iwl_trans_pcie_wake_queue(trans, txq);
+}
+
+static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
+ struct iwl_txq *txq)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ struct device *dev = trans->dev;
+
+ /* De-alloc circular buffer of TFDs */
+ if (txq->tfds) {
+ dma_free_coherent(dev,
+ trans_pcie->txqs.tfd.size * txq->n_window,
+ txq->tfds, txq->dma_addr);
+ dma_free_coherent(dev,
+ sizeof(*txq->first_tb_bufs) * txq->n_window,
+ txq->first_tb_bufs, txq->first_tb_dma);
+ }
+
+ kfree(txq->entries);
+ if (txq->bc_tbl.addr)
+ dma_pool_free(trans_pcie->txqs.bc_pool,
+ txq->bc_tbl.addr, txq->bc_tbl.dma);
+ kfree(txq);
+}
+
+/*
+ * iwl_txq_gen2_free - Deallocate DMA queue
+ * @trans: the transport
+ * @txq_id: queue to deallocate
+ *
+ * Empty queue by removing and destroying all BDs.
+ * Free all buffers and the "txq" descriptor structure itself.
+ */ +static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq; + int i; + + if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, + "queue %d out of range", txq_id)) + return; + + txq = trans_pcie->txqs.txq[txq_id]; + + if (WARN_ON(!txq)) + return; + + iwl_txq_gen2_unmap(trans, txq_id); + + /* De-alloc array of command/tx buffers */ + if (txq_id == trans_pcie->txqs.cmd.q_id) + for (i = 0; i < txq->n_window; i++) { + kfree_sensitive(txq->entries[i].cmd); + kfree_sensitive(txq->entries[i].free_buf); + } + del_timer_sync(&txq->stuck_timer); + + iwl_txq_gen2_free_memory(trans, txq); + + trans_pcie->txqs.txq[txq_id] = NULL; + + clear_bit(txq_id, trans_pcie->txqs.queue_used); +} + +static struct iwl_txq * +iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + size_t bc_tbl_size, bc_tbl_entries; + struct iwl_txq *txq; + int ret; + + WARN_ON(!trans_pcie->txqs.bc_tbl_size); + + bc_tbl_size = trans_pcie->txqs.bc_tbl_size; + bc_tbl_entries = bc_tbl_size / sizeof(u16); + + if (WARN_ON(size > bc_tbl_entries)) + return ERR_PTR(-EINVAL); + + txq = kzalloc(sizeof(*txq), GFP_KERNEL); + if (!txq) + return ERR_PTR(-ENOMEM); + + txq->bc_tbl.addr = dma_pool_alloc(trans_pcie->txqs.bc_pool, GFP_KERNEL, + &txq->bc_tbl.dma); + if (!txq->bc_tbl.addr) { + IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); + kfree(txq); + return ERR_PTR(-ENOMEM); + } + + ret = iwl_pcie_txq_alloc(trans, txq, size, false); + if (ret) { + IWL_ERR(trans, "Tx queue alloc failed\n"); + goto error; + } + ret = iwl_txq_init(trans, txq, size, false); + if (ret) { + IWL_ERR(trans, "Tx queue init failed\n"); + goto error; + } + + txq->wd_timeout = msecs_to_jiffies(timeout); + + return txq; + +error: + iwl_txq_gen2_free_memory(trans, txq); + return ERR_PTR(ret); +} + +static int iwl_pcie_txq_alloc_response(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_host_cmd *hcmd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_tx_queue_cfg_rsp *rsp; + int ret, qid; + u32 wr_ptr; + + if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) != + sizeof(*rsp))) { + ret = -EINVAL; + goto error_free_resp; + } + + rsp = (void *)hcmd->resp_pkt->data; + qid = le16_to_cpu(rsp->queue_number); + wr_ptr = le16_to_cpu(rsp->write_pointer); + + if (qid >= ARRAY_SIZE(trans_pcie->txqs.txq)) { + WARN_ONCE(1, "queue index %d unsupported", qid); + ret = -EIO; + goto error_free_resp; + } + + if (test_and_set_bit(qid, trans_pcie->txqs.queue_used)) { + WARN_ONCE(1, "queue %d already used", qid); + ret = -EIO; + goto error_free_resp; + } + + if (WARN_ONCE(trans_pcie->txqs.txq[qid], + "queue %d already allocated\n", qid)) { + ret = -EIO; + goto error_free_resp; + } + + txq->id = qid; + trans_pcie->txqs.txq[qid] = txq; + wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); + + /* Place first TFD at index corresponding to start sequence number */ + txq->read_ptr = wr_ptr; + txq->write_ptr = wr_ptr; + + IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); + + iwl_free_resp(hcmd); + return qid; + +error_free_resp: + iwl_free_resp(hcmd); + iwl_txq_gen2_free_memory(trans, txq); + return ret; +} + +int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask, + u8 tid, int size, unsigned int timeout) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq; + union { + 
struct iwl_tx_queue_cfg_cmd old; + struct iwl_scd_queue_cfg_cmd new; + } cmd; + struct iwl_host_cmd hcmd = { + .flags = CMD_WANT_SKB, + }; + int ret; + + /* take the min with bytecount table entries allowed */ + size = min_t(u32, size, trans_pcie->txqs.bc_tbl_size / sizeof(u16)); + /* but must be power of 2 values for calculating read/write pointers */ + size = rounddown_pow_of_two(size); + + if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ && + trans->hw_rev_step == SILICON_A_STEP) { + size = 4096; + txq = iwl_txq_dyn_alloc_dma(trans, size, timeout); + } else { + do { + txq = iwl_txq_dyn_alloc_dma(trans, size, timeout); + if (!IS_ERR(txq)) + break; + + IWL_DEBUG_TX_QUEUES(trans, + "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %ld\n", + size, sta_mask, tid, + PTR_ERR(txq)); + size /= 2; + } while (size >= 16); + } + + if (IS_ERR(txq)) + return PTR_ERR(txq); + + if (trans_pcie->txqs.queue_alloc_cmd_ver == 0) { + memset(&cmd.old, 0, sizeof(cmd.old)); + cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr); + cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); + cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); + cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE); + cmd.old.tid = tid; + + if (hweight32(sta_mask) != 1) { + ret = -EINVAL; + goto error; + } + cmd.old.sta_id = ffs(sta_mask) - 1; + + hcmd.id = SCD_QUEUE_CFG; + hcmd.len[0] = sizeof(cmd.old); + hcmd.data[0] = &cmd.old; + } else if (trans_pcie->txqs.queue_alloc_cmd_ver == 3) { + memset(&cmd.new, 0, sizeof(cmd.new)); + cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD); + cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr); + cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma); + cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); + cmd.new.u.add.flags = cpu_to_le32(flags); + cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask); + cmd.new.u.add.tid = tid; + + hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD); + hcmd.len[0] = sizeof(cmd.new); + hcmd.data[0] = &cmd.new; + } else { + ret = -EOPNOTSUPP; + goto error; + } + + ret = iwl_trans_send_cmd(trans, &hcmd); + if (ret) + goto error; + + return iwl_pcie_txq_alloc_response(trans, txq, &hcmd); + +error: + iwl_txq_gen2_free_memory(trans, txq); + return ret; +} + +void iwl_txq_dyn_free(struct iwl_trans *trans, int queue) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (WARN(queue >= IWL_MAX_TVQM_QUEUES, + "queue %d out of range", queue)) + return; + + /* + * Upon HW Rfkill - we stop the device, and then stop the queues + * in the op_mode. Just for the sake of the simplicity of the op_mode, + * allow the op_mode to call txq_disable after it already called + * stop_device. 
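+ * If the queue was already freed, the queue_used bit is clear and
+ * the code below only warns when the device is still enabled.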
+ */ + if (!test_and_clear_bit(queue, trans_pcie->txqs.queue_used)) { + WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), + "queue %d not used", queue); + return; + } + + iwl_txq_gen2_free(trans, queue); + + IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); +} + +void iwl_txq_gen2_tx_free(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i; + + memset(trans_pcie->txqs.queue_used, 0, + sizeof(trans_pcie->txqs.queue_used)); + + /* Free all TX queues */ + for (i = 0; i < ARRAY_SIZE(trans_pcie->txqs.txq); i++) { + if (!trans_pcie->txqs.txq[i]) + continue; + + iwl_txq_gen2_free(trans, i); + } +} + +int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *queue; + int ret; + + /* alloc and init the tx queue */ + if (!trans_pcie->txqs.txq[txq_id]) { + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + if (!queue) { + IWL_ERR(trans, "Not enough memory for tx queue\n"); + return -ENOMEM; + } + trans_pcie->txqs.txq[txq_id] = queue; + ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true); + if (ret) { + IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); + goto error; + } + } else { + queue = trans_pcie->txqs.txq[txq_id]; + } + + ret = iwl_txq_init(trans, queue, queue_size, + (txq_id == trans_pcie->txqs.cmd.q_id)); + if (ret) { + IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); + goto error; + } + trans_pcie->txqs.txq[txq_id]->id = txq_id; + set_bit(txq_id, trans_pcie->txqs.queue_used); + + return 0; + +error: + iwl_txq_gen2_tx_free(trans); + return ret; +} /*************** HOST COMMAND QUEUE FUNCTIONS *****/ @@ -28,7 +1201,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; + struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; struct iwl_device_cmd *out_cmd; struct iwl_cmd_meta *out_meta; void *dup_buf = NULL; @@ -130,7 +1303,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, out_cmd = txq->entries[idx].cmd; out_meta = &txq->entries[idx].meta; - /* re-initialize to NULL */ + /* re-initialize, this also marks the SG list as unused */ memset(out_meta, 0, sizeof(*out_meta)); if (cmd->flags & CMD_WANT_SKB) out_meta->source = cmd; @@ -143,7 +1316,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide)); out_cmd->hdr_wide.reserved = 0; out_cmd->hdr_wide.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | + cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) | INDEX_TO_SEQ(txq->write_ptr)); cmd_pos = sizeof(struct iwl_cmd_header_wide); @@ -191,7 +1364,7 @@ int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", iwl_get_cmd_string(trans, cmd->id), group_id, out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), - cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id); + cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id); /* start the TFD with the minimum copy bytes */ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index fa8eba47dc4c..22d482ae53d9 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1,16 +1,22 @@ 
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* - * Copyright (C) 2003-2014, 2018-2021, 2023 Intel Corporation + * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation * Copyright (C) 2013-2015 Intel Mobile Communications GmbH * Copyright (C) 2016-2017 Intel Deutschland GmbH */ #include #include +#include #include #include +#include #include #include +#include "fw/api/commands.h" +#include "fw/api/datapath.h" +#include "fw/api/debug.h" +#include "iwl-fh.h" #include "iwl-debug.h" #include "iwl-csr.h" #include "iwl-prph.h" @@ -72,6 +78,7 @@ void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr) static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); u32 reg = 0; int txq_id = txq->id; @@ -84,7 +91,7 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, * 3. there is a chance that the NIC is asleep */ if (!trans->trans_cfg->base_params->shadow_reg_enable && - txq_id != trans->txqs.cmd.q_id && + txq_id != trans_pcie->txqs.cmd.q_id && test_bit(STATUS_TPOWER_PMI, &trans->status)) { /* * wake up nic if it's powered down ... @@ -115,12 +122,13 @@ static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans, void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { - struct iwl_txq *txq = trans->txqs.txq[i]; + struct iwl_txq *txq = trans_pcie->txqs.txq[i]; - if (!test_bit(i, trans->txqs.queue_used)) + if (!test_bit(i, trans_pcie->txqs.queue_used)) continue; spin_lock_bh(&txq->lock); @@ -132,23 +140,43 @@ void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) } } +static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_tfd *tfd, + u8 idx, dma_addr_t addr, u16 len) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + u16 hi_n_len = len << 4; + + put_unaligned_le32(addr, &tb->lo); + hi_n_len |= iwl_get_dma_hi_addr(addr); + + tb->hi_n_len = cpu_to_le16(hi_n_len); + + tfd->num_tbs = idx + 1; +} + +static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_tfd *tfd) +{ + return tfd->num_tbs & 0x1f; +} + static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, dma_addr_t addr, u16 len, bool reset) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); void *tfd; u32 num_tbs; - tfd = (u8 *)txq->tfds + trans->txqs.tfd.size * txq->write_ptr; + tfd = (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * txq->write_ptr; if (reset) - memset(tfd, 0, trans->txqs.tfd.size); + memset(tfd, 0, trans_pcie->txqs.tfd.size); - num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); + num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd); /* Each TFD can point to a maximum max_tbs Tx buffers */ - if (num_tbs >= trans->txqs.tfd.max_tbs) { + if (num_tbs >= trans_pcie->txqs.tfd.max_tbs) { IWL_ERR(trans, "Error can not send more than %d chunks\n", - trans->txqs.tfd.max_tbs); + trans_pcie->txqs.tfd.max_tbs); return -EINVAL; } @@ -156,7 +184,7 @@ static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, "Unaligned address = %llx\n", (unsigned long long)addr)) return -EINVAL; - iwl_pcie_gen1_tfd_set_tb(trans, tfd, num_tbs, addr, len); + iwl_pcie_gen1_tfd_set_tb(tfd, num_tbs, addr, len); return num_tbs; } @@ -181,36 +209,206 @@ static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) spin_unlock(&trans_pcie->reg_lock); } +static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans, + struct 
page *page) +{ + struct iwl_tso_page_info *info = IWL_TSO_PAGE_INFO(page_address(page)); + + /* Decrease internal use count and unmap/free page if needed */ + if (refcount_dec_and_test(&info->use_count)) { + dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE, + DMA_TO_DEVICE); + + __free_page(page); + } +} + +void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_cmd_meta *cmd_meta) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct page **page_ptr; + struct page *next; + + page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs); + next = *page_ptr; + *page_ptr = NULL; + + while (next) { + struct iwl_tso_page_info *info; + struct page *tmp = next; + + info = IWL_TSO_PAGE_INFO(page_address(next)); + next = info->next; + + /* Unmap the scatter gather list that is on the last page */ + if (!next && cmd_meta->sg_offset) { + struct sg_table *sgt; + + sgt = (void *)((u8 *)page_address(tmp) + + cmd_meta->sg_offset); + + dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0); + } + + iwl_pcie_free_and_unmap_tso_page(trans, tmp); + } +} + +static inline dma_addr_t +iwl_txq_gen1_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) +{ + struct iwl_tfd_tb *tb = &tfd->tbs[idx]; + dma_addr_t addr; + dma_addr_t hi_len; + + addr = get_unaligned_le32(&tb->lo); + + if (sizeof(dma_addr_t) <= sizeof(u32)) + return addr; + + hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; + + /* + * shift by 16 twice to avoid warnings on 32-bit + * (where this code never runs anyway due to the + * if statement above) + */ + return addr | ((hi_len << 16) << 16); +} + +static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans, + struct iwl_tfd *tfd) +{ + tfd->num_tbs = 0; + + iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans->invalid_tx_cmd.dma, + trans->invalid_tx_cmd.size); +} + +static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, + struct iwl_cmd_meta *meta, + struct iwl_txq *txq, int index) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int i, num_tbs; + struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index); + + /* Sanity check on number of chunks */ + num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd); + + if (num_tbs > trans_pcie->txqs.tfd.max_tbs) { + IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); + /* @todo issue fatal error, it is quite serious situation */ + return; + } + + /* TB1 is mapped directly, the rest is the TSO page and SG list. 
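+ * meta->tbs records, per TB index, whether the buffer was mapped
+ * as a page (unmapped with dma_unmap_page()) or as a single buffer
+ * (unmapped with dma_unmap_single()).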
*/ + if (meta->sg_offset) + num_tbs = 2; + + /* first TB is never freed - it's the bidirectional DMA data */ + + for (i = 1; i < num_tbs; i++) { + if (meta->tbs & BIT(i)) + dma_unmap_page(trans->dev, + iwl_txq_gen1_tfd_tb_get_addr(tfd, i), + iwl_txq_gen1_tfd_tb_get_len(trans, + tfd, i), + DMA_TO_DEVICE); + else + dma_unmap_single(trans->dev, + iwl_txq_gen1_tfd_tb_get_addr(tfd, i), + iwl_txq_gen1_tfd_tb_get_len(trans, + tfd, i), + DMA_TO_DEVICE); + } + + meta->tbs = 0; + + iwl_txq_set_tfd_invalid_gen1(trans, tfd); +} + +/** + * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] + * @trans: transport private data + * @txq: tx queue + * @read_ptr: the TXQ read_ptr to free + * + * Does NOT advance any TFD circular buffer read/write indexes + * Does NOT free the TFD itself (which is within circular buffer) + */ +static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq, + int read_ptr) +{ + /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and + * idx is bounded by n_window + */ + int idx = iwl_txq_get_cmd_index(txq, read_ptr); + struct sk_buff *skb; + + lockdep_assert_held(&txq->reclaim_lock); + + if (!txq->entries) + return; + + /* We have only q->n_window txq->entries, but we use + * TFD_QUEUE_SIZE_MAX tfds + */ + if (trans->trans_cfg->gen2) + iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, + iwl_txq_get_tfd(trans, txq, read_ptr)); + else + iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, + txq, read_ptr); + + /* free SKB */ + skb = txq->entries[idx].skb; + + /* Can be called from irqs-disabled context + * If skb is not NULL, it means that the whole queue is being + * freed and that the queue is not empty - free the skb + */ + if (skb) { + iwl_op_mode_free_skb(trans->op_mode, skb); + txq->entries[idx].skb = NULL; + } +} + /* * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's */ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) { - struct iwl_txq *txq = trans->txqs.txq[txq_id]; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; if (!txq) { IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n"); return; } - spin_lock_bh(&txq->lock); + spin_lock_bh(&txq->reclaim_lock); + spin_lock(&txq->lock); while (txq->write_ptr != txq->read_ptr) { IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", txq_id, txq->read_ptr); - if (txq_id != trans->txqs.cmd.q_id) { + if (txq_id != trans_pcie->txqs.cmd.q_id) { struct sk_buff *skb = txq->entries[txq->read_ptr].skb; + struct iwl_cmd_meta *cmd_meta = + &txq->entries[txq->read_ptr].meta; if (WARN_ON_ONCE(!skb)) continue; - iwl_txq_free_tso_page(trans, skb); + iwl_pcie_free_tso_pages(trans, skb, cmd_meta); } - iwl_txq_free_tfd(trans, txq); + iwl_txq_free_tfd(trans, txq, txq->read_ptr); txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); if (txq->read_ptr == txq->write_ptr && - txq_id == trans->txqs.cmd.q_id) + txq_id == trans_pcie->txqs.cmd.q_id) iwl_pcie_clear_cmd_in_flight(trans); } @@ -220,10 +418,11 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) iwl_op_mode_free_skb(trans->op_mode, skb); } - spin_unlock_bh(&txq->lock); + spin_unlock(&txq->lock); + spin_unlock_bh(&txq->reclaim_lock); /* just in case - this queue may have been stopped */ - iwl_wake_queue(trans, txq); + iwl_trans_pcie_wake_queue(trans, txq); } /* @@ -236,7 +435,8 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) */ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) { - struct 
iwl_txq *txq = trans->txqs.txq[txq_id]; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; struct device *dev = trans->dev; int i; @@ -246,7 +446,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) iwl_pcie_txq_unmap(trans, txq_id); /* De-alloc array of command/tx buffers */ - if (txq_id == trans->txqs.cmd.q_id) + if (txq_id == trans_pcie->txqs.cmd.q_id) for (i = 0; i < txq->n_window; i++) { kfree_sensitive(txq->entries[i].cmd); kfree_sensitive(txq->entries[i].free_buf); @@ -255,7 +455,7 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) /* De-alloc circular buffer of TFDs */ if (txq->tfds) { dma_free_coherent(dev, - trans->txqs.tfd.size * + trans_pcie->txqs.tfd.size * trans->trans_cfg->base_params->max_tfd_queue_size, txq->tfds, txq->dma_addr); txq->dma_addr = 0; @@ -285,9 +485,10 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); /* make sure all queue are not stopped/used */ - memset(trans->txqs.queue_stopped, 0, - sizeof(trans->txqs.queue_stopped)); - memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); + memset(trans_pcie->txqs.queue_stopped, 0, + sizeof(trans_pcie->txqs.queue_stopped)); + memset(trans_pcie->txqs.queue_used, 0, + sizeof(trans_pcie->txqs.queue_used)); trans_pcie->scd_base_addr = iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); @@ -301,7 +502,7 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) NULL, clear_dwords); iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, - trans->txqs.scd_bc_tbls.dma >> 10); + trans_pcie->txqs.scd_bc_tbls.dma >> 10); /* The chain extension of the SCD doesn't work well. This feature is * enabled by default by the HW, so we need to disable it manually. @@ -309,9 +510,9 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) if (trans->trans_cfg->base_params->scd_chain_ext_wa) iwl_write_prph(trans, SCD_CHAINEXT_EN, 0); - iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id, - trans->txqs.cmd.fifo, - trans->txqs.cmd.wdg_timeout); + iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id, + trans_pcie->txqs.cmd.fifo, + trans_pcie->txqs.cmd.wdg_timeout); /* Activate all Tx DMA/FIFO channels */ iwl_scd_activate_fifos(trans); @@ -347,7 +548,7 @@ void iwl_trans_pcie_tx_reset(struct iwl_trans *trans) for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { - struct iwl_txq *txq = trans->txqs.txq[txq_id]; + struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; if (trans->trans_cfg->gen2) iwl_write_direct64(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), @@ -422,9 +623,10 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans) * queues. This happens when we have an rfkill interrupt. * Since we stop Tx altogether - mark the queues as stopped. 
*/ - memset(trans->txqs.queue_stopped, 0, - sizeof(trans->txqs.queue_stopped)); - memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); + memset(trans_pcie->txqs.queue_stopped, 0, + sizeof(trans_pcie->txqs.queue_stopped)); + memset(trans_pcie->txqs.queue_used, 0, + sizeof(trans_pcie->txqs.queue_used)); /* This can happen: start_hw, stop_device */ if (!trans_pcie->txq_memory) @@ -448,7 +650,8 @@ void iwl_pcie_tx_free(struct iwl_trans *trans) int txq_id; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); + memset(trans_pcie->txqs.queue_used, 0, + sizeof(trans_pcie->txqs.queue_used)); /* Tx queues */ if (trans_pcie->txq_memory) { @@ -456,7 +659,7 @@ void iwl_pcie_tx_free(struct iwl_trans *trans) txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { iwl_pcie_txq_free(trans, txq_id); - trans->txqs.txq[txq_id] = NULL; + trans_pcie->txqs.txq[txq_id] = NULL; } } @@ -465,7 +668,135 @@ void iwl_pcie_tx_free(struct iwl_trans *trans) iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); - iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls); + iwl_pcie_free_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls); +} + +void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) +{ + u32 txq_id = txq->id; + u32 status; + bool active; + u8 fifo; + + if (trans->trans_cfg->gen2) { + IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, + txq->read_ptr, txq->write_ptr); + /* TODO: access new SCD registers and dump them */ + return; + } + + status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); + fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; + active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); + + IWL_ERR(trans, + "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n", + txq_id, active ? "" : "in", fifo, + jiffies_to_msecs(txq->wd_timeout), + txq->read_ptr, txq->write_ptr, + iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & + (trans->trans_cfg->base_params->max_tfd_queue_size - 1), + iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & + (trans->trans_cfg->base_params->max_tfd_queue_size - 1), + iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); +} + +static void iwl_txq_stuck_timer(struct timer_list *t) +{ + struct iwl_txq *txq = from_timer(txq, t, stuck_timer); + struct iwl_trans *trans = txq->trans; + + spin_lock(&txq->lock); + /* check if triggered erroneously */ + if (txq->read_ptr == txq->write_ptr) { + spin_unlock(&txq->lock); + return; + } + spin_unlock(&txq->lock); + + iwl_txq_log_scd_error(trans, txq); + + iwl_force_nmi(trans); +} + +int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, + int slots_num, bool cmd_queue) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + size_t num_entries = trans->trans_cfg->gen2 ? 
+ slots_num : trans->trans_cfg->base_params->max_tfd_queue_size; + size_t tfd_sz; + size_t tb0_buf_sz; + int i; + + if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num)) + return -EINVAL; + + if (WARN_ON(txq->entries || txq->tfds)) + return -EINVAL; + + tfd_sz = trans_pcie->txqs.tfd.size * num_entries; + + timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0); + txq->trans = trans; + + txq->n_window = slots_num; + + txq->entries = kcalloc(slots_num, + sizeof(struct iwl_pcie_txq_entry), + GFP_KERNEL); + + if (!txq->entries) + goto error; + + if (cmd_queue) + for (i = 0; i < slots_num; i++) { + txq->entries[i].cmd = + kmalloc(sizeof(struct iwl_device_cmd), + GFP_KERNEL); + if (!txq->entries[i].cmd) + goto error; + } + + /* Circular buffer of transmit frame descriptors (TFDs), + * shared with device + */ + txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, + &txq->dma_addr, GFP_KERNEL); + if (!txq->tfds) + goto error; + + BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN); + + tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; + + txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, + &txq->first_tb_dma, + GFP_KERNEL); + if (!txq->first_tb_bufs) + goto err_free_tfds; + + for (i = 0; i < num_entries; i++) { + void *tfd = iwl_txq_get_tfd(trans, txq, i); + + if (trans->trans_cfg->gen2) + iwl_txq_set_tfd_invalid_gen2(trans, tfd); + else + iwl_txq_set_tfd_invalid_gen1(trans, tfd); + } + + return 0; +err_free_tfds: + dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); + txq->tfds = NULL; +error: + if (txq->entries && cmd_queue) + for (i = 0; i < slots_num; i++) + kfree(txq->entries[i].cmd); + kfree(txq->entries); + txq->entries = NULL; + + return -ENOMEM; } /* @@ -491,7 +822,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) goto error; } - ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls, + ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls, bc_tbls_size); if (ret) { IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); @@ -517,7 +848,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) /* Alloc and init all Tx queues, including the command queue (#4/#9) */ for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { - bool cmd_queue = (txq_id == trans->txqs.cmd.q_id); + bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id); if (cmd_queue) slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, @@ -525,14 +856,14 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) else slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, trans->cfg->min_ba_txq_size); - trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id]; - ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num, - cmd_queue); + trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id]; + ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id], + slots_num, cmd_queue); if (ret) { IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); goto error; } - trans->txqs.txq[txq_id]->id = txq_id; + trans_pcie->txqs.txq[txq_id]->id = txq_id; } return 0; @@ -543,6 +874,69 @@ error: return ret; } +/* + * iwl_queue_init - Initialize queue's high/low-water and read/write indexes + */ +static int iwl_queue_init(struct iwl_txq *q, int slots_num) +{ + q->n_window = slots_num; + + /* slots_num must be power-of-two size, otherwise + * iwl_txq_get_cmd_index is broken. 
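+ * (it masks the index with n_window - 1, which matches a modulo
+ * only for power-of-two sizes).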
+ */ + if (WARN_ON(!is_power_of_2(slots_num))) + return -EINVAL; + + q->low_mark = q->n_window / 4; + if (q->low_mark < 4) + q->low_mark = 4; + + q->high_mark = q->n_window / 8; + if (q->high_mark < 2) + q->high_mark = 2; + + q->write_ptr = 0; + q->read_ptr = 0; + + return 0; +} + +int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, + int slots_num, bool cmd_queue) +{ + u32 tfd_queue_max_size = + trans->trans_cfg->base_params->max_tfd_queue_size; + int ret; + + txq->need_update = false; + + /* max_tfd_queue_size must be power-of-two size, otherwise + * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. + */ + if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), + "Max tfd queue size must be a power of two, but is %d", + tfd_queue_max_size)) + return -EINVAL; + + /* Initialize queue's high/low-water marks, and head/tail indexes */ + ret = iwl_queue_init(txq, slots_num); + if (ret) + return ret; + + spin_lock_init(&txq->lock); + spin_lock_init(&txq->reclaim_lock); + + if (cmd_queue) { + static struct lock_class_key iwl_txq_cmd_queue_lock_class; + + lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class); + } + + __skb_queue_head_init(&txq->overflow_q); + + return 0; +} + int iwl_pcie_tx_init(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -571,7 +965,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) /* Alloc and init all Tx queues, including the command queue (#4/#9) */ for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; txq_id++) { - bool cmd_queue = (txq_id == trans->txqs.cmd.q_id); + bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id); if (cmd_queue) slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, @@ -579,7 +973,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) else slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, trans->cfg->min_ba_txq_size); - ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num, + ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num, cmd_queue); if (ret) { IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); @@ -593,7 +987,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans) * Circular buffer (TFD queue in DRAM) physical base address */ iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), - trans->txqs.txq[txq_id]->dma_addr >> 8); + trans_pcie->txqs.txq[txq_id]->dma_addr >> 8); } iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); @@ -641,6 +1035,42 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, return 0; } +static void iwl_txq_progress(struct iwl_txq *txq) +{ + lockdep_assert_held(&txq->lock); + + if (!txq->wd_timeout) + return; + + /* + * station is asleep and we send data - that must + * be uAPSD or PS-Poll. Don't rearm the timer. + */ + if (txq->frozen) + return; + + /* + * if empty delete timer, otherwise move timer forward + * since we're making progress on this queue + */ + if (txq->read_ptr == txq->write_ptr) + del_timer(&txq->stuck_timer); + else + mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); +} + +static inline bool iwl_txq_used(const struct iwl_txq *q, int i, + int read_ptr, int write_ptr) +{ + int index = iwl_txq_get_cmd_index(q, i); + int r = iwl_txq_get_cmd_index(q, read_ptr); + int w = iwl_txq_get_cmd_index(q, write_ptr); + + return w >= r ? 
+ (index >= r && index < w) : + !(index < r && index >= w); +} + /* * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd * @@ -650,7 +1080,8 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, */ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) { - struct iwl_txq *txq = trans->txqs.txq[txq_id]; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; int nfreed = 0; u16 r; @@ -660,8 +1091,8 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) r = iwl_txq_get_cmd_index(txq, txq->read_ptr); if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size || - (!iwl_txq_used(txq, idx))) { - WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used), + (!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) { + WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used), "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", __func__, txq_id, idx, trans->trans_cfg->base_params->max_tfd_queue_size, @@ -720,11 +1151,11 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, unsigned int wdg_timeout) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans->txqs.txq[txq_id]; + struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; int fifo = -1; bool scd_bug = false; - if (test_and_set_bit(txq_id, trans->txqs.queue_used)) + if (test_and_set_bit(txq_id, trans_pcie->txqs.queue_used)) WARN_ONCE(1, "queue %d already used - expect issues", txq_id); txq->wd_timeout = msecs_to_jiffies(wdg_timeout); @@ -733,7 +1164,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, fifo = cfg->fifo; /* Disable the scheduler prior configuring the cmd queue */ - if (txq_id == trans->txqs.cmd.q_id && + if (txq_id == trans_pcie->txqs.cmd.q_id && trans_pcie->scd_set_active) iwl_scd_enable_set_active(trans, 0); @@ -741,7 +1172,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, iwl_scd_txq_set_inactive(trans, txq_id); /* Set this queue as a chain-building queue unless it is CMD */ - if (txq_id != trans->txqs.cmd.q_id) + if (txq_id != trans_pcie->txqs.cmd.q_id) iwl_scd_txq_set_chain(trans, txq_id); if (cfg->aggregate) { @@ -811,7 +1242,7 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, SCD_QUEUE_STTS_REG_MSK); /* enable the scheduler for this queue (only) */ - if (txq_id == trans->txqs.cmd.q_id && + if (txq_id == trans_pcie->txqs.cmd.q_id && trans_pcie->scd_set_active) iwl_scd_enable_set_active(trans, BIT(txq_id)); @@ -830,7 +1261,8 @@ bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn, void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, bool shared_mode) { - struct iwl_txq *txq = trans->txqs.txq[txq_id]; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; txq->ampdu = !shared_mode; } @@ -843,8 +1275,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, SCD_TX_STTS_QUEUE_OFFSET(txq_id); static const u32 zero_val[4] = {}; - trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0; - trans->txqs.txq[txq_id]->frozen = false; + trans_pcie->txqs.txq[txq_id]->frozen_expiry_remainder = 0; + trans_pcie->txqs.txq[txq_id]->frozen = false; /* * Upon HW Rfkill - we stop the device, and then stop the queues @@ -852,7 +1284,7 @@ void iwl_trans_pcie_txq_disable(struct 
iwl_trans *trans, int txq_id, * allow the op_mode to call txq_disable after it already called * stop_device. */ - if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) { + if (!test_and_clear_bit(txq_id, trans_pcie->txqs.queue_used)) { WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), "queue %d not used", txq_id); return; @@ -866,7 +1298,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, } iwl_pcie_txq_unmap(trans, txq_id); - trans->txqs.txq[txq_id]->ampdu = false; + trans_pcie->txqs.txq[txq_id]->ampdu = false; IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); } @@ -875,12 +1307,13 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { - struct iwl_txq *txq = trans->txqs.txq[i]; + struct iwl_txq *txq = trans_pcie->txqs.txq[i]; - if (i == trans->txqs.cmd.q_id) + if (i == trans_pcie->txqs.cmd.q_id) continue; /* we skip the command queue (obviously) so it's OK to nest */ @@ -912,7 +1345,8 @@ static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd) { - struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; struct iwl_device_cmd *out_cmd; struct iwl_cmd_meta *out_meta; void *dup_buf = NULL; @@ -1024,7 +1458,8 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, out_cmd = txq->entries[idx].cmd; out_meta = &txq->entries[idx].meta; - memset(out_meta, 0, sizeof(*out_meta)); /* re-initialize to NULL */ + /* re-initialize, this also marks the SG list as unused */ + memset(out_meta, 0, sizeof(*out_meta)); if (cmd->flags & CMD_WANT_SKB) out_meta->source = cmd; @@ -1038,7 +1473,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, sizeof(struct iwl_cmd_header_wide)); out_cmd->hdr_wide.reserved = 0; out_cmd->hdr_wide.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | + cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) | INDEX_TO_SEQ(txq->write_ptr)); cmd_pos = sizeof(struct iwl_cmd_header_wide); @@ -1046,7 +1481,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, } else { out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); out_cmd->hdr.sequence = - cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) | + cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) | INDEX_TO_SEQ(txq->write_ptr)); out_cmd->hdr.group_id = 0; @@ -1097,7 +1532,7 @@ int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, iwl_get_cmd_string(trans, cmd->id), group_id, out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence), - cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id); + cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id); /* start the TFD with the minimum copy bytes */ tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); @@ -1196,14 +1631,14 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans, struct iwl_device_cmd *cmd; struct iwl_cmd_meta *meta; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; + struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; /* If a Tx command is being handled and it isn't in the actual * command queue then a command routing bug has been introduced * in the
queue management code. */ - if (WARN(txq_id != trans->txqs.cmd.q_id, + if (WARN(txq_id != trans_pcie->txqs.cmd.q_id, "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n", - txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr, + txq_id, trans_pcie->txqs.cmd.q_id, sequence, txq->read_ptr, txq->write_ptr)) { iwl_print_hex_error(trans, pkt, 32); return; @@ -1306,19 +1741,169 @@ static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb, } #ifdef CONFIG_INET +static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans, + size_t len, struct sk_buff *skb) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->txqs.tso_hdr_page); + struct iwl_tso_page_info *info; + struct page **page_ptr; + dma_addr_t phys; + void *ret; + + page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs); + + if (WARN_ON(*page_ptr)) + return NULL; + + if (!p->page) + goto alloc; + + /* + * Check if there's enough room on this page + * + * Note that we put a page chaining pointer *last* in the + * page - we need it somewhere, and if it's there then we + * avoid DMA mapping the last bits of the page which may + * trigger the 32-bit boundary hardware bug. + * + * (see also get_workaround_page() in tx-gen2.c) + */ + if (((unsigned long)p->pos & ~PAGE_MASK) + len < IWL_TSO_PAGE_DATA_SIZE) { + info = IWL_TSO_PAGE_INFO(page_address(p->page)); + goto out; + } + + /* We don't have enough room on this page, get a new one. */ + iwl_pcie_free_and_unmap_tso_page(trans, p->page); + +alloc: + p->page = alloc_page(GFP_ATOMIC); + if (!p->page) + return NULL; + p->pos = page_address(p->page); + + info = IWL_TSO_PAGE_INFO(page_address(p->page)); + + /* set the chaining pointer to NULL */ + info->next = NULL; + + /* Create a DMA mapping for the page */ + phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE, + DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + if (unlikely(dma_mapping_error(trans->dev, phys))) { + __free_page(p->page); + p->page = NULL; + + return NULL; + } + + /* Store physical address and set use count */ + info->dma_addr = phys; + refcount_set(&info->use_count, 1); +out: + *page_ptr = p->page; + /* Return an internal reference for the caller */ + refcount_inc(&info->use_count); + ret = p->pos; + p->pos += len; + + return ret; +} + +/** + * iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list + * @sgt: scatter gather table + * @addr: Virtual address + * + * Find the entry that includes the address for the given address and return + * correct physical address for the TB entry. + * + * Returns: Address for TB entry + */ +dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, void *addr) +{ + struct scatterlist *sg; + int i; + + for_each_sgtable_dma_sg(sgt, sg, i) { + if (addr >= sg_virt(sg) && + (u8 *)addr < (u8 *)sg_virt(sg) + sg_dma_len(sg)) + return sg_dma_address(sg) + + ((unsigned long)addr - (unsigned long)sg_virt(sg)); + } + + WARN_ON_ONCE(1); + + return DMA_MAPPING_ERROR; +} + +/** + * iwl_pcie_prep_tso - Prepare TSO page and SKB for sending + * @trans: transport private data + * @skb: the SKB to map + * @cmd_meta: command meta to store the scatter list information for unmapping + * @hdr: output argument for TSO headers + * @hdr_room: requested length for TSO headers + * + * Allocate space for a scatter gather list and TSO headers and map the SKB + * using the scatter gather list. The SKB is unmapped again when the page is + * free'ed again at the end of the operation. 
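+ * The sg_table and its scatterlist entries are placed in the same
+ * TSO page allocation as the headers, directly after the @hdr_room
+ * bytes requested by the caller.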
+ * + * Returns: newly allocated and mapped scatter gather table with list + */ +struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb, + struct iwl_cmd_meta *cmd_meta, + u8 **hdr, unsigned int hdr_room) +{ + struct sg_table *sgt; + + if (WARN_ON_ONCE(skb_has_frag_list(skb))) + return NULL; + + *hdr = iwl_pcie_get_page_hdr(trans, + hdr_room + __alignof__(struct sg_table) + + sizeof(struct sg_table) + + (skb_shinfo(skb)->nr_frags + 1) * + sizeof(struct scatterlist), + skb); + if (!*hdr) + return NULL; + + sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table)); + sgt->sgl = (void *)(sgt + 1); + + sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1); + + sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len); + if (WARN_ON_ONCE(sgt->orig_nents <= 0)) + return NULL; + + /* And map the entire SKB */ + if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0) + return NULL; + + /* Store non-zero (i.e. valid) offset for unmapping */ + cmd_meta->sg_offset = (unsigned long) sgt & ~PAGE_MASK; + + return sgt; +} + static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_txq *txq, u8 hdr_len, struct iwl_cmd_meta *out_meta, struct iwl_device_tx_cmd *dev_cmd, u16 tb1_len) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; unsigned int mss = skb_shinfo(skb)->gso_size; u16 length, iv_len, amsdu_pad; - u8 *start_hdr; - struct iwl_tso_hdr_page *hdr_page; + dma_addr_t start_hdr_phys; + u8 *start_hdr, *pos_hdr; + struct sg_table *sgt; struct tso_t tso; /* if the packet is protected, then it must be CCMP or GCMP */ @@ -1328,7 +1913,7 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, trace_iwlwifi_dev_tx(trans->dev, skb, iwl_txq_get_tfd(trans, txq, txq->write_ptr), - trans->txqs.tfd.size, + trans_pcie->txqs.tfd.size, &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); ip_hdrlen = skb_network_header_len(skb); @@ -1341,13 +1926,14 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; /* Our device supports 9 segments at most, it will fit in 1 page */ - hdr_page = get_page_hdr(trans, hdr_room, skb); - if (!hdr_page) + sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room); + if (!sgt) return -ENOMEM; - start_hdr = hdr_page->pos; - memcpy(hdr_page->pos, skb->data + hdr_len, iv_len); - hdr_page->pos += iv_len; + start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr); + pos_hdr = start_hdr; + memcpy(pos_hdr, skb->data + hdr_len, iv_len); + pos_hdr += iv_len; /* * Pull the ieee80211 header + IV to be able to use TSO core, @@ -1370,45 +1956,43 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, min_t(unsigned int, mss, total_len); unsigned int hdr_tb_len; dma_addr_t hdr_tb_phys; - u8 *subf_hdrs_start = hdr_page->pos; + u8 *subf_hdrs_start = pos_hdr; total_len -= data_left; - memset(hdr_page->pos, 0, amsdu_pad); - hdr_page->pos += amsdu_pad; + memset(pos_hdr, 0, amsdu_pad); + pos_hdr += amsdu_pad; amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + data_left)) & 0x3; - ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); - hdr_page->pos += ETH_ALEN; - ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); - hdr_page->pos += ETH_ALEN; + ether_addr_copy(pos_hdr, 
ieee80211_get_DA(hdr)); + pos_hdr += ETH_ALEN; + ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr)); + pos_hdr += ETH_ALEN; length = snap_ip_tcp_hdrlen + data_left; - *((__be16 *)hdr_page->pos) = cpu_to_be16(length); - hdr_page->pos += sizeof(length); + *((__be16 *)pos_hdr) = cpu_to_be16(length); + pos_hdr += sizeof(length); /* * This will copy the SNAP as well which will be considered * as MAC header. */ - tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); + tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len); - hdr_page->pos += snap_ip_tcp_hdrlen; + pos_hdr += snap_ip_tcp_hdrlen; + + hdr_tb_len = pos_hdr - start_hdr; + hdr_tb_phys = iwl_pcie_get_tso_page_phys(start_hdr); - hdr_tb_len = hdr_page->pos - start_hdr; - hdr_tb_phys = dma_map_single(trans->dev, start_hdr, - hdr_tb_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) - return -EINVAL; iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, hdr_tb_len, false); trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, hdr_tb_phys, hdr_tb_len); /* add this subframe's headers' length to the tx_cmd */ - le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); + le16_add_cpu(&tx_cmd->len, pos_hdr - subf_hdrs_start); /* prepare the start_hdr for the next subframe */ - start_hdr = hdr_page->pos; + start_hdr = pos_hdr; /* put the payload */ while (data_left) { @@ -1416,9 +2000,9 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, data_left); dma_addr_t tb_phys; - tb_phys = dma_map_single(trans->dev, tso.data, - size, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data); + /* Not a real mapping error, use direct comparison */ + if (unlikely(tb_phys == DMA_MAPPING_ERROR)) return -EINVAL; iwl_pcie_txq_build_tfd(trans, txq, tb_phys, @@ -1431,6 +2015,9 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, } } + dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room, + DMA_TO_DEVICE); + /* re-add the WiFi header and IV */ skb_push(skb, hdr_len + iv_len); @@ -1450,9 +2037,61 @@ static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, } #endif /* CONFIG_INET */ +#define IWL_TX_CRC_SIZE 4 +#define IWL_TX_DELIMITER_SIZE 4 + +/* + * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array + */ +static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, + struct iwl_txq *txq, u16 byte_cnt, + int num_tbs) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwlagn_scd_bc_tbl *scd_bc_tbl; + int write_ptr = txq->write_ptr; + int txq_id = txq->id; + u8 sec_ctl = 0; + u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; + __le16 bc_ent; + struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; + u8 sta_id = tx_cmd->sta_id; + + scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr; + + sec_ctl = tx_cmd->sec_ctl; + + switch (sec_ctl & TX_CMD_SEC_MSK) { + case TX_CMD_SEC_CCM: + len += IEEE80211_CCMP_MIC_LEN; + break; + case TX_CMD_SEC_TKIP: + len += IEEE80211_TKIP_ICV_LEN; + break; + case TX_CMD_SEC_WEP: + len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; + break; + } + if (trans_pcie->txqs.bc_table_dword) + len = DIV_ROUND_UP(len, 4); + + if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) + return; + + bc_ent = cpu_to_le16(len | (sta_id << 12)); + + scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; + + if (write_ptr <
TFD_QUEUE_SIZE_BC_DUP) + scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = + bc_ent; +} + int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd *dev_cmd, int txq_id) { + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct ieee80211_hdr *hdr; struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload; struct iwl_cmd_meta *out_meta; @@ -1467,14 +2106,14 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, u16 wifi_seq; bool amsdu; - txq = trans->txqs.txq[txq_id]; + txq = trans_pcie->txqs.txq[txq_id]; - if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), + if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used), "TX on unused queue %d\n", txq_id)) return -EINVAL; if (skb_is_nonlinear(skb) && - skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && + skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) && __skb_linearize(skb)) return -ENOMEM; @@ -1495,7 +2134,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_tx_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + - trans->txqs.dev_cmd_offs); + trans_pcie->txqs.dev_cmd_offs); *dev_cmd_ptr = dev_cmd; __skb_queue_tail(&txq->overflow_q, skb); @@ -1533,7 +2172,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, /* Set up first empty entry in queue's array of Tx/cmd buffers */ out_meta = &txq->entries[txq->write_ptr].meta; - out_meta->flags = 0; + memset(out_meta, 0, sizeof(*out_meta)); /* * The second TB (tb1) points to the remainder of the TX command @@ -1578,7 +2217,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, trace_iwlwifi_dev_tx(trans->dev, skb, iwl_txq_get_tfd(trans, txq, txq->write_ptr), - trans->txqs.tfd.size, + trans_pcie->txqs.tfd.size, &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, hdr_len); @@ -1613,8 +2252,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr); /* Set up entry for this TFD in Tx byte-count array */ iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), - iwl_txq_gen1_tfd_get_num_tbs(trans, - tfd)); + iwl_txq_gen1_tfd_get_num_tbs(tfd)); wait_write_ptr = ieee80211_has_morefrags(fc); @@ -1649,3 +2287,379 @@ out_err: spin_unlock(&txq->lock); return -1; } + +static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, + struct iwl_txq *txq, + int read_ptr) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr; + int txq_id = txq->id; + u8 sta_id = 0; + __le16 bc_ent; + struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; + struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; + + WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); + + if (txq_id != trans_pcie->txqs.cmd.q_id) + sta_id = tx_cmd->sta_id; + + bc_ent = cpu_to_le16(1 | (sta_id << 12)); + + scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; + + if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) + scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = + bc_ent; +} + +/* Frees buffers until index _not_ inclusive */ +void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, + struct sk_buff_head *skbs, bool is_flush) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; + int tfd_num, read_ptr, last_to_free; + int txq_read_ptr, txq_write_ptr; + + /* This function is not meant to release cmd 
queue */ + if (WARN_ON(txq_id == trans_pcie->txqs.cmd.q_id)) + return; + + if (WARN_ON(!txq)) + return; + + tfd_num = iwl_txq_get_cmd_index(txq, ssn); + + spin_lock_bh(&txq->reclaim_lock); + + spin_lock(&txq->lock); + txq_read_ptr = txq->read_ptr; + txq_write_ptr = txq->write_ptr; + spin_unlock(&txq->lock); + + read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr); + + if (!test_bit(txq_id, trans_pcie->txqs.queue_used)) { + IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", + txq_id, ssn); + goto out; + } + + if (read_ptr == tfd_num) + goto out; + + IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n", + txq_id, read_ptr, txq_read_ptr, tfd_num, ssn); + + /* Since we free until index _not_ inclusive, the one before index is + * the last we will free. This one must be used + */ + last_to_free = iwl_txq_dec_wrap(trans, tfd_num); + + if (!iwl_txq_used(txq, last_to_free, txq_read_ptr, txq_write_ptr)) { + IWL_ERR(trans, + "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", + __func__, txq_id, last_to_free, + trans->trans_cfg->base_params->max_tfd_queue_size, + txq_write_ptr, txq_read_ptr); + + iwl_op_mode_time_point(trans->op_mode, + IWL_FW_INI_TIME_POINT_FAKE_TX, + NULL); + goto out; + } + + if (WARN_ON(!skb_queue_empty(skbs))) + goto out; + + for (; + read_ptr != tfd_num; + txq_read_ptr = iwl_txq_inc_wrap(trans, txq_read_ptr), + read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr)) { + struct iwl_cmd_meta *cmd_meta = &txq->entries[read_ptr].meta; + struct sk_buff *skb = txq->entries[read_ptr].skb; + + if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n", + read_ptr, txq_read_ptr, txq_id)) + continue; + + iwl_pcie_free_tso_pages(trans, skb, cmd_meta); + + __skb_queue_tail(skbs, skb); + + txq->entries[read_ptr].skb = NULL; + + if (!trans->trans_cfg->gen2) + iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq, + txq_read_ptr); + + iwl_txq_free_tfd(trans, txq, txq_read_ptr); + } + + spin_lock(&txq->lock); + txq->read_ptr = txq_read_ptr; + + iwl_txq_progress(txq); + + if (iwl_txq_space(trans, txq) > txq->low_mark && + test_bit(txq_id, trans_pcie->txqs.queue_stopped)) { + struct sk_buff_head overflow_skbs; + struct sk_buff *skb; + + __skb_queue_head_init(&overflow_skbs); + skb_queue_splice_init(&txq->overflow_q, + is_flush ? skbs : &overflow_skbs); + + /* + * We are going to transmit from the overflow queue. + * Remember this state so that wait_for_txq_empty will know we + * are adding more packets to the TFD queue. It cannot rely on + * the state of &txq->overflow_q, as we just emptied it, but + * haven't TXed the content yet. + */ + txq->overflow_tx = true; + + /* + * This is tricky: we are in reclaim path and are holding + * reclaim_lock, so no one will try to access the txq data + * from that path. We stopped tx, so we can't have tx as well. + * Bottom line, we can unlock and re-lock later. + */ + spin_unlock(&txq->lock); + + while ((skb = __skb_dequeue(&overflow_skbs))) { + struct iwl_device_tx_cmd *dev_cmd_ptr; + + dev_cmd_ptr = *(void **)((u8 *)skb->cb + + trans_pcie->txqs.dev_cmd_offs); + + /* + * Note that we can very well be overflowing again. + * In that case, iwl_txq_space will be small again + * and we won't wake mac80211's queue. 
+ */ + iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); + } + + if (iwl_txq_space(trans, txq) > txq->low_mark) + iwl_trans_pcie_wake_queue(trans, txq); + + spin_lock(&txq->lock); + txq->overflow_tx = false; + } + + spin_unlock(&txq->lock); +out: + spin_unlock_bh(&txq->reclaim_lock); +} + +/* Set wr_ptr of specific device and txq */ +void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; + + spin_lock_bh(&txq->lock); + + txq->write_ptr = ptr; + txq->read_ptr = txq->write_ptr; + + spin_unlock_bh(&txq->lock); +} + +void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans, + unsigned long txqs, bool freeze) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + int queue; + + for_each_set_bit(queue, &txqs, BITS_PER_LONG) { + struct iwl_txq *txq = trans_pcie->txqs.txq[queue]; + unsigned long now; + + spin_lock_bh(&txq->lock); + + now = jiffies; + + if (txq->frozen == freeze) + goto next_queue; + + IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", + freeze ? "Freezing" : "Waking", queue); + + txq->frozen = freeze; + + if (txq->read_ptr == txq->write_ptr) + goto next_queue; + + if (freeze) { + if (unlikely(time_after(now, + txq->stuck_timer.expires))) { + /* + * The timer should have fired, maybe it is + * spinning right now on the lock. + */ + goto next_queue; + } + /* remember how long until the timer fires */ + txq->frozen_expiry_remainder = + txq->stuck_timer.expires - now; + del_timer(&txq->stuck_timer); + goto next_queue; + } + + /* + * Wake a non-empty queue -> arm timer with the + * remainder before it froze + */ + mod_timer(&txq->stuck_timer, + now + txq->frozen_expiry_remainder); + +next_queue: + spin_unlock_bh(&txq->lock); + } +} + +#define HOST_COMPLETE_TIMEOUT (2 * HZ) + +static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans, + struct iwl_host_cmd *cmd) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); + struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; + int cmd_idx; + int ret; + + IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str); + + if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, + &trans->status), + "Command %s: a command is already active!\n", cmd_str)) + return -EIO; + + IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); + + if (trans->trans_cfg->gen2) + cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); + else + cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); + + if (cmd_idx < 0) { + ret = cmd_idx; + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", + cmd_str, ret); + return ret; + } + + ret = wait_event_timeout(trans->wait_command_queue, + !test_bit(STATUS_SYNC_HCMD_ACTIVE, + &trans->status), + HOST_COMPLETE_TIMEOUT); + if (!ret) { + IWL_ERR(trans, "Error sending %s: time out after %dms.\n", + cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); + + IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", + txq->read_ptr, txq->write_ptr); + + clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); + IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", + cmd_str); + ret = -ETIMEDOUT; + + iwl_trans_sync_nmi(trans); + goto cancel; + } + + if (test_bit(STATUS_FW_ERROR, &trans->status)) { + if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, + &trans->status)) { + IWL_ERR(trans, "FW error in SYNC 
CMD %s\n", cmd_str); + dump_stack(); + } + ret = -EIO; + goto cancel; + } + + if (!(cmd->flags & CMD_SEND_IN_RFKILL) && + test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { + IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); + ret = -ERFKILL; + goto cancel; + } + + if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { + IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str); + ret = -EIO; + goto cancel; + } + + return 0; + +cancel: + if (cmd->flags & CMD_WANT_SKB) { + /* + * Cancel the CMD_WANT_SKB flag for the cmd in the + * TX cmd queue. Otherwise in case the cmd comes + * in later, it will possibly set an invalid + * address (cmd->meta.source). + */ + txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; + } + + if (cmd->resp_pkt) { + iwl_free_resp(cmd); + cmd->resp_pkt = NULL; + } + + return ret; +} + +int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, + struct iwl_host_cmd *cmd) +{ + /* Make sure the NIC is still alive in the bus */ + if (test_bit(STATUS_TRANS_DEAD, &trans->status)) + return -ENODEV; + + if (!(cmd->flags & CMD_SEND_IN_RFKILL) && + test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { + IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", + cmd->id); + return -ERFKILL; + } + + if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 && + !(cmd->flags & CMD_SEND_IN_D3))) { + IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id); + return -EHOSTDOWN; + } + + if (cmd->flags & CMD_ASYNC) { + int ret; + + /* An asynchronous command cannot expect an SKB to be set. */ + if (WARN_ON(cmd->flags & CMD_WANT_SKB)) + return -EINVAL; + + if (trans->trans_cfg->gen2) + ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); + else + ret = iwl_pcie_enqueue_hcmd(trans, cmd); + + if (ret < 0) { + IWL_ERR(trans, + "Error sending %s: enqueue_hcmd failed: %d\n", + iwl_get_cmd_string(trans, cmd->id), ret); + return ret; + } + return 0; + } + + return iwl_trans_pcie_send_hcmd_sync(trans, cmd); +} +IWL_EXPORT_SYMBOL(iwl_trans_pcie_send_hcmd); diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.c b/drivers/net/wireless/intel/iwlwifi/queue/tx.c deleted file mode 100644 index 6229c785c845..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.c +++ /dev/null @@ -1,1900 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause -/* - * Copyright (C) 2020-2024 Intel Corporation - */ -#include <net/tso.h> -#include <linux/tcp.h> - -#include "iwl-debug.h" -#include "iwl-io.h" -#include "fw/api/commands.h" -#include "fw/api/tx.h" -#include "fw/api/datapath.h" -#include "fw/api/debug.h" -#include "queue/tx.h" -#include "iwl-fh.h" -#include "iwl-scd.h" -#include <linux/dmapool.h> - -/* - * iwl_txq_update_byte_tbl - Set up entry in Tx byte-count array - */ -static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans, - struct iwl_txq *txq, u16 byte_cnt, - int num_tbs) -{ - int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); - u8 filled_tfd_size, num_fetch_chunks; - u16 len = byte_cnt; - __le16 bc_ent; - - if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window)) - return; - - filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) + - num_tbs * sizeof(struct iwl_tfh_tb); - /* - * filled_tfd_size contains the number of filled bytes in the TFD. - * Dividing it by 64 will give the number of chunks to fetch - * to SRAM- 0 for one chunk, 1 for 2 and so on. 
- * If, for example, TFD contains only 3 TBs then 32 bytes - * of the TFD are used, and only one chunk of 64 bytes should - * be fetched - */ - num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; - - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - struct iwl_gen3_bc_tbl_entry *scd_bc_tbl_gen3 = txq->bc_tbl.addr; - - /* Starting from AX210, the HW expects bytes */ - WARN_ON(trans->txqs.bc_table_dword); - WARN_ON(len > 0x3FFF); - bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14)); - scd_bc_tbl_gen3[idx].tfd_offset = bc_ent; - } else { - struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; - - /* Before AX210, the HW expects DW */ - WARN_ON(!trans->txqs.bc_table_dword); - len = DIV_ROUND_UP(len, 4); - WARN_ON(len > 0xFFF); - bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); - scd_bc_tbl->tfd_offset[idx] = bc_ent; - } -} - -/* - * iwl_txq_inc_wr_ptr - Send new write index to hardware - */ -void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq) -{ - lockdep_assert_held(&txq->lock); - - IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr); - - /* - * if not in power-save mode, uCode will never sleep when we're - * trying to tx (during RFKILL, we're not trying to tx). - */ - iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16)); -} - -static u8 iwl_txq_gen2_get_num_tbs(struct iwl_trans *trans, - struct iwl_tfh_tfd *tfd) -{ - return le16_to_cpu(tfd->num_tbs) & 0x1f; -} - -int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd, - dma_addr_t addr, u16 len) -{ - int idx = iwl_txq_gen2_get_num_tbs(trans, tfd); - struct iwl_tfh_tb *tb; - - /* Only WARN here so we know about the issue, but we mess up our - * unmap path because not every place currently checks for errors - * returned from this function - it can only return an error if - * there's no more space, and so when we know there is enough we - * don't always check ... 
- */ - WARN(iwl_txq_crosses_4g_boundary(addr, len), - "possible DMA problem with iova:0x%llx, len:%d\n", - (unsigned long long)addr, len); - - if (WARN_ON(idx >= IWL_TFH_NUM_TBS)) - return -EINVAL; - tb = &tfd->tbs[idx]; - - /* Each TFD can point to a maximum max_tbs Tx buffers */ - if (le16_to_cpu(tfd->num_tbs) >= trans->txqs.tfd.max_tbs) { - IWL_ERR(trans, "Error can not send more than %d chunks\n", - trans->txqs.tfd.max_tbs); - return -EINVAL; - } - - put_unaligned_le64(addr, &tb->addr); - tb->tb_len = cpu_to_le16(len); - - tfd->num_tbs = cpu_to_le16(idx + 1); - - return idx; -} - -static void iwl_txq_set_tfd_invalid_gen2(struct iwl_trans *trans, - struct iwl_tfh_tfd *tfd) -{ - tfd->num_tbs = 0; - - iwl_txq_gen2_set_tb(trans, tfd, trans->invalid_tx_cmd.dma, - trans->invalid_tx_cmd.size); -} - -void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, struct iwl_cmd_meta *meta, - struct iwl_tfh_tfd *tfd) -{ - int i, num_tbs; - - /* Sanity check on number of chunks */ - num_tbs = iwl_txq_gen2_get_num_tbs(trans, tfd); - - if (num_tbs > trans->txqs.tfd.max_tbs) { - IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); - return; - } - - /* first TB is never freed - it's the bidirectional DMA data */ - for (i = 1; i < num_tbs; i++) { - if (meta->tbs & BIT(i)) - dma_unmap_page(trans->dev, - le64_to_cpu(tfd->tbs[i].addr), - le16_to_cpu(tfd->tbs[i].tb_len), - DMA_TO_DEVICE); - else - dma_unmap_single(trans->dev, - le64_to_cpu(tfd->tbs[i].addr), - le16_to_cpu(tfd->tbs[i].tb_len), - DMA_TO_DEVICE); - } - - iwl_txq_set_tfd_invalid_gen2(trans, tfd); -} - -void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) -{ - /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and - * idx is bounded by n_window - */ - int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); - struct sk_buff *skb; - - lockdep_assert_held(&txq->lock); - - if (!txq->entries) - return; - - iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, - iwl_txq_get_tfd(trans, txq, idx)); - - skb = txq->entries[idx].skb; - - /* Can be called from irqs-disabled context - * If skb is not NULL, it means that the whole queue is being - * freed and that the queue is not empty - free the skb - */ - if (skb) { - iwl_op_mode_free_skb(trans->op_mode, skb); - txq->entries[idx].skb = NULL; - } -} - -static struct page *get_workaround_page(struct iwl_trans *trans, - struct sk_buff *skb) -{ - struct page **page_ptr; - struct page *ret; - - page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); - - ret = alloc_page(GFP_ATOMIC); - if (!ret) - return NULL; - - /* set the chaining pointer to the previous page if there */ - *(void **)((u8 *)page_address(ret) + PAGE_SIZE - sizeof(void *)) = *page_ptr; - *page_ptr = ret; - - return ret; -} - -/* - * Add a TB and if needed apply the FH HW bug workaround; - * meta != NULL indicates that it's a page mapping and we - * need to dma_unmap_page() and set the meta->tbs bit in - * this case. - */ -static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans, - struct sk_buff *skb, - struct iwl_tfh_tfd *tfd, - dma_addr_t phys, void *virt, - u16 len, struct iwl_cmd_meta *meta) -{ - dma_addr_t oldphys = phys; - struct page *page; - int ret; - - if (unlikely(dma_mapping_error(trans->dev, phys))) - return -ENOMEM; - - if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) { - ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); - - if (ret < 0) - goto unmap; - - if (meta) - meta->tbs |= BIT(ret); - - ret = 0; - goto trace; - } - - /* - * Work around a hardware bug. 
If (as expressed in the - * condition above) the TB ends on a 32-bit boundary, - * then the next TB may be accessed with the wrong - * address. - * To work around it, copy the data elsewhere and make - * a new mapping for it so the device will not fail. - */ - - if (WARN_ON(len > PAGE_SIZE - sizeof(void *))) { - ret = -ENOBUFS; - goto unmap; - } - - page = get_workaround_page(trans, skb); - if (!page) { - ret = -ENOMEM; - goto unmap; - } - - memcpy(page_address(page), virt, len); - - phys = dma_map_single(trans->dev, page_address(page), len, - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, phys))) - return -ENOMEM; - ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len); - if (ret < 0) { - /* unmap the new allocation as single */ - oldphys = phys; - meta = NULL; - goto unmap; - } - IWL_DEBUG_TX(trans, - "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n", - len, (unsigned long long)oldphys, - (unsigned long long)phys); - - ret = 0; -unmap: - if (meta) - dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE); - else - dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE); -trace: - trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len); - - return ret; -} - -#ifdef CONFIG_INET -struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, - struct sk_buff *skb) -{ - struct iwl_tso_hdr_page *p = this_cpu_ptr(trans->txqs.tso_hdr_page); - struct page **page_ptr; - - page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); - - if (WARN_ON(*page_ptr)) - return NULL; - - if (!p->page) - goto alloc; - - /* - * Check if there's enough room on this page - * - * Note that we put a page chaining pointer *last* in the - * page - we need it somewhere, and if it's there then we - * avoid DMA mapping the last bits of the page which may - * trigger the 32-bit boundary hardware bug. - * - * (see also get_workaround_page() in tx-gen2.c) - */ - if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE - - sizeof(void *)) - goto out; - - /* We don't have enough room on this page, get a new one. 
*/ - __free_page(p->page); - -alloc: - p->page = alloc_page(GFP_ATOMIC); - if (!p->page) - return NULL; - p->pos = page_address(p->page); - /* set the chaining pointer to NULL */ - *(void **)((u8 *)page_address(p->page) + PAGE_SIZE - sizeof(void *)) = NULL; -out: - *page_ptr = p->page; - get_page(p->page); - return p; -} -#endif - -static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans, - struct sk_buff *skb, - struct iwl_tfh_tfd *tfd, int start_len, - u8 hdr_len, - struct iwl_device_tx_cmd *dev_cmd) -{ -#ifdef CONFIG_INET - struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; - struct ieee80211_hdr *hdr = (void *)skb->data; - unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; - unsigned int mss = skb_shinfo(skb)->gso_size; - u16 length, amsdu_pad; - u8 *start_hdr; - struct iwl_tso_hdr_page *hdr_page; - struct tso_t tso; - - trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), - &dev_cmd->hdr, start_len, 0); - - ip_hdrlen = skb_network_header_len(skb); - snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); - total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len; - amsdu_pad = 0; - - /* total amount of header we may need for this A-MSDU */ - hdr_room = DIV_ROUND_UP(total_len, mss) * - (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)); - - /* Our device supports 9 segments at most, it will fit in 1 page */ - hdr_page = get_page_hdr(trans, hdr_room, skb); - if (!hdr_page) - return -ENOMEM; - - start_hdr = hdr_page->pos; - - /* - * Pull the ieee80211 header to be able to use TSO core, - * we will restore it for the tx_status flow. - */ - skb_pull(skb, hdr_len); - - /* - * Remove the length of all the headers that we don't actually - * have in the MPDU by themselves, but that we duplicate into - * all the different MSDUs inside the A-MSDU. - */ - le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); - - tso_start(skb, &tso); - - while (total_len) { - /* this is the data left for this subframe */ - unsigned int data_left = min_t(unsigned int, mss, total_len); - unsigned int tb_len; - dma_addr_t tb_phys; - u8 *subf_hdrs_start = hdr_page->pos; - - total_len -= data_left; - - memset(hdr_page->pos, 0, amsdu_pad); - hdr_page->pos += amsdu_pad; - amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + - data_left)) & 0x3; - ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr)); - hdr_page->pos += ETH_ALEN; - ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr)); - hdr_page->pos += ETH_ALEN; - - length = snap_ip_tcp_hdrlen + data_left; - *((__be16 *)hdr_page->pos) = cpu_to_be16(length); - hdr_page->pos += sizeof(length); - - /* - * This will copy the SNAP as well which will be considered - * as MAC header. - */ - tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len); - - hdr_page->pos += snap_ip_tcp_hdrlen; - - tb_len = hdr_page->pos - start_hdr; - tb_phys = dma_map_single(trans->dev, start_hdr, - tb_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) - goto out_err; - /* - * No need for _with_wa, this is from the TSO page and - * we leave some space at the end of it so can't hit - * the buggy scenario. 
- */ - iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len); - trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, - tb_phys, tb_len); - /* add this subframe's headers' length to the tx_cmd */ - le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start); - - /* prepare the start_hdr for the next subframe */ - start_hdr = hdr_page->pos; - - /* put the payload */ - while (data_left) { - int ret; - - tb_len = min_t(unsigned int, tso.size, data_left); - tb_phys = dma_map_single(trans->dev, tso.data, - tb_len, DMA_TO_DEVICE); - ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, - tb_phys, tso.data, - tb_len, NULL); - if (ret) - goto out_err; - - data_left -= tb_len; - tso_build_data(skb, &tso, tb_len); - } - } - - /* re -add the WiFi header */ - skb_push(skb, hdr_len); - - return 0; - -out_err: -#endif - return -EINVAL; -} - -static struct -iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans, - struct iwl_txq *txq, - struct iwl_device_tx_cmd *dev_cmd, - struct sk_buff *skb, - struct iwl_cmd_meta *out_meta, - int hdr_len, - int tx_cmd_len) -{ - int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); - struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); - dma_addr_t tb_phys; - int len; - void *tb1_addr; - - tb_phys = iwl_txq_get_first_tb_dma(txq, idx); - - /* - * No need for _with_wa, the first TB allocation is aligned up - * to a 64-byte boundary and thus can't be at the end or cross - * a page boundary (much less a 2^32 boundary). - */ - iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); - - /* - * The second TB (tb1) points to the remainder of the TX command - * and the 802.11 header - dword aligned size - * (This calculation modifies the TX command, so do it before the - * setup of the first TB) - */ - len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - - IWL_FIRST_TB_SIZE; - - /* do not align A-MSDU to dword as the subframe header aligns it */ - - /* map the data for TB1 */ - tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; - tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) - goto out_err; - /* - * No need for _with_wa(), we ensure (via alignment) that the data - * here can never cross or end at a page boundary. 
- */ - iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len); - - if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, len + IWL_FIRST_TB_SIZE, - hdr_len, dev_cmd)) - goto out_err; - - /* building the A-MSDU might have changed this data, memcpy it now */ - memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); - return tfd; - -out_err: - iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); - return NULL; -} - -static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans, - struct sk_buff *skb, - struct iwl_tfh_tfd *tfd, - struct iwl_cmd_meta *out_meta) -{ - int i; - - for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { - const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; - dma_addr_t tb_phys; - unsigned int fragsz = skb_frag_size(frag); - int ret; - - if (!fragsz) - continue; - - tb_phys = skb_frag_dma_map(trans->dev, frag, 0, - fragsz, DMA_TO_DEVICE); - ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, - skb_frag_address(frag), - fragsz, out_meta); - if (ret) - return ret; - } - - return 0; -} - -static struct -iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans, - struct iwl_txq *txq, - struct iwl_device_tx_cmd *dev_cmd, - struct sk_buff *skb, - struct iwl_cmd_meta *out_meta, - int hdr_len, - int tx_cmd_len, - bool pad) -{ - int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); - struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); - dma_addr_t tb_phys; - int len, tb1_len, tb2_len; - void *tb1_addr; - struct sk_buff *frag; - - tb_phys = iwl_txq_get_first_tb_dma(txq, idx); - - /* The first TB points to bi-directional DMA data */ - memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE); - - /* - * No need for _with_wa, the first TB allocation is aligned up - * to a 64-byte boundary and thus can't be at the end or cross - * a page boundary (much less a 2^32 boundary). - */ - iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); - - /* - * The second TB (tb1) points to the remainder of the TX command - * and the 802.11 header - dword aligned size - * (This calculation modifies the TX command, so do it before the - * setup of the first TB) - */ - len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - - IWL_FIRST_TB_SIZE; - - if (pad) - tb1_len = ALIGN(len, 4); - else - tb1_len = len; - - /* map the data for TB1 */ - tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; - tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(trans->dev, tb_phys))) - goto out_err; - /* - * No need for _with_wa(), we ensure (via alignment) that the data - * here can never cross or end at a page boundary. 
- */ - iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len); - trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr, - IWL_FIRST_TB_SIZE + tb1_len, hdr_len); - - /* set up TFD's third entry to point to remainder of skb's head */ - tb2_len = skb_headlen(skb) - hdr_len; - - if (tb2_len > 0) { - int ret; - - tb_phys = dma_map_single(trans->dev, skb->data + hdr_len, - tb2_len, DMA_TO_DEVICE); - ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, - skb->data + hdr_len, tb2_len, - NULL); - if (ret) - goto out_err; - } - - if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta)) - goto out_err; - - skb_walk_frags(skb, frag) { - int ret; - - tb_phys = dma_map_single(trans->dev, frag->data, - skb_headlen(frag), DMA_TO_DEVICE); - ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys, - frag->data, - skb_headlen(frag), NULL); - if (ret) - goto out_err; - if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta)) - goto out_err; - } - - return tfd; - -out_err: - iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd); - return NULL; -} - -static -struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans, - struct iwl_txq *txq, - struct iwl_device_tx_cmd *dev_cmd, - struct sk_buff *skb, - struct iwl_cmd_meta *out_meta) -{ - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); - struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx); - int len, hdr_len; - bool amsdu; - - /* There must be data left over for TB1 or this code must be changed */ - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); - BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + - offsetofend(struct iwl_tx_cmd_gen2, dram_info) > - IWL_FIRST_TB_SIZE); - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) < IWL_FIRST_TB_SIZE); - BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + - offsetofend(struct iwl_tx_cmd_gen3, dram_info) > - IWL_FIRST_TB_SIZE); - - memset(tfd, 0, sizeof(*tfd)); - - if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) - len = sizeof(struct iwl_tx_cmd_gen2); - else - len = sizeof(struct iwl_tx_cmd_gen3); - - amsdu = ieee80211_is_data_qos(hdr->frame_control) && - (*ieee80211_get_qos_ctl(hdr) & - IEEE80211_QOS_CTL_A_MSDU_PRESENT); - - hdr_len = ieee80211_hdrlen(hdr->frame_control); - - /* - * Only build A-MSDUs here if doing so by GSO, otherwise it may be - * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been - * built in the higher layers already. - */ - if (amsdu && skb_shinfo(skb)->gso_size) - return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, - out_meta, hdr_len, len); - return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, - hdr_len, len, !amsdu); -} - -int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q) -{ - unsigned int max; - unsigned int used; - - /* - * To avoid ambiguity between empty and completely full queues, there - * should always be less than max_tfd_queue_size elements in the queue. - * If q->n_window is smaller than max_tfd_queue_size, there is no need - * to reserve any queue entries for this purpose. - */ - if (q->n_window < trans->trans_cfg->base_params->max_tfd_queue_size) - max = q->n_window; - else - max = trans->trans_cfg->base_params->max_tfd_queue_size - 1; - - /* - * max_tfd_queue_size is a power of 2, so the following is equivalent to - * modulo by max_tfd_queue_size and is well defined. 
- */ - used = (q->write_ptr - q->read_ptr) & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1); - - if (WARN_ON(used > max)) - return 0; - - return max - used; -} - -int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_tx_cmd *dev_cmd, int txq_id) -{ - struct iwl_cmd_meta *out_meta; - struct iwl_txq *txq = trans->txqs.txq[txq_id]; - u16 cmd_len; - int idx; - void *tfd; - - if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, - "queue %d out of range", txq_id)) - return -EINVAL; - - if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used), - "TX on unused queue %d\n", txq_id)) - return -EINVAL; - - if (skb_is_nonlinear(skb) && - skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) && - __skb_linearize(skb)) - return -ENOMEM; - - spin_lock(&txq->lock); - - if (iwl_txq_space(trans, txq) < txq->high_mark) { - iwl_txq_stop(trans, txq); - - /* don't put the packet on the ring, if there is no room */ - if (unlikely(iwl_txq_space(trans, txq) < 3)) { - struct iwl_device_tx_cmd **dev_cmd_ptr; - - dev_cmd_ptr = (void *)((u8 *)skb->cb + - trans->txqs.dev_cmd_offs); - - *dev_cmd_ptr = dev_cmd; - __skb_queue_tail(&txq->overflow_q, skb); - spin_unlock(&txq->lock); - return 0; - } - } - - idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); - - /* Set up driver data for this TFD */ - txq->entries[idx].skb = skb; - txq->entries[idx].cmd = dev_cmd; - - dev_cmd->hdr.sequence = - cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | - INDEX_TO_SEQ(idx))); - - /* Set up first empty entry in queue's array of Tx/cmd buffers */ - out_meta = &txq->entries[idx].meta; - out_meta->flags = 0; - - tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta); - if (!tfd) { - spin_unlock(&txq->lock); - return -1; - } - - if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) { - struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = - (void *)dev_cmd->payload; - - cmd_len = le16_to_cpu(tx_cmd_gen3->len); - } else { - struct iwl_tx_cmd_gen2 *tx_cmd_gen2 = - (void *)dev_cmd->payload; - - cmd_len = le16_to_cpu(tx_cmd_gen2->len); - } - - /* Set up entry for this TFD in Tx byte-count array */ - iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len, - iwl_txq_gen2_get_num_tbs(trans, tfd)); - - /* start timer if queue currently empty */ - if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) - mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); - - /* Tell device the write index *just past* this latest filled TFD */ - txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); - iwl_txq_inc_wr_ptr(trans, txq); - /* - * At this point the frame is "transmitted" successfully - * and we will get a TX status notification eventually. 
- */ - spin_unlock(&txq->lock); - return 0; -} - -/*************** HOST COMMAND QUEUE FUNCTIONS *****/ - -/* - * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's - */ -void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id) -{ - struct iwl_txq *txq = trans->txqs.txq[txq_id]; - - spin_lock_bh(&txq->lock); - while (txq->write_ptr != txq->read_ptr) { - IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", - txq_id, txq->read_ptr); - - if (txq_id != trans->txqs.cmd.q_id) { - int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr); - struct sk_buff *skb = txq->entries[idx].skb; - - if (!WARN_ON_ONCE(!skb)) - iwl_txq_free_tso_page(trans, skb); - } - iwl_txq_gen2_free_tfd(trans, txq); - txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); - } - - while (!skb_queue_empty(&txq->overflow_q)) { - struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); - - iwl_op_mode_free_skb(trans->op_mode, skb); - } - - spin_unlock_bh(&txq->lock); - - /* just in case - this queue may have been stopped */ - iwl_wake_queue(trans, txq); -} - -static void iwl_txq_gen2_free_memory(struct iwl_trans *trans, - struct iwl_txq *txq) -{ - struct device *dev = trans->dev; - - /* De-alloc circular buffer of TFDs */ - if (txq->tfds) { - dma_free_coherent(dev, - trans->txqs.tfd.size * txq->n_window, - txq->tfds, txq->dma_addr); - dma_free_coherent(dev, - sizeof(*txq->first_tb_bufs) * txq->n_window, - txq->first_tb_bufs, txq->first_tb_dma); - } - - kfree(txq->entries); - if (txq->bc_tbl.addr) - dma_pool_free(trans->txqs.bc_pool, - txq->bc_tbl.addr, txq->bc_tbl.dma); - kfree(txq); -} - -/* - * iwl_pcie_txq_free - Deallocate DMA queue. - * @txq: Transmit queue to deallocate. - * - * Empty queue by removing and destroying all BD's. - * Free all buffers. - * 0-fill, but do not free "txq" descriptor structure. - */ -static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id) -{ - struct iwl_txq *txq; - int i; - - if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES, - "queue %d out of range", txq_id)) - return; - - txq = trans->txqs.txq[txq_id]; - - if (WARN_ON(!txq)) - return; - - iwl_txq_gen2_unmap(trans, txq_id); - - /* De-alloc array of command/tx buffers */ - if (txq_id == trans->txqs.cmd.q_id) - for (i = 0; i < txq->n_window; i++) { - kfree_sensitive(txq->entries[i].cmd); - kfree_sensitive(txq->entries[i].free_buf); - } - del_timer_sync(&txq->stuck_timer); - - iwl_txq_gen2_free_memory(trans, txq); - - trans->txqs.txq[txq_id] = NULL; - - clear_bit(txq_id, trans->txqs.queue_used); -} - -/* - * iwl_queue_init - Initialize queue's high/low-water and read/write indexes - */ -static int iwl_queue_init(struct iwl_txq *q, int slots_num) -{ - q->n_window = slots_num; - - /* slots_num must be power-of-two size, otherwise - * iwl_txq_get_cmd_index is broken. */ - if (WARN_ON(!is_power_of_2(slots_num))) - return -EINVAL; - - q->low_mark = q->n_window / 4; - if (q->low_mark < 4) - q->low_mark = 4; - - q->high_mark = q->n_window / 8; - if (q->high_mark < 2) - q->high_mark = 2; - - q->write_ptr = 0; - q->read_ptr = 0; - - return 0; -} - -int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, - bool cmd_queue) -{ - int ret; - u32 tfd_queue_max_size = - trans->trans_cfg->base_params->max_tfd_queue_size; - - txq->need_update = false; - - /* max_tfd_queue_size must be power-of-two size, otherwise - * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. 
*/ - if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), - "Max tfd queue size must be a power of two, but is %d", - tfd_queue_max_size)) - return -EINVAL; - - /* Initialize queue's high/low-water marks, and head/tail indexes */ - ret = iwl_queue_init(txq, slots_num); - if (ret) - return ret; - - spin_lock_init(&txq->lock); - - if (cmd_queue) { - static struct lock_class_key iwl_txq_cmd_queue_lock_class; - - lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class); - } - - __skb_queue_head_init(&txq->overflow_q); - - return 0; -} - -void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb) -{ - struct page **page_ptr; - struct page *next; - - page_ptr = (void *)((u8 *)skb->cb + trans->txqs.page_offs); - next = *page_ptr; - *page_ptr = NULL; - - while (next) { - struct page *tmp = next; - - next = *(void **)((u8 *)page_address(next) + PAGE_SIZE - - sizeof(void *)); - __free_page(tmp); - } -} - -void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) -{ - u32 txq_id = txq->id; - u32 status; - bool active; - u8 fifo; - - if (trans->trans_cfg->gen2) { - IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, - txq->read_ptr, txq->write_ptr); - /* TODO: access new SCD registers and dump them */ - return; - } - - status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); - fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; - active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); - - IWL_ERR(trans, - "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n", - txq_id, active ? "" : "in", fifo, - jiffies_to_msecs(txq->wd_timeout), - txq->read_ptr, txq->write_ptr, - iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1), - iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1), - iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); -} - -static void iwl_txq_stuck_timer(struct timer_list *t) -{ - struct iwl_txq *txq = from_timer(txq, t, stuck_timer); - struct iwl_trans *trans = txq->trans; - - spin_lock(&txq->lock); - /* check if triggered erroneously */ - if (txq->read_ptr == txq->write_ptr) { - spin_unlock(&txq->lock); - return; - } - spin_unlock(&txq->lock); - - iwl_txq_log_scd_error(trans, txq); - - iwl_force_nmi(trans); -} - -static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans, - struct iwl_tfd *tfd) -{ - tfd->num_tbs = 0; - - iwl_pcie_gen1_tfd_set_tb(trans, tfd, 0, trans->invalid_tx_cmd.dma, - trans->invalid_tx_cmd.size); -} - -int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, - bool cmd_queue) -{ - size_t num_entries = trans->trans_cfg->gen2 ? 
- slots_num : trans->trans_cfg->base_params->max_tfd_queue_size; - size_t tfd_sz; - size_t tb0_buf_sz; - int i; - - if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num)) - return -EINVAL; - - if (WARN_ON(txq->entries || txq->tfds)) - return -EINVAL; - - tfd_sz = trans->txqs.tfd.size * num_entries; - - timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0); - txq->trans = trans; - - txq->n_window = slots_num; - - txq->entries = kcalloc(slots_num, - sizeof(struct iwl_pcie_txq_entry), - GFP_KERNEL); - - if (!txq->entries) - goto error; - - if (cmd_queue) - for (i = 0; i < slots_num; i++) { - txq->entries[i].cmd = - kmalloc(sizeof(struct iwl_device_cmd), - GFP_KERNEL); - if (!txq->entries[i].cmd) - goto error; - } - - /* Circular buffer of transmit frame descriptors (TFDs), - * shared with device */ - txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, - &txq->dma_addr, GFP_KERNEL); - if (!txq->tfds) - goto error; - - BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN); - - tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; - - txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, - &txq->first_tb_dma, - GFP_KERNEL); - if (!txq->first_tb_bufs) - goto err_free_tfds; - - for (i = 0; i < num_entries; i++) { - void *tfd = iwl_txq_get_tfd(trans, txq, i); - - if (trans->trans_cfg->gen2) - iwl_txq_set_tfd_invalid_gen2(trans, tfd); - else - iwl_txq_set_tfd_invalid_gen1(trans, tfd); - } - - return 0; -err_free_tfds: - dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); - txq->tfds = NULL; -error: - if (txq->entries && cmd_queue) - for (i = 0; i < slots_num; i++) - kfree(txq->entries[i].cmd); - kfree(txq->entries); - txq->entries = NULL; - - return -ENOMEM; -} - -static struct iwl_txq * -iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout) -{ - size_t bc_tbl_size, bc_tbl_entries; - struct iwl_txq *txq; - int ret; - - WARN_ON(!trans->txqs.bc_tbl_size); - - bc_tbl_size = trans->txqs.bc_tbl_size; - bc_tbl_entries = bc_tbl_size / sizeof(u16); - - if (WARN_ON(size > bc_tbl_entries)) - return ERR_PTR(-EINVAL); - - txq = kzalloc(sizeof(*txq), GFP_KERNEL); - if (!txq) - return ERR_PTR(-ENOMEM); - - txq->bc_tbl.addr = dma_pool_alloc(trans->txqs.bc_pool, GFP_KERNEL, - &txq->bc_tbl.dma); - if (!txq->bc_tbl.addr) { - IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); - kfree(txq); - return ERR_PTR(-ENOMEM); - } - - ret = iwl_txq_alloc(trans, txq, size, false); - if (ret) { - IWL_ERR(trans, "Tx queue alloc failed\n"); - goto error; - } - ret = iwl_txq_init(trans, txq, size, false); - if (ret) { - IWL_ERR(trans, "Tx queue init failed\n"); - goto error; - } - - txq->wd_timeout = msecs_to_jiffies(timeout); - - return txq; - -error: - iwl_txq_gen2_free_memory(trans, txq); - return ERR_PTR(ret); -} - -static int iwl_txq_alloc_response(struct iwl_trans *trans, struct iwl_txq *txq, - struct iwl_host_cmd *hcmd) -{ - struct iwl_tx_queue_cfg_rsp *rsp; - int ret, qid; - u32 wr_ptr; - - if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) != - sizeof(*rsp))) { - ret = -EINVAL; - goto error_free_resp; - } - - rsp = (void *)hcmd->resp_pkt->data; - qid = le16_to_cpu(rsp->queue_number); - wr_ptr = le16_to_cpu(rsp->write_pointer); - - if (qid >= ARRAY_SIZE(trans->txqs.txq)) { - WARN_ONCE(1, "queue index %d unsupported", qid); - ret = -EIO; - goto error_free_resp; - } - - if (test_and_set_bit(qid, trans->txqs.queue_used)) { - WARN_ONCE(1, "queue %d already used", qid); - ret = -EIO; - goto error_free_resp; - } - - if 
(WARN_ONCE(trans->txqs.txq[qid], - "queue %d already allocated\n", qid)) { - ret = -EIO; - goto error_free_resp; - } - - txq->id = qid; - trans->txqs.txq[qid] = txq; - wr_ptr &= (trans->trans_cfg->base_params->max_tfd_queue_size - 1); - - /* Place first TFD at index corresponding to start sequence number */ - txq->read_ptr = wr_ptr; - txq->write_ptr = wr_ptr; - - IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); - - iwl_free_resp(hcmd); - return qid; - -error_free_resp: - iwl_free_resp(hcmd); - iwl_txq_gen2_free_memory(trans, txq); - return ret; -} - -int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask, - u8 tid, int size, unsigned int timeout) -{ - struct iwl_txq *txq; - union { - struct iwl_tx_queue_cfg_cmd old; - struct iwl_scd_queue_cfg_cmd new; - } cmd; - struct iwl_host_cmd hcmd = { - .flags = CMD_WANT_SKB, - }; - int ret; - - if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_BZ && - trans->hw_rev_step == SILICON_A_STEP) - size = 4096; - - txq = iwl_txq_dyn_alloc_dma(trans, size, timeout); - if (IS_ERR(txq)) - return PTR_ERR(txq); - - if (trans->txqs.queue_alloc_cmd_ver == 0) { - memset(&cmd.old, 0, sizeof(cmd.old)); - cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr); - cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma); - cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); - cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE); - cmd.old.tid = tid; - - if (hweight32(sta_mask) != 1) { - ret = -EINVAL; - goto error; - } - cmd.old.sta_id = ffs(sta_mask) - 1; - - hcmd.id = SCD_QUEUE_CFG; - hcmd.len[0] = sizeof(cmd.old); - hcmd.data[0] = &cmd.old; - } else if (trans->txqs.queue_alloc_cmd_ver == 3) { - memset(&cmd.new, 0, sizeof(cmd.new)); - cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD); - cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr); - cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma); - cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size)); - cmd.new.u.add.flags = cpu_to_le32(flags); - cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask); - cmd.new.u.add.tid = tid; - - hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD); - hcmd.len[0] = sizeof(cmd.new); - hcmd.data[0] = &cmd.new; - } else { - ret = -EOPNOTSUPP; - goto error; - } - - ret = iwl_trans_send_cmd(trans, &hcmd); - if (ret) - goto error; - - return iwl_txq_alloc_response(trans, txq, &hcmd); - -error: - iwl_txq_gen2_free_memory(trans, txq); - return ret; -} - -void iwl_txq_dyn_free(struct iwl_trans *trans, int queue) -{ - if (WARN(queue >= IWL_MAX_TVQM_QUEUES, - "queue %d out of range", queue)) - return; - - /* - * Upon HW Rfkill - we stop the device, and then stop the queues - * in the op_mode. Just for the sake of the simplicity of the op_mode, - * allow the op_mode to call txq_disable after it already called - * stop_device. 
- */ - if (!test_and_clear_bit(queue, trans->txqs.queue_used)) { - WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), - "queue %d not used", queue); - return; - } - - iwl_txq_gen2_free(trans, queue); - - IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue); -} - -void iwl_txq_gen2_tx_free(struct iwl_trans *trans) -{ - int i; - - memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used)); - - /* Free all TX queues */ - for (i = 0; i < ARRAY_SIZE(trans->txqs.txq); i++) { - if (!trans->txqs.txq[i]) - continue; - - iwl_txq_gen2_free(trans, i); - } -} - -int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size) -{ - struct iwl_txq *queue; - int ret; - - /* alloc and init the tx queue */ - if (!trans->txqs.txq[txq_id]) { - queue = kzalloc(sizeof(*queue), GFP_KERNEL); - if (!queue) { - IWL_ERR(trans, "Not enough memory for tx queue\n"); - return -ENOMEM; - } - trans->txqs.txq[txq_id] = queue; - ret = iwl_txq_alloc(trans, queue, queue_size, true); - if (ret) { - IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); - goto error; - } - } else { - queue = trans->txqs.txq[txq_id]; - } - - ret = iwl_txq_init(trans, queue, queue_size, - (txq_id == trans->txqs.cmd.q_id)); - if (ret) { - IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); - goto error; - } - trans->txqs.txq[txq_id]->id = txq_id; - set_bit(txq_id, trans->txqs.queue_used); - - return 0; - -error: - iwl_txq_gen2_tx_free(trans); - return ret; -} - -static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans, - struct iwl_tfd *tfd, u8 idx) -{ - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; - dma_addr_t addr; - dma_addr_t hi_len; - - addr = get_unaligned_le32(&tb->lo); - - if (sizeof(dma_addr_t) <= sizeof(u32)) - return addr; - - hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; - - /* - * shift by 16 twice to avoid warnings on 32-bit - * (where this code never runs anyway due to the - * if statement above) - */ - return addr | ((hi_len << 16) << 16); -} - -void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, - struct iwl_cmd_meta *meta, - struct iwl_txq *txq, int index) -{ - int i, num_tbs; - struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index); - - /* Sanity check on number of chunks */ - num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd); - - if (num_tbs > trans->txqs.tfd.max_tbs) { - IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); - /* @todo issue fatal error, it is quite serious situation */ - return; - } - - /* first TB is never freed - it's the bidirectional DMA data */ - - for (i = 1; i < num_tbs; i++) { - if (meta->tbs & BIT(i)) - dma_unmap_page(trans->dev, - iwl_txq_gen1_tfd_tb_get_addr(trans, - tfd, i), - iwl_txq_gen1_tfd_tb_get_len(trans, - tfd, i), - DMA_TO_DEVICE); - else - dma_unmap_single(trans->dev, - iwl_txq_gen1_tfd_tb_get_addr(trans, - tfd, i), - iwl_txq_gen1_tfd_tb_get_len(trans, - tfd, i), - DMA_TO_DEVICE); - } - - meta->tbs = 0; - - iwl_txq_set_tfd_invalid_gen1(trans, tfd); -} - -#define IWL_TX_CRC_SIZE 4 -#define IWL_TX_DELIMITER_SIZE 4 - -/* - * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array - */ -void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, - struct iwl_txq *txq, u16 byte_cnt, - int num_tbs) -{ - struct iwlagn_scd_bc_tbl *scd_bc_tbl; - int write_ptr = txq->write_ptr; - int txq_id = txq->id; - u8 sec_ctl = 0; - u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; - __le16 bc_ent; - struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; - struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; - u8 sta_id = 
tx_cmd->sta_id; - - scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; - - sec_ctl = tx_cmd->sec_ctl; - - switch (sec_ctl & TX_CMD_SEC_MSK) { - case TX_CMD_SEC_CCM: - len += IEEE80211_CCMP_MIC_LEN; - break; - case TX_CMD_SEC_TKIP: - len += IEEE80211_TKIP_ICV_LEN; - break; - case TX_CMD_SEC_WEP: - len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN; - break; - } - if (trans->txqs.bc_table_dword) - len = DIV_ROUND_UP(len, 4); - - if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX)) - return; - - bc_ent = cpu_to_le16(len | (sta_id << 12)); - - scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent; - - if (write_ptr < TFD_QUEUE_SIZE_BC_DUP) - scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = - bc_ent; -} - -void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, - struct iwl_txq *txq) -{ - struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans->txqs.scd_bc_tbls.addr; - int txq_id = txq->id; - int read_ptr = txq->read_ptr; - u8 sta_id = 0; - __le16 bc_ent; - struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; - struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; - - WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); - - if (txq_id != trans->txqs.cmd.q_id) - sta_id = tx_cmd->sta_id; - - bc_ent = cpu_to_le16(1 | (sta_id << 12)); - - scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; - - if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) - scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = - bc_ent; -} - -/* - * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] - * @trans - transport private data - * @txq - tx queue - * @dma_dir - the direction of the DMA mapping - * - * Does NOT advance any TFD circular buffer read/write indexes - * Does NOT free the TFD itself (which is within circular buffer) - */ -void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq) -{ - /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and - * idx is bounded by n_window - */ - int rd_ptr = txq->read_ptr; - int idx = iwl_txq_get_cmd_index(txq, rd_ptr); - struct sk_buff *skb; - - lockdep_assert_held(&txq->lock); - - if (!txq->entries) - return; - - /* We have only q->n_window txq->entries, but we use - * TFD_QUEUE_SIZE_MAX tfds - */ - if (trans->trans_cfg->gen2) - iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, - iwl_txq_get_tfd(trans, txq, rd_ptr)); - else - iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, - txq, rd_ptr); - - /* free SKB */ - skb = txq->entries[idx].skb; - - /* Can be called from irqs-disabled context - * If skb is not NULL, it means that the whole queue is being - * freed and that the queue is not empty - free the skb - */ - if (skb) { - iwl_op_mode_free_skb(trans->op_mode, skb); - txq->entries[idx].skb = NULL; - } -} - -void iwl_txq_progress(struct iwl_txq *txq) -{ - lockdep_assert_held(&txq->lock); - - if (!txq->wd_timeout) - return; - - /* - * station is asleep and we send data - that must - * be uAPSD or PS-Poll. Don't rearm the timer. 
- */ - if (txq->frozen) - return; - - /* - * if empty delete timer, otherwise move timer forward - * since we're making progress on this queue - */ - if (txq->read_ptr == txq->write_ptr) - del_timer(&txq->stuck_timer); - else - mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); -} - -/* Frees buffers until index _not_ inclusive */ -void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, - struct sk_buff_head *skbs, bool is_flush) -{ - struct iwl_txq *txq = trans->txqs.txq[txq_id]; - int tfd_num, read_ptr, last_to_free; - - /* This function is not meant to release cmd queue*/ - if (WARN_ON(txq_id == trans->txqs.cmd.q_id)) - return; - - if (WARN_ON(!txq)) - return; - - tfd_num = iwl_txq_get_cmd_index(txq, ssn); - - spin_lock_bh(&txq->lock); - read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr); - - if (!test_bit(txq_id, trans->txqs.queue_used)) { - IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n", - txq_id, ssn); - goto out; - } - - if (read_ptr == tfd_num) - goto out; - - IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n", - txq_id, read_ptr, txq->read_ptr, tfd_num, ssn); - - /*Since we free until index _not_ inclusive, the one before index is - * the last we will free. This one must be used */ - last_to_free = iwl_txq_dec_wrap(trans, tfd_num); - - if (!iwl_txq_used(txq, last_to_free)) { - IWL_ERR(trans, - "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", - __func__, txq_id, last_to_free, - trans->trans_cfg->base_params->max_tfd_queue_size, - txq->write_ptr, txq->read_ptr); - - iwl_op_mode_time_point(trans->op_mode, - IWL_FW_INI_TIME_POINT_FAKE_TX, - NULL); - goto out; - } - - if (WARN_ON(!skb_queue_empty(skbs))) - goto out; - - for (; - read_ptr != tfd_num; - txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr), - read_ptr = iwl_txq_get_cmd_index(txq, txq->read_ptr)) { - struct sk_buff *skb = txq->entries[read_ptr].skb; - - if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n", - read_ptr, txq->read_ptr, txq_id)) - continue; - - iwl_txq_free_tso_page(trans, skb); - - __skb_queue_tail(skbs, skb); - - txq->entries[read_ptr].skb = NULL; - - if (!trans->trans_cfg->gen2) - iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq); - - iwl_txq_free_tfd(trans, txq); - } - - iwl_txq_progress(txq); - - if (iwl_txq_space(trans, txq) > txq->low_mark && - test_bit(txq_id, trans->txqs.queue_stopped)) { - struct sk_buff_head overflow_skbs; - struct sk_buff *skb; - - __skb_queue_head_init(&overflow_skbs); - skb_queue_splice_init(&txq->overflow_q, - is_flush ? skbs : &overflow_skbs); - - /* - * We are going to transmit from the overflow queue. - * Remember this state so that wait_for_txq_empty will know we - * are adding more packets to the TFD queue. It cannot rely on - * the state of &txq->overflow_q, as we just emptied it, but - * haven't TXed the content yet. - */ - txq->overflow_tx = true; - - /* - * This is tricky: we are in reclaim path which is non - * re-entrant, so noone will try to take the access the - * txq data from that path. We stopped tx, so we can't - * have tx as well. Bottom line, we can unlock and re-lock - * later. - */ - spin_unlock_bh(&txq->lock); - - while ((skb = __skb_dequeue(&overflow_skbs))) { - struct iwl_device_tx_cmd *dev_cmd_ptr; - - dev_cmd_ptr = *(void **)((u8 *)skb->cb + - trans->txqs.dev_cmd_offs); - - /* - * Note that we can very well be overflowing again. - * In that case, iwl_txq_space will be small again - * and we won't wake mac80211's queue. 
- */ - iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); - } - - if (iwl_txq_space(trans, txq) > txq->low_mark) - iwl_wake_queue(trans, txq); - - spin_lock_bh(&txq->lock); - txq->overflow_tx = false; - } - -out: - spin_unlock_bh(&txq->lock); -} - -/* Set wr_ptr of specific device and txq */ -void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) -{ - struct iwl_txq *txq = trans->txqs.txq[txq_id]; - - spin_lock_bh(&txq->lock); - - txq->write_ptr = ptr; - txq->read_ptr = txq->write_ptr; - - spin_unlock_bh(&txq->lock); -} - -void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs, - bool freeze) -{ - int queue; - - for_each_set_bit(queue, &txqs, BITS_PER_LONG) { - struct iwl_txq *txq = trans->txqs.txq[queue]; - unsigned long now; - - spin_lock_bh(&txq->lock); - - now = jiffies; - - if (txq->frozen == freeze) - goto next_queue; - - IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", - freeze ? "Freezing" : "Waking", queue); - - txq->frozen = freeze; - - if (txq->read_ptr == txq->write_ptr) - goto next_queue; - - if (freeze) { - if (unlikely(time_after(now, - txq->stuck_timer.expires))) { - /* - * The timer should have fired, maybe it is - * spinning right now on the lock. - */ - goto next_queue; - } - /* remember how long until the timer fires */ - txq->frozen_expiry_remainder = - txq->stuck_timer.expires - now; - del_timer(&txq->stuck_timer); - goto next_queue; - } - - /* - * Wake a non-empty queue -> arm timer with the - * remainder before it froze - */ - mod_timer(&txq->stuck_timer, - now + txq->frozen_expiry_remainder); - -next_queue: - spin_unlock_bh(&txq->lock); - } -} - -#define HOST_COMPLETE_TIMEOUT (2 * HZ) - -static int iwl_trans_txq_send_hcmd_sync(struct iwl_trans *trans, - struct iwl_host_cmd *cmd) -{ - const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); - struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id]; - int cmd_idx; - int ret; - - IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str); - - if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, - &trans->status), - "Command %s: a command is already active!\n", cmd_str)) - return -EIO; - - IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); - - cmd_idx = trans->ops->send_cmd(trans, cmd); - if (cmd_idx < 0) { - ret = cmd_idx; - clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); - IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", - cmd_str, ret); - return ret; - } - - ret = wait_event_timeout(trans->wait_command_queue, - !test_bit(STATUS_SYNC_HCMD_ACTIVE, - &trans->status), - HOST_COMPLETE_TIMEOUT); - if (!ret) { - IWL_ERR(trans, "Error sending %s: time out after %dms.\n", - cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); - - IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", - txq->read_ptr, txq->write_ptr); - - clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); - IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", - cmd_str); - ret = -ETIMEDOUT; - - iwl_trans_sync_nmi(trans); - goto cancel; - } - - if (test_bit(STATUS_FW_ERROR, &trans->status)) { - if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, - &trans->status)) { - IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str); - dump_stack(); - } - ret = -EIO; - goto cancel; - } - - if (!(cmd->flags & CMD_SEND_IN_RFKILL) && - test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { - IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... 
no rsp\n"); - ret = -ERFKILL; - goto cancel; - } - - if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { - IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str); - ret = -EIO; - goto cancel; - } - - return 0; - -cancel: - if (cmd->flags & CMD_WANT_SKB) { - /* - * Cancel the CMD_WANT_SKB flag for the cmd in the - * TX cmd queue. Otherwise in case the cmd comes - * in later, it will possibly set an invalid - * address (cmd->meta.source). - */ - txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; - } - - if (cmd->resp_pkt) { - iwl_free_resp(cmd); - cmd->resp_pkt = NULL; - } - - return ret; -} - -int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, - struct iwl_host_cmd *cmd) -{ - /* Make sure the NIC is still alive in the bus */ - if (test_bit(STATUS_TRANS_DEAD, &trans->status)) - return -ENODEV; - - if (!(cmd->flags & CMD_SEND_IN_RFKILL) && - test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { - IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", - cmd->id); - return -ERFKILL; - } - - if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 && - !(cmd->flags & CMD_SEND_IN_D3))) { - IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id); - return -EHOSTDOWN; - } - - if (cmd->flags & CMD_ASYNC) { - int ret; - - /* An asynchronous command can not expect an SKB to be set. */ - if (WARN_ON(cmd->flags & CMD_WANT_SKB)) - return -EINVAL; - - ret = trans->ops->send_cmd(trans, cmd); - if (ret < 0) { - IWL_ERR(trans, - "Error sending %s: enqueue_hcmd failed: %d\n", - iwl_get_cmd_string(trans, cmd->id), ret); - return ret; - } - return 0; - } - - return iwl_trans_txq_send_hcmd_sync(trans, cmd); -} - diff --git a/drivers/net/wireless/intel/iwlwifi/queue/tx.h b/drivers/net/wireless/intel/iwlwifi/queue/tx.h deleted file mode 100644 index 124b29aac4a1..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/queue/tx.h +++ /dev/null @@ -1,191 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ -/* - * Copyright (C) 2020-2023 Intel Corporation - */ -#ifndef __iwl_trans_queue_tx_h__ -#define __iwl_trans_queue_tx_h__ -#include "iwl-fh.h" -#include "fw/api/tx.h" - -struct iwl_tso_hdr_page { - struct page *page; - u8 *pos; -}; - -static inline dma_addr_t -iwl_txq_get_first_tb_dma(struct iwl_txq *txq, int idx) -{ - return txq->first_tb_dma + - sizeof(struct iwl_pcie_first_tb_buf) * idx; -} - -static inline u16 iwl_txq_get_cmd_index(const struct iwl_txq *q, u32 index) -{ - return index & (q->n_window - 1); -} - -void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id); - -static inline void iwl_wake_queue(struct iwl_trans *trans, - struct iwl_txq *txq) -{ - if (test_and_clear_bit(txq->id, trans->txqs.queue_stopped)) { - IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id); - iwl_op_mode_queue_not_full(trans->op_mode, txq->id); - } -} - -static inline void *iwl_txq_get_tfd(struct iwl_trans *trans, - struct iwl_txq *txq, int idx) -{ - if (trans->trans_cfg->gen2) - idx = iwl_txq_get_cmd_index(txq, idx); - - return (u8 *)txq->tfds + trans->txqs.tfd.size * idx; -} - -int iwl_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, - bool cmd_queue); -/* - * We need this inline in case dma_addr_t is only 32-bits - since the - * hardware is always 64-bit, the issue can still occur in that case, - * so use u64 for 'phys' here to force the addition in 64-bit. 
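The synchronous command path being removed above is a standard bit-and-waitqueue handshake: the sender claims an "active" status bit, enqueues the command, and sleeps until the response handler clears the bit and wakes it, falling back to an error after a fixed timeout. A minimal sketch of that handshake, with hypothetical names (my_cmd_status, my_cmd_waitq) standing in for the driver's trans->status and wait_command_queue:

#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

#define MY_CMD_ACTIVE	0		/* bit index, stands in for STATUS_SYNC_HCMD_ACTIVE */
#define MY_CMD_TIMEOUT	(2 * HZ)

static unsigned long my_cmd_status;			/* hypothetical status bitmap */
static DECLARE_WAIT_QUEUE_HEAD(my_cmd_waitq);		/* hypothetical wait queue */

/* Response-handler path: clear the bit, then wake the sleeper. */
static void my_cmd_complete(void)
{
	clear_bit(MY_CMD_ACTIVE, &my_cmd_status);
	wake_up(&my_cmd_waitq);
}

/* Sender path: refuse concurrent sync commands, then sleep with a timeout. */
static int my_send_cmd_sync(void)
{
	long ret;

	if (test_and_set_bit(MY_CMD_ACTIVE, &my_cmd_status))
		return -EIO;	/* a sync command is already in flight */

	/* ... enqueue the command towards the firmware here ... */

	ret = wait_event_timeout(my_cmd_waitq,
				 !test_bit(MY_CMD_ACTIVE, &my_cmd_status),
				 MY_CMD_TIMEOUT);
	if (!ret) {
		clear_bit(MY_CMD_ACTIVE, &my_cmd_status);
		return -ETIMEDOUT;
	}
	return 0;
}

In the real driver the timeout path additionally strips CMD_WANT_SKB from the queued entry, as the cancel label above shows, so that a late response cannot write through a stale pointer.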
- */ -static inline bool iwl_txq_crosses_4g_boundary(u64 phys, u16 len) -{ - return upper_32_bits(phys) != upper_32_bits(phys + len); -} - -int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q); - -static inline void iwl_txq_stop(struct iwl_trans *trans, struct iwl_txq *txq) -{ - if (!test_and_set_bit(txq->id, trans->txqs.queue_stopped)) { - iwl_op_mode_queue_full(trans->op_mode, txq->id); - IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id); - } else { - IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n", - txq->id); - } -} - -/** - * iwl_txq_inc_wrap - increment queue index, wrap back to beginning - * @trans: the transport (for configuration data) - * @index: current index - */ -static inline int iwl_txq_inc_wrap(struct iwl_trans *trans, int index) -{ - return ++index & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1); -} - -/** - * iwl_txq_dec_wrap - decrement queue index, wrap back to end - * @trans: the transport (for configuration data) - * @index: current index - */ -static inline int iwl_txq_dec_wrap(struct iwl_trans *trans, int index) -{ - return --index & - (trans->trans_cfg->base_params->max_tfd_queue_size - 1); -} - -static inline bool iwl_txq_used(const struct iwl_txq *q, int i) -{ - int index = iwl_txq_get_cmd_index(q, i); - int r = iwl_txq_get_cmd_index(q, q->read_ptr); - int w = iwl_txq_get_cmd_index(q, q->write_ptr); - - return w >= r ? - (index >= r && index < w) : - !(index < r && index >= w); -} - -void iwl_txq_free_tso_page(struct iwl_trans *trans, struct sk_buff *skb); - -void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq); - -int iwl_txq_gen2_set_tb(struct iwl_trans *trans, - struct iwl_tfh_tfd *tfd, dma_addr_t addr, - u16 len); - -void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans, - struct iwl_cmd_meta *meta, - struct iwl_tfh_tfd *tfd); - -int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, - u32 sta_mask, u8 tid, - int size, unsigned int timeout); - -int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, - struct iwl_device_tx_cmd *dev_cmd, int txq_id); - -void iwl_txq_dyn_free(struct iwl_trans *trans, int queue); -void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); -void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq); -void iwl_txq_gen2_tx_free(struct iwl_trans *trans); -int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, - bool cmd_queue); -int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size); -#ifdef CONFIG_INET -struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len, - struct sk_buff *skb); -#endif -static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans, - struct iwl_tfd *tfd) -{ - return tfd->num_tbs & 0x1f; -} - -static inline u16 iwl_txq_gen1_tfd_tb_get_len(struct iwl_trans *trans, - void *_tfd, u8 idx) -{ - struct iwl_tfd *tfd; - struct iwl_tfd_tb *tb; - - if (trans->trans_cfg->gen2) { - struct iwl_tfh_tfd *tfh_tfd = _tfd; - struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx]; - - return le16_to_cpu(tfh_tb->tb_len); - } - - tfd = (struct iwl_tfd *)_tfd; - tb = &tfd->tbs[idx]; - - return le16_to_cpu(tb->hi_n_len) >> 4; -} - -static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_trans *trans, - struct iwl_tfd *tfd, - u8 idx, dma_addr_t addr, u16 len) -{ - struct iwl_tfd_tb *tb = &tfd->tbs[idx]; - u16 hi_n_len = len << 4; - - put_unaligned_le32(addr, &tb->lo); - hi_n_len |= iwl_get_dma_hi_addr(addr); - - tb->hi_n_len = cpu_to_le16(hi_n_len); - - tfd->num_tbs = idx + 1; -} - 
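The wrap helpers in the header above rely on max_tfd_queue_size being a power of two, so a mask replaces the modulo, and the used-slot test has to handle the wrapped case where the write pointer sits below the read pointer. A self-contained sketch of the same arithmetic, with a hypothetical QSIZE:

#include <linux/types.h>

#define QSIZE	256	/* hypothetical queue size; must be a power of two */

static inline int q_inc_wrap(int index)
{
	return ++index & (QSIZE - 1);
}

static inline int q_dec_wrap(int index)
{
	return --index & (QSIZE - 1);
}

/*
 * True if slot i lies in the used range [read, write). When
 * write < read the range has wrapped around the end of the ring,
 * so the complement of the free range is tested instead.
 */
static inline bool q_used(int read, int write, int i)
{
	return write >= read ? (i >= read && i < write)
			     : !(i < read && i >= write);
}

iwl_txq_used() above is this same test, written against the cached command indices of the read and write pointers.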
-void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, - struct iwl_cmd_meta *meta, - struct iwl_txq *txq, int index); -void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, - struct iwl_txq *txq); -void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, - struct iwl_txq *txq, u16 byte_cnt, - int num_tbs); -void iwl_txq_reclaim(struct iwl_trans *trans, int txq_id, int ssn, - struct sk_buff_head *skbs, bool is_flush); -void iwl_txq_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr); -void iwl_trans_txq_freeze_timer(struct iwl_trans *trans, unsigned long txqs, - bool freeze); -void iwl_txq_progress(struct iwl_txq *txq); -void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); -int iwl_trans_txq_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd); -#endif /* __iwl_trans_queue_tx_h__ */ diff --git a/drivers/net/wireless/intersil/p54/fwio.c b/drivers/net/wireless/intersil/p54/fwio.c index c4fe70e05b9b..772084a9bd8d 100644 --- a/drivers/net/wireless/intersil/p54/fwio.c +++ b/drivers/net/wireless/intersil/p54/fwio.c @@ -216,7 +216,7 @@ int p54_download_eeprom(struct p54_common *priv, void *buf, struct sk_buff *skb; size_t eeprom_hdr_size; int ret = 0; - long timeout; + long time_left; if (priv->fw_var >= 0x509) eeprom_hdr_size = sizeof(*eeprom_hdr); @@ -245,9 +245,9 @@ int p54_download_eeprom(struct p54_common *priv, void *buf, p54_tx(priv, skb); - timeout = wait_for_completion_interruptible_timeout( + time_left = wait_for_completion_interruptible_timeout( &priv->eeprom_comp, HZ); - if (timeout <= 0) { + if (time_left <= 0) { wiphy_err(priv->hw->wiphy, "device does not respond or signal received!\n"); ret = -EBUSY; diff --git a/drivers/net/wireless/intersil/p54/main.c b/drivers/net/wireless/intersil/p54/main.c index 687841b2fa2a..42111bb53f58 100644 --- a/drivers/net/wireless/intersil/p54/main.c +++ b/drivers/net/wireless/intersil/p54/main.c @@ -197,7 +197,7 @@ out: return err; } -static void p54_stop(struct ieee80211_hw *dev) +static void p54_stop(struct ieee80211_hw *dev, bool suspend) { struct p54_common *priv = dev->priv; int i; diff --git a/drivers/net/wireless/intersil/p54/p54pci.c b/drivers/net/wireless/intersil/p54/p54pci.c index e97ee547b9f3..6588f5477762 100644 --- a/drivers/net/wireless/intersil/p54/p54pci.c +++ b/drivers/net/wireless/intersil/p54/p54pci.c @@ -434,7 +434,7 @@ static int p54p_open(struct ieee80211_hw *dev) { struct p54p_priv *priv = dev->priv; int err; - long timeout; + long time_left; init_completion(&priv->boot_comp); err = request_irq(priv->pdev->irq, p54p_interrupt, @@ -472,12 +472,12 @@ static int p54p_open(struct ieee80211_hw *dev) P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); P54P_READ(dev_int); - timeout = wait_for_completion_interruptible_timeout( + time_left = wait_for_completion_interruptible_timeout( &priv->boot_comp, HZ); - if (timeout <= 0) { + if (time_left <= 0) { wiphy_err(dev->wiphy, "Cannot boot firmware!\n"); p54p_stop(dev); - return timeout ? -ERESTARTSYS : -ETIMEDOUT; + return time_left ? 
-ERESTARTSYS : -ETIMEDOUT; } P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)); diff --git a/drivers/net/wireless/intersil/p54/p54spi.c b/drivers/net/wireless/intersil/p54/p54spi.c index 0073b5e0f9c9..d33a994906a7 100644 --- a/drivers/net/wireless/intersil/p54/p54spi.c +++ b/drivers/net/wireless/intersil/p54/p54spi.c @@ -518,7 +518,7 @@ out: static int p54spi_op_start(struct ieee80211_hw *dev) { struct p54s_priv *priv = dev->priv; - unsigned long timeout; + long time_left; int ret = 0; if (mutex_lock_interruptible(&priv->mutex)) { @@ -538,10 +538,10 @@ static int p54spi_op_start(struct ieee80211_hw *dev) mutex_unlock(&priv->mutex); - timeout = msecs_to_jiffies(2000); - timeout = wait_for_completion_interruptible_timeout(&priv->fw_comp, - timeout); - if (!timeout) { + time_left = msecs_to_jiffies(2000); + time_left = wait_for_completion_interruptible_timeout(&priv->fw_comp, + time_left); + if (!time_left) { dev_err(&priv->spi->dev, "firmware boot failed"); p54spi_power_off(priv); ret = -1; diff --git a/drivers/net/wireless/marvell/libertas_tf/main.c b/drivers/net/wireless/marvell/libertas_tf/main.c index 9cca69fe04d7..b47a832b9ae2 100644 --- a/drivers/net/wireless/marvell/libertas_tf/main.c +++ b/drivers/net/wireless/marvell/libertas_tf/main.c @@ -267,7 +267,7 @@ static int lbtf_op_start(struct ieee80211_hw *hw) return 0; } -static void lbtf_op_stop(struct ieee80211_hw *hw) +static void lbtf_op_stop(struct ieee80211_hw *hw, bool suspend) { struct lbtf_private *priv = hw->priv; unsigned long flags; diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index b909a7665e9c..155eb0fab12a 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -926,6 +926,8 @@ mwifiex_init_new_priv_params(struct mwifiex_private *priv, return -EOPNOTSUPP; } + priv->bss_num = mwifiex_get_unused_bss_num(adapter, priv->bss_type); + spin_lock_irqsave(&adapter->main_proc_lock, flags); adapter->main_locked = false; spin_unlock_irqrestore(&adapter->main_proc_lock, flags); diff --git a/drivers/net/wireless/marvell/mwifiex/main.h b/drivers/net/wireless/marvell/mwifiex/main.h index 175882485a19..c5164ae41b54 100644 --- a/drivers/net/wireless/marvell/mwifiex/main.h +++ b/drivers/net/wireless/marvell/mwifiex/main.h @@ -1287,6 +1287,9 @@ mwifiex_get_priv_by_id(struct mwifiex_adapter *adapter, for (i = 0; i < adapter->priv_num; i++) { if (adapter->priv[i]) { + if (adapter->priv[i]->bss_mode == NL80211_IFTYPE_UNSPECIFIED) + continue; + if ((adapter->priv[i]->bss_num == bss_num) && (adapter->priv[i]->bss_type == bss_type)) break; diff --git a/drivers/net/wireless/marvell/mwl8k.c b/drivers/net/wireless/marvell/mwl8k.c index d3d07bb26335..b130e057370f 100644 --- a/drivers/net/wireless/marvell/mwl8k.c +++ b/drivers/net/wireless/marvell/mwl8k.c @@ -2211,7 +2211,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt_hdr *cmd dma_addr_t dma_addr; unsigned int dma_size; int rc; - unsigned long timeout = 0; + unsigned long time_left = 0; u8 buf[32]; u32 bitmap = 0; @@ -2258,8 +2258,8 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt_hdr *cmd iowrite32(MWL8K_H2A_INT_DUMMY, regs + MWL8K_HIU_H2A_INTERRUPT_EVENTS); - timeout = wait_for_completion_timeout(&cmd_wait, - msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS)); + time_left = wait_for_completion_timeout(&cmd_wait, + msecs_to_jiffies(MWL8K_CMD_TIMEOUT_MS)); priv->hostcmd_wait = NULL; @@ -2267,7 +2267,7 @@ static int 
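The renames above track the return convention of the completion helpers: a positive result is the time left in jiffies, 0 means the wait timed out, and the interruptible variants can also return -ERESTARTSYS, which is why p54spi also changes the variable from unsigned long to a signed long. Calling the result time_left rather than timeout makes the subsequent tests read correctly. A sketch of the canonical call site, with a hypothetical fw_ready completion:

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static DECLARE_COMPLETION(fw_ready);	/* hypothetical: completed by the IRQ handler */

static int my_wait_for_fw_boot(void)
{
	long time_left;

	time_left = wait_for_completion_interruptible_timeout(&fw_ready,
							      msecs_to_jiffies(2000));
	if (time_left < 0)		/* interrupted by a signal */
		return time_left;	/* -ERESTARTSYS */
	if (!time_left)			/* the full 2s elapsed */
		return -ETIMEDOUT;

	return 0;	/* completed with time_left jiffies to spare */
}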
mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt_hdr *cmd dma_unmap_single(&priv->pdev->dev, dma_addr, dma_size, DMA_BIDIRECTIONAL); - if (!timeout) { + if (!time_left) { wiphy_err(hw->wiphy, "Command %s timeout after %u ms\n", mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), MWL8K_CMD_TIMEOUT_MS); @@ -2275,7 +2275,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt_hdr *cmd } else { int ms; - ms = MWL8K_CMD_TIMEOUT_MS - jiffies_to_msecs(timeout); + ms = MWL8K_CMD_TIMEOUT_MS - jiffies_to_msecs(time_left); rc = cmd->result ? -EINVAL : 0; if (rc) @@ -4768,7 +4768,7 @@ static int mwl8k_start(struct ieee80211_hw *hw) return rc; } -static void mwl8k_stop(struct ieee80211_hw *hw) +static void mwl8k_stop(struct ieee80211_hw *hw, bool suspend) { struct mwl8k_priv *priv = hw->priv; int i; @@ -6023,7 +6023,7 @@ static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image) struct mwl8k_priv *priv = hw->priv; struct mwl8k_vif *vif, *tmp_vif; - mwl8k_stop(hw); + mwl8k_stop(hw, false); mwl8k_rxq_deinit(hw, 0); /* diff --git a/drivers/net/wireless/mediatek/mt76/debugfs.c b/drivers/net/wireless/mediatek/mt76/debugfs.c index ae83be572b94..b6a2746c187d 100644 --- a/drivers/net/wireless/mediatek/mt76/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/debugfs.c @@ -33,8 +33,8 @@ mt76_napi_threaded_set(void *data, u64 val) if (!mt76_is_mmio(dev)) return -EOPNOTSUPP; - if (dev->napi_dev.threaded != val) - return dev_set_threaded(&dev->napi_dev, val); + if (dev->napi_dev->threaded != val) + return dev_set_threaded(dev->napi_dev, val); return 0; } @@ -44,7 +44,7 @@ mt76_napi_threaded_get(void *data, u64 *val) { struct mt76_dev *dev = data; - *val = dev->napi_dev.threaded; + *val = dev->napi_dev->threaded; return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index f4f88c444e21..5f46d6daeaa7 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -916,7 +916,7 @@ int mt76_dma_rx_poll(struct napi_struct *napi, int budget) struct mt76_dev *dev; int qid, done = 0, cur; - dev = container_of(napi->dev, struct mt76_dev, napi_dev); + dev = mt76_priv(napi->dev); qid = napi - dev->napi; rcu_read_lock(); @@ -940,18 +940,35 @@ static int mt76_dma_init(struct mt76_dev *dev, int (*poll)(struct napi_struct *napi, int budget)) { + struct mt76_dev **priv; int i; - init_dummy_netdev(&dev->napi_dev); - init_dummy_netdev(&dev->tx_napi_dev); - snprintf(dev->napi_dev.name, sizeof(dev->napi_dev.name), "%s", + dev->napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *)); + if (!dev->napi_dev) + return -ENOMEM; + + /* napi_dev private data points to mt76_dev parent, so, mt76_dev + * can be retrieved given napi_dev + */ + priv = netdev_priv(dev->napi_dev); + *priv = dev; + + dev->tx_napi_dev = alloc_netdev_dummy(sizeof(struct mt76_dev *)); + if (!dev->tx_napi_dev) { + free_netdev(dev->napi_dev); + return -ENOMEM; + } + priv = netdev_priv(dev->tx_napi_dev); + *priv = dev; + + snprintf(dev->napi_dev->name, sizeof(dev->napi_dev->name), "%s", wiphy_name(dev->hw->wiphy)); - dev->napi_dev.threaded = 1; + dev->napi_dev->threaded = 1; init_completion(&dev->mmio.wed_reset); init_completion(&dev->mmio.wed_reset_complete); mt76_for_each_q_rx(dev, i) { - netif_napi_add(&dev->napi_dev, &dev->napi[i], poll); + netif_napi_add(dev->napi_dev, &dev->napi[i], poll); mt76_dma_rx_fill(dev, &dev->q_rx[i], false); napi_enable(&dev->napi[i]); } @@ -1019,5 +1036,7 @@ void mt76_dma_cleanup(struct mt76_dev *dev) 
mt76_free_pending_txwi(dev); mt76_free_pending_rxwi(dev); + free_netdev(dev->napi_dev); + free_netdev(dev->tx_napi_dev); } EXPORT_SYMBOL_GPL(mt76_dma_cleanup); diff --git a/drivers/net/wireless/mediatek/mt76/dma.h b/drivers/net/wireless/mediatek/mt76/dma.h index 1de5a2b20f74..e3ddc7a83757 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.h +++ b/drivers/net/wireless/mediatek/mt76/dma.h @@ -116,4 +116,13 @@ mt76_dma_should_drop_buf(bool *drop, u32 ctrl, u32 buf1, u32 info) } } +static inline void *mt76_priv(struct net_device *dev) +{ + struct mt76_dev **priv; + + priv = netdev_priv(dev); + + return *priv; +} + #endif diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index e8ba2e4e8484..bb291fe314fb 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -1125,6 +1125,11 @@ mt76_rx_convert(struct mt76_dev *dev, struct sk_buff *skb, memcpy(status->chain_signal, mstat.chain_signal, sizeof(mstat.chain_signal)); + if (mstat.wcid) { + status->link_valid = mstat.wcid->link_valid; + status->link_id = mstat.wcid->link_id; + } + *sta = wcid_to_sta(mstat.wcid); *hw = mt76_phy_hw(dev, mstat.phy_idx); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 11b9f22ca7f3..4a58a78d5ed2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -349,6 +349,8 @@ struct mt76_wcid { u8 sta:1; u8 amsdu:1; u8 phy_idx:2; + u8 link_id:4; + bool link_valid; u8 rx_check_pn; u8 rx_key_pn[IEEE80211_NUM_TIDS + 1][6]; @@ -366,6 +368,8 @@ struct mt76_wcid { struct mt76_sta_stats stats; struct list_head poll_list; + + struct mt76_wcid *def_wcid; }; struct mt76_txq { @@ -831,8 +835,8 @@ struct mt76_dev { struct mt76_mcu mcu; - struct net_device napi_dev; - struct net_device tx_napi_dev; + struct net_device *napi_dev; + struct net_device *tx_napi_dev; spinlock_t rx_lock; struct napi_struct napi[__MT_RXQ_MAX]; struct sk_buff_head rx_skb[__MT_RXQ_MAX]; @@ -1081,6 +1085,7 @@ bool ____mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val, void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs); void mt76_pci_disable_aspm(struct pci_dev *pdev); +bool mt76_pci_aspm_supported(struct pci_dev *pdev); static inline u16 mt76_chip(struct mt76_dev *dev) { @@ -1256,6 +1261,9 @@ wcid_to_sta(struct mt76_wcid *wcid) if (!wcid || !wcid->sta) return NULL; + if (wcid->def_wcid) + ptr = wcid->def_wcid; + return container_of(ptr, struct ieee80211_sta, drv_priv); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c index 14304b063715..ea017f22fff2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/dma.c @@ -242,7 +242,7 @@ int mt7603_dma_init(struct mt7603_dev *dev) if (ret) return ret; - netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, + netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, mt7603_poll_tx); napi_enable(&dev->mt76.tx_napi); diff --git a/drivers/net/wireless/mediatek/mt76/mt7603/main.c b/drivers/net/wireless/mediatek/mt76/mt7603/main.c index 9b49267b1eab..f35fa643c0da 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7603/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7603/main.c @@ -23,7 +23,7 @@ mt7603_start(struct ieee80211_hw *hw) } static void -mt7603_stop(struct ieee80211_hw *hw) +mt7603_stop(struct ieee80211_hw *hw, bool suspend) { struct mt7603_dev *dev = 
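The conversion above replaces the dummy netdevs embedded in struct mt76_dev with ones allocated by alloc_netdev_dummy(). Since container_of(napi->dev, ...) no longer works once the netdev is not embedded, the netdev's private area now carries a single backpointer, which is what the new mt76_priv() helper dereferences. A sketch of the pattern with a hypothetical struct my_dev:

#include <linux/netdevice.h>
#include <linux/errno.h>

struct my_dev {
	struct net_device *napi_dev;
	/* ... */
};

static int my_napi_init(struct my_dev *dev)
{
	struct my_dev **priv;

	/* private area sized for exactly one backpointer */
	dev->napi_dev = alloc_netdev_dummy(sizeof(struct my_dev *));
	if (!dev->napi_dev)
		return -ENOMEM;

	priv = netdev_priv(dev->napi_dev);
	*priv = dev;	/* napi->dev -> my_dev mapping for the poll loop */

	return 0;
}

/* Recover the owner from NAPI context, as mt76_priv() does above. */
static struct my_dev *my_napi_owner(struct napi_struct *napi)
{
	struct my_dev **priv = netdev_priv(napi->dev);

	return *priv;
}

On teardown both dummy netdevs must be released with free_netdev(), which is exactly what the new mt76_dma_cleanup() hunk does.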
hw->priv; diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c index e7135b2f1742..bcf7864312d7 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/dma.c @@ -67,7 +67,7 @@ static int mt7615_poll_tx(struct napi_struct *napi, int budget) { struct mt7615_dev *dev; - dev = container_of(napi, struct mt7615_dev, mt76.tx_napi); + dev = mt76_priv(napi->dev); if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { napi_complete(napi); queue_work(dev->mt76.wq, &dev->pm.wake_work); @@ -89,7 +89,7 @@ static int mt7615_poll_rx(struct napi_struct *napi, int budget) struct mt7615_dev *dev; int done; - dev = container_of(napi->dev, struct mt7615_dev, mt76.napi_dev); + dev = mt76_priv(napi->dev); if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { napi_complete(napi); @@ -282,7 +282,7 @@ int mt7615_dma_init(struct mt7615_dev *dev) if (ret < 0) return ret; - netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, + netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, mt7615_poll_tx); napi_enable(&dev->mt76.tx_napi); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/main.c b/drivers/net/wireless/mediatek/mt76/mt7615/main.c index c27acaf0eb1c..50e262c1622f 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/main.c @@ -91,7 +91,7 @@ out: return ret; } -static void mt7615_stop(struct ieee80211_hw *hw) +static void mt7615_stop(struct ieee80211_hw *hw, bool suspend) { struct mt7615_dev *dev = mt7615_hw_dev(hw); struct mt7615_phy *phy = mt7615_hw_phy(hw); diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index c807bd8d928d..d50d967828be 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -842,6 +842,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif, { struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; struct sk_buff *skb, *sskb, *wskb = NULL; + struct ieee80211_link_sta *link_sta; struct mt7615_dev *dev = phy->dev; struct wtbl_req_hdr *wtbl_hdr; struct mt7615_sta *msta; @@ -849,6 +850,7 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif, int cmd, err; msta = sta ? (struct mt7615_sta *)sta->drv_priv : &mvif->sta; + link_sta = sta ? 
&sta->deflink : NULL; sskb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, &msta->wcid); @@ -861,8 +863,8 @@ mt7615_mcu_wtbl_sta_add(struct mt7615_phy *phy, struct ieee80211_vif *vif, else mvif->sta_added = true; } - mt76_connac_mcu_sta_basic_tlv(&dev->mt76, sskb, vif, sta, enable, - new_entry); + mt76_connac_mcu_sta_basic_tlv(&dev->mt76, sskb, vif, link_sta, + enable, new_entry); if (enable && sta) mt76_connac_mcu_sta_tlv(phy->mt76, sskb, sta, vif, 0, MT76_STA_INFO_STATE_ASSOC); @@ -1109,8 +1111,8 @@ mt7615_mcu_uni_add_dev(struct mt7615_phy *phy, struct ieee80211_vif *vif, { struct mt7615_vif *mvif = (struct mt7615_vif *)vif->drv_priv; - return mt76_connac_mcu_uni_add_dev(phy->mt76, vif, &mvif->sta.wcid, - enable); + return mt76_connac_mcu_uni_add_dev(phy->mt76, &vif->bss_conf, + &mvif->sta.wcid, enable); } static int diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c index df737e1ff27b..9335ca0776fe 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/usb.c @@ -79,7 +79,7 @@ static void mt7663u_copy(struct mt76_dev *dev, u32 offset, mutex_unlock(&usb->usb_ctrl_mtx); } -static void mt7663u_stop(struct ieee80211_hw *hw) +static void mt7663u_stop(struct ieee80211_hw *hw, bool suspend) { struct mt7615_phy *phy = mt7615_hw_phy(hw); struct mt7615_dev *dev = hw->priv; diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c index 162c57fb7954..4dce03ddbfa4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.c @@ -370,7 +370,7 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_bss_omac_tlv); void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ieee80211_link_sta *link_sta, bool enable, bool newly) { struct sta_rec_basic *basic; @@ -390,7 +390,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb, basic->conn_state = CONN_STATE_DISCONNECT; } - if (!sta) { + if (!link_sta) { basic->conn_type = cpu_to_le32(CONNECTION_INFRA_BC); if (vif->type == NL80211_IFTYPE_STATION && @@ -411,7 +411,7 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb, else conn_type = CONNECTION_INFRA_STA; basic->conn_type = cpu_to_le32(conn_type); - basic->aid = cpu_to_le16(sta->aid); + basic->aid = cpu_to_le16(link_sta->sta->aid); break; case NL80211_IFTYPE_STATION: if (vif->p2p && !is_mt7921(dev)) @@ -423,15 +423,15 @@ void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb, break; case NL80211_IFTYPE_ADHOC: basic->conn_type = cpu_to_le32(CONNECTION_IBSS_ADHOC); - basic->aid = cpu_to_le16(sta->aid); + basic->aid = cpu_to_le16(link_sta->sta->aid); break; default: WARN_ON(1); break; } - memcpy(basic->peer_addr, sta->addr, ETH_ALEN); - basic->qos = sta->wme; + memcpy(basic->peer_addr, link_sta->addr, ETH_ALEN); + basic->qos = link_sta->sta->wme; } EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_basic_tlv); @@ -793,7 +793,8 @@ EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_he_tlv_v2); u8 mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, - enum nl80211_band band, struct ieee80211_sta *sta) + enum nl80211_band band, + struct ieee80211_link_sta *link_sta) { struct ieee80211_sta_ht_cap *ht_cap; struct ieee80211_sta_vht_cap *vht_cap; @@ -801,11 +802,11 @@ mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct 
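The signature changes above are the usual first step of an MLO conversion: helpers that only consume per-link state take an ieee80211_link_sta, non-MLO callers pass &sta->deflink (or NULL for broadcast/group entries), and genuinely per-STA fields such as the AID stay reachable through link_sta->sta. A hedged sketch of that calling convention:

#include <net/mac80211.h>

/* Per-link consumer: link capabilities live on the link_sta,
 * per-STA fields are reached through link_sta->sta. */
static u16 my_link_aid(struct ieee80211_link_sta *link_sta)
{
	if (!link_sta)		/* e.g. broadcast/group entry */
		return 0;

	return link_sta->sta->aid;
}

/* Non-MLO caller: the default link is embedded in ieee80211_sta. */
static u16 my_sta_aid(struct ieee80211_sta *sta)
{
	return my_link_aid(sta ? &sta->deflink : NULL);
}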
ieee80211_vif *vif, const struct ieee80211_sta_eht_cap *eht_cap; u8 mode = 0; - if (sta) { - ht_cap = &sta->deflink.ht_cap; - vht_cap = &sta->deflink.vht_cap; - he_cap = &sta->deflink.he_cap; - eht_cap = &sta->deflink.eht_cap; + if (link_sta) { + ht_cap = &link_sta->ht_cap; + vht_cap = &link_sta->vht_cap; + he_cap = &link_sta->he_cap; + eht_cap = &link_sta->eht_cap; } else { struct ieee80211_supported_band *sband; @@ -911,7 +912,8 @@ void mt76_connac_mcu_sta_tlv(struct mt76_phy *mphy, struct sk_buff *skb, tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy)); phy = (struct sta_rec_phy *)tlv; - phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, sta); + phy->phy_type = mt76_connac_get_phy_mode_v2(mphy, vif, band, + &sta->deflink); phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates); phy->rcpi = rcpi; phy->ampdu = FIELD_PREP(IEEE80211_HT_AMPDU_PARM_FACTOR, @@ -1044,6 +1046,7 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy, struct mt76_sta_cmd_info *info) { struct mt76_vif *mvif = (struct mt76_vif *)info->vif->drv_priv; + struct ieee80211_link_sta *link_sta; struct mt76_dev *dev = phy->dev; struct wtbl_req_hdr *wtbl_hdr; struct tlv *sta_wtbl; @@ -1053,9 +1056,11 @@ int mt76_connac_mcu_sta_cmd(struct mt76_phy *phy, if (IS_ERR(skb)) return PTR_ERR(skb); + link_sta = info->sta ? &info->sta->deflink : NULL; if (info->sta || !info->offload_fw) - mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif, info->sta, - info->enable, info->newly); + mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif, + link_sta, info->enable, + info->newly); if (info->sta && info->enable) mt76_connac_mcu_sta_tlv(phy, skb, info->sta, info->vif, info->rcpi, @@ -1132,11 +1137,11 @@ void mt76_connac_mcu_wtbl_ba_tlv(struct mt76_dev *dev, struct sk_buff *skb, EXPORT_SYMBOL_GPL(mt76_connac_mcu_wtbl_ba_tlv); int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, - struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, struct mt76_wcid *wcid, bool enable) { - struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt76_vif *mvif = (struct mt76_vif *)bss_conf->vif->drv_priv; struct mt76_dev *dev = phy->dev; struct { struct { @@ -1148,7 +1153,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, __le16 tag; __le16 len; u8 active; - u8 pad; + u8 link_idx; /* not link_id */ u8 omac_addr[ETH_ALEN]; } __packed tlv; } dev_req = { @@ -1160,6 +1165,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, .tag = cpu_to_le16(DEV_INFO_ACTIVE), .len = cpu_to_le16(sizeof(struct req_tlv)), .active = enable, + .link_idx = mvif->idx, }, }; struct { @@ -1182,12 +1188,13 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, .bmc_tx_wlan_idx = cpu_to_le16(wcid->idx), .sta_idx = cpu_to_le16(wcid->idx), .conn_state = 1, + .link_idx = mvif->idx, }, }; int err, idx, cmd, len; void *data; - switch (vif->type) { + switch (bss_conf->vif->type) { case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_AP: @@ -1207,7 +1214,7 @@ int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, idx = mvif->omac_idx > EXT_BSSID_START ? HW_BSSID_0 : mvif->omac_idx; basic_req.basic.hw_bss_idx = idx; - memcpy(dev_req.tlv.omac_addr, vif->addr, ETH_ALEN); + memcpy(dev_req.tlv.omac_addr, bss_conf->addr, ETH_ALEN); cmd = enable ? MCU_UNI_CMD(DEV_INFO_UPDATE) : MCU_UNI_CMD(BSS_INFO_UPDATE); data = enable ? 
(void *)&dev_req : (void *)&basic_req; @@ -1305,7 +1312,8 @@ int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif, EXPORT_SYMBOL_GPL(mt76_connac_mcu_sta_ba); u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif, - enum nl80211_band band, struct ieee80211_sta *sta) + enum nl80211_band band, + struct ieee80211_link_sta *link_sta) { struct mt76_dev *dev = phy->dev; const struct ieee80211_sta_he_cap *he_cap; @@ -1316,10 +1324,10 @@ u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif, if (is_connac_v1(dev)) return 0x38; - if (sta) { - ht_cap = &sta->deflink.ht_cap; - vht_cap = &sta->deflink.vht_cap; - he_cap = &sta->deflink.he_cap; + if (link_sta) { + ht_cap = &link_sta->ht_cap; + vht_cap = &link_sta->vht_cap; + he_cap = &link_sta->he_cap; } else { struct ieee80211_supported_band *sband; diff --git a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h index 6873ce14d299..4242d436de26 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76_connac_mcu.h @@ -545,6 +545,13 @@ struct sta_rec_muru { } mimo_ul; } __packed; +struct sta_rec_remove { + __le16 tag; + __le16 len; + u8 action; + u8 pad[3]; +} __packed; + struct sta_phy { u8 type; u8 flag; @@ -813,7 +820,10 @@ enum { STA_REC_HE_6G = 0x17, STA_REC_HE_V2 = 0x19, STA_REC_MLD = 0x20, + STA_REC_EHT_MLD = 0x21, STA_REC_EHT = 0x22, + STA_REC_MLD_OFF = 0x23, + STA_REC_REMOVE = 0x25, STA_REC_PN_INFO = 0x26, STA_REC_KEY_V3 = 0x27, STA_REC_HDRT = 0x28, @@ -1392,6 +1402,7 @@ enum { MT_NIC_CAP_WFDMA_REALLOC, MT_NIC_CAP_6G, MT_NIC_CAP_CHIP_CAP = 0x20, + MT_NIC_CAP_EML_CAP = 0x22, }; #define UNI_WOW_DETECT_TYPE_MAGIC BIT(0) @@ -1443,7 +1454,7 @@ struct mt76_connac_bss_basic_tlv { __le16 sta_idx; __le16 nonht_basic_phy; u8 phymode_ext; /* bit(0) AX_6G */ - u8 pad[1]; + u8 link_idx; } __packed; struct mt76_connac_bss_qos_tlv { @@ -1733,7 +1744,10 @@ enum mt76_sta_info_state { }; struct mt76_sta_cmd_info { - struct ieee80211_sta *sta; + union { + struct ieee80211_sta *sta; + struct ieee80211_link_sta *link_sta; + }; struct mt76_wcid *wcid; struct ieee80211_vif *vif; @@ -1883,8 +1897,8 @@ int mt76_connac_mcu_set_channel_domain(struct mt76_phy *phy); int mt76_connac_mcu_set_vif_ps(struct mt76_dev *dev, struct ieee80211_vif *vif); void mt76_connac_mcu_sta_basic_tlv(struct mt76_dev *dev, struct sk_buff *skb, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, bool enable, - bool newly); + struct ieee80211_link_sta *link_sta, + bool enable, bool newly); void mt76_connac_mcu_wtbl_generic_tlv(struct mt76_dev *dev, struct sk_buff *skb, struct ieee80211_vif *vif, struct ieee80211_sta *sta, void *sta_wtbl, @@ -1898,7 +1912,8 @@ int mt76_connac_mcu_sta_update_hdr_trans(struct mt76_dev *dev, struct mt76_wcid *wcid, int cmd); void mt76_connac_mcu_sta_he_tlv_v2(struct sk_buff *skb, struct ieee80211_sta *sta); u8 mt76_connac_get_phy_mode_v2(struct mt76_phy *mphy, struct ieee80211_vif *vif, - enum nl80211_band band, struct ieee80211_sta *sta); + enum nl80211_band band, + struct ieee80211_link_sta *link_sta); int mt76_connac_mcu_wtbl_update_hdr_trans(struct mt76_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta); @@ -1917,7 +1932,7 @@ void mt76_connac_mcu_sta_ba_tlv(struct sk_buff *skb, struct ieee80211_ampdu_params *params, bool enable, bool tx); int mt76_connac_mcu_uni_add_dev(struct mt76_phy *phy, - struct ieee80211_vif *vif, + struct ieee80211_bss_conf *bss_conf, struct mt76_wcid 
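The new sta_rec_remove record above follows the same framing as the other STA record TLVs: a little-endian tag from the STA_REC_* enum and a total length that, judging by the sizeof() lengths used elsewhere in these requests, covers the whole record including the header, with the payload padded to 4 bytes. A minimal sketch of filling one such record (the struct mirrors the one added above; the helper name is hypothetical):

#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

struct my_sta_rec_remove {
	__le16 tag;
	__le16 len;
	u8 action;
	u8 pad[3];
} __packed;

static void my_fill_remove_tlv(struct my_sta_rec_remove *tlv, u8 action)
{
	tlv->tag = cpu_to_le16(0x25);		/* STA_REC_REMOVE above */
	tlv->len = cpu_to_le16(sizeof(*tlv));	/* length counts the whole record */
	tlv->action = action;
	memset(tlv->pad, 0, sizeof(tlv->pad));
}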
*wcid, bool enable); int mt76_connac_mcu_sta_ba(struct mt76_dev *dev, struct mt76_vif *mvif, @@ -1992,7 +2007,8 @@ mt76_connac_get_he_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif); const struct ieee80211_sta_eht_cap * mt76_connac_get_eht_phy_cap(struct mt76_phy *phy, struct ieee80211_vif *vif); u8 mt76_connac_get_phy_mode(struct mt76_phy *phy, struct ieee80211_vif *vif, - enum nl80211_band band, struct ieee80211_sta *sta); + enum nl80211_band band, + struct ieee80211_link_sta *sta); u8 mt76_connac_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif, enum nl80211_band band); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index 79b7996ad1a8..2ecee7c5c80d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -44,7 +44,7 @@ static void mt76x0e_stop_hw(struct mt76x02_dev *dev) mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_RX_DMA_EN); } -static void mt76x0e_stop(struct ieee80211_hw *hw) +static void mt76x0e_stop(struct ieee80211_hw *hw, bool suspend) { struct mt76x02_dev *dev = hw->priv; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index bba44f289b4e..390f502e97f0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -77,7 +77,7 @@ static void mt76x0u_cleanup(struct mt76x02_dev *dev) mt76u_queues_deinit(&dev->mt76); } -static void mt76x0u_stop(struct ieee80211_hw *hw) +static void mt76x0u_stop(struct ieee80211_hw *hw, bool suspend) { struct mt76x02_dev *dev = hw->priv; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index e5ad635d3c56..35b7ebc2c9c6 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -239,7 +239,7 @@ int mt76x02_dma_init(struct mt76x02_dev *dev) if (ret) return ret; - netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, + netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, mt76x02_poll_tx); napi_enable(&dev->mt76.tx_napi); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c index bfc8c69f43fa..6accea551319 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c @@ -24,7 +24,7 @@ mt76x2_start(struct ieee80211_hw *hw) } static void -mt76x2_stop(struct ieee80211_hw *hw) +mt76x2_stop(struct ieee80211_hw *hw, bool suspend) { struct mt76x02_dev *dev = hw->priv; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c index 9fe390fdd730..ba0241c36672 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c @@ -22,7 +22,7 @@ static int mt76x2u_start(struct ieee80211_hw *hw) return 0; } -static void mt76x2u_stop(struct ieee80211_hw *hw) +static void mt76x2u_stop(struct ieee80211_hw *hw, bool suspend) { struct mt76x02_dev *dev = hw->priv; diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c index 0baa82c8df5a..0c62272fe7d0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/dma.c @@ -578,7 +578,7 @@ int mt7915_dma_init(struct mt7915_dev *dev, struct mt7915_phy *phy2) if (ret < 
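All of the .stop changes in this series are mechanical: mac80211's stop callback gained a bool suspend argument, and drivers with no suspend-specific behaviour simply accept it, while internal callers such as mwl8k_reload_firmware() and mt7921_stop() pass false. The shape of the updated callback, with a hypothetical my_dev:

#include <net/mac80211.h>

struct my_dev {
	bool running;	/* hypothetical driver state */
};

static void my_stop(struct ieee80211_hw *hw, bool suspend)
{
	struct my_dev *dev = hw->priv;

	/* 'suspend' distinguishes a stop performed for system suspend
	 * from a plain interface stop; a driver with no suspend-specific
	 * teardown can ignore it, as the drivers above do. */
	dev->running = false;
}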
0) return ret; - netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, + netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, mt7915_poll_tx); napi_enable(&dev->mt76.tx_napi); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/main.c b/drivers/net/wireless/mediatek/mt76/mt7915/main.c index 2624edbb59a1..049223df9beb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/main.c @@ -108,7 +108,7 @@ static int mt7915_start(struct ieee80211_hw *hw) return ret; } -static void mt7915_stop(struct ieee80211_hw *hw) +static void mt7915_stop(struct ieee80211_hw *hw, bool suspend) { struct mt7915_dev *dev = mt7915_hw_dev(hw); struct mt7915_phy *phy = mt7915_hw_phy(hw); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c index 9599adf104b1..2185cd24e2e1 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mcu.c @@ -1503,7 +1503,7 @@ mt7915_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7915_dev *dev, ra->valid = true; ra->auto_rate = true; - ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta); + ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, &sta->deflink); ra->channel = chandef->chan->hw_value; ra->bw = sta->deflink.bandwidth; ra->phy.bw = sta->deflink.bandwidth; @@ -1656,11 +1656,13 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool enable) { struct mt7915_vif *mvif = (struct mt7915_vif *)vif->drv_priv; + struct ieee80211_link_sta *link_sta; struct mt7915_sta *msta; struct sk_buff *skb; int ret; msta = sta ? (struct mt7915_sta *)sta->drv_priv : &mvif->sta; + link_sta = sta ? &sta->deflink : NULL; skb = mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, &msta->wcid); @@ -1668,7 +1670,7 @@ int mt7915_mcu_add_sta(struct mt7915_dev *dev, struct ieee80211_vif *vif, return PTR_ERR(skb); /* starec basic */ - mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, sta, enable, + mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta, enable, !rcu_access_pointer(dev->mt76.wcid[msta->wcid.idx])); if (!enable) goto out; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c index 73e42ef42983..047106b65d2b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mac.c @@ -39,6 +39,7 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev) }; struct ieee80211_sta *sta; struct mt792x_sta *msta; + struct mt792x_link_sta *mlink; u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS]; LIST_HEAD(sta_poll_list); struct rate_info *rate; @@ -60,23 +61,25 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev) spin_unlock_bh(&dev->mt76.sta_poll_lock); break; } - msta = list_first_entry(&sta_poll_list, - struct mt792x_sta, wcid.poll_list); - list_del_init(&msta->wcid.poll_list); + mlink = list_first_entry(&sta_poll_list, + struct mt792x_link_sta, + wcid.poll_list); + msta = container_of(mlink, struct mt792x_sta, deflink); + list_del_init(&mlink->wcid.poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); - idx = msta->wcid.idx; + idx = mlink->wcid.idx; addr = mt7921_mac_wtbl_lmac_addr(idx, MT_WTBL_AC0_CTT_OFFSET); for (i = 0; i < IEEE80211_NUM_ACS; i++) { - u32 tx_last = msta->airtime_ac[i]; - u32 rx_last = msta->airtime_ac[i + 4]; + u32 tx_last = mlink->airtime_ac[i]; + u32 rx_last = mlink->airtime_ac[i + 4]; - msta->airtime_ac[i] = 
mt76_rr(dev, addr); - msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); + mlink->airtime_ac[i] = mt76_rr(dev, addr); + mlink->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); - tx_time[i] = msta->airtime_ac[i] - tx_last; - rx_time[i] = msta->airtime_ac[i + 4] - rx_last; + tx_time[i] = mlink->airtime_ac[i] - tx_last; + rx_time[i] = mlink->airtime_ac[i + 4] - rx_last; if ((tx_last | rx_last) & BIT(30)) clear = true; @@ -87,10 +90,10 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev) if (clear) { mt7921_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); + memset(mlink->airtime_ac, 0, sizeof(mlink->airtime_ac)); } - if (!msta->wcid.sta) + if (!mlink->wcid.sta) continue; sta = container_of((void *)msta, struct ieee80211_sta, @@ -113,7 +116,7 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev) * we need to make sure that flags match so polling GI * from per-sta counters directly. */ - rate = &msta->wcid.rate; + rate = &mlink->wcid.rate; addr = mt7921_mac_wtbl_lmac_addr(idx, MT_WTBL_TXRX_CAP_RATE_OFFSET); val = mt76_rr(dev, addr); @@ -154,10 +157,10 @@ static void mt7921_mac_sta_poll(struct mt792x_dev *dev) rssi[2] = to_rssi(GENMASK(23, 16), val); rssi[3] = to_rssi(GENMASK(31, 14), val); - msta->ack_signal = + mlink->ack_signal = mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi); - ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal); + ewma_avg_signal_add(&mlink->avg_ack_signal, -mlink->ack_signal); } } @@ -180,6 +183,7 @@ mt7921_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) u32 rxd3 = le32_to_cpu(rxd[3]); u32 rxd4 = le32_to_cpu(rxd[4]); struct mt792x_sta *msta = NULL; + struct mt792x_link_sta *mlink; u16 seq_ctrl = 0; __le16 fc = 0; u8 mode = 0; @@ -210,10 +214,11 @@ mt7921_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) status->wcid = mt792x_rx_get_wcid(dev, idx, unicast); if (status->wcid) { - msta = container_of(status->wcid, struct mt792x_sta, wcid); + mlink = container_of(status->wcid, struct mt792x_link_sta, wcid); + msta = container_of(mlink, struct mt792x_sta, deflink); spin_lock_bh(&dev->mt76.sta_poll_lock); - if (list_empty(&msta->wcid.poll_list)) - list_add_tail(&msta->wcid.poll_list, + if (list_empty(&mlink->wcid.poll_list)) + list_add_tail(&mlink->wcid.poll_list, &dev->mt76.sta_poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); } @@ -444,7 +449,7 @@ mt7921_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) void mt7921_mac_add_txs(struct mt792x_dev *dev, void *data) { - struct mt792x_sta *msta = NULL; + struct mt792x_link_sta *mlink; struct mt76_wcid *wcid; __le32 *txs_data = data; u16 wcidx; @@ -468,15 +473,15 @@ void mt7921_mac_add_txs(struct mt792x_dev *dev, void *data) if (!wcid) goto out; - msta = container_of(wcid, struct mt792x_sta, wcid); + mlink = container_of(wcid, struct mt792x_link_sta, wcid); mt76_connac2_mac_add_txs_skb(&dev->mt76, wcid, pid, txs_data); if (!wcid->sta) goto out; spin_lock_bh(&dev->mt76.sta_poll_lock); - if (list_empty(&msta->wcid.poll_list)) - list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list); + if (list_empty(&mlink->wcid.poll_list)) + list_add_tail(&mlink->wcid.poll_list, &dev->mt76.sta_poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); out: @@ -513,7 +518,7 @@ static void mt7921_mac_tx_free(struct mt792x_dev *dev, void *data, int len) * 1'b0: msdu_id with the same 'wcid pair' as above. 
*/ if (info & MT_TX_FREE_PAIR) { - struct mt792x_sta *msta; + struct mt792x_link_sta *mlink; u16 idx; count++; @@ -523,10 +528,10 @@ static void mt7921_mac_tx_free(struct mt792x_dev *dev, void *data, int len) if (!sta) continue; - msta = container_of(wcid, struct mt792x_sta, wcid); + mlink = container_of(wcid, struct mt792x_link_sta, wcid); spin_lock_bh(&mdev->sta_poll_lock); - if (list_empty(&msta->wcid.poll_list)) - list_add_tail(&msta->wcid.poll_list, + if (list_empty(&mlink->wcid.poll_list)) + list_add_tail(&mlink->wcid.poll_list, &mdev->sta_poll_list); spin_unlock_bh(&mdev->sta_poll_lock); continue; @@ -641,11 +646,12 @@ mt7921_vif_connect_iter(void *priv, u8 *mac, if (vif->type == NL80211_IFTYPE_STATION) ieee80211_disconnect(vif, true); - mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true); + mt76_connac_mcu_uni_add_dev(&dev->mphy, &vif->bss_conf, + &mvif->sta.deflink.wcid, true); mt7921_mcu_set_tx(dev, vif); if (vif->type == NL80211_IFTYPE_AP) { - mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.wcid, + mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.deflink.wcid, true, NULL); mt7921_mcu_sta_update(dev, NULL, vif, true, MT76_STA_INFO_STATE_NONE); @@ -786,9 +792,9 @@ int mt7921_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (sta) { struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; - if (time_after(jiffies, msta->last_txs + HZ / 4)) { + if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) { info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; - msta->last_txs = jiffies; + msta->deflink.last_txs = jiffies; } } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 3e3ad3518d85..2e6268cb06c0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -268,7 +268,7 @@ static int mt7921_start(struct ieee80211_hw *hw) return err; } -static void mt7921_stop(struct ieee80211_hw *hw) +static void mt7921_stop(struct ieee80211_hw *hw, bool suspend) { struct mt792x_dev *dev = mt792x_hw_dev(hw); int err = 0; @@ -281,7 +281,7 @@ static void mt7921_stop(struct ieee80211_hw *hw) return; } - mt792x_stop(hw); + mt792x_stop(hw, false); } static int @@ -295,40 +295,40 @@ mt7921_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) mt792x_mutex_acquire(dev); - mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask); - if (mvif->mt76.idx >= MT792x_MAX_INTERFACES) { + mvif->bss_conf.mt76.idx = __ffs64(~dev->mt76.vif_mask); + if (mvif->bss_conf.mt76.idx >= MT792x_MAX_INTERFACES) { ret = -ENOSPC; goto out; } - mvif->mt76.omac_idx = mvif->mt76.idx; + mvif->bss_conf.mt76.omac_idx = mvif->bss_conf.mt76.idx; mvif->phy = phy; - mvif->mt76.band_idx = 0; - mvif->mt76.wmm_idx = mvif->mt76.idx % MT76_CONNAC_MAX_WMM_SETS; + mvif->bss_conf.mt76.band_idx = 0; + mvif->bss_conf.mt76.wmm_idx = mvif->bss_conf.mt76.idx % MT76_CONNAC_MAX_WMM_SETS; - ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, - true); + ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, &vif->bss_conf, + &mvif->sta.deflink.wcid, true); if (ret) goto out; - dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx); - phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); + dev->mt76.vif_mask |= BIT_ULL(mvif->bss_conf.mt76.idx); + phy->omac_mask |= BIT_ULL(mvif->bss_conf.mt76.omac_idx); - idx = MT792x_WTBL_RESERVED - mvif->mt76.idx; + idx = MT792x_WTBL_RESERVED - mvif->bss_conf.mt76.idx; - INIT_LIST_HEAD(&mvif->sta.wcid.poll_list); - mvif->sta.wcid.idx = idx; - 
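These hunks keep the driver's existing deferred-accounting idiom and only move the list hook from the STA to the link: the RX and TX-status hot paths merely enqueue the entry on a poll list under a spinlock, and a later pass drains it with list_first_entry()/list_del_init(). A sketch of that producer/consumer shape, with hypothetical types:

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_link_sta {
	struct list_head poll_list;
	/* ... per-link counters ... */
};

static LIST_HEAD(my_sta_poll_list);		/* hypothetical global queue */
static DEFINE_SPINLOCK(my_sta_poll_lock);

/* RX/TX-status hot path: queue the link at most once, cheaply. */
static void my_mark_for_poll(struct my_link_sta *mlink)
{
	spin_lock_bh(&my_sta_poll_lock);
	if (list_empty(&mlink->poll_list))
		list_add_tail(&mlink->poll_list, &my_sta_poll_list);
	spin_unlock_bh(&my_sta_poll_lock);
}

/* Deferred pass: drain the queue, one entry per lock hold. */
static void my_poll_worker(void)
{
	struct my_link_sta *mlink;

	while (true) {
		spin_lock_bh(&my_sta_poll_lock);
		if (list_empty(&my_sta_poll_list)) {
			spin_unlock_bh(&my_sta_poll_lock);
			break;
		}
		mlink = list_first_entry(&my_sta_poll_list,
					 struct my_link_sta, poll_list);
		list_del_init(&mlink->poll_list);
		spin_unlock_bh(&my_sta_poll_lock);

		/* ... read hardware counters for mlink ... */
	}
}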
mvif->sta.wcid.phy_idx = mvif->mt76.band_idx; - mvif->sta.wcid.hw_key_idx = -1; - mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; - mt76_wcid_init(&mvif->sta.wcid); + INIT_LIST_HEAD(&mvif->sta.deflink.wcid.poll_list); + mvif->sta.deflink.wcid.idx = idx; + mvif->sta.deflink.wcid.phy_idx = mvif->bss_conf.mt76.band_idx; + mvif->sta.deflink.wcid.hw_key_idx = -1; + mvif->sta.deflink.wcid.tx_info |= MT_WCID_TX_INFO_SET; + mt76_wcid_init(&mvif->sta.deflink.wcid); mt7921_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - ewma_rssi_init(&mvif->rssi); + ewma_rssi_init(&mvif->bss_conf.rssi); - rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); + rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.deflink.wcid); if (vif->txq) { mtxq = (struct mt76_txq *)vif->txq->drv_priv; mtxq->wcid = idx; @@ -494,7 +494,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv : &mvif->sta; - struct mt76_wcid *wcid = &msta->wcid; + struct mt76_wcid *wcid = &msta->deflink.wcid; u8 *wcid_keyidx = &wcid->hw_key_idx; int idx = key->keyidx, err = 0; @@ -541,18 +541,18 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, } mt76_wcid_key_setup(&dev->mt76, wcid, key); - err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->bip, + err = mt76_connac_mcu_add_key(&dev->mt76, vif, &msta->deflink.bip, key, MCU_UNI_CMD(STA_REC_UPDATE), - &msta->wcid, cmd); + &msta->deflink.wcid, cmd); if (err) goto out; if (key->cipher == WLAN_CIPHER_SUITE_WEP104 || key->cipher == WLAN_CIPHER_SUITE_WEP40) err = mt76_connac_mcu_add_key(&dev->mt76, vif, - &mvif->wep_sta->bip, + &mvif->wep_sta->deflink.bip, key, MCU_UNI_CMD(STA_REC_UPDATE), - &mvif->wep_sta->wcid, cmd); + &mvif->wep_sta->deflink.wcid, cmd); out: mt792x_mutex_release(dev); @@ -718,7 +718,7 @@ static void mt7921_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ARP_FILTER) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - mt76_connac_mcu_update_arp_filter(&dev->mt76, &mvif->mt76, + mt76_connac_mcu_update_arp_filter(&dev->mt76, &mvif->bss_conf.mt76, info); } @@ -799,13 +799,13 @@ int mt7921_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, if (idx < 0) return -ENOSPC; - INIT_LIST_HEAD(&msta->wcid.poll_list); + INIT_LIST_HEAD(&msta->deflink.wcid.poll_list); msta->vif = mvif; - msta->wcid.sta = 1; - msta->wcid.idx = idx; - msta->wcid.phy_idx = mvif->mt76.band_idx; - msta->wcid.tx_info |= MT_WCID_TX_INFO_SET; - msta->last_txs = jiffies; + msta->deflink.wcid.sta = 1; + msta->deflink.wcid.idx = idx; + msta->deflink.wcid.phy_idx = mvif->bss_conf.mt76.band_idx; + msta->deflink.wcid.tx_info |= MT_WCID_TX_INFO_SET; + msta->deflink.last_txs = jiffies; ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm); if (ret) @@ -840,14 +840,14 @@ void mt7921_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, mt792x_mutex_acquire(dev); if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) - mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.wcid, - true, mvif->mt76.ctx); + mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, &mvif->sta.deflink.wcid, + true, mvif->bss_conf.mt76.ctx); - ewma_avg_signal_init(&msta->avg_ack_signal); + ewma_avg_signal_init(&msta->deflink.avg_ack_signal); - mt7921_mac_wtbl_update(dev, msta->wcid.idx, + mt7921_mac_wtbl_update(dev, msta->deflink.wcid.idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); + 
memset(msta->deflink.airtime_ac, 0, sizeof(msta->deflink.airtime_ac)); mt7921_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC); @@ -861,27 +861,27 @@ void mt7921_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; - mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); + mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->deflink.wcid); mt76_connac_pm_wake(&dev->mphy, &dev->pm); mt7921_mcu_sta_update(dev, sta, vif, false, MT76_STA_INFO_STATE_NONE); - mt7921_mac_wtbl_update(dev, msta->wcid.idx, + mt7921_mac_wtbl_update(dev, msta->deflink.wcid.idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); if (vif->type == NL80211_IFTYPE_STATION) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; mvif->wep_sta = NULL; - ewma_rssi_init(&mvif->rssi); + ewma_rssi_init(&mvif->bss_conf.rssi); if (!sta->tdls) mt76_connac_mcu_uni_add_bss(&dev->mphy, vif, - &mvif->sta.wcid, false, - mvif->mt76.ctx); + &mvif->sta.deflink.wcid, false, + mvif->bss_conf.mt76.ctx); } spin_lock_bh(&dev->mt76.sta_poll_lock); - if (!list_empty(&msta->wcid.poll_list)) - list_del_init(&msta->wcid.poll_list); + if (!list_empty(&msta->deflink.wcid.poll_list)) + list_del_init(&msta->deflink.wcid.poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); mt7921_regd_set_6ghz_power_type(vif, false); @@ -923,12 +923,12 @@ mt7921_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt792x_mutex_acquire(dev); switch (action) { case IEEE80211_AMPDU_RX_START: - mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, + mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid, ssn, params->buf_size); mt7921_mcu_uni_rx_ba(dev, params, true); break; case IEEE80211_AMPDU_RX_STOP: - mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); + mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid); mt7921_mcu_uni_rx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_OPERATIONAL: @@ -939,16 +939,16 @@ mt7921_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: mtxq->aggr = false; - clear_bit(tid, &msta->wcid.ampdu_state); + clear_bit(tid, &msta->deflink.wcid.ampdu_state); mt7921_mcu_uni_tx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_START: - set_bit(tid, &msta->wcid.ampdu_state); + set_bit(tid, &msta->deflink.wcid.ampdu_state); ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; break; case IEEE80211_AMPDU_TX_STOP_CONT: mtxq->aggr = false; - clear_bit(tid, &msta->wcid.ampdu_state); + clear_bit(tid, &msta->deflink.wcid.ampdu_state); mt7921_mcu_uni_tx_ba(dev, params, false); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; @@ -1166,11 +1166,11 @@ static void mt7921_sta_set_decap_offload(struct ieee80211_hw *hw, mt792x_mutex_acquire(dev); if (enabled) - set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->deflink.wcid.flags); else - clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->deflink.wcid.flags); - mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->wcid, + mt76_connac_mcu_sta_update_hdr_trans(&dev->mt76, vif, &msta->deflink.wcid, MCU_UNI_CMD(STA_REC_UPDATE)); mt792x_mutex_release(dev); @@ -1196,7 +1196,7 @@ static void mt7921_ipv6_addr_change(struct ieee80211_hw *hw, struct mt76_connac_arpns_tlv arpns; } req_hdr = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, }, .arpns = 
{ .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND), @@ -1294,8 +1294,8 @@ mt7921_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt792x_mutex_acquire(dev); - err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, - true, mvif->mt76.ctx); + err = mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.deflink.wcid, + true, mvif->bss_conf.mt76.ctx); if (err) goto out; @@ -1326,8 +1326,8 @@ mt7921_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, if (err) goto out; - mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.wcid, false, - mvif->mt76.ctx); + mt76_connac_mcu_uni_add_bss(phy->mt76, vif, &mvif->sta.deflink.wcid, false, + mvif->bss_conf.mt76.ctx); out: mt792x_mutex_release(dev); @@ -1346,32 +1346,27 @@ mt7921_remove_chanctx(struct ieee80211_hw *hw, { } -static void mt7921_ctx_iter(void *priv, u8 *mac, - struct ieee80211_vif *vif) -{ - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - struct ieee80211_chanctx_conf *ctx = priv; - - if (ctx != mvif->mt76.ctx) - return; - - if (vif->type == NL80211_IFTYPE_MONITOR) - mt7921_mcu_config_sniffer(mvif, ctx); - else - mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx); -} - static void mt7921_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed) { + struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv; struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct ieee80211_vif *vif; + struct mt792x_vif *mvif; + + if (!mctx->bss_conf) + return; + + mvif = container_of(mctx->bss_conf, struct mt792x_vif, bss_conf); + vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv); mt792x_mutex_acquire(phy->dev); - ieee80211_iterate_active_interfaces(phy->mt76->hw, - IEEE80211_IFACE_ITER_ACTIVE, - mt7921_ctx_iter, ctx); + if (vif->type == NL80211_IFTYPE_MONITOR) + mt7921_mcu_config_sniffer(mvif, ctx); + else + mt76_connac_mcu_uni_set_chctx(mvif->phy->mt76, &mvif->bss_conf.mt76, ctx); mt792x_mutex_release(phy->dev); } @@ -1385,7 +1380,7 @@ static void mt7921_mgd_prepare_tx(struct ieee80211_hw *hw, jiffies_to_msecs(HZ); mt792x_mutex_acquire(dev); - mt7921_set_roc(mvif->phy, mvif, mvif->mt76.ctx->def.chan, duration, + mt7921_set_roc(mvif->phy, mvif, mvif->bss_conf.mt76.ctx->def.chan, duration, MT7921_ROC_REQ_JOIN); mt792x_mutex_release(dev); } diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c index bdd8b5f19b24..394fcd799345 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/mcu.c @@ -105,7 +105,7 @@ mt7921_mcu_set_ipv6_ns_filter(struct mt76_dev *dev, struct mt76_connac_arpns_tlv arpns; } req = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, }, .arpns = { .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND), @@ -260,7 +260,7 @@ mt7921_mcu_rssi_monitor_iter(void *priv, u8 *mac, struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt76_connac_rssi_notify_event *event = priv; enum nl80211_cqm_rssi_threshold_event nl_event; - s32 rssi = le32_to_cpu(event->rssi[mvif->mt76.idx]); + s32 rssi = le32_to_cpu(event->rssi[mvif->bss_conf.mt76.idx]); if (!rssi) return; @@ -386,9 +386,9 @@ int mt7921_mcu_uni_tx_ba(struct mt792x_dev *dev, struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv; if (enable && !params->amsdu) - msta->wcid.amsdu = false; + msta->deflink.wcid.amsdu = false; - return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params, + return mt76_connac_mcu_sta_ba(&dev->mt76, 
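The reworked chanctx handler above avoids iterating all active interfaces by keeping a backpointer in the chanctx private data and then climbing back out with two container_of() steps: from the driver's bss_conf to its vif struct, and from there to the mac80211 vif via the drv_priv member. A sketch of that second recovery step, which mirrors the cast used in the hunk:

#include <net/mac80211.h>

/* Hypothetical driver-private per-interface state kept in vif->drv_priv. */
struct my_vif {
	int idx;
};

static struct my_vif *my_from_vif(struct ieee80211_vif *vif)
{
	return (struct my_vif *)vif->drv_priv;
}

static struct ieee80211_vif *my_to_vif(struct my_vif *mvif)
{
	/* drv_priv is a member of ieee80211_vif, so container_of()
	 * recovers the enclosing vif; the (void *) cast matches the
	 * member's byte-array type, as in the hunk above. */
	return container_of((void *)mvif, struct ieee80211_vif, drv_priv);
}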
&msta->vif->bss_conf.mt76, params, MCU_UNI_CMD(STA_REC_UPDATE), enable, true); } @@ -399,7 +399,7 @@ int mt7921_mcu_uni_rx_ba(struct mt792x_dev *dev, { struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv; - return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->mt76, params, + return mt76_connac_mcu_sta_ba(&dev->mt76, &msta->vif->bss_conf.mt76, params, MCU_UNI_CMD(STA_REC_UPDATE), enable, false); } @@ -678,9 +678,9 @@ int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif) u8 wmm_idx; u8 pad; } __packed req = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, .qos = vif->bss_conf.qos, - .wmm_idx = mvif->mt76.wmm_idx, + .wmm_idx = mvif->bss_conf.mt76.wmm_idx, }; struct mu_edca { u8 cw_min; @@ -701,15 +701,15 @@ int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif) struct mu_edca edca[IEEE80211_NUM_ACS]; u8 pad3[32]; } __packed req_mu = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, .qos = vif->bss_conf.qos, - .wmm_idx = mvif->mt76.wmm_idx, + .wmm_idx = mvif->bss_conf.mt76.wmm_idx, }; static const int to_aci[] = { 1, 0, 2, 3 }; int ac, ret; for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac]; + struct ieee80211_tx_queue_params *q = &mvif->bss_conf.queue_params[ac]; struct edca *e = &req.edca[to_aci[ac]]; e->aifs = cpu_to_le16(q->aifs); @@ -738,10 +738,10 @@ int mt7921_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif) struct ieee80211_he_mu_edca_param_ac_rec *q; struct mu_edca *e; - if (!mvif->queue_params[ac].mu_edca) + if (!mvif->bss_conf.queue_params[ac].mu_edca) break; - q = &mvif->queue_params[ac].mu_edca_param_rec; + q = &mvif->bss_conf.queue_params[ac].mu_edca_param_rec; e = &(req_mu.edca[to_aci[ac]]); e->cw_min = q->ecw_min_max & 0xf; @@ -790,7 +790,7 @@ int mt7921_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, .tokenid = token_id, .reqtype = type, .maxinterval = cpu_to_le32(duration), - .bss_idx = vif->mt76.idx, + .bss_idx = vif->bss_conf.mt76.idx, .control_channel = chan->hw_value, .bw = CMD_CBW_20MHZ, .bw_from_ap = CMD_CBW_20MHZ, @@ -842,7 +842,7 @@ int mt7921_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, .tag = cpu_to_le16(UNI_ROC_ABORT), .len = cpu_to_le16(sizeof(struct roc_abort_tlv)), .tokenid = token_id, - .bss_idx = vif->mt76.idx, + .bss_idx = vif->bss_conf.mt76.idx, .dbdcband = 0xff, /* auto*/ }, }; @@ -947,7 +947,7 @@ int mt7921_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif) } __packed ps; } __packed ps_req = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, }, .ps = { .tag = cpu_to_le16(UNI_BSS_INFO_PS), @@ -982,7 +982,7 @@ mt7921_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif, } __packed bcnft; } __packed bcnft_req = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, }, .bcnft = { .tag = cpu_to_le16(UNI_BSS_INFO_BCNFT), @@ -1015,7 +1015,7 @@ mt7921_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, u8 bmc_triggered_ac; u8 pad; } req = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, .aid = cpu_to_le16(vif->cfg.aid), .dtim_period = vif->bss_conf.dtim_period, .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int), @@ -1024,7 +1024,7 @@ mt7921_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, u8 bss_idx; u8 pad[3]; } req_hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, }; int err; @@ -1042,7 +1042,7 @@ int 
mt7921_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta, enum mt76_sta_info_state state) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - int rssi = -ewma_rssi_read(&mvif->rssi); + int rssi = -ewma_rssi_read(&mvif->bss_conf.rssi); struct mt76_sta_cmd_info info = { .sta = sta, .vif = vif, @@ -1055,7 +1055,7 @@ int mt7921_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta, struct mt792x_sta *msta; msta = sta ? (struct mt792x_sta *)sta->drv_priv : NULL; - info.wcid = msta ? &msta->wcid : &mvif->sta.wcid; + info.wcid = msta ? &msta->deflink.wcid : &mvif->sta.deflink.wcid; info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true; return mt76_connac_mcu_sta_cmd(&dev->mphy, &info); @@ -1190,7 +1190,7 @@ int mt7921_mcu_config_sniffer(struct mt792x_vif *vif, } __packed tlv; } __packed req = { .hdr = { - .band_idx = vif->mt76.band_idx, + .band_idx = vif->bss_conf.mt76.band_idx, }, .tlv = { .tag = cpu_to_le16(1), @@ -1251,7 +1251,7 @@ mt7921_mcu_uni_add_beacon_offload(struct mt792x_dev *dev, } __packed beacon_tlv; } req = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, }, .beacon_tlv = { .tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT), @@ -1460,7 +1460,7 @@ int mt7921_mcu_set_rssimonitor(struct mt792x_dev *dev, struct ieee80211_vif *vif .enable = vif->cfg.assoc, .cqm_rssi_high = vif->bss_conf.cqm_rssi_thold + vif->bss_conf.cqm_rssi_hyst, .cqm_rssi_low = vif->bss_conf.cqm_rssi_thold - vif->bss_conf.cqm_rssi_hyst, - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, }; return mt76_mcu_send_msg(&dev->mt76, MCU_CE_CMD(RSSI_MONITOR), diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c index f768e9389ac6..a7430216a80d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci.c @@ -219,7 +219,7 @@ static int mt7921_dma_init(struct mt792x_dev *dev) if (ret < 0) return ret; - netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, + netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, mt792x_poll_tx); napi_enable(&dev->mt76.tx_napi); @@ -339,6 +339,9 @@ static int mt7921_pci_probe(struct pci_dev *pdev, bus_ops->rmw = mt7921_rmw; dev->mt76.bus = bus_ops; + if (!mt7921_disable_aspm && mt76_pci_aspm_supported(pdev)) + dev->aspm_supported = true; + ret = mt792xe_mcu_fw_pmctrl(dev); if (ret) goto err_free_dev; diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c index 031ba9aaa4e2..2452b1a2d118 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/pci_mac.c @@ -34,9 +34,9 @@ int mt7921e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (sta) { struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; - if (time_after(jiffies, msta->last_txs + HZ / 4)) { + if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) { info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; - msta->last_txs = jiffies; + msta->deflink.last_txs = jiffies; } } diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/init.c b/drivers/net/wireless/mediatek/mt76/mt7925/init.c index c4cbc8976046..039949b344b9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/init.c @@ -179,6 +179,12 @@ static void mt7925_init_work(struct work_struct *work) mt76_set_stream_caps(&dev->mphy, true); mt7925_set_stream_he_eht_caps(&dev->phy); + ret = 
mt7925_init_mlo_caps(&dev->phy); + if (ret) { + dev_err(dev->mt76.dev, "MLO init failed\n"); + return; + } + ret = mt76_register_device(&dev->mt76, true, mt76_rates, ARRAY_SIZE(mt76_rates)); if (ret) { diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c index c2460ef4993d..cf36750cf709 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mac.c @@ -28,6 +28,7 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev) }; struct ieee80211_sta *sta; struct mt792x_sta *msta; + struct mt792x_link_sta *mlink; u32 tx_time[IEEE80211_NUM_ACS], rx_time[IEEE80211_NUM_ACS]; LIST_HEAD(sta_poll_list); struct rate_info *rate; @@ -46,24 +47,25 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev) if (list_empty(&sta_poll_list)) break; - msta = list_first_entry(&sta_poll_list, - struct mt792x_sta, wcid.poll_list); + mlink = list_first_entry(&sta_poll_list, + struct mt792x_link_sta, wcid.poll_list); + msta = container_of(mlink, struct mt792x_sta, deflink); spin_lock_bh(&dev->mt76.sta_poll_lock); - list_del_init(&msta->wcid.poll_list); + list_del_init(&mlink->wcid.poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); - idx = msta->wcid.idx; + idx = mlink->wcid.idx; addr = mt7925_mac_wtbl_lmac_addr(dev, idx, MT_WTBL_AC0_CTT_OFFSET); for (i = 0; i < IEEE80211_NUM_ACS; i++) { - u32 tx_last = msta->airtime_ac[i]; - u32 rx_last = msta->airtime_ac[i + 4]; + u32 tx_last = mlink->airtime_ac[i]; + u32 rx_last = mlink->airtime_ac[i + 4]; - msta->airtime_ac[i] = mt76_rr(dev, addr); - msta->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); + mlink->airtime_ac[i] = mt76_rr(dev, addr); + mlink->airtime_ac[i + 4] = mt76_rr(dev, addr + 4); - tx_time[i] = msta->airtime_ac[i] - tx_last; - rx_time[i] = msta->airtime_ac[i + 4] - rx_last; + tx_time[i] = mlink->airtime_ac[i] - tx_last; + rx_time[i] = mlink->airtime_ac[i + 4] - rx_last; if ((tx_last | rx_last) & BIT(30)) clear = true; @@ -74,10 +76,10 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev) if (clear) { mt7925_mac_wtbl_update(dev, idx, MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); + memset(mlink->airtime_ac, 0, sizeof(mlink->airtime_ac)); } - if (!msta->wcid.sta) + if (!mlink->wcid.sta) continue; sta = container_of((void *)msta, struct ieee80211_sta, @@ -100,7 +102,7 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev) * we need to make sure that flags match so polling GI * from per-sta counters directly. 
*/ - rate = &msta->wcid.rate; + rate = &mlink->wcid.rate; switch (rate->bw) { case RATE_INFO_BW_160: @@ -144,10 +146,10 @@ static void mt7925_mac_sta_poll(struct mt792x_dev *dev) rssi[2] = to_rssi(GENMASK(23, 16), val); rssi[3] = to_rssi(GENMASK(31, 24), val); - msta->ack_signal = + mlink->ack_signal = mt76_rx_signal(msta->vif->phy->mt76->antenna_mask, rssi); - ewma_avg_signal_add(&msta->avg_ack_signal, -msta->ack_signal); + ewma_avg_signal_add(&mlink->avg_ack_signal, -mlink->ack_signal); } } @@ -365,7 +367,7 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) u32 rxd2 = le32_to_cpu(rxd[2]); u32 rxd3 = le32_to_cpu(rxd[3]); u32 rxd4 = le32_to_cpu(rxd[4]); - struct mt792x_sta *msta = NULL; + struct mt792x_link_sta *mlink; u8 mode = 0; /* , band_idx; */ u16 seq_ctrl = 0; __le16 fc = 0; @@ -393,10 +395,10 @@ mt7925_mac_fill_rx(struct mt792x_dev *dev, struct sk_buff *skb) status->wcid = mt792x_rx_get_wcid(dev, idx, unicast); if (status->wcid) { - msta = container_of(status->wcid, struct mt792x_sta, wcid); + mlink = container_of(status->wcid, struct mt792x_link_sta, wcid); spin_lock_bh(&dev->mt76.sta_poll_lock); - if (list_empty(&msta->wcid.poll_list)) - list_add_tail(&msta->wcid.poll_list, + if (list_empty(&mlink->wcid.poll_list)) + list_add_tail(&mlink->wcid.poll_list, &dev->mt76.sta_poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); } @@ -738,8 +740,12 @@ mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, BSS_CHANGED_BEACON_ENABLED)); bool inband_disc = !!(changed & (BSS_CHANGED_UNSOL_BCAST_PROBE_RESP | BSS_CHANGED_FILS_DISCOVERY)); + struct mt792x_bss_conf *mconf; + + mconf = vif ? mt792x_vif_to_link((struct mt792x_vif *)vif->drv_priv, + wcid->link_id) : NULL; + mvif = mconf ? (struct mt76_vif *)&mconf->mt76 : NULL; - mvif = vif ? (struct mt76_vif *)vif->drv_priv : NULL; if (mvif) { omac_idx = mvif->omac_idx; wmm_idx = mvif->wmm_idx; @@ -800,8 +806,10 @@ mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, txwi[5] = cpu_to_le32(val); - val = MT_TXD6_DIS_MAT | MT_TXD6_DAS | - FIELD_PREP(MT_TXD6_MSDU_CNT, 1); + val = MT_TXD6_DAS | FIELD_PREP(MT_TXD6_MSDU_CNT, 1); + if (!ieee80211_vif_is_mld(vif) || + (q_idx >= MT_LMAC_ALTX0 && q_idx <= MT_LMAC_BCN0)) + val |= MT_TXD6_DIS_MAT; txwi[6] = cpu_to_le32(val); txwi[7] = 0; @@ -831,27 +839,53 @@ mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, } EXPORT_SYMBOL_GPL(mt7925_mac_write_txwi); -static void mt7925_tx_check_aggr(struct ieee80211_sta *sta, __le32 *txwi) +static void mt7925_tx_check_aggr(struct ieee80211_sta *sta, struct sk_buff *skb, + struct mt76_wcid *wcid) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_link_sta *link_sta; + struct mt792x_link_sta *mlink; struct mt792x_sta *msta; + bool is_8023; u16 fc, tid; - u32 val; - if (!sta || !(sta->deflink.ht_cap.ht_supported || sta->deflink.he_cap.has_he)) + link_sta = rcu_dereference(sta->link[wcid->link_id]); + if (!link_sta) return; - tid = le32_get_bits(txwi[1], MT_TXD1_TID); - if (tid >= 6) /* skip VO queue */ + if (!sta || !(link_sta->ht_cap.ht_supported || link_sta->he_cap.has_he)) return; - val = le32_to_cpu(txwi[2]); - fc = FIELD_GET(MT_TXD2_FRAME_TYPE, val) << 2 | - FIELD_GET(MT_TXD2_SUB_TYPE, val) << 4; + tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK; + is_8023 = info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP; + + if (is_8023) { + fc = IEEE80211_FTYPE_DATA | + (sta->wme ? 
IEEE80211_STYPE_QOS_DATA : + IEEE80211_STYPE_DATA); + } else { + /* No need to get precise TID for Action/Management Frame, + * since it will not meet the following Frame Control + * condition anyway. + */ + + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + + fc = le16_to_cpu(hdr->frame_control) & + (IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE); + } + if (unlikely(fc != (IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA))) return; msta = (struct mt792x_sta *)sta->drv_priv; - if (!test_and_set_bit(tid, &msta->wcid.ampdu_state)) + + if (sta->mlo && msta->deflink_id != IEEE80211_LINK_UNSPECIFIED) + mlink = rcu_dereference(msta->link[msta->deflink_id]); + else + mlink = &msta->deflink; + + if (!test_and_set_bit(tid, &mlink->wcid.ampdu_state)) ieee80211_start_tx_ba_session(sta, tid, 0); } @@ -991,7 +1025,7 @@ out_no_skb: void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data) { - struct mt792x_sta *msta = NULL; + struct mt792x_link_sta *mlink = NULL; struct mt76_wcid *wcid; __le32 *txs_data = data; u16 wcidx; @@ -1015,15 +1049,15 @@ void mt7925_mac_add_txs(struct mt792x_dev *dev, void *data) if (!wcid) goto out; - msta = container_of(wcid, struct mt792x_sta, wcid); + mlink = container_of(wcid, struct mt792x_link_sta, wcid); mt7925_mac_add_txs_skb(dev, wcid, pid, txs_data); if (!wcid->sta) goto out; spin_lock_bh(&dev->mt76.sta_poll_lock); - if (list_empty(&msta->wcid.poll_list)) - list_add_tail(&msta->wcid.poll_list, &dev->mt76.sta_poll_list); + if (list_empty(&mlink->wcid.poll_list)) + list_add_tail(&mlink->wcid.poll_list, &dev->mt76.sta_poll_list); spin_unlock_bh(&dev->mt76.sta_poll_lock); out: @@ -1031,7 +1065,7 @@ out: } void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t, - struct ieee80211_sta *sta, bool clear_status, + struct ieee80211_sta *sta, struct mt76_wcid *wcid, struct list_head *free_list) { struct mt76_dev *mdev = &dev->mt76; @@ -1044,10 +1078,8 @@ void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t, txwi = (__le32 *)mt76_get_txwi_ptr(mdev, t); if (sta) { - struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; - if (likely(t->skb->protocol != cpu_to_be16(ETH_P_PAE))) - mt7925_tx_check_aggr(sta, txwi); + mt7925_tx_check_aggr(sta, t->skb, wcid); wcid_idx = wcid->idx; } else { @@ -1094,7 +1126,7 @@ mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len) */ info = le32_to_cpu(*cur_info); if (info & MT_TXFREE_INFO_PAIR) { - struct mt792x_sta *msta; + struct mt792x_link_sta *mlink; u16 idx; idx = FIELD_GET(MT_TXFREE_INFO_WLAN_ID, info); @@ -1103,10 +1135,10 @@ mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len) if (!sta) continue; - msta = container_of(wcid, struct mt792x_sta, wcid); + mlink = container_of(wcid, struct mt792x_link_sta, wcid); spin_lock_bh(&mdev->sta_poll_lock); - if (list_empty(&msta->wcid.poll_list)) - list_add_tail(&msta->wcid.poll_list, + if (list_empty(&mlink->wcid.poll_list)) + list_add_tail(&mlink->wcid.poll_list, &mdev->sta_poll_list); spin_unlock_bh(&mdev->sta_poll_lock); continue; @@ -1132,7 +1164,7 @@ mt7925_mac_tx_free(struct mt792x_dev *dev, void *data, int len) if (!txwi) continue; - mt7925_txwi_free(dev, txwi, sta, 0, &free_list); + mt7925_txwi_free(dev, txwi, sta, wcid, &free_list); } } @@ -1235,17 +1267,26 @@ mt7925_vif_connect_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + unsigned long valid = ieee80211_vif_is_mld(vif) ? 
+ mvif->valid_links : BIT(0); struct mt792x_dev *dev = mvif->phy->dev; struct ieee80211_hw *hw = mt76_hw(dev); + struct ieee80211_bss_conf *bss_conf; + int i; if (vif->type == NL80211_IFTYPE_STATION) ieee80211_disconnect(vif, true); - mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, true); - mt7925_mcu_set_tx(dev, vif); + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + bss_conf = mt792x_vif_to_bss_conf(vif, i); + + mt76_connac_mcu_uni_add_dev(&dev->mphy, bss_conf, + &mvif->sta.deflink.wcid, true); + mt7925_mcu_set_tx(dev, bss_conf); + } if (vif->type == NL80211_IFTYPE_AP) { - mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.wcid, + mt76_connac_mcu_uni_add_bss(dev->phy.mt76, vif, &mvif->sta.deflink.wcid, true, NULL); mt7925_mcu_sta_update(dev, NULL, vif, true, MT76_STA_INFO_STATE_NONE); @@ -1380,9 +1421,9 @@ int mt7925_usb_sdio_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (sta) { struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; - if (time_after(jiffies, msta->last_txs + HZ / 4)) { + if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) { info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; - msta->last_txs = jiffies; + msta->deflink.last_txs = jiffies; } } @@ -1417,7 +1458,7 @@ void mt7925_usb_sdio_tx_complete_skb(struct mt76_dev *mdev, sta = wcid_to_sta(wcid); if (sta && likely(e->skb->protocol != cpu_to_be16(ETH_P_PAE))) - mt7925_tx_check_aggr(sta, txwi); + mt76_connac2_tx_check_aggr(sta, txwi); skb_pull(e->skb, headroom); mt76_tx_complete_skb(mdev, e->wcid, e->skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/main.c b/drivers/net/wireless/mediatek/mt76/mt7925/main.c index 6179798a8845..8c0768bf9343 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/main.c @@ -236,6 +236,35 @@ mt7925_init_eht_caps(struct mt792x_phy *phy, enum nl80211_band band, eht_nss->bw._160.rx_tx_mcs13_max_nss = val; } +int mt7925_init_mlo_caps(struct mt792x_phy *phy) +{ + struct wiphy *wiphy = phy->mt76->hw->wiphy; + static const u8 ext_capa_sta[] = { + [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, + }; + static struct wiphy_iftype_ext_capab ext_capab[] = { + { + .iftype = NL80211_IFTYPE_STATION, + .extended_capabilities = ext_capa_sta, + .extended_capabilities_mask = ext_capa_sta, + .extended_capabilities_len = sizeof(ext_capa_sta), + }, + }; + + if (!(phy->chip_cap & MT792x_CHIP_CAP_MLO_EVT_EN)) + return 0; + + ext_capab[0].eml_capabilities = phy->eml_cap; + ext_capab[0].mld_capa_and_ops = + u16_encode_bits(1, IEEE80211_MLD_CAP_OP_MAX_SIMUL_LINKS); + + wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; + wiphy->iftype_ext_capab = ext_capab; + wiphy->num_iftype_ext_capab = ARRAY_SIZE(ext_capab); + + return 0; +} + static void __mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy, struct ieee80211_supported_band *sband, @@ -317,61 +346,82 @@ static int mt7925_start(struct ieee80211_hw *hw) return err; } +static int mt7925_mac_link_bss_add(struct mt792x_dev *dev, + struct ieee80211_bss_conf *link_conf, + struct mt792x_link_sta *mlink) +{ + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); + struct ieee80211_vif *vif = link_conf->vif; + struct mt792x_vif *mvif = mconf->vif; + struct mt76_txq *mtxq; + int idx, ret = 0; + + mconf->mt76.idx = __ffs64(~dev->mt76.vif_mask); + if (mconf->mt76.idx >= MT792x_MAX_INTERFACES) { + ret = -ENOSPC; + goto out; + } + + mconf->mt76.omac_idx = ieee80211_vif_is_mld(vif) ? 
+ 0 : mconf->mt76.idx; + mconf->mt76.band_idx = 0xff; + mconf->mt76.wmm_idx = mconf->mt76.idx % MT76_CONNAC_MAX_WMM_SETS; + + if (mvif->phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ) + mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4; + else + mconf->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL; + + ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, + &mlink->wcid, true); + if (ret) + goto out; + + dev->mt76.vif_mask |= BIT_ULL(mconf->mt76.idx); + mvif->phy->omac_mask |= BIT_ULL(mconf->mt76.omac_idx); + + idx = MT792x_WTBL_RESERVED - mconf->mt76.idx; + + INIT_LIST_HEAD(&mlink->wcid.poll_list); + mlink->wcid.idx = idx; + mlink->wcid.phy_idx = mconf->mt76.band_idx; + mlink->wcid.hw_key_idx = -1; + mlink->wcid.tx_info |= MT_WCID_TX_INFO_SET; + mt76_wcid_init(&mlink->wcid); + + mt7925_mac_wtbl_update(dev, idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + + ewma_rssi_init(&mconf->rssi); + + rcu_assign_pointer(dev->mt76.wcid[idx], &mlink->wcid); + if (vif->txq) { + mtxq = (struct mt76_txq *)vif->txq->drv_priv; + mtxq->wcid = idx; + } + +out: + return ret; +} + static int mt7925_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); struct mt792x_phy *phy = mt792x_hw_phy(hw); - struct mt76_txq *mtxq; - int idx, ret = 0; + int ret = 0; mt792x_mutex_acquire(dev); - mvif->mt76.idx = __ffs64(~dev->mt76.vif_mask); - if (mvif->mt76.idx >= MT792x_MAX_INTERFACES) { - ret = -ENOSPC; - goto out; - } - - mvif->mt76.omac_idx = mvif->mt76.idx; mvif->phy = phy; - mvif->mt76.band_idx = 0; - mvif->mt76.wmm_idx = mvif->mt76.idx % MT76_CONNAC_MAX_WMM_SETS; - - if (phy->mt76->chandef.chan->band != NL80211_BAND_2GHZ) - mvif->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL + 4; - else - mvif->mt76.basic_rates_idx = MT792x_BASIC_RATES_TBL; - - ret = mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, - true); - if (ret) - goto out; - - dev->mt76.vif_mask |= BIT_ULL(mvif->mt76.idx); - phy->omac_mask |= BIT_ULL(mvif->mt76.omac_idx); - - idx = MT792x_WTBL_RESERVED - mvif->mt76.idx; - - INIT_LIST_HEAD(&mvif->sta.wcid.poll_list); - mvif->sta.wcid.idx = idx; - mvif->sta.wcid.phy_idx = mvif->mt76.band_idx; - mvif->sta.wcid.hw_key_idx = -1; - mvif->sta.wcid.tx_info |= MT_WCID_TX_INFO_SET; + mvif->bss_conf.vif = mvif; mvif->sta.vif = mvif; - mt76_wcid_init(&mvif->sta.wcid); + mvif->deflink_id = IEEE80211_LINK_UNSPECIFIED; - mt7925_mac_wtbl_update(dev, idx, - MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - - ewma_rssi_init(&mvif->rssi); - - rcu_assign_pointer(dev->mt76.wcid[idx], &mvif->sta.wcid); - if (vif->txq) { - mtxq = (struct mt76_txq *)vif->txq->drv_priv; - mtxq->wcid = idx; - } + ret = mt7925_mac_link_bss_add(dev, &vif->bss_conf, &mvif->sta.deflink); + if (ret < 0) + goto out; vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; out: @@ -386,7 +436,7 @@ static void mt7925_roc_iter(void *priv, u8 *mac, struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_phy *phy = priv; - mt7925_mcu_abort_roc(phy, mvif, phy->roc_token_id); + mt7925_mcu_abort_roc(phy, &mvif->bss_conf, phy->roc_token_id); } void mt7925_roc_work(struct work_struct *work) @@ -407,7 +457,8 @@ void mt7925_roc_work(struct work_struct *work) ieee80211_remain_on_channel_expired(phy->mt76->hw); } -static int mt7925_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif) +static int mt7925_abort_roc(struct mt792x_phy *phy, + struct mt792x_bss_conf *mconf) { int err = 0; @@ -416,14 +467,14 @@ static int 
mt7925_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif) mt792x_mutex_acquire(phy->dev); if (test_and_clear_bit(MT76_STATE_ROC, &phy->mt76->state)) - err = mt7925_mcu_abort_roc(phy, vif, phy->roc_token_id); + err = mt7925_mcu_abort_roc(phy, mconf, phy->roc_token_id); mt792x_mutex_release(phy->dev); return err; } static int mt7925_set_roc(struct mt792x_phy *phy, - struct mt792x_vif *vif, + struct mt792x_bss_conf *mconf, struct ieee80211_channel *chan, int duration, enum mt7925_roc_req type) @@ -435,7 +486,7 @@ static int mt7925_set_roc(struct mt792x_phy *phy, phy->roc_grant = false; - err = mt7925_mcu_set_roc(phy, vif, chan, duration, type, + err = mt7925_mcu_set_roc(phy, mconf, chan, duration, type, ++phy->roc_token_id); if (err < 0) { clear_bit(MT76_STATE_ROC, &phy->mt76->state); @@ -443,7 +494,34 @@ static int mt7925_set_roc(struct mt792x_phy *phy, } if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, 4 * HZ)) { - mt7925_mcu_abort_roc(phy, vif, phy->roc_token_id); + mt7925_mcu_abort_roc(phy, mconf, phy->roc_token_id); + clear_bit(MT76_STATE_ROC, &phy->mt76->state); + err = -ETIMEDOUT; + } + +out: + return err; +} + +static int mt7925_set_mlo_roc(struct mt792x_phy *phy, + struct mt792x_bss_conf *mconf, + u16 sel_links) +{ + int err; + + if (WARN_ON_ONCE(test_and_set_bit(MT76_STATE_ROC, &phy->mt76->state))) + return -EBUSY; + + phy->roc_grant = false; + + err = mt7925_mcu_set_mlo_roc(mconf, sel_links, 5, ++phy->roc_token_id); + if (err < 0) { + clear_bit(MT76_STATE_ROC, &phy->mt76->state); + goto out; + } + + if (!wait_event_timeout(phy->roc_wait, phy->roc_grant, 4 * HZ)) { + mt7925_mcu_abort_roc(phy, mconf, phy->roc_token_id); clear_bit(MT76_STATE_ROC, &phy->mt76->state); err = -ETIMEDOUT; } @@ -463,7 +541,8 @@ static int mt7925_remain_on_channel(struct ieee80211_hw *hw, int err; mt792x_mutex_acquire(phy->dev); - err = mt7925_set_roc(phy, mvif, chan, duration, MT7925_ROC_REQ_ROC); + err = mt7925_set_roc(phy, &mvif->bss_conf, + chan, duration, MT7925_ROC_REQ_ROC); mt792x_mutex_release(phy->dev); return err; @@ -475,30 +554,31 @@ static int mt7925_cancel_remain_on_channel(struct ieee80211_hw *hw, struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_phy *phy = mt792x_hw_phy(hw); - return mt7925_abort_roc(phy, mvif); + return mt7925_abort_roc(phy, &mvif->bss_conf); } -static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, - struct ieee80211_vif *vif, struct ieee80211_sta *sta, - struct ieee80211_key_conf *key) +static int mt7925_set_link_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key, int link_id) { struct mt792x_dev *dev = mt792x_hw_dev(hw); struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv : &mvif->sta; - struct mt76_wcid *wcid = &msta->wcid; - u8 *wcid_keyidx = &wcid->hw_key_idx; + struct ieee80211_bss_conf *link_conf; + struct ieee80211_link_sta *link_sta; int idx = key->keyidx, err = 0; + struct mt792x_link_sta *mlink; + struct mt792x_bss_conf *mconf; + struct mt76_wcid *wcid; + u8 *wcid_keyidx; - /* The hardware does not support per-STA RX GTK, fallback - * to software mode for these. 
- */ - if ((vif->type == NL80211_IFTYPE_ADHOC || - vif->type == NL80211_IFTYPE_MESH_POINT) && - (key->cipher == WLAN_CIPHER_SUITE_TKIP || - key->cipher == WLAN_CIPHER_SUITE_CCMP) && - !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) - return -EOPNOTSUPP; + link_conf = mt792x_vif_to_bss_conf(vif, link_id); + link_sta = sta ? mt792x_sta_to_link_sta(vif, sta, link_id) : NULL; + mconf = mt792x_vif_to_link(mvif, link_id); + mlink = mt792x_sta_to_link(msta, link_id); + wcid = &mlink->wcid; + wcid_keyidx = &wcid->hw_key_idx; /* fall back to sw encryption for unsupported ciphers */ switch (key->cipher) { @@ -522,13 +602,12 @@ static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, return -EOPNOTSUPP; } - mt792x_mutex_acquire(dev); - - if (cmd == SET_KEY && !mvif->mt76.cipher) { + if (cmd == SET_KEY && !mconf->mt76.cipher) { struct mt792x_phy *phy = mt792x_hw_phy(hw); - mvif->mt76.cipher = mt7925_mcu_get_cipher(key->cipher); - mt7925_mcu_add_bss_info(phy, mvif->mt76.ctx, vif, sta, true); + mconf->mt76.cipher = mt7925_mcu_get_cipher(key->cipher); + mt7925_mcu_add_bss_info(phy, mconf->mt76.ctx, link_conf, + link_sta, true); } if (cmd == SET_KEY) @@ -541,20 +620,59 @@ static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, mt76_wcid_key_setup(&dev->mt76, wcid, cmd == SET_KEY ? key : NULL); - err = mt7925_mcu_add_key(&dev->mt76, vif, &msta->bip, + err = mt7925_mcu_add_key(&dev->mt76, vif, &mlink->bip, key, MCU_UNI_CMD(STA_REC_UPDATE), - &msta->wcid, cmd); + &mlink->wcid, cmd, msta); if (err) goto out; if (key->cipher == WLAN_CIPHER_SUITE_WEP104 || key->cipher == WLAN_CIPHER_SUITE_WEP40) - err = mt7925_mcu_add_key(&dev->mt76, vif, &mvif->wep_sta->bip, + err = mt7925_mcu_add_key(&dev->mt76, vif, &mvif->wep_sta->deflink.bip, key, MCU_WMWA_UNI_CMD(STA_REC_UPDATE), - &mvif->wep_sta->wcid, cmd); - + &mvif->wep_sta->deflink.wcid, cmd, msta); out: + return err; +} + +static int mt7925_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, + struct ieee80211_vif *vif, struct ieee80211_sta *sta, + struct ieee80211_key_conf *key) +{ + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv : + &mvif->sta; + int err; + + /* The hardware does not support per-STA RX GTK, fallback + * to software mode for these. + */ + if ((vif->type == NL80211_IFTYPE_ADHOC || + vif->type == NL80211_IFTYPE_MESH_POINT) && + (key->cipher == WLAN_CIPHER_SUITE_TKIP || + key->cipher == WLAN_CIPHER_SUITE_CCMP) && + !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) + return -EOPNOTSUPP; + + mt792x_mutex_acquire(dev); + + if (ieee80211_vif_is_mld(vif)) { + unsigned int link_id; + unsigned long add; + + add = key->link_id != -1 ? 
BIT(key->link_id) : msta->valid_links; + + for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { + err = mt7925_set_link_key(hw, cmd, vif, sta, key, link_id); + if (err < 0) + break; + } + } else { + err = mt7925_set_link_key(hw, cmd, vif, sta, key, vif->bss_conf.link_id); + } + mt792x_mutex_release(dev); return err; @@ -695,165 +813,387 @@ mt7925_get_rates_table(struct ieee80211_hw *hw, struct ieee80211_vif *vif, return mvif->basic_rates_idx; } -static void mt7925_bss_info_changed(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *info, - u64 changed) +static int mt7925_mac_link_sta_add(struct mt76_dev *mdev, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta) { - struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; - struct mt792x_phy *phy = mt792x_hw_phy(hw); - struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct ieee80211_bss_conf *link_conf; + struct mt792x_bss_conf *mconf; + u8 link_id = link_sta->link_id; + struct mt792x_link_sta *mlink; + struct mt792x_sta *msta; + int ret, idx; - mt792x_mutex_acquire(dev); + msta = (struct mt792x_sta *)link_sta->sta->drv_priv; + mlink = mt792x_sta_to_link(msta, link_id); - if (changed & BSS_CHANGED_ERP_SLOT) { - int slottime = info->use_short_slot ? 9 : 20; + idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT792x_WTBL_STA - 1); + if (idx < 0) + return -ENOSPC; - if (slottime != phy->slottime) { - phy->slottime = slottime; - mt7925_mcu_set_timing(phy, vif); + mconf = mt792x_vif_to_link(mvif, link_id); + INIT_LIST_HEAD(&mlink->wcid.poll_list); + mlink->wcid.sta = 1; + mlink->wcid.idx = idx; + mlink->wcid.phy_idx = mconf->mt76.band_idx; + mlink->wcid.tx_info |= MT_WCID_TX_INFO_SET; + mlink->last_txs = jiffies; + mlink->wcid.link_id = link_sta->link_id; + mlink->wcid.link_valid = !!link_sta->sta->valid_links; + + ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm); + if (ret) + return ret; + + mt7925_mac_wtbl_update(dev, idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + + link_conf = mt792x_vif_to_bss_conf(vif, link_id); + + /* should update bss info before STA add */ + if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls) + mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, + link_conf, link_sta, false); + + if (ieee80211_vif_is_mld(vif) && + link_sta == mlink->pri_link) { + ret = mt7925_mcu_sta_update(dev, link_sta, vif, true, + MT76_STA_INFO_STATE_NONE); + if (ret) + return ret; + } else if (ieee80211_vif_is_mld(vif) && + link_sta != mlink->pri_link) { + ret = mt7925_mcu_sta_update(dev, mlink->pri_link, vif, + true, MT76_STA_INFO_STATE_ASSOC); + if (ret) + return ret; + + ret = mt7925_mcu_sta_update(dev, link_sta, vif, true, + MT76_STA_INFO_STATE_ASSOC); + if (ret) + return ret; + } else { + ret = mt7925_mcu_sta_update(dev, link_sta, vif, true, + MT76_STA_INFO_STATE_NONE); + if (ret) + return ret; + } + + mt76_connac_power_save_sched(&dev->mphy, &dev->pm); + + return 0; +} + +static int +mt7925_mac_sta_add_links(struct mt792x_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, unsigned long new_links) +{ + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct mt76_wcid *wcid; + unsigned int link_id; + int err = 0; + + for_each_set_bit(link_id, &new_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_link_sta *link_sta; + struct mt792x_link_sta *mlink; + + if (msta->deflink_id == IEEE80211_LINK_UNSPECIFIED) { + mlink = 
&msta->deflink; + msta->deflink_id = link_id; + } else { + mlink = devm_kzalloc(dev->mt76.dev, sizeof(*mlink), GFP_KERNEL); + if (!mlink) { + err = -ENOMEM; + break; + } + + wcid = &mlink->wcid; + ewma_signal_init(&wcid->rssi); + rcu_assign_pointer(dev->mt76.wcid[wcid->idx], wcid); + mt76_wcid_init(wcid); + ewma_avg_signal_init(&mlink->avg_ack_signal); + memset(mlink->airtime_ac, 0, + sizeof(msta->deflink.airtime_ac)); } + + msta->valid_links |= BIT(link_id); + rcu_assign_pointer(msta->link[link_id], mlink); + mlink->sta = msta; + mlink->pri_link = &sta->deflink; + mlink->wcid.def_wcid = &msta->deflink.wcid; + + link_sta = mt792x_sta_to_link_sta(vif, sta, link_id); + mt7925_mac_link_sta_add(&dev->mt76, vif, link_sta); } - if (changed & BSS_CHANGED_MCAST_RATE) - mvif->mcast_rates_idx = - mt7925_get_rates_table(hw, vif, false, true); - - if (changed & BSS_CHANGED_BASIC_RATES) - mvif->basic_rates_idx = - mt7925_get_rates_table(hw, vif, false, false); - - if (changed & (BSS_CHANGED_BEACON | - BSS_CHANGED_BEACON_ENABLED)) { - mvif->beacon_rates_idx = - mt7925_get_rates_table(hw, vif, true, false); - - mt7925_mcu_uni_add_beacon_offload(dev, hw, vif, - info->enable_beacon); - } - - /* ensure that enable txcmd_mode after bss_info */ - if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED)) - mt7925_mcu_set_tx(dev, vif); - - if (changed & BSS_CHANGED_PS) - mt7925_mcu_uni_bss_ps(dev, vif); - - if (changed & BSS_CHANGED_ASSOC) { - mt7925_mcu_sta_update(dev, NULL, vif, true, - MT76_STA_INFO_STATE_ASSOC); - mt7925_mcu_set_beacon_filter(dev, vif, vif->cfg.assoc); - } - - if (changed & BSS_CHANGED_ARP_FILTER) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - - mt7925_mcu_update_arp_filter(&dev->mt76, &mvif->mt76, info); - } - - mt792x_mutex_release(dev); + return err; } int mt7925_mac_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); - struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - int ret, idx; + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + int err; - idx = mt76_wcid_alloc(dev->mt76.wcid_mask, MT792x_WTBL_STA - 1); - if (idx < 0) - return -ENOSPC; - - INIT_LIST_HEAD(&msta->wcid.poll_list); msta->vif = mvif; - msta->wcid.sta = 1; - msta->wcid.idx = idx; - msta->wcid.phy_idx = mvif->mt76.band_idx; - msta->wcid.tx_info |= MT_WCID_TX_INFO_SET; - msta->last_txs = jiffies; - - ret = mt76_connac_pm_wake(&dev->mphy, &dev->pm); - if (ret) - return ret; if (vif->type == NL80211_IFTYPE_STATION) mvif->wep_sta = msta; - mt7925_mac_wtbl_update(dev, idx, - MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + if (ieee80211_vif_is_mld(vif)) { + msta->deflink_id = IEEE80211_LINK_UNSPECIFIED; - /* should update bss info before STA add */ - if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) - mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, sta, - false); + err = mt7925_mac_sta_add_links(dev, vif, sta, sta->valid_links); + } else { + err = mt7925_mac_link_sta_add(mdev, vif, &sta->deflink); + } - ret = mt7925_mcu_sta_update(dev, sta, vif, true, - MT76_STA_INFO_STATE_NONE); - if (ret) - return ret; - - mt76_connac_power_save_sched(&dev->mphy, &dev->pm); - - return 0; + return err; } EXPORT_SYMBOL_GPL(mt7925_mac_sta_add); +static u16 +mt7925_mac_select_links(struct mt76_dev *mdev, struct ieee80211_vif *vif) +{ + unsigned long usable_links = ieee80211_vif_usable_links(vif); + struct { + u8 link_id; + enum 
nl80211_band band; + } data[IEEE80211_MLD_MAX_NUM_LINKS]; + u8 link_id, i, j, n_data = 0; + u16 sel_links = 0; + + if (!ieee80211_vif_is_mld(vif)) + return 0; + + if (vif->active_links == usable_links) + return vif->active_links; + + rcu_read_lock(); + for_each_set_bit(link_id, &usable_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_bss_conf *link_conf = + rcu_dereference(vif->link_conf[link_id]); + + if (WARN_ON_ONCE(!link_conf)) + continue; + + data[n_data].link_id = link_id; + data[n_data].band = link_conf->chanreq.oper.chan->band; + n_data++; + } + rcu_read_unlock(); + + for (i = 0; i < n_data; i++) { + if (!(BIT(data[i].link_id) & vif->active_links)) + continue; + + sel_links = BIT(data[i].link_id); + + for (j = 0; j < n_data; j++) { + if (data[i].band != data[j].band) { + sel_links |= BIT(data[j].link_id); + break; + } + } + + break; + } + + return sel_links; +} + +static void +mt7925_mac_set_links(struct mt76_dev *mdev, struct ieee80211_vif *vif) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct ieee80211_bss_conf *link_conf = + mt792x_vif_to_bss_conf(vif, mvif->deflink_id); + struct cfg80211_chan_def *chandef = &link_conf->chanreq.oper; + enum nl80211_band band = chandef->chan->band, secondary_band; + + u16 sel_links = mt7925_mac_select_links(mdev, vif); + u8 secondary_link_id = __ffs(~BIT(mvif->deflink_id) & sel_links); + + if (!ieee80211_vif_is_mld(vif) || hweight16(sel_links) < 2) + return; + + link_conf = mt792x_vif_to_bss_conf(vif, secondary_link_id); + secondary_band = link_conf->chanreq.oper.chan->band; + + if (band == NL80211_BAND_2GHZ || + (band == NL80211_BAND_5GHZ && secondary_band == NL80211_BAND_6GHZ)) { + mt7925_abort_roc(mvif->phy, &mvif->bss_conf); + + mt792x_mutex_acquire(dev); + + mt7925_set_mlo_roc(mvif->phy, &mvif->bss_conf, sel_links); + + mt792x_mutex_release(dev); + } + + ieee80211_set_active_links_async(vif, sel_links); +} + +static void mt7925_mac_link_sta_assoc(struct mt76_dev *mdev, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct ieee80211_bss_conf *link_conf; + struct mt792x_link_sta *mlink; + struct mt792x_sta *msta; + + msta = (struct mt792x_sta *)link_sta->sta->drv_priv; + mlink = mt792x_sta_to_link(msta, link_sta->link_id); + + mt792x_mutex_acquire(dev); + + if (ieee80211_vif_is_mld(vif)) { + link_conf = mt792x_vif_to_bss_conf(vif, msta->deflink_id); + } else { + link_conf = mt792x_vif_to_bss_conf(vif, vif->bss_conf.link_id); + } + + if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls) { + struct mt792x_bss_conf *mconf; + + mconf = mt792x_link_conf_to_mconf(link_conf); + mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, + link_conf, link_sta, true); + } + + ewma_avg_signal_init(&mlink->avg_ack_signal); + + mt7925_mac_wtbl_update(dev, mlink->wcid.idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + memset(mlink->airtime_ac, 0, sizeof(mlink->airtime_ac)); + + mt7925_mcu_sta_update(dev, link_sta, vif, true, MT76_STA_INFO_STATE_ASSOC); + + mt792x_mutex_release(dev); +} + void mt7925_mac_sta_assoc(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); - struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + if (ieee80211_vif_is_mld(vif)) { + struct mt792x_sta *msta = (struct 
mt792x_sta *)sta->drv_priv; + struct ieee80211_link_sta *link_sta; - mt792x_mutex_acquire(dev); + link_sta = mt792x_sta_to_link_sta(vif, sta, msta->deflink_id); - if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) - mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, sta, - true); + mt7925_mac_set_links(mdev, vif); - ewma_avg_signal_init(&msta->avg_ack_signal); - - mt7925_mac_wtbl_update(dev, msta->wcid.idx, - MT_WTBL_UPDATE_ADM_COUNT_CLEAR); - memset(msta->airtime_ac, 0, sizeof(msta->airtime_ac)); - - mt7925_mcu_sta_update(dev, sta, vif, true, MT76_STA_INFO_STATE_ASSOC); - - mt792x_mutex_release(dev); + mt7925_mac_link_sta_assoc(mdev, vif, link_sta); + } else { + mt7925_mac_link_sta_assoc(mdev, vif, &sta->deflink); + } } EXPORT_SYMBOL_GPL(mt7925_mac_sta_assoc); +static void mt7925_mac_link_sta_remove(struct mt76_dev *mdev, + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta) +{ + struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); + struct ieee80211_bss_conf *link_conf; + u8 link_id = link_sta->link_id; + struct mt792x_link_sta *mlink; + struct mt792x_sta *msta; + + msta = (struct mt792x_sta *)link_sta->sta->drv_priv; + mlink = mt792x_sta_to_link(msta, link_id); + + mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid); + mt76_connac_pm_wake(&dev->mphy, &dev->pm); + + mt7925_mcu_sta_update(dev, link_sta, vif, false, + MT76_STA_INFO_STATE_NONE); + mt7925_mac_wtbl_update(dev, mlink->wcid.idx, + MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + + link_conf = mt792x_vif_to_bss_conf(vif, link_id); + + if (vif->type == NL80211_IFTYPE_STATION && !link_sta->sta->tdls) { + struct mt792x_bss_conf *mconf; + + mconf = mt792x_link_conf_to_mconf(link_conf); + mt7925_mcu_add_bss_info(&dev->phy, mconf->mt76.ctx, link_conf, + link_sta, false); + } + + spin_lock_bh(&mdev->sta_poll_lock); + if (!list_empty(&mlink->wcid.poll_list)) + list_del_init(&mlink->wcid.poll_list); + spin_unlock_bh(&mdev->sta_poll_lock); + + mt76_connac_power_save_sched(&dev->mphy, &dev->pm); +} + +static int +mt7925_mac_sta_remove_links(struct mt792x_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, unsigned long old_links) +{ + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct mt76_dev *mdev = &dev->mt76; + struct mt76_wcid *wcid; + unsigned int link_id; + + for_each_set_bit(link_id, &old_links, IEEE80211_MLD_MAX_NUM_LINKS) { + struct ieee80211_link_sta *link_sta; + struct mt792x_link_sta *mlink; + + link_sta = mt792x_sta_to_link_sta(vif, sta, link_id); + if (!link_sta) + continue; + + mlink = mt792x_sta_to_link(msta, link_id); + if (!mlink) + continue; + + mt7925_mac_link_sta_remove(&dev->mt76, vif, link_sta); + + wcid = &mlink->wcid; + rcu_assign_pointer(msta->link[link_id], NULL); + msta->valid_links &= ~BIT(link_id); + mlink->sta = NULL; + mlink->pri_link = NULL; + + if (link_sta != mlink->pri_link) { + mt76_wcid_cleanup(mdev, wcid); + mt76_wcid_mask_clear(mdev->wcid_mask, wcid->idx); + mt76_wcid_mask_clear(mdev->wcid_phy_mask, wcid->idx); + } + + if (msta->deflink_id == link_id) + msta->deflink_id = IEEE80211_LINK_UNSPECIFIED; + } + + return 0; +} + void mt7925_mac_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct mt792x_dev *dev = container_of(mdev, struct mt792x_dev, mt76); struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + unsigned long rem; - mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); - mt76_connac_pm_wake(&dev->mphy, &dev->pm); + rem = ieee80211_vif_is_mld(vif) ? 
msta->valid_links : BIT(0); - mt7925_mcu_sta_update(dev, sta, vif, false, MT76_STA_INFO_STATE_NONE); - mt7925_mac_wtbl_update(dev, msta->wcid.idx, - MT_WTBL_UPDATE_ADM_COUNT_CLEAR); + mt7925_mac_sta_remove_links(dev, vif, sta, rem); if (vif->type == NL80211_IFTYPE_STATION) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; mvif->wep_sta = NULL; - ewma_rssi_init(&mvif->rssi); - if (!sta->tdls) - mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, sta, - false); + ewma_rssi_init(&mvif->bss_conf.rssi); } - - spin_lock_bh(&mdev->sta_poll_lock); - if (!list_empty(&msta->wcid.poll_list)) - list_del_init(&msta->wcid.poll_list); - spin_unlock_bh(&mdev->sta_poll_lock); - - mt76_connac_power_save_sched(&dev->mphy, &dev->pm); } EXPORT_SYMBOL_GPL(mt7925_mac_sta_remove); @@ -890,12 +1230,12 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt792x_mutex_acquire(dev); switch (action) { case IEEE80211_AMPDU_RX_START: - mt76_rx_aggr_start(&dev->mt76, &msta->wcid, tid, ssn, + mt76_rx_aggr_start(&dev->mt76, &msta->deflink.wcid, tid, ssn, params->buf_size); mt7925_mcu_uni_rx_ba(dev, params, true); break; case IEEE80211_AMPDU_RX_STOP: - mt76_rx_aggr_stop(&dev->mt76, &msta->wcid, tid); + mt76_rx_aggr_stop(&dev->mt76, &msta->deflink.wcid, tid); mt7925_mcu_uni_rx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_OPERATIONAL: @@ -906,16 +1246,16 @@ mt7925_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: mtxq->aggr = false; - clear_bit(tid, &msta->wcid.ampdu_state); + clear_bit(tid, &msta->deflink.wcid.ampdu_state); mt7925_mcu_uni_tx_ba(dev, params, false); break; case IEEE80211_AMPDU_TX_START: - set_bit(tid, &msta->wcid.ampdu_state); + set_bit(tid, &msta->deflink.wcid.ampdu_state); ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; break; case IEEE80211_AMPDU_TX_STOP_CONT: mtxq->aggr = false; - clear_bit(tid, &msta->wcid.ampdu_state); + clear_bit(tid, &msta->deflink.wcid.ampdu_state); mt7925_mcu_uni_tx_ba(dev, params, false); ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; @@ -1156,27 +1496,38 @@ static void mt7925_sta_set_decap_offload(struct ieee80211_hw *hw, bool enabled) { struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); + unsigned long valid = mvif->valid_links; + u8 i; mt792x_mutex_acquire(dev); - if (enabled) - set_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); - else - clear_bit(MT_WCID_FLAG_HDR_TRANS, &msta->wcid.flags); + valid = ieee80211_vif_is_mld(vif) ? 
mvif->valid_links : BIT(0); - mt7925_mcu_wtbl_update_hdr_trans(dev, vif, sta); + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + struct mt792x_link_sta *mlink; + + mlink = mt792x_sta_to_link(msta, i); + + if (enabled) + set_bit(MT_WCID_FLAG_HDR_TRANS, &mlink->wcid.flags); + else + clear_bit(MT_WCID_FLAG_HDR_TRANS, &mlink->wcid.flags); + + mt7925_mcu_wtbl_update_hdr_trans(dev, vif, sta, i); + } mt792x_mutex_release(dev); } #if IS_ENABLED(CONFIG_IPV6) -static void mt7925_ipv6_addr_change(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct inet6_dev *idev) +static void __mt7925_ipv6_addr_change(struct ieee80211_hw *hw, + struct ieee80211_bss_conf *link_conf, + struct inet6_dev *idev) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - struct mt792x_dev *dev = mvif->phy->dev; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); + struct mt792x_dev *dev = mt792x_hw_dev(hw); struct inet6_ifaddr *ifa; struct sk_buff *skb; u8 idx = 0; @@ -1190,7 +1541,7 @@ static void mt7925_ipv6_addr_change(struct ieee80211_hw *hw, struct in6_addr ns_addrs[IEEE80211_BSS_ARP_ADDR_LIST_LEN]; } req_hdr = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mconf->mt76.idx, }, .arpns = { .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ND), @@ -1225,6 +1576,23 @@ static void mt7925_ipv6_addr_change(struct ieee80211_hw *hw, ieee80211_queue_work(dev->mt76.hw, &dev->ipv6_ns_work); } + +static void mt7925_ipv6_addr_change(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct inet6_dev *idev) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + unsigned long valid = ieee80211_vif_is_mld(vif) ? + mvif->valid_links : BIT(0); + struct ieee80211_bss_conf *bss_conf; + int i; + + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + bss_conf = mt792x_vif_to_bss_conf(vif, i); + __mt7925_ipv6_addr_change(hw, bss_conf, idev); + } +} + #endif int mt7925_set_tx_sar_pwr(struct ieee80211_hw *hw, @@ -1280,6 +1648,7 @@ mt7925_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const struct ieee80211_tx_queue_params *params) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_bss_conf *mconf = mt792x_vif_to_link(mvif, link_id); static const u8 mq_to_aci[] = { [IEEE80211_AC_VO] = 3, [IEEE80211_AC_VI] = 2, @@ -1288,7 +1657,7 @@ mt7925_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, }; /* firmware uses access class index */ - mvif->queue_params[mq_to_aci[queue]] = *params; + mconf->queue_params[mq_to_aci[queue]] = *params; return 0; } @@ -1303,12 +1672,12 @@ mt7925_start_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt792x_mutex_acquire(dev); - err = mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, NULL, - true); + err = mt7925_mcu_add_bss_info(&dev->phy, mvif->bss_conf.mt76.ctx, + link_conf, NULL, true); if (err) goto out; - err = mt7925_mcu_set_bss_pm(dev, vif, true); + err = mt7925_mcu_set_bss_pm(dev, link_conf, true); if (err) goto out; @@ -1330,12 +1699,12 @@ mt7925_stop_ap(struct ieee80211_hw *hw, struct ieee80211_vif *vif, mt792x_mutex_acquire(dev); - err = mt7925_mcu_set_bss_pm(dev, vif, false); + err = mt7925_mcu_set_bss_pm(dev, link_conf, false); if (err) goto out; - mt7925_mcu_add_bss_info(&dev->phy, mvif->mt76.ctx, vif, NULL, - false); + mt7925_mcu_add_bss_info(&dev->phy, mvif->bss_conf.mt76.ctx, link_conf, + NULL, false); out: mt792x_mutex_release(dev); @@ -1354,34 +1723,52 @@ mt7925_remove_chanctx(struct ieee80211_hw *hw, { } -static void mt7925_ctx_iter(void *priv, u8 *mac, - struct 
ieee80211_vif *vif) -{ - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - struct ieee80211_chanctx_conf *ctx = priv; - - if (ctx != mvif->mt76.ctx) - return; - - if (vif->type == NL80211_IFTYPE_MONITOR) { - mt7925_mcu_set_sniffer(mvif->phy->dev, vif, true); - mt7925_mcu_config_sniffer(mvif, ctx); - } else { - mt7925_mcu_set_chctx(mvif->phy->mt76, &mvif->mt76, ctx); - } -} - static void mt7925_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed) { + struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv; struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct mt792x_bss_conf *mconf; + struct ieee80211_vif *vif; + struct mt792x_vif *mvif; + + if (!mctx->bss_conf) + return; + + mconf = mctx->bss_conf; + mvif = mconf->vif; + vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv); mt792x_mutex_acquire(phy->dev); - ieee80211_iterate_active_interfaces(phy->mt76->hw, - IEEE80211_IFACE_ITER_ACTIVE, - mt7925_ctx_iter, ctx); + if (vif->type == NL80211_IFTYPE_MONITOR) { + mt7925_mcu_set_sniffer(mvif->phy->dev, vif, true); + mt7925_mcu_config_sniffer(mvif, ctx); + } else { + if (ieee80211_vif_is_mld(vif)) { + unsigned long valid = mvif->valid_links; + u8 i; + + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + mconf = mt792x_vif_to_link(mvif, i); + if (mconf && mconf->mt76.ctx == ctx) + break; + } + + } else { + mconf = &mvif->bss_conf; + } + + if (mconf) { + struct ieee80211_bss_conf *link_conf; + + link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id); + mt7925_mcu_set_chctx(mvif->phy->mt76, &mconf->mt76, + link_conf, ctx); + } + } + mt792x_mutex_release(phy->dev); } @@ -1395,7 +1782,8 @@ static void mt7925_mgd_prepare_tx(struct ieee80211_hw *hw, jiffies_to_msecs(HZ); mt792x_mutex_acquire(dev); - mt7925_set_roc(mvif->phy, mvif, mvif->mt76.ctx->def.chan, duration, + mt7925_set_roc(mvif->phy, &mvif->bss_conf, + mvif->bss_conf.mt76.ctx->def.chan, duration, MT7925_ROC_REQ_JOIN); mt792x_mutex_release(dev); } @@ -1406,7 +1794,285 @@ static void mt7925_mgd_complete_tx(struct ieee80211_hw *hw, { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - mt7925_abort_roc(mvif->phy, mvif); + mt7925_abort_roc(mvif->phy, &mvif->bss_conf); +} + +static void mt7925_vif_cfg_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + u64 changed) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + unsigned long valid = ieee80211_vif_is_mld(vif) ? 
+ mvif->valid_links : BIT(0); + struct ieee80211_bss_conf *bss_conf; + int i; + + mt792x_mutex_acquire(dev); + + if (changed & BSS_CHANGED_ASSOC) { + mt7925_mcu_sta_update(dev, NULL, vif, true, + MT76_STA_INFO_STATE_ASSOC); + mt7925_mcu_set_beacon_filter(dev, vif, vif->cfg.assoc); + } + + if (changed & BSS_CHANGED_ARP_FILTER) { + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + bss_conf = mt792x_vif_to_bss_conf(vif, i); + mt7925_mcu_update_arp_filter(&dev->mt76, bss_conf); + } + } + + if (changed & BSS_CHANGED_PS) { + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + bss_conf = mt792x_vif_to_bss_conf(vif, i); + mt7925_mcu_uni_bss_ps(dev, bss_conf); + } + } + + mt792x_mutex_release(dev); +} + +static void mt7925_link_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, + u64 changed) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_bss_conf *mconf; + + mconf = mt792x_vif_to_link(mvif, info->link_id); + + mt792x_mutex_acquire(dev); + + if (changed & BSS_CHANGED_ERP_SLOT) { + int slottime = info->use_short_slot ? 9 : 20; + + if (slottime != phy->slottime) { + phy->slottime = slottime; + mt7925_mcu_set_timing(phy, info); + } + } + + if (changed & BSS_CHANGED_MCAST_RATE) + mconf->mt76.mcast_rates_idx = + mt7925_get_rates_table(hw, vif, false, true); + + if (changed & BSS_CHANGED_BASIC_RATES) + mconf->mt76.basic_rates_idx = + mt7925_get_rates_table(hw, vif, false, false); + + if (changed & (BSS_CHANGED_BEACON | + BSS_CHANGED_BEACON_ENABLED)) { + mconf->mt76.beacon_rates_idx = + mt7925_get_rates_table(hw, vif, true, false); + + mt7925_mcu_uni_add_beacon_offload(dev, hw, vif, + info->enable_beacon); + } + + /* ensure that enable txcmd_mode after bss_info */ + if (changed & (BSS_CHANGED_QOS | BSS_CHANGED_BEACON_ENABLED)) + mt7925_mcu_set_tx(dev, info); + + mt792x_mutex_release(dev); +} + +static int +mt7925_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u16 old_links, u16 new_links, + struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]) +{ + struct mt792x_bss_conf *mconfs[IEEE80211_MLD_MAX_NUM_LINKS] = {}, *mconf; + struct mt792x_link_sta *mlinks[IEEE80211_MLD_MAX_NUM_LINKS] = {}, *mlink; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + unsigned long add = new_links & ~old_links; + unsigned long rem = old_links & ~new_links; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct mt792x_phy *phy = mt792x_hw_phy(hw); + struct ieee80211_bss_conf *link_conf; + unsigned int link_id; + int err; + + if (old_links == new_links) + return 0; + + mt792x_mutex_acquire(dev); + + for_each_set_bit(link_id, &rem, IEEE80211_MLD_MAX_NUM_LINKS) { + mconf = mt792x_vif_to_link(mvif, link_id); + mlink = mt792x_sta_to_link(&mvif->sta, link_id); + + if (!mconf || !mlink) + continue; + + if (mconf != &mvif->bss_conf) { + mt792x_mac_link_bss_remove(dev, mconf, mlink); + devm_kfree(dev->mt76.dev, mconf); + devm_kfree(dev->mt76.dev, mlink); + } + + rcu_assign_pointer(mvif->link_conf[link_id], NULL); + rcu_assign_pointer(mvif->sta.link[link_id], NULL); + } + + for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { + if (!old_links) { + mvif->deflink_id = link_id; + mconf = &mvif->bss_conf; + mlink = &mvif->sta.deflink; + } else { + mconf = devm_kzalloc(dev->mt76.dev, sizeof(*mconf), + GFP_KERNEL); + mlink = devm_kzalloc(dev->mt76.dev, sizeof(*mlink), + GFP_KERNEL); + } + + 
mconfs[link_id] = mconf; + mlinks[link_id] = mlink; + mconf->link_id = link_id; + mconf->vif = mvif; + mlink->wcid.link_id = link_id; + mlink->wcid.link_valid = !!vif->valid_links; + mlink->wcid.def_wcid = &mvif->sta.deflink.wcid; + } + + if (hweight16(mvif->valid_links) == 0) + mt792x_mac_link_bss_remove(dev, &mvif->bss_conf, + &mvif->sta.deflink); + + for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { + mconf = mconfs[link_id]; + mlink = mlinks[link_id]; + link_conf = mt792x_vif_to_bss_conf(vif, link_id); + + rcu_assign_pointer(mvif->link_conf[link_id], mconf); + rcu_assign_pointer(mvif->sta.link[link_id], mlink); + + err = mt7925_mac_link_bss_add(dev, link_conf, mlink); + if (err < 0) + goto free; + + if (mconf != &mvif->bss_conf) { + err = mt7925_set_mlo_roc(phy, &mvif->bss_conf, + vif->active_links); + if (err < 0) + goto free; + } + } + + mvif->valid_links = new_links; + + mt792x_mutex_release(dev); + + return 0; + +free: + for_each_set_bit(link_id, &add, IEEE80211_MLD_MAX_NUM_LINKS) { + rcu_assign_pointer(mvif->link_conf[link_id], NULL); + rcu_assign_pointer(mvif->sta.link[link_id], NULL); + + if (mconf != &mvif->bss_conf) + devm_kfree(dev->mt76.dev, mconfs[link_id]); + if (mlink != &mvif->sta.deflink) + devm_kfree(dev->mt76.dev, mlinks[link_id]); + } + + mt792x_mutex_release(dev); + + return err; +} + +static int +mt7925_change_sta_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, u16 old_links, u16 new_links) +{ + unsigned long add = new_links & ~old_links; + unsigned long rem = old_links & ~new_links; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + int err = 0; + + if (old_links == new_links) + return 0; + + mt792x_mutex_acquire(dev); + + err = mt7925_mac_sta_remove_links(dev, vif, sta, rem); + if (err < 0) + goto out; + + err = mt7925_mac_sta_add_links(dev, vif, sta, add); + if (err < 0) + goto out; + +out: + mt792x_mutex_release(dev); + + return err; +} + +static int mt7925_assign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) +{ + struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct ieee80211_bss_conf *pri_link_conf; + struct mt792x_bss_conf *mconf; + + mutex_lock(&dev->mt76.mutex); + + if (ieee80211_vif_is_mld(vif)) { + mconf = mt792x_vif_to_link(mvif, link_conf->link_id); + pri_link_conf = mt792x_vif_to_bss_conf(vif, mvif->deflink_id); + + if (vif->type == NL80211_IFTYPE_STATION && + mconf == &mvif->bss_conf) + mt7925_mcu_add_bss_info(&dev->phy, NULL, pri_link_conf, + NULL, true); + } else { + mconf = &mvif->bss_conf; + } + + mconf->mt76.ctx = ctx; + mctx->bss_conf = mconf; + mutex_unlock(&dev->mt76.mutex); + + return 0; +} + +static void mt7925_unassign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) +{ + struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct ieee80211_bss_conf *pri_link_conf; + struct mt792x_bss_conf *mconf; + + mutex_lock(&dev->mt76.mutex); + + if (ieee80211_vif_is_mld(vif)) { + mconf = mt792x_vif_to_link(mvif, link_conf->link_id); + pri_link_conf = mt792x_vif_to_bss_conf(vif, mvif->deflink_id); + + if (vif->type == NL80211_IFTYPE_STATION && + mconf == 
+static void mt7925_unassign_vif_chanctx(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) +{ + struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_dev *dev = mt792x_hw_dev(hw); + struct ieee80211_bss_conf *pri_link_conf; + struct mt792x_bss_conf *mconf; + + mutex_lock(&dev->mt76.mutex); + + if (ieee80211_vif_is_mld(vif)) { + mconf = mt792x_vif_to_link(mvif, link_conf->link_id); + pri_link_conf = mt792x_vif_to_bss_conf(vif, mvif->deflink_id); + + if (vif->type == NL80211_IFTYPE_STATION && + mconf == &mvif->bss_conf) + mt7925_mcu_add_bss_info(&dev->phy, NULL, pri_link_conf, + NULL, false); + } else { + mconf = &mvif->bss_conf; + } + + mctx->bss_conf = NULL; + mconf->mt76.ctx = NULL; + mutex_unlock(&dev->mt76.mutex); } const struct ieee80211_ops mt7925_ops = { @@ -1418,7 +2084,6 @@ const struct ieee80211_ops mt7925_ops = { .config = mt7925_config, .conf_tx = mt7925_conf_tx, .configure_filter = mt7925_configure_filter, - .bss_info_changed = mt7925_bss_info_changed, .start_ap = mt7925_start_ap, .stop_ap = mt7925_stop_ap, .sta_state = mt76_sta_state, @@ -1462,10 +2127,14 @@ const struct ieee80211_ops mt7925_ops = { .add_chanctx = mt7925_add_chanctx, .remove_chanctx = mt7925_remove_chanctx, .change_chanctx = mt7925_change_chanctx, - .assign_vif_chanctx = mt792x_assign_vif_chanctx, - .unassign_vif_chanctx = mt792x_unassign_vif_chanctx, + .assign_vif_chanctx = mt7925_assign_vif_chanctx, + .unassign_vif_chanctx = mt7925_unassign_vif_chanctx, .mgd_prepare_tx = mt7925_mgd_prepare_tx, .mgd_complete_tx = mt7925_mgd_complete_tx, + .vif_cfg_changed = mt7925_vif_cfg_changed, + .link_info_changed = mt7925_link_info_changed, + .change_vif_links = mt7925_change_vif_links, + .change_sta_links = mt7925_change_sta_links, }; EXPORT_SYMBOL_GPL(mt7925_ops); diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c index 652a9accc43c..9dc22fbe25d3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.c @@ -121,11 +121,12 @@ int mt7925_mcu_regval(struct mt792x_dev *dev, u32 regidx, u32 *val, bool set) EXPORT_SYMBOL_GPL(mt7925_mcu_regval); int mt7925_mcu_update_arp_filter(struct mt76_dev *dev, - struct mt76_vif *vif, - struct ieee80211_bss_conf *info) + struct ieee80211_bss_conf *link_conf) { - struct ieee80211_vif *mvif = container_of(info, struct ieee80211_vif, - bss_conf); + /* link_conf->vif already is the owning interface; no container_of() */ + struct ieee80211_vif *mvif = link_conf->vif; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); struct sk_buff *skb; int i, len = min_t(int, mvif->cfg.arp_addr_cnt, IEEE80211_BSS_ARP_ADDR_LIST_LEN); @@ -137,7 +138,7 @@ int mt7925_mcu_update_arp_filter(struct mt76_dev *dev, struct mt7925_arpns_tlv arp; } req = { .hdr = { - .bss_idx = vif->idx, + .bss_idx = mconf->mt76.idx, }, .arp = { .tag = cpu_to_le16(UNI_OFFLOAD_OFFLOAD_ARP), @@ -308,22 +309,23 @@ mt7925_mcu_roc_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; struct mt7925_roc_grant_tlv *grant = priv; + if (ieee80211_vif_is_mld(vif) && vif->type == NL80211_IFTYPE_STATION) + return; + if (mvif->idx != grant->bss_idx) return; mvif->band_idx = grant->dbdcband; } -static void -mt7925_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb) +static void mt7925_mcu_roc_handle_grant(struct mt792x_dev *dev, + struct tlv *tlv) { struct ieee80211_hw *hw = dev->mt76.hw; struct mt7925_roc_grant_tlv *grant; - struct mt7925_mcu_rxd *rxd; int duration; - rxd = (struct mt7925_mcu_rxd *)skb->data; - grant = (struct mt7925_roc_grant_tlv *)(rxd->tlv + 4); + grant = (struct mt7925_roc_grant_tlv *)tlv; /* should never happen */ WARN_ON_ONCE((le16_to_cpu(grant->tag) != UNI_EVENT_ROC_GRANT)); @@ -341,6 +343,29 @@ mt7925_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb) jiffies + msecs_to_jiffies(duration)); } +static void +mt7925_mcu_uni_roc_event(struct mt792x_dev *dev, struct sk_buff *skb) +{ + struct tlv *tlv; + int i = 0; + + 
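/* strip the fixed event header, then walk the event TLVs by tag/len */ +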
skb_pull(skb, sizeof(struct mt7925_mcu_rxd) + 4); + + while (i < skb->len) { + tlv = (struct tlv *)(skb->data + i); + + switch (le16_to_cpu(tlv->tag)) { + case UNI_EVENT_ROC_GRANT: + mt7925_mcu_roc_handle_grant(dev, tlv); + break; + case UNI_EVENT_ROC_GRANT_SUB_LINK: + break; + } + + i += le16_to_cpu(tlv->len); + } +} + static void mt7925_mcu_scan_event(struct mt792x_dev *dev, struct sk_buff *skb) { @@ -544,9 +569,9 @@ int mt7925_mcu_uni_tx_ba(struct mt792x_dev *dev, struct mt792x_vif *mvif = msta->vif; if (enable && !params->amsdu) - msta->wcid.amsdu = false; + msta->deflink.wcid.amsdu = false; - return mt7925_mcu_sta_ba(&dev->mt76, &mvif->mt76, params, + return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params, enable, true); } @@ -557,7 +582,7 @@ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev, struct mt792x_sta *msta = (struct mt792x_sta *)params->sta->drv_priv; struct mt792x_vif *mvif = msta->vif; - return mt7925_mcu_sta_ba(&dev->mt76, &mvif->mt76, params, + return mt7925_mcu_sta_ba(&dev->mt76, &mvif->bss_conf.mt76, params, enable, false); } @@ -726,6 +751,20 @@ mt7925_mcu_parse_phy_cap(struct mt792x_dev *dev, char *data) dev->has_eht = cap->eht; } +static void +mt7925_mcu_parse_eml_cap(struct mt792x_dev *dev, char *data) +{ + struct mt7925_mcu_eml_cap { + u8 rsv[4]; + __le16 eml_cap; + u8 rsv2[6]; + } __packed * cap; + + cap = (struct mt7925_mcu_eml_cap *)data; + + dev->phy.eml_cap = le16_to_cpu(cap->eml_cap); +} + static int mt7925_mcu_get_nic_capability(struct mt792x_dev *dev) { @@ -780,6 +819,12 @@ mt7925_mcu_get_nic_capability(struct mt792x_dev *dev) case MT_NIC_CAP_PHY: mt7925_mcu_parse_phy_cap(dev, tlv->data); break; + case MT_NIC_CAP_CHIP_CAP: + memcpy(&dev->phy.chip_cap, (void *)skb->data, sizeof(u64)); + break; + case MT_NIC_CAP_EML_CAP: + mt7925_mcu_parse_eml_cap(dev, tlv->data); + break; default: break; } @@ -848,7 +893,7 @@ EXPORT_SYMBOL_GPL(mt7925_run_firmware); static void mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) + struct ieee80211_link_sta *link_sta) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct sta_rec_hdr_trans *hdr_trans; @@ -864,10 +909,15 @@ mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb, else hdr_trans->from_ds = true; - if (sta) - wcid = (struct mt76_wcid *)sta->drv_priv; - else - wcid = &mvif->sta.wcid; + if (link_sta) { + struct mt792x_sta *msta = (struct mt792x_sta *)link_sta->sta->drv_priv; + struct mt792x_link_sta *mlink; + + mlink = mt792x_sta_to_link(msta, link_sta->link_id); + wcid = &mlink->wcid; + } else { + wcid = &mvif->sta.deflink.wcid; + } if (!wcid) return; @@ -881,27 +931,36 @@ mt7925_mcu_sta_hdr_trans_tlv(struct sk_buff *skb, int mt7925_mcu_wtbl_update_hdr_trans(struct mt792x_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) + struct ieee80211_sta *sta, + int link_id) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct ieee80211_link_sta *link_sta = sta ? &sta->deflink : NULL; + struct mt792x_link_sta *mlink; + struct mt792x_bss_conf *mconf; struct mt792x_sta *msta; struct sk_buff *skb; msta = sta ? 
(struct mt792x_sta *)sta->drv_priv : &mvif->sta; - skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, - &msta->wcid, + mlink = mt792x_sta_to_link(msta, link_id); + link_sta = mt792x_sta_to_link_sta(vif, sta, link_id); + mconf = mt792x_vif_to_link(mvif, link_id); + + skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mconf->mt76, + &mlink->wcid, MT7925_STA_UPDATE_MAX_SIZE); if (IS_ERR(skb)) return PTR_ERR(skb); /* starec hdr trans */ - mt7925_mcu_sta_hdr_trans_tlv(skb, vif, sta); + mt7925_mcu_sta_hdr_trans_tlv(skb, vif, link_sta); return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_WMWA_UNI_CMD(STA_REC_UPDATE), true); } -int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif) +int mt7925_mcu_set_tx(struct mt792x_dev *dev, + struct ieee80211_bss_conf *bss_conf) { #define MCU_EDCA_AC_PARAM 0 #define WMM_AIFS_SET BIT(0) @@ -910,12 +969,12 @@ int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif) #define WMM_TXOP_SET BIT(3) #define WMM_PARAM_SET (WMM_AIFS_SET | WMM_CW_MIN_SET | \ WMM_CW_MAX_SET | WMM_TXOP_SET) - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(bss_conf); struct { u8 bss_idx; u8 __rsv[3]; } __packed hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mconf->mt76.idx, }; struct sk_buff *skb; int len = sizeof(hdr) + IEEE80211_NUM_ACS * sizeof(struct edca); @@ -928,7 +987,7 @@ int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif) skb_put_data(skb, &hdr, sizeof(hdr)); for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { - struct ieee80211_tx_queue_params *q = &mvif->queue_params[ac]; + struct ieee80211_tx_queue_params *q = &mconf->queue_params[ac]; struct edca *e; struct tlv *tlv; @@ -960,11 +1019,12 @@ mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid, struct mt76_connac_sta_key_conf *sta_key_conf, struct sk_buff *skb, struct ieee80211_key_conf *key, - enum set_key_cmd cmd) + enum set_key_cmd cmd, + struct mt792x_sta *msta) { - struct mt792x_sta *msta = container_of(wcid, struct mt792x_sta, wcid); - struct sta_rec_sec_uni *sec; struct mt792x_vif *mvif = msta->vif; + struct mt792x_bss_conf *mconf = mt792x_vif_to_link(mvif, wcid->link_id); + struct sta_rec_sec_uni *sec; struct ieee80211_sta *sta; struct ieee80211_vif *vif; struct tlv *tlv; @@ -976,17 +1036,27 @@ mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid, tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_KEY_V3, sizeof(*sec)); sec = (struct sta_rec_sec_uni *)tlv; - sec->bss_idx = mvif->mt76.idx; + sec->bss_idx = mconf->mt76.idx; sec->is_authenticator = 0; - sec->mgmt_prot = 0; + sec->mgmt_prot = 1; /* only used in MLO mode */ sec->wlan_idx = (u8)wcid->idx; if (sta) { + struct ieee80211_link_sta *link_sta; + sec->tx_key = 1; sec->key_type = 1; - memcpy(sec->peer_addr, sta->addr, ETH_ALEN); + link_sta = mt792x_sta_to_link_sta(vif, sta, wcid->link_id); + + if (link_sta) + memcpy(sec->peer_addr, link_sta->addr, ETH_ALEN); } else { - memcpy(sec->peer_addr, vif->bss_conf.bssid, ETH_ALEN); + struct ieee80211_bss_conf *link_conf; + + link_conf = mt792x_vif_to_bss_conf(vif, wcid->link_id); + + if (link_conf) + memcpy(sec->peer_addr, link_conf->bssid, ETH_ALEN); } if (cmd == SET_KEY) { @@ -1031,25 +1101,121 @@ mt7925_mcu_sta_key_tlv(struct mt76_wcid *wcid, int mt7925_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif, struct mt76_connac_sta_key_conf *sta_key_conf, struct ieee80211_key_conf *key, int mcu_cmd, - struct mt76_wcid *wcid, enum set_key_cmd cmd) + struct mt76_wcid *wcid, enum set_key_cmd cmd, + struct 
mt792x_sta *msta) { - struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_bss_conf *mconf = mt792x_vif_to_link(mvif, wcid->link_id); struct sk_buff *skb; int ret; - skb = __mt76_connac_mcu_alloc_sta_req(dev, mvif, wcid, + skb = __mt76_connac_mcu_alloc_sta_req(dev, &mconf->mt76, wcid, MT7925_STA_UPDATE_MAX_SIZE); if (IS_ERR(skb)) return PTR_ERR(skb); - ret = mt7925_mcu_sta_key_tlv(wcid, sta_key_conf, skb, key, cmd); + ret = mt7925_mcu_sta_key_tlv(wcid, sta_key_conf, skb, key, cmd, msta); if (ret) return ret; return mt76_mcu_skb_send_msg(dev, skb, mcu_cmd, true); } -int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, +int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links, + int duration, u8 token_id) +{ + struct mt792x_vif *mvif = mconf->vif; + struct ieee80211_vif *vif = container_of((void *)mvif, + struct ieee80211_vif, drv_priv); + struct ieee80211_bss_conf *link_conf; + struct ieee80211_channel *chan; + const u8 ch_band[] = { + [NL80211_BAND_2GHZ] = 1, + [NL80211_BAND_5GHZ] = 2, + [NL80211_BAND_6GHZ] = 3, + }; + enum mt7925_roc_req type; + int center_ch, i = 0; + bool is_AG_band = false; + struct { + u8 id; + u8 bss_idx; + u16 tag; + struct mt792x_bss_conf *mconf; + struct ieee80211_channel *chan; + } links[2]; + + struct { + struct { + u8 rsv[4]; + } __packed hdr; + struct roc_acquire_tlv roc[2]; + } __packed req; + + if (!mconf || hweight16(vif->valid_links) < 2 || + hweight16(sel_links) != 2) + return -EPERM; + + for (i = 0; i < ARRAY_SIZE(links); i++) { + links[i].id = i ? __ffs(~BIT(mconf->link_id) & sel_links) : + mconf->link_id; + link_conf = mt792x_vif_to_bss_conf(vif, links[i].id); + if (WARN_ON_ONCE(!link_conf)) + return -EPERM; + + links[i].chan = link_conf->chanreq.oper.chan; + if (WARN_ON_ONCE(!links[i].chan)) + return -EPERM; + + links[i].mconf = mt792x_vif_to_link(mvif, links[i].id); + links[i].tag = links[i].id == mconf->link_id ? + UNI_ROC_ACQUIRE : UNI_ROC_SUB_LINK; + + is_AG_band |= links[i].chan->band == NL80211_BAND_2GHZ; + } + + if (vif->cfg.eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP) + type = is_AG_band ? MT7925_ROC_REQ_MLSR_AG : + MT7925_ROC_REQ_MLSR_AA; + else + type = MT7925_ROC_REQ_JOIN; + + for (i = 0; i < ARRAY_SIZE(links) && i < hweight16(vif->active_links); i++) { + if (WARN_ON_ONCE(!links[i].mconf || !links[i].chan)) + continue; + + chan = links[i].chan; + center_ch = ieee80211_frequency_to_channel(chan->center_freq); + req.roc[i].len = cpu_to_le16(sizeof(struct roc_acquire_tlv)); + req.roc[i].tag = cpu_to_le16(links[i].tag); + req.roc[i].tokenid = token_id; + req.roc[i].reqtype = type; + req.roc[i].maxinterval = cpu_to_le32(duration); + req.roc[i].bss_idx = links[i].mconf->mt76.idx; + req.roc[i].control_channel = chan->hw_value; + req.roc[i].bw = CMD_CBW_20MHZ; + req.roc[i].bw_from_ap = CMD_CBW_20MHZ; + req.roc[i].center_chan = center_ch; + req.roc[i].center_chan_from_ap = center_ch; + + /* STR : 0xfe indicates BAND_ALL with DBDC enabled + * EMLSR : 0xff indicates BAND_AUTO without DBDC + */ + req.roc[i].dbdcband = type == MT7925_ROC_REQ_JOIN ? 
0xfe : 0xff; + + if (chan->hw_value < center_ch) + req.roc[i].sco = 1; /* SCA */ + else if (chan->hw_value > center_ch) + req.roc[i].sco = 3; /* SCB */ + + req.roc[i].band = ch_band[chan->band]; + } + + return mt76_mcu_send_msg(&mvif->phy->dev->mt76, MCU_UNI_CMD(ROC), + &req, sizeof(req), false); +} + +int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf, struct ieee80211_channel *chan, int duration, enum mt7925_roc_req type, u8 token_id) { @@ -1059,25 +1225,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, struct { u8 rsv[4]; } __packed hdr; - struct roc_acquire_tlv { - __le16 tag; - __le16 len; - u8 bss_idx; - u8 tokenid; - u8 control_channel; - u8 sco; - u8 band; - u8 bw; - u8 center_chan; - u8 center_chan2; - u8 bw_from_ap; - u8 center_chan_from_ap; - u8 center_chan2_from_ap; - u8 reqtype; - __le32 maxinterval; - u8 dbdcband; - u8 rsv[3]; - } __packed roc; + struct roc_acquire_tlv roc; } __packed req = { .roc = { .tag = cpu_to_le16(UNI_ROC_ACQUIRE), @@ -1085,7 +1233,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, .tokenid = token_id, .reqtype = type, .maxinterval = cpu_to_le32(duration), - .bss_idx = vif->mt76.idx, + .bss_idx = mconf->mt76.idx, .control_channel = chan->hw_value, .bw = CMD_CBW_20MHZ, .bw_from_ap = CMD_CBW_20MHZ, @@ -1116,7 +1264,7 @@ int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, &req, sizeof(req), false); } -int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, +int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf, u8 token_id) { struct mt792x_dev *dev = phy->dev; @@ -1137,7 +1285,7 @@ int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, .tag = cpu_to_le16(UNI_ROC_ABORT), .len = cpu_to_le16(sizeof(struct roc_abort_tlv)), .tokenid = token_id, - .bss_idx = vif->mt76.idx, + .bss_idx = mconf->mt76.idx, .dbdcband = 0xff, /* auto*/ }, }; @@ -1146,80 +1294,6 @@ int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, &req, sizeof(req), false); } -int mt7925_mcu_set_chan_info(struct mt792x_phy *phy, u16 tag) -{ - static const u8 ch_band[] = { - [NL80211_BAND_2GHZ] = 0, - [NL80211_BAND_5GHZ] = 1, - [NL80211_BAND_6GHZ] = 2, - }; - struct mt792x_dev *dev = phy->dev; - struct cfg80211_chan_def *chandef = &phy->mt76->chandef; - int freq1 = chandef->center_freq1; - u8 band_idx = chandef->chan->band != NL80211_BAND_2GHZ; - struct { - /* fixed field */ - u8 __rsv[4]; - - __le16 tag; - __le16 len; - u8 control_ch; - u8 center_ch; - u8 bw; - u8 tx_path_num; - u8 rx_path; /* mask or num */ - u8 switch_reason; - u8 band_idx; - u8 center_ch2; /* for 80+80 only */ - __le16 cac_case; - u8 channel_band; - u8 rsv0; - __le32 outband_freq; - u8 txpower_drop; - u8 ap_bw; - u8 ap_center_ch; - u8 rsv1[53]; - } __packed req = { - .tag = cpu_to_le16(tag), - .len = cpu_to_le16(sizeof(req) - 4), - .control_ch = chandef->chan->hw_value, - .center_ch = ieee80211_frequency_to_channel(freq1), - .bw = mt76_connac_chan_bw(chandef), - .tx_path_num = hweight8(phy->mt76->antenna_mask), - .rx_path = phy->mt76->antenna_mask, - .band_idx = band_idx, - .channel_band = ch_band[chandef->chan->band], - }; - - if (chandef->chan->band == NL80211_BAND_6GHZ) - req.channel_band = 2; - else - req.channel_band = chandef->chan->band; - - if (tag == UNI_CHANNEL_RX_PATH || - dev->mt76.hw->conf.flags & IEEE80211_CONF_MONITOR) - req.switch_reason = CH_SWITCH_NORMAL; - else if (phy->mt76->hw->conf.flags & IEEE80211_CONF_OFFCHANNEL) - req.switch_reason 
= CH_SWITCH_SCAN_BYPASS_DPD; - else if (!cfg80211_reg_can_beacon(phy->mt76->hw->wiphy, chandef, - NL80211_IFTYPE_AP)) - req.switch_reason = CH_SWITCH_DFS; - else - req.switch_reason = CH_SWITCH_NORMAL; - - if (tag == UNI_CHANNEL_SWITCH) - req.rx_path = hweight8(req.rx_path); - - if (chandef->width == NL80211_CHAN_WIDTH_80P80) { - int freq2 = chandef->center_freq2; - - req.center_ch2 = ieee80211_frequency_to_channel(freq2); - } - - return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(CHANNEL_SWITCH), - &req, sizeof(req), true); -} - int mt7925_mcu_set_eeprom(struct mt792x_dev *dev) { struct { @@ -1242,9 +1316,10 @@ int mt7925_mcu_set_eeprom(struct mt792x_dev *dev) } EXPORT_SYMBOL_GPL(mt7925_mcu_set_eeprom); -int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif) +int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, + struct ieee80211_bss_conf *link_conf) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); struct { struct { u8 bss_idx; @@ -1263,16 +1338,16 @@ int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif) } __packed ps; } __packed ps_req = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mconf->mt76.idx, }, .ps = { .tag = cpu_to_le16(UNI_BSS_INFO_PS), .len = cpu_to_le16(sizeof(struct ps_tlv)), - .ps_state = vif->cfg.ps ? 2 : 0, + .ps_state = link_conf->vif->cfg.ps ? 2 : 0, }, }; - if (vif->type != NL80211_IFTYPE_STATION) + if (link_conf->vif->type != NL80211_IFTYPE_STATION) return -EOPNOTSUPP; return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE), @@ -1280,10 +1355,10 @@ int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif) } static int -mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif, - bool enable) +mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev, + struct ieee80211_bss_conf *link_conf, bool enable) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); struct { struct { u8 bss_idx; @@ -1300,17 +1375,17 @@ mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif, } __packed bcnft; } __packed bcnft_req = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mconf->mt76.idx, }, .bcnft = { .tag = cpu_to_le16(UNI_BSS_INFO_BCNFT), .len = cpu_to_le16(sizeof(struct bcnft_tlv)), - .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int), - .dtim_period = vif->bss_conf.dtim_period, + .bcn_interval = cpu_to_le16(link_conf->beacon_int), + .dtim_period = link_conf->dtim_period, }, }; - if (vif->type != NL80211_IFTYPE_STATION) + if (link_conf->vif->type != NL80211_IFTYPE_STATION) return 0; return mt76_mcu_send_msg(&dev->mt76, MCU_UNI_CMD(BSS_INFO_UPDATE), @@ -1318,10 +1393,11 @@ mt7925_mcu_uni_bss_bcnft(struct mt792x_dev *dev, struct ieee80211_vif *vif, } int -mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, +mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, + struct ieee80211_bss_conf *link_conf, bool enable) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); struct { struct { u8 bss_idx; @@ -1338,13 +1414,13 @@ mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, } __packed enable; } req = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mconf->mt76.idx, }, .enable = { .tag = cpu_to_le16(UNI_BSS_INFO_BCNFT), .len = cpu_to_le16(sizeof(struct bcnft_tlv)), - .dtim_period = 
vif->bss_conf.dtim_period, - .bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int), + .dtim_period = link_conf->dtim_period, + .bcn_interval = cpu_to_le16(link_conf->beacon_int), }, }; struct { @@ -1358,7 +1434,7 @@ mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, } __packed disable; } req1 = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mconf->mt76.idx, }, .disable = { .tag = cpu_to_le16(UNI_BSS_INFO_PM_DISABLE), @@ -1377,42 +1453,43 @@ mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, } static void -mt7925_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +mt7925_mcu_sta_he_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta) { - if (!sta->deflink.he_cap.has_he) + if (!link_sta->he_cap.has_he) return; - mt76_connac_mcu_sta_he_tlv_v2(skb, sta); + mt76_connac_mcu_sta_he_tlv_v2(skb, link_sta->sta); } static void -mt7925_mcu_sta_he_6g_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +mt7925_mcu_sta_he_6g_tlv(struct sk_buff *skb, + struct ieee80211_link_sta *link_sta) { struct sta_rec_he_6g_capa *he_6g; struct tlv *tlv; - if (!sta->deflink.he_6ghz_capa.capa) + if (!link_sta->he_6ghz_capa.capa) return; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HE_6G, sizeof(*he_6g)); he_6g = (struct sta_rec_he_6g_capa *)tlv; - he_6g->capa = sta->deflink.he_6ghz_capa.capa; + he_6g->capa = link_sta->he_6ghz_capa.capa; } static void -mt7925_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +mt7925_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta) { struct ieee80211_eht_mcs_nss_supp *mcs_map; struct ieee80211_eht_cap_elem_fixed *elem; struct sta_rec_eht *eht; struct tlv *tlv; - if (!sta->deflink.eht_cap.has_eht) + if (!link_sta->eht_cap.has_eht) return; - mcs_map = &sta->deflink.eht_cap.eht_mcs_nss_supp; - elem = &sta->deflink.eht_cap.eht_cap_elem; + mcs_map = &link_sta->eht_cap.eht_mcs_nss_supp; + elem = &link_sta->eht_cap.eht_cap_elem; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT, sizeof(*eht)); @@ -1422,50 +1499,52 @@ mt7925_mcu_sta_eht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) eht->phy_cap = cpu_to_le64(*(u64 *)elem->phy_cap_info); eht->phy_cap_ext = cpu_to_le64(elem->phy_cap_info[8]); - if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20) + if (link_sta->bandwidth == IEEE80211_STA_RX_BW_20) memcpy(eht->mcs_map_bw20, &mcs_map->only_20mhz, sizeof(eht->mcs_map_bw20)); memcpy(eht->mcs_map_bw80, &mcs_map->bw._80, sizeof(eht->mcs_map_bw80)); memcpy(eht->mcs_map_bw160, &mcs_map->bw._160, sizeof(eht->mcs_map_bw160)); } static void -mt7925_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +mt7925_mcu_sta_ht_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta) { struct sta_rec_ht *ht; struct tlv *tlv; - if (!sta->deflink.ht_cap.ht_supported) + if (!link_sta->ht_cap.ht_supported) return; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HT, sizeof(*ht)); ht = (struct sta_rec_ht *)tlv; - ht->ht_cap = cpu_to_le16(sta->deflink.ht_cap.cap); + ht->ht_cap = cpu_to_le16(link_sta->ht_cap.cap); } static void -mt7925_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_sta *sta) +mt7925_mcu_sta_vht_tlv(struct sk_buff *skb, struct ieee80211_link_sta *link_sta) { struct sta_rec_vht *vht; struct tlv *tlv; /* For 6G band, this tlv is necessary to let hw work normally */ - if (!sta->deflink.he_6ghz_capa.capa && !sta->deflink.vht_cap.vht_supported) + if (!link_sta->he_6ghz_capa.capa && !link_sta->vht_cap.vht_supported) return; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_VHT, sizeof(*vht)); 
vht = (struct sta_rec_vht *)tlv; - vht->vht_cap = cpu_to_le32(sta->deflink.vht_cap.cap); - vht->vht_rx_mcs_map = sta->deflink.vht_cap.vht_mcs.rx_mcs_map; - vht->vht_tx_mcs_map = sta->deflink.vht_cap.vht_mcs.tx_mcs_map; + vht->vht_cap = cpu_to_le32(link_sta->vht_cap.cap); + vht->vht_rx_mcs_map = link_sta->vht_cap.vht_mcs.rx_mcs_map; + vht->vht_tx_mcs_map = link_sta->vht_cap.vht_mcs.tx_mcs_map; } static void mt7925_mcu_sta_amsdu_tlv(struct sk_buff *skb, - struct ieee80211_vif *vif, struct ieee80211_sta *sta) + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta) { - struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + struct mt792x_sta *msta = (struct mt792x_sta *)link_sta->sta->drv_priv; + struct mt792x_link_sta *mlink; struct sta_rec_amsdu *amsdu; struct tlv *tlv; @@ -1473,16 +1552,18 @@ mt7925_mcu_sta_amsdu_tlv(struct sk_buff *skb, vif->type != NL80211_IFTYPE_AP) return; - if (!sta->deflink.agg.max_amsdu_len) + if (!link_sta->agg.max_amsdu_len) return; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_HW_AMSDU, sizeof(*amsdu)); amsdu = (struct sta_rec_amsdu *)tlv; amsdu->max_amsdu_num = 8; amsdu->amsdu_en = true; - msta->wcid.amsdu = true; - switch (sta->deflink.agg.max_amsdu_len) { + mlink = mt792x_sta_to_link(msta, link_sta->link_id); + mlink->wcid.amsdu = true; + + switch (link_sta->agg.max_amsdu_len) { case IEEE80211_MAX_MPDU_LEN_VHT_11454: amsdu->max_mpdu_size = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; @@ -1499,34 +1580,44 @@ mt7925_mcu_sta_amsdu_tlv(struct sk_buff *skb, static void mt7925_mcu_sta_phy_tlv(struct sk_buff *skb, - struct ieee80211_vif *vif, struct ieee80211_sta *sta) + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - struct cfg80211_chan_def *chandef = &mvif->mt76.ctx->def; + struct ieee80211_bss_conf *link_conf; + struct cfg80211_chan_def *chandef; + struct mt792x_bss_conf *mconf; struct sta_rec_phy *phy; struct tlv *tlv; u8 af = 0, mm = 0; + link_conf = mt792x_vif_to_bss_conf(vif, link_sta->link_id); + mconf = mt792x_vif_to_link(mvif, link_sta->link_id); + chandef = mconf->mt76.ctx ? 
&mconf->mt76.ctx->def : + &link_conf->chanreq.oper; + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_PHY, sizeof(*phy)); phy = (struct sta_rec_phy *)tlv; - phy->phy_type = mt76_connac_get_phy_mode_v2(mvif->phy->mt76, vif, chandef->chan->band, sta); - phy->basic_rate = cpu_to_le16((u16)vif->bss_conf.basic_rates); - if (sta->deflink.ht_cap.ht_supported) { - af = sta->deflink.ht_cap.ampdu_factor; - mm = sta->deflink.ht_cap.ampdu_density; + phy->phy_type = mt76_connac_get_phy_mode_v2(mvif->phy->mt76, vif, + chandef->chan->band, + link_sta); + phy->basic_rate = cpu_to_le16((u16)link_conf->basic_rates); + if (link_sta->ht_cap.ht_supported) { + af = link_sta->ht_cap.ampdu_factor; + mm = link_sta->ht_cap.ampdu_density; } - if (sta->deflink.vht_cap.vht_supported) { + if (link_sta->vht_cap.vht_supported) { u8 vht_af = FIELD_GET(IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK, - sta->deflink.vht_cap.cap); + link_sta->vht_cap.cap); af = max_t(u8, af, vht_af); } - if (sta->deflink.he_6ghz_capa.capa) { - af = le16_get_bits(sta->deflink.he_6ghz_capa.capa, + if (link_sta->he_6ghz_capa.capa) { + af = le16_get_bits(link_sta->he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP); - mm = le16_get_bits(sta->deflink.he_6ghz_capa.capa, + mm = le16_get_bits(link_sta->he_6ghz_capa.capa, IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START); } @@ -1537,7 +1628,7 @@ mt7925_mcu_sta_phy_tlv(struct sk_buff *skb, static void mt7925_mcu_sta_state_v2_tlv(struct mt76_phy *mphy, struct sk_buff *skb, - struct ieee80211_sta *sta, + struct ieee80211_link_sta *link_sta, struct ieee80211_vif *vif, u8 rcpi, u8 sta_state) { @@ -1557,28 +1648,37 @@ mt7925_mcu_sta_state_v2_tlv(struct mt76_phy *mphy, struct sk_buff *skb, state = (struct sta_rec_state_v2 *)tlv; state->state = sta_state; - if (sta->deflink.vht_cap.vht_supported) { - state->vht_opmode = sta->deflink.bandwidth; - state->vht_opmode |= sta->deflink.rx_nss << + if (link_sta->vht_cap.vht_supported) { + state->vht_opmode = link_sta->bandwidth; + state->vht_opmode |= link_sta->rx_nss << IEEE80211_OPMODE_NOTIF_RX_NSS_SHIFT; } } static void mt7925_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, - struct ieee80211_vif *vif, struct ieee80211_sta *sta) + struct ieee80211_vif *vif, + struct ieee80211_link_sta *link_sta) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - struct cfg80211_chan_def *chandef = &mvif->mt76.ctx->def; - enum nl80211_band band = chandef->chan->band; + struct ieee80211_bss_conf *link_conf; + struct cfg80211_chan_def *chandef; struct sta_rec_ra_info *ra_info; + struct mt792x_bss_conf *mconf; + enum nl80211_band band; struct tlv *tlv; u16 supp_rates; + link_conf = mt792x_vif_to_bss_conf(vif, link_sta->link_id); + mconf = mt792x_vif_to_link(mvif, link_sta->link_id); + chandef = mconf->mt76.ctx ? 
&mconf->mt76.ctx->def : + &link_conf->chanreq.oper; + band = chandef->chan->band; + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_RA, sizeof(*ra_info)); ra_info = (struct sta_rec_ra_info *)tlv; - supp_rates = sta->deflink.supp_rates[band]; + supp_rates = link_sta->supp_rates[band]; if (band == NL80211_BAND_2GHZ) supp_rates = FIELD_PREP(RA_LEGACY_OFDM, supp_rates >> 4) | FIELD_PREP(RA_LEGACY_CCK, supp_rates & 0xf); @@ -1587,29 +1687,80 @@ mt7925_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, ra_info->legacy = cpu_to_le16(supp_rates); - if (sta->deflink.ht_cap.ht_supported) + if (link_sta->ht_cap.ht_supported) memcpy(ra_info->rx_mcs_bitmask, - sta->deflink.ht_cap.mcs.rx_mask, + link_sta->ht_cap.mcs.rx_mask, HT_MCS_MASK_NUM); } +static void +mt7925_mcu_sta_eht_mld_tlv(struct sk_buff *skb, + struct ieee80211_vif *vif, struct ieee80211_sta *sta) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct wiphy *wiphy = mvif->phy->mt76->hw->wiphy; + const struct wiphy_iftype_ext_capab *ext_capa; + struct sta_rec_eht_mld *eht_mld; + struct tlv *tlv; + u16 eml_cap; + + tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_EHT_MLD, sizeof(*eht_mld)); + eht_mld = (struct sta_rec_eht_mld *)tlv; + eht_mld->mld_type = 0xff; + + if (!ieee80211_vif_is_mld(vif)) + return; + + ext_capa = cfg80211_get_iftype_ext_capa(wiphy, + ieee80211_vif_type_p2p(vif)); + if (!ext_capa) + return; + + eml_cap = (vif->cfg.eml_cap & (IEEE80211_EML_CAP_EMLSR_SUPP | + IEEE80211_EML_CAP_TRANSITION_TIMEOUT)) | + (ext_capa->eml_capabilities & (IEEE80211_EML_CAP_EMLSR_PADDING_DELAY | + IEEE80211_EML_CAP_EMLSR_TRANSITION_DELAY)); + + if (eml_cap & IEEE80211_EML_CAP_EMLSR_SUPP) { + eht_mld->eml_cap[0] = u16_get_bits(eml_cap, GENMASK(7, 0)); + eht_mld->eml_cap[1] = u16_get_bits(eml_cap, GENMASK(15, 8)); + } else { + eht_mld->str_cap[0] = BIT(1); + } +} + static void mt7925_mcu_sta_mld_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { - struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; + unsigned long valid = mvif->valid_links; + struct mt792x_bss_conf *mconf; + struct mt792x_link_sta *mlink; struct sta_rec_mld *mld; struct tlv *tlv; + int i, cnt = 0; tlv = mt76_connac_mcu_add_tlv(skb, STA_REC_MLD, sizeof(*mld)); mld = (struct sta_rec_mld *)tlv; - memcpy(mld->mac_addr, vif->addr, ETH_ALEN); - mld->primary_id = cpu_to_le16(wcid->idx); - mld->wlan_id = cpu_to_le16(wcid->idx); + memcpy(mld->mac_addr, sta->addr, ETH_ALEN); + mld->primary_id = cpu_to_le16(msta->deflink.wcid.idx); + mld->wlan_id = cpu_to_le16(msta->deflink.wcid.idx); + mld->link_num = min_t(u8, hweight16(mvif->valid_links), 2); - /* TODO: 0 means deflink only, add secondary link(1) later */ - mld->link_num = !!(hweight8(vif->active_links) > 1); - WARN_ON_ONCE(mld->link_num); + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + if (cnt == mld->link_num) + break; + + mconf = mt792x_vif_to_link(mvif, i); + mlink = mt792x_sta_to_link(msta, i); + mld->link[cnt].wlan_id = cpu_to_le16(mlink->wcid.idx); + mld->link[cnt++].bss_idx = mconf->mt76.idx; + + if (mlink != &msta->deflink) + mld->secondary_id = cpu_to_le16(mlink->wcid.idx); + } } static int @@ -1625,39 +1776,106 @@ mt7925_mcu_sta_cmd(struct mt76_phy *phy, if (IS_ERR(skb)) return PTR_ERR(skb); - if (info->sta || !info->offload_fw) - mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif, info->sta, + if (info->link_sta) + mt76_connac_mcu_sta_basic_tlv(dev, 
skb, info->vif, + info->link_sta, info->enable, info->newly); - if (info->sta && info->enable) { - mt7925_mcu_sta_phy_tlv(skb, info->vif, info->sta); - mt7925_mcu_sta_ht_tlv(skb, info->sta); - mt7925_mcu_sta_vht_tlv(skb, info->sta); - mt76_connac_mcu_sta_uapsd(skb, info->vif, info->sta); - mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->sta); - mt7925_mcu_sta_he_tlv(skb, info->sta); - mt7925_mcu_sta_he_6g_tlv(skb, info->sta); - mt7925_mcu_sta_eht_tlv(skb, info->sta); - mt7925_mcu_sta_rate_ctrl_tlv(skb, info->vif, info->sta); - mt7925_mcu_sta_state_v2_tlv(phy, skb, info->sta, + if (info->link_sta && info->enable) { + mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta); + mt7925_mcu_sta_ht_tlv(skb, info->link_sta); + mt7925_mcu_sta_vht_tlv(skb, info->link_sta); + mt76_connac_mcu_sta_uapsd(skb, info->vif, info->link_sta->sta); + mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->link_sta); + mt7925_mcu_sta_he_tlv(skb, info->link_sta); + mt7925_mcu_sta_he_6g_tlv(skb, info->link_sta); + mt7925_mcu_sta_eht_tlv(skb, info->link_sta); + mt7925_mcu_sta_rate_ctrl_tlv(skb, info->vif, + info->link_sta); + mt7925_mcu_sta_state_v2_tlv(phy, skb, info->link_sta, info->vif, info->rcpi, info->state); - mt7925_mcu_sta_mld_tlv(skb, info->vif, info->sta); + mt7925_mcu_sta_mld_tlv(skb, info->vif, info->link_sta->sta); } if (info->enable) - mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->sta); + mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta); return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true); } -int mt7925_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta, +static void +mt7925_mcu_sta_remove_tlv(struct sk_buff *skb) +{ + struct sta_rec_remove *rem; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, 0x25, sizeof(*rem)); + rem = (struct sta_rec_remove *)tlv; + rem->action = 0; +} + +static int +mt7925_mcu_mlo_sta_cmd(struct mt76_phy *phy, + struct mt76_sta_cmd_info *info) +{ + struct mt792x_vif *mvif = (struct mt792x_vif *)info->vif->drv_priv; + struct mt76_dev *dev = phy->dev; + struct mt792x_bss_conf *mconf; + struct sk_buff *skb; + + mconf = mt792x_vif_to_link(mvif, info->wcid->link_id); + + skb = __mt76_connac_mcu_alloc_sta_req(dev, &mconf->mt76, info->wcid, + MT7925_STA_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + if (info->enable) + mt76_connac_mcu_sta_basic_tlv(dev, skb, info->vif, + info->link_sta, + info->enable, info->newly); + + if (info->enable && info->link_sta) { + mt7925_mcu_sta_phy_tlv(skb, info->vif, info->link_sta); + mt7925_mcu_sta_ht_tlv(skb, info->link_sta); + mt7925_mcu_sta_vht_tlv(skb, info->link_sta); + mt76_connac_mcu_sta_uapsd(skb, info->vif, info->link_sta->sta); + mt7925_mcu_sta_amsdu_tlv(skb, info->vif, info->link_sta); + mt7925_mcu_sta_he_tlv(skb, info->link_sta); + mt7925_mcu_sta_he_6g_tlv(skb, info->link_sta); + mt7925_mcu_sta_eht_tlv(skb, info->link_sta); + mt7925_mcu_sta_rate_ctrl_tlv(skb, info->vif, + info->link_sta); + mt7925_mcu_sta_state_v2_tlv(phy, skb, info->link_sta, + info->vif, info->rcpi, + info->state); + + if (info->state != MT76_STA_INFO_STATE_NONE) { + mt7925_mcu_sta_mld_tlv(skb, info->vif, info->link_sta->sta); + mt7925_mcu_sta_eht_mld_tlv(skb, info->vif, info->link_sta->sta); + } + + mt7925_mcu_sta_hdr_trans_tlv(skb, info->vif, info->link_sta); + } + + if (!info->enable) { + mt7925_mcu_sta_remove_tlv(skb); + mt76_connac_mcu_add_tlv(skb, STA_REC_MLD_OFF, + sizeof(struct tlv)); + } + + return mt76_mcu_skb_send_msg(dev, skb, info->cmd, true); +} + +int mt7925_mcu_sta_update(struct mt792x_dev *dev, + struct 
ieee80211_link_sta *link_sta, struct ieee80211_vif *vif, bool enable, enum mt76_sta_info_state state) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - int rssi = -ewma_rssi_read(&mvif->rssi); + int rssi = -ewma_rssi_read(&mvif->bss_conf.rssi); struct mt76_sta_cmd_info info = { - .sta = sta, + .link_sta = link_sta, .vif = vif, .enable = enable, .cmd = MCU_UNI_CMD(STA_REC_UPDATE), @@ -1666,12 +1884,22 @@ int mt7925_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta, .rcpi = to_rcpi(rssi), }; struct mt792x_sta *msta; + struct mt792x_link_sta *mlink; + int err; - msta = sta ? (struct mt792x_sta *)sta->drv_priv : NULL; - info.wcid = msta ? &msta->wcid : &mvif->sta.wcid; - info.newly = msta ? state != MT76_STA_INFO_STATE_ASSOC : true; + if (link_sta) { + msta = (struct mt792x_sta *)link_sta->sta->drv_priv; + mlink = mt792x_sta_to_link(msta, link_sta->link_id); + } + info.wcid = link_sta ? &mlink->wcid : &mvif->sta.deflink.wcid; + info.newly = link_sta ? state != MT76_STA_INFO_STATE_ASSOC : true; - return mt7925_mcu_sta_cmd(&dev->mphy, &info); + if (ieee80211_vif_is_mld(vif)) + err = mt7925_mcu_mlo_sta_cmd(&dev->mphy, &info); + else + err = mt7925_mcu_sta_cmd(&dev->mphy, &info); + + return err; } int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev, @@ -1680,21 +1908,32 @@ int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev, { #define MT7925_FIF_BIT_CLR BIT(1) #define MT7925_FIF_BIT_SET BIT(0) + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + unsigned long valid = ieee80211_vif_is_mld(vif) ? + mvif->valid_links : BIT(0); + struct ieee80211_bss_conf *bss_conf; int err = 0; + int i; if (enable) { - err = mt7925_mcu_uni_bss_bcnft(dev, vif, true); - if (err) - return err; + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + bss_conf = mt792x_vif_to_bss_conf(vif, i); + err = mt7925_mcu_uni_bss_bcnft(dev, bss_conf, true); + if (err < 0) + return err; + } return mt7925_mcu_set_rxfilter(dev, 0, MT7925_FIF_BIT_SET, MT_WF_RFCR_DROP_OTHER_BEACON); } - err = mt7925_mcu_set_bss_pm(dev, vif, false); - if (err) - return err; + for_each_set_bit(i, &valid, IEEE80211_MLD_MAX_NUM_LINKS) { + bss_conf = mt792x_vif_to_bss_conf(vif, i); + err = mt7925_mcu_set_bss_pm(dev, bss_conf, false); + if (err) + return err; + } return mt7925_mcu_set_rxfilter(dev, 0, MT7925_FIF_BIT_CLR, @@ -1746,7 +1985,7 @@ int mt7925_mcu_set_sniffer(struct mt792x_dev *dev, struct ieee80211_vif *vif, } __packed enable; } __packed req = { .hdr = { - .band_idx = mvif->mt76.band_idx, + .band_idx = mvif->bss_conf.mt76.band_idx, }, .enable = { .tag = cpu_to_le16(UNI_SNIFFER_ENABLE), @@ -1805,7 +2044,7 @@ int mt7925_mcu_config_sniffer(struct mt792x_vif *vif, } __packed tlv; } __packed req = { .hdr = { - .band_idx = vif->mt76.band_idx, + .band_idx = vif->bss_conf.mt76.band_idx, }, .tlv = { .tag = cpu_to_le16(UNI_SNIFFER_CONFIG), @@ -1866,7 +2105,7 @@ mt7925_mcu_uni_add_beacon_offload(struct mt792x_dev *dev, } __packed beacon_tlv; } req = { .hdr = { - .bss_idx = mvif->mt76.idx, + .bss_idx = mvif->bss_conf.mt76.idx, }, .beacon_tlv = { .tag = cpu_to_le16(UNI_BSS_INFO_BCN_CONTENT), @@ -1918,83 +2157,59 @@ mt7925_mcu_uni_add_beacon_offload(struct mt792x_dev *dev, &req, sizeof(req), true); } -int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif, - struct ieee80211_chanctx_conf *ctx) +static +void mt7925_mcu_bss_rlm_tlv(struct sk_buff *skb, struct mt76_phy *phy, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) { - struct cfg80211_chan_def *chandef = ctx 
? &ctx->def : &phy->chandef; + struct cfg80211_chan_def *chandef = ctx ? &ctx->def : + &link_conf->chanreq.oper; int freq1 = chandef->center_freq1, freq2 = chandef->center_freq2; enum nl80211_band band = chandef->chan->band; - struct mt76_dev *mdev = phy->dev; - struct { - struct { - u8 bss_idx; - u8 pad[3]; - } __packed hdr; - struct rlm_tlv { - __le16 tag; - __le16 len; - u8 control_channel; - u8 center_chan; - u8 center_chan2; - u8 bw; - u8 tx_streams; - u8 rx_streams; - u8 ht_op_info; - u8 sco; - u8 band; - u8 pad[3]; - } __packed rlm; - } __packed rlm_req = { - .hdr = { - .bss_idx = mvif->idx, - }, - .rlm = { - .tag = cpu_to_le16(UNI_BSS_INFO_RLM), - .len = cpu_to_le16(sizeof(struct rlm_tlv)), - .control_channel = chandef->chan->hw_value, - .center_chan = ieee80211_frequency_to_channel(freq1), - .center_chan2 = ieee80211_frequency_to_channel(freq2), - .tx_streams = hweight8(phy->antenna_mask), - .ht_op_info = 4, /* set HT 40M allowed */ - .rx_streams = hweight8(phy->antenna_mask), - .band = band, - }, - }; + struct bss_rlm_tlv *req; + struct tlv *tlv; + + tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_RLM, sizeof(*req)); + req = (struct bss_rlm_tlv *)tlv; + req->control_channel = chandef->chan->hw_value; + req->center_chan = ieee80211_frequency_to_channel(freq1); + req->center_chan2 = ieee80211_frequency_to_channel(freq2); + req->tx_streams = hweight8(phy->antenna_mask); + req->ht_op_info = 4; /* set HT 40M allowed */ + req->rx_streams = hweight8(phy->antenna_mask); + req->band = band; switch (chandef->width) { case NL80211_CHAN_WIDTH_40: - rlm_req.rlm.bw = CMD_CBW_40MHZ; + req->bw = CMD_CBW_40MHZ; break; case NL80211_CHAN_WIDTH_80: - rlm_req.rlm.bw = CMD_CBW_80MHZ; + req->bw = CMD_CBW_80MHZ; break; case NL80211_CHAN_WIDTH_80P80: - rlm_req.rlm.bw = CMD_CBW_8080MHZ; + req->bw = CMD_CBW_8080MHZ; break; case NL80211_CHAN_WIDTH_160: - rlm_req.rlm.bw = CMD_CBW_160MHZ; + req->bw = CMD_CBW_160MHZ; break; case NL80211_CHAN_WIDTH_5: - rlm_req.rlm.bw = CMD_CBW_5MHZ; + req->bw = CMD_CBW_5MHZ; break; case NL80211_CHAN_WIDTH_10: - rlm_req.rlm.bw = CMD_CBW_10MHZ; + req->bw = CMD_CBW_10MHZ; break; case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: default: - rlm_req.rlm.bw = CMD_CBW_20MHZ; - rlm_req.rlm.ht_op_info = 0; + req->bw = CMD_CBW_20MHZ; + req->ht_op_info = 0; break; } - if (rlm_req.rlm.control_channel < rlm_req.rlm.center_chan) - rlm_req.rlm.sco = 1; /* SCA */ - else if (rlm_req.rlm.control_channel > rlm_req.rlm.center_chan) - rlm_req.rlm.sco = 3; /* SCB */ - - return mt76_mcu_send_msg(mdev, MCU_UNI_CMD(BSS_INFO_UPDATE), &rlm_req, - sizeof(rlm_req), true); + if (req->control_channel < req->center_chan) + req->sco = 1; /* SCA */ + else if (req->control_channel > req->center_chan) + req->sco = 3; /* SCB */ } static struct sk_buff * @@ -2014,18 +2229,36 @@ __mt7925_mcu_alloc_bss_req(struct mt76_dev *dev, struct mt76_vif *mvif, int len) return skb; } +int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_chanctx_conf *ctx) +{ + struct sk_buff *skb; + + skb = __mt7925_mcu_alloc_bss_req(phy->dev, mvif, + MT7925_BSS_UPDATE_MAX_SIZE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + mt7925_mcu_bss_rlm_tlv(skb, phy, link_conf, ctx); + + return mt76_mcu_skb_send_msg(phy->dev, skb, + MCU_UNI_CMD(BSS_INFO_UPDATE), true); +} + static u8 mt7925_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif, - enum nl80211_band band, struct ieee80211_sta *sta) + enum nl80211_band band, + struct ieee80211_link_sta 
*link_sta) { struct ieee80211_he_6ghz_capa *he_6ghz_capa; const struct ieee80211_sta_eht_cap *eht_cap; __le16 capa = 0; u8 mode = 0; - if (sta) { - he_6ghz_capa = &sta->deflink.he_6ghz_capa; - eht_cap = &sta->deflink.eht_cap; + if (link_sta) { + he_6ghz_capa = &link_sta->he_6ghz_capa; + eht_cap = &link_sta->eht_cap; } else { struct ieee80211_supported_band *sband; @@ -2061,18 +2294,19 @@ mt7925_get_phy_mode_ext(struct mt76_phy *phy, struct ieee80211_vif *vif, static void mt7925_mcu_bss_basic_tlv(struct sk_buff *skb, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta, struct ieee80211_chanctx_conf *ctx, struct mt76_phy *phy, u16 wlan_idx, bool enable) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - struct mt792x_sta *msta = sta ? (struct mt792x_sta *)sta->drv_priv : - &mvif->sta; - struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->chandef; + struct ieee80211_vif *vif = link_conf->vif; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); + struct cfg80211_chan_def *chandef = ctx ? &ctx->def : + &link_conf->chanreq.oper; enum nl80211_band band = chandef->chan->band; struct mt76_connac_bss_basic_tlv *basic_req; + struct mt792x_link_sta *mlink; struct tlv *tlv; int conn_type; u8 idx; @@ -2080,26 +2314,39 @@ mt7925_mcu_bss_basic_tlv(struct sk_buff *skb, tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_BASIC, sizeof(*basic_req)); basic_req = (struct mt76_connac_bss_basic_tlv *)tlv; - idx = mvif->mt76.omac_idx > EXT_BSSID_START ? HW_BSSID_0 : - mvif->mt76.omac_idx; + idx = mconf->mt76.omac_idx > EXT_BSSID_START ? HW_BSSID_0 : + mconf->mt76.omac_idx; basic_req->hw_bss_idx = idx; - basic_req->phymode_ext = mt7925_get_phy_mode_ext(phy, vif, band, sta); + basic_req->phymode_ext = mt7925_get_phy_mode_ext(phy, vif, band, + link_sta); if (band == NL80211_BAND_2GHZ) basic_req->nonht_basic_phy = cpu_to_le16(PHY_TYPE_ERP_INDEX); else basic_req->nonht_basic_phy = cpu_to_le16(PHY_TYPE_OFDM_INDEX); - memcpy(basic_req->bssid, vif->bss_conf.bssid, ETH_ALEN); - basic_req->phymode = mt76_connac_get_phy_mode(phy, vif, band, sta); - basic_req->bcn_interval = cpu_to_le16(vif->bss_conf.beacon_int); - basic_req->dtim_period = vif->bss_conf.dtim_period; + memcpy(basic_req->bssid, link_conf->bssid, ETH_ALEN); + basic_req->phymode = mt76_connac_get_phy_mode(phy, vif, band, link_sta); + basic_req->bcn_interval = cpu_to_le16(link_conf->beacon_int); + basic_req->dtim_period = link_conf->dtim_period; basic_req->bmc_tx_wlan_idx = cpu_to_le16(wlan_idx); - basic_req->sta_idx = cpu_to_le16(msta->wcid.idx); - basic_req->omac_idx = mvif->mt76.omac_idx; - basic_req->band_idx = mvif->mt76.band_idx; - basic_req->wmm_idx = mvif->mt76.wmm_idx; + basic_req->link_idx = mconf->mt76.idx; + + if (link_sta) { + struct mt792x_sta *msta; + + msta = (struct mt792x_sta *)link_sta->sta->drv_priv; + mlink = mt792x_sta_to_link(msta, link_sta->link_id); + + } else { + mlink = &mconf->vif->sta.deflink; + } + + basic_req->sta_idx = cpu_to_le16(mlink->wcid.idx); + basic_req->omac_idx = mconf->mt76.omac_idx; + basic_req->band_idx = mconf->mt76.band_idx; + basic_req->wmm_idx = mconf->mt76.wmm_idx; basic_req->conn_state = !enable; switch (vif->type) { @@ -2131,9 +2378,11 @@ mt7925_mcu_bss_basic_tlv(struct sk_buff *skb, } static void -mt7925_mcu_bss_sec_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) +mt7925_mcu_bss_sec_tlv(struct sk_buff *skb, + struct ieee80211_bss_conf *link_conf) { - struct mt76_vif *mvif = (struct 
mt76_vif *)vif->drv_priv; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); + struct mt76_vif *mvif = &mconf->mt76; struct bss_sec_tlv { __le16 tag; __le16 len; @@ -2178,12 +2427,13 @@ mt7925_mcu_bss_sec_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) static void mt7925_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt792x_phy *phy, struct ieee80211_chanctx_conf *ctx, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) + struct ieee80211_bss_conf *link_conf) { - struct cfg80211_chan_def *chandef = ctx ? &ctx->def : &phy->mt76->chandef; - struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; + struct cfg80211_chan_def *chandef = ctx ? &ctx->def : + &link_conf->chanreq.oper; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); enum nl80211_band band = chandef->chan->band; + struct mt76_vif *mvif = &mconf->mt76; struct bss_rate_tlv *bmc; struct tlv *tlv; u8 idx = mvif->mcast_rates_idx ? @@ -2205,39 +2455,44 @@ mt7925_mcu_bss_bmc_tlv(struct sk_buff *skb, struct mt792x_phy *phy, static void mt7925_mcu_bss_mld_tlv(struct sk_buff *skb, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta) + struct ieee80211_bss_conf *link_conf) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - bool is_mld = ieee80211_vif_is_mld(vif); + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); + struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv; struct bss_mld_tlv *mld; struct tlv *tlv; + bool is_mld; + + is_mld = ieee80211_vif_is_mld(link_conf->vif) || + (hweight16(mvif->valid_links) > 1); tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_MLD, sizeof(*mld)); mld = (struct bss_mld_tlv *)tlv; - mld->link_id = sta ? (is_mld ? vif->bss_conf.link_id : 0) : 0xff; - mld->group_mld_id = is_mld ? mvif->mt76.idx : 0xff; - mld->own_mld_id = mvif->mt76.idx + 32; + mld->link_id = is_mld ? link_conf->link_id : 0xff; + /* apply the index of the primary link */ + mld->group_mld_id = is_mld ? 
mvif->bss_conf.mt76.idx : 0xff; + mld->own_mld_id = mconf->mt76.idx + 32; mld->remap_idx = 0xff; + mld->eml_enable = !!(link_conf->vif->cfg.eml_cap & + IEEE80211_EML_CAP_EMLSR_SUPP); - if (sta) - memcpy(mld->mac_addr, sta->addr, ETH_ALEN); + memcpy(mld->mac_addr, link_conf->addr, ETH_ALEN); } static void -mt7925_mcu_bss_qos_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) +mt7925_mcu_bss_qos_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf) { struct mt76_connac_bss_qos_tlv *qos; struct tlv *tlv; tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_QBSS, sizeof(*qos)); qos = (struct mt76_connac_bss_qos_tlv *)tlv; - qos->qos = vif->bss_conf.qos; + qos->qos = link_conf->qos; } static void -mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, +mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf, struct mt792x_phy *phy) { #define DEFAULT_HE_PE_DURATION 4 @@ -2246,16 +2501,16 @@ mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, struct bss_info_uni_he *he; struct tlv *tlv; - cap = mt76_connac_get_he_phy_cap(phy->mt76, vif); + cap = mt76_connac_get_he_phy_cap(phy->mt76, link_conf->vif); tlv = mt76_connac_mcu_add_tlv(skb, UNI_BSS_INFO_HE_BASIC, sizeof(*he)); he = (struct bss_info_uni_he *)tlv; - he->he_pe_duration = vif->bss_conf.htc_trig_based_pkt_ext; + he->he_pe_duration = link_conf->htc_trig_based_pkt_ext; if (!he->he_pe_duration) he->he_pe_duration = DEFAULT_HE_PE_DURATION; - he->he_rts_thres = cpu_to_le16(vif->bss_conf.frame_time_rts_th); + he->he_rts_thres = cpu_to_le16(link_conf->frame_time_rts_th); if (!he->he_rts_thres) he->he_rts_thres = cpu_to_le16(DEFAULT_HE_DURATION_RTS_THRES); @@ -2265,7 +2520,7 @@ mt7925_mcu_bss_he_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, } static void -mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, +mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_bss_conf *link_conf, bool enable) { struct bss_info_uni_bss_color *color; @@ -2275,15 +2530,16 @@ mt7925_mcu_bss_color_tlv(struct sk_buff *skb, struct ieee80211_vif *vif, color = (struct bss_info_uni_bss_color *)tlv; color->enable = enable ? - vif->bss_conf.he_bss_color.enabled : 0; + link_conf->he_bss_color.enabled : 0; color->bss_color = enable ? 
- vif->bss_conf.he_bss_color.color : 0; + link_conf->he_bss_color.color : 0; } static void -mt7925_mcu_bss_ifs_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) +mt7925_mcu_bss_ifs_tlv(struct sk_buff *skb, + struct ieee80211_bss_conf *link_conf) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv; struct mt792x_phy *phy = mvif->phy; struct bss_ifs_time_tlv *ifs_time; struct tlv *tlv; @@ -2295,18 +2551,18 @@ mt7925_mcu_bss_ifs_tlv(struct sk_buff *skb, struct ieee80211_vif *vif) } int mt7925_mcu_set_timing(struct mt792x_phy *phy, - struct ieee80211_vif *vif) + struct ieee80211_bss_conf *link_conf) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); struct mt792x_dev *dev = phy->dev; struct sk_buff *skb; - skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76, + skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mconf->mt76, MT7925_BSS_UPDATE_MAX_SIZE); if (IS_ERR(skb)) return PTR_ERR(skb); - mt7925_mcu_bss_ifs_tlv(skb, vif); + mt7925_mcu_bss_ifs_tlv(skb, link_conf); return mt76_mcu_skb_send_msg(&dev->mt76, skb, MCU_UNI_CMD(BSS_INFO_UPDATE), true); @@ -2314,41 +2570,42 @@ int mt7925_mcu_set_timing(struct mt792x_phy *phy, int mt7925_mcu_add_bss_info(struct mt792x_phy *phy, struct ieee80211_chanctx_conf *ctx, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta, int enable) { - struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + struct mt792x_vif *mvif = (struct mt792x_vif *)link_conf->vif->drv_priv; + struct mt792x_bss_conf *mconf = mt792x_link_conf_to_mconf(link_conf); struct mt792x_dev *dev = phy->dev; + struct mt792x_link_sta *mlink_bc; struct sk_buff *skb; - int err; - skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mvif->mt76, + skb = __mt7925_mcu_alloc_bss_req(&dev->mt76, &mconf->mt76, MT7925_BSS_UPDATE_MAX_SIZE); if (IS_ERR(skb)) return PTR_ERR(skb); + mlink_bc = mt792x_sta_to_link(&mvif->sta, mconf->link_id); + /* bss_basic must be first */ - mt7925_mcu_bss_basic_tlv(skb, vif, sta, ctx, phy->mt76, - mvif->sta.wcid.idx, enable); - mt7925_mcu_bss_sec_tlv(skb, vif); + mt7925_mcu_bss_basic_tlv(skb, link_conf, link_sta, ctx, phy->mt76, + mlink_bc->wcid.idx, enable); + mt7925_mcu_bss_sec_tlv(skb, link_conf); + mt7925_mcu_bss_bmc_tlv(skb, phy, ctx, link_conf); + mt7925_mcu_bss_qos_tlv(skb, link_conf); + mt7925_mcu_bss_mld_tlv(skb, link_conf); + mt7925_mcu_bss_ifs_tlv(skb, link_conf); - mt7925_mcu_bss_bmc_tlv(skb, phy, ctx, vif, sta); - mt7925_mcu_bss_qos_tlv(skb, vif); - mt7925_mcu_bss_mld_tlv(skb, vif, sta); - mt7925_mcu_bss_ifs_tlv(skb, vif); - - if (vif->bss_conf.he_support) { - mt7925_mcu_bss_he_tlv(skb, vif, phy); - mt7925_mcu_bss_color_tlv(skb, vif, enable); + if (link_conf->he_support) { + mt7925_mcu_bss_he_tlv(skb, link_conf, phy); + mt7925_mcu_bss_color_tlv(skb, link_conf, enable); } - err = mt76_mcu_skb_send_msg(&dev->mt76, skb, - MCU_UNI_CMD(BSS_INFO_UPDATE), true); - if (err < 0) - return err; + if (enable) + mt7925_mcu_bss_rlm_tlv(skb, phy->mt76, link_conf, ctx); - return mt7925_mcu_set_chctx(phy->mt76, &mvif->mt76, ctx); + return mt76_mcu_skb_send_msg(&dev->mt76, skb, + MCU_UNI_CMD(BSS_INFO_UPDATE), true); } int mt7925_mcu_set_dbdc(struct mt76_phy *phy) diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h index b8315a89f4a9..ac53bdc99332 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mcu.h @@ -366,7 +366,10 @@ struct bss_mld_tlv { u8 mac_addr[ETH_ALEN]; u8 remap_idx; u8 link_id; - u8 __rsv[2]; + u8 eml_enable; + u8 max_link_num; + u8 hybrid_mode; + u8 __rsv[3]; } __packed; struct sta_rec_ba_uni { @@ -440,6 +443,17 @@ struct sta_rec_mld { } __packed link[2]; } __packed; +struct sta_rec_eht_mld { + __le16 tag; + __le16 len; + u8 nsep; + u8 mld_type; + u8 __rsv1[1]; + u8 str_cap[3]; + u8 eml_cap[3]; + u8 __rsv2[3]; +} __packed; + struct bss_ifs_time_tlv { __le16 tag; __le16 len; @@ -456,6 +470,21 @@ struct bss_ifs_time_tlv { __le16 eifs_cck_time; } __packed; +struct bss_rlm_tlv { + __le16 tag; + __le16 len; + u8 control_channel; + u8 center_chan; + u8 center_chan2; + u8 bw; + u8 tx_streams; + u8 rx_streams; + u8 ht_op_info; + u8 sco; + u8 band; + u8 pad[3]; +} __packed; + #define MT7925_STA_UPDATE_MAX_SIZE (sizeof(struct sta_req_hdr) + \ sizeof(struct sta_rec_basic) + \ sizeof(struct sta_rec_bf) + \ @@ -474,7 +503,8 @@ struct bss_ifs_time_tlv { sizeof(struct sta_rec_eht) + \ sizeof(struct sta_rec_hdr_trans) + \ sizeof(struct sta_rec_mld) + \ - sizeof(struct tlv)) + sizeof(struct tlv) * 2 + \ + sizeof(struct sta_rec_remove)) #define MT7925_BSS_UPDATE_MAX_SIZE (sizeof(struct bss_req_hdr) + \ sizeof(struct mt76_connac_bss_basic_tlv) + \ @@ -484,6 +514,7 @@ struct bss_ifs_time_tlv { sizeof(struct bss_info_uni_he) + \ sizeof(struct bss_info_uni_bss_color) + \ sizeof(struct bss_ifs_time_tlv) + \ + sizeof(struct bss_rlm_tlv) + \ sizeof(struct tlv)) #define MT_CONNAC3_SKU_POWER_LIMIT 449 @@ -538,6 +569,26 @@ struct mt7925_wow_pattern_tlv { u8 rsv[7]; } __packed; +struct roc_acquire_tlv { + __le16 tag; + __le16 len; + u8 bss_idx; + u8 tokenid; + u8 control_channel; + u8 sco; + u8 band; + u8 bw; + u8 center_chan; + u8 center_chan2; + u8 bw_from_ap; + u8 center_chan_from_ap; + u8 center_chan2_from_ap; + u8 reqtype; + __le32 maxinterval; + u8 dbdcband; + u8 rsv[3]; +} __packed; + static inline enum connac3_mcu_cipher_type mt7925_mcu_get_cipher(int cipher) { @@ -578,18 +629,18 @@ int mt7925_mcu_sched_scan_enable(struct mt76_phy *phy, bool enable); int mt7925_mcu_add_bss_info(struct mt792x_phy *phy, struct ieee80211_chanctx_conf *ctx, - struct ieee80211_vif *vif, - struct ieee80211_sta *sta, + struct ieee80211_bss_conf *link_conf, + struct ieee80211_link_sta *link_sta, int enable); int mt7925_mcu_set_timing(struct mt792x_phy *phy, - struct ieee80211_vif *vif); + struct ieee80211_bss_conf *link_conf); int mt7925_mcu_set_deep_sleep(struct mt792x_dev *dev, bool enable); int mt7925_mcu_set_channel_domain(struct mt76_phy *phy); int mt7925_mcu_set_radio_en(struct mt792x_phy *phy, bool enable); int mt7925_mcu_set_chctx(struct mt76_phy *phy, struct mt76_vif *mvif, + struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx); int mt7925_mcu_set_rate_txpower(struct mt76_phy *phy); int mt7925_mcu_update_arp_filter(struct mt76_dev *dev, - struct mt76_vif *vif, - struct ieee80211_bss_conf *info); + struct ieee80211_bss_conf *link_conf); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h index 8a4a71f6bcb6..669f3a079d04 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h +++ b/drivers/net/wireless/mediatek/mt76/mt7925/mt7925.h @@ -30,17 +30,22 @@ enum { UNI_ROC_ACQUIRE, UNI_ROC_ABORT, + UNI_ROC_SUB_LINK = 3, UNI_ROC_NUM }; enum mt7925_roc_req { MT7925_ROC_REQ_JOIN, MT7925_ROC_REQ_ROC, + 
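/* MLO additions: for eMLSR, MLSR_AG is used when one of the two links is on + * 2 GHz and MLSR_AA otherwise; the explicit values follow the firmware + * interface. + */ +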
MT7925_ROC_REQ_SUB_LINK, + MT7925_ROC_REQ_MLSR_AG = 10, + MT7925_ROC_REQ_MLSR_AA, MT7925_ROC_REQ_NUM }; enum { UNI_EVENT_ROC_GRANT = 0, + UNI_EVENT_ROC_GRANT_SUB_LINK = 4, UNI_EVENT_ROC_TAG_NUM }; @@ -192,13 +197,15 @@ int __mt7925_start(struct mt792x_phy *phy); int mt7925_register_device(struct mt792x_dev *dev); void mt7925_unregister_device(struct mt792x_dev *dev); int mt7925_run_firmware(struct mt792x_dev *dev); -int mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, struct ieee80211_vif *vif, +int mt7925_mcu_set_bss_pm(struct mt792x_dev *dev, + struct ieee80211_bss_conf *link_conf, bool enable); -int mt7925_mcu_sta_update(struct mt792x_dev *dev, struct ieee80211_sta *sta, +int mt7925_mcu_sta_update(struct mt792x_dev *dev, + struct ieee80211_link_sta *link_sta, struct ieee80211_vif *vif, bool enable, enum mt76_sta_info_state state); int mt7925_mcu_set_chan_info(struct mt792x_phy *phy, u16 tag); -int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_vif *vif); +int mt7925_mcu_set_tx(struct mt792x_dev *dev, struct ieee80211_bss_conf *bss_conf); int mt7925_mcu_set_eeprom(struct mt792x_dev *dev); int mt7925_mcu_get_rx_rate(struct mt792x_phy *phy, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct rate_info *rate); @@ -228,6 +235,7 @@ void mt7925_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, struct sk_buff *skb, u32 *info); void mt7925_stats_work(struct work_struct *work); void mt7925_set_stream_he_eht_caps(struct mt792x_phy *phy); +int mt7925_init_mlo_caps(struct mt792x_phy *phy); int mt7925_init_debugfs(struct mt792x_dev *dev); int mt7925_mcu_set_beacon_filter(struct mt792x_dev *dev, @@ -241,7 +249,8 @@ int mt7925_mcu_uni_rx_ba(struct mt792x_dev *dev, bool enable); void mt7925_scan_work(struct work_struct *work); void mt7925_roc_work(struct work_struct *work); -int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, struct ieee80211_vif *vif); +int mt7925_mcu_uni_bss_ps(struct mt792x_dev *dev, + struct ieee80211_bss_conf *link_conf); void mt7925_coredump_work(struct work_struct *work); int mt7925_get_txpwr_info(struct mt792x_dev *dev, u8 band_idx, struct mt7925_txpwr *txpwr); @@ -252,7 +261,7 @@ void mt7925_mac_write_txwi(struct mt76_dev *dev, __le32 *txwi, struct ieee80211_key_conf *key, int pid, enum mt76_txq_id qid, u32 changed); void mt7925_txwi_free(struct mt792x_dev *dev, struct mt76_txwi_cache *t, - struct ieee80211_sta *sta, bool clear_status, + struct ieee80211_sta *sta, struct mt76_wcid *wcid, struct list_head *free_list); int mt7925_mcu_parse_response(struct mt76_dev *mdev, int cmd, struct sk_buff *skb, int seq); @@ -291,20 +300,24 @@ int mt7925_set_tx_sar_pwr(struct ieee80211_hw *hw, int mt7925_mcu_regval(struct mt792x_dev *dev, u32 regidx, u32 *val, bool set); int mt7925_mcu_set_clc(struct mt792x_dev *dev, u8 *alpha2, enum environment_cap env_cap); -int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, +int mt7925_mcu_set_mlo_roc(struct mt792x_bss_conf *mconf, u16 sel_links, + int duration, u8 token_id); +int mt7925_mcu_set_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf, struct ieee80211_channel *chan, int duration, enum mt7925_roc_req type, u8 token_id); -int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_vif *vif, +int mt7925_mcu_abort_roc(struct mt792x_phy *phy, struct mt792x_bss_conf *mconf, u8 token_id); int mt7925_mcu_fill_message(struct mt76_dev *mdev, struct sk_buff *skb, int cmd, int *wait_seq); int mt7925_mcu_add_key(struct mt76_dev *dev, struct ieee80211_vif *vif, struct mt76_connac_sta_key_conf 
*sta_key_conf, struct ieee80211_key_conf *key, int mcu_cmd, - struct mt76_wcid *wcid, enum set_key_cmd cmd); + struct mt76_wcid *wcid, enum set_key_cmd cmd, + struct mt792x_sta *msta); int mt7925_mcu_set_rts_thresh(struct mt792x_phy *phy, u32 val); int mt7925_mcu_wtbl_update_hdr_trans(struct mt792x_dev *dev, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); + struct ieee80211_sta *sta, + int link_id); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c index 07b74d492ce1..6e4f4e78c350 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci.c @@ -254,7 +254,7 @@ static int mt7925_dma_init(struct mt792x_dev *dev) if (ret < 0) return ret; - netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, + netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, mt792x_poll_tx); napi_enable(&dev->mt76.tx_napi); @@ -373,6 +373,9 @@ static int mt7925_pci_probe(struct pci_dev *pdev, bus_ops->rmw = mt7925_rmw; dev->mt76.bus = bus_ops; + if (!mt7925_disable_aspm && mt76_pci_aspm_supported(pdev)) + dev->aspm_supported = true; + ret = __mt792x_mcu_fw_pmctrl(dev); if (ret) goto err_free_dev; diff --git a/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c index 9fca887977d2..faedbf766d1a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7925/pci_mac.c @@ -34,9 +34,9 @@ int mt7925e_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, if (sta) { struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; - if (time_after(jiffies, msta->last_txs + HZ / 4)) { + if (time_after(jiffies, msta->deflink.last_txs + HZ / 4)) { info->flags |= IEEE80211_TX_CTL_REQ_TX_STATUS; - msta->last_txs = jiffies; + msta->deflink.last_txs = jiffies; } } @@ -60,7 +60,7 @@ void mt7925_tx_token_put(struct mt792x_dev *dev) spin_lock_bh(&dev->mt76.token_lock); idr_for_each_entry(&dev->mt76.token, txwi, id) { - mt7925_txwi_free(dev, txwi, NULL, false, NULL); + mt7925_txwi_free(dev, txwi, NULL, NULL, NULL); dev->mt76.token_count--; } spin_unlock_bh(&dev->mt76.token_lock); diff --git a/drivers/net/wireless/mediatek/mt76/mt792x.h b/drivers/net/wireless/mediatek/mt76/mt792x.h index 20578497a405..7fa74d59cc48 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x.h +++ b/drivers/net/wireless/mediatek/mt76/mt792x.h @@ -27,6 +27,7 @@ #define MT792x_CHIP_CAP_CLC_EVT_EN BIT(0) #define MT792x_CHIP_CAP_RSSI_NOTIFY_EVT_EN BIT(1) +#define MT792x_CHIP_CAP_MLO_EVT_EN BIT(2) /* NOTE: used to map mt76_rates. 
idx may change if firmware expands table */ #define MT792x_BASIC_RATES_TBL 11 @@ -81,11 +82,9 @@ enum mt792x_reg_power_type { DECLARE_EWMA(avg_signal, 10, 8) -struct mt792x_sta { +struct mt792x_link_sta { struct mt76_wcid wcid; /* must be first */ - struct mt792x_vif *vif; - u32 airtime_ac[8]; int ack_signal; @@ -94,21 +93,46 @@ struct mt792x_sta { unsigned long last_txs; struct mt76_connac_sta_key_conf bip; + + struct mt792x_sta *sta; + + struct ieee80211_link_sta *pri_link; +}; + +struct mt792x_sta { + struct mt792x_link_sta deflink; /* must be first */ + struct mt792x_link_sta __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; + + struct mt792x_vif *vif; + + u16 valid_links; + u8 deflink_id; }; DECLARE_EWMA(rssi, 10, 8); -struct mt792x_vif { +struct mt792x_chanctx { + struct mt792x_bss_conf *bss_conf; +}; + +struct mt792x_bss_conf { struct mt76_vif mt76; /* must be first */ + struct mt792x_vif *vif; + struct ewma_rssi rssi; + struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; + unsigned int link_id; +}; + +struct mt792x_vif { + struct mt792x_bss_conf bss_conf; /* must be first */ + struct mt792x_bss_conf __rcu *link_conf[IEEE80211_MLD_MAX_NUM_LINKS]; struct mt792x_sta sta; struct mt792x_sta *wep_sta; struct mt792x_phy *phy; - - struct ewma_rssi rssi; - - struct ieee80211_tx_queue_params queue_params[IEEE80211_NUM_ACS]; + u16 valid_links; + u8 deflink_id; }; struct mt792x_phy { @@ -140,6 +164,7 @@ struct mt792x_phy { #endif void *clc[MT792x_CLC_MAX_NUM]; u64 chip_cap; + u16 eml_cap; struct work_struct roc_work; struct timer_list roc_timer; @@ -190,6 +215,7 @@ struct mt792x_dev { bool fw_assert:1; bool has_eht:1; bool regd_in_progress:1; + bool aspm_supported:1; wait_queue_head_t wait; struct work_struct init_work; @@ -211,6 +237,66 @@ struct mt792x_dev { u32 backup_l2; }; +static inline struct mt792x_bss_conf * +mt792x_vif_to_link(struct mt792x_vif *mvif, u8 link_id) +{ + struct ieee80211_vif *vif; + + vif = container_of((void *)mvif, struct ieee80211_vif, drv_priv); + + if (!ieee80211_vif_is_mld(vif) || + link_id >= IEEE80211_LINK_UNSPECIFIED) + return &mvif->bss_conf; + + return rcu_dereference_protected(mvif->link_conf[link_id], + lockdep_is_held(&mvif->phy->dev->mt76.mutex)); +} + +static inline struct mt792x_link_sta * +mt792x_sta_to_link(struct mt792x_sta *msta, u8 link_id) +{ + struct ieee80211_vif *vif; + + vif = container_of((void *)msta->vif, struct ieee80211_vif, drv_priv); + + if (!ieee80211_vif_is_mld(vif) || + link_id >= IEEE80211_LINK_UNSPECIFIED) + return &msta->deflink; + + return rcu_dereference_protected(msta->link[link_id], + lockdep_is_held(&msta->vif->phy->dev->mt76.mutex)); +} + +static inline struct mt792x_bss_conf * +mt792x_link_conf_to_mconf(struct ieee80211_bss_conf *link_conf) +{ + struct ieee80211_vif *vif = link_conf->vif; + struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; + + return mt792x_vif_to_link(mvif, link_conf->link_id); +} + +static inline struct ieee80211_bss_conf * +mt792x_vif_to_bss_conf(struct ieee80211_vif *vif, unsigned int link_id) +{ + if (!ieee80211_vif_is_mld(vif) || + link_id >= IEEE80211_LINK_UNSPECIFIED) + return &vif->bss_conf; + + return link_conf_dereference_protected(vif, link_id); +} + +static inline struct ieee80211_link_sta * +mt792x_sta_to_link_sta(struct ieee80211_vif *vif, struct ieee80211_sta *sta, + unsigned int link_id) +{ + if (!ieee80211_vif_is_mld(vif) || + link_id >= IEEE80211_LINK_UNSPECIFIED) + return &sta->deflink; + + return link_sta_dereference_protected(sta, link_id); +} + static inline struct 
mt792x_dev * mt792x_hw_dev(struct ieee80211_hw *hw) { @@ -251,7 +337,7 @@ static inline bool mt792x_dma_need_reinit(struct mt792x_dev *dev) #define mt792x_mutex_release(dev) \ mt76_connac_mutex_release(&(dev)->mt76, &(dev)->pm) -void mt792x_stop(struct ieee80211_hw *hw); +void mt792x_stop(struct ieee80211_hw *hw, bool suspend); void mt792x_pm_wake_work(struct work_struct *work); void mt792x_pm_power_save_work(struct work_struct *work); void mt792x_reset(struct mt76_dev *mdev); @@ -325,6 +411,9 @@ mt792x_get_mac80211_ops(struct device *dev, int mt792x_init_wcid(struct mt792x_dev *dev); int mt792x_mcu_drv_pmctrl(struct mt792x_dev *dev); int mt792x_mcu_fw_pmctrl(struct mt792x_dev *dev); +void mt792x_mac_link_bss_remove(struct mt792x_dev *dev, + struct mt792x_bss_conf *mconf, + struct mt792x_link_sta *mlink); static inline char *mt792x_ram_name(struct mt792x_dev *dev) { @@ -368,7 +457,7 @@ void mt792xu_wr(struct mt76_dev *dev, u32 addr, u32 val); u32 mt792xu_rmw(struct mt76_dev *dev, u32 addr, u32 mask, u32 val); void mt792xu_copy(struct mt76_dev *dev, u32 offset, const void *data, int len); void mt792xu_disconnect(struct usb_interface *usb_intf); -void mt792xu_stop(struct ieee80211_hw *hw); +void mt792xu_stop(struct ieee80211_hw *hw, bool suspend); static inline void mt792x_skb_add_usb_sdio_hdr(struct mt792x_dev *dev, struct sk_buff *skb, diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_core.c b/drivers/net/wireless/mediatek/mt76/mt792x_core.c index a405af8d9052..78fe37c2e07b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt792x_core.c @@ -59,20 +59,42 @@ void mt792x_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_vif *vif = info->control.vif; struct mt76_wcid *wcid = &dev->mt76.global_wcid; + u8 link_id; int qid; if (control->sta) { + struct mt792x_link_sta *mlink; struct mt792x_sta *sta; - + link_id = u32_get_bits(info->control.flags, + IEEE80211_TX_CTRL_MLO_LINK); sta = (struct mt792x_sta *)control->sta->drv_priv; - wcid = &sta->wcid; + mlink = mt792x_sta_to_link(sta, link_id); + wcid = &mlink->wcid; } if (vif && !control->sta) { struct mt792x_vif *mvif; mvif = (struct mt792x_vif *)vif->drv_priv; - wcid = &mvif->sta.wcid; + wcid = &mvif->sta.deflink.wcid; + } + + if (vif && control->sta && ieee80211_vif_is_mld(vif)) { + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + struct ieee80211_link_sta *link_sta; + struct ieee80211_bss_conf *conf; + + link_id = wcid->link_id; + rcu_read_lock(); + conf = rcu_dereference(vif->link_conf[link_id]); + memcpy(hdr->addr2, conf->addr, ETH_ALEN); + + link_sta = rcu_dereference(control->sta->link[link_id]); + memcpy(hdr->addr1, link_sta->addr, ETH_ALEN); + + if (vif->type == NL80211_IFTYPE_STATION) + memcpy(hdr->addr3, conf->bssid, ETH_ALEN); + rcu_read_unlock(); } if (mt76_connac_pm_ref(mphy, &dev->pm)) { @@ -91,7 +113,7 @@ void mt792x_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, } EXPORT_SYMBOL_GPL(mt792x_tx); -void mt792x_stop(struct ieee80211_hw *hw) +void mt792x_stop(struct ieee80211_hw *hw, bool suspend) { struct mt792x_dev *dev = mt792x_hw_dev(hw); struct mt792x_phy *phy = mt792x_hw_phy(hw); @@ -113,31 +135,47 @@ void mt792x_stop(struct ieee80211_hw *hw) } EXPORT_SYMBOL_GPL(mt792x_stop); +void mt792x_mac_link_bss_remove(struct mt792x_dev *dev, + struct mt792x_bss_conf *mconf, + struct mt792x_link_sta *mlink) +{ + struct ieee80211_vif *vif = container_of((void 
*)mconf->vif, + struct ieee80211_vif, drv_priv); + struct ieee80211_bss_conf *link_conf; + int idx = mlink->wcid.idx; + + link_conf = mt792x_vif_to_bss_conf(vif, mconf->link_id); + + mt76_connac_free_pending_tx_skbs(&dev->pm, &mlink->wcid); + mt76_connac_mcu_uni_add_dev(&dev->mphy, link_conf, &mlink->wcid, false); + + rcu_assign_pointer(dev->mt76.wcid[idx], NULL); + + dev->mt76.vif_mask &= ~BIT_ULL(mconf->mt76.idx); + mconf->vif->phy->omac_mask &= ~BIT_ULL(mconf->mt76.omac_idx); + + spin_lock_bh(&dev->mt76.sta_poll_lock); + if (!list_empty(&mlink->wcid.poll_list)) + list_del_init(&mlink->wcid.poll_list); + spin_unlock_bh(&dev->mt76.sta_poll_lock); + + mt76_wcid_cleanup(&dev->mt76, &mlink->wcid); +} +EXPORT_SYMBOL_GPL(mt792x_mac_link_bss_remove); + void mt792x_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; - struct mt792x_sta *msta = &mvif->sta; struct mt792x_dev *dev = mt792x_hw_dev(hw); - struct mt792x_phy *phy = mt792x_hw_phy(hw); - int idx = msta->wcid.idx; + struct mt792x_bss_conf *mconf; mt792x_mutex_acquire(dev); - mt76_connac_free_pending_tx_skbs(&dev->pm, &msta->wcid); - mt76_connac_mcu_uni_add_dev(&dev->mphy, vif, &mvif->sta.wcid, false); - rcu_assign_pointer(dev->mt76.wcid[idx], NULL); + mconf = mt792x_link_conf_to_mconf(&vif->bss_conf); + mt792x_mac_link_bss_remove(dev, mconf, &mvif->sta.deflink); - dev->mt76.vif_mask &= ~BIT_ULL(mvif->mt76.idx); - phy->omac_mask &= ~BIT_ULL(mvif->mt76.omac_idx); mt792x_mutex_release(dev); - - spin_lock_bh(&dev->mt76.sta_poll_lock); - if (!list_empty(&msta->wcid.poll_list)) - list_del_init(&msta->wcid.poll_list); - spin_unlock_bh(&dev->mt76.sta_poll_lock); - - mt76_wcid_cleanup(&dev->mt76, &msta->wcid); } EXPORT_SYMBOL_GPL(mt792x_remove_interface); @@ -149,7 +187,7 @@ int mt792x_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, /* no need to update right away, we'll get BSS_CHANGED_QOS */ queue = mt76_connac_lmac_mapping(queue); - mvif->queue_params[queue] = *params; + mvif->bss_conf.queue_params[queue] = *params; return 0; } @@ -178,7 +216,7 @@ u64 mt792x_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); - u8 omac_idx = mvif->mt76.omac_idx; + u8 omac_idx = mvif->bss_conf.mt76.omac_idx; union { u64 t64; u32 t32[2]; @@ -204,7 +242,7 @@ void mt792x_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, { struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); - u8 omac_idx = mvif->mt76.omac_idx; + u8 omac_idx = mvif->bss_conf.mt76.omac_idx; union { u64 t64; u32 t32[2]; @@ -261,11 +299,13 @@ int mt792x_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { + struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv; struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); mutex_lock(&dev->mt76.mutex); - mvif->mt76.ctx = ctx; + mvif->bss_conf.mt76.ctx = ctx; + mctx->bss_conf = &mvif->bss_conf; mutex_unlock(&dev->mt76.mutex); return 0; @@ -277,11 +317,13 @@ void mt792x_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { + struct mt792x_chanctx *mctx = (struct mt792x_chanctx *)ctx->drv_priv; struct mt792x_vif *mvif = (struct mt792x_vif *)vif->drv_priv; struct mt792x_dev *dev = mt792x_hw_dev(hw); 
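/*
 * The mt792x_vif_to_link()/mt792x_sta_to_link() helpers introduced above
 * share one lookup pattern: a non-MLD interface, or an unspecified
 * link_id, resolves to the embedded default link, and only a real MLO
 * setup dereferences the per-link pointer array (an __rcu pointer read
 * with rcu_dereference_protected() under dev->mt76.mutex). A minimal
 * standalone sketch of that pattern follows; the demo_* types are
 * illustrative stand-ins, not the driver's own structures:
 */
#include <stdbool.h>

#define DEMO_MAX_LINKS 15                       /* IEEE80211_MLD_MAX_NUM_LINKS */
#define DEMO_LINK_UNSPECIFIED DEMO_MAX_LINKS    /* IEEE80211_LINK_UNSPECIFIED */

struct demo_link {
	int wcid_idx;
};

struct demo_sta {
	struct demo_link deflink;               /* always valid, "must be first" */
	struct demo_link *link[DEMO_MAX_LINKS]; /* populated only for MLO */
	bool is_mld;
};

static struct demo_link *demo_sta_to_link(struct demo_sta *sta,
					  unsigned int link_id)
{
	/* Non-MLD stations keep using the default link. */
	if (!sta->is_mld || link_id >= DEMO_LINK_UNSPECIFIED)
		return &sta->deflink;

	/* MLO: per-link lookup; the kernel guards this with the mutex. */
	return sta->link[link_id];
}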
mutex_lock(&dev->mt76.mutex); - mvif->mt76.ctx = NULL; + mctx->bss_conf = NULL; + mvif->bss_conf.mt76.ctx = NULL; mutex_unlock(&dev->mt76.mutex); } EXPORT_SYMBOL_GPL(mt792x_unassign_vif_chanctx); @@ -405,10 +447,10 @@ mt792x_ethtool_worker(void *wi_data, struct ieee80211_sta *sta) struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; struct mt76_ethtool_worker_info *wi = wi_data; - if (msta->vif->mt76.idx != wi->idx) + if (msta->vif->bss_conf.mt76.idx != wi->idx) return; - mt76_ethtool_worker(wi, &msta->wcid.stats, true); + mt76_ethtool_worker(wi, &msta->deflink.wcid.stats, true); } void mt792x_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, @@ -421,7 +463,7 @@ void mt792x_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct mt76_mib_stats *mib = &phy->mib; struct mt76_ethtool_worker_info wi = { .data = data, - .idx = mvif->mt76.idx, + .idx = mvif->bss_conf.mt76.idx, }; int i, ei = 0; @@ -487,7 +529,7 @@ void mt792x_sta_statistics(struct ieee80211_hw *hw, struct station_info *sinfo) { struct mt792x_sta *msta = (struct mt792x_sta *)sta->drv_priv; - struct rate_info *txrate = &msta->wcid.rate; + struct rate_info *txrate = &msta->deflink.wcid.rate; if (!txrate->legacy && !txrate->flags) return; @@ -502,19 +544,19 @@ void mt792x_sta_statistics(struct ieee80211_hw *hw, sinfo->txrate.he_dcm = txrate->he_dcm; sinfo->txrate.he_ru_alloc = txrate->he_ru_alloc; } - sinfo->tx_failed = msta->wcid.stats.tx_failed; + sinfo->tx_failed = msta->deflink.wcid.stats.tx_failed; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED); - sinfo->tx_retries = msta->wcid.stats.tx_retries; + sinfo->tx_retries = msta->deflink.wcid.stats.tx_retries; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES); sinfo->txrate.flags = txrate->flags; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); - sinfo->ack_signal = (s8)msta->ack_signal; + sinfo->ack_signal = (s8)msta->deflink.ack_signal; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL); - sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->avg_ack_signal); + sinfo->avg_ack_signal = -(s8)ewma_avg_signal_read(&msta->deflink.avg_ack_signal); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_ACK_SIGNAL_AVG); } EXPORT_SYMBOL_GPL(mt792x_sta_statistics); @@ -556,6 +598,7 @@ int mt792x_init_wiphy(struct ieee80211_hw *hw) hw->sta_data_size = sizeof(struct mt792x_sta); hw->vif_data_size = sizeof(struct mt792x_vif); + hw->chanctx_data_size = sizeof(struct mt792x_chanctx); if (dev->fw_features & MT792x_FW_CAP_CNM) { wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; @@ -766,6 +809,10 @@ int __mt792xe_mcu_drv_pmctrl(struct mt792x_dev *dev) for (i = 0; i < MT792x_DRV_OWN_RETRY_COUNT; i++) { mt76_wr(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_CLR_OWN); + + if (dev->aspm_supported) + usleep_range(2000, 3000); + if (mt76_poll_msec_tick(dev, MT_CONN_ON_LPCTL, PCIE_LPCR_HOST_OWN_SYNC, 0, 50, 1)) break; diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c index 5cc2d59b774a..6f9db782338e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x_dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt792x_dma.c @@ -340,7 +340,7 @@ int mt792x_poll_rx(struct napi_struct *napi, int budget) struct mt792x_dev *dev; int done; - dev = container_of(napi->dev, struct mt792x_dev, mt76.napi_dev); + dev = mt76_priv(napi->dev); if (!mt76_connac_pm_ref(&dev->mphy, &dev->pm)) { napi_complete(napi); diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c index 
eb29434abee1..106273935b26 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt792x_mac.c @@ -138,6 +138,7 @@ EXPORT_SYMBOL_GPL(mt792x_mac_update_mib_stats); struct mt76_wcid *mt792x_rx_get_wcid(struct mt792x_dev *dev, u16 idx, bool unicast) { + struct mt792x_link_sta *link; struct mt792x_sta *sta; struct mt76_wcid *wcid; @@ -151,11 +152,12 @@ struct mt76_wcid *mt792x_rx_get_wcid(struct mt792x_dev *dev, u16 idx, if (!wcid->sta) return NULL; - sta = container_of(wcid, struct mt792x_sta, wcid); + link = container_of(wcid, struct mt792x_link_sta, wcid); + sta = container_of(link, struct mt792x_sta, deflink); if (!sta->vif) return NULL; - return &sta->vif->sta.wcid; + return &sta->vif->sta.deflink.wcid; } EXPORT_SYMBOL_GPL(mt792x_rx_get_wcid); @@ -173,7 +175,7 @@ mt792x_mac_rssi_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) if (!ether_addr_equal(vif->addr, hdr->addr1)) return; - ewma_rssi_add(&mvif->rssi, -status->signal); + ewma_rssi_add(&mvif->bss_conf.rssi, -status->signal); } void mt792x_mac_assoc_rssi(struct mt792x_dev *dev, struct sk_buff *skb) diff --git a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c index b49668a4b784..76272a03b22e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt792x_usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt792x_usb.c @@ -285,12 +285,12 @@ int mt792xu_init_reset(struct mt792x_dev *dev) } EXPORT_SYMBOL_GPL(mt792xu_init_reset); -void mt792xu_stop(struct ieee80211_hw *hw) +void mt792xu_stop(struct ieee80211_hw *hw, bool suspend) { struct mt792x_dev *dev = mt792x_hw_dev(hw); mt76u_stop_tx(&dev->mt76); - mt792x_stop(hw); + mt792x_stop(hw, false); } EXPORT_SYMBOL_GPL(mt792xu_stop); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c index 73e633d0d700..69a7d9b2e38b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/dma.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/dma.c @@ -641,7 +641,7 @@ int mt7996_dma_init(struct mt7996_dev *dev) if (ret < 0) return ret; - netif_napi_add_tx(&dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, + netif_napi_add_tx(dev->mt76.tx_napi_dev, &dev->mt76.tx_napi, mt7996_poll_tx); napi_enable(&dev->mt76.tx_napi); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/main.c b/drivers/net/wireless/mediatek/mt76/mt7996/main.c index 7c97140d8255..bce082038219 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/main.c @@ -93,7 +93,7 @@ static int mt7996_start(struct ieee80211_hw *hw) return ret; } -static void mt7996_stop(struct ieee80211_hw *hw) +static void mt7996_stop(struct ieee80211_hw *hw, bool suspend) { struct mt7996_dev *dev = mt7996_hw_dev(hw); struct mt7996_phy *phy = mt7996_hw_phy(hw); diff --git a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c index 2c8578677800..2e4fa9f48dfb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7996/mcu.c @@ -2002,7 +2002,7 @@ mt7996_mcu_sta_rate_ctrl_tlv(struct sk_buff *skb, struct mt7996_dev *dev, ra->valid = true; ra->auto_rate = true; - ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, sta); + ra->phy_mode = mt76_connac_get_phy_mode(mphy, vif, band, &sta->deflink); ra->channel = chandef->chan->hw_value; ra->bw = (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_320) ? 
CMD_CBW_320MHZ : sta->deflink.bandwidth; @@ -2157,11 +2157,13 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif, struct ieee80211_sta *sta, bool enable, bool newly) { struct mt7996_vif *mvif = (struct mt7996_vif *)vif->drv_priv; + struct ieee80211_link_sta *link_sta; struct mt7996_sta *msta; struct sk_buff *skb; int ret; msta = sta ? (struct mt7996_sta *)sta->drv_priv : &mvif->sta; + link_sta = sta ? &sta->deflink : NULL; skb = __mt76_connac_mcu_alloc_sta_req(&dev->mt76, &mvif->mt76, &msta->wcid, @@ -2170,7 +2172,8 @@ int mt7996_mcu_add_sta(struct mt7996_dev *dev, struct ieee80211_vif *vif, return PTR_ERR(skb); /* starec basic */ - mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, sta, enable, newly); + mt76_connac_mcu_sta_basic_tlv(&dev->mt76, skb, vif, link_sta, + enable, newly); if (!enable) goto out; diff --git a/drivers/net/wireless/mediatek/mt76/pci.c b/drivers/net/wireless/mediatek/mt76/pci.c index 4c1c159fbb62..b5031ca7f73f 100644 --- a/drivers/net/wireless/mediatek/mt76/pci.c +++ b/drivers/net/wireless/mediatek/mt76/pci.c @@ -45,3 +45,26 @@ void mt76_pci_disable_aspm(struct pci_dev *pdev) aspm_conf); } EXPORT_SYMBOL_GPL(mt76_pci_disable_aspm); + +bool mt76_pci_aspm_supported(struct pci_dev *pdev) +{ + struct pci_dev *parent = pdev->bus->self; + u16 aspm_conf, parent_aspm_conf = 0; + bool result = true; + + pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &aspm_conf); + aspm_conf &= PCI_EXP_LNKCTL_ASPMC; + if (parent) { + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, + &parent_aspm_conf); + parent_aspm_conf &= PCI_EXP_LNKCTL_ASPMC; + } + + if (!aspm_conf && (!parent || !parent_aspm_conf)) { + /* aspm already disabled */ + result = false; + } + + return result; +} +EXPORT_SYMBOL_GPL(mt76_pci_aspm_supported); diff --git a/drivers/net/wireless/mediatek/mt7601u/main.c b/drivers/net/wireless/mediatek/mt7601u/main.c index a7330576486b..7570c6ceecea 100644 --- a/drivers/net/wireless/mediatek/mt7601u/main.c +++ b/drivers/net/wireless/mediatek/mt7601u/main.c @@ -28,7 +28,7 @@ out: return ret; } -static void mt7601u_stop(struct ieee80211_hw *hw) +static void mt7601u_stop(struct ieee80211_hw *hw, bool suspend) { struct mt7601u_dev *dev = hw->priv; diff --git a/drivers/net/wireless/microchip/wilc1000/cfg80211.c b/drivers/net/wireless/microchip/wilc1000/cfg80211.c index 089102ed9ae5..eb37b228d54e 100644 --- a/drivers/net/wireless/microchip/wilc1000/cfg80211.c +++ b/drivers/net/wireless/microchip/wilc1000/cfg80211.c @@ -1617,23 +1617,6 @@ static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) return 0; } -static int wilc_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow) -{ - struct wilc *wl = wiphy_priv(wiphy); - - if (!wow && wilc_wlan_get_num_conn_ifcs(wl)) - wl->suspend_event = true; - else - wl->suspend_event = false; - - return 0; -} - -static int wilc_resume(struct wiphy *wiphy) -{ - return 0; -} - static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled) { struct wilc *wl = wiphy_priv(wiphy); @@ -1739,8 +1722,6 @@ static const struct cfg80211_ops wilc_cfg80211_ops = { .set_power_mgmt = set_power_mgmt, .set_cqm_rssi_config = set_cqm_rssi_config, - .suspend = wilc_suspend, - .resume = wilc_resume, .set_wakeup = wilc_set_wakeup, .set_tx_power = set_tx_power, .get_tx_power = get_tx_power, @@ -1780,7 +1761,6 @@ int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type, const struct wilc_hif_func *ops) { struct wilc *wl; - struct wilc_vif *vif; int ret, i; wl = wilc_create_wiphy(dev); @@ -1809,18 +1789,9 @@ int 
wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type, ret = -ENOMEM; goto free_cfg; } - vif = wilc_netdev_ifc_init(wl, "wlan%d", WILC_STATION_MODE, - NL80211_IFTYPE_STATION, false); - if (IS_ERR(vif)) { - ret = PTR_ERR(vif); - goto free_hq; - } return 0; -free_hq: - destroy_workqueue(wl->hif_workqueue); - free_cfg: wilc_wlan_cfg_deinit(wl); diff --git a/drivers/net/wireless/microchip/wilc1000/fw.h b/drivers/net/wireless/microchip/wilc1000/fw.h index 5c5cac4aab02..7a930e89614c 100644 --- a/drivers/net/wireless/microchip/wilc1000/fw.h +++ b/drivers/net/wireless/microchip/wilc1000/fw.h @@ -13,6 +13,12 @@ #define WILC_MAX_RATES_SUPPORTED 12 #define WILC_MAX_NUM_PMKIDS 16 #define WILC_MAX_NUM_SCANNED_CH 14 +#define WILC_NVMEM_MAX_NUM_BANK 6 +#define WILC_NVMEM_BANK_BASE 0x30000000 +#define WILC_NVMEM_LOW_BANK_OFFSET 0x102c +#define WILC_NVMEM_HIGH_BANK_OFFSET 0x1380 +#define WILC_NVMEM_IS_BANK_USED BIT(31) +#define WILC_NVMEM_IS_BANK_INVALID BIT(30) struct wilc_assoc_resp { __le16 capab_info; @@ -127,4 +133,11 @@ struct wilc_external_auth_param { __le32 key_mgmt_suites; __le16 status; } __packed; + +static inline u32 get_bank_offset_from_bank_index(unsigned int i) +{ + return (((i) < 2) ? WILC_NVMEM_LOW_BANK_OFFSET + ((i) * 32) : + WILC_NVMEM_HIGH_BANK_OFFSET + ((i) - 2) * 16); +} + #endif diff --git a/drivers/net/wireless/microchip/wilc1000/hif.c b/drivers/net/wireless/microchip/wilc1000/hif.c index 7719e4f3e2a2..3c48e1a57b24 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.c +++ b/drivers/net/wireless/microchip/wilc1000/hif.c @@ -1294,7 +1294,7 @@ int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr) return result; } -int wilc_set_mac_address(struct wilc_vif *vif, u8 *mac_addr) +int wilc_set_mac_address(struct wilc_vif *vif, const u8 *mac_addr) { struct wid wid; int result; @@ -1302,7 +1302,7 @@ int wilc_set_mac_address(struct wilc_vif *vif, u8 *mac_addr) wid.id = WID_MAC_ADDR; wid.type = WID_STR; wid.size = ETH_ALEN; - wid.val = mac_addr; + wid.val = (u8 *)mac_addr; result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result) diff --git a/drivers/net/wireless/microchip/wilc1000/hif.h b/drivers/net/wireless/microchip/wilc1000/hif.h index 0d380586b1d9..96eeaf31d237 100644 --- a/drivers/net/wireless/microchip/wilc1000/hif.h +++ b/drivers/net/wireless/microchip/wilc1000/hif.h @@ -167,7 +167,7 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len, u8 cipher_mode); int wilc_set_pmkid_info(struct wilc_vif *vif, struct wilc_pmkid_attr *pmkid); int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr); -int wilc_set_mac_address(struct wilc_vif *vif, u8 *mac_addr); +int wilc_set_mac_address(struct wilc_vif *vif, const u8 *mac_addr); int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ies, size_t ies_len); int wilc_disconnect(struct wilc_vif *vif); diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.c b/drivers/net/wireless/microchip/wilc1000/netdev.c index 710e29bea560..9ecf3fb29b55 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.c +++ b/drivers/net/wireless/microchip/wilc1000/netdev.c @@ -590,7 +590,6 @@ static int wilc_mac_open(struct net_device *ndev) struct wilc *wl = vif->wilc; int ret = 0; struct mgmt_frame_regs mgmt_regs = {}; - u8 addr[ETH_ALEN] __aligned(2); if (!wl || !wl->dev) { netdev_err(ndev, "device not ready\n"); @@ -609,25 +608,19 @@ static int wilc_mac_open(struct net_device *ndev) return ret; } + netdev_dbg(ndev, "Mac address: %pM\n", ndev->dev_addr); + ret = 
wilc_set_mac_address(vif, ndev->dev_addr); + if (ret) { + netdev_err(ndev, "Failed to enforce MAC address in chip"); + wilc_deinit_host_int(ndev); + if (!wl->open_ifcs) + wilc_wlan_deinitialize(ndev); + return ret; + } + wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype, vif->idx); - if (is_valid_ether_addr(ndev->dev_addr)) { - ether_addr_copy(addr, ndev->dev_addr); - wilc_set_mac_address(vif, addr); - } else { - wilc_get_mac_address(vif, addr); - eth_hw_addr_set(ndev, addr); - } - netdev_dbg(ndev, "Mac address: %pM\n", ndev->dev_addr); - - if (!is_valid_ether_addr(ndev->dev_addr)) { - netdev_err(ndev, "Wrong MAC address\n"); - wilc_deinit_host_int(ndev); - wilc_wlan_deinitialize(ndev); - return -EINVAL; - } - mgmt_regs.interface_stypes = vif->mgmt_reg_stypes; /* so we detect a change */ vif->mgmt_reg_stypes = 0; @@ -681,7 +674,7 @@ static int wilc_set_mac_addr(struct net_device *dev, void *p) } srcu_read_unlock(&wilc->srcu, srcu_idx); - result = wilc_set_mac_address(vif, (u8 *)addr->sa_data); + result = wilc_set_mac_address(vif, addr->sa_data); if (result) return result; @@ -948,6 +941,7 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name, int vif_type, enum nl80211_iftype type, bool rtnl_locked) { + u8 mac_address[ETH_ALEN]; struct net_device *ndev; struct wilc_vif *vif; int ret; @@ -972,6 +966,28 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name, vif->priv.wdev.iftype = type; vif->priv.dev = ndev; + ndev->needs_free_netdev = true; + vif->iftype = vif_type; + vif->idx = wilc_get_available_idx(wl); + vif->mac_opened = 0; + + memcpy(mac_address, wl->nv_mac_address, ETH_ALEN); + /* WILC firmware uses locally administered MAC address for the + * second virtual interface (bit 1 of first byte set), but + * since it is possibly not loaded/running yet, reproduce this behavior + * in the driver during interface creation. 
+ */ + if (vif->idx) + mac_address[0] |= 0x2; + + eth_hw_addr_set(vif->ndev, mac_address); + + mutex_lock(&wl->vif_mutex); + list_add_tail_rcu(&vif->list, &wl->vif_list); + wl->vif_num += 1; + mutex_unlock(&wl->vif_mutex); + synchronize_srcu(&wl->srcu); + if (rtnl_locked) ret = cfg80211_register_netdevice(ndev); else @@ -979,29 +995,21 @@ struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name, if (ret) { ret = -EFAULT; - goto error; + goto error_remove_vif; } - ndev->needs_free_netdev = true; - vif->iftype = vif_type; - vif->idx = wilc_get_available_idx(wl); - vif->mac_opened = 0; - mutex_lock(&wl->vif_mutex); - list_add_tail_rcu(&vif->list, &wl->vif_list); - wl->vif_num += 1; - mutex_unlock(&wl->vif_mutex); - synchronize_srcu(&wl->srcu); - return vif; -error: - if (rtnl_locked) - cfg80211_unregister_netdevice(ndev); - else - unregister_netdev(ndev); +error_remove_vif: + mutex_lock(&wl->vif_mutex); + list_del_rcu(&vif->list); + wl->vif_num -= 1; + mutex_unlock(&wl->vif_mutex); + synchronize_srcu(&wl->srcu); free_netdev(ndev); return ERR_PTR(ret); } +EXPORT_SYMBOL_GPL(wilc_netdev_ifc_init); MODULE_DESCRIPTION("Atmel WILC1000 core wireless driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/microchip/wilc1000/netdev.h b/drivers/net/wireless/microchip/wilc1000/netdev.h index fde8610a9c84..95bc8b8fe65a 100644 --- a/drivers/net/wireless/microchip/wilc1000/netdev.h +++ b/drivers/net/wireless/microchip/wilc1000/netdev.h @@ -14,6 +14,7 @@ #include #include #include +#include #include "hif.h" #include "wlan.h" @@ -271,7 +272,6 @@ struct wilc { const struct firmware *firmware; struct device *dev; - bool suspend_event; struct workqueue_struct *hif_workqueue; struct wilc_cfg cfg; @@ -286,6 +286,7 @@ struct wilc { struct ieee80211_rate bitrates[ARRAY_SIZE(wilc_bitrates)]; struct ieee80211_supported_band band; u32 cipher_suites[ARRAY_SIZE(wilc_cipher_suites)]; + u8 nv_mac_address[ETH_ALEN]; }; struct wilc_wfi_mon_priv { diff --git a/drivers/net/wireless/microchip/wilc1000/sdio.c b/drivers/net/wireless/microchip/wilc1000/sdio.c index 52a770c5e76f..0043f7a0fdf9 100644 --- a/drivers/net/wireless/microchip/wilc1000/sdio.c +++ b/drivers/net/wireless/microchip/wilc1000/sdio.c @@ -24,6 +24,9 @@ MODULE_DEVICE_TABLE(sdio, wilc_sdio_ids); #define WILC_SDIO_BLOCK_SIZE 512 +static int wilc_sdio_init(struct wilc *wilc, bool resume); +static int wilc_sdio_deinit(struct wilc *wilc); + struct wilc_sdio { bool irq_gpio; u32 block_size; @@ -136,9 +139,11 @@ out: static int wilc_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { + struct wilc_sdio *sdio_priv; + struct wilc_vif *vif; struct wilc *wilc; int ret; - struct wilc_sdio *sdio_priv; + sdio_priv = kzalloc(sizeof(*sdio_priv), GFP_KERNEL); if (!sdio_priv) @@ -176,9 +181,28 @@ static int wilc_sdio_probe(struct sdio_func *func, } clk_prepare_enable(wilc->rtc_clk); + wilc_sdio_init(wilc, false); + + ret = wilc_load_mac_from_nv(wilc); + if (ret) { + pr_err("Cannot retrieve MAC address from chip\n"); + goto clk_disable_unprepare; + } + + wilc_sdio_deinit(wilc); + + vif = wilc_netdev_ifc_init(wilc, "wlan%d", WILC_STATION_MODE, + NL80211_IFTYPE_STATION, false); + if (IS_ERR(vif)) { + ret = PTR_ERR(vif); + goto clk_disable_unprepare; + } + dev_info(&func->dev, "Driver Initializing success\n"); return 0; +clk_disable_unprepare: + clk_disable_unprepare(wilc->rtc_clk); dispose_irq: irq_dispose_mapping(wilc->dev_irq_num); wilc_netdev_cleanup(wilc); @@ -225,33 +249,6 @@ static bool wilc_sdio_is_init(struct wilc *wilc) return
sdio_priv->isinit; } -static int wilc_sdio_suspend(struct device *dev) -{ - struct sdio_func *func = dev_to_sdio_func(dev); - struct wilc *wilc = sdio_get_drvdata(func); - int ret; - - dev_info(dev, "sdio suspend\n"); - chip_wakeup(wilc); - - if (!IS_ERR(wilc->rtc_clk)) - clk_disable_unprepare(wilc->rtc_clk); - - if (wilc->suspend_event) { - host_sleep_notify(wilc); - chip_allow_sleep(wilc); - } - - ret = wilc_sdio_reset(wilc); - if (ret) { - dev_err(&func->dev, "Fail reset sdio\n"); - return ret; - } - sdio_claim_host(func); - - return 0; -} - static int wilc_sdio_enable_interrupt(struct wilc *dev) { struct sdio_func *func = container_of(dev->dev, struct sdio_func, dev); @@ -617,7 +614,52 @@ static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size) static int wilc_sdio_deinit(struct wilc *wilc) { + struct sdio_func *func = dev_to_sdio_func(wilc->dev); struct wilc_sdio *sdio_priv = wilc->bus_data; + struct sdio_cmd52 cmd; + int ret; + + cmd.read_write = 1; + cmd.function = 0; + cmd.raw = 1; + + /* Disable all functions interrupts */ + cmd.address = SDIO_CCCR_IENx; + cmd.data = 0; + ret = wilc_sdio_cmd52(wilc, &cmd); + if (ret) { + dev_err(&func->dev, "Failed to disable functions interrupts\n"); + return ret; + } + + /* Disable all functions */ + cmd.address = SDIO_CCCR_IOEx; + cmd.data = 0; + ret = wilc_sdio_cmd52(wilc, &cmd); + if (ret) { + dev_err(&func->dev, + "Failed to reset all functions\n"); + return ret; + } + + /* Disable CSA */ + cmd.read_write = 0; + cmd.address = SDIO_FBR_BASE(1); + ret = wilc_sdio_cmd52(wilc, &cmd); + if (ret) { + dev_err(&func->dev, + "Failed to read CSA for function 1\n"); + return ret; + } + cmd.read_write = 1; + cmd.address = SDIO_FBR_BASE(1); + cmd.data &= ~SDIO_FBR_ENABLE_CSA; + ret = wilc_sdio_cmd52(wilc, &cmd); + if (ret) { + dev_err(&func->dev, + "Failed to disable CSA for function 1\n"); + return ret; + } sdio_priv->isinit = false; return 0; @@ -838,27 +880,12 @@ static int wilc_sdio_sync_ext(struct wilc *wilc, int nint) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); struct wilc_sdio *sdio_priv = wilc->bus_data; - u32 reg; if (nint > MAX_NUM_INT) { dev_err(&func->dev, "Too many interrupts (%d)...\n", nint); return -EINVAL; } - /** - * Disable power sequencer - **/ - if (wilc_sdio_read_reg(wilc, WILC_MISC, &reg)) { - dev_err(&func->dev, "Failed read misc reg...\n"); - return -EINVAL; - } - - reg &= ~BIT(8); - if (wilc_sdio_write_reg(wilc, WILC_MISC, reg)) { - dev_err(&func->dev, "Failed write misc reg...\n"); - return -EINVAL; - } - if (sdio_priv->irq_gpio) { u32 reg; int ret, i; @@ -942,20 +969,40 @@ static const struct wilc_hif_func wilc_hif_sdio = { .hif_is_init = wilc_sdio_is_init, }; +static int wilc_sdio_suspend(struct device *dev) +{ + struct sdio_func *func = dev_to_sdio_func(dev); + struct wilc *wilc = sdio_get_drvdata(func); + int ret; + + dev_info(dev, "sdio suspend\n"); + + if (!IS_ERR(wilc->rtc_clk)) + clk_disable_unprepare(wilc->rtc_clk); + + host_sleep_notify(wilc); + + wilc_sdio_disable_interrupt(wilc); + + ret = wilc_sdio_reset(wilc); + if (ret) { + dev_err(&func->dev, "Fail reset sdio\n"); + return ret; + } + + return 0; +} + static int wilc_sdio_resume(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); struct wilc *wilc = sdio_get_drvdata(func); dev_info(dev, "sdio resume\n"); - sdio_release_host(func); - chip_wakeup(wilc); wilc_sdio_init(wilc, true); + wilc_sdio_enable_interrupt(wilc); - if (wilc->suspend_event) - host_wakeup_notify(wilc); - - chip_allow_sleep(wilc); +
host_wakeup_notify(wilc); return 0; } diff --git a/drivers/net/wireless/microchip/wilc1000/spi.c b/drivers/net/wireless/microchip/wilc1000/spi.c index 61c3572ce321..5ff940c53ad9 100644 --- a/drivers/net/wireless/microchip/wilc1000/spi.c +++ b/drivers/net/wireless/microchip/wilc1000/spi.c @@ -206,9 +206,10 @@ static void wilc_wlan_power(struct wilc *wilc, bool on) static int wilc_bus_probe(struct spi_device *spi) { - int ret; - struct wilc *wilc; struct wilc_spi *spi_priv; + struct wilc_vif *vif; + struct wilc *wilc; + int ret; spi_priv = kzalloc(sizeof(*spi_priv), GFP_KERNEL); if (!spi_priv) @@ -249,7 +250,19 @@ static int wilc_bus_probe(struct spi_device *spi) if (ret) goto power_down; + ret = wilc_load_mac_from_nv(wilc); + if (ret) { + pr_err("Cannot retrieve MAC address from chip\n"); + goto power_down; + } + wilc_wlan_power(wilc, false); + vif = wilc_netdev_ifc_init(wilc, "wlan%d", WILC_STATION_MODE, + NL80211_IFTYPE_STATION, false); + if (IS_ERR(vif)) { + ret = PTR_ERR(vif); + goto power_down; + } return 0; power_down: diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.c b/drivers/net/wireless/microchip/wilc1000/wlan.c index a9e872a7b2c3..533939e71534 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan.c +++ b/drivers/net/wireless/microchip/wilc1000/wlan.c @@ -678,17 +678,17 @@ EXPORT_SYMBOL_GPL(chip_wakeup); void host_wakeup_notify(struct wilc *wilc) { - acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY); + acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP); wilc->hif_func->hif_write_reg(wilc, WILC_CORTUS_INTERRUPT_2, 1); - release_bus(wilc, WILC_BUS_RELEASE_ONLY); + release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP); } EXPORT_SYMBOL_GPL(host_wakeup_notify); void host_sleep_notify(struct wilc *wilc) { - acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY); + acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP); wilc->hif_func->hif_write_reg(wilc, WILC_CORTUS_INTERRUPT_1, 1); - release_bus(wilc, WILC_BUS_RELEASE_ONLY); + release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP); } EXPORT_SYMBOL_GPL(host_sleep_notify); @@ -1473,6 +1473,55 @@ u32 wilc_get_chipid(struct wilc *wilc, bool update) return wilc->chipid; } +int wilc_load_mac_from_nv(struct wilc *wl) +{ + int ret = -EINVAL; + unsigned int i; + + acquire_bus(wl, WILC_BUS_ACQUIRE_AND_WAKEUP); + + for (i = 0; i < WILC_NVMEM_MAX_NUM_BANK; i++) { + int bank_offset = get_bank_offset_from_bank_index(i); + u32 reg1, reg2; + u8 invalid; + u8 used; + + ret = wl->hif_func->hif_read_reg(wl, + WILC_NVMEM_BANK_BASE + bank_offset, + &reg1); + if (ret) { + pr_err("Cannot read address %d lower part", i); + break; + } + ret = wl->hif_func->hif_read_reg(wl, + WILC_NVMEM_BANK_BASE + bank_offset + 4, + &reg2); + if (ret) { + pr_err("Cannot read address %d upper part", i); + break; + } + + used = FIELD_GET(WILC_NVMEM_IS_BANK_USED, reg1); + invalid = FIELD_GET(WILC_NVMEM_IS_BANK_INVALID, reg1); + if (!used || invalid) + continue; + + wl->nv_mac_address[0] = FIELD_GET(GENMASK(23, 16), reg1); + wl->nv_mac_address[1] = FIELD_GET(GENMASK(15, 8), reg1); + wl->nv_mac_address[2] = FIELD_GET(GENMASK(7, 0), reg1); + wl->nv_mac_address[3] = FIELD_GET(GENMASK(31, 24), reg2); + wl->nv_mac_address[4] = FIELD_GET(GENMASK(23, 16), reg2); + wl->nv_mac_address[5] = FIELD_GET(GENMASK(15, 8), reg2); + + ret = 0; + break; + } + + release_bus(wl, WILC_BUS_RELEASE_ALLOW_SLEEP); + return ret; +} +EXPORT_SYMBOL_GPL(wilc_load_mac_from_nv); + int wilc_wlan_init(struct net_device *dev) { int ret = 0; diff --git a/drivers/net/wireless/microchip/wilc1000/wlan.h b/drivers/net/wireless/microchip/wilc1000/wlan.h
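/*
 * wilc_load_mac_from_nv() above walks up to WILC_NVMEM_MAX_NUM_BANK
 * banks, skips banks that are unused or flagged invalid, and unpacks
 * the MAC address from two 32-bit words. A self-contained sketch of
 * that unpacking, with the FIELD_GET() calls spelled out as shifts and
 * masks; the demo_* names are illustrative only:
 */
#include <stdbool.h>
#include <stdint.h>

#define DEMO_BANK_USED    (1u << 31)    /* WILC_NVMEM_IS_BANK_USED */
#define DEMO_BANK_INVALID (1u << 30)    /* WILC_NVMEM_IS_BANK_INVALID */

/* Returns true and fills mac[] when the bank holds a valid address. */
static bool demo_unpack_nv_mac(uint32_t reg1, uint32_t reg2, uint8_t mac[6])
{
	if (!(reg1 & DEMO_BANK_USED) || (reg1 & DEMO_BANK_INVALID))
		return false;

	mac[0] = (reg1 >> 16) & 0xff;   /* FIELD_GET(GENMASK(23, 16), reg1) */
	mac[1] = (reg1 >> 8) & 0xff;    /* FIELD_GET(GENMASK(15, 8), reg1) */
	mac[2] = reg1 & 0xff;           /* FIELD_GET(GENMASK(7, 0), reg1) */
	mac[3] = (reg2 >> 24) & 0xff;   /* FIELD_GET(GENMASK(31, 24), reg2) */
	mac[4] = (reg2 >> 16) & 0xff;   /* FIELD_GET(GENMASK(23, 16), reg2) */
	mac[5] = (reg2 >> 8) & 0xff;    /* FIELD_GET(GENMASK(15, 8), reg2) */
	return true;
}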
index 54643d8fef04..dd2fb3c2f06a 100644 --- a/drivers/net/wireless/microchip/wilc1000/wlan.h +++ b/drivers/net/wireless/microchip/wilc1000/wlan.h @@ -56,7 +56,6 @@ #define WILC_HOST_RX_CTRL (WILC_PERIPH_REG_BASE + 0x80) #define WILC_HOST_RX_EXTRA_SIZE (WILC_PERIPH_REG_BASE + 0x84) #define WILC_HOST_TX_CTRL_1 (WILC_PERIPH_REG_BASE + 0x88) -#define WILC_MISC (WILC_PERIPH_REG_BASE + 0x428) #define WILC_INTR_REG_BASE (WILC_PERIPH_REG_BASE + 0xa00) #define WILC_INTR_ENABLE WILC_INTR_REG_BASE #define WILC_INTR2_ENABLE (WILC_INTR_REG_BASE + 4) @@ -445,4 +444,5 @@ int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids, u32 count); int wilc_wlan_init(struct net_device *dev); u32 wilc_get_chipid(struct wilc *wilc, bool update); +int wilc_load_mac_from_nv(struct wilc *wilc); #endif diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.c b/drivers/net/wireless/purelifi/plfxlc/mac.c index 641f847d47ab..eae93efa6150 100644 --- a/drivers/net/wireless/purelifi/plfxlc/mac.c +++ b/drivers/net/wireless/purelifi/plfxlc/mac.c @@ -111,7 +111,7 @@ int plfxlc_op_start(struct ieee80211_hw *hw) return 0; } -void plfxlc_op_stop(struct ieee80211_hw *hw) +void plfxlc_op_stop(struct ieee80211_hw *hw, bool suspend) { struct plfxlc_mac *mac = plfxlc_hw_mac(hw); diff --git a/drivers/net/wireless/purelifi/plfxlc/mac.h b/drivers/net/wireless/purelifi/plfxlc/mac.h index 49b92413729b..9384acddcf26 100644 --- a/drivers/net/wireless/purelifi/plfxlc/mac.h +++ b/drivers/net/wireless/purelifi/plfxlc/mac.h @@ -178,7 +178,7 @@ int plfxlc_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, void plfxlc_mac_tx_failed(struct urb *urb); void plfxlc_mac_tx_to_dev(struct sk_buff *skb, int error); int plfxlc_op_start(struct ieee80211_hw *hw); -void plfxlc_op_stop(struct ieee80211_hw *hw); +void plfxlc_op_stop(struct ieee80211_hw *hw, bool suspend); int plfxlc_restore_settings(struct plfxlc_mac *mac); #endif /* PLFXLC_MAC_H */ diff --git a/drivers/net/wireless/purelifi/plfxlc/usb.c b/drivers/net/wireless/purelifi/plfxlc/usb.c index 311676c1ece0..15334940287d 100644 --- a/drivers/net/wireless/purelifi/plfxlc/usb.c +++ b/drivers/net/wireless/purelifi/plfxlc/usb.c @@ -408,7 +408,7 @@ void plfxlc_usb_init(struct plfxlc_usb *usb, struct ieee80211_hw *hw, void plfxlc_usb_release(struct plfxlc_usb *usb) { - plfxlc_op_stop(plfxlc_usb_to_hw(usb)); + plfxlc_op_stop(plfxlc_usb_to_hw(usb), false); plfxlc_usb_disable_tx(usb); plfxlc_usb_disable_rx(usb); usb_set_intfdata(usb->intf, NULL); @@ -761,7 +761,7 @@ static void plfxlc_usb_resume(struct plfxlc_usb *usb) static void plfxlc_usb_stop(struct plfxlc_usb *usb) { - plfxlc_op_stop(plfxlc_usb_to_hw(usb)); + plfxlc_op_stop(plfxlc_usb_to_hw(usb), false); plfxlc_usb_disable_tx(usb); plfxlc_usb_disable_rx(usb); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00.h b/drivers/net/wireless/ralink/rt2x00/rt2x00.h index 82af01448a0a..dfb4bb370f01 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00.h @@ -335,16 +335,6 @@ struct link { struct delayed_work watchdog_work; unsigned int watchdog_interval; unsigned int watchdog; - - /* - * Work structure for scheduling periodic AGC adjustments. - */ - struct delayed_work agc_work; - - /* - * Work structure for scheduling periodic VCO calibration. 
- */ - struct delayed_work vco_work; }; enum rt2x00_delayed_flags { @@ -1460,7 +1450,7 @@ void rt2x00mac_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb); int rt2x00mac_start(struct ieee80211_hw *hw); -void rt2x00mac_stop(struct ieee80211_hw *hw); +void rt2x00mac_stop(struct ieee80211_hw *hw, bool suspend); void rt2x00mac_reconfig_complete(struct ieee80211_hw *hw, enum ieee80211_reconfig_type reconfig_type); int rt2x00mac_add_interface(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c index 75fda72c14ca..451632488805 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00mac.c @@ -178,7 +178,7 @@ int rt2x00mac_start(struct ieee80211_hw *hw) } EXPORT_SYMBOL_GPL(rt2x00mac_start); -void rt2x00mac_stop(struct ieee80211_hw *hw) +void rt2x00mac_stop(struct ieee80211_hw *hw, bool suspend) { struct rt2x00_dev *rt2x00dev = hw->priv; diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c index 77b6cb7e1f6b..ded8d4d59289 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c @@ -1249,7 +1249,7 @@ static int rtl8180_start(struct ieee80211_hw *dev) return ret; } -static void rtl8180_stop(struct ieee80211_hw *dev) +static void rtl8180_stop(struct ieee80211_hw *dev, bool suspend) { struct rtl8180_priv *priv = dev->priv; u8 reg; diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c index 78d99afa373d..220ac5bdf279 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c @@ -1019,7 +1019,7 @@ rtl8187_start_exit: return ret; } -static void rtl8187_stop(struct ieee80211_hw *dev) +static void rtl8187_stop(struct ieee80211_hw *dev, bool suspend) { struct rtl8187_priv *priv = dev->priv; struct sk_buff *skb; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/8188f.c b/drivers/net/wireless/realtek/rtl8xxxu/8188f.c index bd5a0603b4a2..3abf14d7044f 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/8188f.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/8188f.c @@ -697,9 +697,14 @@ static void rtl8188fu_init_statistics(struct rtl8xxxu_priv *priv) rtl8xxxu_write32(priv, REG_OFDM0_FA_RSTC, val32); } +#define TX_POWER_INDEX_MAX 0x3F +#define TX_POWER_INDEX_DEFAULT_CCK 0x22 +#define TX_POWER_INDEX_DEFAULT_HT40 0x27 + static int rtl8188fu_parse_efuse(struct rtl8xxxu_priv *priv) { struct rtl8188fu_efuse *efuse = &priv->efuse_wifi.efuse8188fu; + int i; if (efuse->rtl_id != cpu_to_le16(0x8129)) return -EINVAL; @@ -713,6 +718,16 @@ static int rtl8188fu_parse_efuse(struct rtl8xxxu_priv *priv) efuse->tx_power_index_A.ht40_base, sizeof(efuse->tx_power_index_A.ht40_base)); + for (i = 0; i < ARRAY_SIZE(priv->cck_tx_power_index_A); i++) { + if (priv->cck_tx_power_index_A[i] > TX_POWER_INDEX_MAX) + priv->cck_tx_power_index_A[i] = TX_POWER_INDEX_DEFAULT_CCK; + } + + for (i = 0; i < ARRAY_SIZE(priv->ht40_1s_tx_power_index_A); i++) { + if (priv->ht40_1s_tx_power_index_A[i] > TX_POWER_INDEX_MAX) + priv->ht40_1s_tx_power_index_A[i] = TX_POWER_INDEX_DEFAULT_HT40; + } + priv->ofdm_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.a; priv->ht20_tx_power_diff[0].a = efuse->tx_power_index_A.ht20_ofdm_1s_diff.b; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/core.c 
b/drivers/net/wireless/realtek/rtl8xxxu/core.c index 89a841b4e8d5..043fa364e701 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/core.c @@ -6679,7 +6679,6 @@ static void rtl8xxxu_switch_ports(struct rtl8xxxu_priv *priv) u8 macid[ETH_ALEN], bssid[ETH_ALEN], macid_1[ETH_ALEN], bssid_1[ETH_ALEN]; u8 msr, bcn_ctrl, bcn_ctrl_1, atimwnd[2], atimwnd_1[2]; struct rtl8xxxu_vif *rtlvif; - struct ieee80211_vif *vif; u8 tsftr[8], tsftr_1[8]; int i; @@ -6744,10 +6743,7 @@ static void rtl8xxxu_switch_ports(struct rtl8xxxu_priv *priv) /* write bcn ctl */ rtl8xxxu_write8(priv, REG_BEACON_CTRL, bcn_ctrl_1); rtl8xxxu_write8(priv, REG_BEACON_CTRL_1, bcn_ctrl); - - vif = priv->vifs[0]; - priv->vifs[0] = priv->vifs[1]; - priv->vifs[1] = vif; + swap(priv->vifs[0], priv->vifs[1]); /* priv->vifs[0] is NULL here, based on how this function is currently * called from rtl8xxxu_add_interface(). @@ -7521,7 +7517,7 @@ error_out: return ret; } -static void rtl8xxxu_stop(struct ieee80211_hw *hw) +static void rtl8xxxu_stop(struct ieee80211_hw *hw, bool suspend) { struct rtl8xxxu_priv *priv = hw->priv; unsigned long flags; diff --git a/drivers/net/wireless/realtek/rtlwifi/Kconfig b/drivers/net/wireless/realtek/rtlwifi/Kconfig index cfe63f7b28d9..1e66c1bf7c8b 100644 --- a/drivers/net/wireless/realtek/rtlwifi/Kconfig +++ b/drivers/net/wireless/realtek/rtlwifi/Kconfig @@ -119,6 +119,18 @@ config RTL8192CU If you choose to build it as a module, it will be called rtl8192cu +config RTL8192DU + tristate "Realtek RTL8192DU USB Wireless Network Adapter" + depends on USB + select RTLWIFI + select RTLWIFI_USB + select RTL8192D_COMMON + help + This is the driver for Realtek RTL8192DU 802.11n USB + wireless network adapters. + + If you choose to build it as a module, it will be called rtl8192du + config RTLWIFI tristate select FW_LOADER diff --git a/drivers/net/wireless/realtek/rtlwifi/Makefile b/drivers/net/wireless/realtek/rtlwifi/Makefile index 423981b148df..9cf32277c7f1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/Makefile +++ b/drivers/net/wireless/realtek/rtlwifi/Makefile @@ -25,6 +25,7 @@ obj-$(CONFIG_RTL8192CU) += rtl8192cu/ obj-$(CONFIG_RTL8192SE) += rtl8192se/ obj-$(CONFIG_RTL8192D_COMMON) += rtl8192d/ obj-$(CONFIG_RTL8192DE) += rtl8192de/ +obj-$(CONFIG_RTL8192DU) += rtl8192du/ obj-$(CONFIG_RTL8723AE) += rtl8723ae/ obj-$(CONFIG_RTL8723BE) += rtl8723be/ obj-$(CONFIG_RTL8188EE) += rtl8188ee/ diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c index 1a8d715b7c07..aab4605de9c4 100644 --- a/drivers/net/wireless/realtek/rtlwifi/base.c +++ b/drivers/net/wireless/realtek/rtlwifi/base.c @@ -2272,7 +2272,7 @@ static void rtl_c2h_content_parsing(struct ieee80211_hw *hw, struct sk_buff *skb) { struct rtl_priv *rtlpriv = rtl_priv(hw); - struct rtl_hal_ops *hal_ops = rtlpriv->cfg->ops; + const struct rtl_hal_ops *hal_ops = rtlpriv->cfg->ops; const struct rtl_btc_ops *btc_ops = rtlpriv->btcoexist.btc_ops; u8 cmd_id, cmd_len; u8 *cmd_buf = NULL; diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c index 42b7db12b1bd..7537f04b1930 100644 --- a/drivers/net/wireless/realtek/rtlwifi/core.c +++ b/drivers/net/wireless/realtek/rtlwifi/core.c @@ -144,7 +144,7 @@ static int rtl_op_start(struct ieee80211_hw *hw) return err; } -static void rtl_op_stop(struct ieee80211_hw *hw) +static void rtl_op_stop(struct ieee80211_hw *hw, bool suspend) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac 
*mac = rtl_mac(rtl_priv(hw)); @@ -547,7 +547,7 @@ static int rtl_op_suspend(struct ieee80211_hw *hw, rtlhal->enter_pnp_sleep = true; rtl_lps_leave(hw, true); - rtl_op_stop(hw); + rtl_op_stop(hw, false); device_set_wakeup_enable(wiphy_dev(hw->wiphy), true); return 0; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c index 37bb59fa8bfa..35875cda30fc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/sw.c @@ -27,7 +27,7 @@ static void rtl88e_init_aspm_vars(struct ieee80211_hw *hw) * 2 - Enable ASPM with Clock Req, * 3 - Alwyas Enable ASPM with Clock Req, * 4 - Always Enable ASPM without Clock Req. - * set defult to RTL8192CE:3 RTL8192E:2 + * set default to RTL8192CE:3 RTL8192E:2 */ rtlpci->const_pci_aspm = 3; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c index e20f2bec45c4..ce7c28d9c874 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ce/sw.c @@ -31,7 +31,7 @@ static void rtl92c_init_aspm_vars(struct ieee80211_hw *hw) * 2 - Enable ASPM with Clock Req, * 3 - Alwyas Enable ASPM with Clock Req, * 4 - Always Enable ASPM without Clock Req. - * set defult to RTL8192CE:3 RTL8192E:2 + * set default to RTL8192CE:3 RTL8192E:2 * */ rtlpci->const_pci_aspm = 3; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c index 48be7e346efc..c9b9e2bc90cc 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192cu/sw.c @@ -53,8 +53,6 @@ static int rtl92cu_init_sw_vars(struct ieee80211_hw *hw) } else { fw_name = "rtlwifi/rtl8192cufw_TMSC.bin"; } - /* provide name of alternative file */ - rtlpriv->cfg->alt_fw_name = "rtlwifi/rtl8192cufw.bin"; pr_info("Loading firmware %s\n", fw_name); rtlpriv->max_fw_size = 0x4000; err = request_firmware_nowait(THIS_MODULE, 1, @@ -160,6 +158,7 @@ static struct rtl_hal_usbint_cfg rtl92cu_interface_cfg = { static struct rtl_hal_cfg rtl92cu_hal_cfg = { .name = "rtl92c_usb", + .alt_fw_name = "rtlwifi/rtl8192cufw.bin", .ops = &rtl8192cu_hal_ops, .mod_params = &rtl92cu_mod_params, .usb_interface_cfg = &rtl92cu_interface_cfg, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c index 6570d5e168e9..97e0d9c01e0a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.c @@ -14,7 +14,7 @@ #include "hw_common.h" #include "phy_common.h" -void rtl92de_stop_tx_beacon(struct ieee80211_hw *hw) +void rtl92d_stop_tx_beacon(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 tmp1byte; @@ -27,9 +27,9 @@ void rtl92de_stop_tx_beacon(struct ieee80211_hw *hw) tmp1byte &= ~(BIT(0)); rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte); } -EXPORT_SYMBOL_GPL(rtl92de_stop_tx_beacon); +EXPORT_SYMBOL_GPL(rtl92d_stop_tx_beacon); -void rtl92de_resume_tx_beacon(struct ieee80211_hw *hw) +void rtl92d_resume_tx_beacon(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 tmp1byte; @@ -42,7 +42,7 @@ void rtl92de_resume_tx_beacon(struct ieee80211_hw *hw) tmp1byte |= BIT(0); rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte); } -EXPORT_SYMBOL_GPL(rtl92de_resume_tx_beacon); +EXPORT_SYMBOL_GPL(rtl92d_resume_tx_beacon); void 
rtl92d_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) { @@ -285,7 +285,7 @@ void rtl92d_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) } EXPORT_SYMBOL_GPL(rtl92d_set_hw_reg); -bool rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) +bool rtl92d_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) { struct rtl_priv *rtlpriv = rtl_priv(hw); bool status = true; @@ -307,9 +307,9 @@ bool rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data) } while (++count); return status; } -EXPORT_SYMBOL_GPL(rtl92de_llt_write); +EXPORT_SYMBOL_GPL(rtl92d_llt_write); -void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw) +void rtl92d_enable_hw_security_config(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 sec_reg_value; @@ -334,16 +334,16 @@ void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw) "The SECR-value %x\n", sec_reg_value); rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value); } -EXPORT_SYMBOL_GPL(rtl92de_enable_hw_security_config); +EXPORT_SYMBOL_GPL(rtl92d_enable_hw_security_config); /* don't set REG_EDCA_BE_PARAM here because * mac80211 will send pkt when scan */ -void rtl92de_set_qos(struct ieee80211_hw *hw, int aci) +void rtl92d_set_qos(struct ieee80211_hw *hw, int aci) { rtl92d_dm_init_edca_turbo(hw); } -EXPORT_SYMBOL_GPL(rtl92de_set_qos); +EXPORT_SYMBOL_GPL(rtl92d_set_qos); static enum version_8192d _rtl92d_read_chip_version(struct ieee80211_hw *hw) { @@ -362,8 +362,8 @@ static enum version_8192d _rtl92d_read_chip_version(struct ieee80211_hw *hw) return version; } -static void _rtl92de_readpowervalue_fromprom(struct txpower_info *pwrinfo, - u8 *efuse, bool autoloadfail) +static void _rtl92d_readpowervalue_fromprom(struct txpower_info *pwrinfo, + u8 *efuse, bool autoloadfail) { u32 rfpath, eeaddr, group, offset, offset1, offset2; u8 i, val8; @@ -500,8 +500,8 @@ static void _rtl92de_readpowervalue_fromprom(struct txpower_info *pwrinfo, } } -static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw, - bool autoload_fail, u8 *hwinfo) +static void _rtl92d_read_txpower_info(struct ieee80211_hw *hw, + bool autoload_fail, u8 *hwinfo) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); @@ -509,7 +509,7 @@ static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw, u8 tempval[2], i, pwr, diff; u32 ch, rfpath, group; - _rtl92de_readpowervalue_fromprom(&pwrinfo, hwinfo, autoload_fail); + _rtl92d_readpowervalue_fromprom(&pwrinfo, hwinfo, autoload_fail); if (!autoload_fail) { /* bit0~2 */ rtlefuse->eeprom_regulatory = (hwinfo[EEPROM_RF_OPT1] & 0x7); @@ -613,8 +613,8 @@ static void _rtl92de_read_txpower_info(struct ieee80211_hw *hw, } } -static void _rtl92de_read_macphymode_from_prom(struct ieee80211_hw *hw, - u8 *content) +static void _rtl92d_read_macphymode_from_prom(struct ieee80211_hw *hw, + u8 *content) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); @@ -636,15 +636,15 @@ static void _rtl92de_read_macphymode_from_prom(struct ieee80211_hw *hw, } } -static void _rtl92de_read_macphymode_and_bandtype(struct ieee80211_hw *hw, - u8 *content) +static void _rtl92d_read_macphymode_and_bandtype(struct ieee80211_hw *hw, + u8 *content) { - _rtl92de_read_macphymode_from_prom(hw, content); + _rtl92d_read_macphymode_from_prom(hw, content); rtl92d_phy_config_macphymode(hw); rtl92d_phy_config_macphymode_info(hw); } -static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw) +static void 
_rtl92d_efuse_update_chip_version(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); enum version_8192d chipver = rtlpriv->rtlhal.version; @@ -676,7 +676,7 @@ static void _rtl92de_efuse_update_chip_version(struct ieee80211_hw *hw) rtlpriv->rtlhal.version = chipver; } -static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw) +static void _rtl92d_read_adapter_info(struct ieee80211_hw *hw) { static const int params_pci[] = { RTL8190_EEPROM_ID, EEPROM_VID, EEPROM_DID, @@ -706,8 +706,8 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw) if (rtl_get_hwinfo(hw, rtlpriv, HWSET_MAX_SIZE, hwinfo, params)) goto exit; - _rtl92de_efuse_update_chip_version(hw); - _rtl92de_read_macphymode_and_bandtype(hw, hwinfo); + _rtl92d_efuse_update_chip_version(hw); + _rtl92d_read_macphymode_and_bandtype(hw, hwinfo); /* Read Permanent MAC address for 2nd interface */ if (rtlhal->interfaceindex != 0) @@ -717,7 +717,7 @@ static void _rtl92de_read_adapter_info(struct ieee80211_hw *hw) rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, rtlefuse->dev_addr); rtl_dbg(rtlpriv, COMP_INIT, DBG_DMESG, "%pM\n", rtlefuse->dev_addr); - _rtl92de_read_txpower_info(hw, rtlefuse->autoload_failflag, hwinfo); + _rtl92d_read_txpower_info(hw, rtlefuse->autoload_failflag, hwinfo); /* Read Channel Plan */ switch (rtlhal->bandset) { @@ -739,7 +739,7 @@ exit: kfree(hwinfo); } -void rtl92de_read_eeprom_info(struct ieee80211_hw *hw) +void rtl92d_read_eeprom_info(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); @@ -760,15 +760,15 @@ void rtl92de_read_eeprom_info(struct ieee80211_hw *hw) rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "Autoload OK\n"); rtlefuse->autoload_failflag = false; - _rtl92de_read_adapter_info(hw); + _rtl92d_read_adapter_info(hw); } else { pr_err("Autoload ERR!!\n"); } } -EXPORT_SYMBOL_GPL(rtl92de_read_eeprom_info); +EXPORT_SYMBOL_GPL(rtl92d_read_eeprom_info); -static void rtl92de_update_hal_rate_table(struct ieee80211_hw *hw, - struct ieee80211_sta *sta) +static void rtl92d_update_hal_rate_table(struct ieee80211_hw *hw, + struct ieee80211_sta *sta) { struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -851,9 +851,9 @@ static void rtl92de_update_hal_rate_table(struct ieee80211_hw *hw, rtl_read_dword(rtlpriv, REG_ARFR0)); } -static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - u8 rssi_level, bool update_bw) +static void rtl92d_update_hal_rate_mask(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 rssi_level, bool update_bw) { struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -1009,20 +1009,20 @@ static void rtl92de_update_hal_rate_mask(struct ieee80211_hw *hw, sta_entry->ratr_index = ratr_index; } -void rtl92de_update_hal_rate_tbl(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - u8 rssi_level, bool update_bw) +void rtl92d_update_hal_rate_tbl(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 rssi_level, bool update_bw) { struct rtl_priv *rtlpriv = rtl_priv(hw); if (rtlpriv->dm.useramask) - rtl92de_update_hal_rate_mask(hw, sta, rssi_level, update_bw); + rtl92d_update_hal_rate_mask(hw, sta, rssi_level, update_bw); else - rtl92de_update_hal_rate_table(hw, sta); + rtl92d_update_hal_rate_table(hw, sta); } -EXPORT_SYMBOL_GPL(rtl92de_update_hal_rate_tbl); +EXPORT_SYMBOL_GPL(rtl92d_update_hal_rate_tbl); -void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw) 
+void rtl92d_update_channel_access_setting(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -1036,9 +1036,9 @@ void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw) sifs_timer = 0x1010; rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer); } -EXPORT_SYMBOL_GPL(rtl92de_update_channel_access_setting); +EXPORT_SYMBOL_GPL(rtl92d_update_channel_access_setting); -bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) +bool rtl92d_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); @@ -1093,11 +1093,11 @@ bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid) *valid = 1; return !ppsc->hwradiooff; } -EXPORT_SYMBOL_GPL(rtl92de_gpio_radio_on_off_checking); +EXPORT_SYMBOL_GPL(rtl92d_gpio_radio_on_off_checking); -void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index, - u8 *p_macaddr, bool is_group, u8 enc_algo, - bool is_wepkey, bool clear_all) +void rtl92d_set_key(struct ieee80211_hw *hw, u32 key_index, + u8 *p_macaddr, bool is_group, u8 enc_algo, + bool is_wepkey, bool clear_all) { static const u8 cam_const_addr[4][6] = { {0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, @@ -1222,4 +1222,4 @@ void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index, } } } -EXPORT_SYMBOL_GPL(rtl92de_set_key); +EXPORT_SYMBOL_GPL(rtl92d_set_key); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h index 2c07f5cc5766..4da1bab15f36 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/hw_common.h @@ -4,21 +4,21 @@ #ifndef __RTL92D_HW_COMMON_H__ #define __RTL92D_HW_COMMON_H__ -void rtl92de_stop_tx_beacon(struct ieee80211_hw *hw); -void rtl92de_resume_tx_beacon(struct ieee80211_hw *hw); +void rtl92d_stop_tx_beacon(struct ieee80211_hw *hw); +void rtl92d_resume_tx_beacon(struct ieee80211_hw *hw); void rtl92d_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); void rtl92d_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); -bool rtl92de_llt_write(struct ieee80211_hw *hw, u32 address, u32 data); -void rtl92de_enable_hw_security_config(struct ieee80211_hw *hw); -void rtl92de_set_qos(struct ieee80211_hw *hw, int aci); -void rtl92de_read_eeprom_info(struct ieee80211_hw *hw); -void rtl92de_update_hal_rate_tbl(struct ieee80211_hw *hw, - struct ieee80211_sta *sta, - u8 rssi_level, bool update_bw); -void rtl92de_update_channel_access_setting(struct ieee80211_hw *hw); -bool rtl92de_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid); -void rtl92de_set_key(struct ieee80211_hw *hw, u32 key_index, - u8 *p_macaddr, bool is_group, u8 enc_algo, - bool is_wepkey, bool clear_all); +bool rtl92d_llt_write(struct ieee80211_hw *hw, u32 address, u32 data); +void rtl92d_enable_hw_security_config(struct ieee80211_hw *hw); +void rtl92d_set_qos(struct ieee80211_hw *hw, int aci); +void rtl92d_read_eeprom_info(struct ieee80211_hw *hw); +void rtl92d_update_hal_rate_tbl(struct ieee80211_hw *hw, + struct ieee80211_sta *sta, + u8 rssi_level, bool update_bw); +void rtl92d_update_channel_access_setting(struct ieee80211_hw *hw); +bool rtl92d_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid); +void rtl92d_set_key(struct ieee80211_hw *hw, u32 key_index, + u8 *p_macaddr, bool is_group, u8 enc_algo, + bool is_wepkey, bool clear_all); #endif diff 
--git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c index 72d2b7426d82..9f9a34492030 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.c @@ -7,8 +7,8 @@ #include "def.h" #include "trx_common.h" -static long _rtl92de_translate_todbm(struct ieee80211_hw *hw, - u8 signal_strength_index) +static long _rtl92d_translate_todbm(struct ieee80211_hw *hw, + u8 signal_strength_index) { long signal_power; @@ -17,13 +17,13 @@ static long _rtl92de_translate_todbm(struct ieee80211_hw *hw, return signal_power; } -static void _rtl92de_query_rxphystatus(struct ieee80211_hw *hw, - struct rtl_stats *pstats, - __le32 *pdesc, - struct rx_fwinfo_92d *p_drvinfo, - bool packet_match_bssid, - bool packet_toself, - bool packet_beacon) +static void _rtl92d_query_rxphystatus(struct ieee80211_hw *hw, + struct rtl_stats *pstats, + __le32 *pdesc, + struct rx_fwinfo_92d *p_drvinfo, + bool packet_match_bssid, + bool packet_toself, + bool packet_beacon) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &rtlpriv->phy; @@ -203,8 +203,8 @@ static void rtl92d_loop_over_paths(struct ieee80211_hw *hw, } } -static void _rtl92de_process_ui_rssi(struct ieee80211_hw *hw, - struct rtl_stats *pstats) +static void _rtl92d_process_ui_rssi(struct ieee80211_hw *hw, + struct rtl_stats *pstats) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rt_smooth_data *ui_rssi; @@ -226,15 +226,15 @@ static void _rtl92de_process_ui_rssi(struct ieee80211_hw *hw, if (ui_rssi->index >= PHY_RSSI_SLID_WIN_MAX) ui_rssi->index = 0; tmpval = ui_rssi->total_val / ui_rssi->total_num; - rtlpriv->stats.signal_strength = _rtl92de_translate_todbm(hw, (u8)tmpval); + rtlpriv->stats.signal_strength = _rtl92d_translate_todbm(hw, (u8)tmpval); pstats->rssi = rtlpriv->stats.signal_strength; if (!pstats->is_cck && pstats->packet_toself) rtl92d_loop_over_paths(hw, pstats); } -static void _rtl92de_update_rxsignalstatistics(struct ieee80211_hw *hw, - struct rtl_stats *pstats) +static void _rtl92d_update_rxsignalstatistics(struct ieee80211_hw *hw, + struct rtl_stats *pstats) { struct rtl_priv *rtlpriv = rtl_priv(hw); int weighting = 0; @@ -249,8 +249,8 @@ static void _rtl92de_update_rxsignalstatistics(struct ieee80211_hw *hw, 5 + pstats->recvsignalpower + weighting) / 6; } -static void _rtl92de_process_pwdb(struct ieee80211_hw *hw, - struct rtl_stats *pstats) +static void _rtl92d_process_pwdb(struct ieee80211_hw *hw, + struct rtl_stats *pstats) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -276,7 +276,7 @@ static void _rtl92de_process_pwdb(struct ieee80211_hw *hw, (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR); } rtlpriv->dm.undec_sm_pwdb = undec_sm_pwdb; - _rtl92de_update_rxsignalstatistics(hw, pstats); + _rtl92d_update_rxsignalstatistics(hw, pstats); } } @@ -301,8 +301,8 @@ static void rtl92d_loop_over_streams(struct ieee80211_hw *hw, } } -static void _rtl92de_process_ui_link_quality(struct ieee80211_hw *hw, - struct rtl_stats *pstats) +static void _rtl92d_process_ui_link_quality(struct ieee80211_hw *hw, + struct rtl_stats *pstats) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rt_smooth_data *ui_link_quality; @@ -330,24 +330,24 @@ static void _rtl92de_process_ui_link_quality(struct ieee80211_hw *hw, rtl92d_loop_over_streams(hw, pstats); } -static void _rtl92de_process_phyinfo(struct ieee80211_hw *hw, - u8 *buffer, - struct rtl_stats *pcurrent_stats) 
+static void _rtl92d_process_phyinfo(struct ieee80211_hw *hw, + u8 *buffer, + struct rtl_stats *pcurrent_stats) { if (!pcurrent_stats->packet_matchbssid && !pcurrent_stats->packet_beacon) return; - _rtl92de_process_ui_rssi(hw, pcurrent_stats); - _rtl92de_process_pwdb(hw, pcurrent_stats); - _rtl92de_process_ui_link_quality(hw, pcurrent_stats); + _rtl92d_process_ui_rssi(hw, pcurrent_stats); + _rtl92d_process_pwdb(hw, pcurrent_stats); + _rtl92d_process_ui_link_quality(hw, pcurrent_stats); } -static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw, - struct sk_buff *skb, - struct rtl_stats *pstats, - __le32 *pdesc, - struct rx_fwinfo_92d *p_drvinfo) +static void _rtl92d_translate_rx_signal_stuff(struct ieee80211_hw *hw, + struct sk_buff *skb, + struct rtl_stats *pstats, + __le32 *pdesc, + struct rx_fwinfo_92d *p_drvinfo) { struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); @@ -375,15 +375,15 @@ static void _rtl92de_translate_rx_signal_stuff(struct ieee80211_hw *hw, packet_toself = packet_matchbssid && ether_addr_equal(praddr, rtlefuse->dev_addr); packet_beacon = ieee80211_is_beacon(fc); - _rtl92de_query_rxphystatus(hw, pstats, pdesc, p_drvinfo, - packet_matchbssid, packet_toself, - packet_beacon); - _rtl92de_process_phyinfo(hw, tmp_buf, pstats); + _rtl92d_query_rxphystatus(hw, pstats, pdesc, p_drvinfo, + packet_matchbssid, packet_toself, + packet_beacon); + _rtl92d_process_phyinfo(hw, tmp_buf, pstats); } -bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, - struct ieee80211_rx_status *rx_status, - u8 *pdesc8, struct sk_buff *skb) +bool rtl92d_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, + struct ieee80211_rx_status *rx_status, + u8 *pdesc8, struct sk_buff *skb) { __le32 *pdesc = (__le32 *)pdesc8; struct rx_fwinfo_92d *p_drvinfo; @@ -423,17 +423,17 @@ bool rtl92de_rx_query_desc(struct ieee80211_hw *hw, struct rtl_stats *stats, if (phystatus) { p_drvinfo = (struct rx_fwinfo_92d *)(skb->data + stats->rx_bufshift); - _rtl92de_translate_rx_signal_stuff(hw, skb, stats, pdesc, - p_drvinfo); + _rtl92d_translate_rx_signal_stuff(hw, skb, stats, pdesc, + p_drvinfo); } /*rx_status->qual = stats->signal; */ rx_status->signal = stats->recvsignalpower + 10; return true; } -EXPORT_SYMBOL_GPL(rtl92de_rx_query_desc); +EXPORT_SYMBOL_GPL(rtl92d_rx_query_desc); -void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx, - u8 desc_name, u8 *val) +void rtl92d_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx, + u8 desc_name, u8 *val) { __le32 *pdesc = (__le32 *)pdesc8; @@ -473,10 +473,10 @@ void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc8, bool istx, } } } -EXPORT_SYMBOL_GPL(rtl92de_set_desc); +EXPORT_SYMBOL_GPL(rtl92d_set_desc); -u64 rtl92de_get_desc(struct ieee80211_hw *hw, - u8 *p_desc8, bool istx, u8 desc_name) +u64 rtl92d_get_desc(struct ieee80211_hw *hw, + u8 *p_desc8, bool istx, u8 desc_name) { __le32 *p_desc = (__le32 *)p_desc8; u32 ret = 0; @@ -513,4 +513,4 @@ u64 rtl92de_get_desc(struct ieee80211_hw *hw, } return ret; } -EXPORT_SYMBOL_GPL(rtl92de_get_desc); +EXPORT_SYMBOL_GPL(rtl92d_get_desc); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h index 87d956d771eb..528182b1eba6 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192d/trx_common.h @@ -393,13 +393,13 @@ struct rx_fwinfo_92d { #endif } __packed; -bool 
rtl92de_rx_query_desc(struct ieee80211_hw *hw, - struct rtl_stats *stats, - struct ieee80211_rx_status *rx_status, - u8 *pdesc, struct sk_buff *skb); -void rtl92de_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, - u8 desc_name, u8 *val); -u64 rtl92de_get_desc(struct ieee80211_hw *hw, - u8 *p_desc, bool istx, u8 desc_name); +bool rtl92d_rx_query_desc(struct ieee80211_hw *hw, + struct rtl_stats *stats, + struct ieee80211_rx_status *rx_status, + u8 *pdesc, struct sk_buff *skb); +void rtl92d_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx, + u8 desc_name, u8 *val); +u64 rtl92d_get_desc(struct ieee80211_hw *hw, + u8 *p_desc, bool istx, u8 desc_name); #endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c index 73b81e60cfa9..03f4314bdb2e 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/hw.c @@ -181,7 +181,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) u8 btype_ibss = val[0]; if (btype_ibss) - rtl92de_stop_tx_beacon(hw); + rtl92d_stop_tx_beacon(hw); _rtl92de_set_bcn_ctrl_reg(hw, 0, BIT(3)); rtl_write_dword(rtlpriv, REG_TSFTR, (u32) (mac->tsf & 0xffffffff)); @@ -189,7 +189,7 @@ void rtl92de_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) (u32) ((mac->tsf >> 32) & 0xffffffff)); _rtl92de_set_bcn_ctrl_reg(hw, BIT(3), 0); if (btype_ibss) - rtl92de_resume_tx_beacon(hw); + rtl92d_resume_tx_beacon(hw); break; } @@ -295,13 +295,13 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw) /* 18. LLT_table_init(Adapter); */ for (i = 0; i < (txpktbuf_bndy - 1); i++) { - status = rtl92de_llt_write(hw, i, i + 1); + status = rtl92d_llt_write(hw, i, i + 1); if (!status) return status; } /* end of list */ - status = rtl92de_llt_write(hw, (txpktbuf_bndy - 1), 0xFF); + status = rtl92d_llt_write(hw, (txpktbuf_bndy - 1), 0xFF); if (!status) return status; @@ -310,13 +310,13 @@ static bool _rtl92de_llt_table_init(struct ieee80211_hw *hw) /* config this MAC as two MAC transfer. */ /* Otherwise used as local loopback buffer. */ for (i = txpktbuf_bndy; i < maxpage; i++) { - status = rtl92de_llt_write(hw, i, (i + 1)); + status = rtl92d_llt_write(hw, i, (i + 1)); if (!status) return status; } /* Let last entry point to the start entry of ring buffer */ - status = rtl92de_llt_write(hw, maxpage, txpktbuf_bndy); + status = rtl92d_llt_write(hw, maxpage, txpktbuf_bndy); if (!status) return status; @@ -688,7 +688,7 @@ int rtl92de_hw_init(struct ieee80211_hw *hw) /* reset hw sec */ rtl_cam_reset_all_entry(hw); - rtl92de_enable_hw_security_config(hw); + rtl92d_enable_hw_security_config(hw); /* Read EEPROM TX power index and PHY_REG_PG.txt to capture correct */ /* TX power index for different rate set. 
*/ @@ -742,11 +742,11 @@ static int _rtl92de_set_media_status(struct ieee80211_hw *hw, if (type == NL80211_IFTYPE_UNSPECIFIED || type == NL80211_IFTYPE_STATION) { - rtl92de_stop_tx_beacon(hw); + rtl92d_stop_tx_beacon(hw); _rtl92de_enable_bcn_sub_func(hw); } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP) { - rtl92de_resume_tx_beacon(hw); + rtl92d_resume_tx_beacon(hw); _rtl92de_disable_bcn_sub_func(hw); } else { rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c index 5f6311c2aac4..e36e4aeb9a95 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/sw.c @@ -32,7 +32,7 @@ static void rtl92d_init_aspm_vars(struct ieee80211_hw *hw) * 2 - Enable ASPM with Clock Req, * 3 - Alwyas Enable ASPM with Clock Req, * 4 - Always Enable ASPM without Clock Req. - * set defult to RTL8192CE:3 RTL8192E:2 + * set default to RTL8192CE:3 RTL8192E:2 * */ rtlpci->const_pci_aspm = 3; @@ -187,7 +187,7 @@ static void rtl92d_deinit_sw_vars(struct ieee80211_hw *hw) static struct rtl_hal_ops rtl8192de_hal_ops = { .init_sw_vars = rtl92d_init_sw_vars, .deinit_sw_vars = rtl92d_deinit_sw_vars, - .read_eeprom_info = rtl92de_read_eeprom_info, + .read_eeprom_info = rtl92d_read_eeprom_info, .interrupt_recognized = rtl92de_interrupt_recognized, .hw_init = rtl92de_hw_init, .hw_disable = rtl92de_card_disable, @@ -197,30 +197,30 @@ static struct rtl_hal_ops rtl8192de_hal_ops = { .disable_interrupt = rtl92de_disable_interrupt, .set_network_type = rtl92de_set_network_type, .set_chk_bssid = rtl92de_set_check_bssid, - .set_qos = rtl92de_set_qos, + .set_qos = rtl92d_set_qos, .set_bcn_reg = rtl92de_set_beacon_related_registers, .set_bcn_intv = rtl92de_set_beacon_interval, .update_interrupt_mask = rtl92de_update_interrupt_mask, .get_hw_reg = rtl92de_get_hw_reg, .set_hw_reg = rtl92de_set_hw_reg, - .update_rate_tbl = rtl92de_update_hal_rate_tbl, + .update_rate_tbl = rtl92d_update_hal_rate_tbl, .fill_tx_desc = rtl92de_tx_fill_desc, .fill_tx_cmddesc = rtl92de_tx_fill_cmddesc, - .query_rx_desc = rtl92de_rx_query_desc, - .set_channel_access = rtl92de_update_channel_access_setting, - .radio_onoff_checking = rtl92de_gpio_radio_on_off_checking, + .query_rx_desc = rtl92d_rx_query_desc, + .set_channel_access = rtl92d_update_channel_access_setting, + .radio_onoff_checking = rtl92d_gpio_radio_on_off_checking, .set_bw_mode = rtl92d_phy_set_bw_mode, .switch_channel = rtl92d_phy_sw_chnl, .dm_watchdog = rtl92de_dm_watchdog, .scan_operation_backup = rtl_phy_scan_operation_backup, .set_rf_power_state = rtl92d_phy_set_rf_power_state, .led_control = rtl92de_led_control, - .set_desc = rtl92de_set_desc, - .get_desc = rtl92de_get_desc, + .set_desc = rtl92d_set_desc, + .get_desc = rtl92d_get_desc, .is_tx_desc_closed = rtl92de_is_tx_desc_closed, .tx_polling = rtl92de_tx_polling, - .enable_hw_sec = rtl92de_enable_hw_security_config, - .set_key = rtl92de_set_key, + .enable_hw_sec = rtl92d_enable_hw_security_config, + .set_key = rtl92d_set_key, .get_bbreg = rtl92d_phy_query_bb_reg, .set_bbreg = rtl92d_phy_set_bb_reg, .get_rfreg = rtl92d_phy_query_rf_reg, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c index 2b9b352f7783..91bf399c9ef1 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/trx.c @@ -292,7 +292,7 @@ bool 
rtl92de_is_tx_desc_closed(struct ieee80211_hw *hw, struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue]; u8 *entry = (u8 *)(&ring->desc[ring->idx]); - u8 own = (u8)rtl92de_get_desc(hw, entry, true, HW_DESC_OWN); + u8 own = (u8)rtl92d_get_desc(hw, entry, true, HW_DESC_OWN); /* a beacon packet will only use the first * descriptor by defaut, and the own bit may not diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/Makefile b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/Makefile new file mode 100644 index 000000000000..569bfd3d5030 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/Makefile @@ -0,0 +1,13 @@ +# SPDX-License-Identifier: GPL-2.0 +rtl8192du-objs := \ + dm.o \ + fw.o \ + hw.o \ + led.o \ + phy.o \ + rf.o \ + sw.o \ + table.o \ + trx.o + +obj-$(CONFIG_RTL8192DU) += rtl8192du.o diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.c new file mode 100644 index 000000000000..dd57707a9184 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.c @@ -0,0 +1,120 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2024 Realtek Corporation.*/ + +#include "../wifi.h" +#include "../core.h" +#include "../rtl8192d/reg.h" +#include "../rtl8192d/def.h" +#include "../rtl8192d/dm_common.h" +#include "../rtl8192d/fw_common.h" +#include "dm.h" + +static void rtl92du_dm_init_1r_cca(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct ps_t *dm_pstable = &rtlpriv->dm_pstable; + + dm_pstable->pre_ccastate = CCA_MAX; + dm_pstable->cur_ccasate = CCA_MAX; +} + +static void rtl92du_dm_1r_cca(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct ps_t *dm_pstable = &rtlpriv->dm_pstable; + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + int pwdb = rtlpriv->dm_digtable.min_undec_pwdb_for_dm; + + if (rtlhal->macphymode != SINGLEMAC_SINGLEPHY || + rtlhal->current_bandtype != BAND_ON_5G) + return; + + if (pwdb != 0) { + if (dm_pstable->pre_ccastate == CCA_2R || + dm_pstable->pre_ccastate == CCA_MAX) + dm_pstable->cur_ccasate = (pwdb >= 35) ? CCA_1R : CCA_2R; + else + dm_pstable->cur_ccasate = (pwdb <= 30) ? CCA_2R : CCA_1R; + } else { + dm_pstable->cur_ccasate = CCA_MAX; + } + + if (dm_pstable->pre_ccastate == dm_pstable->cur_ccasate) + return; + + rtl_dbg(rtlpriv, COMP_BB_POWERSAVING, DBG_TRACE, + "Old CCA state: %d new CCA state: %d\n", + dm_pstable->pre_ccastate, dm_pstable->cur_ccasate); + + if (dm_pstable->cur_ccasate == CCA_1R) { + if (rtlpriv->phy.rf_type == RF_2T2R) + rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x13); + else /* Is this branch reachable? */ + rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23); + } else { /* CCA_2R or CCA_MAX */ + rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x33); + } +} + +static void rtl92du_dm_pwdb_monitor(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + const u32 max_macid = 32; + u32 temp; + + /* AP & ADHOC & MESH will return tmp */ + if (rtlpriv->mac80211.opmode != NL80211_IFTYPE_STATION) + return; + + /* Indicate Rx signal strength to FW. 
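With the firmware rate-adaptive mask in use, this goes out as an H2C_RSSI_REPORT command carrying the smoothed PWDB and the highest MAC ID; without it, the PWDB is written directly to register 0x4fe.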
*/ + if (rtlpriv->dm.useramask) { + temp = rtlpriv->dm.undec_sm_pwdb << 16; + temp |= max_macid << 8; + + rtl92d_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, (u8 *)(&temp)); + } else { + rtl_write_byte(rtlpriv, 0x4fe, (u8)rtlpriv->dm.undec_sm_pwdb); + } +} + +void rtl92du_dm_init(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER; + rtl_dm_diginit(hw, 0x20); + rtlpriv->dm_digtable.rx_gain_max = DM_DIG_FA_UPPER; + rtlpriv->dm_digtable.rx_gain_min = DM_DIG_FA_LOWER; + rtl92d_dm_init_edca_turbo(hw); + rtl92du_dm_init_1r_cca(hw); + rtl92d_dm_init_rate_adaptive_mask(hw); + rtl92d_dm_initialize_txpower_tracking(hw); +} + +void rtl92du_dm_watchdog(struct ieee80211_hw *hw) +{ + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + bool fw_current_inpsmode = false; + bool fwps_awake = true; + + /* Skip DM when: + * 1. RF is off. (No need to do DM.) + * 2. FW is in power-saving mode for FwLPS. + * (Prevents SW/FW I/O races.) + * 3. The IPS work item is scheduled. (Prevents the IPS + * sequence from racing with DM.) + * 4. RFChangeInProgress is true. + * (Prevents breakage by IPS/HW/SW RF off.) + */ + + if (ppsc->rfpwr_state != ERFON || fw_current_inpsmode || + !fwps_awake || ppsc->rfchange_inprogress) + return; + + rtl92du_dm_pwdb_monitor(hw); + rtl92d_dm_false_alarm_counter_statistics(hw); + rtl92d_dm_find_minimum_rssi(hw); + rtl92d_dm_dig(hw); + rtl92d_dm_check_txpower_tracking_thermal_meter(hw); + rtl92d_dm_check_edca_turbo(hw); + rtl92du_dm_1r_cca(hw); +} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.h new file mode 100644 index 000000000000..2f283bf1e4d8 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/dm.h @@ -0,0 +1,10 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Realtek Corporation.*/ + +#ifndef __RTL92DU_DM_H__ +#define __RTL92DU_DM_H__ + +void rtl92du_dm_init(struct ieee80211_hw *hw); +void rtl92du_dm_watchdog(struct ieee80211_hw *hw); + +#endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.c new file mode 100644 index 000000000000..f74e4e84fe39 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.c @@ -0,0 +1,63 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2024 Realtek Corporation.*/ + +#include "../wifi.h" +#include "../rtl8192d/reg.h" +#include "../rtl8192d/def.h" +#include "../rtl8192d/fw_common.h" +#include "fw.h" + +int rtl92du_download_fw(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + enum version_8192d version = rtlhal->version; + u8 *pfwheader; + u8 *pfwdata; + u32 fwsize; + int err; + + if (rtlpriv->max_fw_size == 0 || !rtlhal->pfirmware) + return 1; + + fwsize = rtlhal->fwsize; + pfwheader = rtlhal->pfirmware; + pfwdata = rtlhal->pfirmware; + rtlhal->fw_version = (u16)GET_FIRMWARE_HDR_VERSION(pfwheader); + rtlhal->fw_subversion = (u16)GET_FIRMWARE_HDR_SUB_VER(pfwheader); + + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + "FirmwareVersion(%d), FirmwareSubVersion(%d), Signature(%#x)\n", + rtlhal->fw_version, rtlhal->fw_subversion, + GET_FIRMWARE_HDR_SIGNATURE(pfwheader)); + + if (IS_FW_HEADER_EXIST(pfwheader)) { + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + "Shift 32 bytes for FW header!!\n"); + pfwdata = pfwdata + 32; + fwsize = fwsize - 32; + } + + if (rtl92d_is_fw_downloaded(rtlpriv)) + goto exit; + + /* If 8051 is running in RAM code, driver should + * inform Fw to reset by 
itself, or it will cause + * download Fw fail. + */ + if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) { + rtl92d_firmware_selfreset(hw); + rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); + } + + rtl92d_enable_fw_download(hw, true); + rtl92d_write_fw(hw, version, pfwdata, fwsize); + rtl92d_enable_fw_download(hw, false); + + err = rtl92d_fw_free_to_go(hw); + if (err) + pr_err("fw is not ready to run!\n"); +exit: + err = rtl92d_fw_init(hw); + return err; +} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.h new file mode 100644 index 000000000000..7904bfbda4ba --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/fw.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Realtek Corporation.*/ + +#ifndef __RTL92DU_FW_H__ +#define __RTL92DU_FW_H__ + +int rtl92du_download_fw(struct ieee80211_hw *hw); + +#endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c new file mode 100644 index 000000000000..700c6e2bcad1 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.c @@ -0,0 +1,1212 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2024 Realtek Corporation.*/ + +#include "../wifi.h" +#include "../cam.h" +#include "../usb.h" +#include "../rtl8192d/reg.h" +#include "../rtl8192d/def.h" +#include "../rtl8192d/dm_common.h" +#include "../rtl8192d/fw_common.h" +#include "../rtl8192d/hw_common.h" +#include "../rtl8192d/phy_common.h" +#include "phy.h" +#include "dm.h" +#include "fw.h" +#include "hw.h" +#include "trx.h" + +static void _rtl92du_set_bcn_ctrl_reg(struct ieee80211_hw *hw, + u8 set_bits, u8 clear_bits) +{ + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtlusb->reg_bcn_ctrl_val |= set_bits; + rtlusb->reg_bcn_ctrl_val &= ~clear_bits; + rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8)rtlusb->reg_bcn_ctrl_val); +} + +static void _rtl92du_enable_bcn_sub_func(struct ieee80211_hw *hw) +{ + _rtl92du_set_bcn_ctrl_reg(hw, 0, BIT(1)); +} + +static void _rtl92du_disable_bcn_sub_func(struct ieee80211_hw *hw) +{ + _rtl92du_set_bcn_ctrl_reg(hw, BIT(1), 0); +} + +void rtl92du_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) +{ + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + + switch (variable) { + case HW_VAR_RCR: + *((u32 *)val) = mac->rx_conf; + break; + default: + rtl92d_get_hw_reg(hw, variable, val); + break; + } +} + +void rtl92du_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtlpriv); + + switch (variable) { + case HW_VAR_AC_PARAM: + rtl92d_dm_init_edca_turbo(hw); + break; + case HW_VAR_ACM_CTRL: { + u8 e_aci = *val; + union aci_aifsn *p_aci_aifsn = + (union aci_aifsn *)(&mac->ac[0].aifs); + u8 acm = p_aci_aifsn->f.acm; + u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL); + + if (acm) { + switch (e_aci) { + case AC0_BE: + acm_ctrl |= ACMHW_BEQEN; + break; + case AC2_VI: + acm_ctrl |= ACMHW_VIQEN; + break; + case AC3_VO: + acm_ctrl |= ACMHW_VOQEN; + break; + default: + rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, + "HW_VAR_ACM_CTRL acm set failed: eACI is %d\n", + acm); + break; + } + } else { + switch (e_aci) { + case AC0_BE: + acm_ctrl &= (~ACMHW_BEQEN); + break; + case AC2_VI: + acm_ctrl &= (~ACMHW_VIQEN); + break; + case AC3_VO: + acm_ctrl &= (~ACMHW_VOQEN); + break; + default: + pr_err("%s:%d switch case %#x not processed\n", + __func__, __LINE__, e_aci); + break; 
+ } + } + rtl_dbg(rtlpriv, COMP_QOS, DBG_TRACE, + "SetHwReg8190pci(): [HW_VAR_ACM_CTRL] Write 0x%X\n", + acm_ctrl); + rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl); + break; + } + case HW_VAR_RCR: + mac->rx_conf = ((u32 *)val)[0]; + rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf); + break; + case HW_VAR_H2C_FW_JOINBSSRPT: { + u8 tmp_regcr, tmp_reg422; + bool recover = false; + u8 mstatus = *val; + + if (mstatus == RT_MEDIA_CONNECT) { + rtlpriv->cfg->ops->set_hw_reg(hw, + HW_VAR_AID, NULL); + tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1); + rtl_write_byte(rtlpriv, REG_CR + 1, + tmp_regcr | ENSWBCN); + _rtl92du_set_bcn_ctrl_reg(hw, 0, EN_BCN_FUNCTION); + _rtl92du_set_bcn_ctrl_reg(hw, DIS_TSF_UDT, 0); + tmp_reg422 = rtl_read_byte(rtlpriv, + REG_FWHW_TXQ_CTRL + 2); + if (tmp_reg422 & (EN_BCNQ_DL >> 16)) + recover = true; + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, + tmp_reg422 & ~(EN_BCNQ_DL >> 16)); + + /* We don't implement FW LPS so this is not needed. */ + /* rtl92d_set_fw_rsvdpagepkt(hw, 0); */ + + _rtl92du_set_bcn_ctrl_reg(hw, EN_BCN_FUNCTION, 0); + _rtl92du_set_bcn_ctrl_reg(hw, 0, DIS_TSF_UDT); + if (recover) + rtl_write_byte(rtlpriv, + REG_FWHW_TXQ_CTRL + 2, + tmp_reg422); + rtl_write_byte(rtlpriv, REG_CR + 1, + tmp_regcr & ~ENSWBCN); + } + rtl92d_set_fw_joinbss_report_cmd(hw, (*val)); + break; + } + case HW_VAR_CORRECT_TSF: { + u8 btype_ibss = val[0]; + + if (btype_ibss) + rtl92d_stop_tx_beacon(hw); + _rtl92du_set_bcn_ctrl_reg(hw, 0, EN_BCN_FUNCTION); + rtl_write_dword(rtlpriv, REG_TSFTR, + (u32)(mac->tsf & 0xffffffff)); + rtl_write_dword(rtlpriv, REG_TSFTR + 4, + (u32)((mac->tsf >> 32) & 0xffffffff)); + _rtl92du_set_bcn_ctrl_reg(hw, EN_BCN_FUNCTION, 0); + if (btype_ibss) + rtl92d_resume_tx_beacon(hw); + + break; + } + case HW_VAR_KEEP_ALIVE: + /* Avoid "switch case not processed" error. RTL8192DU doesn't + * need to do anything here, maybe. + */ + break; + default: + rtl92d_set_hw_reg(hw, variable, val); + break; + } +} + +static void _rtl92du_init_queue_reserved_page(struct ieee80211_hw *hw, + u8 out_ep_num, + u8 queue_sel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u32 txqpagenum, txqpageunit; + u32 txqremainingpage; + u32 numhq = 0; + u32 numlq = 0; + u32 numnq = 0; + u32 numpubq; + u32 value32 = 0; /* u32p_replace_bits() only touches its field bits, so start from 0 */ + + if (rtlhal->macphymode != SINGLEMAC_SINGLEPHY) { + numpubq = NORMAL_PAGE_NUM_PUBQ_92D_DUAL_MAC; + txqpagenum = TX_TOTAL_PAGE_NUMBER_92D_DUAL_MAC - numpubq; + } else { + numpubq = TEST_PAGE_NUM_PUBQ_92DU; + txqpagenum = TX_TOTAL_PAGE_NUMBER_92DU - numpubq; + } + + if (rtlhal->macphymode != SINGLEMAC_SINGLEPHY && out_ep_num == 3) { + numhq = NORMAL_PAGE_NUM_HPQ_92D_DUAL_MAC; + numlq = NORMAL_PAGE_NUM_LPQ_92D_DUAL_MAC; + numnq = NORMAL_PAGE_NUM_NORMALQ_92D_DUAL_MAC; + } else { + txqpageunit = txqpagenum / out_ep_num; + txqremainingpage = txqpagenum % out_ep_num; + + if (queue_sel & TX_SELE_HQ) + numhq = txqpageunit; + if (queue_sel & TX_SELE_LQ) + numlq = txqpageunit; + if (queue_sel & TX_SELE_NQ) + numnq = txqpageunit; + + /* The HIGH priority queue is always present in 2 and 3 + * out-ep configurations. Remainder pages are assigned + * to the high queue. + */ + if (out_ep_num > 1 && txqremainingpage) + numhq += txqremainingpage; + } + + /* NOTE: This step must be done before writing REG_RQPN. 
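REG_RQPN_NPQ takes only the normal-queue page count; the high-, low- and public-queue counts are then committed below in a single REG_RQPN write with LD_RQPN set, so the hardware latches them all at once.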
*/ + rtl_write_byte(rtlpriv, REG_RQPN_NPQ, (u8)numnq); + + /* TX DMA */ + u32p_replace_bits(&value32, numhq, HPQ_MASK); + u32p_replace_bits(&value32, numlq, LPQ_MASK); + u32p_replace_bits(&value32, numpubq, PUBQ_MASK); + value32 |= LD_RQPN; + rtl_write_dword(rtlpriv, REG_RQPN, value32); +} + +static void _rtl92du_init_tx_buffer_boundary(struct ieee80211_hw *hw, + u8 txpktbuf_bndy) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy); + rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy); + + rtl_write_byte(rtlpriv, REG_TXPKTBUF_WMAC_LBK_BF_HD, txpktbuf_bndy); + + /* TXRKTBUG_PG_BNDY */ + rtl_write_byte(rtlpriv, REG_TRXFF_BNDY, txpktbuf_bndy); + + /* Beacon Head for TXDMA */ + rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy); +} + +static bool _rtl92du_llt_table_init(struct ieee80211_hw *hw, u8 txpktbuf_bndy) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + unsigned short i; + bool status; + u8 maxpage; + + if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY) + maxpage = 255; + else + maxpage = 127; + + for (i = 0; i < (txpktbuf_bndy - 1); i++) { + status = rtl92d_llt_write(hw, i, i + 1); + if (!status) + return status; + } + + /* end of list */ + status = rtl92d_llt_write(hw, txpktbuf_bndy - 1, 0xFF); + if (!status) + return status; + + /* Make the other pages as ring buffer + * This ring buffer is used as beacon buffer if we + * config this MAC as two MAC transfer. + * Otherwise used as local loopback buffer. + */ + for (i = txpktbuf_bndy; i < maxpage; i++) { + status = rtl92d_llt_write(hw, i, i + 1); + if (!status) + return status; + } + + /* Let last entry point to the start entry of ring buffer */ + status = rtl92d_llt_write(hw, maxpage, txpktbuf_bndy); + if (!status) + return status; + + return true; +} + +static void _rtl92du_init_chipn_reg_priority(struct ieee80211_hw *hw, u16 beq, + u16 bkq, u16 viq, u16 voq, + u16 mgtq, u16 hiq) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 value16; + + value16 = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL) & 0x7; + u16p_replace_bits(&value16, beq, TXDMA_BEQ_MAP); + u16p_replace_bits(&value16, bkq, TXDMA_BKQ_MAP); + u16p_replace_bits(&value16, viq, TXDMA_VIQ_MAP); + u16p_replace_bits(&value16, voq, TXDMA_VOQ_MAP); + u16p_replace_bits(&value16, mgtq, TXDMA_MGQ_MAP); + u16p_replace_bits(&value16, hiq, TXDMA_HIQ_MAP); + rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, value16); +} + +static void _rtl92du_init_chipn_one_out_ep_priority(struct ieee80211_hw *hw, + u8 queue_sel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 value; + + switch (queue_sel) { + case TX_SELE_HQ: + value = QUEUE_HIGH; + break; + case TX_SELE_LQ: + value = QUEUE_LOW; + break; + case TX_SELE_NQ: + value = QUEUE_NORMAL; + break; + default: + WARN_ON(1); /* Shall not reach here! 
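With a single out endpoint, queue_sel must be exactly one of TX_SELE_HQ, TX_SELE_LQ or TX_SELE_NQ.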
*/ + return; + } + _rtl92du_init_chipn_reg_priority(hw, value, value, value, value, + value, value); + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + "Tx queue select: 0x%02x\n", queue_sel); +} + +static void _rtl92du_init_chipn_two_out_ep_priority(struct ieee80211_hw *hw, + u8 queue_sel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 beq, bkq, viq, voq, mgtq, hiq; + u16 valuehi, valuelow; + + switch (queue_sel) { + default: + WARN_ON(1); + fallthrough; + case (TX_SELE_HQ | TX_SELE_LQ): + valuehi = QUEUE_HIGH; + valuelow = QUEUE_LOW; + break; + case (TX_SELE_NQ | TX_SELE_LQ): + valuehi = QUEUE_NORMAL; + valuelow = QUEUE_LOW; + break; + case (TX_SELE_HQ | TX_SELE_NQ): + valuehi = QUEUE_HIGH; + valuelow = QUEUE_NORMAL; + break; + } + + beq = valuelow; + bkq = valuelow; + viq = valuehi; + voq = valuehi; + mgtq = valuehi; + hiq = valuehi; + + _rtl92du_init_chipn_reg_priority(hw, beq, bkq, viq, voq, mgtq, hiq); + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + "Tx queue select: 0x%02x\n", queue_sel); +} + +static void _rtl92du_init_chipn_three_out_ep_priority(struct ieee80211_hw *hw, + u8 queue_sel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 beq, bkq, viq, voq, mgtq, hiq; + + beq = QUEUE_LOW; + bkq = QUEUE_LOW; + viq = QUEUE_NORMAL; + voq = QUEUE_HIGH; + mgtq = QUEUE_HIGH; + hiq = QUEUE_HIGH; + + _rtl92du_init_chipn_reg_priority(hw, beq, bkq, viq, voq, mgtq, hiq); + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + "Tx queue select: 0x%02x\n", queue_sel); +} + +static void _rtl92du_init_queue_priority(struct ieee80211_hw *hw, + u8 out_ep_num, + u8 queue_sel) +{ + switch (out_ep_num) { + case 1: + _rtl92du_init_chipn_one_out_ep_priority(hw, queue_sel); + break; + case 2: + _rtl92du_init_chipn_two_out_ep_priority(hw, queue_sel); + break; + case 3: + _rtl92du_init_chipn_three_out_ep_priority(hw, queue_sel); + break; + default: + WARN_ON(1); /* Shall not reach here! */ + break; + } +} + +static void _rtl92du_init_wmac_setting(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtlpriv); + + mac->rx_conf = RCR_APM | RCR_AM | RCR_AB | RCR_ADF | RCR_APP_ICV | + RCR_AMF | RCR_HTC_LOC_CTRL | RCR_APP_MIC | + RCR_APP_PHYST_RXFF | RCR_APPFCS; + + rtl_write_dword(rtlpriv, REG_RCR, mac->rx_conf); + + /* Set Multicast Address. */ + rtl_write_dword(rtlpriv, REG_MAR, 0xffffffff); + rtl_write_dword(rtlpriv, REG_MAR + 4, 0xffffffff); +} + +static void _rtl92du_init_adaptive_ctrl(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 val32; + + val32 = rtl_read_dword(rtlpriv, REG_RRSR); + val32 &= ~0xfffff; + if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) + val32 |= 0xffff0; /* No CCK */ + else + val32 |= 0xffff1; + rtl_write_dword(rtlpriv, REG_RRSR, val32); + + /* Set Spec SIFS (used in NAV) */ + rtl_write_word(rtlpriv, REG_SPEC_SIFS, 0x1010); + + /* Retry limit 0x30 */ + rtl_write_word(rtlpriv, REG_RL, 0x3030); +} + +static void _rtl92du_init_edca(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u16 val16; + + /* Disable EDCA count down to reduce collisions and retries */ + val16 = rtl_read_word(rtlpriv, REG_RD_CTRL); + val16 |= DIS_EDCA_CNT_DWN; + rtl_write_word(rtlpriv, REG_RD_CTRL, val16); + + /* CCK SIFS shall always be 10 us. 
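The 0x0a0a written below sets 10 us in both byte fields of REG_SIFS_CTX.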
*/ + rtl_write_word(rtlpriv, REG_SIFS_CTX, 0x0a0a); + /* Set SIFS for OFDM */ + rtl_write_word(rtlpriv, REG_SIFS_TRX, 0x1010); + + rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0204); + + rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x014004); + + /* TXOP */ + rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, 0x005EA42B); + rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0x0000A44F); + rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x005EA324); + rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x002FA226); + + rtl_write_byte(rtlpriv, REG_PIFS, 0x1C); + + rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16); + + rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040); + + rtl_write_byte(rtlpriv, REG_BCNDMATIM, 0x2); + rtl_write_byte(rtlpriv, REG_ATIMWND, 0x2); +} + +static void _rtl92du_init_retry_function(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 val8; + + val8 = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL); + val8 |= EN_AMPDU_RTY_NEW; + rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL, val8); + + rtl_write_byte(rtlpriv, REG_ACKTO, 0x40); +} + +static void _rtl92du_init_operation_mode(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + rtl_write_byte(rtlpriv, REG_BWOPMODE, BW_OPMODE_20MHZ); + + switch (rtlpriv->phy.rf_type) { + case RF_1T2R: + case RF_1T1R: + rtlhal->minspace_cfg = (MAX_MSS_DENSITY_1T << 3); + break; + case RF_2T2R: + case RF_2T2R_GREEN: + rtlhal->minspace_cfg = (MAX_MSS_DENSITY_2T << 3); + break; + } + rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, rtlhal->minspace_cfg); +} + +static void _rtl92du_init_beacon_parameters(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + rtl_write_word(rtlpriv, REG_BCN_CTRL, 0x1010); + + rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x3c02); + rtl_write_byte(rtlpriv, REG_DRVERLYINT, 0x05); + rtl_write_byte(rtlpriv, REG_BCNDMATIM, 0x03); + + rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f); +} + +static void _rtl92du_init_ampdu_aggregation(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + + /* Aggregation threshold */ + if (rtlhal->macphymode == DUALMAC_DUALPHY) + rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x66525541); + else if (rtlhal->macphymode == DUALMAC_SINGLEPHY) + rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x44444441); + else + rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x88728841); + + rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16); +} + +static bool _rtl92du_init_power_on(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + unsigned short wordtmp; + unsigned char bytetmp; + u16 retry = 0; + + do { + if (rtl_read_byte(rtlpriv, REG_APS_FSMCO) & PFM_ALDN) + break; + + if (retry++ > 1000) + return false; + } while (true); + + /* Unlock ISO/CLK/Power control register */ + rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00); + + /* SPS0_CTRL 0x11[7:0] = 0x2b enable SPS into PWM mode */ + rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b); + + msleep(1); + + bytetmp = rtl_read_byte(rtlpriv, REG_LDOV12D_CTRL); + if ((bytetmp & LDV12_EN) == 0) { + bytetmp |= LDV12_EN; + rtl_write_byte(rtlpriv, REG_LDOV12D_CTRL, bytetmp); + + msleep(1); + + bytetmp = rtl_read_byte(rtlpriv, REG_SYS_ISO_CTRL); + bytetmp &= ~ISO_MD2PP; + rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL, bytetmp); + } + + /* Auto enable WLAN */ + wordtmp = rtl_read_word(rtlpriv, REG_APS_FSMCO); + wordtmp |= APFM_ONMAC; + rtl_write_word(rtlpriv, REG_APS_FSMCO, wordtmp); + + wordtmp = rtl_read_word(rtlpriv, REG_APS_FSMCO); + retry = 0; + 
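/* Wait for the hardware to clear APFM_ONMAC, which signals that the MAC auto power-on sequence has completed. */ + 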
while ((wordtmp & APFM_ONMAC) && retry < 1000) { + retry++; + wordtmp = rtl_read_word(rtlpriv, REG_APS_FSMCO); + } + + /* Release RF digital isolation */ + wordtmp = rtl_read_word(rtlpriv, REG_SYS_ISO_CTRL); + wordtmp &= ~ISO_DIOR; + rtl_write_word(rtlpriv, REG_SYS_ISO_CTRL, wordtmp); + + /* Enable MAC DMA/WMAC/SCHEDULE/SEC block */ + wordtmp = rtl_read_word(rtlpriv, REG_CR); + wordtmp |= HCI_TXDMA_EN | HCI_RXDMA_EN | TXDMA_EN | RXDMA_EN | + PROTOCOL_EN | SCHEDULE_EN | MACTXEN | MACRXEN | ENSEC; + rtl_write_word(rtlpriv, REG_CR, wordtmp); + + return true; +} + +static bool _rtl92du_init_mac(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u8 val8; + + rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00); + + val8 = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1); + val8 &= ~(FEN_MREGEN >> 8); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, val8); + + /* For s3/s4 may reset mac, Reg0xf8 may be set to 0, + * so reset macphy control reg here. + */ + rtl92d_phy_config_macphymode(hw); + + rtl92du_phy_set_poweron(hw); + + if (!_rtl92du_init_power_on(hw)) { + pr_err("Failed to init power on!\n"); + return false; + } + + rtl92d_phy_config_maccoexist_rfpage(hw); + + return true; +} + +int rtl92du_hw_init(struct ieee80211_hw *hw) +{ + struct rtl_usb_priv *usb_priv = rtl_usbpriv(hw); + struct rtl_usb *rtlusb = rtl_usbdev(usb_priv); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_mac *mac = rtl_mac(rtlpriv); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 val8, txpktbuf_bndy; + int err, i; + u32 val32; + u16 val16; + + mutex_lock(rtlpriv->mutex_for_hw_init); + + /* we should do iqk after disable/enable */ + rtl92d_phy_reset_iqk_result(hw); + + if (!_rtl92du_init_mac(hw)) { + pr_err("Init MAC failed\n"); + mutex_unlock(rtlpriv->mutex_for_hw_init); + return 1; + } + + if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY) + txpktbuf_bndy = 249; + else + txpktbuf_bndy = 123; + + if (!_rtl92du_llt_table_init(hw, txpktbuf_bndy)) { + pr_err("Init LLT failed\n"); + mutex_unlock(rtlpriv->mutex_for_hw_init); + return 1; + } + + err = rtl92du_download_fw(hw); + + /* return fail only when part number check fail */ + if (err && rtl_read_byte(rtlpriv, 0x1c5) == 0xe0) { + rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, + "Failed to download FW. Init HW without FW..\n"); + mutex_unlock(rtlpriv->mutex_for_hw_init); + return 1; + } + rtlhal->last_hmeboxnum = 0; + rtlpriv->psc.fw_current_inpsmode = false; + + rtl92du_phy_mac_config(hw); + + /* Set reserved page for each queue */ + _rtl92du_init_queue_reserved_page(hw, rtlusb->out_ep_nums, + rtlusb->out_queue_sel); + + _rtl92du_init_tx_buffer_boundary(hw, txpktbuf_bndy); + + _rtl92du_init_queue_priority(hw, rtlusb->out_ep_nums, + rtlusb->out_queue_sel); + + /* Set Tx/Rx page size (Tx must be 128 Bytes, + * Rx can be 64, 128, 256, 512, 1024 bytes) + */ + rtl_write_byte(rtlpriv, REG_PBP, 0x11); + + /* Get Rx PHY status in order to report RSSI and others. 
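The driver info size is in 8-byte units, so 0x4 reserves 32 bytes, matching the rx_fwinfo_92d PHY status appended after the RX descriptor.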
*/ + rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4); + + rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff); + rtl_write_dword(rtlpriv, REG_HIMR, 0xffffffff); + + val8 = rtl_read_byte(rtlpriv, MSR); + val8 &= ~MSR_MASK; + val8 |= MSR_INFRA; + rtl_write_byte(rtlpriv, MSR, val8); + + _rtl92du_init_wmac_setting(hw); + _rtl92du_init_adaptive_ctrl(hw); + _rtl92du_init_edca(hw); + + rtl_write_dword(rtlpriv, REG_DARFRC, 0x00000000); + rtl_write_dword(rtlpriv, REG_DARFRC + 4, 0x10080404); + rtl_write_dword(rtlpriv, REG_RARFRC, 0x04030201); + rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x08070605); + + _rtl92du_init_retry_function(hw); + /* _InitUsbAggregationSetting(padapter); no aggregation for now */ + _rtl92du_init_operation_mode(hw); + _rtl92du_init_beacon_parameters(hw); + _rtl92du_init_ampdu_aggregation(hw); + + rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xff); + + /* unit: 256us. 256ms */ + rtl_write_word(rtlpriv, REG_PKT_VO_VI_LIFE_TIME, 0x0400); + rtl_write_word(rtlpriv, REG_PKT_BE_BK_LIFE_TIME, 0x0400); + + /* Hardware-controlled blinking. */ + rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8282); + rtl_write_byte(rtlpriv, REG_LEDCFG2, 0x82); + + val32 = rtl_read_dword(rtlpriv, REG_TXDMA_OFFSET_CHK); + val32 |= DROP_DATA_EN; + rtl_write_dword(rtlpriv, REG_TXDMA_OFFSET_CHK, val32); + + if (mac->rdg_en) { + rtl_write_byte(rtlpriv, REG_RD_CTRL, 0xff); + rtl_write_word(rtlpriv, REG_RD_NAV_NXT, 0x200); + rtl_write_byte(rtlpriv, REG_RD_RESP_PKT_TH, 0x05); + } + + for (i = 0; i < 4; i++) + rtl_write_dword(rtlpriv, REG_ARFR0 + i * 4, 0x1f8ffff0); + + if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY) { + if (rtlusb->out_ep_nums == 2) + rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x03066666); + else + rtl_write_word(rtlpriv, REG_FAST_EDCA_CTRL, 0x8888); + } else { + rtl_write_word(rtlpriv, REG_FAST_EDCA_CTRL, 0x5555); + } + + val8 = rtl_read_byte(rtlpriv, 0x605); + val8 |= 0xf0; + rtl_write_byte(rtlpriv, 0x605, val8); + + rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x30); + rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x30); + rtl_write_byte(rtlpriv, 0x606, 0x30); + + /* temp for high queue and mgnt Queue corrupt in time; it may + * cause hang when sw beacon use high_Q, other frame use mgnt_Q; + * or, sw beacon use mgnt_Q, other frame use high_Q; + */ + rtl_write_byte(rtlpriv, REG_DIS_TXREQ_CLR, 0x10); + val16 = rtl_read_word(rtlpriv, REG_RD_CTRL); + val16 |= BIT(12); + rtl_write_word(rtlpriv, REG_RD_CTRL, val16); + + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0); + + /* usb suspend idle time count for bitfile0927 */ + val8 = rtl_read_byte(rtlpriv, 0xfe56); + val8 |= BIT(0) | BIT(1); + rtl_write_byte(rtlpriv, 0xfe56, val8); + + if (rtlhal->earlymode_enable) { + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + "EarlyMode Enabled!!!\n"); + + val8 = rtl_read_byte(rtlpriv, REG_EARLY_MODE_CONTROL); + val8 |= 0x1f; + rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, val8); + + rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL + 3, 0x80); + + val8 = rtl_read_byte(rtlpriv, 0x605); + val8 |= 0x40; + rtl_write_byte(rtlpriv, 0x605, val8); + } else { + rtl_write_byte(rtlpriv, REG_EARLY_MODE_CONTROL, 0); + } + + rtl92du_phy_bb_config(hw); + + rtlphy->rf_mode = RF_OP_BY_SW_3WIRE; + /* set before initialize RF */ + rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf); + + /* config RF */ + rtl92du_phy_rf_config(hw); + + /* set default value after initialize RF */ + rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0); + + /* After load BB, RF params, we need to do more for 92D. 
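The call below adjusts the BB/RF settings for the current band and MAC/PHY operating mode.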
*/ + rtl92du_update_bbrf_configuration(hw); + + rtlphy->rfreg_chnlval[0] = + rtl_get_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK); + rtlphy->rfreg_chnlval[1] = + rtl_get_rfreg(hw, RF90_PATH_B, RF_CHNLBW, RFREG_OFFSET_MASK); + + /*---- Set CCK and OFDM Block "ON"----*/ + if (rtlhal->current_bandtype == BAND_ON_2_4G) + rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1); + rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1); + + /* reset hw sec */ + rtl_cam_reset_all_entry(hw); + rtl92d_enable_hw_security_config(hw); + + rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF); + + /* schmitt trigger, improve tx evm for 92du */ + val8 = rtl_read_byte(rtlpriv, REG_AFE_XTAL_CTRL); + val8 |= BIT(1); + rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL, val8); + + /* Disable bar */ + rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0xffff); + + /* Nav limit */ + rtl_write_byte(rtlpriv, REG_NAV_CTRL + 2, 0); + rtl_write_byte(rtlpriv, ROFDM0_XATXAFE + 3, 0x50); + + /* Read EEPROM TX power index and PHY_REG_PG.txt to capture correct + * TX power index for different rate set. + */ + rtl92d_phy_get_hw_reg_originalvalue(hw); + + ppsc->rfpwr_state = ERFON; + + /* do IQK for 2.4G for better scan result */ + if (rtlhal->current_bandtype == BAND_ON_2_4G) + rtl92du_phy_iq_calibrate(hw); + + rtl92du_phy_lc_calibrate(hw, IS_92D_SINGLEPHY(rtlhal->version)); + + rtl92du_phy_init_pa_bias(hw); + + mutex_unlock(rtlpriv->mutex_for_hw_init); + + rtl92du_dm_init(hw); + + /* For 2 PORT TSF SYNC */ + rtl_write_word(rtlpriv, REG_BCN_CTRL, 0x1818); + rtlusb->reg_bcn_ctrl_val = 0x18; + + udelay(500); + + if (rtlhal->macphymode != DUALMAC_DUALPHY) { + rtl_write_dword(rtlpriv, RFPGA1_TXINFO, + rtl_read_dword(rtlpriv, RFPGA1_TXINFO) & ~BIT(30)); + + rtl_write_dword(rtlpriv, RFPGA0_TXGAINSTAGE, + rtl_read_dword(rtlpriv, RFPGA0_TXGAINSTAGE) & ~BIT(31)); + + rtl_write_dword(rtlpriv, ROFDM0_XBTXAFE, 0xa0e40000); + } + + val32 = rtl_read_dword(rtlpriv, REG_FWHW_TXQ_CTRL); + val32 |= BIT(12); + rtl_write_dword(rtlpriv, REG_FWHW_TXQ_CTRL, val32); + + return err; +} + +static int _rtl92du_set_media_status(struct ieee80211_hw *hw, + enum nl80211_iftype type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + enum led_ctl_mode ledaction = LED_CTL_NO_LINK; + u8 bt_msr = rtl_read_byte(rtlpriv, MSR); + + bt_msr &= 0xfc; + + if (type == NL80211_IFTYPE_UNSPECIFIED || + type == NL80211_IFTYPE_STATION) { + rtl92d_stop_tx_beacon(hw); + _rtl92du_enable_bcn_sub_func(hw); + } else if (type == NL80211_IFTYPE_ADHOC || + type == NL80211_IFTYPE_AP) { + rtl92d_resume_tx_beacon(hw); + _rtl92du_disable_bcn_sub_func(hw); + } else { + rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, + "Set HW_VAR_MEDIA_STATUS: No such media status(%x)\n", + type); + } + + switch (type) { + case NL80211_IFTYPE_UNSPECIFIED: + bt_msr |= MSR_NOLINK; + ledaction = LED_CTL_LINK; + rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, + "Set Network type to NO LINK!\n"); + break; + case NL80211_IFTYPE_ADHOC: + bt_msr |= MSR_ADHOC; + rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, + "Set Network type to Ad Hoc!\n"); + break; + case NL80211_IFTYPE_STATION: + bt_msr |= MSR_INFRA; + ledaction = LED_CTL_LINK; + rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, + "Set Network type to STA!\n"); + break; + case NL80211_IFTYPE_AP: + bt_msr |= MSR_AP; + rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, + "Set Network type to AP!\n"); + break; + default: + pr_err("Network type %d not supported!\n", type); + return 1; + } + rtl_write_byte(rtlpriv, MSR, bt_msr); + + rtlpriv->cfg->ops->led_control(hw, ledaction); + + if ((bt_msr & MSR_MASK) == MSR_AP) + rtl_write_byte(rtlpriv, 
REG_BCNTCFG + 1, 0x00);
+ else
+ rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
+
+ return 0;
+}
+
+void rtl92du_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 reg_rcr;
+
+ if (rtlpriv->psc.rfpwr_state != ERFON)
+ return;
+
+ rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *)(&reg_rcr));
+
+ if (check_bssid) {
+ reg_rcr |= RCR_CBSSID_DATA | RCR_CBSSID_BCN;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)&reg_rcr);
+ _rtl92du_set_bcn_ctrl_reg(hw, 0, DIS_TSF_UDT);
+ } else if (!check_bssid) {
+ reg_rcr &= ~(RCR_CBSSID_DATA | RCR_CBSSID_BCN);
+ _rtl92du_set_bcn_ctrl_reg(hw, DIS_TSF_UDT, 0);
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *)&reg_rcr);
+ }
+}
+
+int rtl92du_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+
+ if (_rtl92du_set_media_status(hw, type))
+ return -EOPNOTSUPP;
+
+ /* check bssid */
+ if (rtlpriv->mac80211.link_state == MAC80211_LINKED) {
+ if (type != NL80211_IFTYPE_AP)
+ rtl92du_set_check_bssid(hw, true);
+ } else {
+ rtl92du_set_check_bssid(hw, false);
+ }
+
+ return 0;
+}
+
+/* Do IQK or reload the IQK settings. The Windows driver only does
+ * rtl92d_phy_reload_iqk_setting when setting the channel, but the
+ * timing requirements there are very strict, so do it here as well.
+ */
+void rtl92du_linked_set_reg(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 channel = rtlphy->current_channel;
+ u8 indexforchannel;
+
+ indexforchannel = rtl92d_get_rightchnlplace_for_iqk(channel);
+ if (!rtlphy->iqk_matrix[indexforchannel].iqk_done) {
+ rtl_dbg(rtlpriv, COMP_SCAN | COMP_INIT, DBG_DMESG,
+ "Do IQK for channel:%d\n", channel);
+ rtl92du_phy_iq_calibrate(hw);
+ }
+}
+
+void rtl92du_enable_interrupt(struct ieee80211_hw *hw)
+{
+ /* Nothing to do. */
+}
+
+void rtl92du_disable_interrupt(struct ieee80211_hw *hw)
+{
+ /* Nothing to do. */
+}
+
+static void _rtl92du_poweroff_adapter(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 retry = 100;
+ u8 u1b_tmp;
+ u16 val16;
+ u32 val32;
+
+ rtl_write_byte(rtlpriv, REG_LDOA15_CTRL, 0x04);
+
+ rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
+
+ /* If the firmware is running from RAM code, reset the 8051. */
+ if (rtl_read_byte(rtlpriv, REG_MCUFWDL) & MCUFWDL_RDY) {
+ rtl_write_byte(rtlpriv, REG_FSIMR, 0);
+
+ /* Disable the other HRCV interrupts so they cannot
+ * interfere with the 8051 reset.
+ */
+ rtl_write_byte(rtlpriv, REG_FWIMR, 0x20);
+
+ /* Close the mask to prevent an incorrect FW write operation. */
+ rtl_write_byte(rtlpriv, REG_FTIMR, 0);
+
+ rtl_write_byte(rtlpriv, REG_MCUFWDL, 0);
+
+ /* Setting (REG_HMETFR + 3) to 0x20 resets the 8051. */
+ rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
+ val16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ while (val16 & FEN_CPUEN) {
+ retry--;
+ if (retry == 0)
+ break;
+ udelay(50);
+ val16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
+ }
+
+ if (retry == 0) {
+ rtl_write_byte(rtlpriv, REG_FWIMR, 0);
+
+ /* If the 8051 reset failed, reset the MAC directly.
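+ * Presumably, writing 0x50 to REG_SYS_FUNC_EN + 1 clears
+ * FEN_CPUEN while keeping the other function-enable bits,
+ * and the 0x54 written further below sets FEN_CPUEN again.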
*/ + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x50); + + mdelay(10); + } + } + + /* reset MCU, MAC register, DCORE */ + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x54); + + /* reset MCU ready status */ + rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00); + + /* Pull GPIO PIN to balance level and LED control */ + + /* Disable GPIO[7:0] */ + rtl_write_word(rtlpriv, REG_GPIO_PIN_CTRL + 2, 0x0000); + val32 = rtl_read_dword(rtlpriv, REG_GPIO_PIN_CTRL); + u32p_replace_bits(&val32, val32 & 0xff, 0x0000ff00); + u32p_replace_bits(&val32, 0xff, 0x00ff0000); + rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, val32); + + /* Disable GPIO[10:8] */ + rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, 0); + val16 = rtl_read_word(rtlpriv, REG_GPIO_IO_SEL); + u16p_replace_bits(&val16, val16 & 0xf, 0x00f0); + u16p_replace_bits(&val16, 0xf, 0x0780); + rtl_write_word(rtlpriv, REG_GPIO_IO_SEL, val16); + + /* Disable LED 0, 1, and 2 */ + rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8888); + rtl_write_byte(rtlpriv, REG_LEDCFG2, 0x88); + + /* Disable analog sequence */ + + /* enter PFM mode */ + rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23); + + rtl_write_word(rtlpriv, REG_APS_FSMCO, + APDM_HOST | AFSM_HSUS | PFM_ALDN); + + /* lock ISO/CLK/Power control register */ + rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e); + + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + "In PowerOff,reg0x%x=%X\n", + REG_SPS0_CTRL, rtl_read_byte(rtlpriv, REG_SPS0_CTRL)); + + /* 0x17[7] 1b': power off in process 0b' : power off over */ + if (rtlpriv->rtlhal.macphymode != SINGLEMAC_SINGLEPHY) { + mutex_lock(rtlpriv->mutex_for_power_on_off); + u1b_tmp = rtl_read_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS); + u1b_tmp &= ~BIT(7); + rtl_write_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS, u1b_tmp); + mutex_unlock(rtlpriv->mutex_for_power_on_off); + } + + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<=======\n"); +} + +void rtl92du_card_disable(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw)); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + enum nl80211_iftype opmode; + u32 val32; + u16 val16; + u8 val8; + + mac->link_state = MAC80211_NOLINK; + opmode = NL80211_IFTYPE_UNSPECIFIED; + _rtl92du_set_media_status(hw, opmode); + + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + /* Power sequence for each MAC. */ + /* a. stop tx DMA */ + /* b. close RF */ + /* c. clear rx buf */ + /* d. stop rx DMA */ + /* e. reset MAC */ + + val16 = rtl_read_word(rtlpriv, REG_GPIO_MUXCFG); + val16 &= ~BIT(12); + rtl_write_word(rtlpriv, REG_GPIO_MUXCFG, val16); + + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xff); + udelay(500); + rtl_write_byte(rtlpriv, REG_CR, 0); + + /* RF OFF sequence */ + rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf); + rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x00); + + rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40); + + val8 = FEN_USBD | FEN_USBA | FEN_BB_GLB_RSTN; + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, val8); + + /* Mac0 can not do Global reset. Mac1 can do. 
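+ * (The RTL8192D has two MACs on one die. Only MAC1, or a single-MAC
+ * configuration, appears to be allowed to pull the BB global reset;
+ * the BB clock is gated via RFPGA0_XCD_RFPARAMETER bit 31 first.)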
*/ + if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY || + rtlhal->interfaceindex == 1) { + /* before BB reset should do clock gated */ + val32 = rtl_read_dword(rtlpriv, RFPGA0_XCD_RFPARAMETER); + val32 |= BIT(31); + rtl_write_dword(rtlpriv, RFPGA0_XCD_RFPARAMETER, val32); + + val8 &= ~FEN_BB_GLB_RSTN; + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, val8); + } + + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "==> Do power off.......\n"); + if (!rtl92du_phy_check_poweroff(hw)) + return; + + _rtl92du_poweroff_adapter(hw); +} + +void rtl92du_set_beacon_related_registers(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtlpriv); + u16 bcn_interval, atim_window; + + bcn_interval = mac->beacon_interval; + atim_window = 2; + rtl92du_disable_interrupt(hw); + rtl_write_word(rtlpriv, REG_ATIMWND, atim_window); + rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval); + rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f); + rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x20); + if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) + rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x30); + else + rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x20); + rtl_write_byte(rtlpriv, 0x606, 0x30); +} + +void rtl92du_set_beacon_interval(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); + u16 bcn_interval = mac->beacon_interval; + + rtl_dbg(rtlpriv, COMP_BEACON, DBG_DMESG, + "beacon_interval:%d\n", bcn_interval); + rtl92du_disable_interrupt(hw); + rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval); + rtl92du_enable_interrupt(hw); +} + +void rtl92du_update_interrupt_mask(struct ieee80211_hw *hw, + u32 add_msr, u32 rm_msr) +{ + /* Nothing to do here. */ +} + +void rtl92du_read_chip_version(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + /* Chip version reading is done in rtl92d_read_eeprom_info. 
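+ * All this callback has to do is record the HW type.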
*/ + + rtlpriv->rtlhal.hw_type = HARDWARE_TYPE_RTL8192DU; +} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.h new file mode 100644 index 000000000000..80ed00c90c16 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/hw.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Realtek Corporation.*/ + +#ifndef __RTL92DU_HW_H__ +#define __RTL92DU_HW_H__ + +void rtl92du_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); +void rtl92du_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val); +void rtl92du_read_chip_version(struct ieee80211_hw *hw); +int rtl92du_hw_init(struct ieee80211_hw *hw); +void rtl92du_card_disable(struct ieee80211_hw *hw); +void rtl92du_enable_interrupt(struct ieee80211_hw *hw); +void rtl92du_disable_interrupt(struct ieee80211_hw *hw); +int rtl92du_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type); +void rtl92du_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid); +void rtl92du_set_beacon_related_registers(struct ieee80211_hw *hw); +void rtl92du_set_beacon_interval(struct ieee80211_hw *hw); +void rtl92du_update_interrupt_mask(struct ieee80211_hw *hw, + u32 add_msr, u32 rm_msr); +void rtl92du_linked_set_reg(struct ieee80211_hw *hw); + +#endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.c new file mode 100644 index 000000000000..6c12dfbd6367 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2024 Realtek Corporation.*/ + +#include "../wifi.h" +#include "led.h" + +void rtl92du_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction) +{ + /* The hardware has control. 
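+ * The 92DU's LED appears to be wired for hardware control, so no
+ * software toggling is done here.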
*/ +} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.h new file mode 100644 index 000000000000..d7ebc8afcc7b --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/led.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Realtek Corporation.*/ + +#ifndef __RTL92DU_LED_H__ +#define __RTL92DU_LED_H__ + +void rtl92du_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction); + +#endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c new file mode 100644 index 000000000000..289ec71ce3e5 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.c @@ -0,0 +1,3123 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2024 Realtek Corporation.*/ + +#include "../wifi.h" +#include "../ps.h" +#include "../core.h" +#include "../efuse.h" +#include "../usb.h" +#include "../rtl8192d/reg.h" +#include "../rtl8192d/def.h" +#include "../rtl8192d/phy_common.h" +#include "../rtl8192d/rf_common.h" +#include "phy.h" +#include "rf.h" +#include "table.h" + +#define MAX_RF_IMR_INDEX 12 +#define MAX_RF_IMR_INDEX_NORMAL 13 +#define RF_REG_NUM_FOR_C_CUT_5G 6 +#define RF_REG_NUM_FOR_C_CUT_5G_INTERNALPA 7 +#define RF_REG_NUM_FOR_C_CUT_2G 5 +#define RF_CHNL_NUM_5G 19 +#define RF_CHNL_NUM_5G_40M 17 +#define CV_CURVE_CNT 64 + +static const u32 rf_reg_for_5g_swchnl_normal[MAX_RF_IMR_INDEX_NORMAL] = { + 0, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x0 +}; + +static const u8 rf_reg_for_c_cut_5g[RF_REG_NUM_FOR_C_CUT_5G] = { + RF_SYN_G1, RF_SYN_G2, RF_SYN_G3, RF_SYN_G4, RF_SYN_G5, RF_SYN_G6 +}; + +static const u8 rf_reg_for_c_cut_2g[RF_REG_NUM_FOR_C_CUT_2G] = { + RF_SYN_G1, RF_SYN_G2, RF_SYN_G3, RF_SYN_G7, RF_SYN_G8 +}; + +static const u8 rf_for_c_cut_5g_internal_pa[RF_REG_NUM_FOR_C_CUT_5G_INTERNALPA] = { + 0x0B, 0x48, 0x49, 0x4B, 0x03, 0x04, 0x0E +}; + +static const u32 rf_reg_mask_for_c_cut_2g[RF_REG_NUM_FOR_C_CUT_2G] = { + BIT(19) | BIT(18) | BIT(17) | BIT(14) | BIT(1), + BIT(10) | BIT(9), + BIT(18) | BIT(17) | BIT(16) | BIT(1), + BIT(2) | BIT(1), + BIT(15) | BIT(14) | BIT(13) | BIT(12) | BIT(11) +}; + +static const u8 rf_chnl_5g[RF_CHNL_NUM_5G] = { + 36, 40, 44, 48, 52, 56, 60, 64, 100, 104, 108, + 112, 116, 120, 124, 128, 132, 136, 140 +}; + +static const u8 rf_chnl_5g_40m[RF_CHNL_NUM_5G_40M] = { + 38, 42, 46, 50, 54, 58, 62, 102, 106, 110, 114, + 118, 122, 126, 130, 134, 138 +}; + +static const u32 rf_reg_pram_c_5g[5][RF_REG_NUM_FOR_C_CUT_5G] = { + {0xE43BE, 0xFC638, 0x77C0A, 0xDE471, 0xd7110, 0x8EB04}, + {0xE43BE, 0xFC078, 0xF7C1A, 0xE0C71, 0xD7550, 0xAEB04}, + {0xE43BF, 0xFF038, 0xF7C0A, 0xDE471, 0xE5550, 0xAEB04}, + {0xE43BF, 0xFF079, 0xF7C1A, 0xDE471, 0xE5550, 0xAEB04}, + {0xE43BF, 0xFF038, 0xF7C1A, 0xDE471, 0xd7550, 0xAEB04} +}; + +static const u32 rf_reg_param_for_c_cut_2g[3][RF_REG_NUM_FOR_C_CUT_2G] = { + {0x643BC, 0xFC038, 0x77C1A, 0x41289, 0x01840}, + {0x643BC, 0xFC038, 0x07C1A, 0x41289, 0x01840}, + {0x243BC, 0xFC438, 0x07C1A, 0x4128B, 0x0FC41} +}; + +static const u32 rf_syn_g4_for_c_cut_2g = 0xD1C31 & 0x7FF; + +static const u32 rf_pram_c_5g_int_pa[3][RF_REG_NUM_FOR_C_CUT_5G_INTERNALPA] = { + {0x01a00, 0x40443, 0x00eb5, 0x89bec, 0x94a12, 0x94a12, 0x94a12}, + {0x01800, 0xc0443, 0x00730, 0x896ee, 0x94a52, 0x94a52, 0x94a52}, + {0x01800, 0xc0443, 0x00730, 0x896ee, 0x94a12, 0x94a12, 0x94a12} +}; + +/* [patha+b][reg] */ +static const u32 rf_imr_param_normal[3][MAX_RF_IMR_INDEX_NORMAL] = { + 
/* channels 1-14. */ + { + 0x70000, 0x00ff0, 0x4400f, 0x00ff0, 0x0, 0x0, 0x0, + 0x0, 0x0, 0x64888, 0xe266c, 0x00090, 0x22fff + }, + /* channels 36-64 */ + { + 0x70000, 0x22880, 0x4470f, 0x55880, 0x00070, 0x88000, + 0x0, 0x88080, 0x70000, 0x64a82, 0xe466c, 0x00090, + 0x32c9a + }, + /* channels 100-165 */ + { + 0x70000, 0x44880, 0x4477f, 0x77880, 0x00070, 0x88000, + 0x0, 0x880b0, 0x0, 0x64b82, 0xe466c, 0x00090, 0x32c9a + } +}; + +static const u32 targetchnl_5g[TARGET_CHNL_NUM_5G] = { + 25141, 25116, 25091, 25066, 25041, + 25016, 24991, 24966, 24941, 24917, + 24892, 24867, 24843, 24818, 24794, + 24770, 24765, 24721, 24697, 24672, + 24648, 24624, 24600, 24576, 24552, + 24528, 24504, 24480, 24457, 24433, + 24409, 24385, 24362, 24338, 24315, + 24291, 24268, 24245, 24221, 24198, + 24175, 24151, 24128, 24105, 24082, + 24059, 24036, 24013, 23990, 23967, + 23945, 23922, 23899, 23876, 23854, + 23831, 23809, 23786, 23764, 23741, + 23719, 23697, 23674, 23652, 23630, + 23608, 23586, 23564, 23541, 23519, + 23498, 23476, 23454, 23432, 23410, + 23388, 23367, 23345, 23323, 23302, + 23280, 23259, 23237, 23216, 23194, + 23173, 23152, 23130, 23109, 23088, + 23067, 23046, 23025, 23003, 22982, + 22962, 22941, 22920, 22899, 22878, + 22857, 22837, 22816, 22795, 22775, + 22754, 22733, 22713, 22692, 22672, + 22652, 22631, 22611, 22591, 22570, + 22550, 22530, 22510, 22490, 22469, + 22449, 22429, 22409, 22390, 22370, + 22350, 22336, 22310, 22290, 22271, + 22251, 22231, 22212, 22192, 22173, + 22153, 22134, 22114, 22095, 22075, + 22056, 22037, 22017, 21998, 21979, + 21960, 21941, 21921, 21902, 21883, + 21864, 21845, 21826, 21807, 21789, + 21770, 21751, 21732, 21713, 21695, + 21676, 21657, 21639, 21620, 21602, + 21583, 21565, 21546, 21528, 21509, + 21491, 21473, 21454, 21436, 21418, + 21400, 21381, 21363, 21345, 21327, + 21309, 21291, 21273, 21255, 21237, + 21219, 21201, 21183, 21166, 21148, + 21130, 21112, 21095, 21077, 21059, + 21042, 21024, 21007, 20989, 20972, + 25679, 25653, 25627, 25601, 25575, + 25549, 25523, 25497, 25471, 25446, + 25420, 25394, 25369, 25343, 25318, + 25292, 25267, 25242, 25216, 25191, + 25166 +}; + +/* channel 1~14 */ +static const u32 targetchnl_2g[TARGET_CHNL_NUM_2G] = { + 26084, 26030, 25976, 25923, 25869, 25816, 25764, + 25711, 25658, 25606, 25554, 25502, 25451, 25328 +}; + +u32 rtl92du_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u32 returnvalue, originalvalue, bitshift; + + rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "regaddr(%#x), bitmask(%#x)\n", + regaddr, bitmask); + + if (rtlhal->during_mac1init_radioa) + regaddr |= MAC1_ACCESS_PHY0; + else if (rtlhal->during_mac0init_radiob) + regaddr |= MAC0_ACCESS_PHY1; + + originalvalue = rtl_read_dword(rtlpriv, regaddr); + bitshift = calculate_bit_shift(bitmask); + returnvalue = (originalvalue & bitmask) >> bitshift; + rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, + "BBR MASK=0x%x Addr[0x%x]=0x%x\n", + bitmask, regaddr, originalvalue); + return returnvalue; +} + +void rtl92du_phy_set_bb_reg(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask, u32 data) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u32 originalvalue, bitshift; + + rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, + "regaddr(%#x), bitmask(%#x), data(%#x)\n", + regaddr, bitmask, data); + + if (rtlhal->during_mac1init_radioa) + regaddr |= MAC1_ACCESS_PHY0; + else if (rtlhal->during_mac0init_radiob) + regaddr |= MAC0_ACCESS_PHY1; + + if (bitmask != 
MASKDWORD) {
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ bitshift = calculate_bit_shift(bitmask);
+ data = (originalvalue & (~bitmask)) |
+ ((data << bitshift) & bitmask);
+ }
+
+ rtl_write_dword(rtlpriv, regaddr, data);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "regaddr(%#x), bitmask(%#x), data(%#x)\n",
+ regaddr, bitmask, data);
+}
+
+/* To avoid miswriting Reg0x800 on the 92D */
+static void rtl92du_phy_set_bb_reg_1byte(struct ieee80211_hw *hw,
+ u32 regaddr, u32 bitmask, u32 data)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 originalvalue, bitshift, offset;
+ u8 value;
+
+ /* The bitmask must cover only bit0~bit7, bit8~bit15, bit16~bit23,
+ * or bit24~bit31, i.e. it must stay within a single byte.
+ */
+ bitshift = calculate_bit_shift(bitmask);
+ offset = bitshift / 8;
+
+ originalvalue = rtl_read_dword(rtlpriv, regaddr);
+ data = (originalvalue & (~bitmask)) | ((data << bitshift) & bitmask);
+
+ value = data >> (8 * offset);
+
+ rtl_write_byte(rtlpriv, regaddr + offset, value);
+}
+
+bool rtl92du_phy_mac_config(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 arraylength;
+ const u32 *ptrarray;
+ u32 i;
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Read Rtl819XMACPHY_Array\n");
+
+ arraylength = MAC_2T_ARRAYLENGTH;
+ ptrarray = rtl8192du_mac_2tarray;
+
+ for (i = 0; i < arraylength; i = i + 2)
+ rtl_write_byte(rtlpriv, ptrarray[i], (u8)ptrarray[i + 1]);
+
+ if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY) {
+ /* improve 2-stream TX EVM */
+ /* rtl_write_byte(rtlpriv, 0x14,0x71); */
+ /* AMPDU aggregation number 9 */
+ /* rtl_write_word(rtlpriv, REG_MAX_AGGR_NUM, MAX_AGGR_NUM); */
+ rtl_write_byte(rtlpriv, REG_MAX_AGGR_NUM, 0x0B);
+ } else {
+ /* The 92D needs testing to decide the number. */
+ rtl_write_byte(rtlpriv, REG_MAX_AGGR_NUM, 0x07);
+ }
+
+ return true;
+}
+
+static bool _rtl92du_phy_config_bb(struct ieee80211_hw *hw, u8 configtype)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u16 phy_reg_arraylen, agctab_arraylen = 0;
+ const u32 *agctab_array_table = NULL;
+ const u32 *phy_regarray_table;
+ int i;
+
+ /* Normal chip: MAC0 uses AGC_TAB.txt for both the 2G and 5G bands.
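+ * MAC1 instead loads a band-specific table (the 2G or 5G AGC
+ * array), selected below from the current band.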
*/ + if (rtlhal->interfaceindex == 0) { + agctab_arraylen = AGCTAB_ARRAYLENGTH; + agctab_array_table = rtl8192du_agctab_array; + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + " ===> phy:MAC0, Rtl819XAGCTAB_Array\n"); + } else { + if (rtlhal->current_bandtype == BAND_ON_2_4G) { + agctab_arraylen = AGCTAB_2G_ARRAYLENGTH; + agctab_array_table = rtl8192du_agctab_2garray; + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + " ===> phy:MAC1, Rtl819XAGCTAB_2GArray\n"); + } else { + agctab_arraylen = AGCTAB_5G_ARRAYLENGTH; + agctab_array_table = rtl8192du_agctab_5garray; + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + " ===> phy:MAC1, Rtl819XAGCTAB_5GArray\n"); + } + } + phy_reg_arraylen = PHY_REG_2T_ARRAYLENGTH; + phy_regarray_table = rtl8192du_phy_reg_2tarray; + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + " ===> phy:Rtl819XPHY_REG_Array_PG\n"); + + if (configtype == BASEBAND_CONFIG_PHY_REG) { + for (i = 0; i < phy_reg_arraylen; i = i + 2) { + rtl_addr_delay(phy_regarray_table[i]); + rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD, + phy_regarray_table[i + 1]); + udelay(1); + rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, + "The phy_regarray_table[0] is %x Rtl819XPHY_REGArray[1] is %x\n", + phy_regarray_table[i], + phy_regarray_table[i + 1]); + } + } else if (configtype == BASEBAND_CONFIG_AGC_TAB) { + for (i = 0; i < agctab_arraylen; i = i + 2) { + rtl_set_bbreg(hw, agctab_array_table[i], + MASKDWORD, agctab_array_table[i + 1]); + + /* Add 1us delay between BB/RF register setting. */ + udelay(1); + + rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, + "AGC table %u %u\n", + agctab_array_table[i], + agctab_array_table[i + 1]); + } + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, + "Normal Chip, loaded AGC table\n"); + } + return true; +} + +static bool _rtl92du_phy_config_bb_pg(struct ieee80211_hw *hw, u8 configtype) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + const u32 *phy_regarray_table_pg; + u16 phy_regarray_pg_len; + int i; + + phy_regarray_pg_len = PHY_REG_ARRAY_PG_LENGTH; + phy_regarray_table_pg = rtl8192du_phy_reg_array_pg; + + if (configtype == BASEBAND_CONFIG_PHY_REG) { + for (i = 0; i < phy_regarray_pg_len; i = i + 3) { + rtl_addr_delay(phy_regarray_table_pg[i]); + rtl92d_store_pwrindex_diffrate_offset(hw, + phy_regarray_table_pg[i], + phy_regarray_table_pg[i + 1], + phy_regarray_table_pg[i + 2]); + } + } else { + rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, + "configtype != BaseBand_Config_PHY_REG\n"); + } + return true; +} + +static bool _rtl92du_phy_bb_config(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv); + struct rtl_phy *rtlphy = &rtlpriv->phy; + bool ret; + + rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "==>\n"); + ret = _rtl92du_phy_config_bb(hw, BASEBAND_CONFIG_PHY_REG); + if (!ret) { + pr_err("Write BB Reg Fail!!\n"); + return false; + } + + if (!rtlefuse->autoload_failflag) { + rtlphy->pwrgroup_cnt = 0; + ret = _rtl92du_phy_config_bb_pg(hw, BASEBAND_CONFIG_PHY_REG); + } + if (!ret) { + pr_err("BB_PG Reg Fail!!\n"); + return false; + } + + ret = _rtl92du_phy_config_bb(hw, BASEBAND_CONFIG_AGC_TAB); + if (!ret) { + pr_err("AGC Table Fail\n"); + return false; + } + + rtlphy->cck_high_power = (bool)rtl_get_bbreg(hw, + RFPGA0_XA_HSSIPARAMETER2, + 0x200); + + return true; +} + +bool rtl92du_phy_bb_config(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + bool rtstatus; + u32 regvaldw; + u16 regval; + u8 value; + + rtl92d_phy_init_bb_rf_register_definition(hw); + + regval = rtl_read_word(rtlpriv, 
REG_SYS_FUNC_EN);
+ rtl_write_word(rtlpriv, REG_SYS_FUNC_EN,
+ regval | BIT(13) | BIT(0) | BIT(1));
+
+ rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x83);
+ rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL + 1, 0xdb);
+
+ /* 0x1f[7:6] indicate that the MAC0/MAC1 driver is ready */
+ value = rtl_read_byte(rtlpriv, REG_RF_CTRL);
+ rtl_write_byte(rtlpriv, REG_RF_CTRL, value | RF_EN | RF_RSTB |
+ RF_SDMRSTB);
+
+ value = FEN_BB_GLB_RSTN | FEN_BBRSTB;
+ if (rtlhal->interface == INTF_PCI)
+ value |= FEN_PPLL | FEN_PCIEA | FEN_DIO_PCIE;
+ else if (rtlhal->interface == INTF_USB)
+ value |= FEN_USBA | FEN_USBD;
+ rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, value);
+
+ regvaldw = rtl_read_dword(rtlpriv, RFPGA0_XCD_RFPARAMETER);
+ regvaldw &= ~BIT(31);
+ rtl_write_dword(rtlpriv, RFPGA0_XCD_RFPARAMETER, regvaldw);
+
+ /* To fix MAC loopback mode failure. */
+ rtl_write_byte(rtlpriv, REG_LDOHCI12_CTRL, 0x0f);
+ rtl_write_byte(rtlpriv, 0x15, 0xe9);
+
+ rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
+ if (!(IS_92D_SINGLEPHY(rtlpriv->rtlhal.version)) &&
+ rtlhal->interface == INTF_PCI) {
+ regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0);
+ rtl_write_dword(rtlpriv, REG_LEDCFG0, regvaldw | BIT(23));
+ }
+
+ rtstatus = _rtl92du_phy_bb_config(hw);
+
+ /* Crystal calibration */
+ rtl_set_bbreg(hw, REG_AFE_XTAL_CTRL, 0xf0,
+ rtlpriv->efuse.crystalcap & 0x0f);
+ rtl_set_bbreg(hw, REG_AFE_PLL_CTRL, 0xf0000000,
+ (rtlpriv->efuse.crystalcap & 0xf0) >> 4);
+
+ return rtstatus;
+}
+
+bool rtl92du_phy_rf_config(struct ieee80211_hw *hw)
+{
+ return rtl92du_phy_rf6052_config(hw);
+}
+
+bool rtl92du_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
+ enum rf_content content,
+ enum radio_path rfpath)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u16 radioa_arraylen, radiob_arraylen;
+ const u32 *radioa_array_table;
+ const u32 *radiob_array_table;
+ int i;
+
+ radioa_arraylen = RADIOA_2T_ARRAYLENGTH;
+ radioa_array_table = rtl8192du_radioa_2tarray;
+ radiob_arraylen = RADIOB_2T_ARRAYLENGTH;
+ radiob_array_table = rtl8192du_radiob_2tarray;
+ if (rtlpriv->efuse.internal_pa_5g[0]) {
+ radioa_arraylen = RADIOA_2T_INT_PA_ARRAYLENGTH;
+ radioa_array_table = rtl8192du_radioa_2t_int_paarray;
+ }
+ if (rtlpriv->efuse.internal_pa_5g[1]) {
+ radiob_arraylen = RADIOB_2T_INT_PA_ARRAYLENGTH;
+ radiob_array_table = rtl8192du_radiob_2t_int_paarray;
+ }
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PHY_ConfigRFWithHeaderFile() Radio_A:Rtl819XRadioA_1TArray\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ "PHY_ConfigRFWithHeaderFile() Radio_B:Rtl819XRadioB_1TArray\n");
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "Radio No %x\n", rfpath);
+
+ /* This only happens in DMDP mode, where MAC0 starts on 2.4G and
+ * MAC1 starts on 5G: MAC0 has to set path A of PHY0 & PHY1, or
+ * MAC1 has to set path A of PHY0 & PHY1.
+ */
+ if (content == radiob_txt && rfpath == RF90_PATH_A) {
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD,
+ " ===> although Path A, we load radiob.txt\n");
+ radioa_arraylen = radiob_arraylen;
+ radioa_array_table = radiob_array_table;
+ }
+
+ switch (rfpath) {
+ case RF90_PATH_A:
+ for (i = 0; i < radioa_arraylen; i = i + 2) {
+ rtl_rfreg_delay(hw, rfpath, radioa_array_table[i],
+ RFREG_OFFSET_MASK,
+ radioa_array_table[i + 1]);
+ }
+ break;
+ case RF90_PATH_B:
+ for (i = 0; i < radiob_arraylen; i = i + 2) {
+ rtl_rfreg_delay(hw, rfpath, radiob_array_table[i],
+ RFREG_OFFSET_MASK,
+ radiob_array_table[i + 1]);
+ }
+ break;
+ case RF90_PATH_C:
+ case RF90_PATH_D:
+ pr_err("switch case %#x not processed\n", rfpath);
+ break;
+ }
+
+ return true;
+}
+
+void
rtl92du_phy_set_bw_mode(struct ieee80211_hw *hw, + enum nl80211_channel_type ch_type) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_mac *mac = rtl_mac(rtlpriv); + u8 reg_bw_opmode; + u8 reg_prsr_rsc; + + if (rtlphy->set_bwmode_inprogress) + return; + + if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) { + rtl_dbg(rtlpriv, COMP_ERR, DBG_WARNING, + "FALSE driver sleep or unload\n"); + return; + } + + rtlphy->set_bwmode_inprogress = true; + + rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "Switch to %s bandwidth\n", + rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ? + "20MHz" : "40MHz"); + + reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE); + reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2); + + switch (rtlphy->current_chan_bw) { + case HT_CHANNEL_WIDTH_20: + reg_bw_opmode |= BW_OPMODE_20MHZ; + rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode); + break; + case HT_CHANNEL_WIDTH_20_40: + reg_bw_opmode &= ~BW_OPMODE_20MHZ; + rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode); + + reg_prsr_rsc = (reg_prsr_rsc & 0x90) | + (mac->cur_40_prime_sc << 5); + rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc); + break; + default: + pr_err("unknown bandwidth: %#X\n", + rtlphy->current_chan_bw); + break; + } + + switch (rtlphy->current_chan_bw) { + case HT_CHANNEL_WIDTH_20: + rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD, BRFMOD, 0x0); + rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0); + /* SET BIT10 BIT11 for receive cck */ + rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10) | BIT(11), 3); + break; + case HT_CHANNEL_WIDTH_20_40: + rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD, BRFMOD, 0x1); + rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1); + /* Set Control channel to upper or lower. + * These settings are required only for 40MHz + */ + if (rtlhal->current_bandtype == BAND_ON_2_4G) + rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCKSIDEBAND, + mac->cur_40_prime_sc >> 1); + rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc); + /* SET BIT10 BIT11 for receive cck */ + rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, + BIT(10) | BIT(11), 0); + rtl_set_bbreg(hw, 0x818, BIT(26) | BIT(27), + mac->cur_40_prime_sc == + HAL_PRIME_CHNL_OFFSET_LOWER ? 
2 : 1);
+ break;
+ default:
+ pr_err("unknown bandwidth: %#X\n",
+ rtlphy->current_chan_bw);
+ break;
+ }
+
+ rtl92d_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
+
+ rtlphy->set_bwmode_inprogress = false;
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n");
+}
+
+static void _rtl92du_phy_stop_trx_before_changeband(struct ieee80211_hw *hw)
+{
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD, BCCKEN | BOFDMEN, 0);
+ rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x00);
+ rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x0);
+}
+
+static void rtl92du_phy_switch_wirelessband(struct ieee80211_hw *hw, u8 band)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+ u16 basic_rates;
+ u32 reg_mac;
+ u8 value8;
+
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "==>\n");
+ rtlhal->bandset = band;
+ rtlhal->current_bandtype = band;
+ if (IS_92D_SINGLEPHY(rtlhal->version))
+ rtlhal->bandset = BAND_ON_BOTH;
+
+ /* stop Rx/Tx */
+ _rtl92du_phy_stop_trx_before_changeband(hw);
+
+ /* reconfigure BB/RF according to the wireless mode */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G)
+ /* BB & RF Config */
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "====>2.4G\n");
+ else
+ /* 5G band */
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_DMESG, "====>5G\n");
+
+ if (rtlhal->interfaceindex == 1)
+ _rtl92du_phy_config_bb(hw, BASEBAND_CONFIG_AGC_TAB);
+
+ rtl92du_update_bbrf_configuration(hw);
+
+ basic_rates = RRSR_6M | RRSR_12M | RRSR_24M;
+ if (rtlhal->current_bandtype == BAND_ON_2_4G)
+ basic_rates |= RRSR_1M | RRSR_2M | RRSR_5_5M | RRSR_11M;
+ rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
+ (u8 *)&basic_rates);
+
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD, BCCKEN | BOFDMEN, 0x3);
+
+ /* 20M BW. */
+ /* rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1); */
+ rtlhal->reloadtxpowerindex = true;
+
+ reg_mac = rtlhal->interfaceindex == 0 ? REG_MAC0 : REG_MAC1;
+
+ /* let the fw know the band status: 0x81[1]/0x53[1] = 0: 5G, 1: 2G */
+ if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ value8 = rtl_read_byte(rtlpriv, reg_mac);
+ value8 |= BIT(1);
+ rtl_write_byte(rtlpriv, reg_mac, value8);
+ } else {
+ value8 = rtl_read_byte(rtlpriv, reg_mac);
+ value8 &= ~BIT(1);
+ rtl_write_byte(rtlpriv, reg_mac, value8);
+ }
+ mdelay(1);
+ rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<==Switch Band OK\n");
+}
+
+static void _rtl92du_phy_reload_imr_setting(struct ieee80211_hw *hw,
+ u8 channel, u8 rfpath)
+{
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u8 group, i;
+
+ if (rtlusb->udev->speed != USB_SPEED_HIGH)
+ return;
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>path %d\n", rfpath);
+ if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) {
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>5G\n");
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD,
+ BOFDMEN | BCCKEN, 0);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0xf);
+
+ /* fc area 0xd2c */
+ if (channel >= 149)
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(13) |
+ BIT(14), 2);
+ else
+ rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(13) |
+ BIT(14), 1);
+
+ /* group 0 is reserved for channels 1-14 */
+ group = channel <= 64 ? 1 : 2;
+ for (i = 0; i < MAX_RF_IMR_INDEX_NORMAL; i++)
+ rtl_set_rfreg(hw, (enum radio_path)rfpath,
+ rf_reg_for_5g_swchnl_normal[i],
+ RFREG_OFFSET_MASK,
+ rf_imr_param_normal[group][i]);
+
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0x00f00000, 0);
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD,
+ BOFDMEN | BCCKEN, 3);
+ } else {
+ /* G band.
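+ * The 2.4 GHz IMR values (row 0 of rf_imr_param_normal) are
+ * loaded only once; load_imrandiqk_setting_for2g presumably
+ * tracks that.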
*/
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Load RF IMR parameters for G band. IMR already set %d\n",
+ rtlpriv->rtlhal.load_imrandiqk_setting_for2g);
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>2.4G\n");
+
+ if (!rtlpriv->rtlhal.load_imrandiqk_setting_for2g) {
+ rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+ "Load RF IMR parameters for G band. %d\n",
+ rfpath);
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD,
+ BOFDMEN | BCCKEN, 0);
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4,
+ 0x00f00000, 0xf);
+
+ for (i = 0; i < MAX_RF_IMR_INDEX_NORMAL; i++) {
+ rtl_set_rfreg(hw, (enum radio_path)rfpath,
+ rf_reg_for_5g_swchnl_normal[i],
+ RFREG_OFFSET_MASK,
+ rf_imr_param_normal[0][i]);
+ }
+
+ rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4,
+ 0x00f00000, 0);
+ rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD,
+ BOFDMEN | BCCKEN, 3);
+ }
+ }
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+}
+
+static void _rtl92du_phy_switch_rf_setting(struct ieee80211_hw *hw, u8 channel)
+{
+ struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u8 path = rtlhal->current_bandtype == BAND_ON_5G ? RF90_PATH_A
+ : RF90_PATH_B;
+ u32 u4regvalue, mask = 0x1C000, value = 0, u4tmp, u4tmp2;
+ bool need_pwr_down = false, internal_pa = false;
+ u32 regb30 = rtl_get_bbreg(hw, 0xb30, BIT(27));
+ u8 index = 0, i, rfpath;
+
+ if (rtlusb->udev->speed != USB_SPEED_HIGH)
+ return;
+
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>\n");
+ /* configure path A for 5G */
+ if (rtlhal->current_bandtype == BAND_ON_5G) {
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>5G\n");
+ u4tmp = rtlpriv->curveindex_5g[channel - 1];
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "ver 1 set RF-A, 5G, 0x28 = 0x%x !!\n", u4tmp);
+
+ for (i = 0; i < RF_CHNL_NUM_5G; i++) {
+ if (channel == rf_chnl_5g[i] && channel <= 140)
+ index = 0;
+ }
+ for (i = 0; i < RF_CHNL_NUM_5G_40M; i++) {
+ if (channel == rf_chnl_5g_40m[i] && channel <= 140)
+ index = 1;
+ }
+ if (channel == 149 || channel == 155 || channel == 161)
+ index = 2;
+ else if (channel == 151 || channel == 153 || channel == 163 ||
+ channel == 165)
+ index = 3;
+ else if (channel == 157 || channel == 159)
+ index = 4;
+
+ if (rtlhal->macphymode == DUALMAC_DUALPHY &&
+ rtlhal->interfaceindex == 1) {
+ need_pwr_down = rtl92du_phy_enable_anotherphy(hw, false);
+ rtlhal->during_mac1init_radioa = true;
+ /* assume this case does not occur */
+ if (need_pwr_down)
+ rtl92d_phy_enable_rf_env(hw, path,
+ &u4regvalue);
+ }
+
+ /* DMDP: if band = 5G, MAC0 needs to set PHY1 when regB30[27]=1 */
+ if (regb30 && rtlhal->interfaceindex == 0) {
+ need_pwr_down = rtl92du_phy_enable_anotherphy(hw, true);
+ rtlhal->during_mac0init_radiob = true;
+ if (need_pwr_down)
+ rtl92d_phy_enable_rf_env(hw, path,
+ &u4regvalue);
+ }
+
+ for (i = 0; i < RF_REG_NUM_FOR_C_CUT_5G; i++) {
+ if (i == 0 && rtlhal->macphymode == DUALMAC_DUALPHY) {
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i],
+ RFREG_OFFSET_MASK, 0xE439D);
+ } else if (rf_reg_for_c_cut_5g[i] == RF_SYN_G4) {
+ u4tmp2 = (rf_reg_pram_c_5g[index][i] &
+ 0x7FF) | (u4tmp << 11);
+ if (channel == 36)
+ u4tmp2 &= ~(BIT(7) | BIT(6));
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i],
+ RFREG_OFFSET_MASK, u4tmp2);
+ } else {
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i],
+ RFREG_OFFSET_MASK,
+ rf_reg_pram_c_5g[index][i]);
+ }
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "offset 0x%x value 0x%x path %d index %d readback 0x%x\n",
+ rf_reg_for_c_cut_5g[i],
+ rf_reg_pram_c_5g[index][i],
+ path, index,
+ rtl_get_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_5g[i],
+ RFREG_OFFSET_MASK));
+ }
+ if (rtlhal->macphymode == DUALMAC_DUALPHY &&
+ rtlhal->interfaceindex == 1) {
+ if (need_pwr_down)
+ rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
+
+ rtl92du_phy_powerdown_anotherphy(hw, false);
+ }
+
+ if (regb30 && rtlhal->interfaceindex == 0) {
+ if (need_pwr_down)
+ rtl92d_phy_restore_rf_env(hw, path, &u4regvalue);
+
+ rtl92du_phy_powerdown_anotherphy(hw, true);
+ }
+
+ if (channel < 149)
+ value = 0x07;
+ else if (channel >= 149)
+ value = 0x02;
+ if (channel >= 36 && channel <= 64)
+ index = 0;
+ else if (channel >= 100 && channel <= 140)
+ index = 1;
+ else
+ index = 2;
+
+ for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
+ rfpath++) {
+ if (rtlhal->macphymode == DUALMAC_DUALPHY &&
+ rtlhal->interfaceindex == 1) /* MAC 1 5G */
+ internal_pa = rtlpriv->efuse.internal_pa_5g[1];
+ else
+ internal_pa =
+ rtlpriv->efuse.internal_pa_5g[rfpath];
+
+ if (internal_pa) {
+ for (i = 0;
+ i < RF_REG_NUM_FOR_C_CUT_5G_INTERNALPA;
+ i++) {
+ if (rf_for_c_cut_5g_internal_pa[i] == 0x03 &&
+ channel >= 36 && channel <= 64)
+ rtl_set_rfreg(hw, rfpath,
+ rf_for_c_cut_5g_internal_pa[i],
+ RFREG_OFFSET_MASK,
+ 0x7bdef);
+ else
+ rtl_set_rfreg(hw, rfpath,
+ rf_for_c_cut_5g_internal_pa[i],
+ RFREG_OFFSET_MASK,
+ rf_pram_c_5g_int_pa[index][i]);
+ rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD,
+ "offset 0x%x value 0x%x path %d index %d\n",
+ rf_for_c_cut_5g_internal_pa[i],
+ rf_pram_c_5g_int_pa[index][i],
+ rfpath, index);
+ }
+ } else {
+ rtl_set_rfreg(hw, (enum radio_path)rfpath, RF_TXPA_AG,
+ mask, value);
+ }
+ }
+ } else if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+ rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "====>2.4G\n");
+ u4tmp = rtlpriv->curveindex_2g[channel - 1];
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n", u4tmp);
+
+ if (channel == 1 || channel == 2 || channel == 4 ||
+ channel == 9 || channel == 10 || channel == 11 ||
+ channel == 12)
+ index = 0;
+ else if (channel == 3 || channel == 13 || channel == 14)
+ index = 1;
+ else if (channel >= 5 && channel <= 8)
+ index = 2;
+
+ if (rtlhal->macphymode == DUALMAC_DUALPHY) {
+ path = RF90_PATH_A;
+ if (rtlhal->interfaceindex == 0) {
+ need_pwr_down =
+ rtl92du_phy_enable_anotherphy(hw, true);
+ rtlhal->during_mac0init_radiob = true;
+
+ if (need_pwr_down)
+ rtl92d_phy_enable_rf_env(hw, path,
+ &u4regvalue);
+ }
+
+ /* DMDP: if band = 2G, MAC1 needs to set PHY0 when regB30[27]=1 */
+ if (regb30 && rtlhal->interfaceindex == 1) {
+ need_pwr_down =
+ rtl92du_phy_enable_anotherphy(hw, false);
+ rtlhal->during_mac1init_radioa = true;
+
+ if (need_pwr_down)
+ rtl92d_phy_enable_rf_env(hw, path,
+ &u4regvalue);
+ }
+ }
+
+ for (i = 0; i < RF_REG_NUM_FOR_C_CUT_2G; i++) {
+ if (rf_reg_for_c_cut_2g[i] == RF_SYN_G7)
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_2g[i],
+ RFREG_OFFSET_MASK,
+ rf_reg_param_for_c_cut_2g[index][i] |
+ BIT(17));
+ else
+ rtl_set_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_2g[i],
+ RFREG_OFFSET_MASK,
+ rf_reg_param_for_c_cut_2g
+ [index][i]);
+
+ rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE,
+ "offset 0x%x value 0x%x mask 0x%x path %d index %d readback 0x%x\n",
+ rf_reg_for_c_cut_2g[i],
+ rf_reg_param_for_c_cut_2g[index][i],
+ rf_reg_mask_for_c_cut_2g[i], path, index,
+ rtl_get_rfreg(hw, (enum radio_path)path,
+ rf_reg_for_c_cut_2g[i],
+ RFREG_OFFSET_MASK));
+ }
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "cosa ver 3 set
RF-B, 2G, 0x28 = 0x%x !!\n", + rf_syn_g4_for_c_cut_2g | (u4tmp << 11)); + + rtl_set_rfreg(hw, (enum radio_path)path, RF_SYN_G4, + RFREG_OFFSET_MASK, + rf_syn_g4_for_c_cut_2g | (u4tmp << 11)); + + if (rtlhal->macphymode == DUALMAC_DUALPHY && + rtlhal->interfaceindex == 0) { + if (need_pwr_down) + rtl92d_phy_restore_rf_env(hw, path, &u4regvalue); + + rtl92du_phy_powerdown_anotherphy(hw, true); + } + + if (regb30 && rtlhal->interfaceindex == 1) { + if (need_pwr_down) + rtl92d_phy_restore_rf_env(hw, path, &u4regvalue); + + rtl92du_phy_powerdown_anotherphy(hw, false); + } + } + rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n"); +} + +/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */ +static u8 _rtl92du_phy_patha_iqk(struct ieee80211_hw *hw, bool configpathb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 regeac, rege94, rege9c, regea4; + u8 result = 0; + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n"); + + if (rtlhal->interfaceindex == 0) { + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x10008c1f); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x10008c1f); + } else { + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x10008c22); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x10008c22); + } + rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x82140102); + rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD, + configpathb ? 0x28160202 : 0x28160502); + /* path-B IQK setting */ + if (configpathb) { + rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x10008c22); + rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x10008c22); + rtl_set_bbreg(hw, RTX_IQK_PI_B, MASKDWORD, 0x82140102); + rtl_set_bbreg(hw, RRX_IQK_PI_B, MASKDWORD, 0x28160206); + } + + /* LO calibration setting */ + RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n"); + rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x00462911); + + /* One shot, path A LOK & IQK */ + RTPRINT(rtlpriv, FINIT, INIT_IQK, "One shot, path A LOK & IQK!\n"); + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf9000000); + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000); + + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Delay %d ms for One shot, path A LOK & IQK\n", + IQK_DELAY_TIME); + mdelay(IQK_DELAY_TIME); + + /* Check failed */ + regeac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac); + rege94 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A, MASKDWORD); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe94 = 0x%x\n", rege94); + rege9c = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A, MASKDWORD); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe9c = 0x%x\n", rege9c); + regea4 = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2, MASKDWORD); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xea4 = 0x%x\n", regea4); + + if (!(regeac & BIT(28)) && + (((rege94 & 0x03FF0000) >> 16) != 0x142) && + (((rege9c & 0x03FF0000) >> 16) != 0x42)) + result |= 0x01; + else /* if Tx not OK, ignore Rx */ + return result; + + /* if Tx is OK, check whether Rx is OK */ + if (!(regeac & BIT(27)) && + (((regea4 & 0x03FF0000) >> 16) != 0x132) && + (((regeac & 0x03FF0000) >> 16) != 0x36)) + result |= 0x02; + else + RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A Rx IQK fail!!\n"); + + return result; +} + +/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */ +static u8 _rtl92du_phy_patha_iqk_5g_normal(struct ieee80211_hw *hw, + bool configpathb) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u32 TXOKBIT = BIT(28), RXOKBIT = BIT(27); + u32 regeac, rege94, rege9c, 
regea4; + u8 timeout = 20, timecount = 0; + u8 retrycount = 2; + u8 result = 0; + u8 i; + + if (rtlhal->interfaceindex == 1) { /* PHY1 */ + TXOKBIT = BIT(31); + RXOKBIT = BIT(30); + } + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A IQK setting!\n"); + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x18008c1f); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x18008c1f); + rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x82140307); + rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD, 0x68160960); + /* path-B IQK setting */ + if (configpathb) { + rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x18008c2f); + rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x18008c2f); + rtl_set_bbreg(hw, RTX_IQK_PI_B, MASKDWORD, 0x82110000); + rtl_set_bbreg(hw, RRX_IQK_PI_B, MASKDWORD, 0x68110000); + } + + /* LO calibration setting */ + RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n"); + rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x00462911); + + /* path-A PA on */ + rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD, 0x07000f60); + rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, MASKDWORD, 0x66e60e30); + + for (i = 0; i < retrycount; i++) { + /* One shot, path A LOK & IQK */ + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "One shot, path A LOK & IQK!\n"); + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf9000000); + rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000); + + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Delay %d ms for One shot, path A LOK & IQK.\n", + IQK_DELAY_TIME); + mdelay(IQK_DELAY_TIME * 10); + + while (timecount < timeout && + rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, BIT(26)) == 0) { + udelay(IQK_DELAY_TIME * 1000 * 2); + timecount++; + } + + timecount = 0; + while (timecount < timeout && + rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2, MASK_IQK_RESULT) == 0) { + udelay(IQK_DELAY_TIME * 1000 * 2); + timecount++; + } + + /* Check failed */ + regeac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac); + rege94 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A, MASKDWORD); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe94 = 0x%x\n", rege94); + rege9c = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A, MASKDWORD); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe9c = 0x%x\n", rege9c); + regea4 = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2, MASKDWORD); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xea4 = 0x%x\n", regea4); + + if (!(regeac & TXOKBIT) && + (((rege94 & 0x03FF0000) >> 16) != 0x142)) { + result |= 0x01; + } else { /* if Tx not OK, ignore Rx */ + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Path A Tx IQK fail!!\n"); + continue; + } + + /* if Tx is OK, check whether Rx is OK */ + if (!(regeac & RXOKBIT) && + (((regea4 & 0x03FF0000) >> 16) != 0x132)) { + result |= 0x02; + break; + } + RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A Rx IQK fail!!\n"); + } + + /* path A PA off */ + rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD, + rtlphy->iqk_bb_backup[0]); + rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, MASKDWORD, + rtlphy->iqk_bb_backup[1]); + + if (!(result & 0x01)) /* Tx IQK fail */ + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x19008c00); + + if (!(result & 0x02)) { /* Rx IQK fail */ + rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, MASKDWORD, 0x40000100); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x19008c00); + + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Path A Rx IQK fail!! 
0xe34 = %#x\n",
+ rtl_get_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD));
+ }
+
+ return result;
+}
+
+/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+static u8 _rtl92du_phy_pathb_iqk(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ u32 regeac, regeb4, regebc, regec4, regecc;
+ u8 result = 0;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "One shot, path B LOK & IQK!\n");
+ rtl_set_bbreg(hw, RIQK_AGC_CONT, MASKDWORD, 0x00000002);
+ rtl_set_bbreg(hw, RIQK_AGC_CONT, MASKDWORD, 0x00000000);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Delay %d ms for One shot, path B LOK & IQK\n", IQK_DELAY_TIME);
+ mdelay(IQK_DELAY_TIME);
+
+ /* Check failed */
+ regeac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac);
+ regeb4 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeb4 = 0x%x\n", regeb4);
+ regebc = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xebc = 0x%x\n", regebc);
+ regec4 = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_B_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xec4 = 0x%x\n", regec4);
+ regecc = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_B_2, MASKDWORD);
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xecc = 0x%x\n", regecc);
+
+ if (!(regeac & BIT(31)) &&
+ (((regeb4 & 0x03FF0000) >> 16) != 0x142) &&
+ (((regebc & 0x03FF0000) >> 16) != 0x42))
+ result |= 0x01;
+ else
+ return result;
+
+ if (!(regeac & BIT(30)) &&
+ (((regec4 & 0x03FF0000) >> 16) != 0x132) &&
+ (((regecc & 0x03FF0000) >> 16) != 0x36))
+ result |= 0x02;
+ else
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B Rx IQK fail!!\n");
+
+ return result;
+}
+
+/* bit0 = 1 => Tx OK, bit1 = 1 => Rx OK */
+static u8 _rtl92du_phy_pathb_iqk_5g_normal(struct ieee80211_hw *hw)
+{
+ struct rtl_priv *rtlpriv = rtl_priv(hw);
+ struct rtl_phy *rtlphy = &rtlpriv->phy;
+ u32 regeac, regeb4, regebc, regec4, regecc;
+ u8 timeout = 20, timecount = 0;
+ u8 retrycount = 2;
+ u8 result = 0;
+ u8 i;
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-B IQK setting!\n");
+ rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x18008c1f);
+ rtl_set_bbreg(hw, RTX_IQK_PI_A, MASKDWORD, 0x82110000);
+ rtl_set_bbreg(hw, RRX_IQK_PI_A, MASKDWORD, 0x68110000);
+
+ /* path-B IQK setting */
+ rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x18008c2f);
+ rtl_set_bbreg(hw, RTX_IQK_PI_B, MASKDWORD, 0x82140307);
+ rtl_set_bbreg(hw, RRX_IQK_PI_B, MASKDWORD, 0x68160960);
+
+ /* LO calibration setting */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK, "LO calibration setting!\n");
+ rtl_set_bbreg(hw, RIQK_AGC_RSP, MASKDWORD, 0x00462911);
+
+ /* path-B PA on */
+ rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD, 0x0f600700);
+ rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, MASKDWORD, 0x061f0d30);
+
+ for (i = 0; i < retrycount; i++) {
+ /* One shot, path B LOK & IQK */
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "One shot, path B LOK & IQK!\n");
+ rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xfa000000);
+ rtl_set_bbreg(hw, RIQK_AGC_PTS, MASKDWORD, 0xf8000000);
+
+ RTPRINT(rtlpriv, FINIT, INIT_IQK,
+ "Delay %d ms for One shot, path B LOK & IQK.\n", 10);
+ mdelay(IQK_DELAY_TIME * 10);
+
+ while (timecount < timeout &&
+ rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, BIT(29)) == 0) {
+ udelay(IQK_DELAY_TIME * 1000 * 2);
+ timecount++;
+ }
+
+ timecount = 0;
+ while (timecount < timeout &&
+ rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_B_2, MASK_IQK_RESULT) ==
0) { + udelay(IQK_DELAY_TIME * 1000 * 2); + timecount++; + } + + /* Check failed */ + regeac = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, MASKDWORD); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeac = 0x%x\n", regeac); + regeb4 = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B, MASKDWORD); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xeb4 = 0x%x\n", regeb4); + regebc = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B, MASKDWORD); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xebc = 0x%x\n", regebc); + regec4 = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_B_2, MASKDWORD); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xec4 = 0x%x\n", regec4); + regecc = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_B_2, MASKDWORD); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xecc = 0x%x\n", regecc); + + if (!(regeac & BIT(31)) && + (((regeb4 & 0x03FF0000) >> 16) != 0x142)) + result |= 0x01; + else + continue; + + if (!(regeac & BIT(30)) && + (((regec4 & 0x03FF0000) >> 16) != 0x132)) { + result |= 0x02; + break; + } + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B Rx IQK fail!!\n"); + } + + /* path B PA off */ + rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, MASKDWORD, + rtlphy->iqk_bb_backup[0]); + rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, MASKDWORD, + rtlphy->iqk_bb_backup[2]); + + if (!(result & 0x01)) + rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x19008c00); + + if (!(result & 0x02)) { + rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, MASKDWORD, 0x40000100); + rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x19008c00); + + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Path B Rx IQK fail!! 0xe54 = %#x\n", + rtl_get_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD)); + } + + return result; +} + +static void _rtl92du_phy_reload_adda_registers(struct ieee80211_hw *hw, + const u32 *adda_reg, + u32 *adda_backup, u32 regnum) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Reload ADDA power saving parameters !\n"); + for (i = 0; i < regnum; i++) { + /* path-A/B BB to initial gain */ + if (adda_reg[i] == ROFDM0_XAAGCCORE1 || + adda_reg[i] == ROFDM0_XBAGCCORE1) + rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, 0x50); + + rtl_set_bbreg(hw, adda_reg[i], MASKDWORD, adda_backup[i]); + } +} + +static void _rtl92du_phy_reload_mac_registers(struct ieee80211_hw *hw, + const u32 *macreg, u32 *macbackup) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 i; + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "Reload MAC parameters !\n"); + for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++) + rtl_write_byte(rtlpriv, macreg[i], (u8)macbackup[i]); + rtl_write_dword(rtlpriv, macreg[i], macbackup[i]); +} + +static void _rtl92du_phy_patha_standby(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path-A standby mode!\n"); + + rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x0); + rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, MASKDWORD, 0x00010000); + rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x808000); +} + +static void _rtl92du_phy_pimode_switch(struct ieee80211_hw *hw, bool pi_mode) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 mode; + + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "BB Switch to %s mode!\n", pi_mode ? "PI" : "SI"); + mode = pi_mode ? 
0x01000100 : 0x01000000; + rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1, MASKDWORD, mode); + rtl_set_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1, MASKDWORD, mode); +} + +static void _rtl92du_phy_iq_calibrate(struct ieee80211_hw *hw, long result[][8], + u8 t, bool is2t) +{ + static const u32 adda_reg[IQK_ADDA_REG_NUM] = { + RFPGA0_XCD_SWITCHCONTROL, RBLUE_TOOTH, RRX_WAIT_CCA, + RTX_CCK_RFON, RTX_CCK_BBON, RTX_OFDM_RFON, RTX_OFDM_BBON, + RTX_TO_RX, RTX_TO_TX, RRX_CCK, RRX_OFDM, RRX_WAIT_RIFS, + RRX_TO_RX, RSTANDBY, RSLEEP, RPMPD_ANAEN + }; + static const u32 iqk_mac_reg[IQK_MAC_REG_NUM] = { + REG_TXPAUSE, REG_BCN_CTRL, REG_BCN_CTRL_1, REG_GPIO_MUXCFG + }; + static const u32 iqk_bb_reg[IQK_BB_REG_NUM] = { + RFPGA0_XAB_RFINTERFACESW, RFPGA0_XA_RFINTERFACEOE, + RFPGA0_XB_RFINTERFACEOE, ROFDM0_TRMUXPAR, + RFPGA0_XCD_RFINTERFACESW, ROFDM0_TRXPATHENABLE, + RFPGA0_RFMOD, RFPGA0_ANALOGPARAMETER4, + ROFDM0_XAAGCCORE1, ROFDM0_XBAGCCORE1 + }; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + const u32 retrycount = 2; + u8 patha_ok, pathb_ok; + u32 bbvalue; + u32 i; + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK for 2.4G :Start!!!\n"); + if (t == 0) { + bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, MASKDWORD); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "==>0x%08x\n", bbvalue); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n", + is2t ? "2T2R" : "1T1R"); + + /* Save ADDA parameters, turn Path A ADDA on */ + rtl92d_phy_save_adda_registers(hw, adda_reg, + rtlphy->adda_backup, + IQK_ADDA_REG_NUM); + rtl92d_phy_save_mac_registers(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); + rtl92d_phy_save_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, + IQK_BB_REG_NUM); + } + rtl92d_phy_path_adda_on(hw, adda_reg, true, is2t); + + rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0x01017038); + + if (t == 0) + rtlphy->rfpi_enable = (u8)rtl_get_bbreg(hw, + RFPGA0_XA_HSSIPARAMETER1, BIT(8)); + + /* Switch BB to PI mode to do IQ Calibration. 
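+ * PI (parallel interface) access is presumably required for the
+ * direct RF reads and writes done during IQK; SI (serial
+ * interface) is the normal mode, restored afterwards.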
*/ + if (!rtlphy->rfpi_enable) + _rtl92du_phy_pimode_switch(hw, true); + + rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD, BCCKEN, 0x00); + rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKDWORD, 0x03a05600); + rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, MASKDWORD, 0x000800e4); + rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, MASKDWORD, 0x22204000); + rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xf00000, 0x0f); + if (is2t) { + rtl_set_bbreg(hw, RFPGA0_XA_LSSIPARAMETER, MASKDWORD, + 0x00010000); + rtl_set_bbreg(hw, RFPGA0_XB_LSSIPARAMETER, MASKDWORD, + 0x00010000); + } + + /* MAC settings */ + rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); + + /* Page B init */ + rtl_set_bbreg(hw, RCONFIG_ANTA, MASKDWORD, 0x0f600000); + if (is2t) + rtl_set_bbreg(hw, RCONFIG_ANTB, MASKDWORD, 0x0f600000); + + /* IQ calibration setting */ + RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK setting!\n"); + rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x808000); + rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, 0x01007c00); + rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800); + + for (i = 0; i < retrycount; i++) { + patha_ok = _rtl92du_phy_patha_iqk(hw, is2t); + if (patha_ok == 0x03) { + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Path A IQK Success!!\n"); + result[t][0] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A, + MASK_IQK_RESULT); + result[t][1] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A, + MASK_IQK_RESULT); + result[t][2] = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2, + MASK_IQK_RESULT); + result[t][3] = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, + MASK_IQK_RESULT); + break; + } else if (i == (retrycount - 1) && patha_ok == 0x01) { + /* Tx IQK OK */ + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Path A IQK Only Tx Success!!\n"); + + result[t][0] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A, + MASK_IQK_RESULT); + result[t][1] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A, + MASK_IQK_RESULT); + } + } + if (patha_ok == 0x00) + RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK failed!!\n"); + + if (is2t) { + _rtl92du_phy_patha_standby(hw); + /* Turn Path B ADDA on */ + rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t); + + for (i = 0; i < retrycount; i++) { + pathb_ok = _rtl92du_phy_pathb_iqk(hw); + if (pathb_ok == 0x03) { + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Path B IQK Success!!\n"); + result[t][4] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B, + MASK_IQK_RESULT); + result[t][5] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B, + MASK_IQK_RESULT); + result[t][6] = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_B_2, + MASK_IQK_RESULT); + result[t][7] = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_B_2, + MASK_IQK_RESULT); + break; + } else if (i == (retrycount - 1) && pathb_ok == 0x01) { + /* Tx IQK OK */ + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Path B Only Tx IQK Success!!\n"); + result[t][4] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B, + MASK_IQK_RESULT); + result[t][5] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B, + MASK_IQK_RESULT); + } + } + if (pathb_ok == 0x00) + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Path B IQK failed!!\n"); + } + + /* Back to BB mode, load original value */ + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "IQK:Back to BB mode, load original value!\n"); + + rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x000000); + + if (t != 0) { + /* Switch back BB to SI mode after finish IQ Calibration. 
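+ * This is done only if the RF started out in SI mode, i.e. when
+ * rfpi_enable is 0.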
*/ + if (!rtlphy->rfpi_enable) + _rtl92du_phy_pimode_switch(hw, false); + + /* Reload ADDA power saving parameters */ + _rtl92du_phy_reload_adda_registers(hw, adda_reg, + rtlphy->adda_backup, + IQK_ADDA_REG_NUM); + + /* Reload MAC parameters */ + _rtl92du_phy_reload_mac_registers(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); + + if (is2t) + _rtl92du_phy_reload_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, + IQK_BB_REG_NUM); + else + _rtl92du_phy_reload_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, + IQK_BB_REG_NUM - 1); + + /* load 0xe30 IQC default value */ + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x01008c00); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x01008c00); + } + RTPRINT(rtlpriv, FINIT, INIT_IQK, "<==\n"); +} + +static void _rtl92du_phy_iq_calibrate_5g_normal(struct ieee80211_hw *hw, + long result[][8], u8 t) +{ + static const u32 adda_reg[IQK_ADDA_REG_NUM] = { + RFPGA0_XCD_SWITCHCONTROL, RBLUE_TOOTH, RRX_WAIT_CCA, + RTX_CCK_RFON, RTX_CCK_BBON, RTX_OFDM_RFON, RTX_OFDM_BBON, + RTX_TO_RX, RTX_TO_TX, RRX_CCK, RRX_OFDM, RRX_WAIT_RIFS, + RRX_TO_RX, RSTANDBY, RSLEEP, RPMPD_ANAEN + }; + static const u32 iqk_mac_reg[IQK_MAC_REG_NUM] = { + REG_TXPAUSE, REG_BCN_CTRL, REG_BCN_CTRL_1, REG_GPIO_MUXCFG + }; + static const u32 iqk_bb_reg[IQK_BB_REG_NUM] = { + RFPGA0_XAB_RFINTERFACESW, RFPGA0_XA_RFINTERFACEOE, + RFPGA0_XB_RFINTERFACEOE, ROFDM0_TRMUXPAR, + RFPGA0_XCD_RFINTERFACESW, ROFDM0_TRXPATHENABLE, + RFPGA0_RFMOD, RFPGA0_ANALOGPARAMETER4, + ROFDM0_XAAGCCORE1, ROFDM0_XBAGCCORE1 + }; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_hal *rtlhal = &rtlpriv->rtlhal; + bool is2t = IS_92D_SINGLEPHY(rtlhal->version); + u8 patha_ok, pathb_ok; + bool rf_path_div; + u32 bbvalue; + + /* Note: IQ calibration must be performed after loading + * PHY_REG.txt , and radio_a, radio_b.txt + */ + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK for 5G NORMAL:Start!!!\n"); + + mdelay(IQK_DELAY_TIME * 20); + + if (t == 0) { + bbvalue = rtl_get_bbreg(hw, RFPGA0_RFMOD, MASKDWORD); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "==>0x%08x\n", bbvalue); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQ Calibration for %s\n", + is2t ? "2T2R" : "1T1R"); + + /* Save ADDA parameters, turn Path A ADDA on */ + rtl92d_phy_save_adda_registers(hw, adda_reg, + rtlphy->adda_backup, + IQK_ADDA_REG_NUM); + rtl92d_phy_save_mac_registers(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); + if (is2t) + rtl92d_phy_save_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, + IQK_BB_REG_NUM); + else + rtl92d_phy_save_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, + IQK_BB_REG_NUM - 1); + } + + rf_path_div = rtl_get_bbreg(hw, 0xb30, BIT(27)); + rtl92d_phy_path_adda_on(hw, adda_reg, !rf_path_div, is2t); + + if (t == 0) + rtlphy->rfpi_enable = rtl_get_bbreg(hw, + RFPGA0_XA_HSSIPARAMETER1, + BIT(8)); + + /* Switch BB to PI mode to do IQ Calibration. 
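+	 * As in _rtl92du_phy_iq_calibrate() above: run the calibration
+	 * with the BB in PI mode and restore SI mode afterwards if needed.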
*/ + if (!rtlphy->rfpi_enable) + _rtl92du_phy_pimode_switch(hw, true); + + /* MAC settings */ + rtl92d_phy_mac_setting_calibration(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); + + rtl92du_phy_set_bb_reg_1byte(hw, RFPGA0_RFMOD, BCCKEN, 0x00); + rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKDWORD, 0x03a05600); + rtl_set_bbreg(hw, ROFDM0_TRMUXPAR, MASKDWORD, 0x000800e4); + rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW, MASKDWORD, 0x22208000); + rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xf00000, 0x0f); + + /* Page A AP setting for IQK */ + rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0); + rtl_set_bbreg(hw, RCONFIG_ANTA, MASKDWORD, 0x20000000); + if (is2t) { + /* Page B AP setting for IQK */ + rtl_set_bbreg(hw, RPDP_ANTB, MASKDWORD, 0); + rtl_set_bbreg(hw, RCONFIG_ANTB, MASKDWORD, 0x20000000); + } + + /* IQ calibration setting */ + RTPRINT(rtlpriv, FINIT, INIT_IQK, "IQK setting!\n"); + rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x808000); + rtl_set_bbreg(hw, RTX_IQK, MASKDWORD, 0x10007c00); + rtl_set_bbreg(hw, RRX_IQK, MASKDWORD, 0x01004800); + + patha_ok = _rtl92du_phy_patha_iqk_5g_normal(hw, is2t); + if (patha_ok == 0x03) { + RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK Success!!\n"); + result[t][0] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A, + MASK_IQK_RESULT); + result[t][1] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A, + MASK_IQK_RESULT); + result[t][2] = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_A_2, + MASK_IQK_RESULT); + result[t][3] = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_A_2, + MASK_IQK_RESULT); + } else if (patha_ok == 0x01) { /* Tx IQK OK */ + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Path A IQK Only Tx Success!!\n"); + + result[t][0] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_A, + MASK_IQK_RESULT); + result[t][1] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_A, + MASK_IQK_RESULT); + } else { + rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x000000); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe70 = %#x\n", + rtl_get_bbreg(hw, RRX_WAIT_CCA, MASKDWORD)); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "RF path A 0x0 = %#x\n", + rtl_get_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK)); + rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0x808000); + RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path A IQK Fail!!\n"); + } + + if (is2t) { + /* _rtl92d_phy_patha_standby(hw); */ + /* Turn Path B ADDA on */ + rtl92d_phy_path_adda_on(hw, adda_reg, false, is2t); + + pathb_ok = _rtl92du_phy_pathb_iqk_5g_normal(hw); + if (pathb_ok == 0x03) { + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Path B IQK Success!!\n"); + result[t][4] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B, + MASK_IQK_RESULT); + result[t][5] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B, + MASK_IQK_RESULT); + result[t][6] = rtl_get_bbreg(hw, RRX_POWER_BEFORE_IQK_B_2, + MASK_IQK_RESULT); + result[t][7] = rtl_get_bbreg(hw, RRX_POWER_AFTER_IQK_B_2, + MASK_IQK_RESULT); + } else if (pathb_ok == 0x01) { /* Tx IQK OK */ + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Path B Only Tx IQK Success!!\n"); + result[t][4] = rtl_get_bbreg(hw, RTX_POWER_BEFORE_IQK_B, + MASK_IQK_RESULT); + result[t][5] = rtl_get_bbreg(hw, RTX_POWER_AFTER_IQK_B, + MASK_IQK_RESULT); + } else { + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Path B IQK failed!!\n"); + } + } + + /* Back to BB mode, load original value */ + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "IQK:Back to BB mode, load original value!\n"); + rtl_set_bbreg(hw, RFPGA0_IQK, MASKH3BYTES, 0); + + if (is2t) + _rtl92du_phy_reload_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, + IQK_BB_REG_NUM); + else + _rtl92du_phy_reload_adda_registers(hw, iqk_bb_reg, + rtlphy->iqk_bb_backup, + 
IQK_BB_REG_NUM - 1); + + /* path A IQ path to DP block */ + rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0x010170b8); + if (is2t) /* path B IQ path to DP block */ + rtl_set_bbreg(hw, RPDP_ANTB, MASKDWORD, 0x010170b8); + + /* Reload MAC parameters */ + _rtl92du_phy_reload_mac_registers(hw, iqk_mac_reg, + rtlphy->iqk_mac_backup); + + /* Switch back BB to SI mode after finish IQ Calibration. */ + if (!rtlphy->rfpi_enable) + _rtl92du_phy_pimode_switch(hw, false); + + /* Reload ADDA power saving parameters */ + _rtl92du_phy_reload_adda_registers(hw, adda_reg, + rtlphy->adda_backup, + IQK_ADDA_REG_NUM); + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "<==\n"); +} + +static bool _rtl92du_phy_simularity_compare(struct ieee80211_hw *hw, + long result[][8], u8 c1, u8 c2) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = &rtlpriv->rtlhal; + u32 i, j, diff, sim_bitmap, bound, u4temp = 0; + u8 final_candidate[2] = {0xFF, 0xFF}; /* for path A and path B */ + bool is2t = IS_92D_SINGLEPHY(rtlhal->version); + bool bresult = true; + + if (is2t) + bound = 8; + else + bound = 4; + + sim_bitmap = 0; + + for (i = 0; i < bound; i++) { + diff = abs_diff(result[c1][i], result[c2][i]); + + if (diff > MAX_TOLERANCE_92D) { + if ((i == 2 || i == 6) && !sim_bitmap) { + if (result[c1][i] + result[c1][i + 1] == 0) + final_candidate[(i / 4)] = c2; + else if (result[c2][i] + result[c2][i + 1] == 0) + final_candidate[(i / 4)] = c1; + else + sim_bitmap = sim_bitmap | (1 << i); + } else { + sim_bitmap = sim_bitmap | (1 << i); + } + } + } + + if (sim_bitmap == 0) { + for (i = 0; i < (bound / 4); i++) { + if (final_candidate[i] != 0xFF) { + for (j = i * 4; j < (i + 1) * 4 - 2; j++) + result[3][j] = + result[final_candidate[i]][j]; + bresult = false; + } + } + + for (i = 0; i < bound; i++) + u4temp += result[c1][i] + result[c2][i]; + + if (u4temp == 0) /* IQK fail for c1 & c2 */ + bresult = false; + + return bresult; + } + + if (!(sim_bitmap & 0x0F)) { /* path A OK */ + for (i = 0; i < 4; i++) + result[3][i] = result[c1][i]; + } else if (!(sim_bitmap & 0x03)) { /* path A, Tx OK */ + for (i = 0; i < 2; i++) + result[3][i] = result[c1][i]; + } + + if (!(sim_bitmap & 0xF0) && is2t) { /* path B OK */ + for (i = 4; i < 8; i++) + result[3][i] = result[c1][i]; + } else if (!(sim_bitmap & 0x30)) { /* path B, Tx OK */ + for (i = 4; i < 6; i++) + result[3][i] = result[c1][i]; + } + + return false; +} + +static void _rtl92du_phy_patha_fill_iqk_matrix_5g_normal(struct ieee80211_hw *hw, + bool iqk_ok, + long result[][8], + u8 final_candidate, + bool txonly) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = &rtlpriv->rtlhal; + u32 val_x, reg; + int val_y; + + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Path A IQ Calibration %s !\n", iqk_ok ? 
"Success" : "Failed"); + if (iqk_ok && final_candidate != 0xFF) { + val_x = result[final_candidate][0]; + if ((val_x & 0x00000200) != 0) + val_x = val_x | 0xFFFFFC00; + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "X = 0x%x\n", val_x); + rtl_set_bbreg(hw, RTX_IQK_TONE_A, 0x3FF0000, val_x); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), 0); + + val_y = result[final_candidate][1]; + if ((val_y & 0x00000200) != 0) + val_y = val_y | 0xFFFFFC00; + + /* path B IQK result + 3 */ + if (rtlhal->current_bandtype == BAND_ON_5G) + val_y += 3; + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "Y = 0x%x\n", val_y); + + rtl_set_bbreg(hw, RTX_IQK_TONE_A, 0x3FF, val_y); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(26), 0); + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe30 = 0x%x\n", + rtl_get_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD)); + + if (txonly) { + RTPRINT(rtlpriv, FINIT, INIT_IQK, "only Tx OK\n"); + return; + } + + reg = result[final_candidate][2]; + rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg); + reg = result[final_candidate][3] & 0x3F; + rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg); + reg = (result[final_candidate][3] >> 6) & 0xF; + rtl_set_bbreg(hw, ROFDM0_RXIQEXTANTA, 0xF0000000, reg); + } else { + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "%s: Tx/Rx fail restore default value\n", __func__); + + rtl_set_bbreg(hw, RTX_IQK_TONE_A, MASKDWORD, 0x19008c00); + rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, MASKDWORD, 0x40000100); + rtl_set_bbreg(hw, RRX_IQK_TONE_A, MASKDWORD, 0x19008c00); + } +} + +static void _rtl92du_phy_patha_fill_iqk_matrix(struct ieee80211_hw *hw, + bool iqk_ok, long result[][8], + u8 final_candidate, bool txonly) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = &rtlpriv->rtlhal; + u32 oldval_0, val_x, tx0_a, reg; + long val_y, tx0_c; + bool is2t = IS_92D_SINGLEPHY(rtlhal->version) || + rtlhal->macphymode == DUALMAC_DUALPHY; + + if (rtlhal->current_bandtype == BAND_ON_5G) { + _rtl92du_phy_patha_fill_iqk_matrix_5g_normal(hw, iqk_ok, result, + final_candidate, + txonly); + return; + } + + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Path A IQ Calibration %s !\n", iqk_ok ? 
"Success" : "Failed"); + if (final_candidate == 0xFF || !iqk_ok) + return; + + /* OFDM0_D */ + oldval_0 = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0xffc00000); + + val_x = result[final_candidate][0]; + if ((val_x & 0x00000200) != 0) + val_x = val_x | 0xFFFFFC00; + + tx0_a = (val_x * oldval_0) >> 8; + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "X = 0x%x, tx0_a = 0x%x, oldval_0 0x%x\n", + val_x, tx0_a, oldval_0); + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(24), + ((val_x * oldval_0 >> 7) & 0x1)); + + val_y = result[final_candidate][1]; + if ((val_y & 0x00000200) != 0) + val_y = val_y | 0xFFFFFC00; + + /* path B IQK result + 3 */ + if (rtlhal->interfaceindex == 1 && + rtlhal->current_bandtype == BAND_ON_5G) + val_y += 3; + + tx0_c = (val_y * oldval_0) >> 8; + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Y = 0x%lx, tx0_c = 0x%lx\n", + val_y, tx0_c); + + rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000, (tx0_c & 0x3C0) >> 6); + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000, tx0_c & 0x3F); + if (is2t) + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(26), + (val_y * oldval_0 >> 7) & 0x1); + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xC80 = 0x%x\n", + rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE, + MASKDWORD)); + + if (txonly) { + RTPRINT(rtlpriv, FINIT, INIT_IQK, "only Tx OK\n"); + return; + } + + reg = result[final_candidate][2]; + rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg); + reg = result[final_candidate][3] & 0x3F; + rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg); + reg = (result[final_candidate][3] >> 6) & 0xF; + rtl_set_bbreg(hw, ROFDM0_RXIQEXTANTA, 0xF0000000, reg); +} + +static void _rtl92du_phy_pathb_fill_iqk_matrix_5g_normal(struct ieee80211_hw *hw, + bool iqk_ok, + long result[][8], + u8 final_candidate, + bool txonly) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = &rtlpriv->rtlhal; + u32 val_x, reg; + int val_y; + + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "Path B IQ Calibration %s !\n", iqk_ok ? 
"Success" : "Failed"); + if (iqk_ok && final_candidate != 0xFF) { + val_x = result[final_candidate][4]; + if ((val_x & 0x00000200) != 0) + val_x = val_x | 0xFFFFFC00; + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "X = 0x%x\n", val_x); + rtl_set_bbreg(hw, RTX_IQK_TONE_B, 0x3FF0000, val_x); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28), 0); + + val_y = result[final_candidate][5]; + if ((val_y & 0x00000200) != 0) + val_y = val_y | 0xFFFFFC00; + + /* path B IQK result + 3 */ + if (rtlhal->current_bandtype == BAND_ON_5G) + val_y += 3; + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "Y = 0x%x\n", val_y); + + rtl_set_bbreg(hw, RTX_IQK_TONE_B, 0x3FF, val_y); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30), 0); + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "0xe50 = 0x%x\n", + rtl_get_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD)); + + if (txonly) { + RTPRINT(rtlpriv, FINIT, INIT_IQK, "only Tx OK\n"); + return; + } + + reg = result[final_candidate][6]; + rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg); + reg = result[final_candidate][7] & 0x3F; + rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg); + reg = (result[final_candidate][7] >> 6) & 0xF; + rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg); + } else { + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "%s: Tx/Rx fail restore default value\n", __func__); + + rtl_set_bbreg(hw, RTX_IQK_TONE_B, MASKDWORD, 0x19008c00); + rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, MASKDWORD, 0x40000100); + rtl_set_bbreg(hw, RRX_IQK_TONE_B, MASKDWORD, 0x19008c00); + } +} + +static void _rtl92du_phy_pathb_fill_iqk_matrix(struct ieee80211_hw *hw, + bool iqk_ok, long result[][8], + u8 final_candidate, bool txonly) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = &rtlpriv->rtlhal; + u32 oldval_1, val_x, tx1_a, reg; + long val_y, tx1_c; + + if (rtlhal->current_bandtype == BAND_ON_5G) { + _rtl92du_phy_pathb_fill_iqk_matrix_5g_normal(hw, iqk_ok, result, + final_candidate, + txonly); + return; + } + + RTPRINT(rtlpriv, FINIT, INIT_IQK, "Path B IQ Calibration %s !\n", + iqk_ok ? 
"Success" : "Failed"); + + if (final_candidate == 0xFF || !iqk_ok) + return; + + oldval_1 = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0xffc00000); + + val_x = result[final_candidate][4]; + if ((val_x & 0x00000200) != 0) + val_x = val_x | 0xFFFFFC00; + + tx1_a = (val_x * oldval_1) >> 8; + RTPRINT(rtlpriv, FINIT, INIT_IQK, "X = 0x%x, tx1_a = 0x%x\n", + val_x, tx1_a); + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(28), + (val_x * oldval_1 >> 7) & 0x1); + + val_y = result[final_candidate][5]; + if ((val_y & 0x00000200) != 0) + val_y = val_y | 0xFFFFFC00; + + if (rtlhal->current_bandtype == BAND_ON_5G) + val_y += 3; + + tx1_c = (val_y * oldval_1) >> 8; + RTPRINT(rtlpriv, FINIT, INIT_IQK, "Y = 0x%lx, tx1_c = 0x%lx\n", + val_y, tx1_c); + + rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000, (tx1_c & 0x3C0) >> 6); + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000, tx1_c & 0x3F); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30), + (val_y * oldval_1 >> 7) & 0x1); + + if (txonly) + return; + + reg = result[final_candidate][6]; + rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg); + reg = result[final_candidate][7] & 0x3F; + rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg); + reg = (result[final_candidate][7] >> 6) & 0xF; + rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg); +} + +void rtl92du_phy_iq_calibrate(struct ieee80211_hw *hw) +{ + long rege94, rege9c, regea4, regeac, regeb4; + bool is12simular, is13simular, is23simular; + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = &rtlpriv->rtlhal; + long regebc, regec4, regecc, regtmp = 0; + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 i, final_candidate, indexforchannel; + bool patha_ok, pathb_ok; + long result[4][8] = {}; + + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "IQK:Start!!!channel %d\n", rtlphy->current_channel); + + final_candidate = 0xff; + patha_ok = false; + pathb_ok = false; + is12simular = false; + is23simular = false; + is13simular = false; + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "IQK !!!currentband %d\n", rtlhal->current_bandtype); + + for (i = 0; i < 3; i++) { + if (rtlhal->current_bandtype == BAND_ON_5G) { + _rtl92du_phy_iq_calibrate_5g_normal(hw, result, i); + } else if (rtlhal->current_bandtype == BAND_ON_2_4G) { + if (IS_92D_SINGLEPHY(rtlhal->version)) + _rtl92du_phy_iq_calibrate(hw, result, i, true); + else + _rtl92du_phy_iq_calibrate(hw, result, i, false); + } + + if (i == 1) { + is12simular = _rtl92du_phy_simularity_compare(hw, result, + 0, 1); + if (is12simular) { + final_candidate = 0; + break; + } + } + + if (i == 2) { + is13simular = _rtl92du_phy_simularity_compare(hw, result, + 0, 2); + if (is13simular) { + final_candidate = 0; + break; + } + + is23simular = _rtl92du_phy_simularity_compare(hw, result, + 1, 2); + if (is23simular) { + final_candidate = 1; + } else { + for (i = 0; i < 8; i++) + regtmp += result[3][i]; + + if (regtmp != 0) + final_candidate = 3; + else + final_candidate = 0xFF; + } + } + } + + for (i = 0; i < 4; i++) { + rege94 = result[i][0]; + rege9c = result[i][1]; + regea4 = result[i][2]; + regeac = result[i][3]; + regeb4 = result[i][4]; + regebc = result[i][5]; + regec4 = result[i][6]; + regecc = result[i][7]; + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "IQK: rege94=%lx rege9c=%lx regea4=%lx regeac=%lx regeb4=%lx regebc=%lx regec4=%lx regecc=%lx\n", + rege94, rege9c, regea4, regeac, regeb4, regebc, regec4, + regecc); + } + + if (final_candidate != 0xff) { + rege94 = result[final_candidate][0]; + rtlphy->reg_e94 = rege94; + rege9c 
= result[final_candidate][1]; + rtlphy->reg_e9c = rege9c; + regea4 = result[final_candidate][2]; + regeac = result[final_candidate][3]; + regeb4 = result[final_candidate][4]; + rtlphy->reg_eb4 = regeb4; + regebc = result[final_candidate][5]; + rtlphy->reg_ebc = regebc; + regec4 = result[final_candidate][6]; + regecc = result[final_candidate][7]; + + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "IQK: final_candidate is %x\n", final_candidate); + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "IQK: rege94=%lx rege9c=%lx regea4=%lx regeac=%lx regeb4=%lx regebc=%lx regec4=%lx regecc=%lx\n", + rege94, rege9c, regea4, regeac, regeb4, regebc, regec4, + regecc); + + patha_ok = true; + pathb_ok = true; + } else { + rtlphy->reg_e94 = 0x100; + rtlphy->reg_eb4 = 0x100; /* X default value */ + rtlphy->reg_e9c = 0x0; + rtlphy->reg_ebc = 0x0; /* Y default value */ + } + if (rege94 != 0 /*&& regea4 != 0*/) + _rtl92du_phy_patha_fill_iqk_matrix(hw, patha_ok, result, + final_candidate, + regea4 == 0); + if (IS_92D_SINGLEPHY(rtlhal->version) && + regeb4 != 0 /*&& regec4 != 0*/) + _rtl92du_phy_pathb_fill_iqk_matrix(hw, pathb_ok, result, + final_candidate, + regec4 == 0); + + if (final_candidate != 0xFF) { + indexforchannel = + rtl92d_get_rightchnlplace_for_iqk(rtlphy->current_channel); + + for (i = 0; i < IQK_MATRIX_REG_NUM; i++) + rtlphy->iqk_matrix[indexforchannel].value[0][i] = + result[final_candidate][i]; + + rtlphy->iqk_matrix[indexforchannel].iqk_done = true; + + rtl_dbg(rtlpriv, COMP_SCAN | COMP_MLME, DBG_LOUD, + "IQK OK indexforchannel %d\n", indexforchannel); + } +} + +void rtl92du_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = &rtlpriv->rtlhal; + struct rtl_phy *rtlphy = &rtlpriv->phy; + struct rtl_mac *mac = rtl_mac(rtlpriv); + u8 indexforchannel; + bool need_iqk; + + rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "channel %d\n", channel); + /*------Do IQK for normal chip and test chip 5G band------- */ + + indexforchannel = rtl92d_get_rightchnlplace_for_iqk(channel); + rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "indexforchannel %d done %d\n", + indexforchannel, + rtlphy->iqk_matrix[indexforchannel].iqk_done); + + /* We need to do IQK if we're about to connect to a network on 5 GHz. + * On 5 GHz a channel switch outside of scanning happens only before + * connecting. + */ + need_iqk = !mac->act_scanning; + + if (!rtlphy->iqk_matrix[indexforchannel].iqk_done && need_iqk) { + rtl_dbg(rtlpriv, COMP_SCAN | COMP_INIT, DBG_LOUD, + "Do IQK Matrix reg for channel:%d....\n", channel); + rtl92du_phy_iq_calibrate(hw); + return; + } + + /* Just load the value. */ + /* 2G band just load once. 
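+	 * All 2.4 GHz channels share the single IQK result stored at
+	 * indexforchannel 0, so it only has to be written into the BB
+	 * registers once; each 5 GHz channel has its own entry.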
+	 */
+	if ((!rtlhal->load_imrandiqk_setting_for2g && indexforchannel == 0) ||
+	    indexforchannel > 0) {
+		rtl_dbg(rtlpriv, COMP_SCAN, DBG_LOUD,
+			"Just Read IQK Matrix reg for channel:%d....\n",
+			channel);
+
+		if (rtlphy->iqk_matrix[indexforchannel].value[0][0] != 0)
+			_rtl92du_phy_patha_fill_iqk_matrix(hw, true,
+				rtlphy->iqk_matrix[indexforchannel].value, 0,
+				rtlphy->iqk_matrix[indexforchannel].value[0][2] == 0);
+
+		if (IS_92D_SINGLEPHY(rtlhal->version) &&
+		    rtlphy->iqk_matrix[indexforchannel].value[0][4] != 0)
+			_rtl92du_phy_pathb_fill_iqk_matrix(hw, true,
+				rtlphy->iqk_matrix[indexforchannel].value, 0,
+				rtlphy->iqk_matrix[indexforchannel].value[0][6] == 0);
+	}
+	rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+}
+
+static void _rtl92du_phy_reload_lck_setting(struct ieee80211_hw *hw, u8 channel)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+	u8 erfpath = rtlhal->current_bandtype == BAND_ON_5G ? RF90_PATH_A :
+		IS_92D_SINGLEPHY(rtlhal->version) ? RF90_PATH_B : RF90_PATH_A;
+	bool bneed_powerdown_radio = false;
+	u32 u4tmp, u4regvalue;
+
+	rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "path %d\n", erfpath);
+	RTPRINT(rtlpriv, FINIT, INIT_IQK, "band type = %d\n",
+		rtlpriv->rtlhal.current_bandtype);
+	RTPRINT(rtlpriv, FINIT, INIT_IQK, "channel = %d\n", channel);
+
+	if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) {/* Path-A for 5G */
+		u4tmp = rtlpriv->curveindex_5g[channel - 1];
+		RTPRINT(rtlpriv, FINIT, INIT_IQK,
+			"ver 1 set RF-A, 5G, 0x28 = 0x%x !!\n", u4tmp);
+
+		if (rtlpriv->rtlhal.macphymode == DUALMAC_DUALPHY &&
+		    rtlpriv->rtlhal.interfaceindex == 1) {
+			bneed_powerdown_radio =
+				rtl92du_phy_enable_anotherphy(hw, false);
+			rtlpriv->rtlhal.during_mac1init_radioa = true;
+			/* assume this case does not happen */
+			if (bneed_powerdown_radio)
+				rtl92d_phy_enable_rf_env(hw, erfpath,
+							 &u4regvalue);
+		}
+
+		rtl_set_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800, u4tmp);
+
+		if (bneed_powerdown_radio) {
+			rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue);
+			rtl92du_phy_powerdown_anotherphy(hw, false);
+		}
+	} else if (rtlpriv->rtlhal.current_bandtype == BAND_ON_2_4G) {
+		u4tmp = rtlpriv->curveindex_2g[channel - 1];
+		RTPRINT(rtlpriv, FINIT, INIT_IQK,
+			"ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n", u4tmp);
+
+		if (rtlpriv->rtlhal.macphymode == DUALMAC_DUALPHY &&
+		    rtlpriv->rtlhal.interfaceindex == 0) {
+			bneed_powerdown_radio =
+				rtl92du_phy_enable_anotherphy(hw, true);
+			rtlpriv->rtlhal.during_mac0init_radiob = true;
+			if (bneed_powerdown_radio)
+				rtl92d_phy_enable_rf_env(hw, erfpath,
+							 &u4regvalue);
+		}
+
+		rtl_set_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800, u4tmp);
+
+		RTPRINT(rtlpriv, FINIT, INIT_IQK,
+			"ver 3 set RF-B, 2G, 0x28 = 0x%x !!\n",
+			rtl_get_rfreg(hw, erfpath, RF_SYN_G4, 0x3f800));
+
+		if (bneed_powerdown_radio) {
+			rtl92d_phy_restore_rf_env(hw, erfpath, &u4regvalue);
+			rtl92du_phy_powerdown_anotherphy(hw, true);
+		}
+	}
+	rtl_dbg(rtlpriv, COMP_CMD, DBG_LOUD, "<====\n");
+}
+
+static void _rtl92du_phy_lc_calibrate_sw(struct ieee80211_hw *hw, bool is2t)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+	u32 curvecount_val[CV_CURVE_CNT * 2];
+	u16 timeout = 800, timecount = 0;
+	u32 u4tmp, offset, rf_syn_g4[2];
+	u8 tmpreg, index, rf_mode[2];
+	u8 path = is2t ? 2 : 1;
+	u8 i;
+
+	/* Check continuous TX and Packet TX */
+	tmpreg = rtl_read_byte(rtlpriv, 0xd03);
+	if ((tmpreg & 0x70) != 0)
+		/* Deal with continuous TX case: disable all continuous TX */
+		rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
+	else
+		/* Deal with packet TX case: block all TX queues */
+		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
+
+	rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xF00000, 0x0F);
+
+	for (index = 0; index < path; index++) {
+		/* 1. Read original RF mode */
+		offset = index == 0 ? ROFDM0_XAAGCCORE1 : ROFDM0_XBAGCCORE1;
+		rf_mode[index] = rtl_read_byte(rtlpriv, offset);
+
+		/* 2. Set RF mode = standby mode */
+		rtl_set_rfreg(hw, (enum radio_path)index, RF_AC,
+			      RFREG_OFFSET_MASK, 0x010000);
+
+		rf_syn_g4[index] = rtl_get_rfreg(hw, index, RF_SYN_G4,
+						 RFREG_OFFSET_MASK);
+		rtl_set_rfreg(hw, index, RF_SYN_G4, 0x700, 0x7);
+
+		/* 3. Switch CV-curve control to LC-calibration */
+		rtl_set_rfreg(hw, (enum radio_path)index, RF_SYN_G7,
+			      BIT(17), 0x0);
+
+		/* 4. Set LC calibration begin */
+		rtl_set_rfreg(hw, (enum radio_path)index, RF_CHNLBW,
+			      0x08000, 0x01);
+	}
+
+	for (index = 0; index < path; index++) {
+		u4tmp = rtl_get_rfreg(hw, (enum radio_path)index, RF_SYN_G6,
+				      RFREG_OFFSET_MASK);
+
+		while ((!(u4tmp & BIT(11))) && timecount <= timeout) {
+			mdelay(50);
+			timecount += 50;
+			u4tmp = rtl_get_rfreg(hw, (enum radio_path)index,
+					      RF_SYN_G6, RFREG_OFFSET_MASK);
+		}
+		RTPRINT(rtlpriv, FINIT, INIT_IQK,
+			"PHY_LCK finish delay for %d ms=2\n", timecount);
+	}
+
+	if ((tmpreg & 0x70) != 0)
+		rtl_write_byte(rtlpriv, 0xd03, tmpreg);
+	else /* Deal with packet TX case */
+		rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
+
+	rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER4, 0xF00000, 0x00);
+
+	for (index = 0; index < path; index++) {
+		rtl_get_rfreg(hw, index, RF_SYN_G4, RFREG_OFFSET_MASK);
+
+		if (index == 0 && rtlhal->interfaceindex == 0) {
+			RTPRINT(rtlpriv, FINIT, INIT_IQK,
+				"path-A / 5G LCK\n");
+		} else {
+			RTPRINT(rtlpriv, FINIT, INIT_IQK,
+				"path-B / 2.4G LCK\n");
+		}
+
+		memset(curvecount_val, 0, sizeof(curvecount_val));
+
+		/* Set LC calibration off */
+		rtl_set_rfreg(hw, (enum radio_path)index, RF_CHNLBW,
+			      0x08000, 0x0);
+
+		RTPRINT(rtlpriv, FINIT, INIT_IQK, "set RF 0x18[15] = 0\n");
+
+		/* save Curve-counting number */
+		for (i = 0; i < CV_CURVE_CNT; i++) {
+			u32 readval = 0, readval2 = 0;
+
+			rtl_set_rfreg(hw, (enum radio_path)index, 0x3F,
+				      0x7f, i);
+
+			rtl_set_rfreg(hw, (enum radio_path)index, 0x4D,
+				      RFREG_OFFSET_MASK, 0x0);
+
+			readval = rtl_get_rfreg(hw, (enum radio_path)index,
+						0x4F, RFREG_OFFSET_MASK);
+			curvecount_val[2 * i + 1] = (readval & 0xfffe0) >> 5;
+
+			/* reg 0x4f [4:0] */
+			/* reg 0x50 [19:10] */
+			readval2 = rtl_get_rfreg(hw, (enum radio_path)index,
+						 0x50, 0xffc00);
+			curvecount_val[2 * i] = (((readval & 0x1F) << 10) |
+						 readval2);
+		}
+
+		if (index == 0 && rtlhal->interfaceindex == 0)
+			rtl92d_phy_calc_curvindex(hw, targetchnl_5g,
+						  curvecount_val,
+						  true, rtlpriv->curveindex_5g);
+		else
+			rtl92d_phy_calc_curvindex(hw, targetchnl_2g,
+						  curvecount_val,
+						  false, rtlpriv->curveindex_2g);
+
+		/* switch CV-curve control mode */
+		rtl_set_rfreg(hw, (enum radio_path)index, RF_SYN_G7,
+			      BIT(17), 0x1);
+	}
+
+	/* Restore original situation */
+	for (index = 0; index < path; index++) {
+		rtl_set_rfreg(hw, index, RF_SYN_G4, RFREG_OFFSET_MASK,
+			      rf_syn_g4[index]);
+
+		offset = index == 0 ?
ROFDM0_XAAGCCORE1 : ROFDM0_XBAGCCORE1; + rtl_write_byte(rtlpriv, offset, 0x50); + rtl_write_byte(rtlpriv, offset, rf_mode[index]); + } + + _rtl92du_phy_reload_lck_setting(hw, rtlpriv->phy.current_channel); +} + +void rtl92du_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = &rtlpriv->rtlhal; + struct rtl_phy *rtlphy = &rtlpriv->phy; + u32 timeout = 2000, timecount = 0; + + while (rtlpriv->mac80211.act_scanning && timecount < timeout) { + udelay(50); + timecount += 50; + } + + rtlphy->lck_inprogress = true; + RTPRINT(rtlpriv, FINIT, INIT_IQK, + "LCK:Start!!! currentband %x delay %d ms\n", + rtlhal->current_bandtype, timecount); + + _rtl92du_phy_lc_calibrate_sw(hw, is2t); + + rtlphy->lck_inprogress = false; + RTPRINT(rtlpriv, FINIT, INIT_IQK, "LCK:Finish!!!\n"); +} + +void rtl92du_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta) +{ + /* Nothing to do. */ +} + +u8 rtl92du_phy_sw_chnl(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_phy *rtlphy = &rtlpriv->phy; + u8 num_total_rfpath = rtlphy->num_total_rfpath; + u8 channel = rtlphy->current_channel; + u32 timeout = 1000, timecount = 0; + u32 ret_value; + u8 rfpath; + + if (rtlphy->sw_chnl_inprogress) + return 0; + if (rtlphy->set_bwmode_inprogress) + return 0; + + if ((is_hal_stop(rtlhal)) || (RT_CANNOT_IO(hw))) { + rtl_dbg(rtlpriv, COMP_CHAN, DBG_LOUD, + "sw_chnl_inprogress false driver sleep or unload\n"); + return 0; + } + + while (rtlphy->lck_inprogress && timecount < timeout) { + mdelay(50); + timecount += 50; + } + + if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY && + rtlhal->bandset == BAND_ON_BOTH) { + ret_value = rtl_get_bbreg(hw, RFPGA0_XAB_RFPARAMETER, + MASKDWORD); + if (rtlphy->current_channel > 14 && !(ret_value & BIT(0))) + rtl92du_phy_switch_wirelessband(hw, BAND_ON_5G); + else if (rtlphy->current_channel <= 14 && (ret_value & BIT(0))) + rtl92du_phy_switch_wirelessband(hw, BAND_ON_2_4G); + } + + switch (rtlhal->current_bandtype) { + case BAND_ON_5G: + /* Get first channel error when change between + * 5G and 2.4G band. + */ + if (WARN_ONCE(channel <= 14, "rtl8192du: 5G but channel<=14\n")) + return 0; + break; + case BAND_ON_2_4G: + /* Get first channel error when change between + * 5G and 2.4G band. 
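+		 * That is, right after a band switch the requested channel
+		 * may still belong to the previous band; bail out instead
+		 * of programming a channel from the wrong band.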
+ */ + if (WARN_ONCE(channel > 14, "rtl8192du: 2G but channel>14\n")) + return 0; + break; + default: + WARN_ONCE(true, "rtl8192du: Invalid WirelessMode(%#x)!!\n", + rtlpriv->mac80211.mode); + break; + } + + rtlphy->sw_chnl_inprogress = true; + + rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, + "switch to channel%d\n", rtlphy->current_channel); + + rtl92d_phy_set_txpower_level(hw, channel); + + for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) { + u32p_replace_bits(&rtlphy->rfreg_chnlval[rfpath], + channel, 0xff); + + if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) { + if (channel > 99) + rtlphy->rfreg_chnlval[rfpath] |= (BIT(18)); + else + rtlphy->rfreg_chnlval[rfpath] &= ~BIT(18); + rtlphy->rfreg_chnlval[rfpath] |= (BIT(16) | BIT(8)); + } else { + rtlphy->rfreg_chnlval[rfpath] &= + ~(BIT(8) | BIT(16) | BIT(18)); + } + rtl_set_rfreg(hw, rfpath, RF_CHNLBW, RFREG_OFFSET_MASK, + rtlphy->rfreg_chnlval[rfpath]); + + _rtl92du_phy_reload_imr_setting(hw, channel, rfpath); + } + + _rtl92du_phy_switch_rf_setting(hw, channel); + + /* do IQK when all parameters are ready */ + rtl92du_phy_reload_iqk_setting(hw, channel); + + rtl_dbg(rtlpriv, COMP_SCAN, DBG_TRACE, "<==\n"); + rtlphy->sw_chnl_inprogress = false; + return 1; +} + +static void _rtl92du_phy_set_rfon(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + /* a. SYS_CLKR 0x08[11] = 1 restore MAC clock */ + /* b. SPS_CTRL 0x11[7:0] = 0x2b */ + if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY) + rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b); + + /* c. For PCIE: SYS_FUNC_EN 0x02[7:0] = 0xE3 enable BB TRX function */ + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); + + /* RF_ON_EXCEP(d~g): */ + /* d. APSD_CTRL 0x600[7:0] = 0x00 */ + rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00); + + /* e. SYS_FUNC_EN 0x02[7:0] = 0xE2 reset BB TRX function again */ + /* f. SYS_FUNC_EN 0x02[7:0] = 0xE3 enable BB TRX function*/ + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); + + /* g. txpause 0x522[7:0] = 0x00 enable mac tx queue */ + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); +} + +static void _rtl92du_phy_set_rfsleep(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + u32 u4btmp; + u8 retry = 5; + + /* a. TXPAUSE 0x522[7:0] = 0xFF Pause MAC TX queue */ + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF); + + /* b. RF path 0 offset 0x00 = 0x00 disable RF */ + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00); + + /* c. APSD_CTRL 0x600[7:0] = 0x40 */ + rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40); + + /* d. APSD_CTRL 0x600[7:0] = 0x00 + * APSD_CTRL 0x600[7:0] = 0x00 + * RF path 0 offset 0x00 = 0x00 + * APSD_CTRL 0x600[7:0] = 0x40 + */ + u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK); + while (u4btmp != 0 && retry > 0) { + rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0); + rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00); + rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40); + u4btmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK); + retry--; + } + if (retry == 0) { + /* Jump out the LPS turn off sequence to RF_ON_EXCEP */ + rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00); + + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3); + rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00); + rtl_dbg(rtlpriv, COMP_POWER, DBG_LOUD, + "Fail !!! Switch RF timeout\n"); + return; + } + + /* e. For PCIE: SYS_FUNC_EN 0x02[7:0] = 0xE2 reset BB TRX function */ + rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2); + + /* f. 
SPS_CTRL 0x11[7:0] = 0x22 */ + if (rtlpriv->rtlhal.macphymode == SINGLEMAC_SINGLEPHY) + rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22); +} + +bool rtl92du_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv); + struct rtl_mac *mac = rtl_mac(rtlpriv); + bool bresult = true; + + if (rfpwr_state == ppsc->rfpwr_state) + return false; + + switch (rfpwr_state) { + case ERFON: + if (ppsc->rfpwr_state == ERFOFF && + RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) { + u32 initializecount = 0; + bool rtstatus; + + do { + initializecount++; + rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG, + "IPS Set eRf nic enable\n"); + rtstatus = rtl_ps_enable_nic(hw); + } while (!rtstatus && (initializecount < 10)); + + RT_CLEAR_PS_LEVEL(ppsc, + RT_RF_OFF_LEVL_HALT_NIC); + } else { + rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG, + "awake, slept:%d ms state_inap:%x\n", + jiffies_to_msecs(jiffies - + ppsc->last_sleep_jiffies), + rtlpriv->psc.state_inap); + ppsc->last_awake_jiffies = jiffies; + _rtl92du_phy_set_rfon(hw); + } + + if (mac->link_state == MAC80211_LINKED) + rtlpriv->cfg->ops->led_control(hw, LED_CTL_LINK); + else + rtlpriv->cfg->ops->led_control(hw, LED_CTL_NO_LINK); + break; + case ERFOFF: + if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) { + rtl_dbg(rtlpriv, COMP_RF, DBG_DMESG, + "IPS Set eRf nic disable\n"); + rtl_ps_disable_nic(hw); + RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC); + } else { + if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) + rtlpriv->cfg->ops->led_control(hw, LED_CTL_NO_LINK); + else + rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF); + } + break; + case ERFSLEEP: + if (ppsc->rfpwr_state == ERFOFF) + return false; + + rtl_dbg(rtlpriv, COMP_POWER, DBG_DMESG, + "sleep awakened:%d ms state_inap:%x\n", + jiffies_to_msecs(jiffies - + ppsc->last_awake_jiffies), + rtlpriv->psc.state_inap); + ppsc->last_sleep_jiffies = jiffies; + _rtl92du_phy_set_rfsleep(hw); + break; + default: + pr_err("switch case %#x not processed\n", + rfpwr_state); + return false; + } + + if (bresult) + ppsc->rfpwr_state = rfpwr_state; + + return bresult; +} + +void rtl92du_phy_set_poweron(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u32 mac_reg = (rtlhal->interfaceindex == 0 ? 
REG_MAC0 : REG_MAC1);
+	u8 value8;
+	u16 i;
+
+	/* Let the firmware know the band status: 0x81[1]/0x53[1] = 0: 5G, 1: 2G */
+	if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+		value8 = rtl_read_byte(rtlpriv, mac_reg);
+		value8 |= BIT(1);
+		rtl_write_byte(rtlpriv, mac_reg, value8);
+	} else {
+		value8 = rtl_read_byte(rtlpriv, mac_reg);
+		value8 &= ~BIT(1);
+		rtl_write_byte(rtlpriv, mac_reg, value8);
+	}
+
+	if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY) {
+		value8 = rtl_read_byte(rtlpriv, REG_MAC0);
+		rtl_write_byte(rtlpriv, REG_MAC0, value8 | MAC0_ON);
+	} else {
+		mutex_lock(rtlpriv->mutex_for_power_on_off);
+		if (rtlhal->interfaceindex == 0) {
+			value8 = rtl_read_byte(rtlpriv, REG_MAC0);
+			rtl_write_byte(rtlpriv, REG_MAC0, value8 | MAC0_ON);
+		} else {
+			value8 = rtl_read_byte(rtlpriv, REG_MAC1);
+			rtl_write_byte(rtlpriv, REG_MAC1, value8 | MAC1_ON);
+		}
+		value8 = rtl_read_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS);
+		mutex_unlock(rtlpriv->mutex_for_power_on_off);
+
+		for (i = 0; i < 200; i++) {
+			if ((value8 & BIT(7)) == 0)
+				break;
+
+			udelay(500);
+			mutex_lock(rtlpriv->mutex_for_power_on_off);
+			value8 = rtl_read_byte(rtlpriv,
+					       REG_POWER_OFF_IN_PROCESS);
+			mutex_unlock(rtlpriv->mutex_for_power_on_off);
+		}
+		if (i == 200)
+			WARN_ONCE(true, "rtl8192du: The other MAC did not power off in time\n");
+	}
+}
+
+void rtl92du_update_bbrf_configuration(struct ieee80211_hw *hw)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_efuse *rtlefuse = rtl_efuse(rtlpriv);
+	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
+	struct rtl_phy *rtlphy = &rtlpriv->phy;
+	u8 rfpath, i;
+
+	rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "==>\n");
+	/* r_select_5G for path_A/B 0 for 2.4G, 1 for 5G */
+	if (rtlhal->current_bandtype == BAND_ON_2_4G) {
+		/* r_select_5G for path_A/B, 0x878 */
+		rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(0), 0x0);
+		rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(15), 0x0);
+		if (rtlhal->macphymode != DUALMAC_DUALPHY) {
+			rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(16), 0x0);
+			rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(31), 0x0);
+		}
+
+		/* rssi_table_select: index 0 for 2.4G.
1~3 for 5G, 0xc78 */ + rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, BIT(6) | BIT(7), 0x0); + + /* fc_area 0xd2c */ + rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(14) | BIT(13), 0x0); + + /* 5G LAN ON */ + rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0xa); + + /* TX BB gain shift*1, Just for testchip, 0xc80, 0xc88 */ + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, 0x40000100); + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, MASKDWORD, 0x40000100); + if (rtlhal->macphymode == DUALMAC_DUALPHY) { + rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, + BIT(10) | BIT(6) | BIT(5), + ((rtlefuse->eeprom_c9 & BIT(3)) >> 3) | + (rtlefuse->eeprom_c9 & BIT(1)) | + ((rtlefuse->eeprom_cc & BIT(1)) << 4)); + rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, + BIT(10) | BIT(6) | BIT(5), + ((rtlefuse->eeprom_c9 & BIT(2)) >> 2) | + ((rtlefuse->eeprom_c9 & BIT(0)) << 1) | + ((rtlefuse->eeprom_cc & BIT(0)) << 5)); + rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(15), 0); + + rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0x01017038); + rtl_set_bbreg(hw, RCONFIG_ANTA, MASKDWORD, 0x0f600000); + } else { + rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, + BIT(26) | BIT(22) | BIT(21) | BIT(10) | + BIT(6) | BIT(5), + ((rtlefuse->eeprom_c9 & BIT(3)) >> 3) | + (rtlefuse->eeprom_c9 & BIT(1)) | + ((rtlefuse->eeprom_cc & BIT(1)) << 4) | + ((rtlefuse->eeprom_c9 & BIT(7)) << 9) | + ((rtlefuse->eeprom_c9 & BIT(5)) << 12) | + ((rtlefuse->eeprom_cc & BIT(3)) << 18)); + rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, + BIT(10) | BIT(6) | BIT(5), + ((rtlefuse->eeprom_c9 & BIT(2)) >> 2) | + ((rtlefuse->eeprom_c9 & BIT(0)) << 1) | + ((rtlefuse->eeprom_cc & BIT(0)) << 5)); + rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, + BIT(10) | BIT(6) | BIT(5), + ((rtlefuse->eeprom_c9 & BIT(6)) >> 6) | + ((rtlefuse->eeprom_c9 & BIT(4)) >> 3) | + ((rtlefuse->eeprom_cc & BIT(2)) << 3)); + rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, + BIT(31) | BIT(15), 0); + + rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0x01017038); + rtl_set_bbreg(hw, RPDP_ANTB, MASKDWORD, 0x01017038); + rtl_set_bbreg(hw, RCONFIG_ANTA, MASKDWORD, 0x0f600000); + rtl_set_bbreg(hw, RCONFIG_ANTB, MASKDWORD, 0x0f600000); + } + /* 1.5V_LDO */ + } else { + /* r_select_5G for path_A/B */ + rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(0), 0x1); + rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(15), 0x1); + if (rtlhal->macphymode != DUALMAC_DUALPHY) { + rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(16), 0x1); + rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(31), 0x1); + } + + /* rssi_table_select: index 0 for 2.4G. 
1~3 for 5G */ + rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, BIT(6) | BIT(7), 0x1); + + /* fc_area */ + rtl_set_bbreg(hw, ROFDM1_CFOTRACKING, BIT(14) | BIT(13), 0x1); + + /* 5G LAN ON */ + rtl_set_bbreg(hw, 0xB30, 0x00F00000, 0x0); + + /* TX BB gain shift, Just for testchip, 0xc80, 0xc88 */ + if (rtlefuse->internal_pa_5g[rtlhal->interfaceindex]) + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, + 0x2d4000b5); + else + rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, MASKDWORD, + 0x20000080); + + if (rtlhal->macphymode != DUALMAC_DUALPHY) { + if (rtlefuse->internal_pa_5g[1]) + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, + MASKDWORD, 0x2d4000b5); + else + rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, + MASKDWORD, 0x20000080); + } + + rtl_set_bbreg(hw, 0xB30, BIT(27), 0); + + if (rtlhal->macphymode == DUALMAC_DUALPHY) { + rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, + BIT(10) | BIT(6) | BIT(5), + (rtlefuse->eeprom_cc & BIT(5))); + rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BIT(10), + ((rtlefuse->eeprom_cc & BIT(4)) >> 4)); + rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, BIT(15), + (rtlefuse->eeprom_cc & BIT(4)) >> 4); + + rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0x01017098); + rtl_set_bbreg(hw, RCONFIG_ANTA, MASKDWORD, 0x20000000); + } else { + rtl_set_bbreg(hw, RFPGA0_XAB_RFINTERFACESW, + BIT(26) | BIT(22) | BIT(21) | BIT(10) | + BIT(6) | BIT(5), + (rtlefuse->eeprom_cc & BIT(5)) | + ((rtlefuse->eeprom_cc & BIT(7)) << 14)); + rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, BIT(10), + ((rtlefuse->eeprom_cc & BIT(4)) >> 4)); + rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE, BIT(10), + ((rtlefuse->eeprom_cc & BIT(6)) >> 6)); + rtl_set_bbreg(hw, RFPGA0_XAB_RFPARAMETER, + BIT(31) | BIT(15), + ((rtlefuse->eeprom_cc & BIT(4)) >> 4) | + ((rtlefuse->eeprom_cc & BIT(6)) << 10)); + + rtl_set_bbreg(hw, RPDP_ANTA, MASKDWORD, 0x01017098); + rtl_set_bbreg(hw, RPDP_ANTB, MASKDWORD, 0x01017098); + rtl_set_bbreg(hw, RCONFIG_ANTA, MASKDWORD, 0x20000000); + rtl_set_bbreg(hw, RCONFIG_ANTB, MASKDWORD, 0x20000000); + } + } + + /* update IQK related settings */ + rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, MASKDWORD, 0x40000100); + rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, MASKDWORD, 0x40000100); + rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000, 0x00); + rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(30) | BIT(28) | + BIT(26) | BIT(24), 0x00); + rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000, 0x00); + rtl_set_bbreg(hw, ROFDM0_RXIQEXTANTA, 0xF0000000, 0x00); + rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, 0x00); + + /* Update RF */ + for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath; + rfpath++) { + if (rtlhal->current_bandtype == BAND_ON_2_4G) { + /* MOD_AG for RF path_A 0x18 BIT8,BIT16 */ + rtl_set_rfreg(hw, rfpath, RF_CHNLBW, BIT(8) | BIT(16) | + BIT(18) | 0xff, 1); + + /* RF0x0b[16:14] =3b'111 */ + rtl_set_rfreg(hw, (enum radio_path)rfpath, 0x0B, + 0x1c000, 0x07); + } else { + /* MOD_AG for RF path_A 0x18 BIT8,BIT16 */ + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, + 0x97524); + } + + /* Set right channel on RF reg0x18 for another mac. */ + if (rtlhal->interfaceindex == 0 && rtlhal->bandset == BAND_ON_2_4G) { + /* Set MAC1 default channel if MAC1 not up. 
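+			 * Power the other MAC's PHY up just long enough to
+			 * program a default channel into its RF_CHNLBW
+			 * register, then power it down again.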
*/ + if (!(rtl_read_byte(rtlpriv, REG_MAC1) & MAC1_ON)) { + rtl92du_phy_enable_anotherphy(hw, true); + rtlhal->during_mac0init_radiob = true; + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, + RFREG_OFFSET_MASK, 0x97524); + rtl92du_phy_powerdown_anotherphy(hw, true); + } + } else if (rtlhal->interfaceindex == 1 && rtlhal->bandset == BAND_ON_5G) { + /* Set MAC0 default channel */ + if (!(rtl_read_byte(rtlpriv, REG_MAC0) & MAC0_ON)) { + rtl92du_phy_enable_anotherphy(hw, false); + rtlhal->during_mac1init_radioa = true; + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, + RFREG_OFFSET_MASK, 0x87401); + rtl92du_phy_powerdown_anotherphy(hw, false); + } + } + } + + /* Update for all band. */ + /* DMDP */ + if (rtlphy->rf_type == RF_1T1R) { + /* Use antenna 0, 0xc04, 0xd04 */ + rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x11); + rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x1); + + /* enable ad/da clock1 for dual-phy reg0x888 */ + if (rtlhal->interfaceindex == 0) { + rtl_set_bbreg(hw, RFPGA0_ADDALLOCKEN, BIT(12) | + BIT(13), 0x3); + } else if (rtl92du_phy_enable_anotherphy(hw, false)) { + rtlhal->during_mac1init_radioa = true; + rtl_set_bbreg(hw, RFPGA0_ADDALLOCKEN, + BIT(12) | BIT(13), 0x3); + rtl92du_phy_powerdown_anotherphy(hw, false); + } + + rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(19) | BIT(20), 0x0); + } else { + /* Single PHY */ + /* Use antenna 0 & 1, 0xc04, 0xd04 */ + rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x33); + rtl_set_bbreg(hw, ROFDM1_TRXPATHENABLE, BDWORD, 0x3); + /* disable ad/da clock1,0x888 */ + rtl_set_bbreg(hw, RFPGA0_ADDALLOCKEN, BIT(12) | BIT(13), 0); + + rtl_set_bbreg(hw, ROFDM1_LSTF, BIT(19) | BIT(20), 0x1); + } + + for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath; + rfpath++) { + rtlphy->rfreg_chnlval[rfpath] = rtl_get_rfreg(hw, rfpath, + RF_CHNLBW, + RFREG_OFFSET_MASK); + rtlphy->reg_rf3c[rfpath] = rtl_get_rfreg(hw, rfpath, 0x3C, + RFREG_OFFSET_MASK); + } + + for (i = 0; i < 2; i++) + rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "RF 0x18 = 0x%x\n", + rtlphy->rfreg_chnlval[i]); + + rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "<==\n"); +} + +bool rtl92du_phy_check_poweroff(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw)); + u8 u1btmp; + + if (rtlhal->macphymode == SINGLEMAC_SINGLEPHY) { + u1btmp = rtl_read_byte(rtlpriv, REG_MAC0); + rtl_write_byte(rtlpriv, REG_MAC0, u1btmp & ~MAC0_ON); + return true; + } + + mutex_lock(rtlpriv->mutex_for_power_on_off); + if (rtlhal->interfaceindex == 0) { + u1btmp = rtl_read_byte(rtlpriv, REG_MAC0); + rtl_write_byte(rtlpriv, REG_MAC0, u1btmp & ~MAC0_ON); + u1btmp = rtl_read_byte(rtlpriv, REG_MAC1); + u1btmp &= MAC1_ON; + } else { + u1btmp = rtl_read_byte(rtlpriv, REG_MAC1); + rtl_write_byte(rtlpriv, REG_MAC1, u1btmp & ~MAC1_ON); + u1btmp = rtl_read_byte(rtlpriv, REG_MAC0); + u1btmp &= MAC0_ON; + } + if (u1btmp) { + mutex_unlock(rtlpriv->mutex_for_power_on_off); + return false; + } + u1btmp = rtl_read_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS); + u1btmp |= BIT(7); + rtl_write_byte(rtlpriv, REG_POWER_OFF_IN_PROCESS, u1btmp); + mutex_unlock(rtlpriv->mutex_for_power_on_off); + + return true; +} + +void rtl92du_phy_init_pa_bias(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + bool is_single_mac = rtlhal->macphymode == SINGLEMAC_SINGLEPHY; + enum radio_path rf_path; + u8 val8; + + read_efuse_byte(hw, 0x3FA, &val8); + + rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "%s: 0x3FA %#x\n", + __func__, val8); + + if (!(val8 
& BIT(0)) && (is_single_mac || rtlhal->interfaceindex == 0)) { + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, 0x07401); + rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x70000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x0F425); + rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x4F425); + rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x8F425); + + /* Back to RX Mode */ + rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x30000); + + rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "2G PA BIAS path A\n"); + } + + if (!(val8 & BIT(1)) && (is_single_mac || rtlhal->interfaceindex == 1)) { + rf_path = rtlhal->interfaceindex == 1 ? RF90_PATH_A : RF90_PATH_B; + + rtl_set_rfreg(hw, rf_path, RF_CHNLBW, RFREG_OFFSET_MASK, 0x07401); + rtl_set_rfreg(hw, rf_path, RF_AC, RFREG_OFFSET_MASK, 0x70000); + rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x0F425); + rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x4F425); + rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x8F425); + + /* Back to RX Mode */ + rtl_set_rfreg(hw, rf_path, RF_AC, RFREG_OFFSET_MASK, 0x30000); + + rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "2G PA BIAS path B\n"); + } + + if (!(val8 & BIT(2)) && (is_single_mac || rtlhal->interfaceindex == 0)) { + /* 5GL_channel */ + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, 0x17524); + rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x70000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x0F496); + rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x4F496); + rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x8F496); + + /* 5GM_channel */ + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, 0x37564); + rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x70000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x0F496); + rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x4F496); + rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x8F496); + + /* 5GH_channel */ + rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, 0x57595); + rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x70000); + rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x0F496); + rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x4F496); + rtl_set_rfreg(hw, RF90_PATH_A, RF_IPA, RFREG_OFFSET_MASK, 0x8F496); + + /* Back to RX Mode */ + rtl_set_rfreg(hw, RF90_PATH_A, RF_AC, RFREG_OFFSET_MASK, 0x30000); + + rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "5G PA BIAS path A\n"); + } + + if (!(val8 & BIT(3)) && (is_single_mac || rtlhal->interfaceindex == 1)) { + rf_path = rtlhal->interfaceindex == 1 ? 
RF90_PATH_A : RF90_PATH_B; + + /* 5GL_channel */ + rtl_set_rfreg(hw, rf_path, RF_CHNLBW, RFREG_OFFSET_MASK, 0x17524); + rtl_set_rfreg(hw, rf_path, RF_AC, RFREG_OFFSET_MASK, 0x70000); + rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x0F496); + rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x4F496); + rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x8F496); + + /* 5GM_channel */ + rtl_set_rfreg(hw, rf_path, RF_CHNLBW, RFREG_OFFSET_MASK, 0x37564); + rtl_set_rfreg(hw, rf_path, RF_AC, RFREG_OFFSET_MASK, 0x70000); + rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x0F496); + rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x4F496); + rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x8F496); + + /* 5GH_channel */ + rtl_set_rfreg(hw, rf_path, RF_CHNLBW, RFREG_OFFSET_MASK, 0x57595); + rtl_set_rfreg(hw, rf_path, RF_AC, RFREG_OFFSET_MASK, 0x70000); + rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x0F496); + rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x4F496); + rtl_set_rfreg(hw, rf_path, RF_IPA, RFREG_OFFSET_MASK, 0x8F496); + + /* Back to RX Mode */ + rtl_set_rfreg(hw, rf_path, RF_AC, RFREG_OFFSET_MASK, 0x30000); + + rtl_dbg(rtlpriv, COMP_RF, DBG_TRACE, "5G PA BIAS path B\n"); + } +} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h new file mode 100644 index 000000000000..090a6203db7e --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/phy.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Realtek Corporation.*/ + +#ifndef __RTL92DU_PHY_H__ +#define __RTL92DU_PHY_H__ + +u32 rtl92du_phy_query_bb_reg(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask); +void rtl92du_phy_set_bb_reg(struct ieee80211_hw *hw, + u32 regaddr, u32 bitmask, u32 data); +bool rtl92du_phy_mac_config(struct ieee80211_hw *hw); +bool rtl92du_phy_bb_config(struct ieee80211_hw *hw); +bool rtl92du_phy_rf_config(struct ieee80211_hw *hw); +void rtl92du_phy_set_bw_mode(struct ieee80211_hw *hw, + enum nl80211_channel_type ch_type); +u8 rtl92du_phy_sw_chnl(struct ieee80211_hw *hw); +bool rtl92du_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, + enum rf_content content, + enum radio_path rfpath); +bool rtl92du_phy_set_rf_power_state(struct ieee80211_hw *hw, + enum rf_pwrstate rfpwr_state); + +void rtl92du_phy_set_poweron(struct ieee80211_hw *hw); +bool rtl92du_phy_check_poweroff(struct ieee80211_hw *hw); +void rtl92du_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t); +void rtl92du_update_bbrf_configuration(struct ieee80211_hw *hw); +void rtl92du_phy_ap_calibrate(struct ieee80211_hw *hw, s8 delta); +void rtl92du_phy_iq_calibrate(struct ieee80211_hw *hw); +void rtl92du_phy_reload_iqk_setting(struct ieee80211_hw *hw, u8 channel); +void rtl92du_phy_init_pa_bias(struct ieee80211_hw *hw); + +#endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.c new file mode 100644 index 000000000000..044dd65eafd0 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.c @@ -0,0 +1,240 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2024 Realtek Corporation.*/ + +#include "../wifi.h" +#include "../rtl8192d/reg.h" +#include "../rtl8192d/phy_common.h" +#include "phy.h" +#include "rf.h" + +bool rtl92du_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = &rtlpriv->rtlhal; + u8 mac_on_bit = bmac0 ? 
MAC1_ON : MAC0_ON;
+	u8 mac_reg = bmac0 ? REG_MAC1 : REG_MAC0;
+	bool bresult = true; /* true: need to enable BB/RF power */
+	u32 maskforphyset = 0;
+	u16 val16;
+	u8 u1btmp;
+
+	rtlhal->during_mac0init_radiob = false;
+	rtlhal->during_mac1init_radioa = false;
+	rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "===>\n");
+
+	/* MAC0 needs PHY1 to load radio_b.txt. The driver uses DBI
+	 * writes for this.
+	 */
+	u1btmp = rtl_read_byte(rtlpriv, mac_reg);
+	if (!(u1btmp & mac_on_bit)) {
+		rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "enable BB & RF\n");
+		/* Enable BB and RF power */
+
+		maskforphyset = bmac0 ? MAC0_ACCESS_PHY1 : MAC1_ACCESS_PHY0;
+
+		val16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN | maskforphyset);
+		val16 &= 0xfffc;
+		rtl_write_word(rtlpriv, REG_SYS_FUNC_EN | maskforphyset, val16);
+
+		val16 = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN | maskforphyset);
+		val16 |= BIT(13) | BIT(0) | BIT(1);
+		rtl_write_word(rtlpriv, REG_SYS_FUNC_EN | maskforphyset, val16);
+	} else {
+		/* If MAC1 is already on, radio_a.txt and radio_b.txt
+		 * have already been loaded.
+		 */
+		bresult = false;
+	}
+	rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<===\n");
+	return bresult;
+}
+
+void rtl92du_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0)
+{
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+	u8 mac_on_bit = bmac0 ? MAC1_ON : MAC0_ON;
+	u8 mac_reg = bmac0 ? REG_MAC1 : REG_MAC0;
+	u32 maskforphyset = 0;
+	u8 u1btmp;
+
+	rtlhal->during_mac0init_radiob = false;
+	rtlhal->during_mac1init_radioa = false;
+	rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "====>\n");
+
+	/* Check again whether MAC0 is enabled; if it is, do not
+	 * power down radio A.
+	 */
+	u1btmp = rtl_read_byte(rtlpriv, mac_reg);
+	if (!(u1btmp & mac_on_bit)) {
+		rtl_dbg(rtlpriv, COMP_INIT, DBG_LOUD, "power down\n");
+		/* power down RF radio A according to YuNan's advice. */
+		maskforphyset = bmac0 ? MAC0_ACCESS_PHY1 : MAC1_ACCESS_PHY0;
+		rtl_write_dword(rtlpriv, RFPGA0_XA_LSSIPARAMETER | maskforphyset,
+				0x00000000);
+	}
+	rtl_dbg(rtlpriv, COMP_RF, DBG_LOUD, "<====\n");
+}
+
+bool rtl92du_phy_rf6052_config(struct ieee80211_hw *hw)
+{
+	bool mac1_initradioa_first = false, mac0_initradiob_first = false;
+	bool need_pwrdown_radioa = false, need_pwrdown_radiob = false;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+	struct rtl_phy *rtlphy = &rtlpriv->phy;
+	struct bb_reg_def *pphyreg;
+	bool true_bpath = false;
+	bool rtstatus = true;
+	u32 u4_regvalue = 0;
+	u8 rfpath;
+
+	if (rtlphy->rf_type == RF_1T1R)
+		rtlphy->num_total_rfpath = 1;
+	else
+		rtlphy->num_total_rfpath = 2;
+
+	/* Single PHY mode: MAC0 uses radio_a and radio_b to configure
+	 * path A and path B separately; MAC1 needn't configure the RF.
+	 * Dual PHY mode: MAC0 uses radio_a to configure the 1st PHY's
+	 * path A, MAC1 uses radio_b to configure the 2nd PHY's path A.
+	 * In DMDP, MAC0 is on the G band and MAC1 on the A band.
+	 */
+	if (rtlhal->macphymode == DUALMAC_DUALPHY) {
+		if (rtlhal->current_bandtype == BAND_ON_2_4G &&
+		    rtlhal->interfaceindex == 0) {
+			/* MAC0 needs PHY1 to load radio_b.txt. */
+			if (rtl92du_phy_enable_anotherphy(hw, true)) {
+				rtlphy->num_total_rfpath = 2;
+				mac0_initradiob_first = true;
+			} else {
+				/* If MAC1 is already on, radio_a.txt and
+				 * radio_b.txt have already been loaded.
+				 */
+				return rtstatus;
+			}
+		} else if (rtlhal->current_bandtype == BAND_ON_5G &&
+			   rtlhal->interfaceindex == 1) {
+			/* MAC1 needs PHY0 to load radio_a.txt.
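+			 * rtl92du_phy_enable_anotherphy() returns true when
+			 * the other MAC was down and its BB/RF had to be
+			 * powered up here, in which case this MAC must also
+			 * configure the other radio and power it down again
+			 * afterwards.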
+
+bool rtl92du_phy_rf6052_config(struct ieee80211_hw *hw)
+{
+	bool mac1_initradioa_first = false, mac0_initradiob_first = false;
+	bool need_pwrdown_radioa = false, need_pwrdown_radiob = false;
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_hal *rtlhal = &rtlpriv->rtlhal;
+	struct rtl_phy *rtlphy = &rtlpriv->phy;
+	struct bb_reg_def *pphyreg;
+	bool true_bpath = false;
+	bool rtstatus = true;
+	u32 u4_regvalue = 0;
+	u8 rfpath;
+
+	if (rtlphy->rf_type == RF_1T1R)
+		rtlphy->num_total_rfpath = 1;
+	else
+		rtlphy->num_total_rfpath = 2;
+
+	/* Single PHY mode: MAC0 uses radio_a and radio_b to configure
+	 * path_A and path_B separately; MAC1 needn't configure the RF.
+	 * Dual PHY mode: MAC0 uses radio_a to configure the 1st PHY's
+	 * path_A, and MAC1 uses radio_b to configure the 2nd PHY's path_A.
+	 * In DMDP, MAC0 is on the G band and MAC1 on the A band.
+	 */
+	if (rtlhal->macphymode == DUALMAC_DUALPHY) {
+		if (rtlhal->current_bandtype == BAND_ON_2_4G &&
+		    rtlhal->interfaceindex == 0) {
+			/* MAC0 needs PHY1 to load radio_b.txt. */
+			if (rtl92du_phy_enable_anotherphy(hw, true)) {
+				rtlphy->num_total_rfpath = 2;
+				mac0_initradiob_first = true;
+			} else {
+				/* If MAC1 is ON, we assume radio_a.txt and
+				 * radio_b.txt have already been loaded.
+				 */
+				return rtstatus;
+			}
+		} else if (rtlhal->current_bandtype == BAND_ON_5G &&
+			   rtlhal->interfaceindex == 1) {
+			/* MAC1 needs PHY0 to load radio_a.txt. */
+			if (rtl92du_phy_enable_anotherphy(hw, false)) {
+				rtlphy->num_total_rfpath = 2;
+				mac1_initradioa_first = true;
+			} else {
+				/* If MAC0 is ON, we assume radio_a.txt and
+				 * radio_b.txt have already been loaded.
+				 */
+				return rtstatus;
+			}
+		} else if (rtlhal->interfaceindex == 1) {
+			/* MAC0 enabled, only initialize radio B. */
+			true_bpath = true;
+		}
+	}
+
+	for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
+		/* MAC1 uses PHY0 to write */
+		if (mac1_initradioa_first) {
+			if (rfpath == RF90_PATH_A) {
+				rtlhal->during_mac1init_radioa = true;
+				need_pwrdown_radioa = true;
+			} else if (rfpath == RF90_PATH_B) {
+				rtlhal->during_mac1init_radioa = false;
+				mac1_initradioa_first = false;
+				rfpath = RF90_PATH_A;
+				true_bpath = true;
+				rtlphy->num_total_rfpath = 1;
+			}
+		} else if (mac0_initradiob_first) {
+			/* MAC0 uses PHY1 to write */
+			if (rfpath == RF90_PATH_A)
+				rtlhal->during_mac0init_radiob = false;
+			if (rfpath == RF90_PATH_B) {
+				rtlhal->during_mac0init_radiob = true;
+				mac0_initradiob_first = false;
+				need_pwrdown_radiob = true;
+				rfpath = RF90_PATH_A;
+				true_bpath = true;
+				rtlphy->num_total_rfpath = 1;
+			}
+		}
+
+		pphyreg = &rtlphy->phyreg_def[rfpath];
+
+		switch (rfpath) {
+		case RF90_PATH_A:
+		case RF90_PATH_C:
+			u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+						    BRFSI_RFENV);
+			break;
+		case RF90_PATH_B:
+		case RF90_PATH_D:
+			u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
+						    BRFSI_RFENV << 16);
+			break;
+		}
+
+		rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
+		udelay(1);
+		rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
+		udelay(1);
+
+		/* Set bit number of Address and Data for RF register */
+		rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
+			      B3WIREADDRESSLENGTH, 0x0);
+		udelay(1);
+		rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
+		udelay(1);
+
+		switch (rfpath) {
+		case RF90_PATH_A:
+			if (true_bpath)
+				rtstatus = rtl92du_phy_config_rf_with_headerfile(
+						hw, radiob_txt,
+						(enum radio_path)rfpath);
+			else
+				rtstatus = rtl92du_phy_config_rf_with_headerfile(
+						hw, radioa_txt,
+						(enum radio_path)rfpath);
+			break;
+		case RF90_PATH_B:
+			rtstatus =
+			    rtl92du_phy_config_rf_with_headerfile(hw, radiob_txt,
+						(enum radio_path)rfpath);
+			break;
+		case RF90_PATH_C:
+			break;
+		case RF90_PATH_D:
+			break;
+		}
+
+		switch (rfpath) {
+		case RF90_PATH_A:
+		case RF90_PATH_C:
+			rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV,
+				      u4_regvalue);
+			break;
+		case RF90_PATH_B:
+		case RF90_PATH_D:
+			rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16,
+				      u4_regvalue);
+			break;
+		}
+
+		if (!rtstatus) {
+			rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE,
+				"Radio[%d] Fail!!\n", rfpath);
+			return rtstatus;
+		}
+	}
+
+	/* Check MAC0 again; if it is enabled, don't power down radio A.
+	 * Check MAC1 again; if it is enabled, don't power down radio B.
+	 */
+	if (need_pwrdown_radioa)
+		rtl92du_phy_powerdown_anotherphy(hw, false);
+	else if (need_pwrdown_radiob)
+		rtl92du_phy_powerdown_anotherphy(hw, true);
+	rtl_dbg(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n");
+
+	return rtstatus;
+}
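Each pass of the loop above brackets the radio-table load in a save/program/restore sequence: the RFENV mux setting for the path is saved, the path is switched to software 3-wire register access, the table from the header file is written, and RFENV is restored. A condensed sketch of that per-path discipline, with a hypothetical helper name (path A masks shown; path B uses BRFSI_RFENV shifted left by 16 for the save/restore):

static bool rtl92du_load_radio_table(struct ieee80211_hw *hw,
				     struct bb_reg_def *pphyreg,
				     enum rf_content content,
				     enum radio_path rfpath)
{
	u32 saved;
	bool ok;

	/* Save the current RFENV selection for this path. */
	saved = rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV);

	/* Route the path to software 3-wire register access. */
	rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
	udelay(1);
	rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
	udelay(1);

	/* Select the address/data framing for RF register writes. */
	rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREADDRESSLENGTH, 0x0);
	udelay(1);
	rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
	udelay(1);

	/* Program the radio table, then restore the saved RFENV. */
	ok = rtl92du_phy_config_rf_with_headerfile(hw, content, rfpath);
	rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV, saved);

	return ok;
}

The restore step matters because RFENV selects who drives the RF pins; leaving it in register-access mode would stall normal RF control after configuration.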
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.h
new file mode 100644
index 000000000000..4a92cbdd00c0
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/rf.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#ifndef __RTL92DU_RF_H__
+#define __RTL92DU_RF_H__
+
+bool rtl92du_phy_rf6052_config(struct ieee80211_hw *hw);
+bool rtl92du_phy_enable_anotherphy(struct ieee80211_hw *hw, bool bmac0);
+void rtl92du_phy_powerdown_anotherphy(struct ieee80211_hw *hw, bool bmac0);
+
+#endif
diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c
new file mode 100644
index 000000000000..d069a81ac617
--- /dev/null
+++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/sw.c
@@ -0,0 +1,395 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2024 Realtek Corporation.*/
+
+#include "../wifi.h"
+#include "../core.h"
+#include "../usb.h"
+#include "../base.h"
+#include "../rtl8192d/reg.h"
+#include "../rtl8192d/def.h"
+#include "../rtl8192d/fw_common.h"
+#include "../rtl8192d/hw_common.h"
+#include "../rtl8192d/phy_common.h"
+#include "../rtl8192d/trx_common.h"
+#include "phy.h"
+#include "dm.h"
+#include "hw.h"
+#include "trx.h"
+#include "led.h"
+
+#include
+
+static struct usb_interface *rtl92du_get_other_intf(struct ieee80211_hw *hw)
+{
+	struct usb_interface *intf;
+	struct usb_device *udev;
+	u8 other_interfaceindex;
+
+	/* See SET_IEEE80211_DEV(hw, &intf->dev); in usb.c */
+	intf = container_of_const(wiphy_dev(hw->wiphy), struct usb_interface, dev);
+
+	if (intf->altsetting[0].desc.bInterfaceNumber == 0)
+		other_interfaceindex = 1;
+	else
+		other_interfaceindex = 0;
+
+	udev = interface_to_usbdev(intf);
+
+	return usb_ifnum_to_if(udev, other_interfaceindex);
+}
+
+static int rtl92du_init_shared_data(struct ieee80211_hw *hw)
+{
+	struct usb_interface *other_intf = rtl92du_get_other_intf(hw);
+	struct rtl_priv *rtlpriv = rtl_priv(hw);
+	struct rtl_priv *other_rtlpriv = NULL;
+	struct ieee80211_hw *other_hw = NULL;
+
+	if (other_intf)
+		other_hw = usb_get_intfdata(other_intf);
+
+	if (other_hw) {
+		/* The other interface was already probed. */
+		other_rtlpriv = rtl_priv(other_hw);
+		rtlpriv->curveindex_2g = other_rtlpriv->curveindex_2g;
+		rtlpriv->curveindex_5g = other_rtlpriv->curveindex_5g;
+		rtlpriv->mutex_for_power_on_off = other_rtlpriv->mutex_for_power_on_off;
+		rtlpriv->mutex_for_hw_init = other_rtlpriv->mutex_for_hw_init;
+
+		if (!rtlpriv->curveindex_2g || !rtlpriv->curveindex_5g ||
+		    !rtlpriv->mutex_for_power_on_off || !rtlpriv->mutex_for_hw_init)
+			return -ENOMEM;
+
+		return 0;
+	}
+
+	/* The other interface doesn't exist or was not probed yet.
*/ + rtlpriv->curveindex_2g = kcalloc(TARGET_CHNL_NUM_2G, + sizeof(*rtlpriv->curveindex_2g), + GFP_KERNEL); + rtlpriv->curveindex_5g = kcalloc(TARGET_CHNL_NUM_5G, + sizeof(*rtlpriv->curveindex_5g), + GFP_KERNEL); + rtlpriv->mutex_for_power_on_off = + kzalloc(sizeof(*rtlpriv->mutex_for_power_on_off), GFP_KERNEL); + rtlpriv->mutex_for_hw_init = + kzalloc(sizeof(*rtlpriv->mutex_for_hw_init), GFP_KERNEL); + + if (!rtlpriv->curveindex_2g || !rtlpriv->curveindex_5g || + !rtlpriv->mutex_for_power_on_off || !rtlpriv->mutex_for_hw_init) { + kfree(rtlpriv->curveindex_2g); + kfree(rtlpriv->curveindex_5g); + kfree(rtlpriv->mutex_for_power_on_off); + kfree(rtlpriv->mutex_for_hw_init); + rtlpriv->curveindex_2g = NULL; + rtlpriv->curveindex_5g = NULL; + rtlpriv->mutex_for_power_on_off = NULL; + rtlpriv->mutex_for_hw_init = NULL; + return -ENOMEM; + } + + mutex_init(rtlpriv->mutex_for_power_on_off); + mutex_init(rtlpriv->mutex_for_hw_init); + + return 0; +} + +static void rtl92du_deinit_shared_data(struct ieee80211_hw *hw) +{ + struct usb_interface *other_intf = rtl92du_get_other_intf(hw); + struct rtl_priv *rtlpriv = rtl_priv(hw); + + if (!other_intf || !usb_get_intfdata(other_intf)) { + /* The other interface doesn't exist or was already disconnected. */ + kfree(rtlpriv->curveindex_2g); + kfree(rtlpriv->curveindex_5g); + if (rtlpriv->mutex_for_power_on_off) + mutex_destroy(rtlpriv->mutex_for_power_on_off); + if (rtlpriv->mutex_for_hw_init) + mutex_destroy(rtlpriv->mutex_for_hw_init); + kfree(rtlpriv->mutex_for_power_on_off); + kfree(rtlpriv->mutex_for_hw_init); + } +} + +static int rtl92du_init_sw_vars(struct ieee80211_hw *hw) +{ + const char *fw_name = "rtlwifi/rtl8192dufw.bin"; + struct rtl_priv *rtlpriv = rtl_priv(hw); + int err; + + err = rtl92du_init_shared_data(hw); + if (err) + return err; + + rtlpriv->dm.dm_initialgain_enable = true; + rtlpriv->dm.dm_flag = 0; + rtlpriv->dm.disable_framebursting = false; + rtlpriv->dm.thermalvalue = 0; + rtlpriv->dm.useramask = true; + + /* dual mac */ + if (rtlpriv->rtlhal.current_bandtype == BAND_ON_5G) + rtlpriv->phy.current_channel = 36; + else + rtlpriv->phy.current_channel = 1; + + if (rtlpriv->rtlhal.macphymode != SINGLEMAC_SINGLEPHY) + rtlpriv->rtlhal.disable_amsdu_8k = true; + + /* for LPS & IPS */ + rtlpriv->psc.inactiveps = rtlpriv->cfg->mod_params->inactiveps; + rtlpriv->psc.swctrl_lps = rtlpriv->cfg->mod_params->swctrl_lps; + rtlpriv->psc.fwctrl_lps = rtlpriv->cfg->mod_params->fwctrl_lps; + + /* for early mode */ + rtlpriv->rtlhal.earlymode_enable = false; + + /* for firmware buf */ + rtlpriv->rtlhal.pfirmware = kmalloc(0x8000, GFP_KERNEL); + if (!rtlpriv->rtlhal.pfirmware) + return -ENOMEM; + + rtlpriv->max_fw_size = 0x8000; + pr_info("Driver for Realtek RTL8192DU WLAN interface\n"); + pr_info("Loading firmware file %s\n", fw_name); + + /* request fw */ + err = request_firmware_nowait(THIS_MODULE, 1, fw_name, + rtlpriv->io.dev, GFP_KERNEL, hw, + rtl_fw_cb); + if (err) { + pr_err("Failed to request firmware!\n"); + kfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; + return err; + } + + return 0; +} + +static void rtl92du_deinit_sw_vars(struct ieee80211_hw *hw) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + + kfree(rtlpriv->rtlhal.pfirmware); + rtlpriv->rtlhal.pfirmware = NULL; + + rtl92du_deinit_shared_data(hw); +} + +static const struct rtl_hal_ops rtl8192du_hal_ops = { + .init_sw_vars = rtl92du_init_sw_vars, + .deinit_sw_vars = rtl92du_deinit_sw_vars, + .read_chip_version = rtl92du_read_chip_version, + .read_eeprom_info = 
rtl92d_read_eeprom_info, + .hw_init = rtl92du_hw_init, + .hw_disable = rtl92du_card_disable, + .enable_interrupt = rtl92du_enable_interrupt, + .disable_interrupt = rtl92du_disable_interrupt, + .set_network_type = rtl92du_set_network_type, + .set_chk_bssid = rtl92du_set_check_bssid, + .set_qos = rtl92d_set_qos, + .set_bcn_reg = rtl92du_set_beacon_related_registers, + .set_bcn_intv = rtl92du_set_beacon_interval, + .update_interrupt_mask = rtl92du_update_interrupt_mask, + .get_hw_reg = rtl92du_get_hw_reg, + .set_hw_reg = rtl92du_set_hw_reg, + .update_rate_tbl = rtl92d_update_hal_rate_tbl, + .fill_tx_desc = rtl92du_tx_fill_desc, + .query_rx_desc = rtl92d_rx_query_desc, + .set_channel_access = rtl92d_update_channel_access_setting, + .radio_onoff_checking = rtl92d_gpio_radio_on_off_checking, + .set_bw_mode = rtl92du_phy_set_bw_mode, + .switch_channel = rtl92du_phy_sw_chnl, + .dm_watchdog = rtl92du_dm_watchdog, + .scan_operation_backup = rtl_phy_scan_operation_backup, + .set_rf_power_state = rtl92du_phy_set_rf_power_state, + .led_control = rtl92du_led_control, + .set_desc = rtl92d_set_desc, + .get_desc = rtl92d_get_desc, + .enable_hw_sec = rtl92d_enable_hw_security_config, + .set_key = rtl92d_set_key, + .get_bbreg = rtl92du_phy_query_bb_reg, + .set_bbreg = rtl92du_phy_set_bb_reg, + .get_rfreg = rtl92d_phy_query_rf_reg, + .set_rfreg = rtl92d_phy_set_rf_reg, + .linked_set_reg = rtl92du_linked_set_reg, + .fill_h2c_cmd = rtl92d_fill_h2c_cmd, + .get_btc_status = rtl_btc_status_false, + .phy_iq_calibrate = rtl92du_phy_iq_calibrate, + .phy_lc_calibrate = rtl92du_phy_lc_calibrate, +}; + +static struct rtl_mod_params rtl92du_mod_params = { + .sw_crypto = false, + .inactiveps = false, + .swctrl_lps = false, + .debug_level = 0, + .debug_mask = 0, +}; + +static const struct rtl_hal_usbint_cfg rtl92du_interface_cfg = { + /* rx */ + .rx_urb_num = 8, + .rx_max_size = 15360, + .usb_rx_hdl = NULL, + .usb_rx_segregate_hdl = NULL, + /* tx */ + .usb_tx_cleanup = rtl92du_tx_cleanup, + .usb_tx_post_hdl = rtl92du_tx_post_hdl, + .usb_tx_aggregate_hdl = rtl92du_tx_aggregate_hdl, + .usb_endpoint_mapping = rtl92du_endpoint_mapping, + .usb_mq_to_hwq = rtl92du_mq_to_hwq, +}; + +static const struct rtl_hal_cfg rtl92du_hal_cfg = { + .name = "rtl8192du", + .ops = &rtl8192du_hal_ops, + .mod_params = &rtl92du_mod_params, + .usb_interface_cfg = &rtl92du_interface_cfg, + + .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL, + .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN, + .maps[SYS_CLK] = REG_SYS_CLKR, + .maps[MAC_RCR_AM] = RCR_AM, + .maps[MAC_RCR_AB] = RCR_AB, + .maps[MAC_RCR_ACRC32] = RCR_ACRC32, + .maps[MAC_RCR_ACF] = RCR_ACF, + .maps[MAC_RCR_AAP] = RCR_AAP, + + .maps[EFUSE_TEST] = REG_EFUSE_TEST, + .maps[EFUSE_ACCESS] = REG_EFUSE_ACCESS, + .maps[EFUSE_CTRL] = REG_EFUSE_CTRL, + .maps[EFUSE_CLK] = 0, /* just for 92se */ + .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL, + .maps[EFUSE_PWC_EV12V] = PWC_EV12V, + .maps[EFUSE_FEN_ELDR] = FEN_ELDR, + .maps[EFUSE_LOADER_CLK_EN] = 0, + .maps[EFUSE_ANA8M] = 0, /* just for 92se */ + .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE, + .maps[EFUSE_MAX_SECTION_MAP] = EFUSE_MAX_SECTION, + .maps[EFUSE_REAL_CONTENT_SIZE] = EFUSE_REAL_CONTENT_LEN, + + .maps[RWCAM] = REG_CAMCMD, + .maps[WCAMI] = REG_CAMWRITE, + .maps[RCAMO] = REG_CAMREAD, + .maps[CAMDBG] = REG_CAMDBG, + .maps[SECR] = REG_SECCFG, + .maps[SEC_CAM_NONE] = CAM_NONE, + .maps[SEC_CAM_WEP40] = CAM_WEP40, + .maps[SEC_CAM_TKIP] = CAM_TKIP, + .maps[SEC_CAM_AES] = CAM_AES, + .maps[SEC_CAM_WEP104] = CAM_WEP104, + + .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6, + 
.maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5, + .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4, + .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3, + .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2, + .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1, + .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8, + .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7, + .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6, + .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5, + .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4, + .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3, + .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2, + .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1, + .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2, + .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1, + + .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW, + .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT, + .maps[RTL_IMR_BCNINT] = IMR_BCNINT, + .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW, + .maps[RTL_IMR_RDU] = IMR_RDU, + .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND, + .maps[RTL_IMR_BDOK] = IMR_BDOK, + .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK, + .maps[RTL_IMR_TBDER] = IMR_TBDER, + .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK, + .maps[RTL_IMR_TBDOK] = IMR_TBDOK, + .maps[RTL_IMR_BKDOK] = IMR_BKDOK, + .maps[RTL_IMR_BEDOK] = IMR_BEDOK, + .maps[RTL_IMR_VIDOK] = IMR_VIDOK, + .maps[RTL_IMR_VODOK] = IMR_VODOK, + .maps[RTL_IMR_ROK] = IMR_ROK, + .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER), + + .maps[RTL_RC_CCK_RATE1M] = DESC_RATE1M, + .maps[RTL_RC_CCK_RATE2M] = DESC_RATE2M, + .maps[RTL_RC_CCK_RATE5_5M] = DESC_RATE5_5M, + .maps[RTL_RC_CCK_RATE11M] = DESC_RATE11M, + .maps[RTL_RC_OFDM_RATE6M] = DESC_RATE6M, + .maps[RTL_RC_OFDM_RATE9M] = DESC_RATE9M, + .maps[RTL_RC_OFDM_RATE12M] = DESC_RATE12M, + .maps[RTL_RC_OFDM_RATE18M] = DESC_RATE18M, + .maps[RTL_RC_OFDM_RATE24M] = DESC_RATE24M, + .maps[RTL_RC_OFDM_RATE36M] = DESC_RATE36M, + .maps[RTL_RC_OFDM_RATE48M] = DESC_RATE48M, + .maps[RTL_RC_OFDM_RATE54M] = DESC_RATE54M, + + .maps[RTL_RC_HT_RATEMCS7] = DESC_RATEMCS7, + .maps[RTL_RC_HT_RATEMCS15] = DESC_RATEMCS15, +}; + +module_param_named(swenc, rtl92du_mod_params.sw_crypto, bool, 0444); +module_param_named(debug_level, rtl92du_mod_params.debug_level, int, 0644); +module_param_named(ips, rtl92du_mod_params.inactiveps, bool, 0444); +module_param_named(swlps, rtl92du_mod_params.swctrl_lps, bool, 0444); +module_param_named(debug_mask, rtl92du_mod_params.debug_mask, ullong, 0644); +MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); +MODULE_PARM_DESC(ips, "Set to 0 to not use link power save (default 0)\n"); +MODULE_PARM_DESC(swlps, "Set to 1 to use SW control power save (default 0)\n"); +MODULE_PARM_DESC(debug_level, "Set debug level (0-5) (default 0)"); +MODULE_PARM_DESC(debug_mask, "Set debug mask (default 0)"); + +#define USB_VENDOR_ID_REALTEK 0x0bda + +static const struct usb_device_id rtl8192d_usb_ids[] = { + {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8193, rtl92du_hal_cfg)}, + {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8194, rtl92du_hal_cfg)}, + {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8111, rtl92du_hal_cfg)}, + {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x0193, rtl92du_hal_cfg)}, + {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0x8171, rtl92du_hal_cfg)}, + {RTL_USB_DEVICE(USB_VENDOR_ID_REALTEK, 0xe194, rtl92du_hal_cfg)}, + {RTL_USB_DEVICE(0x2019, 0xab2c, rtl92du_hal_cfg)}, + {RTL_USB_DEVICE(0x2019, 0xab2d, rtl92du_hal_cfg)}, + {RTL_USB_DEVICE(0x2019, 0x4903, rtl92du_hal_cfg)}, + {RTL_USB_DEVICE(0x2019, 0x4904, rtl92du_hal_cfg)}, + {RTL_USB_DEVICE(0x07b8, 0x8193, rtl92du_hal_cfg)}, + {RTL_USB_DEVICE(0x20f4, 0x664b, rtl92du_hal_cfg)}, + {RTL_USB_DEVICE(0x04dd, 0x954f, rtl92du_hal_cfg)}, + 
{RTL_USB_DEVICE(0x04dd, 0x96a6, rtl92du_hal_cfg)}, + {RTL_USB_DEVICE(0x050d, 0x110a, rtl92du_hal_cfg)}, + {RTL_USB_DEVICE(0x050d, 0x1105, rtl92du_hal_cfg)}, + {RTL_USB_DEVICE(0x050d, 0x120a, rtl92du_hal_cfg)}, + {RTL_USB_DEVICE(0x1668, 0x8102, rtl92du_hal_cfg)}, + {RTL_USB_DEVICE(0x0930, 0x0a0a, rtl92du_hal_cfg)}, + {RTL_USB_DEVICE(0x2001, 0x330c, rtl92du_hal_cfg)}, + {} +}; + +MODULE_DEVICE_TABLE(usb, rtl8192d_usb_ids); + +static int rtl8192du_probe(struct usb_interface *intf, + const struct usb_device_id *id) +{ + return rtl_usb_probe(intf, id, &rtl92du_hal_cfg); +} + +static struct usb_driver rtl8192du_driver = { + .name = "rtl8192du", + .probe = rtl8192du_probe, + .disconnect = rtl_usb_disconnect, + .id_table = rtl8192d_usb_ids, + .disable_hub_initiated_lpm = 1, +}; + +module_usb_driver(rtl8192du_driver); + +MODULE_AUTHOR("Bitterblue Smith "); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Realtek 8192DU 802.11n Dual Mac USB wireless"); +MODULE_FIRMWARE("rtlwifi/rtl8192dufw.bin"); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.c new file mode 100644 index 000000000000..036701433d85 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.c @@ -0,0 +1,1675 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2024 Realtek Corporation.*/ + +#include + +#include "table.h" + +const u32 rtl8192du_phy_reg_2tarray[PHY_REG_2T_ARRAYLENGTH] = { + 0x800, 0x80040002, + 0x804, 0x00000003, + 0x808, 0x0000fc00, + 0x80c, 0x0000000a, + 0x810, 0x10001331, + 0x814, 0x020c3d10, + 0x818, 0x02200385, + 0x81c, 0x00000000, + 0x820, 0x01000100, + 0x824, 0x00390004, + 0x828, 0x01000100, + 0x82c, 0x00390004, + 0x830, 0x27272727, + 0x834, 0x27272727, + 0x838, 0x27272727, + 0x83c, 0x27272727, + 0x840, 0x00010000, + 0x844, 0x00010000, + 0x848, 0x27272727, + 0x84c, 0x27272727, + 0x850, 0x00000000, + 0x854, 0x00000000, + 0x858, 0x569a569a, + 0x85c, 0x0c1b25a4, + 0x860, 0x66e60250, + 0x864, 0x061f0150, + 0x868, 0x27272727, + 0x86c, 0x272b2b2b, + 0x870, 0x07000700, + 0x874, 0x22188000, + 0x878, 0x08080808, + 0x87c, 0x0001fff8, + 0x880, 0xc0083070, + 0x884, 0x00000cd5, + 0x888, 0x00000000, + 0x88c, 0xcc0000c0, + 0x890, 0x00000800, + 0x894, 0xfffffffe, + 0x898, 0x40302010, + 0x89c, 0x00706050, + 0x900, 0x00000000, + 0x904, 0x00000023, + 0x908, 0x00000000, + 0x90c, 0x81121313, + 0xa00, 0x00d047c8, + 0xa04, 0x80ff000c, + 0xa08, 0x8c8a8300, + 0xa0c, 0x2e68120f, + 0xa10, 0x9500bb78, + 0xa14, 0x11144028, + 0xa18, 0x00881117, + 0xa1c, 0x89140f00, + 0xa20, 0x1a1b0000, + 0xa24, 0x090e1317, + 0xa28, 0x00000204, + 0xa2c, 0x00d30000, + 0xa70, 0x101fff00, + 0xa74, 0x00000007, + 0xc00, 0x40071d40, + 0xc04, 0x03a05633, + 0xc08, 0x001000e4, + 0xc0c, 0x6c6c6c6c, + 0xc10, 0x08800000, + 0xc14, 0x40000100, + 0xc18, 0x08800000, + 0xc1c, 0x40000100, + 0xc20, 0x00000000, + 0xc24, 0x00000000, + 0xc28, 0x00000000, + 0xc2c, 0x00000000, + 0xc30, 0x69e9ac44, + 0xc34, 0x469652af, + 0xc38, 0x49795994, + 0xc3c, 0x0a979718, + 0xc40, 0x1f7c403f, + 0xc44, 0x000100b7, + 0xc48, 0xec020107, + 0xc4c, 0x007f037f, + 0xc50, 0x69543420, + 0xc54, 0x43bc009e, + 0xc58, 0x69543420, + 0xc5c, 0x433c00a8, + 0xc60, 0x00000000, + 0xc64, 0x7112848b, + 0xc68, 0x47c00bff, + 0xc6c, 0x00000036, + 0xc70, 0x2c7f000d, + 0xc74, 0x258610db, + 0xc78, 0x0000001f, + 0xc7c, 0x40b95612, + 0xc80, 0x40000100, + 0xc84, 0x20f60000, + 0xc88, 0x40000100, + 0xc8c, 0xa0e40000, + 0xc90, 0x00121820, + 0xc94, 0x00000007, + 0xc98, 0x00121820, + 0xc9c, 0x00007f7f, + 0xca0, 0x00000000, + 0xca4, 0x00000080, 
+ 0xca8, 0x00000000, + 0xcac, 0x00000000, + 0xcb0, 0x00000000, + 0xcb4, 0x00000000, + 0xcb8, 0x00000000, + 0xcbc, 0x28000000, + 0xcc0, 0x00000000, + 0xcc4, 0x00000000, + 0xcc8, 0x00000000, + 0xccc, 0x00000000, + 0xcd0, 0x00000000, + 0xcd4, 0x00000000, + 0xcd8, 0x64b11e20, + 0xcdc, 0xe0767533, + 0xce0, 0x00222222, + 0xce4, 0x00000000, + 0xce8, 0x37644302, + 0xcec, 0x2f97d40c, + 0xd00, 0x00080740, + 0xd04, 0x00020403, + 0xd08, 0x0000907f, + 0xd0c, 0x20010201, + 0xd10, 0xa0633333, + 0xd14, 0x3333bc43, + 0xd18, 0x7a8f5b6b, + 0xd2c, 0xcc979975, + 0xd30, 0x00000000, + 0xd34, 0x80608404, + 0xd38, 0x00000000, + 0xd3c, 0x00027353, + 0xd40, 0x00000000, + 0xd44, 0x00000000, + 0xd48, 0x00000000, + 0xd4c, 0x00000000, + 0xd50, 0x6437140a, + 0xd54, 0x00000000, + 0xd58, 0x00000000, + 0xd5c, 0x30032064, + 0xd60, 0x4653de68, + 0xd64, 0x04518a3c, + 0xd68, 0x00002101, + 0xd6c, 0x2a201c16, + 0xd70, 0x1812362e, + 0xd74, 0x322c2220, + 0xd78, 0x000e3c24, + 0xe00, 0x2a2a2a2a, + 0xe04, 0x2a2a2a2a, + 0xe08, 0x03902a2a, + 0xe10, 0x2a2a2a2a, + 0xe14, 0x2a2a2a2a, + 0xe18, 0x2a2a2a2a, + 0xe1c, 0x2a2a2a2a, + 0xe28, 0x00000000, + 0xe30, 0x1000dc1f, + 0xe34, 0x10008c1f, + 0xe38, 0x02140102, + 0xe3c, 0x681604c2, + 0xe40, 0x01007c00, + 0xe44, 0x01004800, + 0xe48, 0xfb000000, + 0xe4c, 0x000028d1, + 0xe50, 0x1000dc1f, + 0xe54, 0x10008c1f, + 0xe58, 0x02140102, + 0xe5c, 0x28160d05, + 0xe60, 0x00000010, + 0xe68, 0x001b25a4, + 0xe6c, 0x63db25a4, + 0xe70, 0x63db25a4, + 0xe74, 0x0c126da4, + 0xe78, 0x0c126da4, + 0xe7c, 0x0c126da4, + 0xe80, 0x0c126da4, + 0xe84, 0x63db25a4, + 0xe88, 0x0c126da4, + 0xe8c, 0x63db25a4, + 0xed0, 0x63db25a4, + 0xed4, 0x63db25a4, + 0xed8, 0x63db25a4, + 0xedc, 0x001b25a4, + 0xee0, 0x001b25a4, + 0xeec, 0x6fdb25a4, + 0xf14, 0x00000003, + 0xf1c, 0x00000064, + 0xf4c, 0x00000004, + 0xf00, 0x00000300, +}; + +const u32 rtl8192du_phy_reg_array_pg[PHY_REG_ARRAY_PG_LENGTH] = { + 0xe00, 0xffffffff, 0x07090c0c, + 0xe04, 0xffffffff, 0x01020405, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x0b0c0c0e, + 0xe14, 0xffffffff, 0x01030506, + 0xe18, 0xffffffff, 0x0b0c0d0e, + 0xe1c, 0xffffffff, 0x01030509, + 0x830, 0xffffffff, 0x07090c0c, + 0x834, 0xffffffff, 0x01020405, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x0b0c0c0e, + 0x848, 0xffffffff, 0x01030506, + 0x84c, 0xffffffff, 0x0b0c0d0e, + 0x868, 0xffffffff, 0x01030509, + 0xe00, 0xffffffff, 0x00000000, + 0xe04, 0xffffffff, 0x00000000, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x00000000, + 0xe14, 0xffffffff, 0x00000000, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x00000000, + 0x834, 0xffffffff, 0x00000000, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x00000000, + 0x848, 0xffffffff, 0x00000000, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x04040404, + 0xe04, 0xffffffff, 0x00020204, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x06060606, + 0xe14, 0xffffffff, 0x00020406, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x04040404, + 0x834, 0xffffffff, 0x00020204, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x06060606, + 0x848, 0xffffffff, 0x00020406, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x00000000, + 0xe04, 0xffffffff, 0x00000000, + 0xe08, 0x0000ff00, 
0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x00000000, + 0xe14, 0xffffffff, 0x00000000, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x00000000, + 0x834, 0xffffffff, 0x00000000, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x00000000, + 0x848, 0xffffffff, 0x00000000, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x00000000, + 0xe04, 0xffffffff, 0x00000000, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x00000000, + 0xe14, 0xffffffff, 0x00000000, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x00000000, + 0x834, 0xffffffff, 0x00000000, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x00000000, + 0x848, 0xffffffff, 0x00000000, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x04040404, + 0xe04, 0xffffffff, 0x00020204, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x00000000, + 0xe14, 0xffffffff, 0x00000000, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x04040404, + 0x834, 0xffffffff, 0x00020204, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x00000000, + 0x848, 0xffffffff, 0x00000000, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x00000000, + 0xe04, 0xffffffff, 0x00000000, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x00000000, + 0xe14, 0xffffffff, 0x00000000, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x00000000, + 0x834, 0xffffffff, 0x00000000, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x00000000, + 0x848, 0xffffffff, 0x00000000, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x04040404, + 0xe04, 0xffffffff, 0x00020204, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x08080808, + 0xe14, 0xffffffff, 0x00040408, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x04040404, + 0x834, 0xffffffff, 0x00020204, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x08080808, + 0x848, 0xffffffff, 0x00040408, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x04040404, + 0xe04, 0xffffffff, 0x00020204, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x08080808, + 0xe14, 0xffffffff, 0x00040408, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x04040404, + 0x834, 0xffffffff, 0x00020204, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x08080808, + 0x848, 0xffffffff, 0x00040408, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x04040404, + 0xe04, 0xffffffff, 0x00020204, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x08080808, + 0xe14, 0xffffffff, 0x00040408, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x04040404, + 0x834, 0xffffffff, 0x00020204, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x08080808, + 0x848, 0xffffffff, 0x00040408, + 0x84c, 
0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x04040404, + 0xe04, 0xffffffff, 0x00020204, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x08080808, + 0xe14, 0xffffffff, 0x00040408, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x04040404, + 0x834, 0xffffffff, 0x00020204, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x08080808, + 0x848, 0xffffffff, 0x00040408, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x04040404, + 0xe04, 0xffffffff, 0x00020204, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x08080808, + 0xe14, 0xffffffff, 0x00040408, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x04040404, + 0x834, 0xffffffff, 0x00020204, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x08080808, + 0x848, 0xffffffff, 0x00040408, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, + 0xe00, 0xffffffff, 0x04040404, + 0xe04, 0xffffffff, 0x00020204, + 0xe08, 0x0000ff00, 0x00000000, + 0x86c, 0xffffff00, 0x00000000, + 0xe10, 0xffffffff, 0x08080808, + 0xe14, 0xffffffff, 0x00040408, + 0xe18, 0xffffffff, 0x00000000, + 0xe1c, 0xffffffff, 0x00000000, + 0x830, 0xffffffff, 0x04040404, + 0x834, 0xffffffff, 0x00020204, + 0x838, 0xffffff00, 0x00000000, + 0x86c, 0x000000ff, 0x00000000, + 0x83c, 0xffffffff, 0x08080808, + 0x848, 0xffffffff, 0x00040408, + 0x84c, 0xffffffff, 0x00000000, + 0x868, 0xffffffff, 0x00000000, +}; + +const u32 rtl8192du_radioa_2tarray[RADIOA_2T_ARRAYLENGTH] = { + 0x000, 0x00030000, + 0x001, 0x00030000, + 0x002, 0x00000000, + 0x003, 0x00018c63, + 0x004, 0x00018c63, + 0x008, 0x00084000, + 0x00b, 0x0001c000, + 0x00e, 0x00018c67, + 0x00f, 0x00000851, + 0x014, 0x00021440, + 0x018, 0x00017524, + 0x019, 0x00000000, + 0x01d, 0x000a1290, + 0x023, 0x00001558, + 0x01a, 0x00030a99, + 0x01b, 0x00040b00, + 0x01c, 0x000fc339, + 0x03a, 0x000a57eb, + 0x03b, 0x00020000, + 0x03c, 0x000ff454, + 0x020, 0x0000aa52, + 0x021, 0x00054000, + 0x040, 0x0000aa52, + 0x041, 0x00014000, + 0x025, 0x000803be, + 0x026, 0x000fc638, + 0x027, 0x00077c18, + 0x028, 0x000de471, + 0x029, 0x000d7110, + 0x02a, 0x0008cb04, + 0x02b, 0x0004128b, + 0x02c, 0x00001840, + 0x043, 0x0002444f, + 0x044, 0x0001adb0, + 0x045, 0x00056467, + 0x046, 0x0008992c, + 0x047, 0x0000452c, + 0x048, 0x000f9c43, + 0x049, 0x00002e0c, + 0x04a, 0x000546eb, + 0x04b, 0x0008966c, + 0x04c, 0x0000dde9, + 0x018, 0x00007401, + 0x000, 0x00070000, + 0x012, 0x000dc000, + 0x012, 0x00090000, + 0x012, 0x00051000, + 0x012, 0x00012000, + 0x013, 0x000287b7, + 0x013, 0x000247ab, + 0x013, 0x0002079f, + 0x013, 0x0001c793, + 0x013, 0x0001839b, + 0x013, 0x00014392, + 0x013, 0x0001019a, + 0x013, 0x0000c191, + 0x013, 0x00008194, + 0x013, 0x000040a0, + 0x013, 0x00000018, + 0x015, 0x0000f424, + 0x015, 0x0004f424, + 0x015, 0x0008f424, + 0x016, 0x000e1330, + 0x016, 0x000a1330, + 0x016, 0x00061330, + 0x016, 0x00021330, + 0x018, 0x00017524, + 0x000, 0x00070000, + 0x012, 0x000cf000, + 0x012, 0x000bc000, + 0x012, 0x00078000, + 0x012, 0x00000000, + 0x013, 0x000287bc, + 0x013, 0x000247b0, + 0x013, 0x000203b4, + 0x013, 0x0001c3a8, + 0x013, 0x000181b4, + 0x013, 0x000141a8, + 0x013, 0x000100b4, + 0x013, 0x0000c0a8, + 0x013, 0x0000b030, + 0x013, 0x00004024, + 0x013, 0x00000018, + 0x015, 0x0000f4c3, + 0x015, 0x0004f4c3, + 0x015, 0x0008f4c3, + 0x016, 0x000e085f, + 0x016, 
0x000a085f, + 0x016, 0x0006085f, + 0x016, 0x0002085f, + 0x018, 0x00037524, + 0x000, 0x00070000, + 0x012, 0x000cf000, + 0x012, 0x000bc000, + 0x012, 0x00078000, + 0x012, 0x00000000, + 0x013, 0x000287bc, + 0x013, 0x000247b0, + 0x013, 0x000203b4, + 0x013, 0x0001c3a8, + 0x013, 0x000181b4, + 0x013, 0x000141a8, + 0x013, 0x000100b4, + 0x013, 0x0000c0a8, + 0x013, 0x0000b030, + 0x013, 0x00004024, + 0x013, 0x00000018, + 0x015, 0x0000f4c3, + 0x015, 0x0004f4c3, + 0x015, 0x0008f4c3, + 0x016, 0x000e085f, + 0x016, 0x000a085f, + 0x016, 0x0006085f, + 0x016, 0x0002085f, + 0x018, 0x00057568, + 0x000, 0x00070000, + 0x012, 0x000cf000, + 0x012, 0x000bc000, + 0x012, 0x00078000, + 0x012, 0x00000000, + 0x013, 0x000287bc, + 0x013, 0x000247b0, + 0x013, 0x000203b4, + 0x013, 0x0001c3a8, + 0x013, 0x000181b4, + 0x013, 0x000141a8, + 0x013, 0x000100b4, + 0x013, 0x0000c0a8, + 0x013, 0x0000b030, + 0x013, 0x00004024, + 0x013, 0x00000018, + 0x015, 0x0000f4c3, + 0x015, 0x0004f4c3, + 0x015, 0x0008f4c3, + 0x016, 0x000e085f, + 0x016, 0x000a085f, + 0x016, 0x0006085f, + 0x016, 0x0002085f, + 0x030, 0x0004470f, + 0x031, 0x00044ff0, + 0x032, 0x00000070, + 0x033, 0x000dd480, + 0x034, 0x000ffac0, + 0x035, 0x000b80c0, + 0x036, 0x00077000, + 0x037, 0x00064ff2, + 0x038, 0x000e7661, + 0x039, 0x00000e90, + 0x000, 0x00030000, + 0x018, 0x0000f401, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x01e, 0x00088009, + 0x01f, 0x00080003, + 0x0fe, 0x00000000, + 0x01e, 0x00088001, + 0x01f, 0x00080000, + 0x0fe, 0x00000000, + 0x018, 0x00097524, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x02b, 0x00041289, + 0x0fe, 0x00000000, + 0x02d, 0x0006aaaa, + 0x02e, 0x000b4d01, + 0x02d, 0x00080000, + 0x02e, 0x00004d02, + 0x02d, 0x00095555, + 0x02e, 0x00054d03, + 0x02d, 0x000aaaaa, + 0x02e, 0x000b4d04, + 0x02d, 0x000c0000, + 0x02e, 0x00004d05, + 0x02d, 0x000d5555, + 0x02e, 0x00054d06, + 0x02d, 0x000eaaaa, + 0x02e, 0x000b4d07, + 0x02d, 0x00000000, + 0x02e, 0x00005108, + 0x02d, 0x00015555, + 0x02e, 0x00055109, + 0x02d, 0x0002aaaa, + 0x02e, 0x000b510a, + 0x02d, 0x00040000, + 0x02e, 0x0000510b, + 0x02d, 0x00055555, + 0x02e, 0x0005510c, +}; + +const u32 rtl8192du_radiob_2tarray[RADIOB_2T_ARRAYLENGTH] = { + 0x000, 0x00030000, + 0x001, 0x00030000, + 0x002, 0x00000000, + 0x003, 0x00018c63, + 0x004, 0x00018c63, + 0x008, 0x00084000, + 0x00b, 0x0001c000, + 0x00e, 0x00018c67, + 0x00f, 0x00000851, + 0x014, 0x00021440, + 0x018, 0x00007401, + 0x019, 0x00000060, + 0x01d, 0x000a1290, + 0x023, 0x00001558, + 0x01a, 0x00030a99, + 0x01b, 0x00040b00, + 0x01c, 0x000fc339, + 0x03a, 0x000a57eb, + 0x03b, 0x00020000, + 0x03c, 0x000ff454, + 0x020, 0x0000aa52, + 0x021, 0x00054000, + 0x040, 0x0000aa52, + 0x041, 0x00014000, + 0x025, 0x000803be, + 0x026, 0x000fc638, + 0x027, 0x00077c18, + 0x028, 0x000d1c31, + 0x029, 0x000d7110, + 0x02a, 0x000aeb04, + 0x02b, 0x0004128b, + 0x02c, 0x00001840, + 0x043, 0x0002444f, + 0x044, 0x0001adb0, + 0x045, 0x00056467, + 0x046, 0x0008992c, + 0x047, 0x0000452c, + 0x048, 0x000f9c43, + 0x049, 0x00002e0c, + 0x04a, 0x000546eb, + 0x04b, 0x0008966c, + 0x04c, 0x0000dde9, + 0x018, 0x00007401, + 0x000, 0x00070000, + 0x012, 0x000dc000, + 0x012, 0x00090000, + 0x012, 0x00051000, + 0x012, 0x00012000, + 0x013, 0x000287b7, + 0x013, 0x000247ab, + 0x013, 0x0002079f, + 0x013, 0x0001c793, + 0x013, 0x0001839b, + 0x013, 0x00014392, + 0x013, 0x0001019a, + 0x013, 0x0000c191, + 0x013, 0x00008194, + 0x013, 0x000040a0, + 0x013, 0x00000018, + 0x015, 0x0000f424, + 0x015, 0x0004f424, + 0x015, 0x0008f424, + 0x016, 0x000e1330, + 0x016, 0x000a1330, + 0x016, 
0x00061330, + 0x016, 0x00021330, + 0x018, 0x00017524, + 0x000, 0x00070000, + 0x012, 0x000cf000, + 0x012, 0x000bc000, + 0x012, 0x00078000, + 0x012, 0x00000000, + 0x013, 0x000287bc, + 0x013, 0x000247b0, + 0x013, 0x000203b4, + 0x013, 0x0001c3a8, + 0x013, 0x000181b4, + 0x013, 0x000141a8, + 0x013, 0x000100b4, + 0x013, 0x0000c0a8, + 0x013, 0x0000b030, + 0x013, 0x00004024, + 0x013, 0x00000018, + 0x015, 0x0000f4c3, + 0x015, 0x0004f4c3, + 0x015, 0x0008f4c3, + 0x016, 0x000e085f, + 0x016, 0x000a085f, + 0x016, 0x0006085f, + 0x016, 0x0002085f, + 0x018, 0x00037524, + 0x000, 0x00070000, + 0x012, 0x000cf000, + 0x012, 0x000bc000, + 0x012, 0x00078000, + 0x012, 0x00000000, + 0x013, 0x000287bc, + 0x013, 0x000247b0, + 0x013, 0x000203b4, + 0x013, 0x0001c3a8, + 0x013, 0x000181b4, + 0x013, 0x000141a8, + 0x013, 0x000100b4, + 0x013, 0x0000c0a8, + 0x013, 0x0000b030, + 0x013, 0x00004024, + 0x013, 0x00000018, + 0x015, 0x0000f4c3, + 0x015, 0x0004f4c3, + 0x015, 0x0008f4c3, + 0x016, 0x000e085f, + 0x016, 0x000a085f, + 0x016, 0x0006085f, + 0x016, 0x0002085f, + 0x018, 0x00057524, + 0x000, 0x00070000, + 0x012, 0x000cf000, + 0x012, 0x000bc000, + 0x012, 0x00078000, + 0x012, 0x00000000, + 0x013, 0x000287bc, + 0x013, 0x000247b0, + 0x013, 0x000203b4, + 0x013, 0x0001c3a8, + 0x013, 0x000181b4, + 0x013, 0x000141a8, + 0x013, 0x000100b4, + 0x013, 0x0000c0a8, + 0x013, 0x0000b030, + 0x013, 0x00004024, + 0x013, 0x00000018, + 0x015, 0x0000f4c3, + 0x015, 0x0004f4c3, + 0x015, 0x0008f4c3, + 0x016, 0x000e085f, + 0x016, 0x000a085f, + 0x016, 0x0006085f, + 0x016, 0x0002085f, + 0x030, 0x0004470f, + 0x031, 0x00044ff0, + 0x032, 0x00000070, + 0x033, 0x000dd480, + 0x034, 0x000ffac0, + 0x035, 0x000b80c0, + 0x036, 0x00077000, + 0x037, 0x00064ff2, + 0x038, 0x000e7661, + 0x039, 0x00000e90, + 0x000, 0x00030000, + 0x018, 0x0000f401, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x01e, 0x00088009, + 0x01f, 0x00080003, + 0x0fe, 0x00000000, + 0x01e, 0x00088001, + 0x01f, 0x00080000, + 0x0fe, 0x00000000, + 0x018, 0x00087401, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x02b, 0x00041289, + 0x0fe, 0x00000000, + 0x02d, 0x00066666, + 0x02e, 0x00064001, + 0x02d, 0x00091111, + 0x02e, 0x00014002, + 0x02d, 0x000bbbbb, + 0x02e, 0x000b4003, + 0x02d, 0x000e6666, + 0x02e, 0x00064004, + 0x02d, 0x00088888, + 0x02e, 0x00084005, + 0x02d, 0x0009dddd, + 0x02e, 0x000d4006, + 0x02d, 0x000b3333, + 0x02e, 0x00034007, + 0x02d, 0x00048888, + 0x02e, 0x00084408, + 0x02d, 0x000bbbbb, + 0x02e, 0x000b4409, + 0x02d, 0x000e6666, + 0x02e, 0x0006440a, + 0x02d, 0x00011111, + 0x02e, 0x0001480b, + 0x02d, 0x0003bbbb, + 0x02e, 0x000b480c, + 0x02d, 0x00066666, + 0x02e, 0x0006480d, + 0x02d, 0x000ccccc, + 0x02e, 0x000c480e, +}; + +const u32 rtl8192du_radioa_2t_int_paarray[RADIOA_2T_INT_PA_ARRAYLENGTH] = { + 0x000, 0x00030000, + 0x001, 0x00030000, + 0x002, 0x00000000, + 0x003, 0x00018c63, + 0x004, 0x00018c63, + 0x008, 0x00084000, + 0x00b, 0x0001c000, + 0x00e, 0x00018c67, + 0x00f, 0x00000851, + 0x014, 0x00021440, + 0x018, 0x00017524, + 0x019, 0x00000000, + 0x01d, 0x000a1290, + 0x023, 0x00001558, + 0x01a, 0x00030a99, + 0x01b, 0x00040b00, + 0x01c, 0x000fc339, + 0x03a, 0x000a57eb, + 0x03b, 0x00020000, + 0x03c, 0x000ff455, + 0x020, 0x0000aa52, + 0x021, 0x00054000, + 0x040, 0x0000aa52, + 0x041, 0x00014000, + 0x025, 0x000803be, + 0x026, 0x000fc638, + 0x027, 0x00077c18, + 0x028, 0x000de471, + 0x029, 0x000d7110, + 0x02a, 0x0008eb04, + 0x02b, 0x0004128b, + 0x02c, 0x00001840, + 0x043, 0x0002444f, + 0x044, 0x0001adb0, + 0x045, 0x00056467, + 0x046, 0x0008992c, + 0x047, 0x0000452c, + 0x048, 
0x000c0443, + 0x049, 0x00000730, + 0x04a, 0x00050f0f, + 0x04b, 0x000896ef, + 0x04c, 0x0000ddee, + 0x018, 0x00007401, + 0x000, 0x00070000, + 0x012, 0x000dc000, + 0x012, 0x00090000, + 0x012, 0x00051000, + 0x012, 0x00012000, + 0x013, 0x000287b7, + 0x013, 0x000247ab, + 0x013, 0x0002079f, + 0x013, 0x0001c793, + 0x013, 0x0001839b, + 0x013, 0x00014392, + 0x013, 0x0001019a, + 0x013, 0x0000c191, + 0x013, 0x00008194, + 0x013, 0x000040a0, + 0x013, 0x00000018, + 0x015, 0x0000f424, + 0x015, 0x0004f424, + 0x015, 0x0008f424, + 0x016, 0x000e1330, + 0x016, 0x000a1330, + 0x016, 0x00061330, + 0x016, 0x00021330, + 0x018, 0x00017524, + 0x000, 0x00070000, + 0x012, 0x000cf000, + 0x012, 0x000bc000, + 0x012, 0x00078000, + 0x012, 0x00000000, + 0x013, 0x000287bf, + 0x013, 0x000247b3, + 0x013, 0x000207a7, + 0x013, 0x0001c79b, + 0x013, 0x0001839f, + 0x013, 0x00014393, + 0x013, 0x00010399, + 0x013, 0x0000c38d, + 0x013, 0x00008199, + 0x013, 0x0000418d, + 0x013, 0x00000099, + 0x015, 0x0000f495, + 0x015, 0x0004f495, + 0x015, 0x0008f495, + 0x016, 0x000e1874, + 0x016, 0x000a1874, + 0x016, 0x00061874, + 0x016, 0x00021874, + 0x018, 0x00037564, + 0x000, 0x00070000, + 0x012, 0x000cf000, + 0x012, 0x000bc000, + 0x012, 0x00078000, + 0x012, 0x00000000, + 0x013, 0x000287bf, + 0x013, 0x000247b3, + 0x013, 0x000207a7, + 0x013, 0x0001c79b, + 0x013, 0x0001839f, + 0x013, 0x00014393, + 0x013, 0x00010399, + 0x013, 0x0000c38d, + 0x013, 0x00008199, + 0x013, 0x0000418d, + 0x013, 0x00000099, + 0x015, 0x0000f495, + 0x015, 0x0004f495, + 0x015, 0x0008f495, + 0x016, 0x000e1874, + 0x016, 0x000a1874, + 0x016, 0x00061874, + 0x016, 0x00021874, + 0x018, 0x00057595, + 0x000, 0x00070000, + 0x012, 0x000cf000, + 0x012, 0x000bc000, + 0x012, 0x00078000, + 0x012, 0x00000000, + 0x013, 0x000287bf, + 0x013, 0x000247b3, + 0x013, 0x000207a7, + 0x013, 0x0001c79b, + 0x013, 0x0001839f, + 0x013, 0x00014393, + 0x013, 0x00010399, + 0x013, 0x0000c38d, + 0x013, 0x00008199, + 0x013, 0x0000418d, + 0x013, 0x00000099, + 0x015, 0x0000f495, + 0x015, 0x0004f495, + 0x015, 0x0008f495, + 0x016, 0x000e1874, + 0x016, 0x000a1874, + 0x016, 0x00061874, + 0x016, 0x00021874, + 0x030, 0x0004470f, + 0x031, 0x00044ff0, + 0x032, 0x00000070, + 0x033, 0x000dd480, + 0x034, 0x000ffac0, + 0x035, 0x000b80c0, + 0x036, 0x00077000, + 0x037, 0x00064ff2, + 0x038, 0x000e7661, + 0x039, 0x00000e90, + 0x000, 0x00030000, + 0x018, 0x0000f401, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x01e, 0x00088009, + 0x01f, 0x00080003, + 0x0fe, 0x00000000, + 0x01e, 0x00088001, + 0x01f, 0x00080000, + 0x0fe, 0x00000000, + 0x018, 0x00097524, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x02b, 0x00041289, + 0x0fe, 0x00000000, + 0x02d, 0x0006aaaa, + 0x02e, 0x000b4d01, + 0x02d, 0x00080000, + 0x02e, 0x00004d02, + 0x02d, 0x00095555, + 0x02e, 0x00054d03, + 0x02d, 0x000aaaaa, + 0x02e, 0x000b4d04, + 0x02d, 0x000c0000, + 0x02e, 0x00004d05, + 0x02d, 0x000d5555, + 0x02e, 0x00054d06, + 0x02d, 0x000eaaaa, + 0x02e, 0x000b4d07, + 0x02d, 0x00000000, + 0x02e, 0x00005108, + 0x02d, 0x00015555, + 0x02e, 0x00055109, + 0x02d, 0x0002aaaa, + 0x02e, 0x000b510a, + 0x02d, 0x00040000, + 0x02e, 0x0000510b, + 0x02d, 0x00055555, + 0x02e, 0x0005510c, +}; + +const u32 rtl8192du_radiob_2t_int_paarray[RADIOB_2T_INT_PA_ARRAYLENGTH] = { + 0x000, 0x00030000, + 0x001, 0x00030000, + 0x002, 0x00000000, + 0x003, 0x00018c63, + 0x004, 0x00018c63, + 0x008, 0x00084000, + 0x00b, 0x0001c000, + 0x00e, 0x00018c67, + 0x00f, 0x00000851, + 0x014, 0x00021440, + 0x018, 0x00007401, + 0x019, 0x00000060, + 0x01d, 0x000a1290, + 0x023, 
0x00001558, + 0x01a, 0x00030a99, + 0x01b, 0x00040b00, + 0x01c, 0x000fc339, + 0x03a, 0x000a57eb, + 0x03b, 0x00020000, + 0x03c, 0x000ff455, + 0x020, 0x0000aa52, + 0x021, 0x00054000, + 0x040, 0x0000aa52, + 0x041, 0x00014000, + 0x025, 0x000803be, + 0x026, 0x000fc638, + 0x027, 0x00077c18, + 0x028, 0x000d1c31, + 0x029, 0x000d7110, + 0x02a, 0x000aeb04, + 0x02b, 0x0004128b, + 0x02c, 0x00001840, + 0x043, 0x0002444f, + 0x044, 0x0001adb0, + 0x045, 0x00056467, + 0x046, 0x0008992c, + 0x047, 0x0000452c, + 0x048, 0x000c0443, + 0x049, 0x00000730, + 0x04a, 0x00050f0f, + 0x04b, 0x000896ef, + 0x04c, 0x0000ddee, + 0x018, 0x00007401, + 0x000, 0x00070000, + 0x012, 0x000dc000, + 0x012, 0x00090000, + 0x012, 0x00051000, + 0x012, 0x00012000, + 0x013, 0x000287b7, + 0x013, 0x000247ab, + 0x013, 0x0002079f, + 0x013, 0x0001c793, + 0x013, 0x0001839b, + 0x013, 0x00014392, + 0x013, 0x0001019a, + 0x013, 0x0000c191, + 0x013, 0x00008194, + 0x013, 0x000040a0, + 0x013, 0x00000018, + 0x015, 0x0000f424, + 0x015, 0x0004f424, + 0x015, 0x0008f424, + 0x016, 0x000e1330, + 0x016, 0x000a1330, + 0x016, 0x00061330, + 0x016, 0x00021330, + 0x018, 0x00017524, + 0x000, 0x00070000, + 0x012, 0x000cf000, + 0x012, 0x000bc000, + 0x012, 0x00078000, + 0x012, 0x00000000, + 0x013, 0x000287bf, + 0x013, 0x000247b3, + 0x013, 0x000207a7, + 0x013, 0x0001c79b, + 0x013, 0x0001839f, + 0x013, 0x00014393, + 0x013, 0x00010399, + 0x013, 0x0000c38d, + 0x013, 0x00008199, + 0x013, 0x0000418d, + 0x013, 0x00000099, + 0x015, 0x0000f495, + 0x015, 0x0004f495, + 0x015, 0x0008f495, + 0x016, 0x000e1874, + 0x016, 0x000a1874, + 0x016, 0x00061874, + 0x016, 0x00021874, + 0x018, 0x00037564, + 0x000, 0x00070000, + 0x012, 0x000cf000, + 0x012, 0x000bc000, + 0x012, 0x00078000, + 0x012, 0x00000000, + 0x013, 0x000287bf, + 0x013, 0x000247b3, + 0x013, 0x000207a7, + 0x013, 0x0001c79b, + 0x013, 0x0001839f, + 0x013, 0x00014393, + 0x013, 0x00010399, + 0x013, 0x0000c38d, + 0x013, 0x00008199, + 0x013, 0x0000418d, + 0x013, 0x00000099, + 0x015, 0x0000f495, + 0x015, 0x0004f495, + 0x015, 0x0008f495, + 0x016, 0x000e1874, + 0x016, 0x000a1874, + 0x016, 0x00061874, + 0x016, 0x00021874, + 0x018, 0x00057595, + 0x000, 0x00070000, + 0x012, 0x000cf000, + 0x012, 0x000bc000, + 0x012, 0x00078000, + 0x012, 0x00000000, + 0x013, 0x000287bf, + 0x013, 0x000247b3, + 0x013, 0x000207a7, + 0x013, 0x0001c79b, + 0x013, 0x0001839f, + 0x013, 0x00014393, + 0x013, 0x00010399, + 0x013, 0x0000c38d, + 0x013, 0x00008199, + 0x013, 0x0000418d, + 0x013, 0x00000099, + 0x015, 0x0000f495, + 0x015, 0x0004f495, + 0x015, 0x0008f495, + 0x016, 0x000e1874, + 0x016, 0x000a1874, + 0x016, 0x00061874, + 0x016, 0x00021874, + 0x030, 0x0004470f, + 0x031, 0x00044ff0, + 0x032, 0x00000070, + 0x033, 0x000dd480, + 0x034, 0x000ffac0, + 0x035, 0x000b80c0, + 0x036, 0x00077000, + 0x037, 0x00064ff2, + 0x038, 0x000e7661, + 0x039, 0x00000e90, + 0x000, 0x00030000, + 0x018, 0x0000f401, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x01e, 0x00088009, + 0x01f, 0x00080003, + 0x0fe, 0x00000000, + 0x01e, 0x00088001, + 0x01f, 0x00080000, + 0x0fe, 0x00000000, + 0x018, 0x00087401, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x0fe, 0x00000000, + 0x02b, 0x00041289, + 0x0fe, 0x00000000, + 0x02d, 0x00066666, + 0x02e, 0x00064001, + 0x02d, 0x00091111, + 0x02e, 0x00014002, + 0x02d, 0x000bbbbb, + 0x02e, 0x000b4003, + 0x02d, 0x000e6666, + 0x02e, 0x00064004, + 0x02d, 0x00088888, + 0x02e, 0x00084005, + 0x02d, 0x0009dddd, + 0x02e, 0x000d4006, + 0x02d, 0x000b3333, + 0x02e, 0x00034007, + 0x02d, 0x00048888, + 0x02e, 0x00084408, + 0x02d, 0x000bbbbb, + 0x02e, 0x000b4409, + 0x02d, 
0x000e6666, + 0x02e, 0x0006440a, + 0x02d, 0x00011111, + 0x02e, 0x0001480b, + 0x02d, 0x0003bbbb, + 0x02e, 0x000b480c, + 0x02d, 0x00066666, + 0x02e, 0x0006480d, + 0x02d, 0x000ccccc, + 0x02e, 0x000c480e, +}; + +const u32 rtl8192du_mac_2tarray[MAC_2T_ARRAYLENGTH] = { + 0x420, 0x00000080, + 0x423, 0x00000000, + 0x430, 0x00000000, + 0x431, 0x00000000, + 0x432, 0x00000000, + 0x433, 0x00000001, + 0x434, 0x00000004, + 0x435, 0x00000005, + 0x436, 0x00000006, + 0x437, 0x00000007, + 0x438, 0x00000000, + 0x439, 0x00000000, + 0x43a, 0x00000000, + 0x43b, 0x00000001, + 0x43c, 0x00000004, + 0x43d, 0x00000005, + 0x43e, 0x00000006, + 0x43f, 0x00000007, + 0x440, 0x00000050, + 0x441, 0x00000001, + 0x442, 0x00000000, + 0x444, 0x00000015, + 0x445, 0x000000f0, + 0x446, 0x0000000f, + 0x447, 0x00000000, + 0x462, 0x00000008, + 0x463, 0x00000003, + 0x4c8, 0x000000ff, + 0x4c9, 0x00000008, + 0x4cc, 0x000000ff, + 0x4cd, 0x000000ff, + 0x4ce, 0x00000001, + 0x500, 0x00000026, + 0x501, 0x000000a2, + 0x502, 0x0000002f, + 0x503, 0x00000000, + 0x504, 0x00000028, + 0x505, 0x000000a3, + 0x506, 0x0000005e, + 0x507, 0x00000000, + 0x508, 0x0000002b, + 0x509, 0x000000a4, + 0x50a, 0x0000005e, + 0x50b, 0x00000000, + 0x50c, 0x0000004f, + 0x50d, 0x000000a4, + 0x50e, 0x00000000, + 0x50f, 0x00000000, + 0x512, 0x0000001c, + 0x514, 0x0000000a, + 0x515, 0x00000010, + 0x516, 0x0000000a, + 0x517, 0x00000010, + 0x51a, 0x00000016, + 0x524, 0x0000000f, + 0x525, 0x0000004f, + 0x546, 0x00000040, + 0x547, 0x00000000, + 0x550, 0x00000010, + 0x551, 0x00000010, + 0x559, 0x00000002, + 0x55a, 0x00000002, + 0x55d, 0x000000ff, + 0x605, 0x00000080, + 0x608, 0x0000000e, + 0x609, 0x0000002a, + 0x652, 0x00000020, + 0x63c, 0x0000000a, + 0x63d, 0x0000000a, + 0x63e, 0x0000000e, + 0x63f, 0x0000000e, + 0x66e, 0x00000005, + 0x700, 0x00000021, + 0x701, 0x00000043, + 0x702, 0x00000065, + 0x703, 0x00000087, + 0x708, 0x00000021, + 0x709, 0x00000043, + 0x70a, 0x00000065, + 0x70b, 0x00000087, + 0x024, 0x0000000d, + 0x025, 0x00000080, + 0x026, 0x00000011, + 0x027, 0x00000000, + 0x028, 0x00000083, + 0x029, 0x000000db, + 0x02a, 0x000000ff, + 0x02b, 0x00000000, + 0x014, 0x00000055, + 0x015, 0x000000a9, + 0x016, 0x0000008b, + 0x017, 0x00000008, + 0x010, 0x00000003, + 0x011, 0x0000002b, + 0x012, 0x00000002, + 0x013, 0x00000049, +}; + +const u32 rtl8192du_agctab_array[AGCTAB_ARRAYLENGTH] = { + 0xc78, 0x7b000001, + 0xc78, 0x7b010001, + 0xc78, 0x7b020001, + 0xc78, 0x7b030001, + 0xc78, 0x7b040001, + 0xc78, 0x7b050001, + 0xc78, 0x7b060001, + 0xc78, 0x7a070001, + 0xc78, 0x79080001, + 0xc78, 0x78090001, + 0xc78, 0x770a0001, + 0xc78, 0x760b0001, + 0xc78, 0x750c0001, + 0xc78, 0x740d0001, + 0xc78, 0x730e0001, + 0xc78, 0x720f0001, + 0xc78, 0x71100001, + 0xc78, 0x70110001, + 0xc78, 0x6f120001, + 0xc78, 0x6e130001, + 0xc78, 0x6d140001, + 0xc78, 0x6c150001, + 0xc78, 0x6b160001, + 0xc78, 0x6a170001, + 0xc78, 0x69180001, + 0xc78, 0x68190001, + 0xc78, 0x671a0001, + 0xc78, 0x661b0001, + 0xc78, 0x651c0001, + 0xc78, 0x641d0001, + 0xc78, 0x631e0001, + 0xc78, 0x621f0001, + 0xc78, 0x61200001, + 0xc78, 0x60210001, + 0xc78, 0x49220001, + 0xc78, 0x48230001, + 0xc78, 0x47240001, + 0xc78, 0x46250001, + 0xc78, 0x45260001, + 0xc78, 0x44270001, + 0xc78, 0x43280001, + 0xc78, 0x42290001, + 0xc78, 0x412a0001, + 0xc78, 0x402b0001, + 0xc78, 0x262c0001, + 0xc78, 0x252d0001, + 0xc78, 0x242e0001, + 0xc78, 0x232f0001, + 0xc78, 0x22300001, + 0xc78, 0x21310001, + 0xc78, 0x20320001, + 0xc78, 0x06330001, + 0xc78, 0x05340001, + 0xc78, 0x04350001, + 0xc78, 0x03360001, + 0xc78, 0x02370001, + 0xc78, 0x01380001, + 0xc78, 
0x00390001, + 0xc78, 0x003a0001, + 0xc78, 0x003b0001, + 0xc78, 0x003c0001, + 0xc78, 0x003d0001, + 0xc78, 0x003e0001, + 0xc78, 0x003f0001, + 0xc78, 0x7b400001, + 0xc78, 0x7b410001, + 0xc78, 0x7a420001, + 0xc78, 0x79430001, + 0xc78, 0x78440001, + 0xc78, 0x77450001, + 0xc78, 0x76460001, + 0xc78, 0x75470001, + 0xc78, 0x74480001, + 0xc78, 0x73490001, + 0xc78, 0x724a0001, + 0xc78, 0x714b0001, + 0xc78, 0x704c0001, + 0xc78, 0x6f4d0001, + 0xc78, 0x6e4e0001, + 0xc78, 0x6d4f0001, + 0xc78, 0x6c500001, + 0xc78, 0x6b510001, + 0xc78, 0x6a520001, + 0xc78, 0x69530001, + 0xc78, 0x68540001, + 0xc78, 0x67550001, + 0xc78, 0x66560001, + 0xc78, 0x65570001, + 0xc78, 0x64580001, + 0xc78, 0x63590001, + 0xc78, 0x625a0001, + 0xc78, 0x615b0001, + 0xc78, 0x605c0001, + 0xc78, 0x485d0001, + 0xc78, 0x475e0001, + 0xc78, 0x465f0001, + 0xc78, 0x45600001, + 0xc78, 0x44610001, + 0xc78, 0x43620001, + 0xc78, 0x42630001, + 0xc78, 0x41640001, + 0xc78, 0x40650001, + 0xc78, 0x27660001, + 0xc78, 0x26670001, + 0xc78, 0x25680001, + 0xc78, 0x24690001, + 0xc78, 0x236a0001, + 0xc78, 0x226b0001, + 0xc78, 0x216c0001, + 0xc78, 0x206d0001, + 0xc78, 0x206e0001, + 0xc78, 0x206f0001, + 0xc78, 0x20700001, + 0xc78, 0x20710001, + 0xc78, 0x20720001, + 0xc78, 0x20730001, + 0xc78, 0x20740001, + 0xc78, 0x20750001, + 0xc78, 0x20760001, + 0xc78, 0x20770001, + 0xc78, 0x20780001, + 0xc78, 0x20790001, + 0xc78, 0x207a0001, + 0xc78, 0x207b0001, + 0xc78, 0x207c0001, + 0xc78, 0x207d0001, + 0xc78, 0x207e0001, + 0xc78, 0x207f0001, + 0xc78, 0x38000002, + 0xc78, 0x38010002, + 0xc78, 0x38020002, + 0xc78, 0x38030002, + 0xc78, 0x38040002, + 0xc78, 0x38050002, + 0xc78, 0x38060002, + 0xc78, 0x38070002, + 0xc78, 0x38080002, + 0xc78, 0x3c090002, + 0xc78, 0x3e0a0002, + 0xc78, 0x400b0002, + 0xc78, 0x440c0002, + 0xc78, 0x480d0002, + 0xc78, 0x4c0e0002, + 0xc78, 0x500f0002, + 0xc78, 0x52100002, + 0xc78, 0x56110002, + 0xc78, 0x5a120002, + 0xc78, 0x5e130002, + 0xc78, 0x60140002, + 0xc78, 0x60150002, + 0xc78, 0x60160002, + 0xc78, 0x62170002, + 0xc78, 0x62180002, + 0xc78, 0x62190002, + 0xc78, 0x621a0002, + 0xc78, 0x621b0002, + 0xc78, 0x621c0002, + 0xc78, 0x621d0002, + 0xc78, 0x621e0002, + 0xc78, 0x621f0002, + 0xc78, 0x32000044, + 0xc78, 0x32010044, + 0xc78, 0x32020044, + 0xc78, 0x32030044, + 0xc78, 0x32040044, + 0xc78, 0x32050044, + 0xc78, 0x32060044, + 0xc78, 0x34070044, + 0xc78, 0x35080044, + 0xc78, 0x36090044, + 0xc78, 0x370a0044, + 0xc78, 0x380b0044, + 0xc78, 0x390c0044, + 0xc78, 0x3a0d0044, + 0xc78, 0x3e0e0044, + 0xc78, 0x420f0044, + 0xc78, 0x44100044, + 0xc78, 0x46110044, + 0xc78, 0x4a120044, + 0xc78, 0x4e130044, + 0xc78, 0x50140044, + 0xc78, 0x55150044, + 0xc78, 0x5a160044, + 0xc78, 0x5e170044, + 0xc78, 0x64180044, + 0xc78, 0x6e190044, + 0xc78, 0x6e1a0044, + 0xc78, 0x6e1b0044, + 0xc78, 0x6e1c0044, + 0xc78, 0x6e1d0044, + 0xc78, 0x6e1e0044, + 0xc78, 0x6e1f0044, + 0xc78, 0x6e1f0000, +}; + +const u32 rtl8192du_agctab_5garray[AGCTAB_5G_ARRAYLENGTH] = { + 0xc78, 0x7b000001, + 0xc78, 0x7b010001, + 0xc78, 0x7a020001, + 0xc78, 0x79030001, + 0xc78, 0x78040001, + 0xc78, 0x77050001, + 0xc78, 0x76060001, + 0xc78, 0x75070001, + 0xc78, 0x74080001, + 0xc78, 0x73090001, + 0xc78, 0x720a0001, + 0xc78, 0x710b0001, + 0xc78, 0x700c0001, + 0xc78, 0x6f0d0001, + 0xc78, 0x6e0e0001, + 0xc78, 0x6d0f0001, + 0xc78, 0x6c100001, + 0xc78, 0x6b110001, + 0xc78, 0x6a120001, + 0xc78, 0x69130001, + 0xc78, 0x68140001, + 0xc78, 0x67150001, + 0xc78, 0x66160001, + 0xc78, 0x65170001, + 0xc78, 0x64180001, + 0xc78, 0x63190001, + 0xc78, 0x621a0001, + 0xc78, 0x611b0001, + 0xc78, 0x601c0001, + 0xc78, 0x481d0001, + 0xc78, 
0x471e0001, + 0xc78, 0x461f0001, + 0xc78, 0x45200001, + 0xc78, 0x44210001, + 0xc78, 0x43220001, + 0xc78, 0x42230001, + 0xc78, 0x41240001, + 0xc78, 0x40250001, + 0xc78, 0x27260001, + 0xc78, 0x26270001, + 0xc78, 0x25280001, + 0xc78, 0x24290001, + 0xc78, 0x232a0001, + 0xc78, 0x222b0001, + 0xc78, 0x212c0001, + 0xc78, 0x202d0001, + 0xc78, 0x202e0001, + 0xc78, 0x202f0001, + 0xc78, 0x20300001, + 0xc78, 0x20310001, + 0xc78, 0x20320001, + 0xc78, 0x20330001, + 0xc78, 0x20340001, + 0xc78, 0x20350001, + 0xc78, 0x20360001, + 0xc78, 0x20370001, + 0xc78, 0x20380001, + 0xc78, 0x20390001, + 0xc78, 0x203a0001, + 0xc78, 0x203b0001, + 0xc78, 0x203c0001, + 0xc78, 0x203d0001, + 0xc78, 0x203e0001, + 0xc78, 0x203f0001, + 0xc78, 0x32000044, + 0xc78, 0x32010044, + 0xc78, 0x32020044, + 0xc78, 0x32030044, + 0xc78, 0x32040044, + 0xc78, 0x32050044, + 0xc78, 0x32060044, + 0xc78, 0x34070044, + 0xc78, 0x35080044, + 0xc78, 0x36090044, + 0xc78, 0x370a0044, + 0xc78, 0x380b0044, + 0xc78, 0x390c0044, + 0xc78, 0x3a0d0044, + 0xc78, 0x3e0e0044, + 0xc78, 0x420f0044, + 0xc78, 0x44100044, + 0xc78, 0x46110044, + 0xc78, 0x4a120044, + 0xc78, 0x4e130044, + 0xc78, 0x50140044, + 0xc78, 0x55150044, + 0xc78, 0x5a160044, + 0xc78, 0x5e170044, + 0xc78, 0x64180044, + 0xc78, 0x6e190044, + 0xc78, 0x6e1a0044, + 0xc78, 0x6e1b0044, + 0xc78, 0x6e1c0044, + 0xc78, 0x6e1d0044, + 0xc78, 0x6e1e0044, + 0xc78, 0x6e1f0044, + 0xc78, 0x6e1f0000, +}; + +const u32 rtl8192du_agctab_2garray[AGCTAB_2G_ARRAYLENGTH] = { + 0xc78, 0x7b000001, + 0xc78, 0x7b010001, + 0xc78, 0x7b020001, + 0xc78, 0x7b030001, + 0xc78, 0x7b040001, + 0xc78, 0x7b050001, + 0xc78, 0x7b060001, + 0xc78, 0x7a070001, + 0xc78, 0x79080001, + 0xc78, 0x78090001, + 0xc78, 0x770a0001, + 0xc78, 0x760b0001, + 0xc78, 0x750c0001, + 0xc78, 0x740d0001, + 0xc78, 0x730e0001, + 0xc78, 0x720f0001, + 0xc78, 0x71100001, + 0xc78, 0x70110001, + 0xc78, 0x6f120001, + 0xc78, 0x6e130001, + 0xc78, 0x6d140001, + 0xc78, 0x6c150001, + 0xc78, 0x6b160001, + 0xc78, 0x6a170001, + 0xc78, 0x69180001, + 0xc78, 0x68190001, + 0xc78, 0x671a0001, + 0xc78, 0x661b0001, + 0xc78, 0x651c0001, + 0xc78, 0x641d0001, + 0xc78, 0x631e0001, + 0xc78, 0x621f0001, + 0xc78, 0x61200001, + 0xc78, 0x60210001, + 0xc78, 0x49220001, + 0xc78, 0x48230001, + 0xc78, 0x47240001, + 0xc78, 0x46250001, + 0xc78, 0x45260001, + 0xc78, 0x44270001, + 0xc78, 0x43280001, + 0xc78, 0x42290001, + 0xc78, 0x412a0001, + 0xc78, 0x402b0001, + 0xc78, 0x262c0001, + 0xc78, 0x252d0001, + 0xc78, 0x242e0001, + 0xc78, 0x232f0001, + 0xc78, 0x22300001, + 0xc78, 0x21310001, + 0xc78, 0x20320001, + 0xc78, 0x06330001, + 0xc78, 0x05340001, + 0xc78, 0x04350001, + 0xc78, 0x03360001, + 0xc78, 0x02370001, + 0xc78, 0x01380001, + 0xc78, 0x00390001, + 0xc78, 0x003a0001, + 0xc78, 0x003b0001, + 0xc78, 0x003c0001, + 0xc78, 0x003d0001, + 0xc78, 0x003e0001, + 0xc78, 0x003f0001, + 0xc78, 0x38000002, + 0xc78, 0x38010002, + 0xc78, 0x38020002, + 0xc78, 0x38030002, + 0xc78, 0x38040002, + 0xc78, 0x38050002, + 0xc78, 0x38060002, + 0xc78, 0x38070002, + 0xc78, 0x38080002, + 0xc78, 0x3c090002, + 0xc78, 0x3e0a0002, + 0xc78, 0x400b0002, + 0xc78, 0x440c0002, + 0xc78, 0x480d0002, + 0xc78, 0x4c0e0002, + 0xc78, 0x500f0002, + 0xc78, 0x52100002, + 0xc78, 0x56110002, + 0xc78, 0x5a120002, + 0xc78, 0x5e130002, + 0xc78, 0x60140002, + 0xc78, 0x60150002, + 0xc78, 0x60160002, + 0xc78, 0x62170002, + 0xc78, 0x62180002, + 0xc78, 0x62190002, + 0xc78, 0x621a0002, + 0xc78, 0x621b0002, + 0xc78, 0x621c0002, + 0xc78, 0x621d0002, + 0xc78, 0x621e0002, + 0xc78, 0x621f0002, + 0xc78, 0x6e1f0000, +}; diff --git 
a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.h new file mode 100644 index 000000000000..b809ba511320 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/table.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Realtek Corporation.*/ + +#ifndef __RTL92DU_TABLE_H__ +#define __RTL92DU_TABLE_H__ + +#define PHY_REG_2T_ARRAYLENGTH 372 +#define PHY_REG_ARRAY_PG_LENGTH 624 +#define RADIOA_2T_ARRAYLENGTH 378 +#define RADIOB_2T_ARRAYLENGTH 384 +#define RADIOA_2T_INT_PA_ARRAYLENGTH 378 +#define RADIOB_2T_INT_PA_ARRAYLENGTH 384 +#define MAC_2T_ARRAYLENGTH 192 +#define AGCTAB_ARRAYLENGTH 386 +#define AGCTAB_5G_ARRAYLENGTH 194 +#define AGCTAB_2G_ARRAYLENGTH 194 + +extern const u32 rtl8192du_phy_reg_2tarray[PHY_REG_2T_ARRAYLENGTH]; +extern const u32 rtl8192du_phy_reg_array_pg[PHY_REG_ARRAY_PG_LENGTH]; +extern const u32 rtl8192du_radioa_2tarray[RADIOA_2T_ARRAYLENGTH]; +extern const u32 rtl8192du_radiob_2tarray[RADIOB_2T_ARRAYLENGTH]; +extern const u32 rtl8192du_radioa_2t_int_paarray[RADIOA_2T_INT_PA_ARRAYLENGTH]; +extern const u32 rtl8192du_radiob_2t_int_paarray[RADIOB_2T_INT_PA_ARRAYLENGTH]; +extern const u32 rtl8192du_mac_2tarray[MAC_2T_ARRAYLENGTH]; +extern const u32 rtl8192du_agctab_array[AGCTAB_ARRAYLENGTH]; +extern const u32 rtl8192du_agctab_5garray[AGCTAB_5G_ARRAYLENGTH]; +extern const u32 rtl8192du_agctab_2garray[AGCTAB_2G_ARRAYLENGTH]; + +#endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.c new file mode 100644 index 000000000000..743ce0cfffe6 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.c @@ -0,0 +1,372 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright(c) 2024 Realtek Corporation.*/ + +#include "../wifi.h" +#include "../base.h" +#include "../usb.h" +#include "../rtl8192d/reg.h" +#include "../rtl8192d/def.h" +#include "../rtl8192d/trx_common.h" +#include "trx.h" + +void rtl92du_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb) +{ +} + +int rtl92du_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb, + struct sk_buff *skb) +{ + return 0; +} + +struct sk_buff *rtl92du_tx_aggregate_hdl(struct ieee80211_hw *hw, + struct sk_buff_head *list) +{ + return skb_dequeue(list); +} + +static enum rtl_desc_qsel _rtl92du_hwq_to_descq(u16 queue_index) +{ + switch (queue_index) { + case RTL_TXQ_BCN: + return QSLT_BEACON; + case RTL_TXQ_MGT: + return QSLT_MGNT; + case RTL_TXQ_VO: + return QSLT_VO; + case RTL_TXQ_VI: + return QSLT_VI; + case RTL_TXQ_BK: + return QSLT_BK; + default: + case RTL_TXQ_BE: + return QSLT_BE; + } +} + +/* For HW recovery information */ +static void _rtl92du_tx_desc_checksum(__le32 *txdesc) +{ + __le16 *ptr = (__le16 *)txdesc; + u16 checksum = 0; + u32 index; + + /* Clear first */ + set_tx_desc_tx_desc_checksum(txdesc, 0); + for (index = 0; index < 16; index++) + checksum = checksum ^ le16_to_cpu(*(ptr + index)); + set_tx_desc_tx_desc_checksum(txdesc, checksum); +} + +void rtl92du_tx_fill_desc(struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, u8 *pdesc_tx, + u8 *pbd_desc_tx, struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, + u8 queue_index, + struct rtl_tcb_desc *tcb_desc) +{ + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_ps_ctl *ppsc = rtl_psc(rtlpriv); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + struct rtl_mac *mac = rtl_mac(rtlpriv); + struct rtl_sta_info *sta_entry; + __le16 fc = hdr->frame_control; + u8 agg_state = 
RTL_AGG_STOP; + u16 pktlen = skb->len; + u32 rts_en, hw_rts_en; + u8 ampdu_density = 0; + u16 seq_number; + __le32 *txdesc; + u8 rate_flag; + u8 tid; + + rtl_get_tcb_desc(hw, info, sta, skb, tcb_desc); + + txdesc = (__le32 *)skb_push(skb, RTL_TX_HEADER_SIZE); + memset(txdesc, 0, RTL_TX_HEADER_SIZE); + + set_tx_desc_pkt_size(txdesc, pktlen); + set_tx_desc_linip(txdesc, 0); + set_tx_desc_pkt_offset(txdesc, RTL_DUMMY_OFFSET); + set_tx_desc_offset(txdesc, RTL_TX_HEADER_SIZE); + /* 5G have no CCK rate */ + if (rtlhal->current_bandtype == BAND_ON_5G) + if (tcb_desc->hw_rate < DESC_RATE6M) + tcb_desc->hw_rate = DESC_RATE6M; + + set_tx_desc_tx_rate(txdesc, tcb_desc->hw_rate); + if (tcb_desc->use_shortgi || tcb_desc->use_shortpreamble) + set_tx_desc_data_shortgi(txdesc, 1); + + if (rtlhal->macphymode == DUALMAC_DUALPHY && + tcb_desc->hw_rate == DESC_RATEMCS7) + set_tx_desc_data_shortgi(txdesc, 1); + + if (sta) { + sta_entry = (struct rtl_sta_info *)sta->drv_priv; + tid = ieee80211_get_tid(hdr); + agg_state = sta_entry->tids[tid].agg.agg_state; + ampdu_density = sta->deflink.ht_cap.ampdu_density; + } + + if (agg_state == RTL_AGG_OPERATIONAL && + info->flags & IEEE80211_TX_CTL_AMPDU) { + set_tx_desc_agg_enable(txdesc, 1); + set_tx_desc_max_agg_num(txdesc, 0x14); + set_tx_desc_ampdu_density(txdesc, ampdu_density); + tcb_desc->rts_enable = 1; + tcb_desc->rts_rate = DESC_RATE24M; + } else { + set_tx_desc_agg_break(txdesc, 1); + } + seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4; + set_tx_desc_seq(txdesc, seq_number); + + rts_en = tcb_desc->rts_enable && !tcb_desc->cts_enable; + hw_rts_en = tcb_desc->rts_enable || tcb_desc->cts_enable; + set_tx_desc_rts_enable(txdesc, rts_en); + set_tx_desc_hw_rts_enable(txdesc, hw_rts_en); + set_tx_desc_cts2self(txdesc, tcb_desc->cts_enable); + set_tx_desc_rts_stbc(txdesc, tcb_desc->rts_stbc); + /* 5G have no CCK rate */ + if (rtlhal->current_bandtype == BAND_ON_5G) + if (tcb_desc->rts_rate < DESC_RATE6M) + tcb_desc->rts_rate = DESC_RATE6M; + set_tx_desc_rts_rate(txdesc, tcb_desc->rts_rate); + set_tx_desc_rts_bw(txdesc, 0); + set_tx_desc_rts_sc(txdesc, tcb_desc->rts_sc); + set_tx_desc_rts_short(txdesc, tcb_desc->rts_use_shortpreamble); + + rate_flag = info->control.rates[0].flags; + if (mac->bw_40) { + if (rate_flag & IEEE80211_TX_RC_DUP_DATA) { + set_tx_desc_data_bw(txdesc, 1); + set_tx_desc_tx_sub_carrier(txdesc, 3); + } else if (rate_flag & IEEE80211_TX_RC_40_MHZ_WIDTH) { + set_tx_desc_data_bw(txdesc, 1); + set_tx_desc_tx_sub_carrier(txdesc, mac->cur_40_prime_sc); + } else { + set_tx_desc_data_bw(txdesc, 0); + set_tx_desc_tx_sub_carrier(txdesc, 0); + } + } else { + set_tx_desc_data_bw(txdesc, 0); + set_tx_desc_tx_sub_carrier(txdesc, 0); + } + + if (info->control.hw_key) { + struct ieee80211_key_conf *keyconf = info->control.hw_key; + + switch (keyconf->cipher) { + case WLAN_CIPHER_SUITE_WEP40: + case WLAN_CIPHER_SUITE_WEP104: + case WLAN_CIPHER_SUITE_TKIP: + set_tx_desc_sec_type(txdesc, 0x1); + break; + case WLAN_CIPHER_SUITE_CCMP: + set_tx_desc_sec_type(txdesc, 0x3); + break; + default: + set_tx_desc_sec_type(txdesc, 0x0); + break; + } + } + + set_tx_desc_pkt_id(txdesc, 0); + set_tx_desc_queue_sel(txdesc, _rtl92du_hwq_to_descq(queue_index)); + set_tx_desc_data_rate_fb_limit(txdesc, 0x1F); + set_tx_desc_rts_rate_fb_limit(txdesc, 0xF); + set_tx_desc_disable_fb(txdesc, 0); + set_tx_desc_use_rate(txdesc, tcb_desc->use_driver_rate); + + if (ieee80211_is_data_qos(fc)) { + if (mac->rdg_en) { + rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, + "Enable RDG 
function\n"); + set_tx_desc_rdg_enable(txdesc, 1); + set_tx_desc_htc(txdesc, 1); + } + set_tx_desc_qos(txdesc, 1); + } + + if (rtlpriv->dm.useramask) { + set_tx_desc_rate_id(txdesc, tcb_desc->ratr_index); + set_tx_desc_macid(txdesc, tcb_desc->mac_id); + } else { + set_tx_desc_rate_id(txdesc, 0xC + tcb_desc->ratr_index); + set_tx_desc_macid(txdesc, tcb_desc->ratr_index); + } + + if (!ieee80211_is_data_qos(fc) && ppsc->leisure_ps && + ppsc->fwctrl_lps) { + set_tx_desc_hwseq_en(txdesc, 1); + set_tx_desc_pkt_id(txdesc, 8); + } + + if (ieee80211_has_morefrags(fc)) + set_tx_desc_more_frag(txdesc, 1); + if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) || + is_broadcast_ether_addr(ieee80211_get_DA(hdr))) + set_tx_desc_bmc(txdesc, 1); + + set_tx_desc_own(txdesc, 1); + set_tx_desc_last_seg(txdesc, 1); + set_tx_desc_first_seg(txdesc, 1); + _rtl92du_tx_desc_checksum(txdesc); + + rtl_dbg(rtlpriv, COMP_SEND, DBG_TRACE, "==>\n"); +} + +static void _rtl92du_config_out_ep(struct ieee80211_hw *hw, u8 num_out_pipe) +{ + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + struct rtl_priv *rtlpriv = rtl_priv(hw); + struct rtl_hal *rtlhal = rtl_hal(rtlpriv); + u16 ep_cfg; + + rtlusb->out_queue_sel = 0; + rtlusb->out_ep_nums = 0; + + if (rtlhal->interfaceindex == 0) + ep_cfg = rtl_read_word(rtlpriv, REG_USB_Queue_Select_MAC0); + else + ep_cfg = rtl_read_word(rtlpriv, REG_USB_Queue_Select_MAC1); + + if (ep_cfg & 0x00f) { + rtlusb->out_queue_sel |= TX_SELE_HQ; + rtlusb->out_ep_nums++; + } + if (ep_cfg & 0x0f0) { + rtlusb->out_queue_sel |= TX_SELE_NQ; + rtlusb->out_ep_nums++; + } + if (ep_cfg & 0xf00) { + rtlusb->out_queue_sel |= TX_SELE_LQ; + rtlusb->out_ep_nums++; + } + + switch (num_out_pipe) { + case 3: + rtlusb->out_queue_sel = TX_SELE_HQ | TX_SELE_NQ | TX_SELE_LQ; + rtlusb->out_ep_nums = 3; + break; + case 2: + rtlusb->out_queue_sel = TX_SELE_HQ | TX_SELE_NQ; + rtlusb->out_ep_nums = 2; + break; + case 1: + rtlusb->out_queue_sel = TX_SELE_HQ; + rtlusb->out_ep_nums = 1; + break; + default: + break; + } +} + +static void _rtl92du_one_out_ep_mapping(struct rtl_usb *rtlusb, + struct rtl_ep_map *ep_map) +{ + ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[0]; + ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[0]; + ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[0]; + ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0]; + ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0]; + ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0]; + ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0]; +} + +static void _rtl92du_two_out_ep_mapping(struct rtl_usb *rtlusb, + struct rtl_ep_map *ep_map) +{ + ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[1]; + ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[1]; + ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[0]; + ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0]; + ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0]; + ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0]; + ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0]; +} + +static void _rtl92du_three_out_ep_mapping(struct rtl_usb *rtlusb, + struct rtl_ep_map *ep_map) +{ + ep_map->ep_mapping[RTL_TXQ_BE] = rtlusb->out_eps[2]; + ep_map->ep_mapping[RTL_TXQ_BK] = rtlusb->out_eps[2]; + ep_map->ep_mapping[RTL_TXQ_VI] = rtlusb->out_eps[1]; + ep_map->ep_mapping[RTL_TXQ_VO] = rtlusb->out_eps[0]; + ep_map->ep_mapping[RTL_TXQ_MGT] = rtlusb->out_eps[0]; + ep_map->ep_mapping[RTL_TXQ_BCN] = rtlusb->out_eps[0]; + ep_map->ep_mapping[RTL_TXQ_HI] = rtlusb->out_eps[0]; +} + +static int _rtl92du_out_ep_mapping(struct ieee80211_hw 
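A note on _rtl92du_config_out_ep() above: the REG_USB_Queue_Select_MACx word advertises the TX queues per nibble, and each non-zero nibble implies one usable bulk-out endpoint. A standalone sketch of that decode, assuming the nibble layout is exactly the one the driver tests (decode_queue_sel is hypothetical):

#include <stdint.h>

#define TX_SELE_HQ	0x1	/* BIT(0), high queue */
#define TX_SELE_LQ	0x2	/* BIT(1), low queue */
#define TX_SELE_NQ	0x4	/* BIT(2), normal queue */

static uint8_t decode_queue_sel(uint16_t ep_cfg, unsigned int *out_ep_nums)
{
	uint8_t sel = 0;
	unsigned int n = 0;

	if (ep_cfg & 0x00f) { sel |= TX_SELE_HQ; n++; }	/* high-queue nibble */
	if (ep_cfg & 0x0f0) { sel |= TX_SELE_NQ; n++; }	/* normal-queue nibble */
	if (ep_cfg & 0xf00) { sel |= TX_SELE_LQ; n++; }	/* low-queue nibble */

	*out_ep_nums = n;
	return sel;
}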
*hw) +{ + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + struct rtl_ep_map *ep_map = &rtlusb->ep_map; + + switch (rtlusb->out_ep_nums) { + case 1: + _rtl92du_one_out_ep_mapping(rtlusb, ep_map); + break; + case 2: + _rtl92du_two_out_ep_mapping(rtlusb, ep_map); + break; + case 3: + _rtl92du_three_out_ep_mapping(rtlusb, ep_map); + break; + default: + return -EINVAL; + } + + return 0; +} + +int rtl92du_endpoint_mapping(struct ieee80211_hw *hw) +{ + struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); + + _rtl92du_config_out_ep(hw, rtlusb->out_ep_nums); + + /* Normal chip with one IN and one OUT doesn't have interrupt IN EP. */ + if (rtlusb->out_ep_nums == 1 && rtlusb->in_ep_nums != 1) + return -EINVAL; + + return _rtl92du_out_ep_mapping(hw); +} + +u16 rtl92du_mq_to_hwq(__le16 fc, u16 mac80211_queue_index) +{ + u16 hw_queue_index; + + if (unlikely(ieee80211_is_beacon(fc))) { + hw_queue_index = RTL_TXQ_BCN; + goto out; + } + if (ieee80211_is_mgmt(fc)) { + hw_queue_index = RTL_TXQ_MGT; + goto out; + } + + switch (mac80211_queue_index) { + case 0: + hw_queue_index = RTL_TXQ_VO; + break; + case 1: + hw_queue_index = RTL_TXQ_VI; + break; + case 2: + hw_queue_index = RTL_TXQ_BE; + break; + case 3: + hw_queue_index = RTL_TXQ_BK; + break; + default: + hw_queue_index = RTL_TXQ_BE; + WARN_ONCE(true, "rtl8192du: QSLT_BE queue, skb_queue:%d\n", + mac80211_queue_index); + break; + } +out: + return hw_queue_index; +} diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.h b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.h new file mode 100644 index 000000000000..8c3d24622fa7 --- /dev/null +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192du/trx.h @@ -0,0 +1,60 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright(c) 2024 Realtek Corporation.*/ + +#ifndef __RTL92DU_TRX_H__ +#define __RTL92DU_TRX_H__ + +#define TX_SELE_HQ BIT(0) /* High Queue */ +#define TX_SELE_LQ BIT(1) /* Low Queue */ +#define TX_SELE_NQ BIT(2) /* Normal Queue */ + +#define TX_TOTAL_PAGE_NUMBER_92DU 0xF8 +#define TEST_PAGE_NUM_PUBQ_92DU 0x89 +#define TX_TOTAL_PAGE_NUMBER_92D_DUAL_MAC 0x7A +#define NORMAL_PAGE_NUM_PUBQ_92D_DUAL_MAC 0x5A +#define NORMAL_PAGE_NUM_HPQ_92D_DUAL_MAC 0x10 +#define NORMAL_PAGE_NUM_LPQ_92D_DUAL_MAC 0x10 +#define NORMAL_PAGE_NUM_NORMALQ_92D_DUAL_MAC 0 + +#define WMM_NORMAL_TX_TOTAL_PAGE_NUMBER 0xF5 + +#define WMM_NORMAL_PAGE_NUM_PUBQ_92D 0x65 +#define WMM_NORMAL_PAGE_NUM_HPQ_92D 0x30 +#define WMM_NORMAL_PAGE_NUM_LPQ_92D 0x30 +#define WMM_NORMAL_PAGE_NUM_NPQ_92D 0x30 + +#define WMM_NORMAL_PAGE_NUM_PUBQ_92D_DUAL_MAC 0x32 +#define WMM_NORMAL_PAGE_NUM_HPQ_92D_DUAL_MAC 0x18 +#define WMM_NORMAL_PAGE_NUM_LPQ_92D_DUAL_MAC 0x18 +#define WMM_NORMAL_PAGE_NUM_NPQ_92D_DUAL_MAC 0x18 + +static inline void set_tx_desc_bmc(__le32 *__txdesc, u32 __value) +{ + le32p_replace_bits(__txdesc, __value, BIT(24)); +} + +static inline void set_tx_desc_agg_break(__le32 *__txdesc, u32 __value) +{ + le32p_replace_bits((__txdesc + 1), __value, BIT(6)); +} + +static inline void set_tx_desc_tx_desc_checksum(__le32 *__txdesc, u32 __value) +{ + le32p_replace_bits((__txdesc + 7), __value, GENMASK(15, 0)); +} + +void rtl92du_tx_fill_desc(struct ieee80211_hw *hw, + struct ieee80211_hdr *hdr, u8 *pdesc, + u8 *pbd_desc_tx, struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, + struct sk_buff *skb, u8 hw_queue, + struct rtl_tcb_desc *ptcb_desc); +int rtl92du_endpoint_mapping(struct ieee80211_hw *hw); +u16 rtl92du_mq_to_hwq(__le16 fc, u16 mac80211_queue_index); +struct sk_buff *rtl92du_tx_aggregate_hdl(struct ieee80211_hw 
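The set_tx_desc_*() inlines in trx.h above all lean on le32p_replace_bits() from include/linux/bitfield.h: read a little-endian dword, replace a masked field, write it back. A simplified model of the helper, ignoring the __le32 byte-order handling (identity on little-endian hosts); le32p_replace_bits_sketch is a hypothetical stand-in:

#include <stdint.h>

static void le32p_replace_bits_sketch(uint32_t *p, uint32_t val, uint32_t mask)
{
	unsigned int shift = __builtin_ctz(mask);	/* mask must be non-zero */

	*p = (*p & ~mask) | ((val << shift) & mask);
}

So set_tx_desc_bmc(txdesc, 1) is a read-modify-write of BIT(24) in the first descriptor dword, and the +1/+7 variants address later dwords.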
*hw, + struct sk_buff_head *list); +void rtl92du_tx_cleanup(struct ieee80211_hw *hw, struct sk_buff *skb); +int rtl92du_tx_post_hdl(struct ieee80211_hw *hw, struct urb *urb, + struct sk_buff *skb); + +#endif diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c index 7bde20fdbeab..162e734d5b08 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192ee/sw.c @@ -31,7 +31,7 @@ static void rtl92ee_init_aspm_vars(struct ieee80211_hw *hw) * 2 - Enable ASPM with Clock Req, * 3 - Alwyas Enable ASPM with Clock Req, * 4 - Always Enable ASPM without Clock Req. - * set defult to RTL8192CE:3 RTL8192E:2 + * set default to RTL8192CE:3 RTL8192E:2 */ rtlpci->const_pci_aspm = 3; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c index 675bdd32feb1..bbf8ff63dced 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192se/sw.c @@ -27,7 +27,7 @@ static void rtl92s_init_aspm_vars(struct ieee80211_hw *hw) * 2 - Enable ASPM with Clock Req, * 3 - Alwyas Enable ASPM with Clock Req, * 4 - Always Enable ASPM without Clock Req. - * set defult to RTL8192CE:3 RTL8192E:2 + * set default to RTL8192CE:3 RTL8192E:2 * */ rtlpci->const_pci_aspm = 2; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c index dd7505e2f22c..1b144fbd4d26 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/sw.c @@ -33,7 +33,7 @@ static void rtl8723e_init_aspm_vars(struct ieee80211_hw *hw) * 2 - Enable ASPM with Clock Req, * 3 - Alwyas Enable ASPM with Clock Req, * 4 - Always Enable ASPM without Clock Req. - * set defult to RTL8192CE:3 RTL8192E:2 + * set default to RTL8192CE:3 RTL8192E:2 */ rtlpci->const_pci_aspm = 3; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c index 162c34f0e9b7..0a92d0325098 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723be/sw.c @@ -32,7 +32,7 @@ static void rtl8723be_init_aspm_vars(struct ieee80211_hw *hw) * 2 - Enable ASPM with Clock Req, * 3 - Alwyas Enable ASPM with Clock Req, * 4 - Always Enable ASPM without Clock Req. - * set defult to RTL8192CE:3 RTL8192E:2 + * set default to RTL8192CE:3 RTL8192E:2 */ rtlpci->const_pci_aspm = 3; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c index 7b911695db33..a65503c5ae5a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/sw.c @@ -30,7 +30,7 @@ static void rtl8821ae_init_aspm_vars(struct ieee80211_hw *hw) * 2 - Enable ASPM with Clock Req, * 3 - Alwyas Enable ASPM with Clock Req, * 4 - Always Enable ASPM without Clock Req. 
- * set defult to RTL8192CE:3 RTL8192E:2 + * set default to RTL8192CE:3 RTL8192E:2 */ rtlpci->const_pci_aspm = 3; diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c index 2ea72d9e3957..d37a017b2b81 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.c +++ b/drivers/net/wireless/realtek/rtlwifi/usb.c @@ -23,6 +23,8 @@ MODULE_DESCRIPTION("USB basic driver for rtlwifi"); #define MAX_USBCTRL_VENDORREQ_TIMES 10 +static void _rtl_usb_cleanup_tx(struct ieee80211_hw *hw); + static void _usbctrl_vendorreq_sync(struct usb_device *udev, u8 reqtype, u16 value, void *pdata, u16 len) { @@ -285,9 +287,23 @@ static int _rtl_usb_init(struct ieee80211_hw *hw) } /* usb endpoint mapping */ err = rtlpriv->cfg->usb_interface_cfg->usb_endpoint_mapping(hw); - rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq; - _rtl_usb_init_tx(hw); - _rtl_usb_init_rx(hw); + if (err) + return err; + + rtlusb->usb_mq_to_hwq = rtlpriv->cfg->usb_interface_cfg->usb_mq_to_hwq; + + err = _rtl_usb_init_tx(hw); + if (err) + return err; + + err = _rtl_usb_init_rx(hw); + if (err) + goto err_out; + + return 0; + +err_out: + _rtl_usb_cleanup_tx(hw); return err; } @@ -691,17 +707,13 @@ static int rtl_usb_start(struct ieee80211_hw *hw) } /*======================= tx =========================================*/ -static void rtl_usb_cleanup(struct ieee80211_hw *hw) +static void _rtl_usb_cleanup_tx(struct ieee80211_hw *hw) { u32 i; struct sk_buff *_skb; struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw)); struct ieee80211_tx_info *txinfo; - /* clean up rx stuff. */ - _rtl_usb_cleanup_rx(hw); - - /* clean up tx stuff */ for (i = 0; i < RTL_USB_MAX_EP_NUM; i++) { while ((_skb = skb_dequeue(&rtlusb->tx_skb_queue[i]))) { rtlusb->usb_tx_cleanup(hw, _skb); @@ -715,6 +727,12 @@ static void rtl_usb_cleanup(struct ieee80211_hw *hw) usb_kill_anchored_urbs(&rtlusb->tx_submitted); } +static void rtl_usb_cleanup(struct ieee80211_hw *hw) +{ + _rtl_usb_cleanup_rx(hw); + _rtl_usb_cleanup_tx(hw); +} + /* We may add some struct into struct rtl_usb later. Do deinit here. 
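The _rtl_usb_init() rework above follows the standard kernel unwind shape: every fallible step must undo its predecessors on failure. A generic sketch of that shape under hypothetical setup_a/setup_b/cleanup_a names, not the driver's actual functions:

static int setup_a(void) { return 0; }
static void cleanup_a(void) { }
static int setup_b(void) { return 0; }

static int init_two_stages(void)
{
	int err;

	err = setup_a();
	if (err)
		return err;		/* nothing to roll back yet */

	err = setup_b();
	if (err)
		goto err_undo_a;	/* step A succeeded, so undo it */

	return 0;

err_undo_a:
	cleanup_a();
	return err;
}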
*/ static void rtl_usb_deinit(struct ieee80211_hw *hw) { @@ -937,7 +955,7 @@ static const struct rtl_intf_ops rtl_usb_ops = { int rtl_usb_probe(struct usb_interface *intf, const struct usb_device_id *id, - struct rtl_hal_cfg *rtl_hal_cfg) + const struct rtl_hal_cfg *rtl_hal_cfg) { int err; struct ieee80211_hw *hw = NULL; diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.h b/drivers/net/wireless/realtek/rtlwifi/usb.h index 12529afc0510..b66d6f9ae564 100644 --- a/drivers/net/wireless/realtek/rtlwifi/usb.h +++ b/drivers/net/wireless/realtek/rtlwifi/usb.h @@ -136,7 +136,7 @@ struct rtl_usb_priv { int rtl_usb_probe(struct usb_interface *intf, const struct usb_device_id *id, - struct rtl_hal_cfg *rtl92cu_hal_cfg); + const struct rtl_hal_cfg *rtl92cu_hal_cfg); void rtl_usb_disconnect(struct usb_interface *intf); int rtl_usb_suspend(struct usb_interface *pusb_intf, pm_message_t message); int rtl_usb_resume(struct usb_interface *pusb_intf); diff --git a/drivers/net/wireless/realtek/rtlwifi/wifi.h b/drivers/net/wireless/realtek/rtlwifi/wifi.h index 442419568734..ae6e351bc83c 100644 --- a/drivers/net/wireless/realtek/rtlwifi/wifi.h +++ b/drivers/net/wireless/realtek/rtlwifi/wifi.h @@ -2356,9 +2356,9 @@ struct rtl_hal_cfg { bool write_readback; char *name; char *alt_fw_name; - struct rtl_hal_ops *ops; + const struct rtl_hal_ops *ops; struct rtl_mod_params *mod_params; - struct rtl_hal_usbint_cfg *usb_interface_cfg; + const struct rtl_hal_usbint_cfg *usb_interface_cfg; enum rtl_spec_ver spec_ver; /*this map used for some registers or vars @@ -2707,7 +2707,7 @@ struct rtl_priv { /* hal_cfg : for diff cards * intf_ops : for diff interrface usb/pcie */ - struct rtl_hal_cfg *cfg; + const struct rtl_hal_cfg *cfg; const struct rtl_intf_ops *intf_ops; /* this var will be set by set_bit, @@ -2746,6 +2746,12 @@ struct rtl_priv { */ bool use_new_trx_flow; + /* For dual MAC RTL8192DU, things shared by the 2 USB interfaces */ + u32 *curveindex_2g; + u32 *curveindex_5g; + struct mutex *mutex_for_power_on_off; /* for power on/off */ + struct mutex *mutex_for_hw_init; /* for hardware init */ + #ifdef CONFIG_PM struct wiphy_wowlan_support wowlan; #endif diff --git a/drivers/net/wireless/realtek/rtw88/mac.c b/drivers/net/wireless/realtek/rtw88/mac.c index 0dba8aae7716..564f5988ee82 100644 --- a/drivers/net/wireless/realtek/rtw88/mac.c +++ b/drivers/net/wireless/realtek/rtw88/mac.c @@ -1201,6 +1201,15 @@ static int __priority_queue_cfg(struct rtw_dev *rtwdev, rtw_write16(rtwdev, REG_FIFOPAGE_CTRL_2 + 2, fifo->rsvd_boundary); rtw_write16(rtwdev, REG_BCNQ1_BDNY_V1, fifo->rsvd_boundary); rtw_write32(rtwdev, REG_RXFF_BNDY, chip->rxff_size - C2H_PKT_BUF - 1); + + if (rtwdev->hci.type == RTW_HCI_TYPE_USB) { + rtw_write8_mask(rtwdev, REG_AUTO_LLT_V1, BIT_MASK_BLK_DESC_NUM, + chip->usb_tx_agg_desc_num); + + rtw_write8(rtwdev, REG_AUTO_LLT_V1 + 3, chip->usb_tx_agg_desc_num); + rtw_write8_set(rtwdev, REG_TXDMA_OFFSET_CHK + 1, BIT(1)); + } + rtw_write8_set(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1); if (!check_hw_ready(rtwdev, REG_AUTO_LLT_V1, BIT_AUTO_INIT_LLT_V1, 0)) diff --git a/drivers/net/wireless/realtek/rtw88/mac80211.c b/drivers/net/wireless/realtek/rtw88/mac80211.c index 0acebbfa13c4..63326b352738 100644 --- a/drivers/net/wireless/realtek/rtw88/mac80211.c +++ b/drivers/net/wireless/realtek/rtw88/mac80211.c @@ -62,7 +62,7 @@ static int rtw_ops_start(struct ieee80211_hw *hw) return ret; } -static void rtw_ops_stop(struct ieee80211_hw *hw) +static void rtw_ops_stop(struct ieee80211_hw *hw, bool suspend) { struct 
rtw_dev *rtwdev = hw->priv; diff --git a/drivers/net/wireless/realtek/rtw88/main.h b/drivers/net/wireless/realtek/rtw88/main.h index 49894331f7b4..49a3fd4fb7dc 100644 --- a/drivers/net/wireless/realtek/rtw88/main.h +++ b/drivers/net/wireless/realtek/rtw88/main.h @@ -1197,6 +1197,8 @@ struct rtw_chip_info { u16 fw_fifo_addr[RTW_FW_FIFO_MAX]; const struct rtw_fwcd_segs *fwcd_segs; + u8 usb_tx_agg_desc_num; + u8 default_1ss_tx_path; bool path_div_supported; diff --git a/drivers/net/wireless/realtek/rtw88/pci.c b/drivers/net/wireless/realtek/rtw88/pci.c index 30232f7e3ec5..a5b9d6c7be37 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.c +++ b/drivers/net/wireless/realtek/rtw88/pci.c @@ -1682,12 +1682,16 @@ static int rtw_pci_napi_poll(struct napi_struct *napi, int budget) return work_done; } -static void rtw_pci_napi_init(struct rtw_dev *rtwdev) +static int rtw_pci_napi_init(struct rtw_dev *rtwdev) { struct rtw_pci *rtwpci = (struct rtw_pci *)rtwdev->priv; - init_dummy_netdev(&rtwpci->netdev); - netif_napi_add(&rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll); + rtwpci->netdev = alloc_netdev_dummy(0); + if (!rtwpci->netdev) + return -ENOMEM; + + netif_napi_add(rtwpci->netdev, &rtwpci->napi, rtw_pci_napi_poll); + return 0; } static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev) @@ -1696,6 +1700,7 @@ static void rtw_pci_napi_deinit(struct rtw_dev *rtwdev) rtw_pci_napi_stop(rtwdev); netif_napi_del(&rtwpci->napi); + free_netdev(rtwpci->netdev); } int rtw_pci_probe(struct pci_dev *pdev, @@ -1745,7 +1750,11 @@ int rtw_pci_probe(struct pci_dev *pdev, goto err_pci_declaim; } - rtw_pci_napi_init(rtwdev); + ret = rtw_pci_napi_init(rtwdev); + if (ret) { + rtw_err(rtwdev, "failed to setup NAPI\n"); + goto err_pci_declaim; + } ret = rtw_chip_info_setup(rtwdev); if (ret) { diff --git a/drivers/net/wireless/realtek/rtw88/pci.h b/drivers/net/wireless/realtek/rtw88/pci.h index 0c37efd8c66f..13988db1cb4c 100644 --- a/drivers/net/wireless/realtek/rtw88/pci.h +++ b/drivers/net/wireless/realtek/rtw88/pci.h @@ -215,7 +215,7 @@ struct rtw_pci { bool running; /* napi structure */ - struct net_device netdev; + struct net_device *netdev; struct napi_struct napi; u16 rx_tag; diff --git a/drivers/net/wireless/realtek/rtw88/reg.h b/drivers/net/wireless/realtek/rtw88/reg.h index b122f226924b..02ef9a77316b 100644 --- a/drivers/net/wireless/realtek/rtw88/reg.h +++ b/drivers/net/wireless/realtek/rtw88/reg.h @@ -270,6 +270,7 @@ #define BIT_MASK_BCN_HEAD_1_V1 0xfff #define REG_AUTO_LLT_V1 0x0208 #define BIT_AUTO_INIT_LLT_V1 BIT(0) +#define BIT_MASK_BLK_DESC_NUM GENMASK(7, 4) #define REG_DWBCN0_CTRL 0x0208 #define BIT_BCN_VALID BIT(16) #define REG_TXDMA_OFFSET_CHK 0x020C diff --git a/drivers/net/wireless/realtek/rtw88/rtw8703b.c b/drivers/net/wireless/realtek/rtw88/rtw8703b.c index 8919f9e11f03..222608de33cd 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8703b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8703b.c @@ -2013,6 +2013,7 @@ const struct rtw_chip_info rtw8703b_hw_spec = { .tx_stbc = false, .max_power_index = 0x3f, .ampdu_density = IEEE80211_HT_MPDU_DENSITY_16, + .usb_tx_agg_desc_num = 1, /* Not sure if this chip has USB interface */ .path_div_supported = false, .ht_supported = true, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8723d.c b/drivers/net/wireless/realtek/rtw88/rtw8723d.c index f8df4c84d39f..3fba4054d45f 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8723d.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8723d.c @@ -2171,6 +2171,7 @@ const struct rtw_chip_info rtw8723d_hw_spec = { .band = 
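Since the dummy netdev backing NAPI is now heap-allocated, initialization can fail and teardown must free it. A condensed sketch of the pattern these conversions use, with a hypothetical poll stub (a real poll function would reap completions and call napi_complete_done()):

#include <linux/netdevice.h>

static int my_poll(struct napi_struct *napi, int budget)
{
	/* reap up to @budget completions, then complete NAPI when idle */
	return 0;
}

static struct net_device *ndev;
static struct napi_struct napi;

static int napi_setup(void)
{
	ndev = alloc_netdev_dummy(0);	/* never registered, hosts NAPI only */
	if (!ndev)
		return -ENOMEM;
	netif_napi_add(ndev, &napi, my_poll);
	return 0;
}

static void napi_teardown(void)
{
	netif_napi_del(&napi);	/* delete NAPI before freeing its netdev */
	free_netdev(ndev);
}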
RTW_BAND_2G, .page_size = TX_PAGE_SIZE, .dig_min = 0x20, + .usb_tx_agg_desc_num = 1, .ht_supported = true, .vht_supported = false, .lps_deep_mode_supported = 0, diff --git a/drivers/net/wireless/realtek/rtw88/rtw8821c.c b/drivers/net/wireless/realtek/rtw88/rtw8821c.c index fe5d8e188350..526e8de77b3e 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8821c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8821c.c @@ -2008,6 +2008,7 @@ const struct rtw_chip_info rtw8821c_hw_spec = { .band = RTW_BAND_2G | RTW_BAND_5G, .page_size = TX_PAGE_SIZE, .dig_min = 0x1c, + .usb_tx_agg_desc_num = 3, .ht_supported = true, .vht_supported = true, .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK), diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822b.c b/drivers/net/wireless/realtek/rtw88/rtw8822b.c index 3017a9760da8..2456ff242818 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822b.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822b.c @@ -2548,6 +2548,7 @@ const struct rtw_chip_info rtw8822b_hw_spec = { .band = RTW_BAND_2G | RTW_BAND_5G, .page_size = TX_PAGE_SIZE, .dig_min = 0x1c, + .usb_tx_agg_desc_num = 3, .ht_supported = true, .vht_supported = true, .lps_deep_mode_supported = BIT(LPS_DEEP_MODE_LCLK), diff --git a/drivers/net/wireless/realtek/rtw88/rtw8822c.c b/drivers/net/wireless/realtek/rtw88/rtw8822c.c index cd965edc29ce..62376d1cca22 100644 --- a/drivers/net/wireless/realtek/rtw88/rtw8822c.c +++ b/drivers/net/wireless/realtek/rtw88/rtw8822c.c @@ -5366,6 +5366,7 @@ const struct rtw_chip_info rtw8822c_hw_spec = { .band = RTW_BAND_2G | RTW_BAND_5G, .page_size = TX_PAGE_SIZE, .dig_min = 0x20, + .usb_tx_agg_desc_num = 3, .default_1ss_tx_path = BB_PATH_A, .path_div_supported = true, .ht_supported = true, diff --git a/drivers/net/wireless/realtek/rtw88/usb.c b/drivers/net/wireless/realtek/rtw88/usb.c index a0188511099a..a55ca5a24227 100644 --- a/drivers/net/wireless/realtek/rtw88/usb.c +++ b/drivers/net/wireless/realtek/rtw88/usb.c @@ -273,6 +273,8 @@ static void rtw_usb_write_port_tx_complete(struct urb *urb) info = IEEE80211_SKB_CB(skb); tx_data = rtw_usb_get_tx_data(skb); + skb_pull(skb, rtwdev->chip->tx_pkt_desc_sz); + /* enqueue to wait for tx report */ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) { rtw_tx_report_enqueue(rtwdev, skb, tx_data->sn); @@ -377,7 +379,9 @@ static bool rtw_usb_tx_agg_skb(struct rtw_usb *rtwusb, struct sk_buff_head *list skb_iter = skb_peek(list); - if (skb_iter && skb_iter->len + skb_head->len <= RTW_USB_MAX_XMITBUF_SZ) + if (skb_iter && + skb_iter->len + skb_head->len <= RTW_USB_MAX_XMITBUF_SZ && + agg_num < rtwdev->chip->usb_tx_agg_desc_num) __skb_unlink(skb_iter, list); else skb_iter = NULL; @@ -433,23 +437,21 @@ static int rtw_usb_write_data(struct rtw_dev *rtwdev, { const struct rtw_chip_info *chip = rtwdev->chip; struct sk_buff *skb; - unsigned int desclen, headsize, size; + unsigned int size; u8 qsel; int ret = 0; size = pkt_info->tx_pkt_size; qsel = pkt_info->qsel; - desclen = chip->tx_pkt_desc_sz; - headsize = pkt_info->offset ? 
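The extra agg_num < rtwdev->chip->usb_tx_agg_desc_num test above means aggregation now stops on whichever limit is hit first: buffer space or the per-chip descriptor count. A trivial standalone sketch of the decision; the 20480-byte size is an assumption for illustration, not the driver's actual RTW_USB_MAX_XMITBUF_SZ:

#include <stdbool.h>
#include <stddef.h>

#define MAX_XMITBUF_SZ	20480	/* assumed aggregate buffer size */

static bool can_aggregate(size_t agg_len, size_t next_len,
			  unsigned int agg_num, unsigned int max_desc)
{
	return agg_len + next_len <= MAX_XMITBUF_SZ && agg_num < max_desc;
}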
pkt_info->offset : desclen; - skb = dev_alloc_skb(headsize + size); + skb = dev_alloc_skb(chip->tx_pkt_desc_sz + size); if (unlikely(!skb)) return -ENOMEM; - skb_reserve(skb, headsize); + skb_reserve(skb, chip->tx_pkt_desc_sz); skb_put_data(skb, buf, size); - skb_push(skb, headsize); - memset(skb->data, 0, headsize); + skb_push(skb, chip->tx_pkt_desc_sz); + memset(skb->data, 0, chip->tx_pkt_desc_sz); rtw_tx_fill_tx_desc(pkt_info, skb); rtw_tx_fill_txdesc_checksum(rtwdev, pkt_info, skb->data); @@ -740,7 +742,6 @@ static struct rtw_hci_ops rtw_usb_ops = { static int rtw_usb_init_rx(struct rtw_dev *rtwdev) { struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev); - int i; rtwusb->rxwq = create_singlethread_workqueue("rtw88_usb: rx wq"); if (!rtwusb->rxwq) { @@ -752,13 +753,19 @@ static int rtw_usb_init_rx(struct rtw_dev *rtwdev) INIT_WORK(&rtwusb->rx_work, rtw_usb_rx_handler); + return 0; +} + +static void rtw_usb_setup_rx(struct rtw_dev *rtwdev) +{ + struct rtw_usb *rtwusb = rtw_get_usb_priv(rtwdev); + int i; + for (i = 0; i < RTW_USB_RXCB_NUM; i++) { struct rx_usb_ctrl_block *rxcb = &rtwusb->rx_cb[i]; rtw_usb_rx_resubmit(rtwusb, rxcb); } - - return 0; } static void rtw_usb_deinit_rx(struct rtw_dev *rtwdev) @@ -895,6 +902,8 @@ int rtw_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) goto err_destroy_rxwq; } + rtw_usb_setup_rx(rtwdev); + return 0; err_destroy_rxwq: diff --git a/drivers/net/wireless/realtek/rtw89/Kconfig b/drivers/net/wireless/realtek/rtw89/Kconfig index eaea4eaeb361..3c9f864805b1 100644 --- a/drivers/net/wireless/realtek/rtw89/Kconfig +++ b/drivers/net/wireless/realtek/rtw89/Kconfig @@ -22,6 +22,9 @@ config RTW89_8851B config RTW89_8852A tristate +config RTW89_8852B_COMMON + tristate + config RTW89_8852B tristate @@ -59,6 +62,7 @@ config RTW89_8852BE select RTW89_CORE select RTW89_PCI select RTW89_8852B + select RTW89_8852B_COMMON help Select this option will enable support for 8852BE chipset diff --git a/drivers/net/wireless/realtek/rtw89/Makefile b/drivers/net/wireless/realtek/rtw89/Makefile index 86a553fb0136..1f1050a7a89d 100644 --- a/drivers/net/wireless/realtek/rtw89/Makefile +++ b/drivers/net/wireless/realtek/rtw89/Makefile @@ -17,7 +17,8 @@ rtw89_core-y += core.o \ ps.o \ chan.o \ ser.o \ - acpi.o + acpi.o \ + util.o rtw89_core-$(CONFIG_PM) += wow.o @@ -39,6 +40,9 @@ rtw89_8852a-objs := rtw8852a.o \ obj-$(CONFIG_RTW89_8852AE) += rtw89_8852ae.o rtw89_8852ae-objs := rtw8852ae.o +obj-$(CONFIG_RTW89_8852B_COMMON) += rtw89_8852b_common.o +rtw89_8852b_common-objs := rtw8852b_common.o + obj-$(CONFIG_RTW89_8852B) += rtw89_8852b.o rtw89_8852b-objs := rtw8852b.o \ rtw8852b_table.o \ diff --git a/drivers/net/wireless/realtek/rtw89/cam.c b/drivers/net/wireless/realtek/rtw89/cam.c index 1864f543a6c6..4557c6e035a9 100644 --- a/drivers/net/wireless/realtek/rtw89/cam.c +++ b/drivers/net/wireless/realtek/rtw89/cam.c @@ -211,6 +211,46 @@ static int rtw89_cam_get_addr_cam_key_idx(struct rtw89_addr_cam_entry *addr_cam, return 0; } +static int rtw89_cam_detach_sec_cam(struct rtw89_dev *rtwdev, + struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + const struct rtw89_sec_cam_entry *sec_cam, + bool inform_fw) +{ + struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); + struct rtw89_vif *rtwvif; + struct rtw89_addr_cam_entry *addr_cam; + unsigned int i; + int ret = 0; + + if (!vif) { + rtw89_err(rtwdev, "No iface for deleting sec cam\n"); + return -EINVAL; + } + + rtwvif = (struct rtw89_vif *)vif->drv_priv; + addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta); + + 
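The skb construction in rtw_usb_write_data() above is the classic headroom dance: allocate payload-plus-descriptor, reserve the descriptor space, copy the payload, then push the descriptor back in front. A self-contained sketch (build_tx_skb is a hypothetical helper):

#include <linux/skbuff.h>
#include <linux/string.h>

static struct sk_buff *build_tx_skb(const void *buf, unsigned int size,
				    unsigned int desc_sz)
{
	struct sk_buff *skb = dev_alloc_skb(desc_sz + size);

	if (!skb)
		return NULL;

	skb_reserve(skb, desc_sz);	/* leave headroom for the descriptor */
	skb_put_data(skb, buf, size);	/* payload lands after the headroom */
	skb_push(skb, desc_sz);		/* skb->data now points at the descriptor */
	memset(skb->data, 0, desc_sz);	/* zero it before the fill routines run */

	return skb;
}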
for_each_set_bit(i, addr_cam->sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM) { + if (addr_cam->sec_ent[i] != sec_cam->sec_cam_idx) + continue; + + clear_bit(i, addr_cam->sec_cam_map); + } + + if (inform_fw) { + ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta); + if (ret) + rtw89_err(rtwdev, + "failed to update dctl cam del key: %d\n", ret); + ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL); + if (ret) + rtw89_err(rtwdev, "failed to update cam del key: %d\n", ret); + } + + return ret; +} + static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta, @@ -242,10 +282,8 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev, return ret; } - key->hw_key_idx = key_idx; addr_cam->sec_ent_keyid[key_idx] = key->keyidx; addr_cam->sec_ent[key_idx] = sec_cam->sec_cam_idx; - addr_cam->sec_entries[key_idx] = sec_cam; set_bit(key_idx, addr_cam->sec_cam_map); ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta); if (ret) { @@ -258,7 +296,6 @@ static int rtw89_cam_attach_sec_cam(struct rtw89_dev *rtwdev, rtw89_err(rtwdev, "failed to update addr cam sec entry: %d\n", ret); clear_bit(key_idx, addr_cam->sec_cam_map); - addr_cam->sec_entries[key_idx] = NULL; return ret; } @@ -295,6 +332,9 @@ static int rtw89_cam_sec_key_install(struct rtw89_dev *rtwdev, goto err_release_cam; } + key->hw_key_idx = sec_cam_idx; + cam_info->sec_entries[sec_cam_idx] = sec_cam; + sec_cam->sec_cam_idx = sec_cam_idx; sec_cam->type = hw_key_type; sec_cam->len = RTW89_SEC_CAM_LEN; @@ -316,6 +356,7 @@ static int rtw89_cam_sec_key_install(struct rtw89_dev *rtwdev, return 0; err_release_cam: + cam_info->sec_entries[sec_cam_idx] = NULL; kfree(sec_cam); clear_bit(sec_cam_idx, cam_info->sec_cam_map); if (ext_key) @@ -386,42 +427,22 @@ int rtw89_cam_sec_key_del(struct rtw89_dev *rtwdev, struct ieee80211_key_conf *key, bool inform_fw) { - struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); struct rtw89_cam_info *cam_info = &rtwdev->cam_info; - struct rtw89_vif *rtwvif; - struct rtw89_addr_cam_entry *addr_cam; - struct rtw89_sec_cam_entry *sec_cam; - u8 key_idx = key->hw_key_idx; + const struct rtw89_sec_cam_entry *sec_cam; u8 sec_cam_idx; - int ret = 0; + int ret; - if (!vif) { - rtw89_err(rtwdev, "No iface for deleting sec cam\n"); - return -EINVAL; - } - - rtwvif = (struct rtw89_vif *)vif->drv_priv; - addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta); - sec_cam = addr_cam->sec_entries[key_idx]; + sec_cam_idx = key->hw_key_idx; + sec_cam = cam_info->sec_entries[sec_cam_idx]; if (!sec_cam) return -EINVAL; - /* detach sec cam from addr cam */ - clear_bit(key_idx, addr_cam->sec_cam_map); - addr_cam->sec_entries[key_idx] = NULL; - if (inform_fw) { - ret = rtw89_chip_h2c_dctl_sec_cam(rtwdev, rtwvif, rtwsta); - if (ret) - rtw89_err(rtwdev, "failed to update dctl cam del key: %d\n", ret); - ret = rtw89_fw_h2c_cam(rtwdev, rtwvif, rtwsta, NULL); - if (ret) - rtw89_err(rtwdev, "failed to update cam del key: %d\n", ret); - } + ret = rtw89_cam_detach_sec_cam(rtwdev, vif, sta, sec_cam, inform_fw); /* clear valid bit in addr cam will disable sec cam, * so we don't need to send H2C command again */ - sec_cam_idx = sec_cam->sec_cam_idx; + cam_info->sec_entries[sec_cam_idx] = NULL; clear_bit(sec_cam_idx, cam_info->sec_cam_map); if (sec_cam->ext_key) clear_bit(sec_cam_idx + 1, cam_info->sec_cam_map); @@ -502,6 +523,7 @@ static u8 rtw89_get_addr_cam_entry_size(struct rtw89_dev *rtwdev) case RTL8852A: case RTL8852B: case RTL8851B: + case RTL8852BT: return ADDR_CAM_ENT_SIZE; default: 
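The net effect of the CAM refactor above is that key->hw_key_idx now stores the global security CAM index rather than a per-station slot, so any path holding the key can resolve the entry in one step. A sketch of the lookup, assuming the rtw89 structures introduced in this patch (lookup_sec_cam is a hypothetical helper):

static const struct rtw89_sec_cam_entry *
lookup_sec_cam(struct rtw89_cam_info *cam_info, u8 hw_key_idx)
{
	/* NULL until rtw89_cam_sec_key_install() populates the slot */
	return cam_info->sec_entries[hw_key_idx];
}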
return ADDR_CAM_ENT_SHORT_SIZE; diff --git a/drivers/net/wireless/realtek/rtw89/chan.c b/drivers/net/wireless/realtek/rtw89/chan.c index 051a3cad6101..7f90d93dcdc0 100644 --- a/drivers/net/wireless/realtek/rtw89/chan.c +++ b/drivers/net/wireless/realtek/rtw89/chan.c @@ -141,6 +141,28 @@ bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev, return band_changed; } +int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev, + int (*iterator)(const struct rtw89_chan *chan, + void *data), + void *data) +{ + struct rtw89_hal *hal = &rtwdev->hal; + const struct rtw89_chan *chan; + int ret; + u8 idx; + + lockdep_assert_held(&rtwdev->mutex); + + for_each_set_bit(idx, hal->entity_map, NUM_OF_RTW89_SUB_ENTITY) { + chan = rtw89_chan_get(rtwdev, idx); + ret = iterator(chan, data); + if (ret) + return ret; + } + + return 0; +} + static void __rtw89_config_entity_chandef(struct rtw89_dev *rtwdev, enum rtw89_sub_entity_idx idx, const struct cfg80211_chan_def *chandef, @@ -2322,7 +2344,6 @@ static void rtw89_swap_sub_entity(struct rtw89_dev *rtwdev, enum rtw89_sub_entity_idx idx2) { struct rtw89_hal *hal = &rtwdev->hal; - struct rtw89_sub_entity tmp; struct rtw89_vif *rtwvif; u8 cur; @@ -2332,9 +2353,7 @@ static void rtw89_swap_sub_entity(struct rtw89_dev *rtwdev, hal->sub[idx1].cfg->idx = idx2; hal->sub[idx2].cfg->idx = idx1; - tmp = hal->sub[idx1]; - hal->sub[idx1] = hal->sub[idx2]; - hal->sub[idx2] = tmp; + swap(hal->sub[idx1], hal->sub[idx2]); rtw89_for_each_rtwvif(rtwdev, rtwvif) { if (!rtwvif->chanctx_assigned) diff --git a/drivers/net/wireless/realtek/rtw89/chan.h b/drivers/net/wireless/realtek/rtw89/chan.h index ffa412f281f3..5278ff8c513b 100644 --- a/drivers/net/wireless/realtek/rtw89/chan.h +++ b/drivers/net/wireless/realtek/rtw89/chan.h @@ -78,6 +78,10 @@ void rtw89_chan_create(struct rtw89_chan *chan, u8 center_chan, u8 primary_chan, bool rtw89_assign_entity_chan(struct rtw89_dev *rtwdev, enum rtw89_sub_entity_idx idx, const struct rtw89_chan *new); +int rtw89_iterate_entity_chan(struct rtw89_dev *rtwdev, + int (*iterator)(const struct rtw89_chan *chan, + void *data), + void *data); void rtw89_config_entity_chandef(struct rtw89_dev *rtwdev, enum rtw89_sub_entity_idx idx, const struct cfg80211_chan_def *chandef); diff --git a/drivers/net/wireless/realtek/rtw89/coex.c b/drivers/net/wireless/realtek/rtw89/coex.c index c443b39ab3c6..24929ef534e0 100644 --- a/drivers/net/wireless/realtek/rtw89/coex.c +++ b/drivers/net/wireless/realtek/rtw89/coex.c @@ -91,7 +91,7 @@ static const struct rtw89_btc_fbtc_slot s_def[] = { [CXST_BLK] = __DEF_FBTC_SLOT(500, 0x55555555, SLOT_MIX), [CXST_E2G] = __DEF_FBTC_SLOT(0, 0xea5a5a5a, SLOT_MIX), [CXST_E5G] = __DEF_FBTC_SLOT(0, 0xffffffff, SLOT_ISO), - [CXST_EBT] = __DEF_FBTC_SLOT(0, 0xe5555555, SLOT_MIX), + [CXST_EBT] = __DEF_FBTC_SLOT(5, 0xe5555555, SLOT_MIX), [CXST_ENULL] = __DEF_FBTC_SLOT(0, 0xaaaaaaaa, SLOT_ISO), [CXST_WLK] = __DEF_FBTC_SLOT(250, 0xea5a5a5a, SLOT_MIX), [CXST_W1FDD] = __DEF_FBTC_SLOT(50, 0xffffffff, SLOT_ISO), @@ -228,6 +228,7 @@ static u32 chip_id_to_bt_rom_code_id(u32 id) case RTL8852A: case RTL8852B: case RTL8852C: + case RTL8852BT: return 0x8852; case RTL8851B: return 0x8851; @@ -3616,6 +3617,7 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type) struct rtw89_btc_wl_info *wl = &btc->cx.wl; u8 type, null_role; u32 tbl_w1, tbl_b1, tbl_b4; + u16 dur_2; type = FIELD_GET(BTC_CXP_MASK, policy_type); @@ -3726,7 +3728,21 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type) if (hid->exist || hfp->exist) tbl_w1 = 
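Usage sketch for the new rtw89_iterate_entity_chan() above: the callback runs once per active sub-entity channel, and a non-zero return aborts the walk and is propagated to the caller. find_6ghz_iter is a hypothetical example, not driver code:

static int find_6ghz_iter(const struct rtw89_chan *chan, void *data)
{
	bool *found = data;

	if (chan->band_type == RTW89_BAND_6G) {
		*found = true;
		return 1;	/* stop the iteration early */
	}

	return 0;
}

/* caller, with rtwdev->mutex held as the lockdep assertion requires:
 *	bool found = false;
 *	rtw89_iterate_entity_chan(rtwdev, find_6ghz_iter, &found);
 */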
cxtbl[16]; + dur_2 = dm->e2g_slot_limit; + switch (policy_type) { + case BTC_CXP_OFFE_2GBWISOB: /* for normal-case */ + _slot_set(btc, CXST_E2G, 0, tbl_w1, SLOT_ISO); + _slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur, + s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype); + _slot_set_dur(btc, CXST_EBT, dur_2); + break; + case BTC_CXP_OFFE_2GISOB: /* for bt no-link */ + _slot_set(btc, CXST_E2G, 0, cxtbl[1], SLOT_ISO); + _slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur, + s_def[CXST_EBT].cxtbl, s_def[CXST_EBT].cxtype); + _slot_set_dur(btc, CXST_EBT, dur_2); + break; case BTC_CXP_OFFE_DEF: _slot_set_le(btc, CXST_E2G, s_def[CXST_E2G].dur, s_def[CXST_E2G].cxtbl, s_def[CXST_E2G].cxtype); @@ -3746,6 +3762,15 @@ void rtw89_btc_set_policy_v1(struct rtw89_dev *rtwdev, u16 policy_type) _slot_set_le(btc, CXST_ENULL, s_def[CXST_ENULL].dur, s_def[CXST_ENULL].cxtbl, s_def[CXST_ENULL].cxtype); break; + case BTC_CXP_OFFE_2GBWMIXB: + _slot_set(btc, CXST_E2G, 0, 0x55555555, SLOT_MIX); + _slot_set_le(btc, CXST_EBT, s_def[CXST_EBT].dur, + cpu_to_le32(0x55555555), s_def[CXST_EBT].cxtype); + break; + case BTC_CXP_OFFE_WL: /* for 4-way */ + _slot_set(btc, CXST_E2G, 0, cxtbl[1], SLOT_MIX); + _slot_set(btc, CXST_EBT, 0, cxtbl[1], SLOT_MIX); + break; default: break; } @@ -9514,7 +9539,7 @@ static void _get_gnt(struct rtw89_dev *rtwdev, struct rtw89_mac_ax_coex_gnt *gnt u32 val, status; if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B || - chip->chip_id == RTL8851B) { + chip->chip_id == RTL8851B || chip->chip_id == RTL8852BT) { rtw89_mac_read_lte(rtwdev, R_AX_LTE_SW_CFG_1, &val); rtw89_mac_read_lte(rtwdev, R_AX_GNT_VAL, &status); diff --git a/drivers/net/wireless/realtek/rtw89/core.c b/drivers/net/wireless/realtek/rtw89/core.c index ddc390d24ec1..7019f7d482a8 100644 --- a/drivers/net/wireless/realtek/rtw89/core.c +++ b/drivers/net/wireless/realtek/rtw89/core.c @@ -499,31 +499,21 @@ static void rtw89_core_tx_update_sec_key(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) { + struct rtw89_cam_info *cam_info = &rtwdev->cam_info; const struct rtw89_chip_info *chip = rtwdev->chip; - struct ieee80211_vif *vif = tx_req->vif; - struct ieee80211_sta *sta = tx_req->sta; + const struct rtw89_sec_cam_entry *sec_cam; struct ieee80211_tx_info *info; struct ieee80211_key_conf *key; - struct rtw89_vif *rtwvif; - struct rtw89_sta *rtwsta = sta_to_rtwsta_safe(sta); - struct rtw89_addr_cam_entry *addr_cam; - struct rtw89_sec_cam_entry *sec_cam; struct rtw89_tx_desc_info *desc_info = &tx_req->desc_info; struct sk_buff *skb = tx_req->skb; u8 sec_type = RTW89_SEC_KEY_TYPE_NONE; + u8 sec_cam_idx; u64 pn64; - if (!vif) { - rtw89_warn(rtwdev, "cannot set sec key without vif\n"); - return; - } - - rtwvif = (struct rtw89_vif *)vif->drv_priv; - addr_cam = rtw89_get_addr_cam_of(rtwvif, rtwsta); - info = IEEE80211_SKB_CB(skb); key = info->control.hw_key; - sec_cam = addr_cam->sec_entries[key->hw_key_idx]; + sec_cam_idx = key->hw_key_idx; + sec_cam = cam_info->sec_entries[sec_cam_idx]; if (!sec_cam) { rtw89_warn(rtwdev, "sec cam entry is empty\n"); return; @@ -823,6 +813,8 @@ rtw89_core_tx_update_data_info(struct rtw89_dev *rtwdev, desc_info->mac_id = rtw89_core_tx_get_mac_id(rtwdev, tx_req); desc_info->port = desc_info->hiq ? rtwvif->port : 0; desc_info->er_cap = rtwsta ? rtwsta->er_cap : false; + desc_info->stbc = rtwsta ? rtwsta->ra.stbc_cap : false; + desc_info->ldpc = rtwsta ? 
rtwsta->ra.ldpc_cap : false; /* enable wd_info for AMPDU */ desc_info->en_wd_info = true; @@ -1137,6 +1129,8 @@ static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info) { u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_USE_RATE, desc_info->use_rate) | FIELD_PREP(RTW89_TXWD_INFO0_DATA_RATE, desc_info->data_rate) | + FIELD_PREP(RTW89_TXWD_INFO0_DATA_STBC, desc_info->stbc) | + FIELD_PREP(RTW89_TXWD_INFO0_DATA_LDPC, desc_info->ldpc) | FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) | FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port); @@ -1145,7 +1139,9 @@ static __le32 rtw89_build_txwd_info0(struct rtw89_tx_desc_info *desc_info) static __le32 rtw89_build_txwd_info0_v1(struct rtw89_tx_desc_info *desc_info) { - u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) | + u32 dword = FIELD_PREP(RTW89_TXWD_INFO0_DATA_STBC, desc_info->stbc) | + FIELD_PREP(RTW89_TXWD_INFO0_DATA_LDPC, desc_info->ldpc) | + FIELD_PREP(RTW89_TXWD_INFO0_DISDATAFB, desc_info->dis_data_fb) | FIELD_PREP(RTW89_TXWD_INFO0_MULTIPORT_ID, desc_info->port) | FIELD_PREP(RTW89_TXWD_INFO0_DATA_ER, desc_info->er_cap) | FIELD_PREP(RTW89_TXWD_INFO0_DATA_BW_ER, 0); @@ -1311,7 +1307,9 @@ static __le32 rtw89_build_txwd_body7_v2(struct rtw89_tx_desc_info *desc_info) static __le32 rtw89_build_txwd_info0_v2(struct rtw89_tx_desc_info *desc_info) { - u32 dword = FIELD_PREP(BE_TXD_INFO0_DISDATAFB, desc_info->dis_data_fb) | + u32 dword = FIELD_PREP(BE_TXD_INFO0_DATA_STBC, desc_info->stbc) | + FIELD_PREP(BE_TXD_INFO0_DATA_LDPC, desc_info->ldpc) | + FIELD_PREP(BE_TXD_INFO0_DISDATAFB, desc_info->dis_data_fb) | FIELD_PREP(BE_TXD_INFO0_MULTIPORT_ID, desc_info->port); return cpu_to_le32(dword); @@ -1559,6 +1557,12 @@ static void rtw89_core_parse_phy_status_ie01(struct rtw89_dev *rtwdev, u32 t; phy_ppdu->chan_idx = le32_get_bits(ie->w0, RTW89_PHY_STS_IE01_W0_CH_IDX); + + if (rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR) { + phy_ppdu->ldpc = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_LDPC); + phy_ppdu->stbc = le32_get_bits(ie->w2, RTW89_PHY_STS_IE01_W2_STBC); + } + if (phy_ppdu->rate < RTW89_HW_RATE_OFDM6) return; @@ -1917,7 +1921,8 @@ static void rtw89_vif_rx_stats_iter(void *data, u8 *mac, return; if (ieee80211_is_beacon(hdr->frame_control)) { - if (vif->type == NL80211_IFTYPE_STATION) { + if (vif->type == NL80211_IFTYPE_STATION && + !test_bit(RTW89_FLAG_WOWLAN, rtwdev->flags)) { rtw89_vif_sync_bcn_tsf(rtwvif, hdr, skb->len); rtw89_fw_h2c_rssi_offload(rtwdev, phy_ppdu); } @@ -1984,6 +1989,23 @@ static void rtw89_core_hw_to_sband_rate(struct ieee80211_rx_status *rx_status) rx_status->rate_idx -= 4; } +static +void rtw89_core_update_rx_status_by_ppdu(struct rtw89_dev *rtwdev, + struct ieee80211_rx_status *rx_status, + struct rtw89_rx_phy_ppdu *phy_ppdu) +{ + if (!(rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR)) + return; + + if (!phy_ppdu) + return; + + if (phy_ppdu->ldpc) + rx_status->enc_flags |= RX_ENC_FLAG_LDPC; + if (phy_ppdu->stbc) + rx_status->enc_flags |= u8_encode_bits(1, RX_ENC_FLAG_STBC_MASK); +} + static const u8 rx_status_bw_to_radiotap_eht_usig[] = { [RATE_INFO_BW_20] = IEEE80211_RADIOTAP_EHT_USIG_COMMON_BW_20MHZ, [RATE_INFO_BW_5] = U8_MAX, @@ -2027,10 +2049,14 @@ static void rtw89_core_update_radiotap_eht(struct rtw89_dev *rtwdev, eht->user_info[0] = cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_MCS_KNOWN | - IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O); + IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_KNOWN_O | + IEEE80211_RADIOTAP_EHT_USER_INFO_CODING_KNOWN); eht->user_info[0] |= 
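A condensed view of the monitor-mode flag propagation above. RX_ENC_FLAG_STBC_MASK is a multi-bit field in enc_flags carrying the STBC stream count, which is why the code encodes the value 1 into the mask with u8_encode_bits() instead of OR-ing a single flag bit:

#include <net/mac80211.h>
#include <linux/bitfield.h>

static void update_rx_enc_flags(struct ieee80211_rx_status *rx_status,
				bool ldpc, bool stbc)
{
	if (ldpc)
		rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
	if (stbc)
		rx_status->enc_flags |= u8_encode_bits(1, RX_ENC_FLAG_STBC_MASK);
}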
le32_encode_bits(rx_status->rate_idx, IEEE80211_RADIOTAP_EHT_USER_INFO_MCS) | le32_encode_bits(rx_status->nss, IEEE80211_RADIOTAP_EHT_USER_INFO_NSS_O); + if (rx_status->enc_flags & RX_ENC_FLAG_LDPC) + eht->user_info[0] |= + cpu_to_le32(IEEE80211_RADIOTAP_EHT_USER_INFO_CODING); /* U-SIG */ tlv = (void *)tlv + sizeof(*tlv) + ALIGN(eht_len, 4); @@ -2056,6 +2082,8 @@ static void rtw89_core_update_radiotap(struct rtw89_dev *rtwdev, { static const struct ieee80211_radiotap_he known_he = { .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN | IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN), .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN), }; @@ -2087,6 +2115,7 @@ static void rtw89_core_rx_to_mac80211(struct rtw89_dev *rtwdev, rtw89_core_hw_to_sband_rate(rx_status); rtw89_core_rx_stats(rtwdev, phy_ppdu, desc_info, skb_ppdu); + rtw89_core_update_rx_status_by_ppdu(rtwdev, rx_status, phy_ppdu); rtw89_core_update_radiotap(rtwdev, skb_ppdu, rx_status); /* In low power mode, it does RX in thread context. */ local_bh_disable(); @@ -2492,11 +2521,15 @@ void rtw89_core_napi_stop(struct rtw89_dev *rtwdev) } EXPORT_SYMBOL(rtw89_core_napi_stop); -void rtw89_core_napi_init(struct rtw89_dev *rtwdev) +int rtw89_core_napi_init(struct rtw89_dev *rtwdev) { - init_dummy_netdev(&rtwdev->netdev); - netif_napi_add(&rtwdev->netdev, &rtwdev->napi, + rtwdev->netdev = alloc_netdev_dummy(0); + if (!rtwdev->netdev) + return -ENOMEM; + + netif_napi_add(rtwdev->netdev, &rtwdev->napi, rtwdev->hci.ops->napi_poll); + return 0; } EXPORT_SYMBOL(rtw89_core_napi_init); @@ -2504,6 +2537,7 @@ void rtw89_core_napi_deinit(struct rtw89_dev *rtwdev) { rtw89_core_napi_stop(rtwdev); netif_napi_del(&rtwdev->napi); + free_netdev(rtwdev->netdev); } EXPORT_SYMBOL(rtw89_core_napi_deinit); @@ -3342,20 +3376,23 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev, if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { /* for station mode, assign the mac_id from itself */ rtwsta->mac_id = rtwvif->mac_id; - /* must do rtw89_reg_6ghz_power_recalc() before rfk channel */ - rtw89_reg_6ghz_power_recalc(rtwdev, rtwvif, true); + + /* must do rtw89_reg_6ghz_recalc() before rfk channel */ + ret = rtw89_reg_6ghz_recalc(rtwdev, rtwvif, true); + if (ret) + return ret; + rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta, BTC_ROLE_MSTS_STA_CONN_START); rtw89_chip_rfk_channel(rtwdev); } else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) { - rtwsta->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map, - RTW89_MAX_MAC_ID_NUM); + rtwsta->mac_id = rtw89_acquire_mac_id(rtwdev); if (rtwsta->mac_id == RTW89_MAX_MAC_ID_NUM) return -ENOSPC; ret = rtw89_mac_set_macid_pause(rtwdev, rtwsta->mac_id, false); if (ret) { - rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id); + rtw89_release_mac_id(rtwdev, rtwsta->mac_id); rtw89_warn(rtwdev, "failed to send h2c macid pause\n"); return ret; } @@ -3363,7 +3400,7 @@ int rtw89_core_sta_add(struct rtw89_dev *rtwdev, ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, RTW89_ROLE_CREATE); if (ret) { - rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id); + rtw89_release_mac_id(rtwdev, rtwsta->mac_id); rtw89_warn(rtwdev, "failed to send h2c role info\n"); return ret; } @@ -3532,11 +3569,11 @@ int rtw89_core_sta_remove(struct rtw89_dev *rtwdev, int ret; if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) { - rtw89_reg_6ghz_power_recalc(rtwdev, rtwvif, false); + rtw89_reg_6ghz_recalc(rtwdev, rtwvif, 
false); rtw89_btc_ntfy_role_info(rtwdev, rtwvif, rtwsta, BTC_ROLE_MSTS_STA_DIS_CONN); } else if (vif->type == NL80211_IFTYPE_AP || sta->tdls) { - rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwsta->mac_id); + rtw89_release_mac_id(rtwdev, rtwsta->mac_id); ret = rtw89_fw_h2c_role_maintain(rtwdev, rtwvif, rtwsta, RTW89_ROLE_REMOVE); @@ -4022,15 +4059,15 @@ void rtw89_core_update_beacon_work(struct work_struct *work) int rtw89_wait_for_cond(struct rtw89_wait_info *wait, unsigned int cond) { struct completion *cmpl = &wait->completion; - unsigned long timeout; + unsigned long time_left; unsigned int cur; cur = atomic_cmpxchg(&wait->cond, RTW89_WAIT_COND_IDLE, cond); if (cur != RTW89_WAIT_COND_IDLE) return -EBUSY; - timeout = wait_for_completion_timeout(cmpl, RTW89_WAIT_FOR_COND_TIMEOUT); - if (timeout == 0) { + time_left = wait_for_completion_timeout(cmpl, RTW89_WAIT_FOR_COND_TIMEOUT); + if (time_left == 0) { atomic_set(&wait->cond, RTW89_WAIT_COND_IDLE); return -ETIMEDOUT; } @@ -4186,6 +4223,25 @@ void rtw89_core_stop(struct rtw89_dev *rtwdev) rtw89_hci_reset(rtwdev); } +u8 rtw89_acquire_mac_id(struct rtw89_dev *rtwdev) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + u8 mac_id_num = chip->support_macid_num; + u8 mac_id; + + mac_id = find_first_zero_bit(rtwdev->mac_id_map, mac_id_num); + if (mac_id == mac_id_num) + return RTW89_MAX_MAC_ID_NUM; + + set_bit(mac_id, rtwdev->mac_id_map); + return mac_id; +} + +void rtw89_release_mac_id(struct rtw89_dev *rtwdev, u8 mac_id) +{ + clear_bit(mac_id, rtwdev->mac_id_map); +} + int rtw89_core_init(struct rtw89_dev *rtwdev) { struct rtw89_btc *btc = &rtwdev->btc; @@ -4331,7 +4387,7 @@ static void rtw89_read_chip_ver(struct rtw89_dev *rtwdev) rtwdev->hal.cv = cv; - if (chip->chip_id == RTL8852B || chip->chip_id == RTL8851B) { + if (rtw89_is_rtl885xb(rtwdev)) { ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_CV, &val); if (ret) return; @@ -4475,6 +4531,10 @@ static int rtw89_core_register_hw(struct rtw89_dev *rtwdev) hw->max_tx_aggregation_subframes = RTW89_MAX_TX_AGG_NUM; hw->uapsd_max_sp_len = IEEE80211_WMM_IE_STA_QOSINFO_SP_ALL; + hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | + IEEE80211_RADIOTAP_MCS_HAVE_STBC; + hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC; + ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, HAS_RATE_CONTROL); ieee80211_hw_set(hw, MFP_CAPABLE); diff --git a/drivers/net/wireless/realtek/rtw89/core.h b/drivers/net/wireless/realtek/rtw89/core.h index 112bdd95fc6e..11fa003a9788 100644 --- a/drivers/net/wireless/realtek/rtw89/core.h +++ b/drivers/net/wireless/realtek/rtw89/core.h @@ -132,6 +132,7 @@ enum rtw89_hci_type { enum rtw89_core_chip_id { RTL8852A, RTL8852B, + RTL8852BT, RTL8852C, RTL8851B, RTL8922A, @@ -745,6 +746,14 @@ enum rtw89_reg_6ghz_power { RTW89_REG_6GHZ_POWER_DFLT = RTW89_REG_6GHZ_POWER_VLP, }; +#define RTW89_MIN_VALID_POWER_CONSTRAINT (-10) /* unit: dBm */ + +/* calculate based on ieee80211 Transmit Power Envelope */ +struct rtw89_reg_6ghz_tpe { + bool valid; + s8 constraint; /* unit: dBm */ +}; + enum rtw89_fw_pkt_ofld_type { RTW89_PKT_OFLD_TYPE_PROBE_RSP = 0, RTW89_PKT_OFLD_TYPE_PS_POLL = 1, @@ -793,6 +802,8 @@ struct rtw89_rx_phy_ppdu { u8 evm_max; u8 evm_min; } ofdm; + bool ldpc; + bool stbc; bool to_self; bool valid; }; @@ -884,6 +895,13 @@ enum rtw89_ps_mode { #define RTW89_BYR_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1) #define RTW89_PPE_BW_NUM (RTW89_CHANNEL_WIDTH_320 + 1) +enum rtw89_pe_duration { + RTW89_PE_DURATION_0 = 0, + RTW89_PE_DURATION_8 = 1, + RTW89_PE_DURATION_16 = 
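rtw89_acquire_mac_id()/rtw89_release_mac_id() above are a plain bitmap ID pool bounded by the per-chip support_macid_num. The same shape in isolation, using the same bitops (acquire_id/release_id and the 128-entry pool are hypothetical):

#include <linux/bitops.h>

static unsigned long id_map[BITS_TO_LONGS(128)];

static int acquire_id(unsigned int num_ids)
{
	unsigned int id = find_first_zero_bit(id_map, num_ids);

	if (id == num_ids)
		return -1;	/* find_first_zero_bit() returns @size on miss */

	set_bit(id, id_map);
	return id;
}

static void release_id(unsigned int id)
{
	clear_bit(id, id_map);
}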
2, + RTW89_PE_DURATION_16_20 = 3, +}; + enum rtw89_ru_bandwidth { RTW89_RU26 = 0, RTW89_RU52 = 1, @@ -1129,6 +1147,8 @@ struct rtw89_tx_desc_info { bool hiq; u8 port; bool er_cap; + bool stbc; + bool ldpc; }; struct rtw89_core_tx_request { @@ -1318,6 +1338,7 @@ struct rtw89_btc_wl_smap { u32 scan: 1; u32 connecting: 1; u32 roaming: 1; + u32 dbccing: 1; u32 transacting: 1; u32 _4way: 1; u32 rf_off: 1; @@ -3249,7 +3270,6 @@ struct rtw89_addr_cam_entry { DECLARE_BITMAP(sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM); u8 sec_ent_keyid[RTW89_SEC_CAM_IN_ADDR_CAM]; u8 sec_ent[RTW89_SEC_CAM_IN_ADDR_CAM]; - struct rtw89_sec_cam_entry *sec_entries[RTW89_SEC_CAM_IN_ADDR_CAM]; }; struct rtw89_bssid_cam_entry { @@ -3385,6 +3405,7 @@ struct rtw89_vif { bool chanctx_assigned; /* only valid when running with chanctx_ops */ enum rtw89_sub_entity_idx sub_entity_idx; enum rtw89_reg_6ghz_power reg_6ghz_power; + struct rtw89_reg_6ghz_tpe reg_6ghz_tpe; u8 mac_id; u8 port; @@ -4144,6 +4165,7 @@ struct rtw89_chip_info { u8 wde_qempty_acq_grpnum; u8 wde_qempty_mgq_grpsel; u32 rf_base_addr[2]; + u8 support_macid_num; u8 support_chanctx_num; u8 support_bands; u16 support_bandwidths; @@ -4224,7 +4246,7 @@ struct rtw89_chip_info { const u32 *c2h_regs; struct rtw89_reg_def c2h_counter_reg; const struct rtw89_page_regs *page_regs; - u32 wow_reason_reg; + const u32 *wow_reason_reg; bool cfo_src_fd; bool cfo_hw_comp; const struct rtw89_reg_def *dcfo_comp; @@ -4334,6 +4356,7 @@ enum rtw89_fw_feature { RTW89_FW_FEATURE_NO_LPS_PG, RTW89_FW_FEATURE_BEACON_FILTER, RTW89_FW_FEATURE_MACID_PAUSE_SLEEP, + RTW89_FW_FEATURE_WOW_REASON_V1, }; struct rtw89_fw_suit { @@ -4436,6 +4459,7 @@ struct rtw89_cam_info { DECLARE_BITMAP(sec_cam_map, RTW89_MAX_SEC_CAM_NUM); DECLARE_BITMAP(ba_cam_map, RTW89_MAX_BA_CAM_NUM); struct rtw89_ba_cam_entry ba_cam_entry[RTW89_MAX_BA_CAM_NUM]; + const struct rtw89_sec_cam_entry *sec_entries[RTW89_MAX_SEC_CAM_NUM]; }; enum rtw89_sar_sources { @@ -4671,7 +4695,12 @@ struct rtw89_dack_info { bool msbk_timeout[RTW89_DACK_PATH_NR]; }; -#define RTW89_RFK_CHS_NR 3 +enum rtw89_rfk_chs_nrs { + __RTW89_RFK_CHS_NR_V0 = 2, + __RTW89_RFK_CHS_NR_V1 = 3, + + RTW89_RFK_CHS_NR = __RTW89_RFK_CHS_NR_V1, +}; struct rtw89_rfk_mcc_info { u8 ch[RTW89_RFK_CHS_NR]; @@ -4750,6 +4779,8 @@ struct rtw89_dpk_info { u8 cur_idx[RTW89_DPK_RF_PATH]; u8 cur_k_set; struct rtw89_dpk_bkup_para bp[RTW89_DPK_RF_PATH][RTW89_DPK_BKUP_NUM]; + u8 max_dpk_txagc[RTW89_DPK_RF_PATH]; + u32 dpk_order[RTW89_DPK_RF_PATH]; }; struct rtw89_fem_info { @@ -4924,6 +4955,7 @@ struct rtw89_regd { struct rtw89_regulatory_info { const struct rtw89_regd *regd; enum rtw89_reg_6ghz_power reg_6ghz_power; + struct rtw89_reg_6ghz_tpe reg_6ghz_tpe; DECLARE_BITMAP(block_unii4, RTW89_REGD_MAX_COUNTRY_NUM); DECLARE_BITMAP(block_6ghz, RTW89_REGD_MAX_COUNTRY_NUM); DECLARE_BITMAP(block_6ghz_sp, RTW89_REGD_MAX_COUNTRY_NUM); @@ -5469,7 +5501,7 @@ struct rtw89_dev { struct rtw89_wow_param wow; /* napi structure */ - struct net_device netdev; + struct net_device *netdev; struct napi_struct napi; int napi_budget_countdown; @@ -6408,6 +6440,16 @@ static inline bool rtw89_is_mlo_1_1(struct rtw89_dev *rtwdev) } } +static inline bool rtw89_is_rtl885xb(struct rtw89_dev *rtwdev) +{ + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; + + if (chip_id == RTL8852B || chip_id == RTL8851B || chip_id == RTL8852BT) + return true; + + return false; +} + int rtw89_core_tx_write(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct sk_buff *skb, int *qsel); int 
rtw89_h2c_tx(struct rtw89_dev *rtwdev, @@ -6441,7 +6483,7 @@ void rtw89_core_query_rxdesc_v2(struct rtw89_dev *rtwdev, u8 *data, u32 data_offset); void rtw89_core_napi_start(struct rtw89_dev *rtwdev); void rtw89_core_napi_stop(struct rtw89_dev *rtwdev); -void rtw89_core_napi_init(struct rtw89_dev *rtwdev); +int rtw89_core_napi_init(struct rtw89_dev *rtwdev); void rtw89_core_napi_deinit(struct rtw89_dev *rtwdev); int rtw89_core_sta_add(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, @@ -6470,6 +6512,8 @@ struct rtw89_dev *rtw89_alloc_ieee80211_hw(struct device *device, u32 bus_data_size, const struct rtw89_chip_info *chip); void rtw89_free_ieee80211_hw(struct rtw89_dev *rtwdev); +u8 rtw89_acquire_mac_id(struct rtw89_dev *rtwdev); +void rtw89_release_mac_id(struct rtw89_dev *rtwdev, u8 mac_id); void rtw89_core_set_chip_txpwr(struct rtw89_dev *rtwdev); void rtw89_get_default_chandef(struct cfg80211_chan_def *chandef); void rtw89_get_channel_params(const struct cfg80211_chan_def *chandef, @@ -6506,8 +6550,8 @@ void rtw89_core_scan_start(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, const u8 *mac_addr, bool hw_scan); void rtw89_core_scan_complete(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif, bool hw_scan); -void rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev, - struct rtw89_vif *rtwvif, bool active); +int rtw89_reg_6ghz_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + bool active); void rtw89_core_update_p2p_ps(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif); void rtw89_core_ntfy_btc_event(struct rtw89_dev *rtwdev, enum rtw89_btc_hmsg event); diff --git a/drivers/net/wireless/realtek/rtw89/debug.c b/drivers/net/wireless/realtek/rtw89/debug.c index affffc4092ba..9e1353cce9cc 100644 --- a/drivers/net/wireless/realtek/rtw89/debug.c +++ b/drivers/net/wireless/realtek/rtw89/debug.c @@ -818,6 +818,28 @@ static const struct dbgfs_txpwr_table *dbgfs_txpwr_tables[RTW89_CHIP_GEN_NUM] = [RTW89_CHIP_BE] = &dbgfs_txpwr_table_be, }; +static +void rtw89_debug_priv_txpwr_table_get_regd(struct seq_file *m, + struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan) +{ + const struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + const struct rtw89_reg_6ghz_tpe *tpe6 = ®ulatory->reg_6ghz_tpe; + + seq_printf(m, "[Chanctx] band %u, ch %u, bw %u\n", + chan->band_type, chan->channel, chan->band_width); + + seq_puts(m, "[Regulatory] "); + __print_regd(m, rtwdev, chan); + + if (chan->band_type == RTW89_BAND_6G) { + seq_printf(m, "[reg6_pwr_type] %u\n", regulatory->reg_6ghz_power); + + if (tpe6->valid) + seq_printf(m, "[TPE] %d dBm\n", tpe6->constraint); + } +} + static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v) { struct rtw89_debugfs_priv *debugfs_priv = m->private; @@ -831,8 +853,7 @@ static int rtw89_debug_priv_txpwr_table_get(struct seq_file *m, void *v) rtw89_leave_ps_mode(rtwdev); chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); - seq_puts(m, "[Regulatory] "); - __print_regd(m, rtwdev, chan); + rtw89_debug_priv_txpwr_table_get_regd(m, rtwdev, chan); seq_puts(m, "[SAR]\n"); rtw89_print_sar(m, rtwdev, chan->freq); @@ -2996,7 +3017,7 @@ static bool is_dbg_port_valid(struct rtw89_dev *rtwdev, u32 sel) sel >= RTW89_DBG_PORT_SEL_PCIE_TXDMA && sel <= RTW89_DBG_PORT_SEL_PCIE_MISC2) return false; - if (rtwdev->chip->chip_id == RTL8852B && + if (rtw89_is_rtl885xb(rtwdev) && sel >= RTW89_DBG_PORT_SEL_PTCL_C1 && sel <= RTW89_DBG_PORT_SEL_TXTF_INFOH_C1) return false; @@ -3531,7 +3552,7 @@ static void rtw89_sta_info_get_iter(void *data, struct 
ieee80211_sta *sta) case RX_ENC_HE: seq_printf(m, "HE %dSS MCS-%d GI:%s", status->nss, status->rate_idx, status->he_gi <= NL80211_RATE_INFO_HE_GI_3_2 ? - he_gi_str[rate->he_gi] : "N/A"); + he_gi_str[status->he_gi] : "N/A"); break; case RX_ENC_EHT: seq_printf(m, "EHT %dSS MCS-%d GI:%s", status->nss, status->rate_idx, @@ -3645,17 +3666,21 @@ static int rtw89_debug_priv_phy_info_get(struct seq_file *m, void *v) } static void rtw89_dump_addr_cam(struct seq_file *m, + struct rtw89_dev *rtwdev, struct rtw89_addr_cam_entry *addr_cam) { - struct rtw89_sec_cam_entry *sec_entry; + struct rtw89_cam_info *cam_info = &rtwdev->cam_info; + const struct rtw89_sec_cam_entry *sec_entry; + u8 sec_cam_idx; int i; seq_printf(m, "\taddr_cam_idx=%u\n", addr_cam->addr_cam_idx); seq_printf(m, "\t-> bssid_cam_idx=%u\n", addr_cam->bssid_cam_idx); seq_printf(m, "\tsec_cam_bitmap=%*ph\n", (int)sizeof(addr_cam->sec_cam_map), addr_cam->sec_cam_map); - for (i = 0; i < RTW89_SEC_CAM_IN_ADDR_CAM; i++) { - sec_entry = addr_cam->sec_entries[i]; + for_each_set_bit(i, addr_cam->sec_cam_map, RTW89_SEC_CAM_IN_ADDR_CAM) { + sec_cam_idx = addr_cam->sec_ent[i]; + sec_entry = cam_info->sec_entries[sec_cam_idx]; if (!sec_entry) continue; seq_printf(m, "\tsec[%d]: sec_cam_idx %u", i, sec_entry->sec_cam_idx); @@ -3694,12 +3719,13 @@ static void rtw89_vif_ids_get_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + struct rtw89_dev *rtwdev = rtwvif->rtwdev; struct seq_file *m = (struct seq_file *)data; struct rtw89_bssid_cam_entry *bssid_cam = &rtwvif->bssid_cam; seq_printf(m, "VIF [%d] %pM\n", rtwvif->mac_id, rtwvif->mac_addr); seq_printf(m, "\tbssid_cam_idx=%u\n", bssid_cam->bssid_cam_idx); - rtw89_dump_addr_cam(m, &rtwvif->addr_cam); + rtw89_dump_addr_cam(m, rtwdev, &rtwvif->addr_cam); rtw89_dump_pkt_offload(m, &rtwvif->general_pkt_list, "\tpkt_ofld[GENERAL]: "); } @@ -3726,11 +3752,12 @@ static void rtw89_dump_ba_cam(struct seq_file *m, struct rtw89_sta *rtwsta) static void rtw89_sta_ids_get_iter(void *data, struct ieee80211_sta *sta) { struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + struct rtw89_dev *rtwdev = rtwsta->rtwdev; struct seq_file *m = (struct seq_file *)data; seq_printf(m, "STA [%d] %pM %s\n", rtwsta->mac_id, sta->addr, sta->tdls ? 
"(TDLS)" : ""); - rtw89_dump_addr_cam(m, &rtwsta->addr_cam); + rtw89_dump_addr_cam(m, rtwdev, &rtwsta->addr_cam); rtw89_dump_ba_cam(m, rtwsta); } diff --git a/drivers/net/wireless/realtek/rtw89/fw.c b/drivers/net/wireless/realtek/rtw89/fw.c index 044a5b90c7f4..fbe08c162b93 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.c +++ b/drivers/net/wireless/realtek/rtw89/fw.c @@ -13,22 +13,20 @@ #include "ps.h" #include "reg.h" #include "util.h" +#include "wow.h" struct rtw89_eapol_2_of_2 { - struct ieee80211_hdr_3addr hdr; u8 gtkbody[14]; u8 key_des_ver; u8 rsvd[92]; -} __packed __aligned(2); +} __packed; struct rtw89_sa_query { - struct ieee80211_hdr_3addr hdr; u8 category; u8 action; -} __packed __aligned(2); +} __packed; struct rtw89_arp_rsp { - struct ieee80211_hdr_3addr addr; u8 llc_hdr[sizeof(rfc1042_header)]; __be16 llc_type; struct arphdr arp_hdr; @@ -36,7 +34,7 @@ struct rtw89_arp_rsp { __be32 sender_ip; u8 target_hw[ETH_ALEN]; __be32 target_ip; -} __packed __aligned(2); +} __packed; static const u8 mss_signature[] = {0x4D, 0x53, 0x53, 0x4B, 0x50, 0x4F, 0x4F, 0x4C}; @@ -462,7 +460,7 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, const u8 *mfw = firmware->data; u32 mfw_len = firmware->size; const struct rtw89_mfw_hdr *mfw_hdr = (const struct rtw89_mfw_hdr *)mfw; - const struct rtw89_mfw_info *mfw_info; + const struct rtw89_mfw_info *mfw_info = NULL, *tmp; int i; if (mfw_hdr->sig != RTW89_MFW_SIG) { @@ -476,15 +474,27 @@ int rtw89_mfw_recognize(struct rtw89_dev *rtwdev, enum rtw89_fw_type type, } for (i = 0; i < mfw_hdr->fw_nr; i++) { - mfw_info = &mfw_hdr->info[i]; - if (mfw_info->type == type) { - if (mfw_info->cv == rtwdev->hal.cv && !mfw_info->mp) - goto found; - if (type == RTW89_FW_LOGFMT) - goto found; + tmp = &mfw_hdr->info[i]; + if (tmp->type != type) + continue; + + if (type == RTW89_FW_LOGFMT) { + mfw_info = tmp; + goto found; + } + + /* Version order of WiFi firmware in firmware file are not in order, + * pass all firmware to find the equal or less but closest version. + */ + if (tmp->cv <= rtwdev->hal.cv && !tmp->mp) { + if (!mfw_info || mfw_info->cv < tmp->cv) + mfw_info = tmp; } } + if (mfw_info) + goto found; + if (!nowarn) rtw89_err(rtwdev, "no suitable firmware found\n"); return -ENOENT; @@ -606,10 +616,16 @@ int __rtw89_fw_recognize_from_elm(struct rtw89_dev *rtwdev, struct rtw89_hal *hal = &rtwdev->hal; struct rtw89_fw_suit *fw_suit; - if (hal->cv != elm->u.bbmcu.cv) + /* Version of BB MCU is in decreasing order in firmware file, so take + * first equal or less version, which is equal or less but closest version. 
+ */ + if (hal->cv < elm->u.bbmcu.cv) return 1; /* ignore this element */ fw_suit = rtw89_fw_suit_get(rtwdev, type); + if (fw_suit->data) + return 1; /* ignore this element (a firmware is taken already) */ + fw_suit->data = elm->u.bbmcu.contents; fw_suit->size = le32_to_cpu(elm->size); @@ -659,10 +675,12 @@ static const struct __fw_feat_cfg fw_feat_tbl[] = { __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 36, 0, SCAN_OFFLOAD), __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 40, 0, CRASH_TRIGGER), __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 56, 10, BEACON_FILTER), + __CFG_FW_FEAT(RTL8852C, ge, 0, 27, 80, 0, WOW_REASON_V1), __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 30, 0, CRASH_TRIGGER), __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 11, 0, MACID_PAUSE_SLEEP), __CFG_FW_FEAT(RTL8922A, ge, 0, 34, 35, 0, SCAN_OFFLOAD), __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 12, 0, BEACON_FILTER), + __CFG_FW_FEAT(RTL8922A, ge, 0, 35, 22, 0, WOW_REASON_V1), }; static void rtw89_fw_iterate_feature_cfg(struct rtw89_fw_info *fw, @@ -2179,8 +2197,10 @@ static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev, 0x8E, 0x01, 0x03, 0x00, 0x5F, 0x02, 0x03}; struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); struct rtw89_wow_param *rtw_wow = &rtwdev->wow; struct rtw89_eapol_2_of_2 *eapol_pkt; + struct ieee80211_hdr_3addr *hdr; struct sk_buff *skb; u8 key_des_ver; @@ -2193,17 +2213,21 @@ static struct sk_buff *rtw89_eapol_get(struct rtw89_dev *rtwdev, else key_des_ver = 0; - skb = dev_alloc_skb(sizeof(*eapol_pkt)); + skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*eapol_pkt)); if (!skb) return NULL; + hdr = skb_put_zero(skb, sizeof(*hdr)); + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | + IEEE80211_FCTL_TODS | + IEEE80211_FCTL_PROTECTED); + ether_addr_copy(hdr->addr1, bss_conf->bssid); + ether_addr_copy(hdr->addr2, vif->addr); + ether_addr_copy(hdr->addr3, bss_conf->bssid); + + skb_put_zero(skb, sec_hdr_len); + eapol_pkt = skb_put_zero(skb, sizeof(*eapol_pkt)); - eapol_pkt->hdr.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | - IEEE80211_FCTL_TODS | - IEEE80211_FCTL_PROTECTED); - ether_addr_copy(eapol_pkt->hdr.addr1, bss_conf->bssid); - ether_addr_copy(eapol_pkt->hdr.addr2, vif->addr); - ether_addr_copy(eapol_pkt->hdr.addr3, bss_conf->bssid); memcpy(eapol_pkt->gtkbody, gtkbody, sizeof(gtkbody)); eapol_pkt->key_des_ver = key_des_ver; @@ -2215,20 +2239,26 @@ static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev, { struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); + struct ieee80211_hdr_3addr *hdr; struct rtw89_sa_query *sa_query; struct sk_buff *skb; - skb = dev_alloc_skb(sizeof(*sa_query)); + skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*sa_query)); if (!skb) return NULL; + hdr = skb_put_zero(skb, sizeof(*hdr)); + hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | + IEEE80211_STYPE_ACTION | + IEEE80211_FCTL_PROTECTED); + ether_addr_copy(hdr->addr1, bss_conf->bssid); + ether_addr_copy(hdr->addr2, vif->addr); + ether_addr_copy(hdr->addr3, bss_conf->bssid); + + skb_put_zero(skb, sec_hdr_len); + sa_query = skb_put_zero(skb, sizeof(*sa_query)); - sa_query->hdr.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | - IEEE80211_STYPE_ACTION | - IEEE80211_FCTL_PROTECTED); - ether_addr_copy(sa_query->hdr.addr1, bss_conf->bssid); - ether_addr_copy(sa_query->hdr.addr2, vif->addr); - ether_addr_copy(sa_query->hdr.addr3, 
bss_conf->bssid); sa_query->category = WLAN_CATEGORY_SA_QUERY; sa_query->action = WLAN_ACTION_SA_QUERY_RESPONSE; @@ -2238,17 +2268,19 @@ static struct sk_buff *rtw89_sa_query_get(struct rtw89_dev *rtwdev, static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { + u8 sec_hdr_len = rtw89_wow_get_sec_hdr_len(rtwdev); struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + struct ieee80211_hdr_3addr *hdr; struct rtw89_arp_rsp *arp_skb; struct arphdr *arp_hdr; struct sk_buff *skb; __le16 fc; - skb = dev_alloc_skb(sizeof(struct rtw89_arp_rsp)); + skb = dev_alloc_skb(sizeof(*hdr) + sec_hdr_len + sizeof(*arp_skb)); if (!skb) return NULL; - arp_skb = skb_put_zero(skb, sizeof(*arp_skb)); + hdr = skb_put_zero(skb, sizeof(*hdr)); if (rtw_wow->ptk_alg) fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS | @@ -2256,11 +2288,14 @@ static struct sk_buff *rtw89_arp_response_get(struct rtw89_dev *rtwdev, else fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS); - arp_skb->addr.frame_control = fc; - ether_addr_copy(arp_skb->addr.addr1, rtwvif->bssid); - ether_addr_copy(arp_skb->addr.addr2, rtwvif->mac_addr); - ether_addr_copy(arp_skb->addr.addr3, rtwvif->bssid); + hdr->frame_control = fc; + ether_addr_copy(hdr->addr1, rtwvif->bssid); + ether_addr_copy(hdr->addr2, rtwvif->mac_addr); + ether_addr_copy(hdr->addr3, rtwvif->bssid); + skb_put_zero(skb, sec_hdr_len); + + arp_skb = skb_put_zero(skb, sizeof(*arp_skb)); memcpy(arp_skb->llc_hdr, rfc1042_header, sizeof(rfc1042_header)); arp_skb->llc_type = htons(ETH_P_ARP); @@ -2461,6 +2496,7 @@ int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) struct rtw89_h2c_lps_ch_info *h2c; u32 len = sizeof(*h2c); struct sk_buff *skb; + u32 done; int ret; if (chip->chip_gen != RTW89_CHIP_BE) @@ -2484,12 +2520,18 @@ int rtw89_fw_h2c_lps_ch_info(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) H2C_CAT_OUTSRC, H2C_CL_OUTSRC_DM, H2C_FUNC_FW_LPS_CH_INFO, 0, 0, len); + rtw89_phy_write32_mask(rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT, 0); ret = rtw89_h2c_tx(rtwdev, skb, false); if (ret) { rtw89_err(rtwdev, "failed to send h2c\n"); goto fail; } + ret = read_poll_timeout(rtw89_phy_read32_mask, done, done, 50, 5000, + true, rtwdev, R_CHK_LPS_STAT, B_CHK_LPS_STAT); + if (ret) + rtw89_warn(rtwdev, "h2c_lps_ch_info done polling timeout\n"); + return 0; fail: dev_kfree_skb_any(skb); @@ -2752,11 +2794,11 @@ static void __get_sta_he_pkt_padding(struct rtw89_dev *rtwdev, ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; if (ppe16 != 7 && ppe8 == 7) - pads[i] = 2; + pads[i] = RTW89_PE_DURATION_16; else if (ppe8 != 7) - pads[i] = 1; + pads[i] = RTW89_PE_DURATION_8; else - pads[i] = 0; + pads[i] = RTW89_PE_DURATION_0; } } @@ -2889,11 +2931,11 @@ static void __get_sta_eht_pkt_padding(struct rtw89_dev *rtwdev, ppe8 = (ppe >> sh) & IEEE80211_PPE_THRES_NSS_MASK; if (ppe16 != 7 && ppe8 == 7) - pads[i] = 2; + pads[i] = RTW89_PE_DURATION_16_20; else if (ppe8 != 7) - pads[i] = 1; + pads[i] = RTW89_PE_DURATION_8; else - pads[i] = 0; + pads[i] = RTW89_PE_DURATION_0; } } @@ -4850,6 +4892,7 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev *rtwdev, { struct rtw89_hw_scan_info *scan_info = &rtwdev->scan_info; struct rtw89_wait_info *wait = &rtwdev->mac.fw_ofld_wait; + struct cfg80211_scan_request *req = rtwvif->scan_req; struct rtw89_h2c_scanofld_be_macc_role *macc_role; struct rtw89_chan *op = &scan_info->op_chan; struct rtw89_h2c_scanofld_be_opch *opch; @@ -4923,6 +4966,15 @@ int rtw89_fw_h2c_scan_offload_be(struct rtw89_dev 
*rtwdev, RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW); h2c->w7 = le32_encode_bits(option->prohib_chan >> 32, RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH); + if (req->no_cck) { + h2c->w0 |= le32_encode_bits(true, RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE); + h2c->w8 = le32_encode_bits(RTW89_HW_RATE_OFDM6, + RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ) | + le32_encode_bits(RTW89_HW_RATE_OFDM6, + RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ) | + le32_encode_bits(RTW89_HW_RATE_OFDM6, + RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ); + } ptr += sizeof(*h2c); for (i = 0; i < option->num_macc_role; i++) { @@ -6245,7 +6297,14 @@ void rtw89_hw_scan_abort(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif) ret = rtw89_hw_scan_offload(rtwdev, vif, false); if (ret) - rtw89_hw_scan_complete(rtwdev, vif, true); + rtw89_warn(rtwdev, "rtw89_hw_scan_offload failed ret %d\n", ret); + + /* Indicate ieee80211_scan_completed() before returning, which is safe + * because scan abort command always waits for completion of + * RTW89_SCAN_END_SCAN_NOTIFY, so that ieee80211_stop() can flush scan + * work properly. + */ + rtw89_hw_scan_complete(rtwdev, vif, true); } static bool rtw89_is_any_vif_connected_or_connecting(struct rtw89_dev *rtwdev) @@ -6715,10 +6774,8 @@ int rtw89_fw_h2c_wow_gtk_ofld(struct rtw89_dev *rtwdev, skb_put(skb, len); h2c = (struct rtw89_h2c_wow_gtk_ofld *)skb->data; - if (!enable) { - skb_put_zero(skb, sizeof(*gtk_info)); + if (!enable) goto hdr; - } ret = rtw89_fw_h2c_add_general_pkt(rtwdev, rtwvif, RTW89_PKT_OFLD_TYPE_EAPOL_KEY, diff --git a/drivers/net/wireless/realtek/rtw89/fw.h b/drivers/net/wireless/realtek/rtw89/fw.h index 4151c9d566bd..c3b4324c621c 100644 --- a/drivers/net/wireless/realtek/rtw89/fw.h +++ b/drivers/net/wireless/realtek/rtw89/fw.h @@ -2722,6 +2722,7 @@ struct rtw89_h2c_scanofld_be { #define RTW89_H2C_SCANOFLD_BE_W0_MACID GENMASK(23, 8) #define RTW89_H2C_SCANOFLD_BE_W0_PORT GENMASK(26, 24) #define RTW89_H2C_SCANOFLD_BE_W0_BAND GENMASK(28, 27) +#define RTW89_H2C_SCANOFLD_BE_W0_PROBE_WITH_RATE BIT(29) #define RTW89_H2C_SCANOFLD_BE_W1_NUM_MACC_ROLE GENMASK(7, 0) #define RTW89_H2C_SCANOFLD_BE_W1_NUM_OP GENMASK(15, 8) #define RTW89_H2C_SCANOFLD_BE_W1_NORM_PD GENMASK(31, 16) @@ -2738,6 +2739,9 @@ struct rtw89_h2c_scanofld_be { #define RTW89_H2C_SCANOFLD_BE_W5_MLO_MODE GENMASK(31, 0) #define RTW89_H2C_SCANOFLD_BE_W6_CHAN_PROHIB_LOW GENMASK(31, 0) #define RTW89_H2C_SCANOFLD_BE_W7_CHAN_PROHIB_HIGH GENMASK(31, 0) +#define RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_2GHZ GENMASK(7, 0) +#define RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_5GHZ GENMASK(15, 8) +#define RTW89_H2C_SCANOFLD_BE_W8_PROBE_RATE_6GHZ GENMASK(23, 16) static inline void RTW89_SET_FWCMD_P2P_MACID(void *cmd, u32 val) { @@ -4655,4 +4659,10 @@ const struct rtw89_rfe_parms * rtw89_load_rfe_data_from_fw(struct rtw89_dev *rtwdev, const struct rtw89_rfe_parms *init); +enum rtw89_wow_wakeup_ver { + RTW89_WOW_REASON_V0, + RTW89_WOW_REASON_V1, + RTW89_WOW_REASON_NUM, +}; + #endif diff --git a/drivers/net/wireless/realtek/rtw89/mac.c b/drivers/net/wireless/realtek/rtw89/mac.c index 3fe0046f6eaa..e2399796aeb1 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.c +++ b/drivers/net/wireless/realtek/rtw89/mac.c @@ -1568,6 +1568,8 @@ static int dmac_func_en_ax(struct rtw89_dev *rtwdev) B_AX_DLE_CPUIO_CLK_EN | B_AX_PKT_IN_CLK_EN | B_AX_STA_SCH_CLK_EN | B_AX_TXPKT_CTRL_CLK_EN | B_AX_WD_RLS_CLK_EN | B_AX_BBRPT_CLK_EN); + if (chip_id == RTL8852BT) + val32 |= B_AX_AXIDMA_CLK_EN; rtw89_write32(rtwdev, R_AX_DMAC_CLK_EN, val32); return 0; @@ -1577,7 +1579,7 @@ static int 
chip_func_en_ax(struct rtw89_dev *rtwdev) { enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; - if (chip_id == RTL8852A || chip_id == RTL8852B) + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) rtw89_write32_set(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_OCP_L1_MASK); @@ -2146,8 +2148,8 @@ int rtw89_mac_preload_init(struct rtw89_dev *rtwdev, enum rtw89_mac_idx mac_idx, { const struct rtw89_chip_info *chip = rtwdev->chip; - if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B || - chip->chip_id == RTL8851B || !is_qta_poh(rtwdev)) + if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev) || + !is_qta_poh(rtwdev)) return 0; return preload_init_set(rtwdev, mac_idx, mode); @@ -2183,8 +2185,7 @@ static void _patch_ss2f_path(struct rtw89_dev *rtwdev) { const struct rtw89_chip_info *chip = rtwdev->chip; - if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B || - chip->chip_id == RTL8851B) + if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) return; rtw89_write32_mask(rtwdev, R_AX_SS2FINFO_PATH, B_AX_SS_DEST_QUEUE_MASK, @@ -2360,7 +2361,7 @@ static int scheduler_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) rtw89_write32_mask(rtwdev, reg, B_AX_SIFS_MACTXEN_T1_MASK, SIFS_MACTXEN_T1); - if (rtwdev->chip->chip_id == RTL8852B || rtwdev->chip->chip_id == RTL8851B) { + if (rtw89_is_rtl885xb(rtwdev)) { reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_SCH_EXT_CTRL, mac_idx); rtw89_write32_set(rtwdev, reg, B_AX_PORT_RST_TSF_ADV); } @@ -2588,7 +2589,9 @@ static int trxptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) case RTL8852A: sifs = WMAC_SPEC_SIFS_OFDM_52A; break; + case RTL8851B: case RTL8852B: + case RTL8852BT: sifs = WMAC_SPEC_SIFS_OFDM_52B; break; default: @@ -2632,6 +2635,7 @@ static int rmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) #define RX_MAX_LEN_UNIT 512 #define PLD_RLS_MAX_PG 127 #define RX_SPEC_MAX_LEN (11454 + RX_MAX_LEN_UNIT) + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; int ret; u32 reg, rx_max_len, rx_qta; u16 val; @@ -2652,6 +2656,8 @@ static int rmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) B_AX_RX_DLK_DATA_TIME_MASK); val = u16_replace_bits(val, TRXCFG_RMAC_CCA_TO, B_AX_RX_DLK_CCA_TIME_MASK); + if (chip_id == RTL8852BT) + val |= B_AX_RX_DLK_RST_EN; rtw89_write16(rtwdev, reg, val); reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_RCR, mac_idx); @@ -2668,8 +2674,7 @@ static int rmac_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) rx_max_len /= RX_MAX_LEN_UNIT; rtw89_write32_mask(rtwdev, reg, B_AX_RX_MPDU_MAX_LEN_MASK, rx_max_len); - if (rtwdev->chip->chip_id == RTL8852A && - rtwdev->hal.cv == CHIP_CBV) { + if (chip_id == RTL8852A && rtwdev->hal.cv == CHIP_CBV) { rtw89_write16_mask(rtwdev, rtw89_mac_reg_by_idx(rtwdev, R_AX_DLK_PROTECT_CTL, mac_idx), B_AX_RX_DLK_CCA_TIME_MASK, 0); @@ -2700,7 +2705,7 @@ static int cmac_com_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) val = u32_replace_bits(val, 0, B_AX_TXSC_80M_MASK); rtw89_write32(rtwdev, reg, val); - if (chip_id == RTL8852A || chip_id == RTL8852B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PTCL_RRSR1, mac_idx); rtw89_write32_mask(rtwdev, reg, B_AX_RRSR_RATE_EN_MASK, RRSR_OFDM_CCK_EN); } @@ -2766,11 +2771,10 @@ static int ptcl_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) static int cmac_dma_init_ax(struct rtw89_dev *rtwdev, u8 mac_idx) { - enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; u32 reg; int ret; - if (chip_id != RTL8852B) + if (!rtw89_is_rtl885xb(rtwdev)) return 0; ret = rtw89_mac_check_mac_en(rtwdev, mac_idx, RTW89_CMAC_SEL); 
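The hunks above repeatedly collapse open-coded chip-id checks such as chip_id == RTL8852B || chip_id == RTL8851B into the new rtw89_is_rtl885xb() helper, so the freshly added RTL8852BT is covered at every RTL8852B/RTL8851B-like call site without editing each one again. A minimal standalone sketch of that refactor pattern follows; it uses a stubbed chip enum and a printf placeholder, not the real rtw89 headers or driver API:

/* Sketch only: stand-in types, not the rtw89 driver API. */
#include <stdbool.h>
#include <stdio.h>

enum chip_id { RTL8852A, RTL8851B, RTL8852B, RTL8852BT, RTL8852C };

/* One predicate names the whole 885xB-like family. */
static bool is_rtl885xb(enum chip_id id)
{
	return id == RTL8852B || id == RTL8851B || id == RTL8852BT;
}

/* Call sites test the family instead of listing ids, so adding a new
 * family member only means extending the predicate above.
 */
static void cmac_dma_init(enum chip_id id)
{
	if (!is_rtl885xb(id))
		return;	/* only the 885xB family needs this init */
	printf("CMAC DMA init for 885xB-family chip %d\n", id);
}

int main(void)
{
	cmac_dma_init(RTL8852BT);	/* picked up without a new branch */
	cmac_dma_init(RTL8852C);	/* skipped: not in the family */
	return 0;
}

The same predicate keeps negative tests consistent too: sites that used to read chip_id != RTL8852B && chip_id != RTL8851B simply become !rtw89_is_rtl885xb(rtwdev).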
@@ -3587,13 +3591,11 @@ static int enable_imr_ax(struct rtw89_dev *rtwdev, u8 mac_idx, static void err_imr_ctrl_ax(struct rtw89_dev *rtwdev, bool en) { - enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; - rtw89_write32(rtwdev, R_AX_DMAC_ERR_IMR, en ? DMAC_ERR_IMR_EN : DMAC_ERR_IMR_DIS); rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR, en ? CMAC0_ERR_IMR_EN : CMAC0_ERR_IMR_DIS); - if (chip_id != RTL8852B && rtwdev->mac.dle_info.c1_rx_qta) + if (!rtw89_is_rtl885xb(rtwdev) && rtwdev->mac.dle_info.c1_rx_qta) rtw89_write32(rtwdev, R_AX_CMAC_ERR_IMR_C1, en ? CMAC1_ERR_IMR_EN : CMAC1_ERR_IMR_DIS); } @@ -3719,10 +3721,9 @@ static int rtw89_mac_feat_init(struct rtw89_dev *rtwdev) static void rtw89_disable_fw_watchdog(struct rtw89_dev *rtwdev) { - enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; u32 val32; - if (chip_id == RTL8852B || chip_id == RTL8851B) { + if (rtw89_is_rtl885xb(rtwdev)) { rtw89_write32_clr(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_APB_WRAP_EN); rtw89_write32_set(rtwdev, R_AX_PLATFORM_ENABLE, B_AX_APB_WRAP_EN); return; @@ -3818,7 +3819,7 @@ static void rtw89_mac_dmac_func_pre_en_ax(struct rtw89_dev *rtwdev) enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; u32 val; - if (chip_id == RTL8851B) + if (chip_id == RTL8851B || chip_id == RTL8852BT) val = B_AX_DISPATCHER_CLK_EN | B_AX_AXIDMA_CLK_EN; else val = B_AX_DISPATCHER_CLK_EN; @@ -4664,8 +4665,7 @@ int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) { int ret; - rtwvif->mac_id = rtw89_core_acquire_bit_map(rtwdev->mac_id_map, - RTW89_MAX_MAC_ID_NUM); + rtwvif->mac_id = rtw89_acquire_mac_id(rtwdev); if (rtwvif->mac_id == RTW89_MAX_MAC_ID_NUM) return -ENOSPC; @@ -4676,7 +4676,7 @@ int rtw89_mac_add_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) return 0; release_mac_id: - rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id); + rtw89_release_mac_id(rtwdev, rtwvif->mac_id); return ret; } @@ -4686,7 +4686,7 @@ int rtw89_mac_remove_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) int ret; ret = rtw89_mac_vif_deinit(rtwdev, rtwvif); - rtw89_core_release_bit_map(rtwdev->mac_id_map, rtwvif->mac_id); + rtw89_release_mac_id(rtwdev, rtwvif->mac_id); return ret; } @@ -4757,6 +4757,9 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb, } return; case RTW89_SCAN_END_SCAN_NOTIFY: + if (rtwdev->scan_info.abort) + return; + if (rtwvif && rtwvif->scan_req && last_chan < rtwvif->scan_req->n_channels) { ret = rtw89_hw_scan_offload(rtwdev, vif, true); @@ -4765,7 +4768,7 @@ rtw89_mac_c2h_scanofld_rsp(struct rtw89_dev *rtwdev, struct sk_buff *skb, rtw89_warn(rtwdev, "HW scan failed: %d\n", ret); } } else { - rtw89_hw_scan_complete(rtwdev, vif, rtwdev->scan_info.abort); + rtw89_hw_scan_complete(rtwdev, vif, false); } break; case RTW89_SCAN_ENTER_OP_NOTIFY: @@ -5199,6 +5202,46 @@ rtw89_mac_c2h_mrc_status_rpt(struct rtw89_dev *rtwdev, struct sk_buff *c2h, u32 case RTW89_MAC_MRC_DEL_SCH_OK: func = H2C_FUNC_DEL_MRC; break; + case RTW89_MAC_MRC_EMPTY_SCH_FAIL: + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MRC C2H STS RPT: empty sch fail\n"); + return; + case RTW89_MAC_MRC_ROLE_NOT_EXIST_FAIL: + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MRC C2H STS RPT: role not exist fail\n"); + return; + case RTW89_MAC_MRC_DATA_NOT_FOUND_FAIL: + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MRC C2H STS RPT: data not found fail\n"); + return; + case RTW89_MAC_MRC_GET_NEXT_SLOT_FAIL: + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MRC C2H STS RPT: get next slot fail\n"); + return; + case RTW89_MAC_MRC_ALT_ROLE_FAIL: + 
rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MRC C2H STS RPT: alt role fail\n"); + return; + case RTW89_MAC_MRC_ADD_PSTIMER_FAIL: + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MRC C2H STS RPT: add ps timer fail\n"); + return; + case RTW89_MAC_MRC_MALLOC_FAIL: + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MRC C2H STS RPT: malloc fail\n"); + return; + case RTW89_MAC_MRC_SWITCH_CH_FAIL: + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MRC C2H STS RPT: switch ch fail\n"); + return; + case RTW89_MAC_MRC_TXNULL0_FAIL: + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MRC C2H STS RPT: tx null-0 fail\n"); + return; + case RTW89_MAC_MRC_PORT_FUNC_EN_FAIL: + rtw89_debug(rtwdev, RTW89_DBG_CHAN, + "MRC C2H STS RPT: port func en fail\n"); + return; default: rtw89_debug(rtwdev, RTW89_DBG_CHAN, "invalid MRC C2H STS RPT: status %d\n", status); @@ -5461,18 +5504,19 @@ void rtw89_mac_flush_txq(struct rtw89_dev *rtwdev, u32 queues, bool drop) int rtw89_mac_coex_init(struct rtw89_dev *rtwdev, const struct rtw89_mac_ax_coex *coex) { + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; u8 val; u16 val16; u32 val32; int ret; rtw89_write8_set(rtwdev, R_AX_GPIO_MUXCFG, B_AX_ENBT); - if (rtwdev->chip->chip_id != RTL8851B) + if (chip_id != RTL8851B && chip_id != RTL8852BT) rtw89_write8_set(rtwdev, R_AX_BTC_FUNC_EN, B_AX_PTA_WL_TX_EN); rtw89_write8_set(rtwdev, R_AX_BT_COEX_CFG_2 + 1, B_AX_GNT_BT_POLARITY >> 8); rtw89_write8_set(rtwdev, R_AX_CSR_MODE, B_AX_STATIS_BT_EN | B_AX_WL_ACT_MSK); rtw89_write8_set(rtwdev, R_AX_CSR_MODE + 2, B_AX_BT_CNT_RST >> 16); - if (rtwdev->chip->chip_id != RTL8851B) + if (chip_id != RTL8851B && chip_id != RTL8852BT) rtw89_write8_clr(rtwdev, R_AX_TRXPTCL_RESP_0 + 3, B_AX_RSP_CHK_BTCCA >> 24); val16 = rtw89_read16(rtwdev, R_AX_CCA_CFG_0); @@ -5755,8 +5799,7 @@ bool rtw89_mac_get_ctrl_path(struct rtw89_dev *rtwdev) if (chip->chip_id == RTL8852C || chip->chip_id == RTL8922A) return false; - else if (chip->chip_id == RTL8852A || chip->chip_id == RTL8852B || - chip->chip_id == RTL8851B) + else if (chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) val = rtw89_read8_mask(rtwdev, R_AX_SYS_SDIO_CTRL + 3, B_AX_LTE_MUX_CTRL_PATH >> 24); @@ -6317,9 +6360,30 @@ int rtw89_mac_ptk_drop_by_band_and_wait(struct rtw89_dev *rtwdev, return ret; } +int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable) +{ + struct rtw89_mac_h2c_info h2c_info = {}; + struct rtw89_mac_c2h_info c2h_info = {}; + u32 ret; + + h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL; + h2c_info.content_len = sizeof(h2c_info.u.hdr); + h2c_info.u.hdr.w0 = u32_encode_bits(wow_enable, RTW89_H2CREG_WOW_CPUIO_RX_CTRL_EN); + + ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info); + if (ret) + return ret; + + if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_WOW_CPUIO_RX_ACK) + ret = -EINVAL; + + return ret; +} + static int rtw89_wow_config_mac_ax(struct rtw89_dev *rtwdev, bool enable_wow) { const struct rtw89_mac_gen_def *mac = rtwdev->chip->mac_def; + const struct rtw89_chip_info *chip = rtwdev->chip; int ret; if (enable_wow) { @@ -6330,12 +6394,19 @@ static int rtw89_wow_config_mac_ax(struct rtw89_dev *rtwdev, bool enable_wow) } rtw89_write32_set(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP); + rtw89_mac_cpu_io_rx(rtwdev, enable_wow); rtw89_write32_clr(rtwdev, mac->rx_fltr, B_AX_SNIFFER_MODE); rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, false); rtw89_write32(rtwdev, R_AX_ACTION_FWD0, 0); rtw89_write32(rtwdev, R_AX_ACTION_FWD1, 0); rtw89_write32(rtwdev, R_AX_TF_FWD, 0); rtw89_write32(rtwdev, R_AX_HW_RPT_FWD, 0); + + if (chip->chip_id == RTL8852A || 
rtw89_is_rtl885xb(rtwdev)) + rtw89_write8(rtwdev, R_BE_DBG_WOW_READY, WOWLAN_NOT_READY); + else + rtw89_write32_set(rtwdev, R_AX_DBG_WOW, + B_AX_DBG_WOW_CPU_IO_RX_EN); } else { ret = rtw89_mac_resize_ple_rx_quota(rtwdev, false); if (ret) { @@ -6343,6 +6414,7 @@ static int rtw89_wow_config_mac_ax(struct rtw89_dev *rtwdev, bool enable_wow) return ret; } + rtw89_mac_cpu_io_rx(rtwdev, enable_wow); rtw89_write32_clr(rtwdev, R_AX_RX_FUNCTION_STOP, B_AX_HDR_RX_STOP); rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true); rtw89_write32(rtwdev, R_AX_ACTION_FWD0, TRXCFG_MPDU_PROC_ACT_FRWD); diff --git a/drivers/net/wireless/realtek/rtw89/mac.h b/drivers/net/wireless/realtek/rtw89/mac.h index a580cb719233..d5895516b3ed 100644 --- a/drivers/net/wireless/realtek/rtw89/mac.h +++ b/drivers/net/wireless/realtek/rtw89/mac.h @@ -466,6 +466,16 @@ enum rtw89_mac_mrc_status { RTW89_MAC_MRC_START_SCH_OK = 0, RTW89_MAC_MRC_STOP_SCH_OK = 1, RTW89_MAC_MRC_DEL_SCH_OK = 2, + RTW89_MAC_MRC_EMPTY_SCH_FAIL = 16, + RTW89_MAC_MRC_ROLE_NOT_EXIST_FAIL = 17, + RTW89_MAC_MRC_DATA_NOT_FOUND_FAIL = 18, + RTW89_MAC_MRC_GET_NEXT_SLOT_FAIL = 19, + RTW89_MAC_MRC_ALT_ROLE_FAIL = 20, + RTW89_MAC_MRC_ADD_PSTIMER_FAIL = 21, + RTW89_MAC_MRC_MALLOC_FAIL = 22, + RTW89_MAC_MRC_SWITCH_CH_FAIL = 23, + RTW89_MAC_MRC_TXNULL0_FAIL = 24, + RTW89_MAC_MRC_PORT_FUNC_EN_FAIL = 25, }; struct rtw89_mac_ax_coex { @@ -1446,5 +1456,6 @@ int rtw89_mac_dle_quota_change(struct rtw89_dev *rtwdev, enum rtw89_qta_mode mod int rtw89_mac_get_dle_rsvd_qt_cfg(struct rtw89_dev *rtwdev, enum rtw89_mac_dle_rsvd_qt_type type, struct rtw89_mac_dle_rsvd_qt_cfg *cfg); +int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable); #endif diff --git a/drivers/net/wireless/realtek/rtw89/mac80211.c b/drivers/net/wireless/realtek/rtw89/mac80211.c index 1ec97250e88e..1508693032cb 100644 --- a/drivers/net/wireless/realtek/rtw89/mac80211.c +++ b/drivers/net/wireless/realtek/rtw89/mac80211.c @@ -66,7 +66,7 @@ static int rtw89_ops_start(struct ieee80211_hw *hw) return ret; } -static void rtw89_ops_stop(struct ieee80211_hw *hw) +static void rtw89_ops_stop(struct ieee80211_hw *hw, bool suspend) { struct rtw89_dev *rtwdev = hw->priv; @@ -397,15 +397,14 @@ static void rtw89_conf_tx(struct rtw89_dev *rtwdev, } static void rtw89_station_mode_sta_assoc(struct rtw89_dev *rtwdev, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *conf) + struct ieee80211_vif *vif) { struct ieee80211_sta *sta; if (vif->type != NL80211_IFTYPE_STATION) return; - sta = ieee80211_find_sta(vif, conf->bssid); + sta = ieee80211_find_sta(vif, vif->cfg.ap_addr); if (!sta) { rtw89_err(rtwdev, "can't find sta to set sta_assoc state\n"); return; @@ -416,10 +415,8 @@ static void rtw89_station_mode_sta_assoc(struct rtw89_dev *rtwdev, rtw89_core_sta_assoc(rtwdev, vif, sta); } -static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *conf, - u64 changed) +static void rtw89_ops_vif_cfg_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, u64 changed) { struct rtw89_dev *rtwdev = hw->priv; struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; @@ -429,7 +426,7 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_ASSOC) { if (vif->cfg.assoc) { - rtw89_station_mode_sta_assoc(rtwdev, vif, conf); + rtw89_station_mode_sta_assoc(rtwdev, vif); rtw89_phy_set_bss_color(rtwdev, vif); rtw89_chip_cfg_txpwr_ul_tb_offset(rtwdev, vif); rtw89_mac_port_update(rtwdev, rtwvif); @@ -445,6 +442,26 @@ static void 
rtw89_ops_bss_info_changed(struct ieee80211_hw *hw, } } + if (changed & BSS_CHANGED_PS) + rtw89_recalc_lps(rtwdev); + + if (changed & BSS_CHANGED_ARP_FILTER) + rtwvif->ip_addr = vif->cfg.arp_addr_list[0]; + + mutex_unlock(&rtwdev->mutex); +} + +static void rtw89_ops_link_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *conf, + u64 changed) +{ + struct rtw89_dev *rtwdev = hw->priv; + struct rtw89_vif *rtwvif = (struct rtw89_vif *)vif->drv_priv; + + mutex_lock(&rtwdev->mutex); + rtw89_leave_ps_mode(rtwdev); + if (changed & BSS_CHANGED_BSSID) { ether_addr_copy(rtwvif->bssid, conf->bssid); rtw89_cam_bssid_changed(rtwdev, rtwvif); @@ -470,11 +487,8 @@ static void rtw89_ops_bss_info_changed(struct ieee80211_hw *hw, if (changed & BSS_CHANGED_CQM) rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true); - if (changed & BSS_CHANGED_PS) - rtw89_recalc_lps(rtwdev); - - if (changed & BSS_CHANGED_ARP_FILTER) - rtwvif->ip_addr = vif->cfg.arp_addr_list[0]; + if (changed & BSS_CHANGED_TPE) + rtw89_reg_6ghz_recalc(rtwdev, rtwvif, true); mutex_unlock(&rtwdev->mutex); } @@ -1143,7 +1157,8 @@ const struct ieee80211_ops rtw89_ops = { .change_interface = rtw89_ops_change_interface, .remove_interface = rtw89_ops_remove_interface, .configure_filter = rtw89_ops_configure_filter, - .bss_info_changed = rtw89_ops_bss_info_changed, + .vif_cfg_changed = rtw89_ops_vif_cfg_changed, + .link_info_changed = rtw89_ops_link_info_changed, .start_ap = rtw89_ops_start_ap, .stop_ap = rtw89_ops_stop_ap, .set_tim = rtw89_ops_set_tim, diff --git a/drivers/net/wireless/realtek/rtw89/mac_be.c b/drivers/net/wireless/realtek/rtw89/mac_be.c index 934bdf3b398f..f212b67771d5 100644 --- a/drivers/net/wireless/realtek/rtw89/mac_be.c +++ b/drivers/net/wireless/realtek/rtw89/mac_be.c @@ -2312,26 +2312,6 @@ static void rtw89_mac_dump_qta_lost_be(struct rtw89_dev *rtwdev) dump_err_status_dispatcher_be(rtwdev); } -static int rtw89_mac_cpu_io_rx(struct rtw89_dev *rtwdev, bool wow_enable) -{ - struct rtw89_mac_h2c_info h2c_info = {}; - struct rtw89_mac_c2h_info c2h_info = {}; - u32 ret; - - h2c_info.id = RTW89_FWCMD_H2CREG_FUNC_WOW_CPUIO_RX_CTRL; - h2c_info.content_len = sizeof(h2c_info.u.hdr); - h2c_info.u.hdr.w0 = u32_encode_bits(wow_enable, RTW89_H2CREG_WOW_CPUIO_RX_CTRL_EN); - - ret = rtw89_fw_msg_reg(rtwdev, &h2c_info, &c2h_info); - if (ret) - return ret; - - if (c2h_info.id != RTW89_FWCMD_C2HREG_FUNC_WOW_CPUIO_RX_ACK) - ret = -EINVAL; - - return ret; -} - static int rtw89_wow_config_mac_be(struct rtw89_dev *rtwdev, bool enable_wow) { if (enable_wow) { diff --git a/drivers/net/wireless/realtek/rtw89/pci.c b/drivers/net/wireless/realtek/rtw89/pci.c index 03bbcf9b6737..02afeb3acce4 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.c +++ b/drivers/net/wireless/realtek/rtw89/pci.c @@ -183,14 +183,17 @@ static void rtw89_pci_sync_skb_for_device(struct rtw89_dev *rtwdev, static void rtw89_pci_rxbd_info_update(struct rtw89_dev *rtwdev, struct sk_buff *skb) { - struct rtw89_pci_rxbd_info *rxbd_info; struct rtw89_pci_rx_info *rx_info = RTW89_PCI_RX_SKB_CB(skb); + struct rtw89_pci_rxbd_info *rxbd_info; + __le32 info; rxbd_info = (struct rtw89_pci_rxbd_info *)skb->data; - rx_info->fs = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_FS); - rx_info->ls = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_LS); - rx_info->len = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_WRITE_SIZE); - rx_info->tag = le32_get_bits(rxbd_info->dword, RTW89_PCI_RXBD_TAG); + info = rxbd_info->dword; + + rx_info->fs = le32_get_bits(info, 
RTW89_PCI_RXBD_FS); + rx_info->ls = le32_get_bits(info, RTW89_PCI_RXBD_LS); + rx_info->len = le32_get_bits(info, RTW89_PCI_RXBD_WRITE_SIZE); + rx_info->tag = le32_get_bits(info, RTW89_PCI_RXBD_TAG); } static int rtw89_pci_validate_rx_tag(struct rtw89_dev *rtwdev, @@ -1298,10 +1301,12 @@ u32 rtw89_pci_fill_txaddr_info(struct rtw89_dev *rtwdev, dma_addr_t dma, u8 *add_info_nr) { struct rtw89_pci_tx_addr_info_32 *txaddr_info = txaddr_info_addr; + __le16 option; txaddr_info->length = cpu_to_le16(total_len); - txaddr_info->option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | - RTW89_PCI_ADDR_NUM(1)); + option = cpu_to_le16(RTW89_PCI_ADDR_MSDU_LS | RTW89_PCI_ADDR_NUM(1)); + option |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_ADDR_HIGH_MASK); + txaddr_info->option = option; txaddr_info->dma = cpu_to_le32(dma); *add_info_nr = 1; @@ -1328,6 +1333,8 @@ u32 rtw89_pci_fill_txaddr_info_v1(struct rtw89_dev *rtwdev, length_option = FIELD_PREP(B_PCIADDR_LEN_V1_MASK, len) | FIELD_PREP(B_PCIADDR_HIGH_SEL_V1_MASK, 0) | FIELD_PREP(B_PCIADDR_LS_V1_MASK, remain == 0); + length_option |= u16_encode_bits(upper_32_bits(dma), + B_PCIADDR_HIGH_SEL_V1_MASK); txaddr_info->length_opt = cpu_to_le16(length_option); txaddr_info->dma_low_lsb = cpu_to_le16(FIELD_GET(GENMASK(15, 0), dma)); txaddr_info->dma_low_msb = cpu_to_le16(FIELD_GET(GENMASK(31, 16), dma)); @@ -1418,6 +1425,7 @@ static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev, struct sk_buff *skb = tx_req->skb; struct rtw89_pci_tx_data *tx_data = RTW89_PCI_TX_SKB_CB(skb); dma_addr_t dma; + __le16 opt; txdesc = skb_push(skb, txdesc_size); memset(txdesc, 0, txdesc_size); @@ -1430,7 +1438,9 @@ static int rtw89_pci_fwcmd_submit(struct rtw89_dev *rtwdev, } tx_data->dma = dma; - txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS); + opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS); + opt |= le16_encode_bits(upper_32_bits(dma), RTW89_PCI_TXBD_OPT_DMA_HI); + txbd->opt = opt; txbd->length = cpu_to_le16(skb->len); txbd->dma = cpu_to_le32(tx_data->dma); skb_queue_tail(&rtwpci->h2c_queue, skb); @@ -1446,6 +1456,7 @@ static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev, struct rtw89_core_tx_request *tx_req) { struct rtw89_pci_tx_wd *txwd; + __le16 opt; int ret; /* FWCMD queue doesn't have wd pages. 
Instead, it submits the CMD @@ -1470,7 +1481,9 @@ static int rtw89_pci_txbd_submit(struct rtw89_dev *rtwdev, list_add_tail(&txwd->list, &tx_ring->busy_pages); - txbd->option = cpu_to_le16(RTW89_PCI_TXBD_OPTION_LS); + opt = cpu_to_le16(RTW89_PCI_TXBD_OPT_LS); + opt |= le16_encode_bits(upper_32_bits(txwd->paddr), RTW89_PCI_TXBD_OPT_DMA_HI); + txbd->opt = opt; txbd->length = cpu_to_le16(txwd->len); txbd->dma = cpu_to_le32(txwd->paddr); @@ -1569,6 +1582,25 @@ const struct rtw89_pci_bd_ram rtw89_bd_ram_table_single[RTW89_TXCH_NUM] = { }; EXPORT_SYMBOL(rtw89_bd_ram_table_single); +static void rtw89_pci_init_wp_16sel(struct rtw89_dev *rtwdev) +{ + const struct rtw89_pci_info *info = rtwdev->pci_info; + u32 addr = info->wp_sel_addr; + u32 val; + int i; + + if (!info->wp_sel_addr) + return; + + for (i = 0; i < 16; i += 4) { + val = u32_encode_bits(i + 0, MASKBYTE0) | + u32_encode_bits(i + 1, MASKBYTE1) | + u32_encode_bits(i + 2, MASKBYTE2) | + u32_encode_bits(i + 3, MASKBYTE3); + rtw89_write32(rtwdev, addr + i, val); + } +} + static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev) { struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; @@ -1607,6 +1639,7 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev) rtw89_write32(rtwdev, addr_bdram, val32); } rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); + rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma)); } for (i = 0; i < RTW89_RXCH_NUM; i++) { @@ -1626,10 +1659,13 @@ static void rtw89_pci_reset_trx_rings(struct rtw89_dev *rtwdev) rtw89_write16(rtwdev, addr_num, bd_ring->len); rtw89_write32(rtwdev, addr_desa_l, bd_ring->dma); + rtw89_write32(rtwdev, addr_desa_l + 4, upper_32_bits(bd_ring->dma)); if (info->rx_ring_eq_is_full) rtw89_write16(rtwdev, addr_idx, bd_ring->wp); } + + rtw89_pci_init_wp_16sel(rtwdev); } static void rtw89_pci_release_tx_ring(struct rtw89_dev *rtwdev, @@ -2039,7 +2075,7 @@ static int rtw89_pci_write_config_byte(struct rtw89_dev *rtwdev, u16 addr, if (!ret) return 0; - if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) ret = rtw89_dbi_write8(rtwdev, addr, data); return ret; @@ -2057,7 +2093,7 @@ static int rtw89_pci_read_config_byte(struct rtw89_dev *rtwdev, u16 addr, if (!ret) return 0; - if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) ret = rtw89_dbi_read8(rtwdev, addr, value); return ret; @@ -2137,10 +2173,9 @@ __get_target(struct rtw89_dev *rtwdev, u16 *target, enum rtw89_pcie_phy phy_rate static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev) { - enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; int ret; - if (chip_id != RTL8852B && chip_id != RTL8851B) + if (!rtw89_is_rtl885xb(rtwdev)) return 0; ret = rtw89_write16_mdio_mask(rtwdev, RAC_REG_FLD_0, BAC_AUTOK_N_MASK, @@ -2150,14 +2185,13 @@ static int rtw89_pci_autok_x(struct rtw89_dev *rtwdev) static int rtw89_pci_auto_refclk_cal(struct rtw89_dev *rtwdev, bool autook_en) { - enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; enum rtw89_pcie_phy phy_rate; u16 val16, mgn_set, div_set, tar; u8 val8, bdr_ori; bool l1_flag = false; int ret = 0; - if (chip_id != RTL8852B && chip_id != RTL8851B) + if (!rtw89_is_rtl885xb(rtwdev)) return 0; ret = rtw89_pci_read_config_byte(rtwdev, RTW89_PCIE_PHY_RATE, &val8); @@ -2330,21 +2364,20 @@ static void rtw89_pci_disable_eq(struct rtw89_dev *rtwdev) u32 backup_aspm; u32 phy_offset; u16 oobs_val; - u16 val16; int ret; if 
(rtwdev->chip->chip_id != RTL8852C) return; - backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1); - rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); - g1_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); g2_oobs = rtw89_read16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); if (g1_oobs && g2_oobs) - goto out; + return; + + backup_aspm = rtw89_read32(rtwdev, R_AX_PCIE_MIX_CFG_V1); + rtw89_write32_clr(rtwdev, R_AX_PCIE_MIX_CFG_V1, B_AX_ASPM_CTRL_MASK); ret = rtw89_pci_get_phy_offset_by_link_speed(rtwdev, &phy_offset); if (ret) @@ -2354,15 +2387,16 @@ static void rtw89_pci_disable_eq(struct rtw89_dev *rtwdev) rtw89_write16(rtwdev, phy_offset + RAC_ANA10 * RAC_MULT, ADDR_SEL_PINOUT_DIS_VAL); rtw89_write16_set(rtwdev, phy_offset + RAC_ANA19 * RAC_MULT, B_PCIE_BIT_RD_SEL); - val16 = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT, - OOBS_LEVEL_MASK); - oobs_val = u16_encode_bits(val16, OOBS_SEN_MASK); + oobs_val = rtw89_read16_mask(rtwdev, phy_offset + RAC_ANA1F * RAC_MULT, + OOBS_LEVEL_MASK); - rtw89_write16(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT, oobs_val); + rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA03 * RAC_MULT, + OOBS_SEN_MASK, oobs_val); rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G1 + RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); - rtw89_write16(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT, oobs_val); + rtw89_write16_mask(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA03 * RAC_MULT, + OOBS_SEN_MASK, oobs_val); rtw89_write16_set(rtwdev, R_RAC_DIRECT_OFFSET_G2 + RAC_ANA09 * RAC_MULT, BAC_OOBS_SEL); @@ -2398,7 +2432,7 @@ static void rtw89_pci_l1off_pwroff(struct rtw89_dev *rtwdev) { enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; - if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B) + if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev)) return; rtw89_write32_clr(rtwdev, R_AX_PCIE_PS_CTRL, B_AX_L1OFF_PWR_OFF_EN); @@ -2428,7 +2462,7 @@ static void rtw89_pci_aphy_pwrcut(struct rtw89_dev *rtwdev) { enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; - if (chip_id != RTL8852A && chip_id != RTL8852B && chip_id != RTL8851B) + if (chip_id != RTL8852A && !rtw89_is_rtl885xb(rtwdev)) return; rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_PSUS_OFF_CAPC_EN); @@ -2438,7 +2472,7 @@ static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) { enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; - if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, @@ -2451,9 +2485,7 @@ static void rtw89_pci_hci_ldo(struct rtw89_dev *rtwdev) static int rtw89_pci_dphy_delay(struct rtw89_dev *rtwdev) { - enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; - - if (chip_id != RTL8852B && chip_id != RTL8851B) + if (!rtw89_is_rtl885xb(rtwdev)) return 0; return rtw89_write16_mdio_mask(rtwdev, RAC_REG_REV2, BAC_CMU_EN_DLY_MASK, @@ -2715,7 +2747,7 @@ static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) B_AX_PCIE_RX_APPLEN_MASK, 0); } - if (chip_id == RTL8852A || chip_id == RTL8852B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_TXDMA_MASK, tx_burst); rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG1, B_AX_PCIE_MAX_RXDMA_MASK, rx_burst); } else if (chip_id == RTL8852C) { @@ -2723,7 
+2755,7 @@ static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) rtw89_write32_mask(rtwdev, R_AX_HAXI_INIT_CFG1, B_AX_HAXI_MAX_RXDMA_MASK, rx_burst); } - if (chip_id == RTL8852A || chip_id == RTL8852B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { if (tag_mode == MAC_AX_TAG_SGL) { val32 = rtw89_read32(rtwdev, R_AX_PCIE_INIT_CFG1) & ~B_AX_LATENCY_CONTROL; @@ -2738,7 +2770,7 @@ static int rtw89_pci_mode_op(struct rtw89_dev *rtwdev) rtw89_write32_mask(rtwdev, info->exp_ctrl_reg, info->max_tag_num_mask, info->multi_tag_num); - if (chip_id == RTL8852A || chip_id == RTL8852B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_IDLE, wd_dma_idle_intvl); rtw89_write32_mask(rtwdev, R_AX_PCIE_INIT_CFG2, B_AX_WD_ITVL_ACT, @@ -2783,7 +2815,6 @@ static int rtw89_pci_ops_mac_pre_init_ax(struct rtw89_dev *rtwdev) const struct rtw89_pci_info *info = rtwdev->pci_info; int ret; - rtw89_pci_disable_eq(rtwdev); rtw89_pci_ber(rtwdev); rtw89_pci_rxdma_prefth(rtwdev); rtw89_pci_l1off_pwroff(rtwdev); @@ -2952,7 +2983,7 @@ static int rtw89_pci_ops_mac_post_init_ax(struct rtw89_dev *rtwdev) /* ltr sw trigger */ rtw89_write32_set(rtwdev, R_AX_LTR_CTRL_0, B_AX_APP_LTR_ACT); } - if (chip_id == RTL8852A || chip_id == RTL8852B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { /* ADDR info 8-byte mode */ rtw89_write32_set(rtwdev, R_AX_TX_ADDRESS_INFO_MODE_SETTING, B_AX_HOST_ADDR_INFO_8B_SEL); @@ -2995,6 +3026,27 @@ static void rtw89_pci_declaim_device(struct rtw89_dev *rtwdev, pci_disable_device(pdev); } +static void rtw89_pci_cfg_dac(struct rtw89_dev *rtwdev) +{ + struct rtw89_pci *rtwpci = (struct rtw89_pci *)rtwdev->priv; + const struct rtw89_chip_info *chip = rtwdev->chip; + + if (!rtwpci->enable_dac) + return; + + switch (chip->chip_id) { + case RTL8852A: + case RTL8852B: + case RTL8851B: + case RTL8852BT: + break; + default: + return; + } + + rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, RTW89_PCIE_BIT_EN_64BITS); +} + static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, struct pci_dev *pdev) { @@ -3009,16 +3061,17 @@ static int rtw89_pci_setup_mapping(struct rtw89_dev *rtwdev, goto err; } - ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (ret) { - rtw89_err(rtwdev, "failed to set dma mask to 32-bit\n"); - goto err_release_regions; - } - - ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); - if (ret) { - rtw89_err(rtwdev, "failed to set consistent dma mask to 32-bit\n"); - goto err_release_regions; + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(36)); + if (!ret) { + rtwpci->enable_dac = true; + rtw89_pci_cfg_dac(rtwdev); + } else { + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (ret) { + rtw89_err(rtwdev, + "failed to set dma and consistent mask to 32/36-bit\n"); + goto err_release_regions; + } } resource_len = pci_resource_len(pdev, bar_id); @@ -3169,6 +3222,7 @@ static int rtw89_pci_init_rx_bd(struct rtw89_dev *rtwdev, struct pci_dev *pdev, memset(rx_bd, 0, sizeof(*rx_bd)); rx_bd->buf_size = cpu_to_le16(buf_sz); rx_bd->dma = cpu_to_le32(dma); + rx_bd->opt = le16_encode_bits(upper_32_bits(dma), RTW89_PCI_RXBD_OPT_DMA_HI); rx_info->dma = dma; return 0; @@ -3761,7 +3815,7 @@ static void rtw89_pci_clkreq_set_ax(struct rtw89_dev *rtwdev, bool enable) if (ret) rtw89_err(rtwdev, "failed to set CLKREQ Delay\n"); - if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { if (enable) ret = 
rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, @@ -3813,7 +3867,7 @@ static void rtw89_pci_aspm_set_ax(struct rtw89_dev *rtwdev, bool enable) if (ret) rtw89_warn(rtwdev, "failed to read ASPM Delay\n"); - if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { if (enable) ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_L1_CTRL, @@ -3912,7 +3966,7 @@ static void rtw89_pci_l1ss_set_ax(struct rtw89_dev *rtwdev, bool enable) enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; int ret; - if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { if (enable) ret = rtw89_pci_config_byte_set(rtwdev, RTW89_PCIE_TIMER_CTRL, @@ -4109,7 +4163,7 @@ static int __maybe_unused rtw89_pci_suspend(struct device *dev) rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); - if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { rtw89_write32_clr(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); rtw89_write32_set(rtwdev, R_AX_PCIE_INIT_CFG1, @@ -4143,7 +4197,7 @@ static int __maybe_unused rtw89_pci_resume(struct device *dev) rtw89_write32_set(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_R_DIS_PRST); rtw89_write32_clr(rtwdev, R_AX_RSV_CTRL, B_AX_WLOCK_1C_BIT6); - if (chip_id == RTL8852A || chip_id == RTL8852B || chip_id == RTL8851B) { + if (chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev)) { rtw89_write32_set(rtwdev, R_AX_SYS_SDIO_CTRL, B_AX_PCIE_DIS_L2_CTRL_LDO_HCI); rtw89_write32_clr(rtwdev, R_AX_PCIE_INIT_CFG1, @@ -4155,6 +4209,8 @@ static int __maybe_unused rtw89_pci_resume(struct device *dev) B_AX_SEL_REQ_ENTR_L1); } rtw89_pci_l2_hci_ldo(rtwdev); + rtw89_pci_disable_eq(rtwdev); + rtw89_pci_cfg_dac(rtwdev); rtw89_pci_filter_out(rtwdev); rtw89_pci_link_cfg(rtwdev); rtw89_pci_l1ss_cfg(rtwdev); @@ -4289,11 +4345,16 @@ int rtw89_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_clear_resource; } + rtw89_pci_disable_eq(rtwdev); rtw89_pci_filter_out(rtwdev); rtw89_pci_link_cfg(rtwdev); rtw89_pci_l1ss_cfg(rtwdev); - rtw89_core_napi_init(rtwdev); + ret = rtw89_core_napi_init(rtwdev); + if (ret) { + rtw89_err(rtwdev, "failed to init napi\n"); + goto err_clear_resource; + } ret = rtw89_pci_request_irq(rtwdev, pdev); if (ret) { diff --git a/drivers/net/wireless/realtek/rtw89/pci.h b/drivers/net/wireless/realtek/rtw89/pci.h index 7666753ae983..48c3ab735db2 100644 --- a/drivers/net/wireless/realtek/rtw89/pci.h +++ b/drivers/net/wireless/realtek/rtw89/pci.h @@ -724,6 +724,11 @@ #define B_AX_CH11_BUSY BIT(1) #define B_AX_CH10_BUSY BIT(0) +#define R_AX_WP_ADDR_H_SEL0_3 0x1334 +#define R_AX_WP_ADDR_H_SEL4_7 0x1338 +#define R_AX_WP_ADDR_H_SEL8_11 0x133C +#define R_AX_WP_ADDR_H_SEL12_15 0x1340 + #define R_BE_HAXI_DMA_STOP1 0xB010 #define B_BE_STOP_WPDMA BIT(31) #define B_BE_STOP_CH14 BIT(14) @@ -823,6 +828,11 @@ #define R_BE_RPQ0_RXBD_DESA_L_V1 0xB308 #define R_BE_RPQ0_RXBD_DESA_H_V1 0xB30C +#define R_BE_WP_ADDR_H_SEL0_3_V1 0xB420 +#define R_BE_WP_ADDR_H_SEL4_7_V1 0xB424 +#define R_BE_WP_ADDR_H_SEL8_11_V1 0xB428 +#define R_BE_WP_ADDR_H_SEL12_15_V1 0xB42C + /* Configure */ #define R_AX_PCIE_INIT_CFG2 0x1004 #define B_AX_WD_ITVL_IDLE GENMASK(27, 24) @@ -1055,6 +1065,7 @@ #define 
RTW89_PCIE_TIMER_CTRL 0x0718 #define RTW89_PCIE_BIT_L1SUB BIT(5) #define RTW89_PCIE_L1_CTRL 0x0719 +#define RTW89_PCIE_BIT_EN_64BITS BIT(5) #define RTW89_PCIE_BIT_CLK BIT(4) #define RTW89_PCIE_BIT_L1 BIT(3) #define RTW89_PCIE_CLK_CTRL 0x0725 @@ -1304,6 +1315,7 @@ struct rtw89_pci_info { u32 rpwm_addr; u32 cpwm_addr; u32 mit_addr; + u32 wp_sel_addr; u32 tx_dma_ch_mask; const struct rtw89_pci_bd_idx_addr *bd_idx_addr_low_power; const struct rtw89_pci_ch_dma_addr_set *dma_addr_set; @@ -1330,11 +1342,11 @@ struct rtw89_pci_rx_info { u32 fs:1, ls:1, tag:13, len:14; }; -#define RTW89_PCI_TXBD_OPTION_LS BIT(14) - struct rtw89_pci_tx_bd_32 { __le16 length; - __le16 option; + __le16 opt; +#define RTW89_PCI_TXBD_OPT_LS BIT(14) +#define RTW89_PCI_TXBD_OPT_DMA_HI GENMASK(13, 6) __le32 dma; } __packed; @@ -1349,7 +1361,7 @@ struct rtw89_pci_tx_wp_info { #define RTW89_PCI_ADDR_MSDU_LS BIT(15) #define RTW89_PCI_ADDR_LS BIT(14) -#define RTW89_PCI_ADDR_HIGH(a) (((a) << 6) & GENMASK(13, 6)) +#define RTW89_PCI_ADDR_HIGH_MASK GENMASK(13, 6) #define RTW89_PCI_ADDR_NUM(x) ((x) & GENMASK(5, 0)) struct rtw89_pci_tx_addr_info_32 { @@ -1386,7 +1398,8 @@ struct rtw89_pci_rpp_fmt { struct rtw89_pci_rx_bd_32 { __le16 buf_size; - __le16 rsvd; + __le16 opt; +#define RTW89_PCI_RXBD_OPT_DMA_HI GENMASK(13, 6) __le32 dma; } __packed; @@ -1475,6 +1488,7 @@ struct rtw89_pci { bool running; bool low_power; bool under_recovery; + bool enable_dac; struct rtw89_pci_tx_ring tx_rings[RTW89_TXCH_NUM]; struct rtw89_pci_rx_ring rx_rings[RTW89_RXCH_NUM]; struct sk_buff_head h2c_queue; diff --git a/drivers/net/wireless/realtek/rtw89/phy.c b/drivers/net/wireless/realtek/rtw89/phy.c index a82b4c56a6f4..ad11d1414874 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.c +++ b/drivers/net/wireless/realtek/rtw89/phy.c @@ -2,6 +2,7 @@ /* Copyright(c) 2019-2020 Realtek Corporation */ +#include "chan.h" #include "coex.h" #include "debug.h" #include "fw.h" @@ -1676,7 +1677,7 @@ static void rtw89_phy_preinit_rf_nctl_ax(struct rtw89_dev *rtwdev) rtw89_phy_write32_set(rtwdev, R_P0_PATH_RST, 0x8000000); if (chip->chip_id != RTL8851B) rtw89_phy_write32_set(rtwdev, R_P1_PATH_RST, 0x8000000); - if (chip->chip_id == RTL8852B) + if (chip->chip_id == RTL8852B || chip->chip_id == RTL8852BT) rtw89_phy_write32_set(rtwdev, R_IOQ_IQK_DPK, 0x2); /* check 0x8080 */ @@ -1847,6 +1848,36 @@ static s8 rtw89_phy_txpwr_rf_to_mac(struct rtw89_dev *rtwdev, s8 txpwr_rf) return txpwr_rf >> (chip->txpwr_factor_rf - chip->txpwr_factor_mac); } +static s8 rtw89_phy_txpwr_dbm_to_mac(struct rtw89_dev *rtwdev, s8 dbm) +{ + const struct rtw89_chip_info *chip = rtwdev->chip; + + return clamp_t(s16, dbm << chip->txpwr_factor_mac, -64, 63); +} + +static s8 rtw89_phy_txpwr_dbm_without_tolerance(s8 dbm) +{ + const u8 tssi_deviation_point = 0; + const u8 tssi_max_deviation = 2; + + if (dbm <= tssi_deviation_point) + dbm -= tssi_max_deviation; + + return dbm; +} + +static s8 rtw89_phy_get_tpe_constraint(struct rtw89_dev *rtwdev, u8 band) +{ + struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + const struct rtw89_reg_6ghz_tpe *tpe = ®ulatory->reg_6ghz_tpe; + s8 cstr = S8_MAX; + + if (band == RTW89_BAND_6G && tpe->valid) + cstr = rtw89_phy_txpwr_dbm_without_tolerance(tpe->constraint); + + return rtw89_phy_txpwr_dbm_to_mac(rtwdev, cstr); +} + s8 rtw89_phy_read_txpwr_byrate(struct rtw89_dev *rtwdev, u8 band, u8 bw, const struct rtw89_rate_desc *rate_desc) { @@ -1921,6 +1952,7 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band, u8 regd = rtw89_regd_get(rtwdev, 
band); u8 reg6 = regulatory->reg_6ghz_power; s8 lmt = 0, sar; + s8 cstr; switch (band) { case RTW89_BAND_2G: @@ -1953,8 +1985,9 @@ s8 rtw89_phy_read_txpwr_limit(struct rtw89_dev *rtwdev, u8 band, lmt = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt); sar = rtw89_query_sar(rtwdev, freq); + cstr = rtw89_phy_get_tpe_constraint(rtwdev, band); - return min(lmt, sar); + return min3(lmt, sar, cstr); } EXPORT_SYMBOL(rtw89_phy_read_txpwr_limit); @@ -2178,6 +2211,7 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band, u8 regd = rtw89_regd_get(rtwdev, band); u8 reg6 = regulatory->reg_6ghz_power; s8 lmt_ru = 0, sar; + s8 cstr; switch (band) { case RTW89_BAND_2G: @@ -2210,8 +2244,9 @@ s8 rtw89_phy_read_txpwr_limit_ru(struct rtw89_dev *rtwdev, u8 band, lmt_ru = rtw89_phy_txpwr_rf_to_mac(rtwdev, lmt_ru); sar = rtw89_query_sar(rtwdev, freq); + cstr = rtw89_phy_get_tpe_constraint(rtwdev, band); - return min(lmt_ru, sar); + return min3(lmt_ru, sar, cstr); } static void @@ -5969,6 +6004,74 @@ void rtw89_phy_set_bss_color(struct rtw89_dev *rtwdev, struct ieee80211_vif *vif vif->cfg.aid, phy_idx); } +static bool rfk_chan_validate_desc(const struct rtw89_rfk_chan_desc *desc) +{ + return desc->ch != 0; +} + +static bool rfk_chan_is_equivalent(const struct rtw89_rfk_chan_desc *desc, + const struct rtw89_chan *chan) +{ + if (!rfk_chan_validate_desc(desc)) + return false; + + if (desc->ch != chan->channel) + return false; + + if (desc->has_band && desc->band != chan->band_type) + return false; + + if (desc->has_bw && desc->bw != chan->band_width) + return false; + + return true; +} + +struct rfk_chan_iter_data { + const struct rtw89_rfk_chan_desc desc; + unsigned int found; +}; + +static int rfk_chan_iter_search(const struct rtw89_chan *chan, void *data) +{ + struct rfk_chan_iter_data *iter_data = data; + + if (rfk_chan_is_equivalent(&iter_data->desc, chan)) + iter_data->found++; + + return 0; +} + +u8 rtw89_rfk_chan_lookup(struct rtw89_dev *rtwdev, + const struct rtw89_rfk_chan_desc *desc, u8 desc_nr, + const struct rtw89_chan *target_chan) +{ + int sel = -1; + u8 i; + + for (i = 0; i < desc_nr; i++) { + struct rfk_chan_iter_data iter_data = { + .desc = desc[i], + }; + + if (rfk_chan_is_equivalent(&desc[i], target_chan)) + return i; + + rtw89_iterate_entity_chan(rtwdev, rfk_chan_iter_search, &iter_data); + if (!iter_data.found && sel == -1) + sel = i; + } + + if (sel == -1) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "no idle rfk entry; force replace the first\n"); + sel = 0; + } + + return sel; +} +EXPORT_SYMBOL(rtw89_rfk_chan_lookup); + static void _rfk_write_rf(struct rtw89_dev *rtwdev, const struct rtw89_reg5_def *def) { diff --git a/drivers/net/wireless/realtek/rtw89/phy.h b/drivers/net/wireless/realtek/rtw89/phy.h index 082231ebbee5..d8df553b9cb0 100644 --- a/drivers/net/wireless/realtek/rtw89/phy.h +++ b/drivers/net/wireless/realtek/rtw89/phy.h @@ -129,6 +129,7 @@ #define EDCCA_HL_DIFF_NORMAL 8 #define RSSI_UNIT_CONVER 110 #define EDCCA_UNIT_CONVER 128 +#define EDCCA_PWROFST_DEFAULT 18 enum rtw89_phy_c2h_ra_func { RTW89_PHY_C2H_FUNC_STS_RPT, @@ -714,6 +715,19 @@ enum rtw89_phy_gain_band_be rtw89_subband_to_gain_band_be(enum rtw89_subband sub } } +struct rtw89_rfk_chan_desc { + /* desc is valid iff ch is non-zero */ + u8 ch; + + /* To avoid extending old chip code every time, each new + * field must be defined along with a bool flag in a positive way. + */
+ */ + bool has_band; + u8 band; + bool has_bw; + u8 bw; +}; + enum rtw89_rfk_flag { RTW89_RFK_F_WRF = 0, RTW89_RFK_F_WM = 1, @@ -949,5 +963,8 @@ enum rtw89_rf_path_bit rtw89_phy_get_kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); enum rtw89_rf_path rtw89_phy_get_syn_sel(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); +u8 rtw89_rfk_chan_lookup(struct rtw89_dev *rtwdev, + const struct rtw89_rfk_chan_desc *desc, u8 desc_nr, + const struct rtw89_chan *target_chan); #endif diff --git a/drivers/net/wireless/realtek/rtw89/reg.h b/drivers/net/wireless/realtek/rtw89/reg.h index 01cbd0312102..7df36f3bff0b 100644 --- a/drivers/net/wireless/realtek/rtw89/reg.h +++ b/drivers/net/wireless/realtek/rtw89/reg.h @@ -311,6 +311,9 @@ #define B_AX_S1_LDO2PWRCUT_F BIT(23) #define B_AX_S0_LDO_VSEL_F_MASK GENMASK(22, 21) +#define R_AX_DBG_WOW 0x0504 +#define B_AX_DBG_WOW_CPU_IO_RX_EN BIT(8) + #define R_AX_SEC_CTRL 0x0C00 #define B_AX_SEC_IDMEM_SIZE_CONFIG_MASK GENMASK(17, 16) @@ -3169,6 +3172,8 @@ #define R_AX_DLK_PROTECT_CTL_C1 0xEE02 #define B_AX_RX_DLK_CCA_TIME_MASK GENMASK(15, 8) #define B_AX_RX_DLK_DATA_TIME_MASK GENMASK(7, 4) +#define B_AX_RX_DLK_RST_EN BIT(1) +#define B_AX_RX_DLK_INT_EN BIT(0) #define R_AX_PLCP_HDR_FLTR 0xCE04 #define R_AX_PLCP_HDR_FLTR_C1 0xEE04 @@ -4313,6 +4318,8 @@ #define R_BE_WLCPU_PORT_PC 0x03FC +#define R_BE_DBG_WOW 0x0504 + #define R_BE_DCPU_PLATFORM_ENABLE 0x0888 #define B_BE_DCPU_SYM_DPLT_MEM_MUX_EN BIT(10) #define B_BE_DCPU_WARM_EN BIT(9) @@ -7811,6 +7818,8 @@ #define B_UPD_P0_EN BIT(31) #define R_EMLSR 0x0044 #define B_EMLSR_PARM GENMASK(27, 12) +#define R_CHK_LPS_STAT 0x0058 +#define B_CHK_LPS_STAT BIT(0) #define R_SPOOF_CG 0x00B4 #define B_SPOOF_CG_EN BIT(17) #define R_CHINFO_SEG 0x00B4 @@ -7827,6 +7836,7 @@ #define B_ANAPAR_PW15_H2 GENMASK(27, 26) #define R_ANAPAR 0x032C #define B_ANAPAR_15 GENMASK(31, 16) +#define B_ANAPAR_EN1 BIT(31) #define B_ANAPAR_ADCCLK BIT(30) #define B_ANAPAR_FLTRST BIT(22) #define B_ANAPAR_CRXBB GENMASK(18, 16) @@ -7868,10 +7878,12 @@ #define R_RXCCA_BE1 0x0520 #define B_RXCCA_BE1_DIS BIT(0) #define R_UPD_CLK_ADC 0x0700 +#define B_UPD_GEN_ON BIT(27) #define B_UPD_CLK_ADC_VAL GENMASK(26, 25) #define B_UPD_CLK_ADC_ON BIT(24) #define B_ENABLE_CCK BIT(5) #define R_RSTB_ASYNC 0x0704 +#define B_RSTB_ASYNC_BW80 GENMASK(9, 8) #define B_RSTB_ASYNC_ALL BIT(1) #define R_P0_ANT_SW 0x0728 #define B_P0_HW_ANTSW_DIS_BY_GNT_BT BIT(12) @@ -7935,6 +7947,8 @@ #define B_MEASUREMENT_TRIG_MSK BIT(2) #define B_CCX_TRIG_OPT_MSK BIT(1) #define B_CCX_EN_MSK BIT(0) +#define R_FAHM 0x0C1C +#define B_RXTD_CKEN BIT(2) #define R_IFS_COUNTER 0x0C28 #define B_IFS_CLM_PERIOD_MSK GENMASK(31, 16) #define B_IFS_CLM_COUNTER_UNIT_MSK GENMASK(15, 14) @@ -7968,6 +7982,7 @@ #define B_IQK_DPK_RST BIT(0) #define R_TX_COLLISION_T2R_ST 0x0C70 #define B_TX_COLLISION_T2R_ST_M GENMASK(25, 20) +#define B_TXRX_FORCE_VAL GENMASK(9, 0) #define R_TXGATING 0x0C74 #define B_TXGATING_EN BIT(4) #define R_TXRFC 0x0C7C @@ -8028,6 +8043,7 @@ #define B_P0_RFMODE_FTM_RX GENMASK(11, 0) #define R_P0_NRBW 0x12B8 #define B_P0_NRBW_DBG BIT(30) +#define B_P0_NRBW_RSTB BIT(28) #define R_S0_RXDC 0x12D4 #define B_S0_RXDC_I GENMASK(25, 16) #define B_S0_RXDC_Q GENMASK(31, 26) @@ -8109,6 +8125,8 @@ #define R_S0_ADDCK 0x1E00 #define B_S0_ADDCK_I GENMASK(9, 0) #define B_S0_ADDCK_Q GENMASK(19, 10) +#define R_TXCKEN_FORCE 0x2008 +#define B_TXCKEN_FORCE_ALL GENMASK(24, 0) #define R_EDCCA_RPT_SEL 0x20CC #define B_EDCCA_RPT_SEL_MSK GENMASK(2, 0) #define R_ADC_FIFO 0x20fc @@ -8264,6 +8282,7 @@ #define 
R_DCFO_COMP_S0 0x448C #define B_DCFO_COMP_S0_MSK GENMASK(11, 0) #define R_DCFO_WEIGHT 0x4490 +#define B_DAC_CLK_IDX BIT(31) #define B_DCFO_WEIGHT_MSK GENMASK(27, 24) #define R_DCFO_OPT 0x4494 #define B_DCFO_OPT_EN BIT(29) @@ -8379,6 +8398,7 @@ #define B_CDD_EVM_CHK_EN BIT(0) #define R_PATH0_BAND_SEL_V1 0x4738 #define B_PATH0_BAND_SEL_MSK_V1 BIT(17) +#define B_PATH0_BAND_NRBW_EN_V1 BIT(16) #define R_PATH0_BT_SHARE_V1 0x4738 #define B_PATH0_BT_SHARE_V1 BIT(19) #define R_PATH0_BTG_PATH_V1 0x4738 @@ -8422,6 +8442,7 @@ #define B_PATH1_G_TIA1_LNA6_OP1DB_V1 GENMASK(15, 8) #define R_PATH1_BAND_SEL_V1 0x4AA4 #define B_PATH1_BAND_SEL_MSK_V1 BIT(17) +#define B_PATH1_BAND_NRBW_EN_V1 BIT(16) #define R_PATH1_BT_SHARE_V1 0x4AA4 #define B_PATH1_BT_SHARE_V1 BIT(19) #define R_PATH1_BTG_PATH_V1 0x4AA4 @@ -8442,6 +8463,8 @@ #define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK_V1 BIT(30) #define B_SEG0R_PD_SPATIAL_REUSE_EN_MSK BIT(29) #define B_SEG0R_PD_LOWER_BOUND_MSK GENMASK(10, 6) +#define R_PWOFST 0x488C +#define B_PWOFST GENMASK(21, 17) #define R_2P4G_BAND 0x4970 #define B_2P4G_BAND_SEL BIT(1) #define R_FC0_BW 0x4974 @@ -8622,6 +8645,8 @@ #define B_P0_TMETER GENMASK(15, 10) #define B_P0_TMETER_DIS BIT(16) #define B_P0_TMETER_TRK BIT(24) +#define R_P0_ADCFF_EN 0x58C8 +#define B_P0_ADCFF_EN BIT(24) #define R_P1_TSSIC 0x7814 #define B_P1_TSSIC_BYPASS BIT(11) #define R_P0_TSSI_TRK 0x5818 @@ -8633,7 +8658,9 @@ #define B_P0_TSSI_EN BIT(31) #define B_P0_TSSI_AVG GENMASK(15, 12) #define R_P0_RFCTM 0x5864 +#define B_P0_CLKG_FORCE GENMASK(31, 30) #define B_P0_RFCTM_EN BIT(29) +#define B_P0_GOT_TXRX GENMASK(28, 27) #define B_P0_RFCTM_VAL GENMASK(25, 20) #define R_P0_RFCTM_RDY BIT(26) #define R_P0_TRSW 0x5868 @@ -8666,12 +8693,14 @@ #define B_P0_RFM_BT_EN BIT(5) #define B_P0_RFM_OUT GENMASK(4, 0) #define R_P0_PATH_RST 0x58AC +#define B_P0_PATH_RST BIT(27) #define R_P0_TXDPD 0x58D4 #define B_P0_TXDPD GENMASK(31, 28) #define R_P0_TXPW_RSTB 0x58DC #define B_P0_TXPW_RSTB_MANON BIT(30) #define B_P0_TXPW_RSTB_TSSI BIT(31) #define R_P0_TSSI_MV_AVG 0x58E4 +#define B_P0_TXPW_RSTB GENMASK(28, 27) #define B_P0_TSSI_MV_MIX GENMASK(19, 11) #define B_P0_TSSI_MV_AVG GENMASK(13, 11) #define B_P0_TSSI_MV_CLR BIT(14) @@ -8796,6 +8825,10 @@ #define B_P1_TSSI_ALIM2 GENMASK(29, 0) #define R_P1_TSSI_ADC_CLK 0x766c #define B_P1_TSSI_ADC_CLK GENMASK(17, 16) +#define R_P1_TXAGC_TH 0x7800 +#define B_P1_TXAGC_MAXMIN GENMASK(15, 0) +#define R_P1_TXPW_FORCE 0x780C +#define B_P1_TXPW_RDY BIT(15) #define R_P1_TSSIC 0x7814 #define B_P1_TSSIC_BYPASS BIT(11) #define R_P1_TMETER 0x7810 @@ -8811,14 +8844,20 @@ #define B_P1_TSSI_EN BIT(31) #define B_P1_TSSI_AVG GENMASK(15, 12) #define R_P1_RFCTM 0x7864 +#define B_P1_CLKG_FORCE GENMASK(31, 30) +#define B_P1_GOT_TXRX GENMASK(28, 27) #define R_P1_RFCTM_RDY BIT(26) #define B_P1_RFCTM_VAL GENMASK(25, 20) #define B_P1_RFCTM_DEL GENMASK(19, 11) #define R_P1_PATH_RST 0x78AC +#define B_P1_PATH_RST BIT(27) +#define R_P1_ADCFF_EN 0x78C8 +#define B_P1_ADCFF_EN BIT(24) #define R_P1_TXPW_RSTB 0x78DC #define B_P1_TXPW_RSTB_MANON BIT(30) #define B_P1_TXPW_RSTB_TSSI BIT(31) #define R_P1_TSSI_MV_AVG 0x78E4 +#define B_P1_TXPW_RSTB GENMASK(28, 27) #define B_P1_TSSI_MV_MIX GENMASK(19, 11) #define B_P1_TSSI_MV_AVG GENMASK(13, 11) #define B_P1_TSSI_MV_CLR BIT(14) @@ -9003,6 +9042,7 @@ #define R_IQRSN 0x8220 #define B_IQRSN_K1 BIT(28) #define B_IQRSN_K2 BIT(16) +#define R_DPD_CH0B 0x82BC #define R_RXCFIR_P0C0 0x8D40 #define R_RXCFIR_P0C1 0x8D84 #define R_RXCFIR_P0C2 0x8DC8 @@ -9036,15 +9076,18 @@ #define B_IQKINF2_FCNT GENMASK(23, 16) 
#define B_IQKINF2_KCNT GENMASK(15, 8) #define B_IQKINF2_NCTLV GENMASK(7, 0) +#define R_RFK_ST 0xBFF8 #define R_DCOF0 0xC000 #define B_DCOF0_RST BIT(17) #define B_DCOF0_V GENMASK(4, 1) #define R_DCOF1 0xC004 +#define B_DCOF1_VAL GENMASK(31, 20) #define B_DCOF1_RST BIT(17) #define B_DCOF1_S BIT(0) #define R_DCOF8 0xC020 #define B_DCOF8_V GENMASK(4, 1) #define R_DCOF9 0xC024 +#define B_DCOF9_VAL GENMASK(31, 20) #define B_DCOF9_RST BIT(17) #define R_DACK_S0P0 0xC040 #define B_DACK_S0P0_OK BIT(31) @@ -9095,6 +9138,7 @@ #define R_ADCMOD 0xC0E8 #define B_ADCMOD_LP GENMASK(31, 16) #define R_DCIM 0xC0EC +#define B_DCIM_RC GENMASK(23, 16) #define B_DCIM_FR GENMASK(14, 13) #define R_ADDCK0D 0xC0F0 #define B_ADDCK0D_VAL2 GENMASK(31, 26) @@ -9117,11 +9161,18 @@ #define B_ADDCKR0_DC GENMASK(15, 4) #define B_ADDCKR0_A1 GENMASK(9, 0) #define R_DACK10 0xC100 +#define B_DACK10_RST BIT(17) #define B_DACK10 GENMASK(4, 1) #define R_DACK1_K 0xc104 +#define B_DACK1_VAL GENMASK(31, 20) +#define B_DACK1_RST BIT(17) #define B_DACK1_EN BIT(0) #define R_DACK11 0xC120 #define B_DACK11 GENMASK(4, 1) +#define R_DACK2_K 0xC124 +#define B_DACK2_VAL GENMASK(31, 20) +#define B_DACK2_RST BIT(17) +#define B_DACK2_EN BIT(0) #define R_DACK_S1P0 0xC140 #define B_DACK_S1P0_OK BIT(31) #define R_DACK_BIAS10 0xC148 @@ -9170,6 +9221,11 @@ #define B_DACKN0_V GENMASK(21, 14) #define R_DACKN1_CTL 0xC224 #define B_DACKN1_V GENMASK(21, 14) +#define B_DACKN1_ON BIT(0) +#define R_DACKN2_CTL 0xC238 +#define B_DACKN2_ON BIT(0) +#define R_DACKN3_CTL 0xC24C +#define B_DACKN3_ON BIT(0) #define R_GAIN_MAP0 0xE44C #define B_GAIN_MAP0_EN BIT(0) #define R_GAIN_MAP1 0xE54C diff --git a/drivers/net/wireless/realtek/rtw89/regd.c b/drivers/net/wireless/realtek/rtw89/regd.c index 1a133914f673..a251b0e3b16e 100644 --- a/drivers/net/wireless/realtek/rtw89/regd.c +++ b/drivers/net/wireless/realtek/rtw89/regd.c @@ -714,7 +714,154 @@ exit: mutex_unlock(&rtwdev->mutex); } -static void __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev) +/* Maximum Transmit Power field (@raw) can be EIRP or PSD. + * Both units are 0.5 dB-based. Return a constraint in dB. 
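+ * The margin below is hw_deviation (3) + antenna_gain (10) + array_gain (6) = 19 steps of 0.5 dB, subtracted from @raw before halving into dB. + * A PSD constraint (dBm/MHz) is converted to EIRP over a 20 MHz reference as psd + 10 * log10(20) ~= psd + 13 dB; mlog20 = 1301 below stores 1000 * log10(20).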
+ */ +static s8 tpe_get_constraint(s8 raw) +{ + const u8 hw_deviation = 3; /* unit: 0.5 dB */ + const u8 antenna_gain = 10; /* unit: 0.5 dB */ + const u8 array_gain = 6; /* unit: 0.5 dB */ + const u8 offset = hw_deviation + antenna_gain + array_gain; + + return (raw - offset) / 2; +} + +static void tpe_intersect_constraint(struct rtw89_reg_6ghz_tpe *tpe, s8 cstr) +{ + if (tpe->valid) { + tpe->constraint = min(tpe->constraint, cstr); + return; + } + + tpe->constraint = cstr; + tpe->valid = true; +} + +static void tpe_deal_with_eirp(struct rtw89_reg_6ghz_tpe *tpe, + const struct ieee80211_parsed_tpe_eirp *eirp) +{ + unsigned int i; + s8 cstr; + + if (!eirp->valid) + return; + + for (i = 0; i < eirp->count; i++) { + cstr = tpe_get_constraint(eirp->power[i]); + tpe_intersect_constraint(tpe, cstr); + } +} + +static s8 tpe_convert_psd_to_eirp(s8 psd) +{ + static const unsigned int mlog20 = 1301; + + return psd + 10 * mlog20 / 1000; +} + +static void tpe_deal_with_psd(struct rtw89_reg_6ghz_tpe *tpe, + const struct ieee80211_parsed_tpe_psd *psd) +{ + unsigned int i; + s8 cstr_psd; + s8 cstr; + + if (!psd->valid) + return; + + for (i = 0; i < psd->count; i++) { + cstr_psd = tpe_get_constraint(psd->power[i]); + cstr = tpe_convert_psd_to_eirp(cstr_psd); + tpe_intersect_constraint(tpe, cstr); + } +} + +static void rtw89_calculate_tpe(struct rtw89_dev *rtwdev, + struct rtw89_reg_6ghz_tpe *result_tpe, + const struct ieee80211_parsed_tpe *parsed_tpe) +{ + static const u8 category = IEEE80211_TPE_CAT_6GHZ_DEFAULT; + + tpe_deal_with_eirp(result_tpe, &parsed_tpe->max_local[category]); + tpe_deal_with_eirp(result_tpe, &parsed_tpe->max_reg_client[category]); + tpe_deal_with_psd(result_tpe, &parsed_tpe->psd_local[category]); + tpe_deal_with_psd(result_tpe, &parsed_tpe->psd_reg_client[category]); +} + +static bool __rtw89_reg_6ghz_tpe_recalc(struct rtw89_dev *rtwdev) +{ + struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; + struct rtw89_reg_6ghz_tpe new = {}; + struct rtw89_vif *rtwvif; + bool changed = false; + + rtw89_for_each_rtwvif(rtwdev, rtwvif) { + const struct rtw89_reg_6ghz_tpe *tmp; + const struct rtw89_chan *chan; + + chan = rtw89_chan_get(rtwdev, rtwvif->sub_entity_idx); + if (chan->band_type != RTW89_BAND_6G) + continue; + + tmp = &rtwvif->reg_6ghz_tpe; + if (!tmp->valid) + continue; + + tpe_intersect_constraint(&new, tmp->constraint); + } + + if (memcmp(&regulatory->reg_6ghz_tpe, &new, + sizeof(regulatory->reg_6ghz_tpe)) != 0) + changed = true; + + if (changed) { + if (new.valid) + rtw89_debug(rtwdev, RTW89_DBG_REGD, + "recalc 6 GHz reg TPE to %d dBm\n", + new.constraint); + else + rtw89_debug(rtwdev, RTW89_DBG_REGD, + "recalc 6 GHz reg TPE to none\n"); + + regulatory->reg_6ghz_tpe = new; + } + + return changed; +} + +static int rtw89_reg_6ghz_tpe_recalc(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, bool active, + unsigned int *changed) +{ + struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); + struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; + struct rtw89_reg_6ghz_tpe *tpe = &rtwvif->reg_6ghz_tpe; + + memset(tpe, 0, sizeof(*tpe)); + + if (!active || rtwvif->reg_6ghz_power != RTW89_REG_6GHZ_POWER_STD) + goto bottom; + + rtw89_calculate_tpe(rtwdev, tpe, &bss_conf->tpe); + if (!tpe->valid) + goto bottom; + + if (tpe->constraint < RTW89_MIN_VALID_POWER_CONSTRAINT) { + rtw89_err(rtwdev, + "%s: constraint %d dBm is less than min valid val\n", + __func__, tpe->constraint); + + tpe->valid = false; + return -EINVAL; + } + +bottom: + *changed += __rtw89_reg_6ghz_tpe_recalc(rtwdev); + 
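+ /* rtw89_reg_6ghz_recalc() applies rtw89_core_set_chip_txpwr() once if either this helper or the power recalc bumped *changed. */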
return 0; +} + +static bool __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev) { struct rtw89_regulatory_info *regulatory = &rtwdev->regulatory; const struct rtw89_regd *regd = regulatory->regd; @@ -751,23 +898,21 @@ static void __rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev) } if (regulatory->reg_6ghz_power == sel) - return; + return false; rtw89_debug(rtwdev, RTW89_DBG_REGD, "recalc 6 GHz reg power type to %d\n", sel); regulatory->reg_6ghz_power = sel; - - rtw89_core_set_chip_txpwr(rtwdev); + return true; } -void rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev, - struct rtw89_vif *rtwvif, bool active) +static int rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev, + struct rtw89_vif *rtwvif, bool active, + unsigned int *changed) { struct ieee80211_vif *vif = rtwvif_to_vif(rtwvif); - lockdep_assert_held(&rtwdev->mutex); - if (active) { switch (vif->bss_conf.power_type) { case IEEE80211_REG_VLP_AP: @@ -787,5 +932,32 @@ void rtw89_reg_6ghz_power_recalc(struct rtw89_dev *rtwdev, rtwvif->reg_6ghz_power = RTW89_REG_6GHZ_POWER_DFLT; } - __rtw89_reg_6ghz_power_recalc(rtwdev); + *changed += __rtw89_reg_6ghz_power_recalc(rtwdev); + return 0; +} + +int rtw89_reg_6ghz_recalc(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif, + bool active) +{ + unsigned int changed = 0; + int ret; + + lockdep_assert_held(&rtwdev->mutex); + + /* The result of reg_6ghz_tpe may depend on reg_6ghz_power type, + * so must do reg_6ghz_tpe_recalc() after reg_6ghz_power_recalc(). + */ + + ret = rtw89_reg_6ghz_power_recalc(rtwdev, rtwvif, active, &changed); + if (ret) + return ret; + + ret = rtw89_reg_6ghz_tpe_recalc(rtwdev, rtwvif, active, &changed); + if (ret) + return ret; + + if (changed) + rtw89_core_set_chip_txpwr(rtwdev); + + return 0; } diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851b.c b/drivers/net/wireless/realtek/rtw89/rtw8851b.c index 87b51823244d..40cf84a79c46 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851b.c @@ -105,6 +105,10 @@ static const u32 rtw8851b_c2h_regs[RTW89_C2HREG_MAX] = { R_AX_C2HREG_DATA3 }; +static const u32 rtw8851b_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = { + R_AX_C2HREG_DATA3 + 3, R_AX_C2HREG_DATA3 + 3, +}; + static const struct rtw89_page_regs rtw8851b_page_regs = { .hci_fc_ctrl = R_AX_HCI_FC_CTRL, .ch_page_ctrl = R_AX_CH_PAGE_CTRL, @@ -2447,6 +2451,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = { .dig_table = NULL, .dig_regs = &rtw8851b_dig_regs, .tssi_dbw_table = NULL, + .support_macid_num = RTW89_MAX_MAC_ID_NUM, .support_chanctx_num = 0, .support_rnr = false, .support_bands = BIT(NL80211_BAND_2GHZ) | @@ -2508,7 +2513,7 @@ const struct rtw89_chip_info rtw8851b_chip_info = { .c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8}, .c2h_regs = rtw8851b_c2h_regs, .page_regs = &rtw8851b_page_regs, - .wow_reason_reg = R_AX_C2HREG_DATA3 + 3, + .wow_reason_reg = rtw8851b_wow_wakeup_regs, .cfo_src_fd = true, .cfo_hw_comp = true, .dcfo_comp = &rtw8851b_dcfo_comp, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8851be.c b/drivers/net/wireless/realtek/rtw89/rtw8851be.c index ec3629d95fda..d334924faec8 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8851be.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8851be.c @@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8851b_pci_info = { .rpwm_addr = R_AX_PCIE_HRPWM, .cpwm_addr = R_AX_CPWM, .mit_addr = R_AX_INT_MIT_RX, + .wp_sel_addr = 0, .tx_dma_ch_mask = BIT(RTW89_TXCH_ACH4) | BIT(RTW89_TXCH_ACH5) | BIT(RTW89_TXCH_ACH6) | 
BIT(RTW89_TXCH_ACH7) | BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11), diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852a.c b/drivers/net/wireless/realtek/rtw89/rtw8852a.c index e93cee1456bd..08e148328c62 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852a.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852a.c @@ -398,6 +398,10 @@ static const u32 rtw8852a_c2h_regs[RTW89_C2HREG_MAX] = { R_AX_C2HREG_DATA3 }; +static const u32 rtw8852a_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = { + R_AX_C2HREG_DATA3 + 3, R_AX_C2HREG_DATA3 + 3, +}; + static const struct rtw89_page_regs rtw8852a_page_regs = { .hci_fc_ctrl = R_AX_HCI_FC_CTRL, .ch_page_ctrl = R_AX_CH_PAGE_CTRL, @@ -2162,6 +2166,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .dig_table = &rtw89_8852a_phy_dig_table, .dig_regs = &rtw8852a_dig_regs, .tssi_dbw_table = NULL, + .support_macid_num = RTW89_MAX_MAC_ID_NUM, .support_chanctx_num = 1, .support_rnr = false, .support_bands = BIT(NL80211_BAND_2GHZ) | @@ -2224,7 +2229,7 @@ const struct rtw89_chip_info rtw8852a_chip_info = { .c2h_regs = rtw8852a_c2h_regs, .c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8}, .page_regs = &rtw8852a_page_regs, - .wow_reason_reg = R_AX_C2HREG_DATA3 + 3, + .wow_reason_reg = rtw8852a_wow_wakeup_regs, .cfo_src_fd = false, .cfo_hw_comp = false, .dcfo_comp = &rtw8852a_dcfo_comp, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c index fdee5dd4ba14..9a675e2193bc 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ae.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ae.c @@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8852a_pci_info = { .rpwm_addr = R_AX_PCIE_HRPWM, .cpwm_addr = R_AX_CPWM, .mit_addr = R_AX_INT_MIT_RX, + .wp_sel_addr = 0, .tx_dma_ch_mask = 0, .bd_idx_addr_low_power = NULL, .dma_addr_set = &rtw89_pci_ch_dma_addr_set, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.c b/drivers/net/wireless/realtek/rtw89/rtw8852b.c index d351096fa4b4..a22847a311ad 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.c @@ -8,6 +8,7 @@ #include "phy.h" #include "reg.h" #include "rtw8852b.h" +#include "rtw8852b_common.h" #include "rtw8852b_rfk.h" #include "rtw8852b_table.h" #include "txrx.h" @@ -65,167 +66,6 @@ static const struct rtw89_dle_mem rtw8852b_dle_mem_pcie[] = { NULL}, }; -static const struct rtw89_reg3_def rtw8852b_pmac_ht20_mcs7_tbl[] = { - {0x4580, 0x0000ffff, 0x0}, - {0x4580, 0xffff0000, 0x0}, - {0x4584, 0x0000ffff, 0x0}, - {0x4584, 0xffff0000, 0x0}, - {0x4580, 0x0000ffff, 0x1}, - {0x4578, 0x00ffffff, 0x2018b}, - {0x4570, 0x03ffffff, 0x7}, - {0x4574, 0x03ffffff, 0x32407}, - {0x45b8, 0x00000010, 0x0}, - {0x45b8, 0x00000100, 0x0}, - {0x45b8, 0x00000080, 0x0}, - {0x45b8, 0x00000008, 0x0}, - {0x45a0, 0x0000ff00, 0x0}, - {0x45a0, 0xff000000, 0x1}, - {0x45a4, 0x0000ff00, 0x2}, - {0x45a4, 0xff000000, 0x3}, - {0x45b8, 0x00000020, 0x0}, - {0x4568, 0xe0000000, 0x0}, - {0x45b8, 0x00000002, 0x1}, - {0x456c, 0xe0000000, 0x0}, - {0x45b4, 0x00006000, 0x0}, - {0x45b4, 0x00001800, 0x1}, - {0x45b8, 0x00000040, 0x0}, - {0x45b8, 0x00000004, 0x0}, - {0x45b8, 0x00000200, 0x0}, - {0x4598, 0xf8000000, 0x0}, - {0x45b8, 0x00100000, 0x0}, - {0x45a8, 0x00000fc0, 0x0}, - {0x45b8, 0x00200000, 0x0}, - {0x45b0, 0x00000038, 0x0}, - {0x45b0, 0x000001c0, 0x0}, - {0x45a0, 0x000000ff, 0x0}, - {0x45b8, 0x00400000, 0x0}, - {0x4590, 0x000007ff, 0x0}, - {0x45b0, 0x00000e00, 0x0}, - {0x45ac, 0x0000001f, 0x0}, - {0x45b8, 0x00800000, 0x0}, - 
{0x45a8, 0x0003f000, 0x0}, - {0x45b8, 0x01000000, 0x0}, - {0x45b0, 0x00007000, 0x0}, - {0x45b0, 0x00038000, 0x0}, - {0x45a0, 0x00ff0000, 0x0}, - {0x45b8, 0x02000000, 0x0}, - {0x4590, 0x003ff800, 0x0}, - {0x45b0, 0x001c0000, 0x0}, - {0x45ac, 0x000003e0, 0x0}, - {0x45b8, 0x04000000, 0x0}, - {0x45a8, 0x00fc0000, 0x0}, - {0x45b8, 0x08000000, 0x0}, - {0x45b0, 0x00e00000, 0x0}, - {0x45b0, 0x07000000, 0x0}, - {0x45a4, 0x000000ff, 0x0}, - {0x45b8, 0x10000000, 0x0}, - {0x4594, 0x000007ff, 0x0}, - {0x45b0, 0x38000000, 0x0}, - {0x45ac, 0x00007c00, 0x0}, - {0x45b8, 0x20000000, 0x0}, - {0x45a8, 0x3f000000, 0x0}, - {0x45b8, 0x40000000, 0x0}, - {0x45b4, 0x00000007, 0x0}, - {0x45b4, 0x00000038, 0x0}, - {0x45a4, 0x00ff0000, 0x0}, - {0x45b8, 0x80000000, 0x0}, - {0x4594, 0x003ff800, 0x0}, - {0x45b4, 0x000001c0, 0x0}, - {0x4598, 0xf8000000, 0x0}, - {0x45b8, 0x00100000, 0x0}, - {0x45a8, 0x00000fc0, 0x7}, - {0x45b8, 0x00200000, 0x0}, - {0x45b0, 0x00000038, 0x0}, - {0x45b0, 0x000001c0, 0x0}, - {0x45a0, 0x000000ff, 0x0}, - {0x45b4, 0x06000000, 0x0}, - {0x45b0, 0x00000007, 0x0}, - {0x45b8, 0x00080000, 0x0}, - {0x45a8, 0x0000003f, 0x0}, - {0x457c, 0xffe00000, 0x1}, - {0x4530, 0xffffffff, 0x0}, - {0x4588, 0x00003fff, 0x0}, - {0x4598, 0x000001ff, 0x0}, - {0x4534, 0xffffffff, 0x0}, - {0x4538, 0xffffffff, 0x0}, - {0x453c, 0xffffffff, 0x0}, - {0x4588, 0x0fffc000, 0x0}, - {0x4598, 0x0003fe00, 0x0}, - {0x4540, 0xffffffff, 0x0}, - {0x4544, 0xffffffff, 0x0}, - {0x4548, 0xffffffff, 0x0}, - {0x458c, 0x00003fff, 0x0}, - {0x4598, 0x07fc0000, 0x0}, - {0x454c, 0xffffffff, 0x0}, - {0x4550, 0xffffffff, 0x0}, - {0x4554, 0xffffffff, 0x0}, - {0x458c, 0x0fffc000, 0x0}, - {0x459c, 0x000001ff, 0x0}, - {0x4558, 0xffffffff, 0x0}, - {0x455c, 0xffffffff, 0x0}, - {0x4530, 0xffffffff, 0x4e790001}, - {0x4588, 0x00003fff, 0x0}, - {0x4598, 0x000001ff, 0x1}, - {0x4534, 0xffffffff, 0x0}, - {0x4538, 0xffffffff, 0x4b}, - {0x45ac, 0x38000000, 0x7}, - {0x4588, 0xf0000000, 0x0}, - {0x459c, 0x7e000000, 0x0}, - {0x45b8, 0x00040000, 0x0}, - {0x45b8, 0x00020000, 0x0}, - {0x4590, 0xffc00000, 0x0}, - {0x45b8, 0x00004000, 0x0}, - {0x4578, 0xff000000, 0x0}, - {0x45b8, 0x00000400, 0x0}, - {0x45b8, 0x00000800, 0x0}, - {0x45b8, 0x00001000, 0x0}, - {0x45b8, 0x00002000, 0x0}, - {0x45b4, 0x00018000, 0x0}, - {0x45ac, 0x07800000, 0x0}, - {0x45b4, 0x00000600, 0x2}, - {0x459c, 0x0001fe00, 0x80}, - {0x45ac, 0x00078000, 0x3}, - {0x459c, 0x01fe0000, 0x1}, -}; - -static const struct rtw89_reg3_def rtw8852b_btc_preagc_en_defs[] = { - {0x46D0, GENMASK(1, 0), 0x3}, - {0x4790, GENMASK(1, 0), 0x3}, - {0x4AD4, GENMASK(31, 0), 0xf}, - {0x4AE0, GENMASK(31, 0), 0xf}, - {0x4688, GENMASK(31, 24), 0x80}, - {0x476C, GENMASK(31, 24), 0x80}, - {0x4694, GENMASK(7, 0), 0x80}, - {0x4694, GENMASK(15, 8), 0x80}, - {0x4778, GENMASK(7, 0), 0x80}, - {0x4778, GENMASK(15, 8), 0x80}, - {0x4AE4, GENMASK(23, 0), 0x780D1E}, - {0x4AEC, GENMASK(23, 0), 0x780D1E}, - {0x469C, GENMASK(31, 26), 0x34}, - {0x49F0, GENMASK(31, 26), 0x34}, -}; - -static DECLARE_PHY_REG3_TBL(rtw8852b_btc_preagc_en_defs); - -static const struct rtw89_reg3_def rtw8852b_btc_preagc_dis_defs[] = { - {0x46D0, GENMASK(1, 0), 0x0}, - {0x4790, GENMASK(1, 0), 0x0}, - {0x4AD4, GENMASK(31, 0), 0x60}, - {0x4AE0, GENMASK(31, 0), 0x60}, - {0x4688, GENMASK(31, 24), 0x1a}, - {0x476C, GENMASK(31, 24), 0x1a}, - {0x4694, GENMASK(7, 0), 0x2a}, - {0x4694, GENMASK(15, 8), 0x2a}, - {0x4778, GENMASK(7, 0), 0x2a}, - {0x4778, GENMASK(15, 8), 0x2a}, - {0x4AE4, GENMASK(23, 0), 0x79E99E}, - {0x4AEC, GENMASK(23, 0), 0x79E99E}, - {0x469C, GENMASK(31, 26), 0x26}, 
- {0x49F0, GENMASK(31, 26), 0x26}, -}; - -static DECLARE_PHY_REG3_TBL(rtw8852b_btc_preagc_dis_defs); - static const u32 rtw8852b_h2c_regs[RTW89_H2CREG_MAX] = { R_AX_H2CREG_DATA0, R_AX_H2CREG_DATA1, R_AX_H2CREG_DATA2, R_AX_H2CREG_DATA3 @@ -236,6 +76,10 @@ static const u32 rtw8852b_c2h_regs[RTW89_C2HREG_MAX] = { R_AX_C2HREG_DATA3 }; +static const u32 rtw8852b_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = { + R_AX_C2HREG_DATA3 + 3, R_AX_C2HREG_DATA3 + 3, +}; + static const struct rtw89_page_regs rtw8852b_page_regs = { .hci_fc_ctrl = R_AX_HCI_FC_CTRL, .ch_page_ctrl = R_AX_CH_PAGE_CTRL, @@ -403,6 +247,8 @@ static int rtw8852b_pwr_on_func(struct rtw89_dev *rtwdev) u32 val32; u32 ret; + rtw8852b_pwr_sps_ana(rtwdev); + rtw89_write32_clr(rtwdev, R_AX_SYS_PW_CTRL, B_AX_AFSM_WLSUS_EN | B_AX_AFSM_PCIE_SUS_EN); rtw89_write32_set(rtwdev, R_AX_SYS_PW_CTRL, B_AX_DIS_WLBT_PDNSUSEN_SOPC); @@ -530,9 +376,7 @@ static int rtw8852b_pwr_off_func(struct rtw89_dev *rtwdev) u32 val32; u32 ret; - /* Only do once during probe stage after reading efuse */ - if (!test_bit(RTW89_FLAG_PROBE_DONE, rtwdev->flags)) - rtw8852b_pwr_sps_ana(rtwdev); + rtw8852b_pwr_sps_ana(rtwdev); ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_ANAPAR_WL, XTAL_SI_RFC2RF, XTAL_SI_RFC2RF); @@ -591,806 +435,6 @@ static int rtw8852b_pwr_off_func(struct rtw89_dev *rtwdev) return 0; } -static void rtw8852be_efuse_parsing(struct rtw89_efuse *efuse, - struct rtw8852b_efuse *map) -{ - ether_addr_copy(efuse->addr, map->e.mac_addr); - efuse->rfe_type = map->rfe_type; - efuse->xtal_cap = map->xtal_k; -} - -static void rtw8852b_efuse_parsing_tssi(struct rtw89_dev *rtwdev, - struct rtw8852b_efuse *map) -{ - struct rtw89_tssi_info *tssi = &rtwdev->tssi; - struct rtw8852b_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi}; - u8 i, j; - - tssi->thermal[RF_PATH_A] = map->path_a_therm; - tssi->thermal[RF_PATH_B] = map->path_b_therm; - - for (i = 0; i < RF_PATH_NUM_8852B; i++) { - memcpy(tssi->tssi_cck[i], ofst[i]->cck_tssi, - sizeof(ofst[i]->cck_tssi)); - - for (j = 0; j < TSSI_CCK_CH_GROUP_NUM; j++) - rtw89_debug(rtwdev, RTW89_DBG_TSSI, - "[TSSI][EFUSE] path=%d cck[%d]=0x%x\n", - i, j, tssi->tssi_cck[i][j]); - - memcpy(tssi->tssi_mcs[i], ofst[i]->bw40_tssi, - sizeof(ofst[i]->bw40_tssi)); - memcpy(tssi->tssi_mcs[i] + TSSI_MCS_2G_CH_GROUP_NUM, - ofst[i]->bw40_1s_tssi_5g, sizeof(ofst[i]->bw40_1s_tssi_5g)); - - for (j = 0; j < TSSI_MCS_CH_GROUP_NUM; j++) - rtw89_debug(rtwdev, RTW89_DBG_TSSI, - "[TSSI][EFUSE] path=%d mcs[%d]=0x%x\n", - i, j, tssi->tssi_mcs[i][j]); - } -} - -static bool _decode_efuse_gain(u8 data, s8 *high, s8 *low) -{ - if (high) - *high = sign_extend32(FIELD_GET(GENMASK(7, 4), data), 3); - if (low) - *low = sign_extend32(FIELD_GET(GENMASK(3, 0), data), 3); - - return data != 0xff; -} - -static void rtw8852b_efuse_parsing_gain_offset(struct rtw89_dev *rtwdev, - struct rtw8852b_efuse *map) -{ - struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain; - bool valid = false; - - valid |= _decode_efuse_gain(map->rx_gain_2g_cck, - &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_CCK], - &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_CCK]); - valid |= _decode_efuse_gain(map->rx_gain_2g_ofdm, - &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_OFDM], - &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_OFDM]); - valid |= _decode_efuse_gain(map->rx_gain_5g_low, - &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_LOW], - &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_LOW]); - valid |= _decode_efuse_gain(map->rx_gain_5g_mid, - 
&gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_MID], - &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_MID]); - valid |= _decode_efuse_gain(map->rx_gain_5g_high, - &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_HIGH], - &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_HIGH]); - - gain->offset_valid = valid; -} - -static int rtw8852b_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map, - enum rtw89_efuse_block block) -{ - struct rtw89_efuse *efuse = &rtwdev->efuse; - struct rtw8852b_efuse *map; - - map = (struct rtw8852b_efuse *)log_map; - - efuse->country_code[0] = map->country_code[0]; - efuse->country_code[1] = map->country_code[1]; - rtw8852b_efuse_parsing_tssi(rtwdev, map); - rtw8852b_efuse_parsing_gain_offset(rtwdev, map); - - switch (rtwdev->hci.type) { - case RTW89_HCI_TYPE_PCIE: - rtw8852be_efuse_parsing(efuse, map); - break; - default: - return -EOPNOTSUPP; - } - - rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type); - - return 0; -} - -static void rtw8852b_phycap_parsing_power_cal(struct rtw89_dev *rtwdev, u8 *phycap_map) -{ -#define PWR_K_CHK_OFFSET 0x5E9 -#define PWR_K_CHK_VALUE 0xAA - u32 offset = PWR_K_CHK_OFFSET - rtwdev->chip->phycap_addr; - - if (phycap_map[offset] == PWR_K_CHK_VALUE) - rtwdev->efuse.power_k_valid = true; -} - -static void rtw8852b_phycap_parsing_tssi(struct rtw89_dev *rtwdev, u8 *phycap_map) -{ - struct rtw89_tssi_info *tssi = &rtwdev->tssi; - static const u32 tssi_trim_addr[RF_PATH_NUM_8852B] = {0x5D6, 0x5AB}; - u32 addr = rtwdev->chip->phycap_addr; - bool pg = false; - u32 ofst; - u8 i, j; - - for (i = 0; i < RF_PATH_NUM_8852B; i++) { - for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) { - /* addrs are in decreasing order */ - ofst = tssi_trim_addr[i] - addr - j; - tssi->tssi_trim[i][j] = phycap_map[ofst]; - - if (phycap_map[ofst] != 0xff) - pg = true; - } - } - - if (!pg) { - memset(tssi->tssi_trim, 0, sizeof(tssi->tssi_trim)); - rtw89_debug(rtwdev, RTW89_DBG_TSSI, - "[TSSI][TRIM] no PG, set all trim info to 0\n"); - } - - for (i = 0; i < RF_PATH_NUM_8852B; i++) - for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) - rtw89_debug(rtwdev, RTW89_DBG_TSSI, - "[TSSI] path=%d idx=%d trim=0x%x addr=0x%x\n", - i, j, tssi->tssi_trim[i][j], - tssi_trim_addr[i] - j); -} - -static void rtw8852b_phycap_parsing_thermal_trim(struct rtw89_dev *rtwdev, - u8 *phycap_map) -{ - struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; - static const u32 thm_trim_addr[RF_PATH_NUM_8852B] = {0x5DF, 0x5DC}; - u32 addr = rtwdev->chip->phycap_addr; - u8 i; - - for (i = 0; i < RF_PATH_NUM_8852B; i++) { - info->thermal_trim[i] = phycap_map[thm_trim_addr[i] - addr]; - - rtw89_debug(rtwdev, RTW89_DBG_RFK, - "[THERMAL][TRIM] path=%d thermal_trim=0x%x\n", - i, info->thermal_trim[i]); - - if (info->thermal_trim[i] != 0xff) - info->pg_thermal_trim = true; - } -} - -static void rtw8852b_thermal_trim(struct rtw89_dev *rtwdev) -{ -#define __thm_setting(raw) \ -({ \ - u8 __v = (raw); \ - ((__v & 0x1) << 3) | ((__v & 0x1f) >> 1); \ -}) - struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; - u8 i, val; - - if (!info->pg_thermal_trim) { - rtw89_debug(rtwdev, RTW89_DBG_RFK, - "[THERMAL][TRIM] no PG, do nothing\n"); - - return; - } - - for (i = 0; i < RF_PATH_NUM_8852B; i++) { - val = __thm_setting(info->thermal_trim[i]); - rtw89_write_rf(rtwdev, i, RR_TM2, RR_TM2_OFF, val); - - rtw89_debug(rtwdev, RTW89_DBG_RFK, - "[THERMAL][TRIM] path=%d thermal_setting=0x%x\n", - i, val); - } -#undef __thm_setting -} - -static void rtw8852b_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev, - u8 *phycap_map) -{ - 
struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; - static const u32 pabias_trim_addr[RF_PATH_NUM_8852B] = {0x5DE, 0x5DB}; - u32 addr = rtwdev->chip->phycap_addr; - u8 i; - - for (i = 0; i < RF_PATH_NUM_8852B; i++) { - info->pa_bias_trim[i] = phycap_map[pabias_trim_addr[i] - addr]; - - rtw89_debug(rtwdev, RTW89_DBG_RFK, - "[PA_BIAS][TRIM] path=%d pa_bias_trim=0x%x\n", - i, info->pa_bias_trim[i]); - - if (info->pa_bias_trim[i] != 0xff) - info->pg_pa_bias_trim = true; - } -} - -static void rtw8852b_pa_bias_trim(struct rtw89_dev *rtwdev) -{ - struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; - u8 pabias_2g, pabias_5g; - u8 i; - - if (!info->pg_pa_bias_trim) { - rtw89_debug(rtwdev, RTW89_DBG_RFK, - "[PA_BIAS][TRIM] no PG, do nothing\n"); - - return; - } - - for (i = 0; i < RF_PATH_NUM_8852B; i++) { - pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]); - pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]); - - rtw89_debug(rtwdev, RTW89_DBG_RFK, - "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n", - i, pabias_2g, pabias_5g); - - rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG, pabias_2g); - rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA, pabias_5g); - } -} - -static void rtw8852b_phycap_parsing_gain_comp(struct rtw89_dev *rtwdev, u8 *phycap_map) -{ - static const u32 comp_addrs[][RTW89_SUBBAND_2GHZ_5GHZ_NR] = { - {0x5BB, 0x5BA, 0, 0x5B9, 0x5B8}, - {0x590, 0x58F, 0, 0x58E, 0x58D}, - }; - struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain; - u32 phycap_addr = rtwdev->chip->phycap_addr; - bool valid = false; - int path, i; - u8 data; - - for (path = 0; path < 2; path++) - for (i = 0; i < RTW89_SUBBAND_2GHZ_5GHZ_NR; i++) { - if (comp_addrs[path][i] == 0) - continue; - - data = phycap_map[comp_addrs[path][i] - phycap_addr]; - valid |= _decode_efuse_gain(data, NULL, - &gain->comp[path][i]); - } - - gain->comp_valid = valid; -} - -static int rtw8852b_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map) -{ - rtw8852b_phycap_parsing_power_cal(rtwdev, phycap_map); - rtw8852b_phycap_parsing_tssi(rtwdev, phycap_map); - rtw8852b_phycap_parsing_thermal_trim(rtwdev, phycap_map); - rtw8852b_phycap_parsing_pa_bias_trim(rtwdev, phycap_map); - rtw8852b_phycap_parsing_gain_comp(rtwdev, phycap_map); - - return 0; -} - -static void rtw8852b_power_trim(struct rtw89_dev *rtwdev) -{ - rtw8852b_thermal_trim(rtwdev); - rtw8852b_pa_bias_trim(rtwdev); -} - -static void rtw8852b_set_channel_mac(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - u8 mac_idx) -{ - u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_AX_WMAC_RFMOD, mac_idx); - u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx); - u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXRATE_CHK, mac_idx); - u8 txsc20 = 0, txsc40 = 0; - - switch (chan->band_width) { - case RTW89_CHANNEL_WIDTH_80: - txsc40 = rtw89_phy_get_txsc(rtwdev, chan, RTW89_CHANNEL_WIDTH_40); - fallthrough; - case RTW89_CHANNEL_WIDTH_40: - txsc20 = rtw89_phy_get_txsc(rtwdev, chan, RTW89_CHANNEL_WIDTH_20); - break; - default: - break; - } - - switch (chan->band_width) { - case RTW89_CHANNEL_WIDTH_80: - rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(1)); - rtw89_write32(rtwdev, sub_carr, txsc20 | (txsc40 << 4)); - break; - case RTW89_CHANNEL_WIDTH_40: - rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(0)); - rtw89_write32(rtwdev, sub_carr, txsc20); - break; - case RTW89_CHANNEL_WIDTH_20: - rtw89_write8_clr(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK); - rtw89_write32(rtwdev, sub_carr, 0); - break; - default: - break; - } 
- - if (chan->channel > 14) { - rtw89_write8_clr(rtwdev, chk_rate, B_AX_BAND_MODE); - rtw89_write8_set(rtwdev, chk_rate, - B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6); - } else { - rtw89_write8_set(rtwdev, chk_rate, B_AX_BAND_MODE); - rtw89_write8_clr(rtwdev, chk_rate, - B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6); - } -} - -static const u32 rtw8852b_sco_barker_threshold[14] = { - 0x1cfea, 0x1d0e1, 0x1d1d7, 0x1d2cd, 0x1d3c3, 0x1d4b9, 0x1d5b0, 0x1d6a6, - 0x1d79c, 0x1d892, 0x1d988, 0x1da7f, 0x1db75, 0x1ddc4 -}; - -static const u32 rtw8852b_sco_cck_threshold[14] = { - 0x27de3, 0x27f35, 0x28088, 0x281da, 0x2832d, 0x2847f, 0x285d2, 0x28724, - 0x28877, 0x289c9, 0x28b1c, 0x28c6e, 0x28dc1, 0x290ed -}; - -static void rtw8852b_ctrl_sco_cck(struct rtw89_dev *rtwdev, u8 primary_ch) -{ - u8 ch_element = primary_ch - 1; - - rtw89_phy_write32_mask(rtwdev, R_RXSCOBC, B_RXSCOBC_TH, - rtw8852b_sco_barker_threshold[ch_element]); - rtw89_phy_write32_mask(rtwdev, R_RXSCOCCK, B_RXSCOCCK_TH, - rtw8852b_sco_cck_threshold[ch_element]); -} - -static u8 rtw8852b_sco_mapping(u8 central_ch) -{ - if (central_ch == 1) - return 109; - else if (central_ch >= 2 && central_ch <= 6) - return 108; - else if (central_ch >= 7 && central_ch <= 10) - return 107; - else if (central_ch >= 11 && central_ch <= 14) - return 106; - else if (central_ch == 36 || central_ch == 38) - return 51; - else if (central_ch >= 40 && central_ch <= 58) - return 50; - else if (central_ch >= 60 && central_ch <= 64) - return 49; - else if (central_ch == 100 || central_ch == 102) - return 48; - else if (central_ch >= 104 && central_ch <= 126) - return 47; - else if (central_ch >= 128 && central_ch <= 151) - return 46; - else if (central_ch >= 153 && central_ch <= 177) - return 45; - else - return 0; -} - -struct rtw8852b_bb_gain { - u32 gain_g[BB_PATH_NUM_8852B]; - u32 gain_a[BB_PATH_NUM_8852B]; - u32 gain_mask; -}; - -static const struct rtw8852b_bb_gain bb_gain_lna[LNA_GAIN_NUM] = { - { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740}, - .gain_mask = 0x00ff0000 }, - { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740}, - .gain_mask = 0xff000000 }, - { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744}, - .gain_mask = 0x000000ff }, - { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744}, - .gain_mask = 0x0000ff00 }, - { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744}, - .gain_mask = 0x00ff0000 }, - { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744}, - .gain_mask = 0xff000000 }, - { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748}, - .gain_mask = 0x000000ff }, -}; - -static const struct rtw8852b_bb_gain bb_gain_tia[TIA_GAIN_NUM] = { - { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748}, - .gain_mask = 0x00ff0000 }, - { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748}, - .gain_mask = 0xff000000 }, -}; - -static void rtw8852b_set_gain_error(struct rtw89_dev *rtwdev, - enum rtw89_subband subband, - enum rtw89_rf_path path) -{ - const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax; - u8 gain_band = rtw89_subband_to_bb_gain_band(subband); - s32 val; - u32 reg; - u32 mask; - int i; - - for (i = 0; i < LNA_GAIN_NUM; i++) { - if (subband == RTW89_CH_2G) - reg = bb_gain_lna[i].gain_g[path]; - else - reg = bb_gain_lna[i].gain_a[path]; - - mask = bb_gain_lna[i].gain_mask; - val = gain->lna_gain[gain_band][path][i]; - rtw89_phy_write32_mask(rtwdev, reg, mask, val); - } - - for (i = 0; i < TIA_GAIN_NUM; i++) { - if (subband == RTW89_CH_2G) - reg = bb_gain_tia[i].gain_g[path]; - else - reg = 
bb_gain_tia[i].gain_a[path]; - - mask = bb_gain_tia[i].gain_mask; - val = gain->tia_gain[gain_band][path][i]; - rtw89_phy_write32_mask(rtwdev, reg, mask, val); - } -} - -static void rtw8852b_set_gain_offset(struct rtw89_dev *rtwdev, - enum rtw89_subband subband, - enum rtw89_phy_idx phy_idx) -{ - static const u32 gain_err_addr[2] = {R_P0_AGC_RSVD, R_P1_AGC_RSVD}; - static const u32 rssi_ofst_addr[2] = {R_PATH0_G_TIA1_LNA6_OP1DB_V1, - R_PATH1_G_TIA1_LNA6_OP1DB_V1}; - struct rtw89_hal *hal = &rtwdev->hal; - struct rtw89_phy_efuse_gain *efuse_gain = &rtwdev->efuse_gain; - enum rtw89_gain_offset gain_ofdm_band; - s32 offset_a, offset_b; - s32 offset_ofdm, offset_cck; - s32 tmp; - u8 path; - - if (!efuse_gain->comp_valid) - goto next; - - for (path = RF_PATH_A; path < BB_PATH_NUM_8852B; path++) { - tmp = efuse_gain->comp[path][subband]; - tmp = clamp_t(s32, tmp << 2, S8_MIN, S8_MAX); - rtw89_phy_write32_mask(rtwdev, gain_err_addr[path], MASKBYTE0, tmp); - } - -next: - if (!efuse_gain->offset_valid) - return; - - gain_ofdm_band = rtw89_subband_to_gain_offset_band_of_ofdm(subband); - - offset_a = -efuse_gain->offset[RF_PATH_A][gain_ofdm_band]; - offset_b = -efuse_gain->offset[RF_PATH_B][gain_ofdm_band]; - - tmp = -((offset_a << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2)); - tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX); - rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[RF_PATH_A], B_PATH0_R_G_OFST_MASK, tmp); - - tmp = -((offset_b << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2)); - tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX); - rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[RF_PATH_B], B_PATH0_R_G_OFST_MASK, tmp); - - if (hal->antenna_rx == RF_B) { - offset_ofdm = -efuse_gain->offset[RF_PATH_B][gain_ofdm_band]; - offset_cck = -efuse_gain->offset[RF_PATH_B][0]; - } else { - offset_ofdm = -efuse_gain->offset[RF_PATH_A][gain_ofdm_band]; - offset_cck = -efuse_gain->offset[RF_PATH_A][0]; - } - - tmp = (offset_ofdm << 4) + efuse_gain->offset_base[RTW89_PHY_0]; - tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX); - rtw89_phy_write32_idx(rtwdev, R_P0_RPL1, B_P0_RPL1_BIAS_MASK, tmp, phy_idx); - - tmp = (offset_ofdm << 4) + efuse_gain->rssi_base[RTW89_PHY_0]; - tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX); - rtw89_phy_write32_idx(rtwdev, R_P1_RPL1, B_P0_RPL1_BIAS_MASK, tmp, phy_idx); - - if (subband == RTW89_CH_2G) { - tmp = (offset_cck << 3) + (efuse_gain->offset_base[RTW89_PHY_0] >> 1); - tmp = clamp_t(s32, tmp, S8_MIN >> 1, S8_MAX >> 1); - rtw89_phy_write32_mask(rtwdev, R_RX_RPL_OFST, - B_RX_RPL_OFST_CCK_MASK, tmp); - } -} - -static -void rtw8852b_set_rxsc_rpl_comp(struct rtw89_dev *rtwdev, enum rtw89_subband subband) -{ - const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax; - u8 band = rtw89_subband_to_bb_gain_band(subband); - u32 val; - - val = FIELD_PREP(B_P0_RPL1_20_MASK, (gain->rpl_ofst_20[band][RF_PATH_A] + - gain->rpl_ofst_20[band][RF_PATH_B]) / 2) | - FIELD_PREP(B_P0_RPL1_40_MASK, (gain->rpl_ofst_40[band][RF_PATH_A][0] + - gain->rpl_ofst_40[band][RF_PATH_B][0]) / 2) | - FIELD_PREP(B_P0_RPL1_41_MASK, (gain->rpl_ofst_40[band][RF_PATH_A][1] + - gain->rpl_ofst_40[band][RF_PATH_B][1]) / 2); - val >>= B_P0_RPL1_SHIFT; - rtw89_phy_write32_mask(rtwdev, R_P0_RPL1, B_P0_RPL1_MASK, val); - rtw89_phy_write32_mask(rtwdev, R_P1_RPL1, B_P0_RPL1_MASK, val); - - val = FIELD_PREP(B_P0_RTL2_42_MASK, (gain->rpl_ofst_40[band][RF_PATH_A][2] + - gain->rpl_ofst_40[band][RF_PATH_B][2]) / 2) | - FIELD_PREP(B_P0_RTL2_80_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][0] + - gain->rpl_ofst_80[band][RF_PATH_B][0]) / 2) | - 
FIELD_PREP(B_P0_RTL2_81_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][1] + - gain->rpl_ofst_80[band][RF_PATH_B][1]) / 2) | - FIELD_PREP(B_P0_RTL2_8A_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][10] + - gain->rpl_ofst_80[band][RF_PATH_B][10]) / 2); - rtw89_phy_write32(rtwdev, R_P0_RPL2, val); - rtw89_phy_write32(rtwdev, R_P1_RPL2, val); - - val = FIELD_PREP(B_P0_RTL3_82_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][2] + - gain->rpl_ofst_80[band][RF_PATH_B][2]) / 2) | - FIELD_PREP(B_P0_RTL3_83_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][3] + - gain->rpl_ofst_80[band][RF_PATH_B][3]) / 2) | - FIELD_PREP(B_P0_RTL3_84_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][4] + - gain->rpl_ofst_80[band][RF_PATH_B][4]) / 2) | - FIELD_PREP(B_P0_RTL3_89_MASK, (gain->rpl_ofst_80[band][RF_PATH_A][9] + - gain->rpl_ofst_80[band][RF_PATH_B][9]) / 2); - rtw89_phy_write32(rtwdev, R_P0_RPL3, val); - rtw89_phy_write32(rtwdev, R_P1_RPL3, val); -} - -static void rtw8852b_ctrl_ch(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx) -{ - u8 central_ch = chan->channel; - u8 subband = chan->subband_type; - u8 sco_comp; - bool is_2g = central_ch <= 14; - - /* Path A */ - if (is_2g) - rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1, - B_PATH0_BAND_SEL_MSK_V1, 1, phy_idx); - else - rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1, - B_PATH0_BAND_SEL_MSK_V1, 0, phy_idx); - - /* Path B */ - if (is_2g) - rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1, - B_PATH1_BAND_SEL_MSK_V1, 1, phy_idx); - else - rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1, - B_PATH1_BAND_SEL_MSK_V1, 0, phy_idx); - - /* SCO compensate FC setting */ - sco_comp = rtw8852b_sco_mapping(central_ch); - rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_INV, sco_comp, phy_idx); - - if (chan->band_type == RTW89_BAND_6G) - return; - - /* CCK parameters */ - if (central_ch == 14) { - rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, 0x3b13ff); - rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, 0x1c42de); - rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfdb0ad); - rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, 0xf60f6e); - rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, 0xfd8f92); - rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0x2d011); - rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0x1c02c); - rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, 0xfff00a); - } else { - rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, 0x3d23ff); - rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, 0x29b354); - rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfc1c8); - rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, 0xfdb053); - rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, 0xf86f9a); - rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0xfaef92); - rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0xfe5fcc); - rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, 0xffdff5); - } - - rtw8852b_set_gain_error(rtwdev, subband, RF_PATH_A); - rtw8852b_set_gain_error(rtwdev, subband, RF_PATH_B); - rtw8852b_set_gain_offset(rtwdev, subband, phy_idx); - rtw8852b_set_rxsc_rpl_comp(rtwdev, subband); -} - -static void rtw8852b_bw_setting(struct rtw89_dev *rtwdev, u8 bw, u8 path) -{ - static const u32 adc_sel[2] = {0xC0EC, 0xC1EC}; - static const u32 wbadc_sel[2] = {0xC0E4, 0xC1E4}; - - switch (bw) { - case RTW89_CHANNEL_WIDTH_5: - rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x1); - rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x0); - break; - case 
RTW89_CHANNEL_WIDTH_10: - rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x2); - rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x1); - break; - case RTW89_CHANNEL_WIDTH_20: - rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0); - rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2); - break; - case RTW89_CHANNEL_WIDTH_40: - rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0); - rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2); - break; - case RTW89_CHANNEL_WIDTH_80: - rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0); - rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2); - break; - default: - rtw89_warn(rtwdev, "Fail to set ADC\n"); - } -} - -static void rtw8852b_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_ch, u8 bw, - enum rtw89_phy_idx phy_idx) -{ - u32 rx_path_0; - - switch (bw) { - case RTW89_CHANNEL_WIDTH_5: - rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x1, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx); - - /*Set RF mode at 3 */ - rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, - B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, - B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx); - break; - case RTW89_CHANNEL_WIDTH_10: - rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x2, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx); - - /*Set RF mode at 3 */ - rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, - B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, - B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx); - break; - case RTW89_CHANNEL_WIDTH_20: - rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx); - - /*Set RF mode at 3 */ - rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, - B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, - B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx); - break; - case RTW89_CHANNEL_WIDTH_40: - rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x1, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, - pri_ch, phy_idx); - - /*Set RF mode at 3 */ - rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, - B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, - B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx); - /*CCK primary channel */ - if (pri_ch == RTW89_SC_20_UPPER) - rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 1); - else - rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 0); - - break; - case RTW89_CHANNEL_WIDTH_80: - rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x2, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, - pri_ch, phy_idx); - - /*Set RF mode at A */ - rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, - B_P0_RFMODE_ORI_RX_ALL, 0xaaa, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, - B_P1_RFMODE_ORI_RX_ALL, 0xaaa, phy_idx); - break; - default: - rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri 
ch:%d)\n", bw, - pri_ch); - } - - rtw8852b_bw_setting(rtwdev, bw, RF_PATH_A); - rtw8852b_bw_setting(rtwdev, bw, RF_PATH_B); - - rx_path_0 = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, - phy_idx); - if (rx_path_0 == 0x1) - rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, - B_P1_RFMODE_ORI_RX_ALL, 0x111, phy_idx); - else if (rx_path_0 == 0x2) - rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, - B_P0_RFMODE_ORI_RX_ALL, 0x111, phy_idx); -} - -static void rtw8852b_ctrl_cck_en(struct rtw89_dev *rtwdev, bool cck_en) -{ - if (cck_en) { - rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 1); - rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0); - } else { - rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 0); - rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 1); - } -} - -static void rtw8852b_5m_mask(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx) -{ - u8 pri_ch = chan->pri_ch_idx; - bool mask_5m_low; - bool mask_5m_en; - - switch (chan->band_width) { - case RTW89_CHANNEL_WIDTH_40: - /* Prich=1: Mask 5M High, Prich=2: Mask 5M Low */ - mask_5m_en = true; - mask_5m_low = pri_ch == RTW89_SC_20_LOWER; - break; - case RTW89_CHANNEL_WIDTH_80: - /* Prich=3: Mask 5M High, Prich=4: Mask 5M Low, Else: Disable */ - mask_5m_en = pri_ch == RTW89_SC_20_UPMOST || - pri_ch == RTW89_SC_20_LOWEST; - mask_5m_low = pri_ch == RTW89_SC_20_LOWEST; - break; - default: - mask_5m_en = false; - break; - } - - if (!mask_5m_en) { - rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x0); - rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT_V1, - B_ASSIGN_SBD_OPT_EN_V1, 0x0, phy_idx); - return; - } - - if (mask_5m_low) { - rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_TH, 0x4); - rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB2, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB0, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_TH, 0x4); - rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB2, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB0, 0x1); - } else { - rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_TH, 0x4); - rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB2, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB0, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_TH, 0x4); - rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB2, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB0, 0x0); - } - rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT_V1, - B_ASSIGN_SBD_OPT_EN_V1, 0x1, phy_idx); -} - -static void rtw8852b_bb_reset_all(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) -{ - rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx); - fsleep(1); - rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, phy_idx); - 
rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx); - rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx); -} - static void rtw8852b_bb_reset_en(struct rtw89_dev *rtwdev, enum rtw89_band band, enum rtw89_phy_idx phy_idx, bool en) { @@ -1422,87 +466,20 @@ static void rtw8852b_bb_reset(struct rtw89_dev *rtwdev, rtw89_phy_write32_set(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN); rtw89_phy_write32_set(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON); rtw89_phy_write32_set(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN); - rtw8852b_bb_reset_all(rtwdev, phy_idx); + rtw8852bx_bb_reset_all(rtwdev, phy_idx); rtw89_phy_write32_clr(rtwdev, R_P0_TXPW_RSTB, B_P0_TXPW_RSTB_MANON); rtw89_phy_write32_clr(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_TRK_EN); rtw89_phy_write32_clr(rtwdev, R_P1_TXPW_RSTB, B_P1_TXPW_RSTB_MANON); rtw89_phy_write32_clr(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_TRK_EN); } -static void rtw8852b_bb_macid_ctrl_init(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx) -{ - u32 addr; - - for (addr = R_AX_PWR_MACID_LMT_TABLE0; - addr <= R_AX_PWR_MACID_LMT_TABLE127; addr += 4) - rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 0); -} - -static void rtw8852b_bb_sethw(struct rtw89_dev *rtwdev) -{ - struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain; - - rtw89_phy_write32_clr(rtwdev, R_P0_EN_SOUND_WO_NDP, B_P0_EN_SOUND_WO_NDP); - rtw89_phy_write32_clr(rtwdev, R_P1_EN_SOUND_WO_NDP, B_P1_EN_SOUND_WO_NDP); - - rtw8852b_bb_macid_ctrl_init(rtwdev, RTW89_PHY_0); - - /* read these registers after loading BB parameters */ - gain->offset_base[RTW89_PHY_0] = - rtw89_phy_read32_mask(rtwdev, R_P0_RPL1, B_P0_RPL1_BIAS_MASK); - gain->rssi_base[RTW89_PHY_0] = - rtw89_phy_read32_mask(rtwdev, R_P1_RPL1, B_P0_RPL1_BIAS_MASK); -} - -static void rtw8852b_bb_set_pop(struct rtw89_dev *rtwdev) -{ - if (rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR) - rtw89_phy_write32_clr(rtwdev, R_PKT_CTRL, B_PKT_POP_EN); -} - -static void rtw8852b_set_channel_bb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx) -{ - bool cck_en = chan->channel <= 14; - u8 pri_ch_idx = chan->pri_ch_idx; - u8 band = chan->band_type, chan_idx; - - if (cck_en) - rtw8852b_ctrl_sco_cck(rtwdev, chan->primary_channel); - - rtw8852b_ctrl_ch(rtwdev, chan, phy_idx); - rtw8852b_ctrl_bw(rtwdev, pri_ch_idx, chan->band_width, phy_idx); - rtw8852b_ctrl_cck_en(rtwdev, cck_en); - if (chan->band_type == RTW89_BAND_5G) { - rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, - B_PATH0_BT_SHARE_V1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, - B_PATH0_BTG_PATH_V1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1, - B_PATH1_BT_SHARE_V1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1, - B_PATH1_BTG_PATH_V1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x0); - rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x0); - rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1, - B_BT_DYN_DC_EST_EN_MSK, 0x0); - rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x0); - } - chan_idx = rtw89_encode_chan_idx(rtwdev, chan->primary_channel, band); - rtw89_phy_write32_mask(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0, chan_idx); - rtw8852b_5m_mask(rtwdev, chan, phy_idx); - rtw8852b_bb_set_pop(rtwdev); - rtw8852b_bb_reset_all(rtwdev, phy_idx); -} - static void rtw8852b_set_channel(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, enum rtw89_mac_idx 
mac_idx, enum rtw89_phy_idx phy_idx) { - rtw8852b_set_channel_mac(rtwdev, chan, mac_idx); - rtw8852b_set_channel_bb(rtwdev, chan, phy_idx); + rtw8852bx_set_channel_mac(rtwdev, chan, mac_idx); + rtw8852bx_set_channel_bb(rtwdev, chan, phy_idx); rtw8852b_set_channel_rf(rtwdev, chan, phy_idx); } @@ -1602,540 +579,6 @@ static void rtw8852b_rfk_track(struct rtw89_dev *rtwdev) rtw8852b_dpk_track(rtwdev); } -static u32 rtw8852b_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx, s16 ref) -{ - const u16 tssi_16dbm_cw = 0x12c; - const u8 base_cw_0db = 0x27; - const s8 ofst_int = 0; - s16 pwr_s10_3; - s16 rf_pwr_cw; - u16 bb_pwr_cw; - u32 pwr_cw; - u32 tssi_ofst_cw; - - pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3); - bb_pwr_cw = FIELD_GET(GENMASK(2, 0), pwr_s10_3); - rf_pwr_cw = FIELD_GET(GENMASK(8, 3), pwr_s10_3); - rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63); - pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw; - - tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3)); - rtw89_debug(rtwdev, RTW89_DBG_TXPWR, - "[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n", - tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw); - - return FIELD_PREP(B_DPD_TSSI_CW, tssi_ofst_cw) | - FIELD_PREP(B_DPD_PWR_CW, pwr_cw) | - FIELD_PREP(B_DPD_REF, ref); -} - -static void rtw8852b_set_txpwr_ref(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx) -{ - static const u32 addr[RF_PATH_NUM_8852B] = {0x5800, 0x7800}; - const u32 mask = B_DPD_TSSI_CW | B_DPD_PWR_CW | B_DPD_REF; - const u8 ofst_ofdm = 0x4; - const u8 ofst_cck = 0x8; - const s16 ref_ofdm = 0; - const s16 ref_cck = 0; - u32 val; - u8 i; - - rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr reference\n"); - - rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL, - B_AX_PWR_REF, 0x0); - - rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n"); - val = rtw8852b_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm); - - for (i = 0; i < RF_PATH_NUM_8852B; i++) - rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val, - phy_idx); - - rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n"); - val = rtw8852b_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck); - - for (i = 0; i < RF_PATH_NUM_8852B; i++) - rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val, - phy_idx); -} - -static void rtw8852b_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - u8 tx_shape_idx, - enum rtw89_phy_idx phy_idx) -{ -#define __DFIR_CFG_ADDR(i) (R_TXFIR0 + ((i) << 2)) -#define __DFIR_CFG_MASK 0xffffffff -#define __DFIR_CFG_NR 8 -#define __DECL_DFIR_PARAM(_name, _val...) \ - static const u32 param_ ## _name[] = {_val}; \ - static_assert(ARRAY_SIZE(param_ ## _name) == __DFIR_CFG_NR) - - __DECL_DFIR_PARAM(flat, - 0x023D23FF, 0x0029B354, 0x000FC1C8, 0x00FDB053, - 0x00F86F9A, 0x06FAEF92, 0x00FE5FCC, 0x00FFDFF5); - __DECL_DFIR_PARAM(sharp, - 0x023D83FF, 0x002C636A, 0x0013F204, 0x00008090, - 0x00F87FB0, 0x06F99F83, 0x00FDBFBA, 0x00003FF5); - __DECL_DFIR_PARAM(sharp_14, - 0x023B13FF, 0x001C42DE, 0x00FDB0AD, 0x00F60F6E, - 0x00FD8F92, 0x0602D011, 0x0001C02C, 0x00FFF00A); - u8 ch = chan->channel; - const u32 *param; - u32 addr; - int i; - - if (ch > 14) { - rtw89_warn(rtwdev, - "set tx shape dfir by unknown ch: %d on 2G\n", ch); - return; - } - - if (ch == 14) - param = param_sharp_14; - else - param = tx_shape_idx == 0 ? 
param_flat : param_sharp; - - for (i = 0; i < __DFIR_CFG_NR; i++) { - addr = __DFIR_CFG_ADDR(i); - rtw89_debug(rtwdev, RTW89_DBG_TXPWR, - "set tx shape dfir: 0x%x: 0x%x\n", addr, param[i]); - rtw89_phy_write32_idx(rtwdev, addr, __DFIR_CFG_MASK, param[i], - phy_idx); - } - -#undef __DECL_DFIR_PARAM -#undef __DFIR_CFG_NR -#undef __DFIR_CFG_MASK -#undef __DECL_CFG_ADDR -} - -static void rtw8852b_set_tx_shape(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx) -{ - const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms; - u8 band = chan->band_type; - u8 regd = rtw89_regd_get(rtwdev, band); - u8 tx_shape_cck = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_CCK][regd]; - u8 tx_shape_ofdm = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_OFDM][regd]; - - if (band == RTW89_BAND_2G) - rtw8852b_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx); - - rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG, - tx_shape_ofdm); -} - -static void rtw8852b_set_txpwr(struct rtw89_dev *rtwdev, - const struct rtw89_chan *chan, - enum rtw89_phy_idx phy_idx) -{ - rtw89_phy_set_txpwr_byrate(rtwdev, chan, phy_idx); - rtw89_phy_set_txpwr_offset(rtwdev, chan, phy_idx); - rtw8852b_set_tx_shape(rtwdev, chan, phy_idx); - rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx); - rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx); -} - -static void rtw8852b_set_txpwr_ctrl(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx phy_idx) -{ - rtw8852b_set_txpwr_ref(rtwdev, phy_idx); -} - -static -void rtw8852b_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, - s8 pw_ofst, enum rtw89_mac_idx mac_idx) -{ - u32 reg; - - if (pw_ofst < -16 || pw_ofst > 15) { - rtw89_warn(rtwdev, "[ULTB] Err pwr_offset=%d\n", pw_ofst); - return; - } - - reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_CTRL, mac_idx); - rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN); - - reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, mac_idx); - rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, pw_ofst); - - pw_ofst = max_t(s8, pw_ofst - 3, -16); - reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, mac_idx); - rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_MASK, pw_ofst); -} - -static int -rtw8852b_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) -{ - int ret; - - ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL2, 0x07763333); - if (ret) - return ret; - - ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_COEXT_CTRL, 0x01ebf000); - if (ret) - return ret; - - ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL0, 0x0002f8ff); - if (ret) - return ret; - - rtw8852b_set_txpwr_ul_tb_offset(rtwdev, 0, phy_idx == RTW89_PHY_1 ? 
- RTW89_MAC_1 : RTW89_MAC_0); - - return 0; -} - -void rtw8852b_bb_set_plcp_tx(struct rtw89_dev *rtwdev) -{ - const struct rtw89_reg3_def *def = rtw8852b_pmac_ht20_mcs7_tbl; - u8 i; - - for (i = 0; i < ARRAY_SIZE(rtw8852b_pmac_ht20_mcs7_tbl); i++, def++) - rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data); -} - -static void rtw8852b_stop_pmac_tx(struct rtw89_dev *rtwdev, - struct rtw8852b_bb_pmac_info *tx_info, - enum rtw89_phy_idx idx) -{ - rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Stop Tx"); - if (tx_info->mode == CONT_TX) - rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 0, idx); - else if (tx_info->mode == PKTS_TX) - rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 0, idx); -} - -static void rtw8852b_start_pmac_tx(struct rtw89_dev *rtwdev, - struct rtw8852b_bb_pmac_info *tx_info, - enum rtw89_phy_idx idx) -{ - enum rtw8852b_pmac_mode mode = tx_info->mode; - u32 pkt_cnt = tx_info->tx_cnt; - u16 period = tx_info->period; - - if (mode == CONT_TX && !tx_info->is_cck) { - rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 1, idx); - rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CTx Start"); - } else if (mode == PKTS_TX) { - rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 1, idx); - rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, - B_PMAC_TX_PRD_MSK, period, idx); - rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CNT, B_PMAC_TX_CNT_MSK, - pkt_cnt, idx); - rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC PTx Start"); - } - - rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 1, idx); - rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 0, idx); -} - -void rtw8852b_bb_set_pmac_tx(struct rtw89_dev *rtwdev, - struct rtw8852b_bb_pmac_info *tx_info, - enum rtw89_phy_idx idx) -{ - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); - - if (!tx_info->en_pmac_tx) { - rtw8852b_stop_pmac_tx(rtwdev, tx_info, idx); - rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx); - if (chan->band_type == RTW89_BAND_2G) - rtw89_phy_write32_clr(rtwdev, R_RXCCA, B_RXCCA_DIS); - return; - } - - rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Tx Enable"); - - rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 1, idx); - rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 1, idx); - rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0x3f, idx); - rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, idx); - rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 1, idx); - rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS); - rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, idx); - - rtw8852b_start_pmac_tx(rtwdev, tx_info, idx); -} - -void rtw8852b_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable, - u16 tx_cnt, u16 period, u16 tx_time, - enum rtw89_phy_idx idx) -{ - struct rtw8852b_bb_pmac_info tx_info = {0}; - - tx_info.en_pmac_tx = enable; - tx_info.is_cck = 0; - tx_info.mode = PKTS_TX; - tx_info.tx_cnt = tx_cnt; - tx_info.period = period; - tx_info.tx_time = tx_time; - - rtw8852b_bb_set_pmac_tx(rtwdev, &tx_info, idx); -} - -void rtw8852b_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm, - enum rtw89_phy_idx idx) -{ - rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx PWR = %d", pwr_dbm); - - rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx); - rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, pwr_dbm, idx); -} - -void rtw8852b_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path) -{ - rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 7, 
RTW89_PHY_0); - - rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx Path = %d", tx_path); - - if (tx_path == RF_PATH_A) { - rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 1); - rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0); - } else if (tx_path == RF_PATH_B) { - rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 2); - rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0); - } else if (tx_path == RF_PATH_AB) { - rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 3); - rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 4); - } else { - rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Error Tx Path"); - } -} - -void rtw8852b_bb_tx_mode_switch(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx idx, u8 mode) -{ - if (mode != 0) - return; - - rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Tx mode switch"); - - rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 0, idx); - rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 0, idx); - rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0, idx); - rtw89_phy_write32_idx(rtwdev, R_PMAC_RXMOD, B_PMAC_RXMOD_MSK, 0, idx); - rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_DPD_EN, 0, idx); - rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0, idx); - rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 0, idx); -} - -void rtw8852b_bb_backup_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, - struct rtw8852b_bb_tssi_bak *bak) -{ - s32 tmp; - - bak->tx_path = rtw89_phy_read32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, idx); - bak->rx_path = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, idx); - bak->p0_rfmode = rtw89_phy_read32_idx(rtwdev, R_P0_RFMODE, MASKDWORD, idx); - bak->p0_rfmode_ftm = rtw89_phy_read32_idx(rtwdev, R_P0_RFMODE_FTM_RX, MASKDWORD, idx); - bak->p1_rfmode = rtw89_phy_read32_idx(rtwdev, R_P1_RFMODE, MASKDWORD, idx); - bak->p1_rfmode_ftm = rtw89_phy_read32_idx(rtwdev, R_P1_RFMODE_FTM_RX, MASKDWORD, idx); - tmp = rtw89_phy_read32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, idx); - bak->tx_pwr = sign_extend32(tmp, 8); -} - -void rtw8852b_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, - const struct rtw8852b_bb_tssi_bak *bak) -{ - rtw89_phy_write32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, bak->tx_path, idx); - if (bak->tx_path == RF_AB) - rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0x4); - else - rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0x0); - rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, bak->rx_path, idx); - rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx); - rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE, MASKDWORD, bak->p0_rfmode, idx); - rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_FTM_RX, MASKDWORD, bak->p0_rfmode_ftm, idx); - rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE, MASKDWORD, bak->p1_rfmode, idx); - rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_FTM_RX, MASKDWORD, bak->p1_rfmode_ftm, idx); - rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, bak->tx_pwr, idx); -} - -static void rtw8852b_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, - enum rtw89_phy_idx phy_idx) -{ - rtw89_phy_write_reg3_tbl(rtwdev, en ? 
&rtw8852b_btc_preagc_en_defs_tbl : - &rtw8852b_btc_preagc_dis_defs_tbl); -} - -static void rtw8852b_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, - enum rtw89_phy_idx phy_idx) -{ - if (en) { - rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, - B_PATH0_BT_SHARE_V1, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, - B_PATH0_BTG_PATH_V1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1, - B_PATH1_G_LNA6_OP1DB_V1, 0x20); - rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1, - B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x30); - rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1, - B_PATH1_BT_SHARE_V1, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1, - B_PATH1_BTG_PATH_V1, 0x1); - rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x1); - rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x2); - rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1, - B_BT_DYN_DC_EST_EN_MSK, 0x1); - rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x1); - } else { - rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, - B_PATH0_BT_SHARE_V1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, - B_PATH0_BTG_PATH_V1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1, - B_PATH1_G_LNA6_OP1DB_V1, 0x1a); - rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1, - B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x2a); - rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1, - B_PATH1_BT_SHARE_V1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1, - B_PATH1_BTG_PATH_V1, 0x0); - rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0xc); - rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x0); - rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x0); - rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1, - B_BT_DYN_DC_EST_EN_MSK, 0x1); - rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x0); - } -} - -void rtw8852b_bb_ctrl_rx_path(struct rtw89_dev *rtwdev, - enum rtw89_rf_path_bit rx_path) -{ - const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); - u32 rst_mask0; - u32 rst_mask1; - - if (rx_path == RF_A) { - rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 1); - rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 1); - rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 1); - rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0); - rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0); - } else if (rx_path == RF_B) { - rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 2); - rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 2); - rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 2); - rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0); - rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0); - } else if (rx_path == RF_AB) { - rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 3); - rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 3); - rtw89_phy_write32_mask(rtwdev, 
R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 3); - rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1); - rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1); - } - - rtw8852b_set_gain_offset(rtwdev, chan->subband_type, RTW89_PHY_0); - - if (chan->band_type == RTW89_BAND_2G && - (rx_path == RF_B || rx_path == RF_AB)) - rtw8852b_ctrl_btg_bt_rx(rtwdev, true, RTW89_PHY_0); - else - rtw8852b_ctrl_btg_bt_rx(rtwdev, false, RTW89_PHY_0); - - rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI; - rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI; - if (rx_path == RF_A) { - rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 1); - rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 3); - } else { - rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 1); - rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 3); - } -} - -static void rtw8852b_bb_ctrl_rf_mode_rx_path(struct rtw89_dev *rtwdev, - enum rtw89_rf_path_bit rx_path) -{ - if (rx_path == RF_A) { - rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, - B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1233312); - rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX, - B_P0_RFMODE_FTM_RX, 0x333); - rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, - B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1111111); - rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX, - B_P1_RFMODE_FTM_RX, 0x111); - } else if (rx_path == RF_B) { - rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, - B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1111111); - rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX, - B_P0_RFMODE_FTM_RX, 0x111); - rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, - B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1233312); - rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX, - B_P1_RFMODE_FTM_RX, 0x333); - } else if (rx_path == RF_AB) { - rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, - B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1233312); - rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX, - B_P0_RFMODE_FTM_RX, 0x333); - rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, - B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1233312); - rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX, - B_P1_RFMODE_FTM_RX, 0x333); - } -} - -static void rtw8852b_bb_cfg_txrx_path(struct rtw89_dev *rtwdev) -{ - struct rtw89_hal *hal = &rtwdev->hal; - enum rtw89_rf_path_bit rx_path = hal->antenna_rx ? 
hal->antenna_rx : RF_AB; - - rtw8852b_bb_ctrl_rx_path(rtwdev, rx_path); - rtw8852b_bb_ctrl_rf_mode_rx_path(rtwdev, rx_path); - - if (rtwdev->hal.rx_nss == 1) { - rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0); - rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0); - } else { - rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1); - rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1); - rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1); - } - - rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0x0, RTW89_PHY_0); -} - -static u8 rtw8852b_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path) -{ - if (rtwdev->is_tssi_mode[rf_path]) { - u32 addr = 0x1c10 + (rf_path << 13); - - return rtw89_phy_read32_mask(rtwdev, addr, 0x3F000000); - } - - rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1); - rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x0); - rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1); - - fsleep(200); - - return rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL); -} - static void rtw8852b_btc_set_rfe(struct rtw89_dev *rtwdev) { const struct rtw89_btc_ver *ver = rtwdev->btc.ver; @@ -2190,86 +633,6 @@ static void rtw8852b_btc_set_rfe(struct rtw89_dev *rtwdev) } } -static -void rtw8852b_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val) -{ - rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x20000); - rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, group); - rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, val); - rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0); -} - -static void rtw8852b_btc_init_cfg(struct rtw89_dev *rtwdev) -{ - struct rtw89_btc *btc = &rtwdev->btc; - const struct rtw89_chip_info *chip = rtwdev->chip; - const struct rtw89_mac_ax_coex coex_params = { - .pta_mode = RTW89_MAC_AX_COEX_RTK_MODE, - .direction = RTW89_MAC_AX_COEX_INNER, - }; - - /* PTA init */ - rtw89_mac_coex_init(rtwdev, &coex_params); - - /* set WL Tx response = Hi-Pri */ - chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_TX_RESP, true); - chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_BEACON, true); - - /* set rf gnt debug off */ - rtw89_write_rf(rtwdev, RF_PATH_A, RR_WLSEL, RFREG_MASK, 0x0); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, RFREG_MASK, 0x0); - - /* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */ - if (btc->ant_type == BTC_ANT_SHARED) { - rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff); - rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff); - /* set path-A(S0) Tx/Rx no-mask if GNT_WL=0 && BT_S1=tx group */ - rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff); - rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_TX_GROUP, 0x55f); - } else { /* set WL Tx stb if GNT_WL = 0 && BT_S1 = ss group for 3-ant */ - rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5df); - rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5df); - rtw8852b_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff); - rtw8852b_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_TX_GROUP, 0x5ff); - } - - /* set PTA break table */ - rtw89_write32(rtwdev, R_BTC_BREAK_TABLE, BTC_BREAK_PARAM); - - /* enable BT counter 0xda40[16,2] = 2b'11 */ - rtw89_write32_set(rtwdev, R_AX_CSR_MODE, B_AX_BT_CNT_RST | B_AX_STATIS_BT_EN); - 
btc->cx.wl.status.map.init_ok = true; -} - -static -void rtw8852b_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state) -{ - u32 bitmap; - u32 reg; - - switch (map) { - case BTC_PRI_MASK_TX_RESP: - reg = R_BTC_BT_COEX_MSK_TABLE; - bitmap = B_BTC_PRI_MASK_TX_RESP_V1; - break; - case BTC_PRI_MASK_BEACON: - reg = R_AX_WL_PRI_MSK; - bitmap = B_AX_PTA_WL_PRI_MASK_BCNQ; - break; - case BTC_PRI_MASK_RX_CCK: - reg = R_BTC_BT_COEX_MSK_TABLE; - bitmap = B_BTC_PRI_MASK_RXCCK_V1; - break; - default: - return; - } - - if (state) - rtw89_write32_set(rtwdev, reg, bitmap); - else - rtw89_write32_clr(rtwdev, reg, bitmap); -} - union rtw8852b_btc_wl_txpwr_ctrl { u32 txpwr_val; struct { @@ -2337,186 +700,19 @@ do { \ #undef __write_ctrl } -static -s8 rtw8852b_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val) -{ - /* +6 for compensate offset */ - return clamp_t(s8, val + 6, -100, 0) + 100; -} - -static -void rtw8852b_btc_update_bt_cnt(struct rtw89_dev *rtwdev) -{ - /* Feature move to firmware */ -} - -static void rtw8852b_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state) -{ - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x31); - - /* set WL standby = Rx for GNT_BT_Tx = 1->0 settle issue */ - if (state) - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x179); - else - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x20); - - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0); -} - -static void rtw8852b_btc_set_wl_lna2(struct rtw89_dev *rtwdev, u8 level) -{ - switch (level) { - case 0: /* default */ - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0); - break; - case 1: /* Fix LNA2=5 */ - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5); - rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0); - break; - } -} - -static void rtw8852b_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level) -{ - struct rtw89_btc *btc = &rtwdev->btc; - - switch (level) { - case 0: /* original */ - default: - rtw8852b_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0); - btc->dm.wl_lna2 = 0; - break; - case 1: /* for FDD free-run */ - rtw8852b_ctrl_nbtg_bt_tx(rtwdev, true, RTW89_PHY_0); - btc->dm.wl_lna2 = 0; - break; - case 2: /* for BTG Co-Rx*/ - rtw8852b_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0); - btc->dm.wl_lna2 = 1; - break; - } - - 
rtw8852b_btc_set_wl_lna2(rtwdev, btc->dm.wl_lna2); -} - -static void rtw8852b_fill_freq_with_ppdu(struct rtw89_dev *rtwdev, - struct rtw89_rx_phy_ppdu *phy_ppdu, - struct ieee80211_rx_status *status) -{ - u16 chan = phy_ppdu->chan_idx; - enum nl80211_band band; - u8 ch; - - if (chan == 0) - return; - - rtw89_decode_chan_idx(rtwdev, chan, &ch, &band); - status->freq = ieee80211_channel_to_frequency(ch, band); - status->band = band; -} - -static void rtw8852b_query_ppdu(struct rtw89_dev *rtwdev, - struct rtw89_rx_phy_ppdu *phy_ppdu, - struct ieee80211_rx_status *status) -{ - u8 path; - u8 *rx_power = phy_ppdu->rssi; - - status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B])); - for (path = 0; path < rtwdev->chip->rf_path_num; path++) { - status->chains |= BIT(path); - status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]); - } - if (phy_ppdu->valid) - rtw8852b_fill_freq_with_ppdu(rtwdev, phy_ppdu, status); -} - -static int rtw8852b_mac_enable_bb_rf(struct rtw89_dev *rtwdev) -{ - int ret; - - rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN, - B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); - rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_REG_ZCDC_H_MASK, 0x1); - rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); - rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); - rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); - - ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0xC7, - FULL_BIT_MASK); - if (ret) - return ret; - - ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0xC7, - FULL_BIT_MASK); - if (ret) - return ret; - - rtw89_write8(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_XYN_CYCLE); - - return 0; -} - -static int rtw8852b_mac_disable_bb_rf(struct rtw89_dev *rtwdev) -{ - u8 wl_rfc_s0; - u8 wl_rfc_s1; - int ret; - - rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); - rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, - B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); - - ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, &wl_rfc_s0); - if (ret) - return ret; - wl_rfc_s0 &= ~XTAL_SI_RF00S_EN; - ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, wl_rfc_s0, - FULL_BIT_MASK); - if (ret) - return ret; - - ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, &wl_rfc_s1); - if (ret) - return ret; - wl_rfc_s1 &= ~XTAL_SI_RF10S_EN; - ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, wl_rfc_s1, - FULL_BIT_MASK); - return ret; -} - static const struct rtw89_chip_ops rtw8852b_chip_ops = { - .enable_bb_rf = rtw8852b_mac_enable_bb_rf, - .disable_bb_rf = rtw8852b_mac_disable_bb_rf, + .enable_bb_rf = rtw8852bx_mac_enable_bb_rf, + .disable_bb_rf = rtw8852bx_mac_disable_bb_rf, .bb_preinit = NULL, .bb_postinit = NULL, .bb_reset = rtw8852b_bb_reset, - .bb_sethw = rtw8852b_bb_sethw, + .bb_sethw = rtw8852bx_bb_sethw, .read_rf = rtw89_phy_read_rf_v1, .write_rf = rtw89_phy_write_rf_v1, .set_channel = rtw8852b_set_channel, .set_channel_help = rtw8852b_set_channel_help, - .read_efuse = rtw8852b_read_efuse, - .read_phycap = rtw8852b_read_phycap, + .read_efuse = rtw8852bx_read_efuse, + .read_phycap = rtw8852bx_read_phycap, .fem_setup = NULL, .rfe_gpio = NULL, .rfk_hw_init = NULL, @@ -2526,16 +722,16 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = { .rfk_band_changed = rtw8852b_rfk_band_changed, .rfk_scan = rtw8852b_rfk_scan, .rfk_track = rtw8852b_rfk_track, - .power_trim = rtw8852b_power_trim, - .set_txpwr = rtw8852b_set_txpwr, - .set_txpwr_ctrl = rtw8852b_set_txpwr_ctrl, - .init_txpwr_unit = rtw8852b_init_txpwr_unit, - 
.get_thermal = rtw8852b_get_thermal, - .ctrl_btg_bt_rx = rtw8852b_ctrl_btg_bt_rx, - .query_ppdu = rtw8852b_query_ppdu, - .ctrl_nbtg_bt_tx = rtw8852b_ctrl_nbtg_bt_tx, - .cfg_txrx_path = rtw8852b_bb_cfg_txrx_path, - .set_txpwr_ul_tb_offset = rtw8852b_set_txpwr_ul_tb_offset, + .power_trim = rtw8852bx_power_trim, + .set_txpwr = rtw8852bx_set_txpwr, + .set_txpwr_ctrl = rtw8852bx_set_txpwr_ctrl, + .init_txpwr_unit = rtw8852bx_init_txpwr_unit, + .get_thermal = rtw8852bx_get_thermal, + .ctrl_btg_bt_rx = rtw8852bx_ctrl_btg_bt_rx, + .query_ppdu = rtw8852bx_query_ppdu, + .ctrl_nbtg_bt_tx = rtw8852bx_ctrl_nbtg_bt_tx, + .cfg_txrx_path = rtw8852bx_bb_cfg_txrx_path, + .set_txpwr_ul_tb_offset = rtw8852bx_set_txpwr_ul_tb_offset, .pwr_on_func = rtw8852b_pwr_on_func, .pwr_off_func = rtw8852b_pwr_off_func, .query_rxdesc = rtw89_core_query_rxdesc, @@ -2554,13 +750,13 @@ static const struct rtw89_chip_ops rtw8852b_chip_ops = { .h2c_ba_cam = rtw89_fw_h2c_ba_cam, .btc_set_rfe = rtw8852b_btc_set_rfe, - .btc_init_cfg = rtw8852b_btc_init_cfg, - .btc_set_wl_pri = rtw8852b_btc_set_wl_pri, + .btc_init_cfg = rtw8852bx_btc_init_cfg, + .btc_set_wl_pri = rtw8852bx_btc_set_wl_pri, .btc_set_wl_txpwr_ctrl = rtw8852b_btc_set_wl_txpwr_ctrl, - .btc_get_bt_rssi = rtw8852b_btc_get_bt_rssi, - .btc_update_bt_cnt = rtw8852b_btc_update_bt_cnt, - .btc_wl_s1_standby = rtw8852b_btc_wl_s1_standby, - .btc_set_wl_rx_gain = rtw8852b_btc_set_wl_rx_gain, + .btc_get_bt_rssi = rtw8852bx_btc_get_bt_rssi, + .btc_update_bt_cnt = rtw8852bx_btc_update_bt_cnt, + .btc_wl_s1_standby = rtw8852bx_btc_wl_s1_standby, + .btc_set_wl_rx_gain = rtw8852bx_btc_set_wl_rx_gain, .btc_set_policy = rtw89_btc_set_policy_v1, }; @@ -2587,7 +783,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = { .fifo_size = 196608, .small_fifo_size = true, .dle_scc_rsvd_size = 98304, - .max_amsdu_limit = 3500, + .max_amsdu_limit = 5000, .dis_2g_40m_ul_ofdma = true, .rsvd_ple_ofst = 0x2f800, .hfc_param_ini = rtw8852b_hfc_param_ini_pcie, @@ -2610,6 +806,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = { .dig_table = NULL, .dig_regs = &rtw8852b_dig_regs, .tssi_dbw_table = NULL, + .support_macid_num = RTW89_MAX_MAC_ID_NUM, .support_chanctx_num = 0, .support_rnr = false, .support_bands = BIT(NL80211_BAND_2GHZ) | @@ -2672,7 +869,7 @@ const struct rtw89_chip_info rtw8852b_chip_info = { .c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8}, .c2h_regs = rtw8852b_c2h_regs, .page_regs = &rtw8852b_page_regs, - .wow_reason_reg = R_AX_C2HREG_DATA3 + 3, + .wow_reason_reg = rtw8852b_wow_wakeup_regs, .cfo_src_fd = true, .cfo_hw_comp = true, .dcfo_comp = &rtw8852b_dcfo_comp, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b.h b/drivers/net/wireless/realtek/rtw89/rtw8852b.h index 4f9b3d476879..5ec7180fd355 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b.h +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b.h @@ -10,128 +10,6 @@ #define RF_PATH_NUM_8852B 2 #define BB_PATH_NUM_8852B 2 -enum rtw8852b_pmac_mode { - NONE_TEST, - PKTS_TX, - PKTS_RX, - CONT_TX -}; - -struct rtw8852b_u_efuse { - u8 rsvd[0x88]; - u8 mac_addr[ETH_ALEN]; -}; - -struct rtw8852b_e_efuse { - u8 mac_addr[ETH_ALEN]; -}; - -struct rtw8852b_tssi_offset { - u8 cck_tssi[TSSI_CCK_CH_GROUP_NUM]; - u8 bw40_tssi[TSSI_MCS_2G_CH_GROUP_NUM]; - u8 rsvd[7]; - u8 bw40_1s_tssi_5g[TSSI_MCS_5G_CH_GROUP_NUM]; -} __packed; - -struct rtw8852b_efuse { - u8 rsvd[0x210]; - struct rtw8852b_tssi_offset path_a_tssi; - u8 rsvd1[10]; - struct rtw8852b_tssi_offset path_b_tssi; - u8 rsvd2[94]; - u8 channel_plan; - u8 
xtal_k; - u8 rsvd3; - u8 iqk_lck; - u8 rsvd4[5]; - u8 reg_setting:2; - u8 tx_diversity:1; - u8 rx_diversity:2; - u8 ac_mode:1; - u8 module_type:2; - u8 rsvd5; - u8 shared_ant:1; - u8 coex_type:3; - u8 ant_iso:1; - u8 radio_on_off:1; - u8 rsvd6:2; - u8 eeprom_version; - u8 customer_id; - u8 tx_bb_swing_2g; - u8 tx_bb_swing_5g; - u8 tx_cali_pwr_trk_mode; - u8 trx_path_selection; - u8 rfe_type; - u8 country_code[2]; - u8 rsvd7[3]; - u8 path_a_therm; - u8 path_b_therm; - u8 rsvd8[2]; - u8 rx_gain_2g_ofdm; - u8 rsvd9; - u8 rx_gain_2g_cck; - u8 rsvd10; - u8 rx_gain_5g_low; - u8 rsvd11; - u8 rx_gain_5g_mid; - u8 rsvd12; - u8 rx_gain_5g_high; - u8 rsvd13[35]; - u8 path_a_cck_pwr_idx[6]; - u8 path_a_bw40_1tx_pwr_idx[5]; - u8 path_a_ofdm_1tx_pwr_idx_diff:4; - u8 path_a_bw20_1tx_pwr_idx_diff:4; - u8 path_a_bw20_2tx_pwr_idx_diff:4; - u8 path_a_bw40_2tx_pwr_idx_diff:4; - u8 path_a_cck_2tx_pwr_idx_diff:4; - u8 path_a_ofdm_2tx_pwr_idx_diff:4; - u8 rsvd14[0xf2]; - union { - struct rtw8852b_u_efuse u; - struct rtw8852b_e_efuse e; - }; -} __packed; - -struct rtw8852b_bb_pmac_info { - u8 en_pmac_tx:1; - u8 is_cck:1; - u8 mode:3; - u8 rsvd:3; - u16 tx_cnt; - u16 period; - u16 tx_time; - u8 duty_cycle; -}; - -struct rtw8852b_bb_tssi_bak { - u8 tx_path; - u8 rx_path; - u32 p0_rfmode; - u32 p0_rfmode_ftm; - u32 p1_rfmode; - u32 p1_rfmode_ftm; - s16 tx_pwr; /* S9 */ -}; - extern const struct rtw89_chip_info rtw8852b_chip_info; -void rtw8852b_bb_set_plcp_tx(struct rtw89_dev *rtwdev); -void rtw8852b_bb_set_pmac_tx(struct rtw89_dev *rtwdev, - struct rtw8852b_bb_pmac_info *tx_info, - enum rtw89_phy_idx idx); -void rtw8852b_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable, - u16 tx_cnt, u16 period, u16 tx_time, - enum rtw89_phy_idx idx); -void rtw8852b_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm, - enum rtw89_phy_idx idx); -void rtw8852b_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path); -void rtw8852b_bb_ctrl_rx_path(struct rtw89_dev *rtwdev, - enum rtw89_rf_path_bit rx_path); -void rtw8852b_bb_tx_mode_switch(struct rtw89_dev *rtwdev, - enum rtw89_phy_idx idx, u8 mode); -void rtw8852b_bb_backup_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, - struct rtw8852b_bb_tssi_bak *bak); -void rtw8852b_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, - const struct rtw8852b_bb_tssi_bak *bak); - #endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c new file mode 100644 index 000000000000..1745c2882acf --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.c @@ -0,0 +1,2053 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2024 Realtek Corporation + */ + +#include "coex.h" +#include "debug.h" +#include "mac.h" +#include "phy.h" +#include "reg.h" +#include "rtw8852b_common.h" +#include "util.h" + +static const struct rtw89_reg3_def rtw8852bx_pmac_ht20_mcs7_tbl[] = { + {0x4580, 0x0000ffff, 0x0}, + {0x4580, 0xffff0000, 0x0}, + {0x4584, 0x0000ffff, 0x0}, + {0x4584, 0xffff0000, 0x0}, + {0x4580, 0x0000ffff, 0x1}, + {0x4578, 0x00ffffff, 0x2018b}, + {0x4570, 0x03ffffff, 0x7}, + {0x4574, 0x03ffffff, 0x32407}, + {0x45b8, 0x00000010, 0x0}, + {0x45b8, 0x00000100, 0x0}, + {0x45b8, 0x00000080, 0x0}, + {0x45b8, 0x00000008, 0x0}, + {0x45a0, 0x0000ff00, 0x0}, + {0x45a0, 0xff000000, 0x1}, + {0x45a4, 0x0000ff00, 0x2}, + {0x45a4, 0xff000000, 0x3}, + {0x45b8, 0x00000020, 0x0}, + {0x4568, 0xe0000000, 0x0}, + {0x45b8, 0x00000002, 0x1}, + {0x456c, 0xe0000000, 0x0}, + {0x45b4, 0x00006000, 0x0}, + 
{0x45b4, 0x00001800, 0x1}, + {0x45b8, 0x00000040, 0x0}, + {0x45b8, 0x00000004, 0x0}, + {0x45b8, 0x00000200, 0x0}, + {0x4598, 0xf8000000, 0x0}, + {0x45b8, 0x00100000, 0x0}, + {0x45a8, 0x00000fc0, 0x0}, + {0x45b8, 0x00200000, 0x0}, + {0x45b0, 0x00000038, 0x0}, + {0x45b0, 0x000001c0, 0x0}, + {0x45a0, 0x000000ff, 0x0}, + {0x45b8, 0x00400000, 0x0}, + {0x4590, 0x000007ff, 0x0}, + {0x45b0, 0x00000e00, 0x0}, + {0x45ac, 0x0000001f, 0x0}, + {0x45b8, 0x00800000, 0x0}, + {0x45a8, 0x0003f000, 0x0}, + {0x45b8, 0x01000000, 0x0}, + {0x45b0, 0x00007000, 0x0}, + {0x45b0, 0x00038000, 0x0}, + {0x45a0, 0x00ff0000, 0x0}, + {0x45b8, 0x02000000, 0x0}, + {0x4590, 0x003ff800, 0x0}, + {0x45b0, 0x001c0000, 0x0}, + {0x45ac, 0x000003e0, 0x0}, + {0x45b8, 0x04000000, 0x0}, + {0x45a8, 0x00fc0000, 0x0}, + {0x45b8, 0x08000000, 0x0}, + {0x45b0, 0x00e00000, 0x0}, + {0x45b0, 0x07000000, 0x0}, + {0x45a4, 0x000000ff, 0x0}, + {0x45b8, 0x10000000, 0x0}, + {0x4594, 0x000007ff, 0x0}, + {0x45b0, 0x38000000, 0x0}, + {0x45ac, 0x00007c00, 0x0}, + {0x45b8, 0x20000000, 0x0}, + {0x45a8, 0x3f000000, 0x0}, + {0x45b8, 0x40000000, 0x0}, + {0x45b4, 0x00000007, 0x0}, + {0x45b4, 0x00000038, 0x0}, + {0x45a4, 0x00ff0000, 0x0}, + {0x45b8, 0x80000000, 0x0}, + {0x4594, 0x003ff800, 0x0}, + {0x45b4, 0x000001c0, 0x0}, + {0x4598, 0xf8000000, 0x0}, + {0x45b8, 0x00100000, 0x0}, + {0x45a8, 0x00000fc0, 0x7}, + {0x45b8, 0x00200000, 0x0}, + {0x45b0, 0x00000038, 0x0}, + {0x45b0, 0x000001c0, 0x0}, + {0x45a0, 0x000000ff, 0x0}, + {0x45b4, 0x06000000, 0x0}, + {0x45b0, 0x00000007, 0x0}, + {0x45b8, 0x00080000, 0x0}, + {0x45a8, 0x0000003f, 0x0}, + {0x457c, 0xffe00000, 0x1}, + {0x4530, 0xffffffff, 0x0}, + {0x4588, 0x00003fff, 0x0}, + {0x4598, 0x000001ff, 0x0}, + {0x4534, 0xffffffff, 0x0}, + {0x4538, 0xffffffff, 0x0}, + {0x453c, 0xffffffff, 0x0}, + {0x4588, 0x0fffc000, 0x0}, + {0x4598, 0x0003fe00, 0x0}, + {0x4540, 0xffffffff, 0x0}, + {0x4544, 0xffffffff, 0x0}, + {0x4548, 0xffffffff, 0x0}, + {0x458c, 0x00003fff, 0x0}, + {0x4598, 0x07fc0000, 0x0}, + {0x454c, 0xffffffff, 0x0}, + {0x4550, 0xffffffff, 0x0}, + {0x4554, 0xffffffff, 0x0}, + {0x458c, 0x0fffc000, 0x0}, + {0x459c, 0x000001ff, 0x0}, + {0x4558, 0xffffffff, 0x0}, + {0x455c, 0xffffffff, 0x0}, + {0x4530, 0xffffffff, 0x4e790001}, + {0x4588, 0x00003fff, 0x0}, + {0x4598, 0x000001ff, 0x1}, + {0x4534, 0xffffffff, 0x0}, + {0x4538, 0xffffffff, 0x4b}, + {0x45ac, 0x38000000, 0x7}, + {0x4588, 0xf0000000, 0x0}, + {0x459c, 0x7e000000, 0x0}, + {0x45b8, 0x00040000, 0x0}, + {0x45b8, 0x00020000, 0x0}, + {0x4590, 0xffc00000, 0x0}, + {0x45b8, 0x00004000, 0x0}, + {0x4578, 0xff000000, 0x0}, + {0x45b8, 0x00000400, 0x0}, + {0x45b8, 0x00000800, 0x0}, + {0x45b8, 0x00001000, 0x0}, + {0x45b8, 0x00002000, 0x0}, + {0x45b4, 0x00018000, 0x0}, + {0x45ac, 0x07800000, 0x0}, + {0x45b4, 0x00000600, 0x2}, + {0x459c, 0x0001fe00, 0x80}, + {0x45ac, 0x00078000, 0x3}, + {0x459c, 0x01fe0000, 0x1}, +}; + +static const struct rtw89_reg3_def rtw8852bx_btc_preagc_en_defs[] = { + {0x46D0, GENMASK(1, 0), 0x3}, + {0x4790, GENMASK(1, 0), 0x3}, + {0x4AD4, GENMASK(31, 0), 0xf}, + {0x4AE0, GENMASK(31, 0), 0xf}, + {0x4688, GENMASK(31, 24), 0x80}, + {0x476C, GENMASK(31, 24), 0x80}, + {0x4694, GENMASK(7, 0), 0x80}, + {0x4694, GENMASK(15, 8), 0x80}, + {0x4778, GENMASK(7, 0), 0x80}, + {0x4778, GENMASK(15, 8), 0x80}, + {0x4AE4, GENMASK(23, 0), 0x780D1E}, + {0x4AEC, GENMASK(23, 0), 0x780D1E}, + {0x469C, GENMASK(31, 26), 0x34}, + {0x49F0, GENMASK(31, 26), 0x34}, +}; + +static DECLARE_PHY_REG3_TBL(rtw8852bx_btc_preagc_en_defs); + +static const struct rtw89_reg3_def 
rtw8852bx_btc_preagc_dis_defs[] = { + {0x46D0, GENMASK(1, 0), 0x0}, + {0x4790, GENMASK(1, 0), 0x0}, + {0x4AD4, GENMASK(31, 0), 0x60}, + {0x4AE0, GENMASK(31, 0), 0x60}, + {0x4688, GENMASK(31, 24), 0x1a}, + {0x476C, GENMASK(31, 24), 0x1a}, + {0x4694, GENMASK(7, 0), 0x2a}, + {0x4694, GENMASK(15, 8), 0x2a}, + {0x4778, GENMASK(7, 0), 0x2a}, + {0x4778, GENMASK(15, 8), 0x2a}, + {0x4AE4, GENMASK(23, 0), 0x79E99E}, + {0x4AEC, GENMASK(23, 0), 0x79E99E}, + {0x469C, GENMASK(31, 26), 0x26}, + {0x49F0, GENMASK(31, 26), 0x26}, +}; + +static DECLARE_PHY_REG3_TBL(rtw8852bx_btc_preagc_dis_defs); + +static void rtw8852be_efuse_parsing(struct rtw89_efuse *efuse, + struct rtw8852bx_efuse *map) +{ + ether_addr_copy(efuse->addr, map->e.mac_addr); + efuse->rfe_type = map->rfe_type; + efuse->xtal_cap = map->xtal_k; +} + +static void rtw8852bx_efuse_parsing_tssi(struct rtw89_dev *rtwdev, + struct rtw8852bx_efuse *map) +{ + struct rtw89_tssi_info *tssi = &rtwdev->tssi; + struct rtw8852bx_tssi_offset *ofst[] = {&map->path_a_tssi, &map->path_b_tssi}; + u8 i, j; + + tssi->thermal[RF_PATH_A] = map->path_a_therm; + tssi->thermal[RF_PATH_B] = map->path_b_therm; + + for (i = 0; i < RF_PATH_NUM_8852BX; i++) { + memcpy(tssi->tssi_cck[i], ofst[i]->cck_tssi, + sizeof(ofst[i]->cck_tssi)); + + for (j = 0; j < TSSI_CCK_CH_GROUP_NUM; j++) + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][EFUSE] path=%d cck[%d]=0x%x\n", + i, j, tssi->tssi_cck[i][j]); + + memcpy(tssi->tssi_mcs[i], ofst[i]->bw40_tssi, + sizeof(ofst[i]->bw40_tssi)); + memcpy(tssi->tssi_mcs[i] + TSSI_MCS_2G_CH_GROUP_NUM, + ofst[i]->bw40_1s_tssi_5g, sizeof(ofst[i]->bw40_1s_tssi_5g)); + + for (j = 0; j < TSSI_MCS_CH_GROUP_NUM; j++) + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][EFUSE] path=%d mcs[%d]=0x%x\n", + i, j, tssi->tssi_mcs[i][j]); + } +} + +static bool _decode_efuse_gain(u8 data, s8 *high, s8 *low) +{ + if (high) + *high = sign_extend32(FIELD_GET(GENMASK(7, 4), data), 3); + if (low) + *low = sign_extend32(FIELD_GET(GENMASK(3, 0), data), 3); + + return data != 0xff; +} + +static void rtw8852bx_efuse_parsing_gain_offset(struct rtw89_dev *rtwdev, + struct rtw8852bx_efuse *map) +{ + struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain; + bool valid = false; + + valid |= _decode_efuse_gain(map->rx_gain_2g_cck, + &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_CCK], + &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_CCK]); + valid |= _decode_efuse_gain(map->rx_gain_2g_ofdm, + &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_2G_OFDM], + &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_2G_OFDM]); + valid |= _decode_efuse_gain(map->rx_gain_5g_low, + &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_LOW], + &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_LOW]); + valid |= _decode_efuse_gain(map->rx_gain_5g_mid, + &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_MID], + &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_MID]); + valid |= _decode_efuse_gain(map->rx_gain_5g_high, + &gain->offset[RF_PATH_A][RTW89_GAIN_OFFSET_5G_HIGH], + &gain->offset[RF_PATH_B][RTW89_GAIN_OFFSET_5G_HIGH]); + + gain->offset_valid = valid; +} + +static int __rtw8852bx_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map, + enum rtw89_efuse_block block) +{ + struct rtw89_efuse *efuse = &rtwdev->efuse; + struct rtw8852bx_efuse *map; + + map = (struct rtw8852bx_efuse *)log_map; + + efuse->country_code[0] = map->country_code[0]; + efuse->country_code[1] = map->country_code[1]; + rtw8852bx_efuse_parsing_tssi(rtwdev, map); + rtw8852bx_efuse_parsing_gain_offset(rtwdev, map); + + switch (rtwdev->hci.type) { + case RTW89_HCI_TYPE_PCIE: 
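+ /* Only the PCIe (8852BE) efuse layout is parsed here; any other HCI type returns -EOPNOTSUPP below. */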
+ rtw8852be_efuse_parsing(efuse, map); + break; + default: + return -EOPNOTSUPP; + } + + rtw89_info(rtwdev, "chip rfe_type is %d\n", efuse->rfe_type); + + return 0; +} + +static void rtw8852bx_phycap_parsing_power_cal(struct rtw89_dev *rtwdev, u8 *phycap_map) +{ +#define PWR_K_CHK_OFFSET 0x5E9 +#define PWR_K_CHK_VALUE 0xAA + u32 offset = PWR_K_CHK_OFFSET - rtwdev->chip->phycap_addr; + + if (phycap_map[offset] == PWR_K_CHK_VALUE) + rtwdev->efuse.power_k_valid = true; +} + +static void rtw8852bx_phycap_parsing_tssi(struct rtw89_dev *rtwdev, u8 *phycap_map) +{ + struct rtw89_tssi_info *tssi = &rtwdev->tssi; + static const u32 tssi_trim_addr[RF_PATH_NUM_8852BX] = {0x5D6, 0x5AB}; + u32 addr = rtwdev->chip->phycap_addr; + bool pg = false; + u32 ofst; + u8 i, j; + + for (i = 0; i < RF_PATH_NUM_8852BX; i++) { + for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) { + /* addrs are in decreasing order */ + ofst = tssi_trim_addr[i] - addr - j; + tssi->tssi_trim[i][j] = phycap_map[ofst]; + + if (phycap_map[ofst] != 0xff) + pg = true; + } + } + + if (!pg) { + memset(tssi->tssi_trim, 0, sizeof(tssi->tssi_trim)); + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM] no PG, set all trim info to 0\n"); + } + + for (i = 0; i < RF_PATH_NUM_8852BX; i++) + for (j = 0; j < TSSI_TRIM_CH_GROUP_NUM; j++) + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] path=%d idx=%d trim=0x%x addr=0x%x\n", + i, j, tssi->tssi_trim[i][j], + tssi_trim_addr[i] - j); +} + +static void rtw8852bx_phycap_parsing_thermal_trim(struct rtw89_dev *rtwdev, + u8 *phycap_map) +{ + struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; + static const u32 thm_trim_addr[RF_PATH_NUM_8852BX] = {0x5DF, 0x5DC}; + u32 addr = rtwdev->chip->phycap_addr; + u8 i; + + for (i = 0; i < RF_PATH_NUM_8852BX; i++) { + info->thermal_trim[i] = phycap_map[thm_trim_addr[i] - addr]; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[THERMAL][TRIM] path=%d thermal_trim=0x%x\n", + i, info->thermal_trim[i]); + + if (info->thermal_trim[i] != 0xff) + info->pg_thermal_trim = true; + } +} + +static void rtw8852bx_thermal_trim(struct rtw89_dev *rtwdev) +{ +#define __thm_setting(raw) \ +({ \ + u8 __v = (raw); \ + ((__v & 0x1) << 3) | ((__v & 0x1f) >> 1); \ +}) + struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; + u8 i, val; + + if (!info->pg_thermal_trim) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[THERMAL][TRIM] no PG, do nothing\n"); + + return; + } + + for (i = 0; i < RF_PATH_NUM_8852BX; i++) { + val = __thm_setting(info->thermal_trim[i]); + rtw89_write_rf(rtwdev, i, RR_TM2, RR_TM2_OFF, val); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[THERMAL][TRIM] path=%d thermal_setting=0x%x\n", + i, val); + } +#undef __thm_setting +} + +static void rtw8852bx_phycap_parsing_pa_bias_trim(struct rtw89_dev *rtwdev, + u8 *phycap_map) +{ + struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; + static const u32 pabias_trim_addr[RF_PATH_NUM_8852BX] = {0x5DE, 0x5DB}; + u32 addr = rtwdev->chip->phycap_addr; + u8 i; + + for (i = 0; i < RF_PATH_NUM_8852BX; i++) { + info->pa_bias_trim[i] = phycap_map[pabias_trim_addr[i] - addr]; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[PA_BIAS][TRIM] path=%d pa_bias_trim=0x%x\n", + i, info->pa_bias_trim[i]); + + if (info->pa_bias_trim[i] != 0xff) + info->pg_pa_bias_trim = true; + } +} + +static void rtw8852bx_pa_bias_trim(struct rtw89_dev *rtwdev) +{ + struct rtw89_power_trim_info *info = &rtwdev->pwr_trim; + u8 pabias_2g, pabias_5g; + u8 i; + + if (!info->pg_pa_bias_trim) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[PA_BIAS][TRIM] no PG, do nothing\n"); + + return; + } + + 
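/* Each pa_bias_trim byte packs the 2 GHz bias in bits [3:0] and the 5 GHz bias in bits [7:4]. */ + 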
for (i = 0; i < RF_PATH_NUM_8852BX; i++) { + pabias_2g = FIELD_GET(GENMASK(3, 0), info->pa_bias_trim[i]); + pabias_5g = FIELD_GET(GENMASK(7, 4), info->pa_bias_trim[i]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[PA_BIAS][TRIM] path=%d 2G=0x%x 5G=0x%x\n", + i, pabias_2g, pabias_5g); + + rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXG, pabias_2g); + rtw89_write_rf(rtwdev, i, RR_BIASA, RR_BIASA_TXA, pabias_5g); + } +} + +static void rtw8852bx_phycap_parsing_gain_comp(struct rtw89_dev *rtwdev, u8 *phycap_map) +{ + static const u32 comp_addrs[][RTW89_SUBBAND_2GHZ_5GHZ_NR] = { + {0x5BB, 0x5BA, 0, 0x5B9, 0x5B8}, + {0x590, 0x58F, 0, 0x58E, 0x58D}, + }; + struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain; + u32 phycap_addr = rtwdev->chip->phycap_addr; + bool valid = false; + int path, i; + u8 data; + + for (path = 0; path < 2; path++) + for (i = 0; i < RTW89_SUBBAND_2GHZ_5GHZ_NR; i++) { + if (comp_addrs[path][i] == 0) + continue; + + data = phycap_map[comp_addrs[path][i] - phycap_addr]; + valid |= _decode_efuse_gain(data, NULL, + &gain->comp[path][i]); + } + + gain->comp_valid = valid; +} + +static int __rtw8852bx_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map) +{ + rtw8852bx_phycap_parsing_power_cal(rtwdev, phycap_map); + rtw8852bx_phycap_parsing_tssi(rtwdev, phycap_map); + rtw8852bx_phycap_parsing_thermal_trim(rtwdev, phycap_map); + rtw8852bx_phycap_parsing_pa_bias_trim(rtwdev, phycap_map); + rtw8852bx_phycap_parsing_gain_comp(rtwdev, phycap_map); + + return 0; +} + +static void __rtw8852bx_power_trim(struct rtw89_dev *rtwdev) +{ + rtw8852bx_thermal_trim(rtwdev); + rtw8852bx_pa_bias_trim(rtwdev); +} + +static void __rtw8852bx_set_channel_mac(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + u8 mac_idx) +{ + u32 rf_mod = rtw89_mac_reg_by_idx(rtwdev, R_AX_WMAC_RFMOD, mac_idx); + u32 sub_carr = rtw89_mac_reg_by_idx(rtwdev, R_AX_TX_SUB_CARRIER_VALUE, mac_idx); + u32 chk_rate = rtw89_mac_reg_by_idx(rtwdev, R_AX_TXRATE_CHK, mac_idx); + u8 txsc20 = 0, txsc40 = 0; + + switch (chan->band_width) { + case RTW89_CHANNEL_WIDTH_80: + txsc40 = rtw89_phy_get_txsc(rtwdev, chan, RTW89_CHANNEL_WIDTH_40); + fallthrough; + case RTW89_CHANNEL_WIDTH_40: + txsc20 = rtw89_phy_get_txsc(rtwdev, chan, RTW89_CHANNEL_WIDTH_20); + break; + default: + break; + } + + switch (chan->band_width) { + case RTW89_CHANNEL_WIDTH_80: + rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(1)); + rtw89_write32(rtwdev, sub_carr, txsc20 | (txsc40 << 4)); + break; + case RTW89_CHANNEL_WIDTH_40: + rtw89_write8_mask(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK, BIT(0)); + rtw89_write32(rtwdev, sub_carr, txsc20); + break; + case RTW89_CHANNEL_WIDTH_20: + rtw89_write8_clr(rtwdev, rf_mod, B_AX_WMAC_RFMOD_MASK); + rtw89_write32(rtwdev, sub_carr, 0); + break; + default: + break; + } + + if (chan->channel > 14) { + rtw89_write8_clr(rtwdev, chk_rate, B_AX_BAND_MODE); + rtw89_write8_set(rtwdev, chk_rate, + B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6); + } else { + rtw89_write8_set(rtwdev, chk_rate, B_AX_BAND_MODE); + rtw89_write8_clr(rtwdev, chk_rate, + B_AX_CHECK_CCK_EN | B_AX_RTS_LIMIT_IN_OFDM6); + } +} + +static const u32 rtw8852bx_sco_barker_threshold[14] = { + 0x1cfea, 0x1d0e1, 0x1d1d7, 0x1d2cd, 0x1d3c3, 0x1d4b9, 0x1d5b0, 0x1d6a6, + 0x1d79c, 0x1d892, 0x1d988, 0x1da7f, 0x1db75, 0x1ddc4 +}; + +static const u32 rtw8852bx_sco_cck_threshold[14] = { + 0x27de3, 0x27f35, 0x28088, 0x281da, 0x2832d, 0x2847f, 0x285d2, 0x28724, + 0x28877, 0x289c9, 0x28b1c, 0x28c6e, 0x28dc1, 0x290ed +}; + +static void rtw8852bx_ctrl_sco_cck(struct 
rtw89_dev *rtwdev, u8 primary_ch) +{ + u8 ch_element = primary_ch - 1; + + rtw89_phy_write32_mask(rtwdev, R_RXSCOBC, B_RXSCOBC_TH, + rtw8852bx_sco_barker_threshold[ch_element]); + rtw89_phy_write32_mask(rtwdev, R_RXSCOCCK, B_RXSCOCCK_TH, + rtw8852bx_sco_cck_threshold[ch_element]); +} + +static u8 rtw8852bx_sco_mapping(u8 central_ch) +{ + if (central_ch == 1) + return 109; + else if (central_ch >= 2 && central_ch <= 6) + return 108; + else if (central_ch >= 7 && central_ch <= 10) + return 107; + else if (central_ch >= 11 && central_ch <= 14) + return 106; + else if (central_ch == 36 || central_ch == 38) + return 51; + else if (central_ch >= 40 && central_ch <= 58) + return 50; + else if (central_ch >= 60 && central_ch <= 64) + return 49; + else if (central_ch == 100 || central_ch == 102) + return 48; + else if (central_ch >= 104 && central_ch <= 126) + return 47; + else if (central_ch >= 128 && central_ch <= 151) + return 46; + else if (central_ch >= 153 && central_ch <= 177) + return 45; + else + return 0; +} + +struct rtw8852bx_bb_gain { + u32 gain_g[BB_PATH_NUM_8852BX]; + u32 gain_a[BB_PATH_NUM_8852BX]; + u32 gain_mask; +}; + +static const struct rtw8852bx_bb_gain bb_gain_lna[LNA_GAIN_NUM] = { + { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740}, + .gain_mask = 0x00ff0000 }, + { .gain_g = {0x4678, 0x475C}, .gain_a = {0x45DC, 0x4740}, + .gain_mask = 0xff000000 }, + { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744}, + .gain_mask = 0x000000ff }, + { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744}, + .gain_mask = 0x0000ff00 }, + { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744}, + .gain_mask = 0x00ff0000 }, + { .gain_g = {0x467C, 0x4760}, .gain_a = {0x4660, 0x4744}, + .gain_mask = 0xff000000 }, + { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748}, + .gain_mask = 0x000000ff }, +}; + +static const struct rtw8852bx_bb_gain bb_gain_tia[TIA_GAIN_NUM] = { + { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748}, + .gain_mask = 0x00ff0000 }, + { .gain_g = {0x4680, 0x4764}, .gain_a = {0x4664, 0x4748}, + .gain_mask = 0xff000000 }, +}; + +static void rtw8852bx_set_gain_error(struct rtw89_dev *rtwdev, + enum rtw89_subband subband, + enum rtw89_rf_path path) +{ + const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax; + u8 gain_band = rtw89_subband_to_bb_gain_band(subband); + s32 val; + u32 reg; + u32 mask; + int i; + + for (i = 0; i < LNA_GAIN_NUM; i++) { + if (subband == RTW89_CH_2G) + reg = bb_gain_lna[i].gain_g[path]; + else + reg = bb_gain_lna[i].gain_a[path]; + + mask = bb_gain_lna[i].gain_mask; + val = gain->lna_gain[gain_band][path][i]; + rtw89_phy_write32_mask(rtwdev, reg, mask, val); + } + + for (i = 0; i < TIA_GAIN_NUM; i++) { + if (subband == RTW89_CH_2G) + reg = bb_gain_tia[i].gain_g[path]; + else + reg = bb_gain_tia[i].gain_a[path]; + + mask = bb_gain_tia[i].gain_mask; + val = gain->tia_gain[gain_band][path][i]; + rtw89_phy_write32_mask(rtwdev, reg, mask, val); + } +} + +static void rtw8852bt_ext_loss_avg_update(struct rtw89_dev *rtwdev, + s8 ext_loss_a, s8 ext_loss_b) +{ + s8 ext_loss_avg; + u64 linear; + u8 pwrofst; + + if (ext_loss_a == ext_loss_b) { + ext_loss_avg = ext_loss_a; + } else { + linear = rtw89_db_2_linear(abs(ext_loss_a - ext_loss_b)) + 1; + linear = DIV_ROUND_CLOSEST_ULL(linear / 2, 1 << RTW89_LINEAR_FRAC_BITS); + ext_loss_avg = rtw89_linear_2_db(linear); + ext_loss_avg += min(ext_loss_a, ext_loss_b); + } + + pwrofst = max(DIV_ROUND_CLOSEST(ext_loss_avg, 4) + 16, EDCCA_PWROFST_DEFAULT); + + rtw89_phy_write32_mask(rtwdev, 
R_PWOFST, B_PWOFST, pwrofst); +} + +static void rtw8852bx_set_gain_offset(struct rtw89_dev *rtwdev, + enum rtw89_subband subband, + enum rtw89_phy_idx phy_idx) +{ + static const u32 gain_err_addr[2] = {R_P0_AGC_RSVD, R_P1_AGC_RSVD}; + static const u32 rssi_ofst_addr[2] = {R_PATH0_G_TIA1_LNA6_OP1DB_V1, + R_PATH1_G_TIA1_LNA6_OP1DB_V1}; + struct rtw89_hal *hal = &rtwdev->hal; + struct rtw89_phy_efuse_gain *efuse_gain = &rtwdev->efuse_gain; + enum rtw89_gain_offset gain_ofdm_band; + s8 ext_loss_a = 0, ext_loss_b = 0; + s32 offset_a, offset_b; + s32 offset_ofdm, offset_cck; + s32 tmp; + u8 path; + + if (!efuse_gain->comp_valid) + goto next; + + for (path = RF_PATH_A; path < BB_PATH_NUM_8852BX; path++) { + tmp = efuse_gain->comp[path][subband]; + tmp = clamp_t(s32, tmp << 2, S8_MIN, S8_MAX); + rtw89_phy_write32_mask(rtwdev, gain_err_addr[path], MASKBYTE0, tmp); + } + +next: + if (!efuse_gain->offset_valid) + goto ext_loss; + + gain_ofdm_band = rtw89_subband_to_gain_offset_band_of_ofdm(subband); + + offset_a = -efuse_gain->offset[RF_PATH_A][gain_ofdm_band]; + offset_b = -efuse_gain->offset[RF_PATH_B][gain_ofdm_band]; + + tmp = -((offset_a << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2)); + tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX); + rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[RF_PATH_A], B_PATH0_R_G_OFST_MASK, tmp); + + tmp = -((offset_b << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2)); + tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX); + rtw89_phy_write32_mask(rtwdev, rssi_ofst_addr[RF_PATH_B], B_PATH0_R_G_OFST_MASK, tmp); + + if (hal->antenna_rx == RF_B) { + offset_ofdm = -efuse_gain->offset[RF_PATH_B][gain_ofdm_band]; + offset_cck = -efuse_gain->offset[RF_PATH_B][0]; + } else { + offset_ofdm = -efuse_gain->offset[RF_PATH_A][gain_ofdm_band]; + offset_cck = -efuse_gain->offset[RF_PATH_A][0]; + } + + tmp = (offset_ofdm << 4) + efuse_gain->offset_base[RTW89_PHY_0]; + tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX); + rtw89_phy_write32_idx(rtwdev, R_P0_RPL1, B_P0_RPL1_BIAS_MASK, tmp, phy_idx); + + tmp = (offset_ofdm << 4) + efuse_gain->rssi_base[RTW89_PHY_0]; + tmp = clamp_t(s32, tmp, S8_MIN, S8_MAX); + rtw89_phy_write32_idx(rtwdev, R_P1_RPL1, B_P0_RPL1_BIAS_MASK, tmp, phy_idx); + + if (subband == RTW89_CH_2G) { + tmp = (offset_cck << 3) + (efuse_gain->offset_base[RTW89_PHY_0] >> 1); + tmp = clamp_t(s32, tmp, S8_MIN >> 1, S8_MAX >> 1); + rtw89_phy_write32_mask(rtwdev, R_RX_RPL_OFST, + B_RX_RPL_OFST_CCK_MASK, tmp); + } + + ext_loss_a = (offset_a << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2); + ext_loss_b = (offset_b << 2) + (efuse_gain->offset_base[RTW89_PHY_0] >> 2); + +ext_loss: + if (rtwdev->chip->chip_id == RTL8852BT) + rtw8852bt_ext_loss_avg_update(rtwdev, ext_loss_a, ext_loss_b); +} + +static +void rtw8852bx_set_rxsc_rpl_comp(struct rtw89_dev *rtwdev, enum rtw89_subband subband) +{ + const struct rtw89_phy_bb_gain_info *gain = &rtwdev->bb_gain.ax; + u8 band = rtw89_subband_to_bb_gain_band(subband); + u32 val; + + val = u32_encode_bits((gain->rpl_ofst_20[band][RF_PATH_A] + + gain->rpl_ofst_20[band][RF_PATH_B]) >> 1, B_P0_RPL1_20_MASK) | + u32_encode_bits((gain->rpl_ofst_40[band][RF_PATH_A][0] + + gain->rpl_ofst_40[band][RF_PATH_B][0]) >> 1, B_P0_RPL1_40_MASK) | + u32_encode_bits((gain->rpl_ofst_40[band][RF_PATH_A][1] + + gain->rpl_ofst_40[band][RF_PATH_B][1]) >> 1, B_P0_RPL1_41_MASK); + val >>= B_P0_RPL1_SHIFT; + rtw89_phy_write32_mask(rtwdev, R_P0_RPL1, B_P0_RPL1_MASK, val); + rtw89_phy_write32_mask(rtwdev, R_P1_RPL1, B_P0_RPL1_MASK, val); + + val = 
u32_encode_bits((gain->rpl_ofst_40[band][RF_PATH_A][2] + + gain->rpl_ofst_40[band][RF_PATH_B][2]) >> 1, B_P0_RTL2_42_MASK) | + u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][0] + + gain->rpl_ofst_80[band][RF_PATH_B][0]) >> 1, B_P0_RTL2_80_MASK) | + u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][1] + + gain->rpl_ofst_80[band][RF_PATH_B][1]) >> 1, B_P0_RTL2_81_MASK) | + u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][10] + + gain->rpl_ofst_80[band][RF_PATH_B][10]) >> 1, B_P0_RTL2_8A_MASK); + rtw89_phy_write32(rtwdev, R_P0_RPL2, val); + rtw89_phy_write32(rtwdev, R_P1_RPL2, val); + + val = u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][2] + + gain->rpl_ofst_80[band][RF_PATH_B][2]) >> 1, B_P0_RTL3_82_MASK) | + u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][3] + + gain->rpl_ofst_80[band][RF_PATH_B][3]) >> 1, B_P0_RTL3_83_MASK) | + u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][4] + + gain->rpl_ofst_80[band][RF_PATH_B][4]) >> 1, B_P0_RTL3_84_MASK) | + u32_encode_bits((gain->rpl_ofst_80[band][RF_PATH_A][9] + + gain->rpl_ofst_80[band][RF_PATH_B][9]) >> 1, B_P0_RTL3_89_MASK); + rtw89_phy_write32(rtwdev, R_P0_RPL3, val); + rtw89_phy_write32(rtwdev, R_P1_RPL3, val); +} + +static void rtw8852bx_ctrl_ch(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + u8 central_ch = chan->channel; + u8 subband = chan->subband_type; + u8 sco_comp; + bool is_2g = central_ch <= 14; + + /* Path A */ + if (is_2g) + rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1, + B_PATH0_BAND_SEL_MSK_V1, 1, phy_idx); + else + rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1, + B_PATH0_BAND_SEL_MSK_V1, 0, phy_idx); + + /* Path B */ + if (is_2g) + rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1, + B_PATH1_BAND_SEL_MSK_V1, 1, phy_idx); + else + rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1, + B_PATH1_BAND_SEL_MSK_V1, 0, phy_idx); + + /* SCO compensate FC setting */ + sco_comp = rtw8852bx_sco_mapping(central_ch); + rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_INV, sco_comp, phy_idx); + + if (chan->band_type == RTW89_BAND_6G) + return; + + /* CCK parameters */ + if (central_ch == 14) { + rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, 0x3b13ff); + rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, 0x1c42de); + rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfdb0ad); + rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, 0xf60f6e); + rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, 0xfd8f92); + rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0x2d011); + rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0x1c02c); + rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, 0xfff00a); + } else { + rtw89_phy_write32_mask(rtwdev, R_TXFIR0, B_TXFIR_C01, 0x3d23ff); + rtw89_phy_write32_mask(rtwdev, R_TXFIR2, B_TXFIR_C23, 0x29b354); + rtw89_phy_write32_mask(rtwdev, R_TXFIR4, B_TXFIR_C45, 0xfc1c8); + rtw89_phy_write32_mask(rtwdev, R_TXFIR6, B_TXFIR_C67, 0xfdb053); + rtw89_phy_write32_mask(rtwdev, R_TXFIR8, B_TXFIR_C89, 0xf86f9a); + rtw89_phy_write32_mask(rtwdev, R_TXFIRA, B_TXFIR_CAB, 0xfaef92); + rtw89_phy_write32_mask(rtwdev, R_TXFIRC, B_TXFIR_CCD, 0xfe5fcc); + rtw89_phy_write32_mask(rtwdev, R_TXFIRE, B_TXFIR_CEF, 0xffdff5); + } + + rtw8852bx_set_gain_error(rtwdev, subband, RF_PATH_A); + rtw8852bx_set_gain_error(rtwdev, subband, RF_PATH_B); + rtw8852bx_set_gain_offset(rtwdev, subband, phy_idx); + rtw8852bx_set_rxsc_rpl_comp(rtwdev, subband); +} + +static void rtw8852b_bw_setting(struct rtw89_dev *rtwdev, u8 bw, u8 path) +{ + static 
const u32 adc_sel[2] = {0xC0EC, 0xC1EC}; + static const u32 wbadc_sel[2] = {0xC0E4, 0xC1E4}; + + switch (bw) { + case RTW89_CHANNEL_WIDTH_5: + rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x1); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x0); + break; + case RTW89_CHANNEL_WIDTH_10: + rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x2); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x1); + break; + case RTW89_CHANNEL_WIDTH_20: + rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2); + break; + case RTW89_CHANNEL_WIDTH_40: + rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2); + break; + case RTW89_CHANNEL_WIDTH_80: + rtw89_phy_write32_mask(rtwdev, adc_sel[path], 0x6000, 0x0); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], 0x30, 0x2); + break; + default: + rtw89_warn(rtwdev, "Failed to set ADC\n"); + } +} + +static +void rtw8852bt_adc_cfg(struct rtw89_dev *rtwdev, u8 bw, u8 path) +{ + static const u32 rck_reset_count[2] = {0xC0E8, 0xC1E8}; + static const u32 adc_op5_bw_sel[2] = {0xC0D8, 0xC1D8}; + static const u32 adc_sample_td[2] = {0xC0D4, 0xC1D4}; + static const u32 adc_rst_cycle[2] = {0xC0EC, 0xC1EC}; + static const u32 decim_filter[2] = {0xC0EC, 0xC1EC}; + static const u32 rck_offset[2] = {0xC0C4, 0xC1C4}; + static const u32 rx_adc_clk[2] = {0x12A0, 0x32A0}; + static const u32 wbadc_sel[2] = {0xC0E4, 0xC1E4}; + static const u32 idac2_1[2] = {0xC0D4, 0xC1D4}; + static const u32 idac2[2] = {0xC0D4, 0xC1D4}; + static const u32 upd_clk_adc = {0x704}; + + if (rtwdev->chip->chip_id != RTL8852BT) + return; + + rtw89_phy_write32_mask(rtwdev, idac2[path], B_P0_CFCH_CTL, 0x8); + rtw89_phy_write32_mask(rtwdev, rck_reset_count[path], B_ADCMOD_LP, 0x9); + rtw89_phy_write32_mask(rtwdev, wbadc_sel[path], B_WDADC_SEL, 0x2); + rtw89_phy_write32_mask(rtwdev, rx_adc_clk[path], B_P0_RXCK_ADJ, 0x49); + rtw89_phy_write32_mask(rtwdev, decim_filter[path], B_DCIM_FR, 0x0); + + switch (bw) { + case RTW89_CHANNEL_WIDTH_5: + case RTW89_CHANNEL_WIDTH_10: + case RTW89_CHANNEL_WIDTH_20: + case RTW89_CHANNEL_WIDTH_40: + rtw89_phy_write32_mask(rtwdev, idac2_1[path], B_P0_CFCH_EN, 0x2); + rtw89_phy_write32_mask(rtwdev, adc_sample_td[path], B_P0_CFCH_BW0, 0x3); + rtw89_phy_write32_mask(rtwdev, adc_op5_bw_sel[path], B_P0_CFCH_BW1, 0xf); + rtw89_phy_write32_mask(rtwdev, rck_offset[path], B_DRCK_MUL, 0x0); + /* Tx TSSI ADC update */ + rtw89_phy_write32_mask(rtwdev, upd_clk_adc, B_RSTB_ASYNC_BW80, 0); + + if (rtwdev->efuse.rfe_type >= 51) + rtw89_phy_write32_mask(rtwdev, adc_rst_cycle[path], B_DCIM_RC, 0x2); + else + rtw89_phy_write32_mask(rtwdev, adc_rst_cycle[path], B_DCIM_RC, 0x3); + break; + case RTW89_CHANNEL_WIDTH_80: + rtw89_phy_write32_mask(rtwdev, idac2_1[path], B_P0_CFCH_EN, 0x2); + rtw89_phy_write32_mask(rtwdev, adc_sample_td[path], B_P0_CFCH_BW0, 0x2); + rtw89_phy_write32_mask(rtwdev, adc_op5_bw_sel[path], B_P0_CFCH_BW1, 0x8); + rtw89_phy_write32_mask(rtwdev, rck_offset[path], B_DRCK_MUL, 0x0); + rtw89_phy_write32_mask(rtwdev, adc_rst_cycle[path], B_DCIM_RC, 0x3); + /* Tx TSSI ADC update */ + rtw89_phy_write32_mask(rtwdev, upd_clk_adc, B_RSTB_ASYNC_BW80, 1); + break; + case RTW89_CHANNEL_WIDTH_160: + rtw89_phy_write32_mask(rtwdev, idac2_1[path], B_P0_CFCH_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, adc_sample_td[path], B_P0_CFCH_BW0, 0x2); + rtw89_phy_write32_mask(rtwdev, adc_op5_bw_sel[path], B_P0_CFCH_BW1, 0x4); + 
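/* Unlike the narrower bandwidths above, 160 MHz programs a non-zero DRCK multiplier. */ + 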
rtw89_phy_write32_mask(rtwdev, rck_offset[path], B_DRCK_MUL, 0x6); + rtw89_phy_write32_mask(rtwdev, adc_rst_cycle[path], B_DCIM_RC, 0x3); + /* Tx TSSI ADC update */ + rtw89_phy_write32_mask(rtwdev, upd_clk_adc, B_RSTB_ASYNC_BW80, 2); + break; + default: + rtw89_warn(rtwdev, "Fail to set ADC\n"); + break; + } +} + +static void rtw8852bx_ctrl_bw(struct rtw89_dev *rtwdev, u8 pri_ch, u8 bw, + enum rtw89_phy_idx phy_idx) +{ + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; + u32 rx_path_0; + u32 val; + + rx_path_0 = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, phy_idx); + + switch (bw) { + case RTW89_CHANNEL_WIDTH_5: + rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x1, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx); + + /*Set RF mode at 3 */ + rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, + B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, + B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx); + if (chip_id == RTL8852BT) { + rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1, + B_PATH0_BAND_NRBW_EN_V1, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1, + B_PATH1_BAND_NRBW_EN_V1, 0x0, phy_idx); + } + break; + case RTW89_CHANNEL_WIDTH_10: + rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x2, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx); + + /*Set RF mode at 3 */ + rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, + B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, + B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx); + if (chip_id == RTL8852BT) { + rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1, + B_PATH0_BAND_NRBW_EN_V1, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1, + B_PATH1_BAND_NRBW_EN_V1, 0x0, phy_idx); + } + break; + case RTW89_CHANNEL_WIDTH_20: + rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, 0x0, phy_idx); + + /*Set RF mode at 3 */ + rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, + B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, + B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx); + if (chip_id == RTL8852BT) { + rtw89_phy_write32_idx(rtwdev, R_PATH0_BAND_SEL_V1, + B_PATH0_BAND_NRBW_EN_V1, 0x1, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_PATH1_BAND_SEL_V1, + B_PATH1_BAND_NRBW_EN_V1, 0x1, phy_idx); + } + break; + case RTW89_CHANNEL_WIDTH_40: + rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x1, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, + pri_ch, phy_idx); + + /*Set RF mode at 3 */ + rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, + B_P0_RFMODE_ORI_RX_ALL, 0x333, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, + B_P1_RFMODE_ORI_RX_ALL, 0x333, phy_idx); + /*CCK primary channel */ + if (pri_ch == RTW89_SC_20_UPPER) + rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 1); + else + rtw89_phy_write32_mask(rtwdev, R_RXSC, B_RXSC_EN, 0); + + break; + case RTW89_CHANNEL_WIDTH_80: + rtw89_phy_write32_idx(rtwdev, R_FC0_BW_V1, B_FC0_BW_SET, 0x2, phy_idx); + 
rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_SBW, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_CHBW_MOD_PRICH, + pri_ch, phy_idx); + + /*Set RF mode at A */ + val = chip_id == RTL8852BT ? 0x333 : 0xaaa; + rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, + B_P0_RFMODE_ORI_RX_ALL, val, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, + B_P1_RFMODE_ORI_RX_ALL, val, phy_idx); + break; + default: + rtw89_warn(rtwdev, "Fail to switch bw (bw:%d, pri ch:%d)\n", bw, + pri_ch); + } + + if (chip_id == RTL8852B) { + rtw8852b_bw_setting(rtwdev, bw, RF_PATH_A); + rtw8852b_bw_setting(rtwdev, bw, RF_PATH_B); + } else if (chip_id == RTL8852BT) { + rtw8852bt_adc_cfg(rtwdev, bw, RF_PATH_A); + rtw8852bt_adc_cfg(rtwdev, bw, RF_PATH_B); + } + + if (rx_path_0 == 0x1) + rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_ORI_RX, + B_P1_RFMODE_ORI_RX_ALL, 0x111, phy_idx); + else if (rx_path_0 == 0x2) + rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_ORI_RX, + B_P0_RFMODE_ORI_RX_ALL, 0x111, phy_idx); +} + +static void rtw8852bx_ctrl_cck_en(struct rtw89_dev *rtwdev, bool cck_en) +{ + if (cck_en) { + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 1); + rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0); + } else { + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_ENABLE_CCK, 0); + rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 1); + } +} + +static void rtw8852bx_5m_mask(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + u8 pri_ch = chan->pri_ch_idx; + bool mask_5m_low; + bool mask_5m_en; + + switch (chan->band_width) { + case RTW89_CHANNEL_WIDTH_40: + /* Prich=1: Mask 5M High, Prich=2: Mask 5M Low */ + mask_5m_en = true; + mask_5m_low = pri_ch == RTW89_SC_20_LOWER; + break; + case RTW89_CHANNEL_WIDTH_80: + /* Prich=3: Mask 5M High, Prich=4: Mask 5M Low, Else: Disable */ + mask_5m_en = pri_ch == RTW89_SC_20_UPMOST || + pri_ch == RTW89_SC_20_LOWEST; + mask_5m_low = pri_ch == RTW89_SC_20_LOWEST; + break; + default: + mask_5m_en = false; + break; + } + + if (!mask_5m_en) { + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x0); + rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT_V1, + B_ASSIGN_SBD_OPT_EN_V1, 0x0, phy_idx); + return; + } + + if (mask_5m_low) { + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_TH, 0x4); + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB2, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB0, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_TH, 0x4); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB2, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB0, 0x1); + } else { + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_TH, 0x4); + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB2, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH0_5MDET_V1, B_PATH0_5MDET_SB0, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_TH, 0x4); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, B_PATH1_5MDET_SB2, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH1_5MDET_V1, 
B_PATH1_5MDET_SB0, 0x0); + } + rtw89_phy_write32_idx(rtwdev, R_ASSIGN_SBD_OPT_V1, + B_ASSIGN_SBD_OPT_EN_V1, 0x1, phy_idx); +} + +static void __rtw8852bx_bb_reset_all(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, 0x7, phy_idx); + fsleep(1); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_S0_HW_SI_DIS, B_S0_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_S1_HW_SI_DIS, B_S1_HW_SI_DIS_W_R_TRIG, 0x0, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, phy_idx); +} + +static void rtw8852bx_bb_macid_ctrl_init(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + u32 addr; + + for (addr = R_AX_PWR_MACID_LMT_TABLE0; + addr <= R_AX_PWR_MACID_LMT_TABLE127; addr += 4) + rtw89_mac_txpwr_write32(rtwdev, phy_idx, addr, 0); +} + +static void __rtw8852bx_bb_sethw(struct rtw89_dev *rtwdev) +{ + struct rtw89_phy_efuse_gain *gain = &rtwdev->efuse_gain; + + rtw89_phy_write32_clr(rtwdev, R_P0_EN_SOUND_WO_NDP, B_P0_EN_SOUND_WO_NDP); + rtw89_phy_write32_clr(rtwdev, R_P1_EN_SOUND_WO_NDP, B_P1_EN_SOUND_WO_NDP); + + rtw8852bx_bb_macid_ctrl_init(rtwdev, RTW89_PHY_0); + + /* read these registers after loading BB parameters */ + gain->offset_base[RTW89_PHY_0] = + rtw89_phy_read32_mask(rtwdev, R_P0_RPL1, B_P0_RPL1_BIAS_MASK); + gain->rssi_base[RTW89_PHY_0] = + rtw89_phy_read32_mask(rtwdev, R_P1_RPL1, B_P0_RPL1_BIAS_MASK); +} + +static void rtw8852bx_bb_set_pop(struct rtw89_dev *rtwdev) +{ + if (rtwdev->hw->conf.flags & IEEE80211_CONF_MONITOR) + rtw89_phy_write32_clr(rtwdev, R_PKT_CTRL, B_PKT_POP_EN); +} + +static u32 rtw8852bt_spur_freq(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan) +{ + u8 center_chan = chan->channel; + + switch (chan->band_type) { + case RTW89_BAND_5G: + if (center_chan == 151 || center_chan == 153 || + center_chan == 155 || center_chan == 163) + return 5760; + break; + default: + break; + } + + return 0; +} + +#define CARRIER_SPACING_312_5 312500 /* 312.5 kHz */ +#define CARRIER_SPACING_78_125 78125 /* 78.125 kHz */ +#define MAX_TONE_NUM 2048 + +static void rtw8852bt_set_csi_tone_idx(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + s32 freq_diff, csi_idx, csi_tone_idx; + u32 spur_freq; + + spur_freq = rtw8852bt_spur_freq(rtwdev, chan); + if (spur_freq == 0) { + rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_EN_V1, B_SEG0CSI_EN, + 0, phy_idx); + return; + } + + freq_diff = (spur_freq - chan->freq) * 1000000; + csi_idx = s32_div_u32_round_closest(freq_diff, CARRIER_SPACING_78_125); + s32_div_u32_round_down(csi_idx, MAX_TONE_NUM, &csi_tone_idx); + + rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_V1, B_SEG0CSI_IDX, + csi_tone_idx, phy_idx); + rtw89_phy_write32_idx(rtwdev, R_SEG0CSI_EN_V1, B_SEG0CSI_EN, 1, phy_idx); +} + +static +void __rtw8852bx_set_channel_bb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; + bool cck_en = chan->channel <= 14; + u8 pri_ch_idx = chan->pri_ch_idx; + u8 band = chan->band_type, chan_idx; + + if (cck_en) + rtw8852bx_ctrl_sco_cck(rtwdev, chan->primary_channel); + + rtw8852bx_ctrl_ch(rtwdev, chan, phy_idx); + rtw8852bx_ctrl_bw(rtwdev, pri_ch_idx, chan->band_width, phy_idx); + 
rtw8852bx_ctrl_cck_en(rtwdev, cck_en); + if (chip_id == RTL8852BT) + rtw8852bt_set_csi_tone_idx(rtwdev, chan, phy_idx); + if (chip_id == RTL8852B && chan->band_type == RTW89_BAND_5G) { + rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, + B_PATH0_BT_SHARE_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, + B_PATH0_BTG_PATH_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1, + B_PATH1_BT_SHARE_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1, + B_PATH1_BTG_PATH_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x0); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x0); + rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1, + B_BT_DYN_DC_EST_EN_MSK, 0x0); + rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x0); + } + chan_idx = rtw89_encode_chan_idx(rtwdev, chan->primary_channel, band); + rtw89_phy_write32_mask(rtwdev, R_MAC_PIN_SEL, B_CH_IDX_SEG0, chan_idx); + rtw8852bx_5m_mask(rtwdev, chan, phy_idx); + rtw8852bx_bb_set_pop(rtwdev); + __rtw8852bx_bb_reset_all(rtwdev, phy_idx); +} + +static u32 rtw8852bx_bb_cal_txpwr_ref(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, s16 ref) +{ + const u16 tssi_16dbm_cw = 0x12c; + const u8 base_cw_0db = 0x27; + const s8 ofst_int = 0; + s16 pwr_s10_3; + s16 rf_pwr_cw; + u16 bb_pwr_cw; + u32 pwr_cw; + u32 tssi_ofst_cw; + + pwr_s10_3 = (ref << 1) + (s16)(ofst_int) + (s16)(base_cw_0db << 3); + bb_pwr_cw = u16_get_bits(pwr_s10_3, GENMASK(2, 0)); + rf_pwr_cw = u16_get_bits(pwr_s10_3, GENMASK(8, 3)); + rf_pwr_cw = clamp_t(s16, rf_pwr_cw, 15, 63); + pwr_cw = (rf_pwr_cw << 3) | bb_pwr_cw; + + tssi_ofst_cw = (u32)((s16)tssi_16dbm_cw + (ref << 1) - (16 << 3)); + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "[TXPWR] tssi_ofst_cw=%d rf_cw=0x%x bb_cw=0x%x\n", + tssi_ofst_cw, rf_pwr_cw, bb_pwr_cw); + + return u32_encode_bits(tssi_ofst_cw, B_DPD_TSSI_CW) | + u32_encode_bits(pwr_cw, B_DPD_PWR_CW) | + u32_encode_bits(ref, B_DPD_REF); +} + +static void rtw8852bx_set_txpwr_ref(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + static const u32 addr[RF_PATH_NUM_8852BX] = {0x5800, 0x7800}; + const u32 mask = B_DPD_TSSI_CW | B_DPD_PWR_CW | B_DPD_REF; + const u8 ofst_ofdm = 0x4; + const u8 ofst_cck = 0x8; + const s16 ref_ofdm = 0; + const s16 ref_cck = 0; + u32 val; + u8 i; + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set txpwr reference\n"); + + rtw89_mac_txpwr_write32_mask(rtwdev, phy_idx, R_AX_PWR_RATE_CTRL, + B_AX_PWR_REF, 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb ofdm txpwr ref\n"); + val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_ofdm); + + for (i = 0; i < RF_PATH_NUM_8852BX; i++) + rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_ofdm, mask, val, + phy_idx); + + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, "[TXPWR] set bb cck txpwr ref\n"); + val = rtw8852bx_bb_cal_txpwr_ref(rtwdev, phy_idx, ref_cck); + + for (i = 0; i < RF_PATH_NUM_8852BX; i++) + rtw89_phy_write32_idx(rtwdev, addr[i] + ofst_cck, mask, val, + phy_idx); +} + +static void rtw8852bx_bb_set_tx_shape_dfir(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + u8 tx_shape_idx, + enum rtw89_phy_idx phy_idx) +{ +#define __DFIR_CFG_ADDR(i) (R_TXFIR0 + ((i) << 2)) +#define __DFIR_CFG_MASK 0xffffffff +#define __DFIR_CFG_NR 8 +#define __DECL_DFIR_PARAM(_name, _val...) 
\ + static const u32 param_ ## _name[] = {_val}; \ + static_assert(ARRAY_SIZE(param_ ## _name) == __DFIR_CFG_NR) + + __DECL_DFIR_PARAM(flat, + 0x023D23FF, 0x0029B354, 0x000FC1C8, 0x00FDB053, + 0x00F86F9A, 0x06FAEF92, 0x00FE5FCC, 0x00FFDFF5); + __DECL_DFIR_PARAM(sharp, + 0x023D83FF, 0x002C636A, 0x0013F204, 0x00008090, + 0x00F87FB0, 0x06F99F83, 0x00FDBFBA, 0x00003FF5); + __DECL_DFIR_PARAM(sharp_14, + 0x023B13FF, 0x001C42DE, 0x00FDB0AD, 0x00F60F6E, + 0x00FD8F92, 0x0602D011, 0x0001C02C, 0x00FFF00A); + u8 ch = chan->channel; + const u32 *param; + u32 addr; + int i; + + if (ch > 14) { + rtw89_warn(rtwdev, + "set tx shape dfir by unknown ch: %d on 2G\n", ch); + return; + } + + if (ch == 14) + param = param_sharp_14; + else + param = tx_shape_idx == 0 ? param_flat : param_sharp; + + for (i = 0; i < __DFIR_CFG_NR; i++) { + addr = __DFIR_CFG_ADDR(i); + rtw89_debug(rtwdev, RTW89_DBG_TXPWR, + "set tx shape dfir: 0x%x: 0x%x\n", addr, param[i]); + rtw89_phy_write32_idx(rtwdev, addr, __DFIR_CFG_MASK, param[i], + phy_idx); + } + +#undef __DECL_DFIR_PARAM +#undef __DFIR_CFG_NR +#undef __DFIR_CFG_MASK +#undef __DFIR_CFG_ADDR +} + +static void rtw8852bx_set_tx_shape(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + const struct rtw89_rfe_parms *rfe_parms = rtwdev->rfe_parms; + u8 band = chan->band_type; + u8 regd = rtw89_regd_get(rtwdev, band); + u8 tx_shape_cck = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_CCK][regd]; + u8 tx_shape_ofdm = (*rfe_parms->tx_shape.lmt)[band][RTW89_RS_OFDM][regd]; + + if (band == RTW89_BAND_2G) + rtw8852bx_bb_set_tx_shape_dfir(rtwdev, chan, tx_shape_cck, phy_idx); + + rtw89_phy_write32_mask(rtwdev, R_DCFO_OPT, B_TXSHAPE_TRIANGULAR_CFG, + tx_shape_ofdm); +} + +static void __rtw8852bx_set_txpwr(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + rtw89_phy_set_txpwr_byrate(rtwdev, chan, phy_idx); + rtw89_phy_set_txpwr_offset(rtwdev, chan, phy_idx); + rtw8852bx_set_tx_shape(rtwdev, chan, phy_idx); + rtw89_phy_set_txpwr_limit(rtwdev, chan, phy_idx); + rtw89_phy_set_txpwr_limit_ru(rtwdev, chan, phy_idx); +} + +static void __rtw8852bx_set_txpwr_ctrl(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + rtw8852bx_set_txpwr_ref(rtwdev, phy_idx); +} + +static +void __rtw8852bx_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, + s8 pw_ofst, enum rtw89_mac_idx mac_idx) +{ + u32 reg; + + if (pw_ofst < -16 || pw_ofst > 15) { + rtw89_warn(rtwdev, "[ULTB] Err pwr_offset=%d\n", pw_ofst); + return; + } + + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_CTRL, mac_idx); + rtw89_write32_set(rtwdev, reg, B_AX_PWR_UL_TB_CTRL_EN); + + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_1T, mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_1T_MASK, pw_ofst); + + pw_ofst = max_t(s8, pw_ofst - 3, -16); + reg = rtw89_mac_reg_by_idx(rtwdev, R_AX_PWR_UL_TB_2T, mac_idx); + rtw89_write32_mask(rtwdev, reg, B_AX_PWR_UL_TB_2T_MASK, pw_ofst); +} + +static int +__rtw8852bx_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + int ret; + + ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL2, 0x07763333); + if (ret) + return ret; + + ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_COEXT_CTRL, 0x01ebf000); + if (ret) + return ret; + + ret = rtw89_mac_txpwr_write32(rtwdev, phy_idx, R_AX_PWR_UL_CTRL0, 0x0002f8ff); + if (ret) + return ret; + + rtw8852bx_set_txpwr_ul_tb_offset(rtwdev, 0, phy_idx == RTW89_PHY_1 ?
+ RTW89_MAC_1 : RTW89_MAC_0); + + return 0; +} + +static +void __rtw8852bx_bb_set_plcp_tx(struct rtw89_dev *rtwdev) +{ + const struct rtw89_reg3_def *def = rtw8852bx_pmac_ht20_mcs7_tbl; + u8 i; + + for (i = 0; i < ARRAY_SIZE(rtw8852bx_pmac_ht20_mcs7_tbl); i++, def++) + rtw89_phy_write32_mask(rtwdev, def->addr, def->mask, def->data); +} + +static void rtw8852bx_stop_pmac_tx(struct rtw89_dev *rtwdev, + struct rtw8852bx_bb_pmac_info *tx_info, + enum rtw89_phy_idx idx) +{ + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Stop Tx"); + if (tx_info->mode == CONT_TX) + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 0, idx); + else if (tx_info->mode == PKTS_TX) + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 0, idx); +} + +static void rtw8852bx_start_pmac_tx(struct rtw89_dev *rtwdev, + struct rtw8852bx_bb_pmac_info *tx_info, + enum rtw89_phy_idx idx) +{ + enum rtw8852bx_pmac_mode mode = tx_info->mode; + u32 pkt_cnt = tx_info->tx_cnt; + u16 period = tx_info->period; + + if (mode == CONT_TX && !tx_info->is_cck) { + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_CTX_EN, 1, idx); + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CTx Start"); + } else if (mode == PKTS_TX) { + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, B_PMAC_PTX_EN, 1, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_PRD, + B_PMAC_TX_PRD_MSK, period, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CNT, B_PMAC_TX_CNT_MSK, + pkt_cnt, idx); + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC PTx Start"); + } + + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 1, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_TX_CTRL, B_PMAC_TXEN_DIS, 0, idx); +} + +static +void rtw8852bx_bb_set_pmac_tx(struct rtw89_dev *rtwdev, + struct rtw8852bx_bb_pmac_info *tx_info, + enum rtw89_phy_idx idx) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + + if (!tx_info->en_pmac_tx) { + rtw8852bx_stop_pmac_tx(rtwdev, tx_info, idx); + rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0, idx); + if (chan->band_type == RTW89_BAND_2G) + rtw89_phy_write32_clr(rtwdev, R_RXCCA, B_RXCCA_DIS); + return; + } + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC Tx Enable"); + + rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 1, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 1, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0x3f, idx); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 1, idx); + rtw89_phy_write32_set(rtwdev, R_RXCCA, B_RXCCA_DIS); + rtw89_phy_write32_idx(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_ALL, 1, idx); + + rtw8852bx_start_pmac_tx(rtwdev, tx_info, idx); +} + +static +void __rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable, + u16 tx_cnt, u16 period, u16 tx_time, + enum rtw89_phy_idx idx) +{ + struct rtw8852bx_bb_pmac_info tx_info = {0}; + + tx_info.en_pmac_tx = enable; + tx_info.is_cck = 0; + tx_info.mode = PKTS_TX; + tx_info.tx_cnt = tx_cnt; + tx_info.period = period; + tx_info.tx_time = tx_time; + + rtw8852bx_bb_set_pmac_tx(rtwdev, &tx_info, idx); +} + +static +void __rtw8852bx_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm, + enum rtw89_phy_idx idx) +{ + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx PWR = %d", pwr_dbm); + + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx); + rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, pwr_dbm, idx); +} + +static +void __rtw8852bx_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path) +{ + 
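/* Tx path selection for PMAC-based calibration Tx (e.g. _tssi_hw_tx() in rtw8852b_rfk.c) */ +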
rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 7, RTW89_PHY_0); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "PMAC CFG Tx Path = %d", tx_path); + + if (tx_path == RF_PATH_A) { + rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 1); + rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0); + } else if (tx_path == RF_PATH_B) { + rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 2); + rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0); + } else if (tx_path == RF_PATH_AB) { + rtw89_phy_write32_mask(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, 3); + rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 4); + } else { + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Error Tx Path"); + } +} + +static +void __rtw8852bx_bb_tx_mode_switch(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx idx, u8 mode) +{ + if (mode != 0) + return; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "Tx mode switch"); + + rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_TXEN, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_GNT, B_PMAC_GNT_RXEN, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_RX_CFG1, B_PMAC_OPT1_MSK, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_PMAC_RXMOD, B_PMAC_RXMOD_MSK, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_DPD_EN, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0, idx); + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 0, idx); +} + +static +void __rtw8852bx_bb_backup_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, + struct rtw8852bx_bb_tssi_bak *bak) +{ + s32 tmp; + + bak->tx_path = rtw89_phy_read32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, idx); + bak->rx_path = rtw89_phy_read32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, idx); + bak->p0_rfmode = rtw89_phy_read32_idx(rtwdev, R_P0_RFMODE, MASKDWORD, idx); + bak->p0_rfmode_ftm = rtw89_phy_read32_idx(rtwdev, R_P0_RFMODE_FTM_RX, MASKDWORD, idx); + bak->p1_rfmode = rtw89_phy_read32_idx(rtwdev, R_P1_RFMODE, MASKDWORD, idx); + bak->p1_rfmode_ftm = rtw89_phy_read32_idx(rtwdev, R_P1_RFMODE_FTM_RX, MASKDWORD, idx); + tmp = rtw89_phy_read32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, idx); + bak->tx_pwr = sign_extend32(tmp, 8); +} + +static +void __rtw8852bx_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, + const struct rtw8852bx_bb_tssi_bak *bak) +{ + rtw89_phy_write32_idx(rtwdev, R_TXPATH_SEL, B_TXPATH_SEL_MSK, bak->tx_path, idx); + if (bak->tx_path == RF_AB) + rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0x4); + else + rtw89_phy_write32_mask(rtwdev, R_TXNSS_MAP, B_TXNSS_MAP_MSK, 0x0); + rtw89_phy_write32_idx(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, bak->rx_path, idx); + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_PWR_EN, 1, idx); + rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE, MASKDWORD, bak->p0_rfmode, idx); + rtw89_phy_write32_idx(rtwdev, R_P0_RFMODE_FTM_RX, MASKDWORD, bak->p0_rfmode_ftm, idx); + rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE, MASKDWORD, bak->p1_rfmode, idx); + rtw89_phy_write32_idx(rtwdev, R_P1_RFMODE_FTM_RX, MASKDWORD, bak->p1_rfmode_ftm, idx); + rtw89_phy_write32_idx(rtwdev, R_TXPWR, B_TXPWR_MSK, bak->tx_pwr, idx); +} + +static void __rtw8852bx_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) +{ + rtw89_phy_write_reg3_tbl(rtwdev, en ? 
&rtw8852bx_btc_preagc_en_defs_tbl : + &rtw8852bx_btc_preagc_dis_defs_tbl); +} + +static void __rtw8852bx_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) +{ + if (en) { + rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, + B_PATH0_BT_SHARE_V1, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, + B_PATH0_BTG_PATH_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1, + B_PATH1_G_LNA6_OP1DB_V1, 0x20); + rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1, + B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x30); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1, + B_PATH1_BT_SHARE_V1, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1, + B_PATH1_BTG_PATH_V1, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x1); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x2); + rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1, + B_BT_DYN_DC_EST_EN_MSK, 0x1); + rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x1); + } else { + rtw89_phy_write32_mask(rtwdev, R_PATH0_BT_SHARE_V1, + B_PATH0_BT_SHARE_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH0_BTG_PATH_V1, + B_PATH0_BTG_PATH_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_G_LNA6_OP1DB_V1, + B_PATH1_G_LNA6_OP1DB_V1, 0x1a); + rtw89_phy_write32_mask(rtwdev, R_PATH1_G_TIA0_LNA6_OP1DB_V1, + B_PATH1_G_TIA0_LNA6_OP1DB_V1, 0x2a); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BT_SHARE_V1, + B_PATH1_BT_SHARE_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BTG_PATH_V1, + B_PATH1_BTG_PATH_V1, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PMAC_GNT, B_PMAC_GNT_P1, 0xc); + rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_BT_SHARE, 0x0); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_BT_SEG0, 0x0); + rtw89_phy_write32_mask(rtwdev, R_BT_DYN_DC_EST_EN_V1, + B_BT_DYN_DC_EST_EN_MSK, 0x1); + rtw89_phy_write32_mask(rtwdev, R_GNT_BT_WGT_EN, B_GNT_BT_WGT_EN, 0x0); + } +} + +static +void __rtw8852bx_bb_ctrl_rx_path(struct rtw89_dev *rtwdev, + enum rtw89_rf_path_bit rx_path) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + u32 rst_mask0; + u32 rst_mask1; + + if (rx_path == RF_A) { + rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 1); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 1); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 1); + rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0); + } else if (rx_path == RF_B) { + rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 2); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 2); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 2); + rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0); + } else if (rx_path == RF_AB) { + rtw89_phy_write32_mask(rtwdev, R_CHBW_MOD_V1, B_ANT_RX_SEG0, 3); + rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG0, 3); + 
rtw89_phy_write32_mask(rtwdev, R_FC0_BW_V1, B_ANT_RX_1RCCA_SEG1, 3); + rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1); + rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_USER_MAX, 4); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1); + } + + rtw8852bx_set_gain_offset(rtwdev, chan->subband_type, RTW89_PHY_0); + + if (chan->band_type == RTW89_BAND_2G && + (rx_path == RF_B || rx_path == RF_AB)) + rtw8852bx_ctrl_btg_bt_rx(rtwdev, true, RTW89_PHY_0); + else + rtw8852bx_ctrl_btg_bt_rx(rtwdev, false, RTW89_PHY_0); + + rst_mask0 = B_P0_TXPW_RSTB_MANON | B_P0_TXPW_RSTB_TSSI; + rst_mask1 = B_P1_TXPW_RSTB_MANON | B_P1_TXPW_RSTB_TSSI; + if (rx_path == RF_A) { + rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 1); + rtw89_phy_write32_mask(rtwdev, R_P0_TXPW_RSTB, rst_mask0, 3); + } else { + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 1); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_RSTB, rst_mask1, 3); + } +} + +static void rtw8852bx_bb_ctrl_rf_mode_rx_path(struct rtw89_dev *rtwdev, + enum rtw89_rf_path_bit rx_path) +{ + if (rx_path == RF_A) { + rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, + B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1233312); + rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX, + B_P0_RFMODE_FTM_RX, 0x333); + rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, + B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1111111); + rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX, + B_P1_RFMODE_FTM_RX, 0x111); + } else if (rx_path == RF_B) { + rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, + B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1111111); + rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX, + B_P0_RFMODE_FTM_RX, 0x111); + rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, + B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1233312); + rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX, + B_P1_RFMODE_FTM_RX, 0x333); + } else if (rx_path == RF_AB) { + rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE, + B_P0_RFMODE_ORI_TXRX_FTM_TX, 0x1233312); + rtw89_phy_write32_mask(rtwdev, R_P0_RFMODE_FTM_RX, + B_P0_RFMODE_FTM_RX, 0x333); + rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE, + B_P1_RFMODE_ORI_TXRX_FTM_TX, 0x1233312); + rtw89_phy_write32_mask(rtwdev, R_P1_RFMODE_FTM_RX, + B_P1_RFMODE_FTM_RX, 0x333); + } +} + +static void __rtw8852bx_bb_cfg_txrx_path(struct rtw89_dev *rtwdev) +{ + struct rtw89_hal *hal = &rtwdev->hal; + enum rtw89_rf_path_bit rx_path = hal->antenna_rx ? 
hal->antenna_rx : RF_AB; + + rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path); + rtw8852bx_bb_ctrl_rf_mode_rx_path(rtwdev, rx_path); + + if (rtwdev->hal.rx_nss == 1) { + rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 0); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 0); + } else { + rtw89_phy_write32_mask(rtwdev, R_RXHT_MCS_LIMIT, B_RXHT_MCS_LIMIT, 1); + rtw89_phy_write32_mask(rtwdev, R_RXVHT_MCS_LIMIT, B_RXVHT_MCS_LIMIT, 1); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHE_MAX_NSS, 1); + rtw89_phy_write32_mask(rtwdev, R_RXHE, B_RXHETB_MAX_NSS, 1); + } + + rtw89_phy_write32_idx(rtwdev, R_MAC_SEL, B_MAC_SEL_MOD, 0x0, RTW89_PHY_0); +} + +static u8 __rtw8852bx_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path) +{ + if (rtwdev->is_tssi_mode[rf_path]) { + u32 addr = 0x1c10 + (rf_path << 13); + + return rtw89_phy_read32_mask(rtwdev, addr, 0x3F000000); + } + + rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1); + rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x0); + rtw89_write_rf(rtwdev, rf_path, RR_TM, RR_TM_TRI, 0x1); + + fsleep(200); + + return rtw89_read_rf(rtwdev, rf_path, RR_TM, RR_TM_VAL); +} + +static +void rtw8852bx_set_trx_mask(struct rtw89_dev *rtwdev, u8 path, u8 group, u32 val) +{ + rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x20000); + rtw89_write_rf(rtwdev, path, RR_LUTWA, RFREG_MASK, group); + rtw89_write_rf(rtwdev, path, RR_LUTWD0, RFREG_MASK, val); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RFREG_MASK, 0x0); +} + +static void __rtw8852bx_btc_init_cfg(struct rtw89_dev *rtwdev) +{ + struct rtw89_btc *btc = &rtwdev->btc; + const struct rtw89_chip_info *chip = rtwdev->chip; + const struct rtw89_mac_ax_coex coex_params = { + .pta_mode = RTW89_MAC_AX_COEX_RTK_MODE, + .direction = RTW89_MAC_AX_COEX_INNER, + }; + + /* PTA init */ + rtw89_mac_coex_init(rtwdev, &coex_params); + + /* set WL Tx response = Hi-Pri */ + chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_TX_RESP, true); + chip->ops->btc_set_wl_pri(rtwdev, BTC_PRI_MASK_BEACON, true); + + /* set rf gnt debug off */ + rtw89_write_rf(rtwdev, RF_PATH_A, RR_WLSEL, RFREG_MASK, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_WLSEL, RFREG_MASK, 0x0); + + /* set WL Tx thru in TRX mask table if GNT_WL = 0 && BT_S1 = ss group */ + if (btc->ant_type == BTC_ANT_SHARED) { + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5ff); + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5ff); + /* set path-A(S0) Tx/Rx no-mask if GNT_WL=0 && BT_S1=tx group */ + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff); + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_TX_GROUP, 0x55f); + } else { /* set WL Tx stb if GNT_WL = 0 && BT_S1 = ss group for 3-ant */ + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_SS_GROUP, 0x5df); + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_SS_GROUP, 0x5df); + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_TX_GROUP, 0x5ff); + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_TX_GROUP, 0x5ff); + } + + if (rtwdev->chip->chip_id == RTL8852BT) { + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_A, BTC_BT_RX_GROUP, 0x5df); + rtw8852bx_set_trx_mask(rtwdev, RF_PATH_B, BTC_BT_RX_GROUP, 0x5df); + } + + /* set PTA break table */ + rtw89_write32(rtwdev, R_BTC_BREAK_TABLE, BTC_BREAK_PARAM); + + /* enable BT counter 0xda40[16,2] = 2b'11 */ + rtw89_write32_set(rtwdev, R_AX_CSR_MODE, B_AX_BT_CNT_RST | B_AX_STATIS_BT_EN); + 
btc->cx.wl.status.map.init_ok = true; +} + +static +void __rtw8852bx_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state) +{ + u32 bitmap; + u32 reg; + + switch (map) { + case BTC_PRI_MASK_TX_RESP: + reg = R_BTC_BT_COEX_MSK_TABLE; + bitmap = B_BTC_PRI_MASK_TX_RESP_V1; + break; + case BTC_PRI_MASK_BEACON: + reg = R_AX_WL_PRI_MSK; + bitmap = B_AX_PTA_WL_PRI_MASK_BCNQ; + break; + case BTC_PRI_MASK_RX_CCK: + reg = R_BTC_BT_COEX_MSK_TABLE; + bitmap = B_BTC_PRI_MASK_RXCCK_V1; + break; + default: + return; + } + + if (state) + rtw89_write32_set(rtwdev, reg, bitmap); + else + rtw89_write32_clr(rtwdev, reg, bitmap); +} + +static +s8 __rtw8852bx_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val) +{ + /* +6 to compensate for the offset */ + return clamp_t(s8, val + 6, -100, 0) + 100; +} + +static +void __rtw8852bx_btc_update_bt_cnt(struct rtw89_dev *rtwdev) +{ + /* Feature moved to firmware */ +} + +static void __rtw8852bx_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state) +{ + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x80000); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD1, RFREG_MASK, 0x31); + + /* set WL standby = Rx for GNT_BT_Tx = 1->0 settle issue */ + if (state) + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x179); + else + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x20); + + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0); +} + +static void rtw8852bx_btc_set_wl_lna2(struct rtw89_dev *rtwdev, u8 level) +{ + switch (level) { + case 0: /* default */ + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x17); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0); + break; + case 1: /* Fix LNA2=5 */ + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x1000); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x1); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x2); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x15); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWA, RFREG_MASK, 0x3); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWD0, RFREG_MASK, 0x5); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_LUTWE, RFREG_MASK, 0x0); + break; + } +} + +static void __rtw8852bx_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level) +{ + struct rtw89_btc *btc = &rtwdev->btc; + + switch (level) { + case 0: /* original */ + default: + rtw8852bx_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0); + btc->dm.wl_lna2 = 0; + break; + case 1: /* for FDD free-run */ + rtw8852bx_ctrl_nbtg_bt_tx(rtwdev, true, RTW89_PHY_0); + btc->dm.wl_lna2 = 0; + break; + case 2: /* for BTG Co-Rx */ + rtw8852bx_ctrl_nbtg_bt_tx(rtwdev, false, RTW89_PHY_0); + btc->dm.wl_lna2 = 1; + break; + } + + rtw8852bx_btc_set_wl_lna2(rtwdev, btc->dm.wl_lna2); +} + +static void
rtw8852bx_fill_freq_with_ppdu(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu, + struct ieee80211_rx_status *status) +{ + u16 chan = phy_ppdu->chan_idx; + enum nl80211_band band; + u8 ch; + + if (chan == 0) + return; + + rtw89_decode_chan_idx(rtwdev, chan, &ch, &band); + status->freq = ieee80211_channel_to_frequency(ch, band); + status->band = band; +} + +static void __rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu, + struct ieee80211_rx_status *status) +{ + u8 path; + u8 *rx_power = phy_ppdu->rssi; + + status->signal = RTW89_RSSI_RAW_TO_DBM(max(rx_power[RF_PATH_A], rx_power[RF_PATH_B])); + for (path = 0; path < rtwdev->chip->rf_path_num; path++) { + status->chains |= BIT(path); + status->chain_signal[path] = RTW89_RSSI_RAW_TO_DBM(rx_power[path]); + } + if (phy_ppdu->valid) + rtw8852bx_fill_freq_with_ppdu(rtwdev, phy_ppdu, status); +} + +static int __rtw8852bx_mac_enable_bb_rf(struct rtw89_dev *rtwdev) +{ + enum rtw89_core_chip_id chip_id = rtwdev->chip->chip_id; + u32 val32; + int ret; + + rtw89_write8_set(rtwdev, R_AX_SYS_FUNC_EN, + B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); + rtw89_write32_mask(rtwdev, R_AX_SPS_DIG_ON_CTRL0, B_AX_REG_ZCDC_H_MASK, 0x1); + rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); + rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); + rtw89_write32_set(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); + + if (chip_id == RTL8852BT) { + val32 = rtw89_read32(rtwdev, R_AX_AFE_OFF_CTRL1); + val32 = u32_replace_bits(val32, 0x1, B_AX_S0_LDO_VSEL_F_MASK); + val32 = u32_replace_bits(val32, 0x1, B_AX_S1_LDO_VSEL_F_MASK); + rtw89_write32(rtwdev, R_AX_AFE_OFF_CTRL1, val32); + } + + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, 0xC7, + FULL_BIT_MASK); + if (ret) + return ret; + + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, 0xC7, + FULL_BIT_MASK); + if (ret) + return ret; + + rtw89_write8(rtwdev, R_AX_PHYREG_SET, PHYREG_SET_XYN_CYCLE); + + return 0; +} + +static int __rtw8852bx_mac_disable_bb_rf(struct rtw89_dev *rtwdev) +{ + u8 wl_rfc_s0; + u8 wl_rfc_s1; + int ret; + + rtw89_write32_clr(rtwdev, R_AX_WLRF_CTRL, B_AX_AFC_AFEDIG); + rtw89_write8_clr(rtwdev, R_AX_SYS_FUNC_EN, + B_AX_FEN_BBRSTB | B_AX_FEN_BB_GLB_RSTN); + + ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, &wl_rfc_s0); + if (ret) + return ret; + wl_rfc_s0 &= ~XTAL_SI_RF00S_EN; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S0, wl_rfc_s0, + FULL_BIT_MASK); + if (ret) + return ret; + + ret = rtw89_mac_read_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, &wl_rfc_s1); + if (ret) + return ret; + wl_rfc_s1 &= ~XTAL_SI_RF10S_EN; + ret = rtw89_mac_write_xtal_si(rtwdev, XTAL_SI_WL_RFC_S1, wl_rfc_s1, + FULL_BIT_MASK); + return ret; +} + +const struct rtw8852bx_info rtw8852bx_info = { + .mac_enable_bb_rf = __rtw8852bx_mac_enable_bb_rf, + .mac_disable_bb_rf = __rtw8852bx_mac_disable_bb_rf, + .bb_sethw = __rtw8852bx_bb_sethw, + .bb_reset_all = __rtw8852bx_bb_reset_all, + .bb_cfg_txrx_path = __rtw8852bx_bb_cfg_txrx_path, + .bb_cfg_tx_path = __rtw8852bx_bb_cfg_tx_path, + .bb_ctrl_rx_path = __rtw8852bx_bb_ctrl_rx_path, + .bb_set_plcp_tx = __rtw8852bx_bb_set_plcp_tx, + .bb_set_power = __rtw8852bx_bb_set_power, + .bb_set_pmac_pkt_tx = __rtw8852bx_bb_set_pmac_pkt_tx, + .bb_backup_tssi = __rtw8852bx_bb_backup_tssi, + .bb_restore_tssi = __rtw8852bx_bb_restore_tssi, + .bb_tx_mode_switch = __rtw8852bx_bb_tx_mode_switch, + .set_channel_mac = __rtw8852bx_set_channel_mac, + .set_channel_bb = __rtw8852bx_set_channel_bb, + .ctrl_nbtg_bt_tx = 
__rtw8852bx_ctrl_nbtg_bt_tx, + .ctrl_btg_bt_rx = __rtw8852bx_ctrl_btg_bt_rx, + .query_ppdu = __rtw8852bx_query_ppdu, + .read_efuse = __rtw8852bx_read_efuse, + .read_phycap = __rtw8852bx_read_phycap, + .power_trim = __rtw8852bx_power_trim, + .set_txpwr = __rtw8852bx_set_txpwr, + .set_txpwr_ctrl = __rtw8852bx_set_txpwr_ctrl, + .init_txpwr_unit = __rtw8852bx_init_txpwr_unit, + .set_txpwr_ul_tb_offset = __rtw8852bx_set_txpwr_ul_tb_offset, + .get_thermal = __rtw8852bx_get_thermal, + .adc_cfg = rtw8852bt_adc_cfg, + .btc_init_cfg = __rtw8852bx_btc_init_cfg, + .btc_set_wl_pri = __rtw8852bx_btc_set_wl_pri, + .btc_get_bt_rssi = __rtw8852bx_btc_get_bt_rssi, + .btc_update_bt_cnt = __rtw8852bx_btc_update_bt_cnt, + .btc_wl_s1_standby = __rtw8852bx_btc_wl_s1_standby, + .btc_set_wl_rx_gain = __rtw8852bx_btc_set_wl_rx_gain, +}; +EXPORT_SYMBOL(rtw8852bx_info); + +MODULE_AUTHOR("Realtek Corporation"); +MODULE_DESCRIPTION("Realtek 802.11ax wireless 8852B common routines"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h new file mode 100644 index 000000000000..801e7ab9f4fa --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_common.h @@ -0,0 +1,388 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2024 Realtek Corporation + */ + +#ifndef __RTW89_8852BX_H__ +#define __RTW89_8852BX_H__ + +#include "core.h" + +#define RF_PATH_NUM_8852BX 2 +#define BB_PATH_NUM_8852BX 2 + +enum rtw8852bx_pmac_mode { + NONE_TEST, + PKTS_TX, + PKTS_RX, + CONT_TX +}; + +struct rtw8852bx_u_efuse { + u8 rsvd[0x88]; + u8 mac_addr[ETH_ALEN]; +}; + +struct rtw8852bx_e_efuse { + u8 mac_addr[ETH_ALEN]; +}; + +struct rtw8852bx_tssi_offset { + u8 cck_tssi[TSSI_CCK_CH_GROUP_NUM]; + u8 bw40_tssi[TSSI_MCS_2G_CH_GROUP_NUM]; + u8 rsvd[7]; + u8 bw40_1s_tssi_5g[TSSI_MCS_5G_CH_GROUP_NUM]; +} __packed; + +struct rtw8852bx_efuse { + u8 rsvd[0x210]; + struct rtw8852bx_tssi_offset path_a_tssi; + u8 rsvd1[10]; + struct rtw8852bx_tssi_offset path_b_tssi; + u8 rsvd2[94]; + u8 channel_plan; + u8 xtal_k; + u8 rsvd3; + u8 iqk_lck; + u8 rsvd4[5]; + u8 reg_setting:2; + u8 tx_diversity:1; + u8 rx_diversity:2; + u8 ac_mode:1; + u8 module_type:2; + u8 rsvd5; + u8 shared_ant:1; + u8 coex_type:3; + u8 ant_iso:1; + u8 radio_on_off:1; + u8 rsvd6:2; + u8 eeprom_version; + u8 customer_id; + u8 tx_bb_swing_2g; + u8 tx_bb_swing_5g; + u8 tx_cali_pwr_trk_mode; + u8 trx_path_selection; + u8 rfe_type; + u8 country_code[2]; + u8 rsvd7[3]; + u8 path_a_therm; + u8 path_b_therm; + u8 rsvd8[2]; + u8 rx_gain_2g_ofdm; + u8 rsvd9; + u8 rx_gain_2g_cck; + u8 rsvd10; + u8 rx_gain_5g_low; + u8 rsvd11; + u8 rx_gain_5g_mid; + u8 rsvd12; + u8 rx_gain_5g_high; + u8 rsvd13[35]; + u8 path_a_cck_pwr_idx[6]; + u8 path_a_bw40_1tx_pwr_idx[5]; + u8 path_a_ofdm_1tx_pwr_idx_diff:4; + u8 path_a_bw20_1tx_pwr_idx_diff:4; + u8 path_a_bw20_2tx_pwr_idx_diff:4; + u8 path_a_bw40_2tx_pwr_idx_diff:4; + u8 path_a_cck_2tx_pwr_idx_diff:4; + u8 path_a_ofdm_2tx_pwr_idx_diff:4; + u8 rsvd14[0xf2]; + union { + struct rtw8852bx_u_efuse u; + struct rtw8852bx_e_efuse e; + }; +} __packed; + +struct rtw8852bx_bb_pmac_info { + u8 en_pmac_tx:1; + u8 is_cck:1; + u8 mode:3; + u8 rsvd:3; + u16 tx_cnt; + u16 period; + u16 tx_time; + u8 duty_cycle; +}; + +struct rtw8852bx_bb_tssi_bak { + u8 tx_path; + u8 rx_path; + u32 p0_rfmode; + u32 p0_rfmode_ftm; + u32 p1_rfmode; + u32 p1_rfmode_ftm; + s16 tx_pwr; /* S9 */ +}; + +struct rtw8852bx_info { + int (*mac_enable_bb_rf)(struct rtw89_dev 
*rtwdev); + int (*mac_disable_bb_rf)(struct rtw89_dev *rtwdev); + void (*bb_sethw)(struct rtw89_dev *rtwdev); + void (*bb_reset_all)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); + void (*bb_cfg_txrx_path)(struct rtw89_dev *rtwdev); + void (*bb_cfg_tx_path)(struct rtw89_dev *rtwdev, u8 tx_path); + void (*bb_ctrl_rx_path)(struct rtw89_dev *rtwdev, + enum rtw89_rf_path_bit rx_path); + void (*bb_set_plcp_tx)(struct rtw89_dev *rtwdev); + void (*bb_set_power)(struct rtw89_dev *rtwdev, s16 pwr_dbm, + enum rtw89_phy_idx idx); + void (*bb_set_pmac_pkt_tx)(struct rtw89_dev *rtwdev, u8 enable, + u16 tx_cnt, u16 period, u16 tx_time, + enum rtw89_phy_idx idx); + void (*bb_backup_tssi)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, + struct rtw8852bx_bb_tssi_bak *bak); + void (*bb_restore_tssi)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, + const struct rtw8852bx_bb_tssi_bak *bak); + void (*bb_tx_mode_switch)(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx idx, u8 mode); + void (*set_channel_mac)(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, u8 mac_idx); + void (*set_channel_bb)(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx); + void (*ctrl_nbtg_bt_tx)(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx); + void (*ctrl_btg_bt_rx)(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx); + void (*query_ppdu)(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu, + struct ieee80211_rx_status *status); + int (*read_efuse)(struct rtw89_dev *rtwdev, u8 *log_map, + enum rtw89_efuse_block block); + int (*read_phycap)(struct rtw89_dev *rtwdev, u8 *phycap_map); + void (*power_trim)(struct rtw89_dev *rtwdev); + void (*set_txpwr)(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx); + void (*set_txpwr_ctrl)(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx); + int (*init_txpwr_unit)(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); + void (*set_txpwr_ul_tb_offset)(struct rtw89_dev *rtwdev, + s8 pw_ofst, enum rtw89_mac_idx mac_idx); + u8 (*get_thermal)(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path); + void (*adc_cfg)(struct rtw89_dev *rtwdev, u8 bw, u8 path); + void (*btc_init_cfg)(struct rtw89_dev *rtwdev); + void (*btc_set_wl_pri)(struct rtw89_dev *rtwdev, u8 map, bool state); + s8 (*btc_get_bt_rssi)(struct rtw89_dev *rtwdev, s8 val); + void (*btc_update_bt_cnt)(struct rtw89_dev *rtwdev); + void (*btc_wl_s1_standby)(struct rtw89_dev *rtwdev, bool state); + void (*btc_set_wl_rx_gain)(struct rtw89_dev *rtwdev, u32 level); +}; + +extern const struct rtw8852bx_info rtw8852bx_info; + +static inline +int rtw8852bx_mac_enable_bb_rf(struct rtw89_dev *rtwdev) +{ + return rtw8852bx_info.mac_enable_bb_rf(rtwdev); +} + +static inline +int rtw8852bx_mac_disable_bb_rf(struct rtw89_dev *rtwdev) +{ + return rtw8852bx_info.mac_disable_bb_rf(rtwdev); +} + +static inline +void rtw8852bx_bb_sethw(struct rtw89_dev *rtwdev) +{ + rtw8852bx_info.bb_sethw(rtwdev); +} + +static inline +void rtw8852bx_bb_reset_all(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + rtw8852bx_info.bb_reset_all(rtwdev, phy_idx); +} + +static inline +void rtw8852bx_bb_cfg_txrx_path(struct rtw89_dev *rtwdev) +{ + rtw8852bx_info.bb_cfg_txrx_path(rtwdev); +} + +static inline +void rtw8852bx_bb_cfg_tx_path(struct rtw89_dev *rtwdev, u8 tx_path) +{ + rtw8852bx_info.bb_cfg_tx_path(rtwdev, tx_path); +} + +static inline +void rtw8852bx_bb_ctrl_rx_path(struct rtw89_dev *rtwdev, + enum rtw89_rf_path_bit rx_path) +{ 
+ rtw8852bx_info.bb_ctrl_rx_path(rtwdev, rx_path); +} + +static inline +void rtw8852bx_bb_set_plcp_tx(struct rtw89_dev *rtwdev) +{ + rtw8852bx_info.bb_set_plcp_tx(rtwdev); +} + +static inline +void rtw8852bx_bb_set_power(struct rtw89_dev *rtwdev, s16 pwr_dbm, + enum rtw89_phy_idx idx) +{ + rtw8852bx_info.bb_set_power(rtwdev, pwr_dbm, idx); +} + +static inline +void rtw8852bx_bb_set_pmac_pkt_tx(struct rtw89_dev *rtwdev, u8 enable, + u16 tx_cnt, u16 period, u16 tx_time, + enum rtw89_phy_idx idx) +{ + rtw8852bx_info.bb_set_pmac_pkt_tx(rtwdev, enable, tx_cnt, period, tx_time, idx); +} + +static inline +void rtw8852bx_bb_backup_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, + struct rtw8852bx_bb_tssi_bak *bak) +{ + rtw8852bx_info.bb_backup_tssi(rtwdev, idx, bak); +} + +static inline +void rtw8852bx_bb_restore_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx idx, + const struct rtw8852bx_bb_tssi_bak *bak) +{ + rtw8852bx_info.bb_restore_tssi(rtwdev, idx, bak); +} + +static inline +void rtw8852bx_bb_tx_mode_switch(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx idx, u8 mode) +{ + rtw8852bx_info.bb_tx_mode_switch(rtwdev, idx, mode); +} + +static inline +void rtw8852bx_set_channel_mac(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, u8 mac_idx) +{ + rtw8852bx_info.set_channel_mac(rtwdev, chan, mac_idx); +} + +static inline +void rtw8852bx_set_channel_bb(struct rtw89_dev *rtwdev, const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + rtw8852bx_info.set_channel_bb(rtwdev, chan, phy_idx); +} + +static inline +void rtw8852bx_ctrl_nbtg_bt_tx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) +{ + rtw8852bx_info.ctrl_nbtg_bt_tx(rtwdev, en, phy_idx); +} + +static inline +void rtw8852bx_ctrl_btg_bt_rx(struct rtw89_dev *rtwdev, bool en, + enum rtw89_phy_idx phy_idx) +{ + rtw8852bx_info.ctrl_btg_bt_rx(rtwdev, en, phy_idx); +} + +static inline +void rtw8852bx_query_ppdu(struct rtw89_dev *rtwdev, + struct rtw89_rx_phy_ppdu *phy_ppdu, + struct ieee80211_rx_status *status) +{ + rtw8852bx_info.query_ppdu(rtwdev, phy_ppdu, status); +} + +static inline +int rtw8852bx_read_efuse(struct rtw89_dev *rtwdev, u8 *log_map, + enum rtw89_efuse_block block) +{ + return rtw8852bx_info.read_efuse(rtwdev, log_map, block); +} + +static inline +int rtw8852bx_read_phycap(struct rtw89_dev *rtwdev, u8 *phycap_map) +{ + return rtw8852bx_info.read_phycap(rtwdev, phycap_map); +} + +static inline +void rtw8852bx_power_trim(struct rtw89_dev *rtwdev) +{ + rtw8852bx_info.power_trim(rtwdev); +} + +static inline +void rtw8852bx_set_txpwr(struct rtw89_dev *rtwdev, + const struct rtw89_chan *chan, + enum rtw89_phy_idx phy_idx) +{ + rtw8852bx_info.set_txpwr(rtwdev, chan, phy_idx); +} + +static inline +void rtw8852bx_set_txpwr_ctrl(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx) +{ + rtw8852bx_info.set_txpwr_ctrl(rtwdev, phy_idx); +} + +static inline +int rtw8852bx_init_txpwr_unit(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + return rtw8852bx_info.init_txpwr_unit(rtwdev, phy_idx); +} + +static inline +void rtw8852bx_set_txpwr_ul_tb_offset(struct rtw89_dev *rtwdev, + s8 pw_ofst, enum rtw89_mac_idx mac_idx) +{ + rtw8852bx_info.set_txpwr_ul_tb_offset(rtwdev, pw_ofst, mac_idx); +} + +static inline +u8 rtw8852bx_get_thermal(struct rtw89_dev *rtwdev, enum rtw89_rf_path rf_path) +{ + return rtw8852bx_info.get_thermal(rtwdev, rf_path); +} + +static inline +void rtw8852bx_adc_cfg(struct rtw89_dev *rtwdev, u8 bw, u8 path) +{ + rtw8852bx_info.adc_cfg(rtwdev, bw, path); +} + +static inline +void 
rtw8852bx_btc_init_cfg(struct rtw89_dev *rtwdev) +{ + rtw8852bx_info.btc_init_cfg(rtwdev); +} + +static inline +void rtw8852bx_btc_set_wl_pri(struct rtw89_dev *rtwdev, u8 map, bool state) +{ + rtw8852bx_info.btc_set_wl_pri(rtwdev, map, state); +} + +static inline +s8 rtw8852bx_btc_get_bt_rssi(struct rtw89_dev *rtwdev, s8 val) +{ + return rtw8852bx_info.btc_get_bt_rssi(rtwdev, val); +} + +static inline +void rtw8852bx_btc_update_bt_cnt(struct rtw89_dev *rtwdev) +{ + rtw8852bx_info.btc_update_bt_cnt(rtwdev); +} + +static inline +void rtw8852bx_btc_wl_s1_standby(struct rtw89_dev *rtwdev, bool state) +{ + rtw8852bx_info.btc_wl_s1_standby(rtwdev, state); +} + +static inline +void rtw8852bx_btc_set_wl_rx_gain(struct rtw89_dev *rtwdev, u32 level) +{ + rtw8852bx_info.btc_set_wl_rx_gain(rtwdev, level); +} + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c index 259df67836a0..12354612441c 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852b_rfk.c @@ -8,6 +8,7 @@ #include "phy.h" #include "reg.h" #include "rtw8852b.h" +#include "rtw8852b_common.h" #include "rtw8852b_rfk.h" #include "rtw8852b_rfk_table.h" #include "rtw8852b_table.h" @@ -20,7 +21,7 @@ #define RTW8852B_RF_REL_VERSION 34 #define RTW8852B_DPK_VER 0x0d #define RTW8852B_DPK_RF_PATH 2 -#define RTW8852B_DPK_KIP_REG_NUM 2 +#define RTW8852B_DPK_KIP_REG_NUM 3 #define _TSSI_DE_MASK GENMASK(21, 12) #define ADDC_T_AVG 100 @@ -3433,13 +3434,13 @@ static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, rx_path = RF_ABCD; /* don't change path, but still set others */ if (enable) { - rtw8852b_bb_set_plcp_tx(rtwdev); - rtw8852b_bb_cfg_tx_path(rtwdev, path); - rtw8852b_bb_ctrl_rx_path(rtwdev, rx_path); - rtw8852b_bb_set_power(rtwdev, pwr_dbm, phy); + rtw8852bx_bb_set_plcp_tx(rtwdev); + rtw8852bx_bb_cfg_tx_path(rtwdev, path); + rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path); + rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy); } - rtw8852b_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy); + rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy); } static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev, @@ -3578,7 +3579,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u32 tssi_cw_rpt[RTW8852B_TSSI_PATH_NR] = {0}; u8 channel = chan->channel; u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel); - struct rtw8852b_bb_tssi_bak tssi_bak; + struct rtw8852bx_bb_tssi_bak tssi_bak; s32 aliment_diff, tssi_cw_default; u32 start_time, finish_time; u32 bb_reg_backup[8] = {0}; @@ -3626,7 +3627,7 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, else band = TSSI_ALIMK_2G; - rtw8852b_bb_backup_tssi(rtwdev, phy, &tssi_bak); + rtw8852bx_bb_backup_tssi(rtwdev, phy, &tssi_bak); _tssi_backup_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup)); rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x8); @@ -3730,8 +3731,8 @@ static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, out: _tssi_reload_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, ARRAY_SIZE(bb_reg_backup)); - rtw8852b_bb_restore_tssi(rtwdev, phy, &tssi_bak); - rtw8852b_bb_tx_mode_switch(rtwdev, phy, 0); + rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak); + rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0); finish_time = ktime_get_ns(); tssi_info->tssi_alimk_time += finish_time - start_time; diff --git 
a/drivers/net/wireless/realtek/rtw89/rtw8852be.c b/drivers/net/wireless/realtek/rtw89/rtw8852be.c index 5f941122655c..d8f9d92ca0fb 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852be.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852be.c @@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8852b_pci_info = { .rpwm_addr = R_AX_PCIE_HRPWM, .cpwm_addr = R_AX_CPWM, .mit_addr = R_AX_INT_MIT_RX, + .wp_sel_addr = 0, .tx_dma_ch_mask = BIT(RTW89_TXCH_ACH4) | BIT(RTW89_TXCH_ACH5) | BIT(RTW89_TXCH_ACH6) | BIT(RTW89_TXCH_ACH7) | BIT(RTW89_TXCH_CH10) | BIT(RTW89_TXCH_CH11), diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt.h new file mode 100644 index 000000000000..6177f36ad667 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2024 Realtek Corporation + */ + +#ifndef __RTW89_8852BT_H__ +#define __RTW89_8852BT_H__ + +#include "core.h" + +#define RF_PATH_NUM_8852BT 2 +#define BB_PATH_NUM_8852BT 2 + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c new file mode 100644 index 000000000000..fa0e49d58112 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.c @@ -0,0 +1,4019 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2024 Realtek Corporation + */ + +#include "coex.h" +#include "debug.h" +#include "fw.h" +#include "mac.h" +#include "phy.h" +#include "reg.h" +#include "rtw8852bt.h" +#include "rtw8852bt_rfk.h" +#include "rtw8852bt_rfk_table.h" +#include "rtw8852b_common.h" + +#define RTW8852BT_RXDCK_VER 0x1 +#define RTW8852BT_IQK_VER 0x2a +#define RTW8852BT_SS 2 +#define RTW8852BT_TSSI_PATH_NR 2 +#define RTW8852BT_DPK_VER 0x06 +#define DPK_RF_PATH_MAX_8852BT 2 + +#define _TSSI_DE_MASK GENMASK(21, 12) +#define DPK_TXAGC_LOWER 0x2e +#define DPK_TXAGC_UPPER 0x3f +#define DPK_TXAGC_INVAL 0xff +#define RFREG_MASKRXBB 0x003e0 +#define RFREG_MASKMODE 0xf0000 + +enum rf_mode { + RF_SHUT_DOWN = 0x0, + RF_STANDBY = 0x1, + RF_TX = 0x2, + RF_RX = 0x3, + RF_TXIQK = 0x4, + RF_DPK = 0x5, + RF_RXK1 = 0x6, + RF_RXK2 = 0x7, +}; + +enum rtw8852bt_dpk_id { + LBK_RXIQK = 0x06, + SYNC = 0x10, + MDPK_IDL = 0x11, + MDPK_MPA = 0x12, + GAIN_LOSS = 0x13, + GAIN_CAL = 0x14, + DPK_RXAGC = 0x15, + KIP_PRESET = 0x16, + KIP_RESTORE = 0x17, + DPK_TXAGC = 0x19, + D_KIP_PRESET = 0x28, + D_TXAGC = 0x29, + D_RXAGC = 0x2a, + D_SYNC = 0x2b, + D_GAIN_LOSS = 0x2c, + D_MDPK_IDL = 0x2d, + D_GAIN_NORM = 0x2f, + D_KIP_THERMAL = 0x30, + D_KIP_RESTORE = 0x31 +}; + +enum dpk_agc_step { + DPK_AGC_STEP_SYNC_DGAIN, + DPK_AGC_STEP_GAIN_ADJ, + DPK_AGC_STEP_GAIN_LOSS_IDX, + DPK_AGC_STEP_GL_GT_CRITERION, + DPK_AGC_STEP_GL_LT_CRITERION, + DPK_AGC_STEP_SET_TX_GAIN, +}; + +enum rtw8852bt_iqk_type { + ID_TXAGC = 0x0, + ID_FLOK_COARSE = 0x1, + ID_FLOK_FINE = 0x2, + ID_TXK = 0x3, + ID_RXAGC = 0x4, + ID_RXK = 0x5, + ID_NBTXK = 0x6, + ID_NBRXK = 0x7, + ID_FLOK_VBUFFER = 0x8, + ID_A_FLOK_COARSE = 0x9, + ID_G_FLOK_COARSE = 0xa, + ID_A_FLOK_FINE = 0xb, + ID_G_FLOK_FINE = 0xc, + ID_IQK_RESTORE = 0x10, +}; + +enum adc_ck { + ADC_NA = 0, + ADC_480M = 1, + ADC_960M = 2, + ADC_1920M = 3, +}; + +enum dac_ck { + DAC_40M = 0, + DAC_80M = 1, + DAC_120M = 2, + DAC_160M = 3, + DAC_240M = 4, + DAC_320M = 5, + DAC_480M = 6, + DAC_960M = 7, +}; + +static const u32 _tssi_trigger[RTW8852BT_TSSI_PATH_NR] = {0x5820, 0x7820}; +static const u32 _tssi_cw_rpt_addr[RTW8852BT_TSSI_PATH_NR] = {0x1c18, 0x3c18}; 
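+/* The _tssi_* tables here are indexed by RF path: [0] = path A, [1] = path B */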
+static const u32 _tssi_cw_default_addr[RTW8852BT_TSSI_PATH_NR][4] = { + {0x5634, 0x5630, 0x5630, 0x5630}, + {0x7634, 0x7630, 0x7630, 0x7630} }; +static const u32 _tssi_cw_default_mask[4] = { + 0x000003ff, 0x3ff00000, 0x000ffc00, 0x000003ff}; +static const u32 _tssi_de_cck_long[RF_PATH_NUM_8852BT] = {0x5858, 0x7858}; +static const u32 _tssi_de_cck_short[RF_PATH_NUM_8852BT] = {0x5860, 0x7860}; +static const u32 _tssi_de_mcs_20m[RF_PATH_NUM_8852BT] = {0x5838, 0x7838}; +static const u32 _tssi_de_mcs_40m[RF_PATH_NUM_8852BT] = {0x5840, 0x7840}; +static const u32 _tssi_de_mcs_80m[RF_PATH_NUM_8852BT] = {0x5848, 0x7848}; +static const u32 _tssi_de_mcs_80m_80m[RF_PATH_NUM_8852BT] = {0x5850, 0x7850}; +static const u32 _tssi_de_mcs_5m[RF_PATH_NUM_8852BT] = {0x5828, 0x7828}; +static const u32 _tssi_de_mcs_10m[RF_PATH_NUM_8852BT] = {0x5830, 0x7830}; + +static const u32 rtw8852bt_backup_bb_regs[] = {0x2344, 0x5800, 0x7800, 0x0704}; +static const u32 rtw8852bt_backup_rf_regs[] = { + 0xde, 0xdf, 0x8b, 0x90, 0x97, 0x85, 0x5, 0x10005}; +static const u32 rtw8852bt_backup_kip_regs[] = { + 0x813c, 0x8124, 0x8120, 0xc0d4, 0xc0d8, 0xc0c4, 0xc0ec, + 0x823c, 0x8224, 0x8220, 0xc1d4, 0xc1d8, 0xc1c4, 0xc1ec}; + +#define BACKUP_BB_REGS_NR ARRAY_SIZE(rtw8852bt_backup_bb_regs) +#define BACKUP_RF_REGS_NR ARRAY_SIZE(rtw8852bt_backup_rf_regs) +#define BACKUP_KIP_REGS_NR ARRAY_SIZE(rtw8852bt_backup_kip_regs) + +static void _rfk_get_thermal(struct rtw89_dev *rtwdev, u8 kidx, enum rtw89_rf_path path) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1); + rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x0); + rtw89_write_rf(rtwdev, path, RR_TM, RR_TM_TRI, 0x1); + + udelay(200); + + dpk->bp[path][kidx].ther_dpk = rtw89_read_rf(rtwdev, path, RR_TM, RR_TM_VAL); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] thermal@DPK = 0x%x\n", + dpk->bp[path][kidx].ther_dpk); +} + +static void _rfk_backup_bb_reg(struct rtw89_dev *rtwdev, u32 backup_bb_reg_val[]) +{ + u32 i; + + for (i = 0; i < BACKUP_BB_REGS_NR; i++) { + backup_bb_reg_val[i] = + rtw89_phy_read32_mask(rtwdev, rtw8852bt_backup_bb_regs[i], MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RFK]backup bb reg : %x, value =%x\n", + rtw8852bt_backup_bb_regs[i], backup_bb_reg_val[i]); + } +} + +static void _rfk_backup_kip_reg(struct rtw89_dev *rtwdev, u32 backup_kip_reg_val[]) +{ + u32 i; + + for (i = 0; i < BACKUP_KIP_REGS_NR; i++) { + backup_kip_reg_val[i] = + rtw89_phy_read32_mask(rtwdev, rtw8852bt_backup_kip_regs[i], + MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup 0x%x = %x\n", + rtw8852bt_backup_kip_regs[i], backup_kip_reg_val[i]); + } +} + +static +void _rfk_backup_rf_reg(struct rtw89_dev *rtwdev, u32 backup_rf_reg_val[], u8 rf_path) +{ + u32 i; + + for (i = 0; i < BACKUP_RF_REGS_NR; i++) { + backup_rf_reg_val[i] = + rtw89_read_rf(rtwdev, rf_path, rtw8852bt_backup_rf_regs[i], + RFREG_MASK); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Backup RF S%d 0x%x = %x\n", + rf_path, rtw8852bt_backup_rf_regs[i], backup_rf_reg_val[i]); + } +} + +static void _rfk_reload_bb_reg(struct rtw89_dev *rtwdev, const u32 backup_bb_reg_val[]) +{ + u32 i; + + for (i = 0; i < BACKUP_BB_REGS_NR; i++) { + rtw89_phy_write32_mask(rtwdev, rtw8852bt_backup_bb_regs[i], + MASKDWORD, backup_bb_reg_val[i]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RFK]restore bb reg : %x, value =%x\n", + rtw8852bt_backup_bb_regs[i], backup_bb_reg_val[i]); + } +} + +static void _rfk_reload_kip_reg(struct rtw89_dev *rtwdev, u32 backup_kip_reg_val[]) +{ + u32 i; 
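+	/* inverse of _rfk_backup_kip_reg(): write each saved dword back */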
+ + for (i = 0; i < BACKUP_KIP_REGS_NR; i++) { + rtw89_phy_write32_mask(rtwdev, rtw8852bt_backup_kip_regs[i], + MASKDWORD, backup_kip_reg_val[i]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RFK]restore kip reg : %x, value =%x\n", + rtw8852bt_backup_kip_regs[i], backup_kip_reg_val[i]); + } +} + +static void _rfk_reload_rf_reg(struct rtw89_dev *rtwdev, + const u32 backup_rf_reg_val[], u8 rf_path) +{ + u32 i; + + for (i = 0; i < BACKUP_RF_REGS_NR; i++) { + rtw89_write_rf(rtwdev, rf_path, rtw8852bt_backup_rf_regs[i], + RFREG_MASK, backup_rf_reg_val[i]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RFK]restore rf S%d reg: %x, value =%x\n", rf_path, + rtw8852bt_backup_rf_regs[i], backup_rf_reg_val[i]); + } +} + +static u8 _kpath(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + u8 val; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RFK]dbcc_en: %x,PHY%d\n", + rtwdev->dbcc_en, phy_idx); + + if (!rtwdev->dbcc_en) { + val = RF_AB; + } else { + if (phy_idx == RTW89_PHY_0) + val = RF_A; + else + val = RF_B; + } + return val; +} + +static +void _txck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool force, + enum dac_ck ck) +{ + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x0); + + if (!force) + return; + + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_VAL, ck); + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_TXCK_ON, 0x1); +} + +static +void _rxck_force(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool force, + enum adc_ck ck) +{ + u32 bw = 0; + + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x0); + + if (!force) + return; + + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_VAL, ck); + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK | (path << 13), B_P0_RXCK_ON, 0x1); + + switch (ck) { + case ADC_480M: + bw = RTW89_CHANNEL_WIDTH_40; + break; + case ADC_960M: + bw = RTW89_CHANNEL_WIDTH_80; + break; + case ADC_1920M: + bw = RTW89_CHANNEL_WIDTH_160; + break; + default: + rtw89_debug(rtwdev, RTW89_DBG_RFK, "%s==>Invalid ck", __func__); + break; + } + + rtw8852bx_adc_cfg(rtwdev, bw, path); +} + +static void _rfk_bb_afe_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kpath) +{ + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0303); + rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x3); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x3); + rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x1ffffff); + rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x1); + rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x3ff); + rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, 0x3); + rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_PATH_RST, B_P0_PATH_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_PATH_RST, B_P1_PATH_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1); + rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x1); + rtw89_phy_write32_mask(rtwdev, R_DCFO_WEIGHT, B_DAC_CLK_IDX, 0x1); + + _txck_force(rtwdev, RF_PATH_A, true, DAC_960M); + _txck_force(rtwdev, RF_PATH_B, true, DAC_960M); + _rxck_force(rtwdev, RF_PATH_A, true, ADC_1920M); + _rxck_force(rtwdev, RF_PATH_B, 
true, ADC_1920M); + + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, + B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x5); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x1f); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, MASKBYTE3, 0x13); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0001); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, MASKHWORD, 0x0041); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_RSTB, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x3333); + + rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H, B_TXPWRB_RDY, 0x1); + rtw89_phy_write32_mask(rtwdev, R_DPD_OFT_EN, MASKLWORD, 0x0000); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_FORCE, B_P1_TXPW_RDY, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_TXAGC_TH, MASKLWORD, 0x0000); +} + +static void _rfk_bb_afe_restore(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kpath) +{ + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0303); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x0); + rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x0); + rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x0); + rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x63); + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_TXCK_ALL, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_TXCK_ALL, 0x00); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, + B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, MASKHWORD, 0x0000); + rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0); + + rtw89_phy_write32_mask(rtwdev, R_TXPWRB_H, B_TXPWRB_RDY, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TXPW_RSTB, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TXPW_RSTB, 0x2); + rtw89_phy_write32_mask(rtwdev, R_P1_TXPW_FORCE, B_P1_TXPW_RDY, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TXPW_RSTB, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TXPW_RSTB, 0x2); +} + +static void _set_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_write_rf(rtwdev, path, RR_DCK1, RR_DCK1_CLR, 0x0); + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1); + mdelay(1); +} + +static void _rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + u8 path, dck_tune; + u32 rf_reg5; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RX_DCK] ****** RXDCK Start (Ver: 0x%x, CV : 0x%x) ******\n", + RTW8852BT_RXDCK_VER, rtwdev->hal.cv); + + for (path = 0; path < RF_PATH_NUM_8852BT; path++) { + rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); + dck_tune = rtw89_read_rf(rtwdev, path, RR_DCK, RR_DCK_FINE); + + if (rtwdev->is_tssi_mode[path]) + rtw89_phy_write32_mask(rtwdev, + R_P0_TSSI_TRK + (path << 13), + B_P0_TSSI_TRK_EN, 0x1); + + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); + rtw89_write_rf(rtwdev, 
path, RR_DCK, RR_DCK_FINE, 0x0); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); + _set_rx_dck(rtwdev, phy, path); + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_FINE, dck_tune); + rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); + + if (rtwdev->is_tssi_mode[path]) + rtw89_phy_write32_mask(rtwdev, + R_P0_TSSI_TRK + (path << 13), + B_P0_TSSI_TRK_EN, 0x0); + } +} + +static void _rck(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + u32 rf_reg5; + u32 rck_val; + u32 val; + int ret; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] ====== S%d RCK ======\n", path); + + rf_reg5 = rtw89_read_rf(rtwdev, path, RR_RSV1, RFREG_MASK); + + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, RR_MOD_V_RX); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF0x00 = 0x%05x\n", + rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK)); + + /* RCK trigger */ + rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, 0x00240); + + ret = read_poll_timeout_atomic(rtw89_read_rf, val, val, 2, 30, + false, rtwdev, path, RR_RCKS, BIT(3)); + + rck_val = rtw89_read_rf(rtwdev, path, RR_RCKC, RR_RCKC_CA); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] rck_val = 0x%x, ret = %d\n", + rck_val, ret); + + rtw89_write_rf(rtwdev, path, RR_RCKC, RFREG_MASK, rck_val); + rtw89_write_rf(rtwdev, path, RR_RSV1, RFREG_MASK, rf_reg5); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[RCK] RF 0x1b = 0x%x\n", + rtw89_read_rf(rtwdev, path, RR_RCKC, RFREG_MASK)); +} + +static void _drck(struct rtw89_dev *rtwdev) +{ + u32 rck_d; + u32 val; + int ret; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]Ddie RCK start!!!\n"); + rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x1); + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, + 1, 10000, false, + rtwdev, R_DRCK_RES, B_DRCK_POL); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DRCK timeout\n"); + + rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x1); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_DRCK_FH, B_DRCK_LAT, 0x0); + + rck_d = rtw89_phy_read32_mask(rtwdev, R_DRCK_RES, 0x7c00); + rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_IDLE, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_VAL, rck_d); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0xc0c4 = 0x%x\n", + rtw89_phy_read32_mask(rtwdev, R_DRCK, MASKDWORD)); +} + +static void _dack_backup_s0(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u8 i; + + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); + + for (i = 0; i < 0x10; i++) { + rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_V, i); + dack->msbk_d[0][0][i] = + rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0M0); + + rtw89_phy_write32_mask(rtwdev, R_DCOF8, B_DCOF8_V, i); + dack->msbk_d[0][1][i] = + rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0M1); + } + + dack->biask_d[0][0] = + rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS00, B_DACK_BIAS00); + dack->biask_d[0][1] = + rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS01, B_DACK_BIAS01); + + dack->dadck_d[0][0] = + rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK00, B_DACK_DADCK00); + dack->dadck_d[0][1] = + rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK01, B_DACK_DADCK01); +} + +static void _dack_backup_s1(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u8 i; + + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); + + for (i = 0; i < 0x10; i++) { + rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10, i); + 
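/* entry i selects one MSBK word: [1][0] collects the S1 I-channel
+ * values and [1][1] the Q-channel values (dumped by _dack_dump()). */ +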
dack->msbk_d[1][0][i] = + rtw89_phy_read32_mask(rtwdev, R_DACK10S, B_DACK10S); + + rtw89_phy_write32_mask(rtwdev, R_DACK11, B_DACK11, i); + dack->msbk_d[1][1][i] = + rtw89_phy_read32_mask(rtwdev, R_DACK11S, B_DACK11S); + } + + dack->biask_d[1][0] = + rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS10, B_DACK_BIAS10); + dack->biask_d[1][1] = + rtw89_phy_read32_mask(rtwdev, R_DACK_BIAS11, B_DACK_BIAS11); + + dack->dadck_d[1][0] = + rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK10, B_DACK_DADCK10); + dack->dadck_d[1][1] = + rtw89_phy_read32_mask(rtwdev, R_DACK_DADCK11, B_DACK_DADCK11); +} + +static +void _dack_reset(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + if (path == RF_PATH_A) { + rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DCOF0, B_DCOF0_RST, 0x1); + } else { + rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10_RST, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DACK10, B_DACK10_RST, 0x1); + } +} + +static +void _dack_reload_by_path(struct rtw89_dev *rtwdev, u8 path, u8 index) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u32 tmp, tmp_offset, tmp_reg; + u32 idx_offset, path_offset; + u8 i; + + if (index == 0) + idx_offset = 0; + else + idx_offset = 0x14; + + if (path == RF_PATH_A) + path_offset = 0; + else + path_offset = 0x28; + + tmp_offset = idx_offset + path_offset; + + rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_DACK2_K, B_DACK2_RST, 0x1); + + /* msbk_d: 15/14/13/12 */ + tmp = 0x0; + for (i = 0; i < 4; i++) + tmp |= dack->msbk_d[path][index][i + 12] << (i * 8); + tmp_reg = 0xc200 + tmp_offset; + rtw89_phy_write32(rtwdev, tmp_reg, tmp); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg, + rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD)); + + /* msbk_d: 11/10/9/8 */ + tmp = 0x0; + for (i = 0; i < 4; i++) + tmp |= dack->msbk_d[path][index][i + 8] << (i * 8); + tmp_reg = 0xc204 + tmp_offset; + rtw89_phy_write32(rtwdev, tmp_reg, tmp); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg, + rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD)); + + /* msbk_d: 7/6/5/4 */ + tmp = 0x0; + for (i = 0; i < 4; i++) + tmp |= dack->msbk_d[path][index][i + 4] << (i * 8); + tmp_reg = 0xc208 + tmp_offset; + rtw89_phy_write32(rtwdev, tmp_reg, tmp); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg, + rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD)); + + /* msbk_d: 3/2/1/0 */ + tmp = 0x0; + for (i = 0; i < 4; i++) + tmp |= dack->msbk_d[path][index][i] << (i * 8); + tmp_reg = 0xc20c + tmp_offset; + rtw89_phy_write32(rtwdev, tmp_reg, tmp); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg, + rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD)); + + /* dadak_d/biask_d */ + tmp = (dack->biask_d[path][index] << 22) | + (dack->dadck_d[path][index] << 14); + tmp_reg = 0xc210 + tmp_offset; + rtw89_phy_write32(rtwdev, tmp_reg, tmp); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x=0x%x\n", tmp_reg, + rtw89_phy_read32_mask(rtwdev, tmp_reg, MASKDWORD)); + + /* enable DACK result from reg */ + rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL + tmp_offset, B_DACKN0_EN, 0x1); +} + +static +void _dack_reload(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + u8 i; + + for (i = 0; i < 2; i++) + _dack_reload_by_path(rtwdev, path, i); +} + +static bool _dack_s0_poll(struct rtw89_dev *rtwdev) +{ + if (rtw89_phy_read32_mask(rtwdev, 
R_DACK_S0P0, B_DACK_S0P0_OK) == 0 || + rtw89_phy_read32_mask(rtwdev, R_DACK_S0P1, B_DACK_S0P1_OK) == 0 || + rtw89_phy_read32_mask(rtwdev, R_DACK_S0P2, B_DACK_S0P2_OK) == 0 || + rtw89_phy_read32_mask(rtwdev, R_DACK_S0P3, B_DACK_S0P3_OK) == 0) + return false; + + return true; +} + +static void _dack_s0(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + bool done; + int ret; + + _txck_force(rtwdev, RF_PATH_A, true, DAC_160M); + + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, BIT(28), 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN1, 0x0); + udelay(100); + rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_VAL, 0x30); + rtw89_phy_write32_mask(rtwdev, R_DCOF9, B_DCOF9_VAL, 0x30); + + _dack_reset(rtwdev, RF_PATH_A); + + rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x1); + udelay(1); + + dack->msbk_timeout[0] = false; + + ret = read_poll_timeout_atomic(_dack_s0_poll, done, done, + 1, 20000, false, rtwdev); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 DACK timeout\n"); + dack->msbk_timeout[0] = true; + } + + rtw89_phy_write32_mask(rtwdev, R_DCOF1, B_DCOF1_S, 0x0); + + _txck_force(rtwdev, RF_PATH_A, false, DAC_960M); + _dack_backup_s0(rtwdev); + _dack_reload(rtwdev, RF_PATH_A); + + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0); +} + +static bool _dack_s1_poll(struct rtw89_dev *rtwdev) +{ + if (rtw89_phy_read32_mask(rtwdev, R_DACK_S1P0, B_DACK_S1P0_OK) == 0 || + rtw89_phy_read32_mask(rtwdev, R_DACK_S1P1, B_DACK_S1P1_OK) == 0 || + rtw89_phy_read32_mask(rtwdev, R_DACK_S1P2, B_DACK_S1P2_OK) == 0 || + rtw89_phy_read32_mask(rtwdev, R_DACK_S1P3, B_DACK_S1P3_OK) == 0) + return false; + + return true; +} + +static void _dack_s1(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + bool done; + int ret; + + _txck_force(rtwdev, RF_PATH_B, true, DAC_160M); + + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, BIT(28), 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN1, 0x0); + udelay(100); + rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_VAL, 0x30); + rtw89_phy_write32_mask(rtwdev, R_DACK2_K, B_DACK2_VAL, 0x30); + + _dack_reset(rtwdev, RF_PATH_B); + + rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x1); + udelay(1); + + dack->msbk_timeout[1] = false; + + ret = read_poll_timeout_atomic(_dack_s1_poll, done, done, + 1, 10000, false, rtwdev); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 DACK timeout\n"); + dack->msbk_timeout[1] = true; + } + + rtw89_phy_write32_mask(rtwdev, R_DACK1_K, B_DACK1_EN, 0x0); + + _txck_force(rtwdev, RF_PATH_B, false, DAC_960M); + _dack_backup_s1(rtwdev); + _dack_reload(rtwdev, RF_PATH_B); + + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0); +} + +static void _dack(struct rtw89_dev *rtwdev) +{ + _dack_s0(rtwdev); + _dack_s1(rtwdev); +} + +static void _dack_dump(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u8 i; + u8 t; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S0 ADC_DCK ic = 0x%x, qc = 0x%x\n", + dack->addck_d[0][0], dack->addck_d[0][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S1 ADC_DCK ic = 0x%x, qc = 0x%x\n", + dack->addck_d[1][0], dack->addck_d[1][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S0 DAC_DCK ic = 0x%x, qc = 0x%x\n", + dack->dadck_d[0][0], dack->dadck_d[0][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S1 DAC_DCK ic = 0x%x, qc = 0x%x\n", + 
dack->dadck_d[1][0], dack->dadck_d[1][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S0 biask ic = 0x%x, qc = 0x%x\n", + dack->biask_d[0][0], dack->biask_d[0][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DACK]S1 biask ic = 0x%x, qc = 0x%x\n", + dack->biask_d[1][0], dack->biask_d[1][1]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK ic:\n"); + for (i = 0; i < 0x10; i++) { + t = dack->msbk_d[0][0][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 MSBK qc:\n"); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + t = dack->msbk_d[0][1][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK ic:\n"); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + t = dack->msbk_d[1][0][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 MSBK qc:\n"); + for (i = 0; i < RTW89_DACK_MSBK_NR; i++) { + t = dack->msbk_d[1][1][i]; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]0x%x\n", t); + } +} + +static void _addck_ori(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + u32 val; + int ret; + + rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_MAN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_MAN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1); + + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf); + udelay(100); + + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(4), 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3); + rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0_TRG, 0x0); + udelay(1); + + rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x1); + dack->addck_timeout[0] = false; + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, + 1, 10000, false, + rtwdev, R_ADDCKR0, BIT(0)); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S0 ADDCK timeout\n"); + dack->addck_timeout[0] = true; + } + + rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, BIT(4), 0x0); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1); + + rtw89_phy_write32_mask(rtwdev, R_ADDCK0, B_ADDCK0, 0x0); + dack->addck_d[0][0] = + rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A0); + dack->addck_d[0][1] = + rtw89_phy_read32_mask(rtwdev, R_ADDCKR0, B_ADDCKR0_A1); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0); + + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_FLTRST, 0x1); + + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xf); + udelay(100); + + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(4), 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0x3); + rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x1); + 
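/* pulse the S1 ADDCK trigger high then low, then poll R_ADDCKR1 */ +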
rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1_TRG, 0x0); + udelay(1); + + rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x1); + dack->addck_timeout[1] = false; + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val, + 1, 10000, false, + rtwdev, R_ADDCKR1, BIT(0)); + if (ret) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]S1 ADDCK timeout\n"); + dack->addck_timeout[1] = true; + } + + rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, BIT(4), 0x0); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15_H, 0xc); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_ADCCLK, 0x1); + + rtw89_phy_write32_mask(rtwdev, R_ADDCK1, B_ADDCK1, 0x0); + dack->addck_d[1][0] = + rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A0); + dack->addck_d[1][1] = + rtw89_phy_read32_mask(rtwdev, R_ADDCKR1, B_ADDCKR1_A1); + + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0); +} + +static void _addck_reload(struct rtw89_dev *rtwdev) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + + rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL1, dack->addck_d[0][0]); + rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RL0, dack->addck_d[0][1]); + + rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x3); + + rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL1, dack->addck_d[1][0]); + rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RL0, dack->addck_d[1][1]); + + rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x3); +} + +static void _dack_manual_off(struct rtw89_dev *rtwdev) +{ + rtw89_phy_write32_mask(rtwdev, R_ADDCK0_RL, B_ADDCK0_RLS, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADDCK1_RL, B_ADDCK1_RLS, 0x0); + + rtw89_phy_write32_mask(rtwdev, R_DACKN0_CTL, B_DACKN0_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DACKN1_CTL, B_DACKN1_ON, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DACKN2_CTL, B_DACKN2_ON, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DACKN3_CTL, B_DACKN3_ON, 0x0); +} + +static void _dac_cal(struct rtw89_dev *rtwdev, bool force) +{ + struct rtw89_dack_info *dack = &rtwdev->dack; + + dack->dack_done = false; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK start!!!\n"); + + _drck(rtwdev); + _dack_manual_off(rtwdev); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RFREG_MASK, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MOD, RFREG_MASK, 0x337e1); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_MOD, RFREG_MASK, 0x337e1); + _rxck_force(rtwdev, RF_PATH_A, true, ADC_960M); + _rxck_force(rtwdev, RF_PATH_B, true, ADC_960M); + _addck_ori(rtwdev); + + _rxck_force(rtwdev, RF_PATH_A, false, ADC_960M); + _rxck_force(rtwdev, RF_PATH_B, false, ADC_960M); + _addck_reload(rtwdev); + + rtw89_write_rf(rtwdev, RF_PATH_A, RR_MODOPT, RFREG_MASK, 0x0); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_MODOPT, RFREG_MASK, 0x0); + + _dack(rtwdev); + _dack_dump(rtwdev); + dack->dack_done = true; + rtw89_write_rf(rtwdev, RF_PATH_A, RR_RSV1, RFREG_MASK, 0x1); + rtw89_write_rf(rtwdev, RF_PATH_B, RR_RSV1, RFREG_MASK, 0x1); + + dack->dack_cnt++; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DACK]DACK finish!!!\n"); +} + +static bool _iqk_check_cal(struct rtw89_dev *rtwdev, u8 path, u8 ktype) +{ + bool notready = false; + u32 val; + int ret; + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, + 10, 8200, false, + rtwdev, R_RFK_ST, MASKBYTE0); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL1 IQK timeout!!!\n"); + + udelay(10); + + ret = 
read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000, + 10, 400, false, + rtwdev, R_RPT_COM, B_RPT_COM_RDY); + if (ret) { + notready = true; + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]NCTL2 IQK timeout!!!\n"); + } + + udelay(10); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0); + + return notready; +} + +static bool _iqk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, + u8 path, u8 ktype) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u32 iqk_cmd; + bool fail; + + switch (ktype) { + case ID_TXAGC: + iqk_cmd = 0x008 | (1 << (4 + path)) | (path << 1); + break; + case ID_FLOK_COARSE: + iqk_cmd = 0x108 | (1 << (4 + path)); + break; + case ID_FLOK_FINE: + iqk_cmd = 0x208 | (1 << (4 + path)); + break; + case ID_FLOK_VBUFFER: + iqk_cmd = 0x308 | (1 << (4 + path)); + break; + case ID_TXK: + iqk_cmd = 0x008 | (1 << (path + 4)) | + (((0x8 + iqk_info->iqk_bw[path]) & 0xf) << 8); + break; + case ID_RXAGC: + iqk_cmd = 0x508 | (1 << (4 + path)) | (path << 1); + break; + case ID_RXK: + iqk_cmd = 0x008 | (1 << (path + 4)) | + (((0xb + iqk_info->iqk_bw[path]) & 0xf) << 8); + break; + case ID_NBTXK: + iqk_cmd = 0x408 | (1 << (4 + path)); + break; + case ID_NBRXK: + iqk_cmd = 0x608 | (1 << (4 + path)); + break; + default: + return false; + } + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s, iqk_cmd = %x\n", + __func__, iqk_cmd + 1); + + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, iqk_cmd + 1); + fail = _iqk_check_cal(rtwdev, path, ktype); + + return fail; +} + +static void _iqk_txk_setting(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + + switch (iqk_info->iqk_band[path]) { + case RTW89_BAND_2G: + rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT2, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXG1, RR_TXG1_ATT1, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXG2, RR_TXG2_ATT0, 0x1); + rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1); + rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x00); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x5); + udelay(1); + break; + case RTW89_BAND_5G: + rtw89_write_rf(rtwdev, path, RR_BIASA, RR_BIASA_A, 0x1); + rtw89_write_rf(rtwdev, path, RR_TXGA, RR_TXGA_LOK_EXT, 0x0); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x1); + rtw89_write_rf(rtwdev, path, RR_LUTWA, RR_LUTWA_M1, 0x80); + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_IQK, 0x403e); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, 0x0); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, 0x4); + udelay(1); + break; + default: + break; + } +} + +static bool _iqk_2g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000119 + (path << 4)); + + _iqk_check_cal(rtwdev, path, ID_FLOK_COARSE); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + 
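/* repeat with TX gain 0x12 and a larger IQSW code for the vbuffer step */ +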
rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4)); + + _iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000219 + (path << 4)); + + _iqk_check_cal(rtwdev, path, ID_FLOK_COARSE); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4)); + + _iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER); + + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + return false; +} + +static bool _iqk_5g_lok(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, 0x021); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000119 + (path << 4)); + + _iqk_check_cal(rtwdev, path, ID_FLOK_COARSE); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4)); + + _iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x09); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000219 + (path << 4)); + + _iqk_check_cal(rtwdev, path, ID_FLOK_COARSE); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, 0x12); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), B_KIP_IQP_IQSW, 0x24); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, 0x00000319 + (path << 4)); + + _iqk_check_cal(rtwdev, path, ID_FLOK_VBUFFER); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + return false; +} + +static bool _iqk_2g_tx(struct 
rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + static const u32 g_power_range[4] = {0x0, 0x0, 0x0, 0x0}; + static const u32 g_track_range[4] = {0x4, 0x4, 0x6, 0x6}; + static const u32 g_gain_bb[4] = {0x08, 0x0e, 0x08, 0x0e}; + static const u32 g_itqt[4] = {0x09, 0x12, 0x1b, 0x24}; + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool notready = false; + bool kfail = false; + u8 gp; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + + for (gp = 0x0; gp < 0x4; gp++) { + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, + g_power_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, + g_track_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, + g_gain_bb[gp]); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000100, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000010, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000004, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000003, gp); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, + 0x009); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, g_itqt[gp]); + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); + iqk_info->nb_txcfir[path] = + rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD); + + if (iqk_info->is_nbiqk) + break; + + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, g_itqt[gp]); + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_TXK); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]S%x, gp = 0x%x, 0x8%x38 = 0x%x\n", + path, gp, 1 << path, iqk_info->nb_txcfir[path]); + } + + if (!notready) + kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); + + if (kfail) { + iqk_info->nb_txcfir[path] = 0x40000002; + rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), + B_IQK_RES_TXCFIR, 0x0); + } + + return kfail; +} + +static bool _iqk_5g_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + static const u32 a_power_range[4] = {0x0, 0x0, 0x0, 0x0}; + static const u32 a_track_range[4] = {0x3, 0x3, 0x6, 0x6}; + static const u32 a_gain_bb[4] = {0x08, 0x10, 0x08, 0x0e}; + static const u32 a_itqt[4] = {0x09, 0x12, 0x1b, 0x24}; + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool notready = false; + bool kfail = false; + u8 gp; + + for (gp = 0x0; gp < 0x4; gp++) { + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR0, a_power_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_GR1, a_track_range[gp]); + rtw89_write_rf(rtwdev, path, RR_TXIG, RR_TXIG_TG, a_gain_bb[gp]); + + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + MASKDWORD, a_itqt[gp]); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000100, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000010, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000004, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000003, gp); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_TXT, + 0x009); + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, a_itqt[gp]); + + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBTXK); + iqk_info->nb_txcfir[path] = + rtw89_phy_read32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD); + + if (iqk_info->is_nbiqk) + break; + + rtw89_phy_write32_mask(rtwdev, R_KIP_IQP + (path << 8), + B_KIP_IQP_IQSW, a_itqt[gp]); + notready = 
_iqk_one_shot(rtwdev, phy_idx, path, ID_TXK); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]S%x, gp = 0x%x, 0x8%x38 = 0x%x\n", + path, gp, 1 << path, iqk_info->nb_txcfir[path]); + } + + if (!notready) + kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); + + if (kfail) { + iqk_info->nb_txcfir[path] = 0x40000002; + rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), + B_IQK_RES_TXCFIR, 0x0); + } + + return kfail; +} + +static void _iqk_adc_fifo_rst(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303); + udelay(10); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333); +} + +static void _iqk_rxclk_setting(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303); + + if (iqk_info->iqk_bw[path] == RTW89_CHANNEL_WIDTH_80) { + _rxck_force(rtwdev, RF_PATH_A, true, ADC_960M); + _rxck_force(rtwdev, RF_PATH_B, true, ADC_960M); + udelay(1); + + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, + B_UPD_CLK_ADC_ON, 0x1); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, + B_UPD_CLK_ADC_VAL, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, + B_PATH0_SAMPL_DLY_T_MSK_V1, 0x2); + rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, + B_PATH1_SAMPL_DLY_T_MSK_V1, 0x2); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_BW1, 0x8); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, + B_PATH1_BW_SEL_MSK_V1, 0x8); + } else { + _rxck_force(rtwdev, RF_PATH_A, true, ADC_480M); + _rxck_force(rtwdev, RF_PATH_B, true, ADC_480M); + udelay(1); + + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, + B_UPD_CLK_ADC_ON, 0x1); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, + B_UPD_CLK_ADC_VAL, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, + B_PATH0_SAMPL_DLY_T_MSK_V1, 0x3); + rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, + B_PATH1_SAMPL_DLY_T_MSK_V1, 0x3); + rtw89_phy_write32_mask(rtwdev, R_P0_CFCH_BW1, B_P0_CFCH_BW1, 0xf); + rtw89_phy_write32_mask(rtwdev, R_PATH1_BW_SEL_V1, + B_PATH1_BW_SEL_MSK_V1, 0xf); + } + + rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, 0x00000780, 0x8); + rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x00000780, 0x8); + rtw89_phy_write32_mask(rtwdev, R_PATH0_SAMPL_DLY_T_V1, 0x00007800, 0x2); + rtw89_phy_write32_mask(rtwdev, R_PATH1_SAMPL_DLY_T_V1, 0x00007800, 0x2); + rtw89_phy_write32_mask(rtwdev, R_DRCK, B_DRCK_MUL, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x0f); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x03); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa001); + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0xa041); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333); +} + +static bool _iqk_2g_rx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + static const u32 g_idxrxgain[2] = {0x212, 0x310}; + static const u32 g_idxattc2[2] = {0x00, 0x20}; + static const u32 g_idxattc1[2] = {0x3, 0x2}; + static const u32 g_idxrxagc[2] = {0x0, 0x2}; + static const u32 g_idx[2] = {0x0, 0x2}; + struct rtw89_iqk_info *iqk_info = 
&rtwdev->iqk; + bool notready = false; + bool kfail = false; + u32 rf_18, tmp; + u8 gp; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x1); + rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK); + rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18); + + for (gp = 0x0; gp < 0x2; gp++) { + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, g_idxrxgain[gp]); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C2G, g_idxattc2[gp]); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_C1G, g_idxattc1[gp]); + + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000100, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000010, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000007, g_idx[gp]); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); + udelay(100); + udelay(100); + + tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, tmp); + rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, g_idxrxagc[gp]); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); + + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, rf rxbb = %x\n", path, + rtw89_read_rf(rtwdev, path, RR_MOD, 0x003c0)); + + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); + udelay(100); + udelay(100); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); + + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); + iqk_info->nb_rxcfir[path] = + rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), + MASKDWORD) | 0x2; + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]S%x, gp = 0x%x, 0x8%x3c = 0x%x\n", path, + g_idx[gp], 1 << path, iqk_info->nb_rxcfir[path]); + + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + if (iqk_info->is_nbiqk) + break; + + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + } + + if (!notready) + kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); + + if (kfail) { + iqk_info->nb_txcfir[path] = 0x40000002; + rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), + B_IQK_RES_RXCFIR, 0x0); + } + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0); + + return kfail; +} + +static bool _iqk_5g_rx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + static const u32 a_idxrxgain[2] = {0x110, 0x290}; + static const u32 a_idxattc2[2] = {0x0f, 0x0f}; + static const u32 a_idxattc1[2] = {0x2, 0x2}; + static const u32 a_idxrxagc[2] = {0x4, 0x6}; + static const u32 a_idx[2] = {0x0, 0x2}; + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool notready = false; + bool kfail = false; + u32 rf_18, tmp; + u8 gp; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_MASK, 0xc); + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL5G, 0x1); + rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK); + 
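/* cache RF 0x18 (channel config) into RR_RSV4 before the RX gain sweep */ +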
rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18); + + for (gp = 0x0; gp < 0x2; gp++) { + rtw89_write_rf(rtwdev, path, RR_MOD, RR_MOD_RGM, a_idxrxgain[gp]); + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_HATT, a_idxattc2[gp]); + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RXA2_CC2, a_idxattc1[gp]); + + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000100, 0x1); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000010, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), + 0x00000007, a_idx[gp]); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); + udelay(100); + udelay(100); + + tmp = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASK); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF2, B_IQK_DIF2_RXPI, tmp); + rtw89_phy_write32_mask(rtwdev, R_IQK_RXA, B_IQK_RXAGC, a_idxrxagc[gp]); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x11); + + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXAGC); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, rf rxbb = %x\n", path, + rtw89_read_rf(rtwdev, path, RR_MOD, 0x003c0)); + + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80013); + udelay(200); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x011); + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_NBRXK); + iqk_info->nb_rxcfir[path] = + rtw89_phy_read32_mask(rtwdev, R_RXIQC + (path << 8), + MASKDWORD) | 0x2; + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]S%x, gp = 0x%x, 0x8%x3c = 0x%x\n", + path, a_idx[gp], 1 << path, iqk_info->nb_rxcfir[path]); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + if (iqk_info->is_nbiqk) + break; + + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + notready = _iqk_one_shot(rtwdev, phy_idx, path, ID_RXK); + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + } + + if (!notready) + kfail = !!rtw89_phy_read32_mask(rtwdev, R_NCTL_RPT, B_NCTL_RPT_FLG); + + if (kfail) { + iqk_info->nb_txcfir[path] = 0x40000002; + rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), + B_IQK_RES_RXCFIR, 0x0); + } + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_SEL2G, 0x0); + + return kfail; +} + +static void _iqk_by_path(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + bool lok_result = false; + bool txk_result = false; + bool rxk_result = false; + u8 i; + + for (i = 0; i < 3; i++) { + _iqk_txk_setting(rtwdev, path); + if (iqk_info->iqk_band[path] == RTW89_BAND_2G) + lok_result = _iqk_2g_lok(rtwdev, phy_idx, path); + else + lok_result = _iqk_5g_lok(rtwdev, phy_idx, path); + + if (!lok_result) + break; + } + + if (lok_result) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]!!!!!!!!!!LOK by Pass !!!!!!!!!!!\n"); + rtw89_write_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK, 0x80200); + rtw89_write_rf(rtwdev, path, RR_RSV2, RFREG_MASK, 0x80200); + rtw89_write_rf(rtwdev, path, RR_LOKVB, RFREG_MASK, 0x80200); + } + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x08[00:19] = 0x%x\n", + rtw89_read_rf(rtwdev, path, RR_DTXLOK, RFREG_MASK)); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]RF_0x09[00:19] = 0x%x\n", + rtw89_read_rf(rtwdev, path, RR_RSV2, RFREG_MASK)); + rtw89_debug(rtwdev, 
RTW89_DBG_RFK, "[IQK]RF_0x0a[00:19] = 0x%x\n", + rtw89_read_rf(rtwdev, path, RR_LOKVB, RFREG_MASK)); + + if (iqk_info->iqk_band[path] == RTW89_BAND_2G) + txk_result = _iqk_2g_tx(rtwdev, phy_idx, path); + else + txk_result = _iqk_5g_tx(rtwdev, phy_idx, path); + + _iqk_rxclk_setting(rtwdev, path); + _iqk_adc_fifo_rst(rtwdev, phy_idx, path); + + if (iqk_info->iqk_band[path] == RTW89_BAND_2G) + rxk_result = _iqk_2g_rx(rtwdev, phy_idx, path); + else + rxk_result = _iqk_5g_rx(rtwdev, phy_idx, path); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]result : lok_= %x, txk_= %x, rxk_= %x\n", + lok_result, txk_result, rxk_result); +} + +static void _iqk_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, u8 path) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u8 get_empty_table = false; + u32 reg_rf18; + u32 reg_35c; + u8 idx; + + for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) { + if (iqk_info->iqk_mcc_ch[idx][path] == 0) { + get_empty_table = true; + break; + } + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (1)idx = %x\n", idx); + + if (!get_empty_table) { + idx = iqk_info->iqk_table_idx[path] + 1; + if (idx > 1) + idx = 0; + } + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK] (2)idx = %x\n", idx); + + reg_rf18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK); + reg_35c = rtw89_phy_read32_mask(rtwdev, R_CIRST, B_CIRST_SYN); + + iqk_info->iqk_band[path] = chan->band_type; + iqk_info->iqk_bw[path] = chan->band_width; + iqk_info->iqk_ch[path] = chan->channel; + iqk_info->iqk_mcc_ch[idx][path] = chan->channel; + iqk_info->iqk_table_idx[path] = idx; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x, idx = %x\n", + path, reg_rf18, idx); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x18= 0x%x\n", + path, reg_rf18); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]S%x, 0x35c= 0x%x\n", + path, reg_35c); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]times = 0x%x, ch =%x\n", + iqk_info->iqk_times, idx); + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]iqk_mcc_ch[%x][%x] = 0x%x\n", + idx, path, iqk_info->iqk_mcc_ch[idx][path]); +} + +static void _iqk_start_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, u8 path) +{ + _iqk_by_path(rtwdev, phy_idx, path); +} + +static void _iqk_restore(struct rtw89_dev *rtwdev, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__); + + if (iqk_info->is_nbiqk) { + rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), + MASKDWORD, iqk_info->nb_txcfir[path]); + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), + MASKDWORD, iqk_info->nb_rxcfir[path]); + } else { + rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), + MASKDWORD, 0x40000000); + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), + MASKDWORD, 0x40000000); + } + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, + 0x00000e19 + (path << 4)); + + _iqk_check_cal(rtwdev, path, 0x0); + + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, B_NCTL_N1_CIP, 0x00); + rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000); + rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000); + + rtw89_phy_write32_mask(rtwdev, R_KIP_CLK, MASKDWORD, 0x0); + rtw89_phy_write32_mask(rtwdev, R_IQRSN, B_IQRSN_K2, 0x0); + rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), BIT(28), 0x0); + + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0); + rtw89_write_rf(rtwdev, path, RR_LUTWE, RR_LUTWE_LOK, 0x0); + rtw89_write_rf(rtwdev, path, RR_MOD, 
RR_MOD_MASK, 0x3); + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1); + rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1); +} + +static void _iqk_afebb_restore(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + rtw89_debug(rtwdev, RTW89_DBG_RFK, "===> %s\n", __func__); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_CLKG_FORCE, 0x0); + rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x0000000); + rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x0); + rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, 0x0000001f, 0x03); + rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, 0x000003e0, 0x03); + rtw89_phy_write32_mask(rtwdev, R_P0_RXCK, B_P0_TXCK_ALL, 0x00); + rtw89_phy_write32_mask(rtwdev, R_P1_RXCK, B_P1_TXCK_ALL, 0x00); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, + B_UPD_CLK_ADC_VAL | B_UPD_CLK_ADC_ON, 0x0); + rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x0); + rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x0); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0000); + rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x0); +} + +static void _iqk_preset(struct rtw89_dev *rtwdev, u8 path) +{ + u8 idx = 0; + + rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), 0x00000001, idx); + rtw89_phy_write32_mask(rtwdev, R_CFIR_LUT + (path << 8), 0x00000008, idx); + rtw89_phy_write32_mask(rtwdev, R_TXIQC + (path << 8), MASKDWORD, 0x40000000); + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), MASKDWORD, 0x40000000); + + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); + rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0); + rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080); + rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x81ff010a); +} + +static void _iqk_macbb_setting(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy_idx, u8 path) +{ + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x0303); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_GOT_TXRX, 0x3); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_GOT_TXRX, 0x3); + rtw89_phy_write32_mask(rtwdev, R_P0_ADCFF_EN, B_P0_ADCFF_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_ADCFF_EN, B_P1_ADCFF_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_CLKG_FORCE, 0x3); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P0_CLKG_FORCE, 0x3); + rtw89_phy_write32_mask(rtwdev, R_TXCKEN_FORCE, B_TXCKEN_FORCE_ALL, 0x1ffffff); + rtw89_phy_write32_mask(rtwdev, R_FAHM, B_RXTD_CKEN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_GEN_ON, 0x1); + rtw89_phy_write32_mask(rtwdev, R_TX_COLLISION_T2R_ST, B_TXRX_FORCE_VAL, 0x3ff); + rtw89_phy_write32_mask(rtwdev, R_IOQ_IQK_DPK, B_IOQ_IQK_DPK_CLKEN, 0x3); + rtw89_phy_write32_mask(rtwdev, R_IQK_DPK_RST, B_IQK_DPK_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_PATH_RST, B_P0_PATH_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_PATH_RST, B_P1_PATH_RST, 0x1); + rtw89_phy_write32_mask(rtwdev, R_PD_CTRL, B_PD_HIT_DIS, 0x1); + rtw89_phy_write32_mask(rtwdev, R_RXCCA, B_RXCCA_DIS, 0x1); + rtw89_phy_write32_mask(rtwdev, R_DCFO_WEIGHT, 
B_DAC_CLK_IDX, 0x1); + + _txck_force(rtwdev, RF_PATH_A, true, DAC_960M); + _txck_force(rtwdev, RF_PATH_B, true, DAC_960M); + _rxck_force(rtwdev, RF_PATH_A, true, ADC_1920M); + _rxck_force(rtwdev, RF_PATH_B, true, ADC_1920M); + + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_ON, 0x1); + rtw89_phy_write32_mask(rtwdev, R_UPD_CLK_ADC, B_UPD_CLK_ADC_VAL, 0x2); + + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_DBG, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_DBGMOD, B_P1_DBGMOD_ON, 0x1); + udelay(10); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x1f); + udelay(10); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR_PW15, B_ANAPAR_PW15, 0x13); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0001); + udelay(10); + rtw89_phy_write32_mask(rtwdev, R_ANAPAR, B_ANAPAR_15, 0x0041); + rtw89_phy_write32_mask(rtwdev, R_P0_NRBW, B_P0_NRBW_RSTB, 0x1); + rtw89_phy_write32_mask(rtwdev, R_ADC_FIFO, B_ADC_FIFO_RXK, 0x3333); +} + +static void _iqk_init(struct rtw89_dev *rtwdev) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u8 idx, path; + + rtw89_phy_write32_mask(rtwdev, R_IQKINF, MASKDWORD, 0x0); + + if (iqk_info->is_iqk_init) + return; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]===>%s\n", __func__); + iqk_info->is_iqk_init = true; + iqk_info->is_nbiqk = false; + iqk_info->iqk_fft_en = false; + iqk_info->iqk_sram_en = false; + iqk_info->iqk_cfir_en = false; + iqk_info->iqk_xym_en = false; + iqk_info->iqk_times = 0x0; + + for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) { + iqk_info->iqk_channel[idx] = 0x0; + for (path = 0; path < RTW8852BT_SS; path++) { + iqk_info->lok_cor_fail[idx][path] = false; + iqk_info->lok_fin_fail[idx][path] = false; + iqk_info->iqk_tx_fail[idx][path] = false; + iqk_info->iqk_rx_fail[idx][path] = false; + iqk_info->iqk_mcc_ch[idx][path] = 0x0; + iqk_info->iqk_table_idx[path] = 0x0; + } + } +} + +static void _wait_rx_mode(struct rtw89_dev *rtwdev, u8 kpath) +{ + u32 rf_mode; + u8 path; + int ret; + + for (path = 0; path < RF_PATH_MAX; path++) { + if (!(kpath & BIT(path))) + continue; + + ret = read_poll_timeout_atomic(rtw89_read_rf, rf_mode, + rf_mode != 2, 2, 5000, false, + rtwdev, path, RR_MOD, RR_MOD_MASK); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RFK] Wait S%d to Rx mode!! 
(ret = %d)\n", path, ret); + } +} + +static void _tmac_tx_pause(struct rtw89_dev *rtwdev, enum rtw89_phy_idx band_idx, + bool is_pause) +{ + if (!is_pause) + return; + + _wait_rx_mode(rtwdev, _kpath(rtwdev, band_idx)); +} + +static void _doiqk(struct rtw89_dev *rtwdev, bool force, + enum rtw89_phy_idx phy_idx, u8 path) +{ + struct rtw89_iqk_info *iqk_info = &rtwdev->iqk; + u32 backup_bb_val[BACKUP_BB_REGS_NR]; + u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR]; + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, RF_AB); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[IQK]==========IQK start!!!!!==========\n"); + iqk_info->iqk_times++; + iqk_info->version = RTW8852BT_IQK_VER; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[IQK]Test Ver 0x%x\n", iqk_info->version); + _iqk_get_ch_info(rtwdev, phy_idx, path); + + _rfk_backup_bb_reg(rtwdev, backup_bb_val); + _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path); + _iqk_macbb_setting(rtwdev, phy_idx, path); + _iqk_preset(rtwdev, path); + _iqk_start_iqk(rtwdev, phy_idx, path); + _iqk_restore(rtwdev, path); + _iqk_afebb_restore(rtwdev, phy_idx, path); + _rfk_reload_bb_reg(rtwdev, backup_bb_val); + _rfk_reload_rf_reg(rtwdev, backup_rf_val[path], path); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); +} + +static void _iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx, bool force) +{ + u8 kpath = _kpath(rtwdev, phy_idx); + + switch (kpath) { + case RF_A: + _doiqk(rtwdev, force, phy_idx, RF_PATH_A); + break; + case RF_B: + _doiqk(rtwdev, force, phy_idx, RF_PATH_B); + break; + case RF_AB: + _doiqk(rtwdev, force, phy_idx, RF_PATH_A); + _doiqk(rtwdev, force, phy_idx, RF_PATH_B); + break; + default: + break; + } +} + +static void _dpk_onoff(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool off) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 val, kidx = dpk->cur_idx[path]; + bool off_reverse = !off; + + val = dpk->is_dpk_enable & off_reverse & dpk->bp[path][kidx].path_ok; + + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), + BIT(24), val); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s !!!\n", path, + kidx, dpk->is_dpk_enable & off_reverse ? "enable" : "disable"); +} + +static void _dpk_one_shot(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, enum rtw8852bt_dpk_id id) +{ + u16 dpk_cmd; + u32 val; + int ret; + + dpk_cmd = (id << 8) | (0x19 + (path << 4)); + rtw89_phy_write32_mask(rtwdev, R_NCTL_CFG, MASKDWORD, dpk_cmd); + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x55, + 1, 30000, false, + rtwdev, R_RFK_ST, MASKBYTE0); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 1 over 30ms!!!!\n"); + + udelay(1); + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKDWORD, 0x00030000); + + ret = read_poll_timeout_atomic(rtw89_phy_read32_mask, val, val == 0x8000, + 1, 2000, false, + rtwdev, R_RPT_COM, MASKLWORD); + if (ret) + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] one-shot 2 over 2ms!!!!\n"); + + rtw89_phy_write32_mask(rtwdev, R_NCTL_N1, MASKBYTE0, 0x0); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] one-shot for %s = 0x%04x\n", + id == 0x06 ? "LBK_RXIQK" : + id == 0x10 ? "SYNC" : + id == 0x11 ? "MDPK_IDL" : + id == 0x12 ? "MDPK_MPA" : + id == 0x13 ? "GAIN_LOSS" : + id == 0x14 ? "PWR_CAL" : + id == 0x15 ? 
"DPK_RXAGC" : + id == 0x16 ? "KIP_PRESET" : + id == 0x17 ? "KIP_RESOTRE" : + "DPK_TXAGC", dpk_cmd); +} + +static void _dpk_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x0); + rtw89_write_rf(rtwdev, path, RR_DCK, RR_DCK_LV, 0x1); + + udelay(600); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d RXDCK\n", path); +} + +static void _dpk_information(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + u8 kidx = dpk->cur_idx[path]; + + dpk->bp[path][kidx].band = chan->band_type; + dpk->bp[path][kidx].ch = chan->channel; + dpk->bp[path][kidx].bw = chan->band_width; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] S%d[%d] (PHY%d): TSSI %s/ DBCC %s/ %s/ CH%d/ %s\n", + path, dpk->cur_idx[path], phy, + rtwdev->is_tssi_mode[path] ? "on" : "off", + rtwdev->dbcc_en ? "on" : "off", + dpk->bp[path][kidx].band == 0 ? "2G" : + dpk->bp[path][kidx].band == 1 ? "5G" : "6G", + dpk->bp[path][kidx].ch, + dpk->bp[path][kidx].bw == 0 ? "20M" : + dpk->bp[path][kidx].bw == 1 ? "40M" : "80M"); +} + +static void _dpk_tssi_pause(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, bool is_pause) +{ + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK + (path << 13), + B_P0_TSSI_TRK_EN, is_pause); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d TSSI %s\n", path, + is_pause ? "pause" : "resume"); +} + +static void _dpk_kip_restore(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path) +{ + rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000000); + rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x80000000); + + if (rtwdev->hal.cv > CHIP_CAV) + rtw89_phy_write32_mask(rtwdev, R_DPD_COM + (path << 8), + B_DPD_COM_OF, 0x1); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d restore KIP\n", path); +} + +static void _dpk_lbk_rxiqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 cur_rxbb, u32 rf_18) +{ + rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_IQK_RES + (path << 8), B_IQK_RES_RXCFIR, 0x0); + + rtw89_write_rf(rtwdev, path, RR_RSV4, RFREG_MASK, rf_18); + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0xd); + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x1); + + if (cur_rxbb >= 0x11) + rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x13); + else if (cur_rxbb <= 0xa) + rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x00); + else + rtw89_write_rf(rtwdev, path, RR_TXIQK, RR_TXIQK_ATT1, 0x05); + + rtw89_write_rf(rtwdev, path, RR_XGLNA2, RR_XGLNA2_SW, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RR_RXKPLL_POW, 0x0); + rtw89_write_rf(rtwdev, path, RR_RXKPLL, RFREG_MASK, 0x80014); + + udelay(100); + + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_IQK_DIF4, B_IQK_DIF4_RXT, 0x025); + + _dpk_one_shot(rtwdev, phy, path, LBK_RXIQK); + + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + rtw89_write_rf(rtwdev, path, RR_RXK, RR_RXK_PLLEN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_MDPK_RX_DCK, B_MDPK_RX_DCK_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_KPATH_CFG, B_KPATH_CFG_ED, 0x0); + rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_DI, 0x1); + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, 0x5); +} + +static void _dpk_rf_setting(struct rtw89_dev *rtwdev, u8 gain, + enum 
rtw89_rf_path path, u8 kidx) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + if (dpk->bp[path][kidx].band == RTW89_BAND_2G) { + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220); + rtw89_write_rf(rtwdev, path, RR_RXBB, RR_RXBB_FATT, 0xf2); + rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1); + rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1); + } else { + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASK, 0x50220); + rtw89_write_rf(rtwdev, path, RR_RXA2, RR_RAA2_SWATT, 0x5); + rtw89_write_rf(rtwdev, path, RR_LUTDBG, RR_LUTDBG_TIA, 0x1); + rtw89_write_rf(rtwdev, path, RR_TIA, RR_TIA_N6, 0x1); + rtw89_write_rf(rtwdev, path, RR_RXA_LNA, RFREG_MASK, 0x920FC); + rtw89_write_rf(rtwdev, path, RR_XALNA2, RFREG_MASK, 0x002C0); + rtw89_write_rf(rtwdev, path, RR_IQGEN, RFREG_MASK, 0x38800); + } + + rtw89_write_rf(rtwdev, path, RR_RCKD, RR_RCKD_BW, 0x1); + rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_TXBB, dpk->bp[path][kidx].bw + 1); + rtw89_write_rf(rtwdev, path, RR_BTC, RR_BTC_RXBB, 0x0); +} + +static void _dpk_bypass_rxcfir(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, bool is_bypass) +{ + if (is_bypass) { + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), + B_RXIQC_BYPASS2, 0x1); + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), + B_RXIQC_BYPASS, 0x1); + } else { + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), + B_RXIQC_BYPASS2, 0x0); + rtw89_phy_write32_mask(rtwdev, R_RXIQC + (path << 8), + B_RXIQC_BYPASS, 0x0); + } +} + +static +void _dpk_tpg_sel(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80) + rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x0); + else if (dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40) + rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x2); + else + rtw89_phy_write32_mask(rtwdev, R_TPG_MOD, B_TPG_MOD_F, 0x1); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] TPG_Select for %s\n", + dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_80 ? "80M" : + dpk->bp[path][kidx].bw == RTW89_CHANNEL_WIDTH_40 ? 
"40M" : "20M"); +} + +static void _dpk_table_select(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, u8 kidx, u8 gain) +{ + u8 val; + + val = 0x80 + kidx * 0x20 + gain * 0x10; + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0 + (path << 8), MASKBYTE3, val); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] table select for Kidx[%d], Gain[%d] (0x%x)\n", kidx, + gain, val); +} + +static bool _dpk_sync_check(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 kidx) +{ +#define DPK_SYNC_TH_DC_I 200 +#define DPK_SYNC_TH_DC_Q 200 +#define DPK_SYNC_TH_CORR 170 + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 corr_val, corr_idx; + u16 dc_i, dc_q; + u32 corr, dc; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0); + + corr = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD); + corr_idx = u32_get_bits(corr, B_PRT_COM_CORI); + corr_val = u32_get_bits(corr, B_PRT_COM_CORV); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] S%d Corr_idx / Corr_val = %d / %d\n", + path, corr_idx, corr_val); + + dpk->corr_idx[path][kidx] = corr_idx; + dpk->corr_val[path][kidx] = corr_val; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x9); + + dc = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD); + dc_i = u32_get_bits(dc, B_PRT_COM_DCI); + dc_q = u32_get_bits(dc, B_PRT_COM_DCQ); + + dc_i = abs(sign_extend32(dc_i, 11)); + dc_q = abs(sign_extend32(dc_q, 11)); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d DC I/Q, = %d / %d\n", + path, dc_i, dc_q); + + dpk->dc_i[path][kidx] = dc_i; + dpk->dc_q[path][kidx] = dc_q; + + if (dc_i > DPK_SYNC_TH_DC_I || dc_q > DPK_SYNC_TH_DC_Q || + corr_val < DPK_SYNC_TH_CORR) + return true; + else + return false; +} + +static void _dpk_sync(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx) +{ + _dpk_one_shot(rtwdev, phy, path, SYNC); +} + +static u16 _dpk_dgain_read(struct rtw89_dev *rtwdev) +{ + u16 dgain; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x0); + + dgain = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_DCI); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain = 0x%x\n", dgain); + + return dgain; +} + +static s8 _dpk_dgain_mapping(struct rtw89_dev *rtwdev, u16 dgain) +{ + static const u16 bnd[15] = { + 0xbf1, 0xaa5, 0x97d, 0x875, 0x789, 0x6b7, 0x5fc, 0x556, + 0x4c1, 0x43d, 0x3c7, 0x35e, 0x2ac, 0x262, 0x220 + }; + s8 offset; + + if (dgain >= bnd[0]) + offset = 0x6; + else if (bnd[0] > dgain && dgain >= bnd[1]) + offset = 0x6; + else if (bnd[1] > dgain && dgain >= bnd[2]) + offset = 0x5; + else if (bnd[2] > dgain && dgain >= bnd[3]) + offset = 0x4; + else if (bnd[3] > dgain && dgain >= bnd[4]) + offset = 0x3; + else if (bnd[4] > dgain && dgain >= bnd[5]) + offset = 0x2; + else if (bnd[5] > dgain && dgain >= bnd[6]) + offset = 0x1; + else if (bnd[6] > dgain && dgain >= bnd[7]) + offset = 0x0; + else if (bnd[7] > dgain && dgain >= bnd[8]) + offset = 0xff; + else if (bnd[8] > dgain && dgain >= bnd[9]) + offset = 0xfe; + else if (bnd[9] > dgain && dgain >= bnd[10]) + offset = 0xfd; + else if (bnd[10] > dgain && dgain >= bnd[11]) + offset = 0xfc; + else if (bnd[11] > dgain && dgain >= bnd[12]) + offset = 0xfb; + else if (bnd[12] > dgain && dgain >= bnd[13]) + offset = 0xfa; + else if (bnd[13] > dgain && dgain >= bnd[14]) + offset = 0xf9; + else if (bnd[14] > dgain) + offset = 0xf8; + else + offset = 0x0; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] DGain offset = %d\n", offset); + + return offset; +} + +static u8 _dpk_gainloss_read(struct rtw89_dev *rtwdev) +{ + rtw89_phy_write32_mask(rtwdev, 
R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6); + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1); + + return rtw89_phy_read32_mask(rtwdev, R_RPT_COM, B_PRT_COM_GL); +} + +static void _dpk_gainloss(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx) +{ + _dpk_one_shot(rtwdev, phy, path, GAIN_LOSS); + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, B_KIP_RPT1_SEL, 0x6); + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x1); +} + +static void _dpk_kip_preset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx) +{ + _dpk_tpg_sel(rtwdev, path, kidx); + _dpk_one_shot(rtwdev, phy, path, KIP_PRESET); +} + +static void _dpk_kip_pwr_clk_on(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path) +{ + rtw89_phy_write32_mask(rtwdev, R_NCTL_RPT, MASKDWORD, 0x00000080); + rtw89_phy_write32_mask(rtwdev, R_KIP_SYSCFG, MASKDWORD, 0x807f030a); + rtw89_phy_write32_mask(rtwdev, R_CFIR_SYS + (path << 8), MASKDWORD, 0xce000a08); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] KIP Power/CLK on\n"); +} + +static +u8 _dpk_txagc_check_8852bt(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, u8 txagc) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + if (txagc >= dpk->max_dpk_txagc[path]) + txagc = dpk->max_dpk_txagc[path]; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set TxAGC = 0x%x\n", txagc); + + return txagc; +} + +static void _dpk_kip_set_txagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 txagc) +{ + u8 val; + + val = _dpk_txagc_check_8852bt(rtwdev, path, txagc); + rtw89_write_rf(rtwdev, path, RR_TXAGC, RFREG_MASK, val); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + _dpk_one_shot(rtwdev, phy, path, DPK_TXAGC); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] set TXAGC = 0x%x\n", txagc); +} + +static void _dpk_kip_set_rxagc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_phy_write32_mask(rtwdev, R_KIP_MOD, B_KIP_MOD, 0x50220); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x1); + _dpk_one_shot(rtwdev, phy, path, DPK_RXAGC); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_EN, 0x0); +} + +static u8 _dpk_set_offset(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 txagc, s8 gain_offset) +{ + txagc = rtw89_read_rf(rtwdev, path, RR_TXAGC, RFREG_MASK); + + if ((txagc - gain_offset) < DPK_TXAGC_LOWER) + txagc = DPK_TXAGC_LOWER; + else if ((txagc - gain_offset) > DPK_TXAGC_UPPER) + txagc = DPK_TXAGC_UPPER; + else + txagc = txagc - gain_offset; + + _dpk_kip_set_txagc(rtwdev, phy, path, txagc); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] tmp_txagc (GL=%d) = 0x%x\n", + gain_offset, txagc); + return txagc; +} + +static bool _dpk_pas_read(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, + u8 is_check) +{ + u32 val1_i = 0, val1_q = 0, val2_i = 0, val2_q = 0; + u8 i; + + rtw89_phy_write32_mask(rtwdev, R_KIP_RPT1, MASKBYTE2, 0x06); + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG2, B_DPK_CFG2_ST, 0x0); + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE2, 0x08); + + if (is_check) { + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x00); + val1_i = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); + val1_i = abs(sign_extend32(val1_i, 11)); + val1_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); + val1_q = abs(sign_extend32(val1_q, 11)); + + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, 0x1f); + val2_i = 
rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKHWORD); + val2_i = abs(sign_extend32(val2_i, 11)); + val2_q = rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKLWORD); + val2_q = abs(sign_extend32(val2_q, 11)); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] PAS_delta = 0x%x\n", + phy_div(val1_i * val1_i + val1_q * val1_q, + val2_i * val2_i + val2_q * val2_q)); + } else { + for (i = 0; i < 32; i++) { + rtw89_phy_write32_mask(rtwdev, R_DPK_CFG3, MASKBYTE3, i); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] PAS_Read[%02d]= 0x%08x\n", i, + rtw89_phy_read32_mask(rtwdev, R_RPT_COM, MASKDWORD)); + } + } + + if (val1_i * val1_i + val1_q * val1_q >= + (val2_i * val2_i + val2_q * val2_q) * 8 / 5) + return true; + + return false; +} + +static u8 _dpk_agc(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx, u8 init_txagc, + bool loss_only) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 goout = 0, agc_cnt = 0, limited_rxbb = 0, gl_cnt = 0; + u8 tmp_txagc, tmp_rxbb, tmp_gl_idx = 0; + u8 step = DPK_AGC_STEP_SYNC_DGAIN; + int limit = 200; + s8 offset = 0; + u16 dgain = 0; + u32 rf_18; + + tmp_txagc = init_txagc; + + tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB); + rf_18 = rtw89_read_rf(rtwdev, path, RR_CFGCH, RFREG_MASK); + + do { + switch (step) { + case DPK_AGC_STEP_SYNC_DGAIN: + _dpk_sync(rtwdev, phy, path, kidx); + if (agc_cnt == 0) { + if (chan->band_width < 2) + _dpk_bypass_rxcfir(rtwdev, path, true); + else + _dpk_lbk_rxiqk(rtwdev, phy, path, + tmp_rxbb, rf_18); + } + + if (_dpk_sync_check(rtwdev, path, kidx) == true) { + tmp_txagc = 0xff; + goout = 1; + break; + } + + dgain = _dpk_dgain_read(rtwdev); + offset = _dpk_dgain_mapping(rtwdev, dgain); + + if (loss_only == 1 || limited_rxbb == 1 || offset == 0) + step = DPK_AGC_STEP_GAIN_LOSS_IDX; + else + step = DPK_AGC_STEP_GAIN_ADJ; + break; + case DPK_AGC_STEP_GAIN_ADJ: + tmp_rxbb = rtw89_read_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB); + + if (tmp_rxbb + offset > 0x1f) { + tmp_rxbb = 0x1f; + limited_rxbb = 1; + } else if (tmp_rxbb + offset < 0) { + tmp_rxbb = 0; + limited_rxbb = 1; + } else { + tmp_rxbb = tmp_rxbb + offset; + } + + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKRXBB, tmp_rxbb); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Adjust RXBB (%d) = 0x%x\n", offset, tmp_rxbb); + + if (chan->band_width == RTW89_CHANNEL_WIDTH_80) + _dpk_lbk_rxiqk(rtwdev, phy, path, tmp_rxbb, rf_18); + if (dgain > 1922 || dgain < 342) + step = DPK_AGC_STEP_SYNC_DGAIN; + else + step = DPK_AGC_STEP_GAIN_LOSS_IDX; + + agc_cnt++; + break; + case DPK_AGC_STEP_GAIN_LOSS_IDX: + _dpk_gainloss(rtwdev, phy, path, kidx); + + tmp_gl_idx = _dpk_gainloss_read(rtwdev); + + if ((tmp_gl_idx == 0 && _dpk_pas_read(rtwdev, path, true)) || + tmp_gl_idx >= 7) + step = DPK_AGC_STEP_GL_GT_CRITERION; + else if (tmp_gl_idx == 0) + step = DPK_AGC_STEP_GL_LT_CRITERION; + else + step = DPK_AGC_STEP_SET_TX_GAIN; + + gl_cnt++; + break; + case DPK_AGC_STEP_GL_GT_CRITERION: + if (tmp_txagc == 0x2e || + tmp_txagc == dpk->max_dpk_txagc[path]) { + goout = 1; + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Txagc@lower bound!!\n"); + } else { + tmp_txagc = _dpk_set_offset(rtwdev, phy, path, + tmp_txagc, 0x3); + } + step = DPK_AGC_STEP_GAIN_LOSS_IDX; + agc_cnt++; + break; + + case DPK_AGC_STEP_GL_LT_CRITERION: + if (tmp_txagc == 0x3f || tmp_txagc == dpk->max_dpk_txagc[path]) { + goout = 1; + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Txagc@upper bound!!\n"); + } else { + 
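/* Editor's note: gain loss came in under the criterion; 0xfe is -2 as s8, so _dpk_set_offset() below raises TXAGC by 2 (clamped to DPK_TXAGC_UPPER) before re-running the gain-loss measurement. */ + 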
tmp_txagc = _dpk_set_offset(rtwdev, phy, path, + tmp_txagc, 0xfe); + } + step = DPK_AGC_STEP_GAIN_LOSS_IDX; + agc_cnt++; + break; + + case DPK_AGC_STEP_SET_TX_GAIN: + tmp_txagc = _dpk_set_offset(rtwdev, phy, path, tmp_txagc, + tmp_gl_idx); + goout = 1; + agc_cnt++; + break; + + default: + goout = 1; + break; + } + } while (!goout && agc_cnt < 6 && limit-- > 0); + + if (gl_cnt >= 6) + _dpk_pas_read(rtwdev, path, false); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Txagc / RXBB for DPK = 0x%x / 0x%x\n", tmp_txagc, tmp_rxbb); + + return tmp_txagc; +} + +static void _dpk_set_mdpd_para(struct rtw89_dev *rtwdev, + enum rtw89_rf_path path, u8 order) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + switch (order) { + case 0: /* (5,3,1) */ + rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order); + rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x3); + rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x1); + dpk->dpk_order[path] = 0x3; + break; + case 1: /* (5,3,0) */ + rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order); + rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x0); + dpk->dpk_order[path] = 0x1; + break; + case 2: /* (5,0,0) */ + rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_OP, order); + rtw89_phy_write32_mask(rtwdev, R_LDL_NORM, B_LDL_NORM_PN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_MAN, 0x0); + dpk->dpk_order[path] = 0x0; + break; + default: + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Wrong MDPD order!!(0x%x)\n", order); + break; + } + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] Set %s for IDL\n", + order == 0x0 ? "(5,3,1)" : + order == 0x1 ? "(5,3,0)" : "(5,0,0)"); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Set MDPD order to 0x%x for IDL\n", order); +} + +static void _dpk_idl_mpa(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx, u8 gain) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + if (dpk->bp[path][kidx].bw < RTW89_CHANNEL_WIDTH_80 && + dpk->bp[path][kidx].band == RTW89_BAND_5G) + _dpk_set_mdpd_para(rtwdev, path, 0x2); + else + _dpk_set_mdpd_para(rtwdev, path, 0x0); + + _dpk_one_shot(rtwdev, phy, path, MDPK_IDL); +} + +static void _dpk_fill_result(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 kidx, u8 gain, u8 txagc) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 gs = dpk->dpk_gs[phy]; + u16 pwsf = 0x78; + + rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), BIT(8), kidx); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Fill txagc/ pwsf/ gs = 0x%x/ 0x%x/ 0x%x\n", + txagc, pwsf, gs); + + dpk->bp[path][kidx].txagc_dpk = txagc; + rtw89_phy_write32_mask(rtwdev, R_TXAGC_RFK + (path << 8), + 0x3F << ((gain << 3) + (kidx << 4)), txagc); + + dpk->bp[path][kidx].pwsf = pwsf; + rtw89_phy_write32_mask(rtwdev, R_DPD_BND + (path << 8) + (kidx << 2), + 0x1FF << (gain << 4), pwsf); + + rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x1); + rtw89_phy_write32_mask(rtwdev, R_LOAD_COEF + (path << 8), B_LOAD_COEF_MDPD, 0x0); + + dpk->bp[path][kidx].gs = gs; + if (dpk->dpk_gs[phy] == 0x7f) + rtw89_phy_write32_mask(rtwdev, + R_DPD_CH0A + (path << 8) + (kidx << 2), + MASKDWORD, 0x007f7f7f); + else + rtw89_phy_write32_mask(rtwdev, + R_DPD_CH0A + (path << 8) + (kidx << 2), + MASKDWORD, 0x005b5b5b); + + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8) + (kidx << 2), + B_DPD_ORDER_V1, dpk->dpk_order[path]); + + 
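/* Editor's note, inferred from the register names: with the per-kidx coefficients, pwsf and gain scaler programmed above, clear the legacy DPD word and release the manual MDPK sync select below. */ + 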
rtw89_phy_write32_mask(rtwdev, R_DPD_V1 + (path << 8), MASKDWORD, 0x0); + rtw89_phy_write32_mask(rtwdev, R_MDPK_SYNC, B_MDPK_SYNC_SEL, 0x0); +} + +static bool _dpk_reload_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 idx, cur_band, cur_ch; + bool is_reload = false; + + cur_band = chan->band_type; + cur_ch = chan->channel; + + for (idx = 0; idx < RTW89_DPK_BKUP_NUM; idx++) { + if (cur_band != dpk->bp[path][idx].band || + cur_ch != dpk->bp[path][idx].ch) + continue; + + rtw89_phy_write32_mask(rtwdev, R_COEF_SEL + (path << 8), + B_COEF_SEL_MDPD, idx); + dpk->cur_idx[path] = idx; + is_reload = true; + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] reload S%d[%d] success\n", path, idx); + } + + return is_reload; +} + +static +void _rf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_bybb) +{ + if (is_bybb) + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x1); + else + rtw89_write_rf(rtwdev, path, RR_RSV1, RR_RSV1_RST, 0x0); +} + +static +void _drf_direct_cntrl(struct rtw89_dev *rtwdev, enum rtw89_rf_path path, bool is_bybb) +{ + if (is_bybb) + rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x1); + else + rtw89_write_rf(rtwdev, path, RR_BBDC, RR_BBDC_SEL, 0x0); +} + +static bool _dpk_main(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u8 gain) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 txagc = 0x38, kidx = dpk->cur_idx[path]; + bool is_fail = false; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] ========= S%d[%d] DPK Start =========\n", path, kidx); + + _rf_direct_cntrl(rtwdev, path, false); + _drf_direct_cntrl(rtwdev, path, false); + + _dpk_kip_pwr_clk_on(rtwdev, path); + _dpk_kip_set_txagc(rtwdev, phy, path, txagc); + _dpk_rf_setting(rtwdev, gain, path, kidx); + _dpk_rx_dck(rtwdev, phy, path); + _dpk_kip_preset(rtwdev, phy, path, kidx); + _dpk_kip_set_rxagc(rtwdev, phy, path); + _dpk_table_select(rtwdev, path, kidx, gain); + + txagc = _dpk_agc(rtwdev, phy, path, kidx, txagc, false); + + _rfk_get_thermal(rtwdev, kidx, path); + + if (txagc == 0xff) { + is_fail = true; + goto _error; + } + + _dpk_idl_mpa(rtwdev, phy, path, kidx, gain); + + rtw89_write_rf(rtwdev, path, RR_MOD, RFREG_MASKMODE, RF_RX); + _dpk_fill_result(rtwdev, phy, path, kidx, gain, txagc); + +_error: + if (!is_fail) + dpk->bp[path][kidx].path_ok = 1; + else + dpk->bp[path][kidx].path_ok = 0; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx, + is_fail ? "Check" : "Success"); + + _dpk_onoff(rtwdev, path, is_fail); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "[DPK] S%d[%d] DPK %s\n", path, kidx, + is_fail ? 
"Check" : "Success"); + + return is_fail; +} + +static void _dpk_cal_select(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, u8 kpath) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u32 backup_kip_val[BACKUP_KIP_REGS_NR]; + u32 backup_bb_val[BACKUP_BB_REGS_NR]; + u32 backup_rf_val[RTW8852BT_SS][BACKUP_RF_REGS_NR]; + bool reloaded[2] = {false}; + u8 path; + + for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) { + reloaded[path] = _dpk_reload_check(rtwdev, phy, path); + if (!reloaded[path] && dpk->bp[path][0].ch != 0) + dpk->cur_idx[path] = !dpk->cur_idx[path]; + else + _dpk_onoff(rtwdev, path, false); + } + + _rfk_backup_bb_reg(rtwdev, backup_bb_val); + _rfk_backup_kip_reg(rtwdev, backup_kip_val); + + for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) { + _rfk_backup_rf_reg(rtwdev, backup_rf_val[path], path); + _dpk_information(rtwdev, phy, path); + if (rtwdev->is_tssi_mode[path]) + _dpk_tssi_pause(rtwdev, path, true); + } + + _rfk_bb_afe_setting(rtwdev, phy, path, kpath); + + for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) + _dpk_main(rtwdev, phy, path, 1); + + _rfk_bb_afe_restore(rtwdev, phy, path, kpath); + + _dpk_kip_restore(rtwdev, path); + _rfk_reload_bb_reg(rtwdev, backup_bb_val); + _rfk_reload_kip_reg(rtwdev, backup_kip_val); + + for (path = 0; path < DPK_RF_PATH_MAX_8852BT; path++) { + _rfk_reload_rf_reg(rtwdev, backup_rf_val[path], path); + if (rtwdev->is_tssi_mode[path]) + _dpk_tssi_pause(rtwdev, path, false); + } +} + +static bool _dpk_bypass_check(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + struct rtw89_fem_info *fem = &rtwdev->fem; + + if (fem->epa_2g && chan->band_type == RTW89_BAND_2G) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Skip DPK due to 2G_ext_PA exist!!\n"); + return true; + } else if (fem->epa_5g && chan->band_type == RTW89_BAND_5G) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Skip DPK due to 5G_ext_PA exist!!\n"); + return true; + } else if (fem->epa_6g && chan->band_type == RTW89_BAND_6G) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] Skip DPK due to 6G_ext_PA exist!!\n"); + return true; + } + + return false; +} + +static void _dpk_force_bypass(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + u8 path, kpath; + + kpath = _kpath(rtwdev, phy); + + for (path = 0; path < RTW8852BT_SS; path++) { + if (kpath & BIT(path)) + _dpk_onoff(rtwdev, path, true); + } +} + +static void _dpk_track(struct rtw89_dev *rtwdev) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + s8 txagc_bb, txagc_bb_tp, ini_diff = 0, txagc_ofst; + s8 delta_ther[2] = {}; + u8 trk_idx, txagc_rf; + u8 path, kidx; + u16 pwsf[2]; + u8 cur_ther; + u32 tmp; + + for (path = 0; path < RF_PATH_NUM_8852BT; path++) { + kidx = dpk->cur_idx[path]; + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] ================[S%d[%d] (CH %d)]================\n", + path, kidx, dpk->bp[path][kidx].ch); + + cur_ther = ewma_thermal_read(&rtwdev->phystat.avg_thermal[path]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] thermal now = %d\n", cur_ther); + + if (dpk->bp[path][kidx].ch && cur_ther) + delta_ther[path] = dpk->bp[path][kidx].ther_dpk - cur_ther; + + if (dpk->bp[path][kidx].band == RTW89_BAND_2G) + delta_ther[path] = delta_ther[path] * 3 / 2; + else + delta_ther[path] = delta_ther[path] * 5 / 2; + + txagc_rf = rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), + B_TXAGC_RF); + + if (rtwdev->is_tssi_mode[path]) { + trk_idx = rtw89_read_rf(rtwdev, path, RR_TXA, RR_TXA_TRK); + + 
rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] txagc_RF / track_idx = 0x%x / %d\n", + txagc_rf, trk_idx); + + txagc_bb = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), + MASKBYTE2); + txagc_bb_tp = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_TP + (path << 13), + B_TXAGC_TP); + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] txagc_bb_tp / txagc_bb = 0x%x / 0x%x\n", + txagc_bb_tp, txagc_bb); + + txagc_ofst = + rtw89_phy_read32_mask(rtwdev, R_TXAGC_BB + (path << 13), + MASKBYTE3); + + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] txagc_offset / delta_ther = %d / %d\n", + txagc_ofst, delta_ther[path]); + tmp = rtw89_phy_read32_mask(rtwdev, R_DPD_COM + (path << 8), + B_DPD_COM_OF); + if (tmp == 0x1) { + txagc_ofst = 0; + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] HW txagc offset mode\n"); + } + + if (txagc_rf && cur_ther) + ini_diff = txagc_ofst + (delta_ther[path]); + + tmp = rtw89_phy_read32_mask(rtwdev, + R_P0_TXDPD + (path << 13), + B_P0_TXDPD); + if (tmp == 0x0) { + pwsf[0] = dpk->bp[path][kidx].pwsf + + txagc_bb_tp - txagc_bb + ini_diff; + pwsf[1] = dpk->bp[path][kidx].pwsf + + txagc_bb_tp - txagc_bb + ini_diff; + } else { + pwsf[0] = dpk->bp[path][kidx].pwsf + ini_diff; + pwsf[1] = dpk->bp[path][kidx].pwsf + ini_diff; + } + } else { + pwsf[0] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff; + pwsf[1] = (dpk->bp[path][kidx].pwsf + delta_ther[path]) & 0x1ff; + } + + tmp = rtw89_phy_read32_mask(rtwdev, R_DPK_TRK, B_DPK_TRK_DIS); + if (!tmp && txagc_rf) { + rtw89_debug(rtwdev, RTW89_DBG_RFK_TRACK, + "[DPK_TRK] New pwsf[0] / pwsf[1] = 0x%x / 0x%x\n", + pwsf[0], pwsf[1]); + + rtw89_phy_write32_mask(rtwdev, + R_DPD_BND + (path << 8) + (kidx << 2), + B_DPD_BND_0, pwsf[0]); + rtw89_phy_write32_mask(rtwdev, + R_DPD_BND + (path << 8) + (kidx << 2), + B_DPD_BND_1, pwsf[1]); + } + } +} + +static void _set_dpd_backoff(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + u8 tx_scale, ofdm_bkof, path, kpath; + + kpath = _kpath(rtwdev, phy); + + ofdm_bkof = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_OFDM); + tx_scale = rtw89_phy_read32_mask(rtwdev, R_DPD_BF + (phy << 13), B_DPD_BF_SCA); + + if (ofdm_bkof + tx_scale >= 44) { + /* move dpd backoff to bb, and set dpd backoff to 0 */ + dpk->dpk_gs[phy] = 0x7f; + for (path = 0; path < RF_PATH_NUM_8852BT; path++) { + if (!(kpath & BIT(path))) + continue; + + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A + (path << 8), + B_DPD_CFG, 0x7f7f7f); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[RFK] Set S%d DPD backoff to 0dB\n", path); + } + } else { + dpk->dpk_gs[phy] = 0x5b; + } +} + +static void _tssi_dpk_off(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0A, BIT(24), 0x0); + rtw89_phy_write32_mask(rtwdev, R_DPD_CH0B, BIT(24), 0x0); +} + +static void _tssi_rf_setting(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + enum rtw89_band band = chan->band_type; + + if (band == RTW89_BAND_2G) + rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXG, 0x1); + else + rtw89_write_rf(rtwdev, path, RR_TXPOW, RR_TXPOW_TXA, 0x1); +} + +static void _tssi_set_sys(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + enum rtw89_band band = chan->band_type; + + rtw89_rfk_parser(rtwdev, &rtw8852bt_tssi_sys_defs_tbl); + + 
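/* Editor's note: the common system table above applies to both paths; the writes below layer on bandwidth- and band-specific settings for the current channel. */ + 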
if (chan->band_width == RTW89_CHANNEL_WIDTH_80) + rtw89_phy_write32_mask(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_BW80, 0x1); + else + rtw89_phy_write32_mask(rtwdev, R_RSTB_ASYNC, B_RSTB_ASYNC_BW80, 0x0); + + if (path == RF_PATH_A) + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852bt_tssi_sys_a_defs_2g_tbl, + &rtw8852bt_tssi_sys_a_defs_5g_tbl); + else + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852bt_tssi_sys_b_defs_2g_tbl, + &rtw8852bt_tssi_sys_b_defs_5g_tbl); +} + +static void _tssi_ini_txpwr_ctrl_bb(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852bt_tssi_init_txpwr_defs_a_tbl, + &rtw8852bt_tssi_init_txpwr_defs_b_tbl); +} + +static void _tssi_ini_txpwr_ctrl_bb_he_tb(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852bt_tssi_init_txpwr_he_tb_defs_a_tbl, + &rtw8852bt_tssi_init_txpwr_he_tb_defs_b_tbl); +} + +static void _tssi_set_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852bt_tssi_dck_defs_a_tbl, + &rtw8852bt_tssi_dck_defs_b_tbl); +} + +static void _tssi_set_tmeter_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ +#define RTW8852BT_TSSI_GET_VAL(ptr, idx) \ +({ \ + s8 *__ptr = (ptr); \ + u8 __idx = (idx), __i, __v; \ + u32 __val = 0; \ + for (__i = 0; __i < 4; __i++) { \ + __v = (__ptr[__idx + __i]); \ + __val |= (__v << (8 * __i)); \ + } \ + __val; \ +}) + struct rtw89_fw_txpwr_track_cfg *trk = rtwdev->fw.elm_info.txpwr_trk; + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + u8 ch = chan->channel; + u8 subband = chan->subband_type; + const s8 *thm_up_a = NULL; + const s8 *thm_down_a = NULL; + const s8 *thm_up_b = NULL; + const s8 *thm_down_b = NULL; + u8 thermal = 0xff; + s8 thm_ofst[64] = {0}; + u32 tmp = 0; + u8 i, j; + + switch (subband) { + default: + case RTW89_CH_2G: + thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_P][0]; + thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GA_N][0]; + thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_P][0]; + thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_2GB_N][0]; + break; + case RTW89_CH_5G_BAND_1: + thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][0]; + thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][0]; + thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][0]; + thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][0]; + break; + case RTW89_CH_5G_BAND_3: + thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][1]; + thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][1]; + thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][1]; + thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][1]; + break; + case RTW89_CH_5G_BAND_4: + thm_up_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_P][2]; + thm_down_a = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GA_N][2]; + thm_up_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_P][2]; + thm_down_b = trk->delta[RTW89_FW_TXPWR_TRK_TYPE_5GB_N][2]; + break; + } + + if (path == RF_PATH_A) { + thermal = tssi_info->thermal[RF_PATH_A]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] ch=%d thermal_pathA=0x%x\n", ch, thermal); + + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_DIS, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER_TRK, 0x1); + + if (thermal == 
0xff) { + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, 32); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, 32); + + for (i = 0; i < 64; i += 4) { + rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + R_P0_TSSI_BASE + i, 0x0); + } + + } else { + rtw89_phy_write32_mask(rtwdev, R_P0_TMETER, B_P0_TMETER, + thermal); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, B_P0_RFCTM_VAL, + thermal); + + i = 0; + for (j = 0; j < 32; j++) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? + -thm_down_a[i++] : + -thm_down_a[DELTA_SWINGIDX_SIZE - 1]; + + i = 1; + for (j = 63; j >= 32; j--) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? + thm_up_a[i++] : + thm_up_a[DELTA_SWINGIDX_SIZE - 1]; + + for (i = 0; i < 64; i += 4) { + tmp = RTW8852BT_TSSI_GET_VAL(thm_ofst, i); + rtw89_phy_write32(rtwdev, R_P0_TSSI_BASE + i, tmp); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + 0x5c00 + i, tmp); + } + } + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_RFCTM, R_P0_RFCTM_RDY, 0x0); + + } else { + thermal = tssi_info->thermal[RF_PATH_B]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] ch=%d thermal_pathB=0x%x\n", ch, thermal); + + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_DIS, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER_TRK, 0x1); + + if (thermal == 0xff) { + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, 32); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, 32); + + for (i = 0; i < 64; i += 4) { + rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, 0x0); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + 0x7c00 + i, 0x0); + } + + } else { + rtw89_phy_write32_mask(rtwdev, R_P1_TMETER, B_P1_TMETER, + thermal); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, B_P1_RFCTM_VAL, + thermal); + + i = 0; + for (j = 0; j < 32; j++) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? + -thm_down_b[i++] : + -thm_down_b[DELTA_SWINGIDX_SIZE - 1]; + + i = 1; + for (j = 63; j >= 32; j--) + thm_ofst[j] = i < DELTA_SWINGIDX_SIZE ? 
+ thm_up_b[i++] : + thm_up_b[DELTA_SWINGIDX_SIZE - 1]; + + for (i = 0; i < 64; i += 4) { + tmp = RTW8852BT_TSSI_GET_VAL(thm_ofst, i); + rtw89_phy_write32(rtwdev, R_TSSI_THOF + i, tmp); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] write 0x%x val=0x%08x\n", + 0x7c00 + i, tmp); + } + } + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_RFCTM, R_P1_RFCTM_RDY, 0x0); + } +#undef RTW8852BT_TSSI_GET_VAL +} + +static void _tssi_set_dac_gain_tbl(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852bt_tssi_dac_gain_defs_a_tbl, + &rtw8852bt_tssi_dac_gain_defs_b_tbl); +} + +static void _tssi_slope_cal_org(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + enum rtw89_band band = chan->band_type; + + if (path == RF_PATH_A) + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852bt_tssi_slope_a_defs_2g_tbl, + &rtw8852bt_tssi_slope_a_defs_5g_tbl); + else + rtw89_rfk_parser_by_cond(rtwdev, band == RTW89_BAND_2G, + &rtw8852bt_tssi_slope_b_defs_2g_tbl, + &rtw8852bt_tssi_slope_b_defs_5g_tbl); +} + +static void _tssi_alignment_default(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, bool all) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + enum rtw89_band band = chan->band_type; + const struct rtw89_rfk_tbl *tbl = NULL; + u8 ch = chan->channel; + + if (path == RF_PATH_A) { + if (band == RTW89_BAND_2G) + tbl = &rtw8852bt_tssi_align_a_2g_all_defs_tbl; + else if (ch >= 36 && ch <= 64) + tbl = &rtw8852bt_tssi_align_a_5g1_all_defs_tbl; + else if (ch >= 100 && ch <= 144) + tbl = &rtw8852bt_tssi_align_a_5g2_all_defs_tbl; + else if (ch >= 149 && ch <= 177) + tbl = &rtw8852bt_tssi_align_a_5g3_all_defs_tbl; + } else { + if (ch >= 1 && ch <= 14) + tbl = &rtw8852bt_tssi_align_b_2g_all_defs_tbl; + else if (ch >= 36 && ch <= 64) + tbl = &rtw8852bt_tssi_align_b_5g1_all_defs_tbl; + else if (ch >= 100 && ch <= 144) + tbl = &rtw8852bt_tssi_align_b_5g2_all_defs_tbl; + else if (ch >= 149 && ch <= 177) + tbl = &rtw8852bt_tssi_align_b_5g3_all_defs_tbl; + } + + if (tbl) + rtw89_rfk_parser(rtwdev, tbl); +} + +static void _tssi_set_tssi_slope(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_rfk_parser_by_cond(rtwdev, path == RF_PATH_A, + &rtw8852bt_tssi_slope_defs_a_tbl, + &rtw8852bt_tssi_slope_defs_b_tbl); +} + +static void _tssi_set_tssi_track(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + if (path == RF_PATH_A) + rtw89_phy_write32_mask(rtwdev, R_P0_TSSIC, B_P0_TSSIC_BYPASS, 0x0); + else + rtw89_phy_write32_mask(rtwdev, R_P1_TSSIC, B_P1_TSSIC_BYPASS, 0x0); +} + +static void _tssi_set_txagc_offset_mv_avg(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "======>%s path=%d\n", __func__, + path); + + if (path == RF_PATH_A) + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, + B_P0_TSSI_MV_MIX, 0x010); + else + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, + B_P1_RFCTM_DEL, 0x010); +} + +static void _tssi_enable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + u8 i; + + for (i = 0; i < RF_PATH_NUM_8852BT; i++) { + _tssi_set_tssi_track(rtwdev, phy, i); + _tssi_set_txagc_offset_mv_avg(rtwdev, phy, i); + + if (i == RF_PATH_A) { + rtw89_phy_write32_mask(rtwdev, 
R_P0_TSSI_MV_AVG, + B_P0_TSSI_MV_CLR, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, + B_P0_TSSI_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, + B_P0_TSSI_EN, 0x1); + rtw89_write_rf(rtwdev, i, RR_TXGA_V1, + RR_TXGA_V1_TRK_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, + B_P0_TSSI_RFC, 0x3); + + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, + B_P0_TSSI_OFT, 0xc0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, + B_P0_TSSI_OFT_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, + B_P0_TSSI_OFT_EN, 0x1); + + rtwdev->is_tssi_mode[RF_PATH_A] = true; + } else { + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, + B_P1_TSSI_MV_CLR, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, + B_P1_TSSI_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, + B_P1_TSSI_EN, 0x1); + rtw89_write_rf(rtwdev, i, RR_TXGA_V1, + RR_TXGA_V1_TRK_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, + B_P1_TSSI_RFC, 0x3); + + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, + B_P1_TSSI_OFT, 0xc0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, + B_P1_TSSI_OFT_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, + B_P1_TSSI_OFT_EN, 0x1); + + rtwdev->is_tssi_mode[RF_PATH_B] = true; + } + } +} + +static void _tssi_disable(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_RFC, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_CLR, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_RFC, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_CLR, 0x1); + + rtwdev->is_tssi_mode[RF_PATH_A] = false; + rtwdev->is_tssi_mode[RF_PATH_B] = false; +} + +static u32 _tssi_get_cck_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 2: + return 0; + case 3 ... 5: + return 1; + case 6 ... 8: + return 2; + case 9 ... 11: + return 3; + case 12 ... 13: + return 4; + case 14: + return 5; + } + + return 0; +} + +#define TSSI_EXTRA_GROUP_BIT (BIT(31)) +#define TSSI_EXTRA_GROUP(idx) (TSSI_EXTRA_GROUP_BIT | (idx)) +#define IS_TSSI_EXTRA_GROUP(group) ((group) & TSSI_EXTRA_GROUP_BIT) +#define TSSI_EXTRA_GET_GROUP_IDX1(group) ((group) & ~TSSI_EXTRA_GROUP_BIT) +#define TSSI_EXTRA_GET_GROUP_IDX2(group) (TSSI_EXTRA_GET_GROUP_IDX1(group) + 1) + +static u32 _tssi_get_ofdm_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 2: + return 0; + case 3 ... 5: + return 1; + case 6 ... 8: + return 2; + case 9 ... 11: + return 3; + case 12 ... 14: + return 4; + case 36 ... 40: + return 5; + case 41 ... 43: + return TSSI_EXTRA_GROUP(5); + case 44 ... 48: + return 6; + case 49 ... 51: + return TSSI_EXTRA_GROUP(6); + case 52 ... 56: + return 7; + case 57 ... 59: + return TSSI_EXTRA_GROUP(7); + case 60 ... 64: + return 8; + case 100 ... 104: + return 9; + case 105 ... 107: + return TSSI_EXTRA_GROUP(9); + case 108 ... 112: + return 10; + case 113 ... 115: + return TSSI_EXTRA_GROUP(10); + case 116 ... 120: + return 11; + case 121 ... 123: + return TSSI_EXTRA_GROUP(11); + case 124 ... 128: + return 12; + case 129 ... 131: + return TSSI_EXTRA_GROUP(12); + case 132 ... 136: + return 13; + case 137 ... 139: + return TSSI_EXTRA_GROUP(13); + case 140 ... 144: + return 14; + case 149 ... 153: + return 15; + case 154 ... 156: + return TSSI_EXTRA_GROUP(15); + case 157 ... 161: + return 16; + case 162 ... 
164: + return TSSI_EXTRA_GROUP(16); + case 165 ... 169: + return 17; + case 170 ... 172: + return TSSI_EXTRA_GROUP(17); + case 173 ... 177: + return 18; + } + + return 0; +} + +static u32 _tssi_get_trim_group(struct rtw89_dev *rtwdev, u8 ch) +{ + switch (ch) { + case 1 ... 8: + return 0; + case 9 ... 14: + return 1; + case 36 ... 48: + return 2; + case 52 ... 64: + return 3; + case 100 ... 112: + return 4; + case 116 ... 128: + return 5; + case 132 ... 144: + return 6; + case 149 ... 177: + return 7; + } + + return 0; +} + +static s8 _tssi_get_ofdm_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + u8 ch = chan->channel; + u32 gidx, gidx_1st, gidx_2nd; + s8 de_1st; + s8 de_2nd; + s8 val; + + gidx = _tssi_get_ofdm_group(rtwdev, ch); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs group_idx=0x%x\n", path, gidx); + + if (IS_TSSI_EXTRA_GROUP(gidx)) { + gidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(gidx); + gidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(gidx); + de_1st = tssi_info->tssi_mcs[path][gidx_1st]; + de_2nd = tssi_info->tssi_mcs[path][gidx_2nd]; + val = (de_1st + de_2nd) / 2; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs de=%d 1st=%d 2nd=%d\n", + path, val, de_1st, de_2nd); + } else { + val = tssi_info->tssi_mcs[path][gidx]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs de=%d\n", path, val); + } + + return val; +} + +static s8 _tssi_get_ofdm_trim_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + u8 ch = chan->channel; + u32 tgidx, tgidx_1st, tgidx_2nd; + s8 tde_1st; + s8 tde_2nd; + s8 val; + + tgidx = _tssi_get_trim_group(rtwdev, ch); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_group_idx=0x%x\n", + path, tgidx); + + if (IS_TSSI_EXTRA_GROUP(tgidx)) { + tgidx_1st = TSSI_EXTRA_GET_GROUP_IDX1(tgidx); + tgidx_2nd = TSSI_EXTRA_GET_GROUP_IDX2(tgidx); + tde_1st = tssi_info->tssi_trim[path][tgidx_1st]; + tde_2nd = tssi_info->tssi_trim[path][tgidx_2nd]; + val = (tde_1st + tde_2nd) / 2; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_de=%d 1st=%d 2nd=%d\n", + path, val, tde_1st, tde_2nd); + } else { + val = tssi_info->tssi_trim[path][tgidx]; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs trim_de=%d\n", + path, val); + } + + return val; +} + +static void _tssi_set_efuse_to_de(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + u8 ch = chan->channel; + u8 gidx; + s8 ofdm_de; + s8 trim_de; + s32 val; + u32 i; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI][TRIM]: phy=%d ch=%d\n", + phy, ch); + + for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) { + gidx = _tssi_get_cck_group(rtwdev, ch); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + val = tssi_info->tssi_cck[i][gidx] + trim_de; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d cck[%d]=0x%x trim=0x%x\n", + i, gidx, tssi_info->tssi_cck[i][gidx], trim_de); + + rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_long[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_cck_short[i], _TSSI_DE_MASK, val); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + 
"[TSSI] Set TSSI CCK DE 0x%x[21:12]=0x%x\n", + _tssi_de_cck_long[i], + rtw89_phy_read32_mask(rtwdev, _tssi_de_cck_long[i], + _TSSI_DE_MASK)); + + ofdm_de = _tssi_get_ofdm_de(rtwdev, phy, i); + trim_de = _tssi_get_ofdm_trim_de(rtwdev, phy, i); + val = ofdm_de + trim_de; + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI][TRIM]: path=%d mcs=0x%x trim=0x%x\n", + i, ofdm_de, trim_de); + + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_20m[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_40m[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_80m_80m[i], + _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_5m[i], _TSSI_DE_MASK, val); + rtw89_phy_write32_mask(rtwdev, _tssi_de_mcs_10m[i], _TSSI_DE_MASK, val); + + rtw89_debug(rtwdev, RTW89_DBG_TSSI, + "[TSSI] Set TSSI MCS DE 0x%x[21:12]=0x%x\n", + _tssi_de_mcs_20m[i], + rtw89_phy_read32_mask(rtwdev, _tssi_de_mcs_20m[i], + _TSSI_DE_MASK)); + } +} + +static void _tssi_alimentk_dump_result(struct rtw89_dev *rtwdev, enum rtw89_rf_path path) +{ + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K]\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n" + "0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n0x%x = 0x%08x\n", + R_TSSI_PA_K1 + (path << 13), + rtw89_phy_read32(rtwdev, R_TSSI_PA_K1 + (path << 13)), + R_TSSI_PA_K2 + (path << 13), + rtw89_phy_read32(rtwdev, R_TSSI_PA_K2 + (path << 13)), + R_P0_TSSI_ALIM1 + (path << 13), + rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM1 + (path << 13)), + R_P0_TSSI_ALIM3 + (path << 13), + rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM3 + (path << 13)), + R_TSSI_PA_K5 + (path << 13), + rtw89_phy_read32(rtwdev, R_TSSI_PA_K5 + (path << 13)), + R_P0_TSSI_ALIM2 + (path << 13), + rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM2 + (path << 13)), + R_P0_TSSI_ALIM4 + (path << 13), + rtw89_phy_read32(rtwdev, R_P0_TSSI_ALIM4 + (path << 13)), + R_TSSI_PA_K8 + (path << 13), + rtw89_phy_read32(rtwdev, R_TSSI_PA_K8 + (path << 13))); +} + +static void _tssi_alimentk_done(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, enum rtw89_rf_path path) +{ + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + u8 channel = chan->channel; + u8 band; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "======>%s phy=%d path=%d\n", __func__, phy, path); + + if (channel >= 1 && channel <= 14) + band = TSSI_ALIMK_2G; + else if (channel >= 36 && channel <= 64) + band = TSSI_ALIMK_5GL; + else if (channel >= 100 && channel <= 144) + band = TSSI_ALIMK_5GM; + else if (channel >= 149 && channel <= 177) + band = TSSI_ALIMK_5GH; + else + band = TSSI_ALIMK_2G; + + if (tssi_info->alignment_done[path][band]) { + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD, + tssi_info->alignment_value[path][band][0]); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD, + tssi_info->alignment_value[path][band][1]); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD, + tssi_info->alignment_value[path][band][2]); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD, + tssi_info->alignment_value[path][band][3]); + } + + _tssi_alimentk_dump_result(rtwdev, path); +} + +static void _tssi_hw_tx(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, u16 cnt, u16 period, s16 pwr_dbm, + u8 enable) +{ + enum rtw89_rf_path_bit rx_path; + + if (path == RF_PATH_A) + rx_path = RF_A; + else if 
(path == RF_PATH_B) + rx_path = RF_B; + else if (path == RF_PATH_AB) + rx_path = RF_AB; + else + rx_path = RF_ABCD; /* don't change path, but still set others */ + + if (enable) { + rtw8852bx_bb_set_plcp_tx(rtwdev); + rtw8852bx_bb_cfg_tx_path(rtwdev, path); + rtw8852bx_bb_ctrl_rx_path(rtwdev, rx_path); + rtw8852bx_bb_set_power(rtwdev, pwr_dbm, phy); + } + + rtw8852bx_bb_set_pmac_pkt_tx(rtwdev, enable, cnt, period, 20, phy); +} + +static void _tssi_backup_bb_registers(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, const u32 reg[], + u32 reg_backup[], u32 reg_num) +{ + u32 i; + + for (i = 0; i < reg_num; i++) { + reg_backup[i] = rtw89_phy_read32_mask(rtwdev, reg[i], MASKDWORD); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI] Backup BB 0x%x = 0x%x\n", reg[i], + reg_backup[i]); + } +} + +static void _tssi_reload_bb_registers(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, const u32 reg[], + u32 reg_backup[], u32 reg_num) + +{ + u32 i; + + for (i = 0; i < reg_num; i++) { + rtw89_phy_write32_mask(rtwdev, reg[i], MASKDWORD, reg_backup[i]); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI] Reload BB 0x%x = 0x%x\n", reg[i], + reg_backup[i]); + } +} + +static u8 _tssi_ch_to_idx(struct rtw89_dev *rtwdev, u8 channel) +{ + u8 channel_index; + + if (channel >= 1 && channel <= 14) + channel_index = channel - 1; + else if (channel >= 36 && channel <= 64) + channel_index = (channel - 36) / 2 + 14; + else if (channel >= 100 && channel <= 144) + channel_index = ((channel - 100) / 2) + 15 + 14; + else if (channel >= 149 && channel <= 177) + channel_index = ((channel - 149) / 2) + 38 + 14; + else + channel_index = 0; + + return channel_index; +} + +static bool _tssi_get_cw_report(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path, const s16 *power, + u32 *tssi_cw_rpt) +{ + u32 tx_counter, tx_counter_tmp; + const int retry = 100; + u32 tmp; + int j, k; + + for (j = 0; j < RTW8852BT_TSSI_PATH_NR; j++) { + rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, _tssi_trigger[path], B_P0_TSSI_EN, 0x1); + + tx_counter = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD); + + tmp = rtw89_phy_read32_mask(rtwdev, _tssi_trigger[path], MASKDWORD); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] 0x%x = 0x%08x path=%d\n", + _tssi_trigger[path], tmp, path); + + if (j == 0) + _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], true); + else + _tssi_hw_tx(rtwdev, phy, RF_PATH_ABCD, 100, 5000, power[j], true); + + tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD); + tx_counter_tmp -= tx_counter; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] First HWTXcounter=%d path=%d\n", + tx_counter_tmp, path); + + for (k = 0; k < retry; k++) { + tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path], + B_TSSI_CWRPT_RDY); + if (tmp) + break; + + udelay(30); + + tx_counter_tmp = + rtw89_phy_read32_mask(rtwdev, R_TX_COUNTER, MASKLWORD); + tx_counter_tmp -= tx_counter; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] Flow k = %d HWTXcounter=%d path=%d\n", + k, tx_counter_tmp, path); + } + + if (k >= retry) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] TSSI finish bit k > %d mp:100ms normal:30us path=%d\n", + k, path); + + _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false); + return false; + } + + tssi_cw_rpt[j] = + rtw89_phy_read32_mask(rtwdev, _tssi_cw_rpt_addr[path], + B_TSSI_CWRPT); + + _tssi_hw_tx(rtwdev, phy, path, 100, 5000, power[j], false); + + tx_counter_tmp = rtw89_phy_read32_mask(rtwdev, 
R_TX_COUNTER, MASKLWORD); + tx_counter_tmp -= tx_counter; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] Final HWTXcounter=%d path=%d\n", + tx_counter_tmp, path); + } + + return true; +} + +static void _tssi_alimentk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, + enum rtw89_rf_path path) +{ + static const u32 bb_reg[8] = {0x5820, 0x7820, 0x4978, 0x58e4, + 0x78e4, 0x49c0, 0x0d18, 0x0d80}; + static const s16 power_2g[4] = {48, 20, 4, -8}; + static const s16 power_5g[4] = {48, 20, 4, 4}; + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + s32 tssi_alim_offset_1, tssi_alim_offset_2, tssi_alim_offset_3; + u32 tssi_cw_rpt[RTW8852BT_TSSI_PATH_NR] = {}; + u8 channel = chan->channel; + u8 ch_idx = _tssi_ch_to_idx(rtwdev, channel); + struct rtw8852bx_bb_tssi_bak tssi_bak; + s32 aliment_diff, tssi_cw_default; + u32 start_time, finish_time; + u32 bb_reg_backup[8] = {}; + const s16 *power; + u8 band; + bool ok; + u32 tmp; + u8 j; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "======> %s channel=%d path=%d\n", __func__, channel, + path); + + start_time = ktime_get_ns(); + + if (chan->band_type == RTW89_BAND_2G) + power = power_2g; + else + power = power_5g; + + if (channel >= 1 && channel <= 14) + band = TSSI_ALIMK_2G; + else if (channel >= 36 && channel <= 64) + band = TSSI_ALIMK_5GL; + else if (channel >= 100 && channel <= 144) + band = TSSI_ALIMK_5GM; + else if (channel >= 149 && channel <= 177) + band = TSSI_ALIMK_5GH; + else + band = TSSI_ALIMK_2G; + + rtw8852bx_bb_backup_tssi(rtwdev, phy, &tssi_bak); + _tssi_backup_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, + ARRAY_SIZE(bb_reg_backup)); + + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_AVG, B_P0_TSSI_AVG, 0x8); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_AVG, B_P1_TSSI_AVG, 0x8); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_MV_AVG, B_P0_TSSI_MV_AVG, 0x2); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_MV_AVG, B_P1_TSSI_MV_AVG, 0x2); + + ok = _tssi_get_cw_report(rtwdev, phy, path, power, tssi_cw_rpt); + if (!ok) + goto out; + + for (j = 0; j < RTW8852BT_TSSI_PATH_NR; j++) { + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] power[%d]=%d tssi_cw_rpt[%d]=%d\n", j, + power[j], j, tssi_cw_rpt[j]); + } + + tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][1], + _tssi_cw_default_mask[1]); + tssi_cw_default = sign_extend32(tmp, 8); + tssi_alim_offset_1 = tssi_cw_rpt[0] - ((power[0] - power[1]) * 2) - + tssi_cw_rpt[1] + tssi_cw_default; + aliment_diff = tssi_alim_offset_1 - tssi_cw_default; + + tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][2], + _tssi_cw_default_mask[2]); + tssi_cw_default = sign_extend32(tmp, 8); + tssi_alim_offset_2 = tssi_cw_default + aliment_diff; + + tmp = rtw89_phy_read32_mask(rtwdev, _tssi_cw_default_addr[path][3], + _tssi_cw_default_mask[3]); + tssi_cw_default = sign_extend32(tmp, 8); + tssi_alim_offset_3 = tssi_cw_default + aliment_diff; + + if (path == RF_PATH_A) { + tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) | + FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) | + FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3); + + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM1, tmp); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_ALIM2, B_P0_TSSI_ALIM2, tmp); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n", + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3, B_P0_TSSI_ALIM31), + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM11), + 
rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM12), + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1, B_P0_TSSI_ALIM13)); + } else { + tmp = FIELD_PREP(B_P1_TSSI_ALIM11, tssi_alim_offset_1) | + FIELD_PREP(B_P1_TSSI_ALIM12, tssi_alim_offset_2) | + FIELD_PREP(B_P1_TSSI_ALIM13, tssi_alim_offset_3); + + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM1, tmp); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_ALIM2, B_P1_TSSI_ALIM2, tmp); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] tssi_alim_offset = 0x%x 0x%x 0x%x 0x%x\n", + rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM3, B_P1_TSSI_ALIM31), + rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM11), + rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM12), + rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_ALIM1, B_P1_TSSI_ALIM13)); + } + + tssi_info->alignment_done[path][band] = true; + tssi_info->alignment_value[path][band][0] = + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD); + tssi_info->alignment_value[path][band][1] = + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD); + tssi_info->alignment_value[path][band][2] = + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD); + tssi_info->alignment_value[path][band][3] = + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD); + + tssi_info->check_backup_aligmk[path][ch_idx] = true; + tssi_info->alignment_backup_by_ch[path][ch_idx][0] = + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM1 + (path << 13), MASKDWORD); + tssi_info->alignment_backup_by_ch[path][ch_idx][1] = + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM3 + (path << 13), MASKDWORD); + tssi_info->alignment_backup_by_ch[path][ch_idx][2] = + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM2 + (path << 13), MASKDWORD); + tssi_info->alignment_backup_by_ch[path][ch_idx][3] = + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_ALIM4 + (path << 13), MASKDWORD); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][0], 0x%x = 0x%08x\n", + path, band, R_P0_TSSI_ALIM1 + (path << 13), + tssi_info->alignment_value[path][band][0]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][1], 0x%x = 0x%08x\n", + path, band, R_P0_TSSI_ALIM3 + (path << 13), + tssi_info->alignment_value[path][band][1]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][2], 0x%x = 0x%08x\n", + path, band, R_P0_TSSI_ALIM2 + (path << 13), + tssi_info->alignment_value[path][band][2]); + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] tssi_info->alignment_value[path=%d][band=%d][3], 0x%x = 0x%08x\n", + path, band, R_P0_TSSI_ALIM4 + (path << 13), + tssi_info->alignment_value[path][band][3]); + +out: + _tssi_reload_bb_registers(rtwdev, phy, bb_reg, bb_reg_backup, + ARRAY_SIZE(bb_reg_backup)); + rtw8852bx_bb_restore_tssi(rtwdev, phy, &tssi_bak); + rtw8852bx_bb_tx_mode_switch(rtwdev, phy, 0); + + finish_time = ktime_get_ns(); + tssi_info->tssi_alimk_time += finish_time - start_time; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[TSSI PA K] %s processing time = %d ms\n", __func__, + tssi_info->tssi_alimk_time); +} + +void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev) +{ + struct rtw89_dpk_info *dpk = &rtwdev->dpk; + + u8 path; + + for (path = 0; path < 2; path++) { + dpk->cur_idx[path] = 0; + dpk->max_dpk_txagc[path] = 0x3F; + } + + dpk->is_dpk_enable = true; + dpk->is_dpk_reload_en = false; + _set_dpd_backoff(rtwdev, RTW89_PHY_0); +} + +void 
rtw8852bt_rck(struct rtw89_dev *rtwdev) +{ + u8 path; + + for (path = 0; path < RF_PATH_NUM_8852BT; path++) + _rck(rtwdev, path); +} + +void rtw8852bt_dack(struct rtw89_dev *rtwdev) +{ + u8 phy_map = rtw89_btc_phymap(rtwdev, RTW89_PHY_0, 0); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_START); + _dac_cal(rtwdev, false); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_DACK, BTC_WRFK_STOP); +} + +void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + u32 tx_en; + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_START); + rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); + _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); + + _iqk_init(rtwdev); + _iqk(rtwdev, phy_idx, false); + + rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_STOP); +} + +void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + u8 phy_map = rtw89_btc_phymap(rtwdev, phy_idx, 0); + u32 tx_en; + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_START); + rtw89_chip_stop_sch_tx(rtwdev, phy_idx, &tx_en, RTW89_SCH_TX_SEL_ALL); + _wait_rx_mode(rtwdev, _kpath(rtwdev, phy_idx)); + + _rx_dck(rtwdev, phy_idx); + + rtw89_chip_resume_sch_tx(rtwdev, phy_idx, tx_en); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_RXDCK, BTC_WRFK_STOP); +} + +void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) +{ + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "[DPK] ****** DPK Start (Ver: 0x%x) ******\n", RTW8852BT_DPK_VER); + + if (_dpk_bypass_check(rtwdev, phy_idx)) + _dpk_force_bypass(rtwdev, phy_idx); + else + _dpk_cal_select(rtwdev, phy_idx, RF_AB); +} + +void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev) +{ + _dpk_track(rtwdev); +} + +void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en) +{ + static const u32 reg[2] = {R_DPD_CH0A, R_DPD_CH0B}; + u8 phy_map = rtw89_btc_phymap(rtwdev, phy, RF_AB); + u32 reg_backup[2] = {}; + u32 tx_en; + u8 i; + + _tssi_backup_bb_registers(rtwdev, phy, reg, reg_backup, 2); + rtw89_debug(rtwdev, RTW89_DBG_TSSI, "[TSSI] %s: phy=%d\n", __func__, phy); + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_START); + + _tssi_dpk_off(rtwdev, phy); + _tssi_disable(rtwdev, phy); + + for (i = RF_PATH_A; i < RF_PATH_NUM_8852BT; i++) { + _tssi_rf_setting(rtwdev, phy, i); + _tssi_set_sys(rtwdev, phy, i); + _tssi_ini_txpwr_ctrl_bb(rtwdev, phy, i); + _tssi_ini_txpwr_ctrl_bb_he_tb(rtwdev, phy, i); + _tssi_set_dck(rtwdev, phy, i); + _tssi_set_tmeter_tbl(rtwdev, phy, i); + _tssi_set_dac_gain_tbl(rtwdev, phy, i); + _tssi_slope_cal_org(rtwdev, phy, i); + _tssi_alignment_default(rtwdev, phy, i, true); + _tssi_set_tssi_slope(rtwdev, phy, i); + + rtw89_chip_stop_sch_tx(rtwdev, phy, &tx_en, RTW89_SCH_TX_SEL_ALL); + _tmac_tx_pause(rtwdev, phy, true); + if (hwtx_en) + _tssi_alimentk(rtwdev, phy, i); + _tmac_tx_pause(rtwdev, phy, false); + rtw89_chip_resume_sch_tx(rtwdev, phy, tx_en); + } + + _tssi_enable(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy); + + _tssi_reload_bb_registers(rtwdev, phy, reg, reg_backup, 2); + + rtw89_btc_ntfy_wl_rfk(rtwdev, phy_map, BTC_WRFKT_IQK, BTC_WRFK_ONESHOT_STOP); +} + +void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + struct rtw89_tssi_info *tssi_info = &rtwdev->tssi; + u8 channel = chan->channel; + 
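/* Classify the channel into one of the four TSSI alignment bands + * (2G, 5G low, 5G mid, 5G high); _tssi_alimentk() records its results + * per band, so a band whose alignment_done flag is set can be reloaded + * below instead of being recalibrated. */ +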
u8 band; + u32 i; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "======>%s phy=%d channel=%d\n", __func__, phy, channel); + + if (channel >= 1 && channel <= 14) + band = TSSI_ALIMK_2G; + else if (channel >= 36 && channel <= 64) + band = TSSI_ALIMK_5GL; + else if (channel >= 100 && channel <= 144) + band = TSSI_ALIMK_5GM; + else if (channel >= 149 && channel <= 177) + band = TSSI_ALIMK_5GH; + else + band = TSSI_ALIMK_2G; + + _tssi_disable(rtwdev, phy); + + for (i = RF_PATH_A; i < RTW8852BT_TSSI_PATH_NR; i++) { + _tssi_rf_setting(rtwdev, phy, i); + _tssi_set_sys(rtwdev, phy, i); + _tssi_set_tmeter_tbl(rtwdev, phy, i); + + if (tssi_info->alignment_done[i][band]) + _tssi_alimentk_done(rtwdev, phy, i); + else + _tssi_alignment_default(rtwdev, phy, i, true); + } + + _tssi_enable(rtwdev, phy); + _tssi_set_efuse_to_de(rtwdev, phy); +} + +static void rtw8852bt_tssi_default_txagc(struct rtw89_dev *rtwdev, + enum rtw89_phy_idx phy, bool enable) +{ + const struct rtw89_chan *chan = rtw89_chan_get(rtwdev, RTW89_SUB_ENTITY_0); + u8 channel = chan->channel; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, "======> %s ch=%d\n", + __func__, channel); + + if (enable) + return; + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "======>%s 1 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n", + __func__, + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT), + rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT)); + + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT, 0xc0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT, 0xc0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT_EN, 0x1); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x0); + rtw89_phy_write32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT_EN, 0x1); + + _tssi_alimentk_done(rtwdev, phy, RF_PATH_A); + _tssi_alimentk_done(rtwdev, phy, RF_PATH_B); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "======>%s 2 SCAN_END Set 0x5818[7:0]=0x%x 0x7818[7:0]=0x%x\n", + __func__, + rtw89_phy_read32_mask(rtwdev, R_P0_TSSI_TRK, B_P0_TSSI_OFT), + rtw89_phy_read32_mask(rtwdev, R_P1_TSSI_TRK, B_P1_TSSI_OFT)); + + rtw89_debug(rtwdev, RTW89_DBG_RFK, + "======> %s SCAN_END\n", __func__); +} + +void rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, + enum rtw89_phy_idx phy_idx) +{ + if (scan_start) + rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, true); + else + rtw8852bt_tssi_default_txagc(rtwdev, phy_idx, false); +} diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h new file mode 100644 index 000000000000..09918835c6e8 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2024 Realtek Corporation + */ + +#ifndef __RTW89_8852BT_RFK_H__ +#define __RTW89_8852BT_RFK_H__ + +#include "core.h" + +void rtw8852bt_rck(struct rtw89_dev *rtwdev); +void rtw8852bt_dack(struct rtw89_dev *rtwdev); +void rtw8852bt_iqk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); +void rtw8852bt_rx_dck(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx); +void rtw8852bt_dpk_init(struct rtw89_dev *rtwdev); +void rtw8852bt_dpk(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void rtw8852bt_dpk_track(struct rtw89_dev *rtwdev); +void rtw8852bt_tssi(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy, bool hwtx_en); +void rtw8852bt_tssi_scan(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy); +void 
rtw8852bt_wifi_scan_notify(struct rtw89_dev *rtwdev, bool scan_start, + enum rtw89_phy_idx phy_idx); + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.c b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.c new file mode 100644 index 000000000000..782144bb7f49 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.c @@ -0,0 +1,490 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2024 Realtek Corporation + */ + +#include "rtw8852bt_rfk_table.h" + +static const struct rtw89_reg5_def rtw8852bt_tssi_sys_defs[] = { + RTW89_DECL_RFK_WM(0x12a8, 0x0000000f, 0x4), + RTW89_DECL_RFK_WM(0x32a8, 0x0000000f, 0x4), + RTW89_DECL_RFK_WM(0x12bc, 0x000ffff0, 0x5555), + RTW89_DECL_RFK_WM(0x32bc, 0x000ffff0, 0x5555), + RTW89_DECL_RFK_WM(0x0300, 0xff000000, 0x16), + RTW89_DECL_RFK_WM(0x0304, 0x000000ff, 0x19), + RTW89_DECL_RFK_WM(0x0314, 0xffff0000, 0x2041), + RTW89_DECL_RFK_WM(0x0318, 0xffffffff, 0x2041), + RTW89_DECL_RFK_WM(0x0318, 0xffffffff, 0x20012041), + RTW89_DECL_RFK_WM(0x0020, 0x00006000, 0x3), + RTW89_DECL_RFK_WM(0x0024, 0x00006000, 0x3), + RTW89_DECL_RFK_WM(0x0704, 0xffff0000, 0x601e), + RTW89_DECL_RFK_WM(0x2704, 0xffff0000, 0x601e), + RTW89_DECL_RFK_WM(0x0700, 0xf0000000, 0x4), + RTW89_DECL_RFK_WM(0x2700, 0xf0000000, 0x4), + RTW89_DECL_RFK_WM(0x0650, 0x3c000000, 0x0), + RTW89_DECL_RFK_WM(0x2650, 0x3c000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_defs); + +static const struct rtw89_reg5_def rtw8852bt_tssi_sys_a_defs_2g[] = { + RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x33), + RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x33), + RTW89_DECL_RFK_WM(0x58f8, 0x40000000, 0x1), + RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1e), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_a_defs_2g); + +static const struct rtw89_reg5_def rtw8852bt_tssi_sys_a_defs_5g[] = { + RTW89_DECL_RFK_WM(0x120c, 0x000000ff, 0x44), + RTW89_DECL_RFK_WM(0x12c0, 0x0ff00000, 0x44), + RTW89_DECL_RFK_WM(0x58f8, 0x40000000, 0x0), + RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1d), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_a_defs_5g); + +static const struct rtw89_reg5_def rtw8852bt_tssi_sys_b_defs_2g[] = { + RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x33), + RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x33), + RTW89_DECL_RFK_WM(0x78f8, 0x40000000, 0x1), + RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1e), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_b_defs_2g); + +static const struct rtw89_reg5_def rtw8852bt_tssi_sys_b_defs_5g[] = { + RTW89_DECL_RFK_WM(0x32c0, 0x0ff00000, 0x44), + RTW89_DECL_RFK_WM(0x320c, 0x000000ff, 0x44), + RTW89_DECL_RFK_WM(0x78f8, 0x40000000, 0x0), + RTW89_DECL_RFK_WM(0x0304, 0x0000ff00, 0x1d), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_sys_b_defs_5g); + +static const struct rtw89_reg5_def rtw8852bt_tssi_init_txpwr_defs_a[] = { + RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0), + RTW89_DECL_RFK_WM(0x5800, 0xffffffff, 0x003f807f), + RTW89_DECL_RFK_WM(0x580c, 0x0000007f, 0x40), + RTW89_DECL_RFK_WM(0x580c, 0x0fffff00, 0x00040), + RTW89_DECL_RFK_WM(0x5810, 0xffffffff, 0x59010000), + RTW89_DECL_RFK_WM(0x5814, 0x01ffffff, 0x002d000), + RTW89_DECL_RFK_WM(0x5814, 0xf8000000, 0x00), + RTW89_DECL_RFK_WM(0x5818, 0xffffffff, 0x002c1800), + RTW89_DECL_RFK_WM(0x581c, 0x3fffffff, 0x1dc80280), + RTW89_DECL_RFK_WM(0x5820, 0xffffffff, 0x00002080), + RTW89_DECL_RFK_WM(0x580c, 0x10000000, 0x1), + RTW89_DECL_RFK_WM(0x580c, 0x40000000, 0x1), + RTW89_DECL_RFK_WM(0x5834, 0x3fffffff, 0x000115f2), + RTW89_DECL_RFK_WM(0x5838, 0x7fffffff, 0x0000121), + RTW89_DECL_RFK_WM(0x5854, 
0x3fffffff, 0x000115f2), + RTW89_DECL_RFK_WM(0x5858, 0x7fffffff, 0x0000121), + RTW89_DECL_RFK_WM(0x5860, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0x5864, 0x07ffffff, 0x00801ff), + RTW89_DECL_RFK_WM(0x5898, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x589c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x58a4, 0x000000ff, 0x16), + RTW89_DECL_RFK_WM(0x58b0, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x58b4, 0x7fffffff, 0x0a002000), + RTW89_DECL_RFK_WM(0x58b8, 0x7fffffff, 0x00007628), + RTW89_DECL_RFK_WM(0x58bc, 0x07ffffff, 0x7a7807f), + RTW89_DECL_RFK_WM(0x58c0, 0xfffe0000, 0x003f), + RTW89_DECL_RFK_WM(0x58c4, 0xffffffff, 0x0003ffff), + RTW89_DECL_RFK_WM(0x58c8, 0x00ffffff, 0x000000), + RTW89_DECL_RFK_WM(0x58c8, 0xf0000000, 0x0), + RTW89_DECL_RFK_WM(0x58cc, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x58d0, 0x07ffffff, 0x2008101), + RTW89_DECL_RFK_WM(0x58d4, 0x000000ff, 0x00), + RTW89_DECL_RFK_WM(0x58d4, 0x0003fe00, 0x0ff), + RTW89_DECL_RFK_WM(0x58d4, 0x07fc0000, 0x100), + RTW89_DECL_RFK_WM(0x58d8, 0xffffffff, 0x8008016c), + RTW89_DECL_RFK_WM(0x58dc, 0x0001ffff, 0x0807f), + RTW89_DECL_RFK_WM(0x58dc, 0xfff00000, 0xc00), + RTW89_DECL_RFK_WM(0x58f0, 0x0003ffff, 0x001ff), + RTW89_DECL_RFK_WM(0x58f4, 0x000fffff, 0x000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_init_txpwr_defs_a); + +static const struct rtw89_reg5_def rtw8852bt_tssi_init_txpwr_defs_b[] = { + RTW89_DECL_RFK_WM(0x566c, 0x00001000, 0x0), + RTW89_DECL_RFK_WM(0x7800, 0xffffffff, 0x003f807f), + RTW89_DECL_RFK_WM(0x780c, 0x0000007f, 0x40), + RTW89_DECL_RFK_WM(0x780c, 0x0fffff00, 0x00040), + RTW89_DECL_RFK_WM(0x7810, 0xffffffff, 0x59010000), + RTW89_DECL_RFK_WM(0x7814, 0x01ffffff, 0x002d000), + RTW89_DECL_RFK_WM(0x7814, 0xf8000000, 0x00), + RTW89_DECL_RFK_WM(0x7818, 0xffffffff, 0x002c1800), + RTW89_DECL_RFK_WM(0x781c, 0x3fffffff, 0x1dc80280), + RTW89_DECL_RFK_WM(0x7820, 0xffffffff, 0x00002080), + RTW89_DECL_RFK_WM(0x780c, 0x10000000, 0x1), + RTW89_DECL_RFK_WM(0x780c, 0x40000000, 0x1), + RTW89_DECL_RFK_WM(0x7834, 0x3fffffff, 0x000115f2), + RTW89_DECL_RFK_WM(0x7838, 0x7fffffff, 0x0000121), + RTW89_DECL_RFK_WM(0x7854, 0x3fffffff, 0x000115f2), + RTW89_DECL_RFK_WM(0x7858, 0x7fffffff, 0x0000121), + RTW89_DECL_RFK_WM(0x7860, 0x80000000, 0x0), + RTW89_DECL_RFK_WM(0x7864, 0x07ffffff, 0x00801ff), + RTW89_DECL_RFK_WM(0x7898, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x789c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x78a4, 0x000000ff, 0x16), + RTW89_DECL_RFK_WM(0x78b0, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x78b4, 0x7fffffff, 0x0a002000), + RTW89_DECL_RFK_WM(0x78b8, 0x7fffffff, 0x00007628), + RTW89_DECL_RFK_WM(0x78bc, 0x07ffffff, 0x7a7807f), + RTW89_DECL_RFK_WM(0x78c0, 0xfffe0000, 0x003f), + RTW89_DECL_RFK_WM(0x78c4, 0xffffffff, 0x0003ffff), + RTW89_DECL_RFK_WM(0x78c8, 0x00ffffff, 0x000000), + RTW89_DECL_RFK_WM(0x78c8, 0xf0000000, 0x0), + RTW89_DECL_RFK_WM(0x78cc, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x78d0, 0x07ffffff, 0x2008101), + RTW89_DECL_RFK_WM(0x78d4, 0x000000ff, 0x00), + RTW89_DECL_RFK_WM(0x78d4, 0x0003fe00, 0x0ff), + RTW89_DECL_RFK_WM(0x78d4, 0x07fc0000, 0x100), + RTW89_DECL_RFK_WM(0x78d8, 0xffffffff, 0x8008016c), + RTW89_DECL_RFK_WM(0x78dc, 0x0001ffff, 0x0807f), + RTW89_DECL_RFK_WM(0x78dc, 0xfff00000, 0xc00), + RTW89_DECL_RFK_WM(0x78f0, 0x0003ffff, 0x001ff), + RTW89_DECL_RFK_WM(0x78f4, 0x000fffff, 0x000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_init_txpwr_defs_b); + +static const struct rtw89_reg5_def rtw8852bt_tssi_init_txpwr_he_tb_defs_a[] = { + RTW89_DECL_RFK_WM(0x58a0, 0xffffffff, 0x000000fe), + 
RTW89_DECL_RFK_WM(0x58e4, 0x0000007f, 0x1f), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_init_txpwr_he_tb_defs_a); + +static const struct rtw89_reg5_def rtw8852bt_tssi_init_txpwr_he_tb_defs_b[] = { + RTW89_DECL_RFK_WM(0x78a0, 0xffffffff, 0x000000fe), + RTW89_DECL_RFK_WM(0x78e4, 0x0000007f, 0x1f), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_init_txpwr_he_tb_defs_b); + +static const struct rtw89_reg5_def rtw8852bt_tssi_dck_defs_a[] = { + RTW89_DECL_RFK_WM(0x580c, 0x0fff0000, 0x000), + RTW89_DECL_RFK_WM(0x5814, 0x003ff000, 0x0ef), + RTW89_DECL_RFK_WM(0x5814, 0x18000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_dck_defs_a); + +static const struct rtw89_reg5_def rtw8852bt_tssi_dck_defs_b[] = { + RTW89_DECL_RFK_WM(0x780c, 0x0fff0000, 0x000), + RTW89_DECL_RFK_WM(0x7814, 0x003ff000, 0x0ef), + RTW89_DECL_RFK_WM(0x7814, 0x18000000, 0x0), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_dck_defs_b); + +static const struct rtw89_reg5_def rtw8852bt_tssi_dac_gain_defs_a[] = { + RTW89_DECL_RFK_WM(0x58b0, 0x00000400, 0x1), + RTW89_DECL_RFK_WM(0x58b0, 0x00000fff, 0x000), + RTW89_DECL_RFK_WM(0x58b0, 0x00000800, 0x1), + RTW89_DECL_RFK_WM(0x5a00, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a04, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a08, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a0c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a10, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a14, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a18, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a1c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a20, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a24, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a28, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a2c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a30, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a34, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a38, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a3c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a40, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a44, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a48, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a4c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a50, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a54, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a58, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a5c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a60, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a64, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a68, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a6c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a70, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a74, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a78, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a7c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a80, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a84, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a88, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a8c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a90, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a94, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a98, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5a9c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5aa0, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5aa4, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5aa8, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5aac, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5ab0, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5ab4, 0xffffffff, 
0x00000000), + RTW89_DECL_RFK_WM(0x5ab8, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5abc, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5ac0, 0xffffffff, 0x00000000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_dac_gain_defs_a); + +static const struct rtw89_reg5_def rtw8852bt_tssi_dac_gain_defs_b[] = { + RTW89_DECL_RFK_WM(0x78b0, 0x00000fff, 0x000), + RTW89_DECL_RFK_WM(0x78b0, 0x00000800, 0x1), + RTW89_DECL_RFK_WM(0x7a00, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a04, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a08, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a0c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a10, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a14, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a18, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a1c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a20, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a24, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a28, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a2c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a30, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a34, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a38, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a3c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a40, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a44, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a48, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a4c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a50, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a54, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a58, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a5c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a60, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a64, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a68, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a6c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a70, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a74, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a78, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a7c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a80, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a84, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a88, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a8c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a90, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a94, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a98, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7a9c, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7aa0, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7aa4, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7aa8, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7aac, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7ab0, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7ab4, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7ab8, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7abc, 0xffffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7ac0, 0xffffffff, 0x00000000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_dac_gain_defs_b); + +static const struct rtw89_reg5_def rtw8852bt_tssi_slope_a_defs_2g[] = { + RTW89_DECL_RFK_WM(0x5608, 0x07ffffff, 0x0801008), + RTW89_DECL_RFK_WM(0x560c, 0x07ffffff, 0x0201020), + RTW89_DECL_RFK_WM(0x5610, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x5614, 0x07ffffff, 0x0804008), + RTW89_DECL_RFK_WM(0x5618, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x561c, 0x000001ff, 0x008), + RTW89_DECL_RFK_WM(0x561c, 0xffff0000, 0x0808), + RTW89_DECL_RFK_WM(0x5620, 0xffffffff, 0x08081e28), + 
RTW89_DECL_RFK_WM(0x5624, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x5628, 0xffffffff, 0x08081e28), + RTW89_DECL_RFK_WM(0x562c, 0x0000ffff, 0x0808), + RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_a_defs_2g); + +static const struct rtw89_reg5_def rtw8852bt_tssi_slope_a_defs_5g[] = { + RTW89_DECL_RFK_WM(0x5608, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x560c, 0x07ffffff, 0x0201019), + RTW89_DECL_RFK_WM(0x5610, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x5614, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x5618, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x561c, 0x000001ff, 0x008), + RTW89_DECL_RFK_WM(0x561c, 0xffff0000, 0x0808), + RTW89_DECL_RFK_WM(0x5620, 0xffffffff, 0x08081808), + RTW89_DECL_RFK_WM(0x5624, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x5628, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x562c, 0x0000ffff, 0x0808), + RTW89_DECL_RFK_WM(0x581c, 0x00100000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_a_defs_5g); + +static const struct rtw89_reg5_def rtw8852bt_tssi_slope_b_defs_2g[] = { + RTW89_DECL_RFK_WM(0x7608, 0x07ffffff, 0x0801008), + RTW89_DECL_RFK_WM(0x760c, 0x07ffffff, 0x0201020), + RTW89_DECL_RFK_WM(0x7610, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x7614, 0x07ffffff, 0x0804008), + RTW89_DECL_RFK_WM(0x7618, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x761c, 0x000001ff, 0x008), + RTW89_DECL_RFK_WM(0x761c, 0xffff0000, 0x0808), + RTW89_DECL_RFK_WM(0x7620, 0xffffffff, 0x08081e28), + RTW89_DECL_RFK_WM(0x7624, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x7628, 0xffffffff, 0x08081e28), + RTW89_DECL_RFK_WM(0x762c, 0x0000ffff, 0x0808), + RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_b_defs_2g); + +static const struct rtw89_reg5_def rtw8852bt_tssi_slope_b_defs_5g[] = { + RTW89_DECL_RFK_WM(0x7608, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x760c, 0x07ffffff, 0x0201019), + RTW89_DECL_RFK_WM(0x7610, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x7614, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x7618, 0x07ffffff, 0x0201008), + RTW89_DECL_RFK_WM(0x761c, 0x000001ff, 0x008), + RTW89_DECL_RFK_WM(0x761c, 0xffff0000, 0x0808), + RTW89_DECL_RFK_WM(0x7620, 0xffffffff, 0x08081808), + RTW89_DECL_RFK_WM(0x7624, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x7628, 0xffffffff, 0x08080808), + RTW89_DECL_RFK_WM(0x762c, 0x0000ffff, 0x0808), + RTW89_DECL_RFK_WM(0x781c, 0x00100000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_b_defs_5g); + +static const struct rtw89_reg5_def rtw8852bt_tssi_align_a_2g_all_defs[] = { + RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721), + RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101), + RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x029f57c0), + RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000077), + RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000), + RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x029f5bc0), + RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000076), + RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_a_2g_all_defs); + +static const struct rtw89_reg5_def rtw8852bt_tssi_align_a_5g1_all_defs[] = { + RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721), + RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101), + RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x007ff3d7), + RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000068), + RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000), + RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000), + 
RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_a_5g1_all_defs); + +static const struct rtw89_reg5_def rtw8852bt_tssi_align_a_5g2_all_defs[] = { + RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721), + RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101), + RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x00a003db), + RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000065), + RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000), + RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_a_5g2_all_defs); + +static const struct rtw89_reg5_def rtw8852bt_tssi_align_a_5g3_all_defs[] = { + RTW89_DECL_RFK_WM(0x5604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x5600, 0x3fffffff, 0x3f2d2721), + RTW89_DECL_RFK_WM(0x5604, 0x003fffff, 0x010101), + RTW89_DECL_RFK_WM(0x5630, 0x3fffffff, 0x01101be2), + RTW89_DECL_RFK_WM(0x5634, 0x3fffffff, 0x00000065), + RTW89_DECL_RFK_WM(0x5638, 0x000fffff, 0x00000), + RTW89_DECL_RFK_WM(0x563c, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5640, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x5644, 0x000fffff, 0x00000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_a_5g3_all_defs); + +static const struct rtw89_reg5_def rtw8852bt_tssi_align_b_2g_all_defs[] = { + RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721), + RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101), + RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x023f3fb9), + RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000075), + RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000), + RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x01df3fb8), + RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000074), + RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_b_2g_all_defs); + +static const struct rtw89_reg5_def rtw8852bt_tssi_align_b_5g1_all_defs[] = { + RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721), + RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101), + RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x010017e0), + RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000069), + RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000), + RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_b_5g1_all_defs); + +static const struct rtw89_reg5_def rtw8852bt_tssi_align_b_5g2_all_defs[] = { + RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721), + RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101), + RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x01201fe2), + RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000066), + RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000), + RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_b_5g2_all_defs); + +static const struct rtw89_reg5_def rtw8852bt_tssi_align_b_5g3_all_defs[] = { + RTW89_DECL_RFK_WM(0x7604, 0x80000000, 0x1), + RTW89_DECL_RFK_WM(0x7600, 0x3fffffff, 0x3f2d2721), + RTW89_DECL_RFK_WM(0x7604, 0x003fffff, 0x010101), + RTW89_DECL_RFK_WM(0x7630, 0x3fffffff, 0x01602fe5), + RTW89_DECL_RFK_WM(0x7634, 0x3fffffff, 0x00000068), + 
RTW89_DECL_RFK_WM(0x7638, 0x000fffff, 0x00000), + RTW89_DECL_RFK_WM(0x763c, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7640, 0x3fffffff, 0x00000000), + RTW89_DECL_RFK_WM(0x7644, 0x000fffff, 0x00000), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_align_b_5g3_all_defs); + +static const struct rtw89_reg5_def rtw8852bt_tssi_slope_defs_a[] = { + RTW89_DECL_RFK_WM(0x5814, 0x00000800, 0x1), + RTW89_DECL_RFK_WM(0x581c, 0x20000000, 0x1), + RTW89_DECL_RFK_WM(0x5814, 0x20000000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_defs_a); + +static const struct rtw89_reg5_def rtw8852bt_tssi_slope_defs_b[] = { + RTW89_DECL_RFK_WM(0x7814, 0x00000800, 0x1), + RTW89_DECL_RFK_WM(0x781c, 0x20000000, 0x1), + RTW89_DECL_RFK_WM(0x7814, 0x20000000, 0x1), +}; + +RTW89_DECLARE_RFK_TBL(rtw8852bt_tssi_slope_defs_b); diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.h b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.h new file mode 100644 index 000000000000..beb246237d17 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/rtw8852bt_rfk_table.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* Copyright(c) 2024 Realtek Corporation + */ + +#ifndef __RTW89_8852BT_RFK_TABLE_H__ +#define __RTW89_8852BT_RFK_TABLE_H__ + +#include "phy.h" + +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_a_defs_2g_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_a_defs_5g_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_b_defs_2g_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_sys_b_defs_5g_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_init_txpwr_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_init_txpwr_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_init_txpwr_he_tb_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_init_txpwr_he_tb_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_dck_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_dck_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_dac_gain_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_dac_gain_defs_b_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_a_defs_2g_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_a_defs_5g_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_b_defs_2g_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_b_defs_5g_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_a_2g_all_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_a_5g1_all_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_a_5g2_all_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_a_5g3_all_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_b_2g_all_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_b_5g1_all_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_b_5g2_all_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_align_b_5g3_all_defs_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_defs_a_tbl; +extern const struct rtw89_rfk_tbl rtw8852bt_tssi_slope_defs_b_tbl; + +#endif diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c.c b/drivers/net/wireless/realtek/rtw89/rtw8852c.c index 3571b41786d7..193168dc7b6c 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c.c @@ -73,6 +73,10 @@ static const u32 
rtw8852c_c2h_regs[RTW89_H2CREG_MAX] = { R_AX_C2HREG_DATA3_V1 }; +static const u32 rtw8852c_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = { + R_AX_C2HREG_DATA3_V1 + 3, R_AX_DBG_WOW, +}; + static const struct rtw89_page_regs rtw8852c_page_regs = { .hci_fc_ctrl = R_AX_HCI_FC_CTRL_V1, .ch_page_ctrl = R_AX_CH_PAGE_CTRL_V1, @@ -2941,6 +2945,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .dig_table = NULL, .dig_regs = &rtw8852c_dig_regs, .tssi_dbw_table = &rtw89_8852c_tssi_dbw_table, + .support_macid_num = RTW89_MAX_MAC_ID_NUM, .support_chanctx_num = 2, .support_rnr = false, .support_bands = BIT(NL80211_BAND_2GHZ) | @@ -3006,7 +3011,7 @@ const struct rtw89_chip_info rtw8852c_chip_info = { .c2h_counter_reg = {R_AX_UDM1 + 1, B_AX_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8}, .c2h_regs = rtw8852c_c2h_regs, .page_regs = &rtw8852c_page_regs, - .wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3, + .wow_reason_reg = rtw8852c_wow_wakeup_regs, .cfo_src_fd = false, .cfo_hw_comp = false, .dcfo_comp = &rtw8852c_dcfo_comp, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c index 654e3e5507cb..743f7014bf3e 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852c_rfk.c @@ -4070,12 +4070,11 @@ void rtw8852c_set_channel_rf(struct rtw89_dev *rtwdev, void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_idx) { struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; - DECLARE_BITMAP(map, RTW89_IQK_CHS_NR) = {}; + struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V0] = {}; const struct rtw89_chan *chan; enum rtw89_entity_mode mode; u8 chan_idx; u8 idx; - u8 i; mode = rtw89_get_entity_mode(rtwdev); switch (mode) { @@ -4087,34 +4086,21 @@ void rtw8852c_mcc_get_ch_info(struct rtw89_dev *rtwdev, enum rtw89_phy_idx phy_i break; } - for (i = 0; i <= chan_idx; i++) { - chan = rtw89_chan_get(rtwdev, i); + chan = rtw89_chan_get(rtwdev, chan_idx); - for (idx = 0; idx < RTW89_IQK_CHS_NR; idx++) { - if (rfk_mcc->ch[idx] == chan->channel && - rfk_mcc->band[idx] == chan->band_type) { - if (i != chan_idx) { - set_bit(idx, map); - break; - } + for (idx = 0; idx < ARRAY_SIZE(desc); idx++) { + struct rtw89_rfk_chan_desc *p = &desc[idx]; - goto bottom; - } - } + p->ch = rfk_mcc->ch[idx]; + + p->has_band = true; + p->band = rfk_mcc->band[idx]; } - idx = find_first_zero_bit(map, RTW89_IQK_CHS_NR); - if (idx == RTW89_IQK_CHS_NR) { - rtw89_debug(rtwdev, RTW89_DBG_RFK, - "%s: no empty rfk table; force replace the first\n", - __func__); - idx = 0; - } + idx = rtw89_rfk_chan_lookup(rtwdev, desc, ARRAY_SIZE(desc), chan); rfk_mcc->ch[idx] = chan->channel; rfk_mcc->band[idx] = chan->band_type; - -bottom: rfk_mcc->table_idx = idx; } diff --git a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c index e07c7f3ade41..8aaad7d58c0d 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8852ce.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8852ce.c @@ -55,6 +55,7 @@ static const struct rtw89_pci_info rtw8852c_pci_info = { .rpwm_addr = R_AX_PCIE_HRPWM_V1, .cpwm_addr = R_AX_PCIE_CRPWM, .mit_addr = R_AX_INT_MIT_RX_V1, + .wp_sel_addr = R_AX_WP_ADDR_H_SEL0_3, .tx_dma_ch_mask = 0, .bd_idx_addr_low_power = &rtw8852c_bd_idx_addr_low_power, .dma_addr_set = &rtw89_pci_ch_dma_addr_set_v1, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a.c b/drivers/net/wireless/realtek/rtw89/rtw8922a.c index 3b3ea3a7c19a..2af568a3264d 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8922a.c +++ 
b/drivers/net/wireless/realtek/rtw89/rtw8922a.c @@ -85,6 +85,10 @@ static const u32 rtw8922a_c2h_regs[RTW89_H2CREG_MAX] = { R_BE_C2HREG_DATA3 }; +static const u32 rtw8922a_wow_wakeup_regs[RTW89_WOW_REASON_NUM] = { + R_AX_C2HREG_DATA3_V1 + 3, R_BE_DBG_WOW, +}; + static const struct rtw89_page_regs rtw8922a_page_regs = { .hci_fc_ctrl = R_BE_HCI_FC_CTRL, .ch_page_ctrl = R_BE_CH_PAGE_CTRL, @@ -2544,6 +2548,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = { .dig_table = NULL, .dig_regs = &rtw8922a_dig_regs, .tssi_dbw_table = NULL, + .support_macid_num = 32, .support_chanctx_num = 2, .support_rnr = true, .support_bands = BIT(NL80211_BAND_2GHZ) | @@ -2608,7 +2613,7 @@ const struct rtw89_chip_info rtw8922a_chip_info = { .c2h_counter_reg = {R_BE_UDM1 + 1, B_BE_UDM1_HALMAC_C2H_ENQ_CNT_MASK >> 8}, .c2h_regs = rtw8922a_c2h_regs, .page_regs = &rtw8922a_page_regs, - .wow_reason_reg = R_AX_C2HREG_DATA3_V1 + 3, + .wow_reason_reg = rtw8922a_wow_wakeup_regs, .cfo_src_fd = true, .cfo_hw_comp = true, .dcfo_comp = NULL, diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c index 2a371829268c..0ebcb06ae848 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8922a_rfk.c @@ -255,6 +255,7 @@ static void rtw8922a_chlk_ktbl_sel(struct rtw89_dev *rtwdev, u8 kpath, u8 idx) static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev) { struct rtw89_rfk_mcc_info *rfk_mcc = &rtwdev->rfk_mcc; + struct rtw89_rfk_chan_desc desc[__RTW89_RFK_CHS_NR_V1] = {}; enum rtw89_sub_entity_idx sub_entity_idx; const struct rtw89_chan *chan; enum rtw89_entity_mode mode; @@ -265,16 +266,28 @@ static void rtw8922a_chlk_reload(struct rtw89_dev *rtwdev) switch (mode) { case RTW89_ENTITY_MODE_MCC_PREPARE: sub_entity_idx = RTW89_SUB_ENTITY_1; - tbl_sel = 1; break; default: sub_entity_idx = RTW89_SUB_ENTITY_0; - tbl_sel = 0; break; } chan = rtw89_chan_get(rtwdev, sub_entity_idx); + for (tbl_sel = 0; tbl_sel < ARRAY_SIZE(desc); tbl_sel++) { + struct rtw89_rfk_chan_desc *p = &desc[tbl_sel]; + + p->ch = rfk_mcc->ch[tbl_sel]; + + p->has_band = true; + p->band = rfk_mcc->band[tbl_sel]; + + p->has_bw = true; + p->bw = rfk_mcc->bw[tbl_sel]; + } + + tbl_sel = rtw89_rfk_chan_lookup(rtwdev, desc, ARRAY_SIZE(desc), chan); + rfk_mcc->ch[tbl_sel] = chan->channel; rfk_mcc->band[tbl_sel] = chan->band_type; rfk_mcc->bw[tbl_sel] = chan->band_width; diff --git a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c index ce8aaa9501e1..47f855a7a268 100644 --- a/drivers/net/wireless/realtek/rtw89/rtw8922ae.c +++ b/drivers/net/wireless/realtek/rtw89/rtw8922ae.c @@ -46,6 +46,7 @@ static const struct rtw89_pci_info rtw8922a_pci_info = { .rpwm_addr = R_BE_PCIE_HRPWM, .cpwm_addr = R_BE_PCIE_CRPWM, .mit_addr = R_BE_PCIE_MIT_CH_EN, + .wp_sel_addr = R_BE_WP_ADDR_H_SEL0_3_V1, .tx_dma_ch_mask = 0, .bd_idx_addr_low_power = NULL, .dma_addr_set = &rtw89_pci_ch_dma_addr_set_be, diff --git a/drivers/net/wireless/realtek/rtw89/ser.c b/drivers/net/wireless/realtek/rtw89/ser.c index 99896d85d2f8..5fc2faa9ba5a 100644 --- a/drivers/net/wireless/realtek/rtw89/ser.c +++ b/drivers/net/wireless/realtek/rtw89/ser.c @@ -308,9 +308,13 @@ static void ser_reset_vif(struct rtw89_dev *rtwdev, struct rtw89_vif *rtwvif) static void ser_sta_deinit_cam_iter(void *data, struct ieee80211_sta *sta) { - struct rtw89_vif *rtwvif = (struct rtw89_vif *)data; - struct rtw89_dev *rtwdev = rtwvif->rtwdev; + struct rtw89_vif *target_rtwvif = (struct 
rtw89_vif *)data; struct rtw89_sta *rtwsta = (struct rtw89_sta *)sta->drv_priv; + struct rtw89_vif *rtwvif = rtwsta->rtwvif; + struct rtw89_dev *rtwdev = rtwvif->rtwdev; + + if (rtwvif != target_rtwvif) + return; if (rtwvif->net_type == RTW89_NET_TYPE_AP_MODE || sta->tdls) rtw89_cam_deinit_addr_cam(rtwdev, &rtwsta->addr_cam); diff --git a/drivers/net/wireless/realtek/rtw89/txrx.h b/drivers/net/wireless/realtek/rtw89/txrx.h index c467a80ffa88..3882938c0893 100644 --- a/drivers/net/wireless/realtek/rtw89/txrx.h +++ b/drivers/net/wireless/realtek/rtw89/txrx.h @@ -113,6 +113,8 @@ static inline u8 rtw89_get_data_nss(struct rtw89_dev *rtwdev, u16 hw_rate) #define RTW89_TXWD_INFO0_GI_LTF GENMASK(27, 25) #define RTW89_TXWD_INFO0_DATA_RATE GENMASK(24, 16) #define RTW89_TXWD_INFO0_DATA_ER BIT(15) +#define RTW89_TXWD_INFO0_DATA_STBC BIT(12) +#define RTW89_TXWD_INFO0_DATA_LDPC BIT(11) #define RTW89_TXWD_INFO0_DISDATAFB BIT(10) #define RTW89_TXWD_INFO0_DATA_BW_ER BIT(8) #define RTW89_TXWD_INFO0_MULTIPORT_ID GENMASK(6, 4) @@ -556,6 +558,8 @@ struct rtw89_phy_sts_ie0 { #define RTW89_PHY_STS_IE01_W2_AVG_SNR GENMASK(5, 0) #define RTW89_PHY_STS_IE01_W2_EVM_MAX GENMASK(15, 8) #define RTW89_PHY_STS_IE01_W2_EVM_MIN GENMASK(23, 16) +#define RTW89_PHY_STS_IE01_W2_LDPC BIT(28) +#define RTW89_PHY_STS_IE01_W2_STBC BIT(30) enum rtw89_tx_channel { RTW89_TXCH_ACH0 = 0, diff --git a/drivers/net/wireless/realtek/rtw89/util.c b/drivers/net/wireless/realtek/rtw89/util.c new file mode 100644 index 000000000000..e71956ce9853 --- /dev/null +++ b/drivers/net/wireless/realtek/rtw89/util.c @@ -0,0 +1,106 @@ +// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +/* Copyright(c) 2024 Realtek Corporation + */ + +#include "util.h" + +#define FRAC_ROWS 3 +#define FRAC_ROW_MAX (FRAC_ROWS - 1) +#define NORM_ROW_MIN FRAC_ROWS + +static const u32 db_invert_table[12][8] = { + /* rows 0~2 in unit of U(32,3) */ + {10, 13, 16, 20, 25, 32, 40, 50}, + {64, 80, 101, 128, 160, 201, 256, 318}, + {401, 505, 635, 800, 1007, 1268, 1596, 2010}, + /* rows 3~11 in unit of U(32,0) */ + {316, 398, 501, 631, 794, 1000, 1259, 1585}, + {1995, 2512, 3162, 3981, 5012, 6310, 7943, 10000}, + {12589, 15849, 19953, 25119, 31623, 39811, 50119, 63098}, + {79433, 100000, 125893, 158489, 199526, 251189, 316228, 398107}, + {501187, 630957, 794328, 1000000, 1258925, 1584893, 1995262, 2511886}, + {3162278, 3981072, 5011872, 6309573, 7943282, 10000000, 12589254, + 15848932}, + {19952623, 25118864, 31622777, 39810717, 50118723, 63095734, 79432823, + 100000000}, + {125892541, 158489319, 199526232, 251188643, 316227766, 398107171, + 501187234, 630957345}, + {794328235, 1000000000, 1258925412, 1584893192, 1995262315, 2511886432U, + 3162277660U, 3981071706U}, +}; + +u32 rtw89_linear_2_db(u64 val) +{ + u8 i, j; + u32 dB; + + for (i = 0; i < 12; i++) { + for (j = 0; j < 8; j++) { + if (i <= FRAC_ROW_MAX && + (val << RTW89_LINEAR_FRAC_BITS) <= db_invert_table[i][j]) + goto cnt; + else if (i > FRAC_ROW_MAX && val <= db_invert_table[i][j]) + goto cnt; + } + } + + return 96; /* maximum 96 dB */ + +cnt: + /* special cases */ + if (j == 0 && i == 0) + goto end; + + if (i == NORM_ROW_MIN && j == 0) { + if (db_invert_table[NORM_ROW_MIN][0] - val > + val - (db_invert_table[FRAC_ROW_MAX][7] >> RTW89_LINEAR_FRAC_BITS)) { + i = FRAC_ROW_MAX; + j = 7; + } + goto end; + } + + if (i <= FRAC_ROW_MAX) + val <<= RTW89_LINEAR_FRAC_BITS; + + /* compare difference to get precise dB */ + if (j == 0) { + if (db_invert_table[i][j] - val > + val - db_invert_table[i - 1][7]) { + i--; + j = 7; + } + }
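+ /* within the same row, step back to entry j - 1 when it is closer to val */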
else { + if (db_invert_table[i][j] - val > + val - db_invert_table[i][j - 1]) { + j--; + } + } +end: + dB = (i << 3) + j + 1; + + return dB; +} +EXPORT_SYMBOL(rtw89_linear_2_db); + +u64 rtw89_db_2_linear(u32 db) +{ + u64 linear; + u8 i, j; + + if (db > 96) + db = 96; + else if (db < 1) + return 1; + + i = (db - 1) >> 3; + j = (db - 1) & 0x7; + + linear = db_invert_table[i][j]; + + if (i >= NORM_ROW_MIN) + linear = linear << RTW89_LINEAR_FRAC_BITS; + + return linear; +} +EXPORT_SYMBOL(rtw89_db_2_linear); diff --git a/drivers/net/wireless/realtek/rtw89/util.h b/drivers/net/wireless/realtek/rtw89/util.h index e2ed4565025d..e82e7df052d8 100644 --- a/drivers/net/wireless/realtek/rtw89/util.h +++ b/drivers/net/wireless/realtek/rtw89/util.h @@ -6,6 +6,8 @@ #include "core.h" +#define RTW89_LINEAR_FRAC_BITS 3 + #define rtw89_iterate_vifs_bh(rtwdev, iterator, data) \ ieee80211_iterate_active_interfaces_atomic((rtwdev)->hw, \ IEEE80211_IFACE_ITER_NORMAL, iterator, data) @@ -55,4 +57,7 @@ static inline void ether_addr_copy_mask(u8 *dst, const u8 *src, u8 mask) } } +u32 rtw89_linear_2_db(u64 linear); +u64 rtw89_db_2_linear(u32 db); + #endif diff --git a/drivers/net/wireless/realtek/rtw89/wow.c b/drivers/net/wireless/realtek/rtw89/wow.c index fa61484c3839..9882064ef68d 100644 --- a/drivers/net/wireless/realtek/rtw89/wow.c +++ b/drivers/net/wireless/realtek/rtw89/wow.c @@ -27,17 +27,23 @@ void rtw89_wow_parse_akm(struct rtw89_dev *rtwdev, struct sk_buff *skb) rtw_wow->akm = rsn_ie->akm_cipher_suite.type; } +#define RTW89_CIPHER_INFO_DEF(cipher) \ + {WLAN_CIPHER_SUITE_ ## cipher, .fw_alg = RTW89_WOW_FW_ALG_ ## cipher, \ + .len = WLAN_KEY_LEN_ ## cipher} + static const struct rtw89_cipher_info rtw89_cipher_info_defs[] = { - {WLAN_CIPHER_SUITE_WEP40, .fw_alg = 1, .len = WLAN_KEY_LEN_WEP40,}, - {WLAN_CIPHER_SUITE_WEP104, .fw_alg = 2, .len = WLAN_KEY_LEN_WEP104,}, - {WLAN_CIPHER_SUITE_TKIP, .fw_alg = 3, .len = WLAN_KEY_LEN_TKIP,}, - {WLAN_CIPHER_SUITE_CCMP, .fw_alg = 6, .len = WLAN_KEY_LEN_CCMP,}, - {WLAN_CIPHER_SUITE_GCMP, .fw_alg = 8, .len = WLAN_KEY_LEN_GCMP,}, - {WLAN_CIPHER_SUITE_CCMP_256, .fw_alg = 7, .len = WLAN_KEY_LEN_CCMP_256,}, - {WLAN_CIPHER_SUITE_GCMP_256, .fw_alg = 23, .len = WLAN_KEY_LEN_GCMP_256,}, - {WLAN_CIPHER_SUITE_AES_CMAC, .fw_alg = 32, .len = WLAN_KEY_LEN_AES_CMAC,}, + RTW89_CIPHER_INFO_DEF(WEP40), + RTW89_CIPHER_INFO_DEF(WEP104), + RTW89_CIPHER_INFO_DEF(TKIP), + RTW89_CIPHER_INFO_DEF(CCMP), + RTW89_CIPHER_INFO_DEF(GCMP), + RTW89_CIPHER_INFO_DEF(CCMP_256), + RTW89_CIPHER_INFO_DEF(GCMP_256), + RTW89_CIPHER_INFO_DEF(AES_CMAC), }; +#undef RTW89_CIPHER_INFO_DEF + static const struct rtw89_cipher_info *rtw89_cipher_alg_recognize(u32 cipher) { @@ -717,13 +723,18 @@ static void rtw89_wow_show_wakeup_reason(struct rtw89_dev *rtwdev) { struct rtw89_wow_param *rtw_wow = &rtwdev->wow; struct rtw89_wow_aoac_report *aoac_rpt = &rtw_wow->aoac_rpt; - u32 wow_reason_reg = rtwdev->chip->wow_reason_reg; struct cfg80211_wowlan_nd_info nd_info; struct cfg80211_wowlan_wakeup wakeup = { .pattern_idx = -1, }; + u32 wow_reason_reg; u8 reason; + if (RTW89_CHK_FW_FEATURE(WOW_REASON_V1, &rtwdev->fw)) + wow_reason_reg = rtwdev->chip->wow_reason_reg[RTW89_WOW_REASON_V1]; + else + wow_reason_reg = rtwdev->chip->wow_reason_reg[RTW89_WOW_REASON_V0]; + reason = rtw89_read8(rtwdev, wow_reason_reg); switch (reason) { case RTW89_WOW_RSN_RX_DEAUTH: @@ -1284,12 +1295,16 @@ static int rtw89_wow_disable_trx_pre(struct rtw89_dev *rtwdev) static int rtw89_wow_disable_trx_post(struct rtw89_dev *rtwdev) { + struct rtw89_wow_param 
*rtw_wow = &rtwdev->wow; + struct ieee80211_vif *vif = rtw_wow->wow_vif; int ret; ret = rtw89_mac_cfg_ppdu_status(rtwdev, RTW89_MAC_0, true); if (ret) rtw89_err(rtwdev, "cfg ppdu status\n"); + rtw89_fw_h2c_set_bcn_fltr_cfg(rtwdev, vif, true); + return ret; } diff --git a/drivers/net/wireless/realtek/rtw89/wow.h b/drivers/net/wireless/realtek/rtw89/wow.h index e595aee0196d..0d90add0e88d 100644 --- a/drivers/net/wireless/realtek/rtw89/wow.h +++ b/drivers/net/wireless/realtek/rtw89/wow.h @@ -35,6 +35,17 @@ enum rtw89_wake_reason { RTW89_WOW_RSN_RX_NLO = 0x55, }; +enum rtw89_fw_alg { + RTW89_WOW_FW_ALG_WEP40 = 0x1, + RTW89_WOW_FW_ALG_WEP104 = 0x2, + RTW89_WOW_FW_ALG_TKIP = 0x3, + RTW89_WOW_FW_ALG_CCMP = 0x6, + RTW89_WOW_FW_ALG_CCMP_256 = 0x7, + RTW89_WOW_FW_ALG_GCMP = 0x8, + RTW89_WOW_FW_ALG_GCMP_256 = 0x9, + RTW89_WOW_FW_ALG_AES_CMAC = 0xa, +}; + struct rtw89_cipher_suite { u8 oui[3]; u8 type; @@ -64,6 +75,25 @@ struct rtw89_set_key_info_iter_data { bool error; }; +static inline int rtw89_wow_get_sec_hdr_len(struct rtw89_dev *rtwdev) +{ + struct rtw89_wow_param *rtw_wow = &rtwdev->wow; + + if (!(rtwdev->chip->chip_id == RTL8852A || rtw89_is_rtl885xb(rtwdev))) + return 0; + + switch (rtw_wow->ptk_alg) { + case RTW89_WOW_FW_ALG_WEP40: + return 4; + case RTW89_WOW_FW_ALG_TKIP: + case RTW89_WOW_FW_ALG_CCMP: + case RTW89_WOW_FW_ALG_GCMP_256: + return 8; + default: + return 0; + } +} + #ifdef CONFIG_PM int rtw89_wow_suspend(struct rtw89_dev *rtwdev, struct cfg80211_wowlan *wowlan); int rtw89_wow_resume(struct rtw89_dev *rtwdev); diff --git a/drivers/net/wireless/rsi/rsi_91x_mac80211.c b/drivers/net/wireless/rsi/rsi_91x_mac80211.c index 211fa25b9a78..3425a473b9a1 100644 --- a/drivers/net/wireless/rsi/rsi_91x_mac80211.c +++ b/drivers/net/wireless/rsi/rsi_91x_mac80211.c @@ -410,10 +410,11 @@ static int rsi_mac80211_start(struct ieee80211_hw *hw) /** * rsi_mac80211_stop() - This is the last handler that 802.11 module calls. * @hw: Pointer to the ieee80211_hw structure. + * @suspend: true if this was called from the suspend flow. * * Return: None.
*/ -static void rsi_mac80211_stop(struct ieee80211_hw *hw) +static void rsi_mac80211_stop(struct ieee80211_hw *hw, bool suspend) { struct rsi_hw *adapter = hw->priv; struct rsi_common *common = adapter->priv; diff --git a/drivers/net/wireless/silabs/wfx/sta.c b/drivers/net/wireless/silabs/wfx/sta.c index a904602f02ce..216d43c8bd6e 100644 --- a/drivers/net/wireless/silabs/wfx/sta.c +++ b/drivers/net/wireless/silabs/wfx/sta.c @@ -805,7 +805,7 @@ int wfx_start(struct ieee80211_hw *hw) return 0; } -void wfx_stop(struct ieee80211_hw *hw) +void wfx_stop(struct ieee80211_hw *hw, bool suspend) { struct wfx_dev *wdev = hw->priv; diff --git a/drivers/net/wireless/silabs/wfx/sta.h b/drivers/net/wireless/silabs/wfx/sta.h index c478ddcb934b..7817c7c6f3dd 100644 --- a/drivers/net/wireless/silabs/wfx/sta.h +++ b/drivers/net/wireless/silabs/wfx/sta.h @@ -20,7 +20,7 @@ struct wfx_sta_priv { /* mac80211 interface */ int wfx_start(struct ieee80211_hw *hw); -void wfx_stop(struct ieee80211_hw *hw); +void wfx_stop(struct ieee80211_hw *hw, bool suspend); int wfx_config(struct ieee80211_hw *hw, u32 changed); int wfx_set_rts_threshold(struct ieee80211_hw *hw, u32 value); void wfx_set_default_unicast_key(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int idx); diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c index 8ef1d06b9bbd..c259da8161e4 100644 --- a/drivers/net/wireless/st/cw1200/sta.c +++ b/drivers/net/wireless/st/cw1200/sta.c @@ -90,7 +90,7 @@ out: return ret; } -void cw1200_stop(struct ieee80211_hw *dev) +void cw1200_stop(struct ieee80211_hw *dev, bool suspend) { struct cw1200_common *priv = dev->priv; LIST_HEAD(list); diff --git a/drivers/net/wireless/st/cw1200/sta.h b/drivers/net/wireless/st/cw1200/sta.h index a49f187c7049..b955b92cfd73 100644 --- a/drivers/net/wireless/st/cw1200/sta.h +++ b/drivers/net/wireless/st/cw1200/sta.h @@ -13,7 +13,7 @@ /* mac80211 API */ int cw1200_start(struct ieee80211_hw *dev); -void cw1200_stop(struct ieee80211_hw *dev); +void cw1200_stop(struct ieee80211_hw *dev, bool suspend); int cw1200_add_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif); void cw1200_remove_interface(struct ieee80211_hw *dev, diff --git a/drivers/net/wireless/ti/wl1251/main.c b/drivers/net/wireless/ti/wl1251/main.c index 0da2d29dd7bd..bb53d681c11b 100644 --- a/drivers/net/wireless/ti/wl1251/main.c +++ b/drivers/net/wireless/ti/wl1251/main.c @@ -415,7 +415,7 @@ out: return ret; } -static void wl1251_op_stop(struct ieee80211_hw *hw) +static void wl1251_op_stop(struct ieee80211_hw *hw, bool suspend) { struct wl1251 *wl = hw->priv; diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c index 2ccac1cdec01..39d8eebb9b6e 100644 --- a/drivers/net/wireless/ti/wl18xx/main.c +++ b/drivers/net/wireless/ti/wl18xx/main.c @@ -1177,8 +1177,49 @@ static int wl18xx_hw_init(struct wl1271 *wl) return ret; } -static void wl18xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status, - struct wl_fw_status *fw_status) +static void wl18xx_convert_fw_status_8_9_1(struct wl1271 *wl, + void *raw_fw_status, + struct wl_fw_status *fw_status) +{ + struct wl18xx_fw_status_8_9_1 *int_fw_status = raw_fw_status; + + fw_status->intr = le32_to_cpu(int_fw_status->intr); + fw_status->fw_rx_counter = int_fw_status->fw_rx_counter; + fw_status->drv_rx_counter = int_fw_status->drv_rx_counter; + fw_status->tx_results_counter = int_fw_status->tx_results_counter; + fw_status->rx_pkt_descs = int_fw_status->rx_pkt_descs; + + fw_status->fw_localtime = 
le32_to_cpu(int_fw_status->fw_localtime); + fw_status->link_ps_bitmap = le32_to_cpu(int_fw_status->link_ps_bitmap); + fw_status->link_fast_bitmap = + le32_to_cpu(int_fw_status->link_fast_bitmap); + fw_status->total_released_blks = + le32_to_cpu(int_fw_status->total_released_blks); + fw_status->tx_total = le32_to_cpu(int_fw_status->tx_total); + + fw_status->counters.tx_released_pkts = + int_fw_status->counters.tx_released_pkts; + fw_status->counters.tx_lnk_free_pkts = + int_fw_status->counters.tx_lnk_free_pkts; + fw_status->counters.tx_lnk_sec_pn16 = + int_fw_status->counters.tx_lnk_sec_pn16; + fw_status->counters.tx_voice_released_blks = + int_fw_status->counters.tx_voice_released_blks; + fw_status->counters.tx_last_rate = + int_fw_status->counters.tx_last_rate; + fw_status->counters.tx_last_rate_mbps = + int_fw_status->counters.tx_last_rate_mbps; + fw_status->counters.hlid = + int_fw_status->counters.hlid; + + fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr); + + fw_status->priv = &int_fw_status->priv; +} + +static void wl18xx_convert_fw_status_8_9_0(struct wl1271 *wl, + void *raw_fw_status, + struct wl_fw_status *fw_status) { struct wl18xx_fw_status *int_fw_status = raw_fw_status; @@ -1214,6 +1255,15 @@ static void wl18xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status, fw_status->priv = &int_fw_status->priv; } +static void wl18xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status, + struct wl_fw_status *fw_status) +{ + if (wl->chip.fw_ver[FW_VER_MAJOR] == 0) + wl18xx_convert_fw_status_8_9_0(wl, raw_fw_status, fw_status); + else + wl18xx_convert_fw_status_8_9_1(wl, raw_fw_status, fw_status); +} + static void wl18xx_set_tx_desc_csum(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc, struct sk_buff *skb) @@ -1515,12 +1565,29 @@ static int wl18xx_handle_static_data(struct wl1271 *wl, { struct wl18xx_static_data_priv *static_data_priv = (struct wl18xx_static_data_priv *) static_data->priv; + size_t fw_status_len; strscpy(wl->chip.phy_fw_ver_str, static_data_priv->phy_version, sizeof(wl->chip.phy_fw_ver_str)); wl1271_info("PHY firmware version: %s", static_data_priv->phy_version); + /* Adjust the firmware status size according to the firmware version */ + if (wl->chip.fw_ver[FW_VER_MAJOR] == 0) + fw_status_len = sizeof(struct wl18xx_fw_status); + else + fw_status_len = sizeof(struct wl18xx_fw_status_8_9_1); + + if (wl->fw_status_len != fw_status_len) { + void *new_status = krealloc(wl->raw_fw_status, fw_status_len, + GFP_KERNEL | __GFP_ZERO); + if (!new_status) + return -ENOMEM; + + wl->raw_fw_status = new_status; + wl->fw_status_len = fw_status_len; + } + return 0; } diff --git a/drivers/net/wireless/ti/wl18xx/tx.c b/drivers/net/wireless/ti/wl18xx/tx.c index 55d9b0861c53..beef393853ef 100644 --- a/drivers/net/wireless/ti/wl18xx/tx.c +++ b/drivers/net/wireless/ti/wl18xx/tx.c @@ -129,6 +129,14 @@ static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte) wl1271_free_tx_id(wl, id); } +static u8 wl18xx_next_tx_idx(u8 idx) +{ + if (++idx >= WL18XX_FW_MAX_TX_STATUS_DESC) + idx = 0; + + return idx; +} + void wl18xx_tx_immediate_complete(struct wl1271 *wl) { struct wl18xx_fw_status_priv *status_priv = @@ -161,9 +169,8 @@ void wl18xx_tx_immediate_complete(struct wl1271 *wl) return; } - for (i = priv->last_fw_rls_idx; - i != status_priv->fw_release_idx; - i = (i + 1) % WL18XX_FW_MAX_TX_STATUS_DESC) { + for (i = priv->last_fw_rls_idx; i != status_priv->fw_release_idx; + i = wl18xx_next_tx_idx(i)) { wl18xx_tx_complete_packet(wl, 
status_priv->released_tx_desc[i]); diff --git a/drivers/net/wireless/ti/wl18xx/wl18xx.h b/drivers/net/wireless/ti/wl18xx/wl18xx.h index b642e0c437bb..de6c671c4be6 100644 --- a/drivers/net/wireless/ti/wl18xx/wl18xx.h +++ b/drivers/net/wireless/ti/wl18xx/wl18xx.h @@ -13,7 +13,7 @@ /* minimum FW required for driver */ #define WL18XX_CHIP_VER 8 #define WL18XX_IFTYPE_VER 9 -#define WL18XX_MAJOR_VER WLCORE_FW_VER_IGNORE +#define WL18XX_MAJOR_VER 0 #define WL18XX_SUBTYPE_VER WLCORE_FW_VER_IGNORE #define WL18XX_MINOR_VER 58 @@ -155,6 +155,66 @@ struct wl18xx_fw_status { struct wl18xx_fw_status_priv priv; } __packed; +struct wl18xx_fw_packet_counters_8_9_1 { + /* Cumulative counter of released packets per AC */ + u8 tx_released_pkts[NUM_TX_QUEUES]; + + /* Cumulative counter of freed packets per HLID */ + u8 tx_lnk_free_pkts[WL18XX_MAX_LINKS]; + + /* PN16 of last TKIP/AES seq-num per HLID */ + __le16 tx_lnk_sec_pn16[WL18XX_MAX_LINKS]; + + /* Cumulative counter of released Voice memory blocks */ + u8 tx_voice_released_blks; + + /* Tx rate of the last transmitted packet */ + u8 tx_last_rate; + + /* Tx rate or Tx rate estimate pre-calculated by fw in mbps units */ + u8 tx_last_rate_mbps; + + /* hlid for which the rates were reported */ + u8 hlid; +} __packed; + +/* FW status registers */ +struct wl18xx_fw_status_8_9_1 { + __le32 intr; + u8 fw_rx_counter; + u8 drv_rx_counter; + u8 reserved; + u8 tx_results_counter; + __le32 rx_pkt_descs[WL18XX_NUM_RX_DESCRIPTORS]; + + __le32 fw_localtime; + + /* + * A bitmap (where each bit represents a single HLID) + * to indicate if the station is in PS mode. + */ + __le32 link_ps_bitmap; + + /* + * A bitmap (where each bit represents a single HLID) to indicate + * if the station is in Fast mode + */ + __le32 link_fast_bitmap; + + /* Cumulative counter of total released mem blocks since FW-reset */ + __le32 total_released_blks; + + /* Size (in Memory Blocks) of TX pool */ + __le32 tx_total; + + struct wl18xx_fw_packet_counters_8_9_1 counters; + + __le32 log_start_addr; + + /* Private status to be used by the lower drivers */ + struct wl18xx_fw_status_priv priv; +} __packed; + #define WL18XX_PHY_VERSION_MAX_LEN 20 struct wl18xx_static_data_priv { diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c index 92fc2d456c2c..cd8ad0fe59cc 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.c +++ b/drivers/net/wireless/ti/wlcore/cmd.c @@ -332,6 +332,14 @@ int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid) wl->fw_status->counters.tx_lnk_free_pkts[link]; wl->links[link].wlvif = wlvif; + /* + * Take the last sec_pn16 value from the current FW status. On recovery, + * we might not have fw_status yet, and tx_lnk_sec_pn16[] will be NULL. 
+ */ + if (wl->fw_status->counters.tx_lnk_sec_pn16) + wl->links[link].prev_sec_pn16 = + le16_to_cpu(wl->fw_status->counters.tx_lnk_sec_pn16[link]); + /* * Take saved value for total freed packets from wlvif, in case this is * recovery/resume @@ -360,6 +368,7 @@ void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid) wl->links[*hlid].allocated_pkts = 0; wl->links[*hlid].prev_freed_pkts = 0; + wl->links[*hlid].prev_sec_pn16 = 0; wl->links[*hlid].ba_bitmap = 0; eth_zero_addr(wl->links[*hlid].addr); diff --git a/drivers/net/wireless/ti/wlcore/event.c b/drivers/net/wireless/ti/wlcore/event.c index 2499dc908305..6c3a8ea9613e 100644 --- a/drivers/net/wireless/ti/wlcore/event.c +++ b/drivers/net/wireless/ti/wlcore/event.c @@ -83,7 +83,7 @@ int wlcore_event_fw_logger(struct wl1271 *wl) /* Copy initial part up to the end of ring buffer */ len = min(actual_len, available_len); wl12xx_copy_fwlog(wl, &buffer[start_loc], len); - clear_ptr = addr_ptr + start_loc + actual_len; + clear_ptr = addr_ptr + start_loc + len; if (clear_ptr == buff_end_ptr) clear_ptr = buff_start_ptr; diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 492cd7aef44f..0c77b8524160 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -379,6 +379,8 @@ static void wl12xx_irq_update_links_status(struct wl1271 *wl, static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status) { + struct wl12xx_vif *wlvifsta; + struct wl12xx_vif *wlvifap; struct wl12xx_vif *wlvif; u32 old_tx_blk_count = wl->tx_blocks_available; int avail, freed_blocks; @@ -392,7 +394,7 @@ static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status) if (ret < 0) return ret; - wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status); + wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, status); wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " "drv_rx_counter = %d, tx_results_counter = %d)", @@ -410,23 +412,100 @@ static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status) wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i]; } + /* Find an authorized STA vif */ + wlvifsta = NULL; + wl12xx_for_each_wlvif_sta(wl, wlvif) { + if (wlvif->sta.hlid != WL12XX_INVALID_LINK_ID && + test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags)) { + wlvifsta = wlvif; + break; + } + } + + /* Find a started AP vif */ + wlvifap = NULL; + wl12xx_for_each_wlvif(wl, wlvif) { + if (wlvif->bss_type == BSS_TYPE_AP_BSS && + wlvif->inconn_count == 0 && + test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) { + wlvifap = wlvif; + break; + } + } for_each_set_bit(i, wl->links_map, wl->num_links) { - u8 diff; + u16 diff16, sec_pn16; + u8 diff, tx_lnk_free_pkts; + lnk = &wl->links[i]; /* prevent wrap-around in freed-packets counter */ - diff = (status->counters.tx_lnk_free_pkts[i] - - lnk->prev_freed_pkts) & 0xff; + tx_lnk_free_pkts = status->counters.tx_lnk_free_pkts[i]; + diff = (tx_lnk_free_pkts - lnk->prev_freed_pkts) & 0xff; - if (diff == 0) + if (diff) { + lnk->allocated_pkts -= diff; + lnk->prev_freed_pkts = tx_lnk_free_pkts; + } + + /* Get the current sec_pn16 value if present */ + if (status->counters.tx_lnk_sec_pn16) + sec_pn16 = __le16_to_cpu(status->counters.tx_lnk_sec_pn16[i]); + else + sec_pn16 = 0; + /* prevent wrap-around in pn16 counter */ + diff16 = (sec_pn16 - lnk->prev_sec_pn16) & 0xffff; + + /* FIXME: since free_pkts is an 8-bit counter of packets that + * rolls over, it can become zero. If it is zero, then we + * omit processing below. Is that really correct? + */
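The wrap-around handling above is plain modular arithmetic: subtracting two snapshots of a free-running 8-bit (or 16-bit) counter and masking to the counter width recovers the number of events in between, provided fewer than 2^8 (or 2^16) events occurred between the two reads. A stand-alone sketch of the 8-bit case, with hypothetical values and not part of the patch itself:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* hypothetical snapshots of a counter that wrapped past 255 */
            uint8_t prev = 250, cur = 4;
            /* modular subtraction recovers the true delta, here 10 */
            uint8_t diff = (uint8_t)(cur - prev);

            printf("freed since last status: %u\n", diff);
            return 0;
    }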
+ if (tx_lnk_free_pkts <= 0) continue; - lnk->allocated_pkts -= diff; - lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i]; + /* For a station that has an authorized link: */ + if (wlvifsta && wlvifsta->sta.hlid == i) { + if (wlvifsta->encryption_type == KEY_TKIP || + wlvifsta->encryption_type == KEY_AES) { + if (diff16) { + lnk->prev_sec_pn16 = sec_pn16; + /* accumulate the prev_freed_pkts + * counter according to the PN from + * firmware + */ + lnk->total_freed_pkts += diff16; + } + } else { + if (diff) + /* accumulate the prev_freed_pkts + * counter according to the free packets + * count from firmware + */ + lnk->total_freed_pkts += diff; + } + } - /* accumulate the prev_freed_pkts counter */ - lnk->total_freed_pkts += diff; + /* For an AP that has been started */ + if (wlvifap && test_bit(i, wlvifap->ap.sta_hlid_map)) { + if (wlvifap->encryption_type == KEY_TKIP || + wlvifap->encryption_type == KEY_AES) { + if (diff16) { + lnk->prev_sec_pn16 = sec_pn16; + /* accumulate the prev_freed_pkts + * counter according to the PN from + * firmware + */ + lnk->total_freed_pkts += diff16; + } + } else { + if (diff) + /* accumulate the prev_freed_pkts + * counter according to the free packets + * count from firmware + */ + lnk->total_freed_pkts += diff; + } + } } /* prevent wrap-around in total blocks counter */ @@ -2006,7 +2085,7 @@ static void wlcore_op_stop_locked(struct wl1271 *wl) memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last)); } -static void wlcore_op_stop(struct ieee80211_hw *hw) +static void wlcore_op_stop(struct ieee80211_hw *hw, bool suspend) { struct wl1271 *wl = hw->priv; @@ -3537,6 +3616,10 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd, return ret; } + /* Store AP encryption key type */ + if (wlvif->bss_type == BSS_TYPE_AP_BSS) + wlvif->encryption_type = key_type; + /* * reconfiguring arp response if the unicast (or common) * encryption key type was changed diff --git a/drivers/net/wireless/ti/wlcore/wlcore_i.h b/drivers/net/wireless/ti/wlcore/wlcore_i.h index 817a8a61cac6..5bdcb341629c 100644 --- a/drivers/net/wireless/ti/wlcore/wlcore_i.h +++ b/drivers/net/wireless/ti/wlcore/wlcore_i.h @@ -151,6 +151,9 @@ struct wl_fw_status { */ u8 *tx_lnk_free_pkts; + /* PN16 of last TKIP/AES seq-num per HLID */ + __le16 *tx_lnk_sec_pn16; + /* Cumulative counter of released Voice memory blocks */ u8 tx_voice_released_blks; @@ -259,6 +262,7 @@ struct wl1271_link { /* accounting for allocated / freed packets in FW */ u8 allocated_pkts; u8 prev_freed_pkts; + u16 prev_sec_pn16; u8 addr[ETH_ALEN]; diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.c b/drivers/net/wireless/virtual/mac80211_hwsim.c index c5d896994e70..714e1f04b0cb 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.c +++ b/drivers/net/wireless/virtual/mac80211_hwsim.c @@ -69,6 +69,10 @@ static bool mlo; module_param(mlo, bool, 0444); MODULE_PARM_DESC(mlo, "Support MLO"); +static bool multi_radio; +module_param(multi_radio, bool, 0444); +MODULE_PARM_DESC(multi_radio, "Support Multiple Radios per wiphy"); + /** * enum hwsim_regtest - the type of regulatory tests we offer * @@ -669,6 +673,10 @@ struct mac80211_hwsim_data { struct ieee80211_iface_limit if_limits[3]; int n_if_limits; + struct ieee80211_iface_combination if_combination_radio; + struct wiphy_radio_freq_range radio_range[NUM_NL80211_BANDS]; + struct wiphy_radio radio[NUM_NL80211_BANDS]; + u32 ciphers[ARRAY_SIZE(hwsim_ciphers)]; struct mac_address
addresses[2]; @@ -917,6 +925,7 @@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = { [HWSIM_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG }, [HWSIM_ATTR_PMSR_SUPPORT] = NLA_POLICY_NESTED(hwsim_pmsr_capa_policy), [HWSIM_ATTR_PMSR_RESULT] = NLA_POLICY_NESTED(hwsim_pmsr_peers_result_policy), + [HWSIM_ATTR_MULTI_RADIO] = { .type = NLA_FLAG }, }; #if IS_REACHABLE(CONFIG_VIRTIO) @@ -2098,7 +2107,7 @@ static int mac80211_hwsim_start(struct ieee80211_hw *hw) } -static void mac80211_hwsim_stop(struct ieee80211_hw *hw) +static void mac80211_hwsim_stop(struct ieee80211_hw *hw, bool suspend) { struct mac80211_hwsim_data *data = hw->priv; int i; @@ -2361,6 +2370,7 @@ static const char * const hwsim_chanwidths[] = { [NL80211_CHAN_WIDTH_4] = "4MHz", [NL80211_CHAN_WIDTH_8] = "8MHz", [NL80211_CHAN_WIDTH_16] = "16MHz", + [NL80211_CHAN_WIDTH_320] = "eht320", }; static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed) @@ -3260,7 +3270,7 @@ static int mac80211_hwsim_switch_vif_chanctx(struct ieee80211_hw *hw, hwsim_clear_chanctx_magic(vifs[i].old_ctx); break; default: - WARN_ON("Invalid mode"); + WARN(1, "Invalid mode %d\n", mode); } } return 0; @@ -4017,6 +4027,7 @@ struct hwsim_new_radio_params { bool reg_strict; bool p2p_device; bool use_chanctx; + bool multi_radio; bool destroy_on_close; const char *hwname; bool no_vif; @@ -4093,6 +4104,12 @@ static int append_radio_msg(struct sk_buff *skb, int id, return ret; } + if (param->multi_radio) { + ret = nla_put_flag(skb, HWSIM_ATTR_MULTI_RADIO); + if (ret < 0) + return ret; + } + if (param->hwname) { ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME, strlen(param->hwname), param->hwname); @@ -5113,6 +5130,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, struct net *net; int idx, i; int n_limits = 0; + int n_bands = 0; if (WARN_ON(param->channels > 1 && !param->use_chanctx)) return -EINVAL; @@ -5216,22 +5234,22 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, n_limits++; } + data->if_combination.radar_detect_widths = + BIT(NL80211_CHAN_WIDTH_5) | + BIT(NL80211_CHAN_WIDTH_10) | + BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80) | + BIT(NL80211_CHAN_WIDTH_160); + if (data->use_chanctx) { hw->wiphy->max_scan_ssids = 255; hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; hw->wiphy->max_remain_on_channel_duration = 1000; - data->if_combination.radar_detect_widths = 0; data->if_combination.num_different_channels = data->channels; } else { data->if_combination.num_different_channels = 1; - data->if_combination.radar_detect_widths = - BIT(NL80211_CHAN_WIDTH_5) | - BIT(NL80211_CHAN_WIDTH_10) | - BIT(NL80211_CHAN_WIDTH_20_NOHT) | - BIT(NL80211_CHAN_WIDTH_20) | - BIT(NL80211_CHAN_WIDTH_40) | - BIT(NL80211_CHAN_WIDTH_80) | - BIT(NL80211_CHAN_WIDTH_160); } if (!n_limits) { @@ -5349,6 +5367,9 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband = &data->bands[band]; + struct wiphy_radio_freq_range *radio_range; + const struct ieee80211_channel *c; + struct wiphy_radio *radio; sband->band = band; @@ -5422,8 +5443,36 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, mac80211_hwsim_sband_capab(sband); hw->wiphy->bands[band] = sband; + + if (!param->multi_radio) + continue; + + c = sband->channels; + radio_range = &data->radio_range[n_bands]; + radio_range->start_freq = ieee80211_channel_to_khz(c) - 10000; + + c += 
sband->n_channels - 1; + radio_range->end_freq = ieee80211_channel_to_khz(c) + 10000; + + radio = &data->radio[n_bands++]; + radio->freq_range = radio_range; + radio->n_freq_range = 1; + radio->iface_combinations = &data->if_combination_radio; + radio->n_iface_combinations = 1; } + if (param->multi_radio) { + hw->wiphy->radio = data->radio; + hw->wiphy->n_radio = n_bands; + + memcpy(&data->if_combination_radio, &data->if_combination, + sizeof(data->if_combination)); + data->if_combination.num_different_channels *= n_bands; + } + + if (data->use_chanctx) + data->if_combination.radar_detect_widths = 0; + /* By default all radios belong to the first group */ data->group = 1; mutex_init(&data->mutex); @@ -6041,6 +6090,9 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) else param.use_chanctx = (param.channels > 1); + if (info->attrs[HWSIM_ATTR_MULTI_RADIO]) + param.multi_radio = true; + if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]) param.reg_alpha2 = nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]); @@ -6121,7 +6173,7 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) param.mlo = info->attrs[HWSIM_ATTR_MLO_SUPPORT]; - if (param.mlo) + if (param.mlo || param.multi_radio) param.use_chanctx = true; if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { @@ -6814,7 +6866,8 @@ static int __init init_mac80211_hwsim(void) param.p2p_device = support_p2p_device; param.mlo = mlo; - param.use_chanctx = channels > 1 || mlo; + param.multi_radio = multi_radio; + param.use_chanctx = channels > 1 || mlo || multi_radio; param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK; if (param.p2p_device) param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE); diff --git a/drivers/net/wireless/virtual/mac80211_hwsim.h b/drivers/net/wireless/virtual/mac80211_hwsim.h index 21b1afd83dc1..f32fc3a492b0 100644 --- a/drivers/net/wireless/virtual/mac80211_hwsim.h +++ b/drivers/net/wireless/virtual/mac80211_hwsim.h @@ -157,6 +157,9 @@ enum hwsim_commands { * to provide details about peer measurement request (nl80211_peer_measurement_attrs) * @HWSIM_ATTR_PMSR_RESULT: nested attributed used with %HWSIM_CMD_REPORT_PMSR * to provide peer measurement result (nl80211_peer_measurement_attrs) + * @HWSIM_ATTR_MULTI_RADIO: Register multiple wiphy radios (flag). + * Adds one radio for each band. Number of supported channels will be set for + * each radio instead of for the wiphy. * @__HWSIM_ATTR_MAX: enum limit */ enum hwsim_attrs { @@ -189,6 +192,7 @@ enum hwsim_attrs { HWSIM_ATTR_PMSR_SUPPORT, HWSIM_ATTR_PMSR_REQUEST, HWSIM_ATTR_PMSR_RESULT, + HWSIM_ATTR_MULTI_RADIO, __HWSIM_ATTR_MAX, }; #define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1) @@ -257,7 +261,7 @@ enum hwsim_tx_rate_flags { }; /** - * struct hwsim_tx_rate - rate selection/status + * struct hwsim_tx_rate_flag - rate selection/status * * @idx: rate index to attempt to send with * @flags: the rate flags according to &enum hwsim_tx_rate_flags @@ -295,7 +299,7 @@ enum hwsim_vqs { }; /** - * enum hwsim_rate_info -- bitrate information. + * enum hwsim_rate_info_attributes - bitrate information. * * Information about a receiving or transmitting bitrate * that can be mapped to struct rate_info diff --git a/drivers/net/wireless/virtual/virt_wifi.c b/drivers/net/wireless/virtual/virt_wifi.c index 6a84ec58d618..4ee374080466 100644 --- a/drivers/net/wireless/virtual/virt_wifi.c +++ b/drivers/net/wireless/virtual/virt_wifi.c @@ -136,6 +136,9 @@ static struct ieee80211_supported_band band_5ghz = { /* Assigned at module init. Guaranteed locally-administered and unicast. 
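In the mac80211_hwsim hunks above, each supported band is registered as one wiphy radio whose usable range is padded by 10000 kHz (10 MHz, half of a 20 MHz channel) below the first channel and above the last one. A stand-alone sketch of that arithmetic for a hypothetical 2.4 GHz band (values invented, not part of the patch):

    #include <stdio.h>

    int main(void)
    {
            /* hypothetical first/last channel center frequencies, in MHz */
            int first_mhz = 2412, last_mhz = 2462;
            /* ieee80211_channel_to_khz() works in kHz; pad by 10 MHz */
            int start_khz = first_mhz * 1000 - 10000;
            int end_khz = last_mhz * 1000 + 10000;

            printf("radio range: %d..%d kHz\n", start_khz, end_khz);
            return 0;
    }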
*/ static u8 fake_router_bssid[ETH_ALEN] __ro_after_init = {}; +#define VIRT_WIFI_SSID "VirtWifi" +#define VIRT_WIFI_SSID_LEN 8 + static void virt_wifi_inform_bss(struct wiphy *wiphy) { u64 tsf = div_u64(ktime_get_boottime_ns(), 1000); @@ -146,8 +149,8 @@ static void virt_wifi_inform_bss(struct wiphy *wiphy) u8 ssid[8]; } __packed ssid = { .tag = WLAN_EID_SSID, - .len = 8, - .ssid = "VirtWifi", + .len = VIRT_WIFI_SSID_LEN, + .ssid = VIRT_WIFI_SSID, }; informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz, @@ -213,6 +216,8 @@ struct virt_wifi_netdev_priv { struct net_device *upperdev; u32 tx_packets; u32 tx_failed; + u32 connect_requested_ssid_len; + u8 connect_requested_ssid[IEEE80211_MAX_SSID_LEN]; u8 connect_requested_bss[ETH_ALEN]; bool is_up; bool is_connected; @@ -229,6 +234,12 @@ static int virt_wifi_connect(struct wiphy *wiphy, struct net_device *netdev, if (priv->being_deleted || !priv->is_up) return -EBUSY; + if (!sme->ssid) + return -EINVAL; + + priv->connect_requested_ssid_len = sme->ssid_len; + memcpy(priv->connect_requested_ssid, sme->ssid, sme->ssid_len); + could_schedule = schedule_delayed_work(&priv->connect, HZ * 2); if (!could_schedule) return -EBUSY; @@ -252,12 +263,15 @@ static void virt_wifi_connect_complete(struct work_struct *work) container_of(work, struct virt_wifi_netdev_priv, connect.work); u8 *requested_bss = priv->connect_requested_bss; bool right_addr = ether_addr_equal(requested_bss, fake_router_bssid); + bool right_ssid = priv->connect_requested_ssid_len == VIRT_WIFI_SSID_LEN && + !memcmp(priv->connect_requested_ssid, VIRT_WIFI_SSID, + priv->connect_requested_ssid_len); u16 status = WLAN_STATUS_SUCCESS; if (is_zero_ether_addr(requested_bss)) requested_bss = NULL; - if (!priv->is_up || (requested_bss && !right_addr)) + if (!priv->is_up || (requested_bss && !right_addr) || !right_ssid) status = WLAN_STATUS_UNSPECIFIED_FAILURE; else priv->is_connected = true; diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c index 900c063bd724..f90c33d19b39 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.c @@ -326,7 +326,7 @@ out: return r; } -void zd_op_stop(struct ieee80211_hw *hw) +void zd_op_stop(struct ieee80211_hw *hw, bool suspend) { struct zd_mac *mac = zd_hw_mac(hw); struct zd_chip *chip = &mac->chip; diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_mac.h b/drivers/net/wireless/zydas/zd1211rw/zd_mac.h index 5ff84bdc5a4c..053748a474ec 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_mac.h +++ b/drivers/net/wireless/zydas/zd1211rw/zd_mac.h @@ -303,7 +303,7 @@ void zd_mac_tx_failed(struct urb *urb); void zd_mac_tx_to_dev(struct sk_buff *skb, int error); int zd_op_start(struct ieee80211_hw *hw); -void zd_op_stop(struct ieee80211_hw *hw); +void zd_op_stop(struct ieee80211_hw *hw, bool suspend); int zd_restore_settings(struct zd_mac *mac); #ifdef DEBUG diff --git a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c index f3b567a13ded..a8a94edf2a70 100644 --- a/drivers/net/wireless/zydas/zd1211rw/zd_usb.c +++ b/drivers/net/wireless/zydas/zd1211rw/zd_usb.c @@ -1476,7 +1476,7 @@ static void zd_usb_stop(struct zd_usb *usb) { dev_dbg_f(zd_usb_dev(usb), "\n"); - zd_op_stop(zd_usb_to_hw(usb)); + zd_op_stop(zd_usb_to_hw(usb), false); zd_usb_disable_tx(usb); zd_usb_disable_rx(usb); @@ -1698,7 +1698,7 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values, int r, i, req_len, actual_req_len, try_count = 0; struct usb_device 
*udev; struct usb_req_read_regs *req = NULL; - unsigned long timeout; + unsigned long time_left; bool retry = false; if (count < 1) { @@ -1748,9 +1748,9 @@ retry_read: goto error; } - timeout = wait_for_completion_timeout(&usb->intr.read_regs.completion, - msecs_to_jiffies(50)); - if (!timeout) { + time_left = wait_for_completion_timeout(&usb->intr.read_regs.completion, + msecs_to_jiffies(50)); + if (!time_left) { disable_read_regs_int(usb); dev_dbg_f(zd_usb_dev(usb), "read timed out\n"); r = -ETIMEDOUT; diff --git a/drivers/nfc/microread/i2c.c b/drivers/nfc/microread/i2c.c index 642df4e0ce24..113b2e306e35 100644 --- a/drivers/nfc/microread/i2c.c +++ b/drivers/nfc/microread/i2c.c @@ -277,7 +277,7 @@ static void microread_i2c_remove(struct i2c_client *client) } static const struct i2c_device_id microread_i2c_id[] = { - { MICROREAD_I2C_DRIVER_NAME, 0}, + { MICROREAD_I2C_DRIVER_NAME }, { } }; MODULE_DEVICE_TABLE(i2c, microread_i2c_id); diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c index 74553134c1b1..39ecf2aeda80 100644 --- a/drivers/nfc/nfcmrvl/i2c.c +++ b/drivers/nfc/nfcmrvl/i2c.c @@ -252,7 +252,7 @@ static const struct of_device_id of_nfcmrvl_i2c_match[] __maybe_unused = { MODULE_DEVICE_TABLE(of, of_nfcmrvl_i2c_match); static const struct i2c_device_id nfcmrvl_i2c_id_table[] = { - { "nfcmrvl_i2c", 0 }, + { "nfcmrvl_i2c" }, {} }; MODULE_DEVICE_TABLE(i2c, nfcmrvl_i2c_id_table); diff --git a/drivers/nfc/nxp-nci/i2c.c b/drivers/nfc/nxp-nci/i2c.c index 3ae4b41c59ac..a8aced0b8010 100644 --- a/drivers/nfc/nxp-nci/i2c.c +++ b/drivers/nfc/nxp-nci/i2c.c @@ -322,7 +322,7 @@ static void nxp_nci_i2c_remove(struct i2c_client *client) } static const struct i2c_device_id nxp_nci_i2c_id_table[] = { - {"nxp-nci_i2c", 0}, + { "nxp-nci_i2c" }, {} }; MODULE_DEVICE_TABLE(i2c, nxp_nci_i2c_id_table); diff --git a/drivers/nfc/pn533/i2c.c b/drivers/nfc/pn533/i2c.c index 438ab9553f7a..132c050a365d 100644 --- a/drivers/nfc/pn533/i2c.c +++ b/drivers/nfc/pn533/i2c.c @@ -249,7 +249,7 @@ static const struct of_device_id of_pn533_i2c_match[] __maybe_unused = { MODULE_DEVICE_TABLE(of, of_pn533_i2c_match); static const struct i2c_device_id pn533_i2c_id_table[] = { - { PN533_I2C_DRIVER_NAME, 0 }, + { PN533_I2C_DRIVER_NAME }, {} }; MODULE_DEVICE_TABLE(i2c, pn533_i2c_id_table); diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c index 3f6d74832bac..9fe664960b38 100644 --- a/drivers/nfc/pn544/i2c.c +++ b/drivers/nfc/pn544/i2c.c @@ -44,7 +44,7 @@ PN544_HCI_I2C_LLC_MAX_PAYLOAD) static const struct i2c_device_id pn544_hci_i2c_id_table[] = { - {"pn544", 0}, + { "pn544" }, {} }; diff --git a/drivers/nfc/s3fwrn5/i2c.c b/drivers/nfc/s3fwrn5/i2c.c index 720d4a72493c..536c566e3f59 100644 --- a/drivers/nfc/s3fwrn5/i2c.c +++ b/drivers/nfc/s3fwrn5/i2c.c @@ -245,7 +245,7 @@ static void s3fwrn5_i2c_remove(struct i2c_client *client) } static const struct i2c_device_id s3fwrn5_i2c_id_table[] = { - {S3FWRN5_I2C_DRIVER_NAME, 0}, + { S3FWRN5_I2C_DRIVER_NAME }, {} }; MODULE_DEVICE_TABLE(i2c, s3fwrn5_i2c_id_table); diff --git a/drivers/nfc/st-nci/i2c.c b/drivers/nfc/st-nci/i2c.c index d20a337e90b4..416770adbeba 100644 --- a/drivers/nfc/st-nci/i2c.c +++ b/drivers/nfc/st-nci/i2c.c @@ -257,7 +257,7 @@ static void st_nci_i2c_remove(struct i2c_client *client) } static const struct i2c_device_id st_nci_i2c_id_table[] = { - {ST_NCI_DRIVER_NAME, 0}, + { ST_NCI_DRIVER_NAME }, {} }; MODULE_DEVICE_TABLE(i2c, st_nci_i2c_id_table); diff --git a/drivers/nfc/st21nfca/i2c.c b/drivers/nfc/st21nfca/i2c.c index 
064a63db288b..02c3d11a19c4 100644 --- a/drivers/nfc/st21nfca/i2c.c +++ b/drivers/nfc/st21nfca/i2c.c @@ -573,7 +573,7 @@ static void st21nfca_hci_i2c_remove(struct i2c_client *client) } static const struct i2c_device_id st21nfca_hci_i2c_id_table[] = { - {ST21NFCA_HCI_DRIVER_NAME, 0}, + { ST21NFCA_HCI_DRIVER_NAME }, {} }; MODULE_DEVICE_TABLE(i2c, st21nfca_hci_i2c_id_table); diff --git a/drivers/ptp/ptp_ines.c b/drivers/ptp/ptp_ines.c index 385643f3f8fe..e6f7d2bf8dde 100644 --- a/drivers/ptp/ptp_ines.c +++ b/drivers/ptp/ptp_ines.c @@ -556,7 +556,7 @@ static bool ines_timestamp_expired(struct ines_timestamp *ts) } static int ines_ts_info(struct mii_timestamper *mii_ts, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 25d4e6376591..88db8378325a 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0+ /* - * Linux for S/390 Lan Channel Station Network Driver + * Linux for S/390 LAN channel station device driver * * Copyright IBM Corp. 1999, 2009 * Author(s): Original Code written by @@ -2380,5 +2380,6 @@ module_init(lcs_init_module); module_exit(lcs_cleanup_module); MODULE_AUTHOR("Frank Pavlic "); +MODULE_DESCRIPTION("S/390 LAN channel station device driver"); MODULE_LICENSE("GPL"); diff --git a/drivers/s390/net/qeth_ethtool.c b/drivers/s390/net/qeth_ethtool.c index c1caf7734c3e..f184c58ecf24 100644 --- a/drivers/s390/net/qeth_ethtool.c +++ b/drivers/s390/net/qeth_ethtool.c @@ -247,7 +247,7 @@ static int qeth_set_channels(struct net_device *dev, } static int qeth_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { struct qeth_card *card = dev->ml_priv; diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig index fcec6ed83d5e..a1e0bc8c1757 100644 --- a/drivers/soc/fsl/Kconfig +++ b/drivers/soc/fsl/Kconfig @@ -22,7 +22,7 @@ config FSL_GUTS config FSL_MC_DPIO tristate "QorIQ DPAA2 DPIO driver" - depends on FSL_MC_BUS + depends on FSL_MC_BUS && NET select SOC_BUS select FSL_GUTS select DIMLIB diff --git a/drivers/soc/fsl/qbman/Kconfig b/drivers/soc/fsl/qbman/Kconfig index bdecb86bb656..27774ec6ff90 100644 --- a/drivers/soc/fsl/qbman/Kconfig +++ b/drivers/soc/fsl/qbman/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only menuconfig FSL_DPAA bool "QorIQ DPAA1 framework support" - depends on ((FSL_SOC_BOOKE || ARCH_LAYERSCAPE) && ARCH_DMA_ADDR_T_64BIT) + depends on ((FSL_SOC_BOOKE || ARCH_LAYERSCAPE || COMPILE_TEST) && ARCH_DMA_ADDR_T_64BIT) select GENERIC_ALLOCATOR help The Freescale Data Path Acceleration Architecture (DPAA) is a set of diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index 283804b49e91..3ff8103366c1 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -1339,7 +1339,7 @@ err_free_rings: return ret; } -static void vnt_stop(struct ieee80211_hw *hw) +static void vnt_stop(struct ieee80211_hw *hw, bool suspend) { struct vnt_private *priv = hw->priv; diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 7bbed462f062..4f09e733e7a8 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -613,7 +613,7 @@ err: return ret; } -static void vnt_stop(struct ieee80211_hw *hw) +static void vnt_stop(struct ieee80211_hw *hw, bool suspend) { struct vnt_private *priv = 
hw->priv; int i; diff --git a/fs/verity/measure.c b/fs/verity/measure.c index 3969d54158d1..175d2f1bc089 100644 --- a/fs/verity/measure.c +++ b/fs/verity/measure.c @@ -111,14 +111,15 @@ __bpf_kfunc_start_defs(); /** * bpf_get_fsverity_digest: read fsverity digest of file * @file: file to get digest from - * @digest_ptr: (out) dynptr for struct fsverity_digest + * @digest_p: (out) dynptr for struct fsverity_digest * * Read fsverity_digest of *file* into *digest_p*. * * Return: 0 on success, a negative value on error. */ -__bpf_kfunc int bpf_get_fsverity_digest(struct file *file, struct bpf_dynptr_kern *digest_ptr) +__bpf_kfunc int bpf_get_fsverity_digest(struct file *file, struct bpf_dynptr *digest_p) { + struct bpf_dynptr_kern *digest_ptr = (struct bpf_dynptr_kern *)digest_p; const struct inode *inode = file_inode(file); u32 dynptr_sz = __bpf_dynptr_size(digest_ptr); struct fsverity_digest *arg; diff --git a/include/linux/auxiliary_bus.h b/include/linux/auxiliary_bus.h index de21d9d24a95..3ba4487c9cd9 100644 --- a/include/linux/auxiliary_bus.h +++ b/include/linux/auxiliary_bus.h @@ -58,6 +58,9 @@ * in * @name: Match name found by the auxiliary device driver, * @id: unique identifier if multiple devices of the same name are exported, + * @irqs: irqs xarray contains irq indices which are used by the device, + * @lock: Synchronize irq sysfs creation, + * @irq_dir_exists: whether "irqs" directory exists, * * An auxiliary_device represents a part of its parent device's functionality. * It is given a name that, combined with the registering drivers @@ -139,6 +142,11 @@ struct auxiliary_device { struct device dev; const char *name; u32 id; + struct { + struct xarray irqs; + struct mutex lock; /* Synchronize irq sysfs creation */ + bool irq_dir_exists; + } sysfs; }; /** @@ -212,8 +220,24 @@ int auxiliary_device_init(struct auxiliary_device *auxdev); int __auxiliary_device_add(struct auxiliary_device *auxdev, const char *modname); #define auxiliary_device_add(auxdev) __auxiliary_device_add(auxdev, KBUILD_MODNAME) +#ifdef CONFIG_SYSFS +int auxiliary_device_sysfs_irq_add(struct auxiliary_device *auxdev, int irq); +void auxiliary_device_sysfs_irq_remove(struct auxiliary_device *auxdev, + int irq); +#else /* CONFIG_SYSFS */ +static inline int +auxiliary_device_sysfs_irq_add(struct auxiliary_device *auxdev, int irq) +{ + return 0; +} + +static inline void +auxiliary_device_sysfs_irq_remove(struct auxiliary_device *auxdev, int irq) {} +#endif + static inline void auxiliary_device_uninit(struct auxiliary_device *auxdev) { + mutex_destroy(&auxdev->sysfs.lock); put_device(&auxdev->dev); } diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 5e694a308081..4f1d4a97b9d1 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -1612,6 +1612,7 @@ struct bpf_link_ops { struct bpf_link_info *info); int (*update_map)(struct bpf_link *link, struct bpf_map *new_map, struct bpf_map *old_map); + __poll_t (*poll)(struct file *file, struct poll_table_struct *pts); }; struct bpf_tramp_link { @@ -1730,9 +1731,9 @@ struct bpf_struct_ops { int (*init_member)(const struct btf_type *t, const struct btf_member *member, void *kdata, const void *udata); - int (*reg)(void *kdata); - void (*unreg)(void *kdata); - int (*update)(void *kdata, void *old_kdata); + int (*reg)(void *kdata, struct bpf_link *link); + void (*unreg)(void *kdata, struct bpf_link *link); + int (*update)(void *kdata, void *old_kdata, struct bpf_link *link); int (*validate)(void *kdata); void *cfi_stubs; struct module *owner; @@ -2333,6 +2334,7 @@ 
int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); int bpf_link_settle(struct bpf_link_primer *primer); void bpf_link_cleanup(struct bpf_link_primer *primer); void bpf_link_inc(struct bpf_link *link); +struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link); void bpf_link_put(struct bpf_link *link); int bpf_link_new_fd(struct bpf_link *link); struct bpf_link *bpf_link_get_from_fd(u32 ufd); @@ -2492,7 +2494,7 @@ struct sk_buff; struct bpf_dtab_netdev; struct bpf_cpu_map_entry; -void __dev_flush(void); +void __dev_flush(struct list_head *flush_list); int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf, struct net_device *dev_rx); int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf, @@ -2505,7 +2507,7 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, struct bpf_prog *xdp_prog, struct bpf_map *map, bool exclude_ingress); -void __cpu_map_flush(void); +void __cpu_map_flush(struct list_head *flush_list); int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf, struct net_device *dev_rx); int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, @@ -2642,8 +2644,6 @@ void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr); void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr); -bool dev_check_flush(void); -bool cpu_map_check_flush(void); #else /* !CONFIG_BPF_SYSCALL */ static inline struct bpf_prog *bpf_prog_get(u32 ufd) { @@ -2704,6 +2704,11 @@ static inline void bpf_link_inc(struct bpf_link *link) { } +static inline struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link) +{ + return NULL; +} + static inline void bpf_link_put(struct bpf_link *link) { } @@ -2731,7 +2736,7 @@ static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd) return ERR_PTR(-EOPNOTSUPP); } -static inline void __dev_flush(void) +static inline void __dev_flush(struct list_head *flush_list) { } @@ -2777,7 +2782,7 @@ int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, return 0; } -static inline void __cpu_map_flush(void) +static inline void __cpu_map_flush(struct list_head *flush_list) { } @@ -2926,8 +2931,7 @@ bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) return ret; } -void __bpf_free_used_btfs(struct bpf_prog_aux *aux, - struct btf_mod_pair *used_btfs, u32 len); +void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len); static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type) @@ -3258,8 +3262,8 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, struct bpf_insn *insn_buf, struct bpf_prog *prog, u32 *target_size); -int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, - struct bpf_dynptr_kern *ptr); +int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags, + struct bpf_dynptr *ptr); #else static inline bool bpf_sock_common_is_valid_access(int off, int size, enum bpf_access_type type, @@ -3281,8 +3285,8 @@ static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, { return 0; } -static inline int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, - struct bpf_dynptr_kern *ptr) +static inline int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags, + struct bpf_dynptr *ptr) { return -EOPNOTSUPP; } diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index e4070fb02b11..6503c85b10a3 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -73,7 +73,10 @@ enum 
bpf_iter_state { struct bpf_reg_state { /* Ordering of fields matters. See states_equal() */ enum bpf_reg_type type; - /* Fixed part of pointer offset, pointer types only */ + /* + * Fixed part of pointer offset, pointer types only. + * Or constant delta between "linked" scalars with the same ID. + */ s32 off; union { /* valid when type == PTR_TO_PACKET */ @@ -167,6 +170,13 @@ struct bpf_reg_state { * Similarly to dynptrs, we use ID to track "belonging" of a reference * to a specific instance of bpf_iter. */ + /* + * Upper bit of ID is used to remember relationship between "linked" + * registers. Example: + * r1 = r2; both will have r1->id == r2->id == N + * r1 += 10; r1->id == N | BPF_ADD_CONST and r1->off == 10 + */ +#define BPF_ADD_CONST (1U << 31) u32 id; /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned * from a pointer-cast helper, bpf_sk_fullsock() and @@ -846,7 +856,7 @@ static inline u32 type_flag(u32 type) /* only use after check_attach_btf_id() */ static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog) { - return prog->type == BPF_PROG_TYPE_EXT ? + return (prog->type == BPF_PROG_TYPE_EXT && prog->aux->dst_prog) ? prog->aux->dst_prog->type : prog->type; } diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 1394ba302367..028b3e00378e 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h @@ -271,12 +271,100 @@ #define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ #define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */ +/* BroadR-Reach LRE Registers. */ +#define MII_BCM54XX_LRECR 0x00 /* LRE Control Register */ +#define MII_BCM54XX_LRESR 0x01 /* LRE Status Register */ +#define MII_BCM54XX_LREPHYSID1 0x02 /* LRE PHYS ID 1 */ +#define MII_BCM54XX_LREPHYSID2 0x03 /* LRE PHYS ID 2 */ +#define MII_BCM54XX_LREANAA 0x04 /* LDS Auto-Negotiation Advertised Ability */ +#define MII_BCM54XX_LREANAC 0x05 /* LDS Auto-Negotiation Advertised Control */ +#define MII_BCM54XX_LREANPT 0x06 /* LDS Ability Next Page Transmit */ +#define MII_BCM54XX_LRELPA 0x07 /* LDS Link Partner Ability */ +#define MII_BCM54XX_LRELPNPM 0x08 /* LDS Link Partner Next Page Message */ +#define MII_BCM54XX_LRELPNPC 0x09 /* LDS Link Partner Next Page Control */ +#define MII_BCM54XX_LRELDSE 0x0a /* LDS Expansion Register */ +#define MII_BCM54XX_LREES 0x0f /* LRE Extended Status */ + +/* LRE control register. */ +#define LRECR_RESET 0x8000 /* Reset to default state */ +#define LRECR_LOOPBACK 0x4000 /* Internal Loopback */ +#define LRECR_LDSRES 0x2000 /* Restart LDS Process */ +#define LRECR_LDSEN 0x1000 /* LDS Enable */ +#define LRECR_PDOWN 0x0800 /* Enable low power state */ +#define LRECR_ISOLATE 0x0400 /* Isolate data paths from MII */ +#define LRECR_SPEED100 0x0200 /* Select 100 Mbps */ +#define LRECR_SPEED10 0x0000 /* Select 10 Mbps */ +#define LRECR_4PAIRS 0x0020 /* Select 4 Pairs */ +#define LRECR_2PAIRS 0x0010 /* Select 2 Pairs */ +#define LRECR_1PAIR 0x0000 /* Select 1 Pair */ +#define LRECR_MASTER 0x0008 /* Force Master when LDS disabled */ +#define LRECR_SLAVE 0x0000 /* Force Slave when LDS disabled */ + +/* LRE status register. */ +#define LRESR_100_1PAIR 0x2000 /* Can do 100Mbps 1 Pair */ +#define LRESR_100_4PAIR 0x1000 /* Can do 100Mbps 4 Pairs */ +#define LRESR_100_2PAIR 0x0800 /* Can do 100Mbps 2 Pairs */ +#define LRESR_10_2PAIR 0x0400 /* Can do 10Mbps 2 Pairs */ +#define LRESR_10_1PAIR 0x0200 /* Can do 10Mbps 1 Pair */ +#define LRESR_ESTATEN 0x0100 /* Extended Status in R15 */ +#define LRESR_RESV 0x0080 /* Unused... 
*/ +#define LRESR_MFPS 0x0040 /* Can suppress Management Frames Preamble */ +#define LRESR_LDSCOMPLETE 0x0020 /* LDS Auto-negotiation complete */ +#define LRESR_8023 0x0010 /* Has IEEE 802.3 Support */ +#define LRESR_LDSABILITY 0x0008 /* LDS auto-negotiation capable */ +#define LRESR_LSTATUS 0x0004 /* Link status */ +#define LRESR_JCD 0x0002 /* Jabber detected */ +#define LRESR_ERCAP 0x0001 /* Ext-reg capability */ + +/* LDS Auto-Negotiation Advertised Ability. */ +#define LREANAA_PAUSE_ASYM 0x8000 /* Can pause asymmetrically */ +#define LREANAA_PAUSE 0x4000 /* Can pause */ +#define LREANAA_100_1PAIR 0x0020 /* Can do 100Mbps 1 Pair */ +#define LREANAA_100_4PAIR 0x0010 /* Can do 100Mbps 4 Pair */ +#define LREANAA_100_2PAIR 0x0008 /* Can do 100Mbps 2 Pair */ +#define LREANAA_10_2PAIR 0x0004 /* Can do 10Mbps 2 Pair */ +#define LREANAA_10_1PAIR 0x0002 /* Can do 10Mbps 1 Pair */ + +#define LRE_ADVERTISE_FULL (LREANAA_100_1PAIR | LREANAA_100_4PAIR | \ + LREANAA_100_2PAIR | LREANAA_10_2PAIR | \ + LREANAA_10_1PAIR) + +#define LRE_ADVERTISE_ALL LRE_ADVERTISE_FULL + +/* LDS Link Partner Ability. */ +#define LRELPA_PAUSE_ASYM 0x8000 /* Supports asymmetric pause */ +#define LRELPA_PAUSE 0x4000 /* Supports pause capability */ +#define LRELPA_100_1PAIR 0x0020 /* 100Mbps 1 Pair capable */ +#define LRELPA_100_4PAIR 0x0010 /* 100Mbps 4 Pair capable */ +#define LRELPA_100_2PAIR 0x0008 /* 100Mbps 2 Pair capable */ +#define LRELPA_10_2PAIR 0x0004 /* 10Mbps 2 Pair capable */ +#define LRELPA_10_1PAIR 0x0002 /* 10Mbps 1 Pair capable */ + +/* LDS Expansion register. */ +#define LDSE_DOWNGRADE 0x8000 /* Can do LDS Speed Downgrade */ +#define LDSE_MASTER 0x4000 /* Master / Slave */ +#define LDSE_PAIRS_MASK 0x3000 /* Pair Count Mask */ +#define LDSE_PAIRS_SHIFT 12 +#define LDSE_4PAIRS (2 << LDSE_PAIRS_SHIFT) /* 4 Pairs Connection */ +#define LDSE_2PAIRS (1 << LDSE_PAIRS_SHIFT) /* 2 Pairs Connection */ +#define LDSE_1PAIR (0 << LDSE_PAIRS_SHIFT) /* 1 Pair Connection */ +#define LDSE_CABLEN_MASK 0x0FFF /* Cable Length Mask */ + /* BCM54810 Registers */ #define BCM54810_EXP_BROADREACH_LRE_MISC_CTL (MII_BCM54XX_EXP_SEL_ER + 0x90) #define BCM54810_EXP_BROADREACH_LRE_MISC_CTL_EN (1 << 0) #define BCM54810_SHD_CLK_CTL 0x3 #define BCM54810_SHD_CLK_CTL_GTXCLK_EN (1 << 9) +/* BCM54811 Registers */ +#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL (MII_BCM54XX_EXP_SEL_ER + 0x9A) +/* Access Control Override Enable */ +#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_EN BIT(15) +/* Access Control Override Value */ +#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_OVERRIDE_VAL BIT(14) +/* Access Control Value */ +#define BCM54811_EXP_BROADREACH_LRE_OVERLAY_CTL_VAL BIT(13) + /* BCM54612E Registers */ #define BCM54612E_EXP_SPARE0 (MII_BCM54XX_EXP_SEL_ETC + 0x34) #define BCM54612E_LED4_CLK125OUT_EN (1 << 1) diff --git a/include/linux/btf.h b/include/linux/btf.h index 7c3e40c3295e..cffb43133c68 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -140,6 +140,7 @@ extern const struct file_operations btf_fops; const char *btf_get_name(const struct btf *btf); void btf_get(struct btf *btf); void btf_put(struct btf *btf); +const struct btf_header *btf_header(const struct btf *btf); int btf_new_fd(const union bpf_attr *attr, bpfptr_t uattr, u32 uattr_sz); struct btf *btf_get_by_fd(int fd); int btf_get_info_by_fd(const struct btf *btf, @@ -212,8 +213,10 @@ int btf_get_fd_by_id(u32 id); u32 btf_obj_id(const struct btf *btf); bool btf_is_kernel(const struct btf *btf); bool btf_is_module(const struct btf *btf); +bool btf_is_vmlinux(const 
struct btf *btf); struct module *btf_try_get_module(const struct btf *btf); u32 btf_nr_types(const struct btf *btf); +struct btf *btf_base_btf(const struct btf *btf); bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, const struct btf_member *m, u32 expected_offset, u32 expected_size); @@ -339,6 +342,11 @@ static inline u8 btf_int_offset(const struct btf_type *t) return BTF_INT_OFFSET(*(u32 *)(t + 1)); } +static inline __u8 btf_int_bits(const struct btf_type *t) +{ + return BTF_INT_BITS(*(__u32 *)(t + 1)); +} + static inline bool btf_type_is_scalar(const struct btf_type *t) { return btf_type_is_int(t) || btf_type_is_enum(t); @@ -478,6 +486,11 @@ static inline struct btf_param *btf_params(const struct btf_type *t) return (struct btf_param *)(t + 1); } +static inline struct btf_decl_tag *btf_decl_tag(const struct btf_type *t) +{ + return (struct btf_decl_tag *)(t + 1); +} + static inline int btf_id_cmp_func(const void *a, const void *b) { const int *pa = a, *pb = b; @@ -515,9 +528,38 @@ static inline const struct bpf_struct_ops_desc *bpf_struct_ops_find(struct btf * } #endif +enum btf_field_iter_kind { + BTF_FIELD_ITER_IDS, + BTF_FIELD_ITER_STRS, +}; + +struct btf_field_desc { + /* once-per-type offsets */ + int t_off_cnt, t_offs[2]; + /* member struct size, or zero, if no members */ + int m_sz; + /* repeated per-member offsets */ + int m_off_cnt, m_offs[1]; +}; + +struct btf_field_iter { + struct btf_field_desc desc; + void *p; + int m_idx; + int off_idx; + int vlen; +}; + #ifdef CONFIG_BPF_SYSCALL const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); +void btf_set_base_btf(struct btf *btf, const struct btf *base_btf); +int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **map_ids); +int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, + enum btf_field_iter_kind iter_kind); +__u32 *btf_field_iter_next(struct btf_field_iter *it); + const char *btf_name_by_offset(const struct btf *btf, u32 offset); +const char *btf_str_by_offset(const struct btf *btf, u32 offset); struct btf *btf_parse_vmlinux(void); struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog); u32 *btf_kfunc_id_set_contains(const struct btf *btf, u32 kfunc_btf_id, @@ -531,6 +573,7 @@ s32 btf_find_dtor_kfunc(struct btf *btf, u32 btf_id); int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_cnt, struct module *owner); struct btf_struct_meta *btf_find_struct_meta(const struct btf *btf, u32 btf_id); +bool btf_is_projection_of(const char *pname, const char *tname); bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf, const struct btf_type *t, enum bpf_prog_type prog_type, int arg); @@ -543,6 +586,28 @@ static inline const struct btf_type *btf_type_by_id(const struct btf *btf, { return NULL; } + +static inline void btf_set_base_btf(struct btf *btf, const struct btf *base_btf) +{ +} + +static inline int btf_relocate(struct btf *btf, const struct btf *base_btf, + __u32 **map_ids) +{ + return -EOPNOTSUPP; +} + +static inline int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, + enum btf_field_iter_kind iter_kind) +{ + return -EOPNOTSUPP; +} + +static inline __u32 *btf_field_iter_next(struct btf_field_iter *it) +{ + return NULL; +} + static inline const char *btf_name_by_offset(const struct btf *btf, u32 offset) { diff --git a/include/linux/cache.h b/include/linux/cache.h index 0ecb17bb6883..ca2a05682a54 100644 --- a/include/linux/cache.h +++ b/include/linux/cache.h @@ 
-13,6 +13,32 @@ #define SMP_CACHE_BYTES L1_CACHE_BYTES #endif +/** + * SMP_CACHE_ALIGN - align a value to the L2 cacheline size + * @x: value to align + * + * On some architectures, L2 ("SMP") CL size is bigger than L1, and sometimes, + * this needs to be accounted for. + * + * Return: aligned value. + */ +#ifndef SMP_CACHE_ALIGN +#define SMP_CACHE_ALIGN(x) ALIGN(x, SMP_CACHE_BYTES) +#endif + +/* + * ``__aligned_largest`` aligns a field to the value most optimal for the + * target architecture to perform memory operations. Get the actual value + * to be able to use it anywhere else. + */ +#ifndef __LARGEST_ALIGN +#define __LARGEST_ALIGN sizeof(struct { long x; } __aligned_largest) +#endif + +#ifndef LARGEST_ALIGN +#define LARGEST_ALIGN(x) ALIGN(x, __LARGEST_ALIGN) +#endif + /* * __read_mostly is used to keep rarely changing variables out of frequently * updated cachelines. Its use should be reserved for data that is used @@ -95,6 +121,39 @@ __u8 __cacheline_group_end__##GROUP[0] #endif +/** + * __cacheline_group_begin_aligned - declare an aligned group start + * @GROUP: name of the group + * @...: optional group alignment + * + * The following block inside a struct: + * + * __cacheline_group_begin_aligned(grp); + * field a; + * field b; + * __cacheline_group_end_aligned(grp); + * + * will always be aligned to either the specified alignment or + * ``SMP_CACHE_BYTES``. + */ +#define __cacheline_group_begin_aligned(GROUP, ...) \ + __cacheline_group_begin(GROUP) \ + __aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES) + +/** + * __cacheline_group_end_aligned - declare an aligned group end + * @GROUP: name of the group + * @...: optional alignment (same as was in __cacheline_group_begin_aligned()) + * + * Note that the end marker is aligned to sizeof(long) to allow more precise + * size assertion. It also declares a padding at the end to avoid the next + * field falling into this cacheline. + */ +#define __cacheline_group_end_aligned(GROUP, ...) \ + __cacheline_group_end(GROUP) __aligned(sizeof(long)); \ + struct { } __cacheline_group_pad__##GROUP \ + __aligned((__VA_ARGS__ + 0) ? : SMP_CACHE_BYTES) + #ifndef CACHELINE_ASSERT_GROUP_MEMBER #define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \ BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \ diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 1b92aed49363..23492213ea35 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h @@ -186,7 +186,7 @@ void close_candev(struct net_device *dev); int can_change_mtu(struct net_device *dev, int new_mtu); int can_eth_ioctl_hwts(struct net_device *netdev, struct ifreq *ifr, int cmd); int can_ethtool_op_get_ts_info_hwts(struct net_device *dev, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); int register_candev(struct net_device *dev); void unregister_candev(struct net_device *dev); diff --git a/include/linux/dim.h b/include/linux/dim.h index f343bc9aa2ec..1b581ff25a15 100644 --- a/include/linux/dim.h +++ b/include/linux/dim.h @@ -10,6 +10,15 @@ #include #include +struct net_device; + +/* Number of DIM profiles and period mode. */ +#define NET_DIM_PARAMS_NUM_PROFILES 5 +#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256 +#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128 +#define NET_DIM_DEF_PROFILE_CQE 1 +#define NET_DIM_DEF_PROFILE_EQE 1 + /* * Number of events between DIM iterations. * Causes a moderation of the algorithm run. 
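The kernel-doc above already gives the intended shape of an aligned cacheline group; written out as a complete structure it would look like the sketch below (the struct and field names are hypothetical, and <linux/cache.h> is assumed to be included):

    struct foo_queue {
            /* hot fields start on their own SMP_CACHE_BYTES boundary */
            __cacheline_group_begin_aligned(hot);
            unsigned int head;
            unsigned int tail;
            __cacheline_group_end_aligned(hot);

            /* the end marker pads, so this cannot share the hot cacheline */
            unsigned int ring_size;
    };

    static void foo_check(void)
    {
            /* compile-time check that a member sits inside the group */
            CACHELINE_ASSERT_GROUP_MEMBER(struct foo_queue, hot, tail);
    }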
@@ -38,12 +47,45 @@ * @pkts: CQ packet counter suggestion (by DIM) * @comps: Completion counter * @cq_period_mode: CQ period count mode (from CQE/EQE) + * @rcu: for asynchronous kfree_rcu */ struct dim_cq_moder { u16 usec; u16 pkts; u16 comps; u8 cq_period_mode; + struct rcu_head rcu; +}; + +#define DIM_PROFILE_RX BIT(0) /* support rx profile modification */ +#define DIM_PROFILE_TX BIT(1) /* support tx profile modification */ + +#define DIM_COALESCE_USEC BIT(0) /* support usec field modification */ +#define DIM_COALESCE_PKTS BIT(1) /* support pkts field modification */ +#define DIM_COALESCE_COMPS BIT(2) /* support comps field modification */ + +/** + * struct dim_irq_moder - Structure for irq moderation information. + * Used to collect irq moderation related information. + * + * @profile_flags: DIM_PROFILE_* + * @coal_flags: DIM_COALESCE_* for Rx and Tx + * @dim_rx_mode: Rx DIM period count mode: CQE or EQE + * @dim_tx_mode: Tx DIM period count mode: CQE or EQE + * @rx_profile: DIM profile list for Rx + * @tx_profile: DIM profile list for Tx + * @rx_dim_work: Rx DIM worker scheduled by net_dim() + * @tx_dim_work: Tx DIM worker scheduled by net_dim() + */ +struct dim_irq_moder { + u8 profile_flags; + u8 coal_flags; + u8 dim_rx_mode; + u8 dim_tx_mode; + struct dim_cq_moder __rcu *rx_profile; + struct dim_cq_moder __rcu *tx_profile; + void (*rx_dim_work)(struct work_struct *work); + void (*tx_dim_work)(struct work_struct *work); }; /** @@ -191,6 +233,77 @@ enum dim_step_result { DIM_ON_EDGE, }; +/** + * net_dim_init_irq_moder - collect information to initialize irq moderation + * @dev: target network device + * @profile_flags: Rx or Tx profile modification capability + * @coal_flags: irq moderation params flags + * @rx_mode: CQ period mode for Rx + * @tx_mode: CQ period mode for Tx + * @rx_dim_work: Rx worker called after dim decision + * @tx_dim_work: Tx worker called after dim decision + * + * Return: 0 on success or a negative error code. 
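The prototype this kernel-doc documents follows just below; here is a probe-time usage sketch for a hypothetical driver (the foo_* names are invented, DIM_CQ_PERIOD_MODE_START_FROM_EQE comes from the existing enum dim_cq_period_mode, and a NULL Tx handler is assumed to be acceptable when DIM_PROFILE_TX is not requested):

    static void foo_rx_dim_work(struct work_struct *work)
    {
            struct dim *dim = container_of(work, struct dim, work);

            /* apply dim->profile_ix to the device here, then rearm */
            dim->state = DIM_START_MEASURE;
    }

    static int foo_probe_dim(struct net_device *dev)
    {
            /* Rx profiles only; let usec and pkts be edited */
            return net_dim_init_irq_moder(dev, DIM_PROFILE_RX,
                                          DIM_COALESCE_USEC | DIM_COALESCE_PKTS,
                                          DIM_CQ_PERIOD_MODE_START_FROM_EQE,
                                          DIM_CQ_PERIOD_MODE_START_FROM_EQE,
                                          foo_rx_dim_work, NULL);
    }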
+ */ +int net_dim_init_irq_moder(struct net_device *dev, u8 profile_flags, + u8 coal_flags, u8 rx_mode, u8 tx_mode, + void (*rx_dim_work)(struct work_struct *work), + void (*tx_dim_work)(struct work_struct *work)); + +/** + * net_dim_free_irq_moder - free fields for irq moderation + * @dev: target network device + */ +void net_dim_free_irq_moder(struct net_device *dev); + +/** + * net_dim_setting - initialize DIM's cq mode and schedule worker + * @dev: target network device + * @dim: DIM context + * @is_tx: true indicates the tx direction, false indicates the rx direction + */ +void net_dim_setting(struct net_device *dev, struct dim *dim, bool is_tx); + +/** + * net_dim_work_cancel - synchronously cancel dim's worker + * @dim: DIM context + */ +void net_dim_work_cancel(struct dim *dim); + +/** + * net_dim_get_rx_irq_moder - get DIM rx results based on profile_ix + * @dev: target network device + * @dim: DIM context + * + * Return: DIM irq moderation + */ +struct dim_cq_moder +net_dim_get_rx_irq_moder(struct net_device *dev, struct dim *dim); + +/** + * net_dim_get_tx_irq_moder - get DIM tx results based on profile_ix + * @dev: target network device + * @dim: DIM context + * + * Return: DIM irq moderation + */ +struct dim_cq_moder +net_dim_get_tx_irq_moder(struct net_device *dev, struct dim *dim); + +/** + * net_dim_set_rx_mode - set DIM rx cq mode + * @dev: target network device + * @rx_mode: target rx cq mode + */ +void net_dim_set_rx_mode(struct net_device *dev, u8 rx_mode); + +/** + * net_dim_set_tx_mode - set DIM tx cq mode + * @dev: target network device + * @tx_mode: target tx cq mode + */ +void net_dim_set_tx_mode(struct net_device *dev, u8 tx_mode); + /** * dim_on_top - check if current state is a good place to stop (top location) * @dim: DIM context diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h index f3664ee12170..d13aabdeb4b2 100644 --- a/include/linux/dsa/8021q.h +++ b/include/linux/dsa/8021q.h @@ -8,12 +8,18 @@ #include #include +/* VBID is limited to three bits only and zero is reserved. + * Only 7 bridges can be enumerated. 
+ */ +#define DSA_TAG_8021Q_MAX_NUM_BRIDGES 7 + int dsa_tag_8021q_register(struct dsa_switch *ds, __be16 proto); void dsa_tag_8021q_unregister(struct dsa_switch *ds); int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port, - struct dsa_bridge bridge); + struct dsa_bridge bridge, bool *tx_fwd_offload, + struct netlink_ext_ack *extack); void dsa_tag_8021q_bridge_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge); diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h index b4f22112ba75..3ce7cbcc37a3 100644 --- a/include/linux/dsa/lan9303.h +++ b/include/linux/dsa/lan9303.h @@ -5,8 +5,8 @@ struct lan9303; struct lan9303_phy_ops { /* PHY 1 and 2 access*/ - int (*phy_read)(struct lan9303 *chip, int port, int regnum); - int (*phy_write)(struct lan9303 *chip, int port, + int (*phy_read)(struct lan9303 *chip, int addr, int regnum); + int (*phy_write)(struct lan9303 *chip, int addr, int regnum, u16 val); }; diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 6fd9107d3cc0..303fda54ef17 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -18,6 +18,7 @@ #include #include #include +#include struct compat_ethtool_rx_flow_spec { u32 flow_type; @@ -159,6 +160,49 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) return index % n_rx_rings; } +/** + * struct ethtool_rxfh_context - a custom RSS context configuration + * @indir_size: Number of u32 entries in indirection table + * @key_size: Size of hash key, in bytes + * @priv_size: Size of driver private data, in bytes + * @hfunc: RSS hash function identifier. One of the %ETH_RSS_HASH_* + * @input_xfrm: Defines how the input data is transformed. Valid values are one + * of %RXH_XFRM_*. + * @indir_configured: indir has been specified (at create time or subsequently) + * @key_configured: hkey has been specified (at create time or subsequently) + */ +struct ethtool_rxfh_context { + u32 indir_size; + u32 key_size; + u16 priv_size; + u8 hfunc; + u8 input_xfrm; + u8 indir_configured:1; + u8 key_configured:1; + /* private: driver private data, indirection table, and hash key are + * stored sequentially in @data area. Use below helpers to access. 
+ */ + u32 key_off; + u8 data[] __aligned(sizeof(void *)); +}; + +static inline void *ethtool_rxfh_context_priv(struct ethtool_rxfh_context *ctx) +{ + return ctx->data; +} + +static inline u32 *ethtool_rxfh_context_indir(struct ethtool_rxfh_context *ctx) +{ + return (u32 *)(ctx->data + ALIGN(ctx->priv_size, sizeof(u32))); +} + +static inline u8 *ethtool_rxfh_context_key(struct ethtool_rxfh_context *ctx) +{ + return &ctx->data[ctx->key_off]; +} + +void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id); + /* declare a link mode bitmap */ #define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \ DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS) @@ -284,7 +328,9 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, #define ETHTOOL_COALESCE_TX_AGGR_MAX_BYTES BIT(24) #define ETHTOOL_COALESCE_TX_AGGR_MAX_FRAMES BIT(25) #define ETHTOOL_COALESCE_TX_AGGR_TIME_USECS BIT(26) -#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(26, 0) +#define ETHTOOL_COALESCE_RX_PROFILE BIT(27) +#define ETHTOOL_COALESCE_TX_PROFILE BIT(28) +#define ETHTOOL_COALESCE_ALL_PARAMS GENMASK(28, 0) #define ETHTOOL_COALESCE_USECS \ (ETHTOOL_COALESCE_RX_USECS | ETHTOOL_COALESCE_TX_USECS) @@ -504,17 +550,16 @@ struct ethtool_ts_stats { #define ETH_MODULE_MAX_I2C_ADDRESS 0x7f /** - * struct ethtool_module_eeprom - EEPROM dump from specified page - * @offset: Offset within the specified EEPROM page to begin read, in bytes. - * @length: Number of bytes to read. - * @page: Page number to read from. - * @bank: Page bank number to read from, if applicable by EEPROM spec. + * struct ethtool_module_eeprom - plug-in module EEPROM read / write parameters + * @offset: When @offset is 0-127, it is used as an address to the Lower Memory + * (@page must be 0). Otherwise, it is used as an address to the + * Upper Memory. + * @length: Number of bytes to read / write. + * @page: Page number. + * @bank: Bank number, if supported by EEPROM spec. * @i2c_address: I2C address of a page. Value less than 0x7f expected. Most * EEPROMs use 0x50 or 0x51. * @data: Pointer to buffer with EEPROM data of @length size. - * - * This can be used to manage pages during EEPROM dump in ethtool and pass - * required information to the driver. */ struct ethtool_module_eeprom { u32 offset; @@ -661,6 +706,22 @@ struct ethtool_rxfh_param { u8 input_xfrm; }; +/** + * struct kernel_ethtool_ts_info - kernel copy of struct ethtool_ts_info + * @cmd: command number = %ETHTOOL_GET_TS_INFO + * @so_timestamping: bit mask of the sum of the supported SO_TIMESTAMPING flags + * @phc_index: device index of the associated PHC, or -1 if there is none + * @tx_types: bit mask of the supported hwtstamp_tx_types enumeration values + * @rx_filters: bit mask of the supported hwtstamp_rx_filters enumeration values + */ +struct kernel_ethtool_ts_info { + u32 cmd; + u32 so_timestamping; + int phc_index; + enum hwtstamp_tx_types tx_types; + enum hwtstamp_rx_filters rx_filters; +}; + /** * struct ethtool_ops - optional netdev operations * @cap_link_lanes_supported: indicates if the driver supports lanes @@ -669,6 +730,16 @@ struct ethtool_rxfh_param { * contexts. * @cap_rss_sym_xor_supported: indicates if the driver supports symmetric-xor * RSS. + * @rxfh_indir_space: max size of RSS indirection tables, if indirection table + * size as returned by @get_rxfh_indir_size may change during lifetime + * of the device. Leave as 0 if the table size is constant. + * @rxfh_key_space: same as @rxfh_indir_space, but for the key. 
+ * @rxfh_priv_size: size of the driver private data area the core should + * allocate for an RSS context (in &struct ethtool_rxfh_context). + * @rxfh_max_context_id: maximum (exclusive) supported RSS context ID. If this + * is zero then the core may choose any (nonzero) ID, otherwise the core + * will only use IDs strictly less than this value, as the @rss_context + * argument to @create_rxfh_context and friends. * @supported_coalesce_params: supported types of interrupt coalescing. * @supported_ring_params: supported ring params. * @get_drvinfo: Report driver/device information. Modern drivers no @@ -765,6 +836,32 @@ struct ethtool_rxfh_param { * will remain unchanged. * Returns a negative error code or zero. An error code must be returned * if at least one unsupported change was requested. + * @create_rxfh_context: Create a new RSS context with the specified RX flow + * hash indirection table, hash key, and hash function. + * The &struct ethtool_rxfh_context for this context is passed in @ctx; + * note that the indir table, hkey and hfunc are not yet populated as + * of this call. The driver does not need to update these; the core + * will do so if this op succeeds. + * However, if @rxfh.indir is set to %NULL, the driver must update the + * indir table in @ctx with the (default or inherited) table actually in + * use; similarly, if @rxfh.key is %NULL, @rxfh.hfunc is + * %ETH_RSS_HASH_NO_CHANGE, or @rxfh.input_xfrm is %RXH_XFRM_NO_CHANGE, + * the driver should update the corresponding information in @ctx. + * If the driver provides this method, it must also provide + * @modify_rxfh_context and @remove_rxfh_context. + * Returns a negative error code or zero. + * @modify_rxfh_context: Reconfigure the specified RSS context. Allows setting + * the contents of the RX flow hash indirection table, hash key, and/or + * hash function associated with the given context. + * Parameters which are set to %NULL or zero will remain unchanged. + * The &struct ethtool_rxfh_context for this context is passed in @ctx; + * note that it will still contain the *old* settings. The driver does + * not need to update these; the core will do so if this op succeeds. + * Returns a negative error code or zero. An error code must be returned + * if at least one unsupported change was requested. + * @remove_rxfh_context: Remove the specified RSS context. + * The &struct ethtool_rxfh_context for this context is passed in @ctx. + * Returns a negative error code or zero. * @get_channels: Get number of channels. * @set_channels: Set number of channels. Returns a negative error code or * zero. @@ -822,6 +919,8 @@ struct ethtool_rxfh_param { * @get_module_eeprom_by_page: Get a region of plug-in module EEPROM data from * specified page. Returns a negative error code or the amount of bytes * read. + * @set_module_eeprom_by_page: Write to a region of plug-in module EEPROM, + * from kernel space only. Returns a negative error code or zero. * @get_eth_phy_stats: Query some of the IEEE 802.3 PHY statistics. * @get_eth_mac_stats: Query some of the IEEE 802.3 MAC statistics. * @get_eth_ctrl_stats: Query some of the IEEE 802.3 MAC Ctrl statistics. 
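To make the new RSS-context plumbing concrete: the core allocates one &struct ethtool_rxfh_context per context, lays out the driver-private area, indirection table and key sequentially in its @data area, and hands it to the ops documented above. Below is a minimal sketch of what a @create_rxfh_context implementation could look like for a hypothetical driver; all foo_* identifiers are invented for illustration, while ethtool_rxfh_context_priv(), ethtool_rxfh_context_indir() and ethtool_rxfh_indir_default() are real helpers from this header:

    static int foo_create_rxfh_context(struct net_device *dev,
                                       struct ethtool_rxfh_context *ctx,
                                       const struct ethtool_rxfh_param *rxfh,
                                       struct netlink_ext_ack *extack)
    {
            struct foo_priv *fp = netdev_priv(dev);
            /* per-context driver state, sized by ethtool_ops::rxfh_priv_size */
            struct foo_rss *rss = ethtool_rxfh_context_priv(ctx);
            u32 *indir = ethtool_rxfh_context_indir(ctx);
            unsigned int i;
            int err;

            /* hypothetical HW hook: reserve a hardware RSS table for the
             * core-chosen ID in rxfh->rss_context (bounded by
             * ethtool_ops::rxfh_max_context_id when that is nonzero) */
            err = foo_hw_alloc_rss(fp, rxfh->rss_context, rss);
            if (err)
                    return err;

            if (rxfh->indir)
                    return foo_hw_write_indir(fp, rss, rxfh->indir);

            /* no table supplied: program defaults and, per the op contract
             * above, report the table actually in use back through @ctx */
            for (i = 0; i < ctx->indir_size; i++)
                    indir[i] = ethtool_rxfh_indir_default(i, fp->num_rx_rings);
            return foo_hw_write_indir(fp, rss, indir);
    }

On success the core copies the remaining settings from @rxfh into @ctx itself, so the driver only writes back what it derived on its own (here, the default indirection table).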
@@ -852,6 +951,10 @@ struct ethtool_ops { u32 cap_link_lanes_supported:1; u32 cap_rss_ctx_supported:1; u32 cap_rss_sym_xor_supported:1; + u32 rxfh_indir_space; + u16 rxfh_key_space; + u16 rxfh_priv_size; + u32 rxfh_max_context_id; u32 supported_coalesce_params; u32 supported_ring_params; void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); @@ -914,13 +1017,25 @@ struct ethtool_ops { int (*get_rxfh)(struct net_device *, struct ethtool_rxfh_param *); int (*set_rxfh)(struct net_device *, struct ethtool_rxfh_param *, struct netlink_ext_ack *extack); + int (*create_rxfh_context)(struct net_device *, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack); + int (*modify_rxfh_context)(struct net_device *, + struct ethtool_rxfh_context *ctx, + const struct ethtool_rxfh_param *rxfh, + struct netlink_ext_ack *extack); + int (*remove_rxfh_context)(struct net_device *, + struct ethtool_rxfh_context *ctx, + u32 rss_context, + struct netlink_ext_ack *extack); void (*get_channels)(struct net_device *, struct ethtool_channels *); int (*set_channels)(struct net_device *, struct ethtool_channels *); int (*get_dump_flag)(struct net_device *, struct ethtool_dump *); int (*get_dump_data)(struct net_device *, struct ethtool_dump *, void *); int (*set_dump)(struct net_device *, struct ethtool_dump *); - int (*get_ts_info)(struct net_device *, struct ethtool_ts_info *); + int (*get_ts_info)(struct net_device *, struct kernel_ethtool_ts_info *); void (*get_ts_stats)(struct net_device *dev, struct ethtool_ts_stats *ts_stats); int (*get_module_info)(struct net_device *, @@ -956,6 +1071,9 @@ struct ethtool_ops { int (*get_module_eeprom_by_page)(struct net_device *dev, const struct ethtool_module_eeprom *page, struct netlink_ext_ack *extack); + int (*set_module_eeprom_by_page)(struct net_device *dev, + const struct ethtool_module_eeprom *page, + struct netlink_ext_ack *extack); void (*get_eth_phy_stats)(struct net_device *dev, struct ethtool_eth_phy_stats *phy_stats); void (*get_eth_mac_stats)(struct net_device *dev, @@ -998,6 +1116,21 @@ int ethtool_virtdev_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd, u32 *dev_speed, u8 *dev_duplex); +/** + * struct ethtool_netdev_state - per-netdevice state for ethtool features + * @rss_ctx: XArray of custom RSS contexts + * @rss_lock: Protects entries in @rss_ctx. May be taken from + * within RTNL. + * @wol_enabled: Wake-on-LAN is enabled + * @module_fw_flash_in_progress: Module firmware flashing is in progress. + */ +struct ethtool_netdev_state { + struct xarray rss_ctx; + struct mutex rss_lock; + unsigned wol_enabled:1; + unsigned module_fw_flash_in_progress:1; +}; + struct phy_device; struct phy_tdr_config; struct phy_plca_cfg; @@ -1063,7 +1196,8 @@ int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index); /* Some generic methods drivers may use in their ethtool_ops */ u32 ethtool_op_get_link(struct net_device *dev); -int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *eti); +int ethtool_op_get_ts_info(struct net_device *dev, + struct kernel_ethtool_ts_info *eti); /** * ethtool_mm_frag_size_add_to_min - Translate (standard) additional fragment @@ -1112,7 +1246,8 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add, * @info: buffer to hold the result * Returns zero on success, non-zero otherwise. 
*/ -int ethtool_get_ts_info_by_layer(struct net_device *dev, struct ethtool_ts_info *info); +int ethtool_get_ts_info_by_layer(struct net_device *dev, + struct kernel_ethtool_ts_info *info); /** * ethtool_sprintf - Write formatted string to ethtool string data @@ -1155,4 +1290,24 @@ struct ethtool_forced_speed_map { void ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps, u32 size); + +/* C33 PSE extended state and substate. */ +struct ethtool_c33_pse_ext_state_info { + enum ethtool_c33_pse_ext_state c33_pse_ext_state; + union { + enum ethtool_c33_pse_ext_substate_error_condition error_condition; + enum ethtool_c33_pse_ext_substate_mr_pse_enable mr_pse_enable; + enum ethtool_c33_pse_ext_substate_option_detect_ted option_detect_ted; + enum ethtool_c33_pse_ext_substate_option_vport_lim option_vport_lim; + enum ethtool_c33_pse_ext_substate_ovld_detected ovld_detected; + enum ethtool_c33_pse_ext_substate_power_not_available power_not_available; + enum ethtool_c33_pse_ext_substate_short_detected short_detected; + u32 __c33_pse_ext_substate; + }; +}; + +struct ethtool_c33_pse_pw_limit_range { + u32 min; + u32 max; +}; #endif /* _LINUX_ETHTOOL_H */ diff --git a/include/linux/filter.h b/include/linux/filter.h index 5669da513cd7..b6672ff61407 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -733,21 +733,128 @@ struct bpf_nh_params { }; }; +/* flags for bpf_redirect_info kern_flags */ +#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */ +#define BPF_RI_F_RI_INIT BIT(1) +#define BPF_RI_F_CPU_MAP_INIT BIT(2) +#define BPF_RI_F_DEV_MAP_INIT BIT(3) +#define BPF_RI_F_XSK_MAP_INIT BIT(4) + struct bpf_redirect_info { u64 tgt_index; void *tgt_value; struct bpf_map *map; u32 flags; - u32 kern_flags; u32 map_id; enum bpf_map_type map_type; struct bpf_nh_params nh; + u32 kern_flags; }; -DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); +struct bpf_net_context { + struct bpf_redirect_info ri; + struct list_head cpu_map_flush_list; + struct list_head dev_map_flush_list; + struct list_head xskmap_map_flush_list; +}; -/* flags for bpf_redirect_info kern_flags */ -#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */ +static inline struct bpf_net_context *bpf_net_ctx_set(struct bpf_net_context *bpf_net_ctx) +{ + struct task_struct *tsk = current; + + if (tsk->bpf_net_context != NULL) + return NULL; + bpf_net_ctx->ri.kern_flags = 0; + + tsk->bpf_net_context = bpf_net_ctx; + return bpf_net_ctx; +} + +static inline void bpf_net_ctx_clear(struct bpf_net_context *bpf_net_ctx) +{ + if (bpf_net_ctx) + current->bpf_net_context = NULL; +} + +static inline struct bpf_net_context *bpf_net_ctx_get(void) +{ + return current->bpf_net_context; +} + +static inline struct bpf_redirect_info *bpf_net_ctx_get_ri(void) +{ + struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get(); + + if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_RI_INIT)) { + memset(&bpf_net_ctx->ri, 0, offsetof(struct bpf_net_context, ri.nh)); + bpf_net_ctx->ri.kern_flags |= BPF_RI_F_RI_INIT; + } + + return &bpf_net_ctx->ri; +} + +static inline struct list_head *bpf_net_ctx_get_cpu_map_flush_list(void) +{ + struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get(); + + if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_CPU_MAP_INIT)) { + INIT_LIST_HEAD(&bpf_net_ctx->cpu_map_flush_list); + bpf_net_ctx->ri.kern_flags |= BPF_RI_F_CPU_MAP_INIT; + } + + return &bpf_net_ctx->cpu_map_flush_list; +} + +static inline struct list_head *bpf_net_ctx_get_dev_flush_list(void) +{ + struct bpf_net_context 
*bpf_net_ctx = bpf_net_ctx_get(); + + if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_DEV_MAP_INIT)) { + INIT_LIST_HEAD(&bpf_net_ctx->dev_map_flush_list); + bpf_net_ctx->ri.kern_flags |= BPF_RI_F_DEV_MAP_INIT; + } + + return &bpf_net_ctx->dev_map_flush_list; +} + +static inline struct list_head *bpf_net_ctx_get_xskmap_flush_list(void) +{ + struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get(); + + if (!(bpf_net_ctx->ri.kern_flags & BPF_RI_F_XSK_MAP_INIT)) { + INIT_LIST_HEAD(&bpf_net_ctx->xskmap_map_flush_list); + bpf_net_ctx->ri.kern_flags |= BPF_RI_F_XSK_MAP_INIT; + } + + return &bpf_net_ctx->xskmap_map_flush_list; +} + +static inline void bpf_net_ctx_get_all_used_flush_lists(struct list_head **lh_map, + struct list_head **lh_dev, + struct list_head **lh_xsk) +{ + struct bpf_net_context *bpf_net_ctx = bpf_net_ctx_get(); + u32 kern_flags = bpf_net_ctx->ri.kern_flags; + struct list_head *lh; + + *lh_map = *lh_dev = *lh_xsk = NULL; + + if (!IS_ENABLED(CONFIG_BPF_SYSCALL)) + return; + + lh = &bpf_net_ctx->dev_map_flush_list; + if (kern_flags & BPF_RI_F_DEV_MAP_INIT && !list_empty(lh)) + *lh_dev = lh; + + lh = &bpf_net_ctx->cpu_map_flush_list; + if (kern_flags & BPF_RI_F_CPU_MAP_INIT && !list_empty(lh)) + *lh_map = lh; + + lh = &bpf_net_ctx->xskmap_map_flush_list; + if (IS_ENABLED(CONFIG_XDP_SOCKETS) && + kern_flags & BPF_RI_F_XSK_MAP_INIT && !list_empty(lh)) + *lh_xsk = lh; +} /* Compute the linear packet data range [data, data_end) which * will be accessed by various program types (cls_bpf, act_bpf, @@ -1018,25 +1125,23 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, const struct bpf_insn *patch, u32 len); int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt); -void bpf_clear_redirect_map(struct bpf_map *map); - static inline bool xdp_return_frame_no_direct(void) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT; } static inline void xdp_set_return_frame_no_direct(void) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT; } static inline void xdp_clear_return_frame_no_direct(void) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT; } @@ -1129,8 +1234,7 @@ bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **ro_image, struct bpf_binary_header **rw_hdr, u8 **rw_image, bpf_jit_fill_hole_t bpf_fill_ill_insns); -int bpf_jit_binary_pack_finalize(struct bpf_prog *prog, - struct bpf_binary_header *ro_header, +int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header, struct bpf_binary_header *rw_header); void bpf_jit_binary_pack_free(struct bpf_binary_header *ro_header, struct bpf_binary_header *rw_header); @@ -1406,7 +1510,7 @@ struct bpf_sock_ops_kern { struct bpf_sysctl_kern { struct ctl_table_header *head; - struct ctl_table *table; + const struct ctl_table *table; void *cur_val; size_t cur_len; void *new_val; @@ -1592,7 +1696,7 @@ static __always_inline long __bpf_xdp_redirect_map(struct bpf_map *map, u64 inde u64 flags, const u64 flag_mask, void *lookup_elem(struct bpf_map *map, u32 key)) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); const u64 action_mask = XDP_ABORTED | XDP_DROP | XDP_PASS | XDP_TX; /* Lower bits of the flags are used 
as return code on lookup failure */ diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index de2dce743ee2..30cef3b940eb 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h @@ -373,6 +373,7 @@ struct ieee80211_trigger { /** * ieee80211_has_tods - check if IEEE80211_FCTL_TODS is set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame has to-DS set */ static inline bool ieee80211_has_tods(__le16 fc) { @@ -382,6 +383,7 @@ static inline bool ieee80211_has_tods(__le16 fc) /** * ieee80211_has_fromds - check if IEEE80211_FCTL_FROMDS is set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame has from-DS set */ static inline bool ieee80211_has_fromds(__le16 fc) { @@ -391,6 +393,7 @@ static inline bool ieee80211_has_fromds(__le16 fc) /** * ieee80211_has_a4 - check if IEEE80211_FCTL_TODS and IEEE80211_FCTL_FROMDS are set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not it's a 4-address frame (from-DS and to-DS set) */ static inline bool ieee80211_has_a4(__le16 fc) { @@ -401,6 +404,7 @@ static inline bool ieee80211_has_a4(__le16 fc) /** * ieee80211_has_morefrags - check if IEEE80211_FCTL_MOREFRAGS is set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame has more fragments (more frags bit set) */ static inline bool ieee80211_has_morefrags(__le16 fc) { @@ -410,6 +414,7 @@ static inline bool ieee80211_has_morefrags(__le16 fc) /** * ieee80211_has_retry - check if IEEE80211_FCTL_RETRY is set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the retry flag is set */ static inline bool ieee80211_has_retry(__le16 fc) { @@ -419,6 +424,7 @@ static inline bool ieee80211_has_retry(__le16 fc) /** * ieee80211_has_pm - check if IEEE80211_FCTL_PM is set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the power management flag is set */ static inline bool ieee80211_has_pm(__le16 fc) { @@ -428,6 +434,7 @@ static inline bool ieee80211_has_pm(__le16 fc) /** * ieee80211_has_moredata - check if IEEE80211_FCTL_MOREDATA is set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the more data flag is set */ static inline bool ieee80211_has_moredata(__le16 fc) { @@ -437,6 +444,7 @@ static inline bool ieee80211_has_moredata(__le16 fc) /** * ieee80211_has_protected - check if IEEE80211_FCTL_PROTECTED is set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the protected flag is set */ static inline bool ieee80211_has_protected(__le16 fc) { @@ -446,6 +454,7 @@ static inline bool ieee80211_has_protected(__le16 fc) /** * ieee80211_has_order - check if IEEE80211_FCTL_ORDER is set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the order flag is set */ static inline bool ieee80211_has_order(__le16 fc) { @@ -455,6 +464,7 @@ static inline bool ieee80211_has_order(__le16 fc) /** * ieee80211_is_mgmt - check if type is IEEE80211_FTYPE_MGMT * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame type is management */ static inline bool ieee80211_is_mgmt(__le16 fc) { @@ -465,6 +475,7 @@ static inline bool ieee80211_is_mgmt(__le16 fc) /** * ieee80211_is_ctl - check if type is IEEE80211_FTYPE_CTL * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame type is control */ static inline bool ieee80211_is_ctl(__le16 fc) { @@ -475,6 +486,7 @@ 
static inline bool ieee80211_is_ctl(__le16 fc) /** * ieee80211_is_data - check if type is IEEE80211_FTYPE_DATA * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a data frame */ static inline bool ieee80211_is_data(__le16 fc) { @@ -485,6 +497,7 @@ static inline bool ieee80211_is_data(__le16 fc) /** * ieee80211_is_ext - check if type is IEEE80211_FTYPE_EXT * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame type is extended */ static inline bool ieee80211_is_ext(__le16 fc) { @@ -496,6 +509,7 @@ static inline bool ieee80211_is_ext(__le16 fc) /** * ieee80211_is_data_qos - check if type is IEEE80211_FTYPE_DATA and IEEE80211_STYPE_QOS_DATA is set * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a QoS data frame */ static inline bool ieee80211_is_data_qos(__le16 fc) { @@ -510,6 +524,8 @@ static inline bool ieee80211_is_data_qos(__le16 fc) /** * ieee80211_is_data_present - check if type is IEEE80211_FTYPE_DATA and has data * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a data frame that has data + * (i.e. is not null data) */ static inline bool ieee80211_is_data_present(__le16 fc) { @@ -524,6 +540,7 @@ static inline bool ieee80211_is_data_present(__le16 fc) /** * ieee80211_is_assoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_REQ * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is an association request */ static inline bool ieee80211_is_assoc_req(__le16 fc) { @@ -534,6 +551,7 @@ static inline bool ieee80211_is_assoc_req(__le16 fc) /** * ieee80211_is_assoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ASSOC_RESP * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is an association response */ static inline bool ieee80211_is_assoc_resp(__le16 fc) { @@ -544,6 +562,7 @@ static inline bool ieee80211_is_assoc_resp(__le16 fc) /** * ieee80211_is_reassoc_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_REQ * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a reassociation request */ static inline bool ieee80211_is_reassoc_req(__le16 fc) { @@ -554,6 +573,7 @@ static inline bool ieee80211_is_reassoc_req(__le16 fc) /** * ieee80211_is_reassoc_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_REASSOC_RESP * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a reassociation response */ static inline bool ieee80211_is_reassoc_resp(__le16 fc) { @@ -564,6 +584,7 @@ static inline bool ieee80211_is_reassoc_resp(__le16 fc) /** * ieee80211_is_probe_req - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_REQ * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a probe request */ static inline bool ieee80211_is_probe_req(__le16 fc) { @@ -574,6 +595,7 @@ static inline bool ieee80211_is_probe_req(__le16 fc) /** * ieee80211_is_probe_resp - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_PROBE_RESP * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a probe response */ static inline bool ieee80211_is_probe_resp(__le16 fc) { @@ -584,6 +606,7 @@ static inline bool ieee80211_is_probe_resp(__le16 fc) /** * ieee80211_is_beacon - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_BEACON * @fc: frame control bytes in little-endian byteorder + * Return: whether or 
not the frame is a (regular, not S1G) beacon */ static inline bool ieee80211_is_beacon(__le16 fc) { @@ -595,6 +618,7 @@ static inline bool ieee80211_is_beacon(__le16 fc) * ieee80211_is_s1g_beacon - check if IEEE80211_FTYPE_EXT && * IEEE80211_STYPE_S1G_BEACON * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is an S1G beacon */ static inline bool ieee80211_is_s1g_beacon(__le16 fc) { @@ -604,30 +628,21 @@ static inline bool ieee80211_is_s1g_beacon(__le16 fc) } /** - * ieee80211_next_tbtt_present - check if IEEE80211_FTYPE_EXT && - * IEEE80211_STYPE_S1G_BEACON && IEEE80211_S1G_BCN_NEXT_TBTT - * @fc: frame control bytes in little-endian byteorder - */ -static inline bool ieee80211_next_tbtt_present(__le16 fc) -{ - return (fc & cpu_to_le16(IEEE80211_FCTL_FTYPE | IEEE80211_FCTL_STYPE)) == - cpu_to_le16(IEEE80211_FTYPE_EXT | IEEE80211_STYPE_S1G_BEACON) && - fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT); -} - -/** - * ieee80211_is_s1g_short_beacon - check if next tbtt present bit is set. Only - * true for S1G beacons when they're short. + * ieee80211_is_s1g_short_beacon - check if frame is an S1G short beacon * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is an S1G short beacon, + * i.e. it is an S1G beacon with 'next TBTT' flag set */ static inline bool ieee80211_is_s1g_short_beacon(__le16 fc) { - return ieee80211_is_s1g_beacon(fc) && ieee80211_next_tbtt_present(fc); + return ieee80211_is_s1g_beacon(fc) && + (fc & cpu_to_le16(IEEE80211_S1G_BCN_NEXT_TBTT)); } /** * ieee80211_is_atim - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ATIM * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is an ATIM frame */ static inline bool ieee80211_is_atim(__le16 fc) { @@ -638,6 +653,7 @@ static inline bool ieee80211_is_atim(__le16 fc) /** * ieee80211_is_disassoc - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DISASSOC * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a disassociation frame */ static inline bool ieee80211_is_disassoc(__le16 fc) { @@ -648,6 +664,7 @@ static inline bool ieee80211_is_disassoc(__le16 fc) /** * ieee80211_is_auth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_AUTH * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is an authentication frame */ static inline bool ieee80211_is_auth(__le16 fc) { @@ -658,6 +675,7 @@ static inline bool ieee80211_is_auth(__le16 fc) /** * ieee80211_is_deauth - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_DEAUTH * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a deauthentication frame */ static inline bool ieee80211_is_deauth(__le16 fc) { @@ -668,6 +686,7 @@ static inline bool ieee80211_is_deauth(__le16 fc) /** * ieee80211_is_action - check if IEEE80211_FTYPE_MGMT && IEEE80211_STYPE_ACTION * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is an action frame */ static inline bool ieee80211_is_action(__le16 fc) { @@ -678,6 +697,7 @@ static inline bool ieee80211_is_action(__le16 fc) /** * ieee80211_is_back_req - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK_REQ * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a block-ACK request frame */ static inline bool ieee80211_is_back_req(__le16 fc) { @@ -688,6 +708,7 @@ static inline bool ieee80211_is_back_req(__le16 fc) /** * ieee80211_is_back - check if 
IEEE80211_FTYPE_CTL && IEEE80211_STYPE_BACK * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a block-ACK frame */ static inline bool ieee80211_is_back(__le16 fc) { @@ -698,6 +719,7 @@ static inline bool ieee80211_is_back(__le16 fc) /** * ieee80211_is_pspoll - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_PSPOLL * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a PS-poll frame */ static inline bool ieee80211_is_pspoll(__le16 fc) { @@ -708,6 +730,7 @@ static inline bool ieee80211_is_pspoll(__le16 fc) /** * ieee80211_is_rts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_RTS * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is an RTS frame */ static inline bool ieee80211_is_rts(__le16 fc) { @@ -718,6 +741,7 @@ static inline bool ieee80211_is_rts(__le16 fc) /** * ieee80211_is_cts - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CTS * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a CTS frame */ static inline bool ieee80211_is_cts(__le16 fc) { @@ -728,6 +752,7 @@ static inline bool ieee80211_is_cts(__le16 fc) /** * ieee80211_is_ack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_ACK * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is an ACK frame */ static inline bool ieee80211_is_ack(__le16 fc) { @@ -738,6 +763,7 @@ static inline bool ieee80211_is_ack(__le16 fc) /** * ieee80211_is_cfend - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFEND * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a CF-end frame */ static inline bool ieee80211_is_cfend(__le16 fc) { @@ -748,6 +774,7 @@ static inline bool ieee80211_is_cfend(__le16 fc) /** * ieee80211_is_cfendack - check if IEEE80211_FTYPE_CTL && IEEE80211_STYPE_CFENDACK * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a CF-end-ack frame */ static inline bool ieee80211_is_cfendack(__le16 fc) { @@ -758,6 +785,7 @@ static inline bool ieee80211_is_cfendack(__le16 fc) /** * ieee80211_is_nullfunc - check if frame is a regular (non-QoS) nullfunc frame * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a nullfunc frame */ static inline bool ieee80211_is_nullfunc(__le16 fc) { @@ -768,6 +796,7 @@ static inline bool ieee80211_is_nullfunc(__le16 fc) /** * ieee80211_is_qos_nullfunc - check if frame is a QoS nullfunc frame * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a QoS nullfunc frame */ static inline bool ieee80211_is_qos_nullfunc(__le16 fc) { @@ -778,6 +807,7 @@ static inline bool ieee80211_is_qos_nullfunc(__le16 fc) /** * ieee80211_is_trigger - check if frame is trigger frame * @fc: frame control field in little-endian byteorder + * Return: whether or not the frame is a trigger frame */ static inline bool ieee80211_is_trigger(__le16 fc) { @@ -788,6 +818,7 @@ static inline bool ieee80211_is_trigger(__le16 fc) /** * ieee80211_is_any_nullfunc - check if frame is regular or QoS nullfunc frame * @fc: frame control bytes in little-endian byteorder + * Return: whether or not the frame is a nullfunc or QoS nullfunc frame */ static inline bool ieee80211_is_any_nullfunc(__le16 fc) { @@ -797,6 +828,8 @@ static inline bool ieee80211_is_any_nullfunc(__le16 fc) /** * ieee80211_is_first_frag - check if IEEE80211_SCTL_FRAG is not set * @seq_ctrl: frame sequence control bytes in 
little-endian byteorder + * Return: whether or not the frame is the first fragment (also true if + * it's not fragmented at all) */ static inline bool ieee80211_is_first_frag(__le16 seq_ctrl) { @@ -806,6 +839,7 @@ static inline bool ieee80211_is_first_frag(__le16 seq_ctrl) /** * ieee80211_is_frag - check if a frame is a fragment * @hdr: 802.11 header of the frame + * Return: whether or not the frame is a fragment */ static inline bool ieee80211_is_frag(struct ieee80211_hdr *hdr) { @@ -1101,7 +1135,7 @@ enum ieee80211_vht_opmode_bits { }; /** - * enum ieee80211_s1g_chanwidth + * enum ieee80211_s1g_chanwidth - S1G channel widths * These are defined in IEEE802.11-2016ah Table 10-20 * as BSS Channel Width * @@ -2359,6 +2393,8 @@ struct ieee80211_eht_operation_info { * @max_vht_nss: current maximum NSS as advertised by the STA in * operating mode notification, can be 0 in which case the * capability data will be used to derive this (from MCS support) + * Return: The maximum NSS that can be used for the given bandwidth/MCS + * combination * * Due to the VHT Extended NSS Bandwidth Support, the maximum NSS can * vary for a given BW/MCS. This function parses the data. @@ -2370,44 +2406,6 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, int mcs, bool ext_nss_bw_capable, unsigned int max_vht_nss); -/** - * enum ieee80211_ap_reg_power - regulatory power for a Access Point - * - * @IEEE80211_REG_UNSET_AP: Access Point has no regulatory power mode - * @IEEE80211_REG_LPI_AP: Indoor Access Point - * @IEEE80211_REG_SP_AP: Standard power Access Point - * @IEEE80211_REG_VLP_AP: Very low power Access Point - * @IEEE80211_REG_AP_POWER_AFTER_LAST: internal - * @IEEE80211_REG_AP_POWER_MAX: maximum value - */ -enum ieee80211_ap_reg_power { - IEEE80211_REG_UNSET_AP, - IEEE80211_REG_LPI_AP, - IEEE80211_REG_SP_AP, - IEEE80211_REG_VLP_AP, - IEEE80211_REG_AP_POWER_AFTER_LAST, - IEEE80211_REG_AP_POWER_MAX = - IEEE80211_REG_AP_POWER_AFTER_LAST - 1, -}; - -/** - * enum ieee80211_client_reg_power - regulatory power for a client - * - * @IEEE80211_REG_UNSET_CLIENT: Client has no regulatory power mode - * @IEEE80211_REG_DEFAULT_CLIENT: Default Client - * @IEEE80211_REG_SUBORDINATE_CLIENT: Subordinate Client - * @IEEE80211_REG_CLIENT_POWER_AFTER_LAST: internal - * @IEEE80211_REG_CLIENT_POWER_MAX: maximum value - */ -enum ieee80211_client_reg_power { - IEEE80211_REG_UNSET_CLIENT, - IEEE80211_REG_DEFAULT_CLIENT, - IEEE80211_REG_SUBORDINATE_CLIENT, - IEEE80211_REG_CLIENT_POWER_AFTER_LAST, - IEEE80211_REG_CLIENT_POWER_MAX = - IEEE80211_REG_CLIENT_POWER_AFTER_LAST - 1, -}; - /* 802.11ax HE MAC capabilities */ #define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01 #define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02 @@ -2789,22 +2787,6 @@ struct ieee80211_he_6ghz_oper { u8 minrate; } __packed; -/* - * In "9.4.2.161 Transmit Power Envelope element" of "IEEE Std 802.11ax-2021", - * it show four types in "Table 9-275a-Maximum Transmit Power Interpretation - * subfield encoding", and two category for each type in "Table E-12-Regulatory - * Info subfield encoding in the United States". - * So it it totally max 8 Transmit Power Envelope element. - */ -#define IEEE80211_TPE_MAX_IE_COUNT 8 -/* - * In "Table 9-277—Meaning of Maximum Transmit Power Count subfield" - * of "IEEE Std 802.11ax™‐2021", the max power level is 8. 
- */ -#define IEEE80211_MAX_NUM_PWR_LEVEL 8 - -#define IEEE80211_TPE_MAX_POWER_COUNT 8 - /* transmit power interpretation type of transmit power envelope element */ enum ieee80211_tx_power_intrpt_type { IEEE80211_TPE_LOCAL_EIRP, @@ -2813,24 +2795,107 @@ enum ieee80211_tx_power_intrpt_type { IEEE80211_TPE_REG_CLIENT_EIRP_PSD, }; +/* category type of transmit power envelope element */ +enum ieee80211_tx_power_category_6ghz { + IEEE80211_TPE_CAT_6GHZ_DEFAULT = 0, + IEEE80211_TPE_CAT_6GHZ_SUBORDINATE = 1, +}; + +/* + * For IEEE80211_TPE_LOCAL_EIRP / IEEE80211_TPE_REG_CLIENT_EIRP, + * setting to 63.5 dBm means no constraint. + */ +#define IEEE80211_TPE_MAX_TX_PWR_NO_CONSTRAINT 127 + +/* + * For IEEE80211_TPE_LOCAL_EIRP_PSD / IEEE80211_TPE_REG_CLIENT_EIRP_PSD, + * setting to 127 indicates no PSD limit for the 20 MHz channel. + */ +#define IEEE80211_TPE_PSD_NO_LIMIT 127 + /** * struct ieee80211_tx_pwr_env - Transmit Power Envelope - * @tx_power_info: Transmit Power Information field - * @tx_power: Maximum Transmit Power field + * @info: Transmit Power Information field + * @variable: Maximum Transmit Power field * * This structure represents the payload of the "Transmit Power * Envelope element" as described in IEEE Std 802.11ax-2021 section * 9.4.2.161 */ struct ieee80211_tx_pwr_env { - u8 tx_power_info; - s8 tx_power[IEEE80211_TPE_MAX_POWER_COUNT]; + u8 info; + u8 variable[]; } __packed; #define IEEE80211_TX_PWR_ENV_INFO_COUNT 0x7 #define IEEE80211_TX_PWR_ENV_INFO_INTERPRET 0x38 #define IEEE80211_TX_PWR_ENV_INFO_CATEGORY 0xC0 +#define IEEE80211_TX_PWR_ENV_EXT_COUNT 0xF + +static inline bool ieee80211_valid_tpe_element(const u8 *data, u8 len) +{ + const struct ieee80211_tx_pwr_env *env = (const void *)data; + u8 count, interpret, category; + u8 needed = sizeof(*env); + u8 N; /* also called N in the spec */ + + if (len < needed) + return false; + + count = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_COUNT); + interpret = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_INTERPRET); + category = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_CATEGORY); + + switch (category) { + case IEEE80211_TPE_CAT_6GHZ_DEFAULT: + case IEEE80211_TPE_CAT_6GHZ_SUBORDINATE: + break; + default: + return false; + } + + switch (interpret) { + case IEEE80211_TPE_LOCAL_EIRP: + case IEEE80211_TPE_REG_CLIENT_EIRP: + if (count > 3) + return false; + + /* count == 0 encodes 1 value for 20 MHz, etc. */ + needed += count + 1; + + if (len < needed) + return false; + + /* there can be extension fields not accounted for in 'count' */ + + return true; + case IEEE80211_TPE_LOCAL_EIRP_PSD: + case IEEE80211_TPE_REG_CLIENT_EIRP_PSD: + if (count > 4) + return false; + + N = count ? 
1 << (count - 1) : 1; + needed += N; + + if (len < needed) + return false; + + if (len > needed) { + u8 K = u8_get_bits(env->variable[N], + IEEE80211_TX_PWR_ENV_EXT_COUNT); + + needed += 1 + K; + if (len < needed) + return false; + } + + return true; + } + + return false; +} + /* * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size * @he_oper_ie: byte data of the He Operations IE, stating from the byte @@ -4145,7 +4210,7 @@ enum ieee80211_idle_options { }; /** - * struct ieee80211_bss_max_idle_period_ie + * struct ieee80211_bss_max_idle_period_ie - BSS max idle period element struct * * This structure refers to "BSS Max idle period element" * @@ -4180,7 +4245,7 @@ enum ieee80211_sa_query_action { }; /** - * struct ieee80211_bssid_index + * struct ieee80211_bssid_index - multiple BSSID index element structure * * This structure refers to "Multiple BSSID-index element" * @@ -4195,7 +4260,8 @@ struct ieee80211_bssid_index { }; /** - * struct ieee80211_multiple_bssid_configuration + * struct ieee80211_multiple_bssid_configuration - multiple BSSID configuration + * element structure * * This structure refers to "Multiple BSSID Configuration element" * @@ -4326,6 +4392,7 @@ struct ieee80211_he_6ghz_capa { /** * ieee80211_get_qos_ctl - get pointer to qos control bytes * @hdr: the frame + * Return: a pointer to the QoS control field in the frame header * * The qos ctrl bytes come after the frame_control, duration, seq_num * and 3 or 4 addresses of length ETH_ALEN. Checks frame_control to choose @@ -4348,6 +4415,7 @@ static inline u8 *ieee80211_get_qos_ctl(struct ieee80211_hdr *hdr) /** * ieee80211_get_tid - get qos TID * @hdr: the frame + * Return: the TID from the QoS control field */ static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr) { @@ -4359,6 +4427,7 @@ static inline u8 ieee80211_get_tid(struct ieee80211_hdr *hdr) /** * ieee80211_get_SA - get pointer to SA * @hdr: the frame + * Return: a pointer to the source address (SA) * * Given an 802.11 frame, this function returns the offset * to the source address (SA). It does not verify that the @@ -4378,6 +4447,7 @@ static inline u8 *ieee80211_get_SA(struct ieee80211_hdr *hdr) /** * ieee80211_get_DA - get pointer to DA * @hdr: the frame + * Return: a pointer to the destination address (DA) * * Given an 802.11 frame, this function returns the offset * to the destination address (DA). 
It does not verify that @@ -4396,6 +4466,7 @@ static inline u8 *ieee80211_get_DA(struct ieee80211_hdr *hdr) /** * ieee80211_is_bufferable_mmpdu - check if frame is bufferable MMPDU * @skb: the skb to check, starting with the 802.11 header + * Return: whether or not the MMPDU is bufferable */ static inline bool ieee80211_is_bufferable_mmpdu(struct sk_buff *skb) { @@ -4434,6 +4505,7 @@ static inline bool ieee80211_is_bufferable_mmpdu(struct sk_buff *skb) /** * _ieee80211_is_robust_mgmt_frame - check if frame is a robust management frame * @hdr: the frame (buffer must include at least the first octet of payload) + * Return: whether or not the frame is a robust management frame */ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) { @@ -4470,6 +4542,7 @@ static inline bool _ieee80211_is_robust_mgmt_frame(struct ieee80211_hdr *hdr) /** * ieee80211_is_robust_mgmt_frame - check if skb contains a robust mgmt frame * @skb: the skb containing the frame, length will be checked + * Return: whether or not the frame is a robust management frame */ static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb) { @@ -4482,6 +4555,7 @@ static inline bool ieee80211_is_robust_mgmt_frame(struct sk_buff *skb) * ieee80211_is_public_action - check if frame is a public action frame * @hdr: the frame * @len: length of the frame + * Return: whether or not the frame is a public action frame */ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr, size_t len) @@ -4527,8 +4601,9 @@ ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb) /** * _ieee80211_is_group_privacy_action - check if frame is a group addressed - * privacy action frame + * privacy action frame * @hdr: the frame + * Return: whether or not the frame is a group addressed privacy action frame */ static inline bool _ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr) { @@ -4544,8 +4619,9 @@ static inline bool _ieee80211_is_group_privacy_action(struct ieee80211_hdr *hdr) /** * ieee80211_is_group_privacy_action - check if frame is a group addressed - * privacy action frame + * privacy action frame * @skb: the skb containing the frame, length will be checked + * Return: whether or not the frame is a group addressed privacy action frame */ static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb) { @@ -4557,6 +4633,7 @@ static inline bool ieee80211_is_group_privacy_action(struct sk_buff *skb) /** * ieee80211_tu_to_usec - convert time units (TU) to microseconds * @tu: the TUs + * Return: the time value converted to microseconds */ static inline unsigned long ieee80211_tu_to_usec(unsigned long tu) { @@ -4568,6 +4645,7 @@ static inline unsigned long ieee80211_tu_to_usec(unsigned long tu) * @tim: the TIM IE * @tim_len: length of the TIM IE * @aid: the AID to look for + * Return: whether or not traffic is indicated in the TIM for the given AID */ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim, u8 tim_len, u16 aid) @@ -4594,8 +4672,10 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim, } /** - * ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet) + * ieee80211_get_tdls_action - get TDLS action code * @skb: the skb containing the frame, length will not be checked + * Return: the TDLS action code, or -1 if it's not an encapsulated TDLS action + * frame * * This function assumes the frame is a data frame, and that the network header * is in the correct place. 
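Since the transmit power envelope element is now variable-length, callers are expected to bounds-check it with ieee80211_valid_tpe_element() (added a few hunks above) before dereferencing env->variable[]. A possible usage sketch follows; foo_parse_tpe() is hypothetical, while for_each_element_id(), WLAN_EID_TX_POWER_ENVELOPE and u8_get_bits() are pre-existing kernel helpers not touched by this patch:

    static void foo_parse_tpe(const u8 *ies, size_t ies_len)
    {
            const struct element *elem;

            for_each_element_id(elem, WLAN_EID_TX_POWER_ENVELOPE, ies, ies_len) {
                    const struct ieee80211_tx_pwr_env *env =
                            (const void *)elem->data;
                    u8 count, interpret;

                    /* rejects short payloads, unknown categories, and count
                     * values out of range for the given interpretation */
                    if (!ieee80211_valid_tpe_element(elem->data, elem->datalen))
                            continue;

                    count = u8_get_bits(env->info,
                                        IEEE80211_TX_PWR_ENV_INFO_COUNT);
                    interpret = u8_get_bits(env->info,
                                            IEEE80211_TX_PWR_ENV_INFO_INTERPRET);
                    /* for the EIRP interpretations, env->variable[] holds
                     * count + 1 power values; for the PSD interpretations it
                     * holds 2^(count - 1) values (one for count == 0),
                     * possibly followed by the extension count and values
                     * validated above */
                    pr_debug("TPE: interpretation %u, count %u\n",
                             interpret, count);
            }
    }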
@@ -4635,6 +4715,7 @@ static inline int ieee80211_get_tdls_action(struct sk_buff *skb) /** * ieee80211_action_contains_tpc - checks if the frame contains TPC element * @skb: the skb containing the frame, length will be checked + * Return: %true if the frame contains a TPC element, %false otherwise * * This function checks if it's either TPC report action frame or Link * Measurement report action frame as defined in IEEE Std. 802.11-2012 8.5.2.5 @@ -4679,6 +4760,11 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb) return true; } +/** + * ieee80211_is_timing_measurement - check if frame is timing measurement response + * @skb: the SKB to check + * Return: whether or not the frame is a valid timing measurement response + */ static inline bool ieee80211_is_timing_measurement(struct sk_buff *skb) { struct ieee80211_mgmt *mgmt = (void *)skb->data; @@ -4698,6 +4784,11 @@ static inline bool ieee80211_is_timing_measurement(struct sk_buff *skb) return false; } +/** + * ieee80211_is_ftm - check if frame is FTM response + * @skb: the SKB to check + * Return: whether or not the frame is a valid FTM response action frame + */ static inline bool ieee80211_is_ftm(struct sk_buff *skb) { struct ieee80211_mgmt *mgmt = (void *)skb->data; @@ -4752,6 +4843,7 @@ struct element { * @element: element pointer after for_each_element() or friends * @data: same data pointer as passed to for_each_element() or friends * @datalen: same data length as passed to for_each_element() or friends + * Return: %true if all elements were iterated, %false otherwise; see notes * * This function returns %true if all the data was parsed or considered * while walking the elements. Only use this if your for_each_element() @@ -4955,6 +5047,7 @@ struct ieee80211_mle_tdls_common_info { * ieee80211_mle_common_size - check multi-link element common size * @data: multi-link element, must already be checked for size using * ieee80211_mle_size_ok() + * Return: the size of the multi-link element's "common" subfield */ static inline u8 ieee80211_mle_common_size(const u8 *data) { @@ -4987,11 +5080,10 @@ static inline u8 ieee80211_mle_common_size(const u8 *data) /** * ieee80211_mle_get_link_id - returns the link ID * @data: the basic multi link element + * Return: the link ID, or -1 if not present * * The element is assumed to be of the correct type (BASIC) and big enough, * this must be checked using ieee80211_mle_type_ok(). - * - * If the BSS link ID can't be found, -1 will be returned */ static inline int ieee80211_mle_get_link_id(const u8 *data) { @@ -5011,12 +5103,10 @@ static inline int ieee80211_mle_get_link_id(const u8 *data) /** * ieee80211_mle_get_bss_param_ch_cnt - returns the BSS parameter change count * @data: pointer to the basic multi link element + * Return: the BSS Parameter Change Count field value, or -1 if not present * * The element is assumed to be of the correct type (BASIC) and big enough, * this must be checked using ieee80211_mle_type_ok(). - * - * If the BSS parameter change count value can't be found (the presence bit - * for it is clear), -1 will be returned. 
*/ static inline int ieee80211_mle_get_bss_param_ch_cnt(const u8 *data) @@ -5039,13 +5129,13 @@ ieee80211_mle_get_bss_param_ch_cnt(const u8 *data) /** * ieee80211_mle_get_eml_med_sync_delay - returns the medium sync delay - * @data: pointer to the multi link EHT IE + * @data: pointer to the multi-link element + * Return: the medium synchronization delay field value from the multi-link + * element, or the default value (%IEEE80211_MED_SYNC_DELAY_DEFAULT) + * if not present * * The element is assumed to be of the correct type (BASIC) and big enough, * this must be checked using ieee80211_mle_type_ok(). - * - * If the medium synchronization is not present, then the default value is - * returned. */ static inline u16 ieee80211_mle_get_eml_med_sync_delay(const u8 *data) { @@ -5069,12 +5159,12 @@ static inline u16 ieee80211_mle_get_eml_med_sync_delay(const u8 *data) /** * ieee80211_mle_get_eml_cap - returns the EML capability - * @data: pointer to the multi link EHT IE + * @data: pointer to the multi-link element + * Return: the EML capability field value from the multi-link element, + * or 0 if not present * * The element is assumed to be of the correct type (BASIC) and big enough, * this must be checked using ieee80211_mle_type_ok(). - * - * If the EML capability is not present, 0 will be returned. */ static inline u16 ieee80211_mle_get_eml_cap(const u8 *data) { @@ -5100,13 +5190,12 @@ static inline u16 ieee80211_mle_get_eml_cap(const u8 *data) /** * ieee80211_mle_get_mld_capa_op - returns the MLD capabilities and operations. - * @data: pointer to the multi link EHT IE + * @data: pointer to the multi-link element + * Return: the MLD capabilities and operations field value from the multi-link + * element, or 0 if not present * * The element is assumed to be of the correct type (BASIC) and big enough, * this must be checked using ieee80211_mle_type_ok(). - * - * If the MLD capabilities and operations field is not present, 0 will be - * returned. */ static inline u16 ieee80211_mle_get_mld_capa_op(const u8 *data) { @@ -5137,12 +5226,11 @@ static inline u16 ieee80211_mle_get_mld_capa_op(const u8 *data) /** * ieee80211_mle_get_mld_id - returns the MLD ID - * @data: pointer to the multi link element + * @data: pointer to the multi-link element + * Return: The MLD ID in the given multi-link element, or 0 if not present * * The element is assumed to be of the correct type (BASIC) and big enough, * this must be checked using ieee80211_mle_type_ok(). - * - * If the MLD ID is not present, 0 will be returned. 
*/ static inline u8 ieee80211_mle_get_mld_id(const u8 *data) { @@ -5177,6 +5265,7 @@ static inline u8 ieee80211_mle_get_mld_id(const u8 *data) * ieee80211_mle_size_ok - validate multi-link element size * @data: pointer to the element data * @len: length of the containing element + * Return: whether or not the multi-link element size is OK */ static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len) { @@ -5246,6 +5335,7 @@ static inline bool ieee80211_mle_size_ok(const u8 *data, size_t len) * @data: pointer to the element data * @type: expected type of the element * @len: length of the containing element + * Return: whether or not the multi-link element type matches and size is OK */ static inline bool ieee80211_mle_type_ok(const u8 *data, u8 type, size_t len) { @@ -5289,6 +5379,7 @@ struct ieee80211_mle_per_sta_profile { * profile size * @data: pointer to the sub element data * @len: length of the containing sub element + * Return: %true if the STA profile is large enough, %false otherwise */ static inline bool ieee80211_mle_basic_sta_prof_size_ok(const u8 *data, size_t len) @@ -5373,6 +5464,7 @@ ieee80211_mle_basic_sta_prof_bss_param_ch_cnt(const struct ieee80211_mle_per_sta * element sta profile size. * @data: pointer to the sub element data * @len: length of the containing sub element + * Return: %true if the STA profile is large enough, %false otherwise */ static inline bool ieee80211_mle_reconf_sta_prof_size_ok(const u8 *data, size_t len) diff --git a/include/linux/local_lock.h b/include/linux/local_lock.h index e55010fa7329..091dc0b6bdfb 100644 --- a/include/linux/local_lock.h +++ b/include/linux/local_lock.h @@ -51,4 +51,25 @@ #define local_unlock_irqrestore(lock, flags) \ __local_unlock_irqrestore(lock, flags) +DEFINE_GUARD(local_lock, local_lock_t __percpu*, + local_lock(_T), + local_unlock(_T)) +DEFINE_GUARD(local_lock_irq, local_lock_t __percpu*, + local_lock_irq(_T), + local_unlock_irq(_T)) +DEFINE_LOCK_GUARD_1(local_lock_irqsave, local_lock_t __percpu, + local_lock_irqsave(_T->lock, _T->flags), + local_unlock_irqrestore(_T->lock, _T->flags), + unsigned long flags) + +#define local_lock_nested_bh(_lock) \ + __local_lock_nested_bh(_lock) + +#define local_unlock_nested_bh(_lock) \ + __local_unlock_nested_bh(_lock) + +DEFINE_GUARD(local_lock_nested_bh, local_lock_t __percpu*, + local_lock_nested_bh(_T), + local_unlock_nested_bh(_T)) + #endif diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h index 975e33b793a7..8dd71fbbb6d2 100644 --- a/include/linux/local_lock_internal.h +++ b/include/linux/local_lock_internal.h @@ -62,6 +62,17 @@ do { \ local_lock_debug_init(lock); \ } while (0) +#define __spinlock_nested_bh_init(lock) \ +do { \ + static struct lock_class_key __key; \ + \ + debug_check_no_locks_freed((void *)lock, sizeof(*lock));\ + lockdep_init_map_type(&(lock)->dep_map, #lock, &__key, \ + 0, LD_WAIT_CONFIG, LD_WAIT_INV, \ + LD_LOCK_NORMAL); \ + local_lock_debug_init(lock); \ +} while (0) + #define __local_lock(lock) \ do { \ preempt_disable(); \ @@ -98,6 +109,15 @@ do { \ local_irq_restore(flags); \ } while (0) +#define __local_lock_nested_bh(lock) \ + do { \ + lockdep_assert_in_softirq(); \ + local_lock_acquire(this_cpu_ptr(lock)); \ + } while (0) + +#define __local_unlock_nested_bh(lock) \ + local_lock_release(this_cpu_ptr(lock)) + #else /* !CONFIG_PREEMPT_RT */ /* @@ -138,4 +158,15 @@ typedef spinlock_t local_lock_t; #define __local_unlock_irqrestore(lock, flags) __local_unlock(lock) +#define __local_lock_nested_bh(lock) \ +do 
{ \ + lockdep_assert_in_softirq_func(); \ + spin_lock(this_cpu_ptr(lock)); \ +} while (0) + +#define __local_unlock_nested_bh(lock) \ +do { \ + spin_unlock(this_cpu_ptr((lock))); \ +} while (0) + #endif /* CONFIG_PREEMPT_RT */ diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 08b0d1d9d78b..3f5a551579cc 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h @@ -600,6 +600,8 @@ do { \ (!in_softirq() || in_irq() || in_nmi())); \ } while (0) +extern void lockdep_assert_in_softirq_func(void); + #else # define might_lock(lock) do { } while (0) # define might_lock_read(lock) do { } while (0) @@ -613,6 +615,7 @@ do { \ # define lockdep_assert_preemption_enabled() do { } while (0) # define lockdep_assert_preemption_disabled() do { } while (0) # define lockdep_assert_in_softirq() do { } while (0) +# define lockdep_assert_in_softirq_func() do { } while (0) #endif #ifdef CONFIG_PROVE_RAW_LOCK_NESTING diff --git a/include/linux/math64.h b/include/linux/math64.h index d34def7f9a8c..6aaccc1626ab 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h @@ -297,6 +297,19 @@ u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div); #define DIV64_U64_ROUND_UP(ll, d) \ ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); }) +/** + * DIV_U64_ROUND_UP - unsigned 64bit divide with 32bit divisor rounded up + * @ll: unsigned 64bit dividend + * @d: unsigned 32bit divisor + * + * Divide unsigned 64bit dividend by unsigned 32bit divisor + * and round up. + * + * Return: dividend / divisor rounded up + */ +#define DIV_U64_ROUND_UP(ll, d) \ + ({ u32 _tmp = (d); div_u64((ll) + _tmp - 1, _tmp); }) + /** * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer * @dividend: unsigned 64bit dividend @@ -342,4 +355,19 @@ u64 mul_u64_u64_div_u64(u64 a, u64 mul, u64 div); div_s64((__x - (__d / 2)), __d); \ } \ ) + +/** + * roundup_u64 - Round up a 64bit value to the next specified 32bit multiple + * @x: the value to round up + * @y: 32bit multiple to round up to + * + * Rounds @x to the next multiple of @y. For 32bit @x values, see roundup and + * the faster round_up() for powers of 2. + * + * Return: rounded up value. 
+ */ +static inline u64 roundup_u64(u64 x, u32 y) +{ + return DIV_U64_ROUND_UP(x, y) * y; +} #endif /* _LINUX_MATH64_H */ diff --git a/include/linux/mii_timestamper.h b/include/linux/mii_timestamper.h index 26b04f73f214..995db62570f9 100644 --- a/include/linux/mii_timestamper.h +++ b/include/linux/mii_timestamper.h @@ -59,7 +59,7 @@ struct mii_timestamper { struct phy_device *phydev); int (*ts_info)(struct mii_timestamper *mii_ts, - struct ethtool_ts_info *ts_info); + struct kernel_ethtool_ts_info *ts_info); struct device *device; }; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index d7bb31d9a446..da09bfaa7b81 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -294,6 +294,7 @@ enum { #define MLX5_UMR_FLEX_ALIGNMENT 0x40 #define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt)) #define MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm)) +#define MLX5_UMR_KSM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_ksm)) #define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8) diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 779cfdf2e9d6..0d31f77396fc 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -766,6 +766,12 @@ struct mlx5_hca_cap { u32 max[MLX5_UN_SZ_DW(hca_cap_union)]; }; +enum mlx5_wc_state { + MLX5_WC_STATE_UNINITIALIZED, + MLX5_WC_STATE_UNSUPPORTED, + MLX5_WC_STATE_SUPPORTED, +}; + struct mlx5_core_dev { struct device *device; enum mlx5_coredev_type coredev_type; @@ -824,6 +830,9 @@ struct mlx5_core_dev { #endif u64 num_ipsec_offloads; struct mlx5_sd *sd; + enum mlx5_wc_state wc_state; + /* sync write combining state */ + struct mutex wc_state_lock; }; struct mlx5_db { @@ -1375,4 +1384,6 @@ static inline bool mlx5_is_macsec_roce_supported(struct mlx5_core_dev *mdev) enum { MLX5_OCTWORD = 16, }; + +bool mlx5_wc_support_get(struct mlx5_core_dev *mdev); #endif /* MLX5_DRIVER_H */ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index d45bfb7cf81d..c176953edcf8 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1093,7 +1093,8 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits { u8 tunnel_stateless_ip_over_ip_tx[0x1]; u8 reserved_at_2e[0x2]; u8 max_vxlan_udp_ports[0x8]; - u8 reserved_at_38[0x6]; + u8 swp_csum_l4_partial[0x1]; + u8 reserved_at_39[0x5]; u8 max_geneve_opt_len[0x1]; u8 tunnel_stateless_geneve_rx[0x1]; @@ -1526,8 +1527,7 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 ts_cqe_to_dest_cqn[0x1]; u8 reserved_at_b3[0x6]; u8 go_back_n[0x1]; - u8 shampo[0x1]; - u8 reserved_at_bb[0x5]; + u8 reserved_at_ba[0x6]; u8 max_sgl_for_optimized_performance[0x8]; u8 log_max_cq_sz[0x8]; @@ -1744,7 +1744,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_280[0x10]; u8 max_wqe_sz_sq[0x10]; - u8 reserved_at_2a0[0x10]; + u8 reserved_at_2a0[0xb]; + u8 shampo[0x1]; + u8 reserved_at_2ac[0x4]; u8 max_wqe_sz_rq[0x10]; u8 max_flow_counter_31_16[0x10]; @@ -1992,7 +1994,9 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 migration_tracking_state[0x1]; u8 reserved_at_ca[0x6]; u8 migration_in_chunks[0x1]; - u8 reserved_at_d1[0xf]; + u8 reserved_at_d1[0x1]; + u8 sf_eq_usage[0x1]; + u8 reserved_at_d3[0xd]; u8 cross_vhca_object_to_object_supported[0x20]; @@ -2017,7 +2021,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_250[0x10]; u8 reserved_at_260[0x120]; - u8 reserved_at_380[0x10]; + u8 reserved_at_380[0xb]; + u8 
min_mkey_log_entity_size_fixed_buffer[0x5]; u8 ec_vf_vport_base[0x10]; u8 reserved_at_3a0[0x10]; @@ -2029,7 +2034,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 pcc_ifa2[0x1]; u8 reserved_at_3f1[0xf]; - u8 reserved_at_400[0x40]; + u8 reserved_at_400[0x1]; + u8 min_mkey_log_entity_size_fixed_buffer_valid[0x1]; + u8 reserved_at_402[0x1e]; + + u8 reserved_at_420[0x20]; u8 reserved_at_440[0x8]; u8 max_num_eqs_24b[0x18]; @@ -3912,7 +3921,7 @@ enum { }; enum { - ELEMENT_TYPE_CAP_MASK_TASR = 1 << 0, + ELEMENT_TYPE_CAP_MASK_TSAR = 1 << 0, ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1, ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2, ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3, @@ -5633,7 +5642,11 @@ struct mlx5_ifc_query_q_counter_out_bits { u8 local_ack_timeout_err[0x20]; - u8 reserved_at_320[0xa0]; + u8 reserved_at_320[0x60]; + + u8 req_rnr_retries_exceeded[0x20]; + + u8 reserved_at_3a0[0x20]; u8 resp_local_length_error[0x20]; diff --git a/include/linux/module.h b/include/linux/module.h index 330ffb59efe5..4213d8993cd8 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -509,7 +509,9 @@ struct module { #endif #ifdef CONFIG_DEBUG_INFO_BTF_MODULES unsigned int btf_data_size; + unsigned int btf_base_data_size; void *btf_data; + void *btf_base_data; #endif #ifdef CONFIG_JUMP_LABEL struct jump_entry *jump_entries; diff --git a/include/linux/net_tstamp.h b/include/linux/net_tstamp.h index eb01c37e71e0..662074b08c94 100644 --- a/include/linux/net_tstamp.h +++ b/include/linux/net_tstamp.h @@ -5,7 +5,16 @@ #include +#define SOF_TIMESTAMPING_SOFTWARE_MASK (SOF_TIMESTAMPING_RX_SOFTWARE | \ + SOF_TIMESTAMPING_TX_SOFTWARE | \ + SOF_TIMESTAMPING_SOFTWARE) + +#define SOF_TIMESTAMPING_HARDWARE_MASK (SOF_TIMESTAMPING_RX_HARDWARE | \ + SOF_TIMESTAMPING_TX_HARDWARE | \ + SOF_TIMESTAMPING_RAW_HARDWARE) + enum hwtstamp_source { + HWTSTAMP_SOURCE_UNSPEC, HWTSTAMP_SOURCE_NETDEV, HWTSTAMP_SOURCE_PHYLIB, }; diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index d20c6c99eb88..607009150b5f 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -43,6 +43,7 @@ #include #include +#include #include #include #include @@ -79,6 +80,7 @@ struct xdp_buff; struct xdp_frame; struct xdp_metadata_ops; struct xdp_md; +struct ethtool_netdev_state; typedef u32 xdp_features_t; @@ -1817,7 +1819,8 @@ enum netdev_reg_state { * @priv_flags: Like 'flags' but invisible to userspace, * see if.h for the definitions * @gflags: Global flags ( kept as legacy ) - * @padded: How much padding added by alloc_netdev() + * @priv_len: Size of the ->priv flexible array + * @priv: Flexible array containing private data * @operstate: RFC2863 operstate * @link_mode: Mapping policy to operstate * @if_port: Selectable AUI, TP, ... @@ -1985,8 +1988,6 @@ enum netdev_reg_state { * switch driver and used to set the phys state of the * switch port. 
* - * @wol_enabled: Wake-on-LAN is enabled - * * @threaded: napi threaded mode is enabled * * @net_notifier_list: List of per-net netdev notifier block @@ -1998,6 +1999,7 @@ enum netdev_reg_state { * @udp_tunnel_nic_info: static structure describing the UDP tunnel * offload capabilities of the device * @udp_tunnel_nic: UDP tunnel offload state + * @ethtool: ethtool related state * @xdp_state: stores info on attached XDP BPF programs * * @nested_level: Used as a parameter of spin_lock_nested() of @@ -2198,10 +2200,10 @@ struct net_device { unsigned short neigh_priv_len; unsigned short dev_id; unsigned short dev_port; - unsigned short padded; + int irq; + u32 priv_len; spinlock_t addr_list_lock; - int irq; struct netdev_hw_addr_list uc; struct netdev_hw_addr_list mc; @@ -2372,7 +2374,6 @@ struct net_device { struct lock_class_key *qdisc_tx_busylock; bool proto_down; bool threaded; - unsigned wol_enabled:1; struct list_head net_notifier_list; @@ -2383,6 +2384,8 @@ struct net_device { const struct udp_tunnel_nic_info *udp_tunnel_nic_info; struct udp_tunnel_nic *udp_tunnel_nic; + struct ethtool_netdev_state *ethtool; + /* protected by rtnl_lock */ struct bpf_xdp_entity xdp_state[__MAX_XDP_MODE]; @@ -2401,7 +2404,13 @@ struct net_device { /** @page_pools: page pools created for this netdevice */ struct hlist_head page_pools; #endif -}; + + /** @irq_moder: dim parameters used if IS_ENABLED(CONFIG_DIMLIB). */ + struct dim_irq_moder *irq_moder; + + u8 priv[] ____cacheline_aligned + __counted_by(priv_len); +} ____cacheline_aligned; #define to_net_dev(d) container_of(d, struct net_device, dev) /* @@ -2591,7 +2600,7 @@ void dev_net_set(struct net_device *dev, struct net *net) */ static inline void *netdev_priv(const struct net_device *dev) { - return (char *)dev + ALIGN(sizeof(struct net_device), NETDEV_ALIGN); + return (void *)dev->priv; } /* Set the sysfs physical device reference for the network logical device @@ -2731,12 +2740,12 @@ struct pcpu_sw_netstats { } __aligned(4 * sizeof(u64)); struct pcpu_dstats { - u64 rx_packets; - u64 rx_bytes; - u64 rx_drops; - u64 tx_packets; - u64 tx_bytes; - u64 tx_drops; + u64_stats_t rx_packets; + u64_stats_t rx_bytes; + u64_stats_t rx_drops; + u64_stats_t tx_packets; + u64_stats_t tx_bytes; + u64_stats_t tx_drops; struct u64_stats_sync syncp; } __aligned(8 * sizeof(u64)); @@ -3021,7 +3030,8 @@ int call_netdevice_notifiers_info(unsigned long val, #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) #define for_each_netdev_dump(net, d, ifindex) \ - xa_for_each_start(&(net)->dev_by_index, (ifindex), (d), (ifindex)) + for (; (d = xa_find(&(net)->dev_by_index, &ifindex, \ + ULONG_MAX, XA_PRESENT)); ifindex++) static inline struct net_device *next_net_device(struct net_device *dev) { @@ -3121,7 +3131,6 @@ static inline void unregister_netdevice(struct net_device *dev) int netdev_refcnt_read(const struct net_device *dev); void free_netdev(struct net_device *dev); -void netdev_freemem(struct net_device *dev); void init_dummy_netdev(struct net_device *dev); struct net_device *netdev_get_xmit_slave(struct net_device *dev, @@ -3200,6 +3209,7 @@ static inline bool dev_has_header(const struct net_device *dev) struct softnet_data { struct list_head poll_list; struct sk_buff_head process_queue; + local_lock_t process_queue_bh_lock; /* stats */ unsigned int processed; @@ -3222,13 +3232,7 @@ struct softnet_data { struct sk_buff_head xfrm_backlog; #endif /* written and read only by owning cpu: */ - struct { - u16 recursion; - u8 more; -#ifdef 
CONFIG_NET_EGRESS - u8 skip_txqueue; -#endif - } xmit; + struct netdev_xmit xmit; #ifdef CONFIG_RPS /* input_queue_head should be written by cpu owning this struct, * and only read by other cpus. Worth using a cache line. @@ -3256,10 +3260,18 @@ struct softnet_data { DECLARE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); +#ifndef CONFIG_PREEMPT_RT static inline int dev_recursion_level(void) { return this_cpu_read(softnet_data.xmit.recursion); } +#else +static inline int dev_recursion_level(void) +{ + return current->net_xmit.recursion; +} + +#endif void __netif_schedule(struct Qdisc *q); void netif_schedule_queue(struct netdev_queue *txq); @@ -3903,9 +3915,6 @@ int generic_hwtstamp_get_lower(struct net_device *dev, int generic_hwtstamp_set_lower(struct net_device *dev, struct kernel_hwtstamp_config *kernel_cfg, struct netlink_ext_ack *extack); -int dev_set_hwtstamp_phylib(struct net_device *dev, - struct kernel_hwtstamp_config *cfg, - struct netlink_ext_ack *extack); int dev_ethtool(struct net *net, struct ifreq *ifr, void __user *userdata); unsigned int dev_get_flags(const struct net_device *); int __dev_change_flags(struct net_device *dev, unsigned int flags, @@ -4874,18 +4883,35 @@ static inline ktime_t netdev_get_tstamp(struct net_device *dev, return hwtstamps->hwtstamp; } -static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, - struct sk_buff *skb, struct net_device *dev, - bool more) +#ifndef CONFIG_PREEMPT_RT +static inline void netdev_xmit_set_more(bool more) { __this_cpu_write(softnet_data.xmit.more, more); - return ops->ndo_start_xmit(skb, dev); } static inline bool netdev_xmit_more(void) { return __this_cpu_read(softnet_data.xmit.more); } +#else +static inline void netdev_xmit_set_more(bool more) +{ + current->net_xmit.more = more; +} + +static inline bool netdev_xmit_more(void) +{ + return current->net_xmit.more; +} +#endif + +static inline netdev_tx_t __netdev_start_xmit(const struct net_device_ops *ops, + struct sk_buff *skb, struct net_device *dev, + bool more) +{ + netdev_xmit_set_more(more); + return ops->ndo_start_xmit(skb, dev); +} static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev, struct netdev_queue *txq, bool more) diff --git a/include/linux/netdevice_xmit.h b/include/linux/netdevice_xmit.h new file mode 100644 index 000000000000..38325e070296 --- /dev/null +++ b/include/linux/netdevice_xmit.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _LINUX_NETDEVICE_XMIT_H +#define _LINUX_NETDEVICE_XMIT_H + +struct netdev_xmit { + u16 recursion; + u8 more; +#ifdef CONFIG_NET_EGRESS + u8 skip_txqueue; +#endif +}; + +#endif diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 5df7340d4dab..b332c2048c75 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h @@ -47,7 +47,6 @@ struct netlink_kernel_cfg { unsigned int groups; unsigned int flags; void (*input)(struct sk_buff *skb); - struct mutex *cb_mutex; int (*bind)(struct net *net, int group); void (*unbind)(struct net *net, int group); void (*release) (struct sock *sk, unsigned long *groups); diff --git a/include/linux/objagg.h b/include/linux/objagg.h index 78021777df46..6df5b887dc54 100644 --- a/include/linux/objagg.h +++ b/include/linux/objagg.h @@ -8,7 +8,6 @@ struct objagg_ops { size_t obj_size; bool (*delta_check)(void *priv, const void *parent_obj, const void *obj); - int (*hints_obj_cmp)(const void *obj1, const void *obj2); void * (*delta_create)(void *priv, void *parent_obj, void *obj); void 
(*delta_destroy)(void *priv, void *delta_priv); void * (*root_create)(void *priv, void *obj, unsigned int root_id); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 942a587bb97e..76a8f2d6bd64 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -2126,6 +2126,8 @@ #define PCI_VENDOR_ID_CHELSIO 0x1425 +#define PCI_VENDOR_ID_EDIMAX 0x1432 + #define PCI_VENDOR_ID_ADLINK 0x144a #define PCI_VENDOR_ID_SAMSUNG 0x144d @@ -2599,6 +2601,8 @@ #define PCI_VENDOR_ID_HYGON 0x1d94 +#define PCI_VENDOR_ID_META 0x1d9b + #define PCI_VENDOR_ID_FUNGIBLE 0x1dad #define PCI_VENDOR_ID_HXT 0x1dbf diff --git a/include/linux/pcs/pcs-xpcs.h b/include/linux/pcs/pcs-xpcs.h index da3a6c30f6d2..b4a4eb6c8866 100644 --- a/include/linux/pcs/pcs-xpcs.h +++ b/include/linux/pcs/pcs-xpcs.h @@ -7,11 +7,12 @@ #ifndef __LINUX_PCS_XPCS_H #define __LINUX_PCS_XPCS_H +#include +#include +#include #include #include - -#define NXP_SJA1105_XPCS_ID 0x00000010 -#define NXP_SJA1110_XPCS_ID 0x00000020 +#include /* AN mode */ #define DW_AN_C73 1 @@ -20,20 +21,46 @@ #define DW_AN_C37_1000BASEX 4 #define DW_10GBASER 5 -/* device vendor OUI */ -#define DW_OUI_WX 0x0018fc80 +struct dw_xpcs_desc; -/* dev_flag */ -#define DW_DEV_TXGBE BIT(0) +enum dw_xpcs_pcs_id { + DW_XPCS_ID_NATIVE = 0, + NXP_SJA1105_XPCS_ID = 0x00000010, + NXP_SJA1110_XPCS_ID = 0x00000020, + DW_XPCS_ID = 0x7996ced0, + DW_XPCS_ID_MASK = 0xffffffff, +}; -struct xpcs_id; +enum dw_xpcs_pma_id { + DW_XPCS_PMA_ID_NATIVE = 0, + DW_XPCS_PMA_GEN1_3G_ID, + DW_XPCS_PMA_GEN2_3G_ID, + DW_XPCS_PMA_GEN2_6G_ID, + DW_XPCS_PMA_GEN4_3G_ID, + DW_XPCS_PMA_GEN4_6G_ID, + DW_XPCS_PMA_GEN5_10G_ID, + DW_XPCS_PMA_GEN5_12G_ID, + WX_TXGBE_XPCS_PMA_10G_ID = 0x0018fc80, +}; + +struct dw_xpcs_info { + u32 pcs; + u32 pma; +}; + +enum dw_xpcs_clock { + DW_XPCS_CORE_CLK, + DW_XPCS_PAD_CLK, + DW_XPCS_NUM_CLKS, +}; struct dw_xpcs { + struct dw_xpcs_info info; + const struct dw_xpcs_desc *desc; struct mdio_device *mdiodev; - const struct xpcs_id *id; + struct clk_bulk_data clks[DW_XPCS_NUM_CLKS]; struct phylink_pcs pcs; phy_interface_t interface; - int dev_flag; }; int xpcs_get_an_mode(struct dw_xpcs *xpcs, phy_interface_t interface); @@ -46,6 +73,8 @@ int xpcs_config_eee(struct dw_xpcs *xpcs, int mult_fact_100ns, int enable); struct dw_xpcs *xpcs_create_mdiodev(struct mii_bus *bus, int addr, phy_interface_t interface); +struct dw_xpcs *xpcs_create_fwnode(struct fwnode_handle *fwnode, + phy_interface_t interface); void xpcs_destroy(struct dw_xpcs *xpcs); #endif /* __LINUX_PCS_XPCS_H */ diff --git a/include/linux/phy.h b/include/linux/phy.h index 3be430cf3132..04ae5c811cfb 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -128,6 +128,7 @@ extern const int phy_10gbit_features_array[1]; * @PHY_INTERFACE_MODE_10GKR: 10GBASE-KR - with Clause 73 AN * @PHY_INTERFACE_MODE_QUSGMII: Quad Universal SGMII * @PHY_INTERFACE_MODE_1000BASEKX: 1000Base-KX - with Clause 73 AN + * @PHY_INTERFACE_MODE_10G_QXGMII: 10G-QXGMII - 4 ports over 10G USXGMII * @PHY_INTERFACE_MODE_MAX: Book keeping * * Describes the interface between the MAC and PHY. 
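The pcs-xpcs rework above replaces the OUI/dev_flag matching with explicit PCS/PMA ID enums, moves clock handling into the XPCS core (see the clks[] bulk data), and adds xpcs_create_fwnode() next to xpcs_create_mdiodev(). As a rough sketch of what a consumer might look like, hedged on details the diff does not show (the "pcs-handle" property name and the foo_* naming are illustrative assumptions, not from this series):

#include <linux/err.h>
#include <linux/pcs/pcs-xpcs.h>
#include <linux/phylink.h>
#include <linux/property.h>

static struct phylink_pcs *foo_mac_create_pcs(struct device *dev,
					      phy_interface_t interface)
{
	struct fwnode_handle *pcs_node;
	struct dw_xpcs *xpcs;

	/* Look up a firmware reference to the XPCS; the property name
	 * here is an assumption made for the example.
	 */
	pcs_node = fwnode_find_reference(dev_fwnode(dev), "pcs-handle", 0);
	if (IS_ERR(pcs_node))
		return ERR_CAST(pcs_node);

	/* DW_XPCS_CORE_CLK/DW_XPCS_PAD_CLK now live in xpcs->clks and
	 * are handled by the XPCS core, so the MAC no longer manages
	 * the XPCS clocks itself.
	 */
	xpcs = xpcs_create_fwnode(pcs_node, interface);
	fwnode_handle_put(pcs_node);
	if (IS_ERR(xpcs))
		return ERR_CAST(xpcs);

	return &xpcs->pcs;
}

The matching teardown would call xpcs_destroy(xpcs) once phylink has been disconnected.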
@@ -168,6 +169,7 @@ typedef enum { PHY_INTERFACE_MODE_10GKR, PHY_INTERFACE_MODE_QUSGMII, PHY_INTERFACE_MODE_1000BASEKX, + PHY_INTERFACE_MODE_10G_QXGMII, PHY_INTERFACE_MODE_MAX, } phy_interface_t; @@ -289,6 +291,8 @@ static inline const char *phy_modes(phy_interface_t interface) return "100base-x"; case PHY_INTERFACE_MODE_QUSGMII: return "qusgmii"; + case PHY_INTERFACE_MODE_10G_QXGMII: + return "10g-qxgmii"; default: return "unknown"; } @@ -612,6 +616,8 @@ struct macsec_ops; * handling shall be postponed until PHY has resumed * @irq_rerun: Flag indicating interrupts occurred while PHY was suspended, * requiring a rerun of the interrupt handler after resume + * @default_timestamp: Flag indicating whether we are using the phy + * timestamp as the default one * @interface: enum phy_interface_t value * @possible_interfaces: bitmap of interface modes that the attached PHY * will switch between depending on media speed. @@ -677,6 +683,8 @@ struct phy_device { unsigned irq_suspended:1; unsigned irq_rerun:1; + unsigned default_timestamp:1; + int rate_matching; enum phy_state state; @@ -1610,7 +1618,7 @@ static inline bool phy_rxtstamp(struct phy_device *phydev, struct sk_buff *skb, } static inline int phy_ts_info(struct phy_device *phydev, - struct ethtool_ts_info *tsinfo) + struct kernel_ethtool_ts_info *tsinfo) { return phydev->mii_ts->ts_info(phydev->mii_ts, tsinfo); } @@ -1621,6 +1629,21 @@ static inline void phy_txtstamp(struct phy_device *phydev, struct sk_buff *skb, phydev->mii_ts->txtstamp(phydev->mii_ts, skb, type); } +/** + * phy_is_default_hwtstamp - Is the PHY hwtstamp the default timestamp + * @phydev: Pointer to phy_device + * + * This is used to pick the default timestamping device: with the new + * API, timestamping comes from the MAC by default unless the phydev + * has the default_timestamp flag enabled. + * + * Return: True if the PHY is the default hw timestamp device, false + * otherwise. + */ +static inline bool phy_is_default_hwtstamp(struct phy_device *phydev) +{ + return phy_has_hwtstamp(phydev) && phydev->default_timestamp; +} + /** * phy_is_internal - Convenience function for testing if a PHY is internal * @phydev: the phy_device struct diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 5ea6b2ad2396..2381e07429a2 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -141,7 +141,8 @@ enum phylink_op_type { * @mac_requires_rxc: if true, the MAC always requires a receive clock from PHY. * The PHY driver should start the clock signal as soon as * possible and avoid stopping it during suspend events. - * @ovr_an_inband: if true, override PCS to MLO_AN_INBAND + * @default_an_inband: if true, defaults to MLO_AN_INBAND rather than + * MLO_AN_PHY. A fixed-link specification will override. * @get_fixed_state: callback to execute to determine the fixed link state, * if MAC link is at %MLO_AN_FIXED mode. 
* @supported_interfaces: bitmap describing which PHY_INTERFACE_MODE_xxx @@ -154,7 +155,7 @@ struct phylink_config { bool poll_fixed_state; bool mac_managed_pm; bool mac_requires_rxc; - bool ovr_an_inband; + bool default_an_inband; void (*get_fixed_state)(struct phylink_config *config, struct phylink_link_state *state); DECLARE_PHY_INTERFACE_MASK(supported_interfaces); @@ -653,6 +654,7 @@ static inline int phylink_get_link_timer_ns(phy_interface_t interface) case PHY_INTERFACE_MODE_SGMII: case PHY_INTERFACE_MODE_QSGMII: case PHY_INTERFACE_MODE_USXGMII: + case PHY_INTERFACE_MODE_10G_QXGMII: return 1600000; case PHY_INTERFACE_MODE_1000BASEX: diff --git a/include/linux/pse-pd/pse.h b/include/linux/pse-pd/pse.h index 6eec24ffa866..591a53e082e6 100644 --- a/include/linux/pse-pd/pse.h +++ b/include/linux/pse-pd/pse.h @@ -9,6 +9,9 @@ #include #include +/* Maximum current in uA according to IEEE 802.3-2022 Table 145-1 */ +#define MAX_PI_CURRENT 1920000 + struct phy_device; struct pse_controller_dev; @@ -36,12 +39,29 @@ struct pse_control_config { * functions. IEEE 802.3-2022 30.9.1.1.2 aPSEAdminState * @c33_pw_status: power detection status of the PSE. * IEEE 802.3-2022 30.9.1.1.5 aPSEPowerDetectionStatus: + * @c33_pw_class: detected class of a powered PD + * IEEE 802.3-2022 30.9.1.1.8 aPSEPowerClassification + * @c33_actual_pw: power currently delivered by the PSE in mW + * IEEE 802.3-2022 30.9.1.1.23 aPSEActualPower + * @c33_ext_state_info: extended state information of the PSE + * @c33_avail_pw_limit: available power limit of the PSE in mW + * IEEE 802.3-2022 145.2.5.4 pse_avail_pwr + * @c33_pw_limit_ranges: supported power limit configuration range. The driver + * is in charge of the memory allocation. + * @c33_pw_limit_nb_ranges: number of supported power limit configuration + * ranges */ struct pse_control_status { enum ethtool_podl_pse_admin_state podl_admin_state; enum ethtool_podl_pse_pw_d_status podl_pw_status; enum ethtool_c33_pse_admin_state c33_admin_state; enum ethtool_c33_pse_pw_d_status c33_pw_status; + u32 c33_pw_class; + u32 c33_actual_pw; + struct ethtool_c33_pse_ext_state_info c33_ext_state_info; + u32 c33_avail_pw_limit; + struct ethtool_c33_pse_pw_limit_range *c33_pw_limit_ranges; + u32 c33_pw_limit_nb_ranges; }; /** @@ -53,6 +73,14 @@ struct pse_control_status { * May also return negative errno. * @pi_enable: Configure the PSE PI as enabled. * @pi_disable: Configure the PSE PI as disabled. + * @pi_get_voltage: Return voltage similarly to get_voltage regulator + * callback. + * @pi_get_current_limit: Get the configured current limit similarly to + * get_current_limit regulator callback. + * @pi_set_current_limit: Configure the current limit similarly to + * set_current_limit regulator callback. + * Should not return an error when the current limit + * is set to MAX_PI_CURRENT. 
*/ struct pse_controller_ops { int (*ethtool_get_status)(struct pse_controller_dev *pcdev, @@ -62,6 +90,11 @@ struct pse_controller_ops { int (*pi_is_enabled)(struct pse_controller_dev *pcdev, int id); int (*pi_enable)(struct pse_controller_dev *pcdev, int id); int (*pi_disable)(struct pse_controller_dev *pcdev, int id); + int (*pi_get_voltage)(struct pse_controller_dev *pcdev, int id); + int (*pi_get_current_limit)(struct pse_controller_dev *pcdev, + int id); + int (*pi_set_current_limit)(struct pse_controller_dev *pcdev, + int id, int max_uA); }; struct module; @@ -148,6 +181,11 @@ int pse_ethtool_get_status(struct pse_control *psec, int pse_ethtool_set_config(struct pse_control *psec, struct netlink_ext_ack *extack, const struct pse_control_config *config); +int pse_ethtool_set_pw_limit(struct pse_control *psec, + struct netlink_ext_ack *extack, + const unsigned int pw_limit); +int pse_ethtool_get_pw_limit(struct pse_control *psec, + struct netlink_ext_ack *extack); bool pse_has_podl(struct pse_control *psec); bool pse_has_c33(struct pse_control *psec); @@ -177,6 +215,19 @@ static inline int pse_ethtool_set_config(struct pse_control *psec, return -EOPNOTSUPP; } +static inline int pse_ethtool_set_pw_limit(struct pse_control *psec, + struct netlink_ext_ack *extack, + const unsigned int pw_limit) +{ + return -EOPNOTSUPP; +} + +static inline int pse_ethtool_get_pw_limit(struct pse_control *psec, + struct netlink_ext_ack *extack) +{ + return -EOPNOTSUPP; +} + static inline bool pse_has_podl(struct pse_control *psec) { return false; diff --git a/include/linux/sched.h b/include/linux/sched.h index 3e1329e4bf1f..33dd8d9d2b85 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -36,6 +36,7 @@ #include #include #include +#include #include #include #include @@ -53,6 +54,7 @@ struct bio_list; struct blk_plug; struct bpf_local_storage; struct bpf_run_ctx; +struct bpf_net_context; struct capture_control; struct cfs_rq; struct fs_struct; @@ -981,7 +983,9 @@ struct task_struct { /* delay due to memory thrashing */ unsigned in_thrashing:1; #endif - +#ifdef CONFIG_PREEMPT_RT + struct netdev_xmit net_xmit; +#endif unsigned long atomic_flags; /* Flags requiring atomic access. 
*/ struct restart_block restart_block; @@ -1513,6 +1517,8 @@ struct task_struct { /* Used for BPF run context */ struct bpf_run_ctx *bpf_ctx; #endif + /* Used by BPF for per-TASK xdp storage */ + struct bpf_net_context *bpf_net_context; #ifdef CONFIG_GCC_PLUGIN_STACKLEAK unsigned long lowest_stack; diff --git a/include/linux/sfp.h b/include/linux/sfp.h index a45da7eef9a2..b14be59550e3 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -284,6 +284,12 @@ enum { SFF8024_ID_QSFP_8438 = 0x0c, SFF8024_ID_QSFP_8436_8636 = 0x0d, SFF8024_ID_QSFP28_8636 = 0x11, + SFF8024_ID_QSFP_DD = 0x18, + SFF8024_ID_OSFP = 0x19, + SFF8024_ID_DSFP = 0x1B, + SFF8024_ID_QSFP_PLUS_CMIS = 0x1E, + SFF8024_ID_SFP_DD_CMIS = 0x1F, + SFF8024_ID_SFP_PLUS_CMIS = 0x20, SFF8024_ENCODING_UNSPEC = 0x00, SFF8024_ENCODING_8B10B = 0x01, diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 1c2902eaebd3..9c29bdd5596d 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -706,6 +706,13 @@ typedef unsigned int sk_buff_data_t; typedef unsigned char *sk_buff_data_t; #endif +enum skb_tstamp_type { + SKB_CLOCK_REALTIME, + SKB_CLOCK_MONOTONIC, + SKB_CLOCK_TAI, + __SKB_CLOCK_MAX = SKB_CLOCK_TAI, +}; + /** * DOC: Basic sk_buff geometry * @@ -823,10 +830,8 @@ typedef unsigned char *sk_buff_data_t; * @dst_pending_confirm: need to confirm neighbour * @decrypted: Decrypted SKB * @slow_gro: state present at GRO time, slower prepare step required - * @mono_delivery_time: When set, skb->tstamp has the - * delivery_time in mono clock base (i.e. EDT). Otherwise, the - * skb->tstamp has the (rcv) timestamp at ingress and - * delivery_time at egress. + * @tstamp_type: clock base of skb->tstamp (real-time, monotonic or + * TAI delivery time); see enum skb_tstamp_type. * @napi_id: id of the NAPI struct this skb came from * @sender_cpu: (aka @napi_id) source CPU in XPS * @alloc_cpu: CPU which did the skb allocation. @@ -954,7 +959,7 @@ struct sk_buff { /* private: */ __u8 __mono_tc_offset[0]; /* public: */ - __u8 mono_delivery_time:1; /* See SKB_MONO_DELIVERY_TIME_MASK */ + __u8 tstamp_type:2; /* See skb_tstamp_type */ #ifdef CONFIG_NET_XGRESS __u8 tc_at_ingress:1; /* See TC_AT_INGRESS_MASK */ __u8 tc_skip_classify:1; @@ -1084,15 +1089,16 @@ struct sk_buff { #endif #define PKT_TYPE_OFFSET offsetof(struct sk_buff, __pkt_type_offset) -/* if you move tc_at_ingress or mono_delivery_time +/* if you move tc_at_ingress or tstamp_type * around, you also must adapt these constants. 
*/ #ifdef __BIG_ENDIAN_BITFIELD -#define SKB_MONO_DELIVERY_TIME_MASK (1 << 7) -#define TC_AT_INGRESS_MASK (1 << 6) +#define SKB_TSTAMP_TYPE_MASK (3 << 6) +#define SKB_TSTAMP_TYPE_RSHIFT (6) +#define TC_AT_INGRESS_MASK (1 << 5) #else -#define SKB_MONO_DELIVERY_TIME_MASK (1 << 0) -#define TC_AT_INGRESS_MASK (1 << 1) +#define SKB_TSTAMP_TYPE_MASK (3) +#define TC_AT_INGRESS_MASK (1 << 2) #endif #define SKB_BF_MONO_TC_OFFSET offsetof(struct sk_buff, __mono_tc_offset) @@ -1245,8 +1251,14 @@ static inline bool skb_data_unref(const struct sk_buff *skb, return true; } -void __fix_address -kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason); +void __fix_address sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, + enum skb_drop_reason reason); + +static inline void +kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) +{ + sk_skb_reason_drop(NULL, skb, reason); +} /** * kfree_skb - free an sk_buff with 'NOT_SPECIFIED' reason @@ -1492,8 +1504,14 @@ __skb_set_sw_hash(struct sk_buff *skb, __u32 hash, bool is_l4) __skb_set_hash(skb, hash, true, is_l4); } -void __skb_get_hash(struct sk_buff *skb); -u32 __skb_get_hash_symmetric(const struct sk_buff *skb); +u32 __skb_get_hash_symmetric_net(const struct net *net, const struct sk_buff *skb); + +static inline u32 __skb_get_hash_symmetric(const struct sk_buff *skb) +{ + return __skb_get_hash_symmetric_net(NULL, skb); +} + +void __skb_get_hash_net(const struct net *net, struct sk_buff *skb); u32 skb_get_poff(const struct sk_buff *skb); u32 __skb_get_poff(const struct sk_buff *skb, const void *data, const struct flow_keys_basic *keys, int hlen); @@ -1572,10 +1590,18 @@ void skb_flow_dissect_hash(const struct sk_buff *skb, struct flow_dissector *flow_dissector, void *target_container); +static inline __u32 skb_get_hash_net(const struct net *net, struct sk_buff *skb) +{ + if (!skb->l4_hash && !skb->sw_hash) + __skb_get_hash_net(net, skb); + + return skb->hash; +} + static inline __u32 skb_get_hash(struct sk_buff *skb) { if (!skb->l4_hash && !skb->sw_hash) - __skb_get_hash(skb); + __skb_get_hash_net(NULL, skb); return skb->hash; } @@ -1677,6 +1703,9 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk, struct sk_buff *skb, struct iov_iter *from, size_t length); +int zerocopy_fill_skb_from_iter(struct sk_buff *skb, + struct iov_iter *from, size_t length); + static inline int skb_zerocopy_iter_dgram(struct sk_buff *skb, struct msghdr *msg, int len) { @@ -4179,7 +4208,7 @@ static inline void skb_get_new_timestampns(const struct sk_buff *skb, static inline void __net_timestamp(struct sk_buff *skb) { skb->tstamp = ktime_get_real(); - skb->mono_delivery_time = 0; + skb->tstamp_type = SKB_CLOCK_REALTIME; } static inline ktime_t net_timedelta(ktime_t t) @@ -4188,10 +4217,36 @@ static inline ktime_t net_timedelta(ktime_t t) } static inline void skb_set_delivery_time(struct sk_buff *skb, ktime_t kt, - bool mono) + u8 tstamp_type) { skb->tstamp = kt; - skb->mono_delivery_time = kt && mono; + + if (kt) + skb->tstamp_type = tstamp_type; + else + skb->tstamp_type = SKB_CLOCK_REALTIME; +} + +static inline void skb_set_delivery_type_by_clockid(struct sk_buff *skb, + ktime_t kt, clockid_t clockid) +{ + u8 tstamp_type = SKB_CLOCK_REALTIME; + + switch (clockid) { + case CLOCK_REALTIME: + break; + case CLOCK_MONOTONIC: + tstamp_type = SKB_CLOCK_MONOTONIC; + break; + case CLOCK_TAI: + tstamp_type = SKB_CLOCK_TAI; + break; + default: + WARN_ON_ONCE(1); + kt = 0; + } + + skb_set_delivery_time(skb, kt, tstamp_type); } 
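Since skb->tstamp now carries one of three clock bases instead of a single mono bit, here is a hedged sketch of the two stamping patterns the helpers above enable; the foo_* names are illustrative, not from the patch:

#include <linux/ktime.h>
#include <linux/skbuff.h>

/* EDT-style pacing: the departure time is on the monotonic base,
 * which the old mono_delivery_time bit used to mark implicitly.
 */
static void foo_set_edt(struct sk_buff *skb, u64 edt_ns)
{
	skb_set_delivery_time(skb, ns_to_ktime(edt_ns), SKB_CLOCK_MONOTONIC);
}

/* User-selected clock: map a clockid_t straight to the skb type;
 * unknown ids clear the stamp and warn once, per the helper above.
 */
static void foo_stamp_for_clock(struct sk_buff *skb, ktime_t kt,
				clockid_t which)
{
	skb_set_delivery_type_by_clockid(skb, kt, which);
}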
DECLARE_STATIC_KEY_FALSE(netstamp_needed_key); @@ -4201,8 +4256,8 @@ DECLARE_STATIC_KEY_FALSE(netstamp_needed_key); */ static inline void skb_clear_delivery_time(struct sk_buff *skb) { - if (skb->mono_delivery_time) { - skb->mono_delivery_time = 0; + if (skb->tstamp_type) { + skb->tstamp_type = SKB_CLOCK_REALTIME; if (static_branch_unlikely(&netstamp_needed_key)) skb->tstamp = ktime_get_real(); else @@ -4212,7 +4267,7 @@ static inline void skb_clear_delivery_time(struct sk_buff *skb) static inline void skb_clear_tstamp(struct sk_buff *skb) { - if (skb->mono_delivery_time) + if (skb->tstamp_type) return; skb->tstamp = 0; @@ -4220,7 +4275,7 @@ static inline void skb_clear_tstamp(struct sk_buff *skb) static inline ktime_t skb_tstamp(const struct sk_buff *skb) { - if (skb->mono_delivery_time) + if (skb->tstamp_type) return 0; return skb->tstamp; @@ -4228,7 +4283,7 @@ static inline ktime_t skb_tstamp(const struct sk_buff *skb) static inline ktime_t skb_tstamp_cond(const struct sk_buff *skb, bool cond) { - if (!skb->mono_delivery_time && skb->tstamp) + if (skb->tstamp_type != SKB_CLOCK_MONOTONIC && skb->tstamp) return skb->tstamp; if (static_branch_unlikely(&netstamp_needed_key) || cond) diff --git a/include/linux/skbuff_ref.h b/include/linux/skbuff_ref.h index 11f0a4063403..16c241a23472 100644 --- a/include/linux/skbuff_ref.h +++ b/include/linux/skbuff_ref.h @@ -32,13 +32,13 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f) __skb_frag_ref(&skb_shinfo(skb)->frags[f]); } -bool napi_pp_put_page(struct page *page); +bool napi_pp_put_page(netmem_ref netmem); static inline void skb_page_unref(struct page *page, bool recycle) { #ifdef CONFIG_PAGE_POOL - if (recycle && napi_pp_put_page(page)) + if (recycle && napi_pp_put_page(page_to_netmem(page))) return; #endif put_page(page); diff --git a/include/linux/socket.h b/include/linux/socket.h index c1f16cdab677..df9cdb8bbfb8 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -76,7 +76,7 @@ struct msghdr { __kernel_size_t msg_controllen; /* ancillary data buffer length */ struct kiocb *msg_iocb; /* ptr to iocb for async requests */ struct ubuf_info *msg_ubuf; - int (*sg_from_iter)(struct sock *sk, struct sk_buff *skb, + int (*sg_from_iter)(struct sk_buff *skb, struct iov_iter *from, size_t length); }; diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index f92c195c76ed..84e13bd5df28 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -13,7 +13,7 @@ #define __STMMAC_PLATFORM_DATA #include -#include +#include #define MTL_MAX_RX_QUEUES 8 #define MTL_MAX_TX_QUEUES 8 @@ -82,8 +82,8 @@ struct stmmac_priv; struct stmmac_mdio_bus_data { unsigned int phy_mask; - unsigned int has_xpcs; - unsigned int xpcs_an_inband; + unsigned int pcs_mask; + unsigned int default_an_inband; int *irqs; int probed_phy_irq; bool needs_reset; @@ -271,6 +271,8 @@ struct plat_stmmacenet_data { void (*dump_debug_regs)(void *priv); int (*pcs_init)(struct stmmac_priv *priv); void (*pcs_exit)(struct stmmac_priv *priv); + struct phylink_pcs *(*select_pcs)(struct stmmac_priv *priv, + phy_interface_t interface); void *bsp_priv; struct clk *stmmac_clk; struct clk *pclk; diff --git a/include/net/af_unix.h b/include/net/af_unix.h index b6eedf7650da..63129c79b8cb 100644 --- a/include/net/af_unix.h +++ b/include/net/af_unix.h @@ -96,20 +96,6 @@ struct unix_sock { #define unix_state_lock(s) spin_lock(&unix_sk(s)->lock) #define unix_state_unlock(s) spin_unlock(&unix_sk(s)->lock) -enum unix_socket_lock_class { - U_LOCK_NORMAL, - U_LOCK_SECOND, /* 
for double locking, see unix_state_double_lock(). */ - U_LOCK_DIAG, /* used while dumping icons, see sk_diag_dump_icons(). */ - U_LOCK_GC_LISTENER, /* used for listening socket while determining gc - * candidates to close a small race window. - */ -}; - -static inline void unix_state_lock_nested(struct sock *sk, - enum unix_socket_lock_class subclass) -{ - spin_lock_nested(&unix_sk(sk)->lock, subclass); -} #define peer_wait peer_wq.wait diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index b3228bd6cd6b..5d655e109b2c 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -441,6 +441,10 @@ typedef void (*hci_req_complete_t)(struct hci_dev *hdev, u8 status, u16 opcode); typedef void (*hci_req_complete_skb_t)(struct hci_dev *hdev, u8 status, u16 opcode, struct sk_buff *skb); +void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, + hci_req_complete_t *req_complete, + hci_req_complete_skb_t *req_complete_skb); + #define HCI_REQ_START BIT(0) #define HCI_REQ_SKB BIT(1) diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index c43716edf205..31020891fc68 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -91,8 +91,6 @@ struct discovery_state { s8 rssi; u16 uuid_count; u8 (*uuids)[16]; - unsigned long scan_start; - unsigned long scan_duration; unsigned long name_resolve_timeout; }; @@ -478,7 +476,6 @@ struct hci_dev { unsigned int iso_pkts; unsigned long acl_last_tx; - unsigned long sco_last_tx; unsigned long le_last_tx; __u8 le_tx_def_phys; @@ -530,7 +527,6 @@ struct hci_dev { struct discovery_state discovery; - int discovery_old_state; bool discovery_paused; int advertising_old_state; bool advertising_paused; @@ -649,6 +645,7 @@ struct hci_dev { int (*get_codec_config_data)(struct hci_dev *hdev, __u8 type, struct bt_codec *codec, __u8 *vnd_len, __u8 **vnd_data); + u8 (*classify_pkt_type)(struct hci_dev *hdev, struct sk_buff *skb); }; #define HCI_PHY_HANDLE(handle) (handle & 0xff) @@ -890,8 +887,6 @@ static inline void hci_discovery_filter_clear(struct hci_dev *hdev) hdev->discovery.uuid_count = 0; kfree(hdev->discovery.uuids); hdev->discovery.uuids = NULL; - hdev->discovery.scan_start = 0; - hdev->discovery.scan_duration = 0; } bool hci_discovery_active(struct hci_dev *hdev); diff --git a/include/net/bluetooth/hci_sock.h b/include/net/bluetooth/hci_sock.h index 9949870f7d78..13e8cd4414a1 100644 --- a/include/net/bluetooth/hci_sock.h +++ b/include/net/bluetooth/hci_sock.h @@ -144,7 +144,7 @@ struct hci_dev_req { struct hci_dev_list_req { __u16 dev_num; - struct hci_dev_req dev_req[]; /* hci_dev_req structures */ + struct hci_dev_req dev_req[] __counted_by(dev_num); }; struct hci_conn_list_req { diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h index 534c3386e714..75e052909b5f 100644 --- a/include/net/bluetooth/hci_sync.h +++ b/include/net/bluetooth/hci_sync.h @@ -8,6 +8,23 @@ #define UINT_PTR(_handle) ((void *)((uintptr_t)_handle)) #define PTR_UINT(_ptr) ((uintptr_t)((void *)_ptr)) +#define HCI_REQ_DONE 0 +#define HCI_REQ_PEND 1 +#define HCI_REQ_CANCELED 2 + +#define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock) +#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock) + +struct hci_request { + struct hci_dev *hdev; + struct sk_buff_head cmd_q; + + /* If something goes wrong when building the HCI request, the error + * value is stored in this field. 
+ */ + int err; +}; + typedef int (*hci_cmd_sync_work_func_t)(struct hci_dev *hdev, void *data); typedef void (*hci_cmd_sync_work_destroy_t)(struct hci_dev *hdev, void *data, int err); @@ -20,6 +37,10 @@ struct hci_cmd_sync_work_entry { }; struct adv_info; + +struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, struct sock *sk); + /* Function with sync suffix shall not be called with hdev->lock held as they * wait the command to complete and in the meantime an event could be received * which could attempt to acquire hdev->lock causing a deadlock. @@ -131,6 +152,8 @@ int hci_update_discoverable(struct hci_dev *hdev); int hci_update_connectable_sync(struct hci_dev *hdev); +int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp); + int hci_start_discovery_sync(struct hci_dev *hdev); int hci_stop_discovery_sync(struct hci_dev *hdev); @@ -138,6 +161,7 @@ int hci_suspend_sync(struct hci_dev *hdev); int hci_resume_sync(struct hci_dev *hdev); struct hci_conn; +struct hci_conn_params; int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason); @@ -156,3 +180,5 @@ int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn); int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn); int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn); +int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn, + struct hci_conn_params *params); diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h index 99d26879b02a..c05882476900 100644 --- a/include/net/bluetooth/rfcomm.h +++ b/include/net/bluetooth/rfcomm.h @@ -355,7 +355,7 @@ struct rfcomm_dev_info { struct rfcomm_dev_list_req { u16 dev_num; - struct rfcomm_dev_info dev_info[]; + struct rfcomm_dev_info dev_info[] __counted_by(dev_num); }; int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg); diff --git a/include/net/caif/caif_layer.h b/include/net/caif/caif_layer.h index 51f7bb42a936..0f45d875905f 100644 --- a/include/net/caif/caif_layer.h +++ b/include/net/caif/caif_layer.h @@ -11,9 +11,7 @@ struct cflayer; struct cfpkt; -struct cfpktq; struct caif_payload_info; -struct caif_packet_funcs; #define CAIF_LAYER_NAME_SZ 16 diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index cbf1664dc569..192d72c8b465 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -125,33 +125,36 @@ struct wiphy; * @IEEE80211_CHAN_CAN_MONITOR: This channel can be used for monitor * mode even in the presence of other (regulatory) restrictions, * even if it is otherwise disabled. + * @IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP: Allow using this channel for AP operation + * with very low power (VLP), even if otherwise set to NO_IR. 
*/ enum ieee80211_channel_flags { - IEEE80211_CHAN_DISABLED = 1<<0, - IEEE80211_CHAN_NO_IR = 1<<1, - IEEE80211_CHAN_PSD = 1<<2, - IEEE80211_CHAN_RADAR = 1<<3, - IEEE80211_CHAN_NO_HT40PLUS = 1<<4, - IEEE80211_CHAN_NO_HT40MINUS = 1<<5, - IEEE80211_CHAN_NO_OFDM = 1<<6, - IEEE80211_CHAN_NO_80MHZ = 1<<7, - IEEE80211_CHAN_NO_160MHZ = 1<<8, - IEEE80211_CHAN_INDOOR_ONLY = 1<<9, - IEEE80211_CHAN_IR_CONCURRENT = 1<<10, - IEEE80211_CHAN_NO_20MHZ = 1<<11, - IEEE80211_CHAN_NO_10MHZ = 1<<12, - IEEE80211_CHAN_NO_HE = 1<<13, - IEEE80211_CHAN_1MHZ = 1<<14, - IEEE80211_CHAN_2MHZ = 1<<15, - IEEE80211_CHAN_4MHZ = 1<<16, - IEEE80211_CHAN_8MHZ = 1<<17, - IEEE80211_CHAN_16MHZ = 1<<18, - IEEE80211_CHAN_NO_320MHZ = 1<<19, - IEEE80211_CHAN_NO_EHT = 1<<20, - IEEE80211_CHAN_DFS_CONCURRENT = 1<<21, - IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT = 1<<22, - IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = 1<<23, - IEEE80211_CHAN_CAN_MONITOR = 1<<24, + IEEE80211_CHAN_DISABLED = BIT(0), + IEEE80211_CHAN_NO_IR = BIT(1), + IEEE80211_CHAN_PSD = BIT(2), + IEEE80211_CHAN_RADAR = BIT(3), + IEEE80211_CHAN_NO_HT40PLUS = BIT(4), + IEEE80211_CHAN_NO_HT40MINUS = BIT(5), + IEEE80211_CHAN_NO_OFDM = BIT(6), + IEEE80211_CHAN_NO_80MHZ = BIT(7), + IEEE80211_CHAN_NO_160MHZ = BIT(8), + IEEE80211_CHAN_INDOOR_ONLY = BIT(9), + IEEE80211_CHAN_IR_CONCURRENT = BIT(10), + IEEE80211_CHAN_NO_20MHZ = BIT(11), + IEEE80211_CHAN_NO_10MHZ = BIT(12), + IEEE80211_CHAN_NO_HE = BIT(13), + IEEE80211_CHAN_1MHZ = BIT(14), + IEEE80211_CHAN_2MHZ = BIT(15), + IEEE80211_CHAN_4MHZ = BIT(16), + IEEE80211_CHAN_8MHZ = BIT(17), + IEEE80211_CHAN_16MHZ = BIT(18), + IEEE80211_CHAN_NO_320MHZ = BIT(19), + IEEE80211_CHAN_NO_EHT = BIT(20), + IEEE80211_CHAN_DFS_CONCURRENT = BIT(21), + IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT = BIT(22), + IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT = BIT(23), + IEEE80211_CHAN_CAN_MONITOR = BIT(24), + IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP = BIT(25), }; #define IEEE80211_CHAN_NO_HT40 \ @@ -229,13 +232,13 @@ struct ieee80211_channel { * @IEEE80211_RATE_SUPPORTS_10MHZ: Rate can be used in 10 MHz mode */ enum ieee80211_rate_flags { - IEEE80211_RATE_SHORT_PREAMBLE = 1<<0, - IEEE80211_RATE_MANDATORY_A = 1<<1, - IEEE80211_RATE_MANDATORY_B = 1<<2, - IEEE80211_RATE_MANDATORY_G = 1<<3, - IEEE80211_RATE_ERP_G = 1<<4, - IEEE80211_RATE_SUPPORTS_5MHZ = 1<<5, - IEEE80211_RATE_SUPPORTS_10MHZ = 1<<6, + IEEE80211_RATE_SHORT_PREAMBLE = BIT(0), + IEEE80211_RATE_MANDATORY_A = BIT(1), + IEEE80211_RATE_MANDATORY_B = BIT(2), + IEEE80211_RATE_MANDATORY_G = BIT(3), + IEEE80211_RATE_ERP_G = BIT(4), + IEEE80211_RATE_SUPPORTS_5MHZ = BIT(5), + IEEE80211_RATE_SUPPORTS_10MHZ = BIT(6), }; /** @@ -1595,6 +1598,7 @@ struct cfg80211_color_change_settings { * * Used to pass interface combination parameters * + * @radio_idx: wiphy radio index or -1 for global * @num_different_channels: the number of different channels we want * to use for verification * @radar_detect: a bitmap where each bit corresponds to a channel @@ -1608,6 +1612,7 @@ struct cfg80211_color_change_settings { * the verification */ struct iface_combination_params { + int radio_idx; int num_different_channels; u8 radar_detect; int iftype_num[NUM_NL80211_IFTYPES]; @@ -1957,9 +1962,9 @@ struct rate_info { * @BSS_PARAM_FLAGS_SHORT_SLOT_TIME: whether short slot time is enabled */ enum bss_param_flags { - BSS_PARAM_FLAGS_CTS_PROT = 1<<0, - BSS_PARAM_FLAGS_SHORT_PREAMBLE = 1<<1, - BSS_PARAM_FLAGS_SHORT_SLOT_TIME = 1<<2, + BSS_PARAM_FLAGS_CTS_PROT = BIT(0), + BSS_PARAM_FLAGS_SHORT_PREAMBLE = BIT(1), + BSS_PARAM_FLAGS_SHORT_SLOT_TIME = BIT(2), }; /** @@ -2200,7 +2205,7 
@@ struct cfg80211_sar_sub_specs { struct cfg80211_sar_specs { enum nl80211_sar_type type; u32 num_sub_specs; - struct cfg80211_sar_sub_specs sub_specs[]; + struct cfg80211_sar_sub_specs sub_specs[] __counted_by(num_sub_specs); }; @@ -2266,13 +2271,13 @@ static inline int cfg80211_get_station(struct net_device *dev, * @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address */ enum monitor_flags { - MONITOR_FLAG_CHANGED = 1<<__NL80211_MNTR_FLAG_INVALID, - MONITOR_FLAG_FCSFAIL = 1<center_freq) % 16 == 5; } +/** + * cfg80211_radio_chandef_valid - Check if the radio supports the chandef + * + * @radio: wiphy radio + * @chandef: chandef for current channel + * + * Return: whether or not the given chandef is valid for the given radio + */ +bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio, + const struct cfg80211_chan_def *chandef); + /** * ieee80211_get_response_rate - get basic rate for a given rate * @@ -8785,6 +8859,34 @@ static inline void cfg80211_report_obss_beacon(struct wiphy *wiphy, sig_dbm); } +/** + * struct cfg80211_beaconing_check_config - beacon check configuration + * @iftype: the interface type to check for + * @relax: allow IR-relaxation conditions to apply (e.g. another + * interface connected already on the same channel) + * NOTE: If this is set, wiphy mutex must be held. + * @reg_power: &enum ieee80211_ap_reg_power value indicating the + * advertised/used 6 GHz regulatory power setting + */ +struct cfg80211_beaconing_check_config { + enum nl80211_iftype iftype; + enum ieee80211_ap_reg_power reg_power; + bool relax; +}; + +/** + * cfg80211_reg_check_beaconing - check if beaconing is allowed + * @wiphy: the wiphy + * @chandef: the channel definition + * @cfg: additional parameters for the checking + * + * Return: %true if there is no secondary channel or the secondary channel(s) + * can be used for beaconing (i.e. is not a radar channel etc.) + */ +bool cfg80211_reg_check_beaconing(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef, + struct cfg80211_beaconing_check_config *cfg); + /** * cfg80211_reg_can_beacon - check if beaconing is allowed * @wiphy: the wiphy @@ -8794,9 +8896,17 @@ static inline void cfg80211_report_obss_beacon(struct wiphy *wiphy, * Return: %true if there is no secondary channel or the secondary channel(s) * can be used for beaconing (i.e. is not a radar channel etc.) */ -bool cfg80211_reg_can_beacon(struct wiphy *wiphy, - struct cfg80211_chan_def *chandef, - enum nl80211_iftype iftype); +static inline bool +cfg80211_reg_can_beacon(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef, + enum nl80211_iftype iftype) +{ + struct cfg80211_beaconing_check_config config = { + .iftype = iftype, + }; + + return cfg80211_reg_check_beaconing(wiphy, chandef, &config); +} /** * cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation @@ -8811,9 +8921,18 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy, * * Context: Requires the wiphy mutex to be held. 
*/ -bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, - struct cfg80211_chan_def *chandef, - enum nl80211_iftype iftype); +static inline bool +cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef, + enum nl80211_iftype iftype) +{ + struct cfg80211_beaconing_check_config config = { + .iftype = iftype, + .relax = true, + }; + + return cfg80211_reg_check_beaconing(wiphy, chandef, &config); +} /** * cfg80211_ch_switch_notify - update wdev channel and notify userspace diff --git a/include/net/devlink.h b/include/net/devlink.h index 35eb0f884386..db5eff6cb60f 100644 --- a/include/net/devlink.h +++ b/include/net/devlink.h @@ -352,7 +352,7 @@ struct devlink_dpipe_table { bool resource_valid; u64 resource_id; u64 resource_units; - struct devlink_dpipe_table_ops *table_ops; + const struct devlink_dpipe_table_ops *table_ops; struct rcu_head rcu; }; @@ -1751,7 +1751,7 @@ void devl_sb_unregister(struct devlink *devlink, unsigned int sb_index); void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index); int devl_dpipe_table_register(struct devlink *devlink, const char *table_name, - struct devlink_dpipe_table_ops *table_ops, + const struct devlink_dpipe_table_ops *table_ops, void *priv, bool counter_control_extern); void devl_dpipe_table_unregister(struct devlink *devlink, const char *table_name); diff --git a/include/net/dsa.h b/include/net/dsa.h index b60e7e410aba..b06f97ae3da1 100644 --- a/include/net/dsa.h +++ b/include/net/dsa.h @@ -53,6 +53,7 @@ struct tc_action; #define DSA_TAG_PROTO_RTL8_4T_VALUE 25 #define DSA_TAG_PROTO_RZN1_A5PSW_VALUE 26 #define DSA_TAG_PROTO_LAN937X_VALUE 27 +#define DSA_TAG_PROTO_VSC73XX_8021Q_VALUE 28 enum dsa_tag_protocol { DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE, @@ -83,6 +84,7 @@ enum dsa_tag_protocol { DSA_TAG_PROTO_RTL8_4T = DSA_TAG_PROTO_RTL8_4T_VALUE, DSA_TAG_PROTO_RZN1_A5PSW = DSA_TAG_PROTO_RZN1_A5PSW_VALUE, DSA_TAG_PROTO_LAN937X = DSA_TAG_PROTO_LAN937X_VALUE, + DSA_TAG_PROTO_VSC73XX_8021Q = DSA_TAG_PROTO_VSC73XX_8021Q_VALUE, }; struct dsa_switch; @@ -882,15 +884,9 @@ struct dsa_switch_ops { struct phylink_pcs *(*phylink_mac_select_pcs)(struct dsa_switch *ds, int port, phy_interface_t iface); - int (*phylink_mac_prepare)(struct dsa_switch *ds, int port, - unsigned int mode, - phy_interface_t interface); void (*phylink_mac_config)(struct dsa_switch *ds, int port, unsigned int mode, const struct phylink_link_state *state); - int (*phylink_mac_finish)(struct dsa_switch *ds, int port, - unsigned int mode, - phy_interface_t interface); void (*phylink_mac_link_down)(struct dsa_switch *ds, int port, unsigned int mode, phy_interface_t interface); @@ -940,7 +936,7 @@ struct dsa_switch_ops { * ethtool timestamp info */ int (*get_ts_info)(struct dsa_switch *ds, int port, - struct ethtool_ts_info *ts); + struct kernel_ethtool_ts_info *ts); /* * ethtool MAC merge layer diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h index 9ab376d1a677..ced79dc8e856 100644 --- a/include/net/flow_dissector.h +++ b/include/net/flow_dissector.h @@ -7,6 +7,7 @@ #include #include #include +#include struct bpf_prog; struct net; @@ -16,7 +17,8 @@ struct sk_buff; * struct flow_dissector_key_control: * @thoff: Transport header offset * @addr_type: Type of key. One of FLOW_DISSECTOR_KEY_* - * @flags: Key flags. Any of FLOW_DIS_(IS_FRAGMENT|FIRST_FRAGENCAPSULATION) + * @flags: Key flags. 
+ * Any of FLOW_DIS_(IS_FRAGMENT|FIRST_FRAG|ENCAPSULATION|F_*) */ struct flow_dissector_key_control { u16 thoff; @@ -24,9 +26,20 @@ struct flow_dissector_key_control { u32 flags; }; -#define FLOW_DIS_IS_FRAGMENT BIT(0) -#define FLOW_DIS_FIRST_FRAG BIT(1) -#define FLOW_DIS_ENCAPSULATION BIT(2) +/* The control flags are kept in sync with TCA_FLOWER_KEY_FLAGS_*, as those + * flags are exposed to userspace in some error paths, ie. unsupported flags. + */ +enum flow_dissector_ctrl_flags { + FLOW_DIS_IS_FRAGMENT = TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT, + FLOW_DIS_FIRST_FRAG = TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST, + FLOW_DIS_F_TUNNEL_CSUM = TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM, + FLOW_DIS_F_TUNNEL_DONT_FRAGMENT = TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT, + FLOW_DIS_F_TUNNEL_OAM = TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM, + FLOW_DIS_F_TUNNEL_CRIT_OPT = TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT, + + /* These flags are internal to the kernel */ + FLOW_DIS_ENCAPSULATION = (TCA_FLOWER_KEY_FLAGS_MAX << 1), +}; enum flow_dissect_ret { FLOW_DISSECT_RET_OUT_GOOD, @@ -433,6 +446,8 @@ static inline bool flow_keys_have_l4(const struct flow_keys *keys) } u32 flow_hash_from_keys(struct flow_keys *keys); +u32 flow_hash_from_keys_seed(struct flow_keys *keys, + const siphash_key_t *keyval); void skb_flow_get_icmp_tci(const struct sk_buff *skb, struct flow_dissector_key_icmp *key_icmp, const void *data, int thoff, int hlen); diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h index ec9f80509f60..292cd8f4b762 100644 --- a/include/net/flow_offload.h +++ b/include/net/flow_offload.h @@ -471,6 +471,28 @@ static inline bool flow_rule_is_supp_control_flags(const u32 supp_flags, return false; } +/** + * flow_rule_is_supp_enc_control_flags() - check for supported control flags + * @supp_enc_flags: encapsulation control flags supported by driver + * @enc_ctrl_flags: encapsulation control flags present in rule + * @extack: The netlink extended ACK for reporting errors. + * + * Return: true if only supported control flags are set, false otherwise. + */ +static inline bool flow_rule_is_supp_enc_control_flags(const u32 supp_enc_flags, + const u32 enc_ctrl_flags, + struct netlink_ext_ack *extack) +{ + if (likely((enc_ctrl_flags & ~supp_enc_flags) == 0)) + return true; + + NL_SET_ERR_MSG_FMT_MOD(extack, + "Unsupported match on enc_control.flags %#x", + enc_ctrl_flags); + + return false; +} + /** * flow_rule_has_control_flags() - check for presence of any control flags * @ctrl_flags: control flags present in rule @@ -484,6 +506,19 @@ static inline bool flow_rule_has_control_flags(const u32 ctrl_flags, return !flow_rule_is_supp_control_flags(0, ctrl_flags, extack); } +/** + * flow_rule_has_enc_control_flags() - check for presence of any control flags + * @enc_ctrl_flags: encapsulation control flags present in rule + * @extack: The netlink extended ACK for reporting errors. + * + * Return: true if control flags are set, false otherwise. + */ +static inline bool flow_rule_has_enc_control_flags(const u32 enc_ctrl_flags, + struct netlink_ext_ack *extack) +{ + return !flow_rule_is_supp_enc_control_flags(0, enc_ctrl_flags, extack); +} + /** * flow_rule_match_has_control_flags() - match and check for any control flags * @rule: The flow_rule under evaluation. 
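To make the intended calling pattern of the enc_control helpers concrete, here is a small sketch of a flower offload parser; foo_parse_enc_flags() and the set of flags this imaginary hardware supports are assumptions, while the match accessors are the existing flow_offload ones:

#include <net/flow_offload.h>

static int foo_parse_enc_flags(struct flow_rule *rule,
			       struct netlink_ext_ack *extack)
{
	struct flow_match_enc_control match;

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_CONTROL))
		return 0;

	flow_rule_match_enc_control(rule, &match);

	/* Pretend the hardware can only match on tunnel checksum and
	 * don't-fragment; any other set flag is reported via extack
	 * by flow_rule_is_supp_enc_control_flags() and rejected.
	 */
	if (!flow_rule_is_supp_enc_control_flags(FLOW_DIS_F_TUNNEL_CSUM |
						 FLOW_DIS_F_TUNNEL_DONT_FRAGMENT,
						 match.mask->flags, extack))
		return -EOPNOTSUPP;

	return 0;
}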
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h index 925bac726a92..91762faecc13 100644 --- a/include/net/ieee80211_radiotap.h +++ b/include/net/ieee80211_radiotap.h @@ -582,6 +582,7 @@ enum ieee80211_radiotap_eht_usig_tb { /** * ieee80211_get_radiotap_len - get radiotap header length * @data: pointer to the header + * Return: the radiotap header length */ static inline u16 ieee80211_get_radiotap_len(const char *data) { diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h index 153960663ce4..5af6eb14c5db 100644 --- a/include/net/inet_frag.h +++ b/include/net/inet_frag.h @@ -76,7 +76,7 @@ struct frag_v6_compare_key { * @stamp: timestamp of the last received fragment * @len: total length of the original datagram * @meat: length of received fragments so far - * @mono_delivery_time: stamp has a mono delivery time (EDT) + * @tstamp_type: clock base of @stamp; see enum skb_tstamp_type * @flags: fragment queue flags * @max_size: maximum received fragment size * @fqdir: pointer to struct fqdir @@ -97,7 +97,7 @@ struct inet_frag_queue { ktime_t stamp; int len; int meat; - u8 mono_delivery_time; + u8 tstamp_type; __u8 flags; u16 max_size; struct fqdir *fqdir; diff --git a/include/net/inet_timewait_sock.h b/include/net/inet_timewait_sock.h index 2a536eea9424..f88b68269012 100644 --- a/include/net/inet_timewait_sock.h +++ b/include/net/inet_timewait_sock.h @@ -93,17 +93,14 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, struct inet_timewait_death_row *dr, const int state); -void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, - struct inet_hashinfo *hashinfo); +void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw, + struct sock *sk, + struct inet_hashinfo *hashinfo, + int timeo); void __inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo, bool rearm); -static inline void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo) -{ - __inet_twsk_schedule(tw, timeo, false); -} - static inline void inet_twsk_reschedule(struct inet_timewait_sock *tw, int timeo) { __inet_twsk_schedule(tw, timeo, true); } diff --git a/include/net/ip.h b/include/net/ip.h index 6d735e00d3f3..c5606cadb1a5 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -506,8 +506,7 @@ static inline unsigned int ip_skb_dst_mtu(struct sock *sk, return mtu - lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu); } -struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx, - int fc_mx_len, +struct dst_metrics *ip_fib_metrics_init(struct nlattr *fc_mx, int fc_mx_len, struct netlink_ext_ack *extack); static inline void ip_fib_metrics_put(struct dst_metrics *fib_metrics) { diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h index a18ed24fed94..6dbdf60b342f 100644 --- a/include/net/ip6_route.h +++ b/include/net/ip6_route.h @@ -127,18 +127,26 @@ void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args, static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *f6i, const struct in6_addr *daddr, - unsigned int prefs, + unsigned int prefs, int l3mdev_index, struct in6_addr *saddr) { + struct net_device *l3mdev; + struct net_device *dev; + bool same_vrf; int err = 0; - if (f6i && f6i->fib6_prefsrc.plen) { - *saddr = f6i->fib6_prefsrc.addr; - } else { - struct net_device *dev = f6i ? 
fib6_info_nh_dev(f6i) : NULL; + rcu_read_lock(); - err = ipv6_dev_get_saddr(net, dev, daddr, prefs, saddr); - } + l3mdev = dev_get_by_index_rcu(net, l3mdev_index); + if (!f6i || !f6i->fib6_prefsrc.plen || l3mdev) + dev = f6i ? fib6_info_nh_dev(f6i) : NULL; + same_vrf = !l3mdev || l3mdev_master_dev_rcu(dev) == l3mdev; + if (f6i && f6i->fib6_prefsrc.plen && same_vrf) + *saddr = f6i->fib6_prefsrc.addr; + else + err = ipv6_dev_get_saddr(net, same_vrf ? dev : l3mdev, daddr, prefs, saddr); + + rcu_read_unlock(); return err; } diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index 9b2f69ba5e49..6e7984bfb986 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h @@ -520,7 +520,35 @@ void fib_nhc_update_mtu(struct fib_nh_common *nhc, u32 new, u32 orig); #ifdef CONFIG_IP_ROUTE_MULTIPATH int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4, const struct sk_buff *skb, struct flow_keys *flkeys); + +static void +fib_multipath_hash_construct_key(siphash_key_t *key, u32 mp_seed) +{ + u64 mp_seed_64 = mp_seed; + + key->key[0] = (mp_seed_64 << 32) | mp_seed_64; + key->key[1] = key->key[0]; +} + +static inline u32 fib_multipath_hash_from_keys(const struct net *net, + struct flow_keys *keys) +{ + siphash_aligned_key_t hash_key; + u32 mp_seed; + + mp_seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).mp_seed; + fib_multipath_hash_construct_key(&hash_key, mp_seed); + + return flow_hash_from_keys_seed(keys, &hash_key); +} +#else +static inline u32 fib_multipath_hash_from_keys(const struct net *net, + struct flow_keys *keys) +{ + return flow_hash_from_keys(keys); +} #endif + int fib_check_nh(struct net *net, struct fib_nh *nh, u32 table, u8 scope, struct netlink_ext_ack *extack); void fib_select_multipath(struct fib_result *res, int hash); diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h index 485c39a89866..11cefd50704d 100644 --- a/include/net/ipv6_stubs.h +++ b/include/net/ipv6_stubs.h @@ -9,6 +9,7 @@ #include #include #include +#include /* structs from net/ip6_fib.h */ struct fib6_info; @@ -72,6 +73,8 @@ struct ipv6_stub { int (*output)(struct net *, struct sock *, struct sk_buff *)); struct net_device *(*ipv6_dev_find)(struct net *net, const struct in6_addr *addr, struct net_device *dev); + int (*ip6_xmit)(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, + __u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority); }; extern const struct ipv6_stub *ipv6_stub __read_mostly; diff --git a/include/net/libeth/cache.h b/include/net/libeth/cache.h new file mode 100644 index 000000000000..bdb0c043ce61 --- /dev/null +++ b/include/net/libeth/cache.h @@ -0,0 +1,66 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Copyright (C) 2024 Intel Corporation */ + +#ifndef __LIBETH_CACHE_H +#define __LIBETH_CACHE_H + +#include + +/** + * libeth_cacheline_group_assert - make sure cacheline group size is expected + * @type: type of the structure containing the group + * @grp: group name inside the struct + * @sz: expected group size + */ +#if defined(CONFIG_64BIT) && SMP_CACHE_BYTES == 64 +#define libeth_cacheline_group_assert(type, grp, sz) \ + static_assert(offsetof(type, __cacheline_group_end__##grp) - \ + offsetofend(type, __cacheline_group_begin__##grp) == \ + (sz)) +#define __libeth_cacheline_struct_assert(type, sz) \ + static_assert(sizeof(type) == (sz)) +#else /* !CONFIG_64BIT || SMP_CACHE_BYTES != 64 */ +#define libeth_cacheline_group_assert(type, grp, sz) \ + static_assert(offsetof(type, __cacheline_group_end__##grp) - \ + offsetofend(type, 
__cacheline_group_begin__##grp) <= \ + (sz)) +#define __libeth_cacheline_struct_assert(type, sz) \ + static_assert(sizeof(type) <= (sz)) +#endif /* !CONFIG_64BIT || SMP_CACHE_BYTES != 64 */ + +#define __libeth_cls1(sz1) SMP_CACHE_ALIGN(sz1) +#define __libeth_cls2(sz1, sz2) (SMP_CACHE_ALIGN(sz1) + SMP_CACHE_ALIGN(sz2)) +#define __libeth_cls3(sz1, sz2, sz3) \ + (SMP_CACHE_ALIGN(sz1) + SMP_CACHE_ALIGN(sz2) + SMP_CACHE_ALIGN(sz3)) +#define __libeth_cls(...) \ + CONCATENATE(__libeth_cls, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__) + +/** + * libeth_cacheline_struct_assert - make sure CL-based struct size is expected + * @type: type of the struct + * @...: from 1 to 3 CL group sizes (read-mostly, read-write, cold) + * + * When a struct contains several CL groups, it's difficult to predict its size + * on different architectures. The macro instead takes sizes of all of the + * groups the structure contains and generates the final struct size. + */ +#define libeth_cacheline_struct_assert(type, ...) \ + __libeth_cacheline_struct_assert(type, __libeth_cls(__VA_ARGS__)); \ + static_assert(__alignof(type) >= SMP_CACHE_BYTES) + +/** + * libeth_cacheline_set_assert - make sure CL-based struct layout is expected + * @type: type of the struct + * @ro: expected size of the read-mostly group + * @rw: expected size of the read-write group + * @c: expected size of the cold group + * + * Check that each group size is expected and then do final struct size check. + */ +#define libeth_cacheline_set_assert(type, ro, rw, c) \ + libeth_cacheline_group_assert(type, read_mostly, ro); \ + libeth_cacheline_group_assert(type, read_write, rw); \ + libeth_cacheline_group_assert(type, cold, c); \ + libeth_cacheline_struct_assert(type, ro, rw, c) + +#endif /* __LIBETH_CACHE_H */ diff --git a/include/net/libeth/rx.h b/include/net/libeth/rx.h index f29ea3e34c6c..43574bd6612f 100644 --- a/include/net/libeth/rx.h +++ b/include/net/libeth/rx.h @@ -17,6 +17,8 @@ #define LIBETH_MAX_HEADROOM LIBETH_SKB_HEADROOM /* Link layer / L2 overhead: Ethernet, 2 VLAN tags (C + S), FCS */ #define LIBETH_RX_LL_LEN (ETH_HLEN + 2 * VLAN_HLEN + ETH_FCS_LEN) +/* Maximum supported L2-L4 header length */ +#define LIBETH_MAX_HEAD roundup_pow_of_two(max(MAX_HEADER, 256)) /* Always use order-0 pages */ #define LIBETH_RX_PAGE_ORDER 0 @@ -43,6 +45,18 @@ struct libeth_fqe { u32 truesize; } __aligned_largest; +/** + * enum libeth_fqe_type - enum representing types of Rx buffers + * @LIBETH_FQE_MTU: buffer size is determined by MTU + * @LIBETH_FQE_SHORT: buffer size is smaller than MTU, for short frames + * @LIBETH_FQE_HDR: buffer size is ```LIBETH_MAX_HEAD```-sized, for headers + */ +enum libeth_fqe_type { + LIBETH_FQE_MTU = 0U, + LIBETH_FQE_SHORT, + LIBETH_FQE_HDR, +}; + /** * struct libeth_fq - structure representing a buffer (fill) queue * @fp: hotpath part of the structure @@ -50,6 +64,8 @@ struct libeth_fqe { * @fqes: array of Rx buffers * @truesize: size to allocate per buffer, w/overhead * @count: number of descriptors/buffers the queue has + * @type: type of the buffers this queue has + * @hsplit: flag whether header split is enabled * @buf_len: HW-writeable length per each buffer * @nid: ID of the closest NUMA node with memory */ @@ -63,6 +79,9 @@ struct libeth_fq { ); /* Cold fields */ + enum libeth_fqe_type type:2; + bool hsplit:1; + u32 buf_len; int nid; }; diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h index 53823d61d8b6..a4bea0f33188 100644 --- a/include/net/llc_c_st.h +++ b/include/net/llc_c_st.h @@ -44,8 +44,8 @@ struct 
llc_conn_state_trans { }; struct llc_conn_state { - u8 current_state; - struct llc_conn_state_trans **transitions; + u8 current_state; + const struct llc_conn_state_trans **transitions; }; extern struct llc_conn_state llc_conn_state_table[]; diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h index ed5b2fa40d32..fca49d483d20 100644 --- a/include/net/llc_s_st.h +++ b/include/net/llc_s_st.h @@ -29,8 +29,8 @@ struct llc_sap_state_trans { }; struct llc_sap_state { - u8 curr_state; - struct llc_sap_state_trans **transitions; + u8 curr_state; + const struct llc_sap_state_trans **transitions; }; /* only access to SAP state table */ diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 45ad37adbe32..0a04eaf5343c 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -250,6 +250,7 @@ struct ieee80211_chan_req { * @min_def: the minimum channel definition currently required. * @ap: the channel definition the AP actually is operating as, * for use with (wider bandwidth) OFDMA + * @radio_idx: index of the wiphy radio used for this channel * @rx_chains_static: The number of RX chains that must always be * active on the channel to receive MIMO transmissions * @rx_chains_dynamic: The number of RX chains that must be enabled @@ -264,6 +265,7 @@ struct ieee80211_chanctx_conf { struct cfg80211_chan_def min_def; struct cfg80211_chan_def ap; + int radio_idx; u8 rx_chains_static, rx_chains_dynamic; bool radar_enabled; @@ -362,6 +364,7 @@ struct ieee80211_vif_chanctx_switch { * status changed. * @BSS_CHANGED_MLD_VALID_LINKS: MLD valid links status changed. * @BSS_CHANGED_MLD_TTLM: negotiated TID to link mapping was changed + * @BSS_CHANGED_TPE: transmit power envelope changed */ enum ieee80211_bss_change { BSS_CHANGED_ASSOC = 1<<0, @@ -398,6 +401,7 @@ enum ieee80211_bss_change { BSS_CHANGED_UNSOL_BCAST_PROBE_RESP = BIT_ULL(31), BSS_CHANGED_MLD_VALID_LINKS = BIT_ULL(33), BSS_CHANGED_MLD_TTLM = BIT_ULL(34), + BSS_CHANGED_TPE = BIT_ULL(35), /* when adding here, make sure to change ieee80211_reconfig */ }; @@ -550,6 +554,39 @@ struct ieee80211_fils_discovery { u32 max_interval; }; +#define IEEE80211_TPE_EIRP_ENTRIES_320MHZ 5 +struct ieee80211_parsed_tpe_eirp { + bool valid; + s8 power[IEEE80211_TPE_EIRP_ENTRIES_320MHZ]; + u8 count; +}; + +#define IEEE80211_TPE_PSD_ENTRIES_320MHZ 16 +struct ieee80211_parsed_tpe_psd { + bool valid; + s8 power[IEEE80211_TPE_PSD_ENTRIES_320MHZ]; + u8 count, n; +}; + +/** + * struct ieee80211_parsed_tpe - parsed transmit power envelope information + * @max_local: maximum local EIRP, one value for 20, 40, 80, 160, 320 MHz each + * (indexed by TX power category) + * @max_reg_client: maximum regulatory client EIRP, one value for 20, 40, 80, + * 160, 320 MHz each + * (indexed by TX power category) + * @psd_local: maximum local power spectral density, one value for each 20 MHz + * subchannel per bss_conf's chanreq.oper + * (indexed by TX power category) + * @psd_reg_client: maximum regulatory power spectral density, one value for + * each 20 MHz subchannel per bss_conf's chanreq.oper + * (indexed by TX power category) + */ +struct ieee80211_parsed_tpe { + struct ieee80211_parsed_tpe_eirp max_local[2], max_reg_client[2]; + struct ieee80211_parsed_tpe_psd psd_local[2], psd_reg_client[2]; +}; + /** * struct ieee80211_bss_conf - holds the BSS's changing parameters * @@ -662,8 +699,7 @@ struct ieee80211_fils_discovery { * @beacon_tx_rate: The configured beacon transmit rate that needs to be passed * to driver when rate control is offloaded to firmware. 
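Reviewer note: a hypothetical consumer of the new parsed-TPE state, sketching how a driver's link_info_changed() callback might react to BSS_CHANGED_TPE (function name and print are illustrative):

#include <net/mac80211.h>

static void example_link_info_changed(struct ieee80211_hw *hw,
				      struct ieee80211_vif *vif,
				      struct ieee80211_bss_conf *link_conf,
				      u64 changed)
{
	const struct ieee80211_parsed_tpe_eirp *eirp;

	if (!(changed & BSS_CHANGED_TPE))
		return;

	/* index [0]/[1] selects the TX power category */
	eirp = &link_conf->tpe.max_local[0];
	if (eirp->valid && eirp->count)
		pr_debug("20 MHz local EIRP limit: %d (TPE element units)\n",
			 eirp->power[0]);
}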
* @power_type: power type of BSS for 6 GHz - * @tx_pwr_env: transmit power envelope array of BSS. - * @tx_pwr_env_num: number of @tx_pwr_env. + * @tpe: transmit power envelope information * @pwr_reduction: power constraint of BSS. * @eht_support: does this BSS support EHT * @csa_active: marks whether a channel switch is going on. @@ -701,6 +737,9 @@ struct ieee80211_fils_discovery { * beamformee * @eht_mu_beamformer: in AP-mode, does this BSS enable operation as an EHT MU * beamformer + * @eht_80mhz_full_bw_ul_mumimo: in AP-mode, does this BSS support the + * reception of an EHT TB PPDU on an RU that spans the entire PPDU + * bandwidth */ struct ieee80211_bss_conf { struct ieee80211_vif *vif; @@ -766,8 +805,9 @@ struct ieee80211_bss_conf { u32 unsol_bcast_probe_resp_interval; struct cfg80211_bitrate_mask beacon_tx_rate; enum ieee80211_ap_reg_power power_type; - struct ieee80211_tx_pwr_env tx_pwr_env[IEEE80211_TPE_MAX_IE_COUNT]; - u8 tx_pwr_env_num; + + struct ieee80211_parsed_tpe tpe; + u8 pwr_reduction; bool eht_support; @@ -793,6 +833,7 @@ struct ieee80211_bss_conf { bool eht_su_beamformer; bool eht_su_beamformee; bool eht_mu_beamformer; + bool eht_80mhz_full_bw_ul_mumimo; }; /** @@ -2728,14 +2769,6 @@ struct ieee80211_txq { * @IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA: Hardware supports buffer STA on * TDLS links. * - * @IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP: The driver requires the - * mgd_prepare_tx() callback to be called before transmission of a - * deauthentication frame in case the association was completed but no - * beacon was heard. This is required in multi-channel scenarios, where the - * virtual interface might not be given air time for the transmission of - * the frame, as it is not synced with the AP/P2P GO yet, and thus the - * deauthentication frame might not be transmitted. - * * @IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP: The driver (or firmware) doesn't * support QoS NDP for AP probing - that's most likely a driver bug. * @@ -2835,7 +2868,6 @@ enum ieee80211_hw_flags { IEEE80211_HW_REPORTS_LOW_ACK, IEEE80211_HW_SUPPORTS_TX_FRAG, IEEE80211_HW_SUPPORTS_TDLS_BUFFER_STA, - IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP, IEEE80211_HW_DOESNT_SUPPORT_QOS_NDP, IEEE80211_HW_BUFF_MMPDU_TXQ, IEEE80211_HW_SUPPORTS_VHT_EXT_NSS_BW, @@ -3748,13 +3780,15 @@ enum ieee80211_reconfig_type { * @success: whether the frame exchange was successful, only * used with the mgd_complete_tx() method, and then only * valid for auth and (re)assoc. + * @was_assoc: set if this call is due to deauth/disassoc + * while just having been associated * @link_id: the link id on which the frame will be TX'ed. * Only used with the mgd_prepare_tx() method. */ struct ieee80211_prep_tx_info { u16 duration; u16 subtype; - u8 success:1; + u8 success:1, was_assoc:1; int link_id; }; @@ -4203,12 +4237,9 @@ struct ieee80211_prep_tx_info { * yet it need not necessarily be given airtime, in particular since any * transmission to a P2P GO needs to be synchronized against the GO's * powersave state. mac80211 will call this function before transmitting a - * management frame prior to having successfully associated to allow the - * driver to give it channel time for the transmission, to get a response - * and to be able to synchronize with the GO. - * For drivers that set %IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP, mac80211 - * would also call this function before transmitting a deauthentication - * frame in case that no beacon was heard from the AP/P2P GO. 
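Reviewer note: with IEEE80211_HW_DEAUTH_NEED_MGD_TX_PREP gone, mgd_prepare_tx() is called before every such frame, and the new was_assoc bit lets the driver decide how much work the call needs. A sketch (hypothetical driver hook):

#include <net/mac80211.h>

static void example_mgd_prepare_tx(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_prep_tx_info *info)
{
	/* Deauth/disassoc right after a completed association: the
	 * device is already synced with the AP/GO, so no extra
	 * channel time needs to be reserved.
	 */
	if (info->was_assoc)
		return;

	/* otherwise reserve a short session on the channel ... */
}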
+ * management frame prior to transmitting that frame to allow the driver + * to give it channel time for the transmission, to get a response and be + * able to synchronize with the GO. * The callback will be called before each transmission and upon return * mac80211 will transmit the frame right away. * Additional information is passed in the &struct ieee80211_prep_tx_info @@ -4405,7 +4436,7 @@ struct ieee80211_ops { struct ieee80211_tx_control *control, struct sk_buff *skb); int (*start)(struct ieee80211_hw *hw); - void (*stop)(struct ieee80211_hw *hw); + void (*stop)(struct ieee80211_hw *hw, bool suspend); #ifdef CONFIG_PM int (*suspend)(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); int (*resume)(struct ieee80211_hw *hw); diff --git a/include/net/mana/gdma.h b/include/net/mana/gdma.h index 27684135bb4d..83963d9e804d 100644 --- a/include/net/mana/gdma.h +++ b/include/net/mana/gdma.h @@ -224,7 +224,15 @@ struct gdma_dev { struct auxiliary_device *adev; }; -#define MINIMUM_SUPPORTED_PAGE_SIZE PAGE_SIZE +/* MANA_PAGE_SIZE is the DMA unit */ +#define MANA_PAGE_SHIFT 12 +#define MANA_PAGE_SIZE BIT(MANA_PAGE_SHIFT) +#define MANA_PAGE_ALIGN(x) ALIGN((x), MANA_PAGE_SIZE) +#define MANA_PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), MANA_PAGE_SIZE) +#define MANA_PFN(a) ((a) >> MANA_PAGE_SHIFT) + +/* Required by HW */ +#define MANA_MIN_QSIZE MANA_PAGE_SIZE #define GDMA_CQE_SIZE 64 #define GDMA_EQE_SIZE 16 @@ -543,11 +551,13 @@ enum { */ #define GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX BIT(2) #define GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG BIT(3) +#define GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT BIT(5) #define GDMA_DRV_CAP_FLAGS1 \ (GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT | \ GDMA_DRV_CAP_FLAG_1_NAPI_WKDONE_FIX | \ - GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG) + GDMA_DRV_CAP_FLAG_1_HWC_TIMEOUT_RECONFIG | \ + GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT) #define GDMA_DRV_CAP_FLAGS2 0 diff --git a/include/net/mana/mana.h b/include/net/mana/mana.h index 561f6719fb4e..e39b8676fe54 100644 --- a/include/net/mana/mana.h +++ b/include/net/mana/mana.h @@ -30,8 +30,8 @@ enum TRI_STATE { }; /* Number of entries for hardware indirection table must be in power of 2 */ -#define MANA_INDIRECT_TABLE_SIZE 64 -#define MANA_INDIRECT_TABLE_MASK (MANA_INDIRECT_TABLE_SIZE - 1) +#define MANA_INDIRECT_TABLE_MAX_SIZE 512 +#define MANA_INDIRECT_TABLE_DEF_SIZE 64 /* The Toeplitz hash key's length in bytes: should be multiple of 8 */ #define MANA_HASH_KEY_SIZE 40 @@ -42,7 +42,8 @@ enum TRI_STATE { #define MAX_SEND_BUFFERS_PER_QUEUE 256 -#define EQ_SIZE (8 * PAGE_SIZE) +#define EQ_SIZE (8 * MANA_PAGE_SIZE) + #define LOG2_EQ_THROTTLE 3 #define MAX_PORTS_IN_MANA_DEV 256 @@ -410,10 +411,11 @@ struct mana_port_context { struct mana_tx_qp *tx_qp; /* Indirection Table for RX & TX. The values are queue indexes */ - u32 indir_table[MANA_INDIRECT_TABLE_SIZE]; + u32 *indir_table; + u32 indir_table_sz; /* Indirection table containing RxObject Handles */ - mana_handle_t rxobj_table[MANA_INDIRECT_TABLE_SIZE]; + mana_handle_t *rxobj_table; /* Hash key used by the NIC */ u8 hashkey[MANA_HASH_KEY_SIZE]; diff --git a/include/net/netdev_queues.h b/include/net/netdev_queues.h index a8a7e48dfa6c..5ca019d294ca 100644 --- a/include/net/netdev_queues.h +++ b/include/net/netdev_queues.h @@ -62,6 +62,8 @@ struct netdev_queue_stats_tx { * statistics will not generally add up to the total number of events for * the device. 
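Reviewer note on the MANA hunks: two things change here; the DMA unit becomes a fixed 4 KiB MANA page (so queue sizes stop scaling with the host PAGE_SIZE), and the RSS indirection tables become dynamically sized. A sketch of the latter (hypothetical helper, not from the patch):

#include <linux/slab.h>
#include <net/mana/mana.h>

/* sz would come from the device once
 * GDMA_DRV_CAP_FLAG_1_VARIABLE_INDIRECTION_TABLE_SUPPORT is negotiated;
 * it must stay a power of two up to MANA_INDIRECT_TABLE_MAX_SIZE,
 * with MANA_INDIRECT_TABLE_DEF_SIZE as the fallback.
 */
static int example_alloc_indir(struct mana_port_context *apc, u32 sz)
{
	apc->indir_table_sz = sz;
	apc->indir_table = kcalloc(sz, sizeof(*apc->indir_table), GFP_KERNEL);
	if (!apc->indir_table)
		return -ENOMEM;

	apc->rxobj_table = kcalloc(sz, sizeof(*apc->rxobj_table), GFP_KERNEL);
	if (!apc->rxobj_table) {
		kfree(apc->indir_table);
		apc->indir_table = NULL;
		return -ENOMEM;
	}

	return 0;
}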
The @get_base_stats callback allows filling in the delta * between events for currently live queues and overall device history. + * @get_base_stats can also be used to report any miscellaneous packets + * transferred outside of the main set of queues used by the networking stack. * When the statistics for the entire device are queried, first @get_base_stats * is issued to collect the delta, and then a series of per-queue callbacks. * Only statistics which are set in @get_base_stats will be reported diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h index 9abb7ee40d72..b63d53bb9dd6 100644 --- a/include/net/netfilter/nf_flow_table.h +++ b/include/net/netfilter/nf_flow_table.h @@ -305,11 +305,26 @@ struct flow_ports { __be16 source, dest; }; +struct nf_flowtable *nf_flowtable_by_dev(const struct net_device *dev); +int nf_flow_offload_xdp_setup(struct nf_flowtable *flowtable, + struct net_device *dev, + enum flow_block_command cmd); + unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state); unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *state); +#if (IS_BUILTIN(CONFIG_NF_FLOW_TABLE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) || \ + (IS_MODULE(CONFIG_NF_FLOW_TABLE) && IS_ENABLED(CONFIG_DEBUG_INFO_BTF_MODULES)) +extern int nf_flow_register_bpf(void); +#else +static inline int nf_flow_register_bpf(void) +{ + return 0; +} +#endif + #define MODULE_ALIAS_NF_FLOWTABLE(family) \ MODULE_ALIAS("nf-flowtable-" __stringify(family)) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 188d41da1a40..1bfdd16890fa 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1176,7 +1176,7 @@ static inline bool nft_chain_is_bound(struct nft_chain *chain) int nft_chain_add(struct nft_table *table, struct nft_chain *chain); void nft_chain_del(struct nft_chain *chain); -void nf_tables_chain_destroy(struct nft_ctx *ctx); +void nf_tables_chain_destroy(struct nft_chain *chain); struct nft_stats { u64 bytes; @@ -1613,41 +1613,67 @@ static inline int nft_set_elem_is_dead(const struct nft_set_ext *ext) } /** - * struct nft_trans - nf_tables object update in transaction + * struct nft_trans - nf_tables object update in transaction * - * @list: used internally - * @binding_list: list of objects with possible bindings - * @msg_type: message type - * @put_net: ctx->net needs to be put - * @ctx: transaction context - * @data: internal information related to the transaction + * @list: used internally + * @net: struct net + * @table: struct nft_table the object resides in + * @msg_type: message type + * @seq: netlink sequence number + * @flags: modifiers to new request + * @report: notify via unicast netlink message + * @put_net: net needs to be put + * + * This is the information common to all objects in the transaction, + * this must always be the first member of derived sub-types. */ struct nft_trans { struct list_head list; - struct list_head binding_list; + struct net *net; + struct nft_table *table; int msg_type; - bool put_net; - struct nft_ctx ctx; - char data[]; + u32 seq; + u16 flags; + u8 report:1; + u8 put_net:1; +}; + +/** + * struct nft_trans_binding - nf_tables object with binding support in transaction + * @nft_trans: base structure, MUST be first member + * @binding_list: list of objects with possible bindings + * + * This is the base type used by objects that can be bound to a chain. 
+ */ +struct nft_trans_binding { + struct nft_trans nft_trans; + struct list_head binding_list; }; struct nft_trans_rule { + struct nft_trans nft_trans; struct nft_rule *rule; + struct nft_chain *chain; struct nft_flow_rule *flow; u32 rule_id; bool bound; }; -#define nft_trans_rule(trans) \ - (((struct nft_trans_rule *)trans->data)->rule) -#define nft_trans_flow_rule(trans) \ - (((struct nft_trans_rule *)trans->data)->flow) -#define nft_trans_rule_id(trans) \ - (((struct nft_trans_rule *)trans->data)->rule_id) -#define nft_trans_rule_bound(trans) \ - (((struct nft_trans_rule *)trans->data)->bound) +#define nft_trans_container_rule(trans) \ + container_of(trans, struct nft_trans_rule, nft_trans) +#define nft_trans_rule(trans) \ + nft_trans_container_rule(trans)->rule +#define nft_trans_flow_rule(trans) \ + nft_trans_container_rule(trans)->flow +#define nft_trans_rule_id(trans) \ + nft_trans_container_rule(trans)->rule_id +#define nft_trans_rule_bound(trans) \ + nft_trans_container_rule(trans)->bound +#define nft_trans_rule_chain(trans) \ + nft_trans_container_rule(trans)->chain struct nft_trans_set { + struct nft_trans_binding nft_trans_binding; struct nft_set *set; u32 set_id; u32 gc_int; @@ -1657,100 +1683,117 @@ struct nft_trans_set { u32 size; }; -#define nft_trans_set(trans) \ - (((struct nft_trans_set *)trans->data)->set) -#define nft_trans_set_id(trans) \ - (((struct nft_trans_set *)trans->data)->set_id) -#define nft_trans_set_bound(trans) \ - (((struct nft_trans_set *)trans->data)->bound) -#define nft_trans_set_update(trans) \ - (((struct nft_trans_set *)trans->data)->update) -#define nft_trans_set_timeout(trans) \ - (((struct nft_trans_set *)trans->data)->timeout) -#define nft_trans_set_gc_int(trans) \ - (((struct nft_trans_set *)trans->data)->gc_int) -#define nft_trans_set_size(trans) \ - (((struct nft_trans_set *)trans->data)->size) +#define nft_trans_container_set(t) \ + container_of(t, struct nft_trans_set, nft_trans_binding.nft_trans) +#define nft_trans_set(trans) \ + nft_trans_container_set(trans)->set +#define nft_trans_set_id(trans) \ + nft_trans_container_set(trans)->set_id +#define nft_trans_set_bound(trans) \ + nft_trans_container_set(trans)->bound +#define nft_trans_set_update(trans) \ + nft_trans_container_set(trans)->update +#define nft_trans_set_timeout(trans) \ + nft_trans_container_set(trans)->timeout +#define nft_trans_set_gc_int(trans) \ + nft_trans_container_set(trans)->gc_int +#define nft_trans_set_size(trans) \ + nft_trans_container_set(trans)->size struct nft_trans_chain { + struct nft_trans_binding nft_trans_binding; struct nft_chain *chain; - bool update; char *name; struct nft_stats __percpu *stats; u8 policy; + bool update; bool bound; u32 chain_id; struct nft_base_chain *basechain; struct list_head hook_list; }; -#define nft_trans_chain(trans) \ - (((struct nft_trans_chain *)trans->data)->chain) -#define nft_trans_chain_update(trans) \ - (((struct nft_trans_chain *)trans->data)->update) -#define nft_trans_chain_name(trans) \ - (((struct nft_trans_chain *)trans->data)->name) -#define nft_trans_chain_stats(trans) \ - (((struct nft_trans_chain *)trans->data)->stats) -#define nft_trans_chain_policy(trans) \ - (((struct nft_trans_chain *)trans->data)->policy) -#define nft_trans_chain_bound(trans) \ - (((struct nft_trans_chain *)trans->data)->bound) -#define nft_trans_chain_id(trans) \ - (((struct nft_trans_chain *)trans->data)->chain_id) -#define nft_trans_basechain(trans) \ - (((struct nft_trans_chain *)trans->data)->basechain) -#define 
nft_trans_chain_hooks(trans) \ - (((struct nft_trans_chain *)trans->data)->hook_list) +#define nft_trans_container_chain(t) \ + container_of(t, struct nft_trans_chain, nft_trans_binding.nft_trans) +#define nft_trans_chain(trans) \ + nft_trans_container_chain(trans)->chain +#define nft_trans_chain_update(trans) \ + nft_trans_container_chain(trans)->update +#define nft_trans_chain_name(trans) \ + nft_trans_container_chain(trans)->name +#define nft_trans_chain_stats(trans) \ + nft_trans_container_chain(trans)->stats +#define nft_trans_chain_policy(trans) \ + nft_trans_container_chain(trans)->policy +#define nft_trans_chain_bound(trans) \ + nft_trans_container_chain(trans)->bound +#define nft_trans_chain_id(trans) \ + nft_trans_container_chain(trans)->chain_id +#define nft_trans_basechain(trans) \ + nft_trans_container_chain(trans)->basechain +#define nft_trans_chain_hooks(trans) \ + nft_trans_container_chain(trans)->hook_list struct nft_trans_table { + struct nft_trans nft_trans; bool update; }; -#define nft_trans_table_update(trans) \ - (((struct nft_trans_table *)trans->data)->update) +#define nft_trans_container_table(trans) \ + container_of(trans, struct nft_trans_table, nft_trans) +#define nft_trans_table_update(trans) \ + nft_trans_container_table(trans)->update struct nft_trans_elem { + struct nft_trans nft_trans; struct nft_set *set; struct nft_elem_priv *elem_priv; bool bound; }; -#define nft_trans_elem_set(trans) \ - (((struct nft_trans_elem *)trans->data)->set) -#define nft_trans_elem_priv(trans) \ - (((struct nft_trans_elem *)trans->data)->elem_priv) -#define nft_trans_elem_set_bound(trans) \ - (((struct nft_trans_elem *)trans->data)->bound) +#define nft_trans_container_elem(t) \ + container_of(t, struct nft_trans_elem, nft_trans) +#define nft_trans_elem_set(trans) \ + nft_trans_container_elem(trans)->set +#define nft_trans_elem_priv(trans) \ + nft_trans_container_elem(trans)->elem_priv +#define nft_trans_elem_set_bound(trans) \ + nft_trans_container_elem(trans)->bound struct nft_trans_obj { + struct nft_trans nft_trans; struct nft_object *obj; struct nft_object *newobj; bool update; }; -#define nft_trans_obj(trans) \ - (((struct nft_trans_obj *)trans->data)->obj) -#define nft_trans_obj_newobj(trans) \ - (((struct nft_trans_obj *)trans->data)->newobj) -#define nft_trans_obj_update(trans) \ - (((struct nft_trans_obj *)trans->data)->update) +#define nft_trans_container_obj(t) \ + container_of(t, struct nft_trans_obj, nft_trans) +#define nft_trans_obj(trans) \ + nft_trans_container_obj(trans)->obj +#define nft_trans_obj_newobj(trans) \ + nft_trans_container_obj(trans)->newobj +#define nft_trans_obj_update(trans) \ + nft_trans_container_obj(trans)->update struct nft_trans_flowtable { + struct nft_trans nft_trans; struct nft_flowtable *flowtable; - bool update; struct list_head hook_list; u32 flags; + bool update; }; -#define nft_trans_flowtable(trans) \ - (((struct nft_trans_flowtable *)trans->data)->flowtable) -#define nft_trans_flowtable_update(trans) \ - (((struct nft_trans_flowtable *)trans->data)->update) -#define nft_trans_flowtable_hooks(trans) \ - (((struct nft_trans_flowtable *)trans->data)->hook_list) -#define nft_trans_flowtable_flags(trans) \ - (((struct nft_trans_flowtable *)trans->data)->flags) +#define nft_trans_container_flowtable(t) \ + container_of(t, struct nft_trans_flowtable, nft_trans) +#define nft_trans_flowtable(trans) \ + nft_trans_container_flowtable(trans)->flowtable +#define nft_trans_flowtable_update(trans) \ + nft_trans_container_flowtable(trans)->update 
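Reviewer note: with struct nft_trans embedded as the first member of each derived type, type recovery is a plain container_of() behind the accessor macros, instead of the old cast through the flexible "char data[]" member. A sketch of a commit-path walk:

#include <net/netfilter/nf_tables.h>

static void example_commit_rule(struct nft_trans *trans)
{
	struct nft_trans_rule *tr;

	if (trans->msg_type != NFT_MSG_NEWRULE)
		return;

	tr = nft_trans_container_rule(trans);	/* container_of() */
	pr_debug("rule %p on chain %p\n", tr->rule, tr->chain);
}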
+#define nft_trans_flowtable_hooks(trans) \ + nft_trans_container_flowtable(trans)->hook_list +#define nft_trans_flowtable_flags(trans) \ + nft_trans_container_flowtable(trans)->flags #define NFT_TRANS_GC_BATCHCOUNT 256 @@ -1764,6 +1807,33 @@ struct nft_trans_gc { struct rcu_head rcu; }; +static inline void nft_ctx_update(struct nft_ctx *ctx, + const struct nft_trans *trans) +{ + switch (trans->msg_type) { + case NFT_MSG_NEWRULE: + case NFT_MSG_DELRULE: + case NFT_MSG_DESTROYRULE: + ctx->chain = nft_trans_rule_chain(trans); + break; + case NFT_MSG_NEWCHAIN: + case NFT_MSG_DELCHAIN: + case NFT_MSG_DESTROYCHAIN: + ctx->chain = nft_trans_chain(trans); + break; + default: + ctx->chain = NULL; + break; + } + + ctx->net = trans->net; + ctx->table = trans->table; + ctx->family = trans->table->family; + ctx->report = trans->report; + ctx->flags = trans->flags; + ctx->seq = trans->seq; +} + struct nft_trans_gc *nft_trans_gc_alloc(struct nft_set *set, unsigned int gc_seq, gfp_t gfp); void nft_trans_gc_destroy(struct nft_trans_gc *trans); diff --git a/include/net/netmem.h b/include/net/netmem.h index d8b810245c1d..46cc9b89ac79 100644 --- a/include/net/netmem.h +++ b/include/net/netmem.h @@ -38,4 +38,19 @@ static inline netmem_ref page_to_netmem(struct page *page) return (__force netmem_ref)page; } +static inline int netmem_ref_count(netmem_ref netmem) +{ + return page_ref_count(netmem_to_page(netmem)); +} + +static inline unsigned long netmem_to_pfn(netmem_ref netmem) +{ + return page_to_pfn(netmem_to_page(netmem)); +} + +static inline netmem_ref netmem_compound_head(netmem_ref netmem) +{ + return page_to_netmem(compound_head(netmem_to_page(netmem))); +} + #endif /* _NET_NETMEM_H */ diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h index c356c458b340..5fcd61ada622 100644 --- a/include/net/netns/ipv4.h +++ b/include/net/netns/ipv4.h @@ -40,6 +40,13 @@ struct inet_timewait_death_row { struct tcp_fastopen_context; +#ifdef CONFIG_IP_ROUTE_MULTIPATH +struct sysctl_fib_multipath_hash_seed { + u32 user_seed; + u32 mp_seed; +}; +#endif + struct netns_ipv4 { /* Cacheline organization can be found documented in * Documentation/networking/net_cachelines/netns_ipv4_sysctl.rst. 
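Reviewer note: at this stage netmem_ref is a type-safe encoding of a struct page pointer, so the new helpers above are lossless pass-throughs to their struct-page counterparts. A self-check sketch:

#include <linux/mm.h>
#include <net/netmem.h>

static void example_netmem_roundtrip(struct page *page)
{
	netmem_ref netmem = page_to_netmem(page);

	WARN_ON(netmem_to_page(netmem) != page);
	WARN_ON(netmem_ref_count(netmem) != page_ref_count(page));
	WARN_ON(netmem_to_pfn(netmem) != page_to_pfn(page));
}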
@@ -170,6 +177,7 @@ struct netns_ipv4 { u8 sysctl_tcp_sack; u8 sysctl_tcp_window_scaling; u8 sysctl_tcp_timestamps; + int sysctl_tcp_rto_min_us; u8 sysctl_tcp_recovery; u8 sysctl_tcp_thin_linear_timeouts; u8 sysctl_tcp_slow_start_after_idle; @@ -245,6 +253,7 @@ struct netns_ipv4 { #endif #endif #ifdef CONFIG_IP_ROUTE_MULTIPATH + struct sysctl_fib_multipath_hash_seed sysctl_fib_multipath_hash_seed; u32 sysctl_fib_multipath_hash_fields; u8 sysctl_fib_multipath_use_neigh; u8 sysctl_fib_multipath_hash_policy; diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index 423b52eca908..d489d9250bff 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h @@ -83,6 +83,7 @@ struct netns_xfrm { spinlock_t xfrm_policy_lock; struct mutex xfrm_cfg_mutex; + struct delayed_work nat_keepalive_work; }; #endif diff --git a/include/net/page_pool/helpers.h b/include/net/page_pool/helpers.h index 873631c79ab1..2b43a893c619 100644 --- a/include/net/page_pool/helpers.h +++ b/include/net/page_pool/helpers.h @@ -55,6 +55,8 @@ #include #include +#include +#include #ifdef CONFIG_PAGE_POOL_STATS /* Deprecated driver-facing API, use netlink instead */ @@ -212,6 +214,11 @@ page_pool_get_dma_dir(const struct page_pool *pool) return pool->p.dma_dir; } +static inline void page_pool_fragment_netmem(netmem_ref netmem, long nr) +{ + atomic_long_set(&netmem_to_page(netmem)->pp_ref_count, nr); +} + /** * page_pool_fragment_page() - split a fresh page into fragments * @page: page to split @@ -232,11 +239,12 @@ page_pool_get_dma_dir(const struct page_pool *pool) */ static inline void page_pool_fragment_page(struct page *page, long nr) { - atomic_long_set(&page->pp_ref_count, nr); + page_pool_fragment_netmem(page_to_netmem(page), nr); } -static inline long page_pool_unref_page(struct page *page, long nr) +static inline long page_pool_unref_netmem(netmem_ref netmem, long nr) { + struct page *page = netmem_to_page(netmem); long ret; /* If nr == pp_ref_count then we have cleared all remaining @@ -279,15 +287,41 @@ static inline long page_pool_unref_page(struct page *page, long nr) return ret; } -static inline void page_pool_ref_page(struct page *page) +static inline long page_pool_unref_page(struct page *page, long nr) { - atomic_long_inc(&page->pp_ref_count); + return page_pool_unref_netmem(page_to_netmem(page), nr); } -static inline bool page_pool_is_last_ref(struct page *page) +static inline void page_pool_ref_netmem(netmem_ref netmem) +{ + atomic_long_inc(&netmem_to_page(netmem)->pp_ref_count); +} + +static inline void page_pool_ref_page(struct page *page) +{ + page_pool_ref_netmem(page_to_netmem(page)); +} + +static inline bool page_pool_is_last_ref(netmem_ref netmem) { /* If page_pool_unref_page() returns 0, we were the last user */ - return page_pool_unref_page(page, 1) == 0; + return page_pool_unref_netmem(netmem, 1) == 0; +} + +static inline void page_pool_put_netmem(struct page_pool *pool, + netmem_ref netmem, + unsigned int dma_sync_size, + bool allow_direct) +{ + /* When page_pool isn't compiled-in, net/core/xdp.c doesn't + * allow registering MEM_TYPE_PAGE_POOL, but shield linker. + */ +#ifdef CONFIG_PAGE_POOL + if (!page_pool_is_last_ref(netmem)) + return; + + page_pool_put_unrefed_netmem(pool, netmem, dma_sync_size, allow_direct); +#endif } /** @@ -308,15 +342,15 @@ static inline void page_pool_put_page(struct page_pool *pool, unsigned int dma_sync_size, bool allow_direct) { - /* When page_pool isn't compiled-in, net/core/xdp.c doesn't - * allow registering MEM_TYPE_PAGE_POOL, but shield linker. 
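Reviewer note: a sketch of the refcount flow through the new netmem variants above (illustrative function; real callers are page_pool internals and drivers):

#include <net/page_pool/helpers.h>

static void example_split(netmem_ref netmem)
{
	page_pool_fragment_netmem(netmem, 2);	/* two consumers */

	/* ... hand both fragments to the stack ... */

	/* first consumer done: 1 != 2, so this decrements to 1 */
	if (page_pool_unref_netmem(netmem, 1) == 0)
		pr_debug("not reached, one reference is still out\n");

	/* the second unref returns 0; page_pool_is_last_ref() keys
	 * off exactly that to decide whether to recycle the buffer
	 */
}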
- */ -#ifdef CONFIG_PAGE_POOL - if (!page_pool_is_last_ref(page)) - return; + page_pool_put_netmem(pool, page_to_netmem(page), dma_sync_size, + allow_direct); +} - page_pool_put_unrefed_page(pool, page, dma_sync_size, allow_direct); -#endif +static inline void page_pool_put_full_netmem(struct page_pool *pool, + netmem_ref netmem, + bool allow_direct) +{ + page_pool_put_netmem(pool, netmem, -1, allow_direct); } /** @@ -331,7 +365,7 @@ static inline void page_pool_put_page(struct page_pool *pool, static inline void page_pool_put_full_page(struct page_pool *pool, struct page *page, bool allow_direct) { - page_pool_put_page(pool, page, -1, allow_direct); + page_pool_put_netmem(pool, page_to_netmem(page), -1, allow_direct); } /** @@ -365,6 +399,18 @@ static inline void page_pool_free_va(struct page_pool *pool, void *va, page_pool_put_page(pool, virt_to_head_page(va), -1, allow_direct); } +static inline dma_addr_t page_pool_get_dma_addr_netmem(netmem_ref netmem) +{ + struct page *page = netmem_to_page(netmem); + + dma_addr_t ret = page->dma_addr; + + if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) + ret <<= PAGE_SHIFT; + + return ret; +} + /** * page_pool_get_dma_addr() - Retrieve the stored DMA address. * @page: page allocated from a page pool @@ -374,16 +420,14 @@ static inline void page_pool_free_va(struct page_pool *pool, void *va, */ static inline dma_addr_t page_pool_get_dma_addr(const struct page *page) { - dma_addr_t ret = page->dma_addr; - - if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) - ret <<= PAGE_SHIFT; - - return ret; + return page_pool_get_dma_addr_netmem(page_to_netmem((struct page *)page)); } -static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr) +static inline bool page_pool_set_dma_addr_netmem(netmem_ref netmem, + dma_addr_t addr) { + struct page *page = netmem_to_page(netmem); + if (PAGE_POOL_32BIT_ARCH_WITH_64BIT_DMA) { page->dma_addr = addr >> PAGE_SHIFT; @@ -419,6 +463,11 @@ static inline void page_pool_dma_sync_for_cpu(const struct page_pool *pool, page_pool_get_dma_dir(pool)); } +static inline bool page_pool_set_dma_addr(struct page *page, dma_addr_t addr) +{ + return page_pool_set_dma_addr_netmem(page_to_netmem(page), addr); +} + static inline bool page_pool_put(struct page_pool *pool) { return refcount_dec_and_test(&pool->user_cnt); diff --git a/include/net/page_pool/types.h b/include/net/page_pool/types.h index 7e8477057f3d..50569fed7868 100644 --- a/include/net/page_pool/types.h +++ b/include/net/page_pool/types.h @@ -6,6 +6,7 @@ #include #include #include +#include #define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA * map/unmap @@ -40,7 +41,7 @@ #define PP_ALLOC_CACHE_REFILL 64 struct pp_alloc_cache { u32 count; - struct page *cache[PP_ALLOC_CACHE_SIZE]; + netmem_ref cache[PP_ALLOC_CACHE_SIZE]; }; /** @@ -73,7 +74,7 @@ struct page_pool_params { struct net_device *netdev; unsigned int flags; /* private: used by test code only */ - void (*init_callback)(struct page *page, void *arg); + void (*init_callback)(netmem_ref netmem, void *arg); void *init_arg; ); }; @@ -128,6 +129,16 @@ struct page_pool_stats { }; #endif +/* The whole frag API block must stay within one cacheline. On 32-bit systems, + * sizeof(long) == sizeof(int), so that the block size is ``3 * sizeof(long)``. + * On 64-bit systems, the actual size is ``2 * sizeof(long) + sizeof(int)``. + * The closest pow-2 to both of them is ``4 * sizeof(long)``, so just use that + * one for simplicity. + * Having it aligned to a cacheline boundary may be excessive and doesn't bring + * any good. 
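Reviewer note: a hypothetical driver fragment combining the two netmem helpers above. The accessor matters because 32-bit builds with 64-bit DMA store the address right-shifted by PAGE_SHIFT, so a raw read of page->dma_addr would be wrong:

#include <net/page_pool/helpers.h>

static void example_rx_drop(struct page_pool *pool, netmem_ref netmem)
{
	dma_addr_t dma = page_pool_get_dma_addr_netmem(netmem);

	pr_debug("dropping buffer at dma %pad\n", &dma);
	/* last arg true: called from the pool's NAPI context */
	page_pool_put_full_netmem(pool, netmem, true);
}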
+ */ +#define PAGE_POOL_FRAG_GROUP_ALIGN (4 * sizeof(long)) + struct page_pool { struct page_pool_params_fast p; @@ -141,19 +152,11 @@ struct page_pool { bool system:1; /* This is a global percpu pool */ #endif - /* The following block must stay within one cacheline. On 32-bit - * systems, sizeof(long) == sizeof(int), so that the block size is - * ``3 * sizeof(long)``. On 64-bit systems, the actual size is - * ``2 * sizeof(long) + sizeof(int)``. The closest pow-2 to both of - * them is ``4 * sizeof(long)``, so just use that one for simplicity. - * Having it aligned to a cacheline boundary may be excessive and - * doesn't bring any good. - */ - __cacheline_group_begin(frag) __aligned(4 * sizeof(long)); + __cacheline_group_begin_aligned(frag, PAGE_POOL_FRAG_GROUP_ALIGN); long frag_users; - struct page *frag_page; + netmem_ref frag_page; unsigned int frag_offset; - __cacheline_group_end(frag); + __cacheline_group_end_aligned(frag, PAGE_POOL_FRAG_GROUP_ALIGN); struct delayed_work release_dw; void (*disconnect)(void *pool); @@ -220,8 +223,12 @@ struct page_pool { }; struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp); +netmem_ref page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp); struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset, unsigned int size, gfp_t gfp); +netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool, + unsigned int *offset, unsigned int size, + gfp_t gfp); struct page_pool *page_pool_create(const struct page_pool_params *params); struct page_pool *page_pool_create_percpu(const struct page_pool_params *params, int cpuid); @@ -229,6 +236,7 @@ struct page_pool *page_pool_create_percpu(const struct page_pool_params *params, struct xdp_mem_info; #ifdef CONFIG_PAGE_POOL +void page_pool_disable_direct_recycling(struct page_pool *pool); void page_pool_destroy(struct page_pool *pool); void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *), const struct xdp_mem_info *mem); @@ -251,6 +259,9 @@ static inline void page_pool_put_page_bulk(struct page_pool *pool, void **data, } #endif +void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem, + unsigned int dma_sync_size, + bool allow_direct); void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page, unsigned int dma_sync_size, bool allow_direct); diff --git a/include/net/psample.h b/include/net/psample.h index 0509d2d6be67..5071b5fc2b59 100644 --- a/include/net/psample.h +++ b/include/net/psample.h @@ -24,7 +24,10 @@ struct psample_metadata { u8 out_tc_valid:1, out_tc_occ_valid:1, latency_valid:1, - unused:5; + rate_as_probability:1, + unused:4; + const u8 *user_cookie; + u32 user_cookie_len; }; struct psample_group *psample_group_get(struct net *net, u32 group_num); @@ -35,13 +38,15 @@ struct sk_buff; #if IS_ENABLED(CONFIG_PSAMPLE) -void psample_sample_packet(struct psample_group *group, struct sk_buff *skb, - u32 sample_rate, const struct psample_metadata *md); +void psample_sample_packet(struct psample_group *group, + const struct sk_buff *skb, u32 sample_rate, + const struct psample_metadata *md); #else static inline void psample_sample_packet(struct psample_group *group, - struct sk_buff *skb, u32 sample_rate, + const struct sk_buff *skb, + u32 sample_rate, const struct psample_metadata *md) { } diff --git a/include/net/regulatory.h b/include/net/regulatory.h index ebf9e028d1ef..a103f4c8cf75 100644 --- a/include/net/regulatory.h +++ b/include/net/regulatory.h @@ -71,8 +71,6 @@ enum environment_cap { * CRDA and can be 
used by other regulatory requests. When a * the last request is not yet processed we must yield until it * is processed before processing any new requests. - * @country_ie_checksum: checksum of the last processed and accepted - * country IE * @country_ie_env: lets us know if the AP is telling us we are outdoor, * indoor, or if it doesn't matter * @list: used to insert into the reg_requests_list linked list diff --git a/include/net/request_sock.h b/include/net/request_sock.h index ebcb8896bffc..b07b1cd14e9f 100644 --- a/include/net/request_sock.h +++ b/include/net/request_sock.h @@ -128,39 +128,6 @@ static inline struct sock *skb_steal_sock(struct sk_buff *skb, return sk; } -static inline struct request_sock * -reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener, - bool attach_listener) -{ - struct request_sock *req; - - req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN); - if (!req) - return NULL; - req->rsk_listener = NULL; - if (attach_listener) { - if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) { - kmem_cache_free(ops->slab, req); - return NULL; - } - req->rsk_listener = sk_listener; - } - req->rsk_ops = ops; - req_to_sk(req)->sk_prot = sk_listener->sk_prot; - sk_node_init(&req_to_sk(req)->sk_node); - sk_tx_queue_clear(req_to_sk(req)); - req->saved_syn = NULL; - req->syncookie = 0; - req->timeout = 0; - req->num_timeout = 0; - req->num_retrans = 0; - req->sk = NULL; - refcount_set(&req->rsk_refcnt, 0); - - return req; -} -#define reqsk_alloc(...) alloc_hooks(reqsk_alloc_noprof(__VA_ARGS__)) - static inline void __reqsk_free(struct request_sock *req) { req->rsk_ops->destructor(req); @@ -172,14 +139,14 @@ static inline void __reqsk_free(struct request_sock *req) static inline void reqsk_free(struct request_sock *req) { - WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0); + DEBUG_NET_WARN_ON_ONCE(refcount_read(&req->rsk_refcnt) != 0); __reqsk_free(req); } static inline void reqsk_put(struct request_sock *req) { if (refcount_dec_and_test(&req->rsk_refcnt)) - reqsk_free(req); + __reqsk_free(req); } /* diff --git a/include/net/sctp/stream_sched.h b/include/net/sctp/stream_sched.h index 572d73fdcd5e..8034bf5febbe 100644 --- a/include/net/sctp/stream_sched.h +++ b/include/net/sctp/stream_sched.h @@ -35,10 +35,10 @@ struct sctp_sched_ops { struct sctp_chunk *(*dequeue)(struct sctp_outq *q); /* Called only if the chunk fit the packet */ void (*dequeue_done)(struct sctp_outq *q, struct sctp_chunk *chunk); - /* Sched all chunks already enqueued */ - void (*sched_all)(struct sctp_stream *steam); - /* Unched all chunks already enqueued */ - void (*unsched_all)(struct sctp_stream *steam); + /* Schedule all chunks already enqueued */ + void (*sched_all)(struct sctp_stream *stream); + /* Unschedule all chunks already enqueued */ + void (*unsched_all)(struct sctp_stream *stream); }; int sctp_sched_set_sched(struct sctp_association *asoc, diff --git a/include/net/seg6.h b/include/net/seg6.h index af668f17b398..82b3fbbcbb93 100644 --- a/include/net/seg6.h +++ b/include/net/seg6.h @@ -52,10 +52,17 @@ static inline struct seg6_pernet_data *seg6_pernet(struct net *net) extern int seg6_init(void); extern void seg6_exit(void); +#ifdef CONFIG_IPV6_SEG6_LWTUNNEL extern int seg6_iptunnel_init(void); extern void seg6_iptunnel_exit(void); extern int seg6_local_init(void); extern void seg6_local_exit(void); +#else +static inline int seg6_iptunnel_init(void) { return 0; } +static inline void seg6_iptunnel_exit(void) {} +static inline int 
seg6_local_init(void) { return 0; } +static inline void seg6_local_exit(void) {} +#endif extern bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced); extern struct ipv6_sr_hdr *seg6_get_srh(struct sk_buff *skb, int flags); diff --git a/include/net/seg6_hmac.h b/include/net/seg6_hmac.h index 2b5d2ee5613e..24f733b3e3fe 100644 --- a/include/net/seg6_hmac.h +++ b/include/net/seg6_hmac.h @@ -49,9 +49,16 @@ extern int seg6_hmac_info_del(struct net *net, u32 key); extern int seg6_push_hmac(struct net *net, struct in6_addr *saddr, struct ipv6_sr_hdr *srh); extern bool seg6_hmac_validate_skb(struct sk_buff *skb); +#ifdef CONFIG_IPV6_SEG6_HMAC extern int seg6_hmac_init(void); extern void seg6_hmac_exit(void); extern int seg6_hmac_net_init(struct net *net); extern void seg6_hmac_net_exit(struct net *net); +#else +static inline int seg6_hmac_init(void) { return 0; } +static inline void seg6_hmac_exit(void) {} +static inline int seg6_hmac_net_init(struct net *net) { return 0; } +static inline void seg6_hmac_net_exit(struct net *net) {} +#endif #endif diff --git a/include/net/seg6_local.h b/include/net/seg6_local.h index 3fab9dec2ec4..888c1ce6f527 100644 --- a/include/net/seg6_local.h +++ b/include/net/seg6_local.h @@ -19,6 +19,7 @@ extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr, extern bool seg6_bpf_has_valid_srh(struct sk_buff *skb); struct seg6_bpf_srh_state { + local_lock_t bh_lock; struct ipv6_sr_hdr *srh; u16 hdrlen; bool valid; diff --git a/include/net/sock.h b/include/net/sock.h index 953c8dc4e259..cce23ac4d514 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -544,6 +544,11 @@ struct sock { netns_tracker ns_tracker; }; +struct sock_bh_locked { + struct sock *sock; + local_lock_t bh_lock; +}; + enum sk_pacing { SK_PACING_NONE = 0, SK_PACING_NEEDED = 1, @@ -2095,7 +2100,7 @@ sk_dst_set(struct sock *sk, struct dst_entry *dst) sk_tx_queue_clear(sk); WRITE_ONCE(sk->sk_dst_pending_confirm, 0); - old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst); + old_dst = unrcu_pointer(xchg(&sk->sk_dst_cache, RCU_INITIALIZER(dst))); dst_release(old_dst); } diff --git a/include/net/tcp.h b/include/net/tcp.h index 060e95b331a2..2aac11e7e1cc 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -677,6 +677,7 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb, /* tcp_input.c */ void tcp_rearm_rto(struct sock *sk); void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req); +void tcp_done_with_error(struct sock *sk, int err); void tcp_reset(struct sock *sk, struct sk_buff *skb); void tcp_fin(struct sock *sk); void tcp_check_space(struct sock *sk); @@ -1065,11 +1066,19 @@ static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb) static inline bool tcp_skb_can_collapse(const struct sk_buff *to, const struct sk_buff *from) { + /* skb_cmp_decrypted() not needed, use tcp_write_collapse_fence() */ return likely(tcp_skb_can_collapse_to(to) && mptcp_skb_can_collapse(to, from) && skb_pure_zcopy_same(to, from)); } +static inline bool tcp_skb_can_collapse_rx(const struct sk_buff *to, + const struct sk_buff *from) +{ + return likely(mptcp_skb_can_collapse(to, from) && + !skb_cmp_decrypted(to, from)); +} + /* Events passed to congestion control interface */ enum tcp_ca_event { CA_EVENT_TX_START, /* first transmit when no packets in flight */ @@ -1215,7 +1224,7 @@ extern struct tcp_congestion_ops tcp_reno; struct tcp_congestion_ops *tcp_ca_find(const char *name); struct tcp_congestion_ops *tcp_ca_find_key(u32 key); -u32 
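Reviewer note: the point of these CONFIG_IPV6_SEG6_LWTUNNEL and CONFIG_IPV6_SEG6_HMAC stubs is that init paths can call the sub-initializers unconditionally instead of wrapping each call in an #ifdef. A sketch (hypothetical init function):

#include <linux/init.h>
#include <net/seg6.h>
#include <net/seg6_hmac.h>

static int __init example_seg6_init(void)
{
	int err;

	err = seg6_iptunnel_init();	/* no-op returning 0 when disabled */
	if (err)
		return err;

	err = seg6_hmac_init();		/* likewise stubbed out */
	if (err)
		seg6_iptunnel_exit();

	return err;
}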
tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca); +u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca); #ifdef CONFIG_INET char *tcp_ca_get_name_by_key(u32 key, char *buffer); #else @@ -1854,12 +1863,6 @@ tcp_md5_do_lookup_any_l3index(const struct sock *sk, return __tcp_md5_do_lookup(sk, 0, addr, family, true); } -enum skb_drop_reason -tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, - const void *saddr, const void *daddr, - int family, int l3index, const __u8 *hash_location); - - #define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key) #else static inline struct tcp_md5sig_key * @@ -1876,13 +1879,6 @@ tcp_md5_do_lookup_any_l3index(const struct sock *sk, return NULL; } -static inline enum skb_drop_reason -tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, - const void *saddr, const void *daddr, - int family, int l3index, const __u8 *hash_location) -{ - return SKB_NOT_DROPPED_YET; -} #define tcp_twsk_md5_key(twsk) NULL #endif @@ -2094,6 +2090,14 @@ static inline void tcp_rtx_queue_unlink_and_free(struct sk_buff *skb, struct soc tcp_wmem_free_skb(sk, skb); } +static inline void tcp_write_collapse_fence(struct sock *sk) +{ + struct sk_buff *skb = tcp_write_queue_tail(sk); + + if (skb) + TCP_SKB_CB(skb)->eor = 1; +} + static inline void tcp_push_pending_frames(struct sock *sk) { if (tcp_send_head(sk)) { @@ -2369,21 +2373,15 @@ static inline void tcp_get_current_key(const struct sock *sk, static inline bool tcp_key_is_md5(const struct tcp_key *key) { -#ifdef CONFIG_TCP_MD5SIG - if (static_branch_unlikely(&tcp_md5_needed.key) && - key->type == TCP_KEY_MD5) - return true; -#endif + if (static_branch_tcp_md5()) + return key->type == TCP_KEY_MD5; return false; } static inline bool tcp_key_is_ao(const struct tcp_key *key) { -#ifdef CONFIG_TCP_AO - if (static_branch_unlikely(&tcp_ao_needed.key) && - key->type == TCP_KEY_AO) - return true; -#endif + if (static_branch_tcp_ao()) + return key->type == TCP_KEY_AO; return false; } @@ -2795,66 +2793,9 @@ static inline bool tcp_ao_required(struct sock *sk, const void *saddr, return false; } -/* Called with rcu_read_lock() */ -static inline enum skb_drop_reason -tcp_inbound_hash(struct sock *sk, const struct request_sock *req, - const struct sk_buff *skb, - const void *saddr, const void *daddr, - int family, int dif, int sdif) -{ - const struct tcphdr *th = tcp_hdr(skb); - const struct tcp_ao_hdr *aoh; - const __u8 *md5_location; - int l3index; - - /* Invalid option or two times meet any of auth options */ - if (tcp_parse_auth_options(th, &md5_location, &aoh)) { - tcp_hash_fail("TCP segment has incorrect auth options set", - family, skb, ""); - return SKB_DROP_REASON_TCP_AUTH_HDR; - } - - if (req) { - if (tcp_rsk_used_ao(req) != !!aoh) { - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD); - tcp_hash_fail("TCP connection can't start/end using TCP-AO", - family, skb, "%s", - !aoh ? "missing AO" : "AO signed"); - return SKB_DROP_REASON_TCP_AOFAILURE; - } - } - - /* sdif set, means packet ingressed via a device - * in an L3 domain and dif is set to the l3mdev - */ - l3index = sdif ? dif : 0; - - /* Fast path: unsigned segments */ - if (likely(!md5_location && !aoh)) { - /* Drop if there's TCP-MD5 or TCP-AO key with any rcvid/sndid - * for the remote peer. On TCP-AO established connection - * the last key is impossible to remove, so there's - * always at least one current_key. 
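Reviewer note: the comment added to tcp_skb_can_collapse() points at the new fence above. A sketch of the intended call pattern (hypothetical caller, e.g. a ULP about to start encrypting a previously plaintext stream):

#include <net/tcp.h>

static void example_enable_ulp(struct sock *sk)
{
	lock_sock(sk);
	/* Sets EOR on the current write-queue tail, so bytes queued
	 * before the transition can never be collapsed into an skb
	 * carrying post-transition data; this replaces per-skb
	 * skb_cmp_decrypted() checks on the write side.
	 */
	tcp_write_collapse_fence(sk);
	/* ... switch the socket over to the new ULP ... */
	release_sock(sk);
}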
- */ - if (tcp_ao_required(sk, saddr, family, l3index, true)) { - tcp_hash_fail("AO hash is required, but not found", - family, skb, "L3 index %d", l3index); - return SKB_DROP_REASON_TCP_AONOTFOUND; - } - if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) { - NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); - tcp_hash_fail("MD5 Hash not found", - family, skb, "L3 index %d", l3index); - return SKB_DROP_REASON_TCP_MD5NOTFOUND; - } - return SKB_NOT_DROPPED_YET; - } - - if (aoh) - return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh); - - return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family, - l3index, md5_location); -} +enum skb_drop_reason tcp_inbound_hash(struct sock *sk, + const struct request_sock *req, const struct sk_buff *skb, + const void *saddr, const void *daddr, + int family, int dif, int sdif); #endif /* _TCP_H */ diff --git a/include/net/tcp_ao.h b/include/net/tcp_ao.h index 5d8e9ed2c005..1d46460d0fef 100644 --- a/include/net/tcp_ao.h +++ b/include/net/tcp_ao.h @@ -19,6 +19,11 @@ struct tcp_ao_hdr { u8 rnext_keyid; }; +static inline u8 tcp_ao_hdr_maclen(const struct tcp_ao_hdr *aoh) +{ + return aoh->length - sizeof(struct tcp_ao_hdr); +} + struct tcp_ao_counters { atomic64_t pkt_good; atomic64_t pkt_bad; @@ -144,43 +149,6 @@ extern struct static_key_false_deferred tcp_ao_needed; #define static_branch_tcp_ao() false #endif -static inline bool tcp_hash_should_produce_warnings(void) -{ - return static_branch_tcp_md5() || static_branch_tcp_ao(); -} - -#define tcp_hash_fail(msg, family, skb, fmt, ...) \ -do { \ - const struct tcphdr *th = tcp_hdr(skb); \ - char hdr_flags[6]; \ - char *f = hdr_flags; \ - \ - if (!tcp_hash_should_produce_warnings()) \ - break; \ - if (th->fin) \ - *f++ = 'F'; \ - if (th->syn) \ - *f++ = 'S'; \ - if (th->rst) \ - *f++ = 'R'; \ - if (th->psh) \ - *f++ = 'P'; \ - if (th->ack) \ - *f++ = '.'; \ - *f = 0; \ - if ((family) == AF_INET) { \ - net_info_ratelimited("%s for %pI4.%d->%pI4.%d [%s] " fmt "\n", \ - msg, &ip_hdr(skb)->saddr, ntohs(th->source), \ - &ip_hdr(skb)->daddr, ntohs(th->dest), \ - hdr_flags, ##__VA_ARGS__); \ - } else { \ - net_info_ratelimited("%s for [%pI6c].%d->[%pI6c].%d [%s]" fmt "\n", \ - msg, &ipv6_hdr(skb)->saddr, ntohs(th->source), \ - &ipv6_hdr(skb)->daddr, ntohs(th->dest), \ - hdr_flags, ##__VA_ARGS__); \ - } \ -} while (0) - #ifdef CONFIG_TCP_AO /* TCP-AO structures and functions */ struct tcp4_ao_context { diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h index 3d54de168a6d..bfe625b55d55 100644 --- a/include/net/xdp_sock.h +++ b/include/net/xdp_sock.h @@ -121,7 +121,7 @@ struct xsk_tx_metadata_ops { int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp); int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp); -void __xsk_map_flush(void); +void __xsk_map_flush(struct list_head *flush_list); /** * xsk_tx_metadata_to_compl - Save enough relevant metadata information @@ -206,7 +206,7 @@ static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp) return -EOPNOTSUPP; } -static inline void __xsk_map_flush(void) +static inline void __xsk_map_flush(struct list_head *flush_list) { } @@ -228,14 +228,4 @@ static inline void xsk_tx_metadata_complete(struct xsk_tx_metadata_compl *compl, } #endif /* CONFIG_XDP_SOCKETS */ - -#if defined(CONFIG_XDP_SOCKETS) && defined(CONFIG_DEBUG_NET) -bool xsk_map_check_flush(void); -#else -static inline bool xsk_map_check_flush(void) -{ - return false; -} -#endif - #endif /* _LINUX_XDP_SOCK_H */ diff --git a/include/net/xfrm.h 
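Reviewer note: a quick worked example for the new tcp_ao_hdr_maclen() helper. The fixed header is 4 bytes (kind, length, keyid, rnext_keyid) and aoh->length covers the whole option, so an HMAC-SHA-1-96 option carrying length 16 yields a 12-byte MAC:

#include <net/tcp_ao.h>

static bool example_maclen_is_sha1_96(const struct tcp_ao_hdr *aoh)
{
	/* 16 - sizeof(struct tcp_ao_hdr) == 12 */
	return tcp_ao_hdr_maclen(aoh) == 12;
}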
b/include/net/xfrm.h index 77ebf5bcf0b9..54cef89f6c1e 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h @@ -178,7 +178,10 @@ struct xfrm_state { struct hlist_node gclist; struct hlist_node bydst; }; - struct hlist_node bysrc; + union { + struct hlist_node dev_gclist; + struct hlist_node bysrc; + }; struct hlist_node byspi; struct hlist_node byseq; @@ -229,6 +232,10 @@ struct xfrm_state { struct xfrm_encap_tmpl *encap; struct sock __rcu *encap_sk; + /* NAT keepalive */ + u32 nat_keepalive_interval; /* seconds */ + time64_t nat_keepalive_expiration; + /* Data for care-of address */ xfrm_address_t *coaddr; @@ -1588,7 +1595,7 @@ void xfrm_state_update_stats(struct net *net); static inline void xfrm_dev_state_update_stats(struct xfrm_state *x) { struct xfrm_dev_offload *xdo = &x->xso; - struct net_device *dev = xdo->dev; + struct net_device *dev = READ_ONCE(xdo->dev); if (dev && dev->xfrmdev_ops && dev->xfrmdev_ops->xdo_dev_state_update_stats) @@ -1946,13 +1953,16 @@ int xfrm_dev_policy_add(struct net *net, struct xfrm_policy *xp, struct xfrm_user_offload *xuo, u8 dir, struct netlink_ext_ack *extack); bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x); +void xfrm_dev_state_delete(struct xfrm_state *x); +void xfrm_dev_state_free(struct xfrm_state *x); static inline void xfrm_dev_state_advance_esn(struct xfrm_state *x) { struct xfrm_dev_offload *xso = &x->xso; + struct net_device *dev = READ_ONCE(xso->dev); - if (xso->dev && xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn) - xso->dev->xfrmdev_ops->xdo_dev_state_advance_esn(x); + if (dev && dev->xfrmdev_ops->xdo_dev_state_advance_esn) + dev->xfrmdev_ops->xdo_dev_state_advance_esn(x); } static inline bool xfrm_dst_offload_ok(struct dst_entry *dst) @@ -1973,28 +1983,6 @@ static inline bool xfrm_dst_offload_ok(struct dst_entry *dst) return false; } -static inline void xfrm_dev_state_delete(struct xfrm_state *x) -{ - struct xfrm_dev_offload *xso = &x->xso; - - if (xso->dev) - xso->dev->xfrmdev_ops->xdo_dev_state_delete(x); -} - -static inline void xfrm_dev_state_free(struct xfrm_state *x) -{ - struct xfrm_dev_offload *xso = &x->xso; - struct net_device *dev = xso->dev; - - if (dev && dev->xfrmdev_ops) { - if (dev->xfrmdev_ops->xdo_dev_state_free) - dev->xfrmdev_ops->xdo_dev_state_free(x); - xso->dev = NULL; - xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; - netdev_put(dev, &xso->dev_tracker); - } -} - static inline void xfrm_dev_policy_delete(struct xfrm_policy *x) { struct xfrm_dev_offload *xdo = &x->xdo; @@ -2203,4 +2191,10 @@ static inline int register_xfrm_state_bpf(void) } #endif +int xfrm_nat_keepalive_init(unsigned short family); +void xfrm_nat_keepalive_fini(unsigned short family); +int xfrm_nat_keepalive_net_init(struct net *net); +int xfrm_nat_keepalive_net_fini(struct net *net); +void xfrm_nat_keepalive_state_updated(struct xfrm_state *x); + #endif /* _NET_XFRM_H */ diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index 1e1b40f4e664..6a37b29f4b4c 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -1016,7 +1016,7 @@ void ocelot_port_get_eth_mac_stats(struct ocelot *ocelot, int port, void ocelot_port_get_eth_phy_stats(struct ocelot *ocelot, int port, struct ethtool_eth_phy_stats *phy_stats); int ocelot_get_ts_info(struct ocelot *ocelot, int port, - struct ethtool_ts_info *info); + struct kernel_ethtool_ts_info *info); void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs); int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, bool enabled, struct 
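Reviewer note: a sketch (hypothetical helper, my reading of the new per-state fields) of how the delayed work added to netns_xfrm could decide whether a given SA is due for a NAT keepalive:

#include <net/xfrm.h>

static bool example_keepalive_due(const struct xfrm_state *x, time64_t now)
{
	if (!x->nat_keepalive_interval)
		return false;	/* keepalive disabled for this SA */

	/* interval is in seconds; expiration is an absolute time64_t */
	return now >= x->nat_keepalive_expiration;
}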
netlink_ext_ack *extack); diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h index 6834356b2d2a..543e54e432a1 100644 --- a/include/trace/events/page_pool.h +++ b/include/trace/events/page_pool.h @@ -42,51 +42,53 @@ TRACE_EVENT(page_pool_release, TRACE_EVENT(page_pool_state_release, TP_PROTO(const struct page_pool *pool, - const struct page *page, u32 release), + netmem_ref netmem, u32 release), - TP_ARGS(pool, page, release), + TP_ARGS(pool, netmem, release), TP_STRUCT__entry( __field(const struct page_pool *, pool) - __field(const struct page *, page) + __field(unsigned long, netmem) __field(u32, release) __field(unsigned long, pfn) ), TP_fast_assign( __entry->pool = pool; - __entry->page = page; + __entry->netmem = (__force unsigned long)netmem; __entry->release = release; - __entry->pfn = page_to_pfn(page); + __entry->pfn = netmem_to_pfn(netmem); ), - TP_printk("page_pool=%p page=%p pfn=0x%lx release=%u", - __entry->pool, __entry->page, __entry->pfn, __entry->release) + TP_printk("page_pool=%p netmem=%p pfn=0x%lx release=%u", + __entry->pool, (void *)__entry->netmem, + __entry->pfn, __entry->release) ); TRACE_EVENT(page_pool_state_hold, TP_PROTO(const struct page_pool *pool, - const struct page *page, u32 hold), + netmem_ref netmem, u32 hold), - TP_ARGS(pool, page, hold), + TP_ARGS(pool, netmem, hold), TP_STRUCT__entry( __field(const struct page_pool *, pool) - __field(const struct page *, page) + __field(unsigned long, netmem) __field(u32, hold) __field(unsigned long, pfn) ), TP_fast_assign( __entry->pool = pool; - __entry->page = page; + __entry->netmem = (__force unsigned long)netmem; __entry->hold = hold; - __entry->pfn = page_to_pfn(page); + __entry->pfn = netmem_to_pfn(netmem); ), - TP_printk("page_pool=%p page=%p pfn=0x%lx hold=%u", - __entry->pool, __entry->page, __entry->pfn, __entry->hold) + TP_printk("page_pool=%p netmem=%p pfn=0x%lx hold=%u", + __entry->pool, (void *)__entry->netmem, + __entry->pfn, __entry->hold) ); TRACE_EVENT(page_pool_update_nid, diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index 07e0715628ec..b877133cd93a 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h @@ -24,13 +24,14 @@ DEFINE_DROP_REASON(FN, FN) TRACE_EVENT(kfree_skb, TP_PROTO(struct sk_buff *skb, void *location, - enum skb_drop_reason reason), + enum skb_drop_reason reason, struct sock *rx_sk), - TP_ARGS(skb, location, reason), + TP_ARGS(skb, location, reason, rx_sk), TP_STRUCT__entry( __field(void *, skbaddr) __field(void *, location) + __field(void *, rx_sk) __field(unsigned short, protocol) __field(enum skb_drop_reason, reason) ), @@ -38,12 +39,14 @@ TRACE_EVENT(kfree_skb, TP_fast_assign( __entry->skbaddr = skb; __entry->location = location; + __entry->rx_sk = rx_sk; __entry->protocol = ntohs(skb->protocol); __entry->reason = reason; ), - TP_printk("skbaddr=%p protocol=%u location=%pS reason: %s", - __entry->skbaddr, __entry->protocol, __entry->location, + TP_printk("skbaddr=%p rx_sk=%p protocol=%u location=%pS reason: %s", + __entry->skbaddr, __entry->rx_sk, __entry->protocol, + __entry->location, __print_symbolic(__entry->reason, DEFINE_DROP_REASON(FN, FNe))) ); diff --git a/include/trace/events/tcp.h b/include/trace/events/tcp.h index 49b5ee091cf6..1c8bd8e186b8 100644 --- a/include/trace/events/tcp.h +++ b/include/trace/events/tcp.h @@ -411,6 +411,323 @@ TRACE_EVENT(tcp_cong_state_set, __entry->cong_state) ); +DECLARE_EVENT_CLASS(tcp_hash_event, + + TP_PROTO(const struct sock *sk, const struct sk_buff *skb), + + 
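Reviewer note: the kfree_skb tracepoint grows a receiving-socket argument, so out-of-tree probes need updating. A hypothetical probe against the extended signature (rx_sk may be NULL when the drop happens before socket demux):

#include <linux/skbuff.h>
#include <trace/events/skb.h>

static void example_kfree_skb_probe(void *ignore, struct sk_buff *skb,
				    void *location,
				    enum skb_drop_reason reason,
				    struct sock *rx_sk)
{
	if (rx_sk)
		pr_debug("drop for sk %p, reason %d\n", rx_sk, reason);
}

/* attached with: register_trace_kfree_skb(example_kfree_skb_probe, NULL); */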
TP_ARGS(sk, skb), + + TP_STRUCT__entry( + __field(__u64, net_cookie) + __field(const void *, skbaddr) + __field(const void *, skaddr) + __field(int, state) + + /* sockaddr_in6 is always bigger than sockaddr_in */ + __array(__u8, saddr, sizeof(struct sockaddr_in6)) + __array(__u8, daddr, sizeof(struct sockaddr_in6)) + __field(int, l3index) + + __field(__u16, sport) + __field(__u16, dport) + __field(__u16, family) + + __field(bool, fin) + __field(bool, syn) + __field(bool, rst) + __field(bool, psh) + __field(bool, ack) + ), + + TP_fast_assign( + const struct tcphdr *th = (const struct tcphdr *)skb->data; + + __entry->net_cookie = sock_net(sk)->net_cookie; + __entry->skbaddr = skb; + __entry->skaddr = sk; + __entry->state = sk->sk_state; + + memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); + memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); + TP_STORE_ADDR_PORTS_SKB(skb, th, __entry->saddr, __entry->daddr); + __entry->l3index = inet_sdif(skb) ? inet_iif(skb) : 0; + + /* For filtering use */ + __entry->sport = ntohs(th->source); + __entry->dport = ntohs(th->dest); + __entry->family = sk->sk_family; + + __entry->fin = th->fin; + __entry->syn = th->syn; + __entry->rst = th->rst; + __entry->psh = th->psh; + __entry->ack = th->ack; + ), + + TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc L3index=%d [%c%c%c%c%c]", + __entry->net_cookie, + show_tcp_state_name(__entry->state), + show_family_name(__entry->family), + __entry->saddr, __entry->daddr, + __entry->l3index, + __entry->fin ? 'F' : ' ', + __entry->syn ? 'S' : ' ', + __entry->rst ? 'R' : ' ', + __entry->psh ? 'P' : ' ', + __entry->ack ? '.' : ' ') +); + +DEFINE_EVENT(tcp_hash_event, tcp_hash_bad_header, + + TP_PROTO(const struct sock *sk, const struct sk_buff *skb), + TP_ARGS(sk, skb) +); + +DEFINE_EVENT(tcp_hash_event, tcp_hash_md5_required, + + TP_PROTO(const struct sock *sk, const struct sk_buff *skb), + TP_ARGS(sk, skb) +); + +DEFINE_EVENT(tcp_hash_event, tcp_hash_md5_unexpected, + + TP_PROTO(const struct sock *sk, const struct sk_buff *skb), + TP_ARGS(sk, skb) +); + +DEFINE_EVENT(tcp_hash_event, tcp_hash_md5_mismatch, + + TP_PROTO(const struct sock *sk, const struct sk_buff *skb), + TP_ARGS(sk, skb) +); + +DEFINE_EVENT(tcp_hash_event, tcp_hash_ao_required, + + TP_PROTO(const struct sock *sk, const struct sk_buff *skb), + TP_ARGS(sk, skb) +); + +DECLARE_EVENT_CLASS(tcp_ao_event, + + TP_PROTO(const struct sock *sk, const struct sk_buff *skb, + const __u8 keyid, const __u8 rnext, const __u8 maclen), + + TP_ARGS(sk, skb, keyid, rnext, maclen), + + TP_STRUCT__entry( + __field(__u64, net_cookie) + __field(const void *, skbaddr) + __field(const void *, skaddr) + __field(int, state) + + /* sockaddr_in6 is always bigger than sockaddr_in */ + __array(__u8, saddr, sizeof(struct sockaddr_in6)) + __array(__u8, daddr, sizeof(struct sockaddr_in6)) + __field(int, l3index) + + __field(__u16, sport) + __field(__u16, dport) + __field(__u16, family) + + __field(bool, fin) + __field(bool, syn) + __field(bool, rst) + __field(bool, psh) + __field(bool, ack) + + __field(__u8, keyid) + __field(__u8, rnext) + __field(__u8, maclen) + ), + + TP_fast_assign( + const struct tcphdr *th = (const struct tcphdr *)skb->data; + + __entry->net_cookie = sock_net(sk)->net_cookie; + __entry->skbaddr = skb; + __entry->skaddr = sk; + __entry->state = sk->sk_state; + + memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); + memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); + TP_STORE_ADDR_PORTS_SKB(skb, th, __entry->saddr, __entry->daddr); + 
__entry->l3index = inet_sdif(skb) ? inet_iif(skb) : 0; + + /* For filtering use */ + __entry->sport = ntohs(th->source); + __entry->dport = ntohs(th->dest); + __entry->family = sk->sk_family; + + __entry->fin = th->fin; + __entry->syn = th->syn; + __entry->rst = th->rst; + __entry->psh = th->psh; + __entry->ack = th->ack; + + __entry->keyid = keyid; + __entry->rnext = rnext; + __entry->maclen = maclen; + ), + + TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc L3index=%d [%c%c%c%c%c] keyid=%u rnext=%u maclen=%u", + __entry->net_cookie, + show_tcp_state_name(__entry->state), + show_family_name(__entry->family), + __entry->saddr, __entry->daddr, + __entry->l3index, + __entry->fin ? 'F' : ' ', + __entry->syn ? 'S' : ' ', + __entry->rst ? 'R' : ' ', + __entry->psh ? 'P' : ' ', + __entry->ack ? '.' : ' ', + __entry->keyid, __entry->rnext, __entry->maclen) +); + +DEFINE_EVENT(tcp_ao_event, tcp_ao_handshake_failure, + TP_PROTO(const struct sock *sk, const struct sk_buff *skb, + const __u8 keyid, const __u8 rnext, const __u8 maclen), + TP_ARGS(sk, skb, keyid, rnext, maclen) +); + +DEFINE_EVENT(tcp_ao_event, tcp_ao_wrong_maclen, + TP_PROTO(const struct sock *sk, const struct sk_buff *skb, + const __u8 keyid, const __u8 rnext, const __u8 maclen), + TP_ARGS(sk, skb, keyid, rnext, maclen) +); + +DEFINE_EVENT(tcp_ao_event, tcp_ao_mismatch, + TP_PROTO(const struct sock *sk, const struct sk_buff *skb, + const __u8 keyid, const __u8 rnext, const __u8 maclen), + TP_ARGS(sk, skb, keyid, rnext, maclen) +); + +DEFINE_EVENT(tcp_ao_event, tcp_ao_key_not_found, + TP_PROTO(const struct sock *sk, const struct sk_buff *skb, + const __u8 keyid, const __u8 rnext, const __u8 maclen), + TP_ARGS(sk, skb, keyid, rnext, maclen) +); + +DEFINE_EVENT(tcp_ao_event, tcp_ao_rnext_request, + TP_PROTO(const struct sock *sk, const struct sk_buff *skb, + const __u8 keyid, const __u8 rnext, const __u8 maclen), + TP_ARGS(sk, skb, keyid, rnext, maclen) +); + +DECLARE_EVENT_CLASS(tcp_ao_event_sk, + + TP_PROTO(const struct sock *sk, const __u8 keyid, const __u8 rnext), + + TP_ARGS(sk, keyid, rnext), + + TP_STRUCT__entry( + __field(__u64, net_cookie) + __field(const void *, skaddr) + __field(int, state) + + /* sockaddr_in6 is always bigger than sockaddr_in */ + __array(__u8, saddr, sizeof(struct sockaddr_in6)) + __array(__u8, daddr, sizeof(struct sockaddr_in6)) + + __field(__u16, sport) + __field(__u16, dport) + __field(__u16, family) + + __field(__u8, keyid) + __field(__u8, rnext) + ), + + TP_fast_assign( + const struct inet_sock *inet = inet_sk(sk); + + __entry->net_cookie = sock_net(sk)->net_cookie; + __entry->skaddr = sk; + __entry->state = sk->sk_state; + + memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); + memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); + TP_STORE_ADDR_PORTS(__entry, inet, sk); + + /* For filtering use */ + __entry->sport = ntohs(inet->inet_sport); + __entry->dport = ntohs(inet->inet_dport); + __entry->family = sk->sk_family; + + __entry->keyid = keyid; + __entry->rnext = rnext; + ), + + TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc keyid=%u rnext=%u", + __entry->net_cookie, + show_tcp_state_name(__entry->state), + show_family_name(__entry->family), + __entry->saddr, __entry->daddr, + __entry->keyid, __entry->rnext) +); + +DEFINE_EVENT(tcp_ao_event_sk, tcp_ao_synack_no_key, + TP_PROTO(const struct sock *sk, const __u8 keyid, const __u8 rnext), + TP_ARGS(sk, keyid, rnext) +); + +DECLARE_EVENT_CLASS(tcp_ao_event_sne, + + TP_PROTO(const struct sock *sk, __u32 new_sne), + + 
TP_ARGS(sk, new_sne), + + TP_STRUCT__entry( + __field(__u64, net_cookie) + __field(const void *, skaddr) + __field(int, state) + + /* sockaddr_in6 is always bigger than sockaddr_in */ + __array(__u8, saddr, sizeof(struct sockaddr_in6)) + __array(__u8, daddr, sizeof(struct sockaddr_in6)) + + __field(__u16, sport) + __field(__u16, dport) + __field(__u16, family) + + __field(__u32, new_sne) + ), + + TP_fast_assign( + const struct inet_sock *inet = inet_sk(sk); + + __entry->net_cookie = sock_net(sk)->net_cookie; + __entry->skaddr = sk; + __entry->state = sk->sk_state; + + memset(__entry->saddr, 0, sizeof(struct sockaddr_in6)); + memset(__entry->daddr, 0, sizeof(struct sockaddr_in6)); + TP_STORE_ADDR_PORTS(__entry, inet, sk); + + /* For filtering use */ + __entry->sport = ntohs(inet->inet_sport); + __entry->dport = ntohs(inet->inet_dport); + __entry->family = sk->sk_family; + + __entry->new_sne = new_sne; + ), + + TP_printk("net=%llu state=%s family=%s src=%pISpc dest=%pISpc sne=%u", + __entry->net_cookie, + show_tcp_state_name(__entry->state), + show_family_name(__entry->family), + __entry->saddr, __entry->daddr, + __entry->new_sne) +); + +DEFINE_EVENT(tcp_ao_event_sne, tcp_ao_snd_sne_update, + TP_PROTO(const struct sock *sk, __u32 new_sne), + TP_ARGS(sk, new_sne) +); + +DEFINE_EVENT(tcp_ao_event_sne, tcp_ao_rcv_sne_update, + TP_PROTO(const struct sock *sk, __u32 new_sne), + TP_ARGS(sk, new_sne) +); + #endif /* _TRACE_TCP_H */ /* This part must be outside protection */ diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h index 90706a47f6ff..35bcf52dbc65 100644 --- a/include/uapi/linux/bpf.h +++ b/include/uapi/linux/bpf.h @@ -1425,6 +1425,8 @@ enum { #define BPF_F_TEST_RUN_ON_CPU (1U << 0) /* If set, XDP frames will be transmitted after processing */ #define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1) +/* If set, apply CHECKSUM_COMPLETE to skb and validate the checksum */ +#define BPF_F_TEST_SKB_CHECKSUM_COMPLETE (1U << 2) /* type for BPF_ENABLE_STATS */ enum bpf_stats_type { @@ -6207,12 +6209,17 @@ union { \ __u64 :64; \ } __attribute__((aligned(8))) +/* The enum used in skb->tstamp_type. It specifies the clock type + * of the time stored in the skb->tstamp. + */ enum { - BPF_SKB_TSTAMP_UNSPEC, - BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */ - /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle, - * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC - * and try to deduce it by ingress, egress or skb->sk->sk_clockid. + BPF_SKB_TSTAMP_UNSPEC = 0, /* DEPRECATED */ + BPF_SKB_TSTAMP_DELIVERY_MONO = 1, /* DEPRECATED */ + BPF_SKB_CLOCK_REALTIME = 0, + BPF_SKB_CLOCK_MONOTONIC = 1, + BPF_SKB_CLOCK_TAI = 2, + /* For any future BPF_SKB_CLOCK_* that the bpf prog cannot handle, + * the bpf prog can try to deduce it by ingress/egress/skb->sk->sk_clockid. */ }; diff --git a/include/uapi/linux/can/isotp.h b/include/uapi/linux/can/isotp.h index 6cde62371b6f..bd990917f7c4 100644 --- a/include/uapi/linux/can/isotp.h +++ b/include/uapi/linux/can/isotp.h @@ -2,7 +2,7 @@ /* * linux/can/isotp.h * - * Definitions for isotp CAN sockets (ISO 15765-2:2016) + * Definitions for ISO 15765-2 CAN transport protocol sockets * * Copyright (c) 2020 Volkswagen Group Electronic Research * All rights reserved. 
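As an illustrative aside on the BPF_SKB_CLOCK_* change above (not part of the patches themselves): a minimal sketch of how a tc BPF program might key off the new clock constants via __sk_buff->tstamp_type instead of the deprecated BPF_SKB_TSTAMP_* values. The program name, the SEC("tc") section, and the TC_ACT_OK return are the usual libbpf/tc conventions, assumed here for the example:

#include <linux/bpf.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>

SEC("tc")
int tstamp_type_demo(struct __sk_buff *skb)
{
	/* skb->tstamp_type now carries one of the BPF_SKB_CLOCK_*
	 * values; the old BPF_SKB_TSTAMP_* names remain only as
	 * deprecated aliases for the first two.
	 */
	switch (skb->tstamp_type) {
	case BPF_SKB_CLOCK_MONOTONIC:
		/* mono delivery time, e.g. set by EDT-style pacing */
		break;
	case BPF_SKB_CLOCK_REALTIME:
	case BPF_SKB_CLOCK_TAI:
		/* wall-clock or TAI timestamp stored in skb->tstamp */
		break;
	}
	return TC_ACT_OK;
}

char _license[] SEC("license") = "GPL";
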
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h index 8733a3117902..4a0a6e703483 100644 --- a/include/uapi/linux/ethtool.h +++ b/include/uapi/linux/ethtool.h @@ -752,6 +752,197 @@ enum ethtool_module_power_mode { ETHTOOL_MODULE_POWER_MODE_HIGH, }; +/** + * enum ethtool_c33_pse_ext_state - groups of PSE extended states + * functions. IEEE 802.3-2022 33.2.4.4 Variables + * + * @ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION: Group of error_condition states + * @ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID: Group of mr_mps_valid states + * @ETHTOOL_C33_PSE_EXT_STATE_MR_PSE_ENABLE: Group of mr_pse_enable states + * @ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED: Group of option_detect_ted + * states + * @ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM: Group of option_vport_lim states + * @ETHTOOL_C33_PSE_EXT_STATE_OVLD_DETECTED: Group of ovld_detected states + * @ETHTOOL_C33_PSE_EXT_STATE_PD_DLL_POWER_TYPE: Group of pd_dll_power_type + * states + * @ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE: Group of power_not_available + * states + * @ETHTOOL_C33_PSE_EXT_STATE_SHORT_DETECTED: Group of short_detected states + */ +enum ethtool_c33_pse_ext_state { + ETHTOOL_C33_PSE_EXT_STATE_ERROR_CONDITION = 1, + ETHTOOL_C33_PSE_EXT_STATE_MR_MPS_VALID, + ETHTOOL_C33_PSE_EXT_STATE_MR_PSE_ENABLE, + ETHTOOL_C33_PSE_EXT_STATE_OPTION_DETECT_TED, + ETHTOOL_C33_PSE_EXT_STATE_OPTION_VPORT_LIM, + ETHTOOL_C33_PSE_EXT_STATE_OVLD_DETECTED, + ETHTOOL_C33_PSE_EXT_STATE_PD_DLL_POWER_TYPE, + ETHTOOL_C33_PSE_EXT_STATE_POWER_NOT_AVAILABLE, + ETHTOOL_C33_PSE_EXT_STATE_SHORT_DETECTED, +}; + +/** + * enum ethtool_c33_pse_ext_substate_mr_mps_valid - mr_mps_valid states + * functions. IEEE 802.3-2022 33.2.4.4 Variables + * + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_DETECTED_UNDERLOAD: Underload + * state + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_CONNECTION_OPEN: Port is not + * connected + * + * The PSE monitors either the DC or AC Maintain Power Signature + * (MPS, see 33.2.9.1). This variable indicates the presence or absence of + * a valid MPS. + */ +enum ethtool_c33_pse_ext_substate_mr_mps_valid { + ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_DETECTED_UNDERLOAD = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_MPS_VALID_CONNECTION_OPEN, +}; + +/** + * enum ethtool_c33_pse_ext_substate_error_condition - error_condition states + * functions. IEEE 802.3-2022 33.2.4.4 Variables + * + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_NON_EXISTING_PORT: Non-existing + * port number + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNDEFINED_PORT: Undefined port + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT: Internal + * hardware fault + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_COMM_ERROR_AFTER_FORCE_ON: + * Communication error after force on + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS: Unknown + * port status + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_TURN_OFF: Host + * crash turn off + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_FORCE_SHUTDOWN: + * Host crash force shutdown + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_CONFIG_CHANGE: Configuration + * change + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP: Over + * temperature detected + * + * error_condition is a variable indicating the status of + * implementation-specific fault conditions or optionally other system faults + * that prevent the PSE from meeting the specifications in Table 33–11 and that + * require the PSE not to source power. 
These error conditions are different + * from those monitored by the state diagrams in Figure 33–10. + */ +enum ethtool_c33_pse_ext_substate_error_condition { + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_NON_EXISTING_PORT = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNDEFINED_PORT, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_INTERNAL_HW_FAULT, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_COMM_ERROR_AFTER_FORCE_ON, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_UNKNOWN_PORT_STATUS, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_TURN_OFF, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_HOST_CRASH_FORCE_SHUTDOWN, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_CONFIG_CHANGE, + ETHTOOL_C33_PSE_EXT_SUBSTATE_ERROR_CONDITION_DETECTED_OVER_TEMP, +}; + +/** + * enum ethtool_c33_pse_ext_substate_mr_pse_enable - mr_pse_enable states + * functions. IEEE 802.3-2022 33.2.4.4 Variables + * + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_PSE_ENABLE_DISABLE_PIN_ACTIVE: Disable + * pin active + * + * mr_pse_enable is a control variable that selects PSE operation and test + * functions. + */ +enum ethtool_c33_pse_ext_substate_mr_pse_enable { + ETHTOOL_C33_PSE_EXT_SUBSTATE_MR_PSE_ENABLE_DISABLE_PIN_ACTIVE = 1, +}; + +/** + * enum ethtool_c33_pse_ext_substate_option_detect_ted - option_detect_ted + * states functions. IEEE 802.3-2022 33.2.4.4 Variables + * + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_DET_IN_PROCESS: Detection + * in process + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_CONNECTION_CHECK_ERROR: + * Connection check error + * + * option_detect_ted is a variable indicating if detection can be performed + * by the PSE during the ted_timer interval. + */ +enum ethtool_c33_pse_ext_substate_option_detect_ted { + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_DET_IN_PROCESS = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_DETECT_TED_CONNECTION_CHECK_ERROR, +}; + +/** + * enum ethtool_c33_pse_ext_substate_option_vport_lim - option_vport_lim states + * functions. IEEE 802.3-2022 33.2.4.4 Variables + * + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_HIGH_VOLTAGE: Main supply + * voltage is high + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_LOW_VOLTAGE: Main supply + * voltage is low + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_VOLTAGE_INJECTION: Voltage + * injection into the port + * + * option_vport_lim is an optional variable indicating if VPSE is out of the + * operating range during normal operating state. + */ +enum ethtool_c33_pse_ext_substate_option_vport_lim { + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_HIGH_VOLTAGE = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_LOW_VOLTAGE, + ETHTOOL_C33_PSE_EXT_SUBSTATE_OPTION_VPORT_LIM_VOLTAGE_INJECTION, +}; + +/** + * enum ethtool_c33_pse_ext_substate_ovld_detected - ovld_detected states + * functions. IEEE 802.3-2022 33.2.4.4 Variables + * + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_OVLD_DETECTED_OVERLOAD: Overload state + * + * ovld_detected is a variable indicating if the PSE output current has been + * in an overload condition (see 33.2.7.6) for at least TCUT of a one-second + * sliding time. + */ +enum ethtool_c33_pse_ext_substate_ovld_detected { + ETHTOOL_C33_PSE_EXT_SUBSTATE_OVLD_DETECTED_OVERLOAD = 1, +}; + +/** + * enum ethtool_c33_pse_ext_substate_power_not_available - power_not_available + * states functions. 
IEEE 802.3-2022 33.2.4.4 Variables + * + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_BUDGET_EXCEEDED: Power + * budget exceeded for the controller + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PORT_PW_LIMIT_EXCEEDS_CONTROLLER_BUDGET: + * Configured port power limit exceeds controller power budget + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PD_REQUEST_EXCEEDS_PORT_LIMIT: + * Power request from PD exceeds port limit + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_HW_PW_LIMIT: Power + * denied due to hardware power limit + * + * power_not_available is a variable that is asserted in an + * implementation-dependent manner when the PSE is no longer capable of + * sourcing sufficient power to support the attached PD. Sufficient power + * is defined by classification; see 33.2.6. + */ +enum ethtool_c33_pse_ext_substate_power_not_available { + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_BUDGET_EXCEEDED = 1, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PORT_PW_LIMIT_EXCEEDS_CONTROLLER_BUDGET, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_PD_REQUEST_EXCEEDS_PORT_LIMIT, + ETHTOOL_C33_PSE_EXT_SUBSTATE_POWER_NOT_AVAILABLE_HW_PW_LIMIT, +}; + +/** + * enum ethtool_c33_pse_ext_substate_short_detected - short_detected states + * functions. IEEE 802.3-2022 33.2.4.4 Variables + * + * @ETHTOOL_C33_PSE_EXT_SUBSTATE_SHORT_DETECTED_SHORT_CONDITION: Short + * condition was detected + * + * short_detected is a variable indicating if the PSE output current has been + * in a short circuit condition for TLIM within a sliding window (see 33.2.7.7). + */ +enum ethtool_c33_pse_ext_substate_short_detected { + ETHTOOL_C33_PSE_EXT_SUBSTATE_SHORT_DETECTED_SHORT_CONDITION = 1, +}; + /** * enum ethtool_pse_types - Types of PSE controller. * @ETHTOOL_PSE_UNKNOWN: Type of PSE controller is unknown @@ -877,6 +1068,24 @@ enum ethtool_mm_verify_status { ETHTOOL_MM_VERIFY_STATUS_DISABLED, }; +/** + * enum ethtool_module_fw_flash_status - plug-in module firmware flashing status + * @ETHTOOL_MODULE_FW_FLASH_STATUS_STARTED: The firmware flashing process has + * started. + * @ETHTOOL_MODULE_FW_FLASH_STATUS_IN_PROGRESS: The firmware flashing process + * is in progress. + * @ETHTOOL_MODULE_FW_FLASH_STATUS_COMPLETED: The firmware flashing process was + * completed successfully. + * @ETHTOOL_MODULE_FW_FLASH_STATUS_ERROR: The firmware flashing process was + * stopped due to an error. 
+ */ +enum ethtool_module_fw_flash_status { + ETHTOOL_MODULE_FW_FLASH_STATUS_STARTED = 1, + ETHTOOL_MODULE_FW_FLASH_STATUS_IN_PROGRESS, + ETHTOOL_MODULE_FW_FLASH_STATUS_COMPLETED, + ETHTOOL_MODULE_FW_FLASH_STATUS_ERROR, +}; + /** * struct ethtool_gstrings - string set for data tagging * @cmd: Command number = %ETHTOOL_GSTRINGS @@ -1845,6 +2054,7 @@ enum ethtool_link_mode_bit_indices { ETHTOOL_LINK_MODE_10baseT1S_Full_BIT = 99, ETHTOOL_LINK_MODE_10baseT1S_Half_BIT = 100, ETHTOOL_LINK_MODE_10baseT1S_P2MP_Half_BIT = 101, + ETHTOOL_LINK_MODE_10baseT1BRR_Full_BIT = 102, /* must be last entry */ __ETHTOOL_LINK_MODE_MASK_NBITS diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index b49b804b9495..6d5bdcc67631 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -57,6 +57,7 @@ enum { ETHTOOL_MSG_PLCA_GET_STATUS, ETHTOOL_MSG_MM_GET, ETHTOOL_MSG_MM_SET, + ETHTOOL_MSG_MODULE_FW_FLASH_ACT, /* add new constants above here */ __ETHTOOL_MSG_USER_CNT, @@ -109,6 +110,7 @@ enum { ETHTOOL_MSG_PLCA_NTF, ETHTOOL_MSG_MM_GET_REPLY, ETHTOOL_MSG_MM_NTF, + ETHTOOL_MSG_MODULE_FW_FLASH_NTF, /* add new constants above here */ __ETHTOOL_MSG_KERNEL_CNT, @@ -415,12 +417,34 @@ enum { ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES, /* u32 */ ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES, /* u32 */ ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS, /* u32 */ + /* nest - _A_PROFILE_IRQ_MODERATION */ + ETHTOOL_A_COALESCE_RX_PROFILE, + /* nest - _A_PROFILE_IRQ_MODERATION */ + ETHTOOL_A_COALESCE_TX_PROFILE, /* add new constants above here */ __ETHTOOL_A_COALESCE_CNT, ETHTOOL_A_COALESCE_MAX = (__ETHTOOL_A_COALESCE_CNT - 1) }; +enum { + ETHTOOL_A_PROFILE_UNSPEC, + /* nest, _A_IRQ_MODERATION_* */ + ETHTOOL_A_PROFILE_IRQ_MODERATION, + __ETHTOOL_A_PROFILE_CNT, + ETHTOOL_A_PROFILE_MAX = (__ETHTOOL_A_PROFILE_CNT - 1) +}; + +enum { + ETHTOOL_A_IRQ_MODERATION_UNSPEC, + ETHTOOL_A_IRQ_MODERATION_USEC, /* u32 */ + ETHTOOL_A_IRQ_MODERATION_PKTS, /* u32 */ + ETHTOOL_A_IRQ_MODERATION_COMPS, /* u32 */ + + __ETHTOOL_A_IRQ_MODERATION_CNT, + ETHTOOL_A_IRQ_MODERATION_MAX = (__ETHTOOL_A_IRQ_MODERATION_CNT - 1) +}; + /* PAUSE */ enum { @@ -906,6 +930,12 @@ enum { }; /* Power Sourcing Equipment */ +enum { + ETHTOOL_A_C33_PSE_PW_LIMIT_UNSPEC, + ETHTOOL_A_C33_PSE_PW_LIMIT_MIN, /* u32 */ + ETHTOOL_A_C33_PSE_PW_LIMIT_MAX, /* u32 */ +}; + enum { ETHTOOL_A_PSE_UNSPEC, ETHTOOL_A_PSE_HEADER, /* nest - _A_HEADER_* */ @@ -915,6 +945,12 @@ enum { ETHTOOL_A_C33_PSE_ADMIN_STATE, /* u32 */ ETHTOOL_A_C33_PSE_ADMIN_CONTROL, /* u32 */ ETHTOOL_A_C33_PSE_PW_D_STATUS, /* u32 */ + ETHTOOL_A_C33_PSE_PW_CLASS, /* u32 */ + ETHTOOL_A_C33_PSE_ACTUAL_PW, /* u32 */ + ETHTOOL_A_C33_PSE_EXT_STATE, /* u32 */ + ETHTOOL_A_C33_PSE_EXT_SUBSTATE, /* u32 */ + ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT, /* u32 */ + ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES, /* nest - _C33_PSE_PW_LIMIT_* */ /* add new constants above here */ __ETHTOOL_A_PSE_CNT, @@ -996,6 +1032,23 @@ enum { ETHTOOL_A_MM_MAX = (__ETHTOOL_A_MM_CNT - 1) }; +/* MODULE_FW_FLASH */ + +enum { + ETHTOOL_A_MODULE_FW_FLASH_UNSPEC, + ETHTOOL_A_MODULE_FW_FLASH_HEADER, /* nest - _A_HEADER_* */ + ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME, /* string */ + ETHTOOL_A_MODULE_FW_FLASH_PASSWORD, /* u32 */ + ETHTOOL_A_MODULE_FW_FLASH_STATUS, /* u32 */ + ETHTOOL_A_MODULE_FW_FLASH_STATUS_MSG, /* string */ + ETHTOOL_A_MODULE_FW_FLASH_DONE, /* uint */ + ETHTOOL_A_MODULE_FW_FLASH_TOTAL, /* uint */ + + /* add new constants above here */ + __ETHTOOL_A_MODULE_FW_FLASH_CNT, + ETHTOOL_A_MODULE_FW_FLASH_MAX = 
(__ETHTOOL_A_MODULE_FW_FLASH_CNT - 1) +}; + /* generic netlink info */ #define ETHTOOL_GENL_NAME "ethtool" #define ETHTOOL_GENL_VERSION 1 diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h index e682ab628dfa..d358add1611c 100644 --- a/include/uapi/linux/in.h +++ b/include/uapi/linux/in.h @@ -81,6 +81,8 @@ enum { #define IPPROTO_ETHERNET IPPROTO_ETHERNET IPPROTO_RAW = 255, /* Raw IP packets */ #define IPPROTO_RAW IPPROTO_RAW + IPPROTO_SMC = 256, /* Shared Memory Communications */ +#define IPPROTO_SMC IPPROTO_SMC IPPROTO_MPTCP = 262, /* Multipath TCP connection */ #define IPPROTO_MPTCP IPPROTO_MPTCP IPPROTO_MAX diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h index aa4094ca2444..639894ed1b97 100644 --- a/include/uapi/linux/netfilter/nf_tables.h +++ b/include/uapi/linux/netfilter/nf_tables.h @@ -1376,7 +1376,7 @@ enum nft_secmark_attributes { #define NFTA_SECMARK_MAX (__NFTA_SECMARK_MAX - 1) /* Max security context length */ -#define NFT_SECMARK_CTX_MAXLEN 256 +#define NFT_SECMARK_CTX_MAXLEN 4096 /** * enum nft_reject_types - nf_tables reject expression reject types diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h index f917bc6c9b6f..f97f5adc8d51 100644 --- a/include/uapi/linux/nl80211.h +++ b/include/uapi/linux/nl80211.h @@ -2052,6 +2052,10 @@ enum nl80211_commands { * @NL80211_ATTR_INTERFACE_COMBINATIONS: Nested attribute listing the supported * interface combinations. In each nested item, it contains attributes * defined in &enum nl80211_if_combination_attrs. + * If the wiphy uses multiple radios (@NL80211_ATTR_WIPHY_RADIOS is set), + * this attribute contains the interface combinations of the first radio. + * See @NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS for the global wiphy + * combinations for the sum of all radios. * @NL80211_ATTR_SOFTWARE_IFTYPES: Nested attribute (just like * %NL80211_ATTR_SUPPORTED_IFTYPES) containing the interface types that * are managed in software: interfaces of these types aren't subject to @@ -2856,6 +2860,14 @@ enum nl80211_commands { * %NL80211_CMD_ASSOCIATE indicating the SPP A-MSDUs * are used on this connection * + * @NL80211_ATTR_WIPHY_RADIOS: Nested attribute describing physical radios + * belonging to this wiphy. See &enum nl80211_wiphy_radio_attrs. + * + * @NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS: Nested attribute listing the + * supported interface combinations for all radios combined. In each + * nested item, it contains attributes defined in + * &enum nl80211_if_combination_attrs. + * * @NUM_NL80211_ATTR: total number of nl80211_attrs available * @NL80211_ATTR_MAX: highest attribute number currently defined * @__NL80211_ATTR_AFTER_LAST: internal use @@ -3401,6 +3413,9 @@ enum nl80211_attrs { NL80211_ATTR_ASSOC_SPP_AMSDU, + NL80211_ATTR_WIPHY_RADIOS, + NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS, + /* add attributes here, update the policy in nl80211.c */ __NL80211_ATTR_AFTER_LAST, @@ -4277,6 +4292,8 @@ enum nl80211_wmm_rule { * @NL80211_FREQUENCY_ATTR_CAN_MONITOR: This channel can be used in monitor * mode despite other (regulatory) restrictions, even if the channel is * otherwise completely disabled. + * @NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP: This channel can be used for a + * very low power (VLP) AP, despite being NO_IR. 
* @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number * currently defined * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use @@ -4320,6 +4337,7 @@ enum nl80211_frequency_attr { NL80211_FREQUENCY_ATTR_NO_6GHZ_VLP_CLIENT, NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT, NL80211_FREQUENCY_ATTR_CAN_MONITOR, + NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP, /* keep last */ __NL80211_FREQUENCY_ATTR_AFTER_LAST, @@ -4529,6 +4547,8 @@ enum nl80211_sched_scan_match_attr { * Should be used together with %NL80211_RRF_DFS only. * @NL80211_RRF_NO_6GHZ_VLP_CLIENT: Client connection to VLP AP not allowed * @NL80211_RRF_NO_6GHZ_AFC_CLIENT: Client connection to AFC AP not allowed + * @NL80211_RRF_ALLOW_6GHZ_VLP_AP: Very low power (VLP) AP can be permitted + * despite NO_IR configuration. */ enum nl80211_reg_rule_flags { NL80211_RRF_NO_OFDM = 1<<0, @@ -4553,6 +4573,7 @@ enum nl80211_reg_rule_flags { NL80211_RRF_DFS_CONCURRENT = 1<<21, NL80211_RRF_NO_6GHZ_VLP_CLIENT = 1<<22, NL80211_RRF_NO_6GHZ_AFC_CLIENT = 1<<23, + NL80211_RRF_ALLOW_6GHZ_VLP_AP = 1<<24, }; #define NL80211_RRF_PASSIVE_SCAN NL80211_RRF_NO_IR @@ -7999,4 +8020,54 @@ enum nl80211_ap_settings_flags { NL80211_AP_SETTINGS_SA_QUERY_OFFLOAD_SUPPORT = 1 << 1, }; +/** + * enum nl80211_wiphy_radio_attrs - wiphy radio attributes + * + * @__NL80211_WIPHY_RADIO_ATTR_INVALID: Invalid + * + * @NL80211_WIPHY_RADIO_ATTR_INDEX: Index of this radio (u32) + * @NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE: Frequency range supported by this + * radio. Attribute may be present multiple times. + * @NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION: Supported interface + * combination for this radio. Attribute may be present multiple times + * and contains attributes defined in &enum nl80211_if_combination_attrs. + * + * @__NL80211_WIPHY_RADIO_ATTR_LAST: Internal + * @NL80211_WIPHY_RADIO_ATTR_MAX: Highest attribute + */ +enum nl80211_wiphy_radio_attrs { + __NL80211_WIPHY_RADIO_ATTR_INVALID, + + NL80211_WIPHY_RADIO_ATTR_INDEX, + NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE, + NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION, + + /* keep last */ + __NL80211_WIPHY_RADIO_ATTR_LAST, + NL80211_WIPHY_RADIO_ATTR_MAX = __NL80211_WIPHY_RADIO_ATTR_LAST - 1, +}; + +/** + * enum nl80211_wiphy_radio_freq_range - wiphy radio frequency range + * + * @__NL80211_WIPHY_RADIO_FREQ_ATTR_INVALID: Invalid + * + * @NL80211_WIPHY_RADIO_FREQ_ATTR_START: Frequency range start (u32). + * The unit is kHz. + * @NL80211_WIPHY_RADIO_FREQ_ATTR_END: Frequency range end (u32). + * The unit is kHz. + * + * @__NL80211_WIPHY_RADIO_FREQ_ATTR_LAST: Internal + * @NL80211_WIPHY_RADIO_FREQ_ATTR_MAX: Highest attribute + */ +enum nl80211_wiphy_radio_freq_range { + __NL80211_WIPHY_RADIO_FREQ_ATTR_INVALID, + + NL80211_WIPHY_RADIO_FREQ_ATTR_START, + NL80211_WIPHY_RADIO_FREQ_ATTR_END, + + __NL80211_WIPHY_RADIO_FREQ_ATTR_LAST, + NL80211_WIPHY_RADIO_FREQ_ATTR_MAX = __NL80211_WIPHY_RADIO_FREQ_ATTR_LAST - 1, +}; + #endif /* __LINUX_NL80211_H */ diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index efc82c318fa2..3a701bd1f31b 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -649,7 +649,8 @@ enum ovs_flow_attr { * Actions are passed as nested attributes. * * Executes the specified actions with the given probability on a per-packet - * basis. + * basis. Nested actions will be able to access the probability value of the + * parent @OVS_ACTION_ATTR_SAMPLE. 
*/ enum ovs_sample_attr { OVS_SAMPLE_ATTR_UNSPEC, @@ -914,6 +915,31 @@ struct check_pkt_len_arg { }; #endif +#define OVS_PSAMPLE_COOKIE_MAX_SIZE 16 +/** + * enum ovs_psample_attr - Attributes for %OVS_ACTION_ATTR_PSAMPLE + * action. + * + * @OVS_PSAMPLE_ATTR_GROUP: 32-bit number to identify the source of the + * sample. + * @OVS_PSAMPLE_ATTR_COOKIE: An optional variable-length binary cookie that + * contains user-defined metadata. The maximum length is + * OVS_PSAMPLE_COOKIE_MAX_SIZE bytes. + * + * Sends the packet to the psample multicast group with the specified group and + * cookie. It is possible to combine this action with the + * %OVS_ACTION_ATTR_TRUNC action to limit the size of the sample. + */ +enum ovs_psample_attr { + OVS_PSAMPLE_ATTR_GROUP = 1, /* u32 number. */ + OVS_PSAMPLE_ATTR_COOKIE, /* Optional, user specified cookie. */ + + /* private: */ + __OVS_PSAMPLE_ATTR_MAX +}; + +#define OVS_PSAMPLE_ATTR_MAX (__OVS_PSAMPLE_ATTR_MAX - 1) + /** * enum ovs_action_attr - Action types. * @@ -966,6 +992,8 @@ struct check_pkt_len_arg { * of l3 tunnel flag in the tun_flags field of OVS_ACTION_ATTR_ADD_MPLS * argument. * @OVS_ACTION_ATTR_DROP: Explicit drop action. + * @OVS_ACTION_ATTR_PSAMPLE: Send a sample of the packet to external observers + * via psample. * * Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all * fields within a header are modifiable, e.g. the IPv4 protocol and fragment @@ -1004,6 +1032,7 @@ enum ovs_action_attr { OVS_ACTION_ATTR_ADD_MPLS, /* struct ovs_action_add_mpls. */ OVS_ACTION_ATTR_DEC_TTL, /* Nested OVS_DEC_TTL_ATTR_*. */ OVS_ACTION_ATTR_DROP, /* u32 error code. */ + OVS_ACTION_ATTR_PSAMPLE, /* Nested OVS_PSAMPLE_ATTR_*. */ __OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted * from userspace. */ diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h index 229fc925ec3a..d36d9cdf0c00 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -554,6 +554,9 @@ enum { TCA_FLOWER_KEY_SPI, /* be32 */ TCA_FLOWER_KEY_SPI_MASK, /* be32 */ + TCA_FLOWER_KEY_ENC_FLAGS, /* be32 */ + TCA_FLOWER_KEY_ENC_FLAGS_MASK, /* be32 */ + __TCA_FLOWER_MAX, }; @@ -674,8 +677,15 @@ enum { enum { TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0), TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1), + TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM = (1 << 2), + TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT = (1 << 3), + TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM = (1 << 4), + TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT = (1 << 5), + __TCA_FLOWER_KEY_FLAGS_MAX, }; +#define TCA_FLOWER_KEY_FLAGS_MAX (__TCA_FLOWER_KEY_FLAGS_MAX - 1) + enum { TCA_FLOWER_KEY_CFM_OPT_UNSPEC, TCA_FLOWER_KEY_CFM_MD_LEVEL, diff --git a/include/uapi/linux/psample.h b/include/uapi/linux/psample.h index e585db5bf2d2..b765f0e81f20 100644 --- a/include/uapi/linux/psample.h +++ b/include/uapi/linux/psample.h @@ -8,7 +8,11 @@ enum { PSAMPLE_ATTR_ORIGSIZE, PSAMPLE_ATTR_SAMPLE_GROUP, PSAMPLE_ATTR_GROUP_SEQ, - PSAMPLE_ATTR_SAMPLE_RATE, + PSAMPLE_ATTR_SAMPLE_RATE, /* u32, ratio between observed and + * sampled packets or scaled probability + * if PSAMPLE_ATTR_SAMPLE_PROBABILITY + * is set. + */ PSAMPLE_ATTR_DATA, PSAMPLE_ATTR_GROUP_REFCOUNT, PSAMPLE_ATTR_TUNNEL, @@ -19,6 +23,11 @@ enum { PSAMPLE_ATTR_LATENCY, /* u64, nanoseconds */ PSAMPLE_ATTR_TIMESTAMP, /* u64, nanoseconds */ PSAMPLE_ATTR_PROTO, /* u16 */ + PSAMPLE_ATTR_USER_COOKIE, /* binary, user provided data */ + PSAMPLE_ATTR_SAMPLE_PROBABILITY,/* no argument, interpret rate in + * PSAMPLE_ATTR_SAMPLE_RATE as a + * probability scaled 0 - U32_MAX. 
+ */ __PSAMPLE_ATTR_MAX }; diff --git a/include/uapi/linux/tcp_metrics.h b/include/uapi/linux/tcp_metrics.h index 7cb4a172feed..927c735a5b0e 100644 --- a/include/uapi/linux/tcp_metrics.h +++ b/include/uapi/linux/tcp_metrics.h @@ -1,8 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* tcp_metrics.h - TCP Metrics Interface */ -#ifndef _LINUX_TCP_METRICS_H -#define _LINUX_TCP_METRICS_H +#ifndef _UAPI_LINUX_TCP_METRICS_H +#define _UAPI_LINUX_TCP_METRICS_H #include @@ -27,6 +27,22 @@ enum tcp_metric_index { #define TCP_METRIC_MAX (__TCP_METRIC_MAX - 1) +/* Re-define enum tcp_metric_index, again, using the values carried + * as netlink attribute types. + */ +enum { + TCP_METRICS_A_METRICS_RTT = 1, + TCP_METRICS_A_METRICS_RTTVAR, + TCP_METRICS_A_METRICS_SSTHRESH, + TCP_METRICS_A_METRICS_CWND, + TCP_METRICS_A_METRICS_REODERING, + TCP_METRICS_A_METRICS_RTT_US, + TCP_METRICS_A_METRICS_RTTVAR_US, + + __TCP_METRICS_A_METRICS_MAX +}; +#define TCP_METRICS_A_METRICS_MAX (__TCP_METRICS_A_METRICS_MAX - 1) + enum { TCP_METRICS_ATTR_UNSPEC, TCP_METRICS_ATTR_ADDR_IPV4, /* u32 */ @@ -58,4 +74,4 @@ enum { #define TCP_METRICS_CMD_MAX (__TCP_METRICS_CMD_MAX - 1) -#endif /* _LINUX_TCP_METRICS_H */ +#endif /* _UAPI_LINUX_TCP_METRICS_H */ diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h index d950d02ab791..f28701500714 100644 --- a/include/uapi/linux/xfrm.h +++ b/include/uapi/linux/xfrm.h @@ -321,6 +321,7 @@ enum xfrm_attr_type_t { XFRMA_IF_ID, /* __u32 */ XFRMA_MTIMER_THRESH, /* __u32 in seconds for input SA */ XFRMA_SA_DIR, /* __u8 */ + XFRMA_NAT_KEEPALIVE_INTERVAL, /* __u32 in seconds for NAT keepalive */ __XFRMA_MAX #define XFRMA_OUTPUT_MARK XFRMA_SET_MARK /* Compatibility */ diff --git a/io_uring/net.c b/io_uring/net.c index 7b75da2e7826..594490a1389b 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -1277,14 +1277,14 @@ int io_send_zc_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) return io_sendmsg_prep_setup(req, req->opcode == IORING_OP_SENDMSG_ZC); } -static int io_sg_from_iter_iovec(struct sock *sk, struct sk_buff *skb, +static int io_sg_from_iter_iovec(struct sk_buff *skb, struct iov_iter *from, size_t length) { skb_zcopy_downgrade_managed(skb); - return __zerocopy_sg_from_iter(NULL, sk, skb, from, length); + return zerocopy_fill_skb_from_iter(skb, from, length); } -static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb, +static int io_sg_from_iter(struct sk_buff *skb, struct iov_iter *from, size_t length) { struct skb_shared_info *shinfo = skb_shinfo(skb); @@ -1297,7 +1297,7 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb, if (!frag) shinfo->flags |= SKBFL_MANAGED_FRAG_REFS; else if (unlikely(!skb_zcopy_managed(skb))) - return __zerocopy_sg_from_iter(NULL, sk, skb, from, length); + return zerocopy_fill_skb_from_iter(skb, from, length); bi.bi_size = min(from->count, length); bi.bi_bvec_done = from->iov_offset; @@ -1324,14 +1324,6 @@ static int io_sg_from_iter(struct sock *sk, struct sk_buff *skb, skb->data_len += copied; skb->len += copied; skb->truesize += truesize; - - if (sk && sk->sk_type == SOCK_STREAM) { - sk_wmem_queued_add(sk, truesize); - if (!skb_zcopy_pure(skb)) - sk_mem_charge(sk, truesize); - } else { - refcount_add(truesize, &skb->sk->sk_wmem_alloc); - } return ret; } diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile index 7eb9ad3a3ae6..0291eef9ce92 100644 --- a/kernel/bpf/Makefile +++ b/kernel/bpf/Makefile @@ -50,5 +50,11 @@ endif obj-$(CONFIG_BPF_PRELOAD) += preload/ obj-$(CONFIG_BPF_SYSCALL) += 
relo_core.o -$(obj)/relo_core.o: $(srctree)/tools/lib/bpf/relo_core.c FORCE +obj-$(CONFIG_BPF_SYSCALL) += btf_iter.o +obj-$(CONFIG_BPF_SYSCALL) += btf_relocate.o + +# Some source files are common to libbpf. +vpath %.c $(srctree)/kernel/bpf:$(srctree)/tools/lib/bpf + +$(obj)/%.o: %.c FORCE $(call if_changed_rule,cc_o_c) diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c index 68240c3c6e7d..08a338e1f231 100644 --- a/kernel/bpf/bpf_lsm.c +++ b/kernel/bpf/bpf_lsm.c @@ -280,6 +280,7 @@ BTF_ID(func, bpf_lsm_cred_prepare) BTF_ID(func, bpf_lsm_file_ioctl) BTF_ID(func, bpf_lsm_file_lock) BTF_ID(func, bpf_lsm_file_open) +BTF_ID(func, bpf_lsm_file_post_open) BTF_ID(func, bpf_lsm_file_receive) BTF_ID(func, bpf_lsm_inode_create) diff --git a/kernel/bpf/bpf_struct_ops.c b/kernel/bpf/bpf_struct_ops.c index 86c7884abaf8..0d515ec57aa5 100644 --- a/kernel/bpf/bpf_struct_ops.c +++ b/kernel/bpf/bpf_struct_ops.c @@ -12,6 +12,7 @@ #include #include #include +#include struct bpf_struct_ops_value { struct bpf_struct_ops_common_value common; @@ -56,6 +57,7 @@ struct bpf_struct_ops_map { struct bpf_struct_ops_link { struct bpf_link link; struct bpf_map __rcu *map; + wait_queue_head_t wait_hup; }; static DEFINE_MUTEX(update_mutex); @@ -571,7 +573,7 @@ int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks, } size = arch_prepare_bpf_trampoline(NULL, image + image_off, - image + PAGE_SIZE, + image + image_off + size, model, flags, tlinks, stub_func); if (size <= 0) { if (image != *_image) @@ -757,7 +759,7 @@ static long bpf_struct_ops_map_update_elem(struct bpf_map *map, void *key, goto unlock; } - err = st_ops->reg(kdata); + err = st_ops->reg(kdata, NULL); if (likely(!err)) { /* This refcnt increment on the map here after * 'st_ops->reg()' is secure since the state of the @@ -805,7 +807,7 @@ static long bpf_struct_ops_map_delete_elem(struct bpf_map *map, void *key) BPF_STRUCT_OPS_STATE_TOBEFREE); switch (prev_state) { case BPF_STRUCT_OPS_STATE_INUSE: - st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data); + st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, NULL); bpf_map_put(map); return 0; case BPF_STRUCT_OPS_STATE_TOBEFREE: @@ -1057,10 +1059,7 @@ static void bpf_struct_ops_map_link_dealloc(struct bpf_link *link) st_map = (struct bpf_struct_ops_map *) rcu_dereference_protected(st_link->map, true); if (st_map) { - /* st_link->map can be NULL if - * bpf_struct_ops_link_create() fails to register. 
- */ - st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data); + st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link); bpf_map_put(&st_map->map); } kfree(st_link); @@ -1075,7 +1074,8 @@ static void bpf_struct_ops_map_link_show_fdinfo(const struct bpf_link *link, st_link = container_of(link, struct bpf_struct_ops_link, link); rcu_read_lock(); map = rcu_dereference(st_link->map); - seq_printf(seq, "map_id:\t%d\n", map->id); + if (map) + seq_printf(seq, "map_id:\t%d\n", map->id); rcu_read_unlock(); } @@ -1088,7 +1088,8 @@ static int bpf_struct_ops_map_link_fill_link_info(const struct bpf_link *link, st_link = container_of(link, struct bpf_struct_ops_link, link); rcu_read_lock(); map = rcu_dereference(st_link->map); - info->struct_ops.map_id = map->id; + if (map) + info->struct_ops.map_id = map->id; rcu_read_unlock(); return 0; } @@ -1113,6 +1114,10 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map mutex_lock(&update_mutex); old_map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex)); + if (!old_map) { + err = -ENOLINK; + goto err_out; + } if (expected_old_map && old_map != expected_old_map) { err = -EPERM; goto err_out; @@ -1125,7 +1130,7 @@ static int bpf_struct_ops_map_link_update(struct bpf_link *link, struct bpf_map goto err_out; } - err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data); + err = st_map->st_ops_desc->st_ops->update(st_map->kvalue.data, old_st_map->kvalue.data, link); if (err) goto err_out; @@ -1139,11 +1144,53 @@ err_out: return err; } +static int bpf_struct_ops_map_link_detach(struct bpf_link *link) +{ + struct bpf_struct_ops_link *st_link = container_of(link, struct bpf_struct_ops_link, link); + struct bpf_struct_ops_map *st_map; + struct bpf_map *map; + + mutex_lock(&update_mutex); + + map = rcu_dereference_protected(st_link->map, lockdep_is_held(&update_mutex)); + if (!map) { + mutex_unlock(&update_mutex); + return 0; + } + st_map = container_of(map, struct bpf_struct_ops_map, map); + + st_map->st_ops_desc->st_ops->unreg(&st_map->kvalue.data, link); + + RCU_INIT_POINTER(st_link->map, NULL); + /* Pair with bpf_map_get() in bpf_struct_ops_link_create() or + * bpf_map_inc() in bpf_struct_ops_map_link_update(). + */ + bpf_map_put(&st_map->map); + + mutex_unlock(&update_mutex); + + wake_up_interruptible_poll(&st_link->wait_hup, EPOLLHUP); + + return 0; +} + +static __poll_t bpf_struct_ops_map_link_poll(struct file *file, + struct poll_table_struct *pts) +{ + struct bpf_struct_ops_link *st_link = file->private_data; + + poll_wait(file, &st_link->wait_hup, pts); + + return rcu_access_pointer(st_link->map) ? 0 : EPOLLHUP; +} + static const struct bpf_link_ops bpf_struct_ops_map_lops = { .dealloc = bpf_struct_ops_map_link_dealloc, + .detach = bpf_struct_ops_map_link_detach, .show_fdinfo = bpf_struct_ops_map_link_show_fdinfo, .fill_link_info = bpf_struct_ops_map_link_fill_link_info, .update_map = bpf_struct_ops_map_link_update, + .poll = bpf_struct_ops_map_link_poll, }; int bpf_struct_ops_link_create(union bpf_attr *attr) @@ -1176,13 +1223,21 @@ int bpf_struct_ops_link_create(union bpf_attr *attr) if (err) goto err_out; - err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data); + init_waitqueue_head(&link->wait_hup); + + /* Hold the update_mutex such that the subsystem cannot + * do link->ops->detach() before the link is fully initialized. 
+ */ + mutex_lock(&update_mutex); + err = st_map->st_ops_desc->st_ops->reg(st_map->kvalue.data, &link->link); if (err) { + mutex_unlock(&update_mutex); bpf_link_cleanup(&link_primer); link = NULL; goto err_out; } RCU_INIT_POINTER(link->map, map); + mutex_unlock(&update_mutex); return bpf_link_settle(&link_primer); diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c index 821063660d9f..520f49f422fe 100644 --- a/kernel/bpf/btf.c +++ b/kernel/bpf/btf.c @@ -274,6 +274,7 @@ struct btf { u32 start_str_off; /* first string offset (0 for base BTF) */ char name[MODULE_NAME_LEN]; bool kernel_btf; + __u32 *base_id_map; /* map from distilled base BTF -> vmlinux BTF ids */ }; enum verifier_phase { @@ -414,7 +415,7 @@ const char *btf_type_str(const struct btf_type *t) struct btf_show { u64 flags; void *target; /* target of show operation (seq file, buffer) */ - void (*showfn)(struct btf_show *show, const char *fmt, va_list args); + __printf(2, 0) void (*showfn)(struct btf_show *show, const char *fmt, va_list args); const struct btf *btf; /* below are used during iteration */ struct { @@ -530,6 +531,11 @@ static bool btf_type_is_decl_tag_target(const struct btf_type *t) btf_type_is_var(t) || btf_type_is_typedef(t); } +bool btf_is_vmlinux(const struct btf *btf) +{ + return btf->kernel_btf && !btf->base_btf; +} + u32 btf_nr_types(const struct btf *btf) { u32 total = 0; @@ -772,7 +778,7 @@ static bool __btf_name_char_ok(char c, bool first) return true; } -static const char *btf_str_by_offset(const struct btf *btf, u32 offset) +const char *btf_str_by_offset(const struct btf *btf, u32 offset) { while (offset < btf->start_str_off) btf = btf->base_btf; @@ -1670,14 +1676,8 @@ static void btf_free_kfunc_set_tab(struct btf *btf) if (!tab) return; - /* For module BTF, we directly assign the sets being registered, so - * there is nothing to free except kfunc_set_tab. - */ - if (btf_is_module(btf)) - goto free_tab; for (hook = 0; hook < ARRAY_SIZE(tab->sets); hook++) kfree(tab->sets[hook]); -free_tab: kfree(tab); btf->kfunc_set_tab = NULL; } @@ -1735,7 +1735,12 @@ static void btf_free(struct btf *btf) kvfree(btf->types); kvfree(btf->resolved_sizes); kvfree(btf->resolved_ids); - kvfree(btf->data); + /* vmlinux does not allocate btf->data, it simply points it at + * __start_BTF. 
+ */ + if (!btf_is_vmlinux(btf)) + kvfree(btf->data); + kvfree(btf->base_id_map); + kfree(btf); } @@ -1764,6 +1769,23 @@ void btf_put(struct btf *btf) } } +struct btf *btf_base_btf(const struct btf *btf) +{ + return btf->base_btf; +} + +const struct btf_header *btf_header(const struct btf *btf) +{ + return &btf->hdr; +} + +void btf_set_base_btf(struct btf *btf, const struct btf *base_btf) +{ + btf->base_btf = (struct btf *)base_btf; + btf->start_id = btf_nr_types(base_btf); + btf->start_str_off = base_btf->hdr.str_len; +} + static int env_resolve_init(struct btf_verifier_env *env) { struct btf *btf = env->btf; @@ -3442,10 +3464,12 @@ btf_find_graph_root(const struct btf *btf, const struct btf_type *pt, goto end; \ } -static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask, +static int btf_get_field_type(const struct btf *btf, const struct btf_type *var_type, + u32 field_mask, u32 *seen_mask, int *align, int *sz) { int type = 0; + const char *name = __btf_name_by_offset(btf, var_type->name_off); if (field_mask & BPF_SPIN_LOCK) { if (!strcmp(name, "bpf_spin_lock")) { @@ -3481,7 +3505,7 @@ static int btf_get_field_type(const char *name, u32 field_mask, u32 *seen_mask, field_mask_test_name(BPF_REFCOUNT, "bpf_refcount"); /* Only return BPF_KPTR when all other types with matchable names fail */ - if (field_mask & BPF_KPTR) { + if (field_mask & BPF_KPTR && !__btf_type_is_struct(var_type)) { type = BPF_KPTR_REF; goto end; } @@ -3494,140 +3518,232 @@ end: #undef field_mask_test_name +/* Repeat a number of fields a specified number of times. + * + * Copy the fields starting from the first field and repeat them + * repeat_cnt times. The fields are repeated by adding the offset of each + * field with + * (i + 1) * elem_size + * where i is the repeat index and elem_size is the size of an element. + */ +static int btf_repeat_fields(struct btf_field_info *info, + u32 field_cnt, u32 repeat_cnt, u32 elem_size) +{ + u32 i, j; + u32 cur; + + /* Ensure not repeating fields that should not be repeated. */ + for (i = 0; i < field_cnt; i++) { + switch (info[i].type) { + case BPF_KPTR_UNREF: + case BPF_KPTR_REF: + case BPF_KPTR_PERCPU: + case BPF_LIST_HEAD: + case BPF_RB_ROOT: + break; + default: + return -EINVAL; + } + } + + cur = field_cnt; + for (i = 0; i < repeat_cnt; i++) { + memcpy(&info[cur], &info[0], field_cnt * sizeof(info[0])); + for (j = 0; j < field_cnt; j++) + info[cur++].off += (i + 1) * elem_size; + } + + return 0; +} + static int btf_find_struct_field(const struct btf *btf, const struct btf_type *t, u32 field_mask, - struct btf_field_info *info, int info_cnt) + struct btf_field_info *info, int info_cnt, + u32 level); + +/* Find special fields in the struct type of a field. + * + * This function is used to find fields of special types that are not + * global variables or direct fields of a struct type. It also handles the + * repetition if it is the element type of an array. + */ +static int btf_find_nested_struct(const struct btf *btf, const struct btf_type *t, + u32 off, u32 nelems, + u32 field_mask, struct btf_field_info *info, + int info_cnt, u32 level) { - int ret, idx = 0, align, sz, field_type; - const struct btf_member *member; + int ret, err, i; + + level++; + if (level >= MAX_RESOLVE_DEPTH) + return -E2BIG; + + ret = btf_find_struct_field(btf, t, field_mask, info, info_cnt, level); + + if (ret <= 0) + return ret; + + /* Shift the offsets of the nested struct fields to the offsets + * related to the container. 
+ */ + for (i = 0; i < ret; i++) + info[i].off += off; + + if (nelems > 1) { + err = btf_repeat_fields(info, ret, nelems - 1, t->size); + if (err == 0) + ret *= nelems; + else + ret = err; + } + + return ret; +} + +static int btf_find_field_one(const struct btf *btf, + const struct btf_type *var, + const struct btf_type *var_type, + int var_idx, + u32 off, u32 expected_size, + u32 field_mask, u32 *seen_mask, + struct btf_field_info *info, int info_cnt, + u32 level) +{ + int ret, align, sz, field_type; struct btf_field_info tmp; + const struct btf_array *array; + u32 i, nelems = 1; + + /* Walk into array types to find the element type and the number of + * elements in the (flattened) array. + */ + for (i = 0; i < MAX_RESOLVE_DEPTH && btf_type_is_array(var_type); i++) { + array = btf_array(var_type); + nelems *= array->nelems; + var_type = btf_type_by_id(btf, array->type); + } + if (i == MAX_RESOLVE_DEPTH) + return -E2BIG; + if (nelems == 0) + return 0; + + field_type = btf_get_field_type(btf, var_type, + field_mask, seen_mask, &align, &sz); + /* Look into variables of struct types */ + if (!field_type && __btf_type_is_struct(var_type)) { + sz = var_type->size; + if (expected_size && expected_size != sz * nelems) + return 0; + ret = btf_find_nested_struct(btf, var_type, off, nelems, field_mask, + &info[0], info_cnt, level); + return ret; + } + + if (field_type == 0) + return 0; + if (field_type < 0) + return field_type; + + if (expected_size && expected_size != sz * nelems) + return 0; + if (off % align) + return 0; + + switch (field_type) { + case BPF_SPIN_LOCK: + case BPF_TIMER: + case BPF_WORKQUEUE: + case BPF_LIST_NODE: + case BPF_RB_NODE: + case BPF_REFCOUNT: + ret = btf_find_struct(btf, var_type, off, sz, field_type, + info_cnt ? &info[0] : &tmp); + if (ret < 0) + return ret; + break; + case BPF_KPTR_UNREF: + case BPF_KPTR_REF: + case BPF_KPTR_PERCPU: + ret = btf_find_kptr(btf, var_type, off, sz, + info_cnt ? &info[0] : &tmp); + if (ret < 0) + return ret; + break; + case BPF_LIST_HEAD: + case BPF_RB_ROOT: + ret = btf_find_graph_root(btf, var, var_type, + var_idx, off, sz, + info_cnt ? &info[0] : &tmp, + field_type); + if (ret < 0) + return ret; + break; + default: + return -EFAULT; + } + + if (ret == BTF_FIELD_IGNORE) + return 0; + if (nelems > info_cnt) + return -E2BIG; + if (nelems > 1) { + ret = btf_repeat_fields(info, 1, nelems - 1, sz); + if (ret < 0) + return ret; + } + return nelems; +} + +static int btf_find_struct_field(const struct btf *btf, + const struct btf_type *t, u32 field_mask, + struct btf_field_info *info, int info_cnt, + u32 level) +{ + int ret, idx = 0; + const struct btf_member *member; u32 i, off, seen_mask = 0; for_each_member(i, t, member) { const struct btf_type *member_type = btf_type_by_id(btf, member->type); - field_type = btf_get_field_type(__btf_name_by_offset(btf, member_type->name_off), - field_mask, &seen_mask, &align, &sz); - if (field_type == 0) - continue; - if (field_type < 0) - return field_type; - off = __btf_member_bit_offset(t, member); if (off % 8) /* valid C code cannot generate such BTF */ return -EINVAL; off /= 8; - if (off % align) - continue; - switch (field_type) { - case BPF_SPIN_LOCK: - case BPF_TIMER: - case BPF_WORKQUEUE: - case BPF_LIST_NODE: - case BPF_RB_NODE: - case BPF_REFCOUNT: - ret = btf_find_struct(btf, member_type, off, sz, field_type, - idx < info_cnt ? 
&info[idx] : &tmp); - if (ret < 0) - return ret; - break; - case BPF_KPTR_UNREF: - case BPF_KPTR_REF: - case BPF_KPTR_PERCPU: - ret = btf_find_kptr(btf, member_type, off, sz, - idx < info_cnt ? &info[idx] : &tmp); - if (ret < 0) - return ret; - break; - case BPF_LIST_HEAD: - case BPF_RB_ROOT: - ret = btf_find_graph_root(btf, t, member_type, - i, off, sz, - idx < info_cnt ? &info[idx] : &tmp, - field_type); - if (ret < 0) - return ret; - break; - default: - return -EFAULT; - } - - if (ret == BTF_FIELD_IGNORE) - continue; - if (idx >= info_cnt) - return -E2BIG; - ++idx; + ret = btf_find_field_one(btf, t, member_type, i, + off, 0, + field_mask, &seen_mask, + &info[idx], info_cnt - idx, level); + if (ret < 0) + return ret; + idx += ret; } return idx; } static int btf_find_datasec_var(const struct btf *btf, const struct btf_type *t, u32 field_mask, struct btf_field_info *info, - int info_cnt) + int info_cnt, u32 level) { - int ret, idx = 0, align, sz, field_type; + int ret, idx = 0; const struct btf_var_secinfo *vsi; - struct btf_field_info tmp; u32 i, off, seen_mask = 0; for_each_vsi(i, t, vsi) { const struct btf_type *var = btf_type_by_id(btf, vsi->type); const struct btf_type *var_type = btf_type_by_id(btf, var->type); - field_type = btf_get_field_type(__btf_name_by_offset(btf, var_type->name_off), - field_mask, &seen_mask, &align, &sz); - if (field_type == 0) - continue; - if (field_type < 0) - return field_type; - off = vsi->offset; - if (vsi->size != sz) - continue; - if (off % align) - continue; - - switch (field_type) { - case BPF_SPIN_LOCK: - case BPF_TIMER: - case BPF_WORKQUEUE: - case BPF_LIST_NODE: - case BPF_RB_NODE: - case BPF_REFCOUNT: - ret = btf_find_struct(btf, var_type, off, sz, field_type, - idx < info_cnt ? &info[idx] : &tmp); - if (ret < 0) - return ret; - break; - case BPF_KPTR_UNREF: - case BPF_KPTR_REF: - case BPF_KPTR_PERCPU: - ret = btf_find_kptr(btf, var_type, off, sz, - idx < info_cnt ? &info[idx] : &tmp); - if (ret < 0) - return ret; - break; - case BPF_LIST_HEAD: - case BPF_RB_ROOT: - ret = btf_find_graph_root(btf, var, var_type, - -1, off, sz, - idx < info_cnt ? 
&info[idx] : &tmp, - field_type); - if (ret < 0) - return ret; - break; - default: - return -EFAULT; - } - - if (ret == BTF_FIELD_IGNORE) - continue; - if (idx >= info_cnt) - return -E2BIG; - ++idx; + ret = btf_find_field_one(btf, var, var_type, -1, off, vsi->size, + field_mask, &seen_mask, + &info[idx], info_cnt - idx, + level); + if (ret < 0) + return ret; + idx += ret; } return idx; } @@ -3637,9 +3753,9 @@ static int btf_find_field(const struct btf *btf, const struct btf_type *t, int info_cnt) { if (__btf_type_is_struct(t)) - return btf_find_struct_field(btf, t, field_mask, info, info_cnt); + return btf_find_struct_field(btf, t, field_mask, info, info_cnt, 0); else if (btf_type_is_datasec(t)) - return btf_find_datasec_var(btf, t, field_mask, info, info_cnt); + return btf_find_datasec_var(btf, t, field_mask, info, info_cnt, 0); return -EINVAL; } @@ -5726,6 +5842,15 @@ static int find_kern_ctx_type_id(enum bpf_prog_type prog_type) return ctx_type->type; } +bool btf_is_projection_of(const char *pname, const char *tname) +{ + if (strcmp(pname, "__sk_buff") == 0 && strcmp(tname, "sk_buff") == 0) + return true; + if (strcmp(pname, "xdp_md") == 0 && strcmp(tname, "xdp_buff") == 0) + return true; + return false; +} + bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf, const struct btf_type *t, enum bpf_prog_type prog_type, int arg) @@ -5788,9 +5913,7 @@ again: * int socket_filter_bpf_prog(struct __sk_buff *skb) * { // no fields of skb are ever used } */ - if (strcmp(ctx_tname, "__sk_buff") == 0 && strcmp(tname, "sk_buff") == 0) - return true; - if (strcmp(ctx_tname, "xdp_md") == 0 && strcmp(tname, "xdp_buff") == 0) + if (btf_is_projection_of(ctx_tname, tname)) return true; if (strcmp(ctx_tname, tname)) { /* bpf_user_pt_regs_t is a typedef, so resolve it to @@ -5982,23 +6105,15 @@ int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_ty BTF_ID_LIST(bpf_ctx_convert_btf_id) BTF_ID(struct, bpf_ctx_convert) -struct btf *btf_parse_vmlinux(void) +static struct btf *btf_parse_base(struct btf_verifier_env *env, const char *name, + void *data, unsigned int data_size) { - struct btf_verifier_env *env = NULL; - struct bpf_verifier_log *log; struct btf *btf = NULL; int err; if (!IS_ENABLED(CONFIG_DEBUG_INFO_BTF)) return ERR_PTR(-ENOENT); - env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); - if (!env) - return ERR_PTR(-ENOMEM); - - log = &env->log; - log->level = BPF_LOG_KERNEL; - btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); if (!btf) { err = -ENOMEM; @@ -6006,10 +6121,10 @@ struct btf *btf_parse_vmlinux(void) } env->btf = btf; - btf->data = __start_BTF; - btf->data_size = __stop_BTF - __start_BTF; + btf->data = data; + btf->data_size = data_size; btf->kernel_btf = true; - snprintf(btf->name, sizeof(btf->name), "vmlinux"); + snprintf(btf->name, sizeof(btf->name), "%s", name); err = btf_parse_hdr(env); if (err) @@ -6029,20 +6144,11 @@ struct btf *btf_parse_vmlinux(void) if (err) goto errout; - /* btf_parse_vmlinux() runs under bpf_verifier_lock */ - bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]); - refcount_set(&btf->refcnt, 1); - err = btf_alloc_id(btf); - if (err) - goto errout; - - btf_verifier_env_free(env); return btf; errout: - btf_verifier_env_free(env); if (btf) { kvfree(btf->types); kfree(btf); @@ -6050,19 +6156,61 @@ errout: return ERR_PTR(err); } -#ifdef CONFIG_DEBUG_INFO_BTF_MODULES - -static struct btf *btf_parse_module(const char *module_name, const void *data, unsigned int data_size) +struct btf 
*btf_parse_vmlinux(void) { struct btf_verifier_env *env = NULL; struct bpf_verifier_log *log; - struct btf *btf = NULL, *base_btf; + struct btf *btf; int err; - base_btf = bpf_get_btf_vmlinux(); - if (IS_ERR(base_btf)) - return base_btf; - if (!base_btf) + env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); + if (!env) + return ERR_PTR(-ENOMEM); + + log = &env->log; + log->level = BPF_LOG_KERNEL; + btf = btf_parse_base(env, "vmlinux", __start_BTF, __stop_BTF - __start_BTF); + if (IS_ERR(btf)) + goto err_out; + + /* btf_parse_vmlinux() runs under bpf_verifier_lock */ + bpf_ctx_convert.t = btf_type_by_id(btf, bpf_ctx_convert_btf_id[0]); + err = btf_alloc_id(btf); + if (err) { + btf_free(btf); + btf = ERR_PTR(err); + } +err_out: + btf_verifier_env_free(env); + return btf; +} + +/* If .BTF_ids section was created with distilled base BTF, both base and + * split BTF ids will need to be mapped to actual base/split ids for + * BTF now that it has been relocated. + */ +static __u32 btf_relocate_id(const struct btf *btf, __u32 id) +{ + if (!btf->base_btf || !btf->base_id_map) + return id; + return btf->base_id_map[id]; +} + +#ifdef CONFIG_DEBUG_INFO_BTF_MODULES + +static struct btf *btf_parse_module(const char *module_name, const void *data, + unsigned int data_size, void *base_data, + unsigned int base_data_size) +{ + struct btf *btf = NULL, *vmlinux_btf, *base_btf = NULL; + struct btf_verifier_env *env = NULL; + struct bpf_verifier_log *log; + int err = 0; + + vmlinux_btf = bpf_get_btf_vmlinux(); + if (IS_ERR(vmlinux_btf)) + return vmlinux_btf; + if (!vmlinux_btf) return ERR_PTR(-EINVAL); env = kzalloc(sizeof(*env), GFP_KERNEL | __GFP_NOWARN); @@ -6072,6 +6220,16 @@ static struct btf *btf_parse_module(const char *module_name, const void *data, u log = &env->log; log->level = BPF_LOG_KERNEL; + if (base_data) { + base_btf = btf_parse_base(env, ".BTF.base", base_data, base_data_size); + if (IS_ERR(base_btf)) { + err = PTR_ERR(base_btf); + goto errout; + } + } else { + base_btf = vmlinux_btf; + } + btf = kzalloc(sizeof(*btf), GFP_KERNEL | __GFP_NOWARN); if (!btf) { err = -ENOMEM; @@ -6111,12 +6269,22 @@ static struct btf *btf_parse_module(const char *module_name, const void *data, u if (err) goto errout; + if (base_btf != vmlinux_btf) { + err = btf_relocate(btf, vmlinux_btf, &btf->base_id_map); + if (err) + goto errout; + btf_free(base_btf); + base_btf = vmlinux_btf; + } + btf_verifier_env_free(env); refcount_set(&btf->refcnt, 1); return btf; errout: btf_verifier_env_free(env); + if (base_btf != vmlinux_btf) + btf_free(base_btf); if (btf) { kvfree(btf->data); kvfree(btf->types); @@ -6693,7 +6861,7 @@ int btf_struct_access(struct bpf_verifier_log *log, for (i = 0; i < rec->cnt; i++) { struct btf_field *field = &rec->fields[i]; u32 offset = field->offset; - if (off < offset + btf_field_type_size(field->type) && offset < off + size) { + if (off < offset + field->size && offset < off + size) { bpf_log(log, "direct access to %s is disallowed\n", btf_field_type_name(field->type)); @@ -7370,8 +7538,8 @@ static void btf_type_show(const struct btf *btf, u32 type_id, void *obj, btf_type_ops(t)->show(btf, t, type_id, obj, 0, show); } -static void btf_seq_show(struct btf_show *show, const char *fmt, - va_list args) +__printf(2, 0) static void btf_seq_show(struct btf_show *show, const char *fmt, + va_list args) { seq_vprintf((struct seq_file *)show->target, fmt, args); } @@ -7404,8 +7572,8 @@ struct btf_show_snprintf { int len; /* length we would have written */ }; -static void btf_snprintf_show(struct 
btf_show *show, const char *fmt, - va_list args) +__printf(2, 0) static void btf_snprintf_show(struct btf_show *show, const char *fmt, + va_list args) { struct btf_show_snprintf *ssnprintf = (struct btf_show_snprintf *)show; int len; @@ -7669,7 +7837,8 @@ static int btf_module_notify(struct notifier_block *nb, unsigned long op, err = -ENOMEM; goto out; } - btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size); + btf = btf_parse_module(mod->name, mod->btf_data, mod->btf_data_size, + mod->btf_base_data, mod->btf_base_data_size); if (IS_ERR(btf)) { kfree(btf_mod); if (!IS_ENABLED(CONFIG_MODULE_ALLOW_BTF_MISMATCH)) { @@ -7993,7 +8162,7 @@ static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, bool add_filter = !!kset->filter; struct btf_kfunc_set_tab *tab; struct btf_id_set8 *set; - u32 set_cnt; + u32 set_cnt, i; int ret; if (hook >= BTF_KFUNC_HOOK_MAX) { @@ -8039,21 +8208,15 @@ static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, goto end; } - /* We don't need to allocate, concatenate, and sort module sets, because - * only one is allowed per hook. Hence, we can directly assign the - * pointer and return. - */ - if (!vmlinux_set) { - tab->sets[hook] = add_set; - goto do_add_filter; - } - /* In case of vmlinux sets, there may be more than one set being * registered per hook. To create a unified set, we allocate a new set * and concatenate all individual sets being registered. While each set * is individually sorted, they may become unsorted when concatenated, * hence re-sorting the final set again is required to make binary * searching the set using btf_id_set8_contains function work. + * + * For module sets, we need to allocate as we may need to relocate + * BTF ids. */ set_cnt = set ? set->cnt : 0; @@ -8083,11 +8246,14 @@ static int btf_populate_kfunc_set(struct btf *btf, enum btf_kfunc_hook hook, /* Concatenate the two sets */ memcpy(set->pairs + set->cnt, add_set->pairs, add_set->cnt * sizeof(set->pairs[0])); + /* Now that the set is copied, update with relocated BTF ids */ + for (i = set->cnt; i < set->cnt + add_set->cnt; i++) + set->pairs[i].id = btf_relocate_id(btf, set->pairs[i].id); + set->cnt += add_set->cnt; sort(set->pairs, set->cnt, sizeof(set->pairs[0]), btf_id_cmp_func, NULL); -do_add_filter: if (add_filter) { hook_filter = &tab->hook_filters[hook]; hook_filter->filters[hook_filter->nr_filters++] = kset->filter; @@ -8207,7 +8373,7 @@ static int __register_btf_kfunc_id_set(enum btf_kfunc_hook hook, return PTR_ERR(btf); for (i = 0; i < kset->set->cnt; i++) { - ret = btf_check_kfunc_protos(btf, kset->set->pairs[i].id, + ret = btf_check_kfunc_protos(btf, btf_relocate_id(btf, kset->set->pairs[i].id), kset->set->pairs[i].flags); if (ret) goto err_out; @@ -8271,7 +8437,7 @@ static int btf_check_dtor_kfuncs(struct btf *btf, const struct btf_id_dtor_kfunc u32 nr_args, i; for (i = 0; i < cnt; i++) { - dtor_btf_id = dtors[i].kfunc_btf_id; + dtor_btf_id = btf_relocate_id(btf, dtors[i].kfunc_btf_id); dtor_func = btf_type_by_id(btf, dtor_btf_id); if (!dtor_func || !btf_type_is_func(dtor_func)) @@ -8306,7 +8472,7 @@ int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_c { struct btf_id_dtor_kfunc_tab *tab; struct btf *btf; - u32 tab_cnt; + u32 tab_cnt, i; int ret; btf = btf_get_module_btf(owner); @@ -8357,6 +8523,13 @@ int register_btf_id_dtor_kfuncs(const struct btf_id_dtor_kfunc *dtors, u32 add_c btf->dtor_kfunc_tab = tab; memcpy(tab->dtors + tab->cnt, dtors, add_cnt * sizeof(tab->dtors[0])); + + /* remap 
BTF ids based on BTF relocation (if any) */ + for (i = tab_cnt; i < tab_cnt + add_cnt; i++) { + tab->dtors[i].btf_id = btf_relocate_id(btf, tab->dtors[i].btf_id); + tab->dtors[i].kfunc_btf_id = btf_relocate_id(btf, tab->dtors[i].kfunc_btf_id); + } + tab->cnt += add_cnt; sort(tab->dtors, tab->cnt, sizeof(tab->dtors[0]), btf_id_cmp_func, NULL); diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index 695a0fb2cd4d..7ee62e38faf0 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1173,8 +1173,7 @@ bpf_jit_binary_pack_alloc(unsigned int proglen, u8 **image_ptr, } /* Copy JITed text from rw_header to its final location, the ro_header. */ -int bpf_jit_binary_pack_finalize(struct bpf_prog *prog, - struct bpf_binary_header *ro_header, +int bpf_jit_binary_pack_finalize(struct bpf_binary_header *ro_header, struct bpf_binary_header *rw_header) { void *ptr; @@ -2742,8 +2741,7 @@ static void bpf_free_used_maps(struct bpf_prog_aux *aux) kfree(aux->used_maps); } -void __bpf_free_used_btfs(struct bpf_prog_aux *aux, - struct btf_mod_pair *used_btfs, u32 len) +void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len) { #ifdef CONFIG_BPF_SYSCALL struct btf_mod_pair *btf_mod; @@ -2760,7 +2758,7 @@ void __bpf_free_used_btfs(struct bpf_prog_aux *aux, static void bpf_free_used_btfs(struct bpf_prog_aux *aux) { - __bpf_free_used_btfs(aux, aux->used_btfs, aux->used_btf_cnt); + __bpf_free_used_btfs(aux->used_btfs, aux->used_btf_cnt); kfree(aux->used_btfs); } diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index a8e34416e960..fbdf5a1aabfe 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -79,8 +79,6 @@ struct bpf_cpu_map { struct bpf_cpu_map_entry __rcu **cpu_map; }; -static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list); - static struct bpf_map *cpu_map_alloc(union bpf_attr *attr) { u32 value_size = attr->value_size; @@ -240,12 +238,14 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames, int xdp_n, struct xdp_cpumap_stats *stats, struct list_head *list) { + struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; int nframes; if (!rcpu->prog) return xdp_n; rcu_read_lock_bh(); + bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, xdp_n, stats); @@ -255,6 +255,7 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames, if (unlikely(!list_empty(list))) cpu_map_bpf_prog_run_skb(rcpu, list, stats); + bpf_net_ctx_clear(bpf_net_ctx); rcu_read_unlock_bh(); /* resched point, may call do_softirq() */ return nframes; @@ -706,7 +707,6 @@ static void bq_flush_to_queue(struct xdp_bulk_queue *bq) */ static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf) { - struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list); struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq); if (unlikely(bq->count == CPU_MAP_BULK_SIZE)) @@ -723,8 +723,11 @@ static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf) */ bq->q[bq->count++] = xdpf; - if (!bq->flush_node.prev) + if (!bq->flush_node.prev) { + struct list_head *flush_list = bpf_net_ctx_get_cpu_map_flush_list(); + list_add(&bq->flush_node, flush_list); + } } int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf, @@ -756,9 +759,8 @@ trace: return ret; } -void __cpu_map_flush(void) +void __cpu_map_flush(struct list_head *flush_list) { - struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list); struct xdp_bulk_queue *bq, *tmp; list_for_each_entry_safe(bq, tmp, flush_list, flush_node) { @@ 
-768,24 +770,3 @@ void __cpu_map_flush(void) wake_up_process(bq->obj->kthread); } } - -#ifdef CONFIG_DEBUG_NET -bool cpu_map_check_flush(void) -{ - if (list_empty(this_cpu_ptr(&cpu_map_flush_list))) - return false; - __cpu_map_flush(); - return true; -} -#endif - -static int __init cpu_map_init(void) -{ - int cpu; - - for_each_possible_cpu(cpu) - INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu)); - return 0; -} - -subsys_initcall(cpu_map_init); diff --git a/kernel/bpf/crypto.c b/kernel/bpf/crypto.c index 2bee4af91e38..94854cd9c4cc 100644 --- a/kernel/bpf/crypto.c +++ b/kernel/bpf/crypto.c @@ -275,7 +275,7 @@ static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx, if (__bpf_dynptr_is_rdonly(dst)) return -EINVAL; - siv_len = __bpf_dynptr_size(siv); + siv_len = siv ? __bpf_dynptr_size(siv) : 0; src_len = __bpf_dynptr_size(src); dst_len = __bpf_dynptr_size(dst); if (!src_len || !dst_len) @@ -303,36 +303,44 @@ static int bpf_crypto_crypt(const struct bpf_crypto_ctx *ctx, /** * bpf_crypto_decrypt() - Decrypt buffer using configured context and IV provided. - * @ctx: The crypto context being used. The ctx must be a trusted pointer. - * @src: bpf_dynptr to the encrypted data. Must be a trusted pointer. - * @dst: bpf_dynptr to the buffer where to store the result. Must be a trusted pointer. - * @siv: bpf_dynptr to IV data and state data to be used by decryptor. + * @ctx: The crypto context being used. The ctx must be a trusted pointer. + * @src: bpf_dynptr to the encrypted data. Must be a trusted pointer. + * @dst: bpf_dynptr to the buffer where to store the result. Must be a trusted pointer. + * @siv__nullable: bpf_dynptr to IV data and state data to be used by decryptor. May be NULL. * * Decrypts provided buffer using IV data and the crypto context. Crypto context must be configured. */ __bpf_kfunc int bpf_crypto_decrypt(struct bpf_crypto_ctx *ctx, - const struct bpf_dynptr_kern *src, - const struct bpf_dynptr_kern *dst, - const struct bpf_dynptr_kern *siv) + const struct bpf_dynptr *src, + const struct bpf_dynptr *dst, + const struct bpf_dynptr *siv__nullable) { - return bpf_crypto_crypt(ctx, src, dst, siv, true); + const struct bpf_dynptr_kern *src_kern = (struct bpf_dynptr_kern *)src; + const struct bpf_dynptr_kern *dst_kern = (struct bpf_dynptr_kern *)dst; + const struct bpf_dynptr_kern *siv_kern = (struct bpf_dynptr_kern *)siv__nullable; + + return bpf_crypto_crypt(ctx, src_kern, dst_kern, siv_kern, true); } /** * bpf_crypto_encrypt() - Encrypt buffer using configured context and IV provided. - * @ctx: The crypto context being used. The ctx must be a trusted pointer. - * @src: bpf_dynptr to the plain data. Must be a trusted pointer. - * @dst: bpf_dynptr to buffer where to store the result. Must be a trusted pointer. - * @siv: bpf_dynptr to IV data and state data to be used by decryptor. + * @ctx: The crypto context being used. The ctx must be a trusted pointer. + * @src: bpf_dynptr to the plain data. Must be a trusted pointer. + * @dst: bpf_dynptr to the buffer where to store the result. Must be a trusted pointer. + * @siv__nullable: bpf_dynptr to IV data and state data to be used by encryptor. May be NULL. * * Encrypts provided buffer using IV data and the crypto context. Crypto context must be configured.
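The nullable IV argument changes the BPF-side calling convention: a program may now pass NULL rather than a zero-sized dynptr when the algorithm needs no IV or state. A minimal BPF-side sketch (hypothetical program fragment; it assumes the kfunc is declared via __ksym and that the context and dynptrs were set up earlier):

```c
/* Extern mirrors the kernel signature above; illustrative only. */
extern int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx,
			      const struct bpf_dynptr *src,
			      const struct bpf_dynptr *dst,
			      const struct bpf_dynptr *siv__nullable) __ksym;

static int encrypt_no_iv(struct bpf_crypto_ctx *ctx,
			 struct bpf_dynptr *src, struct bpf_dynptr *dst)
{
	/* ECB-like modes carry no IV/state: pass NULL, which the
	 * siv__nullable annotation permits.
	 */
	return bpf_crypto_encrypt(ctx, src, dst, NULL);
}
```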
 */ __bpf_kfunc int bpf_crypto_encrypt(struct bpf_crypto_ctx *ctx, - const struct bpf_dynptr_kern *src, - const struct bpf_dynptr_kern *dst, - const struct bpf_dynptr_kern *siv) + const struct bpf_dynptr *src, + const struct bpf_dynptr *dst, + const struct bpf_dynptr *siv__nullable) { - return bpf_crypto_crypt(ctx, src, dst, siv, false); + const struct bpf_dynptr_kern *src_kern = (struct bpf_dynptr_kern *)src; + const struct bpf_dynptr_kern *dst_kern = (struct bpf_dynptr_kern *)dst; + const struct bpf_dynptr_kern *siv_kern = (struct bpf_dynptr_kern *)siv__nullable; + + return bpf_crypto_crypt(ctx, src_kern, dst_kern, siv_kern, false); } __bpf_kfunc_end_defs(); diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c index 7f3b34452243..9e0e3b0a18e4 100644 --- a/kernel/bpf/devmap.c +++ b/kernel/bpf/devmap.c @@ -83,7 +83,6 @@ struct bpf_dtab { u32 n_buckets; }; -static DEFINE_PER_CPU(struct list_head, dev_flush_list); static DEFINE_SPINLOCK(dev_map_lock); static LIST_HEAD(dev_map_list); @@ -107,7 +106,7 @@ static inline struct hlist_head *dev_map_index_hash(struct bpf_dtab *dtab, return &dtab->dev_index_head[idx & (dtab->n_buckets - 1)]; } -static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) +static int dev_map_alloc_check(union bpf_attr *attr) { u32 valsize = attr->value_size; @@ -121,23 +120,28 @@ static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) attr->map_flags & ~DEV_CREATE_FLAG_MASK) return -EINVAL; + if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { + /* Hash table size must be a power of 2; roundup_pow_of_two() + * can overflow into UB on 32-bit arches + */ + if (attr->max_entries > 1UL << 31) + return -EINVAL; + } + + return 0; +} + +static int dev_map_init_map(struct bpf_dtab *dtab, union bpf_attr *attr) +{ /* Lookup returns a pointer straight to dev->ifindex, so make sure the * verifier prevents writes from the BPF side */ attr->map_flags |= BPF_F_RDONLY_PROG; - - bpf_map_init_from_attr(&dtab->map, attr); if (attr->map_type == BPF_MAP_TYPE_DEVMAP_HASH) { - /* hash table size must be power of 2; roundup_pow_of_two() can - * overflow into UB on 32-bit arches, so check that first - */ - if (dtab->map.max_entries > 1UL << 31) - return -EINVAL; - + /* Hash table size must be a power of 2 */ dtab->n_buckets = roundup_pow_of_two(dtab->map.max_entries); - dtab->dev_index_head = dev_map_create_hash(dtab->n_buckets, dtab->map.numa_node); if (!dtab->dev_index_head) @@ -196,7 +200,14 @@ static void dev_map_free(struct bpf_map *map) list_del_rcu(&dtab->list); spin_unlock(&dev_map_lock); - bpf_clear_redirect_map(map); + /* bpf_redirect_info->map is assigned in __bpf_xdp_redirect_map() + * during NAPI callback and cleared after the XDP redirect. There is no + * explicit RCU read section which protects bpf_redirect_info->map but + * local_bh_disable() also marks the beginning of an RCU section. This + * makes the complete softirq callback RCU protected. Thus after the + * following synchronize_rcu() there is no bpf_redirect_info->map == map + * assignment. + */ synchronize_rcu(); /* Make sure prior __dev_map_entry_free() have completed. */ @@ -406,9 +417,8 @@ out: * driver before returning from its napi->poll() routine. See the comment above * xdp_do_flush() in filter.c.
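With the per-CPU dev_flush_list gone, the flush list is obtained from the per-softirq bpf_net_context instead. A hedged sketch of the caller side after this change (the real xdp_do_flush() lives in net/core/filter.c, also flushes XSK maps, and may differ in detail):

```c
/* Sketch only: both getters below appear in this series and return the
 * flush lists stored in the bpf_net_context set up around NAPI polling.
 */
void xdp_do_flush(void)
{
	__dev_flush(bpf_net_ctx_get_dev_flush_list());
	__cpu_map_flush(bpf_net_ctx_get_cpu_map_flush_list());
}
```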
*/ -void __dev_flush(void) +void __dev_flush(struct list_head *flush_list) { - struct list_head *flush_list = this_cpu_ptr(&dev_flush_list); struct xdp_dev_bulk_queue *bq, *tmp; list_for_each_entry_safe(bq, tmp, flush_list, flush_node) { @@ -419,16 +429,6 @@ void __dev_flush(void) } } -#ifdef CONFIG_DEBUG_NET -bool dev_check_flush(void) -{ - if (list_empty(this_cpu_ptr(&dev_flush_list))) - return false; - __dev_flush(); - return true; -} -#endif - /* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or * by local_bh_disable() (from XDP calls inside NAPI). The * rcu_read_lock_bh_held() below makes lockdep accept both. @@ -453,7 +453,6 @@ static void *__dev_map_lookup_elem(struct bpf_map *map, u32 key) static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf, struct net_device *dev_rx, struct bpf_prog *xdp_prog) { - struct list_head *flush_list = this_cpu_ptr(&dev_flush_list); struct xdp_dev_bulk_queue *bq = this_cpu_ptr(dev->xdp_bulkq); if (unlikely(bq->count == DEV_MAP_BULK_SIZE)) @@ -467,6 +466,8 @@ static void bq_enqueue(struct net_device *dev, struct xdp_frame *xdpf, * are only ever modified together. */ if (!bq->dev_rx) { + struct list_head *flush_list = bpf_net_ctx_get_dev_flush_list(); + bq->dev_rx = dev_rx; bq->xdp_prog = xdp_prog; list_add(&bq->flush_node, flush_list); @@ -1040,6 +1041,7 @@ static u64 dev_map_mem_usage(const struct bpf_map *map) BTF_ID_LIST_SINGLE(dev_map_btf_ids, struct, bpf_dtab) const struct bpf_map_ops dev_map_ops = { .map_meta_equal = bpf_map_meta_equal, + .map_alloc_check = dev_map_alloc_check, .map_alloc = dev_map_alloc, .map_free = dev_map_free, .map_get_next_key = dev_map_get_next_key, @@ -1054,6 +1056,7 @@ const struct bpf_map_ops dev_map_ops = { const struct bpf_map_ops dev_map_hash_ops = { .map_meta_equal = bpf_map_meta_equal, + .map_alloc_check = dev_map_alloc_check, .map_alloc = dev_map_alloc, .map_free = dev_map_free, .map_get_next_key = dev_map_hash_get_next_key, @@ -1153,15 +1156,11 @@ static struct notifier_block dev_map_notifier = { static int __init dev_map_init(void) { - int cpu; - /* Assure tracepoint shadow struct _bpf_dtab_netdev is in sync */ BUILD_BUG_ON(offsetof(struct bpf_dtab_netdev, dev) != offsetof(struct _bpf_dtab_netdev, dev)); register_netdevice_notifier(&dev_map_notifier); - for_each_possible_cpu(cpu) - INIT_LIST_HEAD(&per_cpu(dev_flush_list, cpu)); return 0; } diff --git a/kernel/bpf/helpers.c b/kernel/bpf/helpers.c index 3243c83ef3e3..b5f0adae8293 100644 --- a/kernel/bpf/helpers.c +++ b/kernel/bpf/helpers.c @@ -2498,7 +2498,7 @@ __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid) /** * bpf_dynptr_slice() - Obtain a read-only pointer to the dynptr data. - * @ptr: The dynptr whose data slice to retrieve + * @p: The dynptr whose data slice to retrieve * @offset: Offset into the dynptr * @buffer__opt: User-provided buffer to copy contents into. May be NULL * @buffer__szk: Size (in bytes) of the buffer if present. 
This is the @@ -2524,9 +2524,10 @@ __bpf_kfunc struct task_struct *bpf_task_from_pid(s32 pid) * provided buffer, with its contents containing the data, if unable to obtain * direct pointer) */ -__bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset, +__bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr *p, u32 offset, void *buffer__opt, u32 buffer__szk) { + const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; enum bpf_dynptr_type type; u32 len = buffer__szk; int err; @@ -2568,7 +2569,7 @@ __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset /** * bpf_dynptr_slice_rdwr() - Obtain a writable pointer to the dynptr data. - * @ptr: The dynptr whose data slice to retrieve + * @p: The dynptr whose data slice to retrieve * @offset: Offset into the dynptr * @buffer__opt: User-provided buffer to copy contents into. May be NULL * @buffer__szk: Size (in bytes) of the buffer if present. This is the @@ -2608,9 +2609,11 @@ __bpf_kfunc void *bpf_dynptr_slice(const struct bpf_dynptr_kern *ptr, u32 offset * provided buffer, with its contents containing the data, if unable to obtain * direct pointer) */ -__bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 offset, +__bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr *p, u32 offset, void *buffer__opt, u32 buffer__szk) { + const struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; + if (!ptr->data || __bpf_dynptr_is_rdonly(ptr)) return NULL; @@ -2636,11 +2639,12 @@ __bpf_kfunc void *bpf_dynptr_slice_rdwr(const struct bpf_dynptr_kern *ptr, u32 o * will be copied out into the buffer and the user will need to call * bpf_dynptr_write() to commit changes. */ - return bpf_dynptr_slice(ptr, offset, buffer__opt, buffer__szk); + return bpf_dynptr_slice(p, offset, buffer__opt, buffer__szk); } -__bpf_kfunc int bpf_dynptr_adjust(struct bpf_dynptr_kern *ptr, u32 start, u32 end) +__bpf_kfunc int bpf_dynptr_adjust(const struct bpf_dynptr *p, u32 start, u32 end) { + struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; u32 size; if (!ptr->data || start > end) @@ -2657,36 +2661,45 @@ __bpf_kfunc int bpf_dynptr_adjust(struct bpf_dynptr_kern *ptr, u32 start, u32 en return 0; } -__bpf_kfunc bool bpf_dynptr_is_null(struct bpf_dynptr_kern *ptr) +__bpf_kfunc bool bpf_dynptr_is_null(const struct bpf_dynptr *p) { + struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; + return !ptr->data; } -__bpf_kfunc bool bpf_dynptr_is_rdonly(struct bpf_dynptr_kern *ptr) +__bpf_kfunc bool bpf_dynptr_is_rdonly(const struct bpf_dynptr *p) { + struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; + if (!ptr->data) return false; return __bpf_dynptr_is_rdonly(ptr); } -__bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr_kern *ptr) +__bpf_kfunc __u32 bpf_dynptr_size(const struct bpf_dynptr *p) { + struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; + if (!ptr->data) return -EINVAL; return __bpf_dynptr_size(ptr); } -__bpf_kfunc int bpf_dynptr_clone(struct bpf_dynptr_kern *ptr, - struct bpf_dynptr_kern *clone__uninit) +__bpf_kfunc int bpf_dynptr_clone(const struct bpf_dynptr *p, + struct bpf_dynptr *clone__uninit) { + struct bpf_dynptr_kern *clone = (struct bpf_dynptr_kern *)clone__uninit; + struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)p; + if (!ptr->data) { - bpf_dynptr_set_null(clone__uninit); + bpf_dynptr_set_null(clone); return -EINVAL; } - *clone__uninit = *ptr; + *clone = *ptr; return 0; } @@ -2786,7 +2799,7 @@ __bpf_kfunc int bpf_wq_start(struct 
bpf_wq *wq, unsigned int flags) } __bpf_kfunc int bpf_wq_set_callback_impl(struct bpf_wq *wq, - int (callback_fn)(void *map, int *key, struct bpf_wq *wq), + int (callback_fn)(void *map, int *key, void *value), unsigned int flags, void *aux__ign) { @@ -2809,6 +2822,122 @@ __bpf_kfunc void bpf_preempt_enable(void) preempt_enable(); } +struct bpf_iter_bits { + __u64 __opaque[2]; +} __aligned(8); + +struct bpf_iter_bits_kern { + union { + unsigned long *bits; + unsigned long bits_copy; + }; + u32 nr_bits; + int bit; +} __aligned(8); + +/** + * bpf_iter_bits_new() - Initialize a new bits iterator for a given memory area + * @it: The new bpf_iter_bits to be created + * @unsafe_ptr__ign: A pointer pointing to a memory area to be iterated over + * @nr_words: The size of the specified memory area, measured in 8-byte units. + * Due to the limitation of memalloc, it can't be greater than 512. + * + * This function initializes a new bpf_iter_bits structure for iterating over + * a memory area which is specified by the @unsafe_ptr__ign and @nr_words. It + * copies the data of the memory area to the newly created bpf_iter_bits @it for + * subsequent iteration operations. + * + * On success, 0 is returned. On failure, a negative error code is returned. + */ +__bpf_kfunc int +bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, u32 nr_words) +{ + struct bpf_iter_bits_kern *kit = (void *)it; + u32 nr_bytes = nr_words * sizeof(u64); + u32 nr_bits = BYTES_TO_BITS(nr_bytes); + int err; + + BUILD_BUG_ON(sizeof(struct bpf_iter_bits_kern) != sizeof(struct bpf_iter_bits)); + BUILD_BUG_ON(__alignof__(struct bpf_iter_bits_kern) != + __alignof__(struct bpf_iter_bits)); + + kit->nr_bits = 0; + kit->bits_copy = 0; + kit->bit = -1; + + if (!unsafe_ptr__ign || !nr_words) + return -EINVAL; + + /* Optimization for u64 mask */ + if (nr_bits == 64) { + err = bpf_probe_read_kernel_common(&kit->bits_copy, nr_bytes, unsafe_ptr__ign); + if (err) + return -EFAULT; + + kit->nr_bits = nr_bits; + return 0; + } + + /* Fallback to memalloc */ + kit->bits = bpf_mem_alloc(&bpf_global_ma, nr_bytes); + if (!kit->bits) + return -ENOMEM; + + err = bpf_probe_read_kernel_common(kit->bits, nr_bytes, unsafe_ptr__ign); + if (err) { + bpf_mem_free(&bpf_global_ma, kit->bits); + return err; + } + + kit->nr_bits = nr_bits; + return 0; +} + +/** + * bpf_iter_bits_next() - Get the next bit in a bpf_iter_bits + * @it: The bpf_iter_bits to be checked + * + * This function returns a pointer to the index of the next set bit in the + * memory area. + * + * If there are no further set bits, it returns NULL. + */ +__bpf_kfunc int *bpf_iter_bits_next(struct bpf_iter_bits *it) +{ + struct bpf_iter_bits_kern *kit = (void *)it; + u32 nr_bits = kit->nr_bits; + const unsigned long *bits; + int bit; + + if (nr_bits == 0) + return NULL; + + bits = nr_bits == 64 ? &kit->bits_copy : kit->bits; + bit = find_next_bit(bits, nr_bits, kit->bit + 1); + if (bit >= nr_bits) { + kit->nr_bits = 0; + return NULL; + } + + kit->bit = bit; + return &kit->bit; +} + +/** + * bpf_iter_bits_destroy() - Destroy a bpf_iter_bits + * @it: The bpf_iter_bits to be destroyed + * + * Destroy the resource associated with the bpf_iter_bits.
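Taken together, new/next/destroy form an open-coded iterator over set bits. A BPF-side usage sketch in the style of the BPF selftests (it assumes libbpf's bpf_for_each() open-coded-iterator macro, which expands to the three kfuncs documented above):

```c
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"	/* selftests header providing bpf_for_each() */

/* Hypothetical BPF program: count the set bits of a two-word mask. */
SEC("syscall")
int count_bits(void *ctx)
{
	u64 words[2] = { 0x5, 0x1 };	/* bits 0, 2 and 64 set */
	int nr = 0;
	int *bit;

	bpf_for_each(bits, bit, words, 2 /* nr_words */)
		nr++;			/* visits indices 0, 2, 64 */

	return nr;
}
```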
+ */ +__bpf_kfunc void bpf_iter_bits_destroy(struct bpf_iter_bits *it) +{ + struct bpf_iter_bits_kern *kit = (void *)it; + + if (kit->nr_bits <= 64) + return; + bpf_mem_free(&bpf_global_ma, kit->bits); +} + __bpf_kfunc_end_defs(); BTF_KFUNCS_START(generic_btf_ids) @@ -2891,6 +3020,9 @@ BTF_ID_FLAGS(func, bpf_wq_set_callback_impl) BTF_ID_FLAGS(func, bpf_wq_start) BTF_ID_FLAGS(func, bpf_preempt_disable) BTF_ID_FLAGS(func, bpf_preempt_enable) +BTF_ID_FLAGS(func, bpf_iter_bits_new, KF_ITER_NEW) +BTF_ID_FLAGS(func, bpf_iter_bits_next, KF_ITER_NEXT | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_iter_bits_destroy, KF_ITER_DESTROY) BTF_KFUNCS_END(common_btf_ids) static const struct btf_kfunc_id_set common_kfunc_set = { @@ -2932,7 +3064,9 @@ late_initcall(kfunc_init); */ const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len) { - return bpf_dynptr_slice(ptr, 0, NULL, len); + const struct bpf_dynptr *p = (struct bpf_dynptr *)ptr; + + return bpf_dynptr_slice(p, 0, NULL, len); } /* Get a pointer to dynptr data up to len bytes for read write access. If diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c index 4bd8f17a9f24..5aebfc3051e3 100644 --- a/kernel/bpf/log.c +++ b/kernel/bpf/log.c @@ -91,7 +91,7 @@ void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt, goto fail; } else { u64 new_end, new_start; - u32 buf_start, buf_end, new_n; + u32 buf_start, buf_end; new_end = log->end_pos + n; if (new_end - log->start_pos >= log->len_total) @@ -708,7 +708,9 @@ static void print_reg_state(struct bpf_verifier_env *env, verbose(env, "%s", btf_type_name(reg->btf, reg->btf_id)); verbose(env, "("); if (reg->id) - verbose_a("id=%d", reg->id); + verbose_a("id=%d", reg->id & ~BPF_ADD_CONST); + if (reg->id & BPF_ADD_CONST) + verbose(env, "%+d", reg->off); if (reg->ref_obj_id) verbose_a("ref_obj_id=%d", reg->ref_obj_id); if (type_is_non_owning_ref(reg->type)) diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c index f45ed6adc092..869265852d51 100644 --- a/kernel/bpf/syscall.c +++ b/kernel/bpf/syscall.c @@ -3151,6 +3151,13 @@ static void bpf_link_show_fdinfo(struct seq_file *m, struct file *filp) } #endif +static __poll_t bpf_link_poll(struct file *file, struct poll_table_struct *pts) +{ + struct bpf_link *link = file->private_data; + + return link->ops->poll(file, pts); +} + static const struct file_operations bpf_link_fops = { #ifdef CONFIG_PROC_FS .show_fdinfo = bpf_link_show_fdinfo, @@ -3160,6 +3167,16 @@ static const struct file_operations bpf_link_fops = { .write = bpf_dummy_write, }; +static const struct file_operations bpf_link_fops_poll = { +#ifdef CONFIG_PROC_FS + .show_fdinfo = bpf_link_show_fdinfo, +#endif + .release = bpf_link_release, + .read = bpf_dummy_read, + .write = bpf_dummy_write, + .poll = bpf_link_poll, +}; + static int bpf_link_alloc_id(struct bpf_link *link) { int id; @@ -3202,7 +3219,9 @@ int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer) return id; } - file = anon_inode_getfile("bpf_link", &bpf_link_fops, link, O_CLOEXEC); + file = anon_inode_getfile("bpf_link", + link->ops->poll ? &bpf_link_fops_poll : &bpf_link_fops, + link, O_CLOEXEC); if (IS_ERR(file)) { bpf_link_free_id(id); put_unused_fd(fd); @@ -3230,7 +3249,9 @@ int bpf_link_settle(struct bpf_link_primer *primer) int bpf_link_new_fd(struct bpf_link *link) { - return anon_inode_getfd("bpf-link", &bpf_link_fops, link, O_CLOEXEC); + return anon_inode_getfd("bpf-link", + link->ops->poll ? 
&bpf_link_fops_poll : &bpf_link_fops, + link, O_CLOEXEC); } struct bpf_link *bpf_link_get_from_fd(u32 ufd) @@ -3240,7 +3261,7 @@ struct bpf_link *bpf_link_get_from_fd(u32 ufd) if (!f.file) return ERR_PTR(-EBADF); - if (f.file->f_op != &bpf_link_fops) { + if (f.file->f_op != &bpf_link_fops && f.file->f_op != &bpf_link_fops_poll) { fdput(f); return ERR_PTR(-EINVAL); } @@ -4972,7 +4993,7 @@ static int bpf_obj_get_info_by_fd(const union bpf_attr *attr, uattr); else if (f.file->f_op == &btf_fops) err = bpf_btf_get_info_by_fd(f.file, f.file->private_data, attr, uattr); - else if (f.file->f_op == &bpf_link_fops) + else if (f.file->f_op == &bpf_link_fops || f.file->f_op == &bpf_link_fops_poll) err = bpf_link_get_info_by_fd(f.file, f.file->private_data, attr, uattr); else @@ -5107,7 +5128,7 @@ static int bpf_task_fd_query(const union bpf_attr *attr, if (!file) return -EBADF; - if (file->f_op == &bpf_link_fops) { + if (file->f_op == &bpf_link_fops || file->f_op == &bpf_link_fops_poll) { struct bpf_link *link = file->private_data; if (link->ops == &bpf_raw_tp_link_lops) { @@ -5417,10 +5438,11 @@ static int link_detach(union bpf_attr *attr) return ret; } -static struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link) +struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link) { return atomic64_fetch_add_unless(&link->refcnt, 1, 0) ? link : ERR_PTR(-ENOENT); } +EXPORT_SYMBOL(bpf_link_inc_not_zero); struct bpf_link *bpf_link_by_id(u32 id) { diff --git a/kernel/bpf/task_iter.c b/kernel/bpf/task_iter.c index ec4e97c61eef..02aa9db8d796 100644 --- a/kernel/bpf/task_iter.c +++ b/kernel/bpf/task_iter.c @@ -261,6 +261,7 @@ task_file_seq_get_next(struct bpf_iter_seq_task_file_info *info) u32 saved_tid = info->tid; struct task_struct *curr_task; unsigned int curr_fd = info->fd; + struct file *f; /* If this function returns a non-NULL file object, * it held a reference to the task/file. 
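Returning to the pollable-link plumbing in syscall.c above: whether a link fd gets the poll-capable file_operations is decided purely by its ops table. A hypothetical link type opts in like this (callback names are illustrative; the release/dealloc bodies are elided):

```c
/* Providing .poll makes bpf_link_prime()/bpf_link_new_fd() pick
 * bpf_link_fops_poll for this link's fd; bpf_link_poll() then forwards
 * poll() calls to this callback.
 */
static __poll_t my_link_poll(struct file *file, struct poll_table_struct *pts)
{
	struct bpf_link *link = file->private_data;

	/* e.g. poll_wait(file, &my_link(link)->wq, pts); */
	return 0;
}

static const struct bpf_link_ops my_link_lops = {
	.release = my_link_release,
	.dealloc = my_link_dealloc,
	.poll    = my_link_poll,
};
```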
@@ -286,12 +287,8 @@ again: } rcu_read_lock(); - for (;; curr_fd++) { - struct file *f; - f = task_lookup_next_fdget_rcu(curr_task, &curr_fd); - if (!f) - break; - + f = task_lookup_next_fdget_rcu(curr_task, &curr_fd); + if (f) { /* set info->fd */ info->fd = curr_fd; rcu_read_unlock(); diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 214a9fa8c6fb..8da132a1ef28 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2982,8 +2982,10 @@ static int check_subprogs(struct bpf_verifier_env *env) if (code == (BPF_JMP | BPF_CALL) && insn[i].src_reg == 0 && - insn[i].imm == BPF_FUNC_tail_call) + insn[i].imm == BPF_FUNC_tail_call) { subprog[cur_subprog].has_tail_call = true; + subprog[cur_subprog].tail_call_reachable = true; + } if (BPF_CLASS(code) == BPF_LD && (BPF_MODE(code) == BPF_ABS || BPF_MODE(code) == BPF_IND)) subprog[cur_subprog].has_ld_abs = true; @@ -3215,7 +3217,8 @@ static int insn_def_regno(const struct bpf_insn *insn) case BPF_ST: return -1; case BPF_STX: - if (BPF_MODE(insn->code) == BPF_ATOMIC && + if ((BPF_MODE(insn->code) == BPF_ATOMIC || + BPF_MODE(insn->code) == BPF_PROBE_ATOMIC) && (insn->imm & BPF_FETCH)) { if (insn->imm == BPF_CMPXCHG) return BPF_REG_0; @@ -3991,7 +3994,7 @@ static bool idset_contains(struct bpf_idset *s, u32 id) u32 i; for (i = 0; i < s->count; ++i) - if (s->ids[i] == id) + if (s->ids[i] == (id & ~BPF_ADD_CONST)) return true; return false; @@ -4001,7 +4004,7 @@ static int idset_push(struct bpf_idset *s, u32 id) { if (WARN_ON_ONCE(s->count >= ARRAY_SIZE(s->ids))) return -EFAULT; - s->ids[s->count++] = id; + s->ids[s->count++] = id & ~BPF_ADD_CONST; return 0; } @@ -4438,8 +4441,20 @@ static bool __is_pointer_value(bool allow_ptr_leaks, static void assign_scalar_id_before_mov(struct bpf_verifier_env *env, struct bpf_reg_state *src_reg) { - if (src_reg->type == SCALAR_VALUE && !src_reg->id && - !tnum_is_const(src_reg->var_off)) + if (src_reg->type != SCALAR_VALUE) + return; + + if (src_reg->id & BPF_ADD_CONST) { + /* + * The verifier is processing an rX = rY insn and + * rY->id already has the special linked-register flag set. + * Clear it, since multiple rX += const are not supported. + */ + src_reg->id = 0; + src_reg->off = 0; + } + + if (!src_reg->id && !tnum_is_const(src_reg->var_off)) /* Ensure that src_reg has a valid ID that will be copied to * dst_reg and then will be used by find_equal_scalars() to * propagate min/max range. @@ -5449,7 +5464,7 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno, * this program. To check that [x1, x2) overlaps with [y1, y2), * it is sufficient to check x1 < y2 && y1 < x2.
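The overlap rule quoted in the comment above is the standard half-open-interval test; spelled out as a standalone helper:

```c
/* [x1, x2) and [y1, y2) overlap iff each range starts before the other
 * one ends, e.g. [8, 16) vs [12, 20): 8 < 20 && 12 < 16.
 */
static bool ranges_overlap(u64 x1, u64 x2, u64 y1, u64 y2)
{
	return x1 < y2 && y1 < x2;
}
```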
 */ - if (reg->smin_value + off < p + btf_field_type_size(field->type) && + if (reg->smin_value + off < p + field->size && p < reg->umax_value + off + size) { switch (field->type) { case BPF_KPTR_UNREF: @@ -7715,6 +7730,13 @@ static int process_dynptr_func(struct bpf_verifier_env *env, int regno, int insn struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno]; int err; + if (reg->type != PTR_TO_STACK && reg->type != CONST_PTR_TO_DYNPTR) { + verbose(env, + "arg#%d expected pointer to stack or const struct bpf_dynptr\n", + regno); + return -EINVAL; + } + /* MEM_UNINIT and MEM_RDONLY are exclusive, when applied to an * ARG_PTR_TO_DYNPTR (or ARG_PTR_TO_DYNPTR | DYNPTR_TYPE_*): */ @@ -9464,6 +9486,10 @@ static int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog, return -EINVAL; } } else if (arg->arg_type == (ARG_PTR_TO_DYNPTR | MEM_RDONLY)) { + ret = check_func_arg_reg_off(env, reg, regno, ARG_PTR_TO_DYNPTR); + if (ret) + return ret; + ret = process_dynptr_func(env, regno, -1, arg->arg_type, 0); if (ret) return ret; @@ -10917,7 +10943,7 @@ enum { }; BTF_ID_LIST(kf_arg_btf_ids) -BTF_ID(struct, bpf_dynptr_kern) +BTF_ID(struct, bpf_dynptr) BTF_ID(struct, bpf_list_head) BTF_ID(struct, bpf_list_node) BTF_ID(struct, bpf_rb_root) @@ -11190,6 +11216,9 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env, if (btf_is_prog_ctx_type(&env->log, meta->btf, t, resolve_prog_type(env->prog), argno)) return KF_ARG_PTR_TO_CTX; + if (is_kfunc_arg_nullable(meta->btf, &args[argno]) && register_is_null(reg)) + return KF_ARG_PTR_TO_NULL; + if (is_kfunc_arg_alloc_obj(meta->btf, &args[argno])) return KF_ARG_PTR_TO_ALLOC_BTF_ID; @@ -11235,9 +11264,6 @@ get_kfunc_ptr_arg_type(struct bpf_verifier_env *env, if (is_kfunc_arg_callback(env, meta->btf, &args[argno])) return KF_ARG_PTR_TO_CALLBACK; - if (is_kfunc_arg_nullable(meta->btf, &args[argno]) && register_is_null(reg)) - return KF_ARG_PTR_TO_NULL; - if (argno + 1 < nargs && (is_kfunc_arg_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]) || is_kfunc_arg_const_mem_size(meta->btf, &args[argno + 1], &regs[regno + 1]))) @@ -11268,6 +11294,8 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env, bool strict_type_match = false; const struct btf *reg_btf; const char *reg_ref_tname; + bool taking_projection; + bool struct_same; u32 reg_ref_id; if (base_type(reg->type) == PTR_TO_BTF_ID) { @@ -11307,11 +11335,19 @@ static int process_kf_arg_ptr_to_btf_id(struct bpf_verifier_env *env, btf_type_ids_nocast_alias(&env->log, reg_btf, reg_ref_id, meta->btf, ref_id)) strict_type_match = true; - WARN_ON_ONCE(is_kfunc_trusted_args(meta) && reg->off); + WARN_ON_ONCE(is_kfunc_release(meta) && + (reg->off || !tnum_is_const(reg->var_off) || + reg->var_off.value)); reg_ref_t = btf_type_skip_modifiers(reg_btf, reg_ref_id, &reg_ref_id); reg_ref_tname = btf_name_by_offset(reg_btf, reg_ref_t->name_off); - if (!btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match)) { + struct_same = btf_struct_ids_match(&env->log, reg_btf, reg_ref_id, reg->off, meta->btf, ref_id, strict_type_match); + /* If kfunc is accepting a projection type (i.e. __sk_buff), it cannot + * actually use it -- it must cast to the underlying type. So we allow + * the caller to pass in the underlying type.
+ */ + taking_projection = btf_is_projection_of(ref_tname, reg_ref_tname); + if (!taking_projection && !struct_same) { verbose(env, "kernel function %s args#%d expected pointer to %s %s but R%d has a pointer to %s %s\n", meta->func_name, argno, btf_type_str(ref_t), ref_tname, argno + 1, btf_type_str(reg_ref_t), reg_ref_tname); @@ -11651,7 +11687,7 @@ __process_kf_arg_ptr_to_graph_node(struct bpf_verifier_env *env, node_off = reg->off + reg->var_off.value; field = reg_find_field_offset(reg, node_off, node_field_type); - if (!field || field->offset != node_off) { + if (!field) { verbose(env, "%s not found at offset=%u\n", node_type_name, node_off); return -EINVAL; } @@ -11883,12 +11919,8 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ return -EINVAL; } } - fallthrough; case KF_ARG_PTR_TO_CTX: - /* Trusted arguments have the same offset checks as release arguments */ - arg_type |= OBJ_RELEASE; - break; case KF_ARG_PTR_TO_DYNPTR: case KF_ARG_PTR_TO_ITER: case KF_ARG_PTR_TO_LIST_HEAD: @@ -11901,7 +11933,6 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ case KF_ARG_PTR_TO_REFCOUNTED_KPTR: case KF_ARG_PTR_TO_CONST_STR: case KF_ARG_PTR_TO_WORKQUEUE: - /* Trusted by default */ break; default: WARN_ON_ONCE(1); @@ -11957,12 +11988,6 @@ static int check_kfunc_args(struct bpf_verifier_env *env, struct bpf_kfunc_call_ enum bpf_arg_type dynptr_arg_type = ARG_PTR_TO_DYNPTR; int clone_ref_obj_id = 0; - if (reg->type != PTR_TO_STACK && - reg->type != CONST_PTR_TO_DYNPTR) { - verbose(env, "arg#%d expected pointer to stack or dynptr_ptr\n", i); - return -EINVAL; - } - if (reg->type == CONST_PTR_TO_DYNPTR) dynptr_arg_type |= MEM_RDONLY; @@ -12701,56 +12726,6 @@ static int check_kfunc_call(struct bpf_verifier_env *env, struct bpf_insn *insn, return 0; } -static bool signed_add_overflows(s64 a, s64 b) -{ - /* Do the add in u64, where overflow is well-defined */ - s64 res = (s64)((u64)a + (u64)b); - - if (b < 0) - return res > a; - return res < a; -} - -static bool signed_add32_overflows(s32 a, s32 b) -{ - /* Do the add in u32, where overflow is well-defined */ - s32 res = (s32)((u32)a + (u32)b); - - if (b < 0) - return res > a; - return res < a; -} - -static bool signed_add16_overflows(s16 a, s16 b) -{ - /* Do the add in u16, where overflow is well-defined */ - s16 res = (s16)((u16)a + (u16)b); - - if (b < 0) - return res > a; - return res < a; -} - -static bool signed_sub_overflows(s64 a, s64 b) -{ - /* Do the sub in u64, where overflow is well-defined */ - s64 res = (s64)((u64)a - (u64)b); - - if (b < 0) - return res < a; - return res > a; -} - -static bool signed_sub32_overflows(s32 a, s32 b) -{ - /* Do the sub in u32, where overflow is well-defined */ - s32 res = (s32)((u32)a - (u32)b); - - if (b < 0) - return res < a; - return res > a; -} - static bool check_reg_sane_offset(struct bpf_verifier_env *env, const struct bpf_reg_state *reg, enum bpf_reg_type type) @@ -13232,21 +13207,15 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, * added into the variable offset, and we copy the fixed offset * from ptr_reg. 
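The conversions that follow replace the hand-rolled signed_*_overflows() helpers with check_add_overflow()/check_sub_overflow() from include/linux/overflow.h, which store the wrapped result through their last argument and return true on overflow. A standalone illustration of that contract:

```c
#include <linux/limits.h>
#include <linux/overflow.h>

static void saturating_add(s64 a, s64 b, s64 *sum)
{
	/* On overflow *sum holds the wrapped value, so the caller just
	 * clobbers it with a saturating bound, exactly as the verifier
	 * does with smin/smax in the hunks below.
	 */
	if (check_add_overflow(a, b, sum))
		*sum = b < 0 ? S64_MIN : S64_MAX;
}
```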
*/ - if (signed_add_overflows(smin_ptr, smin_val) || - signed_add_overflows(smax_ptr, smax_val)) { + if (check_add_overflow(smin_ptr, smin_val, &dst_reg->smin_value) || + check_add_overflow(smax_ptr, smax_val, &dst_reg->smax_value)) { dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; - } else { - dst_reg->smin_value = smin_ptr + smin_val; - dst_reg->smax_value = smax_ptr + smax_val; } - if (umin_ptr + umin_val < umin_ptr || - umax_ptr + umax_val < umax_ptr) { + if (check_add_overflow(umin_ptr, umin_val, &dst_reg->umin_value) || + check_add_overflow(umax_ptr, umax_val, &dst_reg->umax_value)) { dst_reg->umin_value = 0; dst_reg->umax_value = U64_MAX; - } else { - dst_reg->umin_value = umin_ptr + umin_val; - dst_reg->umax_value = umax_ptr + umax_val; } dst_reg->var_off = tnum_add(ptr_reg->var_off, off_reg->var_off); dst_reg->off = ptr_reg->off; @@ -13289,14 +13258,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, /* A new variable offset is created. If the subtrahend is known * nonnegative, then any reg->range we had before is still good. */ - if (signed_sub_overflows(smin_ptr, smax_val) || - signed_sub_overflows(smax_ptr, smin_val)) { + if (check_sub_overflow(smin_ptr, smax_val, &dst_reg->smin_value) || + check_sub_overflow(smax_ptr, smin_val, &dst_reg->smax_value)) { /* Overflow possible, we know nothing */ dst_reg->smin_value = S64_MIN; dst_reg->smax_value = S64_MAX; - } else { - dst_reg->smin_value = smin_ptr - smax_val; - dst_reg->smax_value = smax_ptr - smin_val; } if (umin_ptr < umax_val) { /* Overflow possible, we know nothing */ @@ -13349,71 +13315,56 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env, static void scalar32_min_max_add(struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg) { - s32 smin_val = src_reg->s32_min_value; - s32 smax_val = src_reg->s32_max_value; - u32 umin_val = src_reg->u32_min_value; - u32 umax_val = src_reg->u32_max_value; + s32 *dst_smin = &dst_reg->s32_min_value; + s32 *dst_smax = &dst_reg->s32_max_value; + u32 *dst_umin = &dst_reg->u32_min_value; + u32 *dst_umax = &dst_reg->u32_max_value; - if (signed_add32_overflows(dst_reg->s32_min_value, smin_val) || - signed_add32_overflows(dst_reg->s32_max_value, smax_val)) { - dst_reg->s32_min_value = S32_MIN; - dst_reg->s32_max_value = S32_MAX; - } else { - dst_reg->s32_min_value += smin_val; - dst_reg->s32_max_value += smax_val; + if (check_add_overflow(*dst_smin, src_reg->s32_min_value, dst_smin) || + check_add_overflow(*dst_smax, src_reg->s32_max_value, dst_smax)) { + *dst_smin = S32_MIN; + *dst_smax = S32_MAX; } - if (dst_reg->u32_min_value + umin_val < umin_val || - dst_reg->u32_max_value + umax_val < umax_val) { - dst_reg->u32_min_value = 0; - dst_reg->u32_max_value = U32_MAX; - } else { - dst_reg->u32_min_value += umin_val; - dst_reg->u32_max_value += umax_val; + if (check_add_overflow(*dst_umin, src_reg->u32_min_value, dst_umin) || + check_add_overflow(*dst_umax, src_reg->u32_max_value, dst_umax)) { + *dst_umin = 0; + *dst_umax = U32_MAX; } } static void scalar_min_max_add(struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg) { - s64 smin_val = src_reg->smin_value; - s64 smax_val = src_reg->smax_value; - u64 umin_val = src_reg->umin_value; - u64 umax_val = src_reg->umax_value; + s64 *dst_smin = &dst_reg->smin_value; + s64 *dst_smax = &dst_reg->smax_value; + u64 *dst_umin = &dst_reg->umin_value; + u64 *dst_umax = &dst_reg->umax_value; - if (signed_add_overflows(dst_reg->smin_value, smin_val) || - signed_add_overflows(dst_reg->smax_value, 
smax_val)) { - dst_reg->smin_value = S64_MIN; - dst_reg->smax_value = S64_MAX; - } else { - dst_reg->smin_value += smin_val; - dst_reg->smax_value += smax_val; + if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) || + check_add_overflow(*dst_smax, src_reg->smax_value, dst_smax)) { + *dst_smin = S64_MIN; + *dst_smax = S64_MAX; } - if (dst_reg->umin_value + umin_val < umin_val || - dst_reg->umax_value + umax_val < umax_val) { - dst_reg->umin_value = 0; - dst_reg->umax_value = U64_MAX; - } else { - dst_reg->umin_value += umin_val; - dst_reg->umax_value += umax_val; + if (check_add_overflow(*dst_umin, src_reg->umin_value, dst_umin) || + check_add_overflow(*dst_umax, src_reg->umax_value, dst_umax)) { + *dst_umin = 0; + *dst_umax = U64_MAX; } } static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg) { - s32 smin_val = src_reg->s32_min_value; - s32 smax_val = src_reg->s32_max_value; + s32 *dst_smin = &dst_reg->s32_min_value; + s32 *dst_smax = &dst_reg->s32_max_value; u32 umin_val = src_reg->u32_min_value; u32 umax_val = src_reg->u32_max_value; - if (signed_sub32_overflows(dst_reg->s32_min_value, smax_val) || - signed_sub32_overflows(dst_reg->s32_max_value, smin_val)) { + if (check_sub_overflow(*dst_smin, src_reg->s32_max_value, dst_smin) || + check_sub_overflow(*dst_smax, src_reg->s32_min_value, dst_smax)) { /* Overflow possible, we know nothing */ - dst_reg->s32_min_value = S32_MIN; - dst_reg->s32_max_value = S32_MAX; - } else { - dst_reg->s32_min_value -= smax_val; - dst_reg->s32_max_value -= smin_val; + *dst_smin = S32_MIN; + *dst_smax = S32_MAX; } if (dst_reg->u32_min_value < umax_val) { /* Overflow possible, we know nothing */ @@ -13429,19 +13380,16 @@ static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg, static void scalar_min_max_sub(struct bpf_reg_state *dst_reg, struct bpf_reg_state *src_reg) { - s64 smin_val = src_reg->smin_value; - s64 smax_val = src_reg->smax_value; + s64 *dst_smin = &dst_reg->smin_value; + s64 *dst_smax = &dst_reg->smax_value; u64 umin_val = src_reg->umin_value; u64 umax_val = src_reg->umax_value; - if (signed_sub_overflows(dst_reg->smin_value, smax_val) || - signed_sub_overflows(dst_reg->smax_value, smin_val)) { + if (check_sub_overflow(*dst_smin, src_reg->smax_value, dst_smin) || + check_sub_overflow(*dst_smax, src_reg->smin_value, dst_smax)) { /* Overflow possible, we know nothing */ - dst_reg->smin_value = S64_MIN; - dst_reg->smax_value = S64_MAX; - } else { - dst_reg->smin_value -= smax_val; - dst_reg->smax_value -= smin_val; + *dst_smin = S64_MIN; + *dst_smax = S64_MAX; } if (dst_reg->umin_value < umax_val) { /* Overflow possible, we know nothing */ @@ -14047,6 +13995,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, struct bpf_func_state *state = vstate->frame[vstate->curframe]; struct bpf_reg_state *regs = state->regs, *dst_reg, *src_reg; struct bpf_reg_state *ptr_reg = NULL, off_reg = {0}; + bool alu32 = (BPF_CLASS(insn->code) != BPF_ALU64); u8 opcode = BPF_OP(insn->code); int err; @@ -14069,11 +14018,7 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env, if (dst_reg->type != SCALAR_VALUE) ptr_reg = dst_reg; - else - /* Make sure ID is cleared otherwise dst_reg min/max could be - * incorrectly propagated into other registers by find_equal_scalars() - */ - dst_reg->id = 0; + if (BPF_SRC(insn->code) == BPF_X) { src_reg = &regs[insn->src_reg]; if (src_reg->type != SCALAR_VALUE) { @@ -14137,7 +14082,43 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
verbose(env, "verifier internal error: no src_reg\n"); return -EINVAL; } - return adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); + err = adjust_scalar_min_max_vals(env, insn, dst_reg, *src_reg); + if (err) + return err; + /* + * Compilers can generate the code + * r1 = r2 + * r1 += 0x1 + * if r2 < 1000 goto ... + * use r1 in memory access + * So remember constant delta between r2 and r1 and update r1 after + * 'if' condition. + */ + if (env->bpf_capable && BPF_OP(insn->code) == BPF_ADD && + dst_reg->id && is_reg_const(src_reg, alu32)) { + u64 val = reg_const_value(src_reg, alu32); + + if ((dst_reg->id & BPF_ADD_CONST) || + /* prevent overflow in find_equal_scalars() later */ + val > (u32)S32_MAX) { + /* + * If the register already went through rX += val + * we cannot accumulate another val into rx->off. + */ + dst_reg->off = 0; + dst_reg->id = 0; + } else { + dst_reg->id |= BPF_ADD_CONST; + dst_reg->off = val; + } + } else { + /* + * Make sure ID is cleared otherwise dst_reg min/max could be + * incorrectly propagated into other registers by find_equal_scalars() + */ + dst_reg->id = 0; + } + return 0; } /* check validity of 32-bit and 64-bit arithmetic operations */ @@ -15109,12 +15090,36 @@ static bool try_match_pkt_pointers(const struct bpf_insn *insn, static void find_equal_scalars(struct bpf_verifier_state *vstate, struct bpf_reg_state *known_reg) { + struct bpf_reg_state fake_reg; struct bpf_func_state *state; struct bpf_reg_state *reg; bpf_for_each_reg_in_vstate(vstate, state, reg, ({ - if (reg->type == SCALAR_VALUE && reg->id == known_reg->id) + if (reg->type != SCALAR_VALUE || reg == known_reg) + continue; + if ((reg->id & ~BPF_ADD_CONST) != (known_reg->id & ~BPF_ADD_CONST)) + continue; + if ((!(reg->id & BPF_ADD_CONST) && !(known_reg->id & BPF_ADD_CONST)) || + reg->off == known_reg->off) { copy_register_state(reg, known_reg); + } else { + s32 saved_off = reg->off; + + fake_reg.type = SCALAR_VALUE; + __mark_reg_known(&fake_reg, (s32)reg->off - (s32)known_reg->off); + + /* reg = known_reg; reg += delta */ + copy_register_state(reg, known_reg); + /* + * Must preserve off, id and add_const flag, + * otherwise another find_equal_scalars() will be incorrect. + */ + reg->off = saved_off; + + scalar32_min_max_add(reg, &fake_reg); + scalar_min_max_add(reg, &fake_reg); + reg->var_off = tnum_add(reg->var_off, fake_reg.var_off); + } })); } @@ -16749,6 +16754,10 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, } if (!rold->precise && exact == NOT_EXACT) return true; + if ((rold->id & BPF_ADD_CONST) != (rcur->id & BPF_ADD_CONST)) + return false; + if ((rold->id & BPF_ADD_CONST) && (rold->off != rcur->off)) + return false; /* Why check_ids() for scalar registers? 
* * Consider the following BPF code: @@ -18630,8 +18639,7 @@ static void release_maps(struct bpf_verifier_env *env) /* drop refcnt of maps used by the rejected program */ static void release_btfs(struct bpf_verifier_env *env) { - __bpf_free_used_btfs(env->prog->aux, env->used_btfs, - env->used_btf_cnt); + __bpf_free_used_btfs(env->used_btfs, env->used_btf_cnt); } /* convert pseudo BPF_LD_IMM64 into generic BPF_LD_IMM64 */ @@ -18750,6 +18758,8 @@ static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta) { struct bpf_insn *insn = prog->insnsi; u32 insn_cnt = prog->len, i; + s32 imm; + s16 off; for (i = 0; i < insn_cnt; i++, insn++) { u8 code = insn->code; @@ -18761,15 +18771,15 @@ static int adjust_jmp_off(struct bpf_prog *prog, u32 tgt_idx, u32 delta) if (insn->code == (BPF_JMP32 | BPF_JA)) { if (i + 1 + insn->imm != tgt_idx) continue; - if (signed_add32_overflows(insn->imm, delta)) + if (check_add_overflow(insn->imm, delta, &imm)) return -ERANGE; - insn->imm += delta; + insn->imm = imm; } else { if (i + 1 + insn->off != tgt_idx) continue; - if (signed_add16_overflows(insn->imm, delta)) + if (check_add_overflow(insn->off, delta, &off)) return -ERANGE; - insn->off += delta; + insn->off = off; } } return 0; diff --git a/kernel/locking/spinlock.c b/kernel/locking/spinlock.c index 8475a0794f8c..438c6086d540 100644 --- a/kernel/locking/spinlock.c +++ b/kernel/locking/spinlock.c @@ -413,3 +413,11 @@ notrace int in_lock_functions(unsigned long addr) && addr < (unsigned long)__lock_text_end; } EXPORT_SYMBOL(in_lock_functions); + +#if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_PREEMPT_RT) +void notrace lockdep_assert_in_softirq_func(void) +{ + lockdep_assert_in_softirq(); +} +EXPORT_SYMBOL(lockdep_assert_in_softirq_func); +#endif diff --git a/kernel/module/main.c b/kernel/module/main.c index d18a94b973e1..d9592195c5bb 100644 --- a/kernel/module/main.c +++ b/kernel/module/main.c @@ -2166,6 +2166,8 @@ static int find_module_sections(struct module *mod, struct load_info *info) #endif #ifdef CONFIG_DEBUG_INFO_BTF_MODULES mod->btf_data = any_section_objs(info, ".BTF", 1, &mod->btf_data_size); + mod->btf_base_data = any_section_objs(info, ".BTF.base", 1, + &mod->btf_base_data_size); #endif #ifdef CONFIG_JUMP_LABEL mod->jump_entries = section_objs(info, "__jump_table", @@ -2590,8 +2592,9 @@ static noinline int do_init_module(struct module *mod) } #ifdef CONFIG_DEBUG_INFO_BTF_MODULES - /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointer */ + /* .BTF is not SHF_ALLOC and will get removed, so sanitize pointers */ mod->btf_data = NULL; + mod->btf_base_data = NULL; #endif /* * We want to free module_init, but be aware that kallsyms may be diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index d1daeab1bbc1..cd098846e251 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -1369,8 +1369,8 @@ __bpf_kfunc void bpf_key_put(struct bpf_key *bkey) #ifdef CONFIG_SYSTEM_DATA_VERIFICATION /** * bpf_verify_pkcs7_signature - verify a PKCS#7 signature - * @data_ptr: data to verify - * @sig_ptr: signature of the data + * @data_p: data to verify + * @sig_p: signature of the data * @trusted_keyring: keyring with keys trusted for signature verification * * Verify the PKCS#7 signature *sig_ptr* against the supplied *data_ptr* @@ -1378,10 +1378,12 @@ __bpf_kfunc void bpf_key_put(struct bpf_key *bkey) * * Return: 0 on success, a negative value on error. 
 */ -__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr_kern *data_ptr, - struct bpf_dynptr_kern *sig_ptr, +__bpf_kfunc int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_p, + struct bpf_dynptr *sig_p, struct bpf_key *trusted_keyring) { + struct bpf_dynptr_kern *data_ptr = (struct bpf_dynptr_kern *)data_p; + struct bpf_dynptr_kern *sig_ptr = (struct bpf_dynptr_kern *)sig_p; const void *data, *sig; u32 data_len, sig_len; int ret; @@ -1444,7 +1446,7 @@ __bpf_kfunc_start_defs(); * bpf_get_file_xattr - get xattr of a file * @file: file to get xattr from * @name__str: name of the xattr - * @value_ptr: output buffer of the xattr value + * @value_p: output buffer of the xattr value * * Get xattr *name__str* of *file* and store the output in *value_ptr*. * @@ -1453,8 +1455,9 @@ __bpf_kfunc_start_defs(); * Return: 0 on success, a negative value on error. */ __bpf_kfunc int bpf_get_file_xattr(struct file *file, const char *name__str, - struct bpf_dynptr_kern *value_ptr) + struct bpf_dynptr *value_p) { + struct bpf_dynptr_kern *value_ptr = (struct bpf_dynptr_kern *)value_p; struct dentry *dentry; u32 value_len; void *value; diff --git a/lib/Kconfig b/lib/Kconfig index b0a76dff5c18..b38849af6f13 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -623,6 +623,7 @@ config SIGNATURE config DIMLIB tristate + depends on NET help Dynamic Interrupt Moderation library. Implements an algorithm for dynamically changing CQ moderation values diff --git a/lib/dim/net_dim.c b/lib/dim/net_dim.c index 4e32f7aaac86..d7e7028e9b19 100644 --- a/lib/dim/net_dim.c +++ b/lib/dim/net_dim.c @@ -4,6 +4,7 @@ */ #include <linux/dim.h> +#include <linux/rtnetlink.h> /* * Net DIM profiles: * There are different set of profiles for RX/TX CQs. * Each profile size must be of NET_DIM_PARAMS_NUM_PROFILES */ -#define NET_DIM_PARAMS_NUM_PROFILES 5 -#define NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE 256 -#define NET_DIM_DEFAULT_TX_CQ_PKTS_FROM_EQE 128 -#define NET_DIM_DEF_PROFILE_CQE 1 -#define NET_DIM_DEF_PROFILE_EQE 1 - #define NET_DIM_RX_EQE_PROFILES { \ {.usec = 1, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \ {.usec = 8, .pkts = NET_DIM_DEFAULT_RX_CQ_PKTS_FROM_EQE,}, \ @@ -101,6 +96,143 @@ net_dim_get_def_tx_moderation(u8 cq_period_mode) } EXPORT_SYMBOL(net_dim_get_def_tx_moderation); +int net_dim_init_irq_moder(struct net_device *dev, u8 profile_flags, + u8 coal_flags, u8 rx_mode, u8 tx_mode, + void (*rx_dim_work)(struct work_struct *work), + void (*tx_dim_work)(struct work_struct *work)) +{ + struct dim_cq_moder *rxp = NULL, *txp; + struct dim_irq_moder *moder; + int len; + + dev->irq_moder = kzalloc(sizeof(*dev->irq_moder), GFP_KERNEL); + if (!dev->irq_moder) + return -ENOMEM; + + moder = dev->irq_moder; + len = NET_DIM_PARAMS_NUM_PROFILES * sizeof(*moder->rx_profile); + + moder->coal_flags = coal_flags; + moder->profile_flags = profile_flags; + + if (profile_flags & DIM_PROFILE_RX) { + moder->rx_dim_work = rx_dim_work; + moder->dim_rx_mode = rx_mode; + rxp = kmemdup(rx_profile[rx_mode], len, GFP_KERNEL); + if (!rxp) + goto free_moder; + + rcu_assign_pointer(moder->rx_profile, rxp); + } + + if (profile_flags & DIM_PROFILE_TX) { + moder->tx_dim_work = tx_dim_work; + moder->dim_tx_mode = tx_mode; + txp = kmemdup(tx_profile[tx_mode], len, GFP_KERNEL); + if (!txp) + goto free_rxp; + + rcu_assign_pointer(moder->tx_profile, txp); + } + + return 0; + +free_rxp: + kfree(rxp); +free_moder: + kfree(moder); + return -ENOMEM; +} +EXPORT_SYMBOL(net_dim_init_irq_moder); + +/* RTNL lock is held.
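A driver adopting this per-netdev interface wires it up once at probe time; a hedged sketch (driver-side function and callback names are placeholders, and coal_flags is whatever subset of ethtool coalesce fields the driver honours):

```c
static int my_drv_init_dim(struct net_device *netdev)
{
	int err;

	err = net_dim_init_irq_moder(netdev,
				     DIM_PROFILE_RX | DIM_PROFILE_TX,
				     0 /* coal_flags */,
				     DIM_CQ_PERIOD_MODE_START_FROM_EQE,
				     DIM_CQ_PERIOD_MODE_START_FROM_EQE,
				     my_rx_dim_work, my_tx_dim_work);
	if (err)
		return err;

	/* later, per RX queue: net_dim_setting(netdev, &rq->dim, false); */
	return 0;
}
```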
*/ +void net_dim_free_irq_moder(struct net_device *dev) +{ + struct dim_cq_moder *rxp, *txp; + + if (!dev->irq_moder) + return; + + rxp = rtnl_dereference(dev->irq_moder->rx_profile); + txp = rtnl_dereference(dev->irq_moder->tx_profile); + + rcu_assign_pointer(dev->irq_moder->rx_profile, NULL); + rcu_assign_pointer(dev->irq_moder->tx_profile, NULL); + + kfree_rcu(rxp, rcu); + kfree_rcu(txp, rcu); + kfree(dev->irq_moder); +} +EXPORT_SYMBOL(net_dim_free_irq_moder); + +void net_dim_setting(struct net_device *dev, struct dim *dim, bool is_tx) +{ + struct dim_irq_moder *irq_moder = dev->irq_moder; + + if (!irq_moder) + return; + + if (is_tx) { + INIT_WORK(&dim->work, irq_moder->tx_dim_work); + dim->mode = READ_ONCE(irq_moder->dim_tx_mode); + return; + } + + INIT_WORK(&dim->work, irq_moder->rx_dim_work); + dim->mode = READ_ONCE(irq_moder->dim_rx_mode); +} +EXPORT_SYMBOL(net_dim_setting); + +void net_dim_work_cancel(struct dim *dim) +{ + cancel_work_sync(&dim->work); +} +EXPORT_SYMBOL(net_dim_work_cancel); + +struct dim_cq_moder net_dim_get_rx_irq_moder(struct net_device *dev, + struct dim *dim) +{ + struct dim_cq_moder res, *profile; + + rcu_read_lock(); + profile = rcu_dereference(dev->irq_moder->rx_profile); + res = profile[dim->profile_ix]; + rcu_read_unlock(); + + res.cq_period_mode = dim->mode; + + return res; +} +EXPORT_SYMBOL(net_dim_get_rx_irq_moder); + +struct dim_cq_moder net_dim_get_tx_irq_moder(struct net_device *dev, + struct dim *dim) +{ + struct dim_cq_moder res, *profile; + + rcu_read_lock(); + profile = rcu_dereference(dev->irq_moder->tx_profile); + res = profile[dim->profile_ix]; + rcu_read_unlock(); + + res.cq_period_mode = dim->mode; + + return res; +} +EXPORT_SYMBOL(net_dim_get_tx_irq_moder); + +void net_dim_set_rx_mode(struct net_device *dev, u8 rx_mode) +{ + WRITE_ONCE(dev->irq_moder->dim_rx_mode, rx_mode); +} +EXPORT_SYMBOL(net_dim_set_rx_mode); + +void net_dim_set_tx_mode(struct net_device *dev, u8 tx_mode) +{ + WRITE_ONCE(dev->irq_moder->dim_tx_mode, tx_mode); +} +EXPORT_SYMBOL(net_dim_set_tx_mode); + static int net_dim_step(struct dim *dim) { if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2)) diff --git a/lib/objagg.c b/lib/objagg.c index 1e248629ed64..363e43e849ac 100644 --- a/lib/objagg.c +++ b/lib/objagg.c @@ -167,6 +167,9 @@ static int objagg_obj_parent_assign(struct objagg *objagg, { void *delta_priv; + if (WARN_ON(!objagg_obj_is_root(parent))) + return -EINVAL; + delta_priv = objagg->ops->delta_create(objagg->priv, parent->obj, objagg_obj->obj); if (IS_ERR(delta_priv)) @@ -421,7 +424,7 @@ static struct objagg_obj *__objagg_obj_get(struct objagg *objagg, void *obj) * * There are 3 main options this function wraps: * 1) The object according to "obj" already exist. In that case - * the reference counter is incrementes and the object is returned. + * the reference counter is incremented and the object is returned. * 2) The object does not exist, but it can be aggregated within * another object. 
In that case, user ops->delta_create() is called * to obtain delta data and a new object is created with returned @@ -903,20 +906,6 @@ static const struct objagg_opt_algo *objagg_opt_algos[] = { [OBJAGG_OPT_ALGO_SIMPLE_GREEDY] = &objagg_opt_simple_greedy, }; -static int objagg_hints_obj_cmp(struct rhashtable_compare_arg *arg, - const void *obj) -{ - struct rhashtable *ht = arg->ht; - struct objagg_hints *objagg_hints = - container_of(ht, struct objagg_hints, node_ht); - const struct objagg_ops *ops = objagg_hints->ops; - const char *ptr = obj; - - ptr += ht->p.key_offset; - return ops->hints_obj_cmp ? ops->hints_obj_cmp(ptr, arg->key) : - memcmp(ptr, arg->key, ht->p.key_len); -} - /** * objagg_hints_get - obtains hints instance * @objagg: objagg instance @@ -955,7 +944,6 @@ struct objagg_hints *objagg_hints_get(struct objagg *objagg, offsetof(struct objagg_hints_node, obj); objagg_hints->ht_params.head_offset = offsetof(struct objagg_hints_node, ht_node); - objagg_hints->ht_params.obj_cmpfn = objagg_hints_obj_cmp; err = rhashtable_init(&objagg_hints->node_ht, &objagg_hints->ht_params); if (err) diff --git a/lib/test_bpf.c b/lib/test_bpf.c index 207ff87194db..b7acc29bcc3b 100644 --- a/lib/test_bpf.c +++ b/lib/test_bpf.c @@ -15198,6 +15198,7 @@ struct tail_call_test { int flags; int result; int stack_depth; + bool has_tail_call; }; /* Flags that can be passed to tail call test cases */ @@ -15273,6 +15274,7 @@ static struct tail_call_test tail_call_tests[] = { BPF_EXIT_INSN(), }, .result = 3, + .has_tail_call = true, }, { "Tail call 3", @@ -15283,6 +15285,7 @@ static struct tail_call_test tail_call_tests[] = { BPF_EXIT_INSN(), }, .result = 6, + .has_tail_call = true, }, { "Tail call 4", @@ -15293,6 +15296,7 @@ static struct tail_call_test tail_call_tests[] = { BPF_EXIT_INSN(), }, .result = 10, + .has_tail_call = true, }, { "Tail call load/store leaf", @@ -15323,6 +15327,7 @@ static struct tail_call_test tail_call_tests[] = { }, .result = 0, .stack_depth = 16, + .has_tail_call = true, }, { "Tail call error path, max count reached", @@ -15335,6 +15340,7 @@ static struct tail_call_test tail_call_tests[] = { }, .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE, .result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS, + .has_tail_call = true, }, { "Tail call count preserved across function calls", @@ -15357,6 +15363,7 @@ static struct tail_call_test tail_call_tests[] = { .stack_depth = 8, .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE, .result = (MAX_TAIL_CALL_CNT + 1) * MAX_TESTRUNS, + .has_tail_call = true, }, { "Tail call error path, NULL target", @@ -15369,6 +15376,7 @@ static struct tail_call_test tail_call_tests[] = { }, .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE, .result = MAX_TESTRUNS, + .has_tail_call = true, }, { "Tail call error path, index out of range", @@ -15381,6 +15389,7 @@ static struct tail_call_test tail_call_tests[] = { }, .flags = FLAG_NEED_STATE | FLAG_RESULT_IN_STATE, .result = MAX_TESTRUNS, + .has_tail_call = true, }, }; @@ -15430,6 +15439,7 @@ static __init int prepare_tail_call_tests(struct bpf_array **pprogs) fp->len = len; fp->type = BPF_PROG_TYPE_SOCKET_FILTER; fp->aux->stack_depth = test->stack_depth; + fp->aux->tail_call_reachable = test->has_tail_call; memcpy(fp->insnsi, test->insns, len * sizeof(struct bpf_insn)); /* Relocate runtime tail call offsets and addresses */ @@ -15706,4 +15716,5 @@ static void __exit test_bpf_exit(void) module_init(test_bpf_init); module_exit(test_bpf_exit); +MODULE_DESCRIPTION("Testsuite for BPF interpreter and BPF JIT compiler"); 
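[Editor's note] The .has_tail_call flag added to tail_call_tests above feeds fp->aux->tail_call_reachable in prepare_tail_call_tests(), the hint that JITs consult when deciding whether to emit tail-call prologue state (such as the tail-call counter) for a program. A minimal sketch of how a future case would opt in; the instruction mix, map index, and expected result are illustrative only and assume this file's existing TAIL_CALL() helper and R0 register alias:

	{
		"Tail call: hypothetical new case",
		.insns = {
			TAIL_CALL(0),			/* TAIL_CALL() is this file's helper; index illustrative */
			BPF_ALU64_IMM(BPF_MOV, R0, -1),	/* reached only if the tail call fails */
			BPF_EXIT_INSN(),
		},
		.result = -1,			/* placeholder: depends on the target program */
		.has_tail_call = true,		/* mark the program as tail-call reachable */
	},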
MODULE_LICENSE("GPL"); diff --git a/lib/test_objagg.c b/lib/test_objagg.c index c0c957c50635..d34df4306b87 100644 --- a/lib/test_objagg.c +++ b/lib/test_objagg.c @@ -60,7 +60,7 @@ static struct objagg_obj *world_obj_get(struct world *world, if (!world->key_refs[key_id_index(key_id)]) { world->objagg_objs[key_id_index(key_id)] = objagg_obj; } else if (world->objagg_objs[key_id_index(key_id)] != objagg_obj) { - pr_err("Key %u: God another object for the same key.\n", + pr_err("Key %u: Got another object for the same key.\n", key_id); err = -EINVAL; goto err_key_id_check; diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 3efba4f857ac..217be32426b5 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -677,7 +677,7 @@ static void vlan_ethtool_get_drvinfo(struct net_device *dev, } static int vlan_ethtool_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *info) + struct kernel_ethtool_ts_info *info) { const struct vlan_dev_priv *vlan = vlan_dev_priv(dev); return ethtool_get_ts_info_by_layer(vlan->real_dev, info); diff --git a/net/Kconfig b/net/Kconfig index f0a8692496ff..d27d0deac0bf 100644 --- a/net/Kconfig +++ b/net/Kconfig @@ -290,15 +290,21 @@ config MAX_SKB_FRAGS If unsure, say 17. config RPS - bool + bool "Receive packet steering" depends on SMP && SYSFS default y + help + Software receive side packet steering (RPS) distributes the + load of received packet processing across multiple CPUs. config RFS_ACCEL - bool + bool "Hardware acceleration of RFS" depends on RPS select CPU_RMAP default y + help + Allowing drivers for multiqueue hardware with flow filter tables to + accelerate RFS. config SOCK_RX_QUEUE_MAPPING bool @@ -351,7 +357,7 @@ config BPF_STREAM_PARSER BPF_MAP_TYPE_SOCKMAP. config NET_FLOW_LIMIT - bool + bool "Net flow limit" depends on RPS default y help @@ -502,6 +508,7 @@ config FAILOVER config ETHTOOL_NETLINK bool "Netlink interface for ethtool" + select DIMLIB default y help An alternative userspace interface for ethtool based on generic diff --git a/net/atm/ioctl.c b/net/atm/ioctl.c index f81f8d56f5c0..0f7a39aeccc8 100644 --- a/net/atm/ioctl.c +++ b/net/atm/ioctl.c @@ -68,7 +68,7 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd, goto done; } error = put_user(sk->sk_sndbuf - sk_wmem_alloc_get(sk), - (int __user *)argp) ? -EFAULT : 0; + (int __user *)argp); goto done; case SIOCINQ: { @@ -83,7 +83,7 @@ static int do_vcc_ioctl(struct socket *sock, unsigned int cmd, skb = skb_peek(&sk->sk_receive_queue); amount = skb ? skb->len : 0; spin_unlock_irq(&sk->sk_receive_queue.lock); - error = put_user(amount, (int __user *)argp) ? 
-EFAULT : 0; + error = put_user(amount, (int __user *)argp); goto done; } case ATM_SETSC: diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile index 628d448d78be..5a3835b7dfcd 100644 --- a/net/bluetooth/Makefile +++ b/net/bluetooth/Makefile @@ -14,8 +14,7 @@ bluetooth_6lowpan-y := 6lowpan.o bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \ - ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o hci_codec.o \ - eir.o hci_sync.o + ecdh_helper.o mgmt_util.o mgmt_config.o hci_codec.o eir.o hci_sync.o bluetooth-$(CONFIG_DEV_COREDUMP) += coredump.o diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 080053a85b4d..8e48ccd2af30 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -34,7 +34,6 @@ #include #include -#include "hci_request.h" #include "smp.h" #include "eir.h" diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index c644b30977bd..8a4ebd93adfc 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -40,7 +40,6 @@ #include #include -#include "hci_request.h" #include "hci_debugfs.h" #include "smp.h" #include "leds.h" @@ -312,33 +311,12 @@ static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) return copied; } -static int hci_inq_req(struct hci_request *req, unsigned long opt) -{ - struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt; - struct hci_dev *hdev = req->hdev; - struct hci_cp_inquiry cp; - - BT_DBG("%s", hdev->name); - - if (test_bit(HCI_INQUIRY, &hdev->flags)) - return 0; - - /* Start Inquiry */ - memcpy(&cp.lap, &ir->lap, 3); - cp.length = ir->length; - cp.num_rsp = ir->num_rsp; - hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); - - return 0; -} - int hci_inquiry(void __user *arg) { __u8 __user *ptr = arg; struct hci_inquiry_req ir; struct hci_dev *hdev; int err = 0, do_inquiry = 0, max_rsp; - long timeo; __u8 *buf; if (copy_from_user(&ir, ptr, sizeof(ir))) @@ -377,11 +355,11 @@ int hci_inquiry(void __user *arg) } hci_dev_unlock(hdev); - timeo = ir.length * msecs_to_jiffies(2000); - if (do_inquiry) { - err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir, - timeo, NULL); + hci_req_sync_lock(hdev); + err = hci_inquiry_sync(hdev, ir.length, ir.num_rsp); + hci_req_sync_unlock(hdev); + if (err < 0) goto done; @@ -718,8 +696,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) switch (cmd) { case HCISETAUTH: - err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, - 1, &dr.dev_opt, HCI_CMD_TIMEOUT); + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, + 1, &dr.dev_opt, HCI_CMD_TIMEOUT); break; case HCISETENCRYPT: @@ -730,23 +708,21 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) if (!test_bit(HCI_AUTH, &hdev->flags)) { /* Auth must be enabled first */ - err = __hci_cmd_sync_status(hdev, - HCI_OP_WRITE_AUTH_ENABLE, - 1, &dr.dev_opt, - HCI_CMD_TIMEOUT); + err = hci_cmd_sync_status(hdev, + HCI_OP_WRITE_AUTH_ENABLE, + 1, &dr.dev_opt, + HCI_CMD_TIMEOUT); if (err) break; } - err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE, - 1, &dr.dev_opt, - HCI_CMD_TIMEOUT); + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_ENCRYPT_MODE, + 1, &dr.dev_opt, HCI_CMD_TIMEOUT); break; case HCISETSCAN: - err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, - 1, &dr.dev_opt, - HCI_CMD_TIMEOUT); + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, + 1, &dr.dev_opt, HCI_CMD_TIMEOUT); /* Ensure that the connectable and discoverable states * get correctly modified as this was a 
non-mgmt change. @@ -758,9 +734,8 @@ int hci_dev_cmd(unsigned int cmd, void __user *arg) case HCISETLINKPOL: policy = cpu_to_le16(dr.dev_opt); - err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, - 2, &policy, - HCI_CMD_TIMEOUT); + err = hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, + 2, &policy, HCI_CMD_TIMEOUT); break; case HCISETLINKMODE: @@ -801,7 +776,7 @@ int hci_get_dev_list(void __user *arg) struct hci_dev *hdev; struct hci_dev_list_req *dl; struct hci_dev_req *dr; - int n = 0, size, err; + int n = 0, err; __u16 dev_num; if (get_user(dev_num, (__u16 __user *) arg)) @@ -810,12 +785,11 @@ int hci_get_dev_list(void __user *arg) if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) return -EINVAL; - size = sizeof(*dl) + dev_num * sizeof(*dr); - - dl = kzalloc(size, GFP_KERNEL); + dl = kzalloc(struct_size(dl, dev_req, dev_num), GFP_KERNEL); if (!dl) return -ENOMEM; + dl->dev_num = dev_num; dr = dl->dev_req; read_lock(&hci_dev_list_lock); @@ -829,8 +803,8 @@ int hci_get_dev_list(void __user *arg) if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) flags &= ~BIT(HCI_UP); - (dr + n)->dev_id = hdev->id; - (dr + n)->dev_opt = flags; + dr[n].dev_id = hdev->id; + dr[n].dev_opt = flags; if (++n >= dev_num) break; @@ -838,9 +812,7 @@ int hci_get_dev_list(void __user *arg) read_unlock(&hci_dev_list_lock); dl->dev_num = n; - size = sizeof(*dl) + n * sizeof(*dr); - - err = copy_to_user(arg, dl, size); + err = copy_to_user(arg, dl, struct_size(dl, dev_req, n)); kfree(dl); return err ? -EFAULT : 0; @@ -2579,7 +2551,6 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout); hci_devcd_setup(hdev); - hci_request_setup(hdev); hci_init_sysfs(hdev); discovery_init(hdev); @@ -2912,15 +2883,31 @@ int hci_reset_dev(struct hci_dev *hdev) } EXPORT_SYMBOL(hci_reset_dev); +static u8 hci_dev_classify_pkt_type(struct hci_dev *hdev, struct sk_buff *skb) +{ + if (hdev->classify_pkt_type) + return hdev->classify_pkt_type(hdev, skb); + + return hci_skb_pkt_type(skb); +} + /* Receive frame from HCI drivers */ int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) { + u8 dev_pkt_type; + if (!hdev || (!test_bit(HCI_UP, &hdev->flags) && !test_bit(HCI_INIT, &hdev->flags))) { kfree_skb(skb); return -ENXIO; } + /* Check if the driver agrees with packet type classification */ + dev_pkt_type = hci_dev_classify_pkt_type(hdev, skb); + if (hci_skb_pkt_type(skb) != dev_pkt_type) { + hci_skb_pkt_type(skb) = dev_pkt_type; + } + switch (hci_skb_pkt_type(skb)) { case HCI_EVENT_PKT: break; @@ -3065,7 +3052,7 @@ int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); - skb = hci_prepare_cmd(hdev, opcode, plen, param); + skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL); if (!skb) { bt_dev_err(hdev, "no memory for command"); return -ENOMEM; } @@ -3100,7 +3087,7 @@ int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, return -EINVAL; } - skb = hci_prepare_cmd(hdev, opcode, plen, param); + skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, NULL); if (!skb) { bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", opcode); @@ -4085,7 +4072,7 @@ static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb) return; } - if (hci_req_status_pend(hdev) && + if (hdev->req_status == HCI_REQ_PEND && !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) { kfree_skb(hdev->req_skb); hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL); diff --git a/net/bluetooth/hci_debugfs.c 
b/net/bluetooth/hci_debugfs.c index ce3ff2fa72e5..f625074d1f00 100644 --- a/net/bluetooth/hci_debugfs.c +++ b/net/bluetooth/hci_debugfs.c @@ -28,7 +28,6 @@ #include #include "smp.h" -#include "hci_request.h" #include "hci_debugfs.h" #define DEFINE_QUIRK_ATTRIBUTE(__name, __quirk) \ diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 93f7ac905cec..dce8035ca799 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -33,7 +33,6 @@ #include #include -#include "hci_request.h" #include "hci_debugfs.h" #include "hci_codec.h" #include "smp.h" @@ -6988,6 +6987,8 @@ static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data, if (!pa_sync) goto unlock; + pa_sync->iso_qos.bcast.encryption = ev->encryption; + /* Notify iso layer */ hci_connect_cfm(pa_sync, 0); diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c deleted file mode 100644 index efea25eb56ce..000000000000 --- a/net/bluetooth/hci_request.c +++ /dev/null @@ -1,903 +0,0 @@ -/* - BlueZ - Bluetooth protocol stack for Linux - - Copyright (C) 2014 Intel Corporation - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License version 2 as - published by the Free Software Foundation; - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. - IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY - CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - - ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, - COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS - SOFTWARE IS DISCLAIMED. -*/ - -#include - -#include -#include -#include - -#include "smp.h" -#include "hci_request.h" -#include "msft.h" -#include "eir.h" - -void hci_req_init(struct hci_request *req, struct hci_dev *hdev) -{ - skb_queue_head_init(&req->cmd_q); - req->hdev = hdev; - req->err = 0; -} - -void hci_req_purge(struct hci_request *req) -{ - skb_queue_purge(&req->cmd_q); -} - -bool hci_req_status_pend(struct hci_dev *hdev) -{ - return hdev->req_status == HCI_REQ_PEND; -} - -static int req_run(struct hci_request *req, hci_req_complete_t complete, - hci_req_complete_skb_t complete_skb) -{ - struct hci_dev *hdev = req->hdev; - struct sk_buff *skb; - unsigned long flags; - - bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q)); - - /* If an error occurred during request building, remove all HCI - * commands queued on the HCI request queue. 
- */ - if (req->err) { - skb_queue_purge(&req->cmd_q); - return req->err; - } - - /* Do not allow empty requests */ - if (skb_queue_empty(&req->cmd_q)) - return -ENODATA; - - skb = skb_peek_tail(&req->cmd_q); - if (complete) { - bt_cb(skb)->hci.req_complete = complete; - } else if (complete_skb) { - bt_cb(skb)->hci.req_complete_skb = complete_skb; - bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB; - } - - spin_lock_irqsave(&hdev->cmd_q.lock, flags); - skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); - spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); - - queue_work(hdev->workqueue, &hdev->cmd_work); - - return 0; -} - -int hci_req_run(struct hci_request *req, hci_req_complete_t complete) -{ - return req_run(req, complete, NULL); -} - -int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete) -{ - return req_run(req, NULL, complete); -} - -void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, - struct sk_buff *skb) -{ - bt_dev_dbg(hdev, "result 0x%2.2x", result); - - if (hdev->req_status == HCI_REQ_PEND) { - hdev->req_result = result; - hdev->req_status = HCI_REQ_DONE; - if (skb) { - kfree_skb(hdev->req_skb); - hdev->req_skb = skb_get(skb); - } - wake_up_interruptible(&hdev->req_wait_q); - } -} - -/* Execute request and wait for completion. */ -int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, - unsigned long opt), - unsigned long opt, u32 timeout, u8 *hci_status) -{ - struct hci_request req; - int err = 0; - - bt_dev_dbg(hdev, "start"); - - hci_req_init(&req, hdev); - - hdev->req_status = HCI_REQ_PEND; - - err = func(&req, opt); - if (err) { - if (hci_status) - *hci_status = HCI_ERROR_UNSPECIFIED; - return err; - } - - err = hci_req_run_skb(&req, hci_req_sync_complete); - if (err < 0) { - hdev->req_status = 0; - - /* ENODATA means the HCI request command queue is empty. - * This can happen when a request with conditionals doesn't - * trigger any commands to be sent. This is normal behavior - * and should not trigger an error return. - */ - if (err == -ENODATA) { - if (hci_status) - *hci_status = 0; - return 0; - } - - if (hci_status) - *hci_status = HCI_ERROR_UNSPECIFIED; - - return err; - } - - err = wait_event_interruptible_timeout(hdev->req_wait_q, - hdev->req_status != HCI_REQ_PEND, timeout); - - if (err == -ERESTARTSYS) - return -EINTR; - - switch (hdev->req_status) { - case HCI_REQ_DONE: - err = -bt_to_errno(hdev->req_result); - if (hci_status) - *hci_status = hdev->req_result; - break; - - case HCI_REQ_CANCELED: - err = -hdev->req_result; - if (hci_status) - *hci_status = HCI_ERROR_UNSPECIFIED; - break; - - default: - err = -ETIMEDOUT; - if (hci_status) - *hci_status = HCI_ERROR_UNSPECIFIED; - break; - } - - kfree_skb(hdev->req_skb); - hdev->req_skb = NULL; - hdev->req_status = hdev->req_result = 0; - - bt_dev_dbg(hdev, "end: err %d", err); - - return err; -} - -int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, - unsigned long opt), - unsigned long opt, u32 timeout, u8 *hci_status) -{ - int ret; - - /* Serialize all requests */ - hci_req_sync_lock(hdev); - /* check the state after obtaing the lock to protect the HCI_UP - * against any races from hci_dev_do_close when the controller - * gets removed. 
- */ - if (test_bit(HCI_UP, &hdev->flags)) - ret = __hci_req_sync(hdev, req, opt, timeout, hci_status); - else - ret = -ENETDOWN; - hci_req_sync_unlock(hdev); - - return ret; -} - -struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, - const void *param) -{ - int len = HCI_COMMAND_HDR_SIZE + plen; - struct hci_command_hdr *hdr; - struct sk_buff *skb; - - skb = bt_skb_alloc(len, GFP_ATOMIC); - if (!skb) - return NULL; - - hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE); - hdr->opcode = cpu_to_le16(opcode); - hdr->plen = plen; - - if (plen) - skb_put_data(skb, param, plen); - - bt_dev_dbg(hdev, "skb len %d", skb->len); - - hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; - hci_skb_opcode(skb) = opcode; - - return skb; -} - -/* Queue a command to an asynchronous HCI request */ -void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, - const void *param, u8 event) -{ - struct hci_dev *hdev = req->hdev; - struct sk_buff *skb; - - bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); - - /* If an error occurred during request building, there is no point in - * queueing the HCI command. We can simply return. - */ - if (req->err) - return; - - skb = hci_prepare_cmd(hdev, opcode, plen, param); - if (!skb) { - bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", - opcode); - req->err = -ENOMEM; - return; - } - - if (skb_queue_empty(&req->cmd_q)) - bt_cb(skb)->hci.req_flags |= HCI_REQ_START; - - hci_skb_event(skb) = event; - - skb_queue_tail(&req->cmd_q, skb); -} - -void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, - const void *param) -{ - bt_dev_dbg(req->hdev, "HCI_REQ-0x%4.4x", opcode); - hci_req_add_ev(req, opcode, plen, param, 0); -} - -static void start_interleave_scan(struct hci_dev *hdev) -{ - hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; - queue_delayed_work(hdev->req_workqueue, - &hdev->interleave_scan, 0); -} - -static bool is_interleave_scanning(struct hci_dev *hdev) -{ - return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; -} - -static void cancel_interleave_scan(struct hci_dev *hdev) -{ - bt_dev_dbg(hdev, "cancelling interleave scan"); - - cancel_delayed_work_sync(&hdev->interleave_scan); - - hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE; -} - -/* Return true if interleave_scan wasn't started until exiting this function, - * otherwise, return false - */ -static bool __hci_update_interleaved_scan(struct hci_dev *hdev) -{ - /* Do interleaved scan only if all of the following are true: - * - There is at least one ADV monitor - * - At least one pending LE connection or one device to be scanned for - * - Monitor offloading is not supported - * If so, we should alternate between allowlist scan and one without - * any filters to save power. 
- */ - bool use_interleaving = hci_is_adv_monitoring(hdev) && - !(list_empty(&hdev->pend_le_conns) && - list_empty(&hdev->pend_le_reports)) && - hci_get_adv_monitor_offload_ext(hdev) == - HCI_ADV_MONITOR_EXT_NONE; - bool is_interleaving = is_interleave_scanning(hdev); - - if (use_interleaving && !is_interleaving) { - start_interleave_scan(hdev); - bt_dev_dbg(hdev, "starting interleave scan"); - return true; - } - - if (!use_interleaving && is_interleaving) - cancel_interleave_scan(hdev); - - return false; -} - -void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn) -{ - struct hci_dev *hdev = req->hdev; - - if (hdev->scanning_paused) { - bt_dev_dbg(hdev, "Scanning is paused for suspend"); - return; - } - - if (use_ext_scan(hdev)) { - struct hci_cp_le_set_ext_scan_enable cp; - - memset(&cp, 0, sizeof(cp)); - cp.enable = LE_SCAN_DISABLE; - hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp), - &cp); - } else { - struct hci_cp_le_set_scan_enable cp; - - memset(&cp, 0, sizeof(cp)); - cp.enable = LE_SCAN_DISABLE; - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); - } - - /* Disable address resolution */ - if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) { - __u8 enable = 0x00; - - hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); - } -} - -static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr, - u8 bdaddr_type) -{ - struct hci_cp_le_del_from_accept_list cp; - - cp.bdaddr_type = bdaddr_type; - bacpy(&cp.bdaddr, bdaddr); - - bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr, - cp.bdaddr_type); - hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp); - - if (use_ll_privacy(req->hdev)) { - struct smp_irk *irk; - - irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type); - if (irk) { - struct hci_cp_le_del_from_resolv_list cp; - - cp.bdaddr_type = bdaddr_type; - bacpy(&cp.bdaddr, bdaddr); - - hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST, - sizeof(cp), &cp); - } - } -} - -/* Adds connection to accept list if needed. On error, returns -1. 
*/ -static int add_to_accept_list(struct hci_request *req, - struct hci_conn_params *params, u8 *num_entries, - bool allow_rpa) -{ - struct hci_cp_le_add_to_accept_list cp; - struct hci_dev *hdev = req->hdev; - - /* Already in accept list */ - if (hci_bdaddr_list_lookup(&hdev->le_accept_list, ¶ms->addr, - params->addr_type)) - return 0; - - /* Select filter policy to accept all advertising */ - if (*num_entries >= hdev->le_accept_list_size) - return -1; - - /* Accept list can not be used with RPAs */ - if (!allow_rpa && - !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && - hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) { - return -1; - } - - /* During suspend, only wakeable devices can be in accept list */ - if (hdev->suspended && - !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) - return 0; - - *num_entries += 1; - cp.bdaddr_type = params->addr_type; - bacpy(&cp.bdaddr, ¶ms->addr); - - bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr, - cp.bdaddr_type); - hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp); - - if (use_ll_privacy(hdev)) { - struct smp_irk *irk; - - irk = hci_find_irk_by_addr(hdev, ¶ms->addr, - params->addr_type); - if (irk) { - struct hci_cp_le_add_to_resolv_list cp; - - cp.bdaddr_type = params->addr_type; - bacpy(&cp.bdaddr, ¶ms->addr); - memcpy(cp.peer_irk, irk->val, 16); - - if (hci_dev_test_flag(hdev, HCI_PRIVACY)) - memcpy(cp.local_irk, hdev->irk, 16); - else - memset(cp.local_irk, 0, 16); - - hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST, - sizeof(cp), &cp); - } - } - - return 0; -} - -static u8 update_accept_list(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - struct hci_conn_params *params; - struct bdaddr_list *b; - u8 num_entries = 0; - bool pend_conn, pend_report; - /* We allow usage of accept list even with RPAs in suspend. In the worst - * case, we won't be able to wake from devices that use the privacy1.2 - * features. Additionally, once we support privacy1.2 and IRK - * offloading, we can update this to also check for those conditions. - */ - bool allow_rpa = hdev->suspended; - - if (use_ll_privacy(hdev)) - allow_rpa = true; - - /* Go through the current accept list programmed into the - * controller one by one and check if that address is still - * in the list of pending connections or list of devices to - * report. If not present in either list, then queue the - * command to remove it from the controller. - */ - list_for_each_entry(b, &hdev->le_accept_list, list) { - pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns, - &b->bdaddr, - b->bdaddr_type); - pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports, - &b->bdaddr, - b->bdaddr_type); - - /* If the device is not likely to connect or report, - * remove it from the accept list. - */ - if (!pend_conn && !pend_report) { - del_from_accept_list(req, &b->bdaddr, b->bdaddr_type); - continue; - } - - /* Accept list can not be used with RPAs */ - if (!allow_rpa && - !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && - hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) { - return 0x00; - } - - num_entries++; - } - - /* Since all no longer valid accept list entries have been - * removed, walk through the list of pending connections - * and ensure that any new device gets programmed into - * the controller. - * - * If the list of the devices is larger than the list of - * available accept list entries in the controller, then - * just abort and return filer policy value to not use the - * accept list. 
- */ - list_for_each_entry(params, &hdev->pend_le_conns, action) { - if (add_to_accept_list(req, params, &num_entries, allow_rpa)) - return 0x00; - } - - /* After adding all new pending connections, walk through - * the list of pending reports and also add these to the - * accept list if there is still space. Abort if space runs out. - */ - list_for_each_entry(params, &hdev->pend_le_reports, action) { - if (add_to_accept_list(req, params, &num_entries, allow_rpa)) - return 0x00; - } - - /* Use the allowlist unless the following conditions are all true: - * - We are not currently suspending - * - There are 1 or more ADV monitors registered and it's not offloaded - * - Interleaved scanning is not currently using the allowlist - */ - if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended && - hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE && - hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST) - return 0x00; - - /* Select filter policy to use accept list */ - return 0x01; -} - -static bool scan_use_rpa(struct hci_dev *hdev) -{ - return hci_dev_test_flag(hdev, HCI_PRIVACY); -} - -static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval, - u16 window, u8 own_addr_type, u8 filter_policy, - bool filter_dup, bool addr_resolv) -{ - struct hci_dev *hdev = req->hdev; - - if (hdev->scanning_paused) { - bt_dev_dbg(hdev, "Scanning is paused for suspend"); - return; - } - - if (use_ll_privacy(hdev) && addr_resolv) { - u8 enable = 0x01; - - hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); - } - - /* Use ext scanning if set ext scan param and ext scan enable is - * supported - */ - if (use_ext_scan(hdev)) { - struct hci_cp_le_set_ext_scan_params *ext_param_cp; - struct hci_cp_le_set_ext_scan_enable ext_enable_cp; - struct hci_cp_le_scan_phy_params *phy_params; - u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2]; - u32 plen; - - ext_param_cp = (void *)data; - phy_params = (void *)ext_param_cp->data; - - memset(ext_param_cp, 0, sizeof(*ext_param_cp)); - ext_param_cp->own_addr_type = own_addr_type; - ext_param_cp->filter_policy = filter_policy; - - plen = sizeof(*ext_param_cp); - - if (scan_1m(hdev) || scan_2m(hdev)) { - ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M; - - memset(phy_params, 0, sizeof(*phy_params)); - phy_params->type = type; - phy_params->interval = cpu_to_le16(interval); - phy_params->window = cpu_to_le16(window); - - plen += sizeof(*phy_params); - phy_params++; - } - - if (scan_coded(hdev)) { - ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED; - - memset(phy_params, 0, sizeof(*phy_params)); - phy_params->type = type; - phy_params->interval = cpu_to_le16(interval); - phy_params->window = cpu_to_le16(window); - - plen += sizeof(*phy_params); - phy_params++; - } - - hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS, - plen, ext_param_cp); - - memset(&ext_enable_cp, 0, sizeof(ext_enable_cp)); - ext_enable_cp.enable = LE_SCAN_ENABLE; - ext_enable_cp.filter_dup = filter_dup; - - hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, - sizeof(ext_enable_cp), &ext_enable_cp); - } else { - struct hci_cp_le_set_scan_param param_cp; - struct hci_cp_le_set_scan_enable enable_cp; - - memset(¶m_cp, 0, sizeof(param_cp)); - param_cp.type = type; - param_cp.interval = cpu_to_le16(interval); - param_cp.window = cpu_to_le16(window); - param_cp.own_address_type = own_addr_type; - param_cp.filter_policy = filter_policy; - hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), - ¶m_cp); - - memset(&enable_cp, 0, sizeof(enable_cp)); - 
enable_cp.enable = LE_SCAN_ENABLE; - enable_cp.filter_dup = filter_dup; - hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), - &enable_cp); - } -} - -static void set_random_addr(struct hci_request *req, bdaddr_t *rpa); -static int hci_update_random_address(struct hci_request *req, - bool require_privacy, bool use_rpa, - u8 *own_addr_type) -{ - struct hci_dev *hdev = req->hdev; - int err; - - /* If privacy is enabled use a resolvable private address. If - * current RPA has expired or there is something else than - * the current RPA in use, then generate a new one. - */ - if (use_rpa) { - /* If Controller supports LL Privacy use own address type is - * 0x03 - */ - if (use_ll_privacy(hdev)) - *own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; - else - *own_addr_type = ADDR_LE_DEV_RANDOM; - - if (rpa_valid(hdev)) - return 0; - - err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); - if (err < 0) { - bt_dev_err(hdev, "failed to generate new RPA"); - return err; - } - - set_random_addr(req, &hdev->rpa); - - return 0; - } - - /* In case of required privacy without resolvable private address, - * use an non-resolvable private address. This is useful for active - * scanning and non-connectable advertising. - */ - if (require_privacy) { - bdaddr_t nrpa; - - while (true) { - /* The non-resolvable private address is generated - * from random six bytes with the two most significant - * bits cleared. - */ - get_random_bytes(&nrpa, 6); - nrpa.b[5] &= 0x3f; - - /* The non-resolvable private address shall not be - * equal to the public address. - */ - if (bacmp(&hdev->bdaddr, &nrpa)) - break; - } - - *own_addr_type = ADDR_LE_DEV_RANDOM; - set_random_addr(req, &nrpa); - return 0; - } - - /* If forcing static address is in use or there is no public - * address use the static address as random address (but skip - * the HCI command if the current random address is already the - * static one. - * - * In case BR/EDR has been disabled on a dual-mode controller - * and a static address has been configured, then use that - * address instead of the public BR/EDR address. - */ - if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || - !bacmp(&hdev->bdaddr, BDADDR_ANY) || - (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && - bacmp(&hdev->static_addr, BDADDR_ANY))) { - *own_addr_type = ADDR_LE_DEV_RANDOM; - if (bacmp(&hdev->static_addr, &hdev->random_addr)) - hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, - &hdev->static_addr); - return 0; - } - - /* Neither privacy nor static address is being used so use a - * public address. - */ - *own_addr_type = ADDR_LE_DEV_PUBLIC; - - return 0; -} - -/* Ensure to call hci_req_add_le_scan_disable() first to disable the - * controller based address resolution to be able to reconfigure - * resolving list. - */ -void hci_req_add_le_passive_scan(struct hci_request *req) -{ - struct hci_dev *hdev = req->hdev; - u8 own_addr_type; - u8 filter_policy; - u16 window, interval; - /* Default is to enable duplicates filter */ - u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE; - /* Background scanning should run with address resolution */ - bool addr_resolv = true; - - if (hdev->scanning_paused) { - bt_dev_dbg(hdev, "Scanning is paused for suspend"); - return; - } - - /* Set require_privacy to false since no SCAN_REQ are send - * during passive scanning. Not using an non-resolvable address - * here is important so that peer devices using direct - * advertising with our address will be correctly reported - * by the controller. 
- */ - if (hci_update_random_address(req, false, scan_use_rpa(hdev), - &own_addr_type)) - return; - - if (hdev->enable_advmon_interleave_scan && - __hci_update_interleaved_scan(hdev)) - return; - - bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state); - /* Adding or removing entries from the accept list must - * happen before enabling scanning. The controller does - * not allow accept list modification while scanning. - */ - filter_policy = update_accept_list(req); - - /* When the controller is using random resolvable addresses and - * with that having LE privacy enabled, then controllers with - * Extended Scanner Filter Policies support can now enable support - * for handling directed advertising. - * - * So instead of using filter polices 0x00 (no accept list) - * and 0x01 (accept list enabled) use the new filter policies - * 0x02 (no accept list) and 0x03 (accept list enabled). - */ - if (hci_dev_test_flag(hdev, HCI_PRIVACY) && - (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY)) - filter_policy |= 0x02; - - if (hdev->suspended) { - window = hdev->le_scan_window_suspend; - interval = hdev->le_scan_int_suspend; - } else if (hci_is_le_conn_scanning(hdev)) { - window = hdev->le_scan_window_connect; - interval = hdev->le_scan_int_connect; - } else if (hci_is_adv_monitoring(hdev)) { - window = hdev->le_scan_window_adv_monitor; - interval = hdev->le_scan_int_adv_monitor; - - /* Disable duplicates filter when scanning for advertisement - * monitor for the following reasons. - * - * For HW pattern filtering (ex. MSFT), Realtek and Qualcomm - * controllers ignore RSSI_Sampling_Period when the duplicates - * filter is enabled. - * - * For SW pattern filtering, when we're not doing interleaved - * scanning, it is necessary to disable duplicates filter, - * otherwise hosts can only receive one advertisement and it's - * impossible to know if a peer is still in range. 
- */ - filter_dup = LE_SCAN_FILTER_DUP_DISABLE; - } else { - window = hdev->le_scan_window; - interval = hdev->le_scan_interval; - } - - bt_dev_dbg(hdev, "LE passive scan with accept list = %d", - filter_policy); - hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window, - own_addr_type, filter_policy, filter_dup, - addr_resolv); -} - -static int hci_req_add_le_interleaved_scan(struct hci_request *req, - unsigned long opt) -{ - struct hci_dev *hdev = req->hdev; - int ret = 0; - - hci_dev_lock(hdev); - - if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) - hci_req_add_le_scan_disable(req, false); - hci_req_add_le_passive_scan(req); - - switch (hdev->interleave_scan_state) { - case INTERLEAVE_SCAN_ALLOWLIST: - bt_dev_dbg(hdev, "next state: allowlist"); - hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; - break; - case INTERLEAVE_SCAN_NO_FILTER: - bt_dev_dbg(hdev, "next state: no filter"); - hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST; - break; - case INTERLEAVE_SCAN_NONE: - BT_ERR("unexpected error"); - ret = -1; - } - - hci_dev_unlock(hdev); - - return ret; -} - -static void interleave_scan_work(struct work_struct *work) -{ - struct hci_dev *hdev = container_of(work, struct hci_dev, - interleave_scan.work); - u8 status; - unsigned long timeout; - - if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) { - timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration); - } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) { - timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration); - } else { - bt_dev_err(hdev, "unexpected error"); - return; - } - - hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0, - HCI_CMD_TIMEOUT, &status); - - /* Don't continue interleaving if it was canceled */ - if (is_interleave_scanning(hdev)) - queue_delayed_work(hdev->req_workqueue, - &hdev->interleave_scan, timeout); -} - -static void set_random_addr(struct hci_request *req, bdaddr_t *rpa) -{ - struct hci_dev *hdev = req->hdev; - - /* If we're advertising or initiating an LE connection we can't - * go ahead and change the random address at this time. This is - * because the eventual initiator address used for the - * subsequently created connection will be undefined (some - * controllers use the new address and others the one we had - * when the operation started). - * - * In this kind of scenario skip the update and let the random - * address be updated at the next cycle. 
- */ - if (hci_dev_test_flag(hdev, HCI_LE_ADV) || - hci_lookup_le_connect(hdev)) { - bt_dev_dbg(hdev, "Deferring random address update"); - hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); - return; - } - - hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa); -} - -void hci_request_setup(struct hci_dev *hdev) -{ - INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work); -} - -void hci_request_cancel_all(struct hci_dev *hdev) -{ - hci_cmd_sync_cancel_sync(hdev, ENODEV); - - cancel_interleave_scan(hdev); -} diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h deleted file mode 100644 index c91f2838f542..000000000000 --- a/net/bluetooth/hci_request.h +++ /dev/null @@ -1,71 +0,0 @@ -/* - BlueZ - Bluetooth protocol stack for Linux - Copyright (C) 2014 Intel Corporation - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License version 2 as - published by the Free Software Foundation; - - THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS - OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. - IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY - CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES - WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - - ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, - COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS - SOFTWARE IS DISCLAIMED. -*/ - -#include - -#define HCI_REQ_DONE 0 -#define HCI_REQ_PEND 1 -#define HCI_REQ_CANCELED 2 - -#define hci_req_sync_lock(hdev) mutex_lock(&hdev->req_lock) -#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock) - -struct hci_request { - struct hci_dev *hdev; - struct sk_buff_head cmd_q; - - /* If something goes wrong when building the HCI request, the error - * value is stored in this field. 
- */ - int err; -}; - -void hci_req_init(struct hci_request *req, struct hci_dev *hdev); -void hci_req_purge(struct hci_request *req); -bool hci_req_status_pend(struct hci_dev *hdev); -int hci_req_run(struct hci_request *req, hci_req_complete_t complete); -int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete); -void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, - struct sk_buff *skb); -void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, - const void *param); -void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, - const void *param, u8 event); -void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, - hci_req_complete_t *req_complete, - hci_req_complete_skb_t *req_complete_skb); - -int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, - unsigned long opt), - unsigned long opt, u32 timeout, u8 *hci_status); -int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, - unsigned long opt), - unsigned long opt, u32 timeout, u8 *hci_status); - -struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, - const void *param); - -void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn); -void hci_req_add_le_passive_scan(struct hci_request *req); - -void hci_request_setup(struct hci_dev *hdev); -void hci_request_cancel_all(struct hci_dev *hdev); diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index eea34e6a236f..cd2ed16da8a4 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -12,7 +12,6 @@ #include #include -#include "hci_request.h" #include "hci_codec.h" #include "hci_debugfs.h" #include "smp.h" @@ -49,9 +48,8 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, wake_up_interruptible(&hdev->req_wait_q); } -static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, - u32 plen, const void *param, - struct sock *sk) +struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, struct sock *sk) { int len = HCI_COMMAND_HDR_SIZE + plen; struct hci_command_hdr *hdr; @@ -147,6 +145,13 @@ static int hci_cmd_sync_run(struct hci_request *req) return 0; } +static void hci_request_init(struct hci_request *req, struct hci_dev *hdev) +{ + skb_queue_head_init(&req->cmd_q); + req->hdev = hdev; + req->err = 0; +} + /* This function requires the caller holds hdev->req_lock. */ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, const void *param, u8 event, u32 timeout, @@ -158,7 +163,7 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode); - hci_req_init(&req, hdev); + hci_request_init(&req, hdev); hci_cmd_sync_add(&req, opcode, plen, param, event, sk); @@ -347,10 +352,9 @@ static int scan_disable_sync(struct hci_dev *hdev, void *data) return hci_scan_disable_sync(hdev); } -static int hci_inquiry_sync(struct hci_dev *hdev, u8 length); static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data) { - return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN); + return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN, 0); } static void le_scan_disable(struct work_struct *work) @@ -371,8 +375,6 @@ static void le_scan_disable(struct work_struct *work) goto _return; } - hdev->discovery.scan_start = 0; - /* If we were running LE only scan, change discovery state. 
If * we were running both LE and BR/EDR inquiry simultaneously, * and BR/EDR inquiry is already finished, stop discovery, @@ -570,6 +572,53 @@ unlock: hci_dev_unlock(hdev); } +static bool is_interleave_scanning(struct hci_dev *hdev) +{ + return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; +} + +static int hci_passive_scan_sync(struct hci_dev *hdev); + +static void interleave_scan_work(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + interleave_scan.work); + unsigned long timeout; + + if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) { + timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration); + } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) { + timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration); + } else { + bt_dev_err(hdev, "unexpected error"); + return; + } + + hci_passive_scan_sync(hdev); + + hci_dev_lock(hdev); + + switch (hdev->interleave_scan_state) { + case INTERLEAVE_SCAN_ALLOWLIST: + bt_dev_dbg(hdev, "next state: allowlist"); + hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; + break; + case INTERLEAVE_SCAN_NO_FILTER: + bt_dev_dbg(hdev, "next state: no filter"); + hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST; + break; + case INTERLEAVE_SCAN_NONE: + bt_dev_err(hdev, "unexpected error"); + } + + hci_dev_unlock(hdev); + + /* Don't continue interleaving if it was canceled */ + if (is_interleave_scanning(hdev)) + queue_delayed_work(hdev->req_workqueue, + &hdev->interleave_scan, timeout); +} + void hci_cmd_sync_init(struct hci_dev *hdev) { INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); @@ -581,6 +630,7 @@ void hci_cmd_sync_init(struct hci_dev *hdev) INIT_WORK(&hdev->reenable_adv_work, reenable_adv); INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable); INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); + INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work); } static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev, @@ -2114,11 +2164,6 @@ static void hci_start_interleave_scan(struct hci_dev *hdev) &hdev->interleave_scan, 0); } -static bool is_interleave_scanning(struct hci_dev *hdev) -{ - return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE; -} - static void cancel_interleave_scan(struct hci_dev *hdev) { bt_dev_dbg(hdev, "cancelling interleave scan"); @@ -5017,7 +5062,9 @@ int hci_dev_close_sync(struct hci_dev *hdev) cancel_delayed_work(&hdev->ncmd_timer); cancel_delayed_work(&hdev->le_scan_disable); - hci_request_cancel_all(hdev); + hci_cmd_sync_cancel_sync(hdev, ENODEV); + + cancel_interleave_scan(hdev); if (hdev->adv_instance_timeout) { cancel_delayed_work_sync(&hdev->adv_instance_expire); @@ -5664,7 +5711,7 @@ int hci_update_connectable_sync(struct hci_dev *hdev) return hci_update_passive_scan_sync(hdev); } -static int hci_inquiry_sync(struct hci_dev *hdev, u8 length) +int hci_inquiry_sync(struct hci_dev *hdev, u8 length, u8 num_rsp) { const u8 giac[3] = { 0x33, 0x8b, 0x9e }; const u8 liac[3] = { 0x00, 0x8b, 0x9e }; @@ -5687,6 +5734,7 @@ static int hci_inquiry_sync(struct hci_dev *hdev, u8 length) memcpy(&cp.lap, giac, sizeof(cp.lap)); cp.length = length; + cp.num_rsp = num_rsp; return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); @@ -5773,7 +5821,7 @@ static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev) if (err) return err; - return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN); + return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0); } int 
hci_start_discovery_sync(struct hci_dev *hdev) @@ -5785,7 +5833,7 @@ int hci_start_discovery_sync(struct hci_dev *hdev) switch (hdev->discovery.type) { case DISCOV_TYPE_BREDR: - return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN); + return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN, 0); case DISCOV_TYPE_INTERLEAVED: /* When running simultaneous discovery, the LE scanning time * should occupy the whole discovery time sine BR/EDR inquiry @@ -5855,7 +5903,6 @@ static int hci_pause_discovery_sync(struct hci_dev *hdev) return err; hdev->discovery_paused = true; - hdev->discovery_old_state = old_state; hci_discovery_set_state(hdev, DISCOVERY_STOPPED); return 0; @@ -6724,3 +6771,21 @@ int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn) return -ENOENT; } + +int hci_le_conn_update_sync(struct hci_dev *hdev, struct hci_conn *conn, + struct hci_conn_params *params) +{ + struct hci_cp_le_conn_update cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(conn->handle); + cp.conn_interval_min = cpu_to_le16(params->conn_min_interval); + cp.conn_interval_max = cpu_to_le16(params->conn_max_interval); + cp.conn_latency = cpu_to_le16(params->conn_latency); + cp.supervision_timeout = cpu_to_le16(params->supervision_timeout); + cp.min_ce_len = cpu_to_le16(0x0000); + cp.max_ce_len = cpu_to_le16(0x0000); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_CONN_UPDATE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c index 398fb81f7a13..d5e00d0dd1a0 100644 --- a/net/bluetooth/iso.c +++ b/net/bluetooth/iso.c @@ -1720,11 +1720,6 @@ static void iso_sock_ready(struct sock *sk) release_sock(sk); } -struct iso_list_data { - struct hci_conn *hcon; - int count; -}; - static bool iso_match_big(struct sock *sk, void *data) { struct hci_evt_le_big_sync_estabilished *ev = data; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 80f220b7e19d..40d4887c7f79 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -33,7 +33,6 @@ #include #include -#include "hci_request.h" #include "smp.h" #include "mgmt_util.h" #include "mgmt_config.h" @@ -42,7 +41,7 @@ #include "aosp.h" #define MGMT_VERSION 1 -#define MGMT_REVISION 22 +#define MGMT_REVISION 23 static const u16 mgmt_commands[] = { MGMT_OP_READ_INDEX_LIST, @@ -7813,6 +7812,18 @@ unlock: return err; } +static int conn_update_sync(struct hci_dev *hdev, void *data) +{ + struct hci_conn_params *params = data; + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_le(hdev, ¶ms->addr, params->addr_type); + if (!conn) + return -ECANCELED; + + return hci_le_conn_update_sync(hdev, conn, params); +} + static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) { @@ -7846,12 +7857,14 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data, hci_dev_lock(hdev); - hci_conn_params_clear_disabled(hdev); + if (param_count > 1) + hci_conn_params_clear_disabled(hdev); for (i = 0; i < param_count; i++) { struct mgmt_conn_param *param = &cp->params[i]; struct hci_conn_params *hci_param; u16 min, max, latency, timeout; + bool update = false; u8 addr_type; bt_dev_dbg(hdev, "Adding %pMR (type %u)", ¶m->addr.bdaddr, @@ -7879,6 +7892,19 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data, continue; } + /* Detect when the loading is for an existing parameter then + * attempt to trigger the connection update procedure. 
+ */ + if (!i && param_count == 1) { + hci_param = hci_conn_params_lookup(hdev, + ¶m->addr.bdaddr, + addr_type); + if (hci_param) + update = true; + else + hci_conn_params_clear_disabled(hdev); + } + hci_param = hci_conn_params_add(hdev, ¶m->addr.bdaddr, addr_type); if (!hci_param) { @@ -7890,6 +7916,25 @@ static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data, hci_param->conn_max_interval = max; hci_param->conn_latency = latency; hci_param->supervision_timeout = timeout; + + /* Check if we need to trigger a connection update */ + if (update) { + struct hci_conn *conn; + + /* Lookup for existing connection as central and check + * if parameters match and if they don't then trigger + * a connection update. + */ + conn = hci_conn_hash_lookup_le(hdev, &hci_param->addr, + addr_type); + if (conn && conn->role == HCI_ROLE_MASTER && + (conn->le_conn_min_interval != min || + conn->le_conn_max_interval != max || + conn->le_conn_latency != latency || + conn->le_supv_timeout != timeout)) + hci_cmd_sync_queue(hdev, conn_update_sync, + hci_param, NULL); + } } hci_dev_unlock(hdev); diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c index d039683d3bdd..5a8ccc491b14 100644 --- a/net/bluetooth/msft.c +++ b/net/bluetooth/msft.c @@ -7,7 +7,6 @@ #include #include -#include "hci_request.h" #include "mgmt_util.h" #include "msft.h" diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c index 69c75c041fe1..af80d599c337 100644 --- a/net/bluetooth/rfcomm/tty.c +++ b/net/bluetooth/rfcomm/tty.c @@ -504,7 +504,7 @@ static int rfcomm_get_dev_list(void __user *arg) struct rfcomm_dev *dev; struct rfcomm_dev_list_req *dl; struct rfcomm_dev_info *di; - int n = 0, size, err; + int n = 0, err; u16 dev_num; BT_DBG(""); @@ -515,12 +515,11 @@ static int rfcomm_get_dev_list(void __user *arg) if (!dev_num || dev_num > (PAGE_SIZE * 4) / sizeof(*di)) return -EINVAL; - size = sizeof(*dl) + dev_num * sizeof(*di); - - dl = kzalloc(size, GFP_KERNEL); + dl = kzalloc(struct_size(dl, dev_info, dev_num), GFP_KERNEL); if (!dl) return -ENOMEM; + dl->dev_num = dev_num; di = dl->dev_info; mutex_lock(&rfcomm_dev_lock); @@ -528,12 +527,12 @@ static int rfcomm_get_dev_list(void __user *arg) list_for_each_entry(dev, &rfcomm_dev_list, list) { if (!tty_port_get(&dev->port)) continue; - (di + n)->id = dev->id; - (di + n)->flags = dev->flags; - (di + n)->state = dev->dlc->state; - (di + n)->channel = dev->channel; - bacpy(&(di + n)->src, &dev->src); - bacpy(&(di + n)->dst, &dev->dst); + di[n].id = dev->id; + di[n].flags = dev->flags; + di[n].state = dev->dlc->state; + di[n].channel = dev->channel; + bacpy(&di[n].src, &dev->src); + bacpy(&di[n].dst, &dev->dst); tty_port_put(&dev->port); if (++n >= dev_num) break; @@ -542,9 +541,7 @@ static int rfcomm_get_dev_list(void __user *arg) mutex_unlock(&rfcomm_dev_lock); dl->dev_num = n; - size = sizeof(*dl) + n * sizeof(*di); - - err = copy_to_user(arg, dl, size); + err = copy_to_user(arg, dl, struct_size(dl, dev_info, n)); kfree(dl); return err ? 
-EFAULT : 0; diff --git a/net/bpf/bpf_dummy_struct_ops.c b/net/bpf/bpf_dummy_struct_ops.c index 891cdf61c65a..3ea52b05adfb 100644 --- a/net/bpf/bpf_dummy_struct_ops.c +++ b/net/bpf/bpf_dummy_struct_ops.c @@ -272,12 +272,12 @@ static int bpf_dummy_init_member(const struct btf_type *t, return -EOPNOTSUPP; } -static int bpf_dummy_reg(void *kdata) +static int bpf_dummy_reg(void *kdata, struct bpf_link *link) { return -EOPNOTSUPP; } -static void bpf_dummy_unreg(void *kdata) +static void bpf_dummy_unreg(void *kdata, struct bpf_link *link) { } diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c index 36ae54f57bf5..6d7a442ceb89 100644 --- a/net/bpf/test_run.c +++ b/net/bpf/test_run.c @@ -127,9 +127,10 @@ struct xdp_test_data { #define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head)) #define TEST_XDP_MAX_BATCH 256 -static void xdp_test_run_init_page(struct page *page, void *arg) +static void xdp_test_run_init_page(netmem_ref netmem, void *arg) { - struct xdp_page_head *head = phys_to_virt(page_to_phys(page)); + struct xdp_page_head *head = + phys_to_virt(page_to_phys(netmem_to_page(netmem))); struct xdp_buff *new_ctx, *orig_ctx; u32 headroom = XDP_PACKET_HEADROOM; struct xdp_test_data *xdp = arg; @@ -283,9 +284,10 @@ static int xdp_recv_frames(struct xdp_frame **frames, int nframes, static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog, u32 repeat) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; int err = 0, act, ret, i, nframes = 0, batch_sz; struct xdp_frame **frames = xdp->frames; + struct bpf_redirect_info *ri; struct xdp_page_head *head; struct xdp_frame *frm; bool redirect = false; @@ -295,6 +297,8 @@ static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog, batch_sz = min_t(u32, repeat, xdp->batch_size); local_bh_disable(); + bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); + ri = bpf_net_ctx_get_ri(); xdp_set_return_frame_no_direct(); for (i = 0; i < batch_sz; i++) { @@ -359,6 +363,7 @@ out: } xdp_clear_return_frame_no_direct(); + bpf_net_ctx_clear(bpf_net_ctx); local_bh_enable(); return err; } @@ -394,6 +399,7 @@ static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx, static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, u32 *retval, u32 *time, bool xdp) { + struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; struct bpf_prog_array_item item = {.prog = prog}; struct bpf_run_ctx *old_ctx; struct bpf_cg_run_ctx run_ctx; @@ -419,10 +425,14 @@ static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat, do { run_ctx.prog_item = &item; local_bh_disable(); + bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); + if (xdp) *retval = bpf_prog_run_xdp(prog, ctx); else *retval = bpf_prog_run(prog, ctx); + + bpf_net_ctx_clear(bpf_net_ctx); local_bh_enable(); } while (bpf_test_timer_continue(&t, 1, repeat, &ret, time)); bpf_reset_run_ctx(old_ctx); @@ -983,7 +993,8 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, void *data; int ret; - if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size) + if ((kattr->test.flags & ~BPF_F_TEST_SKB_CHECKSUM_COMPLETE) || + kattr->test.cpu || kattr->test.batch_size) return -EINVAL; data = bpf_test_init(kattr, kattr->test.data_size_in, @@ -1031,6 +1042,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); __skb_put(skb, size); + if (ctx && ctx->ifindex > 1) { dev = 
dev_get_by_index(net, ctx->ifindex); if (!dev) { @@ -1066,9 +1078,19 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, __skb_push(skb, hh_len); if (is_direct_pkt_access) bpf_compute_data_pointers(skb); + ret = convert___skb_to_skb(skb, ctx); if (ret) goto out; + + if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) { + const int off = skb_network_offset(skb); + int len = skb->len - off; + + skb->csum = skb_checksum(skb, off, len, 0); + skb->ip_summed = CHECKSUM_COMPLETE; + } + ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false); if (ret) goto out; @@ -1083,6 +1105,20 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, } memset(__skb_push(skb, hh_len), 0, hh_len); } + + if (kattr->test.flags & BPF_F_TEST_SKB_CHECKSUM_COMPLETE) { + const int off = skb_network_offset(skb); + int len = skb->len - off; + __wsum csum; + + csum = skb_checksum(skb, off, len, 0); + + if (csum_fold(skb->csum) != csum_fold(csum)) { + ret = -EBADMSG; + goto out; + } + } + convert_skb_to___skb(skb, ctx); size = skb->len; diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index d97064d460dc..e19b583ff2c6 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -25,8 +25,8 @@ static inline int should_deliver(const struct net_bridge_port *p, vg = nbp_vlan_group_rcu(p); return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) && - p->state == BR_STATE_FORWARDING && br_allowed_egress(vg, skb) && - nbp_switchdev_allowed_egress(p, skb) && + (br_mst_is_enabled(p->br) || p->state == BR_STATE_FORWARDING) && + br_allowed_egress(vg, skb) && nbp_switchdev_allowed_egress(p, skb) && !br_skb_isolated(p, skb); } diff --git a/net/bridge/br_netfilter_hooks.c b/net/bridge/br_netfilter_hooks.c index bf30c50b5689..3c9f6538990e 100644 --- a/net/bridge/br_netfilter_hooks.c +++ b/net/bridge/br_netfilter_hooks.c @@ -137,6 +137,7 @@ static inline bool is_pppoe_ipv6(const struct sk_buff *skb, #define NF_BRIDGE_MAX_MAC_HEADER_LENGTH (PPPOE_SES_HLEN + ETH_HLEN) struct brnf_frag_data { + local_lock_t bh_lock; char mac[NF_BRIDGE_MAX_MAC_HEADER_LENGTH]; u8 encap_size; u8 size; @@ -144,7 +145,9 @@ struct brnf_frag_data { __be16 vlan_proto; }; -static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage); +static DEFINE_PER_CPU(struct brnf_frag_data, brnf_frag_data_storage) = { + .bh_lock = INIT_LOCAL_LOCK(bh_lock), +}; static void nf_bridge_info_free(struct sk_buff *skb) { @@ -850,6 +853,7 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff { struct nf_bridge_info *nf_bridge = nf_bridge_info_get(skb); unsigned int mtu, mtu_reserved; + int ret; mtu_reserved = nf_bridge_mtu_reduction(skb); mtu = skb->dev->mtu; @@ -882,6 +886,7 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff IPCB(skb)->frag_max_size = nf_bridge->frag_max_size; + local_lock_nested_bh(&brnf_frag_data_storage.bh_lock); data = this_cpu_ptr(&brnf_frag_data_storage); if (skb_vlan_tag_present(skb)) { @@ -897,7 +902,9 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff skb_copy_from_linear_data_offset(skb, -data->size, data->mac, data->size); - return br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit); + ret = br_nf_ip_fragment(net, sk, skb, br_nf_push_frag_xmit); + local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock); + return ret; } if (IS_ENABLED(CONFIG_NF_DEFRAG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) { @@ -909,6 +916,7 @@ static int br_nf_dev_queue_xmit(struct net *net, 
struct sock *sk, struct sk_buff IP6CB(skb)->frag_max_size = nf_bridge->frag_max_size; + local_lock_nested_bh(&brnf_frag_data_storage.bh_lock); data = this_cpu_ptr(&brnf_frag_data_storage); data->encap_size = nf_bridge_encap_header_len(skb); data->size = ETH_HLEN + data->encap_size; @@ -916,8 +924,12 @@ static int br_nf_dev_queue_xmit(struct net *net, struct sock *sk, struct sk_buff skb_copy_from_linear_data_offset(skb, -data->size, data->mac, data->size); - if (v6ops) - return v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit); + if (v6ops) { + ret = v6ops->fragment(net, sk, skb, br_nf_push_frag_xmit); + local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock); + return ret; + } + local_unlock_nested_bh(&brnf_frag_data_storage.bh_lock); kfree_skb(skb); return -EMSGSIZE; diff --git a/net/bridge/br_netlink_tunnel.c b/net/bridge/br_netlink_tunnel.c index 17abf092f7ca..71a12da30004 100644 --- a/net/bridge/br_netlink_tunnel.c +++ b/net/bridge/br_netlink_tunnel.c @@ -315,8 +315,8 @@ int br_process_vlan_tunnel_info(const struct net_bridge *br, if (curr_change) *changed = curr_change; - __vlan_tunnel_handle_range(p, &v_start, &v_end, v, - curr_change); + __vlan_tunnel_handle_range(p, &v_start, &v_end, v, + curr_change); } if (v_start && v_end) br_vlan_notify(br, p, v_start->vid, v_end->vid, diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c index c3c51b9a6826..816bb0fde718 100644 --- a/net/bridge/netfilter/nf_conntrack_bridge.c +++ b/net/bridge/netfilter/nf_conntrack_bridge.c @@ -32,7 +32,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk, struct sk_buff *)) { int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size; - bool mono_delivery_time = skb->mono_delivery_time; + u8 tstamp_type = skb->tstamp_type; unsigned int hlen, ll_rs, mtu; ktime_t tstamp = skb->tstamp; struct ip_frag_state state; @@ -82,7 +82,7 @@ static int nf_br_ip_fragment(struct net *net, struct sock *sk, if (iter.frag) ip_fraglist_prepare(skb, &iter); - skb_set_delivery_time(skb, tstamp, mono_delivery_time); + skb_set_delivery_time(skb, tstamp, tstamp_type); err = output(net, sk, data, skb); if (err || !iter.frag) break; @@ -113,7 +113,7 @@ slow_path: goto blackhole; } - skb_set_delivery_time(skb2, tstamp, mono_delivery_time); + skb_set_delivery_time(skb2, tstamp, tstamp_type); err = output(net, sk, data, skb2); if (err) goto blackhole; diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c index 7796414d47e5..2ae8cfa3df88 100644 --- a/net/caif/cfpkt_skbuff.c +++ b/net/caif/cfpkt_skbuff.c @@ -21,13 +21,6 @@ do { \ pr_warn(errmsg); \ } while (0) -struct cfpktq { - struct sk_buff_head head; - atomic_t count; - /* Lock protects count updates */ - spinlock_t lock; -}; - /* * net/caif/ is generic and does not * understand SKB, so we do this typecast diff --git a/net/can/Kconfig b/net/can/Kconfig index cb56be8e3862..af64a6f76458 100644 --- a/net/can/Kconfig +++ b/net/can/Kconfig @@ -56,18 +56,17 @@ config CAN_GW source "net/can/j1939/Kconfig" config CAN_ISOTP - tristate "ISO 15765-2:2016 CAN transport protocol" + tristate "ISO 15765-2 CAN transport protocol" help CAN Transport Protocols offer support for segmented Point-to-Point communication between CAN nodes via two defined CAN Identifiers. + This protocol driver implements segmented data transfers for CAN CC + (aka Classical CAN, CAN 2.0B) and CAN FD frame types which were + introduced with ISO 15765-2:2016. As CAN frames can only transport a small amount of data bytes - (max. 8 bytes for 'classic' CAN and max. 
64 bytes for CAN FD) this + (max. 8 bytes for CAN CC and max. 64 bytes for CAN FD) this segmentation is needed to transport longer Protocol Data Units (PDU) as needed e.g. for vehicle diagnosis (UDS, ISO 14229) or IP-over-CAN traffic. - This protocol driver implements data transfers according to - ISO 15765-2:2016 for 'classic' CAN and CAN FD frame types. - If you want to perform automotive vehicle diagnostic services (UDS), - say 'y'. endif diff --git a/net/can/isotp.c b/net/can/isotp.c index 25bac0fafc83..16046931542a 100644 --- a/net/can/isotp.c +++ b/net/can/isotp.c @@ -72,7 +72,7 @@ #include <net/sock.h> #include <net/net_namespace.h> -MODULE_DESCRIPTION("PF_CAN isotp 15765-2:2016 protocol"); +MODULE_DESCRIPTION("PF_CAN ISO 15765-2 transport protocol"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>"); MODULE_ALIAS("can-proto-6"); @@ -83,10 +83,11 @@ MODULE_ALIAS("can-proto-6"); (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \ (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) -/* ISO 15765-2:2016 supports more than 4095 byte per ISO PDU as the FF_DL can - * take full 32 bit values (4 Gbyte). We would need some good concept to handle - * this between user space and kernel space. For now set the static buffer to - * something about 8 kbyte to be able to test this new functionality. +/* Since ISO 15765-2:2016 the CAN isotp protocol supports more than 4095 + * byte per ISO PDU as the FF_DL can take full 32 bit values (4 Gbyte). + * We would need some good concept to handle this between user space and + * kernel space. For now set the static buffer to something about 8 kbyte + * to be able to test this new functionality. */ #define DEFAULT_MAX_PDU_SIZE 8300 diff --git a/net/core/datagram.c b/net/core/datagram.c index e72dd78471a6..a40f733b37d7 100644 --- a/net/core/datagram.c +++ b/net/core/datagram.c @@ -618,16 +618,10 @@ fault: } EXPORT_SYMBOL(skb_copy_datagram_from_iter); -int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk, - struct sk_buff *skb, struct iov_iter *from, - size_t length) +int zerocopy_fill_skb_from_iter(struct sk_buff *skb, + struct iov_iter *from, size_t length) { - int frag; - - if (msg && msg->msg_ubuf && msg->sg_from_iter) - return msg->sg_from_iter(sk, skb, from, length); - - frag = skb_shinfo(skb)->nr_frags; + int frag = skb_shinfo(skb)->nr_frags; while (length && iov_iter_count(from)) { struct page *head, *last_head = NULL; @@ -635,7 +629,6 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk, int refs, order, n = 0; size_t start; ssize_t copied; - unsigned long truesize; if (frag == MAX_SKB_FRAGS) return -EMSGSIZE; @@ -647,17 +640,9 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk, length -= copied; - truesize = PAGE_ALIGN(copied + start); skb->data_len += copied; skb->len += copied; - skb->truesize += truesize; - if (sk && sk->sk_type == SOCK_STREAM) { - sk_wmem_queued_add(sk, truesize); - if (!skb_zcopy_pure(skb)) - sk_mem_charge(sk, truesize); - } else { - refcount_add(truesize, &skb->sk->sk_wmem_alloc); - } + skb->truesize += PAGE_ALIGN(copied + start); head = compound_head(pages[n]); order = compound_order(head); @@ -700,6 +685,30 @@ int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk, } return 0; } + +int __zerocopy_sg_from_iter(struct msghdr *msg, struct sock *sk, + struct sk_buff *skb, struct iov_iter *from, + size_t length) +{ + unsigned long orig_size = skb->truesize; + unsigned long truesize; + int ret; + + if (msg && msg->msg_ubuf && msg->sg_from_iter) + ret = msg->sg_from_iter(skb, from, length); + else + ret =
zerocopy_fill_skb_from_iter(skb, from, length); + + truesize = skb->truesize - orig_size; + if (sk && sk->sk_type == SOCK_STREAM) { + sk_wmem_queued_add(sk, truesize); + if (!skb_zcopy_pure(skb)) + sk_mem_charge(sk, truesize); + } else { + refcount_add(truesize, &skb->sk->sk_wmem_alloc); + } + return ret; +} EXPORT_SYMBOL(__zerocopy_sg_from_iter); /** diff --git a/net/core/dev.c b/net/core/dev.c index 2b4819b610b8..6ea1d20676fb 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -229,7 +229,7 @@ static inline void backlog_lock_irq_save(struct softnet_data *sd, { if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads()) spin_lock_irqsave(&sd->input_pkt_queue.lock, *flags); - else if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + else local_irq_save(*flags); } @@ -237,7 +237,7 @@ static inline void backlog_lock_irq_disable(struct softnet_data *sd) { if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads()) spin_lock_irq(&sd->input_pkt_queue.lock); - else if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + else local_irq_disable(); } @@ -246,7 +246,7 @@ static inline void backlog_unlock_irq_restore(struct softnet_data *sd, { if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads()) spin_unlock_irqrestore(&sd->input_pkt_queue.lock, *flags); - else if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + else local_irq_restore(*flags); } @@ -254,7 +254,7 @@ static inline void backlog_unlock_irq_enable(struct softnet_data *sd) { if (IS_ENABLED(CONFIG_RPS) || use_backlog_threads()) spin_unlock_irq(&sd->input_pkt_queue.lock); - else if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + else local_irq_enable(); } @@ -449,7 +449,9 @@ static RAW_NOTIFIER_HEAD(netdev_chain); * queue in the local softnet handler. */ -DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data); +DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data) = { + .process_queue_bh_lock = INIT_LOCAL_LOCK(process_queue_bh_lock), +}; EXPORT_PER_CPU_SYMBOL(softnet_data); /* Page_pool has a lockless array/stack to alloc/recycle pages. 
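
(Aside on the locking idiom: the softnet_data initializer above, like the brnf_frag_data_storage and bpf_sp conversions elsewhere in this series, embeds a local_lock_t directly in the per-CPU data so that PREEMPT_RT, where softirq sections are preemptible, still gets per-CPU mutual exclusion. The following is a minimal, self-contained sketch of that idiom; the demo_scratch structure and function names are hypothetical illustrations, not code from the patch.)

#include <linux/local_lock.h>
#include <linux/minmax.h>
#include <linux/percpu.h>
#include <linux/string.h>

/* Hypothetical per-CPU scratch area guarded by a nested-BH local lock. */
struct demo_scratch {
	local_lock_t bh_lock;
	char buf[64];
};

static DEFINE_PER_CPU(struct demo_scratch, demo_scratch) = {
	.bh_lock = INIT_LOCAL_LOCK(bh_lock),
};

/* Must be called with BH already disabled (e.g. from softirq context). */
static void demo_use_scratch(const void *src, size_t len)
{
	struct demo_scratch *scratch;

	local_lock_nested_bh(&demo_scratch.bh_lock);
	scratch = this_cpu_ptr(&demo_scratch);
	memcpy(scratch->buf, src, min(len, sizeof(scratch->buf)));
	/* ... work on scratch->buf ... */
	local_unlock_nested_bh(&demo_scratch.bh_lock);
}

(On !PREEMPT_RT the nested-BH lock reduces to lockdep annotations, so the existing fast path is unchanged; on PREEMPT_RT it becomes a real per-CPU lock that serializes the now-preemptible BH sections, which is why the hunks above bracket this_cpu_ptr() accesses with local_lock_nested_bh()/local_unlock_nested_bh().)
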
@@ -2160,7 +2162,7 @@ EXPORT_SYMBOL(net_disable_timestamp); static inline void net_timestamp_set(struct sk_buff *skb) { skb->tstamp = 0; - skb->mono_delivery_time = 0; + skb->tstamp_type = SKB_CLOCK_REALTIME; if (static_branch_unlikely(&netstamp_needed_key)) skb->tstamp = ktime_get_real(); } @@ -3940,6 +3942,7 @@ netdev_tx_queue_mapping(struct net_device *dev, struct sk_buff *skb) return netdev_get_tx_queue(dev, netdev_cap_txqueue(dev, qm)); } +#ifndef CONFIG_PREEMPT_RT static bool netdev_xmit_txqueue_skipped(void) { return __this_cpu_read(softnet_data.xmit.skip_txqueue); @@ -3950,6 +3953,19 @@ void netdev_xmit_skip_txqueue(bool skip) __this_cpu_write(softnet_data.xmit.skip_txqueue, skip); } EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue); + +#else +static bool netdev_xmit_txqueue_skipped(void) +{ + return current->net_xmit.skip_txqueue; +} + +void netdev_xmit_skip_txqueue(bool skip) +{ + current->net_xmit.skip_txqueue = skip; +} +EXPORT_SYMBOL_GPL(netdev_xmit_skip_txqueue); +#endif #endif /* CONFIG_NET_EGRESS */ #ifdef CONFIG_NET_XGRESS @@ -4029,10 +4045,13 @@ sch_handle_ingress(struct sk_buff *skb, struct packet_type **pt_prev, int *ret, { struct bpf_mprog_entry *entry = rcu_dereference_bh(skb->dev->tcx_ingress); enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_INGRESS; + struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; int sch_ret; if (!entry) return skb; + + bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); if (*pt_prev) { *ret = deliver_skb(skb, *pt_prev, orig_dev); *pt_prev = NULL; @@ -4061,10 +4080,12 @@ ingress_verdict: break; } *ret = NET_RX_SUCCESS; + bpf_net_ctx_clear(bpf_net_ctx); return NULL; case TC_ACT_SHOT: kfree_skb_reason(skb, drop_reason); *ret = NET_RX_DROP; + bpf_net_ctx_clear(bpf_net_ctx); return NULL; /* used by tc_run */ case TC_ACT_STOLEN: @@ -4074,8 +4095,10 @@ ingress_verdict: fallthrough; case TC_ACT_CONSUMED: *ret = NET_RX_SUCCESS; + bpf_net_ctx_clear(bpf_net_ctx); return NULL; } + bpf_net_ctx_clear(bpf_net_ctx); return skb; } @@ -4085,11 +4108,14 @@ sch_handle_egress(struct sk_buff *skb, int *ret, struct net_device *dev) { struct bpf_mprog_entry *entry = rcu_dereference_bh(dev->tcx_egress); enum skb_drop_reason drop_reason = SKB_DROP_REASON_TC_EGRESS; + struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; int sch_ret; if (!entry) return skb; + bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); + /* qdisc_skb_cb(skb)->pkt_len & tcx_set_ingress() was * already set by the caller. */ @@ -4105,10 +4131,12 @@ egress_verdict: /* No need to push/pop skb's mac_header here on egress! 
*/ skb_do_redirect(skb); *ret = NET_XMIT_SUCCESS; + bpf_net_ctx_clear(bpf_net_ctx); return NULL; case TC_ACT_SHOT: kfree_skb_reason(skb, drop_reason); *ret = NET_XMIT_DROP; + bpf_net_ctx_clear(bpf_net_ctx); return NULL; /* used by tc_run */ case TC_ACT_STOLEN: @@ -4118,8 +4146,10 @@ egress_verdict: fallthrough; case TC_ACT_CONSUMED: *ret = NET_XMIT_SUCCESS; + bpf_net_ctx_clear(bpf_net_ctx); return NULL; } + bpf_net_ctx_clear(bpf_net_ctx); return skb; } @@ -5096,11 +5126,14 @@ static DEFINE_STATIC_KEY_FALSE(generic_xdp_needed_key); int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb) { + struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; + if (xdp_prog) { struct xdp_buff xdp; u32 act; int err; + bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); act = netif_receive_generic_xdp(pskb, &xdp, xdp_prog); if (act != XDP_PASS) { switch (act) { @@ -5114,11 +5147,13 @@ int do_xdp_generic(struct bpf_prog *xdp_prog, struct sk_buff **pskb) generic_xdp_tx(*pskb, xdp_prog); break; } + bpf_net_ctx_clear(bpf_net_ctx); return XDP_DROP; } } return XDP_PASS; out_redir: + bpf_net_ctx_clear(bpf_net_ctx); kfree_skb_reason(*pskb, SKB_DROP_REASON_XDP); return XDP_DROP; } @@ -5234,7 +5269,7 @@ static __latent_entropy void net_tx_action(struct softirq_action *h) trace_consume_skb(skb, net_tx_action); else trace_kfree_skb(skb, net_tx_action, - get_kfree_skb_cb(skb)->reason); + get_kfree_skb_cb(skb)->reason, NULL); if (skb->fclone != SKB_FCLONE_UNAVAILABLE) __kfree_skb(skb); @@ -5935,6 +5970,7 @@ static void flush_backlog(struct work_struct *work) } backlog_unlock_irq_enable(sd); + local_lock_nested_bh(&softnet_data.process_queue_bh_lock); skb_queue_walk_safe(&sd->process_queue, skb, tmp) { if (skb->dev->reg_state == NETREG_UNREGISTERING) { __skb_unlink(skb, &sd->process_queue); @@ -5942,6 +5978,7 @@ static void flush_backlog(struct work_struct *work) rps_input_queue_head_incr(sd); } } + local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); local_bh_enable(); } @@ -6063,7 +6100,9 @@ static int process_backlog(struct napi_struct *napi, int quota) while (again) { struct sk_buff *skb; + local_lock_nested_bh(&softnet_data.process_queue_bh_lock); while ((skb = __skb_dequeue(&sd->process_queue))) { + local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); rcu_read_lock(); __netif_receive_skb(skb); rcu_read_unlock(); @@ -6072,7 +6111,9 @@ static int process_backlog(struct napi_struct *napi, int quota) return work; } + local_lock_nested_bh(&softnet_data.process_queue_bh_lock); } + local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); backlog_lock_irq_disable(sd); if (skb_queue_empty(&sd->input_pkt_queue)) { @@ -6087,8 +6128,10 @@ static int process_backlog(struct napi_struct *napi, int quota) napi->state &= NAPIF_STATE_THREADED; again = false; } else { + local_lock_nested_bh(&softnet_data.process_queue_bh_lock); skb_queue_splice_tail_init(&sd->input_pkt_queue, &sd->process_queue); + local_unlock_nested_bh(&softnet_data.process_queue_bh_lock); } backlog_unlock_irq_enable(sd); } @@ -6301,6 +6344,7 @@ enum { static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, unsigned flags, u16 budget) { + struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; bool skip_schedule = false; unsigned long timeout; int rc; @@ -6318,6 +6362,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, clear_bit(NAPI_STATE_IN_BUSY_POLL, &napi->state); local_bh_disable(); + bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); if (flags & NAPI_F_PREFER_BUSY_POLL) { 
napi->defer_hard_irqs_count = READ_ONCE(napi->dev->napi_defer_hard_irqs); @@ -6340,6 +6385,7 @@ static void busy_poll_stop(struct napi_struct *napi, void *have_poll_lock, netpoll_poll_unlock(have_poll_lock); if (rc == budget) __busy_poll_stop(napi, skip_schedule); + bpf_net_ctx_clear(bpf_net_ctx); local_bh_enable(); } @@ -6349,6 +6395,7 @@ static void __napi_busy_loop(unsigned int napi_id, { unsigned long start_time = loop_end ? busy_loop_current_time() : 0; int (*napi_poll)(struct napi_struct *napi, int budget); + struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; void *have_poll_lock = NULL; struct napi_struct *napi; @@ -6367,6 +6414,7 @@ restart: int work = 0; local_bh_disable(); + bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); if (!napi_poll) { unsigned long val = READ_ONCE(napi->state); @@ -6397,6 +6445,7 @@ count: __NET_ADD_STATS(dev_net(napi->dev), LINUX_MIB_BUSYPOLLRXPACKETS, work); skb_defer_free_flush(this_cpu_ptr(&softnet_data)); + bpf_net_ctx_clear(bpf_net_ctx); local_bh_enable(); if (!loop_end || loop_end(loop_end_arg, start_time)) @@ -6824,6 +6873,7 @@ static int napi_thread_wait(struct napi_struct *napi) static void napi_threaded_poll_loop(struct napi_struct *napi) { + struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; struct softnet_data *sd; unsigned long last_qs = jiffies; @@ -6832,6 +6882,8 @@ static void napi_threaded_poll_loop(struct napi_struct *napi) void *have; local_bh_disable(); + bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); + sd = this_cpu_ptr(&softnet_data); sd->in_napi_threaded_poll = true; @@ -6847,6 +6899,7 @@ static void napi_threaded_poll_loop(struct napi_struct *napi) net_rps_action_and_irq_enable(sd); } skb_defer_free_flush(sd); + bpf_net_ctx_clear(bpf_net_ctx); local_bh_enable(); if (!repoll) @@ -6872,10 +6925,12 @@ static __latent_entropy void net_rx_action(struct softirq_action *h) struct softnet_data *sd = this_cpu_ptr(&softnet_data); unsigned long time_limit = jiffies + usecs_to_jiffies(READ_ONCE(net_hotdata.netdev_budget_usecs)); + struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; int budget = READ_ONCE(net_hotdata.netdev_budget); LIST_HEAD(list); LIST_HEAD(repoll); + bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); start: sd->in_net_rx_action = true; local_irq_disable(); @@ -6928,7 +6983,8 @@ start: sd->in_net_rx_action = false; net_rps_action_and_irq_enable(sd); -end:; +end: + bpf_net_ctx_clear(bpf_net_ctx); } struct netdev_adjacent { @@ -10285,6 +10341,10 @@ int register_netdevice(struct net_device *dev) if (ret) return ret; + /* rss ctx ID 0 is reserved for the default context, start from 1 */ + xa_init_flags(&dev->ethtool->rss_ctx, XA_FLAGS_ALLOC1); + mutex_init(&dev->ethtool->rss_lock); + spin_lock_init(&dev->addr_list_lock); netdev_set_addr_lockdep_class(dev); @@ -10703,6 +10763,54 @@ void netdev_run_todo(void) wake_up(&netdev_unregistering_wq); } +/* Collate per-cpu network dstats statistics + * + * Read per-cpu network statistics from dev->dstats and populate the related + * fields in @s. 
+ */ +static void dev_fetch_dstats(struct rtnl_link_stats64 *s, + const struct pcpu_dstats __percpu *dstats) +{ + int cpu; + + for_each_possible_cpu(cpu) { + u64 rx_packets, rx_bytes, rx_drops; + u64 tx_packets, tx_bytes, tx_drops; + const struct pcpu_dstats *stats; + unsigned int start; + + stats = per_cpu_ptr(dstats, cpu); + do { + start = u64_stats_fetch_begin(&stats->syncp); + rx_packets = u64_stats_read(&stats->rx_packets); + rx_bytes = u64_stats_read(&stats->rx_bytes); + rx_drops = u64_stats_read(&stats->rx_drops); + tx_packets = u64_stats_read(&stats->tx_packets); + tx_bytes = u64_stats_read(&stats->tx_bytes); + tx_drops = u64_stats_read(&stats->tx_drops); + } while (u64_stats_fetch_retry(&stats->syncp, start)); + + s->rx_packets += rx_packets; + s->rx_bytes += rx_bytes; + s->rx_dropped += rx_drops; + s->tx_packets += tx_packets; + s->tx_bytes += tx_bytes; + s->tx_dropped += tx_drops; + } +} + +/* ndo_get_stats64 implementation for dtstats-based accounting. + * + * Populate @s from dev->stats and dev->dstats. This is used internally by the + * core for NETDEV_PCPU_STAT_DSTAT-type stats collection. + */ +static void dev_get_dstats64(const struct net_device *dev, + struct rtnl_link_stats64 *s) +{ + netdev_stats_to_stats64(s, &dev->stats); + dev_fetch_dstats(s, dev->dstats); +} + /* Convert net_device_stats to rtnl_link_stats64. rtnl_link_stats64 has * all the same fields in the same order as net_device_stats, with only * the type differing, but rtnl_link_stats64 may have additional fields @@ -10779,6 +10887,8 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev)); } else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_TSTATS) { dev_get_tstats64(dev, storage); + } else if (dev->pcpu_stat_type == NETDEV_PCPU_STAT_DSTATS) { + dev_get_dstats64(dev, storage); } else { netdev_stats_to_stats64(storage, &dev->stats); } @@ -10896,13 +11006,6 @@ void netdev_sw_irq_coalesce_default_on(struct net_device *dev) } EXPORT_SYMBOL_GPL(netdev_sw_irq_coalesce_default_on); -void netdev_freemem(struct net_device *dev) -{ - char *addr = (char *)dev - dev->padded; - - kvfree(addr); -} - /** * alloc_netdev_mqs - allocate network device * @sizeof_priv: size of private data to allocate space for @@ -10922,8 +11025,6 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, unsigned int txqs, unsigned int rxqs) { struct net_device *dev; - unsigned int alloc_size; - struct net_device *p; BUG_ON(strlen(name) >= sizeof(dev->name)); @@ -10937,21 +11038,12 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, return NULL; } - alloc_size = sizeof(struct net_device); - if (sizeof_priv) { - /* ensure 32-byte alignment of private area */ - alloc_size = ALIGN(alloc_size, NETDEV_ALIGN); - alloc_size += sizeof_priv; - } - /* ensure 32-byte alignment of whole construct */ - alloc_size += NETDEV_ALIGN - 1; - - p = kvzalloc(alloc_size, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); - if (!p) + dev = kvzalloc(struct_size(dev, priv, sizeof_priv), + GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL); + if (!dev) return NULL; - dev = PTR_ALIGN(p, NETDEV_ALIGN); - dev->padded = (char *)dev - (char *)p; + dev->priv_len = sizeof_priv; ref_tracker_dir_init(&dev->refcnt_tracker, 128, name); #ifdef CONFIG_PCPU_DEV_REFCNT @@ -11015,6 +11107,9 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name, dev->real_num_rx_queues = rxqs; if (netif_alloc_rx_queues(dev)) goto free_all; + dev->ethtool = kzalloc(sizeof(*dev->ethtool), 
GFP_KERNEL_ACCOUNT); + if (!dev->ethtool) + goto free_all; strcpy(dev->name, name); dev->name_assign_type = name_assign_type; @@ -11035,7 +11130,7 @@ free_pcpu: free_percpu(dev->pcpu_refcnt); free_dev: #endif - netdev_freemem(dev); + kvfree(dev); return NULL; } EXPORT_SYMBOL(alloc_netdev_mqs); @@ -11065,6 +11160,7 @@ void free_netdev(struct net_device *dev) return; } + kfree(dev->ethtool); netif_free_tx_queues(dev); netif_free_rx_queues(dev); @@ -11089,7 +11185,7 @@ void free_netdev(struct net_device *dev) /* Compatibility with error handling in drivers */ if (dev->reg_state == NETREG_UNINITIALIZED || dev->reg_state == NETREG_DUMMY) { - netdev_freemem(dev); + kvfree(dev); return; } @@ -11130,6 +11226,34 @@ void synchronize_net(void) } EXPORT_SYMBOL(synchronize_net); +static void netdev_rss_contexts_free(struct net_device *dev) +{ + struct ethtool_rxfh_context *ctx; + unsigned long context; + + mutex_lock(&dev->ethtool->rss_lock); + xa_for_each(&dev->ethtool->rss_ctx, context, ctx) { + struct ethtool_rxfh_param rxfh; + + rxfh.indir = ethtool_rxfh_context_indir(ctx); + rxfh.key = ethtool_rxfh_context_key(ctx); + rxfh.hfunc = ctx->hfunc; + rxfh.input_xfrm = ctx->input_xfrm; + rxfh.rss_context = context; + rxfh.rss_delete = true; + + xa_erase(&dev->ethtool->rss_ctx, context); + if (dev->ethtool_ops->create_rxfh_context) + dev->ethtool_ops->remove_rxfh_context(dev, ctx, + context, NULL); + else + dev->ethtool_ops->set_rxfh(dev, &rxfh, NULL); + kfree(ctx); + } + xa_destroy(&dev->ethtool->rss_ctx); + mutex_unlock(&dev->ethtool->rss_lock); +} + /** * unregister_netdevice_queue - remove device from the kernel * @dev: device @@ -11233,11 +11357,15 @@ void unregister_netdevice_many_notify(struct list_head *head, netdev_name_node_alt_flush(dev); netdev_name_node_free(dev->name_node); + netdev_rss_contexts_free(dev); + call_netdevice_notifiers(NETDEV_PRE_UNINIT, dev); if (dev->netdev_ops->ndo_uninit) dev->netdev_ops->ndo_uninit(dev); + mutex_destroy(&dev->ethtool->rss_lock); + if (skb) rtmsg_ifinfo_send(skb, dev, GFP_KERNEL, portid, nlh); diff --git a/net/core/dev.h b/net/core/dev.h index b7b518bc2be5..5654325c5b71 100644 --- a/net/core/dev.h +++ b/net/core/dev.h @@ -150,6 +150,8 @@ struct napi_struct *napi_by_id(unsigned int napi_id); void kick_defer_list_purge(struct softnet_data *sd, unsigned int cpu); #define XMIT_RECURSION_LIMIT 8 + +#ifndef CONFIG_PREEMPT_RT static inline bool dev_xmit_recursion(void) { return unlikely(__this_cpu_read(softnet_data.xmit.recursion) > @@ -165,5 +167,25 @@ static inline void dev_xmit_recursion_dec(void) { __this_cpu_dec(softnet_data.xmit.recursion); } +#else +static inline bool dev_xmit_recursion(void) +{ + return unlikely(current->net_xmit.recursion > XMIT_RECURSION_LIMIT); +} + +static inline void dev_xmit_recursion_inc(void) +{ + current->net_xmit.recursion++; +} + +static inline void dev_xmit_recursion_dec(void) +{ + current->net_xmit.recursion--; +} +#endif + +int dev_set_hwtstamp_phylib(struct net_device *dev, + struct kernel_hwtstamp_config *cfg, + struct netlink_ext_ack *extack); #endif diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c index 9a66cf5015f2..8592c052c0f4 100644 --- a/net/core/dev_ioctl.c +++ b/net/core/dev_ioctl.c @@ -259,9 +259,7 @@ static int dev_eth_ioctl(struct net_device *dev, * @dev: Network device * @cfg: Timestamping configuration structure * - * Helper for enforcing a common policy that phylib timestamping, if available, - * should take precedence in front of hardware timestamping provided by the - * netdev. 
+ * Helper for calling the default hardware provider timestamping. * * Note: phy_mii_ioctl() only handles SIOCSHWTSTAMP (not SIOCGHWTSTAMP), and * there only exists a phydev->mii_ts->hwtstamp() method. So this will return @@ -271,7 +269,7 @@ static int dev_eth_ioctl(struct net_device *dev, static int dev_get_hwtstamp_phylib(struct net_device *dev, struct kernel_hwtstamp_config *cfg) { - if (phy_has_hwtstamp(dev->phydev)) + if (phy_is_default_hwtstamp(dev->phydev)) return phy_hwtstamp_get(dev->phydev, cfg); return dev->netdev_ops->ndo_hwtstamp_get(dev, cfg); @@ -327,7 +325,7 @@ int dev_set_hwtstamp_phylib(struct net_device *dev, struct netlink_ext_ack *extack) { const struct net_device_ops *ops = dev->netdev_ops; - bool phy_ts = phy_has_hwtstamp(dev->phydev); + bool phy_ts = phy_is_default_hwtstamp(dev->phydev); struct kernel_hwtstamp_config old_cfg = {}; bool changed = false; int err; @@ -363,7 +361,6 @@ int dev_set_hwtstamp_phylib(struct net_device *dev, return 0; } -EXPORT_SYMBOL_GPL(dev_set_hwtstamp_phylib); static int dev_set_hwtstamp(struct net_device *dev, struct ifreq *ifr) { diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c index 430ed18f8584..2e0ae3328232 100644 --- a/net/core/drop_monitor.c +++ b/net/core/drop_monitor.c @@ -109,7 +109,8 @@ static u32 net_dm_queue_len = 1000; struct net_dm_alert_ops { void (*kfree_skb_probe)(void *ignore, struct sk_buff *skb, void *location, - enum skb_drop_reason reason); + enum skb_drop_reason reason, + struct sock *rx_sk); void (*napi_poll_probe)(void *ignore, struct napi_struct *napi, int work, int budget); void (*work_item_func)(struct work_struct *work); @@ -264,7 +265,8 @@ out: static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location, - enum skb_drop_reason reason) + enum skb_drop_reason reason, + struct sock *rx_sk) { trace_drop_common(skb, location); } @@ -491,7 +493,8 @@ static const struct net_dm_alert_ops net_dm_alert_summary_ops = { static void net_dm_packet_trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location, - enum skb_drop_reason reason) + enum skb_drop_reason reason, + struct sock *rx_sk) { ktime_t tstamp = ktime_get_real(); struct per_cpu_dm_data *data; diff --git a/net/core/filter.c b/net/core/filter.c index 9933851c685e..4cf1d34f7617 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -1658,9 +1658,12 @@ struct bpf_scratchpad { __be32 diff[MAX_BPF_STACK / sizeof(__be32)]; u8 buff[MAX_BPF_STACK]; }; + local_lock_t bh_lock; }; -static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp); +static DEFINE_PER_CPU(struct bpf_scratchpad, bpf_sp) = { + .bh_lock = INIT_LOCAL_LOCK(bh_lock), +}; static inline int __bpf_try_make_writable(struct sk_buff *skb, unsigned int write_len) @@ -2021,6 +2024,7 @@ BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size, struct bpf_scratchpad *sp = this_cpu_ptr(&bpf_sp); u32 diff_size = from_size + to_size; int i, j = 0; + __wsum ret; /* This is quite flexible, some examples: * @@ -2034,12 +2038,15 @@ BPF_CALL_5(bpf_csum_diff, __be32 *, from, u32, from_size, diff_size > sizeof(sp->diff))) return -EINVAL; + local_lock_nested_bh(&bpf_sp.bh_lock); for (i = 0; i < from_size / sizeof(__be32); i++, j++) sp->diff[j] = ~from[i]; for (i = 0; i < to_size / sizeof(__be32); i++, j++) sp->diff[j] = to[i]; - return csum_partial(sp->diff, diff_size, seed); + ret = csum_partial(sp->diff, diff_size, seed); + local_unlock_nested_bh(&bpf_sp.bh_lock); + return ret; } static const struct bpf_func_proto bpf_csum_diff_proto = { @@ -2279,12 +2286,12 @@ static int 
__bpf_redirect_neigh_v6(struct sk_buff *skb, struct net_device *dev, err = bpf_out_neigh_v6(net, skb, dev, nh); if (unlikely(net_xmit_eval(err))) - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); else ret = NET_XMIT_SUCCESS; goto out_xmit; out_drop: - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); kfree_skb(skb); out_xmit: return ret; @@ -2385,12 +2392,12 @@ static int __bpf_redirect_neigh_v4(struct sk_buff *skb, struct net_device *dev, err = bpf_out_neigh_v4(net, skb, dev, nh); if (unlikely(net_xmit_eval(err))) - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); else ret = NET_XMIT_SUCCESS; goto out_xmit; out_drop: - dev->stats.tx_errors++; + DEV_STATS_INC(dev, tx_errors); kfree_skb(skb); out_xmit: return ret; @@ -2476,9 +2483,6 @@ static const struct bpf_func_proto bpf_clone_redirect_proto = { .arg3_type = ARG_ANYTHING, }; -DEFINE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info); -EXPORT_PER_CPU_SYMBOL_GPL(bpf_redirect_info); - static struct net_device *skb_get_peer_dev(struct net_device *dev) { const struct net_device_ops *ops = dev->netdev_ops; @@ -2491,7 +2495,7 @@ static struct net_device *skb_get_peer_dev(struct net_device *dev) int skb_do_redirect(struct sk_buff *skb) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); struct net *net = dev_net(skb->dev); struct net_device *dev; u32 flags = ri->flags; @@ -2524,7 +2528,7 @@ out_drop: BPF_CALL_2(bpf_redirect, u32, ifindex, u64, flags) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); if (unlikely(flags & (~(BPF_F_INGRESS) | BPF_F_REDIRECT_INTERNAL))) return TC_ACT_SHOT; @@ -2545,7 +2549,7 @@ static const struct bpf_func_proto bpf_redirect_proto = { BPF_CALL_2(bpf_redirect_peer, u32, ifindex, u64, flags) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); if (unlikely(flags)) return TC_ACT_SHOT; @@ -2567,7 +2571,7 @@ static const struct bpf_func_proto bpf_redirect_peer_proto = { BPF_CALL_4(bpf_redirect_neigh, u32, ifindex, struct bpf_redir_neigh *, params, int, plen, u64, flags) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); if (unlikely((plen && plen < sizeof(*params)) || flags)) return TC_ACT_SHOT; @@ -4273,50 +4277,50 @@ static const struct bpf_func_proto bpf_xdp_adjust_meta_proto = { */ void xdp_do_flush(void) { - __dev_flush(); - __cpu_map_flush(); - __xsk_map_flush(); + struct list_head *lh_map, *lh_dev, *lh_xsk; + + bpf_net_ctx_get_all_used_flush_lists(&lh_map, &lh_dev, &lh_xsk); + if (lh_dev) + __dev_flush(lh_dev); + if (lh_map) + __cpu_map_flush(lh_map); + if (lh_xsk) + __xsk_map_flush(lh_xsk); } EXPORT_SYMBOL_GPL(xdp_do_flush); #if defined(CONFIG_DEBUG_NET) && defined(CONFIG_BPF_SYSCALL) void xdp_do_check_flushed(struct napi_struct *napi) { - bool ret; + struct list_head *lh_map, *lh_dev, *lh_xsk; + bool missed = false; - ret = dev_check_flush(); - ret |= cpu_map_check_flush(); - ret |= xsk_map_check_flush(); + bpf_net_ctx_get_all_used_flush_lists(&lh_map, &lh_dev, &lh_xsk); + if (lh_dev) { + __dev_flush(lh_dev); + missed = true; + } + if (lh_map) { + __cpu_map_flush(lh_map); + missed = true; + } + if (lh_xsk) { + __xsk_map_flush(lh_xsk); + missed = true; + } - WARN_ONCE(ret, "Missing xdp_do_flush() invocation after NAPI by %ps\n", + WARN_ONCE(missed, "Missing xdp_do_flush() invocation after NAPI by 
%ps\n", napi->poll); } #endif -void bpf_clear_redirect_map(struct bpf_map *map) -{ - struct bpf_redirect_info *ri; - int cpu; - - for_each_possible_cpu(cpu) { - ri = per_cpu_ptr(&bpf_redirect_info, cpu); - /* Avoid polluting remote cacheline due to writes if - * not needed. Once we pass this test, we need the - * cmpxchg() to make sure it hasn't been changed in - * the meantime by remote CPU. - */ - if (unlikely(READ_ONCE(ri->map) == map)) - cmpxchg(&ri->map, map, NULL); - } -} - DEFINE_STATIC_KEY_FALSE(bpf_master_redirect_enabled_key); EXPORT_SYMBOL_GPL(bpf_master_redirect_enabled_key); u32 xdp_master_redirect(struct xdp_buff *xdp) { + struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); struct net_device *master, *slave; - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); master = netdev_master_upper_dev_get_rcu(xdp->rxq->dev); slave = master->netdev_ops->ndo_xdp_get_xmit_slave(master, xdp); @@ -4388,7 +4392,7 @@ static __always_inline int __xdp_do_redirect_frame(struct bpf_redirect_info *ri, map = READ_ONCE(ri->map); /* The map pointer is cleared when the map is being torn - * down by bpf_clear_redirect_map() + * down by dev_map_free() */ if (unlikely(!map)) { err = -ENOENT; @@ -4433,7 +4437,7 @@ err: int xdp_do_redirect(struct net_device *dev, struct xdp_buff *xdp, struct bpf_prog *xdp_prog) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); enum bpf_map_type map_type = ri->map_type; if (map_type == BPF_MAP_TYPE_XSKMAP) @@ -4447,7 +4451,7 @@ EXPORT_SYMBOL_GPL(xdp_do_redirect); int xdp_do_redirect_frame(struct net_device *dev, struct xdp_buff *xdp, struct xdp_frame *xdpf, struct bpf_prog *xdp_prog) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); enum bpf_map_type map_type = ri->map_type; if (map_type == BPF_MAP_TYPE_XSKMAP) @@ -4464,7 +4468,7 @@ static int xdp_do_generic_redirect_map(struct net_device *dev, enum bpf_map_type map_type, u32 map_id, u32 flags) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); struct bpf_map *map; int err; @@ -4476,7 +4480,7 @@ static int xdp_do_generic_redirect_map(struct net_device *dev, map = READ_ONCE(ri->map); /* The map pointer is cleared when the map is being torn - * down by bpf_clear_redirect_map() + * down by dev_map_free() */ if (unlikely(!map)) { err = -ENOENT; @@ -4518,7 +4522,7 @@ err: int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb, struct xdp_buff *xdp, struct bpf_prog *xdp_prog) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); enum bpf_map_type map_type = ri->map_type; void *fwd = ri->tgt_value; u32 map_id = ri->map_id; @@ -4554,7 +4558,7 @@ err: BPF_CALL_2(bpf_xdp_redirect, u32, ifindex, u64, flags) { - struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info); + struct bpf_redirect_info *ri = bpf_net_ctx_get_ri(); if (unlikely(flags)) return XDP_ABORTED; @@ -6455,6 +6459,7 @@ BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset, void *srh_tlvs, *srh_end, *ptr; int srhoff = 0; + lockdep_assert_held(&srh_state->bh_lock); if (srh == NULL) return -EINVAL; @@ -6511,6 +6516,7 @@ BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb, int hdroff = 0; int err; + lockdep_assert_held(&srh_state->bh_lock); switch (action) { case SEG6_LOCAL_ACTION_END_X: if (!seg6_bpf_has_valid_srh(skb)) @@ 
-6587,6 +6593,7 @@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset, int srhoff = 0; int ret; + lockdep_assert_held(&srh_state->bh_lock); if (unlikely(srh == NULL)) return -EINVAL; @@ -6820,7 +6827,7 @@ static const struct bpf_func_proto bpf_skc_lookup_tcp_proto = { .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -6839,7 +6846,7 @@ static const struct bpf_func_proto bpf_sk_lookup_tcp_proto = { .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -6858,7 +6865,7 @@ static const struct bpf_func_proto bpf_sk_lookup_udp_proto = { .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -6882,7 +6889,7 @@ static const struct bpf_func_proto bpf_tc_skc_lookup_tcp_proto = { .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -6906,7 +6913,7 @@ static const struct bpf_func_proto bpf_tc_sk_lookup_tcp_proto = { .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -6930,7 +6937,7 @@ static const struct bpf_func_proto bpf_tc_sk_lookup_udp_proto = { .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -6968,7 +6975,7 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_udp_proto = { .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -6992,7 +6999,7 @@ static const struct bpf_func_proto bpf_xdp_skc_lookup_tcp_proto = { .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -7016,7 +7023,7 @@ static const struct bpf_func_proto bpf_xdp_sk_lookup_tcp_proto = { .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -7036,7 +7043,7 @@ static const struct bpf_func_proto bpf_sock_addr_skc_lookup_tcp_proto = { .ret_type = RET_PTR_TO_SOCK_COMMON_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -7055,7 +7062,7 @@ static const struct bpf_func_proto 
bpf_sock_addr_sk_lookup_tcp_proto = { .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -7074,7 +7081,7 @@ static const struct bpf_func_proto bpf_sock_addr_sk_lookup_udp_proto = { .ret_type = RET_PTR_TO_SOCKET_OR_NULL, .arg1_type = ARG_PTR_TO_CTX, .arg2_type = ARG_PTR_TO_MEM | MEM_RDONLY, - .arg3_type = ARG_CONST_SIZE, + .arg3_type = ARG_CONST_SIZE_OR_ZERO, .arg4_type = ARG_ANYTHING, .arg5_type = ARG_ANYTHING, }; @@ -7731,17 +7738,21 @@ BPF_CALL_3(bpf_skb_set_tstamp, struct sk_buff *, skb, return -EOPNOTSUPP; switch (tstamp_type) { - case BPF_SKB_TSTAMP_DELIVERY_MONO: + case BPF_SKB_CLOCK_REALTIME: + skb->tstamp = tstamp; + skb->tstamp_type = SKB_CLOCK_REALTIME; + break; + case BPF_SKB_CLOCK_MONOTONIC: if (!tstamp) return -EINVAL; skb->tstamp = tstamp; - skb->mono_delivery_time = 1; + skb->tstamp_type = SKB_CLOCK_MONOTONIC; break; - case BPF_SKB_TSTAMP_UNSPEC: - if (tstamp) + case BPF_SKB_CLOCK_TAI: + if (!tstamp) return -EINVAL; - skb->tstamp = 0; - skb->mono_delivery_time = 0; + skb->tstamp = tstamp; + skb->tstamp_type = SKB_CLOCK_TAI; break; default: return -EINVAL; @@ -9392,16 +9403,17 @@ static struct bpf_insn *bpf_convert_tstamp_type_read(const struct bpf_insn *si, { __u8 value_reg = si->dst_reg; __u8 skb_reg = si->src_reg; - /* AX is needed because src_reg and dst_reg could be the same */ - __u8 tmp_reg = BPF_REG_AX; - - *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, - SKB_BF_MONO_TC_OFFSET); - *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, - SKB_MONO_DELIVERY_TIME_MASK, 2); - *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_TSTAMP_UNSPEC); - *insn++ = BPF_JMP_A(1); - *insn++ = BPF_MOV32_IMM(value_reg, BPF_SKB_TSTAMP_DELIVERY_MONO); + BUILD_BUG_ON(__SKB_CLOCK_MAX != (int)BPF_SKB_CLOCK_TAI); + BUILD_BUG_ON(SKB_CLOCK_REALTIME != (int)BPF_SKB_CLOCK_REALTIME); + BUILD_BUG_ON(SKB_CLOCK_MONOTONIC != (int)BPF_SKB_CLOCK_MONOTONIC); + BUILD_BUG_ON(SKB_CLOCK_TAI != (int)BPF_SKB_CLOCK_TAI); + *insn++ = BPF_LDX_MEM(BPF_B, value_reg, skb_reg, SKB_BF_MONO_TC_OFFSET); + *insn++ = BPF_ALU32_IMM(BPF_AND, value_reg, SKB_TSTAMP_TYPE_MASK); +#ifdef __BIG_ENDIAN_BITFIELD + *insn++ = BPF_ALU32_IMM(BPF_RSH, value_reg, SKB_TSTAMP_TYPE_RSHIFT); +#else + BUILD_BUG_ON(!(SKB_TSTAMP_TYPE_MASK & 0x1)); +#endif return insn; } @@ -9444,11 +9456,12 @@ static struct bpf_insn *bpf_convert_tstamp_read(const struct bpf_prog *prog, __u8 tmp_reg = BPF_REG_AX; *insn++ = BPF_LDX_MEM(BPF_B, tmp_reg, skb_reg, SKB_BF_MONO_TC_OFFSET); - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, - TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK); - *insn++ = BPF_JMP32_IMM(BPF_JNE, tmp_reg, - TC_AT_INGRESS_MASK | SKB_MONO_DELIVERY_TIME_MASK, 2); - /* skb->tc_at_ingress && skb->mono_delivery_time, + /* check if ingress mask bits is set */ + *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, TC_AT_INGRESS_MASK, 1); + *insn++ = BPF_JMP_A(4); + *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, SKB_TSTAMP_TYPE_MASK, 1); + *insn++ = BPF_JMP_A(2); + /* skb->tc_at_ingress && skb->tstamp_type, * read 0 as the (rcv) timestamp. */ *insn++ = BPF_MOV64_IMM(value_reg, 0); @@ -9473,7 +9486,7 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog, * the bpf prog is aware the tstamp could have delivery time. * Thus, write skb->tstamp as is if tstamp_type_access is true. * Otherwise, writing at ingress will have to clear the - * mono_delivery_time bit also. 
+ * skb->tstamp_type bit also. */ if (!prog->tstamp_type_access) { __u8 tmp_reg = BPF_REG_AX; @@ -9483,8 +9496,8 @@ static struct bpf_insn *bpf_convert_tstamp_write(const struct bpf_prog *prog, *insn++ = BPF_JMP32_IMM(BPF_JSET, tmp_reg, TC_AT_INGRESS_MASK, 1); /* goto */ *insn++ = BPF_JMP_A(2); - /* : mono_delivery_time */ - *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, ~SKB_MONO_DELIVERY_TIME_MASK); + /* : skb->tstamp_type */ + *insn++ = BPF_ALU32_IMM(BPF_AND, tmp_reg, ~SKB_TSTAMP_TYPE_MASK); *insn++ = BPF_STX_MEM(BPF_B, skb_reg, tmp_reg, SKB_BF_MONO_TC_OFFSET); } #endif @@ -11040,7 +11053,6 @@ const struct bpf_verifier_ops lwt_seg6local_verifier_ops = { }; const struct bpf_prog_ops lwt_seg6local_prog_ops = { - .test_run = bpf_prog_test_run_skb, }; const struct bpf_verifier_ops cg_sock_verifier_ops = { @@ -11858,28 +11870,34 @@ bpf_sk_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) } __bpf_kfunc_start_defs(); -__bpf_kfunc int bpf_dynptr_from_skb(struct sk_buff *skb, u64 flags, - struct bpf_dynptr_kern *ptr__uninit) +__bpf_kfunc int bpf_dynptr_from_skb(struct __sk_buff *s, u64 flags, + struct bpf_dynptr *ptr__uninit) { + struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)ptr__uninit; + struct sk_buff *skb = (struct sk_buff *)s; + if (flags) { - bpf_dynptr_set_null(ptr__uninit); + bpf_dynptr_set_null(ptr); return -EINVAL; } - bpf_dynptr_init(ptr__uninit, skb, BPF_DYNPTR_TYPE_SKB, 0, skb->len); + bpf_dynptr_init(ptr, skb, BPF_DYNPTR_TYPE_SKB, 0, skb->len); return 0; } -__bpf_kfunc int bpf_dynptr_from_xdp(struct xdp_buff *xdp, u64 flags, - struct bpf_dynptr_kern *ptr__uninit) +__bpf_kfunc int bpf_dynptr_from_xdp(struct xdp_md *x, u64 flags, + struct bpf_dynptr *ptr__uninit) { + struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)ptr__uninit; + struct xdp_buff *xdp = (struct xdp_buff *)x; + if (flags) { - bpf_dynptr_set_null(ptr__uninit); + bpf_dynptr_set_null(ptr); return -EINVAL; } - bpf_dynptr_init(ptr__uninit, xdp, BPF_DYNPTR_TYPE_XDP, 0, xdp_get_buff_len(xdp)); + bpf_dynptr_init(ptr, xdp, BPF_DYNPTR_TYPE_XDP, 0, xdp_get_buff_len(xdp)); return 0; } @@ -11905,10 +11923,11 @@ __bpf_kfunc int bpf_sock_addr_set_sun_path(struct bpf_sock_addr_kern *sa_kern, return 0; } -__bpf_kfunc int bpf_sk_assign_tcp_reqsk(struct sk_buff *skb, struct sock *sk, +__bpf_kfunc int bpf_sk_assign_tcp_reqsk(struct __sk_buff *s, struct sock *sk, struct bpf_tcp_req_attrs *attrs, int attrs__sz) { #if IS_ENABLED(CONFIG_SYN_COOKIES) + struct sk_buff *skb = (struct sk_buff *)s; const struct request_sock_ops *ops; struct inet_request_sock *ireq; struct tcp_request_sock *treq; @@ -12003,16 +12022,17 @@ __bpf_kfunc int bpf_sk_assign_tcp_reqsk(struct sk_buff *skb, struct sock *sk, __bpf_kfunc_end_defs(); -int bpf_dynptr_from_skb_rdonly(struct sk_buff *skb, u64 flags, - struct bpf_dynptr_kern *ptr__uninit) +int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags, + struct bpf_dynptr *ptr__uninit) { + struct bpf_dynptr_kern *ptr = (struct bpf_dynptr_kern *)ptr__uninit; int err; err = bpf_dynptr_from_skb(skb, flags, ptr__uninit); if (err) return err; - bpf_dynptr_set_rdonly(ptr__uninit); + bpf_dynptr_set_rdonly(ptr); return 0; } diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index f82e9a7d3b37..ada1e39b557e 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -299,9 +299,10 @@ void skb_flow_dissect_meta(const struct sk_buff *skb, EXPORT_SYMBOL(skb_flow_dissect_meta); static void -skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type, - 
struct flow_dissector *flow_dissector, - void *target_container) +skb_flow_dissect_set_enc_control(enum flow_dissector_key_id type, + u32 ctrl_flags, + struct flow_dissector *flow_dissector, + void *target_container) { struct flow_dissector_key_control *ctrl; @@ -312,6 +313,7 @@ skb_flow_dissect_set_enc_addr_type(enum flow_dissector_key_id type, FLOW_DISSECTOR_KEY_ENC_CONTROL, target_container); ctrl->addr_type = type; + ctrl->flags = ctrl_flags; } void @@ -367,6 +369,7 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb, { struct ip_tunnel_info *info; struct ip_tunnel_key *key; + u32 ctrl_flags = 0; /* A quick check to see if there might be something to do. */ if (!dissector_uses_key(flow_dissector, @@ -391,11 +394,20 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb, key = &info->key; + if (test_bit(IP_TUNNEL_CSUM_BIT, key->tun_flags)) + ctrl_flags |= FLOW_DIS_F_TUNNEL_CSUM; + if (test_bit(IP_TUNNEL_DONT_FRAGMENT_BIT, key->tun_flags)) + ctrl_flags |= FLOW_DIS_F_TUNNEL_DONT_FRAGMENT; + if (test_bit(IP_TUNNEL_OAM_BIT, key->tun_flags)) + ctrl_flags |= FLOW_DIS_F_TUNNEL_OAM; + if (test_bit(IP_TUNNEL_CRIT_OPT_BIT, key->tun_flags)) + ctrl_flags |= FLOW_DIS_F_TUNNEL_CRIT_OPT; + switch (ip_tunnel_info_af(info)) { case AF_INET: - skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV4_ADDRS, - flow_dissector, - target_container); + skb_flow_dissect_set_enc_control(FLOW_DISSECTOR_KEY_IPV4_ADDRS, + ctrl_flags, flow_dissector, + target_container); if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) { struct flow_dissector_key_ipv4_addrs *ipv4; @@ -408,9 +420,9 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb, } break; case AF_INET6: - skb_flow_dissect_set_enc_addr_type(FLOW_DISSECTOR_KEY_IPV6_ADDRS, - flow_dissector, - target_container); + skb_flow_dissect_set_enc_control(FLOW_DISSECTOR_KEY_IPV6_ADDRS, + ctrl_flags, flow_dissector, + target_container); if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) { struct flow_dissector_key_ipv6_addrs *ipv6; @@ -422,6 +434,10 @@ skb_flow_dissect_tunnel_info(const struct sk_buff *skb, ipv6->dst = key->u.ipv6.dst; } break; + default: + skb_flow_dissect_set_enc_control(0, ctrl_flags, flow_dissector, + target_container); + break; } if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { @@ -1792,6 +1808,13 @@ u32 flow_hash_from_keys(struct flow_keys *keys) } EXPORT_SYMBOL(flow_hash_from_keys); +u32 flow_hash_from_keys_seed(struct flow_keys *keys, + const siphash_key_t *keyval) +{ + return __flow_hash_from_keys(keys, keyval); +} +EXPORT_SYMBOL(flow_hash_from_keys_seed); + static inline u32 ___skb_get_hash(const struct sk_buff *skb, struct flow_keys *keys, const siphash_key_t *keyval) @@ -1831,22 +1854,23 @@ EXPORT_SYMBOL(make_flow_keys_digest); static struct flow_dissector flow_keys_dissector_symmetric __read_mostly; -u32 __skb_get_hash_symmetric(const struct sk_buff *skb) +u32 __skb_get_hash_symmetric_net(const struct net *net, const struct sk_buff *skb) { struct flow_keys keys; __flow_hash_secret_init(); memset(&keys, 0, sizeof(keys)); - __skb_flow_dissect(NULL, skb, &flow_keys_dissector_symmetric, + __skb_flow_dissect(net, skb, &flow_keys_dissector_symmetric, &keys, NULL, 0, 0, 0, 0); return __flow_hash_from_keys(&keys, &hashrnd); } -EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric); +EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric_net); /** - * __skb_get_hash: calculate a flow hash + * __skb_get_hash_net: calculate a flow hash + * @net: associated network namespace, derived from @skb 
if NULL * @skb: sk_buff to calculate flow hash from * * This function calculates a flow hash based on src/dst addresses @@ -1854,18 +1878,24 @@ EXPORT_SYMBOL_GPL(__skb_get_hash_symmetric); * on success, zero indicates no valid hash. Also, sets l4_hash in skb * if hash is a canonical 4-tuple hash over transport ports. */ -void __skb_get_hash(struct sk_buff *skb) +void __skb_get_hash_net(const struct net *net, struct sk_buff *skb) { struct flow_keys keys; u32 hash; + memset(&keys, 0, sizeof(keys)); + + __skb_flow_dissect(net, skb, &flow_keys_dissector, + &keys, NULL, 0, 0, 0, + FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL); + __flow_hash_secret_init(); - hash = ___skb_get_hash(skb, &keys, &hashrnd); + hash = __flow_hash_from_keys(&keys, &hashrnd); __skb_set_sw_hash(skb, hash, flow_keys_have_l4(&keys)); } -EXPORT_SYMBOL(__skb_get_hash); +EXPORT_SYMBOL(__skb_get_hash_net); __u32 skb_get_hash_perturb(const struct sk_buff *skb, const siphash_key_t *perturb) diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index fae9c4694186..412816076b8b 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c @@ -206,7 +206,7 @@ void gen_kill_estimator(struct net_rate_estimator __rcu **rate_est) { struct net_rate_estimator *est; - est = xchg((__force struct net_rate_estimator **)rate_est, NULL); + est = unrcu_pointer(xchg(rate_est, NULL)); if (est) { timer_shutdown_sync(&est->timer); kfree_rcu(est, rcu); diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c index 4a0797f0a154..afb05f58b64c 100644 --- a/net/core/lwt_bpf.c +++ b/net/core/lwt_bpf.c @@ -38,13 +38,14 @@ static inline struct bpf_lwt *bpf_lwt_lwtunnel(struct lwtunnel_state *lwt) static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt, struct dst_entry *dst, bool can_redirect) { + struct bpf_net_context __bpf_net_ctx, *bpf_net_ctx; int ret; - /* Migration disable and BH disable are needed to protect per-cpu - * redirect_info between BPF prog and skb_do_redirect(). + /* Disabling BH is needed to protect per-CPU bpf_redirect_info between + * BPF prog and skb_do_redirect(). */ - migrate_disable(); local_bh_disable(); + bpf_net_ctx = bpf_net_ctx_set(&__bpf_net_ctx); bpf_compute_data_pointers(skb); ret = bpf_prog_run_save_cb(lwt->prog, skb); @@ -77,8 +78,8 @@ static int run_lwt_bpf(struct sk_buff *skb, struct bpf_lwt_prog *lwt, break; } + bpf_net_ctx_clear(bpf_net_ctx); local_bh_enable(); - migrate_enable(); return ret; } diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 45fd88405b6b..277751375b0a 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -3578,7 +3578,7 @@ static void neigh_copy_dflt_parms(struct net *net, struct neigh_parms *p, rcu_read_unlock(); } -static void neigh_proc_update(struct ctl_table *ctl, int write) +static void neigh_proc_update(const struct ctl_table *ctl, int write) { struct net_device *dev = ctl->extra1; struct neigh_parms *p = ctl->extra2; diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c index 4c27a360c294..0e2084ce7b75 100644 --- a/net/core/net-sysfs.c +++ b/net/core/net-sysfs.c @@ -2028,7 +2028,7 @@ static void netdev_release(struct device *d) * device is dead and about to be freed. 
*/ kfree(rcu_access_pointer(dev->ifalias)); - netdev_freemem(dev); + kvfree(dev); } static const void *net_namespace(const struct device *d) diff --git a/net/core/page_pool.c b/net/core/page_pool.c index f4444b4e39e6..2abe6e919224 100644 --- a/net/core/page_pool.c +++ b/net/core/page_pool.c @@ -178,7 +178,8 @@ static void page_pool_struct_check(void) CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_users); CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_page); CACHELINE_ASSERT_GROUP_MEMBER(struct page_pool, frag, frag_offset); - CACHELINE_ASSERT_GROUP_SIZE(struct page_pool, frag, 4 * sizeof(long)); + CACHELINE_ASSERT_GROUP_SIZE(struct page_pool, frag, + PAGE_POOL_FRAG_GROUP_ALIGN); } static int page_pool_init(struct page_pool *pool, @@ -327,19 +328,18 @@ struct page_pool *page_pool_create(const struct page_pool_params *params) } EXPORT_SYMBOL(page_pool_create); -static void page_pool_return_page(struct page_pool *pool, struct page *page); +static void page_pool_return_page(struct page_pool *pool, netmem_ref netmem); -noinline -static struct page *page_pool_refill_alloc_cache(struct page_pool *pool) +static noinline netmem_ref page_pool_refill_alloc_cache(struct page_pool *pool) { struct ptr_ring *r = &pool->ring; - struct page *page; + netmem_ref netmem; int pref_nid; /* preferred NUMA node */ /* Quicker fallback, avoid locks when ring is empty */ if (__ptr_ring_empty(r)) { alloc_stat_inc(pool, empty); - return NULL; + return 0; } /* Softirq guarantee CPU and thus NUMA node is stable. This, @@ -354,57 +354,57 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool) /* Refill alloc array, but only if NUMA match */ do { - page = __ptr_ring_consume(r); - if (unlikely(!page)) + netmem = (__force netmem_ref)__ptr_ring_consume(r); + if (unlikely(!netmem)) break; - if (likely(page_to_nid(page) == pref_nid)) { - pool->alloc.cache[pool->alloc.count++] = page; + if (likely(page_to_nid(netmem_to_page(netmem)) == pref_nid)) { + pool->alloc.cache[pool->alloc.count++] = netmem; } else { /* NUMA mismatch; * (1) release 1 page to page-allocator and * (2) break out to fallthrough to alloc_pages_node. * This limit stress on page buddy alloactor. */ - page_pool_return_page(pool, page); + page_pool_return_page(pool, netmem); alloc_stat_inc(pool, waive); - page = NULL; + netmem = 0; break; } } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL); /* Return last page */ if (likely(pool->alloc.count > 0)) { - page = pool->alloc.cache[--pool->alloc.count]; + netmem = pool->alloc.cache[--pool->alloc.count]; alloc_stat_inc(pool, refill); } - return page; + return netmem; } /* fast path */ -static struct page *__page_pool_get_cached(struct page_pool *pool) +static netmem_ref __page_pool_get_cached(struct page_pool *pool) { - struct page *page; + netmem_ref netmem; /* Caller MUST guarantee safe non-concurrent access, e.g. 
softirq */ if (likely(pool->alloc.count)) { /* Fast-path */ - page = pool->alloc.cache[--pool->alloc.count]; + netmem = pool->alloc.cache[--pool->alloc.count]; alloc_stat_inc(pool, fast); } else { - page = page_pool_refill_alloc_cache(pool); + netmem = page_pool_refill_alloc_cache(pool); } - return page; + return netmem; } static void __page_pool_dma_sync_for_device(const struct page_pool *pool, - const struct page *page, + netmem_ref netmem, u32 dma_sync_size) { #if defined(CONFIG_HAS_DMA) && defined(CONFIG_DMA_NEED_SYNC) - dma_addr_t dma_addr = page_pool_get_dma_addr(page); + dma_addr_t dma_addr = page_pool_get_dma_addr_netmem(netmem); dma_sync_size = min(dma_sync_size, pool->p.max_len); __dma_sync_single_for_device(pool->p.dev, dma_addr + pool->p.offset, @@ -414,14 +414,14 @@ static void __page_pool_dma_sync_for_device(const struct page_pool *pool, static __always_inline void page_pool_dma_sync_for_device(const struct page_pool *pool, - const struct page *page, + netmem_ref netmem, u32 dma_sync_size) { if (pool->dma_sync && dma_dev_need_sync(pool->p.dev)) - __page_pool_dma_sync_for_device(pool, page, dma_sync_size); + __page_pool_dma_sync_for_device(pool, netmem, dma_sync_size); } -static bool page_pool_dma_map(struct page_pool *pool, struct page *page) +static bool page_pool_dma_map(struct page_pool *pool, netmem_ref netmem) { dma_addr_t dma; @@ -430,31 +430,32 @@ static bool page_pool_dma_map(struct page_pool *pool, struct page *page) * into page private data (i.e 32bit cpu with 64bit DMA caps) * This mapping is kept for lifetime of page, until leaving pool. */ - dma = dma_map_page_attrs(pool->p.dev, page, 0, - (PAGE_SIZE << pool->p.order), - pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC | - DMA_ATTR_WEAK_ORDERING); + dma = dma_map_page_attrs(pool->p.dev, netmem_to_page(netmem), 0, + (PAGE_SIZE << pool->p.order), pool->p.dma_dir, + DMA_ATTR_SKIP_CPU_SYNC | + DMA_ATTR_WEAK_ORDERING); if (dma_mapping_error(pool->p.dev, dma)) return false; - if (page_pool_set_dma_addr(page, dma)) + if (page_pool_set_dma_addr_netmem(netmem, dma)) goto unmap_failed; - page_pool_dma_sync_for_device(pool, page, pool->p.max_len); + page_pool_dma_sync_for_device(pool, netmem, pool->p.max_len); return true; unmap_failed: - WARN_ON_ONCE("unexpected DMA address, please report to netdev@"); + WARN_ONCE(1, "unexpected DMA address, please report to netdev@"); dma_unmap_page_attrs(pool->p.dev, dma, PAGE_SIZE << pool->p.order, pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); return false; } -static void page_pool_set_pp_info(struct page_pool *pool, - struct page *page) +static void page_pool_set_pp_info(struct page_pool *pool, netmem_ref netmem) { + struct page *page = netmem_to_page(netmem); + page->pp = pool; page->pp_magic |= PP_SIGNATURE; @@ -464,13 +465,15 @@ static void page_pool_set_pp_info(struct page_pool *pool, * is dirtying the same cache line as the page->pp_magic above, so * the overhead is negligible. 
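A quick sketch of the netmem_ref conventions the conversion above relies on: 0 (not NULL) is the empty value, and page_to_netmem()/netmem_to_page() convert losslessly while the core still deals in pages underneath (netmem_conversions is a hypothetical helper for illustration):

static void netmem_conversions(struct page *page)
{
	netmem_ref netmem = page_to_netmem(page);

	WARN_ON(!netmem);				/* 0 means "no buffer" */
	WARN_ON(netmem_to_page(netmem) != page);	/* lossless round-trip */
}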
*/ - page_pool_fragment_page(page, 1); + page_pool_fragment_netmem(netmem, 1); if (pool->has_init_callback) - pool->slow.init_callback(page, pool->slow.init_arg); + pool->slow.init_callback(netmem, pool->slow.init_arg); } -static void page_pool_clear_pp_info(struct page *page) +static void page_pool_clear_pp_info(netmem_ref netmem) { + struct page *page = netmem_to_page(netmem); + page->pp_magic = 0; page->pp = NULL; } @@ -485,34 +488,34 @@ static struct page *__page_pool_alloc_page_order(struct page_pool *pool, if (unlikely(!page)) return NULL; - if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page))) { + if (pool->dma_map && unlikely(!page_pool_dma_map(pool, page_to_netmem(page)))) { put_page(page); return NULL; } alloc_stat_inc(pool, slow_high_order); - page_pool_set_pp_info(pool, page); + page_pool_set_pp_info(pool, page_to_netmem(page)); /* Track how many pages are held 'in-flight' */ pool->pages_state_hold_cnt++; - trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt); + trace_page_pool_state_hold(pool, page_to_netmem(page), + pool->pages_state_hold_cnt); return page; } /* slow path */ -noinline -static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, - gfp_t gfp) +static noinline netmem_ref __page_pool_alloc_pages_slow(struct page_pool *pool, + gfp_t gfp) { const int bulk = PP_ALLOC_CACHE_REFILL; unsigned int pp_order = pool->p.order; bool dma_map = pool->dma_map; - struct page *page; + netmem_ref netmem; int i, nr_pages; /* Don't support bulk alloc for high-order pages */ if (unlikely(pp_order)) - return __page_pool_alloc_page_order(pool, gfp); + return page_to_netmem(__page_pool_alloc_page_order(pool, gfp)); /* Unnecessary as alloc cache is empty, but guarantees zero count */ if (unlikely(pool->alloc.count > 0)) @@ -521,56 +524,63 @@ static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool, /* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */ memset(&pool->alloc.cache, 0, sizeof(void *) * bulk); - nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk, - pool->alloc.cache); + nr_pages = alloc_pages_bulk_array_node(gfp, + pool->p.nid, bulk, + (struct page **)pool->alloc.cache); if (unlikely(!nr_pages)) - return NULL; + return 0; /* Pages have been filled into alloc.cache array, but count is zero and * page element have not been (possibly) DMA mapped. */ for (i = 0; i < nr_pages; i++) { - page = pool->alloc.cache[i]; - if (dma_map && unlikely(!page_pool_dma_map(pool, page))) { - put_page(page); + netmem = pool->alloc.cache[i]; + if (dma_map && unlikely(!page_pool_dma_map(pool, netmem))) { + put_page(netmem_to_page(netmem)); continue; } - page_pool_set_pp_info(pool, page); - pool->alloc.cache[pool->alloc.count++] = page; + page_pool_set_pp_info(pool, netmem); + pool->alloc.cache[pool->alloc.count++] = netmem; /* Track how many pages are held 'in-flight' */ pool->pages_state_hold_cnt++; - trace_page_pool_state_hold(pool, page, + trace_page_pool_state_hold(pool, netmem, pool->pages_state_hold_cnt); } /* Return last page */ if (likely(pool->alloc.count > 0)) { - page = pool->alloc.cache[--pool->alloc.count]; + netmem = pool->alloc.cache[--pool->alloc.count]; alloc_stat_inc(pool, slow); } else { - page = NULL; + netmem = 0; } /* When page just alloc'ed is should/must have refcnt 1. */ - return page; + return netmem; } /* For using page_pool replace: alloc_pages() API calls, but provide * synchronization guarantee for allocation side. 
*/ -struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp) +netmem_ref page_pool_alloc_netmem(struct page_pool *pool, gfp_t gfp) { - struct page *page; + netmem_ref netmem; /* Fast-path: Get a page from cache */ - page = __page_pool_get_cached(pool); - if (page) - return page; + netmem = __page_pool_get_cached(pool); + if (netmem) + return netmem; /* Slow-path: cache empty, do real allocation */ - page = __page_pool_alloc_pages_slow(pool, gfp); - return page; + netmem = __page_pool_alloc_pages_slow(pool, gfp); + return netmem; +} +EXPORT_SYMBOL(page_pool_alloc_netmem); + +struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp) +{ + return netmem_to_page(page_pool_alloc_netmem(pool, gfp)); } EXPORT_SYMBOL(page_pool_alloc_pages); ALLOW_ERROR_INJECTION(page_pool_alloc_pages, NULL); @@ -599,8 +609,8 @@ s32 page_pool_inflight(const struct page_pool *pool, bool strict) return inflight; } -static __always_inline -void __page_pool_release_page_dma(struct page_pool *pool, struct page *page) +static __always_inline void __page_pool_release_page_dma(struct page_pool *pool, + netmem_ref netmem) { dma_addr_t dma; @@ -610,13 +620,13 @@ void __page_pool_release_page_dma(struct page_pool *pool, struct page *page) */ return; - dma = page_pool_get_dma_addr(page); + dma = page_pool_get_dma_addr_netmem(netmem); /* When page is unmapped, it cannot be returned to our pool */ dma_unmap_page_attrs(pool->p.dev, dma, PAGE_SIZE << pool->p.order, pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING); - page_pool_set_dma_addr(page, 0); + page_pool_set_dma_addr_netmem(netmem, 0); } /* Disconnects a page (from a page_pool). API users can have a need @@ -624,35 +634,34 @@ void __page_pool_release_page_dma(struct page_pool *pool, struct page *page) * a regular page (that will eventually be returned to the normal * page-allocator via put_page). */ -void page_pool_return_page(struct page_pool *pool, struct page *page) +void page_pool_return_page(struct page_pool *pool, netmem_ref netmem) { int count; - __page_pool_release_page_dma(pool, page); - - page_pool_clear_pp_info(page); + __page_pool_release_page_dma(pool, netmem); /* This may be the last page returned, releasing the pool, so * it is not safe to reference pool afterwards. */ count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt); - trace_page_pool_state_release(pool, page, count); + trace_page_pool_state_release(pool, netmem, count); - put_page(page); + page_pool_clear_pp_info(netmem); + put_page(netmem_to_page(netmem)); /* An optimization would be to call __free_pages(page, pool->p.order) * knowing page is not part of page-cache (thus avoiding a * __page_cache_release() call). */ } -static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page) +static bool page_pool_recycle_in_ring(struct page_pool *pool, netmem_ref netmem) { int ret; /* BH protection not needed if current is softirq */ if (in_softirq()) - ret = ptr_ring_produce(&pool->ring, page); + ret = ptr_ring_produce(&pool->ring, (__force void *)netmem); else - ret = ptr_ring_produce_bh(&pool->ring, page); + ret = ptr_ring_produce_bh(&pool->ring, (__force void *)netmem); if (!ret) { recycle_stat_inc(pool, ring); @@ -667,7 +676,7 @@ static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page) * * Caller must provide appropriate safe context. 
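For instance, a driver that is certain it runs in the pool's own NAPI context may request direct recycling into the lockless cache; rx_complete is a hypothetical caller, and -1 means "sync the full buffer", as elsewhere in this file:

static void rx_complete(struct page_pool *pool, netmem_ref netmem)
{
	/* allow_direct=true: caller guarantees softirq context on the
	 * pool's CPU, so the alloc cache can be used without locking.
	 */
	page_pool_put_unrefed_netmem(pool, netmem, -1, true);
}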
*/ -static bool page_pool_recycle_in_cache(struct page *page, +static bool page_pool_recycle_in_cache(netmem_ref netmem, struct page_pool *pool) { if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) { @@ -676,14 +685,15 @@ static bool page_pool_recycle_in_cache(struct page *page, } /* Caller MUST have verified/know (page_ref_count(page) == 1) */ - pool->alloc.cache[pool->alloc.count++] = page; + pool->alloc.cache[pool->alloc.count++] = netmem; recycle_stat_inc(pool, cached); return true; } -static bool __page_pool_page_can_be_recycled(const struct page *page) +static bool __page_pool_page_can_be_recycled(netmem_ref netmem) { - return page_ref_count(page) == 1 && !page_is_pfmemalloc(page); + return page_ref_count(netmem_to_page(netmem)) == 1 && + !page_is_pfmemalloc(netmem_to_page(netmem)); } /* If the page refcnt == 1, this will try to recycle the page. @@ -692,8 +702,8 @@ static bool __page_pool_page_can_be_recycled(const struct page *page) * If the page refcnt != 1, then the page will be returned to memory * subsystem. */ -static __always_inline struct page * -__page_pool_put_page(struct page_pool *pool, struct page *page, +static __always_inline netmem_ref +__page_pool_put_page(struct page_pool *pool, netmem_ref netmem, unsigned int dma_sync_size, bool allow_direct) { lockdep_assert_no_hardirq(); @@ -707,16 +717,16 @@ __page_pool_put_page(struct page_pool *pool, struct page *page, * page is NOT reusable when allocated when system is under * some pressure. (page_is_pfmemalloc) */ - if (likely(__page_pool_page_can_be_recycled(page))) { + if (likely(__page_pool_page_can_be_recycled(netmem))) { /* Read barrier done in page_ref_count / READ_ONCE */ - page_pool_dma_sync_for_device(pool, page, dma_sync_size); + page_pool_dma_sync_for_device(pool, netmem, dma_sync_size); - if (allow_direct && page_pool_recycle_in_cache(page, pool)) - return NULL; + if (allow_direct && page_pool_recycle_in_cache(netmem, pool)) + return 0; /* Page found as candidate for recycling */ - return page; + return netmem; } /* Fallback/non-XDP mode: API user have elevated refcnt. * @@ -732,9 +742,9 @@ __page_pool_put_page(struct page_pool *pool, struct page *page, * will be invoking put_page. 
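The contract mirrored by page_pool_drain_frag() below can be sketched for a single fragment user (frag_user_put is a hypothetical name): page_pool_unref_netmem() returns the number of remaining users, and only the last one hands the buffer back:

static void frag_user_put(struct page_pool *pool, netmem_ref netmem)
{
	if (page_pool_unref_netmem(netmem, 1))
		return;	/* other users still hold this buffer */

	page_pool_put_unrefed_netmem(pool, netmem, -1, false);
}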
*/ recycle_stat_inc(pool, released_refcnt); - page_pool_return_page(pool, page); + page_pool_return_page(pool, netmem); - return NULL; + return 0; } static bool page_pool_napi_local(const struct page_pool *pool) @@ -760,19 +770,28 @@ static bool page_pool_napi_local(const struct page_pool *pool) return napi && READ_ONCE(napi->list_owner) == cpuid; } -void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page, - unsigned int dma_sync_size, bool allow_direct) +void page_pool_put_unrefed_netmem(struct page_pool *pool, netmem_ref netmem, + unsigned int dma_sync_size, bool allow_direct) { if (!allow_direct) allow_direct = page_pool_napi_local(pool); - page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct); - if (page && !page_pool_recycle_in_ring(pool, page)) { + netmem = + __page_pool_put_page(pool, netmem, dma_sync_size, allow_direct); + if (netmem && !page_pool_recycle_in_ring(pool, netmem)) { /* Cache full, fallback to free pages */ recycle_stat_inc(pool, ring_full); - page_pool_return_page(pool, page); + page_pool_return_page(pool, netmem); } } +EXPORT_SYMBOL(page_pool_put_unrefed_netmem); + +void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page, + unsigned int dma_sync_size, bool allow_direct) +{ + page_pool_put_unrefed_netmem(pool, page_to_netmem(page), dma_sync_size, + allow_direct); +} EXPORT_SYMBOL(page_pool_put_unrefed_page); /** @@ -800,16 +819,16 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data, allow_direct = page_pool_napi_local(pool); for (i = 0; i < count; i++) { - struct page *page = virt_to_head_page(data[i]); + netmem_ref netmem = page_to_netmem(virt_to_head_page(data[i])); /* It is not the last user for the page frag case */ - if (!page_pool_is_last_ref(page)) + if (!page_pool_is_last_ref(netmem)) continue; - page = __page_pool_put_page(pool, page, -1, allow_direct); + netmem = __page_pool_put_page(pool, netmem, -1, allow_direct); /* Approved for bulk recycling in ptr_ring cache */ - if (page) - data[bulk_len++] = page; + if (netmem) + data[bulk_len++] = (__force void *)netmem; } if (!bulk_len) @@ -835,98 +854,106 @@ void page_pool_put_page_bulk(struct page_pool *pool, void **data, * since put_page() with refcnt == 1 can be an expensive operation */ for (; i < bulk_len; i++) - page_pool_return_page(pool, data[i]); + page_pool_return_page(pool, (__force netmem_ref)data[i]); } EXPORT_SYMBOL(page_pool_put_page_bulk); -static struct page *page_pool_drain_frag(struct page_pool *pool, - struct page *page) +static netmem_ref page_pool_drain_frag(struct page_pool *pool, + netmem_ref netmem) { long drain_count = BIAS_MAX - pool->frag_users; /* Some user is still using the page frag */ - if (likely(page_pool_unref_page(page, drain_count))) - return NULL; + if (likely(page_pool_unref_netmem(netmem, drain_count))) + return 0; - if (__page_pool_page_can_be_recycled(page)) { - page_pool_dma_sync_for_device(pool, page, -1); - return page; + if (__page_pool_page_can_be_recycled(netmem)) { + page_pool_dma_sync_for_device(pool, netmem, -1); + return netmem; } - page_pool_return_page(pool, page); - return NULL; + page_pool_return_page(pool, netmem); + return 0; } static void page_pool_free_frag(struct page_pool *pool) { long drain_count = BIAS_MAX - pool->frag_users; - struct page *page = pool->frag_page; + netmem_ref netmem = pool->frag_page; - pool->frag_page = NULL; + pool->frag_page = 0; - if (!page || page_pool_unref_page(page, drain_count)) + if (!netmem || page_pool_unref_netmem(netmem, drain_count)) return; - 
page_pool_return_page(pool, page); + page_pool_return_page(pool, netmem); } -struct page *page_pool_alloc_frag(struct page_pool *pool, - unsigned int *offset, - unsigned int size, gfp_t gfp) +netmem_ref page_pool_alloc_frag_netmem(struct page_pool *pool, + unsigned int *offset, unsigned int size, + gfp_t gfp) { unsigned int max_size = PAGE_SIZE << pool->p.order; - struct page *page = pool->frag_page; + netmem_ref netmem = pool->frag_page; if (WARN_ON(size > max_size)) - return NULL; + return 0; size = ALIGN(size, dma_get_cache_alignment()); *offset = pool->frag_offset; - if (page && *offset + size > max_size) { - page = page_pool_drain_frag(pool, page); - if (page) { + if (netmem && *offset + size > max_size) { + netmem = page_pool_drain_frag(pool, netmem); + if (netmem) { alloc_stat_inc(pool, fast); goto frag_reset; } } - if (!page) { - page = page_pool_alloc_pages(pool, gfp); - if (unlikely(!page)) { - pool->frag_page = NULL; - return NULL; + if (!netmem) { + netmem = page_pool_alloc_netmem(pool, gfp); + if (unlikely(!netmem)) { + pool->frag_page = 0; + return 0; } - pool->frag_page = page; + pool->frag_page = netmem; frag_reset: pool->frag_users = 1; *offset = 0; pool->frag_offset = size; - page_pool_fragment_page(page, BIAS_MAX); - return page; + page_pool_fragment_netmem(netmem, BIAS_MAX); + return netmem; } pool->frag_users++; pool->frag_offset = *offset + size; alloc_stat_inc(pool, fast); - return page; + return netmem; +} +EXPORT_SYMBOL(page_pool_alloc_frag_netmem); + +struct page *page_pool_alloc_frag(struct page_pool *pool, unsigned int *offset, + unsigned int size, gfp_t gfp) +{ + return netmem_to_page(page_pool_alloc_frag_netmem(pool, offset, size, + gfp)); } EXPORT_SYMBOL(page_pool_alloc_frag); static void page_pool_empty_ring(struct page_pool *pool) { - struct page *page; + netmem_ref netmem; /* Empty recycle ring */ - while ((page = ptr_ring_consume_bh(&pool->ring))) { + while ((netmem = (__force netmem_ref)ptr_ring_consume_bh(&pool->ring))) { /* Verify the refcnt invariant of cached pages */ - if (!(page_ref_count(page) == 1)) + if (!(page_ref_count(netmem_to_page(netmem)) == 1)) pr_crit("%s() page_pool refcnt %d violation\n", - __func__, page_ref_count(page)); + __func__, netmem_ref_count(netmem)); - page_pool_return_page(pool, page); + page_pool_return_page(pool, netmem); } } @@ -942,7 +969,7 @@ static void __page_pool_destroy(struct page_pool *pool) static void page_pool_empty_alloc_cache_once(struct page_pool *pool) { - struct page *page; + netmem_ref netmem; if (pool->destroy_cnt) return; @@ -952,8 +979,8 @@ static void page_pool_empty_alloc_cache_once(struct page_pool *pool) * call concurrently. */ while (pool->alloc.count) { - page = pool->alloc.cache[--pool->alloc.count]; - page_pool_return_page(pool, page); + netmem = pool->alloc.cache[--pool->alloc.count]; + page_pool_return_page(pool, netmem); } } @@ -1014,7 +1041,7 @@ void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *), pool->xdp_mem_id = mem->id; } -static void page_pool_disable_direct_recycling(struct page_pool *pool) +void page_pool_disable_direct_recycling(struct page_pool *pool) { /* Disable direct recycling based on pool->cpuid. * Paired with READ_ONCE() in page_pool_napi_local(). @@ -1027,11 +1054,12 @@ static void page_pool_disable_direct_recycling(struct page_pool *pool) /* To avoid races with recycling and additional barriers make sure * pool and NAPI are unlinked when NAPI is disabled. 
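With page_pool_disable_direct_recycling() now exported, a driver that reconfigures queues can break the pool<->NAPI link itself before disabling NAPI; a teardown-ordering sketch (queue_teardown is hypothetical):

static void queue_teardown(struct napi_struct *napi, struct page_pool *pool)
{
	/* stop lockless direct recycling first ... */
	page_pool_disable_direct_recycling(pool);

	/* ... so the pool is already unlinked when NAPI goes away */
	napi_disable(napi);
}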
*/ - WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state) || - READ_ONCE(pool->p.napi->list_owner) != -1); + WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state)); + WARN_ON(READ_ONCE(pool->p.napi->list_owner) != -1); WRITE_ONCE(pool->p.napi, NULL); } +EXPORT_SYMBOL(page_pool_disable_direct_recycling); void page_pool_destroy(struct page_pool *pool) { @@ -1059,15 +1087,15 @@ EXPORT_SYMBOL(page_pool_destroy); /* Caller must provide appropriate safe context, e.g. NAPI. */ void page_pool_update_nid(struct page_pool *pool, int new_nid) { - struct page *page; + netmem_ref netmem; trace_page_pool_update_nid(pool, new_nid); pool->p.nid = new_nid; /* Flush pool alloc cache, as refill will check NUMA node */ while (pool->alloc.count) { - page = pool->alloc.cache[--pool->alloc.count]; - page_pool_return_page(pool, page); + netmem = pool->alloc.cache[--pool->alloc.count]; + page_pool_return_page(pool, netmem); } } EXPORT_SYMBOL(page_pool_update_nid); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 4668d6718040..87e67194f240 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -3969,22 +3969,28 @@ static int rtnl_dellinkprop(struct sk_buff *skb, struct nlmsghdr *nlh, return rtnl_linkprop(RTM_DELLINKPROP, skb, nlh, extack); } -static u32 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) +static noinline_for_stack u32 rtnl_calcit(struct sk_buff *skb, + struct nlmsghdr *nlh) { struct net *net = sock_net(skb->sk); size_t min_ifinfo_dump_size = 0; - struct nlattr *tb[IFLA_MAX+1]; u32 ext_filter_mask = 0; struct net_device *dev; - int hdrlen; + struct nlattr *nla; + int hdrlen, rem; /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */ hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ? sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg); - if (nlmsg_parse_deprecated(nlh, hdrlen, tb, IFLA_MAX, ifla_policy, NULL) >= 0) { - if (tb[IFLA_EXT_MASK]) - ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); + if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen)) + return NLMSG_GOODSIZE; + + nla_for_each_attr_type(nla, IFLA_EXT_MASK, + nlmsg_attrdata(nlh, hdrlen), + nlmsg_attrlen(nlh, hdrlen), rem) { + if (nla_len(nla) == sizeof(u32)) + ext_filter_mask = nla_get_u32(nla); } if (!ext_filter_mask) @@ -6486,6 +6492,7 @@ static int rtnl_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh, static int rtnl_dumpit(struct sk_buff *skb, struct netlink_callback *cb) { + const bool needs_lock = !(cb->flags & RTNL_FLAG_DUMP_UNLOCKED); rtnl_dumpit_func dumpit = cb->data; int err; @@ -6495,7 +6502,11 @@ static int rtnl_dumpit(struct sk_buff *skb, struct netlink_callback *cb) if (!dumpit) return 0; + if (needs_lock) + rtnl_lock(); err = dumpit(skb, cb); + if (needs_lock) + rtnl_unlock(); /* Old dump handlers used to send NLM_DONE as in a separate recvmsg(). * Some applications which parse netlink manually depend on this. 
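Under this scheme, dump handlers keep running under the RTNL by default; only families that opt in via RTNL_FLAG_DUMP_UNLOCKED skip the lock. A registration sketch, where example_dumpit is hypothetical and RTM_GETLINK is shown purely for illustration:

static int example_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* runs without the RTNL held because of the flag below */
	return 0;
}

static void example_register(void)
{
	rtnl_register(PF_UNSPEC, RTM_GETLINK, NULL, example_dumpit,
		      RTNL_FLAG_DUMP_UNLOCKED);
}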
@@ -6515,7 +6526,8 @@ static int rtnetlink_dump_start(struct sock *ssk, struct sk_buff *skb, const struct nlmsghdr *nlh, struct netlink_dump_control *control) { - if (control->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE) { + if (control->flags & RTNL_FLAG_DUMP_SPLIT_NLM_DONE || + !(control->flags & RTNL_FLAG_DUMP_UNLOCKED)) { WARN_ON(control->data); control->data = control->dump; control->dump = rtnl_dumpit; @@ -6703,7 +6715,6 @@ static int __net_init rtnetlink_net_init(struct net *net) struct netlink_kernel_cfg cfg = { .groups = RTNLGRP_MAX, .input = rtnetlink_rcv, - .cb_mutex = &rtnl_mutex, .flags = NL_CFG_F_NONROOT_RECV, .bind = rtnetlink_bind, }; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 466999a7515e..83f8cd8aa2d1 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -277,6 +277,7 @@ static void *page_frag_alloc_1k(struct page_frag_1k *nc, gfp_t gfp_mask) #endif struct napi_alloc_cache { + local_lock_t bh_lock; struct page_frag_cache page; struct page_frag_1k page_small; unsigned int skb_count; @@ -284,7 +285,9 @@ struct napi_alloc_cache { }; static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); -static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache); +static DEFINE_PER_CPU(struct napi_alloc_cache, napi_alloc_cache) = { + .bh_lock = INIT_LOCAL_LOCK(bh_lock), +}; /* Double check that napi_get_frags() allocates skbs with * skb->head being backed by slab, not a page fragment. @@ -306,11 +309,16 @@ void napi_get_frags_check(struct napi_struct *napi) void *__napi_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) { struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); + void *data; fragsz = SKB_DATA_ALIGN(fragsz); - return __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, + local_lock_nested_bh(&napi_alloc_cache.bh_lock); + data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, align_mask); + local_unlock_nested_bh(&napi_alloc_cache.bh_lock); + return data; + } EXPORT_SYMBOL(__napi_alloc_frag_align); @@ -318,19 +326,15 @@ void *__netdev_alloc_frag_align(unsigned int fragsz, unsigned int align_mask) { void *data; - fragsz = SKB_DATA_ALIGN(fragsz); if (in_hardirq() || irqs_disabled()) { struct page_frag_cache *nc = this_cpu_ptr(&netdev_alloc_cache); + fragsz = SKB_DATA_ALIGN(fragsz); data = __page_frag_alloc_align(nc, fragsz, GFP_ATOMIC, align_mask); } else { - struct napi_alloc_cache *nc; - local_bh_disable(); - nc = this_cpu_ptr(&napi_alloc_cache); - data = __page_frag_alloc_align(&nc->page, fragsz, GFP_ATOMIC, - align_mask); + data = __napi_alloc_frag_align(fragsz, align_mask); local_bh_enable(); } return data; @@ -342,16 +346,20 @@ static struct sk_buff *napi_skb_cache_get(void) struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache); struct sk_buff *skb; + local_lock_nested_bh(&napi_alloc_cache.bh_lock); if (unlikely(!nc->skb_count)) { nc->skb_count = kmem_cache_alloc_bulk(net_hotdata.skbuff_cache, GFP_ATOMIC, NAPI_SKB_CACHE_BULK, nc->skb_cache); - if (unlikely(!nc->skb_count)) + if (unlikely(!nc->skb_count)) { + local_unlock_nested_bh(&napi_alloc_cache.bh_lock); return NULL; + } } skb = nc->skb_cache[--nc->skb_count]; + local_unlock_nested_bh(&napi_alloc_cache.bh_lock); kasan_mempool_unpoison_object(skb, kmem_cache_size(net_hotdata.skbuff_cache)); return skb; @@ -744,9 +752,13 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len, pfmemalloc = nc->pfmemalloc; } else { local_bh_disable(); + local_lock_nested_bh(&napi_alloc_cache.bh_lock); + nc = this_cpu_ptr(&napi_alloc_cache.page); data = 
page_frag_alloc(nc, len, gfp_mask); pfmemalloc = nc->pfmemalloc; + + local_unlock_nested_bh(&napi_alloc_cache.bh_lock); local_bh_enable(); } @@ -810,11 +822,11 @@ struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len) goto skb_success; } - nc = this_cpu_ptr(&napi_alloc_cache); - if (sk_memalloc_socks()) gfp_mask |= __GFP_MEMALLOC; + local_lock_nested_bh(&napi_alloc_cache.bh_lock); + nc = this_cpu_ptr(&napi_alloc_cache); if (NAPI_HAS_SMALL_PAGE_FRAG && len <= SKB_WITH_OVERHEAD(1024)) { /* we are artificially inflating the allocation size, but * that is not as bad as it may look like, as: @@ -836,6 +848,7 @@ struct sk_buff *napi_alloc_skb(struct napi_struct *napi, unsigned int len) data = page_frag_alloc(&nc->page, len, gfp_mask); pfmemalloc = nc->page.pfmemalloc; } + local_unlock_nested_bh(&napi_alloc_cache.bh_lock); if (unlikely(!data)) return NULL; @@ -1002,8 +1015,10 @@ int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb, EXPORT_SYMBOL(skb_cow_data_for_xdp); #if IS_ENABLED(CONFIG_PAGE_POOL) -bool napi_pp_put_page(struct page *page) +bool napi_pp_put_page(netmem_ref netmem) { + struct page *page = netmem_to_page(netmem); + page = compound_head(page); /* page->pp_magic is OR'ed with PP_SIGNATURE after the allocation @@ -1016,7 +1031,7 @@ bool napi_pp_put_page(struct page *page) if (unlikely(!is_pp_page(page))) return false; - page_pool_put_full_page(page->pp, page, false); + page_pool_put_full_netmem(page->pp, page_to_netmem(page), false); return true; } @@ -1027,7 +1042,7 @@ static bool skb_pp_recycle(struct sk_buff *skb, void *data) { if (!IS_ENABLED(CONFIG_PAGE_POOL) || !skb->pp_recycle) return false; - return napi_pp_put_page(virt_to_page(data)); + return napi_pp_put_page(page_to_netmem(virt_to_page(data))); } /** @@ -1190,7 +1205,8 @@ void __kfree_skb(struct sk_buff *skb) EXPORT_SYMBOL(__kfree_skb); static __always_inline -bool __kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) +bool __sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, + enum skb_drop_reason reason) { if (unlikely(!skb_unref(skb))) return false; @@ -1203,26 +1219,27 @@ bool __kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) if (reason == SKB_CONSUMED) trace_consume_skb(skb, __builtin_return_address(0)); else - trace_kfree_skb(skb, __builtin_return_address(0), reason); + trace_kfree_skb(skb, __builtin_return_address(0), reason, sk); return true; } /** - * kfree_skb_reason - free an sk_buff with special reason + * sk_skb_reason_drop - free an sk_buff with special reason + * @sk: the socket to receive @skb, or NULL if not applicable * @skb: buffer to free * @reason: reason why this skb is dropped * - * Drop a reference to the buffer and free it if the usage count has - * hit zero. Meanwhile, pass the drop reason to 'kfree_skb' - * tracepoint. + * Drop a reference to the buffer and free it if the usage count has hit + * zero. Meanwhile, pass the receiving socket and drop reason to + * 'kfree_skb' tracepoint. 
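Callers that do not know the receiving socket keep the old behaviour by passing a NULL @sk; the legacy kfree_skb_reason() semantics then reduce to a thin wrapper along these lines (kfree_skb_reason_compat is a hypothetical name for illustration):

static inline void kfree_skb_reason_compat(struct sk_buff *skb,
					   enum skb_drop_reason reason)
{
	sk_skb_reason_drop(NULL, skb, reason);
}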
*/ void __fix_address -kfree_skb_reason(struct sk_buff *skb, enum skb_drop_reason reason) +sk_skb_reason_drop(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason) { - if (__kfree_skb_reason(skb, reason)) + if (__sk_skb_reason_drop(sk, skb, reason)) __kfree_skb(skb); } -EXPORT_SYMBOL(kfree_skb_reason); +EXPORT_SYMBOL(sk_skb_reason_drop); #define KFREE_SKB_BULK_SIZE 16 @@ -1261,7 +1278,7 @@ kfree_skb_list_reason(struct sk_buff *segs, enum skb_drop_reason reason) while (segs) { struct sk_buff *next = segs->next; - if (__kfree_skb_reason(segs, reason)) { + if (__sk_skb_reason_drop(NULL, segs, reason)) { skb_poison_list(segs); kfree_skb_add_bulk(segs, &sa, reason); } @@ -1433,6 +1450,7 @@ static void napi_skb_cache_put(struct sk_buff *skb) if (!kasan_mempool_poison_object(skb)) return; + local_lock_nested_bh(&napi_alloc_cache.bh_lock); nc->skb_cache[nc->skb_count++] = skb; if (unlikely(nc->skb_count == NAPI_SKB_CACHE_SIZE)) { @@ -1444,6 +1462,7 @@ static void napi_skb_cache_put(struct sk_buff *skb) nc->skb_cache + NAPI_SKB_CACHE_HALF); nc->skb_count = NAPI_SKB_CACHE_HALF; } + local_unlock_nested_bh(&napi_alloc_cache.bh_lock); } void __napi_kfree_skb(struct sk_buff *skb, enum skb_drop_reason reason) @@ -1854,7 +1873,6 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, struct msghdr *msg, int len, struct ubuf_info *uarg) { - struct ubuf_info *orig_uarg = skb_zcopy(skb); int err, orig_len = skb->len; if (uarg->ops->link_skb) { @@ -1862,6 +1880,8 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, if (err) return err; } else { + struct ubuf_info *orig_uarg = skb_zcopy(skb); + /* An skb can only point to one uarg. This edge case happens * when TCP appends to an skb, but zerocopy_realloc triggered * a new alloc. @@ -1882,8 +1902,7 @@ int skb_zerocopy_iter_stream(struct sock *sk, struct sk_buff *skb, return err; } - if (!uarg->ops->link_skb) - skb_zcopy_set(skb, uarg, NULL); + skb_zcopy_set(skb, uarg, NULL); return skb->len - orig_len; } EXPORT_SYMBOL_GPL(skb_zerocopy_iter_stream); @@ -4139,6 +4158,9 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen) if (skb_zcopy(tgt) || skb_zcopy(skb)) return 0; + DEBUG_NET_WARN_ON_ONCE(tgt->pp_recycle != skb->pp_recycle); + DEBUG_NET_WARN_ON_ONCE(skb_cmp_decrypted(tgt, skb)); + todo = shiftlen; from = 0; to = skb_shinfo(tgt)->nr_frags; diff --git a/net/core/sock.c b/net/core/sock.c index 100e975073ca..9abc4fe25953 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1083,6 +1083,17 @@ bool sockopt_capable(int cap) } EXPORT_SYMBOL(sockopt_capable); +static int sockopt_validate_clockid(__kernel_clockid_t value) +{ + switch (value) { + case CLOCK_REALTIME: + case CLOCK_MONOTONIC: + case CLOCK_TAI: + return 0; + } + return -EINVAL; +} + /* * This is meant for all protocols to use and covers goings on * at the socket level. Everything here is generic. 
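From user space, the clockid that sockopt_validate_clockid() checks arrives through the SO_TXTIME socket option; an illustrative snippet (hypothetical, error handling elided):

#include <linux/net_tstamp.h>
#include <sys/socket.h>
#include <time.h>

static int enable_txtime(int fd)
{
	struct sock_txtime txtime = {
		.clockid = CLOCK_TAI,	/* on the allow-list above */
		.flags = 0,
	};

	/* e.g. CLOCK_THREAD_CPUTIME_ID is now rejected with EINVAL */
	return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &txtime, sizeof(txtime));
}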
@@ -1497,6 +1508,11 @@ set_sndbuf: ret = -EPERM; break; } + + ret = sockopt_validate_clockid(sk_txtime.clockid); + if (ret) + break; + sock_valbool_flag(sk, SOCK_TXTIME, true); sk->sk_clockid = sk_txtime.clockid; sk->sk_txtime_deadline_mode = @@ -2262,7 +2278,12 @@ static void sk_init_common(struct sock *sk) lockdep_set_class_and_name(&sk->sk_error_queue.lock, af_elock_keys + sk->sk_family, af_family_elock_key_strings[sk->sk_family]); - lockdep_set_class_and_name(&sk->sk_callback_lock, + if (sk->sk_kern_sock) + lockdep_set_class_and_name(&sk->sk_callback_lock, + af_kern_callback_keys + sk->sk_family, + af_family_kern_clock_key_strings[sk->sk_family]); + else + lockdep_set_class_and_name(&sk->sk_callback_lock, af_callback_keys + sk->sk_family, af_family_clock_key_strings[sk->sk_family]); } @@ -3460,18 +3481,6 @@ void sock_init_data_uid(struct socket *sock, struct sock *sk, kuid_t uid) } sk->sk_uid = uid; - rwlock_init(&sk->sk_callback_lock); - if (sk->sk_kern_sock) - lockdep_set_class_and_name( - &sk->sk_callback_lock, - af_kern_callback_keys + sk->sk_family, - af_family_kern_clock_key_strings[sk->sk_family]); - else - lockdep_set_class_and_name( - &sk->sk_callback_lock, - af_callback_keys + sk->sk_family, - af_family_clock_key_strings[sk->sk_family]); - sk->sk_state_change = sock_def_wakeup; sk->sk_data_ready = sock_def_readable; sk->sk_write_space = sock_def_write_space; diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c index 654122838025..a08eed9b9142 100644 --- a/net/core/sock_diag.c +++ b/net/core/sock_diag.c @@ -18,7 +18,7 @@ static const struct sock_diag_handler __rcu *sock_diag_handlers[AF_MAX]; -static struct sock_diag_inet_compat __rcu *inet_rcv_compat; +static const struct sock_diag_inet_compat __rcu *inet_rcv_compat; static struct workqueue_struct *broadcast_wq; @@ -187,8 +187,7 @@ void sock_diag_broadcast_destroy(struct sock *sk) void sock_diag_register_inet_compat(const struct sock_diag_inet_compat *ptr) { - xchg((__force const struct sock_diag_inet_compat **)&inet_rcv_compat, - ptr); + xchg(&inet_rcv_compat, RCU_INITIALIZER(ptr)); } EXPORT_SYMBOL_GPL(sock_diag_register_inet_compat); @@ -196,8 +195,7 @@ void sock_diag_unregister_inet_compat(const struct sock_diag_inet_compat *ptr) { const struct sock_diag_inet_compat *old; - old = xchg((__force const struct sock_diag_inet_compat **)&inet_rcv_compat, - NULL); + old = unrcu_pointer(xchg(&inet_rcv_compat, NULL)); WARN_ON_ONCE(old != ptr); } EXPORT_SYMBOL_GPL(sock_diag_unregister_inet_compat); diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index c9fb9ad87485..2079000691e2 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -382,38 +382,6 @@ proc_dolongvec_minmax_bpf_restricted(struct ctl_table *table, int write, #endif static struct ctl_table net_core_table[] = { - { - .procname = "wmem_max", - .data = &sysctl_wmem_max, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &min_sndbuf, - }, - { - .procname = "rmem_max", - .data = &sysctl_rmem_max, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &min_rcvbuf, - }, - { - .procname = "wmem_default", - .data = &sysctl_wmem_default, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &min_sndbuf, - }, - { - .procname = "rmem_default", - .data = &sysctl_rmem_default, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = proc_dointvec_minmax, - .extra1 = &min_rcvbuf, - }, { .procname = "mem_pcpu_rsv", .data = 
&net_hotdata.sysctl_mem_pcpu_rsv, @@ -697,6 +665,41 @@ static struct ctl_table netns_core_table[] = { .extra2 = SYSCTL_ONE, .proc_handler = proc_dou8vec_minmax, }, + /* sysctl_core_net_init() will set the values after this + * to readonly in network namespaces + */ + { + .procname = "wmem_max", + .data = &sysctl_wmem_max, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &min_sndbuf, + }, + { + .procname = "rmem_max", + .data = &sysctl_rmem_max, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &min_rcvbuf, + }, + { + .procname = "wmem_default", + .data = &sysctl_wmem_default, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &min_sndbuf, + }, + { + .procname = "rmem_default", + .data = &sysctl_rmem_default, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &min_rcvbuf, + }, }; static int __init fb_tunnels_only_for_init_net_sysctl_setup(char *str) @@ -724,8 +727,14 @@ static __net_init int sysctl_core_net_init(struct net *net) if (tbl == NULL) goto err_dup; - for (i = 0; i < table_size; ++i) + for (i = 0; i < table_size; ++i) { + if (tbl[i].data == &sysctl_wmem_max) + break; + tbl[i].data += (char *)net - (char *)&init_net; + } + for (; i < table_size; ++i) + tbl[i].mode &= ~0222; } net->core.sysctl_hdr = register_net_sysctl_sz(net, "net/core", tbl, table_size); diff --git a/net/core/timestamping.c b/net/core/timestamping.c index 04840697fe79..3717fb152ecc 100644 --- a/net/core/timestamping.c +++ b/net/core/timestamping.c @@ -25,7 +25,8 @@ void skb_clone_tx_timestamp(struct sk_buff *skb) struct sk_buff *clone; unsigned int type; - if (!skb->sk) + if (!skb->sk || !skb->dev || + !phy_is_default_hwtstamp(skb->dev->phydev)) return; type = classify(skb); @@ -47,7 +48,7 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb) struct mii_timestamper *mii_ts; unsigned int type; - if (!skb->dev || !skb->dev->phydev || !skb->dev->phydev->mii_ts) + if (!skb->dev || !phy_is_default_hwtstamp(skb->dev->phydev)) return false; if (skb_headroom(skb) < ETH_HLEN) diff --git a/net/core/xdp.c b/net/core/xdp.c index 022c12059cf2..bcc5551c6424 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -127,10 +127,8 @@ void xdp_unreg_mem_model(struct xdp_mem_info *mem) return; if (type == MEM_TYPE_PAGE_POOL) { - rcu_read_lock(); - xa = rhashtable_lookup(mem_id_ht, &id, mem_id_rht_params); + xa = rhashtable_lookup_fast(mem_id_ht, &id, mem_id_rht_params); page_pool_destroy(xa->page_pool); - rcu_read_unlock(); } } EXPORT_SYMBOL_GPL(xdp_unreg_mem_model); diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c index 251a57cf5822..fecc8190064f 100644 --- a/net/dccp/minisocks.c +++ b/net/dccp/minisocks.c @@ -54,17 +54,10 @@ void dccp_time_wait(struct sock *sk, int state, int timeo) if (state == DCCP_TIME_WAIT) timeo = DCCP_TIMEWAIT_LEN; - /* tw_timer is pinned, so we need to make sure BH are disabled - * in following section, otherwise timer handler could run before - * we complete the initialization. - */ - local_bh_disable(); - inet_twsk_schedule(tw, timeo); /* Linkage updates. * Note that access to tw after this point is illegal. */ - inet_twsk_hashdance(tw, sk, &dccp_hashinfo); - local_bh_enable(); + inet_twsk_hashdance_schedule(tw, sk, &dccp_hashinfo, timeo); } else { /* Sorry, if we're out of memory, just CLOSE this * socket up. 
We've got bigger problems than diff --git a/net/devlink/dpipe.c b/net/devlink/dpipe.c index a72a9292efc5..55009b377447 100644 --- a/net/devlink/dpipe.c +++ b/net/devlink/dpipe.c @@ -839,7 +839,7 @@ EXPORT_SYMBOL_GPL(devlink_dpipe_table_counter_enabled); */ int devl_dpipe_table_register(struct devlink *devlink, const char *table_name, - struct devlink_dpipe_table_ops *table_ops, + const struct devlink_dpipe_table_ops *table_ops, void *priv, bool counter_control_extern) { struct devlink_dpipe_table *table; diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig index 8e698bea99a3..2dfe9063613f 100644 --- a/net/dsa/Kconfig +++ b/net/dsa/Kconfig @@ -129,7 +129,7 @@ config NET_DSA_TAG_RTL4_A tristate "Tag driver for Realtek 4 byte protocol A tags" help Say Y or M if you want to enable support for tagging frames for the - Realtek switches with 4 byte protocol A tags, sich as found in + Realtek switches with 4 byte protocol A tags, such as found in the Realtek RTL8366RB. config NET_DSA_TAG_RTL8_4 @@ -166,6 +166,12 @@ config NET_DSA_TAG_TRAILER Say Y or M if you want to enable support for tagging frames at with a trailed. e.g. Marvell 88E6060. +config NET_DSA_TAG_VSC73XX_8021Q + tristate "Tag driver for Microchip/Vitesse VSC73xx family of switches, using VLAN" + help + Say Y or M if you want to enable support for tagging frames with a + custom VLAN-based header. + config NET_DSA_TAG_XRS700X tristate "Tag driver for XRS700x switches" help diff --git a/net/dsa/Makefile b/net/dsa/Makefile index 8a1894a42552..555c07cfeb71 100644 --- a/net/dsa/Makefile +++ b/net/dsa/Makefile @@ -37,6 +37,7 @@ obj-$(CONFIG_NET_DSA_TAG_RTL8_4) += tag_rtl8_4.o obj-$(CONFIG_NET_DSA_TAG_RZN1_A5PSW) += tag_rzn1_a5psw.o obj-$(CONFIG_NET_DSA_TAG_SJA1105) += tag_sja1105.o obj-$(CONFIG_NET_DSA_TAG_TRAILER) += tag_trailer.o +obj-$(CONFIG_NET_DSA_TAG_VSC73XX_8021Q) += tag_vsc73xx_8021q.o obj-$(CONFIG_NET_DSA_TAG_XRS700X) += tag_xrs700x.o # for tracing framework to find trace.h diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c index 12521a7d4048..668c729946ea 100644 --- a/net/dsa/dsa.c +++ b/net/dsa/dsa.c @@ -1507,9 +1507,7 @@ static int dsa_switch_probe(struct dsa_switch *ds) if (ds->phylink_mac_ops) { if (ds->ops->phylink_mac_select_pcs || - ds->ops->phylink_mac_prepare || ds->ops->phylink_mac_config || - ds->ops->phylink_mac_finish || ds->ops->phylink_mac_link_down || ds->ops->phylink_mac_link_up) return -EINVAL; diff --git a/net/dsa/port.c b/net/dsa/port.c index 9a249d4ac3a5..25258b33e59e 100644 --- a/net/dsa/port.c +++ b/net/dsa/port.c @@ -1467,10 +1467,34 @@ int dsa_port_change_conduit(struct dsa_port *dp, struct net_device *conduit, */ dsa_user_unsync_ha(dev); + /* If live-changing, we also need to uninstall the user device address + * from the port FDB and the conduit interface. + */ + if (dev->flags & IFF_UP) + dsa_user_host_uc_uninstall(dev); + err = dsa_port_assign_conduit(dp, conduit, extack, true); if (err) goto rewind_old_addrs; + /* If the port doesn't have its own MAC address and relies on the DSA + * conduit's one, inherit it again from the new DSA conduit. + */ + if (is_zero_ether_addr(dp->mac)) + eth_hw_addr_inherit(dev, conduit); + + /* If live-changing, we need to install the user device address to the + * port FDB and the conduit interface. 
+ */ + if (dev->flags & IFF_UP) { + err = dsa_user_host_uc_install(dev, dev->dev_addr); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Failed to install host UC address"); + goto rewind_addr_inherit; + } + } + dsa_user_sync_ha(dev); if (vlan_filtering) { @@ -1500,10 +1524,26 @@ rewind_new_vlan: rewind_new_addrs: dsa_user_unsync_ha(dev); + if (dev->flags & IFF_UP) + dsa_user_host_uc_uninstall(dev); + +rewind_addr_inherit: + if (is_zero_ether_addr(dp->mac)) + eth_hw_addr_inherit(dev, old_conduit); + dsa_port_assign_conduit(dp, old_conduit, NULL, false); /* Restore the objects on the old CPU port */ rewind_old_addrs: + if (dev->flags & IFF_UP) { + tmp = dsa_user_host_uc_install(dev, dev->dev_addr); + if (tmp) { + dev_err(ds->dev, + "port %d failed to restore host UC address: %pe\n", + dp->index, ERR_PTR(tmp)); + } + } + dsa_user_sync_ha(dev); if (vlan_filtering) { @@ -1549,21 +1589,6 @@ dsa_port_phylink_mac_select_pcs(struct phylink_config *config, return pcs; } -static int dsa_port_phylink_mac_prepare(struct phylink_config *config, - unsigned int mode, - phy_interface_t interface) -{ - struct dsa_port *dp = dsa_phylink_to_port(config); - struct dsa_switch *ds = dp->ds; - int err = 0; - - if (ds->ops->phylink_mac_prepare) - err = ds->ops->phylink_mac_prepare(ds, dp->index, mode, - interface); - - return err; -} - static void dsa_port_phylink_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) @@ -1577,21 +1602,6 @@ static void dsa_port_phylink_mac_config(struct phylink_config *config, ds->ops->phylink_mac_config(ds, dp->index, mode, state); } -static int dsa_port_phylink_mac_finish(struct phylink_config *config, - unsigned int mode, - phy_interface_t interface) -{ - struct dsa_port *dp = dsa_phylink_to_port(config); - struct dsa_switch *ds = dp->ds; - int err = 0; - - if (ds->ops->phylink_mac_finish) - err = ds->ops->phylink_mac_finish(ds, dp->index, mode, - interface); - - return err; -} - static void dsa_port_phylink_mac_link_down(struct phylink_config *config, unsigned int mode, phy_interface_t interface) @@ -1624,9 +1634,7 @@ static void dsa_port_phylink_mac_link_up(struct phylink_config *config, static const struct phylink_mac_ops dsa_port_phylink_mac_ops = { .mac_select_pcs = dsa_port_phylink_mac_select_pcs, - .mac_prepare = dsa_port_phylink_mac_prepare, .mac_config = dsa_port_phylink_mac_config, - .mac_finish = dsa_port_phylink_mac_finish, .mac_link_down = dsa_port_phylink_mac_link_down, .mac_link_up = dsa_port_phylink_mac_link_up, }; diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index 71b26ae6db39..3ee53e28ec2e 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -286,7 +286,8 @@ int dsa_switch_tag_8021q_vlan_del(struct dsa_switch *ds, * be used for VLAN-unaware bridging. 
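With the widened dsa_tag_8021q_bridge_join() signature added below, a tag_8021q user can typically route its .port_bridge_join op straight through (foo_port_bridge_join is a hypothetical driver callback):

static int foo_port_bridge_join(struct dsa_switch *ds, int port,
				struct dsa_bridge bridge,
				bool *tx_fwd_offload,
				struct netlink_ext_ack *extack)
{
	/* dsa_tag_8021q_bridge_join() now sets *tx_fwd_offload itself */
	return dsa_tag_8021q_bridge_join(ds, port, bridge, tx_fwd_offload,
					 extack);
}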
*/ int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port, - struct dsa_bridge bridge) + struct dsa_bridge bridge, bool *tx_fwd_offload, + struct netlink_ext_ack *extack) { struct dsa_port *dp = dsa_to_port(ds, port); u16 standalone_vid, bridge_vid; @@ -304,6 +305,8 @@ int dsa_tag_8021q_bridge_join(struct dsa_switch *ds, int port, dsa_port_tag_8021q_vlan_del(dp, standalone_vid, false); + *tx_fwd_offload = true; + return 0; } EXPORT_SYMBOL_GPL(dsa_tag_8021q_bridge_join); @@ -468,8 +471,8 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, } EXPORT_SYMBOL_GPL(dsa_8021q_xmit); -struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit, - int vbid) +static struct net_device * +dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit, int vbid) { struct dsa_port *cpu_dp = conduit->dsa_ptr; struct dsa_switch_tree *dst = cpu_dp->dst; @@ -495,30 +498,91 @@ struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit, return NULL; } -EXPORT_SYMBOL_GPL(dsa_tag_8021q_find_port_by_vbid); -void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id, - int *vbid) +struct net_device *dsa_tag_8021q_find_user(struct net_device *conduit, + int source_port, int switch_id, + int vid, int vbid) { - u16 vid, tci; + /* Always prefer precise source port information, if available */ + if (source_port != -1 && switch_id != -1) + return dsa_conduit_find_user(conduit, switch_id, source_port); + else if (vbid >= 1) + return dsa_tag_8021q_find_port_by_vbid(conduit, vbid); + + return dsa_find_designated_bridge_port_by_vid(conduit, vid); +} +EXPORT_SYMBOL_GPL(dsa_tag_8021q_find_user); + +/** + * dsa_8021q_rcv - Decode source information from tag_8021q header + * @skb: RX socket buffer + * @source_port: pointer to storage for precise source port information. + * If this is known already from outside tag_8021q, the pre-initialized + * value is preserved. If not known, pass -1. + * @switch_id: similar to source_port. + * @vbid: pointer to storage for imprecise bridge ID. Must be pre-initialized + * with -1. If a positive value is returned, the source_port and switch_id + * are invalid. + * @vid: pointer to storage for original VID, in case tag_8021q decoding failed. + * + * If the packet has a tag_8021q header, decode it and set @source_port, + * @switch_id and @vbid, and strip the header. Otherwise set @vid and keep the + * header in the hwaccel area of the packet. 
+ */ +void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id, + int *vbid, int *vid) +{ + int tmp_source_port, tmp_switch_id, tmp_vbid; + __be16 vlan_proto; + u16 tmp_vid, tci; if (skb_vlan_tag_present(skb)) { + vlan_proto = skb->vlan_proto; tci = skb_vlan_tag_get(skb); __vlan_hwaccel_clear_tag(skb); } else { + struct vlan_ethhdr *hdr = vlan_eth_hdr(skb); + + vlan_proto = hdr->h_vlan_proto; skb_push_rcsum(skb, ETH_HLEN); __skb_vlan_pop(skb, &tci); skb_pull_rcsum(skb, ETH_HLEN); } - vid = tci & VLAN_VID_MASK; + tmp_vid = tci & VLAN_VID_MASK; + if (!vid_is_dsa_8021q(tmp_vid)) { + /* Not a tag_8021q frame, so return the VID to the + * caller for further processing, and put the tag back + */ + if (vid) + *vid = tmp_vid; - *source_port = dsa_8021q_rx_source_port(vid); - *switch_id = dsa_8021q_rx_switch_id(vid); + __vlan_hwaccel_put_tag(skb, vlan_proto, tci); + + return; + } + + tmp_source_port = dsa_8021q_rx_source_port(tmp_vid); + tmp_switch_id = dsa_8021q_rx_switch_id(tmp_vid); + tmp_vbid = dsa_tag_8021q_rx_vbid(tmp_vid); + + /* Precise source port information is unknown when receiving from a + * VLAN-unaware bridging domain, and tmp_source_port and tmp_switch_id + * are zeroes in this case. + * + * Preserve the source information from hardware-specific mechanisms, + * if available. This allows us to not overwrite a valid source port + * and switch ID with less precise values. + */ + if (tmp_vbid == 0 && *source_port == -1) + *source_port = tmp_source_port; + if (tmp_vbid == 0 && *switch_id == -1) + *switch_id = tmp_switch_id; if (vbid) - *vbid = dsa_tag_8021q_rx_vbid(vid); + *vbid = tmp_vbid; skb->priority = (tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + return; } EXPORT_SYMBOL_GPL(dsa_8021q_rcv); diff --git a/net/dsa/tag_8021q.h b/net/dsa/tag_8021q.h index 41f7167ac520..27b8906f99ec 100644 --- a/net/dsa/tag_8021q.h +++ b/net/dsa/tag_8021q.h @@ -14,10 +14,11 @@ struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, u16 tpid, u16 tci); void dsa_8021q_rcv(struct sk_buff *skb, int *source_port, int *switch_id, - int *vbid); + int *vbid, int *vid); -struct net_device *dsa_tag_8021q_find_port_by_vbid(struct net_device *conduit, - int vbid); +struct net_device *dsa_tag_8021q_find_user(struct net_device *conduit, + int source_port, int switch_id, + int vid, int vbid); int dsa_switch_tag_8021q_vlan_add(struct dsa_switch *ds, struct dsa_notifier_tag_8021q_vlan_info *info); diff --git a/net/dsa/tag_ocelot_8021q.c b/net/dsa/tag_ocelot_8021q.c index b059381310fe..8e8b1bef6af6 100644 --- a/net/dsa/tag_ocelot_8021q.c +++ b/net/dsa/tag_ocelot_8021q.c @@ -81,7 +81,7 @@ static struct sk_buff *ocelot_rcv(struct sk_buff *skb, { int src_port, switch_id; - dsa_8021q_rcv(skb, &src_port, &switch_id, NULL); + dsa_8021q_rcv(skb, &src_port, &switch_id, NULL, NULL); skb->dev = dsa_conduit_find_user(netdev, switch_id, src_port); if (!skb->dev) diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c index 1aba1d05c27a..3e902af7eea6 100644 --- a/net/dsa/tag_sja1105.c +++ b/net/dsa/tag_sja1105.c @@ -472,37 +472,14 @@ static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb) return ntohs(eth_hdr(skb)->h_proto) == ETH_P_SJA1110; } -/* If the VLAN in the packet is a tag_8021q one, set @source_port and - * @switch_id and strip the header. Otherwise set @vid and keep it in the - * packet. 
- */ -static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port, - int *switch_id, int *vbid, u16 *vid) -{ - struct vlan_ethhdr *hdr = vlan_eth_hdr(skb); - u16 vlan_tci; - - if (skb_vlan_tag_present(skb)) - vlan_tci = skb_vlan_tag_get(skb); - else - vlan_tci = ntohs(hdr->h_vlan_TCI); - - if (vid_is_dsa_8021q(vlan_tci & VLAN_VID_MASK)) - return dsa_8021q_rcv(skb, source_port, switch_id, vbid); - - /* Try our best with imprecise RX */ - *vid = vlan_tci & VLAN_VID_MASK; -} - static struct sk_buff *sja1105_rcv(struct sk_buff *skb, struct net_device *netdev) { - int source_port = -1, switch_id = -1, vbid = -1; + int source_port = -1, switch_id = -1, vbid = -1, vid = -1; struct sja1105_meta meta = {0}; struct ethhdr *hdr; bool is_link_local; bool is_meta; - u16 vid; hdr = eth_hdr(skb); is_link_local = sja1105_is_link_local(skb); @@ -524,37 +501,16 @@ static struct sk_buff *sja1105_rcv(struct sk_buff *skb, /* Normal data plane traffic and link-local frames are tagged with * a tag_8021q VLAN which we have to strip */ - if (sja1105_skb_has_tag_8021q(skb)) { - int tmp_source_port = -1, tmp_switch_id = -1; - - sja1105_vlan_rcv(skb, &tmp_source_port, &tmp_switch_id, &vbid, - &vid); - /* Preserve the source information from the INCL_SRCPT option, - * if available. This allows us to not overwrite a valid source - * port and switch ID with zeroes when receiving link-local - * frames from a VLAN-unaware bridged port (non-zero vbid) or a - * VLAN-aware bridged port (non-zero vid). Furthermore, the - * tag_8021q source port information is only of trust when the - * vbid is 0 (precise port). Otherwise, tmp_source_port and - * tmp_switch_id will be zeroes. - */ - if (vbid == 0 && source_port == -1) - source_port = tmp_source_port; - if (vbid == 0 && switch_id == -1) - switch_id = tmp_switch_id; - } else if (source_port == -1 && switch_id == -1) { + if (sja1105_skb_has_tag_8021q(skb)) + dsa_8021q_rcv(skb, &source_port, &switch_id, &vbid, &vid); + else if (source_port == -1 && switch_id == -1) /* Packets with no source information have no chance of * getting accepted, drop them straight away. 
*/ return NULL; - } - if (source_port != -1 && switch_id != -1) - skb->dev = dsa_conduit_find_user(netdev, switch_id, source_port); - else if (vbid >= 1) - skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid); - else - skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid); + skb->dev = dsa_tag_8021q_find_user(netdev, source_port, switch_id, + vid, vbid); if (!skb->dev) { netdev_warn(netdev, "Couldn't decode source port\n"); return NULL; @@ -677,9 +633,8 @@ static struct sk_buff *sja1110_rcv_inband_control_extension(struct sk_buff *skb, static struct sk_buff *sja1110_rcv(struct sk_buff *skb, struct net_device *netdev) { - int source_port = -1, switch_id = -1, vbid = -1; + int source_port = -1, switch_id = -1, vbid = -1, vid = -1; bool host_only = false; - u16 vid = 0; if (sja1110_skb_has_inband_control_extension(skb)) { skb = sja1110_rcv_inband_control_extension(skb, &source_port, @@ -691,14 +646,11 @@ static struct sk_buff *sja1110_rcv(struct sk_buff *skb, /* Packets with in-band control extensions might still have RX VLANs */ if (likely(sja1105_skb_has_tag_8021q(skb))) - sja1105_vlan_rcv(skb, &source_port, &switch_id, &vbid, &vid); + dsa_8021q_rcv(skb, &source_port, &switch_id, &vbid, &vid); + + skb->dev = dsa_tag_8021q_find_user(netdev, source_port, switch_id, + vid, vbid); - if (vbid >= 1) - skb->dev = dsa_tag_8021q_find_port_by_vbid(netdev, vbid); - else if (source_port == -1 || switch_id == -1) - skb->dev = dsa_find_designated_bridge_port_by_vid(netdev, vid); - else - skb->dev = dsa_conduit_find_user(netdev, switch_id, source_port); if (!skb->dev) { netdev_warn(netdev, "Couldn't decode source port\n"); return NULL; diff --git a/net/dsa/tag_vsc73xx_8021q.c b/net/dsa/tag_vsc73xx_8021q.c new file mode 100644 index 000000000000..af121a9aff7f --- /dev/null +++ b/net/dsa/tag_vsc73xx_8021q.c @@ -0,0 +1,68 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/* Copyright (C) 2024 Pawel Dembicki + */ +#include <linux/dsa/8021q.h> + +#include "tag.h" +#include "tag_8021q.h" + +#define VSC73XX_8021Q_NAME "vsc73xx-8021q" + +static struct sk_buff * +vsc73xx_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct dsa_port *dp = dsa_user_to_port(netdev); + u16 queue_mapping = skb_get_queue_mapping(skb); + u16 tx_vid = dsa_tag_8021q_standalone_vid(dp); + u8 pcp; + + if (skb->offload_fwd_mark) { + unsigned int bridge_num = dsa_port_bridge_num_get(dp); + struct net_device *br = dsa_port_bridge_dev_get(dp); + + if (br_vlan_enabled(br)) + return skb; + + tx_vid = dsa_tag_8021q_bridge_vid(bridge_num); + } + + pcp = netdev_txq_to_tc(netdev, queue_mapping); + + return dsa_8021q_xmit(skb, netdev, ETH_P_8021Q, + ((pcp << VLAN_PRIO_SHIFT) | tx_vid)); +} + +static struct sk_buff * +vsc73xx_rcv(struct sk_buff *skb, struct net_device *netdev) +{ + int src_port = -1, switch_id = -1, vbid = -1, vid = -1; + + dsa_8021q_rcv(skb, &src_port, &switch_id, &vbid, &vid); + + skb->dev = dsa_tag_8021q_find_user(netdev, src_port, switch_id, + vid, vbid); + if (!skb->dev) { + dev_warn_ratelimited(&netdev->dev, + "Couldn't decode source port\n"); + return NULL; + } + + dsa_default_offload_fwd_mark(skb); + + return skb; +} + +static const struct dsa_device_ops vsc73xx_8021q_netdev_ops = { + .name = VSC73XX_8021Q_NAME, + .proto = DSA_TAG_PROTO_VSC73XX_8021Q, + .xmit = vsc73xx_xmit, + .rcv = vsc73xx_rcv, + .needed_headroom = VLAN_HLEN, + .promisc_on_conduit = true, +}; + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("DSA tag driver for VSC73XX family of switches, using VLAN"); +MODULE_ALIAS_DSA_TAG_DRIVER(DSA_TAG_PROTO_VSC73XX_8021Q,
VSC73XX_8021Q_NAME); + +module_dsa_tag_driver(vsc73xx_8021q_netdev_ops); diff --git a/net/dsa/user.c b/net/dsa/user.c index 867c5fe9a4da..f5adfa1d978a 100644 --- a/net/dsa/user.c +++ b/net/dsa/user.c @@ -355,60 +355,82 @@ static int dsa_user_get_iflink(const struct net_device *dev) return READ_ONCE(dsa_user_to_conduit(dev)->ifindex); } -static int dsa_user_open(struct net_device *dev) +int dsa_user_host_uc_install(struct net_device *dev, const u8 *addr) { struct net_device *conduit = dsa_user_to_conduit(dev); struct dsa_port *dp = dsa_user_to_port(dev); struct dsa_switch *ds = dp->ds; int err; + if (dsa_switch_supports_uc_filtering(ds)) { + err = dsa_port_standalone_host_fdb_add(dp, addr, 0); + if (err) + goto out; + } + + if (!ether_addr_equal(addr, conduit->dev_addr)) { + err = dev_uc_add(conduit, addr); + if (err < 0) + goto del_host_addr; + } + + return 0; + +del_host_addr: + if (dsa_switch_supports_uc_filtering(ds)) + dsa_port_standalone_host_fdb_del(dp, addr, 0); +out: + return err; +} + +void dsa_user_host_uc_uninstall(struct net_device *dev) +{ + struct net_device *conduit = dsa_user_to_conduit(dev); + struct dsa_port *dp = dsa_user_to_port(dev); + struct dsa_switch *ds = dp->ds; + + if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr)) + dev_uc_del(conduit, dev->dev_addr); + + if (dsa_switch_supports_uc_filtering(ds)) + dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0); +} + +static int dsa_user_open(struct net_device *dev) +{ + struct net_device *conduit = dsa_user_to_conduit(dev); + struct dsa_port *dp = dsa_user_to_port(dev); + int err; + err = dev_open(conduit, NULL); if (err < 0) { netdev_err(dev, "failed to open conduit %s\n", conduit->name); goto out; } - if (dsa_switch_supports_uc_filtering(ds)) { - err = dsa_port_standalone_host_fdb_add(dp, dev->dev_addr, 0); - if (err) - goto out; - } - - if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr)) { - err = dev_uc_add(conduit, dev->dev_addr); - if (err < 0) - goto del_host_addr; - } + err = dsa_user_host_uc_install(dev, dev->dev_addr); + if (err) + goto out; err = dsa_port_enable_rt(dp, dev->phydev); if (err) - goto del_unicast; + goto out_del_host_uc; return 0; -del_unicast: - if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr)) - dev_uc_del(conduit, dev->dev_addr); -del_host_addr: - if (dsa_switch_supports_uc_filtering(ds)) - dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0); +out_del_host_uc: + dsa_user_host_uc_uninstall(dev); out: return err; } static int dsa_user_close(struct net_device *dev) { - struct net_device *conduit = dsa_user_to_conduit(dev); struct dsa_port *dp = dsa_user_to_port(dev); - struct dsa_switch *ds = dp->ds; dsa_port_disable_rt(dp); - if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr)) - dev_uc_del(conduit, dev->dev_addr); - - if (dsa_switch_supports_uc_filtering(ds)) - dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0); + dsa_user_host_uc_uninstall(dev); return 0; } @@ -448,7 +470,6 @@ static void dsa_user_set_rx_mode(struct net_device *dev) static int dsa_user_set_mac_address(struct net_device *dev, void *a) { - struct net_device *conduit = dsa_user_to_conduit(dev); struct dsa_port *dp = dsa_user_to_port(dev); struct dsa_switch *ds = dp->ds; struct sockaddr *addr = a; @@ -470,34 +491,16 @@ static int dsa_user_set_mac_address(struct net_device *dev, void *a) if (!(dev->flags & IFF_UP)) goto out_change_dev_addr; - if (dsa_switch_supports_uc_filtering(ds)) { - err = dsa_port_standalone_host_fdb_add(dp, addr->sa_data, 0); - if (err) - return err; - } + err = 
dsa_user_host_uc_install(dev, addr->sa_data); + if (err) + return err; - if (!ether_addr_equal(addr->sa_data, conduit->dev_addr)) { - err = dev_uc_add(conduit, addr->sa_data); - if (err < 0) - goto del_unicast; - } - - if (!ether_addr_equal(dev->dev_addr, conduit->dev_addr)) - dev_uc_del(conduit, dev->dev_addr); - - if (dsa_switch_supports_uc_filtering(ds)) - dsa_port_standalone_host_fdb_del(dp, dev->dev_addr, 0); + dsa_user_host_uc_uninstall(dev); out_change_dev_addr: eth_hw_addr_set(dev, addr->sa_data); return 0; - -del_unicast: - if (dsa_switch_supports_uc_filtering(ds)) - dsa_port_standalone_host_fdb_del(dp, addr->sa_data, 0); - - return err; } struct dsa_user_dump_ctx { @@ -1726,7 +1729,7 @@ static int dsa_user_set_rxnfc(struct net_device *dev, } static int dsa_user_get_ts_info(struct net_device *dev, - struct ethtool_ts_info *ts) + struct kernel_ethtool_ts_info *ts) { struct dsa_user_priv *p = netdev_priv(dev); struct dsa_switch *ds = p->dp->ds; @@ -2879,12 +2882,6 @@ int dsa_user_change_conduit(struct net_device *dev, struct net_device *conduit, ERR_PTR(err)); } - /* If the port doesn't have its own MAC address and relies on the DSA - * conduit's one, inherit it again from the new DSA conduit. - */ - if (is_zero_ether_addr(dp->mac)) - eth_hw_addr_inherit(dev, conduit); - return 0; out_revert_conduit_link: diff --git a/net/dsa/user.h b/net/dsa/user.h index 996069130bea..016884bead3c 100644 --- a/net/dsa/user.h +++ b/net/dsa/user.h @@ -42,6 +42,8 @@ int dsa_user_suspend(struct net_device *user_dev); int dsa_user_resume(struct net_device *user_dev); int dsa_user_register_notifier(void); void dsa_user_unregister_notifier(void); +int dsa_user_host_uc_install(struct net_device *dev, const u8 *addr); +void dsa_user_host_uc_uninstall(struct net_device *dev); void dsa_user_sync_ha(struct net_device *dev); void dsa_user_unsync_ha(struct net_device *dev); void dsa_user_setup_tagger(struct net_device *user); diff --git a/net/ethtool/Makefile b/net/ethtool/Makefile index 504f954a1b28..9a190635fe95 100644 --- a/net/ethtool/Makefile +++ b/net/ethtool/Makefile @@ -8,4 +8,4 @@ ethtool_nl-y := netlink.o bitset.o strset.o linkinfo.o linkmodes.o rss.o \ linkstate.o debug.o wol.o features.o privflags.o rings.o \ channels.o coalesce.o pause.o eee.o tsinfo.o cabletest.o \ tunnels.o fec.o eeprom.o stats.o phc_vclocks.o mm.o \ - module.o pse-pd.o plca.o mm.o + module.o cmis_fw_update.o cmis_cdb.o pse-pd.o plca.o mm.o diff --git a/net/ethtool/cabletest.c b/net/ethtool/cabletest.c index 06a151165c31..f6f136ec7ddf 100644 --- a/net/ethtool/cabletest.c +++ b/net/ethtool/cabletest.c @@ -207,10 +207,6 @@ err: } EXPORT_SYMBOL_GPL(ethnl_cable_test_fault_length); -struct cable_test_tdr_req_info { - struct ethnl_req_info base; -}; - static const struct nla_policy cable_test_tdr_act_cfg_policy[] = { [ETHTOOL_A_CABLE_TEST_TDR_CFG_FIRST] = { .type = NLA_U32 }, [ETHTOOL_A_CABLE_TEST_TDR_CFG_LAST] = { .type = NLA_U32 }, diff --git a/net/ethtool/channels.c b/net/ethtool/channels.c index 7b4bbd674bae..cee188da54f8 100644 --- a/net/ethtool/channels.c +++ b/net/ethtool/channels.c @@ -171,11 +171,9 @@ ethnl_set_channels(struct ethnl_req_info *req_info, struct genl_info *info) */ if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use)) max_rxnfc_in_use = 0; - if (!netif_is_rxfh_configured(dev) || - ethtool_get_max_rxfh_channel(dev, &max_rxfh_in_use)) - max_rxfh_in_use = 0; + max_rxfh_in_use = ethtool_get_max_rxfh_channel(dev); if (channels.combined_count + channels.rx_count <= max_rxfh_in_use) { - GENL_SET_ERR_MSG(info, 
"requested channel counts are too low for existing indirection table settings"); + GENL_SET_ERR_MSG_FMT(info, "requested channel counts are too low for existing indirection table (%d)", max_rxfh_in_use); return -EINVAL; } if (channels.combined_count + channels.rx_count <= max_rxnfc_in_use) { diff --git a/net/ethtool/cmis.h b/net/ethtool/cmis.h new file mode 100644 index 000000000000..e71cc3e1b7eb --- /dev/null +++ b/net/ethtool/cmis.h @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#define ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH 120 +#define ETHTOOL_CMIS_CDB_CMD_PAGE 0x9F +#define ETHTOOL_CMIS_CDB_PAGE_I2C_ADDR 0x50 + +/** + * struct ethtool_cmis_cdb - CDB commands parameters + * @cmis_rev: CMIS revision major. + * @read_write_len_ext: Allowable additional number of byte octets to the LPL + * in a READ or a WRITE CDB commands. + * @max_completion_time: Maximum CDB command completion time in msec. + */ +struct ethtool_cmis_cdb { + u8 cmis_rev; + u8 read_write_len_ext; + u16 max_completion_time; +}; + +enum ethtool_cmis_cdb_cmd_id { + ETHTOOL_CMIS_CDB_CMD_QUERY_STATUS = 0x0000, + ETHTOOL_CMIS_CDB_CMD_MODULE_FEATURES = 0x0040, + ETHTOOL_CMIS_CDB_CMD_FW_MANAGMENT_FEATURES = 0x0041, + ETHTOOL_CMIS_CDB_CMD_START_FW_DOWNLOAD = 0x0101, + ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_LPL = 0x0103, + ETHTOOL_CMIS_CDB_CMD_COMPLETE_FW_DOWNLOAD = 0x0107, + ETHTOOL_CMIS_CDB_CMD_RUN_FW_IMAGE = 0x0109, + ETHTOOL_CMIS_CDB_CMD_COMMIT_FW_IMAGE = 0x010A, +}; + +/** + * struct ethtool_cmis_cdb_request - CDB commands request fields as decribed in + * the CMIS standard + * @id: Command ID. + * @epl_len: EPL memory length. + * @lpl_len: LPL memory length. + * @chk_code: Check code for the previous field and the payload. + * @resv1: Added to match the CMIS standard request continuity. + * @resv2: Added to match the CMIS standard request continuity. + * @payload: Payload for the CDB commands. + */ +struct ethtool_cmis_cdb_request { + __be16 id; + struct_group(body, + __be16 epl_len; + u8 lpl_len; + u8 chk_code; + u8 resv1; + u8 resv2; + u8 payload[ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH]; + ); +}; + +#define CDB_F_COMPLETION_VALID BIT(0) +#define CDB_F_STATUS_VALID BIT(1) +#define CDB_F_MODULE_STATE_VALID BIT(2) + +/** + * struct ethtool_cmis_cdb_cmd_args - CDB commands execution arguments + * @req: CDB command fields as described in the CMIS standard. + * @max_duration: Maximum duration time for command completion in msec. + * @read_write_len_ext: Allowable additional number of byte octets to the LPL + * in a READ or a WRITE commands. + * @msleep_pre_rpl: Waiting time before checking reply in msec. + * @rpl_exp_len: Expected reply length in bytes. + * @flags: Validation flags for CDB commands. + * @err_msg: Error message to be sent to user space. + */ +struct ethtool_cmis_cdb_cmd_args { + struct ethtool_cmis_cdb_request req; + u16 max_duration; + u8 read_write_len_ext; + u8 msleep_pre_rpl; + u8 rpl_exp_len; + u8 flags; + char *err_msg; +}; + +/** + * struct ethtool_cmis_cdb_rpl_hdr - CDB commands reply header arguments + * @rpl_len: Reply length. + * @rpl_chk_code: Reply check code. + */ +struct ethtool_cmis_cdb_rpl_hdr { + u8 rpl_len; + u8 rpl_chk_code; +}; + +/** + * struct ethtool_cmis_cdb_rpl - CDB commands reply arguments + * @hdr: CDB commands reply header arguments. + * @payload: Payload for the CDB commands reply. 
+ */ +struct ethtool_cmis_cdb_rpl { + struct ethtool_cmis_cdb_rpl_hdr hdr; + u8 payload[ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH]; +}; + +u32 ethtool_cmis_get_max_payload_size(u8 num_of_byte_octs); + +void ethtool_cmis_cdb_compose_args(struct ethtool_cmis_cdb_cmd_args *args, + enum ethtool_cmis_cdb_cmd_id cmd, u8 *pl, + u8 lpl_len, u16 max_duration, + u8 read_write_len_ext, u16 msleep_pre_rpl, + u8 rpl_exp_len, u8 flags); + +void ethtool_cmis_cdb_check_completion_flag(u8 cmis_rev, u8 *flags); + +void ethtool_cmis_page_init(struct ethtool_module_eeprom *page_data, + u8 page, u32 offset, u32 length); +void ethtool_cmis_page_fini(struct ethtool_module_eeprom *page_data); + +struct ethtool_cmis_cdb * +ethtool_cmis_cdb_init(struct net_device *dev, + const struct ethtool_module_fw_flash_params *params, + struct ethnl_module_fw_flash_ntf_params *ntf_params); +void ethtool_cmis_cdb_fini(struct ethtool_cmis_cdb *cdb); + +int ethtool_cmis_wait_for_cond(struct net_device *dev, u8 flags, u8 flag, + u16 max_duration, u32 offset, + bool (*cond_success)(u8), bool (*cond_fail)(u8), u8 *state); + +int ethtool_cmis_cdb_execute_cmd(struct net_device *dev, + struct ethtool_cmis_cdb_cmd_args *args); diff --git a/net/ethtool/cmis_cdb.c b/net/ethtool/cmis_cdb.c new file mode 100644 index 000000000000..1bb08783b60d --- /dev/null +++ b/net/ethtool/cmis_cdb.c @@ -0,0 +1,602 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/ethtool.h> +#include <linux/jiffies.h> + +#include "common.h" +#include "module_fw.h" +#include "cmis.h" + +/* For accessing the LPL field on page 9Fh, the allowable length extension is + * min(i, 15) byte octets where i specifies the allowable additional number of + * byte octets in a READ or a WRITE. + */ +u32 ethtool_cmis_get_max_payload_size(u8 num_of_byte_octs) +{ + return 8 * (1 + min_t(u8, num_of_byte_octs, 15)); +} + +void ethtool_cmis_cdb_compose_args(struct ethtool_cmis_cdb_cmd_args *args, + enum ethtool_cmis_cdb_cmd_id cmd, u8 *pl, + u8 lpl_len, u16 max_duration, + u8 read_write_len_ext, u16 msleep_pre_rpl, + u8 rpl_exp_len, u8 flags) +{ + args->req.id = cpu_to_be16(cmd); + args->req.lpl_len = lpl_len; + if (pl) + memcpy(args->req.payload, pl, args->req.lpl_len); + + args->max_duration = max_duration; + args->read_write_len_ext = + ethtool_cmis_get_max_payload_size(read_write_len_ext); + args->msleep_pre_rpl = msleep_pre_rpl; + args->rpl_exp_len = rpl_exp_len; + args->flags = flags; + args->err_msg = NULL; +} + +void ethtool_cmis_page_init(struct ethtool_module_eeprom *page_data, + u8 page, u32 offset, u32 length) +{ + page_data->page = page; + page_data->offset = offset; + page_data->length = length; + page_data->i2c_address = ETHTOOL_CMIS_CDB_PAGE_I2C_ADDR; +} + +#define CMIS_REVISION_PAGE 0x00 +#define CMIS_REVISION_OFFSET 0x01 + +struct cmis_rev_rpl { + u8 rev; +}; + +static u8 cmis_rev_rpl_major(struct cmis_rev_rpl *rpl) +{ + return rpl->rev >> 4; +} + +static int cmis_rev_major_get(struct net_device *dev, u8 *rev_major) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_module_eeprom page_data = {0}; + struct netlink_ext_ack extack = {}; + struct cmis_rev_rpl rpl = {}; + int err; + + ethtool_cmis_page_init(&page_data, CMIS_REVISION_PAGE, + CMIS_REVISION_OFFSET, sizeof(rpl)); + page_data.data = (u8 *)&rpl; + + err = ops->get_module_eeprom_by_page(dev, &page_data, &extack); + if (err < 0) { + if (extack._msg) + netdev_err(dev, "%s\n", extack._msg); + return err; + } + + *rev_major = cmis_rev_rpl_major(&rpl); + + return 0; +} + +#define CMIS_CDB_ADVERTISEMENT_PAGE 0x01 +#define 
CMIS_CDB_ADVERTISEMENT_OFFSET 0xA3 + +/* Based on section 8.4.11 "CDB Messaging Support Advertisement" in CMIS + * standard revision 5.2. + */ +struct cmis_cdb_advert_rpl { + u8 inst_supported; + u8 read_write_len_ext; + u8 resv1; + u8 resv2; +}; + +static u8 cmis_cdb_advert_rpl_inst_supported(struct cmis_cdb_advert_rpl *rpl) +{ + return rpl->inst_supported >> 6; +} + +static int cmis_cdb_advertisement_get(struct ethtool_cmis_cdb *cdb, + struct net_device *dev) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_module_eeprom page_data = {}; + struct cmis_cdb_advert_rpl rpl = {}; + struct netlink_ext_ack extack = {}; + int err; + + ethtool_cmis_page_init(&page_data, CMIS_CDB_ADVERTISEMENT_PAGE, + CMIS_CDB_ADVERTISEMENT_OFFSET, sizeof(rpl)); + page_data.data = (u8 *)&rpl; + + err = ops->get_module_eeprom_by_page(dev, &page_data, &extack); + if (err < 0) { + if (extack._msg) + netdev_err(dev, "%s\n", extack._msg); + return err; + } + + if (!cmis_cdb_advert_rpl_inst_supported(&rpl)) + return -EOPNOTSUPP; + + cdb->read_write_len_ext = rpl.read_write_len_ext; + + return 0; +} + +#define CMIS_PASSWORD_ENTRY_PAGE 0x00 +#define CMIS_PASSWORD_ENTRY_OFFSET 0x7A + +struct cmis_password_entry_pl { + __be32 password; +}; + +/* See section 9.3.1 "CMD 0000h: Query Status" in CMIS standard revision 5.2. + * struct cmis_cdb_query_status_pl and struct cmis_cdb_query_status_rpl are + * structured layouts of the flat arrays, + * struct ethtool_cmis_cdb_request::payload and + * struct ethtool_cmis_cdb_rpl::payload respectively. + */ +struct cmis_cdb_query_status_pl { + u16 response_delay; +}; + +struct cmis_cdb_query_status_rpl { + u8 length; + u8 status; +}; + +static int +cmis_cdb_validate_password(struct ethtool_cmis_cdb *cdb, + struct net_device *dev, + const struct ethtool_module_fw_flash_params *params, + struct ethnl_module_fw_flash_ntf_params *ntf_params) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct cmis_cdb_query_status_pl qs_pl = {0}; + struct ethtool_module_eeprom page_data = {}; + struct ethtool_cmis_cdb_cmd_args args = {}; + struct cmis_password_entry_pl pe_pl = {}; + struct cmis_cdb_query_status_rpl *rpl; + struct netlink_ext_ack extack = {}; + int err; + + ethtool_cmis_page_init(&page_data, CMIS_PASSWORD_ENTRY_PAGE, + CMIS_PASSWORD_ENTRY_OFFSET, sizeof(pe_pl)); + page_data.data = (u8 *)&pe_pl; + + pe_pl = *((struct cmis_password_entry_pl *)page_data.data); + pe_pl.password = params->password; + err = ops->set_module_eeprom_by_page(dev, &page_data, &extack); + if (err < 0) { + if (extack._msg) + netdev_err(dev, "%s\n", extack._msg); + return err; + } + + ethtool_cmis_cdb_compose_args(&args, ETHTOOL_CMIS_CDB_CMD_QUERY_STATUS, + (u8 *)&qs_pl, sizeof(qs_pl), 0, + cdb->read_write_len_ext, 1000, + sizeof(*rpl), + CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID); + + err = ethtool_cmis_cdb_execute_cmd(dev, &args); + if (err < 0) { + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "Query Status command failed", + args.err_msg); + return err; + } + + rpl = (struct cmis_cdb_query_status_rpl *)args.req.payload; + if (!rpl->length || !rpl->status) { + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "Password was not accepted", + NULL); + return -EINVAL; + } + + return 0; +} + +/* Some CDB commands assert the CDB completion flag only from CMIS + * revision 5. Therefore, check the relevant validity flag only when + * the revision supports it. + */ +void ethtool_cmis_cdb_check_completion_flag(u8 cmis_rev, u8 *flags) +{ + *flags |= cmis_rev >= 5 ? 
CDB_F_COMPLETION_VALID : 0; +} + +#define CMIS_CDB_MODULE_FEATURES_RESV_DATA 34 + +/* See section 9.4.1 "CMD 0040h: Module Features" in CMIS standard revision 5.2. + * struct cmis_cdb_module_features_rpl is a structured layout of the flat + * array, ethtool_cmis_cdb_rpl::payload. + */ +struct cmis_cdb_module_features_rpl { + u8 resv1[CMIS_CDB_MODULE_FEATURES_RESV_DATA]; + __be16 max_completion_time; +}; + +static u16 +cmis_cdb_module_features_completion_time(struct cmis_cdb_module_features_rpl *rpl) +{ + return be16_to_cpu(rpl->max_completion_time); +} + +static int cmis_cdb_module_features_get(struct ethtool_cmis_cdb *cdb, + struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *ntf_params) +{ + struct ethtool_cmis_cdb_cmd_args args = {}; + struct cmis_cdb_module_features_rpl *rpl; + u8 flags = CDB_F_STATUS_VALID; + int err; + + ethtool_cmis_cdb_check_completion_flag(cdb->cmis_rev, &flags); + ethtool_cmis_cdb_compose_args(&args, + ETHTOOL_CMIS_CDB_CMD_MODULE_FEATURES, + NULL, 0, 0, cdb->read_write_len_ext, + 1000, sizeof(*rpl), flags); + + err = ethtool_cmis_cdb_execute_cmd(dev, &args); + if (err < 0) { + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "Module Features command failed", + args.err_msg); + return err; + } + + rpl = (struct cmis_cdb_module_features_rpl *)args.req.payload; + cdb->max_completion_time = + cmis_cdb_module_features_completion_time(rpl); + + return 0; +} + +struct ethtool_cmis_cdb * +ethtool_cmis_cdb_init(struct net_device *dev, + const struct ethtool_module_fw_flash_params *params, + struct ethnl_module_fw_flash_ntf_params *ntf_params) +{ + struct ethtool_cmis_cdb *cdb; + int err; + + cdb = kzalloc(sizeof(*cdb), GFP_KERNEL); + if (!cdb) + return ERR_PTR(-ENOMEM); + + err = cmis_rev_major_get(dev, &cdb->cmis_rev); + if (err < 0) + goto err; + + if (cdb->cmis_rev < 4) { + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "CMIS revision doesn't support module firmware flashing", + NULL); + err = -EOPNOTSUPP; + goto err; + } + + err = cmis_cdb_advertisement_get(cdb, dev); + if (err < 0) + goto err; + + if (params->password_valid) { + err = cmis_cdb_validate_password(cdb, dev, params, ntf_params); + if (err < 0) + goto err; + } + + err = cmis_cdb_module_features_get(cdb, dev, ntf_params); + if (err < 0) + goto err; + + return cdb; + +err: + ethtool_cmis_cdb_fini(cdb); + return ERR_PTR(err); +} + +void ethtool_cmis_cdb_fini(struct ethtool_cmis_cdb *cdb) +{ + kfree(cdb); +} + +static bool is_completed(u8 data) +{ + return !!(data & 0x40); +} + +#define CMIS_CDB_STATUS_SUCCESS 0x01 + +static bool status_success(u8 data) +{ + return data == CMIS_CDB_STATUS_SUCCESS; +} + +#define CMIS_CDB_STATUS_FAIL 0x40 + +static bool status_fail(u8 data) +{ + return data & CMIS_CDB_STATUS_FAIL; +} + +struct cmis_wait_for_cond_rpl { + u8 state; +}; + +static int +ethtool_cmis_module_poll(struct net_device *dev, + struct cmis_wait_for_cond_rpl *rpl, u32 offset, + bool (*cond_success)(u8), bool (*cond_fail)(u8)) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_module_eeprom page_data = {0}; + struct netlink_ext_ack extack = {}; + int err; + + ethtool_cmis_page_init(&page_data, 0, offset, sizeof(rpl)); + page_data.data = (u8 *)rpl; + + err = ops->get_module_eeprom_by_page(dev, &page_data, &extack); + if (err < 0) { + if (extack._msg) + netdev_err_once(dev, "%s\n", extack._msg); + return -EBUSY; + } + + if ((*cond_success)(rpl->state)) + return 0; + + if (*cond_fail && (*cond_fail)(rpl->state)) + return -EIO; + + return -EBUSY; +} + +int 
ethtool_cmis_wait_for_cond(struct net_device *dev, u8 flags, u8 flag, + u16 max_duration, u32 offset, + bool (*cond_success)(u8), bool (*cond_fail)(u8), + u8 *state) +{ + struct cmis_wait_for_cond_rpl rpl = {}; + unsigned long end; + int err; + + if (!(flags & flag)) + return 0; + + if (max_duration == 0) + max_duration = U16_MAX; + + end = jiffies + msecs_to_jiffies(max_duration); + do { + err = ethtool_cmis_module_poll(dev, &rpl, offset, cond_success, + cond_fail); + if (err != -EBUSY) + goto out; + + msleep(20); + } while (time_before(jiffies, end)); + + err = ethtool_cmis_module_poll(dev, &rpl, offset, cond_success, + cond_fail); + if (err == -EBUSY) + err = -ETIMEDOUT; + +out: + *state = rpl.state; + return err; +} + +#define CMIS_CDB_COMPLETION_FLAG_OFFSET 0x08 + +static int cmis_cdb_wait_for_completion(struct net_device *dev, + struct ethtool_cmis_cdb_cmd_args *args) +{ + u8 flag; + int err; + + /* Some vendors demand waiting time before checking completion flag + * in some CDB commands. + */ + msleep(args->msleep_pre_rpl); + + err = ethtool_cmis_wait_for_cond(dev, args->flags, + CDB_F_COMPLETION_VALID, + args->max_duration, + CMIS_CDB_COMPLETION_FLAG_OFFSET, + is_completed, NULL, &flag); + if (err < 0) + args->err_msg = "Completion Flag did not set on time"; + + return err; +} + +#define CMIS_CDB_STATUS_OFFSET 0x25 + +static void cmis_cdb_status_fail_msg_get(u8 status, char **err_msg) +{ + switch (status) { + case 0b10000001: + *err_msg = "CDB Status is in progress: Busy capturing command"; + break; + case 0b10000010: + *err_msg = + "CDB Status is in progress: Busy checking/validating command"; + break; + case 0b10000011: + *err_msg = "CDB Status is in progress: Busy executing"; + break; + case 0b01000000: + *err_msg = "CDB status failed: no specific failure"; + break; + case 0b01000010: + *err_msg = + "CDB status failed: Parameter range error or parameter not supported"; + break; + case 0b01000101: + *err_msg = "CDB status failed: CdbChkCode error"; + break; + default: + *err_msg = "Unknown failure reason"; + } +}; + +static int cmis_cdb_wait_for_status(struct net_device *dev, + struct ethtool_cmis_cdb_cmd_args *args) +{ + u8 status; + int err; + + /* Some vendors demand waiting time before checking status in some + * CDB commands. 
+ */ + msleep(args->msleep_pre_rpl); + + err = ethtool_cmis_wait_for_cond(dev, args->flags, CDB_F_STATUS_VALID, + args->max_duration, + CMIS_CDB_STATUS_OFFSET, + status_success, status_fail, &status); + if (err < 0 && !args->err_msg) + cmis_cdb_status_fail_msg_get(status, &args->err_msg); + + return err; +} + +#define CMIS_CDB_REPLY_OFFSET 0x86 + +static int cmis_cdb_process_reply(struct net_device *dev, + struct ethtool_module_eeprom *page_data, + struct ethtool_cmis_cdb_cmd_args *args) +{ + u8 rpl_hdr_len = sizeof(struct ethtool_cmis_cdb_rpl_hdr); + u8 rpl_exp_len = args->rpl_exp_len + rpl_hdr_len; + const struct ethtool_ops *ops = dev->ethtool_ops; + struct netlink_ext_ack extack = {}; + struct ethtool_cmis_cdb_rpl *rpl; + int err; + + if (!args->rpl_exp_len) + return 0; + + ethtool_cmis_page_init(page_data, ETHTOOL_CMIS_CDB_CMD_PAGE, + CMIS_CDB_REPLY_OFFSET, rpl_exp_len); + page_data->data = kmalloc(page_data->length, GFP_KERNEL); + if (!page_data->data) + return -ENOMEM; + + err = ops->get_module_eeprom_by_page(dev, page_data, &extack); + if (err < 0) { + if (extack._msg) + netdev_err(dev, "%s\n", extack._msg); + goto out; + } + + rpl = (struct ethtool_cmis_cdb_rpl *)page_data->data; + if ((args->rpl_exp_len > rpl->hdr.rpl_len + rpl_hdr_len) || + !rpl->hdr.rpl_chk_code) { + err = -EIO; + goto out; + } + + args->req.lpl_len = rpl->hdr.rpl_len; + memcpy(args->req.payload, rpl->payload, args->req.lpl_len); + +out: + kfree(page_data->data); + return err; +} + +static int +__ethtool_cmis_cdb_execute_cmd(struct net_device *dev, + struct ethtool_module_eeprom *page_data, + u8 page, u32 offset, u32 length, void *data) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct netlink_ext_ack extack = {}; + int err; + + ethtool_cmis_page_init(page_data, page, offset, length); + page_data->data = kmemdup(data, page_data->length, GFP_KERNEL); + if (!page_data->data) + return -ENOMEM; + + err = ops->set_module_eeprom_by_page(dev, page_data, &extack); + if (err < 0) { + if (extack._msg) + netdev_err(dev, "%s\n", extack._msg); + } + + kfree(page_data->data); + return err; +} + +static u8 cmis_cdb_calc_checksum(const void *data, size_t size) +{ + const u8 *bytes = (const u8 *)data; + u8 checksum = 0; + + for (size_t i = 0; i < size; i++) + checksum += bytes[i]; + + return ~checksum; +} + +#define CMIS_CDB_CMD_ID_OFFSET 0x80 + +int ethtool_cmis_cdb_execute_cmd(struct net_device *dev, + struct ethtool_cmis_cdb_cmd_args *args) +{ + struct ethtool_module_eeprom page_data = {}; + u32 offset; + int err; + + args->req.chk_code = + cmis_cdb_calc_checksum(&args->req, sizeof(args->req)); + + if (args->req.lpl_len > args->read_write_len_ext) { + args->err_msg = "LPL length is longer than CDB read write length extension allows"; + return -EINVAL; + } + + /* According to the CMIS standard, there are two options to trigger the + * CDB commands. The default option is triggering the command by writing + * the CMDID bytes. Therefore, the command will be split into 2 calls: + * First, with everything except the CMDID field and then the CMDID + * field. 
+ */ + offset = CMIS_CDB_CMD_ID_OFFSET + + offsetof(struct ethtool_cmis_cdb_request, body); + err = __ethtool_cmis_cdb_execute_cmd(dev, &page_data, + ETHTOOL_CMIS_CDB_CMD_PAGE, offset, + sizeof(args->req.body), + &args->req.body); + if (err < 0) + return err; + + offset = CMIS_CDB_CMD_ID_OFFSET + + offsetof(struct ethtool_cmis_cdb_request, id); + err = __ethtool_cmis_cdb_execute_cmd(dev, &page_data, + ETHTOOL_CMIS_CDB_CMD_PAGE, offset, + sizeof(args->req.id), + &args->req.id); + if (err < 0) + return err; + + err = cmis_cdb_wait_for_completion(dev, args); + if (err < 0) + return err; + + err = cmis_cdb_wait_for_status(dev, args); + if (err < 0) + return err; + + return cmis_cdb_process_reply(dev, &page_data, args); +} diff --git a/net/ethtool/cmis_fw_update.c b/net/ethtool/cmis_fw_update.c new file mode 100644 index 000000000000..ae4b4b28a601 --- /dev/null +++ b/net/ethtool/cmis_fw_update.c @@ -0,0 +1,399 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/ethtool.h> +#include <linux/firmware.h> + +#include "common.h" +#include "module_fw.h" +#include "cmis.h" + +struct cmis_fw_update_fw_mng_features { + u8 start_cmd_payload_size; + u16 max_duration_start; + u16 max_duration_write; + u16 max_duration_complete; +}; + +/* See section 9.4.2 "CMD 0041h: Firmware Management Features" in CMIS standard + * revision 5.2. + * struct cmis_cdb_fw_mng_features_rpl is a structured layout of the flat + * array, ethtool_cmis_cdb_rpl::payload. + */ +struct cmis_cdb_fw_mng_features_rpl { + u8 resv1; + u8 resv2; + u8 start_cmd_payload_size; + u8 resv3; + u8 read_write_len_ext; + u8 write_mechanism; + u8 resv4; + u8 resv5; + __be16 max_duration_start; + __be16 resv6; + __be16 max_duration_write; + __be16 max_duration_complete; + __be16 resv7; +}; + +#define CMIS_CDB_FW_WRITE_MECHANISM_LPL 0x01 + +static int +cmis_fw_update_fw_mng_features_get(struct ethtool_cmis_cdb *cdb, + struct net_device *dev, + struct cmis_fw_update_fw_mng_features *fw_mng, + struct ethnl_module_fw_flash_ntf_params *ntf_params) +{ + struct ethtool_cmis_cdb_cmd_args args = {}; + struct cmis_cdb_fw_mng_features_rpl *rpl; + u8 flags = CDB_F_STATUS_VALID; + int err; + + ethtool_cmis_cdb_check_completion_flag(cdb->cmis_rev, &flags); + ethtool_cmis_cdb_compose_args(&args, + ETHTOOL_CMIS_CDB_CMD_FW_MANAGMENT_FEATURES, + NULL, 0, cdb->max_completion_time, + cdb->read_write_len_ext, 1000, + sizeof(*rpl), flags); + + err = ethtool_cmis_cdb_execute_cmd(dev, &args); + if (err < 0) { + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "FW Management Features command failed", + args.err_msg); + return err; + } + + rpl = (struct cmis_cdb_fw_mng_features_rpl *)args.req.payload; + if (!(rpl->write_mechanism == CMIS_CDB_FW_WRITE_MECHANISM_LPL)) { + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "Write LPL is not supported", + NULL); + return -EOPNOTSUPP; + } + + /* Above, we used read_write_len_ext that we got from CDB + * advertisement. Update it with the value that we got from module + * features query, which is specific for Firmware Management Commands + * (IDs 0100h-01FFh). + */ + cdb->read_write_len_ext = rpl->read_write_len_ext; + fw_mng->start_cmd_payload_size = rpl->start_cmd_payload_size; + fw_mng->max_duration_start = be16_to_cpu(rpl->max_duration_start); + fw_mng->max_duration_write = be16_to_cpu(rpl->max_duration_write); + fw_mng->max_duration_complete = be16_to_cpu(rpl->max_duration_complete); + + return 0; +} + +/* See section 9.7.2 "CMD 0101h: Start Firmware Download" in CMIS standard + * revision 5.2. 
+ * struct cmis_cdb_start_fw_download_pl is a structured layout of the + * flat array, ethtool_cmis_cdb_request::payload. + */ +struct cmis_cdb_start_fw_download_pl { + __struct_group(cmis_cdb_start_fw_download_pl_h, head, /* no attrs */, + __be32 image_size; + __be32 resv1; + ); + u8 vendor_data[ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH - + sizeof(struct cmis_cdb_start_fw_download_pl_h)]; +}; + +static int +cmis_fw_update_start_download(struct ethtool_cmis_cdb *cdb, + struct ethtool_cmis_fw_update_params *fw_update, + struct cmis_fw_update_fw_mng_features *fw_mng) +{ + u8 vendor_data_size = fw_mng->start_cmd_payload_size; + struct cmis_cdb_start_fw_download_pl pl = {}; + struct ethtool_cmis_cdb_cmd_args args = {}; + u8 lpl_len; + int err; + + pl.image_size = cpu_to_be32(fw_update->fw->size); + memcpy(pl.vendor_data, fw_update->fw->data, vendor_data_size); + + lpl_len = offsetof(struct cmis_cdb_start_fw_download_pl, + vendor_data[vendor_data_size]); + + ethtool_cmis_cdb_compose_args(&args, + ETHTOOL_CMIS_CDB_CMD_START_FW_DOWNLOAD, + (u8 *)&pl, lpl_len, + fw_mng->max_duration_start, + cdb->read_write_len_ext, 1000, 0, + CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID); + + err = ethtool_cmis_cdb_execute_cmd(fw_update->dev, &args); + if (err < 0) + ethnl_module_fw_flash_ntf_err(fw_update->dev, + &fw_update->ntf_params, + "Start FW download command failed", + args.err_msg); + + return err; +} + +/* See section 9.7.4 "CMD 0103h: Write Firmware Block LPL" in CMIS standard + * revision 5.2. + * struct cmis_cdb_write_fw_block_lpl_pl is a structured layout of the + * flat array, ethtool_cmis_cdb_request::payload. + */ +struct cmis_cdb_write_fw_block_lpl_pl { + __be32 block_address; + u8 fw_block[ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH - sizeof(__be32)]; +}; + +static int +cmis_fw_update_write_image(struct ethtool_cmis_cdb *cdb, + struct ethtool_cmis_fw_update_params *fw_update, + struct cmis_fw_update_fw_mng_features *fw_mng) +{ + u8 start = fw_mng->start_cmd_payload_size; + u32 offset, max_block_size, max_lpl_len; + u32 image_size = fw_update->fw->size; + int err; + + max_lpl_len = min_t(u32, + ethtool_cmis_get_max_payload_size(cdb->read_write_len_ext), + ETHTOOL_CMIS_CDB_LPL_MAX_PL_LENGTH); + max_block_size = + max_lpl_len - sizeof_field(struct cmis_cdb_write_fw_block_lpl_pl, + block_address); + + for (offset = start; offset < image_size; offset += max_block_size) { + struct cmis_cdb_write_fw_block_lpl_pl pl = { + .block_address = cpu_to_be32(offset - start), + }; + struct ethtool_cmis_cdb_cmd_args args = {}; + u32 block_size, lpl_len; + + ethnl_module_fw_flash_ntf_in_progress(fw_update->dev, + &fw_update->ntf_params, + offset - start, + image_size); + block_size = min_t(u32, max_block_size, image_size - offset); + memcpy(pl.fw_block, &fw_update->fw->data[offset], block_size); + lpl_len = block_size + + sizeof_field(struct cmis_cdb_write_fw_block_lpl_pl, + block_address); + + ethtool_cmis_cdb_compose_args(&args, + ETHTOOL_CMIS_CDB_CMD_WRITE_FW_BLOCK_LPL, + (u8 *)&pl, lpl_len, + fw_mng->max_duration_write, + cdb->read_write_len_ext, 1, 0, + CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID); + + err = ethtool_cmis_cdb_execute_cmd(fw_update->dev, &args); + if (err < 0) { + ethnl_module_fw_flash_ntf_err(fw_update->dev, + &fw_update->ntf_params, + "Write FW block LPL command failed", + args.err_msg); + return err; + } + } + + return 0; +} + +static int +cmis_fw_update_complete_download(struct ethtool_cmis_cdb *cdb, + struct net_device *dev, + struct cmis_fw_update_fw_mng_features *fw_mng, + struct 
ethnl_module_fw_flash_ntf_params *ntf_params) +{ + struct ethtool_cmis_cdb_cmd_args args = {}; + int err; + + ethtool_cmis_cdb_compose_args(&args, + ETHTOOL_CMIS_CDB_CMD_COMPLETE_FW_DOWNLOAD, + NULL, 0, fw_mng->max_duration_complete, + cdb->read_write_len_ext, 1000, 0, + CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID); + + err = ethtool_cmis_cdb_execute_cmd(dev, &args); + if (err < 0) + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "Complete FW download command failed", + args.err_msg); + + return err; +} + +static int +cmis_fw_update_download_image(struct ethtool_cmis_cdb *cdb, + struct ethtool_cmis_fw_update_params *fw_update, + struct cmis_fw_update_fw_mng_features *fw_mng) +{ + int err; + + err = cmis_fw_update_start_download(cdb, fw_update, fw_mng); + if (err < 0) + return err; + + err = cmis_fw_update_write_image(cdb, fw_update, fw_mng); + if (err < 0) + return err; + + err = cmis_fw_update_complete_download(cdb, fw_update->dev, fw_mng, + &fw_update->ntf_params); + if (err < 0) + return err; + + return 0; +} + +enum { + CMIS_MODULE_LOW_PWR = 1, + CMIS_MODULE_READY = 3, +}; + +static bool module_is_ready(u8 data) +{ + u8 state = (data >> 1) & 7; + + return state == CMIS_MODULE_READY || state == CMIS_MODULE_LOW_PWR; +} + +#define CMIS_MODULE_READY_MAX_DURATION_MSEC 1000 +#define CMIS_MODULE_STATE_OFFSET 3 + +static int +cmis_fw_update_wait_for_module_state(struct net_device *dev, u8 flags) +{ + u8 state; + + return ethtool_cmis_wait_for_cond(dev, flags, CDB_F_MODULE_STATE_VALID, + CMIS_MODULE_READY_MAX_DURATION_MSEC, + CMIS_MODULE_STATE_OFFSET, + module_is_ready, NULL, &state); +} + +/* See section 9.7.10 "CMD 0109h: Run Firmware Image" in CMIS standard + * revision 5.2. + * struct cmis_cdb_run_fw_image_pl is a structured layout of the flat + * array, ethtool_cmis_cdb_request::payload. 
+ */ +struct cmis_cdb_run_fw_image_pl { + u8 resv1; + u8 image_to_run; + u16 delay_to_reset; +}; + +static int +cmis_fw_update_run_image(struct ethtool_cmis_cdb *cdb, struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *ntf_params) +{ + struct ethtool_cmis_cdb_cmd_args args = {}; + struct cmis_cdb_run_fw_image_pl pl = {0}; + int err; + + ethtool_cmis_cdb_compose_args(&args, ETHTOOL_CMIS_CDB_CMD_RUN_FW_IMAGE, + (u8 *)&pl, sizeof(pl), + cdb->max_completion_time, + cdb->read_write_len_ext, 1000, 0, + CDB_F_MODULE_STATE_VALID); + + err = ethtool_cmis_cdb_execute_cmd(dev, &args); + if (err < 0) { + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "Run image command failed", + args.err_msg); + return err; + } + + err = cmis_fw_update_wait_for_module_state(dev, args.flags); + if (err < 0) + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "Module is not ready on time after reset", + NULL); + + return err; +} + +static int +cmis_fw_update_commit_image(struct ethtool_cmis_cdb *cdb, + struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *ntf_params) +{ + struct ethtool_cmis_cdb_cmd_args args = {}; + int err; + + ethtool_cmis_cdb_compose_args(&args, + ETHTOOL_CMIS_CDB_CMD_COMMIT_FW_IMAGE, + NULL, 0, cdb->max_completion_time, + cdb->read_write_len_ext, 1000, 0, + CDB_F_COMPLETION_VALID | CDB_F_STATUS_VALID); + + err = ethtool_cmis_cdb_execute_cmd(dev, &args); + if (err < 0) + ethnl_module_fw_flash_ntf_err(dev, ntf_params, + "Commit image command failed", + args.err_msg); + + return err; +} + +static int cmis_fw_update_reset(struct net_device *dev) +{ + __u32 reset_data = ETH_RESET_PHY; + + return dev->ethtool_ops->reset(dev, &reset_data); +} + +void +ethtool_cmis_fw_update(struct ethtool_cmis_fw_update_params *fw_update) +{ + struct ethnl_module_fw_flash_ntf_params *ntf_params = + &fw_update->ntf_params; + struct cmis_fw_update_fw_mng_features fw_mng = {0}; + struct net_device *dev = fw_update->dev; + struct ethtool_cmis_cdb *cdb; + int err; + + cdb = ethtool_cmis_cdb_init(dev, &fw_update->params, ntf_params); + if (IS_ERR(cdb)) + goto err_send_ntf; + + ethnl_module_fw_flash_ntf_start(dev, ntf_params); + + err = cmis_fw_update_fw_mng_features_get(cdb, dev, &fw_mng, ntf_params); + if (err < 0) + goto err_cdb_fini; + + err = cmis_fw_update_download_image(cdb, fw_update, &fw_mng); + if (err < 0) + goto err_cdb_fini; + + err = cmis_fw_update_run_image(cdb, dev, ntf_params); + if (err < 0) + goto err_cdb_fini; + + /* The CDB command "Run Firmware Image" resets the firmware, so the new + * one might have different settings. + * Free the old CDB instance, and init a new one. 
+ */ + ethtool_cmis_cdb_fini(cdb); + + cdb = ethtool_cmis_cdb_init(dev, &fw_update->params, ntf_params); + if (IS_ERR(cdb)) + goto err_send_ntf; + + err = cmis_fw_update_commit_image(cdb, dev, ntf_params); + if (err < 0) + goto err_cdb_fini; + + err = cmis_fw_update_reset(dev); + if (err < 0) + goto err_cdb_fini; + + ethnl_module_fw_flash_ntf_complete(dev, ntf_params); + ethtool_cmis_cdb_fini(cdb); + return; + +err_cdb_fini: + ethtool_cmis_cdb_fini(cdb); +err_send_ntf: + ethnl_module_fw_flash_ntf_err(dev, ntf_params, NULL, NULL); +} diff --git a/net/ethtool/coalesce.c b/net/ethtool/coalesce.c index 83112c1a71ae..3e18ca1ccc5e 100644 --- a/net/ethtool/coalesce.c +++ b/net/ethtool/coalesce.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0-only +#include <linux/dim.h> #include "netlink.h" #include "common.h" @@ -82,6 +83,14 @@ static int coalesce_prepare_data(const struct ethnl_req_info *req_base, static int coalesce_reply_size(const struct ethnl_req_info *req_base, const struct ethnl_reply_data *reply_base) { + int modersz = nla_total_size(0) + /* _PROFILE_IRQ_MODERATION, nest */ + nla_total_size(sizeof(u32)) + /* _IRQ_MODERATION_USEC */ + nla_total_size(sizeof(u32)) + /* _IRQ_MODERATION_PKTS */ + nla_total_size(sizeof(u32)); /* _IRQ_MODERATION_COMPS */ + + int total_modersz = nla_total_size(0) + /* _{R,T}X_PROFILE, nest */ + modersz * NET_DIM_PARAMS_NUM_PROFILES; + return nla_total_size(sizeof(u32)) + /* _RX_USECS */ nla_total_size(sizeof(u32)) + /* _RX_MAX_FRAMES */ nla_total_size(sizeof(u32)) + /* _RX_USECS_IRQ */ @@ -108,7 +117,8 @@ static int coalesce_reply_size(const struct ethnl_req_info *req_base, nla_total_size(sizeof(u8)) + /* _USE_CQE_MODE_RX */ nla_total_size(sizeof(u32)) + /* _TX_AGGR_MAX_BYTES */ nla_total_size(sizeof(u32)) + /* _TX_AGGR_MAX_FRAMES */ - nla_total_size(sizeof(u32)); /* _TX_AGGR_TIME_USECS */ + nla_total_size(sizeof(u32)) + /* _TX_AGGR_TIME_USECS */ + total_modersz * 2; /* _{R,T}X_PROFILE */ } static bool coalesce_put_u32(struct sk_buff *skb, u16 attr_type, u32 val, @@ -127,6 +137,74 @@ static bool coalesce_put_bool(struct sk_buff *skb, u16 attr_type, u32 val, return nla_put_u8(skb, attr_type, !!val); } +/** + * coalesce_put_profile - fill reply with a nla nest with NET_DIM_PARAMS_NUM_PROFILES child nla nests. + * @skb: socket buffer the message is stored in + * @attr_type: nest attr type ETHTOOL_A_COALESCE_*X_PROFILE + * @profile: data passed to userspace + * @coal_flags: modifiable parameters supported by the driver + * + * Put a dim profile nest attribute. Refer to ETHTOOL_A_PROFILE_IRQ_MODERATION. + * + * Return: 0 on success or a negative error code. 
+ */ +static int coalesce_put_profile(struct sk_buff *skb, u16 attr_type, + const struct dim_cq_moder *profile, + u8 coal_flags) +{ + struct nlattr *profile_attr, *moder_attr; + int i, ret; + + if (!profile || !coal_flags) + return 0; + + profile_attr = nla_nest_start(skb, attr_type); + if (!profile_attr) + return -EMSGSIZE; + + for (i = 0; i < NET_DIM_PARAMS_NUM_PROFILES; i++) { + moder_attr = nla_nest_start(skb, + ETHTOOL_A_PROFILE_IRQ_MODERATION); + if (!moder_attr) { + ret = -EMSGSIZE; + goto cancel_profile; + } + + if (coal_flags & DIM_COALESCE_USEC) { + ret = nla_put_u32(skb, ETHTOOL_A_IRQ_MODERATION_USEC, + profile[i].usec); + if (ret) + goto cancel_moder; + } + + if (coal_flags & DIM_COALESCE_PKTS) { + ret = nla_put_u32(skb, ETHTOOL_A_IRQ_MODERATION_PKTS, + profile[i].pkts); + if (ret) + goto cancel_moder; + } + + if (coal_flags & DIM_COALESCE_COMPS) { + ret = nla_put_u32(skb, ETHTOOL_A_IRQ_MODERATION_COMPS, + profile[i].comps); + if (ret) + goto cancel_moder; + } + + nla_nest_end(skb, moder_attr); + } + + nla_nest_end(skb, profile_attr); + + return 0; + +cancel_moder: + nla_nest_cancel(skb, moder_attr); +cancel_profile: + nla_nest_cancel(skb, profile_attr); + return ret; +} + static int coalesce_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base, const struct ethnl_reply_data *reply_base) @@ -135,6 +213,8 @@ static int coalesce_fill_reply(struct sk_buff *skb, const struct kernel_ethtool_coalesce *kcoal = &data->kernel_coalesce; const struct ethtool_coalesce *coal = &data->coalesce; u32 supported = data->supported_params; + struct dim_irq_moder *moder; + int ret = 0; if (coalesce_put_u32(skb, ETHTOOL_A_COALESCE_RX_USECS, coal->rx_coalesce_usecs, supported) || @@ -192,11 +272,42 @@ static int coalesce_fill_reply(struct sk_buff *skb, kcoal->tx_aggr_time_usecs, supported)) return -EMSGSIZE; - return 0; + if (!req_base->dev || !req_base->dev->irq_moder) + return 0; + + moder = req_base->dev->irq_moder; + rcu_read_lock(); + if (moder->profile_flags & DIM_PROFILE_RX) { + ret = coalesce_put_profile(skb, ETHTOOL_A_COALESCE_RX_PROFILE, + rcu_dereference(moder->rx_profile), + moder->coal_flags); + if (ret) + goto out; + } + + if (moder->profile_flags & DIM_PROFILE_TX) + ret = coalesce_put_profile(skb, ETHTOOL_A_COALESCE_TX_PROFILE, + rcu_dereference(moder->tx_profile), + moder->coal_flags); + +out: + rcu_read_unlock(); + return ret; } /* COALESCE_SET */ +static const struct nla_policy coalesce_irq_moderation_policy[] = { + [ETHTOOL_A_IRQ_MODERATION_USEC] = { .type = NLA_U32 }, + [ETHTOOL_A_IRQ_MODERATION_PKTS] = { .type = NLA_U32 }, + [ETHTOOL_A_IRQ_MODERATION_COMPS] = { .type = NLA_U32 }, +}; + +static const struct nla_policy coalesce_profile_policy[] = { + [ETHTOOL_A_PROFILE_IRQ_MODERATION] = + NLA_POLICY_NESTED(coalesce_irq_moderation_policy), +}; + const struct nla_policy ethnl_coalesce_set_policy[] = { [ETHTOOL_A_COALESCE_HEADER] = NLA_POLICY_NESTED(ethnl_header_policy), @@ -227,6 +338,10 @@ const struct nla_policy ethnl_coalesce_set_policy[] = { [ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES] = { .type = NLA_U32 }, [ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES] = { .type = NLA_U32 }, [ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS] = { .type = NLA_U32 }, + [ETHTOOL_A_COALESCE_RX_PROFILE] = + NLA_POLICY_NESTED(coalesce_profile_policy), + [ETHTOOL_A_COALESCE_TX_PROFILE] = + NLA_POLICY_NESTED(coalesce_profile_policy), }; static int @@ -234,6 +349,7 @@ ethnl_set_coalesce_validate(struct ethnl_req_info *req_info, struct genl_info *info) { const struct ethtool_ops *ops = 
req_info->dev->ethtool_ops; + struct dim_irq_moder *irq_moder = req_info->dev->irq_moder; struct nlattr **tb = info->attrs; u32 supported_params; u16 a; @@ -243,6 +359,12 @@ ethnl_set_coalesce_validate(struct ethnl_req_info *req_info, /* make sure that only supported parameters are present */ supported_params = ops->supported_coalesce_params; + if (irq_moder && irq_moder->profile_flags & DIM_PROFILE_RX) + supported_params |= ETHTOOL_COALESCE_RX_PROFILE; + + if (irq_moder && irq_moder->profile_flags & DIM_PROFILE_TX) + supported_params |= ETHTOOL_COALESCE_TX_PROFILE; + for (a = ETHTOOL_A_COALESCE_RX_USECS; a < __ETHTOOL_A_COALESCE_CNT; a++) if (tb[a] && !(supported_params & attr_to_mask(a))) { NL_SET_ERR_MSG_ATTR(info->extack, tb[a], @@ -253,6 +375,138 @@ ethnl_set_coalesce_validate(struct ethnl_req_info *req_info, return 1; } +/** + * ethnl_update_irq_moder - update a specific field in the given profile + * @irq_moder: structure that collects dim related information + * @irq_field: field in profile to modify + * @attr_type: attr type ETHTOOL_A_IRQ_MODERATION_* + * @tb: netlink attribute with new values or null + * @coal_bit: DIM_COALESCE_* bit from coal_flags + * @mod: pointer to bool for modification tracking + * @extack: netlink extended ack + * + * Return: 0 on success or a negative error code. + */ +static int ethnl_update_irq_moder(struct dim_irq_moder *irq_moder, + u16 *irq_field, u16 attr_type, + struct nlattr **tb, + u8 coal_bit, bool *mod, + struct netlink_ext_ack *extack) +{ + int ret = 0; + u32 val; + + if (!tb[attr_type]) + return 0; + + if (irq_moder->coal_flags & coal_bit) { + val = nla_get_u32(tb[attr_type]); + if (*irq_field == val) + return 0; + + *irq_field = val; + *mod = true; + } else { + NL_SET_BAD_ATTR(extack, tb[attr_type]); + ret = -EOPNOTSUPP; + } + + return ret; +} + +/** + * ethnl_update_profile - get a profile nest with child nests from userspace. + * @dev: netdevice to update the profile + * @dst: profile obtained from the driver and modified by ethnl_update_profile. + * @nests: nest attr ETHTOOL_A_COALESCE_*X_PROFILE to set profile. + * @mod: pointer to bool for modification tracking + * @extack: Netlink extended ack + * + * Layout of nests: + * Nested ETHTOOL_A_COALESCE_*X_PROFILE attr + * Nested ETHTOOL_A_PROFILE_IRQ_MODERATION attr + * ETHTOOL_A_IRQ_MODERATION_USEC attr + * ETHTOOL_A_IRQ_MODERATION_PKTS attr + * ETHTOOL_A_IRQ_MODERATION_COMPS attr + * ... + * Nested ETHTOOL_A_PROFILE_IRQ_MODERATION attr + * ETHTOOL_A_IRQ_MODERATION_USEC attr + * ETHTOOL_A_IRQ_MODERATION_PKTS attr + * ETHTOOL_A_IRQ_MODERATION_COMPS attr + * + * Return: 0 on success or a negative error code. 
+ */ +static int ethnl_update_profile(struct net_device *dev, + struct dim_cq_moder __rcu **dst, + const struct nlattr *nests, + bool *mod, + struct netlink_ext_ack *extack) +{ + int len_irq_moder = ARRAY_SIZE(coalesce_irq_moderation_policy); + struct nlattr *tb[ARRAY_SIZE(coalesce_irq_moderation_policy)]; + struct dim_irq_moder *irq_moder = dev->irq_moder; + struct dim_cq_moder *new_profile, *old_profile; + int ret, rem, i = 0, len; + struct nlattr *nest; + + if (!nests) + return 0; + + if (!*dst) + return -EOPNOTSUPP; + + old_profile = rtnl_dereference(*dst); + len = NET_DIM_PARAMS_NUM_PROFILES * sizeof(*old_profile); + new_profile = kmemdup(old_profile, len, GFP_KERNEL); + if (!new_profile) + return -ENOMEM; + + nla_for_each_nested_type(nest, ETHTOOL_A_PROFILE_IRQ_MODERATION, + nests, rem) { + ret = nla_parse_nested(tb, len_irq_moder - 1, nest, + coalesce_irq_moderation_policy, + extack); + if (ret) + goto err_out; + + ret = ethnl_update_irq_moder(irq_moder, &new_profile[i].usec, + ETHTOOL_A_IRQ_MODERATION_USEC, + tb, DIM_COALESCE_USEC, + mod, extack); + if (ret) + goto err_out; + + ret = ethnl_update_irq_moder(irq_moder, &new_profile[i].pkts, + ETHTOOL_A_IRQ_MODERATION_PKTS, + tb, DIM_COALESCE_PKTS, + mod, extack); + if (ret) + goto err_out; + + ret = ethnl_update_irq_moder(irq_moder, &new_profile[i].comps, + ETHTOOL_A_IRQ_MODERATION_COMPS, + tb, DIM_COALESCE_COMPS, + mod, extack); + if (ret) + goto err_out; + + i++; + } + + /* After the profile is modified, dim itself is a dynamic + * mechanism and will quickly fit to the appropriate + * coalescing parameters according to the new profile. + */ + rcu_assign_pointer(*dst, new_profile); + kfree_rcu(old_profile, rcu); + + return 0; + +err_out: + kfree(new_profile); + return ret; +} + static int __ethnl_set_coalesce(struct ethnl_req_info *req_info, struct genl_info *info, bool *dual_change) @@ -317,6 +571,22 @@ __ethnl_set_coalesce(struct ethnl_req_info *req_info, struct genl_info *info, ethnl_update_u32(&kernel_coalesce.tx_aggr_time_usecs, tb[ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS], &mod); + if (dev->irq_moder && dev->irq_moder->profile_flags & DIM_PROFILE_RX) { + ret = ethnl_update_profile(dev, &dev->irq_moder->rx_profile, + tb[ETHTOOL_A_COALESCE_RX_PROFILE], + &mod, info->extack); + if (ret < 0) + return ret; + } + + if (dev->irq_moder && dev->irq_moder->profile_flags & DIM_PROFILE_TX) { + ret = ethnl_update_profile(dev, &dev->irq_moder->tx_profile, + tb[ETHTOOL_A_COALESCE_TX_PROFILE], + &mod, info->extack); + if (ret < 0) + return ret; + } + /* Update operation modes */ ethnl_update_bool32(&coalesce.use_adaptive_rx_coalesce, tb[ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX], &mod_mode); diff --git a/net/ethtool/common.c b/net/ethtool/common.c index 6b2a360dcdf0..07032babd1b6 100644 --- a/net/ethtool/common.c +++ b/net/ethtool/common.c @@ -211,6 +211,7 @@ const char link_mode_names[][ETH_GSTRING_LEN] = { __DEFINE_LINK_MODE_NAME(10, T1S, Full), __DEFINE_LINK_MODE_NAME(10, T1S, Half), __DEFINE_LINK_MODE_NAME(10, T1S_P2MP, Half), + __DEFINE_LINK_MODE_NAME(10, T1BRR, Full), }; static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS); @@ -251,6 +252,7 @@ static_assert(ARRAY_SIZE(link_mode_names) == __ETHTOOL_LINK_MODE_MASK_NBITS); #define __LINK_MODE_LANES_T1S_P2MP 1 #define __LINK_MODE_LANES_VR8 8 #define __LINK_MODE_LANES_DR8_2 8 +#define __LINK_MODE_LANES_T1BRR 1 #define __DEFINE_LINK_MODE_PARAMS(_speed, _type, _duplex) \ [ETHTOOL_LINK_MODE(_speed, _type, _duplex)] = { \ @@ -374,6 +376,7 @@ const struct link_mode_info 
link_mode_params[] = { __DEFINE_LINK_MODE_PARAMS(10, T1S, Full), __DEFINE_LINK_MODE_PARAMS(10, T1S, Half), __DEFINE_LINK_MODE_PARAMS(10, T1S_P2MP, Half), + __DEFINE_LINK_MODE_PARAMS(10, T1BRR, Full), }; static_assert(ARRAY_SIZE(link_mode_params) == __ETHTOOL_LINK_MODE_MASK_NBITS); @@ -587,35 +590,64 @@ err_free_info: return err; } -int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max) +static u32 ethtool_get_max_rss_ctx_channel(struct net_device *dev) +{ + struct ethtool_rxfh_context *ctx; + unsigned long context; + u32 max_ring = 0; + + mutex_lock(&dev->ethtool->rss_lock); + xa_for_each(&dev->ethtool->rss_ctx, context, ctx) { + u32 i, *tbl; + + tbl = ethtool_rxfh_context_indir(ctx); + for (i = 0; i < ctx->indir_size; i++) + max_ring = max(max_ring, tbl[i]); + } + mutex_unlock(&dev->ethtool->rss_lock); + + return max_ring; +} + +u32 ethtool_get_max_rxfh_channel(struct net_device *dev) { struct ethtool_rxfh_param rxfh = {}; - u32 dev_size, current_max = 0; + u32 dev_size, current_max; int ret; + /* While we do track whether RSS context has an indirection + * table explicitly set by the user, no driver looks at that bit. + * Assume drivers won't auto-regenerate the additional tables, + * to be safe. + */ + current_max = ethtool_get_max_rss_ctx_channel(dev); + + if (!netif_is_rxfh_configured(dev)) + return current_max; + if (!dev->ethtool_ops->get_rxfh_indir_size || !dev->ethtool_ops->get_rxfh) - return -EOPNOTSUPP; + return current_max; dev_size = dev->ethtool_ops->get_rxfh_indir_size(dev); if (dev_size == 0) - return -EOPNOTSUPP; + return current_max; rxfh.indir = kcalloc(dev_size, sizeof(rxfh.indir[0]), GFP_USER); if (!rxfh.indir) - return -ENOMEM; + return U32_MAX; ret = dev->ethtool_ops->get_rxfh(dev, &rxfh); - if (ret) - goto out; + if (ret) { + current_max = U32_MAX; + goto out_free; + } while (dev_size--) current_max = max(current_max, rxfh.indir[dev_size]); - *max = current_max; - -out: +out_free: kfree(rxfh.indir); - return ret; + return current_max; } int ethtool_check_ops(const struct ethtool_ops *ops) @@ -629,7 +661,7 @@ int ethtool_check_ops(const struct ethtool_ops *ops) return 0; } -int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) +int __ethtool_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info) { const struct ethtool_ops *ops = dev->ethtool_ops; struct phy_device *phydev = dev->phydev; @@ -637,7 +669,7 @@ int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) memset(info, 0, sizeof(*info)); info->cmd = ETHTOOL_GET_TS_INFO; - if (phy_has_tsinfo(phydev)) + if (phy_is_default_hwtstamp(phydev) && phy_has_tsinfo(phydev)) return phy_ts_info(phydev, info); if (ops->get_ts_info) return ops->get_ts_info(dev, info); @@ -651,7 +683,7 @@ int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index) { - struct ethtool_ts_info info = { }; + struct kernel_ethtool_ts_info info = { }; int num = 0; if (!__ethtool_get_ts_info(dev, &info)) @@ -661,7 +693,7 @@ int ethtool_get_phc_vclocks(struct net_device *dev, int **vclock_index) } EXPORT_SYMBOL(ethtool_get_phc_vclocks); -int ethtool_get_ts_info_by_layer(struct net_device *dev, struct ethtool_ts_info *info) +int ethtool_get_ts_info_by_layer(struct net_device *dev, struct kernel_ethtool_ts_info *info) { return __ethtool_get_ts_info(dev, info); } @@ -712,3 +744,17 @@ ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps, u32 size) } } 
EXPORT_SYMBOL_GPL(ethtool_forced_speed_maps_init); + +void ethtool_rxfh_context_lost(struct net_device *dev, u32 context_id) +{ + struct ethtool_rxfh_context *ctx; + + WARN_ONCE(!rtnl_is_locked() && + !lockdep_is_held_type(&dev->ethtool->rss_lock, -1), + "RSS context lock assertion failed\n"); + + netdev_err(dev, "device error, RSS context %d lost\n", context_id); + ctx = xa_erase(&dev->ethtool->rss_ctx, context_id); + kfree(ctx); +} +EXPORT_SYMBOL(ethtool_rxfh_context_lost); diff --git a/net/ethtool/common.h b/net/ethtool/common.h index 28b8aaaf9bcb..863806fcf01a 100644 --- a/net/ethtool/common.h +++ b/net/ethtool/common.h @@ -42,9 +42,9 @@ int __ethtool_get_link(struct net_device *dev); bool convert_legacy_settings_to_link_ksettings( struct ethtool_link_ksettings *link_ksettings, const struct ethtool_cmd *legacy_settings); -int ethtool_get_max_rxfh_channel(struct net_device *dev, u32 *max); +u32 ethtool_get_max_rxfh_channel(struct net_device *dev); int ethtool_get_max_rxnfc_channel(struct net_device *dev, u64 *max); -int __ethtool_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info); +int __ethtool_get_ts_info(struct net_device *dev, struct kernel_ethtool_ts_info *info); extern const struct ethtool_phy_ops *ethtool_phy_ops; extern const struct ethtool_pse_ops *ethtool_pse_ops; diff --git a/net/ethtool/eeprom.c b/net/ethtool/eeprom.c index 6209c3a9c8f7..3b8209e930fd 100644 --- a/net/ethtool/eeprom.c +++ b/net/ethtool/eeprom.c @@ -91,6 +91,12 @@ static int get_module_eeprom_by_page(struct net_device *dev, { const struct ethtool_ops *ops = dev->ethtool_ops; + if (dev->ethtool->module_fw_flash_in_progress) { + NL_SET_ERR_MSG(extack, + "Module firmware flashing is in progress"); + return -EBUSY; + } + if (dev->sfp_bus) return sfp_get_module_eeprom_by_page(dev->sfp_bus, page_data, extack); diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c index 223dcd25d88a..983fee76f5cf 100644 --- a/net/ethtool/ioctl.c +++ b/net/ethtool/ioctl.c @@ -65,7 +65,8 @@ u32 ethtool_op_get_link(struct net_device *dev) } EXPORT_SYMBOL(ethtool_op_get_link); -int ethtool_op_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) +int ethtool_op_get_ts_info(struct net_device *dev, + struct kernel_ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | @@ -658,6 +659,9 @@ static int ethtool_get_settings(struct net_device *dev, void __user *useraddr) if (!dev->ethtool_ops->get_link_ksettings) return -EOPNOTSUPP; + if (dev->ethtool->module_fw_flash_in_progress) + return -EBUSY; + memset(&link_ksettings, 0, sizeof(link_ksettings)); err = dev->ethtool_ops->get_link_ksettings(dev, &link_ksettings); if (err < 0) @@ -1199,6 +1203,7 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev, const struct ethtool_ops *ops = dev->ethtool_ops; struct ethtool_rxfh_param rxfh_dev = {}; u32 user_indir_size, user_key_size; + struct ethtool_rxfh_context *ctx; struct ethtool_rxfh rxfh; u32 indir_bytes; u8 *rss_config; @@ -1246,11 +1251,26 @@ static noinline_for_stack int ethtool_get_rxfh(struct net_device *dev, if (user_key_size) rxfh_dev.key = rss_config + indir_bytes; - rxfh_dev.rss_context = rxfh.rss_context; - - ret = dev->ethtool_ops->get_rxfh(dev, &rxfh_dev); - if (ret) - goto out; + if (rxfh.rss_context) { + ctx = xa_load(&dev->ethtool->rss_ctx, rxfh.rss_context); + if (!ctx) { + ret = -ENOENT; + goto out; + } + if (rxfh_dev.indir) + memcpy(rxfh_dev.indir, ethtool_rxfh_context_indir(ctx), + indir_bytes); + if (rxfh_dev.key) + memcpy(rxfh_dev.key, 
ethtool_rxfh_context_key(ctx), + user_key_size); + rxfh_dev.hfunc = ctx->hfunc; + rxfh_dev.input_xfrm = ctx->input_xfrm; + ret = 0; + } else { + ret = dev->ethtool_ops->get_rxfh(dev, &rxfh_dev); + if (ret) + goto out; + } if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, hfunc), &rxfh_dev.hfunc, sizeof(rxfh.hfunc))) { @@ -1271,6 +1291,40 @@ out: return ret; } +static struct ethtool_rxfh_context * +ethtool_rxfh_ctx_alloc(const struct ethtool_ops *ops, + u32 indir_size, u32 key_size) +{ + size_t indir_bytes, flex_len, key_off, size; + struct ethtool_rxfh_context *ctx; + u32 priv_bytes, indir_max; + u16 key_max; + + key_max = max(key_size, ops->rxfh_key_space); + indir_max = max(indir_size, ops->rxfh_indir_space); + + priv_bytes = ALIGN(ops->rxfh_priv_size, sizeof(u32)); + indir_bytes = array_size(indir_max, sizeof(u32)); + + key_off = size_add(priv_bytes, indir_bytes); + flex_len = size_add(key_off, key_max); + size = struct_size_t(struct ethtool_rxfh_context, data, flex_len); + + ctx = kzalloc(size, GFP_KERNEL_ACCOUNT); + if (!ctx) + return NULL; + + ctx->indir_size = indir_size; + ctx->key_size = key_size; + ctx->key_off = key_off; + ctx->priv_size = ops->rxfh_priv_size; + + ctx->hfunc = ETH_RSS_HASH_NO_CHANGE; + ctx->input_xfrm = RXH_XFRM_NO_CHANGE; + + return ctx; +} + static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, void __user *useraddr) { @@ -1278,10 +1332,13 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, const struct ethtool_ops *ops = dev->ethtool_ops; u32 dev_indir_size = 0, dev_key_size = 0, i; struct ethtool_rxfh_param rxfh_dev = {}; + struct ethtool_rxfh_context *ctx = NULL; struct netlink_ext_ack *extack = NULL; struct ethtool_rxnfc rx_rings; struct ethtool_rxfh rxfh; + bool locked = false; /* dev->ethtool->rss_lock taken */ u32 indir_bytes = 0; + bool create = false; u8 *rss_config; int ret; @@ -1310,6 +1367,7 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, (rxfh.input_xfrm & RXH_XFRM_SYM_XOR) && !ops->cap_rss_sym_xor_supported) return -EOPNOTSUPP; + create = rxfh.rss_context == ETH_RXFH_CONTEXT_ALLOC; /* If either indir, hash key or function is valid, proceed further. 
* Must request at least one change: indir size, hash key, function @@ -1375,13 +1433,69 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, } } + if (rxfh.rss_context) { + mutex_lock(&dev->ethtool->rss_lock); + locked = true; + } + if (create) { + if (rxfh_dev.rss_delete) { + ret = -EINVAL; + goto out; + } + ctx = ethtool_rxfh_ctx_alloc(ops, dev_indir_size, dev_key_size); + if (!ctx) { + ret = -ENOMEM; + goto out; + } + + if (ops->create_rxfh_context) { + u32 limit = ops->rxfh_max_context_id ?: U32_MAX; + u32 ctx_id; + + /* driver uses new API, core allocates ID */ + ret = xa_alloc(&dev->ethtool->rss_ctx, &ctx_id, ctx, + XA_LIMIT(1, limit), GFP_KERNEL_ACCOUNT); + if (ret < 0) { + kfree(ctx); + goto out; + } + WARN_ON(!ctx_id); /* can't happen */ + rxfh.rss_context = ctx_id; + } + } else if (rxfh.rss_context) { + ctx = xa_load(&dev->ethtool->rss_ctx, rxfh.rss_context); + if (!ctx) { + ret = -ENOENT; + goto out; + } + } rxfh_dev.hfunc = rxfh.hfunc; rxfh_dev.rss_context = rxfh.rss_context; rxfh_dev.input_xfrm = rxfh.input_xfrm; - ret = ops->set_rxfh(dev, &rxfh_dev, extack); - if (ret) + if (rxfh.rss_context && ops->create_rxfh_context) { + if (create) + ret = ops->create_rxfh_context(dev, ctx, &rxfh_dev, + extack); + else if (rxfh_dev.rss_delete) + ret = ops->remove_rxfh_context(dev, ctx, + rxfh.rss_context, + extack); + else + ret = ops->modify_rxfh_context(dev, ctx, &rxfh_dev, + extack); + } else { + ret = ops->set_rxfh(dev, &rxfh_dev, extack); + } + if (ret) { + if (create) { + /* failed to create, free our new tracking entry */ + if (ops->create_rxfh_context) + xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context); + kfree(ctx); + } goto out; + } if (copy_to_user(useraddr + offsetof(struct ethtool_rxfh, rss_context), &rxfh_dev.rss_context, sizeof(rxfh_dev.rss_context))) @@ -1394,8 +1508,44 @@ static noinline_for_stack int ethtool_set_rxfh(struct net_device *dev, else if (rxfh.indir_size != ETH_RXFH_INDIR_NO_CHANGE) dev->priv_flags |= IFF_RXFH_CONFIGURED; } + /* Update rss_ctx tracking */ + if (create && !ops->create_rxfh_context) { + /* driver uses old API, it chose context ID */ + if (WARN_ON(xa_load(&dev->ethtool->rss_ctx, rxfh_dev.rss_context))) { + /* context ID reused, our tracking is screwed */ + kfree(ctx); + goto out; + } + /* Allocate the exact ID the driver gave us */ + if (xa_is_err(xa_store(&dev->ethtool->rss_ctx, rxfh_dev.rss_context, + ctx, GFP_KERNEL))) { + kfree(ctx); + goto out; + } + } + if (rxfh_dev.rss_delete) { + WARN_ON(xa_erase(&dev->ethtool->rss_ctx, rxfh.rss_context) != ctx); + kfree(ctx); + } else if (ctx) { + if (rxfh_dev.indir) { + for (i = 0; i < dev_indir_size; i++) + ethtool_rxfh_context_indir(ctx)[i] = rxfh_dev.indir[i]; + ctx->indir_configured = 1; + } + if (rxfh_dev.key) { + memcpy(ethtool_rxfh_context_key(ctx), rxfh_dev.key, + dev_key_size); + ctx->key_configured = 1; + } + if (rxfh_dev.hfunc != ETH_RSS_HASH_NO_CHANGE) + ctx->hfunc = rxfh_dev.hfunc; + if (rxfh_dev.input_xfrm != RXH_XFRM_NO_CHANGE) + ctx->input_xfrm = rxfh_dev.input_xfrm; + } out: + if (locked) + mutex_unlock(&dev->ethtool->rss_lock); kfree(rss_config); return ret; } @@ -1450,6 +1600,9 @@ static int ethtool_reset(struct net_device *dev, char __user *useraddr) if (!dev->ethtool_ops->reset) return -EOPNOTSUPP; + if (dev->ethtool->module_fw_flash_in_progress) + return -EBUSY; + if (copy_from_user(&reset, useraddr, sizeof(reset))) return -EFAULT; @@ -1504,7 +1657,7 @@ static int ethtool_set_wol(struct net_device *dev, char __user *useraddr) if (ret) return ret; - 
dev->wol_enabled = !!wol.wolopts; + dev->ethtool->wol_enabled = !!wol.wolopts; ethtool_notify(dev, ETHTOOL_MSG_WOL_NTF, NULL); return 0; @@ -1924,9 +2077,7 @@ static noinline_for_stack int ethtool_set_channels(struct net_device *dev, * indirection table/rxnfc settings */ if (ethtool_get_max_rxnfc_channel(dev, &max_rxnfc_in_use)) max_rxnfc_in_use = 0; - if (!netif_is_rxfh_configured(dev) || - ethtool_get_max_rxfh_channel(dev, &max_rxfh_in_use)) - max_rxfh_in_use = 0; + max_rxfh_in_use = ethtool_get_max_rxfh_channel(dev); if (channels.combined_count + channels.rx_count <= max_t(u64, max_rxnfc_in_use, max_rxfh_in_use)) return -EINVAL; @@ -2444,13 +2595,20 @@ out: static int ethtool_get_ts_info(struct net_device *dev, void __user *useraddr) { - struct ethtool_ts_info info; + struct kernel_ethtool_ts_info kernel_info; + struct ethtool_ts_info info = {}; int err; - err = __ethtool_get_ts_info(dev, &info); + err = __ethtool_get_ts_info(dev, &kernel_info); if (err) return err; + info.cmd = kernel_info.cmd; + info.so_timestamping = kernel_info.so_timestamping; + info.phc_index = kernel_info.phc_index; + info.tx_types = kernel_info.tx_types; + info.rx_filters = kernel_info.rx_filters; + if (copy_to_user(useraddr, &info, sizeof(info))) return -EFAULT; @@ -2463,6 +2621,9 @@ int ethtool_get_module_info_call(struct net_device *dev, const struct ethtool_ops *ops = dev->ethtool_ops; struct phy_device *phydev = dev->phydev; + if (dev->ethtool->module_fw_flash_in_progress) + return -EBUSY; + if (dev->sfp_bus) return sfp_get_module_info(dev->sfp_bus, modinfo); @@ -2500,6 +2661,9 @@ int ethtool_get_module_eeprom_call(struct net_device *dev, const struct ethtool_ops *ops = dev->ethtool_ops; struct phy_device *phydev = dev->phydev; + if (dev->ethtool->module_fw_flash_in_progress) + return -EBUSY; + if (dev->sfp_bus) return sfp_get_module_eeprom(dev->sfp_bus, ee, data); diff --git a/net/ethtool/module.c b/net/ethtool/module.c index ceb575efc290..6988e07bdcd6 100644 --- a/net/ethtool/module.c +++ b/net/ethtool/module.c @@ -1,10 +1,14 @@ // SPDX-License-Identifier: GPL-2.0-only #include <linux/ethtool.h> +#include <linux/firmware.h> +#include <linux/sfp.h> +#include <net/devlink.h> #include "netlink.h" #include "common.h" #include "bitset.h" +#include "module_fw.h" struct module_req_info { struct ethnl_req_info base; @@ -33,6 +37,12 @@ static int module_get_power_mode(struct net_device *dev, if (!ops->get_module_power_mode) return 0; + if (dev->ethtool->module_fw_flash_in_progress) { + NL_SET_ERR_MSG(extack, + "Module firmware flashing is in progress"); + return -EBUSY; + } + return ops->get_module_power_mode(dev, &data->power, extack); } @@ -109,6 +119,12 @@ ethnl_set_module_validate(struct ethnl_req_info *req_info, if (!tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY]) return 0; + if (req_info->dev->ethtool->module_fw_flash_in_progress) { + NL_SET_ERR_MSG(info->extack, + "Module firmware flashing is in progress"); + return -EBUSY; + } + if (!ops->get_module_power_mode || !ops->set_module_power_mode) { NL_SET_ERR_MSG_ATTR(info->extack, tb[ETHTOOL_A_MODULE_POWER_MODE_POLICY], @@ -158,3 +174,381 @@ const struct ethnl_request_ops ethnl_module_request_ops = { .set = ethnl_set_module, .set_ntf_cmd = ETHTOOL_MSG_MODULE_NTF, }; + +/* MODULE_FW_FLASH_ACT */ + +const struct nla_policy +ethnl_module_fw_flash_act_policy[ETHTOOL_A_MODULE_FW_FLASH_PASSWORD + 1] = { + [ETHTOOL_A_MODULE_FW_FLASH_HEADER] = + NLA_POLICY_NESTED(ethnl_header_policy), + [ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME] = { .type = NLA_NUL_STRING }, + [ETHTOOL_A_MODULE_FW_FLASH_PASSWORD] = { .type = NLA_U32 }, +}; + +static 
LIST_HEAD(module_fw_flash_work_list); +static DEFINE_SPINLOCK(module_fw_flash_work_list_lock); + +static int +module_flash_fw_work_list_add(struct ethtool_module_fw_flash *module_fw, + struct genl_info *info) +{ + struct ethtool_module_fw_flash *work; + + /* First, check if already registered. */ + spin_lock(&module_fw_flash_work_list_lock); + list_for_each_entry(work, &module_fw_flash_work_list, list) { + if (work->fw_update.ntf_params.portid == info->snd_portid && + work->fw_update.dev == module_fw->fw_update.dev) { + spin_unlock(&module_fw_flash_work_list_lock); + return -EALREADY; + } + } + + list_add_tail(&module_fw->list, &module_fw_flash_work_list); + spin_unlock(&module_fw_flash_work_list_lock); + + return 0; +} + +static void module_flash_fw_work_list_del(struct list_head *list) +{ + spin_lock(&module_fw_flash_work_list_lock); + list_del(list); + spin_unlock(&module_fw_flash_work_list_lock); +} + +static void module_flash_fw_work(struct work_struct *work) +{ + struct ethtool_module_fw_flash *module_fw; + + module_fw = container_of(work, struct ethtool_module_fw_flash, work); + + ethtool_cmis_fw_update(&module_fw->fw_update); + + module_flash_fw_work_list_del(&module_fw->list); + module_fw->fw_update.dev->ethtool->module_fw_flash_in_progress = false; + netdev_put(module_fw->fw_update.dev, &module_fw->dev_tracker); + release_firmware(module_fw->fw_update.fw); + kfree(module_fw); +} + +#define MODULE_EEPROM_PHYS_ID_PAGE 0 +#define MODULE_EEPROM_PHYS_ID_I2C_ADDR 0x50 + +static int module_flash_fw_work_init(struct ethtool_module_fw_flash *module_fw, + struct net_device *dev, + struct netlink_ext_ack *extack) +{ + const struct ethtool_ops *ops = dev->ethtool_ops; + struct ethtool_module_eeprom page_data = {}; + u8 phys_id; + int err; + + /* Fetch the SFF-8024 Identifier Value. For all supported standards, it + * is located at I2C address 0x50, byte 0. See section 4.1 in SFF-8024, + * revision 4.9. 
+ */ + page_data.page = MODULE_EEPROM_PHYS_ID_PAGE; + page_data.offset = SFP_PHYS_ID; + page_data.length = sizeof(phys_id); + page_data.i2c_address = MODULE_EEPROM_PHYS_ID_I2C_ADDR; + page_data.data = &phys_id; + + err = ops->get_module_eeprom_by_page(dev, &page_data, extack); + if (err < 0) + return err; + + switch (phys_id) { + case SFF8024_ID_QSFP_DD: + case SFF8024_ID_OSFP: + case SFF8024_ID_DSFP: + case SFF8024_ID_QSFP_PLUS_CMIS: + case SFF8024_ID_SFP_DD_CMIS: + case SFF8024_ID_SFP_PLUS_CMIS: + INIT_WORK(&module_fw->work, module_flash_fw_work); + break; + default: + NL_SET_ERR_MSG(extack, + "Module type does not support firmware flashing"); + return -EOPNOTSUPP; + } + + return 0; +} + +void ethnl_module_fw_flash_sock_destroy(struct ethnl_sock_priv *sk_priv) +{ + struct ethtool_module_fw_flash *work; + + spin_lock(&module_fw_flash_work_list_lock); + list_for_each_entry(work, &module_fw_flash_work_list, list) { + if (work->fw_update.dev == sk_priv->dev && + work->fw_update.ntf_params.portid == sk_priv->portid) { + work->fw_update.ntf_params.closed_sock = true; + break; + } + } + spin_unlock(&module_fw_flash_work_list_lock); +} + +static int +module_flash_fw_schedule(struct net_device *dev, const char *file_name, + struct ethtool_module_fw_flash_params *params, + struct sk_buff *skb, struct genl_info *info) +{ + struct ethtool_cmis_fw_update_params *fw_update; + struct ethtool_module_fw_flash *module_fw; + int err; + + module_fw = kzalloc(sizeof(*module_fw), GFP_KERNEL); + if (!module_fw) + return -ENOMEM; + + fw_update = &module_fw->fw_update; + fw_update->params = *params; + err = request_firmware_direct(&fw_update->fw, + file_name, &dev->dev); + if (err) { + NL_SET_ERR_MSG(info->extack, + "Failed to request module firmware image"); + goto err_free; + } + + err = module_flash_fw_work_init(module_fw, dev, info->extack); + if (err < 0) + goto err_release_firmware; + + dev->ethtool->module_fw_flash_in_progress = true; + netdev_hold(dev, &module_fw->dev_tracker, GFP_KERNEL); + fw_update->dev = dev; + fw_update->ntf_params.portid = info->snd_portid; + fw_update->ntf_params.seq = info->snd_seq; + fw_update->ntf_params.closed_sock = false; + + err = ethnl_sock_priv_set(skb, dev, fw_update->ntf_params.portid, + ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH); + if (err < 0) + goto err_release_firmware; + + err = module_flash_fw_work_list_add(module_fw, info); + if (err < 0) + goto err_release_firmware; + + schedule_work(&module_fw->work); + + return 0; + +err_release_firmware: + release_firmware(fw_update->fw); +err_free: + kfree(module_fw); + return err; +} + +static int module_flash_fw(struct net_device *dev, struct nlattr **tb, + struct sk_buff *skb, struct genl_info *info) +{ + struct ethtool_module_fw_flash_params params = {}; + const char *file_name; + struct nlattr *attr; + + if (GENL_REQ_ATTR_CHECK(info, ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME)) + return -EINVAL; + + file_name = nla_data(tb[ETHTOOL_A_MODULE_FW_FLASH_FILE_NAME]); + + attr = tb[ETHTOOL_A_MODULE_FW_FLASH_PASSWORD]; + if (attr) { + params.password = cpu_to_be32(nla_get_u32(attr)); + params.password_valid = true; + } + + return module_flash_fw_schedule(dev, file_name, &params, skb, info); +} + +static int ethnl_module_fw_flash_validate(struct net_device *dev, + struct netlink_ext_ack *extack) +{ + struct devlink_port *devlink_port = dev->devlink_port; + const struct ethtool_ops *ops = dev->ethtool_ops; + + if (!ops->set_module_eeprom_by_page || + !ops->get_module_eeprom_by_page) { + NL_SET_ERR_MSG(extack, + "Flashing module firmware is not 
supported by this device"); + return -EOPNOTSUPP; + } + + if (!ops->reset) { + NL_SET_ERR_MSG(extack, + "Reset module is not supported by this device, so flashing is not permitted"); + return -EOPNOTSUPP; + } + + if (dev->ethtool->module_fw_flash_in_progress) { + NL_SET_ERR_MSG(extack, "Module firmware flashing already in progress"); + return -EBUSY; + } + + if (dev->flags & IFF_UP) { + NL_SET_ERR_MSG(extack, "Netdevice is up, so flashing is not permitted"); + return -EBUSY; + } + + if (devlink_port && devlink_port->attrs.split) { + NL_SET_ERR_MSG(extack, "Can't perform firmware flashing on a split port"); + return -EOPNOTSUPP; + } + + return 0; +} + +int ethnl_act_module_fw_flash(struct sk_buff *skb, struct genl_info *info) +{ + struct ethnl_req_info req_info = {}; + struct nlattr **tb = info->attrs; + struct net_device *dev; + int ret; + + ret = ethnl_parse_header_dev_get(&req_info, + tb[ETHTOOL_A_MODULE_FW_FLASH_HEADER], + genl_info_net(info), info->extack, + true); + if (ret < 0) + return ret; + dev = req_info.dev; + + rtnl_lock(); + ret = ethnl_ops_begin(dev); + if (ret < 0) + goto out_rtnl; + + ret = ethnl_module_fw_flash_validate(dev, info->extack); + if (ret < 0) + goto out_rtnl; + + ret = module_flash_fw(dev, tb, skb, info); + + ethnl_ops_complete(dev); + +out_rtnl: + rtnl_unlock(); + ethnl_parse_header_dev_put(&req_info); + return ret; +} + +/* MODULE_FW_FLASH_NTF */ + +static int +ethnl_module_fw_flash_ntf_put_err(struct sk_buff *skb, char *err_msg, + char *sub_err_msg) +{ + int err_msg_len, sub_err_msg_len, total_len; + struct nlattr *attr; + + if (!err_msg) + return 0; + + err_msg_len = strlen(err_msg); + total_len = err_msg_len + 2; /* For period and NUL. */ + + if (sub_err_msg) { + sub_err_msg_len = strlen(sub_err_msg); + total_len += sub_err_msg_len + 2; /* For ", ". 
*/ + attr = nla_reserve(skb, ETHTOOL_A_MODULE_FW_FLASH_STATUS_MSG, + total_len); + if (!attr) + return -ENOMEM; + + if (sub_err_msg) + sprintf(nla_data(attr), "%s, %s.", err_msg, sub_err_msg); + else + sprintf(nla_data(attr), "%s.", err_msg); + + return 0; +} + +static void +ethnl_module_fw_flash_ntf(struct net_device *dev, + enum ethtool_module_fw_flash_status status, + struct ethnl_module_fw_flash_ntf_params *ntf_params, + char *err_msg, char *sub_err_msg, + u64 done, u64 total) +{ + struct sk_buff *skb; + void *hdr; + int ret; + + if (ntf_params->closed_sock) + return; + + skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); + if (!skb) + return; + + hdr = ethnl_unicast_put(skb, ntf_params->portid, ++ntf_params->seq, + ETHTOOL_MSG_MODULE_FW_FLASH_NTF); + if (!hdr) + goto err_skb; + + ret = ethnl_fill_reply_header(skb, dev, + ETHTOOL_A_MODULE_FW_FLASH_HEADER); + if (ret < 0) + goto err_skb; + + if (nla_put_u32(skb, ETHTOOL_A_MODULE_FW_FLASH_STATUS, status)) + goto err_skb; + + ret = ethnl_module_fw_flash_ntf_put_err(skb, err_msg, sub_err_msg); + if (ret < 0) + goto err_skb; + + if (nla_put_uint(skb, ETHTOOL_A_MODULE_FW_FLASH_DONE, done)) + goto err_skb; + + if (nla_put_uint(skb, ETHTOOL_A_MODULE_FW_FLASH_TOTAL, total)) + goto err_skb; + + genlmsg_end(skb, hdr); + genlmsg_unicast(dev_net(dev), skb, ntf_params->portid); + return; + +err_skb: + nlmsg_free(skb); +} + +void ethnl_module_fw_flash_ntf_err(struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *params, + char *err_msg, char *sub_err_msg) +{ + ethnl_module_fw_flash_ntf(dev, ETHTOOL_MODULE_FW_FLASH_STATUS_ERROR, + params, err_msg, sub_err_msg, 0, 0); +} + +void +ethnl_module_fw_flash_ntf_start(struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *params) +{ + ethnl_module_fw_flash_ntf(dev, ETHTOOL_MODULE_FW_FLASH_STATUS_STARTED, + params, NULL, NULL, 0, 0); +} + +void +ethnl_module_fw_flash_ntf_complete(struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *params) +{ + ethnl_module_fw_flash_ntf(dev, ETHTOOL_MODULE_FW_FLASH_STATUS_COMPLETED, + params, NULL, NULL, 0, 0); +} + +void +ethnl_module_fw_flash_ntf_in_progress(struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *params, + u64 done, u64 total) +{ + ethnl_module_fw_flash_ntf(dev, + ETHTOOL_MODULE_FW_FLASH_STATUS_IN_PROGRESS, + params, NULL, NULL, done, total); +} diff --git a/net/ethtool/module_fw.h b/net/ethtool/module_fw.h new file mode 100644 index 000000000000..634543a12d0c --- /dev/null +++ b/net/ethtool/module_fw.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#include <linux/ethtool.h> +#include "netlink.h" + +/** + * struct ethnl_module_fw_flash_ntf_params - module firmware flashing + * notification parameters + * @portid: Netlink portid of sender. + * @seq: Sequence number of sender. + * @closed_sock: Indicates whether the socket was closed from user space. + */ +struct ethnl_module_fw_flash_ntf_params { + u32 portid; + u32 seq; + bool closed_sock; +}; + +/** + * struct ethtool_module_fw_flash_params - module firmware flashing parameters + * @password: Module password. Only valid when @password_valid is set. + * @password_valid: Whether the module password is valid or not. + */ +struct ethtool_module_fw_flash_params { + __be32 password; + u8 password_valid:1; +}; + +/** + * struct ethtool_cmis_fw_update_params - CMIS firmware update specific + * parameters + * @dev: Pointer to the net_device to be flashed. + * @params: Module firmware flashing parameters. 
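The total_len arithmetic in ethnl_module_fw_flash_ntf_put_err() above reserves exactly the bytes the sprintf() consumes, including the trailing period and NUL. A stand-alone user-space check of that sizing rule (the message strings are made up for the demo):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char *err = "Firmware flashing failed";	/* example text */
		const char *sub = "Invalid image";		/* example text */
		/* err + "." + NUL, plus sub + ", " when a sub-message exists */
		size_t total = strlen(err) + 2 + strlen(sub) + 2;
		char buf[128];

		snprintf(buf, sizeof(buf), "%s, %s.", err, sub);
		/* prints "reserved 41, used 41": the reservation is exact */
		printf("reserved %zu, used %zu\n", total, strlen(buf) + 1);
		return 0;
	}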
+ * @ntf_params: Module firmware flashing notification parameters. + * @fw: Firmware to flash. + */ +struct ethtool_cmis_fw_update_params { + struct net_device *dev; + struct ethtool_module_fw_flash_params params; + struct ethnl_module_fw_flash_ntf_params ntf_params; + const struct firmware *fw; +}; + +/** + * struct ethtool_module_fw_flash - module firmware flashing + * @list: List node for &module_fw_flash_work_list. + * @dev_tracker: Refcount tracker for @dev. + * @work: The flashing firmware work. + * @fw_update: CMIS firmware update specific parameters. + */ +struct ethtool_module_fw_flash { + struct list_head list; + netdevice_tracker dev_tracker; + struct work_struct work; + struct ethtool_cmis_fw_update_params fw_update; +}; + +void ethnl_module_fw_flash_sock_destroy(struct ethnl_sock_priv *sk_priv); + +void +ethnl_module_fw_flash_ntf_err(struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *params, + char *err_msg, char *sub_err_msg); +void +ethnl_module_fw_flash_ntf_start(struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *params); +void +ethnl_module_fw_flash_ntf_complete(struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *params); +void +ethnl_module_fw_flash_ntf_in_progress(struct net_device *dev, + struct ethnl_module_fw_flash_ntf_params *params, + u64 done, u64 total); + +void ethtool_cmis_fw_update(struct ethtool_cmis_fw_update_params *params); diff --git a/net/ethtool/netlink.c b/net/ethtool/netlink.c index bd04f28d5cf4..cb1eea00e349 100644 --- a/net/ethtool/netlink.c +++ b/net/ethtool/netlink.c @@ -4,6 +4,7 @@ #include <linux/ethtool_netlink.h> #include <linux/pm_runtime.h> #include "netlink.h" +#include "module_fw.h" static struct genl_family ethtool_genl_family; @@ -30,6 +31,35 @@ const struct nla_policy ethnl_header_policy_stats[] = { ETHTOOL_FLAGS_STATS), }; +int ethnl_sock_priv_set(struct sk_buff *skb, struct net_device *dev, u32 portid, + enum ethnl_sock_type type) +{ + struct ethnl_sock_priv *sk_priv; + + sk_priv = genl_sk_priv_get(&ethtool_genl_family, NETLINK_CB(skb).sk); + if (IS_ERR(sk_priv)) + return PTR_ERR(sk_priv); + + sk_priv->dev = dev; + sk_priv->portid = portid; + sk_priv->type = type; + + return 0; +} + +static void ethnl_sock_priv_destroy(void *priv) +{ + struct ethnl_sock_priv *sk_priv = priv; + + switch (sk_priv->type) { + case ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH: + ethnl_module_fw_flash_sock_destroy(sk_priv); + break; + default: + break; + } +} + int ethnl_ops_begin(struct net_device *dev) { int ret; @@ -239,6 +269,11 @@ void *ethnl_bcastmsg_put(struct sk_buff *skb, u8 cmd) cmd); } +void *ethnl_unicast_put(struct sk_buff *skb, u32 portid, u32 seq, u8 cmd) +{ + return genlmsg_put(skb, portid, seq, &ethtool_genl_family, 0, cmd); +} + int ethnl_multicast(struct sk_buff *skb, struct net_device *dev) { return genlmsg_multicast_netns(&ethtool_genl_family, dev_net(dev), skb, @@ -760,10 +795,22 @@ static void ethnl_notify_features(struct netdev_notifier_info *info) static int ethnl_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { + struct netdev_notifier_info *info = ptr; + struct netlink_ext_ack *extack; + struct net_device *dev; + + dev = netdev_notifier_info_to_dev(info); + extack = netdev_notifier_info_to_extack(info); + switch (event) { case NETDEV_FEAT_CHANGE: ethnl_notify_features(ptr); break; + case NETDEV_PRE_UP: + if (dev->ethtool->module_fw_flash_in_progress) { + NL_SET_ERR_MSG(extack, "Can't set port up while flashing module firmware"); + return NOTIFY_BAD; + } } return NOTIFY_DONE; @@ -1125,6 +1172,13 @@ static const struct 
genl_ops ethtool_genl_ops[] = { .policy = ethnl_mm_set_policy, .maxattr = ARRAY_SIZE(ethnl_mm_set_policy) - 1, }, + { + .cmd = ETHTOOL_MSG_MODULE_FW_FLASH_ACT, + .flags = GENL_UNS_ADMIN_PERM, + .doit = ethnl_act_module_fw_flash, + .policy = ethnl_module_fw_flash_act_policy, + .maxattr = ARRAY_SIZE(ethnl_module_fw_flash_act_policy) - 1, + }, }; static const struct genl_multicast_group ethtool_nl_mcgrps[] = { @@ -1141,6 +1195,8 @@ static struct genl_family ethtool_genl_family __ro_after_init = { .resv_start_op = ETHTOOL_MSG_MODULE_GET + 1, .mcgrps = ethtool_nl_mcgrps, .n_mcgrps = ARRAY_SIZE(ethtool_nl_mcgrps), + .sock_priv_size = sizeof(struct ethnl_sock_priv), + .sock_priv_destroy = ethnl_sock_priv_destroy, }; /* module setup */ diff --git a/net/ethtool/netlink.h b/net/ethtool/netlink.h index 9a333a8d04c1..46ec273a87c5 100644 --- a/net/ethtool/netlink.h +++ b/net/ethtool/netlink.h @@ -21,6 +21,7 @@ struct sk_buff *ethnl_reply_init(size_t payload, struct net_device *dev, u8 cmd, void **ehdrp); void *ethnl_dump_put(struct sk_buff *skb, struct netlink_callback *cb, u8 cmd); void *ethnl_bcastmsg_put(struct sk_buff *skb, u8 cmd); +void *ethnl_unicast_put(struct sk_buff *skb, u32 portid, u32 seq, u8 cmd); int ethnl_multicast(struct sk_buff *skb, struct net_device *dev); /** @@ -283,6 +284,19 @@ struct ethnl_reply_data { int ethnl_ops_begin(struct net_device *dev); void ethnl_ops_complete(struct net_device *dev); +enum ethnl_sock_type { + ETHTOOL_SOCK_TYPE_MODULE_FW_FLASH, +}; + +struct ethnl_sock_priv { + struct net_device *dev; + u32 portid; + enum ethnl_sock_type type; +}; + +int ethnl_sock_priv_set(struct sk_buff *skb, struct net_device *dev, u32 portid, + enum ethnl_sock_type type); + /** * struct ethnl_request_ops - unified handling of GET and SET requests * @request_cmd: command id for request (GET) @@ -441,6 +455,7 @@ extern const struct nla_policy ethnl_plca_set_cfg_policy[ETHTOOL_A_PLCA_MAX + 1] extern const struct nla_policy ethnl_plca_get_status_policy[ETHTOOL_A_PLCA_HEADER + 1]; extern const struct nla_policy ethnl_mm_get_policy[ETHTOOL_A_MM_HEADER + 1]; extern const struct nla_policy ethnl_mm_set_policy[ETHTOOL_A_MM_MAX + 1]; +extern const struct nla_policy ethnl_module_fw_flash_act_policy[ETHTOOL_A_MODULE_FW_FLASH_PASSWORD + 1]; int ethnl_set_features(struct sk_buff *skb, struct genl_info *info); int ethnl_act_cable_test(struct sk_buff *skb, struct genl_info *info); @@ -448,6 +463,7 @@ int ethnl_act_cable_test_tdr(struct sk_buff *skb, struct genl_info *info); int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info); int ethnl_tunnel_info_start(struct netlink_callback *cb); int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb); +int ethnl_act_module_fw_flash(struct sk_buff *skb, struct genl_info *info); extern const char stats_std_names[__ETHTOOL_STATS_CNT][ETH_GSTRING_LEN]; extern const char stats_eth_phy_names[__ETHTOOL_A_STATS_ETH_PHY_CNT][ETH_GSTRING_LEN]; diff --git a/net/ethtool/pse-pd.c b/net/ethtool/pse-pd.c index 2c981d443f27..ff81aa749784 100644 --- a/net/ethtool/pse-pd.c +++ b/net/ethtool/pse-pd.c @@ -86,10 +86,56 @@ static int pse_reply_size(const struct ethnl_req_info *req_base, len += nla_total_size(sizeof(u32)); /* _C33_PSE_ADMIN_STATE */ if (st->c33_pw_status > 0) len += nla_total_size(sizeof(u32)); /* _C33_PSE_PW_D_STATUS */ + if (st->c33_pw_class > 0) + len += nla_total_size(sizeof(u32)); /* _C33_PSE_PW_CLASS */ + if (st->c33_actual_pw > 0) + len += nla_total_size(sizeof(u32)); /* _C33_PSE_ACTUAL_PW */ + if 
(st->c33_ext_state_info.c33_pse_ext_state > 0) { + len += nla_total_size(sizeof(u32)); /* _C33_PSE_EXT_STATE */ + if (st->c33_ext_state_info.__c33_pse_ext_substate > 0) + /* _C33_PSE_EXT_SUBSTATE */ + len += nla_total_size(sizeof(u32)); + } + if (st->c33_avail_pw_limit > 0) + /* _C33_AVAIL_PSE_PW_LIMIT */ + len += nla_total_size(sizeof(u32)); + if (st->c33_pw_limit_nb_ranges > 0) + /* _C33_PSE_PW_LIMIT_RANGES */ + len += st->c33_pw_limit_nb_ranges * + (nla_total_size(0) + + nla_total_size(sizeof(u32)) * 2); return len; } +static int pse_put_pw_limit_ranges(struct sk_buff *skb, + const struct pse_control_status *st) +{ + const struct ethtool_c33_pse_pw_limit_range *pw_limit_ranges; + int i; + + pw_limit_ranges = st->c33_pw_limit_ranges; + for (i = 0; i < st->c33_pw_limit_nb_ranges; i++) { + struct nlattr *nest; + + nest = nla_nest_start(skb, ETHTOOL_A_C33_PSE_PW_LIMIT_RANGES); + if (!nest) + return -EMSGSIZE; + + if (nla_put_u32(skb, ETHTOOL_A_C33_PSE_PW_LIMIT_MIN, + pw_limit_ranges->min) || + nla_put_u32(skb, ETHTOOL_A_C33_PSE_PW_LIMIT_MAX, + pw_limit_ranges->max)) { + nla_nest_cancel(skb, nest); + return -EMSGSIZE; + } + nla_nest_end(skb, nest); + pw_limit_ranges++; + } + + return 0; +} + static int pse_fill_reply(struct sk_buff *skb, const struct ethnl_req_info *req_base, const struct ethnl_reply_data *reply_base) @@ -117,9 +163,46 @@ static int pse_fill_reply(struct sk_buff *skb, st->c33_pw_status)) return -EMSGSIZE; + if (st->c33_pw_class > 0 && + nla_put_u32(skb, ETHTOOL_A_C33_PSE_PW_CLASS, + st->c33_pw_class)) + return -EMSGSIZE; + + if (st->c33_actual_pw > 0 && + nla_put_u32(skb, ETHTOOL_A_C33_PSE_ACTUAL_PW, + st->c33_actual_pw)) + return -EMSGSIZE; + + if (st->c33_ext_state_info.c33_pse_ext_state > 0) { + if (nla_put_u32(skb, ETHTOOL_A_C33_PSE_EXT_STATE, + st->c33_ext_state_info.c33_pse_ext_state)) + return -EMSGSIZE; + + if (st->c33_ext_state_info.__c33_pse_ext_substate > 0 && + nla_put_u32(skb, ETHTOOL_A_C33_PSE_EXT_SUBSTATE, + st->c33_ext_state_info.__c33_pse_ext_substate)) + return -EMSGSIZE; + } + + if (st->c33_avail_pw_limit > 0 && + nla_put_u32(skb, ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT, + st->c33_avail_pw_limit)) + return -EMSGSIZE; + + if (st->c33_pw_limit_nb_ranges > 0 && + pse_put_pw_limit_ranges(skb, st)) + return -EMSGSIZE; + return 0; } +static void pse_cleanup_data(struct ethnl_reply_data *reply_base) +{ + const struct pse_reply_data *data = PSE_REPDATA(reply_base); + + kfree(data->status.c33_pw_limit_ranges); +} + /* PSE_SET */ const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1] = { @@ -130,6 +213,7 @@ const struct nla_policy ethnl_pse_set_policy[ETHTOOL_A_PSE_MAX + 1] = { [ETHTOOL_A_C33_PSE_ADMIN_CONTROL] = NLA_POLICY_RANGE(NLA_U32, ETHTOOL_C33_PSE_ADMIN_STATE_DISABLED, ETHTOOL_C33_PSE_ADMIN_STATE_ENABLED), + [ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT] = { .type = NLA_U32 }, }; static int @@ -172,19 +256,43 @@ static int ethnl_set_pse(struct ethnl_req_info *req_info, struct genl_info *info) { struct net_device *dev = req_info->dev; - struct pse_control_config config = {}; struct nlattr **tb = info->attrs; struct phy_device *phydev; + int ret = 0; phydev = dev->phydev; - /* These values are already validated by the ethnl_pse_set_policy */ - if (pse_has_podl(phydev->psec)) - config.podl_admin_control = nla_get_u32(tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]); - if (pse_has_c33(phydev->psec)) - config.c33_admin_control = nla_get_u32(tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]); - /* Return errno directly - PSE has no notification */ - return pse_ethtool_set_config(phydev->psec, 
info->extack, &config); + if (tb[ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT]) { + unsigned int pw_limit; + + pw_limit = nla_get_u32(tb[ETHTOOL_A_C33_PSE_AVAIL_PW_LIMIT]); + ret = pse_ethtool_set_pw_limit(phydev->psec, info->extack, + pw_limit); + if (ret) + return ret; + } + + /* These values are already validated by the ethnl_pse_set_policy */ + if (tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL] || + tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]) { + struct pse_control_config config = {}; + + if (tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]) + config.podl_admin_control = nla_get_u32(tb[ETHTOOL_A_PODL_PSE_ADMIN_CONTROL]); + if (tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]) + config.c33_admin_control = nla_get_u32(tb[ETHTOOL_A_C33_PSE_ADMIN_CONTROL]); + + /* pse_ethtool_set_config() will do nothing if the config + * is zero + */ + ret = pse_ethtool_set_config(phydev->psec, info->extack, + &config); + if (ret) + return ret; + } + + /* Return errno or zero - PSE has no notification */ + return ret; } const struct ethnl_request_ops ethnl_pse_request_ops = { @@ -197,6 +305,7 @@ const struct ethnl_request_ops ethnl_pse_request_ops = { .prepare_data = pse_prepare_data, .reply_size = pse_reply_size, .fill_reply = pse_fill_reply, + .cleanup_data = pse_cleanup_data, .set_validate = ethnl_set_pse_validate, .set = ethnl_set_pse, diff --git a/net/ethtool/tsinfo.c b/net/ethtool/tsinfo.c index 57d496287e52..03d12d6f79ca 100644 --- a/net/ethtool/tsinfo.c +++ b/net/ethtool/tsinfo.c @@ -12,7 +12,7 @@ struct tsinfo_req_info { struct tsinfo_reply_data { struct ethnl_reply_data base; - struct ethtool_ts_info ts_info; + struct kernel_ethtool_ts_info ts_info; struct ethtool_ts_stats stats; }; @@ -55,7 +55,7 @@ static int tsinfo_reply_size(const struct ethnl_req_info *req_base, { const struct tsinfo_reply_data *data = TSINFO_REPDATA(reply_base); bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; - const struct ethtool_ts_info *ts_info = &data->ts_info; + const struct kernel_ethtool_ts_info *ts_info = &data->ts_info; int len = 0; int ret; @@ -136,7 +136,7 @@ static int tsinfo_fill_reply(struct sk_buff *skb, { const struct tsinfo_reply_data *data = TSINFO_REPDATA(reply_base); bool compact = req_base->flags & ETHTOOL_FLAG_COMPACT_BITSETS; - const struct ethtool_ts_info *ts_info = &data->ts_info; + const struct kernel_ethtool_ts_info *ts_info = &data->ts_info; int ret; if (ts_info->so_timestamping) { diff --git a/net/ethtool/wol.c b/net/ethtool/wol.c index 0ed56c9ac1bc..a39d8000d808 100644 --- a/net/ethtool/wol.c +++ b/net/ethtool/wol.c @@ -137,7 +137,7 @@ ethnl_set_wol(struct ethnl_req_info *req_info, struct genl_info *info) ret = dev->ethtool_ops->set_wol(dev, &wol); if (ret) return ret; - dev->wol_enabled = !!wol.wolopts; + dev->ethtool->wol_enabled = !!wol.wolopts; return 1; } diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c index e6904288d40d..e4cc6b78dcfc 100644 --- a/net/hsr/hsr_device.c +++ b/net/hsr/hsr_device.c @@ -73,9 +73,15 @@ static void hsr_check_announce(struct net_device *hsr_dev) mod_timer(&hsr->announce_timer, jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL)); } + + if (hsr->redbox && !timer_pending(&hsr->announce_proxy_timer)) + mod_timer(&hsr->announce_proxy_timer, jiffies + + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL) / 2); } else { /* Deactivate the announce timer */ timer_delete(&hsr->announce_timer); + if (hsr->redbox) + timer_delete(&hsr->announce_proxy_timer); } } @@ -279,10 +285,11 @@ out: return NULL; } -static void send_hsr_supervision_frame(struct hsr_port *master, - unsigned long *interval) +static void 
send_hsr_supervision_frame(struct hsr_port *port, + unsigned long *interval, + const unsigned char *addr) { - struct hsr_priv *hsr = master->hsr; + struct hsr_priv *hsr = port->hsr; __u8 type = HSR_TLV_LIFE_CHECK; struct hsr_sup_payload *hsr_sp; struct hsr_sup_tlv *hsr_stlv; @@ -296,9 +303,9 @@ static void send_hsr_supervision_frame(struct hsr_port *master, hsr->announce_count++; } - skb = hsr_init_skb(master); + skb = hsr_init_skb(port); if (!skb) { - netdev_warn_once(master->dev, "HSR: Could not send supervision frame\n"); + netdev_warn_once(port->dev, "HSR: Could not send supervision frame\n"); return; } @@ -321,11 +328,12 @@ static void send_hsr_supervision_frame(struct hsr_port *master, hsr_stag->tlv.HSR_TLV_length = hsr->prot_version ? sizeof(struct hsr_sup_payload) : 12; - /* Payload: MacAddressA */ + /* Payload: MacAddressA / SAN MAC from ProxyNodeTable */ hsr_sp = skb_put(skb, sizeof(struct hsr_sup_payload)); - ether_addr_copy(hsr_sp->macaddress_A, master->dev->dev_addr); + ether_addr_copy(hsr_sp->macaddress_A, addr); - if (hsr->redbox) { + if (hsr->redbox && + hsr_is_node_in_db(&hsr->proxy_node_db, addr)) { hsr_stlv = skb_put(skb, sizeof(struct hsr_sup_tlv)); hsr_stlv->HSR_TLV_type = PRP_TLV_REDBOX_MAC; hsr_stlv->HSR_TLV_length = sizeof(struct hsr_sup_payload); @@ -340,13 +348,14 @@ static void send_hsr_supervision_frame(struct hsr_port *master, return; } - hsr_forward_skb(skb, master); + hsr_forward_skb(skb, port); spin_unlock_bh(&hsr->seqnr_lock); return; } static void send_prp_supervision_frame(struct hsr_port *master, - unsigned long *interval) + unsigned long *interval, + const unsigned char *addr) { struct hsr_priv *hsr = master->hsr; struct hsr_sup_payload *hsr_sp; @@ -396,7 +405,7 @@ static void hsr_announce(struct timer_list *t) rcu_read_lock(); master = hsr_port_get_hsr(hsr, HSR_PT_MASTER); - hsr->proto_ops->send_sv_frame(master, &interval); + hsr->proto_ops->send_sv_frame(master, &interval, master->dev->dev_addr); if (is_admin_up(master->dev)) mod_timer(&hsr->announce_timer, jiffies + interval); @@ -404,6 +413,37 @@ static void hsr_announce(struct timer_list *t) rcu_read_unlock(); } +/* Announce (supervision frame) timer function for RedBox + */ +static void hsr_proxy_announce(struct timer_list *t) +{ + struct hsr_priv *hsr = from_timer(hsr, t, announce_proxy_timer); + struct hsr_port *interlink; + unsigned long interval = 0; + struct hsr_node *node; + + rcu_read_lock(); + /* RedBOX sends supervisory frames to HSR network with MAC addresses + * of SAN nodes stored in ProxyNodeTable. 
+ */ + interlink = hsr_port_get_hsr(hsr, HSR_PT_INTERLINK); + list_for_each_entry_rcu(node, &hsr->proxy_node_db, mac_list) { + if (hsr_addr_is_redbox(hsr, node->macaddress_A)) + continue; + hsr->proto_ops->send_sv_frame(interlink, &interval, + node->macaddress_A); + } + + if (is_admin_up(interlink->dev)) { + if (!interval) + interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL); + + mod_timer(&hsr->announce_proxy_timer, jiffies + interval); + } + + rcu_read_unlock(); +} + void hsr_del_ports(struct hsr_priv *hsr) { struct hsr_port *port; @@ -590,6 +630,7 @@ int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2], timer_setup(&hsr->announce_timer, hsr_announce, 0); timer_setup(&hsr->prune_timer, hsr_prune_nodes, 0); timer_setup(&hsr->prune_proxy_timer, hsr_prune_proxy_nodes, 0); + timer_setup(&hsr->announce_proxy_timer, hsr_proxy_announce, 0); ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr); hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec; diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c index 05a61b8286ec..b38060246e62 100644 --- a/net/hsr/hsr_forward.c +++ b/net/hsr/hsr_forward.c @@ -117,6 +117,35 @@ static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb) return true; } +static bool is_proxy_supervision_frame(struct hsr_priv *hsr, + struct sk_buff *skb) +{ + struct hsr_sup_payload *payload; + struct ethhdr *eth_hdr; + u16 total_length = 0; + + eth_hdr = (struct ethhdr *)skb_mac_header(skb); + + /* Get the HSR protocol revision. */ + if (eth_hdr->h_proto == htons(ETH_P_HSR)) + total_length = sizeof(struct hsrv1_ethhdr_sp); + else + total_length = sizeof(struct hsrv0_ethhdr_sp); + + if (!pskb_may_pull(skb, total_length + sizeof(struct hsr_sup_payload))) + return false; + + skb_pull(skb, total_length); + payload = (struct hsr_sup_payload *)skb->data; + skb_push(skb, total_length); + + /* For RedBox (HSR-SAN) check if we have received the supervision + * frame with MAC addresses from own ProxyNodeTable. + */ + return hsr_is_node_in_db(&hsr->proxy_node_db, + payload->macaddress_A); +} + static struct sk_buff *create_stripped_skb_hsr(struct sk_buff *skb_in, struct hsr_frame_info *frame) { @@ -392,9 +421,9 @@ static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port, bool prp_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port) { return ((frame->port_rcv->type == HSR_PT_SLAVE_A && - port->type == HSR_PT_SLAVE_B) || + port->type == HSR_PT_SLAVE_B) || (frame->port_rcv->type == HSR_PT_SLAVE_B && - port->type == HSR_PT_SLAVE_A)); + port->type == HSR_PT_SLAVE_A)); } bool hsr_drop_frame(struct hsr_frame_info *frame, struct hsr_port *port) @@ -499,7 +528,8 @@ static void hsr_forward_do(struct hsr_frame_info *frame) frame->sequence_nr)) continue; - if (frame->is_supervision && port->type == HSR_PT_MASTER) { + if (frame->is_supervision && port->type == HSR_PT_MASTER && + !frame->is_proxy_supervision) { hsr_handle_sup_frame(frame); continue; } @@ -637,6 +667,9 @@ static int fill_frame_info(struct hsr_frame_info *frame, memset(frame, 0, sizeof(*frame)); frame->is_supervision = is_supervision_frame(port->hsr, skb); + if (frame->is_supervision && hsr->redbox) + frame->is_proxy_supervision = + is_proxy_supervision_frame(port->hsr, skb); n_db = &hsr->node_db; if (port->type == HSR_PT_INTERLINK) @@ -688,7 +721,7 @@ void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port) /* Gets called for ingress frames as well as egress from master port. * So check and increment stats for master port only here. 
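is_proxy_supervision_frame() above uses a pull/read/push sequence to peek at the supervision payload that sits behind a variable-length header. The same pattern in generic form (skb_peek_at() is a hypothetical helper, not an existing kernel function):

	static void *skb_peek_at(struct sk_buff *skb, unsigned int offset,
				 unsigned int len)
	{
		void *p;

		if (!pskb_may_pull(skb, offset + len))	/* bytes must be linear */
			return NULL;
		skb_pull(skb, offset);			/* step past the headers */
		p = skb->data;
		skb_push(skb, offset);			/* restore skb->data */
		return p;
	}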
*/ - if (port->type == HSR_PT_MASTER) { + if (port->type == HSR_PT_MASTER || port->type == HSR_PT_INTERLINK) { port->dev->stats.tx_packets++; port->dev->stats.tx_bytes += skb->len; } diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 614df9649794..73bc6f659812 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -36,6 +36,14 @@ static bool seq_nr_after(u16 a, u16 b) #define seq_nr_before(a, b) seq_nr_after((b), (a)) #define seq_nr_before_or_eq(a, b) (!seq_nr_after((a), (b))) +bool hsr_addr_is_redbox(struct hsr_priv *hsr, unsigned char *addr) +{ + if (!hsr->redbox || !is_valid_ether_addr(hsr->macaddress_redbox)) + return false; + + return ether_addr_equal(addr, hsr->macaddress_redbox); +} + bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr) { struct hsr_self_node *sn; @@ -591,6 +599,10 @@ void hsr_prune_proxy_nodes(struct timer_list *t) spin_lock_bh(&hsr->list_lock); list_for_each_entry_safe(node, tmp, &hsr->proxy_node_db, mac_list) { + /* Don't prune RedBox node. */ + if (hsr_addr_is_redbox(hsr, node->macaddress_A)) + continue; + timestamp = node->time_in[HSR_PT_INTERLINK]; /* Prune old entries */ diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h index 7619e31c1d2d..993fa950d814 100644 --- a/net/hsr/hsr_framereg.h +++ b/net/hsr/hsr_framereg.h @@ -22,6 +22,7 @@ struct hsr_frame_info { struct hsr_node *node_src; u16 sequence_nr; bool is_supervision; + bool is_proxy_supervision; bool is_vlan; bool is_local_dest; bool is_local_exclusive; @@ -35,6 +36,7 @@ struct hsr_node *hsr_get_node(struct hsr_port *port, struct list_head *node_db, enum hsr_port_type rx_port); void hsr_handle_sup_frame(struct hsr_frame_info *frame); bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr); +bool hsr_addr_is_redbox(struct hsr_priv *hsr, unsigned char *addr); void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb); void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb, diff --git a/net/hsr/hsr_main.h b/net/hsr/hsr_main.h index 23850b16d1ea..ab1f8d35d9dc 100644 --- a/net/hsr/hsr_main.h +++ b/net/hsr/hsr_main.h @@ -170,7 +170,8 @@ struct hsr_node; struct hsr_proto_ops { /* format and send supervision frame */ - void (*send_sv_frame)(struct hsr_port *port, unsigned long *interval); + void (*send_sv_frame)(struct hsr_port *port, unsigned long *interval, + const unsigned char addr[ETH_ALEN]); void (*handle_san_frame)(bool san, enum hsr_port_type port, struct hsr_node *node); bool (*drop_frame)(struct hsr_frame_info *frame, struct hsr_port *port); @@ -197,6 +198,7 @@ struct hsr_priv { struct list_head proxy_node_db; /* RedBox HSR proxy nodes */ struct hsr_self_node __rcu *self_node; /* MACs of slaves */ struct timer_list announce_timer; /* Supervision frame dispatch */ + struct timer_list announce_proxy_timer; struct timer_list prune_timer; struct timer_list prune_proxy_timer; int announce_count; diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index 898f18c6da53..f6ff0b61e08a 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -131,6 +131,7 @@ static void hsr_dellink(struct net_device *dev, struct list_head *head) del_timer_sync(&hsr->prune_timer); del_timer_sync(&hsr->prune_proxy_timer); del_timer_sync(&hsr->announce_timer); + timer_delete_sync(&hsr->announce_proxy_timer); hsr_debugfs_term(hsr); hsr_del_ports(hsr); diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c index 56ef873828f4..867d637d86f0 100644 --- a/net/ieee802154/6lowpan/reassembly.c +++ 
b/net/ieee802154/6lowpan/reassembly.c @@ -130,7 +130,7 @@ static int lowpan_frag_queue(struct lowpan_frag_queue *fq, goto err; fq->q.stamp = skb->tstamp; - fq->q.mono_delivery_time = skb->mono_delivery_time; + fq->q.tstamp_type = skb->tstamp_type; if (frag_type == LOWPAN_DISPATCH_FRAG1) fq->q.flags |= INET_FRAG_FIRST_IN; diff --git a/net/ipv4/bpf_tcp_ca.c b/net/ipv4/bpf_tcp_ca.c index 18227757ec0c..3f88d0961e5b 100644 --- a/net/ipv4/bpf_tcp_ca.c +++ b/net/ipv4/bpf_tcp_ca.c @@ -260,17 +260,17 @@ static int bpf_tcp_ca_check_member(const struct btf_type *t, return 0; } -static int bpf_tcp_ca_reg(void *kdata) +static int bpf_tcp_ca_reg(void *kdata, struct bpf_link *link) { return tcp_register_congestion_control(kdata); } -static void bpf_tcp_ca_unreg(void *kdata) +static void bpf_tcp_ca_unreg(void *kdata, struct bpf_link *link) { tcp_unregister_congestion_control(kdata); } -static int bpf_tcp_ca_update(void *kdata, void *old_kdata) +static int bpf_tcp_ca_update(void *kdata, void *old_kdata, struct bpf_link *link) { return tcp_update_congestion_control(kdata, old_kdata); } diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c index e9cb27061c12..8cc0e2f4159d 100644 --- a/net/ipv4/cipso_ipv4.c +++ b/net/ipv4/cipso_ipv4.c @@ -1976,7 +1976,7 @@ int cipso_v4_req_setattr(struct request_sock *req, buf = NULL; req_inet = inet_rsk(req); - opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt); + opt = unrcu_pointer(xchg(&req_inet->ireq_opt, RCU_INITIALIZER(opt))); if (opt) kfree_rcu(opt, rcu); diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 3968d3f98e08..47378ca41904 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -239,8 +239,7 @@ static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb) #else static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb) { - kfree_skb(skb); - + WARN_ON(1); return -EOPNOTSUPP; } #endif @@ -349,6 +348,7 @@ static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb, { struct udphdr *uh; unsigned int len; + struct xfrm_offload *xo = xfrm_offload(skb); len = skb->len + esp->tailen - skb_transport_offset(skb); if (len + sizeof(struct iphdr) > IP_MAX_MTU) @@ -360,7 +360,12 @@ static struct ip_esp_hdr *esp_output_udp_encap(struct sk_buff *skb, uh->len = htons(len); uh->check = 0; - *skb_mac_header(skb) = IPPROTO_UDP; + /* For IPv4 ESP with UDP encapsulation, if xo is not null, the skb is in the crypto offload + * data path, which means that esp_output_udp_encap is called outside of the XFRM stack. + * In this case, the mac header doesn't point to the IPv4 protocol field, so don't set it. 
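The cipso_v4_req_setattr() hunk above replaces an open-coded __force cast with the unrcu_pointer()/RCU_INITIALIZER() pair from rcupdate.h. In miniature, with an illustrative struct foo and slot, the same sparse-clean swap looks like:

	struct foo {
		struct rcu_head rcu;
	};
	static struct foo __rcu *foo_slot;

	static void foo_replace(struct foo *newp)
	{
		struct foo *old;

		/* RCU_INITIALIZER() marks the new pointer for the __rcu slot;
		 * unrcu_pointer() strips __rcu from the value xchg() returns.
		 */
		old = unrcu_pointer(xchg(&foo_slot, RCU_INITIALIZER(newp)));
		if (old)
			kfree_rcu(old, rcu);
	}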
+ */ + if (!xo || encap_type != UDP_ENCAP_ESPINUDP) + *skb_mac_header(skb) = IPPROTO_UDP; return (struct ip_esp_hdr *)(uh + 1); } diff --git a/net/ipv4/esp4_offload.c b/net/ipv4/esp4_offload.c index b3271957ad9a..80c4ea0e12f4 100644 --- a/net/ipv4/esp4_offload.c +++ b/net/ipv4/esp4_offload.c @@ -56,6 +56,13 @@ static struct sk_buff *esp4_gro_receive(struct list_head *head, x = xfrm_state_lookup(dev_net(skb->dev), skb->mark, (xfrm_address_t *)&ip_hdr(skb)->daddr, spi, IPPROTO_ESP, AF_INET); + + if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) { + /* non-offload path will record the error and audit log */ + xfrm_state_put(x); + x = NULL; + } + if (!x) goto out_reset; @@ -264,6 +271,7 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_ struct esp_info esp; bool hw_offload = true; __u32 seq; + int encap_type = 0; esp.inplace = true; @@ -296,8 +304,10 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_ esp.esph = ip_esp_hdr(skb); + if (x->encap) + encap_type = x->encap->encap_type; - if (!hw_offload || !skb_is_gso(skb)) { + if (!hw_offload || !skb_is_gso(skb) || (hw_offload && encap_type == UDP_ENCAP_ESPINUDP)) { esp.nfrags = esp_output_head(x, skb, &esp); if (esp.nfrags < 0) return esp.nfrags; @@ -324,6 +334,18 @@ static int esp_xmit(struct xfrm_state *x, struct sk_buff *skb, netdev_features_ esp.seqno = cpu_to_be64(seq + ((u64)xo->seq.hi << 32)); + if (hw_offload && encap_type == UDP_ENCAP_ESPINUDP) { + /* In the XFRM stack, the encapsulation protocol is set to iphdr->protocol by + * setting *skb_mac_header(skb) (see esp_output_udp_encap()) where skb->mac_header + * points to iphdr->protocol (see xfrm4_tunnel_encap_add()). + * However, in esp_xmit(), skb->mac_header doesn't point to iphdr->protocol. + * Therefore, the protocol field needs to be corrected. 
+ */ + ip_hdr(skb)->protocol = IPPROTO_UDP; + + esph->seq_no = htonl(seq); + } + ip_hdr(skb)->tot_len = htons(skb->len); ip_send_check(ip_hdr(skb)); diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c index f669da98d11d..2b57cd2b96e2 100644 --- a/net/ipv4/fib_semantics.c +++ b/net/ipv4/fib_semantics.c @@ -1030,7 +1030,7 @@ bool fib_metrics_match(struct fib_config *cfg, struct fib_info *fi) bool ecn_ca = false; nla_strscpy(tmp, nla, sizeof(tmp)); - val = tcp_ca_get_key_by_name(fi->fib_net, tmp, &ecn_ca); + val = tcp_ca_get_key_by_name(tmp, &ecn_ca); } else { if (nla_len(nla) != sizeof(u32)) return false; @@ -1459,8 +1459,7 @@ struct fib_info *fib_create_info(struct fib_config *cfg, fi = kzalloc(struct_size(fi, fib_nh, nhs), GFP_KERNEL); if (!fi) goto failure; - fi->fib_metrics = ip_fib_metrics_init(fi->fib_net, cfg->fc_mx, - cfg->fc_mx_len, extack); + fi->fib_metrics = ip_fib_metrics_init(cfg->fc_mx, cfg->fc_mx_len, extack); if (IS_ERR(fi->fib_metrics)) { err = PTR_ERR(fi->fib_metrics); kfree(fi); @@ -2270,6 +2269,15 @@ void fib_select_path(struct net *net, struct fib_result *res, fib_select_default(fl4, res); check_saddr: - if (!fl4->saddr) - fl4->saddr = fib_result_prefsrc(net, res); + if (!fl4->saddr) { + struct net_device *l3mdev; + + l3mdev = dev_get_by_index_rcu(net, fl4->flowi4_l3mdev); + + if (!l3mdev || + l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) == l3mdev) + fl4->saddr = fib_result_prefsrc(net, res); + else + fl4->saddr = inet_select_addr(l3mdev, 0, RT_SCOPE_LINK); + } } diff --git a/net/ipv4/fou_core.c b/net/ipv4/fou_core.c index a8494f796dca..0abbc413e0fe 100644 --- a/net/ipv4/fou_core.c +++ b/net/ipv4/fou_core.c @@ -433,7 +433,7 @@ next_proto: offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; ops = rcu_dereference(offloads[proto]); - if (WARN_ON_ONCE(!ops || !ops->callbacks.gro_receive)) + if (!ops || !ops->callbacks.gro_receive) goto out; pp = call_gro_receive(ops->callbacks.gro_receive, head, skb); diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index d4f0eff8b20f..64d07b842e73 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -911,6 +911,64 @@ int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req) } EXPORT_SYMBOL(inet_rtx_syn_ack); +static struct request_sock * +reqsk_alloc_noprof(const struct request_sock_ops *ops, struct sock *sk_listener, + bool attach_listener) +{ + struct request_sock *req; + + req = kmem_cache_alloc_noprof(ops->slab, GFP_ATOMIC | __GFP_NOWARN); + if (!req) + return NULL; + req->rsk_listener = NULL; + if (attach_listener) { + if (unlikely(!refcount_inc_not_zero(&sk_listener->sk_refcnt))) { + kmem_cache_free(ops->slab, req); + return NULL; + } + req->rsk_listener = sk_listener; + } + req->rsk_ops = ops; + req_to_sk(req)->sk_prot = sk_listener->sk_prot; + sk_node_init(&req_to_sk(req)->sk_node); + sk_tx_queue_clear(req_to_sk(req)); + req->saved_syn = NULL; + req->syncookie = 0; + req->timeout = 0; + req->num_timeout = 0; + req->num_retrans = 0; + req->sk = NULL; + refcount_set(&req->rsk_refcnt, 0); + + return req; +} +#define reqsk_alloc(...) 
alloc_hooks(reqsk_alloc_noprof(__VA_ARGS__)) + +struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops, + struct sock *sk_listener, + bool attach_listener) +{ + struct request_sock *req = reqsk_alloc(ops, sk_listener, + attach_listener); + + if (req) { + struct inet_request_sock *ireq = inet_rsk(req); + + ireq->ireq_opt = NULL; +#if IS_ENABLED(CONFIG_IPV6) + ireq->pktopts = NULL; +#endif + atomic64_set(&ireq->ir_cookie, 0); + ireq->ireq_state = TCP_NEW_SYN_RECV; + write_pnet(&ireq->ireq_net, sock_net(sk_listener)); + ireq->ireq_family = sk_listener->sk_family; + req->timeout = TCP_TIMEOUT_INIT; + } + + return req; +} +EXPORT_SYMBOL(inet_reqsk_alloc); + static struct request_sock *inet_reqsk_clone(struct request_sock *req, struct sock *sk) { diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index faaec92a46ac..d179a2c84222 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -619,7 +619,7 @@ void inet_frag_reasm_finish(struct inet_frag_queue *q, struct sk_buff *head, skb_mark_not_on_list(head); head->prev = NULL; head->tstamp = q->stamp; - head->mono_delivery_time = q->mono_delivery_time; + head->tstamp_type = q->tstamp_type; if (sk) refcount_add(sum_truesize - head_truesize, &sk->sk_wmem_alloc); diff --git a/net/ipv4/inet_timewait_sock.c b/net/ipv4/inet_timewait_sock.c index e28075f0006e..337390ba85b4 100644 --- a/net/ipv4/inet_timewait_sock.c +++ b/net/ipv4/inet_timewait_sock.c @@ -92,13 +92,22 @@ static void inet_twsk_add_node_rcu(struct inet_timewait_sock *tw, hlist_nulls_add_head_rcu(&tw->tw_node, list); } +static void inet_twsk_schedule(struct inet_timewait_sock *tw, int timeo) +{ + __inet_twsk_schedule(tw, timeo, false); +} + /* - * Enter the time wait state. This is called with locally disabled BH. + * Enter the time wait state. * Essentially we whip up a timewait bucket, copy the relevant info into it * from the SK, and mess with hash chains and list linkage. + * + * The caller must not access @tw anymore after this function returns. */ -void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, - struct inet_hashinfo *hashinfo) +void inet_twsk_hashdance_schedule(struct inet_timewait_sock *tw, + struct sock *sk, + struct inet_hashinfo *hashinfo, + int timeo) { const struct inet_sock *inet = inet_sk(sk); const struct inet_connection_sock *icsk = inet_csk(sk); @@ -114,6 +123,7 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, hashinfo->bhash_size)]; bhead2 = inet_bhashfn_portaddr(hashinfo, sk, twsk_net(tw), inet->inet_num); + local_bh_disable(); spin_lock(&bhead->lock); spin_lock(&bhead2->lock); @@ -129,26 +139,34 @@ void inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk, spin_lock(lock); + /* Step 2: Hash TW into tcp ehash chain */ inet_twsk_add_node_rcu(tw, &ehead->chain); /* Step 3: Remove SK from hash chain */ if (__sk_nulls_del_node_init_rcu(sk)) sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1); - spin_unlock(lock); + /* Ensure above writes are committed into memory before updating the + * refcount. + * Provides ordering vs later refcount_inc(). + */ + smp_wmb(); /* tw_refcnt is set to 3 because we have : * - one reference for bhash chain. * - one reference for ehash chain. * - one reference for timer. - * We can use atomic_set() because prior spin_lock()/spin_unlock() - * committed into memory all tw fields. * Also note that after this point, we lost our implicit reference * so we are not allowed to use tw anymore. 
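reqsk_alloc_noprof() plus the reqsk_alloc() macro above follow the memory-allocation-profiling convention: the worker carries a _noprof suffix, and alloc_hooks() charges the allocation to each call site rather than to the file defining the helper. The pattern reduced to a minimal example (my_obj names are illustrative):

	struct my_obj {
		unsigned long payload;
	};

	/* The _noprof worker performs the actual allocation... */
	static struct my_obj *my_obj_alloc_noprof(gfp_t gfp)
	{
		return kmalloc_noprof(sizeof(struct my_obj), gfp);
	}
	/* ...and the macro charges it to whoever expands my_obj_alloc(). */
	#define my_obj_alloc(...) alloc_hooks(my_obj_alloc_noprof(__VA_ARGS__))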
*/ refcount_set(&tw->tw_refcnt, 3); + + inet_twsk_schedule(tw, timeo); + + spin_unlock(lock); + local_bh_enable(); } -EXPORT_SYMBOL_GPL(inet_twsk_hashdance); +EXPORT_SYMBOL_GPL(inet_twsk_hashdance_schedule); static void tw_timer_handler(struct timer_list *t) { @@ -192,7 +210,7 @@ struct inet_timewait_sock *inet_twsk_alloc(const struct sock *sk, tw->tw_prot = sk->sk_prot_creator; atomic64_set(&tw->tw_cookie, atomic64_read(&sk->sk_cookie)); twsk_net_set(tw, sock_net(sk)); - timer_setup(&tw->tw_timer, tw_timer_handler, TIMER_PINNED); + timer_setup(&tw->tw_timer, tw_timer_handler, 0); /* * Because we use RCU lookups, we should not set tw_refcnt * to a non null value before everything is setup for this @@ -217,7 +235,34 @@ EXPORT_SYMBOL_GPL(inet_twsk_alloc); */ void inet_twsk_deschedule_put(struct inet_timewait_sock *tw) { - if (del_timer_sync(&tw->tw_timer)) + struct inet_hashinfo *hashinfo = tw->tw_dr->hashinfo; + spinlock_t *lock = inet_ehash_lockp(hashinfo, tw->tw_hash); + + /* inet_twsk_purge() walks over all sockets, including tw ones, + * and removes them via inet_twsk_deschedule_put() after a + * refcount_inc_not_zero(). + * + * inet_twsk_hashdance_schedule() must (re)init the refcount before + * arming the timer, i.e. inet_twsk_purge can obtain a reference to + * a twsk that did not yet schedule the timer. + * + * The ehash lock synchronizes these two: + * After acquiring the lock, the timer is always scheduled (else + * timer_shutdown returns false), because hashdance_schedule releases + * the ehash lock only after completing the timer initialization. + * + * Without grabbing the ehash lock, we get: + * 1) cpu x sets twsk refcount to 3 + * 2) cpu y bumps refcount to 4 + * 3) cpu y calls inet_twsk_deschedule_put() and shuts timer down + * 4) cpu x tries to start timer, but mod_timer is a noop post-shutdown + * -> timer refcount is never decremented. + */ + spin_lock(lock); + /* Makes sure hashdance_schedule() has completed */ + spin_unlock(lock); + + if (timer_shutdown_sync(&tw->tw_timer)) inet_twsk_kill(tw); inet_twsk_put(tw); } diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index 08e2c92e25ab..a92664a5ef2e 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -355,7 +355,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb) qp->iif = dev->ifindex; qp->q.stamp = skb->tstamp; - qp->q.mono_delivery_time = skb->mono_delivery_time; + qp->q.tstamp_type = skb->tstamp_type; qp->q.meat += skb->len; qp->ecn |= ecn; add_frag_mem_limit(qp->q.fqdir, skb->truesize); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 9500031a1f55..b90d0f78ac80 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -764,7 +764,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, { struct iphdr *iph; struct sk_buff *skb2; - bool mono_delivery_time = skb->mono_delivery_time; + u8 tstamp_type = skb->tstamp_type; struct rtable *rt = skb_rtable(skb); unsigned int mtu, hlen, ll_rs; struct ip_fraglist_iter iter; @@ -856,7 +856,7 @@ int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, } } - skb_set_delivery_time(skb, tstamp, mono_delivery_time); + skb_set_delivery_time(skb, tstamp, tstamp_type); err = output(net, sk, skb); if (!err) @@ -912,7 +912,7 @@ slow_path: /* * Put this fragment into the sending queue. 
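Two details carry the inet_twsk_deschedule_put() fix above: the empty ehash lock/unlock pair, which simply waits for a concurrent inet_twsk_hashdance_schedule() critical section to finish, and timer_shutdown_sync(), which unlike del_timer_sync() leaves the timer permanently dead, so the racing mod_timer() in step 4 of the comment becomes a no-op. The idiom in isolation (demo names are illustrative):

	struct demo {
		spinlock_t lock;
		struct timer_list timer;
	};

	static void demo_teardown(struct demo *d)
	{
		spin_lock(&d->lock);	/* waits out a setup section that may
					 * still be arming the timer */
		spin_unlock(&d->lock);	/* nothing to do while holding it */

		/* Returns true if a pending timer was cancelled; any later
		 * mod_timer() on a shut-down timer does nothing.
		 */
		if (timer_shutdown_sync(&d->timer))
			pr_debug("cancelled a pending timer\n");
	}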
*/ - skb_set_delivery_time(skb2, tstamp, mono_delivery_time); + skb_set_delivery_time(skb2, tstamp, tstamp_type); err = output(net, sk, skb2); if (err) goto fail; @@ -1457,7 +1457,10 @@ struct sk_buff *__ip_make_skb(struct sock *sk, skb->priority = (cork->tos != -1) ? cork->priority: READ_ONCE(sk->sk_priority); skb->mark = cork->mark; - skb->tstamp = cork->transmit_time; + if (sk_is_tcp(sk)) + skb_set_delivery_time(skb, cork->transmit_time, SKB_CLOCK_MONOTONIC); + else + skb_set_delivery_type_by_clockid(skb, cork->transmit_time, sk->sk_clockid); /* * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec * on dst refcount @@ -1649,7 +1652,8 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, arg->csumoffset) = csum_fold(csum_add(nskb->csum, arg->csum)); nskb->ip_summed = CHECKSUM_NONE; - nskb->mono_delivery_time = !!transmit_time; + if (transmit_time) + nskb->tstamp_type = SKB_CLOCK_MONOTONIC; if (txhash) skb_set_hash(nskb, txhash, PKT_HASH_TYPE_L4); ip_push_pending_frames(sk, &fl4); diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index bccef2fcf620..5cffad42fe8c 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -1099,7 +1099,6 @@ static void ip_tunnel_dev_free(struct net_device *dev) gro_cells_destroy(&tunnel->gro_cells); dst_cache_destroy(&tunnel->dst_cache); - free_percpu(dev->tstats); } void ip_tunnel_dellink(struct net_device *dev, struct list_head *head) @@ -1313,20 +1312,15 @@ int ip_tunnel_init(struct net_device *dev) dev->needs_free_netdev = true; dev->priv_destructor = ip_tunnel_dev_free; - dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); - if (!dev->tstats) - return -ENOMEM; + dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; err = dst_cache_init(&tunnel->dst_cache, GFP_KERNEL); - if (err) { - free_percpu(dev->tstats); + if (err) return err; - } err = gro_cells_init(&tunnel->gro_cells, dev); if (err) { dst_cache_destroy(&tunnel->dst_cache); - free_percpu(dev->tstats); return err; } diff --git a/net/ipv4/metrics.c b/net/ipv4/metrics.c index 0e3ee1532848..8ddac1f595ed 100644 --- a/net/ipv4/metrics.c +++ b/net/ipv4/metrics.c @@ -7,7 +7,7 @@ #include #include -static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, +static int ip_metrics_convert(struct nlattr *fc_mx, int fc_mx_len, u32 *metrics, struct netlink_ext_ack *extack) { @@ -31,7 +31,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, char tmp[TCP_CA_NAME_MAX]; nla_strscpy(tmp, nla, sizeof(tmp)); - val = tcp_ca_get_key_by_name(net, tmp, &ecn_ca); + val = tcp_ca_get_key_by_name(tmp, &ecn_ca); if (val == TCP_CA_UNSPEC) { NL_SET_ERR_MSG(extack, "Unknown tcp congestion algorithm"); return -EINVAL; @@ -63,7 +63,7 @@ static int ip_metrics_convert(struct net *net, struct nlattr *fc_mx, return 0; } -struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx, +struct dst_metrics *ip_fib_metrics_init(struct nlattr *fc_mx, int fc_mx_len, struct netlink_ext_ack *extack) { @@ -77,7 +77,7 @@ struct dst_metrics *ip_fib_metrics_init(struct net *net, struct nlattr *fc_mx, if (unlikely(!fib_metrics)) return ERR_PTR(-ENOMEM); - err = ip_metrics_convert(net, fc_mx, fc_mx_len, fib_metrics->metrics, + err = ip_metrics_convert(fc_mx, fc_mx_len, fib_metrics->metrics, extack); if (!err) { refcount_set(&fib_metrics->refcnt, 1); diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c index 823306487a82..619ddc087957 100644 --- a/net/ipv4/ping.c +++ b/net/ipv4/ping.c @@ -946,7 +946,7 @@ static enum skb_drop_reason __ping_queue_rcv_skb(struct sock 
*sk, pr_debug("ping_queue_rcv_skb(sk=%p,sk->num=%d,skb=%p)\n", inet_sk(sk), inet_sk(sk)->inet_num, skb); if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) { - kfree_skb_reason(skb, reason); + sk_skb_reason_drop(sk, skb, reason); pr_debug("ping_queue_rcv_skb -> failed\n"); return reason; } diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 4cb43401e0e0..474dfd263c8b 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -301,7 +301,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb) ipv4_pktinfo_prepare(sk, skb, true); if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) { - kfree_skb_reason(skb, reason); + sk_skb_reason_drop(sk, skb, reason); return NET_RX_DROP; } @@ -312,7 +312,7 @@ int raw_rcv(struct sock *sk, struct sk_buff *skb) { if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) { atomic_inc(&sk->sk_drops); - kfree_skb_reason(skb, SKB_DROP_REASON_XFRM_POLICY); + sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY); return NET_RX_DROP; } nf_reset_ct(skb); @@ -360,7 +360,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, skb->protocol = htons(ETH_P_IP); skb->priority = READ_ONCE(sk->sk_priority); skb->mark = sockc->mark; - skb->tstamp = sockc->transmit_time; + skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, sk->sk_clockid); skb_dst_set(skb, &rt->dst); *rtp = NULL; diff --git a/net/ipv4/route.c b/net/ipv4/route.c index b3073d1c8f8f..54512acbead7 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -1481,7 +1481,6 @@ static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt) struct uncached_list { spinlock_t lock; struct list_head head; - struct list_head quarantine; }; static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list); @@ -1532,7 +1531,7 @@ void rt_flush_dev(struct net_device *dev) rt->dst.dev = blackhole_netdev; netdev_ref_replace(dev, blackhole_netdev, &rt->dst.dev_tracker, GFP_ATOMIC); - list_move(&rt->dst.rt_uncached, &ul->quarantine); + list_del_init(&rt->dst.rt_uncached); } spin_unlock_bh(&ul->lock); } @@ -1924,7 +1923,7 @@ static u32 fib_multipath_custom_hash_outer(const struct net *net, hash_keys.ports.dst = keys.ports.dst; *p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION); - return flow_hash_from_keys(&hash_keys); + return fib_multipath_hash_from_keys(net, &hash_keys); } static u32 fib_multipath_custom_hash_inner(const struct net *net, @@ -1973,7 +1972,7 @@ static u32 fib_multipath_custom_hash_inner(const struct net *net, if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT) hash_keys.ports.dst = keys.ports.dst; - return flow_hash_from_keys(&hash_keys); + return fib_multipath_hash_from_keys(net, &hash_keys); } static u32 fib_multipath_custom_hash_skb(const struct net *net, @@ -2010,7 +2009,7 @@ static u32 fib_multipath_custom_hash_fl4(const struct net *net, if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT) hash_keys.ports.dst = fl4->fl4_dport; - return flow_hash_from_keys(&hash_keys); + return fib_multipath_hash_from_keys(net, &hash_keys); } /* if skb is set it will be used and fl4 can be NULL */ @@ -2031,7 +2030,7 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4, hash_keys.addrs.v4addrs.src = fl4->saddr; hash_keys.addrs.v4addrs.dst = fl4->daddr; } - mhash = flow_hash_from_keys(&hash_keys); + mhash = fib_multipath_hash_from_keys(net, &hash_keys); break; case 1: /* skb is currently provided only when forwarding */ @@ -2065,7 +2064,7 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4, hash_keys.ports.dst = fl4->fl4_dport; 
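Each flow_hash_from_keys() call in route.c above becomes fib_multipath_hash_from_keys(), which mixes the per-netns mp_seed into the hash so multipath selection can be made reproducible across hosts, while the boot-time random fallback keeps it unpredictable by default. A sketch of the shape such a helper can take, assuming a seeded variant of the flow hash added by the same series; the exact seed expansion in the merged include/net/ip_fib.h helper may differ:

	static inline u32 fib_multipath_hash_from_keys(const struct net *net,
						       struct flow_keys *keys)
	{
		siphash_aligned_key_t hash_key;
		u32 mp_seed;

		/* mp_seed is the user seed, or a boot-time random value */
		mp_seed = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed).mp_seed;
		hash_key.key[0] = (u64)mp_seed << 32 | mp_seed;	/* assumed expansion */
		hash_key.key[1] = hash_key.key[0];

		return flow_hash_from_keys_seed(keys, &hash_key);
	}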
hash_keys.basic.ip_proto = fl4->flowi4_proto; } - mhash = flow_hash_from_keys(&hash_keys); + mhash = fib_multipath_hash_from_keys(net, &hash_keys); break; case 2: memset(&hash_keys, 0, sizeof(hash_keys)); @@ -2096,7 +2095,7 @@ int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4, hash_keys.addrs.v4addrs.src = fl4->saddr; hash_keys.addrs.v4addrs.dst = fl4->daddr; } - mhash = flow_hash_from_keys(&hash_keys); + mhash = fib_multipath_hash_from_keys(net, &hash_keys); break; case 3: if (skb) @@ -3661,7 +3660,6 @@ int __init ip_rt_init(void) struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu); INIT_LIST_HEAD(&ul->head); - INIT_LIST_HEAD(&ul->quarantine); spin_lock_init(&ul->lock); } #ifdef CONFIG_IP_ROUTE_CLASSID diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index b61d36810fe3..1948d15f1f28 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -496,6 +496,6 @@ out: out_free: reqsk_free(req); out_drop: - kfree_skb_reason(skb, reason); + sk_skb_reason_drop(sk, skb, reason); return NULL; } diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index 162a0a3b6ba5..9140d20eb2d4 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -130,7 +130,8 @@ static int ipv4_privileged_ports(struct ctl_table *table, int write, return ret; } -static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high) +static void inet_get_ping_group_range_table(const struct ctl_table *table, + kgid_t *low, kgid_t *high) { kgid_t *data = table->data; struct net *net = @@ -145,7 +146,8 @@ static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low } /* Update system visible IP port range */ -static void set_ping_group_range(struct ctl_table *table, kgid_t low, kgid_t high) +static void set_ping_group_range(const struct ctl_table *table, + kgid_t low, kgid_t high) { kgid_t *data = table->data; struct net *net = @@ -462,6 +464,61 @@ static int proc_fib_multipath_hash_fields(struct ctl_table *table, int write, return ret; } + +static u32 proc_fib_multipath_hash_rand_seed __ro_after_init; + +static void proc_fib_multipath_hash_init_rand_seed(void) +{ + get_random_bytes(&proc_fib_multipath_hash_rand_seed, + sizeof(proc_fib_multipath_hash_rand_seed)); +} + +static void proc_fib_multipath_hash_set_seed(struct net *net, u32 user_seed) +{ + struct sysctl_fib_multipath_hash_seed new = { + .user_seed = user_seed, + .mp_seed = (user_seed ? 
user_seed : + proc_fib_multipath_hash_rand_seed), + }; + + WRITE_ONCE(net->ipv4.sysctl_fib_multipath_hash_seed, new); +} + +static int proc_fib_multipath_hash_seed(struct ctl_table *table, int write, + void *buffer, size_t *lenp, + loff_t *ppos) +{ + struct sysctl_fib_multipath_hash_seed *mphs; + struct net *net = table->data; + struct ctl_table tmp; + u32 user_seed; + int ret; + + mphs = &net->ipv4.sysctl_fib_multipath_hash_seed; + user_seed = mphs->user_seed; + + tmp = *table; + tmp.data = &user_seed; + + ret = proc_douintvec_minmax(&tmp, write, buffer, lenp, ppos); + + if (write && ret == 0) { + proc_fib_multipath_hash_set_seed(net, user_seed); + call_netevent_notifiers(NETEVENT_IPV4_MPATH_HASH_UPDATE, net); + } + + return ret; +} +#else + +static void proc_fib_multipath_hash_init_rand_seed(void) +{ +} + +static void proc_fib_multipath_hash_set_seed(struct net *net, u32 user_seed) +{ +} + #endif static struct ctl_table ipv4_table[] = { @@ -1070,6 +1127,13 @@ static struct ctl_table ipv4_net_table[] = { .extra1 = SYSCTL_ONE, .extra2 = &fib_multipath_hash_fields_all_mask, }, + { + .procname = "fib_multipath_hash_seed", + .data = &init_net, + .maxlen = sizeof(u32), + .mode = 0644, + .proc_handler = proc_fib_multipath_hash_seed, + }, #endif { .procname = "ip_unprivileged_port_start", @@ -1501,6 +1565,14 @@ static struct ctl_table ipv4_net_table[] = { .proc_handler = proc_dou8vec_minmax, .extra1 = SYSCTL_ONE, }, + { + .procname = "tcp_rto_min_us", + .data = &init_net.ipv4.sysctl_tcp_rto_min_us, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = SYSCTL_ONE, + }, }; static __net_init int ipv4_sysctl_init_net(struct net *net) @@ -1540,6 +1612,8 @@ static __net_init int ipv4_sysctl_init_net(struct net *net) if (!net->ipv4.sysctl_local_reserved_ports) goto err_ports; + proc_fib_multipath_hash_set_seed(net, 0); + return 0; err_ports: @@ -1574,6 +1648,8 @@ static __init int sysctl_ipv4_init(void) if (!hdr) return -ENOMEM; + proc_fib_multipath_hash_init_rand_seed(); + if (register_pernet_subsys(&ipv4_sysctl_ops)) { unregister_net_sysctl_table(hdr); return -ENOMEM; diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index e6790ea74877..e03a342c9162 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -282,6 +282,7 @@ #include #include #include +#include #include /* Track pending CMSGs. 
*/ @@ -420,6 +421,7 @@ void tcp_init_sock(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); + int rto_min_us; tp->out_of_order_queue = RB_ROOT; sk->tcp_rtx_queue = RB_ROOT; @@ -428,7 +430,8 @@ void tcp_init_sock(struct sock *sk) INIT_LIST_HEAD(&tp->tsorted_sent_queue); icsk->icsk_rto = TCP_TIMEOUT_INIT; - icsk->icsk_rto_min = TCP_RTO_MIN; + rto_min_us = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rto_min_us); + icsk->icsk_rto_min = usecs_to_jiffies(rto_min_us); icsk->icsk_delack_max = TCP_DELACK_MAX; tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT); minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U); @@ -598,7 +601,7 @@ __poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait) */ mask |= EPOLLOUT | EPOLLWRNORM; } - /* This barrier is coupled with smp_wmb() in tcp_reset() */ + /* This barrier is coupled with smp_wmb() in tcp_done_with_error() */ smp_rmb(); if (READ_ONCE(sk->sk_err) || !skb_queue_empty_lockless(&sk->sk_error_queue)) @@ -3086,7 +3089,7 @@ int tcp_disconnect(struct sock *sk, int flags) icsk->icsk_ack.rcv_mss = TCP_MIN_MSS; memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); __sk_dst_reset(sk); - dst_release(xchg((__force struct dst_entry **)&sk->sk_rx_dst, NULL)); + dst_release(unrcu_pointer(xchg(&sk->sk_rx_dst, NULL))); tcp_saved_syn_free(tp); tp->compressed_ack = 0; tp->segs_in = 0; @@ -4461,7 +4464,7 @@ int tcp_md5_hash_key(struct tcp_sigpool *hp, EXPORT_SYMBOL(tcp_md5_hash_key); /* Called with rcu_read_lock() */ -enum skb_drop_reason +static enum skb_drop_reason tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, const void *saddr, const void *daddr, int family, int l3index, const __u8 *hash_location) @@ -4481,7 +4484,7 @@ tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, if (!key && hash_location) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); - tcp_hash_fail("Unexpected MD5 Hash found", family, skb, ""); + trace_tcp_hash_md5_unexpected(sk, skb); return SKB_DROP_REASON_TCP_MD5UNEXPECTED; } @@ -4496,29 +4499,90 @@ tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE); - if (family == AF_INET) { - tcp_hash_fail("MD5 Hash failed", AF_INET, skb, "%s L3 index %d", - genhash ? 
"tcp_v4_calc_md5_hash failed" - : "", l3index); - } else { - if (genhash) { - tcp_hash_fail("MD5 Hash failed", - AF_INET6, skb, "L3 index %d", - l3index); - } else { - tcp_hash_fail("MD5 Hash mismatch", - AF_INET6, skb, "L3 index %d", - l3index); - } - } + trace_tcp_hash_md5_mismatch(sk, skb); return SKB_DROP_REASON_TCP_MD5FAILURE; } return SKB_NOT_DROPPED_YET; } -EXPORT_SYMBOL(tcp_inbound_md5_hash); +#else +static inline enum skb_drop_reason +tcp_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb, + const void *saddr, const void *daddr, + int family, int l3index, const __u8 *hash_location) +{ + return SKB_NOT_DROPPED_YET; +} #endif +/* Called with rcu_read_lock() */ +enum skb_drop_reason +tcp_inbound_hash(struct sock *sk, const struct request_sock *req, + const struct sk_buff *skb, + const void *saddr, const void *daddr, + int family, int dif, int sdif) +{ + const struct tcphdr *th = tcp_hdr(skb); + const struct tcp_ao_hdr *aoh; + const __u8 *md5_location; + int l3index; + + /* Invalid option or two times meet any of auth options */ + if (tcp_parse_auth_options(th, &md5_location, &aoh)) { + trace_tcp_hash_bad_header(sk, skb); + return SKB_DROP_REASON_TCP_AUTH_HDR; + } + + if (req) { + if (tcp_rsk_used_ao(req) != !!aoh) { + u8 keyid, rnext, maclen; + + if (aoh) { + keyid = aoh->keyid; + rnext = aoh->rnext_keyid; + maclen = tcp_ao_hdr_maclen(aoh); + } else { + keyid = rnext = maclen = 0; + } + + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD); + trace_tcp_ao_handshake_failure(sk, skb, keyid, rnext, maclen); + return SKB_DROP_REASON_TCP_AOFAILURE; + } + } + + /* sdif set, means packet ingressed via a device + * in an L3 domain and dif is set to the l3mdev + */ + l3index = sdif ? dif : 0; + + /* Fast path: unsigned segments */ + if (likely(!md5_location && !aoh)) { + /* Drop if there's TCP-MD5 or TCP-AO key with any rcvid/sndid + * for the remote peer. On TCP-AO established connection + * the last key is impossible to remove, so there's + * always at least one current_key. 
+ */ + if (tcp_ao_required(sk, saddr, family, l3index, true)) { + trace_tcp_hash_ao_required(sk, skb); + return SKB_DROP_REASON_TCP_AONOTFOUND; + } + if (unlikely(tcp_md5_do_lookup(sk, l3index, saddr, family))) { + NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); + trace_tcp_hash_md5_required(sk, skb); + return SKB_DROP_REASON_TCP_MD5NOTFOUND; + } + return SKB_NOT_DROPPED_YET; + } + + if (aoh) + return tcp_inbound_ao_hash(sk, skb, family, req, l3index, aoh); + + return tcp_inbound_md5_hash(sk, skb, saddr, daddr, family, + l3index, md5_location); +} +EXPORT_SYMBOL_GPL(tcp_inbound_hash); + void tcp_done(struct sock *sk) { struct request_sock *req; @@ -4583,14 +4647,10 @@ int tcp_abort(struct sock *sk, int err) bh_lock_sock(sk); if (!sock_flag(sk, SOCK_DEAD)) { - WRITE_ONCE(sk->sk_err, err); - /* This barrier is coupled with smp_rmb() in tcp_poll() */ - smp_wmb(); - sk_error_report(sk); if (tcp_need_reset(sk->sk_state)) tcp_send_active_reset(sk, GFP_ATOMIC, SK_RST_REASON_NOT_SPECIFIED); - tcp_done(sk); + tcp_done_with_error(sk, err); } bh_unlock_sock(sk); diff --git a/net/ipv4/tcp_ao.c b/net/ipv4/tcp_ao.c index 09c0fa6756b7..85531437890c 100644 --- a/net/ipv4/tcp_ao.c +++ b/net/ipv4/tcp_ao.c @@ -16,6 +16,7 @@ #include #include #include +#include DEFINE_STATIC_KEY_DEFERRED_FALSE(tcp_ao_needed, HZ); @@ -884,17 +885,16 @@ tcp_ao_verify_hash(const struct sock *sk, const struct sk_buff *skb, const struct tcp_ao_hdr *aoh, struct tcp_ao_key *key, u8 *traffic_key, u8 *phash, u32 sne, int l3index) { - u8 maclen = aoh->length - sizeof(struct tcp_ao_hdr); const struct tcphdr *th = tcp_hdr(skb); + u8 maclen = tcp_ao_hdr_maclen(aoh); void *hash_buf = NULL; if (maclen != tcp_ao_maclen(key)) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD); atomic64_inc(&info->counters.pkt_bad); atomic64_inc(&key->pkt_bad); - tcp_hash_fail("AO hash wrong length", family, skb, - "%u != %d L3index: %d", maclen, - tcp_ao_maclen(key), l3index); + trace_tcp_ao_wrong_maclen(sk, skb, aoh->keyid, + aoh->rnext_keyid, maclen); return SKB_DROP_REASON_TCP_AOFAILURE; } @@ -909,8 +909,8 @@ tcp_ao_verify_hash(const struct sock *sk, const struct sk_buff *skb, NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOBAD); atomic64_inc(&info->counters.pkt_bad); atomic64_inc(&key->pkt_bad); - tcp_hash_fail("AO hash mismatch", family, skb, - "L3index: %d", l3index); + trace_tcp_ao_mismatch(sk, skb, aoh->keyid, + aoh->rnext_keyid, maclen); kfree(hash_buf); return SKB_DROP_REASON_TCP_AOFAILURE; } @@ -927,6 +927,7 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb, int l3index, const struct tcp_ao_hdr *aoh) { const struct tcphdr *th = tcp_hdr(skb); + u8 maclen = tcp_ao_hdr_maclen(aoh); u8 *phash = (u8 *)(aoh + 1); /* hash goes just after the header */ struct tcp_ao_info *info; enum skb_drop_reason ret; @@ -939,8 +940,8 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb, info = rcu_dereference(tcp_sk(sk)->ao_info); if (!info) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOKEYNOTFOUND); - tcp_hash_fail("AO key not found", family, skb, - "keyid: %u L3index: %d", aoh->keyid, l3index); + trace_tcp_ao_key_not_found(sk, skb, aoh->keyid, + aoh->rnext_keyid, maclen); return SKB_DROP_REASON_TCP_AOUNEXPECTED; } @@ -981,6 +982,9 @@ tcp_inbound_ao_hash(struct sock *sk, const struct sk_buff *skb, current_key = READ_ONCE(info->current_key); /* Key rotation: the peer asks us to use new key (RNext) */ if (unlikely(aoh->rnext_keyid != current_key->sndid)) { + trace_tcp_ao_rnext_request(sk, skb, current_key->sndid, + aoh->rnext_keyid, + 
tcp_ao_hdr_maclen(aoh)); /* If the key is not found we do nothing. */ key = tcp_ao_established_key(info, aoh->rnext_keyid, -1); if (key) @@ -1046,8 +1050,8 @@ verify_hash: key_not_found: NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAOKEYNOTFOUND); atomic64_inc(&info->counters.key_not_found); - tcp_hash_fail("Requested by the peer AO key id not found", - family, skb, "L3index: %d", l3index); + trace_tcp_ao_key_not_found(sk, skb, aoh->keyid, + aoh->rnext_keyid, maclen); return SKB_DROP_REASON_TCP_AOKEYNOTFOUND; } diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c index 28ffcfbeef14..0306d257fa64 100644 --- a/net/ipv4/tcp_cong.c +++ b/net/ipv4/tcp_cong.c @@ -46,8 +46,7 @@ void tcp_set_ca_state(struct sock *sk, const u8 ca_state) } /* Must be called with rcu lock held */ -static struct tcp_congestion_ops *tcp_ca_find_autoload(struct net *net, - const char *name) +static struct tcp_congestion_ops *tcp_ca_find_autoload(const char *name) { struct tcp_congestion_ops *ca = tcp_ca_find(name); @@ -178,7 +177,7 @@ int tcp_update_congestion_control(struct tcp_congestion_ops *ca, struct tcp_cong return ret; } -u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca) +u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca) { const struct tcp_congestion_ops *ca; u32 key = TCP_CA_UNSPEC; @@ -186,7 +185,7 @@ u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca) might_sleep(); rcu_read_lock(); - ca = tcp_ca_find_autoload(net, name); + ca = tcp_ca_find_autoload(name); if (ca) { key = ca->key; *ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN; @@ -203,9 +202,10 @@ char *tcp_ca_get_name_by_key(u32 key, char *buffer) rcu_read_lock(); ca = tcp_ca_find_key(key); - if (ca) - ret = strncpy(buffer, ca->name, - TCP_CA_NAME_MAX); + if (ca) { + strscpy(buffer, ca->name, TCP_CA_NAME_MAX); + ret = buffer; + } rcu_read_unlock(); return ret; @@ -283,7 +283,7 @@ int tcp_set_default_congestion_control(struct net *net, const char *name) int ret; rcu_read_lock(); - ca = tcp_ca_find_autoload(net, name); + ca = tcp_ca_find_autoload(name); if (!ca) { ret = -ENOENT; } else if (!bpf_try_module_get(ca, ca->owner)) { @@ -338,7 +338,7 @@ void tcp_get_default_congestion_control(struct net *net, char *name) rcu_read_lock(); ca = rcu_dereference(net->ipv4.tcp_congestion_control); - strncpy(name, ca->name, TCP_CA_NAME_MAX); + strscpy(name, ca->name, TCP_CA_NAME_MAX); rcu_read_unlock(); } @@ -421,7 +421,7 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load, if (!load) ca = tcp_ca_find(name); else - ca = tcp_ca_find_autoload(sock_net(sk), name); + ca = tcp_ca_find_autoload(name); /* No change asking for existing value */ if (ca == icsk->icsk_ca_ops) { diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c index 8ed54e7334a9..0f523cbfe329 100644 --- a/net/ipv4/tcp_fastopen.c +++ b/net/ipv4/tcp_fastopen.c @@ -49,7 +49,7 @@ void tcp_fastopen_ctx_destroy(struct net *net) { struct tcp_fastopen_context *ctxt; - ctxt = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, NULL); + ctxt = unrcu_pointer(xchg(&net->ipv4.tcp_fastopen_ctx, NULL)); if (ctxt) call_rcu(&ctxt->rcu, tcp_fastopen_ctx_free); @@ -80,9 +80,10 @@ int tcp_fastopen_reset_cipher(struct net *net, struct sock *sk, if (sk) { q = &inet_csk(sk)->icsk_accept_queue.fastopenq; - octx = xchg((__force struct tcp_fastopen_context **)&q->ctx, ctx); + octx = unrcu_pointer(xchg(&q->ctx, RCU_INITIALIZER(ctx))); } else { - octx = xchg((__force struct tcp_fastopen_context **)&net->ipv4.tcp_fastopen_ctx, ctx); + 
octx = unrcu_pointer(xchg(&net->ipv4.tcp_fastopen_ctx, + RCU_INITIALIZER(ctx))); } if (octx) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 38da23f991d6..ff9ab3d01ced 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3604,8 +3604,10 @@ static void tcp_snd_sne_update(struct tcp_sock *tp, u32 ack) ao = rcu_dereference_protected(tp->ao_info, lockdep_sock_is_held((struct sock *)tp)); - if (ao && ack < tp->snd_una) + if (ao && ack < tp->snd_una) { ao->snd_sne++; + trace_tcp_ao_snd_sne_update((struct sock *)tp, ao->snd_sne); + } #endif } @@ -3630,8 +3632,10 @@ static void tcp_rcv_sne_update(struct tcp_sock *tp, u32 seq) ao = rcu_dereference_protected(tp->ao_info, lockdep_sock_is_held((struct sock *)tp)); - if (ao && seq < tp->rcv_nxt) + if (ao && seq < tp->rcv_nxt) { ao->rcv_sne++; + trace_tcp_ao_rcv_sne_update((struct sock *)tp, ao->rcv_sne); + } #endif } @@ -4469,9 +4473,26 @@ static enum skb_drop_reason tcp_sequence(const struct tcp_sock *tp, return SKB_NOT_DROPPED_YET; } + +void tcp_done_with_error(struct sock *sk, int err) +{ + /* This barrier is coupled with smp_rmb() in tcp_poll() */ + WRITE_ONCE(sk->sk_err, err); + smp_wmb(); + + tcp_write_queue_purge(sk); + tcp_done(sk); + + if (!sock_flag(sk, SOCK_DEAD)) + sk_error_report(sk); +} +EXPORT_SYMBOL(tcp_done_with_error); + /* When we get a reset we do this. */ void tcp_reset(struct sock *sk, struct sk_buff *skb) { + int err; + trace_tcp_receive_reset(sk); /* mptcp can't tell us to ignore reset pkts, @@ -4483,24 +4504,17 @@ void tcp_reset(struct sock *sk, struct sk_buff *skb) /* We want the right error as BSD sees it (and indeed as we do). */ switch (sk->sk_state) { case TCP_SYN_SENT: - WRITE_ONCE(sk->sk_err, ECONNREFUSED); + err = ECONNREFUSED; break; case TCP_CLOSE_WAIT: - WRITE_ONCE(sk->sk_err, EPIPE); + err = EPIPE; break; case TCP_CLOSE: return; default: - WRITE_ONCE(sk->sk_err, ECONNRESET); + err = ECONNRESET; } - /* This barrier is coupled with smp_rmb() in tcp_poll() */ - smp_wmb(); - - tcp_write_queue_purge(sk); - tcp_done(sk); - - if (!sock_flag(sk, SOCK_DEAD)) - sk_error_report(sk); + tcp_done_with_error(sk, err); } /* @@ -4836,10 +4850,7 @@ static bool tcp_try_coalesce(struct sock *sk, if (TCP_SKB_CB(from)->seq != TCP_SKB_CB(to)->end_seq) return false; - if (!mptcp_skb_can_collapse(to, from)) - return false; - - if (skb_cmp_decrypted(from, to)) + if (!tcp_skb_can_collapse_rx(to, from)) return false; if (!skb_try_coalesce(to, from, fragstolen, &delta)) @@ -4882,7 +4893,7 @@ static void tcp_drop_reason(struct sock *sk, struct sk_buff *skb, enum skb_drop_reason reason) { sk_drops_add(sk, skb); - kfree_skb_reason(skb, reason); + sk_skb_reason_drop(sk, skb, reason); } /* This one checks to see if we can put data from the @@ -5395,7 +5406,7 @@ restart: break; } - if (n && n != tail && mptcp_skb_can_collapse(skb, n) && + if (n && n != tail && tcp_skb_can_collapse_rx(skb, n) && TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(n)->seq) { end_of_skbs = false; break; @@ -5446,11 +5457,9 @@ restart: skb = tcp_collapse_one(sk, skb, list, root); if (!skb || skb == tail || - !mptcp_skb_can_collapse(nskb, skb) || + !tcp_skb_can_collapse_rx(nskb, skb) || (TCP_SKB_CB(skb)->tcp_flags & (TCPHDR_SYN | TCPHDR_FIN))) goto end; - if (skb_cmp_decrypted(skb, nskb)) - goto end; } } } @@ -5989,6 +5998,11 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb, * RFC 5961 4.2 : Send a challenge ack */ if (th->syn) { + if (sk->sk_state == TCP_SYN_RECV && sk->sk_socket && th->ack && + TCP_SKB_CB(skb)->seq + 1 == 
TCP_SKB_CB(skb)->end_seq && + TCP_SKB_CB(skb)->seq + 1 == tp->rcv_nxt && + TCP_SKB_CB(skb)->ack_seq == tp->snd_nxt) + goto pass; syn_challenge: if (syn_inerr) TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); @@ -5998,6 +6012,7 @@ syn_challenge: goto discard; } +pass: bpf_skops_parse_hdr(sk, skb); return true; @@ -6804,6 +6819,9 @@ tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb) tcp_fast_path_on(tp); if (sk->sk_shutdown & SEND_SHUTDOWN) tcp_shutdown(sk, SEND_SHUTDOWN); + + if (sk->sk_socket) + goto consume; break; case TCP_FIN_WAIT1: { @@ -7014,31 +7032,6 @@ static void tcp_openreq_init(struct request_sock *req, #endif } -struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops, - struct sock *sk_listener, - bool attach_listener) -{ - struct request_sock *req = reqsk_alloc(ops, sk_listener, - attach_listener); - - if (req) { - struct inet_request_sock *ireq = inet_rsk(req); - - ireq->ireq_opt = NULL; -#if IS_ENABLED(CONFIG_IPV6) - ireq->pktopts = NULL; -#endif - atomic64_set(&ireq->ir_cookie, 0); - ireq->ireq_state = TCP_NEW_SYN_RECV; - write_pnet(&ireq->ireq_net, sock_net(sk_listener)); - ireq->ireq_family = sk_listener->sk_family; - req->timeout = TCP_TIMEOUT_INIT; - } - - return req; -} -EXPORT_SYMBOL(inet_reqsk_alloc); - /* * Return true if a syncookie should be sent */ diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index b710958393e6..fd17f25ff288 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -93,7 +93,9 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, struct inet_hashinfo tcp_hashinfo; EXPORT_SYMBOL(tcp_hashinfo); -static DEFINE_PER_CPU(struct sock *, ipv4_tcp_sk); +static DEFINE_PER_CPU(struct sock_bh_locked, ipv4_tcp_sk) = { + .bh_lock = INIT_LOCAL_LOCK(bh_lock), +}; static u32 tcp_v4_init_seq(const struct sk_buff *skb) { @@ -114,6 +116,7 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) const struct inet_timewait_sock *tw = inet_twsk(sktw); const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw); struct tcp_sock *tp = tcp_sk(sk); + int ts_recent_stamp; if (reuse == 2) { /* Still does not detect *everything* that goes through @@ -152,10 +155,11 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) If TW bucket has been already destroyed we fall back to VJ's scheme and use initial timestamp retrieved from peer table. */ - if (tcptw->tw_ts_recent_stamp && + ts_recent_stamp = READ_ONCE(tcptw->tw_ts_recent_stamp); + if (ts_recent_stamp && (!twp || (reuse && time_after32(ktime_get_seconds(), - tcptw->tw_ts_recent_stamp)))) { - /* inet_twsk_hashdance() sets sk_refcnt after putting twsk + ts_recent_stamp)))) { + /* inet_twsk_hashdance_schedule() sets sk_refcnt after putting twsk * and releasing the bucket lock. 
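 * A concurrent lookup can therefore still observe the refcount as
 * zero; refcount_inc_not_zero() treats such a twsk as unusable.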
*/ if (unlikely(!refcount_inc_not_zero(&sktw->sk_refcnt))) @@ -178,8 +182,8 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) if (!seq) seq = 1; WRITE_ONCE(tp->write_seq, seq); - tp->rx_opt.ts_recent = tcptw->tw_ts_recent; - tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp; + tp->rx_opt.ts_recent = READ_ONCE(tcptw->tw_ts_recent); + tp->rx_opt.ts_recent_stamp = ts_recent_stamp; } return 1; @@ -611,15 +615,10 @@ int tcp_v4_err(struct sk_buff *skb, u32 info) ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th); - if (!sock_owned_by_user(sk)) { - WRITE_ONCE(sk->sk_err, err); - - sk_error_report(sk); - - tcp_done(sk); - } else { + if (!sock_owned_by_user(sk)) + tcp_done_with_error(sk, err); + else WRITE_ONCE(sk->sk_err_soft, err); - } goto out; } @@ -885,7 +884,9 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb, arg.tos = ip_hdr(skb)->tos; arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL); local_bh_disable(); - ctl_sk = this_cpu_read(ipv4_tcp_sk); + local_lock_nested_bh(&ipv4_tcp_sk.bh_lock); + ctl_sk = this_cpu_read(ipv4_tcp_sk.sock); + sock_net_set(ctl_sk, net); if (sk) { ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ? @@ -910,6 +911,7 @@ static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb, sock_net_set(ctl_sk, &init_net); __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); __TCP_INC_STATS(net, TCP_MIB_OUTRSTS); + local_unlock_nested_bh(&ipv4_tcp_sk.bh_lock); local_bh_enable(); #ifdef CONFIG_TCP_MD5SIG @@ -1005,7 +1007,8 @@ static void tcp_v4_send_ack(const struct sock *sk, arg.tos = tos; arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL); local_bh_disable(); - ctl_sk = this_cpu_read(ipv4_tcp_sk); + local_lock_nested_bh(&ipv4_tcp_sk.bh_lock); + ctl_sk = this_cpu_read(ipv4_tcp_sk.sock); sock_net_set(ctl_sk, net); ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ? inet_twsk(sk)->tw_mark : READ_ONCE(sk->sk_mark); @@ -1020,6 +1023,7 @@ static void tcp_v4_send_ack(const struct sock *sk, sock_net_set(ctl_sk, &init_net); __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); + local_unlock_nested_bh(&ipv4_tcp_sk.bh_lock); local_bh_enable(); } @@ -1057,19 +1061,17 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) #else if (0) { #endif -#ifdef CONFIG_TCP_MD5SIG - } else if (static_branch_unlikely(&tcp_md5_needed.key)) { + } else if (static_branch_tcp_md5()) { key.md5_key = tcp_twsk_md5_key(tcptw); if (key.md5_key) key.type = TCP_KEY_MD5; -#endif } tcp_v4_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcp_tw_tsval(tcptw), - tcptw->tw_ts_recent, + READ_ONCE(tcptw->tw_ts_recent), tw->tw_bound_dev_if, &key, tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0, tw->tw_tos, @@ -1131,8 +1133,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, #else if (0) { #endif -#ifdef CONFIG_TCP_MD5SIG - } else if (static_branch_unlikely(&tcp_md5_needed.key)) { + } else if (static_branch_tcp_md5()) { const union tcp_md5_addr *addr; int l3index; @@ -1141,7 +1142,6 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, key.md5_key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET); if (key.md5_key) key.type = TCP_KEY_MD5; -#endif } tcp_v4_send_ack(sk, skb, seq, @@ -1939,7 +1939,7 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) reset: tcp_v4_send_reset(rsk, skb, sk_rst_convert_drop_reason(reason)); discard: - kfree_skb_reason(skb, reason); + sk_skb_reason_drop(sk, skb, reason); /* Be careful here. 
If this function gets more complicated and * gcc suffers from register pressure on the x86, sk (in %ebx) * might be destroyed here. This current version compiles correctly, @@ -2049,8 +2049,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb, TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) || ((TCP_SKB_CB(tail)->tcp_flags ^ TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) || - !mptcp_skb_can_collapse(tail, skb) || - skb_cmp_decrypted(tail, skb) || + !tcp_skb_can_collapse_rx(tail, skb) || thtail->doff != th->doff || memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th))) goto no_coalesce; @@ -2176,8 +2175,8 @@ int tcp_v4_rcv(struct sk_buff *skb) int dif = inet_iif(skb); const struct iphdr *iph; const struct tcphdr *th; + struct sock *sk = NULL; bool refcounted; - struct sock *sk; int ret; u32 isn; @@ -2376,7 +2375,7 @@ bad_packet: discard_it: SKB_DR_OR(drop_reason, NOT_SPECIFIED); /* Discard frame. */ - kfree_skb_reason(skb, drop_reason); + sk_skb_reason_drop(sk, skb, drop_reason); return 0; discard_and_relse: @@ -3506,6 +3505,7 @@ static int __net_init tcp_sk_init(struct net *net) net->ipv4.sysctl_tcp_shrink_window = 0; net->ipv4.sysctl_tcp_pingpong_thresh = 1; + net->ipv4.sysctl_tcp_rto_min_us = jiffies_to_usecs(TCP_RTO_MIN); return 0; } @@ -3620,7 +3620,9 @@ void __init tcp_v4_init(void) */ inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO; - per_cpu(ipv4_tcp_sk, cpu) = sk; + sk->sk_clockid = CLOCK_MONOTONIC; + + per_cpu(ipv4_tcp_sk.sock, cpu) = sk; } if (register_pernet_subsys(&tcp_sk_ops)) panic("Failed to create the TCP control socket.\n"); diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c index 538c06f95918..a19a9dbd3409 100644 --- a/net/ipv4/tcp_minisocks.c +++ b/net/ipv4/tcp_minisocks.c @@ -101,16 +101,18 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, struct tcp_options_received tmp_opt; struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw); bool paws_reject = false; + int ts_recent_stamp; tmp_opt.saw_tstamp = 0; - if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) { + ts_recent_stamp = READ_ONCE(tcptw->tw_ts_recent_stamp); + if (th->doff > (sizeof(*th) >> 2) && ts_recent_stamp) { tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL); if (tmp_opt.saw_tstamp) { if (tmp_opt.rcv_tsecr) tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset; - tmp_opt.ts_recent = tcptw->tw_ts_recent; - tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp; + tmp_opt.ts_recent = READ_ONCE(tcptw->tw_ts_recent); + tmp_opt.ts_recent_stamp = ts_recent_stamp; paws_reject = tcp_paws_reject(&tmp_opt, th->rst); } } @@ -152,8 +154,10 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb, twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq); if (tmp_opt.saw_tstamp) { - tcptw->tw_ts_recent_stamp = ktime_get_seconds(); - tcptw->tw_ts_recent = tmp_opt.rcv_tsval; + WRITE_ONCE(tcptw->tw_ts_recent_stamp, + ktime_get_seconds()); + WRITE_ONCE(tcptw->tw_ts_recent, + tmp_opt.rcv_tsval); } inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN); @@ -197,8 +201,10 @@ kill: } if (tmp_opt.saw_tstamp) { - tcptw->tw_ts_recent = tmp_opt.rcv_tsval; - tcptw->tw_ts_recent_stamp = ktime_get_seconds(); + WRITE_ONCE(tcptw->tw_ts_recent, + tmp_opt.rcv_tsval); + WRITE_ONCE(tcptw->tw_ts_recent_stamp, + ktime_get_seconds()); } inet_twsk_put(tw); @@ -225,7 +231,7 @@ kill: if (th->syn && !th->rst && !th->ack && !paws_reject && (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) || (tmp_opt.saw_tstamp && - (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) { + 
(s32)(READ_ONCE(tcptw->tw_ts_recent) - tmp_opt.rcv_tsval) < 0))) { u32 isn = tcptw->tw_snd_nxt + 65535 + 2; if (isn == 0) isn++; @@ -339,17 +345,10 @@ void tcp_time_wait(struct sock *sk, int state, int timeo) if (state == TCP_TIME_WAIT) timeo = TCP_TIMEWAIT_LEN; - /* tw_timer is pinned, so we need to make sure BH are disabled - * in following section, otherwise timer handler could run before - * we complete the initialization. - */ - local_bh_disable(); - inet_twsk_schedule(tw, timeo); /* Linkage updates. * Note that access to tw after this point is illegal. */ - inet_twsk_hashdance(tw, sk, net->ipv4.tcp_death_row.hashinfo); - local_bh_enable(); + inet_twsk_hashdance_schedule(tw, sk, net->ipv4.tcp_death_row.hashinfo, timeo); } else { /* Sorry, if we're out of memory, just CLOSE this * socket up. We've got bigger problems than @@ -515,9 +514,6 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, const struct tcp_sock *oldtp; struct tcp_sock *newtp; u32 seq; -#ifdef CONFIG_TCP_AO - struct tcp_ao_key *ao_key; -#endif if (!newsk) return NULL; @@ -608,10 +604,14 @@ struct sock *tcp_create_openreq_child(const struct sock *sk, #endif #ifdef CONFIG_TCP_AO newtp->ao_info = NULL; - ao_key = treq->af_specific->ao_lookup(sk, req, - tcp_rsk(req)->ao_keyid, -1); - if (ao_key) - newtp->tcp_header_len += tcp_ao_len_aligned(ao_key); + + if (tcp_rsk_used_ao(req)) { + struct tcp_ao_key *ao_key; + + ao_key = treq->af_specific->ao_lookup(sk, req, tcp_rsk(req)->ao_keyid, -1); + if (ao_key) + newtp->tcp_header_len += tcp_ao_len_aligned(ao_key); + } #endif if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len) newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len; diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 95618d0e78e4..16c48df8df4c 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1301,7 +1301,7 @@ static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, tp = tcp_sk(sk); prior_wstamp = tp->tcp_wstamp_ns; tp->tcp_wstamp_ns = max(tp->tcp_wstamp_ns, tp->tcp_clock_cache); - skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true); + skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC); if (clone_it) { oskb = skb; @@ -1655,7 +1655,7 @@ int tcp_fragment(struct sock *sk, enum tcp_queue tcp_queue, skb_split(skb, buff, len); - skb_set_delivery_time(buff, skb->tstamp, true); + skb_set_delivery_time(buff, skb->tstamp, SKB_CLOCK_MONOTONIC); tcp_fragment_tstamp(skb, buff); old_factor = tcp_skb_pcount(skb); @@ -2764,7 +2764,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) { /* "skb_mstamp_ns" is used as a start point for the retransmit timer */ tp->tcp_wstamp_ns = tp->tcp_clock_cache; - skb_set_delivery_time(skb, tp->tcp_wstamp_ns, true); + skb_set_delivery_time(skb, tp->tcp_wstamp_ns, SKB_CLOCK_MONOTONIC); list_move_tail(&skb->tcp_tsorted_anchor, &tp->tsorted_sent_queue); tcp_init_tso_segs(skb, mss_now); goto repair; /* Skip network transmission */ @@ -3752,11 +3752,11 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, #ifdef CONFIG_SYN_COOKIES if (unlikely(synack_type == TCP_SYNACK_COOKIE && ireq->tstamp_ok)) skb_set_delivery_time(skb, cookie_init_timestamp(req, now), - true); + SKB_CLOCK_MONOTONIC); else #endif { - skb_set_delivery_time(skb, now, true); + skb_set_delivery_time(skb, now, SKB_CLOCK_MONOTONIC); if (!tcp_rsk(req)->snt_synack) /* Timestamp first SYNACK */ tcp_rsk(req)->snt_synack = 
tcp_skb_timestamp_us(skb); } @@ -3768,6 +3768,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, #ifdef CONFIG_TCP_AO struct tcp_ao_key *ao_key = NULL; u8 keyid = tcp_rsk(req)->ao_keyid; + u8 rnext = tcp_rsk(req)->ao_rcv_next; ao_key = tcp_sk(sk)->af_specific->ao_lookup(sk, req_to_sk(req), keyid, -1); @@ -3777,6 +3778,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, * ao_keyid (RFC5925 RNextKeyID), so let's keep it simple here. */ if (unlikely(!ao_key)) { + trace_tcp_ao_synack_no_key(sk, keyid, rnext); rcu_read_unlock(); kfree_skb(skb); net_warn_ratelimited("TCP-AO: the keyid %u from SYN packet is not present - not sending SYNACK\n", @@ -3843,7 +3845,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, bpf_skops_write_hdr_opt((struct sock *)sk, skb, req, syn_skb, synack_type, &opts); - skb_set_delivery_time(skb, now, true); + skb_set_delivery_time(skb, now, SKB_CLOCK_MONOTONIC); tcp_add_tx_delay(skb, tp); return skb; @@ -4027,7 +4029,7 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn) err = tcp_transmit_skb(sk, syn_data, 1, sk->sk_allocation); - skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, true); + skb_set_delivery_time(syn, syn_data->skb_mstamp_ns, SKB_CLOCK_MONOTONIC); /* Now full SYN+DATA was cloned and sent (or not), * remove the SYN from the original skb (syn_data) @@ -4163,16 +4165,9 @@ EXPORT_SYMBOL(tcp_connect); u32 tcp_delack_max(const struct sock *sk) { - const struct dst_entry *dst = __sk_dst_get(sk); - u32 delack_max = inet_csk(sk)->icsk_delack_max; + u32 delack_from_rto_min = max(tcp_rto_min(sk), 2) - 1; - if (dst && dst_metric_locked(dst, RTAX_RTO_MIN)) { - u32 rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN); - u32 delack_from_rto_min = max_t(int, 1, rto_min - 1); - - delack_max = min_t(u32, delack_max, delack_from_rto_min); - } - return delack_max; + return min(inet_csk(sk)->icsk_delack_max, delack_from_rto_min); } /* Send out a delayed ack, the caller does the policy checking diff --git a/net/ipv4/tcp_sigpool.c b/net/ipv4/tcp_sigpool.c index 8512cb09ebc0..d8a4f192873a 100644 --- a/net/ipv4/tcp_sigpool.c +++ b/net/ipv4/tcp_sigpool.c @@ -10,7 +10,14 @@ #include static size_t __scratch_size; -static DEFINE_PER_CPU(void __rcu *, sigpool_scratch); +struct sigpool_scratch { + local_lock_t bh_lock; + void __rcu *pad; +}; + +static DEFINE_PER_CPU(struct sigpool_scratch, sigpool_scratch) = { + .bh_lock = INIT_LOCAL_LOCK(bh_lock), +}; struct sigpool_entry { struct crypto_ahash *hash; @@ -72,7 +79,7 @@ static int sigpool_reserve_scratch(size_t size) break; } - old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch, cpu), + old_scratch = rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu), scratch, lockdep_is_held(&cpool_mutex)); if (!cpu_online(cpu) || !old_scratch) { kfree(old_scratch); @@ -93,7 +100,7 @@ static void sigpool_scratch_free(void) int cpu; for_each_possible_cpu(cpu) - kfree(rcu_replace_pointer(per_cpu(sigpool_scratch, cpu), + kfree(rcu_replace_pointer(per_cpu(sigpool_scratch.pad, cpu), NULL, lockdep_is_held(&cpool_mutex))); __scratch_size = 0; } @@ -277,7 +284,8 @@ int tcp_sigpool_start(unsigned int id, struct tcp_sigpool *c) __cond_acquires(RC /* Pairs with tcp_sigpool_reserve_scratch(), scratch area is * valid (allocated) until tcp_sigpool_end(). 
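+ * On PREEMPT_RT the local_lock_nested_bh() taken below also
+ * serialises this CPU's users of the scratch buffer, since
+ * disabling BH alone no longer guarantees exclusive access there.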
*/ - c->scratch = rcu_dereference_bh(*this_cpu_ptr(&sigpool_scratch)); + local_lock_nested_bh(&sigpool_scratch.bh_lock); + c->scratch = rcu_dereference_bh(*this_cpu_ptr(&sigpool_scratch.pad)); return 0; } EXPORT_SYMBOL_GPL(tcp_sigpool_start); @@ -286,6 +294,7 @@ void tcp_sigpool_end(struct tcp_sigpool *c) __releases(RCU_BH) { struct crypto_ahash *hash = crypto_ahash_reqtfm(c->req); + local_unlock_nested_bh(&sigpool_scratch.bh_lock); rcu_read_unlock_bh(); ahash_request_free(c->req); crypto_free_ahash(hash); diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 892c86657fbc..4d40615dc8fc 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -74,11 +74,7 @@ u32 tcp_clamp_probe0_to_user_timeout(const struct sock *sk, u32 when) static void tcp_write_err(struct sock *sk) { - WRITE_ONCE(sk->sk_err, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT); - sk_error_report(sk); - - tcp_write_queue_purge(sk); - tcp_done(sk); + tcp_done_with_error(sk, READ_ONCE(sk->sk_err_soft) ? : ETIMEDOUT); __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT); } diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 578668878a85..49c622e743e8 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -940,8 +940,7 @@ static int udp_send_skb(struct sk_buff *skb, struct flowi4 *fl4, kfree_skb(skb); return -EINVAL; } - if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || - dst_xfrm(skb_dst(skb))) { + if (is_udplite || dst_xfrm(skb_dst(skb))) { kfree_skb(skb); return -EIO; } @@ -2076,7 +2075,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) } UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); trace_udp_fail_queue_rcv_skb(rc, sk, skb); - kfree_skb_reason(skb, drop_reason); + sk_skb_reason_drop(sk, skb, drop_reason); return -1; } @@ -2198,7 +2197,7 @@ csum_error: drop: __UDP_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); atomic_inc(&sk->sk_drops); - kfree_skb_reason(skb, drop_reason); + sk_skb_reason_drop(sk, skb, drop_reason); return -1; } @@ -2232,7 +2231,7 @@ bool udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst) struct dst_entry *old; if (dst_hold_safe(dst)) { - old = xchg((__force struct dst_entry **)&sk->sk_rx_dst, dst); + old = unrcu_pointer(xchg(&sk->sk_rx_dst, RCU_INITIALIZER(dst))); dst_release(old); return old != dst; } @@ -2385,7 +2384,7 @@ static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb, int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, int proto) { - struct sock *sk; + struct sock *sk = NULL; struct udphdr *uh; unsigned short ulen; struct rtable *rt = skb_rtable(skb); @@ -2462,7 +2461,7 @@ no_sk: * Hmm. We got an UDP packet to a port to which we * don't wanna listen. Ignore it. */ - kfree_skb_reason(skb, drop_reason); + sk_skb_reason_drop(sk, skb, drop_reason); return 0; short_packet: @@ -2487,7 +2486,7 @@ csum_error: __UDP_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); drop: __UDP_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); - kfree_skb_reason(skb, drop_reason); + sk_skb_reason_drop(sk, skb, drop_reason); return 0; } diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 59448a2dbf2c..aa2e0a28ca61 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -357,6 +357,14 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, else uh->check = gso_make_checksum(seg, ~check) ? : CSUM_MANGLED_0; + /* On the TX path, CHECKSUM_NONE and CHECKSUM_UNNECESSARY have the same + * meaning. 
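+ * (both tell the stack that no further checksum work is needed).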
However, check for bad offloads in the GSO stack expects the + * latter, if the checksum was calculated in software. To vouch for the + * segment skbs we actually need to set it on the gso_skb. + */ + if (gso_skb->ip_summed == CHECKSUM_NONE) + gso_skb->ip_summed = CHECKSUM_UNNECESSARY; + /* update refcount for the packet */ if (copy_dtor) { int delta = sum_truesize - gso_skb->truesize; diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 5c424a0e7232..55a0fd589fc8 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -863,7 +863,7 @@ static void addrconf_forward_change(struct net *net, __s32 newf) } } -static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf) +static int addrconf_fixup_forwarding(const struct ctl_table *table, int *p, int newf) { struct net *net; int old; @@ -931,7 +931,7 @@ static void addrconf_linkdown_change(struct net *net, __s32 newf) } } -static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf) +static int addrconf_fixup_linkdown(const struct ctl_table *table, int *p, int newf) { struct net *net; int old; @@ -1873,7 +1873,8 @@ int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev, master, &dst, scores, hiscore_idx); - if (scores[hiscore_idx].ifa) + if (scores[hiscore_idx].ifa && + scores[hiscore_idx].scopedist >= 0) goto out; } @@ -6378,7 +6379,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf) } } -static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf) +static int addrconf_disable_ipv6(const struct ctl_table *table, int *p, int newf) { struct net *net = (struct net *)table->extra2; int old; @@ -6669,7 +6670,7 @@ void addrconf_disable_policy_idev(struct inet6_dev *idev, int val) } static -int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val) +int addrconf_disable_policy(const struct ctl_table *ctl, int *valp, int val) { struct net *net = (struct net *)ctl->extra2; struct inet6_dev *idev; diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c index 8041dc181bd4..90d2c7e3f5e9 100644 --- a/net/ipv6/af_inet6.c +++ b/net/ipv6/af_inet6.c @@ -509,7 +509,7 @@ void inet6_cleanup_sock(struct sock *sk) /* Free tx options */ - opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL); + opt = unrcu_pointer(xchg(&np->opt, NULL)); if (opt) { atomic_sub(opt->tot_len, &sk->sk_omem_alloc); txopt_put(opt); @@ -1060,6 +1060,7 @@ static const struct ipv6_stub ipv6_stub_impl = { .nd_tbl = &nd_tbl, .ipv6_fragment = ip6_fragment, .ipv6_dev_find = ipv6_dev_find, + .ip6_xmit = ip6_xmit, }; static const struct ipv6_bpf_stub ipv6_bpf_stub_impl = { diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c index 34a9a5b9ed00..3920e8aa1031 100644 --- a/net/ipv6/esp6.c +++ b/net/ipv6/esp6.c @@ -256,8 +256,7 @@ static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb) #else static int esp_output_tail_tcp(struct xfrm_state *x, struct sk_buff *skb) { - kfree_skb(skb); - + WARN_ON(1); return -EOPNOTSUPP; } #endif diff --git a/net/ipv6/esp6_offload.c b/net/ipv6/esp6_offload.c index 527b7caddbc6..919ebfabbe4e 100644 --- a/net/ipv6/esp6_offload.c +++ b/net/ipv6/esp6_offload.c @@ -83,6 +83,13 @@ static struct sk_buff *esp6_gro_receive(struct list_head *head, x = xfrm_state_lookup(dev_net(skb->dev), skb->mark, (xfrm_address_t *)&ipv6_hdr(skb)->daddr, spi, IPPROTO_ESP, AF_INET6); + + if (unlikely(x && x->dir && x->dir != XFRM_SA_DIR_IN)) { + /* non-offload path will record the error and audit log */ + xfrm_state_put(x); + x = NULL; + } + if (!x) goto out_reset; diff 
--git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 83e4f9855ae1..eb111d20615c 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -987,7 +987,7 @@ static void __fib6_drop_pcpu_from(struct fib6_nh *fib6_nh, if (pcpu_rt && rcu_access_pointer(pcpu_rt->from) == match) { struct fib6_info *from; - from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL); + from = unrcu_pointer(xchg(&pcpu_rt->from, NULL)); fib6_info_release(from); } } diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 27d8725445e3..ab504d31f0cd 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -859,7 +859,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, struct rt6_info *rt = dst_rt6_info(skb_dst(skb)); struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ? inet6_sk(skb->sk) : NULL; - bool mono_delivery_time = skb->mono_delivery_time; + u8 tstamp_type = skb->tstamp_type; struct ip6_frag_state state; unsigned int mtu, hlen, nexthdr_offset; ktime_t tstamp = skb->tstamp; @@ -955,7 +955,7 @@ int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, if (iter.frag) ip6_fraglist_prepare(skb, &iter); - skb_set_delivery_time(skb, tstamp, mono_delivery_time); + skb_set_delivery_time(skb, tstamp, tstamp_type); err = output(net, sk, skb); if (!err) IP6_INC_STATS(net, ip6_dst_idev(&rt->dst), @@ -1016,7 +1016,7 @@ slow_path: /* * Put this fragment into the sending queue. */ - skb_set_delivery_time(frag, tstamp, mono_delivery_time); + skb_set_delivery_time(frag, tstamp, tstamp_type); err = output(net, sk, frag); if (err) goto fail; @@ -1124,6 +1124,7 @@ static int ip6_dst_lookup_tail(struct net *net, const struct sock *sk, from = rt ? rcu_dereference(rt->from) : NULL; err = ip6_route_get_saddr(net, from, &fl6->daddr, sk ? 
READ_ONCE(inet6_sk(sk)->srcprefs) : 0, + fl6->flowi6_l3mdev, &fl6->saddr); rcu_read_unlock(); @@ -1924,7 +1925,10 @@ struct sk_buff *__ip6_make_skb(struct sock *sk, skb->priority = READ_ONCE(sk->sk_priority); skb->mark = cork->base.mark; - skb->tstamp = cork->base.transmit_time; + if (sk_is_tcp(sk)) + skb_set_delivery_time(skb, cork->base.transmit_time, SKB_CLOCK_MONOTONIC); + else + skb_set_delivery_type_by_clockid(skb, cork->base.transmit_time, sk->sk_clockid); ip6_cork_steal_dst(skb, cork); IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTREQUESTS); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index d4c28ec1bc51..cd342d5015c6 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -111,8 +111,7 @@ struct ipv6_txoptions *ipv6_update_options(struct sock *sk, icsk->icsk_sync_mss(sk, icsk->icsk_pmtu_cookie); } } - opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt, - opt); + opt = unrcu_pointer(xchg(&inet6_sk(sk)->opt, RCU_INITIALIZER(opt))); sk_dst_reset(sk); return opt; diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index d914b23256ce..254b192c5705 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -1936,7 +1936,7 @@ static struct notifier_block ndisc_netdev_notifier = { }; #ifdef CONFIG_SYSCTL -static void ndisc_warn_deprecated_sysctl(struct ctl_table *ctl, +static void ndisc_warn_deprecated_sysctl(const struct ctl_table *ctl, const char *func, const char *dev_name) { static char warncomm[TASK_COMM_LEN]; diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c index 5d989d803009..581ce055bf52 100644 --- a/net/ipv6/netfilter.c +++ b/net/ipv6/netfilter.c @@ -127,7 +127,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, struct sk_buff *)) { int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size; - bool mono_delivery_time = skb->mono_delivery_time; + u8 tstamp_type = skb->tstamp_type; ktime_t tstamp = skb->tstamp; struct ip6_frag_state state; u8 *prevhdr, nexthdr = 0; @@ -193,7 +193,7 @@ int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb, if (iter.frag) ip6_fraglist_prepare(skb, &iter); - skb_set_delivery_time(skb, tstamp, mono_delivery_time); + skb_set_delivery_time(skb, tstamp, tstamp_type); err = output(net, sk, data, skb); if (err || !iter.frag) break; @@ -226,7 +226,7 @@ slow_path: goto blackhole; } - skb_set_delivery_time(skb2, tstamp, mono_delivery_time); + skb_set_delivery_time(skb2, tstamp, tstamp_type); err = output(net, sk, data, skb2); if (err) goto blackhole; diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 5e1b50c6a44d..6f0844c9315d 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -263,7 +263,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb, fq->iif = dev->ifindex; fq->q.stamp = skb->tstamp; - fq->q.mono_delivery_time = skb->mono_delivery_time; + fq->q.tstamp_type = skb->tstamp_type; fq->q.meat += skb->len; fq->ecn |= ecn; if (payload_len > fq->q.max_size) diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 2eedf255600b..608fa9d05b55 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -362,14 +362,14 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb) if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) && skb_checksum_complete(skb)) { atomic_inc(&sk->sk_drops); - kfree_skb_reason(skb, SKB_DROP_REASON_SKB_CSUM); + sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM); return NET_RX_DROP; } /* Charge it to the 
socket. */ skb_dst_drop(skb); if (sock_queue_rcv_skb_reason(sk, skb, &reason) < 0) { - kfree_skb_reason(skb, reason); + sk_skb_reason_drop(sk, skb, reason); return NET_RX_DROP; } @@ -390,7 +390,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) { atomic_inc(&sk->sk_drops); - kfree_skb_reason(skb, SKB_DROP_REASON_XFRM_POLICY); + sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_XFRM_POLICY); return NET_RX_DROP; } nf_reset_ct(skb); @@ -415,7 +415,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb) if (inet_test_bit(HDRINCL, sk)) { if (skb_checksum_complete(skb)) { atomic_inc(&sk->sk_drops); - kfree_skb_reason(skb, SKB_DROP_REASON_SKB_CSUM); + sk_skb_reason_drop(sk, skb, SKB_DROP_REASON_SKB_CSUM); return NET_RX_DROP; } } @@ -621,7 +621,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length, skb->protocol = htons(ETH_P_IPV6); skb->priority = READ_ONCE(sk->sk_priority); skb->mark = sockc->mark; - skb->tstamp = sockc->transmit_time; + skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, sk->sk_clockid); skb_put(skb, length); skb_reset_network_header(skb); diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 327caca64257..a48be617a8ab 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -198,7 +198,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb, fq->iif = dev->ifindex; fq->q.stamp = skb->tstamp; - fq->q.mono_delivery_time = skb->mono_delivery_time; + fq->q.tstamp_type = skb->tstamp_type; fq->q.meat += skb->len; fq->ecn |= ecn; add_frag_mem_limit(fq->q.fqdir, skb->truesize); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 8d72ca0b086d..c752e9ed20e6 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -131,7 +131,6 @@ static struct fib6_info *rt6_get_route_info(struct net *net, struct uncached_list { spinlock_t lock; struct list_head head; - struct list_head quarantine; }; static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list); @@ -189,8 +188,7 @@ static void rt6_uncached_list_flush_dev(struct net_device *dev) handled = true; } if (handled) - list_move(&rt->dst.rt_uncached, - &ul->quarantine); + list_del_init(&rt->dst.rt_uncached); } spin_unlock_bh(&ul->lock); } @@ -368,7 +366,7 @@ static void ip6_dst_destroy(struct dst_entry *dst) in6_dev_put(idev); } - from = xchg((__force struct fib6_info **)&rt->from, NULL); + from = unrcu_pointer(xchg(&rt->from, NULL)); fib6_info_release(from); } @@ -1440,7 +1438,7 @@ static struct rt6_info *rt6_make_pcpu_route(struct net *net, if (res->f6i->fib6_destroying) { struct fib6_info *from; - from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL); + from = unrcu_pointer(xchg(&pcpu_rt->from, NULL)); fib6_info_release(from); } @@ -1469,7 +1467,7 @@ static void rt6_remove_exception(struct rt6_exception_bucket *bucket, /* purge completely the exception to allow releasing the held resources: * some [sk] cache may keep the dst around for unlimited time */ - from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL); + from = unrcu_pointer(xchg(&rt6_ex->rt6i->from, NULL)); fib6_info_release(from); dst_dev_put(&rt6_ex->rt6i->dst); @@ -2376,7 +2374,7 @@ static u32 rt6_multipath_custom_hash_outer(const struct net *net, hash_keys.ports.dst = keys.ports.dst; *p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION); - return flow_hash_from_keys(&hash_keys); + return fib_multipath_hash_from_keys(net, &hash_keys); } static u32 rt6_multipath_custom_hash_inner(const struct net *net, @@ -2425,7 
+2423,7 @@ static u32 rt6_multipath_custom_hash_inner(const struct net *net, if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT) hash_keys.ports.dst = keys.ports.dst; - return flow_hash_from_keys(&hash_keys); + return fib_multipath_hash_from_keys(net, &hash_keys); } static u32 rt6_multipath_custom_hash_skb(const struct net *net, @@ -2464,7 +2462,7 @@ static u32 rt6_multipath_custom_hash_fl6(const struct net *net, if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT) hash_keys.ports.dst = fl6->fl6_dport; - return flow_hash_from_keys(&hash_keys); + return fib_multipath_hash_from_keys(net, &hash_keys); } /* if skb is set it will be used and fl6 can be NULL */ @@ -2486,7 +2484,7 @@ u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6, hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6); hash_keys.basic.ip_proto = fl6->flowi6_proto; } - mhash = flow_hash_from_keys(&hash_keys); + mhash = fib_multipath_hash_from_keys(net, &hash_keys); break; case 1: if (skb) { @@ -2518,7 +2516,7 @@ u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6, hash_keys.ports.dst = fl6->fl6_dport; hash_keys.basic.ip_proto = fl6->flowi6_proto; } - mhash = flow_hash_from_keys(&hash_keys); + mhash = fib_multipath_hash_from_keys(net, &hash_keys); break; case 2: memset(&hash_keys, 0, sizeof(hash_keys)); @@ -2555,7 +2553,7 @@ u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6, hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6); hash_keys.basic.ip_proto = fl6->flowi6_proto; } - mhash = flow_hash_from_keys(&hash_keys); + mhash = fib_multipath_hash_from_keys(net, &hash_keys); break; case 3: if (skb) @@ -3764,7 +3762,7 @@ static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg, if (!rt) goto out; - rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len, + rt->fib6_metrics = ip_fib_metrics_init(cfg->fc_mx, cfg->fc_mx_len, extack); if (IS_ERR(rt->fib6_metrics)) { err = PTR_ERR(rt->fib6_metrics); @@ -5689,7 +5687,7 @@ static int rt6_fill_node(struct net *net, struct sk_buff *skb, goto nla_put_failure; } else if (dest) { struct in6_addr saddr_buf; - if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 && + if (ip6_route_get_saddr(net, rt, dest, 0, 0, &saddr_buf) == 0 && nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf)) goto nla_put_failure; } @@ -6758,7 +6756,6 @@ int __init ip6_route_init(void) struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu); INIT_LIST_HEAD(&ul->head); - INIT_LIST_HEAD(&ul->quarantine); spin_lock_init(&ul->lock); } diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c index a31521e270f7..180da19c148c 100644 --- a/net/ipv6/seg6.c +++ b/net/ipv6/seg6.c @@ -21,9 +21,7 @@ #include #include #include -#ifdef CONFIG_IPV6_SEG6_HMAC #include -#endif bool seg6_validate_srh(struct ipv6_sr_hdr *srh, int len, bool reduced) { @@ -437,13 +435,11 @@ static int __net_init seg6_net_init(struct net *net) net->ipv6.seg6_data = sdata; -#ifdef CONFIG_IPV6_SEG6_HMAC if (seg6_hmac_net_init(net)) { kfree(rcu_dereference_raw(sdata->tun_src)); kfree(sdata); return -ENOMEM; } -#endif return 0; } @@ -452,9 +448,7 @@ static void __net_exit seg6_net_exit(struct net *net) { struct seg6_pernet_data *sdata = seg6_pernet(net); -#ifdef CONFIG_IPV6_SEG6_HMAC seg6_hmac_net_exit(net); -#endif kfree(rcu_dereference_raw(sdata->tun_src)); kfree(sdata); @@ -520,41 +514,28 @@ int __init seg6_init(void) if (err) goto out_unregister_pernet; -#ifdef CONFIG_IPV6_SEG6_LWTUNNEL err = seg6_iptunnel_init(); if (err) goto 
out_unregister_genl; err = seg6_local_init(); - if (err) { - seg6_iptunnel_exit(); - goto out_unregister_genl; - } -#endif - -#ifdef CONFIG_IPV6_SEG6_HMAC - err = seg6_hmac_init(); if (err) goto out_unregister_iptun; -#endif + + err = seg6_hmac_init(); + if (err) + goto out_unregister_seg6; pr_info("Segment Routing with IPv6\n"); out: return err; -#ifdef CONFIG_IPV6_SEG6_HMAC -out_unregister_iptun: -#ifdef CONFIG_IPV6_SEG6_LWTUNNEL +out_unregister_seg6: seg6_local_exit(); +out_unregister_iptun: seg6_iptunnel_exit(); -#endif -#endif -#ifdef CONFIG_IPV6_SEG6_LWTUNNEL out_unregister_genl: -#endif -#if IS_ENABLED(CONFIG_IPV6_SEG6_LWTUNNEL) || IS_ENABLED(CONFIG_IPV6_SEG6_HMAC) genl_unregister_family(&seg6_genl_family); -#endif out_unregister_pernet: unregister_pernet_subsys(&ip6_segments_ops); goto out; @@ -562,13 +543,9 @@ out_unregister_pernet: void seg6_exit(void) { -#ifdef CONFIG_IPV6_SEG6_HMAC seg6_hmac_exit(); -#endif -#ifdef CONFIG_IPV6_SEG6_LWTUNNEL seg6_local_exit(); seg6_iptunnel_exit(); -#endif genl_unregister_family(&seg6_genl_family); unregister_pernet_subsys(&ip6_segments_ops); } diff --git a/net/ipv6/seg6_local.c b/net/ipv6/seg6_local.c index c434940131b1..c74705ead984 100644 --- a/net/ipv6/seg6_local.c +++ b/net/ipv6/seg6_local.c @@ -1380,7 +1380,9 @@ drop: return err; } -DEFINE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states); +DEFINE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states) = { + .bh_lock = INIT_LOCAL_LOCK(bh_lock), +}; bool seg6_bpf_has_valid_srh(struct sk_buff *skb) { @@ -1388,6 +1390,7 @@ bool seg6_bpf_has_valid_srh(struct sk_buff *skb) this_cpu_ptr(&seg6_bpf_srh_states); struct ipv6_sr_hdr *srh = srh_state->srh; + lockdep_assert_held(&srh_state->bh_lock); if (unlikely(srh == NULL)) return false; @@ -1408,8 +1411,7 @@ bool seg6_bpf_has_valid_srh(struct sk_buff *skb) static int input_action_end_bpf(struct sk_buff *skb, struct seg6_local_lwt *slwt) { - struct seg6_bpf_srh_state *srh_state = - this_cpu_ptr(&seg6_bpf_srh_states); + struct seg6_bpf_srh_state *srh_state; struct ipv6_sr_hdr *srh; int ret; @@ -1420,10 +1422,14 @@ static int input_action_end_bpf(struct sk_buff *skb, } advance_nextseg(srh, &ipv6_hdr(skb)->daddr); - /* preempt_disable is needed to protect the per-CPU buffer srh_state, - * which is also accessed by the bpf_lwt_seg6_* helpers + /* The access to the per-CPU buffer srh_state is protected by running + * always in softirq context (with disabled BH). On PREEMPT_RT the + * required locking is provided by the following local_lock_nested_bh() + * statement. It is also accessed by the bpf_lwt_seg6_* helpers via + * bpf_prog_run_save_cb(). 
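+ * seg6_bpf_has_valid_srh() documents the rule with a
+ * lockdep_assert_held() on the same bh_lock.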
*/ - preempt_disable(); + local_lock_nested_bh(&seg6_bpf_srh_states.bh_lock); + srh_state = this_cpu_ptr(&seg6_bpf_srh_states); srh_state->srh = srh; srh_state->hdrlen = srh->hdrlen << 3; srh_state->valid = true; @@ -1446,15 +1452,15 @@ static int input_action_end_bpf(struct sk_buff *skb, if (srh_state->srh && !seg6_bpf_has_valid_srh(skb)) goto drop; + local_unlock_nested_bh(&seg6_bpf_srh_states.bh_lock); - preempt_enable(); if (ret != BPF_REDIRECT) seg6_lookup_nexthop(skb, NULL, 0); return dst_input(skb); drop: - preempt_enable(); + local_unlock_nested_bh(&seg6_bpf_srh_states.bh_lock); kfree_skb(skb); return -EINVAL; } diff --git a/net/ipv6/syncookies.c b/net/ipv6/syncookies.c index bfad1e89b6a6..9d83eadd308b 100644 --- a/net/ipv6/syncookies.c +++ b/net/ipv6/syncookies.c @@ -275,6 +275,6 @@ out: out_free: reqsk_free(req); out_drop: - kfree_skb_reason(skb, reason); + sk_skb_reason_drop(sk, skb, reason); return NULL; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 729faf8bd366..200fea92f12f 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -490,14 +490,10 @@ static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th); - if (!sock_owned_by_user(sk)) { - WRITE_ONCE(sk->sk_err, err); - sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */ - - tcp_done(sk); - } else { + if (!sock_owned_by_user(sk)) + tcp_done_with_error(sk, err); + else WRITE_ONCE(sk->sk_err_soft, err); - } goto out; case TCP_LISTEN: break; @@ -975,7 +971,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 mark = inet_twsk(sk)->tw_mark; else mark = READ_ONCE(sk->sk_mark); - skb_set_delivery_time(buff, tcp_transmit_time(sk), true); + skb_set_delivery_time(buff, tcp_transmit_time(sk), SKB_CLOCK_MONOTONIC); } if (txhash) { /* autoflowlabel/skb_get_hash_flowi6 rely on buff->hash */ @@ -1200,9 +1196,9 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb) tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcp_tw_tsval(tcptw), - tcptw->tw_ts_recent, tw->tw_bound_dev_if, &key, - tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority, - tw->tw_txhash); + READ_ONCE(tcptw->tw_ts_recent), tw->tw_bound_dev_if, + &key, tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), + tw->tw_priority, tw->tw_txhash); #ifdef CONFIG_TCP_AO out: @@ -1678,7 +1674,7 @@ reset: discard: if (opt_skb) __kfree_skb(opt_skb); - kfree_skb_reason(skb, reason); + sk_skb_reason_drop(sk, skb, reason); return 0; csum_err: reason = SKB_DROP_REASON_TCP_CSUM; @@ -1751,8 +1747,8 @@ INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb) int dif = inet6_iif(skb); const struct tcphdr *th; const struct ipv6hdr *hdr; + struct sock *sk = NULL; bool refcounted; - struct sock *sk; int ret; u32 isn; struct net *net = dev_net(skb->dev); @@ -1944,7 +1940,7 @@ bad_packet: discard_it: SKB_DR_OR(drop_reason, NOT_SPECIFIED); - kfree_skb_reason(skb, drop_reason); + sk_skb_reason_drop(sk, skb, drop_reason); return 0; discard_and_relse: @@ -2383,8 +2379,14 @@ static struct inet_protosw tcpv6_protosw = { static int __net_init tcpv6_net_init(struct net *net) { - return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6, - SOCK_RAW, IPPROTO_TCP, net); + int res; + + res = inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6, + SOCK_RAW, IPPROTO_TCP, net); + if (!res) + net->ipv6.tcp_sk->sk_clockid = CLOCK_MONOTONIC; + + return res; } static void __net_exit 
tcpv6_net_exit(struct net *net) diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index c81a07ac0463..6602a2e9cdb5 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -46,7 +46,6 @@ #include #include #include -#include #include #include #include @@ -673,7 +672,7 @@ static int __udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) } UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); trace_udp_fail_queue_rcv_skb(rc, sk, skb); - kfree_skb_reason(skb, drop_reason); + sk_skb_reason_drop(sk, skb, drop_reason); return -1; } @@ -776,7 +775,7 @@ csum_error: drop: __UDP6_INC_STATS(sock_net(sk), UDP_MIB_INERRORS, is_udplite); atomic_inc(&sk->sk_drops); - kfree_skb_reason(skb, drop_reason); + sk_skb_reason_drop(sk, skb, drop_reason); return -1; } @@ -940,8 +939,8 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable, enum skb_drop_reason reason = SKB_DROP_REASON_NOT_SPECIFIED; const struct in6_addr *saddr, *daddr; struct net *net = dev_net(skb->dev); + struct sock *sk = NULL; struct udphdr *uh; - struct sock *sk; bool refcounted; u32 ulen = 0; @@ -1033,7 +1032,7 @@ no_sk: __UDP6_INC_STATS(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); - kfree_skb_reason(skb, reason); + sk_skb_reason_drop(sk, skb, reason); return 0; short_packet: @@ -1054,7 +1053,7 @@ csum_error: __UDP6_INC_STATS(net, UDP_MIB_CSUMERRORS, proto == IPPROTO_UDPLITE); discard: __UDP6_INC_STATS(net, UDP_MIB_INERRORS, proto == IPPROTO_UDPLITE); - kfree_skb_reason(skb, reason); + sk_skb_reason_drop(sk, skb, reason); return 0; } @@ -1257,8 +1256,7 @@ static int udp_v6_send_skb(struct sk_buff *skb, struct flowi6 *fl6, kfree_skb(skb); return -EINVAL; } - if (skb->ip_summed != CHECKSUM_PARTIAL || is_udplite || - dst_xfrm(skb_dst(skb))) { + if (is_udplite || dst_xfrm(skb_dst(skb))) { kfree_skb(skb); return -EIO; } diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index 2f1ea5f999a2..b1d81c4270ab 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -290,8 +290,14 @@ int __init xfrm6_init(void) ret = register_pernet_subsys(&xfrm6_net_ops); if (ret) goto out_protocol; + + ret = xfrm_nat_keepalive_init(AF_INET6); + if (ret) + goto out_nat_keepalive; out: return ret; +out_nat_keepalive: + unregister_pernet_subsys(&xfrm6_net_ops); out_protocol: xfrm6_protocol_fini(); out_state: @@ -303,6 +309,7 @@ out_policy: void xfrm6_fini(void) { + xfrm_nat_keepalive_fini(AF_INET6); unregister_pernet_subsys(&xfrm6_net_ops); xfrm6_protocol_fini(); xfrm6_policy_fini(); diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 88a34db265d8..1c1decce7f06 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -39,7 +39,6 @@ #include #include #include -#include #include #include #include @@ -61,7 +60,6 @@ #include #include "l2tp_core.h" -#include "trace.h" #define CREATE_TRACE_POINTS #include "trace.h" @@ -107,11 +105,23 @@ struct l2tp_net { /* Lock for write access to l2tp_tunnel_idr */ spinlock_t l2tp_tunnel_idr_lock; struct idr l2tp_tunnel_idr; - struct hlist_head l2tp_session_hlist[L2TP_HASH_SIZE_2]; - /* Lock for write access to l2tp_session_hlist */ - spinlock_t l2tp_session_hlist_lock; + /* Lock for write access to l2tp_v[23]_session_idr/htable */ + spinlock_t l2tp_session_idr_lock; + struct idr l2tp_v2_session_idr; + struct idr l2tp_v3_session_idr; + struct hlist_head l2tp_v3_session_htable[16]; }; +static inline u32 l2tp_v2_session_key(u16 tunnel_id, u16 session_id) +{ + return ((u32)tunnel_id) << 16 | session_id; +} + +static 
inline unsigned long l2tp_v3_session_hashkey(struct sock *sk, u32 session_id) +{ + return ((unsigned long)sk) + session_id; +} + #if IS_ENABLED(CONFIG_IPV6) static bool l2tp_sk_is_v6(struct sock *sk) { @@ -125,29 +135,6 @@ static inline struct l2tp_net *l2tp_pernet(const struct net *net) return net_generic(net, l2tp_net_id); } -/* Session hash global list for L2TPv3. - * The session_id SHOULD be random according to RFC3931, but several - * L2TP implementations use incrementing session_ids. So we do a real - * hash on the session_id, rather than a simple bitmask. - */ -static inline struct hlist_head * -l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id) -{ - return &pn->l2tp_session_hlist[hash_32(session_id, L2TP_HASH_BITS_2)]; -} - -/* Session hash list. - * The session_id SHOULD be random according to RFC2661, but several - * L2TP implementations (Cisco and Microsoft) use incrementing - * session_ids. So we do a real hash on the session_id, rather than a - * simple bitmask. - */ -static inline struct hlist_head * -l2tp_session_id_hash(struct l2tp_tunnel *tunnel, u32 session_id) -{ - return &tunnel->session_hlist[hash_32(session_id, L2TP_HASH_BITS)]; -} - static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel) { trace_free_tunnel(tunnel); @@ -240,66 +227,82 @@ struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth) } EXPORT_SYMBOL_GPL(l2tp_tunnel_get_nth); -struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel, - u32 session_id) +struct l2tp_session *l2tp_v3_session_get(const struct net *net, struct sock *sk, u32 session_id) { - struct hlist_head *session_list; + const struct l2tp_net *pn = l2tp_pernet(net); struct l2tp_session *session; - session_list = l2tp_session_id_hash(tunnel, session_id); - rcu_read_lock_bh(); - hlist_for_each_entry_rcu(session, session_list, hlist) - if (session->session_id == session_id) { - l2tp_session_inc_refcount(session); - rcu_read_unlock_bh(); + session = idr_find(&pn->l2tp_v3_session_idr, session_id); + if (session && !hash_hashed(&session->hlist) && + refcount_inc_not_zero(&session->ref_count)) { + rcu_read_unlock_bh(); + return session; + } - return session; - } - rcu_read_unlock_bh(); + /* If we get here and session is non-NULL, the session_id + * collides with one in another tunnel. If sk is non-NULL, + * find the session matching sk. 
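+ * Such collisions can only involve UDP-encap tunnels, since
+ * registration refuses a duplicate ID when either tunnel uses
+ * IP encap, so matching on the tunnel socket is enough to
+ * disambiguate.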
+ */ + if (session && sk) { + unsigned long key = l2tp_v3_session_hashkey(sk, session->session_id); - return NULL; -} -EXPORT_SYMBOL_GPL(l2tp_tunnel_get_session); - -struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id) -{ - struct hlist_head *session_list; - struct l2tp_session *session; - - session_list = l2tp_session_id_hash_2(l2tp_pernet(net), session_id); - - rcu_read_lock_bh(); - hlist_for_each_entry_rcu(session, session_list, global_hlist) - if (session->session_id == session_id) { - l2tp_session_inc_refcount(session); - rcu_read_unlock_bh(); - - return session; - } - rcu_read_unlock_bh(); - - return NULL; -} -EXPORT_SYMBOL_GPL(l2tp_session_get); - -struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth) -{ - int hash; - struct l2tp_session *session; - int count = 0; - - rcu_read_lock_bh(); - for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { - hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) { - if (++count > nth) { - l2tp_session_inc_refcount(session); + hash_for_each_possible_rcu(pn->l2tp_v3_session_htable, session, + hlist, key) { + if (session->tunnel->sock == sk && + refcount_inc_not_zero(&session->ref_count)) { rcu_read_unlock_bh(); return session; } } } + rcu_read_unlock_bh(); + return NULL; +} +EXPORT_SYMBOL_GPL(l2tp_v3_session_get); + +struct l2tp_session *l2tp_v2_session_get(const struct net *net, u16 tunnel_id, u16 session_id) +{ + u32 session_key = l2tp_v2_session_key(tunnel_id, session_id); + const struct l2tp_net *pn = l2tp_pernet(net); + struct l2tp_session *session; + + rcu_read_lock_bh(); + session = idr_find(&pn->l2tp_v2_session_idr, session_key); + if (session && refcount_inc_not_zero(&session->ref_count)) { + rcu_read_unlock_bh(); + return session; + } + rcu_read_unlock_bh(); + + return NULL; +} +EXPORT_SYMBOL_GPL(l2tp_v2_session_get); + +struct l2tp_session *l2tp_session_get(const struct net *net, struct sock *sk, int pver, + u32 tunnel_id, u32 session_id) +{ + if (pver == L2TP_HDR_VER_2) + return l2tp_v2_session_get(net, tunnel_id, session_id); + else + return l2tp_v3_session_get(net, sk, session_id); +} +EXPORT_SYMBOL_GPL(l2tp_session_get); + +struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth) +{ + struct l2tp_session *session; + int count = 0; + + rcu_read_lock_bh(); + list_for_each_entry_rcu(session, &tunnel->session_list, list) { + if (++count > nth) { + l2tp_session_inc_refcount(session); + rcu_read_unlock_bh(); + return session; + } + } rcu_read_unlock_bh(); return NULL; @@ -313,86 +316,188 @@ struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, const char *ifname) { struct l2tp_net *pn = l2tp_pernet(net); - int hash; + unsigned long tunnel_id, tmp; struct l2tp_session *session; + struct l2tp_tunnel *tunnel; rcu_read_lock_bh(); - for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) { - hlist_for_each_entry_rcu(session, &pn->l2tp_session_hlist[hash], global_hlist) { - if (!strcmp(session->ifname, ifname)) { - l2tp_session_inc_refcount(session); - rcu_read_unlock_bh(); + idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) { + if (tunnel) { + list_for_each_entry_rcu(session, &tunnel->session_list, list) { + if (!strcmp(session->ifname, ifname)) { + l2tp_session_inc_refcount(session); + rcu_read_unlock_bh(); - return session; + return session; + } } } } - rcu_read_unlock_bh(); return NULL; } EXPORT_SYMBOL_GPL(l2tp_session_get_by_ifname); +static void l2tp_session_coll_list_add(struct l2tp_session_coll_list *clist, + struct l2tp_session 
*session) +{ + l2tp_session_inc_refcount(session); + WARN_ON_ONCE(session->coll_list); + session->coll_list = clist; + spin_lock(&clist->lock); + list_add(&session->clist, &clist->list); + spin_unlock(&clist->lock); +} + +static int l2tp_session_collision_add(struct l2tp_net *pn, + struct l2tp_session *session1, + struct l2tp_session *session2) +{ + struct l2tp_session_coll_list *clist; + + lockdep_assert_held(&pn->l2tp_session_idr_lock); + + if (!session2) + return -EEXIST; + + /* If existing session is in IP-encap tunnel, refuse new session */ + if (session2->tunnel->encap == L2TP_ENCAPTYPE_IP) + return -EEXIST; + + clist = session2->coll_list; + if (!clist) { + /* First collision. Allocate list to manage the collided sessions + * and add the existing session to the list. + */ + clist = kmalloc(sizeof(*clist), GFP_ATOMIC); + if (!clist) + return -ENOMEM; + + spin_lock_init(&clist->lock); + INIT_LIST_HEAD(&clist->list); + refcount_set(&clist->ref_count, 1); + l2tp_session_coll_list_add(clist, session2); + } + + /* If existing session isn't already in the session hlist, add it. */ + if (!hash_hashed(&session2->hlist)) + hash_add(pn->l2tp_v3_session_htable, &session2->hlist, + session2->hlist_key); + + /* Add new session to the hlist and collision list */ + hash_add(pn->l2tp_v3_session_htable, &session1->hlist, + session1->hlist_key); + refcount_inc(&clist->ref_count); + l2tp_session_coll_list_add(clist, session1); + + return 0; +} + +static void l2tp_session_collision_del(struct l2tp_net *pn, + struct l2tp_session *session) +{ + struct l2tp_session_coll_list *clist = session->coll_list; + unsigned long session_key = session->session_id; + struct l2tp_session *session2; + + lockdep_assert_held(&pn->l2tp_session_idr_lock); + + hash_del(&session->hlist); + + if (clist) { + /* Remove session from its collision list. If there + * are other sessions with the same ID, replace this + * session's IDR entry with that session, otherwise + * remove the IDR entry. If this is the last session, + * the collision list data is freed. 
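+ * Note that the IDR slot for this ID therefore always points at
+ * one of the collided sessions for as long as any of them remain.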
+ */ + spin_lock(&clist->lock); + list_del_init(&session->clist); + session2 = list_first_entry_or_null(&clist->list, struct l2tp_session, clist); + if (session2) { + void *old = idr_replace(&pn->l2tp_v3_session_idr, session2, session_key); + + WARN_ON_ONCE(IS_ERR_VALUE(old)); + } else { + void *removed = idr_remove(&pn->l2tp_v3_session_idr, session_key); + + WARN_ON_ONCE(removed != session); + } + session->coll_list = NULL; + spin_unlock(&clist->lock); + if (refcount_dec_and_test(&clist->ref_count)) + kfree(clist); + l2tp_session_dec_refcount(session); + } +} + int l2tp_session_register(struct l2tp_session *session, struct l2tp_tunnel *tunnel) { - struct l2tp_session *session_walk; - struct hlist_head *g_head; - struct hlist_head *head; - struct l2tp_net *pn; + struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); + struct l2tp_session *other_session = NULL; + u32 session_key; int err; - head = l2tp_session_id_hash(tunnel, session->session_id); - - spin_lock_bh(&tunnel->hlist_lock); + spin_lock_bh(&tunnel->list_lock); if (!tunnel->acpt_newsess) { err = -ENODEV; goto err_tlock; } - hlist_for_each_entry(session_walk, head, hlist) - if (session_walk->session_id == session->session_id) { - err = -EEXIST; - goto err_tlock; - } - if (tunnel->version == L2TP_HDR_VER_3) { - pn = l2tp_pernet(tunnel->l2tp_net); - g_head = l2tp_session_id_hash_2(pn, session->session_id); - - spin_lock_bh(&pn->l2tp_session_hlist_lock); - + session_key = session->session_id; + spin_lock_bh(&pn->l2tp_session_idr_lock); + err = idr_alloc_u32(&pn->l2tp_v3_session_idr, NULL, + &session_key, session_key, GFP_ATOMIC); /* IP encap expects session IDs to be globally unique, while - * UDP encap doesn't. + * UDP encap doesn't. This isn't per the RFC, which says that + * sessions are identified only by the session ID, but is to + * support existing userspace which depends on it. 
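+ * For example, two UDP tunnels may each carry a session with
+ * ID 42: the first keeps the IDR slot, while both are hashed in
+ * l2tp_v3_session_htable, keyed by (tunnel socket, session ID),
+ * so that receive can tell them apart.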
*/ - hlist_for_each_entry(session_walk, g_head, global_hlist) - if (session_walk->session_id == session->session_id && - (session_walk->tunnel->encap == L2TP_ENCAPTYPE_IP || - tunnel->encap == L2TP_ENCAPTYPE_IP)) { - err = -EEXIST; - goto err_tlock_pnlock; - } - - l2tp_tunnel_inc_refcount(tunnel); - hlist_add_head_rcu(&session->global_hlist, g_head); - - spin_unlock_bh(&pn->l2tp_session_hlist_lock); + if (err == -ENOSPC && tunnel->encap == L2TP_ENCAPTYPE_UDP) { + other_session = idr_find(&pn->l2tp_v3_session_idr, + session_key); + err = l2tp_session_collision_add(pn, session, + other_session); + } + spin_unlock_bh(&pn->l2tp_session_idr_lock); } else { - l2tp_tunnel_inc_refcount(tunnel); + session_key = l2tp_v2_session_key(tunnel->tunnel_id, + session->session_id); + spin_lock_bh(&pn->l2tp_session_idr_lock); + err = idr_alloc_u32(&pn->l2tp_v2_session_idr, NULL, + &session_key, session_key, GFP_ATOMIC); + spin_unlock_bh(&pn->l2tp_session_idr_lock); } - hlist_add_head_rcu(&session->hlist, head); - spin_unlock_bh(&tunnel->hlist_lock); + if (err) { + if (err == -ENOSPC) + err = -EEXIST; + goto err_tlock; + } + + l2tp_tunnel_inc_refcount(tunnel); + + list_add(&session->list, &tunnel->session_list); + spin_unlock_bh(&tunnel->list_lock); + + spin_lock_bh(&pn->l2tp_session_idr_lock); + if (tunnel->version == L2TP_HDR_VER_3) { + if (!other_session) + idr_replace(&pn->l2tp_v3_session_idr, session, session_key); + } else { + idr_replace(&pn->l2tp_v2_session_idr, session, session_key); + } + spin_unlock_bh(&pn->l2tp_session_idr_lock); trace_register_session(session); return 0; -err_tlock_pnlock: - spin_unlock_bh(&pn->l2tp_session_hlist_lock); err_tlock: - spin_unlock_bh(&tunnel->hlist_lock); + spin_unlock_bh(&tunnel->list_lock); return err; } @@ -785,19 +890,14 @@ static void l2tp_session_queue_purge(struct l2tp_session *session) } } -/* Internal UDP receive frame. Do the real work of receiving an L2TP data frame - * here. The skb is not on a list when we get here. - * Returns 0 if the packet was a data packet and was successfully passed on. - * Returns 1 if the packet was not a good data packet and could not be - * forwarded. All such packets are passed up to userspace to deal with. - */ -static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) +/* UDP encapsulation receive handler. See net/ipv4/udp.c for details. */ +int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) { struct l2tp_session *session = NULL; - struct l2tp_tunnel *orig_tunnel = tunnel; + struct l2tp_tunnel *tunnel = NULL; + struct net *net = sock_net(sk); unsigned char *ptr, *optr; u16 hdrflags; - u32 tunnel_id, session_id; u16 version; int length; @@ -807,11 +907,8 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) __skb_pull(skb, sizeof(struct udphdr)); /* Short packet? 
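+ * A packet too short to carry a full data header may still be
+ * a valid control packet, so pass it up rather than drop it.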
*/ - if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) { - pr_debug_ratelimited("%s: recv short packet (len=%d)\n", - tunnel->name, skb->len); - goto invalid; - } + if (!pskb_may_pull(skb, L2TP_HDR_SIZE_MAX)) + goto pass; /* Point to L2TP header */ optr = skb->data; @@ -834,6 +931,8 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) ptr += 2; if (version == L2TP_HDR_VER_2) { + u16 tunnel_id, session_id; + /* If length is present, skip it */ if (hdrflags & L2TP_HDRFLAG_L) ptr += 2; @@ -841,49 +940,35 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) /* Extract tunnel and session ID */ tunnel_id = ntohs(*(__be16 *)ptr); ptr += 2; - - if (tunnel_id != tunnel->tunnel_id) { - /* We are receiving trafic for another tunnel, probably - * because we have several tunnels between the same - * IP/port quadruple, look it up. - */ - struct l2tp_tunnel *alt_tunnel; - - alt_tunnel = l2tp_tunnel_get(tunnel->l2tp_net, tunnel_id); - if (!alt_tunnel) - goto pass; - tunnel = alt_tunnel; - } - session_id = ntohs(*(__be16 *)ptr); ptr += 2; + + session = l2tp_v2_session_get(net, tunnel_id, session_id); } else { + u32 session_id; + ptr += 2; /* skip reserved bits */ - tunnel_id = tunnel->tunnel_id; session_id = ntohl(*(__be32 *)ptr); ptr += 4; + + session = l2tp_v3_session_get(net, sk, session_id); } - /* Check protocol version */ - if (version != tunnel->version) { - pr_debug_ratelimited("%s: recv protocol version mismatch: got %d expected %d\n", - tunnel->name, version, tunnel->version); - goto invalid; - } - - /* Find the session context */ - session = l2tp_tunnel_get_session(tunnel, session_id); if (!session || !session->recv_skb) { if (session) l2tp_session_dec_refcount(session); /* Not found? Pass to userspace to deal with */ - pr_debug_ratelimited("%s: no session found (%u/%u). Passing up.\n", - tunnel->name, tunnel_id, session_id); goto pass; } - if (tunnel->version == L2TP_HDR_VER_3 && + tunnel = session->tunnel; + + /* Check protocol version */ + if (version != tunnel->version) + goto invalid; + + if (version == L2TP_HDR_VER_3 && l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) { l2tp_session_dec_refcount(session); goto invalid; @@ -892,9 +977,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb) l2tp_recv_common(session, skb, ptr, optr, hdrflags, length); l2tp_session_dec_refcount(session); - if (tunnel != orig_tunnel) - l2tp_tunnel_dec_refcount(tunnel); - return 0; invalid: @@ -904,51 +986,14 @@ pass: /* Put UDP header back */ __skb_push(skb, sizeof(struct udphdr)); - if (tunnel != orig_tunnel) - l2tp_tunnel_dec_refcount(tunnel); - - return 1; -} - -/* UDP encapsulation receive and error receive handlers. - * See net/ipv4/udp.c for details. - * - * Note that these functions are called from inside an - * RCU-protected region, but without the socket being locked. - * - * Hence we use rcu_dereference_sk_user_data to access the - * tunnel data structure rather the usual l2tp_sk_to_tunnel - * accessor function. - */ -int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb) -{ - struct l2tp_tunnel *tunnel; - - tunnel = rcu_dereference_sk_user_data(sk); - if (!tunnel) - goto pass_up; - if (WARN_ON(tunnel->magic != L2TP_TUNNEL_MAGIC)) - goto pass_up; - - if (l2tp_udp_recv_core(tunnel, skb)) - goto pass_up; - - return 0; - -pass_up: return 1; } EXPORT_SYMBOL_GPL(l2tp_udp_encap_recv); +/* UDP encapsulation receive error handler. See net/ipv4/udp.c for details. 
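+ * No tunnel lookup is needed here any more: the handler only
+ * reports the error on the encap socket itself.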
*/ static void l2tp_udp_encap_err_recv(struct sock *sk, struct sk_buff *skb, int err, __be16 port, u32 info, u8 *payload) { - struct l2tp_tunnel *tunnel; - - tunnel = rcu_dereference_sk_user_data(sk); - if (!tunnel || tunnel->fd < 0) - return; - sk->sk_err = err; sk_error_report(sk); @@ -1206,26 +1251,36 @@ end: return; } -/* Remove an l2tp session from l2tp_core's hash lists. */ +/* Remove an l2tp session from l2tp_core's lists. */ static void l2tp_session_unhash(struct l2tp_session *session) { struct l2tp_tunnel *tunnel = session->tunnel; - /* Remove the session from core hashes */ if (tunnel) { - /* Remove from the per-tunnel hash */ - spin_lock_bh(&tunnel->hlist_lock); - hlist_del_init_rcu(&session->hlist); - spin_unlock_bh(&tunnel->hlist_lock); + struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); + struct l2tp_session *removed = session; - /* For L2TPv3 we have a per-net hash: remove from there, too */ - if (tunnel->version != L2TP_HDR_VER_2) { - struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); + /* Remove from the per-tunnel list */ + spin_lock_bh(&tunnel->list_lock); + list_del_init(&session->list); + spin_unlock_bh(&tunnel->list_lock); - spin_lock_bh(&pn->l2tp_session_hlist_lock); - hlist_del_init_rcu(&session->global_hlist); - spin_unlock_bh(&pn->l2tp_session_hlist_lock); + /* Remove from per-net IDR */ + spin_lock_bh(&pn->l2tp_session_idr_lock); + if (tunnel->version == L2TP_HDR_VER_3) { + if (hash_hashed(&session->hlist)) + l2tp_session_collision_del(pn, session); + else + removed = idr_remove(&pn->l2tp_v3_session_idr, + session->session_id); + } else { + u32 session_key = l2tp_v2_session_key(tunnel->tunnel_id, + session->session_id); + removed = idr_remove(&pn->l2tp_v2_session_idr, + session_key); } + WARN_ON_ONCE(removed && removed != session); + spin_unlock_bh(&pn->l2tp_session_idr_lock); synchronize_rcu(); } @@ -1236,28 +1291,22 @@ static void l2tp_session_unhash(struct l2tp_session *session) static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) { struct l2tp_session *session; - int hash; - spin_lock_bh(&tunnel->hlist_lock); + spin_lock_bh(&tunnel->list_lock); tunnel->acpt_newsess = false; - for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { -again: - hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) { - hlist_del_init_rcu(&session->hlist); - - spin_unlock_bh(&tunnel->hlist_lock); - l2tp_session_delete(session); - spin_lock_bh(&tunnel->hlist_lock); - - /* Now restart from the beginning of this hash - * chain. We always remove a session from the - * list so we are guaranteed to make forward - * progress. 
- */ - goto again; - } + for (;;) { + session = list_first_entry_or_null(&tunnel->session_list, + struct l2tp_session, list); + if (!session) + break; + l2tp_session_inc_refcount(session); + list_del_init(&session->list); + spin_unlock_bh(&tunnel->list_lock); + l2tp_session_delete(session); + spin_lock_bh(&tunnel->list_lock); + l2tp_session_dec_refcount(session); } - spin_unlock_bh(&tunnel->hlist_lock); + spin_unlock_bh(&tunnel->list_lock); } /* Tunnel socket destroy hook for UDP encapsulation */ @@ -1451,8 +1500,9 @@ int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, tunnel->magic = L2TP_TUNNEL_MAGIC; sprintf(&tunnel->name[0], "tunl %u", tunnel_id); - spin_lock_init(&tunnel->hlist_lock); + spin_lock_init(&tunnel->list_lock); tunnel->acpt_newsess = true; + INIT_LIST_HEAD(&tunnel->session_list); tunnel->encap = encap; @@ -1462,8 +1512,6 @@ int l2tp_tunnel_create(int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, /* Init delete workqueue struct */ INIT_WORK(&tunnel->del_work, l2tp_tunnel_del_work); - INIT_LIST_HEAD(&tunnel->list); - err = 0; err: if (tunnelp) @@ -1651,8 +1699,10 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn skb_queue_head_init(&session->reorder_q); + session->hlist_key = l2tp_v3_session_hashkey(tunnel->sock, session->session_id); INIT_HLIST_NODE(&session->hlist); - INIT_HLIST_NODE(&session->global_hlist); + INIT_LIST_HEAD(&session->clist); + INIT_LIST_HEAD(&session->list); if (cfg) { session->pwtype = cfg->pw_type; @@ -1685,15 +1735,13 @@ EXPORT_SYMBOL_GPL(l2tp_session_create); static __net_init int l2tp_init_net(struct net *net) { struct l2tp_net *pn = net_generic(net, l2tp_net_id); - int hash; idr_init(&pn->l2tp_tunnel_idr); spin_lock_init(&pn->l2tp_tunnel_idr_lock); - for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) - INIT_HLIST_HEAD(&pn->l2tp_session_hlist[hash]); - - spin_lock_init(&pn->l2tp_session_hlist_lock); + idr_init(&pn->l2tp_v2_session_idr); + idr_init(&pn->l2tp_v3_session_idr); + spin_lock_init(&pn->l2tp_session_idr_lock); return 0; } @@ -1703,7 +1751,6 @@ static __net_exit void l2tp_exit_net(struct net *net) struct l2tp_net *pn = l2tp_pernet(net); struct l2tp_tunnel *tunnel = NULL; unsigned long tunnel_id, tmp; - int hash; rcu_read_lock_bh(); idr_for_each_entry_ul(&pn->l2tp_tunnel_idr, tunnel, tmp, tunnel_id) { @@ -1716,8 +1763,8 @@ static __net_exit void l2tp_exit_net(struct net *net) flush_workqueue(l2tp_wq); rcu_barrier(); - for (hash = 0; hash < L2TP_HASH_SIZE_2; hash++) - WARN_ON_ONCE(!hlist_empty(&pn->l2tp_session_hlist[hash])); + idr_destroy(&pn->l2tp_v2_session_idr); + idr_destroy(&pn->l2tp_v3_session_idr); idr_destroy(&pn->l2tp_tunnel_idr); } diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 91ebf0a3f499..8ac81bc1bc6f 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -19,14 +19,6 @@ #define L2TP_TUNNEL_MAGIC 0x42114DDA #define L2TP_SESSION_MAGIC 0x0C04EB7D -/* Per tunnel session hash table size */ -#define L2TP_HASH_BITS 4 -#define L2TP_HASH_SIZE BIT(L2TP_HASH_BITS) - -/* System-wide session hash table size */ -#define L2TP_HASH_BITS_2 8 -#define L2TP_HASH_SIZE_2 BIT(L2TP_HASH_BITS_2) - struct sk_buff; struct l2tp_stats { @@ -61,10 +53,15 @@ struct l2tp_session_cfg { char *ifname; }; +struct l2tp_session_coll_list { + spinlock_t lock; /* for access to list */ + struct list_head list; + refcount_t ref_count; +}; + /* Represents a session (pseudowire) instance. * Tracks runtime state including cookies, dataplane packet sequencing, and IO statistics. 
- * Is linked into a per-tunnel session hashlist; and in the case of an L2TPv3 session into - * an additional per-net ("global") hashlist. + * Is linked into a per-tunnel session list and a per-net ("global") IDR tree. */ #define L2TP_SESSION_NAME_MAX 32 struct l2tp_session { @@ -88,8 +85,12 @@ struct l2tp_session { u32 nr_oos; /* NR of last OOS packet */ int nr_oos_count; /* for OOS recovery */ int nr_oos_count_max; - struct hlist_node hlist; /* hash list node */ + struct list_head list; /* per-tunnel list node */ refcount_t ref_count; + struct hlist_node hlist; /* per-net session hlist */ + unsigned long hlist_key; /* key for session hlist */ + struct l2tp_session_coll_list *coll_list; /* session collision list */ + struct list_head clist; /* for coll_list */ char name[L2TP_SESSION_NAME_MAX]; /* for logging */ char ifname[IFNAMSIZ]; @@ -102,7 +103,6 @@ struct l2tp_session { int reorder_skip; /* set if skip to next nr */ enum l2tp_pwtype pwtype; struct l2tp_stats stats; - struct hlist_node global_hlist; /* global hash list node */ /* Session receive handler for data packets. * Each pseudowire implementation should implement this callback in order to @@ -114,7 +114,7 @@ struct l2tp_session { /* Session close handler. * Each pseudowire implementation may implement this callback in order to carry * out pseudowire-specific shutdown actions. - * The callback is called by core after unhashing the session and purging its + * The callback is called by core after unlisting the session and purging its * reorder queue. */ void (*session_close)(struct l2tp_session *session); @@ -150,7 +150,7 @@ struct l2tp_tunnel_cfg { /* Represents a tunnel instance. * Tracks runtime state including IO statistics. * Holds the tunnel socket (either passed from userspace or directly created by the kernel). - * Maintains a hashlist of sessions belonging to the tunnel instance. + * Maintains a list of sessions belonging to the tunnel instance. * Is linked into a per-net list of tunnels. */ #define L2TP_TUNNEL_NAME_MAX 20 @@ -160,12 +160,11 @@ struct l2tp_tunnel { unsigned long dead; struct rcu_head rcu; - spinlock_t hlist_lock; /* write-protection for session_hlist */ + spinlock_t list_lock; /* write-protection for session_list */ bool acpt_newsess; /* indicates whether this tunnel accepts - * new sessions. Protected by hlist_lock. + * new sessions. Protected by list_lock. 
*/ - struct hlist_head session_hlist[L2TP_HASH_SIZE]; - /* hashed list of sessions, hashed by id */ + struct list_head session_list; /* list of sessions */ u32 tunnel_id; u32 peer_tunnel_id; int version; /* 2=>L2TPv2, 3=>L2TPv3 */ @@ -174,7 +173,6 @@ struct l2tp_tunnel { enum l2tp_encap_type encap; struct l2tp_stats stats; - struct list_head list; /* list node on per-namespace list of tunnels */ struct net *l2tp_net; /* the net we belong to */ refcount_t ref_count; @@ -224,10 +222,11 @@ void l2tp_session_dec_refcount(struct l2tp_session *session); */ struct l2tp_tunnel *l2tp_tunnel_get(const struct net *net, u32 tunnel_id); struct l2tp_tunnel *l2tp_tunnel_get_nth(const struct net *net, int nth); -struct l2tp_session *l2tp_tunnel_get_session(struct l2tp_tunnel *tunnel, - u32 session_id); -struct l2tp_session *l2tp_session_get(const struct net *net, u32 session_id); +struct l2tp_session *l2tp_v3_session_get(const struct net *net, struct sock *sk, u32 session_id); +struct l2tp_session *l2tp_v2_session_get(const struct net *net, u16 tunnel_id, u16 session_id); +struct l2tp_session *l2tp_session_get(const struct net *net, struct sock *sk, int pver, + u32 tunnel_id, u32 session_id); struct l2tp_session *l2tp_session_get_nth(struct l2tp_tunnel *tunnel, int nth); struct l2tp_session *l2tp_session_get_by_ifname(const struct net *net, const char *ifname); diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index 4595b56d175d..8755ae521154 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -123,17 +123,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) struct l2tp_tunnel *tunnel = v; struct l2tp_session *session; int session_count = 0; - int hash; rcu_read_lock_bh(); - for (hash = 0; hash < L2TP_HASH_SIZE; hash++) { - hlist_for_each_entry_rcu(session, &tunnel->session_hlist[hash], hlist) { - /* Session ID of zero is a dummy/reserved value used by pppol2tp */ - if (session->session_id == 0) - continue; + list_for_each_entry_rcu(session, &tunnel->session_list, list) { + /* Session ID of zero is a dummy/reserved value used by pppol2tp */ + if (session->session_id == 0) + continue; - session_count++; - } + session_count++; } rcu_read_unlock_bh(); diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 19c8cc5289d5..e48aa177d74c 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -140,7 +140,7 @@ static int l2tp_ip_recv(struct sk_buff *skb) } /* Ok, this is a data packet. Lookup the session. */ - session = l2tp_session_get(net, session_id); + session = l2tp_v3_session_get(net, NULL, session_id); if (!session) goto discard; diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 8780ec64f376..d217ff1f229e 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -150,7 +150,7 @@ static int l2tp_ip6_recv(struct sk_buff *skb) } /* Ok, this is a data packet. Lookup the session. 
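+ * No socket is passed for IP encap: session IDs are required to
+ * be globally unique there, so the ID alone is sufficient.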
*/ - session = l2tp_session_get(net, session_id); + session = l2tp_v3_session_get(net, NULL, session_id); if (!session) goto discard; diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index a901fd14fe3b..d105030520f9 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -61,7 +61,8 @@ static struct l2tp_session *l2tp_nl_session_get(struct genl_info *info) session_id = nla_get_u32(info->attrs[L2TP_ATTR_SESSION_ID]); tunnel = l2tp_tunnel_get(net, tunnel_id); if (tunnel) { - session = l2tp_tunnel_get_session(tunnel, session_id); + session = l2tp_session_get(net, tunnel->sock, tunnel->version, + tunnel_id, session_id); l2tp_tunnel_dec_refcount(tunnel); } } @@ -635,7 +636,8 @@ static int l2tp_nl_cmd_session_create(struct sk_buff *skb, struct genl_info *inf &cfg); if (ret >= 0) { - session = l2tp_tunnel_get_session(tunnel, session_id); + session = l2tp_session_get(net, tunnel->sock, tunnel->version, + tunnel_id, session_id); if (session) { ret = l2tp_session_notify(&l2tp_nl_family, info, session, L2TP_CMD_SESSION_CREATE); diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 6146e4e67bbb..3596290047b2 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -753,7 +753,8 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, if (tunnel->peer_tunnel_id == 0) tunnel->peer_tunnel_id = info.peer_tunnel_id; - session = l2tp_tunnel_get_session(tunnel, info.session_id); + session = l2tp_session_get(sock_net(sk), tunnel->sock, tunnel->version, + info.tunnel_id, info.session_id); if (session) { drop_refcnt = true; @@ -1045,7 +1046,8 @@ static int pppol2tp_tunnel_copy_stats(struct pppol2tp_ioc_stats *stats, /* If session_id is set, search the corresponding session in the * context of this tunnel and record the session's statistics. 
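+ * The tunnel's socket and version scope the lookup so that a
+ * colliding L2TPv3 session ID on another tunnel is not matched.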
*/ - session = l2tp_tunnel_get_session(tunnel, stats->session_id); + session = l2tp_session_get(tunnel->l2tp_net, tunnel->sock, tunnel->version, + tunnel->tunnel_id, stats->session_id); if (!session) return -EBADR; diff --git a/net/llc/llc_c_st.c b/net/llc/llc_c_st.c index 2467573b5f84..1c267db304df 100644 --- a/net/llc/llc_c_st.c +++ b/net/llc/llc_c_st.c @@ -42,7 +42,7 @@ static const llc_conn_action_t llc_common_actions_1[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_common_state_trans_1 = { +static const struct llc_conn_state_trans llc_common_state_trans_1 = { .ev = llc_conn_ev_disc_req, .next_state = LLC_CONN_STATE_D_CONN, .ev_qualifiers = NONE, @@ -59,7 +59,7 @@ static const llc_conn_action_t llc_common_actions_2[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_common_state_trans_2 = { +static const struct llc_conn_state_trans llc_common_state_trans_2 = { .ev = llc_conn_ev_rst_req, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = NONE, @@ -79,7 +79,7 @@ static const llc_conn_action_t llc_common_actions_3[] = { [8] = NULL, }; -static struct llc_conn_state_trans llc_common_state_trans_3 = { +static const struct llc_conn_state_trans llc_common_state_trans_3 = { .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, @@ -95,7 +95,7 @@ static const llc_conn_action_t llc_common_actions_4[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_common_state_trans_4 = { +static const struct llc_conn_state_trans llc_common_state_trans_4 = { .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = NONE, @@ -114,7 +114,7 @@ static const llc_conn_action_t llc_common_actions_5[] = { [7] = NULL, }; -static struct llc_conn_state_trans llc_common_state_trans_5 = { +static const struct llc_conn_state_trans llc_common_state_trans_5 = { .ev = llc_conn_ev_rx_frmr_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = NONE, @@ -129,7 +129,7 @@ static const llc_conn_action_t llc_common_actions_6[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_common_state_trans_6 = { +static const struct llc_conn_state_trans llc_common_state_trans_6 = { .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = NONE, @@ -145,7 +145,7 @@ static const llc_conn_action_t llc_common_actions_7a[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_common_state_trans_7a = { +static const struct llc_conn_state_trans llc_common_state_trans_7a = { .ev = llc_conn_ev_rx_zzz_cmd_pbit_set_x_inval_nr, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = NONE, @@ -161,7 +161,7 @@ static const llc_conn_action_t llc_common_actions_7b[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_common_state_trans_7b = { +static const struct llc_conn_state_trans llc_common_state_trans_7b = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_x_inval_ns, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = NONE, @@ -177,7 +177,7 @@ static const llc_conn_action_t llc_common_actions_8a[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_common_state_trans_8a = { +static const struct llc_conn_state_trans llc_common_state_trans_8a = { .ev = llc_conn_ev_rx_zzz_rsp_fbit_set_x_inval_nr, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = NONE, @@ -193,7 +193,7 @@ static const llc_conn_action_t llc_common_actions_8b[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_common_state_trans_8b = { +static const struct llc_conn_state_trans llc_common_state_trans_8b = { .ev 
= llc_conn_ev_rx_i_rsp_fbit_set_x_inval_ns, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = NONE, @@ -209,7 +209,7 @@ static const llc_conn_action_t llc_common_actions_8c[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_common_state_trans_8c = { +static const struct llc_conn_state_trans llc_common_state_trans_8c = { .ev = llc_conn_ev_rx_bad_pdu, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = NONE, @@ -225,7 +225,7 @@ static const llc_conn_action_t llc_common_actions_9[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_common_state_trans_9 = { +static const struct llc_conn_state_trans llc_common_state_trans_9 = { .ev = llc_conn_ev_rx_ua_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = NONE, @@ -247,7 +247,7 @@ static const llc_conn_action_t llc_common_actions_10[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_common_state_trans_10 = { +static const struct llc_conn_state_trans llc_common_state_trans_10 = { .ev = llc_conn_ev_rx_xxx_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = llc_common_ev_qfyrs_10, @@ -270,7 +270,7 @@ static const llc_conn_action_t llc_common_actions_11a[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_common_state_trans_11a = { +static const struct llc_conn_state_trans llc_common_state_trans_11a = { .ev = llc_conn_ev_p_tmr_exp, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = llc_common_ev_qfyrs_11a, @@ -292,7 +292,7 @@ static const llc_conn_action_t llc_common_actions_11b[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_common_state_trans_11b = { +static const struct llc_conn_state_trans llc_common_state_trans_11b = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = llc_common_ev_qfyrs_11b, @@ -314,7 +314,7 @@ static const llc_conn_action_t llc_common_actions_11c[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_common_state_trans_11c = { +static const struct llc_conn_state_trans llc_common_state_trans_11c = { .ev = llc_conn_ev_rej_tmr_exp, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = llc_common_ev_qfyrs_11c, @@ -336,7 +336,7 @@ static const llc_conn_action_t llc_common_actions_11d[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_common_state_trans_11d = { +static const struct llc_conn_state_trans llc_common_state_trans_11d = { .ev = llc_conn_ev_busy_tmr_exp, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = llc_common_ev_qfyrs_11d, @@ -347,7 +347,7 @@ static struct llc_conn_state_trans llc_common_state_trans_11d = { * Common dummy state transition; must be last entry for all state * transition groups - it'll be on .bss, so will be zeroed. 
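+ * (now that these objects are const they are expected to be
+ * placed in a read-only section such as .rodata instead, but
+ * they remain implicitly zero-initialized).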
*/ -static struct llc_conn_state_trans llc_common_state_trans_end; +static const struct llc_conn_state_trans llc_common_state_trans_end; /* LLC_CONN_STATE_ADM transitions */ /* State transitions for LLC_CONN_EV_CONN_REQ event */ @@ -359,7 +359,7 @@ static const llc_conn_action_t llc_adm_actions_1[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_adm_state_trans_1 = { +static const struct llc_conn_state_trans llc_adm_state_trans_1 = { .ev = llc_conn_ev_conn_req, .next_state = LLC_CONN_STATE_SETUP, .ev_qualifiers = NONE, @@ -378,7 +378,7 @@ static const llc_conn_action_t llc_adm_actions_2[] = { [7] = NULL, }; -static struct llc_conn_state_trans llc_adm_state_trans_2 = { +static const struct llc_conn_state_trans llc_adm_state_trans_2 = { .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, @@ -392,7 +392,7 @@ static const llc_conn_action_t llc_adm_actions_3[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_adm_state_trans_3 = { +static const struct llc_conn_state_trans llc_adm_state_trans_3 = { .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = NONE, @@ -406,7 +406,7 @@ static const llc_conn_action_t llc_adm_actions_4[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_adm_state_trans_4 = { +static const struct llc_conn_state_trans llc_adm_state_trans_4 = { .ev = llc_conn_ev_rx_xxx_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = NONE, @@ -419,7 +419,7 @@ static const llc_conn_action_t llc_adm_actions_5[] = { [1] = NULL, }; -static struct llc_conn_state_trans llc_adm_state_trans_5 = { +static const struct llc_conn_state_trans llc_adm_state_trans_5 = { .ev = llc_conn_ev_rx_any_frame, .next_state = LLC_CONN_OUT_OF_SVC, .ev_qualifiers = NONE, @@ -430,7 +430,7 @@ static struct llc_conn_state_trans llc_adm_state_trans_5 = { * Array of pointers; * one to each transition */ -static struct llc_conn_state_trans *llc_adm_state_transitions[] = { +static const struct llc_conn_state_trans *llc_adm_state_transitions[] = { [0] = &llc_adm_state_trans_1, /* Request */ [1] = &llc_common_state_trans_end, [2] = &llc_common_state_trans_end, /* local_busy */ @@ -453,7 +453,7 @@ static const llc_conn_action_t llc_setup_actions_1[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_setup_state_trans_1 = { +static const struct llc_conn_state_trans llc_setup_state_trans_1 = { .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_SETUP, .ev_qualifiers = NONE, @@ -477,7 +477,7 @@ static const llc_conn_action_t llc_setup_actions_2[] = { [6] = NULL, }; -static struct llc_conn_state_trans llc_setup_state_trans_2 = { +static const struct llc_conn_state_trans llc_setup_state_trans_2 = { .ev = llc_conn_ev_rx_ua_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_setup_ev_qfyrs_2, @@ -498,7 +498,7 @@ static const llc_conn_action_t llc_setup_actions_3[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_setup_state_trans_3 = { +static const struct llc_conn_state_trans llc_setup_state_trans_3 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_setup_ev_qfyrs_3, @@ -519,7 +519,7 @@ static const llc_conn_action_t llc_setup_actions_4[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_setup_state_trans_4 = { +static const struct llc_conn_state_trans llc_setup_state_trans_4 = { .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_setup_ev_qfyrs_4, 
@@ -539,7 +539,7 @@ static const llc_conn_action_t llc_setup_actions_5[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_setup_state_trans_5 = { +static const struct llc_conn_state_trans llc_setup_state_trans_5 = { .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_setup_ev_qfyrs_5, @@ -560,7 +560,7 @@ static const llc_conn_action_t llc_setup_actions_7[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_setup_state_trans_7 = { +static const struct llc_conn_state_trans llc_setup_state_trans_7 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_SETUP, .ev_qualifiers = llc_setup_ev_qfyrs_7, @@ -581,7 +581,7 @@ static const llc_conn_action_t llc_setup_actions_8[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_setup_state_trans_8 = { +static const struct llc_conn_state_trans llc_setup_state_trans_8 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_setup_ev_qfyrs_8, @@ -592,7 +592,7 @@ static struct llc_conn_state_trans llc_setup_state_trans_8 = { * Array of pointers; * one to each transition */ -static struct llc_conn_state_trans *llc_setup_state_transitions[] = { +static const struct llc_conn_state_trans *llc_setup_state_transitions[] = { [0] = &llc_common_state_trans_end, /* Request */ [1] = &llc_common_state_trans_end, /* local busy */ [2] = &llc_common_state_trans_end, /* init_pf_cycle */ @@ -622,7 +622,7 @@ static const llc_conn_action_t llc_normal_actions_1[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_1 = { +static const struct llc_conn_state_trans llc_normal_state_trans_1 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_1, @@ -643,7 +643,7 @@ static const llc_conn_action_t llc_normal_actions_2[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_2 = { +static const struct llc_conn_state_trans llc_normal_state_trans_2 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_2, @@ -660,7 +660,7 @@ static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_2_1[] = { /* just one member, NULL, .bss zeroes it */ static const llc_conn_action_t llc_normal_actions_2_1[1]; -static struct llc_conn_state_trans llc_normal_state_trans_2_1 = { +static const struct llc_conn_state_trans llc_normal_state_trans_2_1 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_2_1, @@ -680,7 +680,7 @@ static const llc_conn_action_t llc_normal_actions_3[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_3 = { +static const struct llc_conn_state_trans llc_normal_state_trans_3 = { .ev = llc_conn_ev_local_busy_detected, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_normal_ev_qfyrs_3, @@ -700,7 +700,7 @@ static const llc_conn_action_t llc_normal_actions_4[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_4 = { +static const struct llc_conn_state_trans llc_normal_state_trans_4 = { .ev = llc_conn_ev_local_busy_detected, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_normal_ev_qfyrs_4, @@ -723,7 +723,7 @@ static const llc_conn_action_t llc_normal_actions_5a[] = { [6] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_5a = { +static const struct llc_conn_state_trans llc_normal_state_trans_5a = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = 
llc_normal_ev_qfyrs_5a, @@ -746,7 +746,7 @@ static const llc_conn_action_t llc_normal_actions_5b[] = { [6] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_5b = { +static const struct llc_conn_state_trans llc_normal_state_trans_5b = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_normal_ev_qfyrs_5b, @@ -769,7 +769,7 @@ static const llc_conn_action_t llc_normal_actions_5c[] = { [6] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_5c = { +static const struct llc_conn_state_trans llc_normal_state_trans_5c = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_normal_ev_qfyrs_5c, @@ -790,7 +790,7 @@ static const llc_conn_action_t llc_normal_actions_6a[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_6a = { +static const struct llc_conn_state_trans llc_normal_state_trans_6a = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_normal_ev_qfyrs_6a, @@ -811,7 +811,7 @@ static const llc_conn_action_t llc_normal_actions_6b[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_6b = { +static const struct llc_conn_state_trans llc_normal_state_trans_6b = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_normal_ev_qfyrs_6b, @@ -827,7 +827,7 @@ static const llc_conn_action_t llc_normal_actions_7[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_7 = { +static const struct llc_conn_state_trans llc_normal_state_trans_7 = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, @@ -850,7 +850,7 @@ static const llc_conn_action_t llc_normal_actions_8[] = { [6] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_8a = { +static const struct llc_conn_state_trans llc_normal_state_trans_8a = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_8a, @@ -863,7 +863,7 @@ static const llc_conn_ev_qfyr_t llc_normal_ev_qfyrs_8b[] = { [1] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_8b = { +static const struct llc_conn_state_trans llc_normal_state_trans_8b = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_8b, @@ -884,7 +884,7 @@ static const llc_conn_action_t llc_normal_actions_9a[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_9a = { +static const struct llc_conn_state_trans llc_normal_state_trans_9a = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_9a, @@ -905,7 +905,7 @@ static const llc_conn_action_t llc_normal_actions_9b[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_9b = { +static const struct llc_conn_state_trans llc_normal_state_trans_9b = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_9b, @@ -922,7 +922,7 @@ static const llc_conn_action_t llc_normal_actions_10[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_10 = { +static const struct llc_conn_state_trans llc_normal_state_trans_10 = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, @@ -937,7 +937,7 @@ static const 
llc_conn_action_t llc_normal_actions_11a[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_11a = { +static const struct llc_conn_state_trans llc_normal_state_trans_11a = { .ev = llc_conn_ev_rx_rr_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, @@ -952,7 +952,7 @@ static const llc_conn_action_t llc_normal_actions_11b[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_11b = { +static const struct llc_conn_state_trans llc_normal_state_trans_11b = { .ev = llc_conn_ev_rx_rr_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, @@ -973,7 +973,7 @@ static const llc_conn_action_t llc_normal_actions_11c[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_11c = { +static const struct llc_conn_state_trans llc_normal_state_trans_11c = { .ev = llc_conn_ev_rx_rr_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_11c, @@ -990,7 +990,7 @@ static const llc_conn_action_t llc_normal_actions_12[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_12 = { +static const struct llc_conn_state_trans llc_normal_state_trans_12 = { .ev = llc_conn_ev_rx_rr_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, @@ -1005,7 +1005,7 @@ static const llc_conn_action_t llc_normal_actions_13a[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_13a = { +static const struct llc_conn_state_trans llc_normal_state_trans_13a = { .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, @@ -1020,7 +1020,7 @@ static const llc_conn_action_t llc_normal_actions_13b[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_13b = { +static const struct llc_conn_state_trans llc_normal_state_trans_13b = { .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, @@ -1040,7 +1040,7 @@ static const llc_conn_action_t llc_normal_actions_13c[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_13c = { +static const struct llc_conn_state_trans llc_normal_state_trans_13c = { .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_13c, @@ -1057,7 +1057,7 @@ static const llc_conn_action_t llc_normal_actions_14[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_14 = { +static const struct llc_conn_state_trans llc_normal_state_trans_14 = { .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, @@ -1080,7 +1080,7 @@ static const llc_conn_action_t llc_normal_actions_15a[] = { [6] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_15a = { +static const struct llc_conn_state_trans llc_normal_state_trans_15a = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_15a, @@ -1103,7 +1103,7 @@ static const llc_conn_action_t llc_normal_actions_15b[] = { [6] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_15b = { +static const struct llc_conn_state_trans llc_normal_state_trans_15b = { .ev = llc_conn_ev_rx_rej_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_15b, @@ -1125,7 +1125,7 @@ static const llc_conn_action_t llc_normal_actions_16a[] = { [5] = NULL, }; -static struct llc_conn_state_trans 
llc_normal_state_trans_16a = { +static const struct llc_conn_state_trans llc_normal_state_trans_16a = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_16a, @@ -1147,7 +1147,7 @@ static const llc_conn_action_t llc_normal_actions_16b[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_16b = { +static const struct llc_conn_state_trans llc_normal_state_trans_16b = { .ev = llc_conn_ev_rx_rej_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_16b, @@ -1164,7 +1164,7 @@ static const llc_conn_action_t llc_normal_actions_17[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_17 = { +static const struct llc_conn_state_trans llc_normal_state_trans_17 = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, @@ -1183,7 +1183,7 @@ static const llc_conn_action_t llc_normal_actions_18[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_18 = { +static const struct llc_conn_state_trans llc_normal_state_trans_18 = { .ev = llc_conn_ev_init_p_f_cycle, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_18, @@ -1205,7 +1205,7 @@ static const llc_conn_action_t llc_normal_actions_19[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_19 = { +static const struct llc_conn_state_trans llc_normal_state_trans_19 = { .ev = llc_conn_ev_p_tmr_exp, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = llc_normal_ev_qfyrs_19, @@ -1228,7 +1228,7 @@ static const llc_conn_action_t llc_normal_actions_20a[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_20a = { +static const struct llc_conn_state_trans llc_normal_state_trans_20a = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = llc_normal_ev_qfyrs_20a, @@ -1251,7 +1251,7 @@ static const llc_conn_action_t llc_normal_actions_20b[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_20b = { +static const struct llc_conn_state_trans llc_normal_state_trans_20b = { .ev = llc_conn_ev_busy_tmr_exp, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = llc_normal_ev_qfyrs_20b, @@ -1270,7 +1270,7 @@ static const llc_conn_action_t llc_normal_actions_21[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_normal_state_trans_21 = { +static const struct llc_conn_state_trans llc_normal_state_trans_21 = { .ev = llc_conn_ev_tx_buffer_full, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_normal_ev_qfyrs_21, @@ -1281,7 +1281,7 @@ static struct llc_conn_state_trans llc_normal_state_trans_21 = { * Array of pointers; * one to each transition */ -static struct llc_conn_state_trans *llc_normal_state_transitions[] = { +static const struct llc_conn_state_trans *llc_normal_state_transitions[] = { [0] = &llc_normal_state_trans_1, /* Requests */ [1] = &llc_normal_state_trans_2, [2] = &llc_normal_state_trans_2_1, @@ -1354,7 +1354,7 @@ static const llc_conn_action_t llc_busy_actions_1[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_1 = { +static const struct llc_conn_state_trans llc_busy_state_trans_1 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_1, @@ -1374,7 +1374,7 @@ static const llc_conn_action_t llc_busy_actions_2[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_2 = { +static const struct 
llc_conn_state_trans llc_busy_state_trans_2 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_2, @@ -1391,7 +1391,7 @@ static const llc_conn_ev_qfyr_t llc_busy_ev_qfyrs_2_1[] = { /* just one member, NULL, .bss zeroes it */ static const llc_conn_action_t llc_busy_actions_2_1[1]; -static struct llc_conn_state_trans llc_busy_state_trans_2_1 = { +static const struct llc_conn_state_trans llc_busy_state_trans_2_1 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_2_1, @@ -1411,7 +1411,7 @@ static const llc_conn_action_t llc_busy_actions_3[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_3 = { +static const struct llc_conn_state_trans llc_busy_state_trans_3 = { .ev = llc_conn_ev_local_busy_cleared, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_busy_ev_qfyrs_3, @@ -1431,7 +1431,7 @@ static const llc_conn_action_t llc_busy_actions_4[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_4 = { +static const struct llc_conn_state_trans llc_busy_state_trans_4 = { .ev = llc_conn_ev_local_busy_cleared, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_busy_ev_qfyrs_4, @@ -1450,7 +1450,7 @@ static const llc_conn_action_t llc_busy_actions_5[] = { [1] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_5 = { +static const struct llc_conn_state_trans llc_busy_state_trans_5 = { .ev = llc_conn_ev_local_busy_cleared, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_busy_ev_qfyrs_5, @@ -1469,7 +1469,7 @@ static const llc_conn_action_t llc_busy_actions_6[] = { [1] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_6 = { +static const struct llc_conn_state_trans llc_busy_state_trans_6 = { .ev = llc_conn_ev_local_busy_cleared, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_busy_ev_qfyrs_6, @@ -1488,7 +1488,7 @@ static const llc_conn_action_t llc_busy_actions_7[] = { [1] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_7 = { +static const struct llc_conn_state_trans llc_busy_state_trans_7 = { .ev = llc_conn_ev_local_busy_cleared, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_busy_ev_qfyrs_7, @@ -1507,7 +1507,7 @@ static const llc_conn_action_t llc_busy_actions_8[] = { [1] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_8 = { +static const struct llc_conn_state_trans llc_busy_state_trans_8 = { .ev = llc_conn_ev_local_busy_cleared, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_busy_ev_qfyrs_8, @@ -1529,7 +1529,7 @@ static const llc_conn_action_t llc_busy_actions_9a[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_9a = { +static const struct llc_conn_state_trans llc_busy_state_trans_9a = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_x_unexpd_ns, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_9a, @@ -1551,7 +1551,7 @@ static const llc_conn_action_t llc_busy_actions_9b[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_9b = { +static const struct llc_conn_state_trans llc_busy_state_trans_9b = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_9b, @@ -1571,7 +1571,7 @@ static const llc_conn_action_t llc_busy_actions_10a[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_10a = { +static const struct llc_conn_state_trans llc_busy_state_trans_10a = { .ev = 
llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_10a, @@ -1591,7 +1591,7 @@ static const llc_conn_action_t llc_busy_actions_10b[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_10b = { +static const struct llc_conn_state_trans llc_busy_state_trans_10b = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_10b, @@ -1606,7 +1606,7 @@ static const llc_conn_action_t llc_busy_actions_11[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_11 = { +static const struct llc_conn_state_trans llc_busy_state_trans_11 = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = NONE, @@ -1624,7 +1624,7 @@ static const llc_conn_action_t llc_busy_actions_12[] = { [6] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_12 = { +static const struct llc_conn_state_trans llc_busy_state_trans_12 = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = NONE, @@ -1649,7 +1649,7 @@ static const llc_conn_action_t llc_busy_actions_13a[] = { [8] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_13a = { +static const struct llc_conn_state_trans llc_busy_state_trans_13a = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_13a, @@ -1674,7 +1674,7 @@ static const llc_conn_action_t llc_busy_actions_13b[] = { [8] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_13b = { +static const struct llc_conn_state_trans llc_busy_state_trans_13b = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_13b, @@ -1697,7 +1697,7 @@ static const llc_conn_action_t llc_busy_actions_14a[] = { [6] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_14a = { +static const struct llc_conn_state_trans llc_busy_state_trans_14a = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_14a, @@ -1720,7 +1720,7 @@ static const llc_conn_action_t llc_busy_actions_14b[] = { [6] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_14b = { +static const struct llc_conn_state_trans llc_busy_state_trans_14b = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_14b, @@ -1735,7 +1735,7 @@ static const llc_conn_action_t llc_busy_actions_15a[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_15a = { +static const struct llc_conn_state_trans llc_busy_state_trans_15a = { .ev = llc_conn_ev_rx_rr_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = NONE, @@ -1750,7 +1750,7 @@ static const llc_conn_action_t llc_busy_actions_15b[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_15b = { +static const struct llc_conn_state_trans llc_busy_state_trans_15b = { .ev = llc_conn_ev_rx_rr_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = NONE, @@ -1770,7 +1770,7 @@ static const llc_conn_action_t llc_busy_actions_15c[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_15c = { +static const struct llc_conn_state_trans llc_busy_state_trans_15c = { .ev = llc_conn_ev_rx_rr_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_15c, @@ -1785,7 +1785,7 @@ static const 
llc_conn_action_t llc_busy_actions_16[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_16 = { +static const struct llc_conn_state_trans llc_busy_state_trans_16 = { .ev = llc_conn_ev_rx_rr_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = NONE, @@ -1800,7 +1800,7 @@ static const llc_conn_action_t llc_busy_actions_17a[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_17a = { +static const struct llc_conn_state_trans llc_busy_state_trans_17a = { .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = NONE, @@ -1815,7 +1815,7 @@ static const llc_conn_action_t llc_busy_actions_17b[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_17b = { +static const struct llc_conn_state_trans llc_busy_state_trans_17b = { .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = NONE, @@ -1835,7 +1835,7 @@ static const llc_conn_action_t llc_busy_actions_17c[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_17c = { +static const struct llc_conn_state_trans llc_busy_state_trans_17c = { .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_17c, @@ -1850,7 +1850,7 @@ static const llc_conn_action_t llc_busy_actions_18[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_18 = { +static const struct llc_conn_state_trans llc_busy_state_trans_18 = { .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = NONE, @@ -1872,7 +1872,7 @@ static const llc_conn_action_t llc_busy_actions_19a[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_19a = { +static const struct llc_conn_state_trans llc_busy_state_trans_19a = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_19a, @@ -1894,7 +1894,7 @@ static const llc_conn_action_t llc_busy_actions_19b[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_19b = { +static const struct llc_conn_state_trans llc_busy_state_trans_19b = { .ev = llc_conn_ev_rx_rej_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_19b, @@ -1915,7 +1915,7 @@ static const llc_conn_action_t llc_busy_actions_20a[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_20a = { +static const struct llc_conn_state_trans llc_busy_state_trans_20a = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_20a, @@ -1936,7 +1936,7 @@ static const llc_conn_action_t llc_busy_actions_20b[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_20b = { +static const struct llc_conn_state_trans llc_busy_state_trans_20b = { .ev = llc_conn_ev_rx_rej_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_20b, @@ -1953,7 +1953,7 @@ static const llc_conn_action_t llc_busy_actions_21[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_21 = { +static const struct llc_conn_state_trans llc_busy_state_trans_21 = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = NONE, @@ -1972,7 +1972,7 @@ static const llc_conn_action_t llc_busy_actions_22[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_22 = { +static const struct llc_conn_state_trans 
llc_busy_state_trans_22 = { .ev = llc_conn_ev_init_p_f_cycle, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_22, @@ -1993,7 +1993,7 @@ static const llc_conn_action_t llc_busy_actions_23[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_23 = { +static const struct llc_conn_state_trans llc_busy_state_trans_23 = { .ev = llc_conn_ev_p_tmr_exp, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_23, @@ -2015,7 +2015,7 @@ static const llc_conn_action_t llc_busy_actions_24a[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_24a = { +static const struct llc_conn_state_trans llc_busy_state_trans_24a = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_24a, @@ -2037,7 +2037,7 @@ static const llc_conn_action_t llc_busy_actions_24b[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_24b = { +static const struct llc_conn_state_trans llc_busy_state_trans_24b = { .ev = llc_conn_ev_busy_tmr_exp, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_24b, @@ -2060,7 +2060,7 @@ static const llc_conn_action_t llc_busy_actions_25[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_25 = { +static const struct llc_conn_state_trans llc_busy_state_trans_25 = { .ev = llc_conn_ev_rej_tmr_exp, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_25, @@ -2079,7 +2079,7 @@ static const llc_conn_action_t llc_busy_actions_26[] = { [1] = NULL, }; -static struct llc_conn_state_trans llc_busy_state_trans_26 = { +static const struct llc_conn_state_trans llc_busy_state_trans_26 = { .ev = llc_conn_ev_rej_tmr_exp, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_busy_ev_qfyrs_26, @@ -2090,7 +2090,7 @@ static struct llc_conn_state_trans llc_busy_state_trans_26 = { * Array of pointers; * one to each transition */ -static struct llc_conn_state_trans *llc_busy_state_transitions[] = { +static const struct llc_conn_state_trans *llc_busy_state_transitions[] = { [0] = &llc_common_state_trans_1, /* Request */ [1] = &llc_common_state_trans_2, [2] = &llc_busy_state_trans_1, @@ -2166,7 +2166,7 @@ static const llc_conn_action_t llc_reject_actions_1[] = { [1] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_1 = { +static const struct llc_conn_state_trans llc_reject_state_trans_1 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_reject_ev_qfyrs_1, @@ -2185,7 +2185,7 @@ static const llc_conn_action_t llc_reject_actions_2[] = { [1] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_2 = { +static const struct llc_conn_state_trans llc_reject_state_trans_2 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_reject_ev_qfyrs_2, @@ -2202,7 +2202,7 @@ static const llc_conn_ev_qfyr_t llc_reject_ev_qfyrs_2_1[] = { /* just one member, NULL, .bss zeroes it */ static const llc_conn_action_t llc_reject_actions_2_1[1]; -static struct llc_conn_state_trans llc_reject_state_trans_2_1 = { +static const struct llc_conn_state_trans llc_reject_state_trans_2_1 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_reject_ev_qfyrs_2_1, @@ -2222,7 +2222,7 @@ static const llc_conn_action_t llc_reject_actions_3[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_3 = { +static const struct llc_conn_state_trans llc_reject_state_trans_3 = { .ev 
= llc_conn_ev_local_busy_detected, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_reject_ev_qfyrs_3, @@ -2241,7 +2241,7 @@ static const llc_conn_action_t llc_reject_actions_4[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_4 = { +static const struct llc_conn_state_trans llc_reject_state_trans_4 = { .ev = llc_conn_ev_local_busy_detected, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = llc_reject_ev_qfyrs_4, @@ -2256,7 +2256,7 @@ static const llc_conn_action_t llc_reject_actions_5a[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_5a = { +static const struct llc_conn_state_trans llc_reject_state_trans_5a = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, @@ -2271,7 +2271,7 @@ static const llc_conn_action_t llc_reject_actions_5b[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_5b = { +static const struct llc_conn_state_trans llc_reject_state_trans_5b = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, @@ -2291,7 +2291,7 @@ static const llc_conn_action_t llc_reject_actions_5c[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_5c = { +static const struct llc_conn_state_trans llc_reject_state_trans_5c = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_reject_ev_qfyrs_5c, @@ -2305,7 +2305,7 @@ static const llc_conn_action_t llc_reject_actions_6[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_6 = { +static const struct llc_conn_state_trans llc_reject_state_trans_6 = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, @@ -2330,7 +2330,7 @@ static const llc_conn_action_t llc_reject_actions_7a[] = { }; -static struct llc_conn_state_trans llc_reject_state_trans_7a = { +static const struct llc_conn_state_trans llc_reject_state_trans_7a = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_reject_ev_qfyrs_7a, @@ -2354,7 +2354,7 @@ static const llc_conn_action_t llc_reject_actions_7b[] = { [7] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_7b = { +static const struct llc_conn_state_trans llc_reject_state_trans_7b = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_reject_ev_qfyrs_7b, @@ -2376,7 +2376,7 @@ static const llc_conn_action_t llc_reject_actions_8a[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_8a = { +static const struct llc_conn_state_trans llc_reject_state_trans_8a = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_reject_ev_qfyrs_8a, @@ -2398,7 +2398,7 @@ static const llc_conn_action_t llc_reject_actions_8b[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_8b = { +static const struct llc_conn_state_trans llc_reject_state_trans_8b = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_reject_ev_qfyrs_8b, @@ -2415,7 +2415,7 @@ static const llc_conn_action_t llc_reject_actions_9[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_9 = { +static const struct llc_conn_state_trans llc_reject_state_trans_9 = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, @@ 
-2430,7 +2430,7 @@ static const llc_conn_action_t llc_reject_actions_10a[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_10a = { +static const struct llc_conn_state_trans llc_reject_state_trans_10a = { .ev = llc_conn_ev_rx_rr_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, @@ -2445,7 +2445,7 @@ static const llc_conn_action_t llc_reject_actions_10b[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_10b = { +static const struct llc_conn_state_trans llc_reject_state_trans_10b = { .ev = llc_conn_ev_rx_rr_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, @@ -2465,7 +2465,7 @@ static const llc_conn_action_t llc_reject_actions_10c[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_10c = { +static const struct llc_conn_state_trans llc_reject_state_trans_10c = { .ev = llc_conn_ev_rx_rr_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_reject_ev_qfyrs_10c, @@ -2480,7 +2480,7 @@ static const llc_conn_action_t llc_reject_actions_11[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_11 = { +static const struct llc_conn_state_trans llc_reject_state_trans_11 = { .ev = llc_conn_ev_rx_rr_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, @@ -2495,7 +2495,7 @@ static const llc_conn_action_t llc_reject_actions_12a[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_12a = { +static const struct llc_conn_state_trans llc_reject_state_trans_12a = { .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, @@ -2510,7 +2510,7 @@ static const llc_conn_action_t llc_reject_actions_12b[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_12b = { +static const struct llc_conn_state_trans llc_reject_state_trans_12b = { .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, @@ -2530,7 +2530,7 @@ static const llc_conn_action_t llc_reject_actions_12c[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_12c = { +static const struct llc_conn_state_trans llc_reject_state_trans_12c = { .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_reject_ev_qfyrs_12c, @@ -2545,7 +2545,7 @@ static const llc_conn_action_t llc_reject_actions_13[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_13 = { +static const struct llc_conn_state_trans llc_reject_state_trans_13 = { .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, @@ -2567,7 +2567,7 @@ static const llc_conn_action_t llc_reject_actions_14a[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_14a = { +static const struct llc_conn_state_trans llc_reject_state_trans_14a = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_reject_ev_qfyrs_14a, @@ -2589,7 +2589,7 @@ static const llc_conn_action_t llc_reject_actions_14b[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_14b = { +static const struct llc_conn_state_trans llc_reject_state_trans_14b = { .ev = llc_conn_ev_rx_rej_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_reject_ev_qfyrs_14b, @@ -2610,7 +2610,7 @@ static const llc_conn_action_t llc_reject_actions_15a[] = { [4] = NULL, }; -static struct llc_conn_state_trans 
llc_reject_state_trans_15a = { +static const struct llc_conn_state_trans llc_reject_state_trans_15a = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_reject_ev_qfyrs_15a, @@ -2631,7 +2631,7 @@ static const llc_conn_action_t llc_reject_actions_15b[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_15b = { +static const struct llc_conn_state_trans llc_reject_state_trans_15b = { .ev = llc_conn_ev_rx_rej_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_reject_ev_qfyrs_15b, @@ -2647,7 +2647,7 @@ static const llc_conn_action_t llc_reject_actions_16[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_16 = { +static const struct llc_conn_state_trans llc_reject_state_trans_16 = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, @@ -2666,7 +2666,7 @@ static const llc_conn_action_t llc_reject_actions_17[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_17 = { +static const struct llc_conn_state_trans llc_reject_state_trans_17 = { .ev = llc_conn_ev_init_p_f_cycle, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_reject_ev_qfyrs_17, @@ -2688,7 +2688,7 @@ static const llc_conn_action_t llc_reject_actions_18[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_18 = { +static const struct llc_conn_state_trans llc_reject_state_trans_18 = { .ev = llc_conn_ev_rej_tmr_exp, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = llc_reject_ev_qfyrs_18, @@ -2710,7 +2710,7 @@ static const llc_conn_action_t llc_reject_actions_19[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_19 = { +static const struct llc_conn_state_trans llc_reject_state_trans_19 = { .ev = llc_conn_ev_p_tmr_exp, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = llc_reject_ev_qfyrs_19, @@ -2733,7 +2733,7 @@ static const llc_conn_action_t llc_reject_actions_20a[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_20a = { +static const struct llc_conn_state_trans llc_reject_state_trans_20a = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = llc_reject_ev_qfyrs_20a, @@ -2756,7 +2756,7 @@ static const llc_conn_action_t llc_reject_actions_20b[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_reject_state_trans_20b = { +static const struct llc_conn_state_trans llc_reject_state_trans_20b = { .ev = llc_conn_ev_busy_tmr_exp, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = llc_reject_ev_qfyrs_20b, @@ -2767,7 +2767,7 @@ static struct llc_conn_state_trans llc_reject_state_trans_20b = { * Array of pointers; * one to each transition */ -static struct llc_conn_state_trans *llc_reject_state_transitions[] = { +static const struct llc_conn_state_trans *llc_reject_state_transitions[] = { [0] = &llc_common_state_trans_1, /* Request */ [1] = &llc_common_state_trans_2, [2] = &llc_common_state_trans_end, @@ -2834,7 +2834,7 @@ static const llc_conn_ev_qfyr_t llc_await_ev_qfyrs_1_0[] = { /* just one member, NULL, .bss zeroes it */ static const llc_conn_action_t llc_await_actions_1_0[1]; -static struct llc_conn_state_trans llc_await_state_trans_1_0 = { +static const struct llc_conn_state_trans llc_await_state_trans_1_0 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = llc_await_ev_qfyrs_1_0, @@ -2848,7 +2848,7 @@ static const llc_conn_action_t llc_await_actions_1[] = { [2] = NULL, 
}; -static struct llc_conn_state_trans llc_await_state_trans_1 = { +static const struct llc_conn_state_trans llc_await_state_trans_1 = { .ev = llc_conn_ev_local_busy_detected, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, @@ -2867,7 +2867,7 @@ static const llc_conn_action_t llc_await_actions_2[] = { [7] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_2 = { +static const struct llc_conn_state_trans llc_await_state_trans_2 = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, @@ -2883,7 +2883,7 @@ static const llc_conn_action_t llc_await_actions_3a[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_3a = { +static const struct llc_conn_state_trans llc_await_state_trans_3a = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, @@ -2899,7 +2899,7 @@ static const llc_conn_action_t llc_await_actions_3b[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_3b = { +static const struct llc_conn_state_trans llc_await_state_trans_3b = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, @@ -2916,7 +2916,7 @@ static const llc_conn_action_t llc_await_actions_4[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_4 = { +static const struct llc_conn_state_trans llc_await_state_trans_4 = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, @@ -2935,7 +2935,7 @@ static const llc_conn_action_t llc_await_actions_5[] = { [7] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_5 = { +static const struct llc_conn_state_trans llc_await_state_trans_5 = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, @@ -2952,7 +2952,7 @@ static const llc_conn_action_t llc_await_actions_6a[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_6a = { +static const struct llc_conn_state_trans llc_await_state_trans_6a = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = NONE, @@ -2969,7 +2969,7 @@ static const llc_conn_action_t llc_await_actions_6b[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_6b = { +static const struct llc_conn_state_trans llc_await_state_trans_6b = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = NONE, @@ -2986,7 +2986,7 @@ static const llc_conn_action_t llc_await_actions_7[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_7 = { +static const struct llc_conn_state_trans llc_await_state_trans_7 = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = NONE, @@ -3003,7 +3003,7 @@ static const llc_conn_action_t llc_await_actions_8a[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_8a = { +static const struct llc_conn_state_trans llc_await_state_trans_8a = { .ev = llc_conn_ev_rx_rr_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, @@ -3020,7 +3020,7 @@ static const llc_conn_action_t llc_await_actions_8b[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_8b = { +static const struct llc_conn_state_trans llc_await_state_trans_8b = { .ev = llc_conn_ev_rx_rej_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_NORMAL, 
.ev_qualifiers = NONE, @@ -3035,7 +3035,7 @@ static const llc_conn_action_t llc_await_actions_9a[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_9a = { +static const struct llc_conn_state_trans llc_await_state_trans_9a = { .ev = llc_conn_ev_rx_rr_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = NONE, @@ -3050,7 +3050,7 @@ static const llc_conn_action_t llc_await_actions_9b[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_9b = { +static const struct llc_conn_state_trans llc_await_state_trans_9b = { .ev = llc_conn_ev_rx_rr_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = NONE, @@ -3065,7 +3065,7 @@ static const llc_conn_action_t llc_await_actions_9c[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_9c = { +static const struct llc_conn_state_trans llc_await_state_trans_9c = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = NONE, @@ -3080,7 +3080,7 @@ static const llc_conn_action_t llc_await_actions_9d[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_9d = { +static const struct llc_conn_state_trans llc_await_state_trans_9d = { .ev = llc_conn_ev_rx_rej_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = NONE, @@ -3096,7 +3096,7 @@ static const llc_conn_action_t llc_await_actions_10a[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_10a = { +static const struct llc_conn_state_trans llc_await_state_trans_10a = { .ev = llc_conn_ev_rx_rr_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = NONE, @@ -3112,7 +3112,7 @@ static const llc_conn_action_t llc_await_actions_10b[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_10b = { +static const struct llc_conn_state_trans llc_await_state_trans_10b = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = NONE, @@ -3128,7 +3128,7 @@ static const llc_conn_action_t llc_await_actions_11[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_11 = { +static const struct llc_conn_state_trans llc_await_state_trans_11 = { .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, @@ -3143,7 +3143,7 @@ static const llc_conn_action_t llc_await_actions_12a[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_12a = { +static const struct llc_conn_state_trans llc_await_state_trans_12a = { .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = NONE, @@ -3158,7 +3158,7 @@ static const llc_conn_action_t llc_await_actions_12b[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_12b = { +static const struct llc_conn_state_trans llc_await_state_trans_12b = { .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = NONE, @@ -3174,7 +3174,7 @@ static const llc_conn_action_t llc_await_actions_13[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_13 = { +static const struct llc_conn_state_trans llc_await_state_trans_13 = { .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = NONE, @@ -3194,7 +3194,7 @@ static const llc_conn_action_t llc_await_actions_14[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_state_trans_14 = { +static const struct llc_conn_state_trans 
llc_await_state_trans_14 = { .ev = llc_conn_ev_p_tmr_exp, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = llc_await_ev_qfyrs_14, @@ -3205,7 +3205,7 @@ static struct llc_conn_state_trans llc_await_state_trans_14 = { * Array of pointers; * one to each transition */ -static struct llc_conn_state_trans *llc_await_state_transitions[] = { +static const struct llc_conn_state_trans *llc_await_state_transitions[] = { [0] = &llc_common_state_trans_1, /* Request */ [1] = &llc_common_state_trans_2, [2] = &llc_await_state_trans_1_0, @@ -3263,7 +3263,7 @@ static const llc_conn_ev_qfyr_t llc_await_busy_ev_qfyrs_1_0[] = { /* just one member, NULL, .bss zeroes it */ static const llc_conn_action_t llc_await_busy_actions_1_0[1]; -static struct llc_conn_state_trans llc_await_busy_state_trans_1_0 = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_1_0 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = llc_await_busy_ev_qfyrs_1_0, @@ -3282,7 +3282,7 @@ static const llc_conn_action_t llc_await_busy_actions_1[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_1 = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_1 = { .ev = llc_conn_ev_local_busy_cleared, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = llc_await_busy_ev_qfyrs_1, @@ -3300,7 +3300,7 @@ static const llc_conn_action_t llc_await_busy_actions_2[] = { [1] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_2 = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_2 = { .ev = llc_conn_ev_local_busy_cleared, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = llc_await_busy_ev_qfyrs_2, @@ -3318,7 +3318,7 @@ static const llc_conn_action_t llc_await_busy_actions_3[] = { [1] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_3 = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_3 = { .ev = llc_conn_ev_local_busy_cleared, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = llc_await_busy_ev_qfyrs_3, @@ -3337,7 +3337,7 @@ static const llc_conn_action_t llc_await_busy_actions_4[] = { [7] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_4 = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_4 = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = NONE, @@ -3353,7 +3353,7 @@ static const llc_conn_action_t llc_await_busy_actions_5a[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_5a = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_5a = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, @@ -3369,7 +3369,7 @@ static const llc_conn_action_t llc_await_busy_actions_5b[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_5b = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_5b = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, @@ -3385,7 +3385,7 @@ static const llc_conn_action_t llc_await_busy_actions_6[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_6 = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_6 = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, @@ -3406,7 +3406,7 @@ static const 
llc_conn_action_t llc_await_busy_actions_7[] = { [9] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_7 = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_7 = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = NONE, @@ -3424,7 +3424,7 @@ static const llc_conn_action_t llc_await_busy_actions_8a[] = { [6] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_8a = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_8a = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, @@ -3442,7 +3442,7 @@ static const llc_conn_action_t llc_await_busy_actions_8b[] = { [6] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_8b = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_8b = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, @@ -3460,7 +3460,7 @@ static const llc_conn_action_t llc_await_busy_actions_9[] = { [6] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_9 = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_9 = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, @@ -3477,7 +3477,7 @@ static const llc_conn_action_t llc_await_busy_actions_10a[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_10a = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_10a = { .ev = llc_conn_ev_rx_rr_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = NONE, @@ -3494,7 +3494,7 @@ static const llc_conn_action_t llc_await_busy_actions_10b[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_10b = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_10b = { .ev = llc_conn_ev_rx_rej_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = NONE, @@ -3509,7 +3509,7 @@ static const llc_conn_action_t llc_await_busy_actions_11a[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_11a = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_11a = { .ev = llc_conn_ev_rx_rr_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, @@ -3524,7 +3524,7 @@ static const llc_conn_action_t llc_await_busy_actions_11b[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_11b = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_11b = { .ev = llc_conn_ev_rx_rr_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, @@ -3539,7 +3539,7 @@ static const llc_conn_action_t llc_await_busy_actions_11c[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_11c = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_11c = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, @@ -3554,7 +3554,7 @@ static const llc_conn_action_t llc_await_busy_actions_11d[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_11d = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_11d = { .ev = llc_conn_ev_rx_rej_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, @@ -3570,7 +3570,7 @@ static const llc_conn_action_t llc_await_busy_actions_12a[] = { [4] = 
NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_12a = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_12a = { .ev = llc_conn_ev_rx_rr_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, @@ -3586,7 +3586,7 @@ static const llc_conn_action_t llc_await_busy_actions_12b[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_12b = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_12b = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, @@ -3602,7 +3602,7 @@ static const llc_conn_action_t llc_await_busy_actions_13[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_13 = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_13 = { .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_BUSY, .ev_qualifiers = NONE, @@ -3617,7 +3617,7 @@ static const llc_conn_action_t llc_await_busy_actions_14a[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_14a = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_14a = { .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, @@ -3632,7 +3632,7 @@ static const llc_conn_action_t llc_await_busy_actions_14b[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_14b = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_14b = { .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, @@ -3648,7 +3648,7 @@ static const llc_conn_action_t llc_await_busy_actions_15[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_15 = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_15 = { .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, @@ -3668,7 +3668,7 @@ static const llc_conn_action_t llc_await_busy_actions_16[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_busy_state_trans_16 = { +static const struct llc_conn_state_trans llc_await_busy_state_trans_16 = { .ev = llc_conn_ev_p_tmr_exp, .next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = llc_await_busy_ev_qfyrs_16, @@ -3679,7 +3679,7 @@ static struct llc_conn_state_trans llc_await_busy_state_trans_16 = { * Array of pointers; * one to each transition */ -static struct llc_conn_state_trans *llc_await_busy_state_transitions[] = { +static const struct llc_conn_state_trans *llc_await_busy_state_transitions[] = { [0] = &llc_common_state_trans_1, /* Request */ [1] = &llc_common_state_trans_2, [2] = &llc_await_busy_state_trans_1_0, @@ -3739,7 +3739,7 @@ static const llc_conn_ev_qfyr_t llc_await_reject_ev_qfyrs_1_0[] = { /* just one member, NULL, .bss zeroes it */ static const llc_conn_action_t llc_await_reject_actions_1_0[1]; -static struct llc_conn_state_trans llc_await_reject_state_trans_1_0 = { +static const struct llc_conn_state_trans llc_await_reject_state_trans_1_0 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = llc_await_reject_ev_qfyrs_1_0, @@ -3753,7 +3753,7 @@ static const llc_conn_action_t llc_await_rejct_actions_1[] = { [2] = NULL }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_1 = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_1 = { .ev = llc_conn_ev_local_busy_detected, 
.next_state = LLC_CONN_STATE_AWAIT_BUSY, .ev_qualifiers = NONE, @@ -3767,7 +3767,7 @@ static const llc_conn_action_t llc_await_rejct_actions_2a[] = { [2] = NULL }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_2a = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_2a = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, @@ -3781,7 +3781,7 @@ static const llc_conn_action_t llc_await_rejct_actions_2b[] = { [2] = NULL }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_2b = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_2b = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_0_unexpd_ns, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, @@ -3796,7 +3796,7 @@ static const llc_conn_action_t llc_await_rejct_actions_3[] = { [3] = NULL }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_3 = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_3 = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_1_unexpd_ns, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, @@ -3816,7 +3816,7 @@ static const llc_conn_action_t llc_await_rejct_actions_4[] = { [8] = NULL, }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_4 = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_4 = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, @@ -3834,7 +3834,7 @@ static const llc_conn_action_t llc_await_rejct_actions_5a[] = { [6] = NULL, }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_5a = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_5a = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = NONE, @@ -3852,7 +3852,7 @@ static const llc_conn_action_t llc_await_rejct_actions_5b[] = { [6] = NULL, }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_5b = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_5b = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = NONE, @@ -3870,7 +3870,7 @@ static const llc_conn_action_t llc_await_rejct_actions_6[] = { [6] = NULL, }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_6 = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_6 = { .ev = llc_conn_ev_rx_i_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT, .ev_qualifiers = NONE, @@ -3887,7 +3887,7 @@ static const llc_conn_action_t llc_await_rejct_actions_7a[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_7a = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_7a = { .ev = llc_conn_ev_rx_rr_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, @@ -3904,7 +3904,7 @@ static const llc_conn_action_t llc_await_rejct_actions_7b[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_7b = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_7b = { .ev = llc_conn_ev_rx_rej_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, @@ -3921,7 +3921,7 @@ static const llc_conn_action_t llc_await_rejct_actions_7c[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_7c = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_7c = { .ev = llc_conn_ev_rx_i_rsp_fbit_set_1_unexpd_ns, .next_state = LLC_CONN_STATE_REJ, 
.ev_qualifiers = NONE, @@ -3936,7 +3936,7 @@ static const llc_conn_action_t llc_await_rejct_actions_8a[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_8a = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_8a = { .ev = llc_conn_ev_rx_rr_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, @@ -3951,7 +3951,7 @@ static const llc_conn_action_t llc_await_rejct_actions_8b[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_8b = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_8b = { .ev = llc_conn_ev_rx_rr_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, @@ -3966,7 +3966,7 @@ static const llc_conn_action_t llc_await_rejct_actions_8c[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_8c = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_8c = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, @@ -3981,7 +3981,7 @@ static const llc_conn_action_t llc_await_rejct_actions_8d[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_8d = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_8d = { .ev = llc_conn_ev_rx_rej_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, @@ -3997,7 +3997,7 @@ static const llc_conn_action_t llc_await_rejct_actions_9a[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_9a = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_9a = { .ev = llc_conn_ev_rx_rr_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, @@ -4013,7 +4013,7 @@ static const llc_conn_action_t llc_await_rejct_actions_9b[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_9b = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_9b = { .ev = llc_conn_ev_rx_rej_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, @@ -4029,7 +4029,7 @@ static const llc_conn_action_t llc_await_rejct_actions_10[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_10 = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_10 = { .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_1, .next_state = LLC_CONN_STATE_REJ, .ev_qualifiers = NONE, @@ -4044,7 +4044,7 @@ static const llc_conn_action_t llc_await_rejct_actions_11a[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_11a = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_11a = { .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, @@ -4059,7 +4059,7 @@ static const llc_conn_action_t llc_await_rejct_actions_11b[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_11b = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_11b = { .ev = llc_conn_ev_rx_rnr_rsp_fbit_set_0, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, @@ -4075,7 +4075,7 @@ static const llc_conn_action_t llc_await_rejct_actions_12[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_12 = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_12 = { .ev = llc_conn_ev_rx_rnr_cmd_pbit_set_1, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = NONE, @@ 
-4095,7 +4095,7 @@ static const llc_conn_action_t llc_await_rejct_actions_13[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_await_rejct_state_trans_13 = { +static const struct llc_conn_state_trans llc_await_rejct_state_trans_13 = { .ev = llc_conn_ev_p_tmr_exp, .next_state = LLC_CONN_STATE_AWAIT_REJ, .ev_qualifiers = llc_await_rejct_ev_qfyrs_13, @@ -4106,7 +4106,7 @@ static struct llc_conn_state_trans llc_await_rejct_state_trans_13 = { * Array of pointers; * one to each transition */ -static struct llc_conn_state_trans *llc_await_rejct_state_transitions[] = { +static const struct llc_conn_state_trans *llc_await_rejct_state_transitions[] = { [0] = &llc_await_reject_state_trans_1_0, [1] = &llc_common_state_trans_1, /* requests */ [2] = &llc_common_state_trans_2, @@ -4171,7 +4171,7 @@ static const llc_conn_action_t llc_d_conn_actions_1[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_d_conn_state_trans_1 = { +static const struct llc_conn_state_trans llc_d_conn_state_trans_1 = { .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_d_conn_ev_qfyrs_1, @@ -4194,7 +4194,7 @@ static const llc_conn_action_t llc_d_conn_actions_1_1[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_d_conn_state_trans_1_1 = { +static const struct llc_conn_state_trans llc_d_conn_state_trans_1_1 = { .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_d_conn_ev_qfyrs_1_1, @@ -4218,7 +4218,7 @@ static const llc_conn_action_t llc_d_conn_actions_2[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_d_conn_state_trans_2 = { +static const struct llc_conn_state_trans llc_d_conn_state_trans_2 = { .ev = llc_conn_ev_rx_ua_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_d_conn_ev_qfyrs_2, @@ -4241,7 +4241,7 @@ static const llc_conn_action_t llc_d_conn_actions_2_1[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_d_conn_state_trans_2_1 = { +static const struct llc_conn_state_trans llc_d_conn_state_trans_2_1 = { .ev = llc_conn_ev_rx_ua_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_d_conn_ev_qfyrs_2_1, @@ -4254,7 +4254,7 @@ static const llc_conn_action_t llc_d_conn_actions_3[] = { [1] = NULL, }; -static struct llc_conn_state_trans llc_d_conn_state_trans_3 = { +static const struct llc_conn_state_trans llc_d_conn_state_trans_3 = { .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_D_CONN, .ev_qualifiers = NONE, @@ -4277,7 +4277,7 @@ static const llc_conn_action_t llc_d_conn_actions_4[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_d_conn_state_trans_4 = { +static const struct llc_conn_state_trans llc_d_conn_state_trans_4 = { .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_d_conn_ev_qfyrs_4, @@ -4299,7 +4299,7 @@ static const llc_conn_action_t llc_d_conn_actions_4_1[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_d_conn_state_trans_4_1 = { +static const struct llc_conn_state_trans llc_d_conn_state_trans_4_1 = { .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_d_conn_ev_qfyrs_4_1, @@ -4318,7 +4318,7 @@ static const llc_conn_ev_qfyr_t llc_d_conn_ev_qfyrs_5[] = { /* just one member, NULL, .bss zeroes it */ static const llc_conn_action_t llc_d_conn_actions_5[1]; -static struct llc_conn_state_trans llc_d_conn_state_trans_5 = { +static const struct llc_conn_state_trans llc_d_conn_state_trans_5 = { .ev = 
llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_D_CONN, .ev_qualifiers = llc_d_conn_ev_qfyrs_5, @@ -4338,7 +4338,7 @@ static const llc_conn_action_t llc_d_conn_actions_6[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_d_conn_state_trans_6 = { +static const struct llc_conn_state_trans llc_d_conn_state_trans_6 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_D_CONN, .ev_qualifiers = llc_d_conn_ev_qfyrs_6, @@ -4359,7 +4359,7 @@ static const llc_conn_action_t llc_d_conn_actions_7[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_d_conn_state_trans_7 = { +static const struct llc_conn_state_trans llc_d_conn_state_trans_7 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_d_conn_ev_qfyrs_7, @@ -4379,7 +4379,7 @@ static const llc_conn_action_t llc_d_conn_actions_8[] = { [1] = NULL, }; -static struct llc_conn_state_trans llc_d_conn_state_trans_8 = { +static const struct llc_conn_state_trans llc_d_conn_state_trans_8 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_d_conn_ev_qfyrs_8, @@ -4390,7 +4390,7 @@ static struct llc_conn_state_trans llc_d_conn_state_trans_8 = { * Array of pointers; * one to each transition */ -static struct llc_conn_state_trans *llc_d_conn_state_transitions[] = { +static const struct llc_conn_state_trans *llc_d_conn_state_transitions[] = { [0] = &llc_d_conn_state_trans_5, /* Request */ [1] = &llc_common_state_trans_end, [2] = &llc_common_state_trans_end, /* Local busy */ @@ -4419,7 +4419,7 @@ static const llc_conn_action_t llc_rst_actions_1[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_rst_state_trans_1 = { +static const struct llc_conn_state_trans llc_rst_state_trans_1 = { .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = NONE, @@ -4447,7 +4447,7 @@ static const llc_conn_action_t llc_rst_actions_2[] = { [7] = NULL, }; -static struct llc_conn_state_trans llc_rst_state_trans_2 = { +static const struct llc_conn_state_trans llc_rst_state_trans_2 = { .ev = llc_conn_ev_rx_ua_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_rst_ev_qfyrs_2, @@ -4475,7 +4475,7 @@ static const llc_conn_action_t llc_rst_actions_2_1[] = { [7] = NULL, }; -static struct llc_conn_state_trans llc_rst_state_trans_2_1 = { +static const struct llc_conn_state_trans llc_rst_state_trans_2_1 = { .ev = llc_conn_ev_rx_ua_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_rst_ev_qfyrs_2_1, @@ -4495,7 +4495,7 @@ static const llc_conn_action_t llc_rst_actions_3[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_rst_state_trans_3 = { +static const struct llc_conn_state_trans llc_rst_state_trans_3 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = llc_rst_ev_qfyrs_3, @@ -4518,7 +4518,7 @@ static const llc_conn_action_t llc_rst_actions_4[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_rst_state_trans_4 = { +static const struct llc_conn_state_trans llc_rst_state_trans_4 = { .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_rst_ev_qfyrs_4, @@ -4541,7 +4541,7 @@ static const llc_conn_action_t llc_rst_actions_4_1[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_rst_state_trans_4_1 = { +static const struct llc_conn_state_trans llc_rst_state_trans_4_1 = { .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_rst_ev_qfyrs_4_1, @@ -4564,7 
+4564,7 @@ static const llc_conn_action_t llc_rst_actions_5[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_rst_state_trans_5 = { +static const struct llc_conn_state_trans llc_rst_state_trans_5 = { .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_rst_ev_qfyrs_5, @@ -4586,7 +4586,7 @@ static const llc_conn_action_t llc_rst_actions_5_1[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_rst_state_trans_5_1 = { +static const struct llc_conn_state_trans llc_rst_state_trans_5_1 = { .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_rst_ev_qfyrs_5_1, @@ -4602,7 +4602,7 @@ static const llc_conn_ev_qfyr_t llc_rst_ev_qfyrs_6[] = { /* just one member, NULL, .bss zeroes it */ static const llc_conn_action_t llc_rst_actions_6[1]; -static struct llc_conn_state_trans llc_rst_state_trans_6 = { +static const struct llc_conn_state_trans llc_rst_state_trans_6 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = llc_rst_ev_qfyrs_6, @@ -4623,7 +4623,7 @@ static const llc_conn_action_t llc_rst_actions_7[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_rst_state_trans_7 = { +static const struct llc_conn_state_trans llc_rst_state_trans_7 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = llc_rst_ev_qfyrs_7, @@ -4644,7 +4644,7 @@ static const llc_conn_action_t llc_rst_actions_8[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_rst_state_trans_8 = { +static const struct llc_conn_state_trans llc_rst_state_trans_8 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_rst_ev_qfyrs_8, @@ -4665,7 +4665,7 @@ static const llc_conn_action_t llc_rst_actions_8_1[] = { [2] = NULL, }; -static struct llc_conn_state_trans llc_rst_state_trans_8_1 = { +static const struct llc_conn_state_trans llc_rst_state_trans_8_1 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = llc_rst_ev_qfyrs_8_1, @@ -4676,7 +4676,7 @@ static struct llc_conn_state_trans llc_rst_state_trans_8_1 = { * Array of pointers; * one to each transition */ -static struct llc_conn_state_trans *llc_rst_state_transitions[] = { +static const struct llc_conn_state_trans *llc_rst_state_transitions[] = { [0] = &llc_rst_state_trans_6, /* Request */ [1] = &llc_common_state_trans_end, [2] = &llc_common_state_trans_end, /* Local busy */ @@ -4710,7 +4710,7 @@ static const llc_conn_action_t llc_error_actions_1[] = { [8] = NULL, }; -static struct llc_conn_state_trans llc_error_state_trans_1 = { +static const struct llc_conn_state_trans llc_error_state_trans_1 = { .ev = llc_conn_ev_rx_sabme_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_NORMAL, .ev_qualifiers = NONE, @@ -4726,7 +4726,7 @@ static const llc_conn_action_t llc_error_actions_2[] = { [4] = NULL, }; -static struct llc_conn_state_trans llc_error_state_trans_2 = { +static const struct llc_conn_state_trans llc_error_state_trans_2 = { .ev = llc_conn_ev_rx_disc_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = NONE, @@ -4741,7 +4741,7 @@ static const llc_conn_action_t llc_error_actions_3[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_error_state_trans_3 = { +static const struct llc_conn_state_trans llc_error_state_trans_3 = { .ev = llc_conn_ev_rx_dm_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = NONE, @@ -4757,7 +4757,7 @@ static const llc_conn_action_t llc_error_actions_4[] = { [4] = NULL, }; -static struct 
llc_conn_state_trans llc_error_state_trans_4 = { +static const struct llc_conn_state_trans llc_error_state_trans_4 = { .ev = llc_conn_ev_rx_frmr_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = NONE, @@ -4770,7 +4770,7 @@ static const llc_conn_action_t llc_error_actions_5[] = { [1] = NULL, }; -static struct llc_conn_state_trans llc_error_state_trans_5 = { +static const struct llc_conn_state_trans llc_error_state_trans_5 = { .ev = llc_conn_ev_rx_xxx_cmd_pbit_set_x, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = NONE, @@ -4778,7 +4778,7 @@ static struct llc_conn_state_trans llc_error_state_trans_5 = { }; /* State transitions for LLC_CONN_EV_RX_XXX_RSP_Fbit_SET_X event */ -static struct llc_conn_state_trans llc_error_state_trans_6 = { +static const struct llc_conn_state_trans llc_error_state_trans_6 = { .ev = llc_conn_ev_rx_xxx_rsp_fbit_set_x, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = NONE, @@ -4798,7 +4798,7 @@ static const llc_conn_action_t llc_error_actions_7[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_error_state_trans_7 = { +static const struct llc_conn_state_trans llc_error_state_trans_7 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = llc_error_ev_qfyrs_7, @@ -4820,7 +4820,7 @@ static const llc_conn_action_t llc_error_actions_8[] = { [5] = NULL, }; -static struct llc_conn_state_trans llc_error_state_trans_8 = { +static const struct llc_conn_state_trans llc_error_state_trans_8 = { .ev = llc_conn_ev_ack_tmr_exp, .next_state = LLC_CONN_STATE_RESET, .ev_qualifiers = llc_error_ev_qfyrs_8, @@ -4836,7 +4836,7 @@ static const llc_conn_ev_qfyr_t llc_error_ev_qfyrs_9[] = { /* just one member, NULL, .bss zeroes it */ static const llc_conn_action_t llc_error_actions_9[1]; -static struct llc_conn_state_trans llc_error_state_trans_9 = { +static const struct llc_conn_state_trans llc_error_state_trans_9 = { .ev = llc_conn_ev_data_req, .next_state = LLC_CONN_STATE_ERROR, .ev_qualifiers = llc_error_ev_qfyrs_9, @@ -4847,7 +4847,7 @@ static struct llc_conn_state_trans llc_error_state_trans_9 = { * Array of pointers; * one to each transition */ -static struct llc_conn_state_trans *llc_error_state_transitions[] = { +static const struct llc_conn_state_trans *llc_error_state_transitions[] = { [0] = &llc_error_state_trans_9, /* Request */ [1] = &llc_common_state_trans_end, [2] = &llc_common_state_trans_end, /* Local busy */ @@ -4873,7 +4873,7 @@ static const llc_conn_action_t llc_temp_actions_1[] = { [3] = NULL, }; -static struct llc_conn_state_trans llc_temp_state_trans_1 = { +static const struct llc_conn_state_trans llc_temp_state_trans_1 = { .ev = llc_conn_ev_disc_req, .next_state = LLC_CONN_STATE_ADM, .ev_qualifiers = NONE, @@ -4884,7 +4884,7 @@ static struct llc_conn_state_trans llc_temp_state_trans_1 = { * Array of pointers; * one to each transition */ -static struct llc_conn_state_trans *llc_temp_state_transitions[] = { +static const struct llc_conn_state_trans *llc_temp_state_transitions[] = { [0] = &llc_temp_state_trans_1, /* requests */ [1] = &llc_common_state_trans_end, [2] = &llc_common_state_trans_end, /* local busy */ diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index 0a3f5e0bec00..afc6974eafda 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -34,10 +34,10 @@ static int llc_find_offset(int state, int ev_type); static void llc_conn_send_pdus(struct sock *sk); static int llc_conn_service(struct sock *sk, struct sk_buff *skb); static int llc_exec_conn_trans_actions(struct sock *sk, - 
struct llc_conn_state_trans *trans, + const struct llc_conn_state_trans *trans, struct sk_buff *ev); -static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk, - struct sk_buff *skb); +static const struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk, + struct sk_buff *skb); /* Offset table on connection states transition diagram */ static int llc_offset_table[NBR_CONN_STATES][NBR_CONN_EV]; @@ -356,9 +356,9 @@ static void llc_conn_send_pdus(struct sock *sk) */ static int llc_conn_service(struct sock *sk, struct sk_buff *skb) { - int rc = 1; + const struct llc_conn_state_trans *trans; struct llc_sock *llc = llc_sk(sk); - struct llc_conn_state_trans *trans; + int rc = 1; if (llc->state > NBR_CONN_STATES) goto out; @@ -384,10 +384,10 @@ out: * This function finds transition that matches with happened event. * Returns pointer to found transition on success, %NULL otherwise. */ -static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk, - struct sk_buff *skb) +static const struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk, + struct sk_buff *skb) { - struct llc_conn_state_trans **next_trans; + const struct llc_conn_state_trans **next_trans; const llc_conn_ev_qfyr_t *next_qualifier; struct llc_conn_state_ev *ev = llc_conn_ev(skb); struct llc_sock *llc = llc_sk(sk); @@ -432,7 +432,7 @@ static struct llc_conn_state_trans *llc_qualify_conn_ev(struct sock *sk, * success, 1 to indicate failure of at least one action. */ static int llc_exec_conn_trans_actions(struct sock *sk, - struct llc_conn_state_trans *trans, + const struct llc_conn_state_trans *trans, struct sk_buff *skb) { int rc = 0; @@ -635,8 +635,8 @@ u8 llc_data_accept_state(u8 state) */ static u16 __init llc_find_next_offset(struct llc_conn_state *state, u16 offset) { + const struct llc_conn_state_trans **next_trans; u16 cnt = 0; - struct llc_conn_state_trans **next_trans; for (next_trans = state->transitions + offset; (*next_trans)->ev; next_trans++) diff --git a/net/llc/llc_s_st.c b/net/llc/llc_s_st.c index 308c616883a4..acccc827c562 100644 --- a/net/llc/llc_s_st.c +++ b/net/llc/llc_s_st.c @@ -24,7 +24,7 @@ * last entry for this state * all members are zeros, .bss zeroes it */ -static struct llc_sap_state_trans llc_sap_state_trans_end; +static const struct llc_sap_state_trans llc_sap_state_trans_end; /* state LLC_SAP_STATE_INACTIVE transition for * LLC_SAP_EV_ACTIVATION_REQ event @@ -34,14 +34,14 @@ static const llc_sap_action_t llc_sap_inactive_state_actions_1[] = { [1] = NULL, }; -static struct llc_sap_state_trans llc_sap_inactive_state_trans_1 = { +static const struct llc_sap_state_trans llc_sap_inactive_state_trans_1 = { .ev = llc_sap_ev_activation_req, .next_state = LLC_SAP_STATE_ACTIVE, .ev_actions = llc_sap_inactive_state_actions_1, }; /* array of pointers; one to each transition */ -static struct llc_sap_state_trans *llc_sap_inactive_state_transitions[] = { +static const struct llc_sap_state_trans *llc_sap_inactive_state_transitions[] = { [0] = &llc_sap_inactive_state_trans_1, [1] = &llc_sap_state_trans_end, }; @@ -52,7 +52,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_1[] = { [1] = NULL, }; -static struct llc_sap_state_trans llc_sap_active_state_trans_1 = { +static const struct llc_sap_state_trans llc_sap_active_state_trans_1 = { .ev = llc_sap_ev_rx_ui, .next_state = LLC_SAP_STATE_ACTIVE, .ev_actions = llc_sap_active_state_actions_1, @@ -64,7 +64,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_2[] = { [1] = NULL, }; -static struct 
llc_sap_state_trans llc_sap_active_state_trans_2 = { +static const struct llc_sap_state_trans llc_sap_active_state_trans_2 = { .ev = llc_sap_ev_unitdata_req, .next_state = LLC_SAP_STATE_ACTIVE, .ev_actions = llc_sap_active_state_actions_2, @@ -76,7 +76,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_3[] = { [1] = NULL, }; -static struct llc_sap_state_trans llc_sap_active_state_trans_3 = { +static const struct llc_sap_state_trans llc_sap_active_state_trans_3 = { .ev = llc_sap_ev_xid_req, .next_state = LLC_SAP_STATE_ACTIVE, .ev_actions = llc_sap_active_state_actions_3, @@ -88,7 +88,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_4[] = { [1] = NULL, }; -static struct llc_sap_state_trans llc_sap_active_state_trans_4 = { +static const struct llc_sap_state_trans llc_sap_active_state_trans_4 = { .ev = llc_sap_ev_rx_xid_c, .next_state = LLC_SAP_STATE_ACTIVE, .ev_actions = llc_sap_active_state_actions_4, @@ -100,7 +100,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_5[] = { [1] = NULL, }; -static struct llc_sap_state_trans llc_sap_active_state_trans_5 = { +static const struct llc_sap_state_trans llc_sap_active_state_trans_5 = { .ev = llc_sap_ev_rx_xid_r, .next_state = LLC_SAP_STATE_ACTIVE, .ev_actions = llc_sap_active_state_actions_5, @@ -112,7 +112,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_6[] = { [1] = NULL, }; -static struct llc_sap_state_trans llc_sap_active_state_trans_6 = { +static const struct llc_sap_state_trans llc_sap_active_state_trans_6 = { .ev = llc_sap_ev_test_req, .next_state = LLC_SAP_STATE_ACTIVE, .ev_actions = llc_sap_active_state_actions_6, @@ -124,7 +124,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_7[] = { [1] = NULL, }; -static struct llc_sap_state_trans llc_sap_active_state_trans_7 = { +static const struct llc_sap_state_trans llc_sap_active_state_trans_7 = { .ev = llc_sap_ev_rx_test_c, .next_state = LLC_SAP_STATE_ACTIVE, .ev_actions = llc_sap_active_state_actions_7 @@ -136,7 +136,7 @@ static const llc_sap_action_t llc_sap_active_state_actions_8[] = { [1] = NULL, }; -static struct llc_sap_state_trans llc_sap_active_state_trans_8 = { +static const struct llc_sap_state_trans llc_sap_active_state_trans_8 = { .ev = llc_sap_ev_rx_test_r, .next_state = LLC_SAP_STATE_ACTIVE, .ev_actions = llc_sap_active_state_actions_8, @@ -150,14 +150,14 @@ static const llc_sap_action_t llc_sap_active_state_actions_9[] = { [1] = NULL, }; -static struct llc_sap_state_trans llc_sap_active_state_trans_9 = { +static const struct llc_sap_state_trans llc_sap_active_state_trans_9 = { .ev = llc_sap_ev_deactivation_req, .next_state = LLC_SAP_STATE_INACTIVE, .ev_actions = llc_sap_active_state_actions_9 }; /* array of pointers; one to each transition */ -static struct llc_sap_state_trans *llc_sap_active_state_transitions[] = { +static const struct llc_sap_state_trans *llc_sap_active_state_transitions[] = { [0] = &llc_sap_active_state_trans_2, [1] = &llc_sap_active_state_trans_1, [2] = &llc_sap_active_state_trans_3, diff --git a/net/llc/llc_sap.c b/net/llc/llc_sap.c index 116c0e479183..6cd03c2ae7d5 100644 --- a/net/llc/llc_sap.c +++ b/net/llc/llc_sap.c @@ -114,12 +114,12 @@ void llc_sap_rtn_pdu(struct llc_sap *sap, struct sk_buff *skb) * Returns the pointer to found transition on success or %NULL for * failure. 
*/ -static struct llc_sap_state_trans *llc_find_sap_trans(struct llc_sap *sap, - struct sk_buff *skb) +static const struct llc_sap_state_trans *llc_find_sap_trans(struct llc_sap *sap, + struct sk_buff *skb) { int i = 0; - struct llc_sap_state_trans *rc = NULL; - struct llc_sap_state_trans **next_trans; + const struct llc_sap_state_trans *rc = NULL; + const struct llc_sap_state_trans **next_trans; struct llc_sap_state *curr_state = &llc_sap_state_table[sap->state - 1]; /* * Search thru events for this state until list exhausted or until @@ -143,7 +143,7 @@ static struct llc_sap_state_trans *llc_find_sap_trans(struct llc_sap *sap, * Returns 0 for success and 1 for failure of at least one action. */ static int llc_exec_sap_trans_actions(struct llc_sap *sap, - struct llc_sap_state_trans *trans, + const struct llc_sap_state_trans *trans, struct sk_buff *skb) { int rc = 0; @@ -166,8 +166,8 @@ static int llc_exec_sap_trans_actions(struct llc_sap *sap, */ static int llc_sap_next_state(struct llc_sap *sap, struct sk_buff *skb) { + const struct llc_sap_state_trans *trans; int rc = 1; - struct llc_sap_state_trans *trans; if (sap->state > LLC_NR_SAP_STATES) goto out; diff --git a/net/mac80211/agg-tx.c b/net/mac80211/agg-tx.c index 21d55dc539f6..677bbbac9f16 100644 --- a/net/mac80211/agg-tx.c +++ b/net/mac80211/agg-tx.c @@ -616,7 +616,9 @@ int ieee80211_start_tx_ba_session(struct ieee80211_sta *pubsta, u16 tid, return -EINVAL; if (!pubsta->deflink.ht_cap.ht_supported && - sta->sdata->vif.bss_conf.chanreq.oper.chan->band != NL80211_BAND_6GHZ) + !pubsta->deflink.vht_cap.vht_supported && + !pubsta->deflink.he_cap.has_he && + !pubsta->deflink.eht_cap.has_eht) return -EINVAL; if (WARN_ON_ONCE(!local->ops->ampdu_action)) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 83ad6c9709fe..85cb71de370f 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -263,7 +263,7 @@ static int ieee80211_start_p2p_device(struct wiphy *wiphy, lockdep_assert_wiphy(sdata->local->hw.wiphy); - ret = ieee80211_check_combinations(sdata, NULL, 0, 0); + ret = ieee80211_check_combinations(sdata, NULL, 0, 0, -1); if (ret < 0) return ret; @@ -285,7 +285,7 @@ static int ieee80211_start_nan(struct wiphy *wiphy, lockdep_assert_wiphy(sdata->local->hw.wiphy); - ret = ieee80211_check_combinations(sdata, NULL, 0, 0); + ret = ieee80211_check_combinations(sdata, NULL, 0, 0, -1); if (ret < 0) return ret; @@ -742,9 +742,6 @@ static int ieee80211_get_key(struct wiphy *wiphy, struct net_device *dev, break; } - params.key = key->conf.key; - params.key_len = key->conf.keylen; - callback(cookie, &params); err = 0; @@ -1379,6 +1376,11 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev, (IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ); + link_conf->eht_80mhz_full_bw_ul_mumimo = + params->eht_cap->fixed.phy_cap_info[7] & + (IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | + IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ); } else { link_conf->eht_su_beamformer = false; link_conf->eht_su_beamformee = false; @@ -1610,11 +1612,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev, /* abort any running channel switch or color change */ link_conf->csa_active = false; link_conf->color_change_active = false; - if (sdata->csa_blocked_queues) { - ieee80211_wake_vif_queues(local, sdata, - IEEE80211_QUEUE_STOP_REASON_CSA); - sdata->csa_blocked_queues = false; - 
} + ieee80211_vif_unblock_queues_csa(sdata); ieee80211_free_next_beacon(link); @@ -1666,7 +1664,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev, if (sdata->wdev.cac_started) { chandef = link_conf->chanreq.oper; - wiphy_delayed_work_cancel(wiphy, &link->dfs_cac_timer_work); + wiphy_delayed_work_cancel(wiphy, &sdata->dfs_cac_timer_work); cfg80211_cac_event(sdata->dev, &chandef, NL80211_RADAR_CAC_ABORTED, GFP_KERNEL); @@ -1809,11 +1807,17 @@ static void sta_apply_mesh_params(struct ieee80211_local *local, #endif } +enum sta_link_apply_mode { + STA_LINK_MODE_NEW, + STA_LINK_MODE_STA_MODIFY, + STA_LINK_MODE_LINK_MODIFY, +}; + static int sta_link_apply_parameters(struct ieee80211_local *local, - struct sta_info *sta, bool new_link, + struct sta_info *sta, + enum sta_link_apply_mode mode, struct link_station_parameters *params) { - int ret = 0; struct ieee80211_supported_band *sband; struct ieee80211_sub_if_data *sdata = sta->sdata; u32 link_id = params->link_id < 0 ? 0 : params->link_id; @@ -1822,18 +1826,29 @@ static int sta_link_apply_parameters(struct ieee80211_local *local, struct link_sta_info *link_sta = rcu_dereference_protected(sta->link[link_id], lockdep_is_held(&local->hw.wiphy->mtx)); + bool changes = params->link_mac || + params->txpwr_set || + params->supported_rates_len || + params->ht_capa || + params->vht_capa || + params->he_capa || + params->eht_capa || + params->opmode_notif_used; - /* - * If there are no changes, then accept a link that exist, - * unless it's a new link. - */ - if (params->link_id >= 0 && !new_link && - !params->link_mac && !params->txpwr_set && - !params->supported_rates_len && - !params->ht_capa && !params->vht_capa && - !params->he_capa && !params->eht_capa && - !params->opmode_notif_used) - return 0; + switch (mode) { + case STA_LINK_MODE_NEW: + if (!params->link_mac) + return -EINVAL; + break; + case STA_LINK_MODE_LINK_MODIFY: + break; + case STA_LINK_MODE_STA_MODIFY: + if (params->link_id >= 0) + break; + if (!changes) + return 0; + break; + } if (!link || !link_sta) return -EINVAL; @@ -1843,18 +1858,18 @@ static int sta_link_apply_parameters(struct ieee80211_local *local, return -EINVAL; if (params->link_mac) { - if (new_link) { + if (mode == STA_LINK_MODE_NEW) { memcpy(link_sta->addr, params->link_mac, ETH_ALEN); memcpy(link_sta->pub->addr, params->link_mac, ETH_ALEN); } else if (!ether_addr_equal(link_sta->addr, params->link_mac)) { return -EINVAL; } - } else if (new_link) { - return -EINVAL; } if (params->txpwr_set) { + int ret; + link_sta->pub->txpwr.type = params->txpwr.type; if (params->txpwr.type == NL80211_TX_POWER_LIMITED) link_sta->pub->txpwr.power = params->txpwr.power; @@ -1907,7 +1922,7 @@ static int sta_link_apply_parameters(struct ieee80211_local *local, ieee80211_sta_init_nss(link_sta); - return ret; + return 0; } static int sta_apply_parameters(struct ieee80211_local *local, @@ -2023,7 +2038,7 @@ static int sta_apply_parameters(struct ieee80211_local *local, if (params->listen_interval >= 0) sta->listen_interval = params->listen_interval; - ret = sta_link_apply_parameters(local, sta, false, + ret = sta_link_apply_parameters(local, sta, STA_LINK_MODE_STA_MODIFY, &params->link_sta_params); if (ret) return ret; @@ -3467,7 +3482,7 @@ static int ieee80211_start_radar_detection(struct wiphy *wiphy, if (err) goto out_unlock; - wiphy_delayed_work_queue(wiphy, &sdata->deflink.dfs_cac_timer_work, + wiphy_delayed_work_queue(wiphy, &sdata->dfs_cac_timer_work, msecs_to_jiffies(cac_time_ms)); out_unlock: @@ -3483,12 +3498,8
@@ static void ieee80211_end_cac(struct wiphy *wiphy, lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry(sdata, &local->interfaces, list) { - /* it might be waiting for the local->mtx, but then - * by the time it gets it, sdata->wdev.cac_started - * will no longer be true - */ wiphy_delayed_work_cancel(wiphy, - &sdata->deflink.dfs_cac_timer_work); + &sdata->dfs_cac_timer_work); if (sdata->wdev.cac_started) { ieee80211_link_release_channel(&sdata->deflink); @@ -3638,10 +3649,10 @@ void ieee80211_csa_finish(struct ieee80211_vif *vif, unsigned int link_id) continue; wiphy_work_queue(iter->local->hw.wiphy, - &iter->deflink.csa_finalize_work); + &iter->deflink.csa.finalize_work); } } - wiphy_work_queue(local->hw.wiphy, &link_data->csa_finalize_work); + wiphy_work_queue(local->hw.wiphy, &link_data->csa.finalize_work); rcu_read_unlock(); } @@ -3728,7 +3739,7 @@ static int __ieee80211_csa_finalize(struct ieee80211_link_data *link_data) } if (!cfg80211_chandef_identical(&link_conf->chanreq.oper, - &link_data->csa_chanreq.oper)) + &link_data->csa.chanreq.oper)) return -EINVAL; link_conf->csa_active = false; @@ -3739,17 +3750,13 @@ static int __ieee80211_csa_finalize(struct ieee80211_link_data *link_data) ieee80211_link_info_change_notify(sdata, link_data, changed); - if (sdata->csa_blocked_queues) { - ieee80211_wake_vif_queues(local, sdata, - IEEE80211_QUEUE_STOP_REASON_CSA); - sdata->csa_blocked_queues = false; - } + ieee80211_vif_unblock_queues_csa(sdata); err = drv_post_channel_switch(link_data); if (err) return err; - cfg80211_ch_switch_notify(sdata->dev, &link_data->csa_chanreq.oper, + cfg80211_ch_switch_notify(sdata->dev, &link_data->csa.chanreq.oper, link_data->link_id); return 0; @@ -3770,7 +3777,7 @@ static void ieee80211_csa_finalize(struct ieee80211_link_data *link_data) void ieee80211_csa_finalize_work(struct wiphy *wiphy, struct wiphy_work *work) { struct ieee80211_link_data *link = - container_of(work, struct ieee80211_link_data, csa_finalize_work); + container_of(work, struct ieee80211_link_data, csa.finalize_work); struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; @@ -4001,7 +4008,7 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, goto out; /* if reservation is invalid then this will fail */ - err = ieee80211_check_combinations(sdata, NULL, chanctx->mode, 0); + err = ieee80211_check_combinations(sdata, NULL, chanctx->mode, 0, -1); if (err) { ieee80211_link_unreserve_chanctx(link_data); goto out; @@ -4017,23 +4024,19 @@ __ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev, goto out; } - link_data->csa_chanreq = chanreq; + link_data->csa.chanreq = chanreq; link_conf->csa_active = true; - if (params->block_tx && - !ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA)) { - ieee80211_stop_vif_queues(local, sdata, - IEEE80211_QUEUE_STOP_REASON_CSA); - sdata->csa_blocked_queues = true; - } + if (params->block_tx) + ieee80211_vif_block_queues_csa(sdata); cfg80211_ch_switch_started_notify(sdata->dev, - &link_data->csa_chanreq.oper, link_id, + &link_data->csa.chanreq.oper, link_id, params->count, params->block_tx); if (changed) { ieee80211_link_info_change_notify(sdata, link_data, changed); - drv_channel_switch_beacon(sdata, &link_data->csa_chanreq.oper); + drv_channel_switch_beacon(sdata, &link_data->csa.chanreq.oper); } else { /* if the beacon didn't change, we can finalize immediately */ ieee80211_csa_finalize(link_data); @@ -4979,13 +4982,17 @@ static void ieee80211_del_intf_link(struct wiphy 
*wiphy, ieee80211_vif_set_links(sdata, wdev->valid_links, 0); } -static int sta_add_link_station(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata, - struct link_station_parameters *params) +static int +ieee80211_add_link_station(struct wiphy *wiphy, struct net_device *dev, + struct link_station_parameters *params) { + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + struct ieee80211_local *local = wiphy_priv(wiphy); struct sta_info *sta; int ret; + lockdep_assert_wiphy(local->hw.wiphy); + sta = sta_info_get_bss(sdata, params->mld_mac); if (!sta) return -ENOENT; @@ -5000,7 +5007,7 @@ static int sta_add_link_station(struct ieee80211_local *local, if (ret) return ret; - ret = sta_link_apply_parameters(local, sta, true, params); + ret = sta_link_apply_parameters(local, sta, STA_LINK_MODE_NEW, params); if (ret) { ieee80211_sta_free_link(sta, params->link_id); return ret; @@ -5011,23 +5018,15 @@ static int sta_add_link_station(struct ieee80211_local *local, } static int -ieee80211_add_link_station(struct wiphy *wiphy, struct net_device *dev, +ieee80211_mod_link_station(struct wiphy *wiphy, struct net_device *dev, struct link_station_parameters *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = wiphy_priv(wiphy); - - lockdep_assert_wiphy(sdata->local->hw.wiphy); - - return sta_add_link_station(local, sdata, params); -} - -static int sta_mod_link_station(struct ieee80211_local *local, - struct ieee80211_sub_if_data *sdata, - struct link_station_parameters *params) -{ struct sta_info *sta; + lockdep_assert_wiphy(local->hw.wiphy); + sta = sta_info_get_bss(sdata, params->mld_mac); if (!sta) return -ENOENT; @@ -5035,26 +5034,19 @@ static int sta_mod_link_station(struct ieee80211_local *local, if (!(sta->sta.valid_links & BIT(params->link_id))) return -EINVAL; - return sta_link_apply_parameters(local, sta, false, params); + return sta_link_apply_parameters(local, sta, STA_LINK_MODE_LINK_MODIFY, + params); } static int -ieee80211_mod_link_station(struct wiphy *wiphy, struct net_device *dev, - struct link_station_parameters *params) +ieee80211_del_link_station(struct wiphy *wiphy, struct net_device *dev, + struct link_station_del_parameters *params) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - struct ieee80211_local *local = wiphy_priv(wiphy); + struct sta_info *sta; lockdep_assert_wiphy(sdata->local->hw.wiphy); - return sta_mod_link_station(local, sdata, params); -} - -static int sta_del_link_station(struct ieee80211_sub_if_data *sdata, - struct link_station_del_parameters *params) -{ - struct sta_info *sta; - sta = sta_info_get_bss(sdata, params->mld_mac); if (!sta) return -ENOENT; @@ -5071,17 +5063,6 @@ static int sta_del_link_station(struct ieee80211_sub_if_data *sdata, return 0; } -static int -ieee80211_del_link_station(struct wiphy *wiphy, struct net_device *dev, - struct link_station_del_parameters *params) -{ - struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); - - lockdep_assert_wiphy(sdata->local->hw.wiphy); - - return sta_del_link_station(sdata, params); -} - static int ieee80211_set_hw_timestamp(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_set_hw_timestamp *hwts) @@ -5222,4 +5203,5 @@ const struct cfg80211_ops mac80211_config_ops = { .del_link_station = ieee80211_del_link_station, .set_hw_timestamp = ieee80211_set_hw_timestamp, .set_ttlm = ieee80211_set_ttlm, + .get_radio_mask = ieee80211_get_radio_mask, }; diff --git 
a/net/mac80211/chan.c b/net/mac80211/chan.c index 380695fdc32f..e8567723e94d 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -47,24 +47,29 @@ int ieee80211_chanctx_refcount(struct ieee80211_local *local, ieee80211_chanctx_num_reserved(local, ctx); } -static int ieee80211_num_chanctx(struct ieee80211_local *local) +static int ieee80211_num_chanctx(struct ieee80211_local *local, int radio_idx) { struct ieee80211_chanctx *ctx; int num = 0; lockdep_assert_wiphy(local->hw.wiphy); - list_for_each_entry(ctx, &local->chanctx_list, list) + list_for_each_entry(ctx, &local->chanctx_list, list) { + if (radio_idx >= 0 && ctx->conf.radio_idx != radio_idx) + continue; num++; + } return num; } -static bool ieee80211_can_create_new_chanctx(struct ieee80211_local *local) +static bool ieee80211_can_create_new_chanctx(struct ieee80211_local *local, + int radio_idx) { lockdep_assert_wiphy(local->hw.wiphy); - return ieee80211_num_chanctx(local) < ieee80211_max_num_channels(local); + return ieee80211_num_chanctx(local, radio_idx) < + ieee80211_max_num_channels(local, radio_idx); } static struct ieee80211_chanctx * @@ -295,17 +300,24 @@ ieee80211_get_max_required_bw(struct ieee80211_link_data *link) static enum nl80211_chan_width ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, - struct ieee80211_link_data *rsvd_for) + struct ieee80211_link_data *rsvd_for, + bool check_reserved) { struct ieee80211_sub_if_data *sdata; struct ieee80211_link_data *link; enum nl80211_chan_width max_bw = NL80211_CHAN_WIDTH_20_NOHT; + if (WARN_ON(check_reserved && rsvd_for)) + return ctx->conf.def.width; + for_each_sdata_link(local, link) { enum nl80211_chan_width width = NL80211_CHAN_WIDTH_20_NOHT; - if (link != rsvd_for && - rcu_access_pointer(link->conf->chanctx_conf) != &ctx->conf) + if (check_reserved) { + if (link->reserved_chanctx != ctx) + continue; + } else if (link != rsvd_for && + rcu_access_pointer(link->conf->chanctx_conf) != &ctx->conf) continue; switch (link->sdata->vif.type) { @@ -359,7 +371,8 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local, static u32 _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, - struct ieee80211_link_data *rsvd_for) + struct ieee80211_link_data *rsvd_for, + bool check_reserved) { enum nl80211_chan_width max_bw; struct cfg80211_chan_def min_def; @@ -379,7 +392,8 @@ _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, return 0; } - max_bw = ieee80211_get_chanctx_max_required_bw(local, ctx, rsvd_for); + max_bw = ieee80211_get_chanctx_max_required_bw(local, ctx, rsvd_for, + check_reserved); /* downgrade chandef up to max_bw */ min_def = ctx->conf.def; @@ -396,12 +410,9 @@ _ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, return IEEE80211_CHANCTX_CHANGE_MIN_WIDTH; } -/* calling this function is assuming that station vif is updated to - * lates changes by calling ieee80211_link_update_chanreq - */ static void ieee80211_chan_bw_change(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, - bool narrowed) + bool reserved, bool narrowed) { struct sta_info *sta; struct ieee80211_supported_band *sband = @@ -418,13 +429,17 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local, continue; for (link_id = 0; link_id < ARRAY_SIZE(sta->sdata->link); link_id++) { - struct ieee80211_bss_conf *link_conf = - rcu_dereference(sdata->vif.link_conf[link_id]); + struct ieee80211_link_data *link = + rcu_dereference(sdata->link[link_id]); + 
struct ieee80211_bss_conf *link_conf; + struct cfg80211_chan_def *new_chandef; struct link_sta_info *link_sta; - if (!link_conf) + if (!link) continue; + link_conf = link->conf; + if (rcu_access_pointer(link_conf->chanctx_conf) != &ctx->conf) continue; @@ -432,7 +447,13 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local, if (!link_sta) continue; - new_sta_bw = ieee80211_sta_cur_vht_bw(link_sta); + if (reserved) + new_chandef = &link->reserved.oper; + else + new_chandef = &link_conf->chanreq.oper; + + new_sta_bw = _ieee80211_sta_cur_vht_bw(link_sta, + new_chandef); /* nothing change */ if (new_sta_bw == link_sta->pub->bandwidth) @@ -458,20 +479,22 @@ static void ieee80211_chan_bw_change(struct ieee80211_local *local, */ void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, - struct ieee80211_link_data *rsvd_for) + struct ieee80211_link_data *rsvd_for, + bool check_reserved) { - u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for); + u32 changed = _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for, + check_reserved); if (!changed) return; /* check is BW narrowed */ - ieee80211_chan_bw_change(local, ctx, true); + ieee80211_chan_bw_change(local, ctx, false, true); drv_change_chanctx(local, ctx, changed); /* check is BW wider */ - ieee80211_chan_bw_change(local, ctx, false); + ieee80211_chan_bw_change(local, ctx, false, false); } static void _ieee80211_change_chanctx(struct ieee80211_local *local, @@ -505,10 +528,10 @@ static void _ieee80211_change_chanctx(struct ieee80211_local *local, * due to maybe not returning from it, e.g in case new context was added * first time with all parameters up to date. */ - ieee80211_chan_bw_change(local, old_ctx, true); + ieee80211_chan_bw_change(local, old_ctx, false, true); if (ieee80211_chanreq_identical(&ctx_req, chanreq)) { - ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for); + ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for, false); return; } @@ -529,14 +552,14 @@ static void _ieee80211_change_chanctx(struct ieee80211_local *local, ctx->conf.ap = chanreq->ap; /* check if min chanctx also changed */ - changed |= _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for); + changed |= _ieee80211_recalc_chanctx_min_def(local, ctx, rsvd_for, false); ieee80211_add_wbrf(local, &ctx->conf.def); drv_change_chanctx(local, ctx, changed); /* check if BW is wider */ - ieee80211_chan_bw_change(local, old_ctx, false); + ieee80211_chan_bw_change(local, old_ctx, false, false); } static void ieee80211_change_chanctx(struct ieee80211_local *local, @@ -638,7 +661,8 @@ ieee80211_chanctx_radar_required(struct ieee80211_local *local, static struct ieee80211_chanctx * ieee80211_alloc_chanctx(struct ieee80211_local *local, const struct ieee80211_chan_req *chanreq, - enum ieee80211_chanctx_mode mode) + enum ieee80211_chanctx_mode mode, + int radio_idx) { struct ieee80211_chanctx *ctx; @@ -656,7 +680,8 @@ ieee80211_alloc_chanctx(struct ieee80211_local *local, ctx->conf.rx_chains_dynamic = 1; ctx->mode = mode; ctx->conf.radar_enabled = false; - _ieee80211_recalc_chanctx_min_def(local, ctx, NULL); + ctx->conf.radio_idx = radio_idx; + _ieee80211_recalc_chanctx_min_def(local, ctx, NULL, false); return ctx; } @@ -689,14 +714,15 @@ static struct ieee80211_chanctx * ieee80211_new_chanctx(struct ieee80211_local *local, const struct ieee80211_chan_req *chanreq, enum ieee80211_chanctx_mode mode, - bool assign_on_failure) + bool assign_on_failure, + int radio_idx) { struct ieee80211_chanctx *ctx; 
int err; lockdep_assert_wiphy(local->hw.wiphy); - ctx = ieee80211_alloc_chanctx(local, chanreq, mode); + ctx = ieee80211_alloc_chanctx(local, chanreq, mode, radio_idx); if (!ctx) return ERR_PTR(-ENOMEM); @@ -775,13 +801,24 @@ void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, /* TDLS peers can sometimes affect the chandef width */ list_for_each_entry(sta, &local->sta_list, list) { + struct ieee80211_sub_if_data *sdata = sta->sdata; struct ieee80211_chan_req tdls_chanreq = {}; + int tdls_link_id; + if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_TDLS_WIDER_BW) || !test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !sta->tdls_chandef.chan) continue; + tdls_link_id = ieee80211_tdls_sta_link_id(sta); + link = sdata_dereference(sdata->link[tdls_link_id], sdata); + if (!link) + continue; + + if (rcu_access_pointer(link->conf->chanctx_conf) != conf) + continue; + tdls_chanreq.oper = sta->tdls_chandef; /* note this always fills and returns &tmp if compat */ @@ -838,7 +875,7 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link, if (new_ctx) { /* recalc considering the link we'll use it for now */ - ieee80211_recalc_chanctx_min_def(local, new_ctx, link); + ieee80211_recalc_chanctx_min_def(local, new_ctx, link, false); ret = drv_assign_vif_chanctx(local, sdata, link->conf, new_ctx); if (assign_on_failure || !ret) { @@ -861,12 +898,12 @@ static int ieee80211_assign_link_chanctx(struct ieee80211_link_data *link, ieee80211_recalc_chanctx_chantype(local, curr_ctx); ieee80211_recalc_smps_chanctx(local, curr_ctx); ieee80211_recalc_radar_chanctx(local, curr_ctx); - ieee80211_recalc_chanctx_min_def(local, curr_ctx, NULL); + ieee80211_recalc_chanctx_min_def(local, curr_ctx, NULL, false); } if (new_ctx && ieee80211_chanctx_num_assigned(local, new_ctx) > 0) { ieee80211_recalc_txpower(sdata, false); - ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL); + ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL, false); } if (conf) { @@ -1053,6 +1090,107 @@ int ieee80211_link_unreserve_chanctx(struct ieee80211_link_data *link) return 0; } +static struct ieee80211_chanctx * +ieee80211_replace_chanctx(struct ieee80211_local *local, + const struct ieee80211_chan_req *chanreq, + enum ieee80211_chanctx_mode mode, + struct ieee80211_chanctx *curr_ctx) +{ + struct ieee80211_chanctx *new_ctx, *ctx; + struct wiphy *wiphy = local->hw.wiphy; + const struct wiphy_radio *radio; + + if (!curr_ctx || (curr_ctx->replace_state == + IEEE80211_CHANCTX_WILL_BE_REPLACED) || + !list_empty(&curr_ctx->reserved_links)) { + /* + * Another link already requested this context for a + * reservation. Find another one hoping all links assigned + * to it will also switch soon enough. + * + * TODO: This needs a little more work as some cases + * (more than 2 chanctx capable devices) may fail which could + * otherwise succeed provided some channel context juggling was + * performed. + * + * Consider ctx1..3, link1..6, each ctx has 2 links. link1 and + * link2 from ctx1 request new different chandefs starting 2 + * in-place reservations with ctx4 and ctx5 replacing ctx1 and + * ctx2 respectively. Next link5 and link6 from ctx3 reserve + * ctx4. If link3 and link4 remain on ctx2 as they are then this + * fails unless `replace_ctx` from ctx5 is replaced with ctx3.
+ */ + list_for_each_entry(ctx, &local->chanctx_list, list) { + if (ctx->replace_state != + IEEE80211_CHANCTX_REPLACE_NONE) + continue; + + if (!list_empty(&ctx->reserved_links)) + continue; + + if (ctx->conf.radio_idx >= 0) { + radio = &wiphy->radio[ctx->conf.radio_idx]; + if (!cfg80211_radio_chandef_valid(radio, &chanreq->oper)) + continue; + } + + curr_ctx = ctx; + break; + } + } + + /* + * If that's true then all available contexts already have reservations + * and cannot be used. + */ + if (!curr_ctx || (curr_ctx->replace_state == + IEEE80211_CHANCTX_WILL_BE_REPLACED) || + !list_empty(&curr_ctx->reserved_links)) + return ERR_PTR(-EBUSY); + + new_ctx = ieee80211_alloc_chanctx(local, chanreq, mode, -1); + if (!new_ctx) + return ERR_PTR(-ENOMEM); + + new_ctx->replace_ctx = curr_ctx; + new_ctx->replace_state = IEEE80211_CHANCTX_REPLACES_OTHER; + + curr_ctx->replace_ctx = new_ctx; + curr_ctx->replace_state = IEEE80211_CHANCTX_WILL_BE_REPLACED; + + list_add_rcu(&new_ctx->list, &local->chanctx_list); + + return new_ctx; +} + +static bool +ieee80211_find_available_radio(struct ieee80211_local *local, + const struct ieee80211_chan_req *chanreq, + int *radio_idx) +{ + struct wiphy *wiphy = local->hw.wiphy; + const struct wiphy_radio *radio; + int i; + + *radio_idx = -1; + if (!wiphy->n_radio) + return true; + + for (i = 0; i < wiphy->n_radio; i++) { + radio = &wiphy->radio[i]; + if (!cfg80211_radio_chandef_valid(radio, &chanreq->oper)) + continue; + + if (!ieee80211_can_create_new_chanctx(local, i)) + continue; + + *radio_idx = i; + return true; + } + + return false; +} + int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link, const struct ieee80211_chan_req *chanreq, enum ieee80211_chanctx_mode mode, @@ -1060,7 +1198,8 @@ int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link, { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; - struct ieee80211_chanctx *new_ctx, *curr_ctx, *ctx; + struct ieee80211_chanctx *new_ctx, *curr_ctx; + int radio_idx; lockdep_assert_wiphy(local->hw.wiphy); @@ -1070,76 +1209,15 @@ int ieee80211_link_reserve_chanctx(struct ieee80211_link_data *link, new_ctx = ieee80211_find_reservation_chanctx(local, chanreq, mode); if (!new_ctx) { - if (ieee80211_can_create_new_chanctx(local)) { + if (ieee80211_can_create_new_chanctx(local, -1) && + ieee80211_find_available_radio(local, chanreq, &radio_idx)) new_ctx = ieee80211_new_chanctx(local, chanreq, mode, - false); - if (IS_ERR(new_ctx)) - return PTR_ERR(new_ctx); - } else { - if (!curr_ctx || - (curr_ctx->replace_state == - IEEE80211_CHANCTX_WILL_BE_REPLACED) || - !list_empty(&curr_ctx->reserved_links)) { - /* - * Another link already requested this context - * for a reservation. Find another one hoping - * all links assigned to it will also switch - * soon enough. - * - * TODO: This needs a little more work as some - * cases (more than 2 chanctx capable devices) - * may fail which could otherwise succeed - * provided some channel context juggling was - * performed. - * - * Consider ctx1..3, link1..6, each ctx has 2 - * links. link1 and link2 from ctx1 request new - * different chandefs starting 2 in-place - * reserations with ctx4 and ctx5 replacing - * ctx1 and ctx2 respectively. Next link5 and - * link6 from ctx3 reserve ctx4. If link3 and - * link4 remain on ctx2 as they are then this - * fails unless `replace_ctx` from ctx5 is - * replaced with ctx3. 
- */ - list_for_each_entry(ctx, &local->chanctx_list, - list) { - if (ctx->replace_state != - IEEE80211_CHANCTX_REPLACE_NONE) - continue; - - if (!list_empty(&ctx->reserved_links)) - continue; - - curr_ctx = ctx; - break; - } - } - - /* - * If that's true then all available contexts already - * have reservations and cannot be used. - */ - if (!curr_ctx || - (curr_ctx->replace_state == - IEEE80211_CHANCTX_WILL_BE_REPLACED) || - !list_empty(&curr_ctx->reserved_links)) - return -EBUSY; - - new_ctx = ieee80211_alloc_chanctx(local, chanreq, mode); - if (!new_ctx) - return -ENOMEM; - - new_ctx->replace_ctx = curr_ctx; - new_ctx->replace_state = - IEEE80211_CHANCTX_REPLACES_OTHER; - - curr_ctx->replace_ctx = new_ctx; - curr_ctx->replace_state = - IEEE80211_CHANCTX_WILL_BE_REPLACED; - - list_add_rcu(&new_ctx->list, &local->chanctx_list); - } + false, radio_idx); + else + new_ctx = ieee80211_replace_chanctx(local, chanreq, + mode, curr_ctx); + if (IS_ERR(new_ctx)) + return PTR_ERR(new_ctx); } list_add(&link->reserved_chanctx_list, &new_ctx->reserved_links); @@ -1162,11 +1240,11 @@ ieee80211_link_chanctx_reservation_complete(struct ieee80211_link_data *link) case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_OCB: wiphy_work_queue(sdata->local->hw.wiphy, - &link->csa_finalize_work); + &link->csa.finalize_work); break; case NL80211_IFTYPE_STATION: wiphy_delayed_work_queue(sdata->local->hw.wiphy, - &link->u.mgd.chswitch_work, 0); + &link->u.mgd.csa.switch_work, 0); break; case NL80211_IFTYPE_UNSPECIFIED: case NL80211_IFTYPE_AP_VLAN: @@ -1279,7 +1357,7 @@ ieee80211_link_use_reserved_reassign(struct ieee80211_link_data *link) if (ieee80211_chanctx_refcount(local, old_ctx) == 0) ieee80211_free_chanctx(local, old_ctx, false); - ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL); + ieee80211_recalc_chanctx_min_def(local, new_ctx, NULL, false); ieee80211_recalc_smps_chanctx(local, new_ctx); ieee80211_recalc_radar_chanctx(local, new_ctx); @@ -1545,6 +1623,31 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) goto err; } + /* update station rate control and min width before switch */ + list_for_each_entry(ctx, &local->chanctx_list, list) { + struct ieee80211_link_data *link; + + if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) + continue; + + if (WARN_ON(!ctx->replace_ctx)) { + err = -EINVAL; + goto err; + } + + list_for_each_entry(link, &ctx->reserved_links, + reserved_chanctx_list) { + if (!ieee80211_link_has_in_place_reservation(link)) + continue; + + ieee80211_chan_bw_change(local, + ieee80211_link_get_chanctx(link), + true, true); + } + + ieee80211_recalc_chanctx_min_def(local, ctx, NULL, true); + } + /* * All necessary vifs are ready. Perform the switch now depending on * reservations and driver capabilities. 
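The hunk above adds a pre-switch pass over contexts that replace others. Together with the reserved/narrowed flag pair now threaded through ieee80211_chan_bw_change(), the station bandwidth update becomes two-phase; a condensed sketch of the ordering, assembled from the hunks above for exposition only (iteration and error handling dropped, not literal kernel code):

        /* phase 1, before the switch: clamp stations down, evaluating
         * bandwidth against the link's *reserved* chandef
         * (reserved = true, narrowed = true)
         */
        ieee80211_chan_bw_change(local, ieee80211_link_get_chanctx(link),
                                 true, true);
        ieee80211_recalc_chanctx_min_def(local, ctx, NULL, true);

        /* ... the chanctx switch itself happens here ... */

        /* phase 2, after the switch: let stations widen again, now
         * against the operational chandef
         * (reserved = false, narrowed = false)
         */
        ieee80211_chan_bw_change(local, ctx, false, false);

Narrowing before the switch and widening only after it means a station is never left configured wider than whichever channel context is active at that moment.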
@@ -1612,7 +1715,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) ieee80211_recalc_chanctx_chantype(local, ctx); ieee80211_recalc_smps_chanctx(local, ctx); ieee80211_recalc_radar_chanctx(local, ctx); - ieee80211_recalc_chanctx_min_def(local, ctx, NULL); + ieee80211_recalc_chanctx_min_def(local, ctx, NULL, false); list_for_each_entry_safe(link, link_tmp, &ctx->reserved_links, reserved_chanctx_list) { @@ -1625,6 +1728,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local) link->reserved_chanctx = NULL; ieee80211_link_chanctx_reservation_complete(link); + ieee80211_chan_bw_change(local, ctx, false, false); } /* @@ -1745,6 +1849,7 @@ int _ieee80211_link_use_channel(struct ieee80211_link_data *link, struct ieee80211_chanctx *ctx; u8 radar_detect_width = 0; bool reserved = false; + int radio_idx; int ret; lockdep_assert_wiphy(local->hw.wiphy); @@ -1765,7 +1870,7 @@ int _ieee80211_link_use_channel(struct ieee80211_link_data *link, link->radar_required = ret; ret = ieee80211_check_combinations(sdata, &chanreq->oper, mode, - radar_detect_width); + radar_detect_width, -1); if (ret < 0) goto out; @@ -1775,9 +1880,11 @@ int _ieee80211_link_use_channel(struct ieee80211_link_data *link, /* Note: context is now reserved */ if (ctx) reserved = true; + else if (!ieee80211_find_available_radio(local, chanreq, &radio_idx)) + ctx = ERR_PTR(-EBUSY); else ctx = ieee80211_new_chanctx(local, chanreq, mode, - assign_on_failure); + assign_on_failure, radio_idx); if (IS_ERR(ctx)) { ret = PTR_ERR(ctx); goto out; diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c index 98310188f330..02b5476a4376 100644 --- a/net/mac80211/debugfs.c +++ b/net/mac80211/debugfs.c @@ -483,7 +483,6 @@ static const char *hw_flag_names[] = { FLAG(REPORTS_LOW_ACK), FLAG(SUPPORTS_TX_FRAG), FLAG(SUPPORTS_TDLS_BUFFER_STA), - FLAG(DEAUTH_NEED_MGD_TX_PREP), FLAG(DOESNT_SUPPORT_QOS_NDP), FLAG(BUFF_MMPDU_TXQ), FLAG(SUPPORTS_VHT_EXT_NSS_BW), diff --git a/net/mac80211/driver-ops.c b/net/mac80211/driver-ops.c index 254d745832cb..fe868b521622 100644 --- a/net/mac80211/driver-ops.c +++ b/net/mac80211/driver-ops.c @@ -33,7 +33,7 @@ int drv_start(struct ieee80211_local *local) return ret; } -void drv_stop(struct ieee80211_local *local) +void drv_stop(struct ieee80211_local *local, bool suspend) { might_sleep(); lockdep_assert_wiphy(local->hw.wiphy); @@ -41,8 +41,8 @@ void drv_stop(struct ieee80211_local *local) if (WARN_ON(!local->started)) return; - trace_drv_stop(local); - local->ops->stop(&local->hw); + trace_drv_stop(local, suspend); + local->ops->stop(&local->hw, suspend); trace_drv_return_void(local); /* sync away all work on the tasklet before clearing started */ diff --git a/net/mac80211/driver-ops.h b/net/mac80211/driver-ops.h index 5d078c0a2323..d382d9729e85 100644 --- a/net/mac80211/driver-ops.h +++ b/net/mac80211/driver-ops.h @@ -88,7 +88,7 @@ static inline int drv_get_et_sset_count(struct ieee80211_sub_if_data *sdata, } int drv_start(struct ieee80211_local *local); -void drv_stop(struct ieee80211_local *local); +void drv_stop(struct ieee80211_local *local, bool suspend); #ifdef CONFIG_PM static inline int drv_suspend(struct ieee80211_local *local, @@ -1150,6 +1150,9 @@ drv_pre_channel_switch(struct ieee80211_sub_if_data *sdata, if (!check_sdata_in_driver(sdata)) return -EIO; + if (!ieee80211_vif_link_active(&sdata->vif, ch_switch->link_id)) + return 0; + trace_drv_pre_channel_switch(local, sdata, ch_switch); if (local->ops->pre_channel_switch) ret = 
local->ops->pre_channel_switch(&local->hw, &sdata->vif, @@ -1171,6 +1174,9 @@ drv_post_channel_switch(struct ieee80211_link_data *link) if (!check_sdata_in_driver(sdata)) return -EIO; + if (!ieee80211_vif_link_active(&sdata->vif, link->link_id)) + return 0; + trace_drv_post_channel_switch(local, sdata); if (local->ops->post_channel_switch) ret = local->ops->post_channel_switch(&local->hw, &sdata->vif, @@ -1191,6 +1197,9 @@ drv_abort_channel_switch(struct ieee80211_link_data *link) if (!check_sdata_in_driver(sdata)) return; + if (!ieee80211_vif_link_active(&sdata->vif, link->link_id)) + return; + trace_drv_abort_channel_switch(local, sdata); if (local->ops->abort_channel_switch) @@ -1210,6 +1219,9 @@ drv_channel_switch_rx_beacon(struct ieee80211_sub_if_data *sdata, if (!check_sdata_in_driver(sdata)) return; + if (!ieee80211_vif_link_active(&sdata->vif, ch_switch->link_id)) + return; + trace_drv_channel_switch_rx_beacon(local, sdata, ch_switch); if (local->ops->channel_switch_rx_beacon) local->ops->channel_switch_rx_beacon(&local->hw, &sdata->vif, diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c index d7e8cf8e48b7..79caeb485fd5 100644 --- a/net/mac80211/ht.c +++ b/net/mac80211/ht.c @@ -475,7 +475,7 @@ void ieee80211_send_delba(struct ieee80211_sub_if_data *sdata, sdata->vif.type == NL80211_IFTYPE_MESH_POINT) memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN); else if (sdata->vif.type == NL80211_IFTYPE_STATION) - memcpy(mgmt->bssid, sdata->deflink.u.mgd.bssid, ETH_ALEN); + memcpy(mgmt->bssid, sdata->vif.cfg.ap_addr, ETH_ALEN); else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) memcpy(mgmt->bssid, sdata->u.ibss.bssid, ETH_ALEN); diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c index 7ace5cdc6c26..3f74bbceeca5 100644 --- a/net/mac80211/ibss.c +++ b/net/mac80211/ibss.c @@ -9,7 +9,7 @@ * Copyright 2009, Johannes Berg * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH - * Copyright(c) 2018-2023 Intel Corporation + * Copyright(c) 2018-2024 Intel Corporation */ #include @@ -533,12 +533,12 @@ int ieee80211_ibss_finish_csa(struct ieee80211_sub_if_data *sdata, u64 *changed) IEEE80211_PRIVACY(ifibss->privacy)); /* XXX: should not really modify cfg80211 data */ if (cbss) { - cbss->channel = sdata->deflink.csa_chanreq.oper.chan; + cbss->channel = sdata->deflink.csa.chanreq.oper.chan; cfg80211_put_bss(sdata->local->hw.wiphy, cbss); } } - ifibss->chandef = sdata->deflink.csa_chanreq.oper; + ifibss->chandef = sdata->deflink.csa.chanreq.oper; /* generate the beacon */ return ieee80211_ibss_csa_beacon(sdata, NULL, changed); @@ -785,7 +785,8 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata, err = ieee80211_parse_ch_switch_ie(sdata, elems, ifibss->chandef.chan->band, vht_cap_info, &conn, - ifibss->bssid, &csa_ie); + ifibss->bssid, false, + &csa_ie); /* can't switch to destination channel, fail */ if (err < 0) goto disconnect; @@ -1745,7 +1746,7 @@ int ieee80211_ibss_join(struct ieee80211_sub_if_data *sdata, IEEE80211_CHANCTX_SHARED : IEEE80211_CHANCTX_EXCLUSIVE; ret = ieee80211_check_combinations(sdata, ¶ms->chandef, chanmode, - radar_detect_width); + radar_detect_width, -1); if (ret < 0) return ret; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 3cedfdc9099b..a3485e4c6132 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -974,10 +975,15 @@ struct ieee80211_link_data_managed { bool disable_wmm_tracking; 
bool operating_11g_mode; - bool csa_waiting_bcn; - bool csa_ignored_same_chan; - bool csa_blocked_tx; - struct wiphy_delayed_work chswitch_work; + struct { + struct wiphy_delayed_work switch_work; + struct cfg80211_chan_def ap_chandef; + struct ieee80211_parsed_tpe tpe; + unsigned long time; + bool waiting_bcn; + bool ignored_same_chan; + bool blocked_tx; + } csa; struct wiphy_work request_smps_work; /* used to reconfigure hardware SM PS */ @@ -1036,11 +1042,13 @@ struct ieee80211_link_data { struct ieee80211_key __rcu *default_mgmt_key; struct ieee80211_key __rcu *default_beacon_key; - struct wiphy_work csa_finalize_work; bool operating_11g_mode; - struct ieee80211_chan_req csa_chanreq; + struct { + struct wiphy_work finalize_work; + struct ieee80211_chan_req chanreq; + } csa; struct wiphy_work color_change_finalize_work; struct delayed_work color_collision_detect_work; @@ -1059,7 +1067,6 @@ struct ieee80211_link_data { int ap_power_level; /* in dBm */ bool radar_required; - struct wiphy_delayed_work dfs_cac_timer_work; union { struct ieee80211_link_data_managed mgd; @@ -1158,6 +1165,8 @@ struct ieee80211_sub_if_data { struct ieee80211_link_data deflink; struct ieee80211_link_data __rcu *link[IEEE80211_MLD_MAX_NUM_LINKS]; + struct wiphy_delayed_work dfs_cac_timer_work; + /* for ieee80211_set_active_links_async() */ struct wiphy_work activate_links_work; u16 desired_active_links; @@ -1708,7 +1717,6 @@ struct ieee802_11_elems { const struct ieee80211_he_spr *he_spr; const struct ieee80211_mu_edca_param_set *mu_edca_param_set; const struct ieee80211_he_6ghz_capa *he_6ghz_capa; - const struct ieee80211_tx_pwr_env *tx_pwr_env[IEEE80211_TPE_MAX_IE_COUNT]; const u8 *uora_element; const u8 *mesh_id; const u8 *peering; @@ -1746,6 +1754,10 @@ struct ieee802_11_elems { const struct ieee80211_bandwidth_indication *bandwidth_indication; const struct ieee80211_ttlm_elem *ttlm[IEEE80211_TTLM_MAX_CNT]; + /* note: the order in the psd values is per element, not per chandef */ + struct ieee80211_parsed_tpe tpe; + struct ieee80211_parsed_tpe csa_tpe; + /* length of them, respectively */ u8 ext_capab_len; u8 ssid_len; @@ -1764,8 +1776,6 @@ struct ieee802_11_elems { u8 perr_len; u8 country_elem_len; u8 bssid_index_len; - u8 tx_pwr_env_len[IEEE80211_TPE_MAX_IE_COUNT]; - u8 tx_pwr_env_num; u8 eht_cap_len; /* mult-link element can be de-fragmented and thus u8 is not sufficient */ @@ -1813,6 +1823,9 @@ ieee80211_have_rx_timestamp(struct ieee80211_rx_status *status) void ieee80211_vif_inc_num_mcast(struct ieee80211_sub_if_data *sdata); void ieee80211_vif_dec_num_mcast(struct ieee80211_sub_if_data *sdata); +void ieee80211_vif_block_queues_csa(struct ieee80211_sub_if_data *sdata); +void ieee80211_vif_unblock_queues_csa(struct ieee80211_sub_if_data *sdata); + /* This function returns the number of multicast stations connected to this * interface. It returns -1 if that number is not tracked, that is for netdevs * not in AP or AP_VLAN mode or when using 4addr.
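The pair of CSA queue helpers declared above absorbs the open-coded stop/wake sequences removed from cfg.c and iface.c earlier in the series. Their definitions are not part of these hunks; the following is only a plausible sketch reconstructed from the code they replace (the HANDLES_QUIET_CSA test is taken from the removed __ieee80211_channel_switch() branch), so treat it as an assumption rather than the committed implementation:

        /* sketch only -- the real bodies live outside this series of hunks */
        void ieee80211_vif_block_queues_csa(struct ieee80211_sub_if_data *sdata)
        {
                struct ieee80211_local *local = sdata->local;

                /* drivers that quiet the air themselves need no queue stop */
                if (ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA))
                        return;

                ieee80211_stop_vif_queues(local, sdata,
                                          IEEE80211_QUEUE_STOP_REASON_CSA);
                sdata->csa_blocked_queues = true;
        }

        void ieee80211_vif_unblock_queues_csa(struct ieee80211_sub_if_data *sdata)
        {
                struct ieee80211_local *local = sdata->local;

                if (sdata->csa_blocked_queues) {
                        ieee80211_wake_vif_queues(local, sdata,
                                                  IEEE80211_QUEUE_STOP_REASON_CSA);
                        sdata->csa_blocked_queues = false;
                }
        }

Centralizing the flag handling means every CSA abort path (ieee80211_stop_ap(), ieee80211_do_stop(), CSA finalize) resets csa_blocked_queues the same way instead of repeating the three-line wake sequence.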
@@ -1970,6 +1983,7 @@ void ieee80211_offchannel_stop_vifs(struct ieee80211_local *local); void ieee80211_offchannel_return(struct ieee80211_local *local); void ieee80211_roc_setup(struct ieee80211_local *local); void ieee80211_start_next_roc(struct ieee80211_local *local); +void ieee80211_reconfig_roc(struct ieee80211_local *local); void ieee80211_roc_purge(struct ieee80211_local *local, struct ieee80211_sub_if_data *sdata); int ieee80211_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev, @@ -2144,9 +2158,21 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, const struct ieee80211_vht_cap *vht_cap_ie2, struct link_sta_info *link_sta); enum ieee80211_sta_rx_bandwidth -ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta); +_ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta, + struct cfg80211_chan_def *chandef); +static inline enum ieee80211_sta_rx_bandwidth +ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta) +{ + return _ieee80211_sta_cap_rx_bw(link_sta, NULL); +} enum ieee80211_sta_rx_bandwidth -ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta); +_ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta, + struct cfg80211_chan_def *chandef); +static inline enum ieee80211_sta_rx_bandwidth +ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta) +{ + return _ieee80211_sta_cur_vht_bw(link_sta, NULL); +} void ieee80211_sta_init_nss(struct link_sta_info *link_sta); enum ieee80211_sta_rx_bandwidth ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width); @@ -2204,6 +2230,8 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata, * @conn: contains information about own capabilities and restrictions * to decide which channel switch announcements can be accepted * @bssid: the currently connected bssid (for reporting) + * @unprot_action: whether the frame was an unprotected frame or not, + * used for reporting * @csa_ie: parsed 802.11 csa elements on count, mode, chandef and mesh ttl. * All of them will be filled with if success only. * Return: 0 on success, <0 on error and >0 if there is nothing to parse. 
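The _ieee80211_sta_cap_rx_bw()/_ieee80211_sta_cur_vht_bw() split in the ieee80211_i.h hunk above lets a caller evaluate a station's usable bandwidth against an arbitrary chandef, while the static inline wrappers keep every existing call site unchanged by passing NULL, meaning "use the link's current chandef". The chan.c hunk earlier relies on exactly this to size stations against a pending reservation; condensed from that hunk:

        enum ieee80211_sta_rx_bandwidth new_sta_bw;
        struct cfg80211_chan_def *new_chandef;

        /* during a reserved switch, judge the station against the
         * chandef it is about to move to, not the one it is leaving
         */
        if (reserved)
                new_chandef = &link->reserved.oper;
        else
                new_chandef = &link_conf->chanreq.oper;

        new_sta_bw = _ieee80211_sta_cur_vht_bw(link_sta, new_chandef);

        /* legacy call sites are untouched: NULL selects the current chandef */
        new_sta_bw = ieee80211_sta_cur_vht_bw(link_sta);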
@@ -2213,12 +2241,12 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, enum nl80211_band current_band, u32 vht_cap_info, struct ieee80211_conn_settings *conn, - u8 *bssid, + u8 *bssid, bool unprot_action, struct ieee80211_csa_ie *csa_ie); /* Suspend/resume and hw reconfiguration */ int ieee80211_reconfig(struct ieee80211_local *local); -void ieee80211_stop_device(struct ieee80211_local *local); +void ieee80211_stop_device(struct ieee80211_local *local, bool suspend); int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan); @@ -2245,6 +2273,7 @@ int ieee80211_frame_duration(enum nl80211_band band, size_t len, void ieee80211_regulatory_limit_wmm_params(struct ieee80211_sub_if_data *sdata, struct ieee80211_tx_queue_params *qparam, int ac); +void ieee80211_clear_tpe(struct ieee80211_parsed_tpe *tpe); void ieee80211_set_wmm_default(struct ieee80211_link_data *link, bool bss_notify, bool enable_qos); void ieee80211_xmit(struct ieee80211_sub_if_data *sdata, @@ -2595,7 +2624,8 @@ void ieee80211_recalc_smps_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *chanctx); void ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, - struct ieee80211_link_data *rsvd_for); + struct ieee80211_link_data *rsvd_for, + bool check_reserved); bool ieee80211_is_radar_required(struct ieee80211_local *local); void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work); @@ -2610,8 +2640,9 @@ void ieee80211_recalc_dtim(struct ieee80211_local *local, int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, const struct cfg80211_chan_def *chandef, enum ieee80211_chanctx_mode chanmode, - u8 radar_detect); -int ieee80211_max_num_channels(struct ieee80211_local *local); + u8 radar_detect, int radio_idx); +int ieee80211_max_num_channels(struct ieee80211_local *local, int radio_idx); +u32 ieee80211_get_radio_mask(struct wiphy *wiphy, struct net_device *dev); void ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, struct ieee80211_chanctx *ctx); @@ -2683,6 +2714,11 @@ void ieee80211_remove_wbrf(struct ieee80211_local *local, struct cfg80211_chan_d #define VISIBLE_IF_MAC80211_KUNIT ieee80211_rx_result ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx); +int ieee80211_calc_chandef_subchan_offset(const struct cfg80211_chan_def *ap, + u8 n_partial_subchans); +void ieee80211_rearrange_tpe_psd(struct ieee80211_parsed_tpe_psd *psd, + const struct cfg80211_chan_def *ap, + const struct cfg80211_chan_def *used); #else #define EXPORT_SYMBOL_IF_MAC80211_KUNIT(sym) #define VISIBLE_IF_MAC80211_KUNIT static diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index b935bb5d8ed1..b4ad66af3af3 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -397,7 +397,7 @@ static int ieee80211_check_concurrent_iface(struct ieee80211_sub_if_data *sdata, } } - return ieee80211_check_combinations(sdata, NULL, 0, 0); + return ieee80211_check_combinations(sdata, NULL, 0, 0, -1); } static int ieee80211_check_queues(struct ieee80211_sub_if_data *sdata, @@ -543,18 +543,14 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do sdata->vif.bss_conf.csa_active = false; if (sdata->vif.type == NL80211_IFTYPE_STATION) - sdata->deflink.u.mgd.csa_waiting_bcn = false; - if (sdata->csa_blocked_queues) { - ieee80211_wake_vif_queues(local, sdata, - IEEE80211_QUEUE_STOP_REASON_CSA); - sdata->csa_blocked_queues = false; - } + sdata->deflink.u.mgd.csa.waiting_bcn = false; + 
ieee80211_vif_unblock_queues_csa(sdata); - wiphy_work_cancel(local->hw.wiphy, &sdata->deflink.csa_finalize_work); + wiphy_work_cancel(local->hw.wiphy, &sdata->deflink.csa.finalize_work); wiphy_work_cancel(local->hw.wiphy, &sdata->deflink.color_change_finalize_work); wiphy_delayed_work_cancel(local->hw.wiphy, - &sdata->deflink.dfs_cac_timer_work); + &sdata->dfs_cac_timer_work); if (sdata->wdev.cac_started) { chandef = sdata->vif.bss_conf.chanreq.oper; @@ -693,8 +689,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do fallthrough; default: - if (going_down) - drv_remove_interface(local, sdata); + if (!going_down) + break; + drv_remove_interface(local, sdata); + + /* Clear private driver data to prevent reuse */ + memset(sdata->vif.drv_priv, 0, local->hw.vif_data_size); } ieee80211_recalc_ps(local); @@ -703,7 +703,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_do wiphy_delayed_work_flush(local->hw.wiphy, &local->scan_work); if (local->open_count == 0) { - ieee80211_stop_device(local); + ieee80211_stop_device(local, false); /* no reconfiguring after stop! */ return; @@ -816,12 +816,6 @@ static void ieee80211_uninit(struct net_device *dev) ieee80211_teardown_sdata(IEEE80211_DEV_TO_SUB_IF(dev)); } -static void -ieee80211_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) -{ - dev_fetch_sw_netstats(stats, dev->tstats); -} - static int ieee80211_netdev_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { @@ -838,7 +832,6 @@ static const struct net_device_ops ieee80211_dataif_ops = { .ndo_start_xmit = ieee80211_subif_start_xmit, .ndo_set_rx_mode = ieee80211_set_multicast_list, .ndo_set_mac_address = ieee80211_change_mac, - .ndo_get_stats64 = ieee80211_get_stats64, .ndo_setup_tc = ieee80211_netdev_setup_tc, }; @@ -878,7 +871,6 @@ static const struct net_device_ops ieee80211_monitorif_ops = { .ndo_set_rx_mode = ieee80211_set_multicast_list, .ndo_set_mac_address = ieee80211_change_mac, .ndo_select_queue = ieee80211_monitor_select_queue, - .ndo_get_stats64 = ieee80211_get_stats64, }; static int ieee80211_netdev_fill_forward_path(struct net_device_path_ctx *ctx, @@ -946,7 +938,6 @@ static const struct net_device_ops ieee80211_dataif_8023_ops = { .ndo_start_xmit = ieee80211_subif_start_xmit_8023, .ndo_set_rx_mode = ieee80211_set_multicast_list, .ndo_set_mac_address = ieee80211_change_mac, - .ndo_get_stats64 = ieee80211_get_stats64, .ndo_fill_forward_path = ieee80211_netdev_fill_forward_path, .ndo_setup_tc = ieee80211_netdev_setup_tc, }; @@ -1446,7 +1437,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) drv_remove_interface(local, sdata); err_stop: if (!local->open_count) - drv_stop(local); + drv_stop(local, false); err_del_bss: sdata->bss = NULL; if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) @@ -1456,11 +1447,6 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up) return res; } -static void ieee80211_if_free(struct net_device *dev) -{ - free_percpu(dev->tstats); -} - static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); @@ -1468,7 +1454,6 @@ static void ieee80211_if_setup(struct net_device *dev) dev->priv_flags |= IFF_NO_QUEUE; dev->netdev_ops = &ieee80211_dataif_ops; dev->needs_free_netdev = true; - dev->priv_destructor = ieee80211_if_free; } static void ieee80211_iface_process_skb(struct ieee80211_local *local, @@ -1744,6 +1729,8 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, wiphy_work_init(&sdata->work, 
ieee80211_iface_work); wiphy_work_init(&sdata->activate_links_work, ieee80211_activate_links_work); + wiphy_delayed_work_init(&sdata->dfs_cac_timer_work, + ieee80211_dfs_cac_timer_work); switch (type) { case NL80211_IFTYPE_P2P_GO: @@ -2099,11 +2086,7 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, dev_net_set(ndev, wiphy_net(local->hw.wiphy)); - ndev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); - if (!ndev->tstats) { - free_netdev(ndev); - return -ENOMEM; - } + ndev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; ndev->needed_headroom = local->tx_headroom + 4*6 /* four MAC addresses */ @@ -2116,7 +2099,6 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name, ret = dev_alloc_name(ndev, ndev->name); if (ret < 0) { - ieee80211_if_free(ndev); free_netdev(ndev); return ret; } @@ -2361,3 +2343,26 @@ void ieee80211_vif_dec_num_mcast(struct ieee80211_sub_if_data *sdata) else if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) atomic_dec(&sdata->u.vlan.num_mcast_sta); } + +void ieee80211_vif_block_queues_csa(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + + if (ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA)) + return; + + ieee80211_stop_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_CSA); + sdata->csa_blocked_queues = true; +} + +void ieee80211_vif_unblock_queues_csa(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_local *local = sdata->local; + + if (sdata->csa_blocked_queues) { + ieee80211_wake_vif_queues(local, sdata, + IEEE80211_QUEUE_STOP_REASON_CSA); + sdata->csa_blocked_queues = false; + } +} diff --git a/net/mac80211/link.c b/net/mac80211/link.c index af0321408a97..1a211b8d4057 100644 --- a/net/mac80211/link.c +++ b/net/mac80211/link.c @@ -37,7 +37,7 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata, link_conf->link_id = link_id; link_conf->vif = &sdata->vif; - wiphy_work_init(&link->csa_finalize_work, + wiphy_work_init(&link->csa.finalize_work, ieee80211_csa_finalize_work); wiphy_work_init(&link->color_change_finalize_work, ieee80211_color_change_finalize_work); @@ -45,8 +45,6 @@ void ieee80211_link_init(struct ieee80211_sub_if_data *sdata, ieee80211_color_collision_detection_work); INIT_LIST_HEAD(&link->assigned_chanctx_list); INIT_LIST_HEAD(&link->reserved_chanctx_list); - wiphy_delayed_work_init(&link->dfs_cac_timer_work, - ieee80211_dfs_cac_timer_work); if (!deflink) { switch (sdata->vif.type) { @@ -74,7 +72,9 @@ void ieee80211_link_stop(struct ieee80211_link_data *link) cancel_delayed_work_sync(&link->color_collision_detect_work); wiphy_work_cancel(link->sdata->local->hw.wiphy, - &link->csa_finalize_work); + &link->color_change_finalize_work); + wiphy_work_cancel(link->sdata->local->hw.wiphy, + &link->csa.finalize_work); ieee80211_link_release_channel(link); } @@ -359,6 +359,18 @@ static int _ieee80211_set_active_links(struct ieee80211_sub_if_data *sdata, ieee80211_teardown_tdls_peers(link); __ieee80211_link_release_channel(link, true); + + /* + * If CSA is (still) active while the link is deactivated, + * just schedule the channel switch work for the time we + * had previously calculated, and we'll take the process + * from there. 
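+	 * For example (illustrative numbers): with csa_ie.count == 5 and a
+	 * 100 TU beacon interval, csa.time was set at CSA start to
+	 * now + 4 * 100 TU, roughly 410 ms (1 TU = 1024 usec), so only
+	 * the remaining delta to jiffies gets queued here.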
+ */ + if (link->conf->csa_active) + wiphy_delayed_work_queue(local->hw.wiphy, + &link->u.mgd.csa.switch_work, + link->u.mgd.csa.time - + jiffies); } list_for_each_entry(sta, &local->sta_list, list) { diff --git a/net/mac80211/main.c b/net/mac80211/main.c index 0965ad11ec74..a3104b6ea6f0 100644 --- a/net/mac80211/main.c +++ b/net/mac80211/main.c @@ -148,7 +148,7 @@ static u32 ieee80211_calc_hw_conf_chan(struct ieee80211_local *local, offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL; /* force it also for scanning, since drivers might config differently */ - if (offchannel_flag || local->scanning || + if (offchannel_flag || local->scanning || local->in_reconfig || !cfg80211_chandef_identical(&local->hw.conf.chandef, &chandef)) { local->hw.conf.chandef = chandef; changed |= IEEE80211_CONF_CHANGE_CHANNEL; @@ -337,6 +337,8 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, might_sleep(); + WARN_ON_ONCE(ieee80211_vif_is_mld(&sdata->vif)); + if (!changed || sdata->vif.type == NL80211_IFTYPE_AP_VLAN) return; @@ -369,7 +371,6 @@ void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata, if (changed & ~BSS_CHANGED_VIF_CFG_FLAGS) { u64 ch = changed & ~BSS_CHANGED_VIF_CFG_FLAGS; - /* FIXME: should be for each link */ trace_drv_link_info_changed(local, sdata, &sdata->vif.bss_conf, changed); if (local->ops->link_info_changed) @@ -1090,6 +1091,27 @@ static int ieee80211_init_cipher_suites(struct ieee80211_local *local) return 0; } +static bool +ieee80211_ifcomb_check(const struct ieee80211_iface_combination *c, int n_comb) +{ + int i, j; + + for (i = 0; i < n_comb; i++, c++) { + /* DFS is not supported with multi-channel combinations yet */ + if (c->radar_detect_widths && + c->num_different_channels > 1) + return false; + + /* mac80211 doesn't support more than one IBSS interface */ + for (j = 0; j < c->n_limits; j++) + if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) && + c->limits[j].max > 1) + return false; + } + + return true; +} + int ieee80211_register_hw(struct ieee80211_hw *hw) { struct ieee80211_local *local = hw_to_local(hw); @@ -1160,9 +1182,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) if (WARN_ON(!ieee80211_hw_check(hw, AP_LINK_PS))) return -EINVAL; - - if (WARN_ON(ieee80211_hw_check(hw, DEAUTH_NEED_MGD_TX_PREP))) - return -EINVAL; } #ifdef CONFIG_PM @@ -1179,17 +1198,20 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) if (comb->num_different_channels > 1) return -EINVAL; } - } else { - /* DFS is not supported with multi-channel combinations yet */ - for (i = 0; i < local->hw.wiphy->n_iface_combinations; i++) { - const struct ieee80211_iface_combination *comb; + } - comb = &local->hw.wiphy->iface_combinations[i]; + if (hw->wiphy->n_radio) { + for (i = 0; i < hw->wiphy->n_radio; i++) { + const struct wiphy_radio *radio = &hw->wiphy->radio[i]; - if (comb->radar_detect_widths && - comb->num_different_channels > 1) + if (!ieee80211_ifcomb_check(radio->iface_combinations, + radio->n_iface_combinations)) return -EINVAL; } + } else { + if (!ieee80211_ifcomb_check(hw->wiphy->iface_combinations, + hw->wiphy->n_iface_combinations)) + return -EINVAL; } /* Only HW csum features are currently compatible with mac80211 */ @@ -1319,18 +1341,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR); - /* mac80211 doesn't support more than one IBSS interface right now */ - for (i = 0; i < 
hw->wiphy->n_iface_combinations; i++) { - const struct ieee80211_iface_combination *c; - int j; - - c = &hw->wiphy->iface_combinations[i]; - - for (j = 0; j < c->n_limits; j++) - if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) && - c->limits[j].max > 1) - return -EINVAL; - } local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) + sizeof(void *) * channels, GFP_KERNEL); diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 6d4510221c98..f94e4be0be12 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -1312,7 +1312,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata, memset(¶ms, 0, sizeof(params)); err = ieee80211_parse_ch_switch_ie(sdata, elems, sband->band, vht_cap_info, &conn, - sdata->vif.addr, + sdata->vif.addr, false, &csa_ie); if (err < 0) return false; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index a5f2d3cfe60d..4779a18ab75d 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -606,11 +606,218 @@ static bool ieee80211_chandef_usable(struct ieee80211_sub_if_data *sdata, return true; } +static int ieee80211_chandef_num_subchans(const struct cfg80211_chan_def *c) +{ + if (c->width == NL80211_CHAN_WIDTH_80P80) + return 4 + 4; + + return nl80211_chan_width_to_mhz(c->width) / 20; +} + +static int ieee80211_chandef_num_widths(const struct cfg80211_chan_def *c) +{ + switch (c->width) { + case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_20_NOHT: + return 1; + case NL80211_CHAN_WIDTH_40: + return 2; + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_80: + return 3; + case NL80211_CHAN_WIDTH_160: + return 4; + case NL80211_CHAN_WIDTH_320: + return 5; + default: + WARN_ON(1); + return 0; + } +} + +VISIBLE_IF_MAC80211_KUNIT int +ieee80211_calc_chandef_subchan_offset(const struct cfg80211_chan_def *ap, + u8 n_partial_subchans) +{ + int n = ieee80211_chandef_num_subchans(ap); + struct cfg80211_chan_def tmp = *ap; + int offset = 0; + + /* + * Given a chandef (in this context, it's the AP's) and a number + * of subchannels that we want to look at ('n_partial_subchans'), + * calculate the offset in number of subchannels between the full + * and the subset with the desired width. 
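+	 * For instance (illustrative): a 160 MHz chandef covers
+	 * 160/20 = 8 subchannels, while 80+80 covers 4 + 4 = 8; an
+	 * 80 MHz subset of a 160 MHz AP channel therefore sits at
+	 * offset 0 (lower half) or 4 (upper half).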
+ */ + + /* same number of subchannels means no offset, obviously */ + if (n == n_partial_subchans) + return 0; + + /* don't WARN - misconfigured APs could cause this if their N > width */ + if (n < n_partial_subchans) + return 0; + + while (ieee80211_chandef_num_subchans(&tmp) > n_partial_subchans) { + u32 prev = tmp.center_freq1; + + ieee80211_chandef_downgrade(&tmp, NULL); + + /* + * if center_freq moved up, half the original channels + * are gone now but were below, so increase offset + */ + if (prev < tmp.center_freq1) + offset += ieee80211_chandef_num_subchans(&tmp); + } + + /* + * 80+80 with secondary 80 below primary - four subchannels for it + * (we cannot downgrade *to* 80+80, so no need to consider 'tmp') + */ + if (ap->width == NL80211_CHAN_WIDTH_80P80 && + ap->center_freq2 < ap->center_freq1) + offset += 4; + + return offset; +} +EXPORT_SYMBOL_IF_MAC80211_KUNIT(ieee80211_calc_chandef_subchan_offset); + +VISIBLE_IF_MAC80211_KUNIT void +ieee80211_rearrange_tpe_psd(struct ieee80211_parsed_tpe_psd *psd, + const struct cfg80211_chan_def *ap, + const struct cfg80211_chan_def *used) +{ + u8 needed = ieee80211_chandef_num_subchans(used); + u8 have = ieee80211_chandef_num_subchans(ap); + u8 tmp[IEEE80211_TPE_PSD_ENTRIES_320MHZ]; + u8 offset; + + if (!psd->valid) + return; + + /* if N is zero, all defaults were used, no point in rearranging */ + if (!psd->n) + goto out; + + BUILD_BUG_ON(sizeof(tmp) != sizeof(psd->power)); + + /* + * This assumes that 'N' is consistent with the HE channel, as + * it should be (otherwise the AP is broken). + * + * In psd->power we have values in the order 0..N, 0..K, where + * N+K should cover the entire channel per 'ap', but even if it + * doesn't then we've pre-filled 'unlimited' as defaults. + * + * But this is all the wrong order, we want to have them in the + * order of the 'used' channel. + * + * So for example, we could have a 320 MHz EHT AP, which has the + * HE channel as 80 MHz (e.g. due to puncturing, which doesn't + * seem to be considered for the TPE), as follows: + * + * EHT 320: | | | | | | | | | | | | | | | | | + * HE 80: | | | | | + * used 160: | | | | | | | | | + * + * N entries: |--|--|--|--| + * K entries: |--|--|--|--|--|--|--|--| |--|--|--|--| + * power idx: 4 5 6 7 8 9 10 11 0 1 2 3 12 13 14 15 + * full chan: 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 + * used chan: 0 1 2 3 4 5 6 7 + * + * The idx in the power array ('power idx') is like this since it + * comes directly from the element's N and K entries in their + * element order, and those are this way for HE compatibility. + * + * Rearrange them as desired here, first by putting them into the + * 'full chan' order, and then selecting the necessary subset for + * the 'used chan'. 
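+	 * In the example above, the offset of the N entries is what
+	 * ieee80211_calc_chandef_subchan_offset(ap, 4) computes: the
+	 * downgrade walk goes 320 -> 160 -> 80; the first step moves
+	 * center_freq1 up (the HE 80 lies in the upper 160), adding
+	 * the 8 dropped subchannels, the second moves it down, adding
+	 * nothing, hence offset 8, which is where the N entries land
+	 * in the 'full chan' row.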
+ */ + + /* first reorder according to AP channel */ + offset = ieee80211_calc_chandef_subchan_offset(ap, psd->n); + for (int i = 0; i < have; i++) { + if (i < offset) + tmp[i] = psd->power[i + psd->n]; + else if (i < offset + psd->n) + tmp[i] = psd->power[i - offset]; + else + tmp[i] = psd->power[i]; + } + + /* + * and then select the subset for the used channel + * (set everything to defaults first in case a driver is confused) + */ + memset(psd->power, IEEE80211_TPE_PSD_NO_LIMIT, sizeof(psd->power)); + offset = ieee80211_calc_chandef_subchan_offset(ap, needed); + for (int i = 0; i < needed; i++) + psd->power[i] = tmp[offset + i]; + +out: + /* limit, but don't lie if there are defaults in the data */ + if (needed < psd->count) + psd->count = needed; +} +EXPORT_SYMBOL_IF_MAC80211_KUNIT(ieee80211_rearrange_tpe_psd); + +static void ieee80211_rearrange_tpe(struct ieee80211_parsed_tpe *tpe, + const struct cfg80211_chan_def *ap, + const struct cfg80211_chan_def *used) +{ + /* ignore this completely for narrow/invalid channels */ + if (!ieee80211_chandef_num_subchans(ap) || + !ieee80211_chandef_num_subchans(used)) { + ieee80211_clear_tpe(tpe); + return; + } + + for (int i = 0; i < 2; i++) { + int needed_pwr_count; + + ieee80211_rearrange_tpe_psd(&tpe->psd_local[i], ap, used); + ieee80211_rearrange_tpe_psd(&tpe->psd_reg_client[i], ap, used); + + /* limit this to the widths we actually need */ + needed_pwr_count = ieee80211_chandef_num_widths(used); + if (needed_pwr_count < tpe->max_local[i].count) + tpe->max_local[i].count = needed_pwr_count; + if (needed_pwr_count < tpe->max_reg_client[i].count) + tpe->max_reg_client[i].count = needed_pwr_count; + } +} + +/* + * The AP part of the channel request is used to distinguish settings + * to the device used for wider bandwidth OFDMA. This is used in the + * channel context code to assign two channel contexts even if they're + * both for the same channel, if the AP bandwidths are incompatible. + * If not EHT (or driver override) then ap.chan == NULL indicates that + * there's no wider BW OFDMA used. 
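+ * For example (illustrative): an EHT STA that regulatory rules limit
+ * to a 160 MHz operating width on a 320 MHz AP keeps ap as the full
+ * 320 MHz chandef while oper is 160 MHz; a non-EHT connection on the
+ * same AP leaves ap.chan == NULL.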
+ */ +static void ieee80211_set_chanreq_ap(struct ieee80211_sub_if_data *sdata, + struct ieee80211_chan_req *chanreq, + struct ieee80211_conn_settings *conn, + struct cfg80211_chan_def *ap_chandef) +{ + chanreq->ap.chan = NULL; + + if (conn->mode < IEEE80211_CONN_MODE_EHT) + return; + if (sdata->vif.driver_flags & IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW) + return; + + chanreq->ap = *ap_chandef; +} + static struct ieee802_11_elems * ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata, struct ieee80211_conn_settings *conn, struct cfg80211_bss *cbss, int link_id, - struct ieee80211_chan_req *chanreq) + struct ieee80211_chan_req *chanreq, + struct cfg80211_chan_def *ap_chandef) { const struct cfg80211_bss_ies *ies = rcu_dereference(cbss->ies); struct ieee80211_bss *bss = (void *)cbss->priv; @@ -623,7 +830,6 @@ ieee80211_determine_chan_mode(struct ieee80211_sub_if_data *sdata, }; struct ieee802_11_elems *elems; struct ieee80211_supported_band *sband; - struct cfg80211_chan_def ap_chandef; enum ieee80211_conn_mode ap_mode; int ret; @@ -634,7 +840,7 @@ again: return ERR_PTR(-ENOMEM); ap_mode = ieee80211_determine_ap_chan(sdata, channel, bss->vht_cap_info, - elems, false, conn, &ap_chandef); + elems, false, conn, ap_chandef); /* this should be impossible since parsing depends on our mode */ if (WARN_ON(ap_mode > conn->mode)) { @@ -701,14 +907,9 @@ again: break; } - chanreq->oper = ap_chandef; + chanreq->oper = *ap_chandef; - /* wider-bandwidth OFDMA is only done in EHT */ - if (conn->mode >= IEEE80211_CONN_MODE_EHT && - !(sdata->vif.driver_flags & IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW)) - chanreq->ap = ap_chandef; - else - chanreq->ap.chan = NULL; + ieee80211_set_chanreq_ap(sdata, chanreq, conn, ap_chandef); while (!ieee80211_chandef_usable(sdata, &chanreq->oper, IEEE80211_CHAN_DISABLED)) { @@ -738,7 +939,7 @@ again: IEEE80211_CONN_BW_LIMIT_160); } - if (chanreq->oper.width != ap_chandef.width || ap_mode != conn->mode) + if (chanreq->oper.width != ap_chandef->width || ap_mode != conn->mode) sdata_info(sdata, "regulatory prevented using AP config, downgraded\n"); @@ -790,6 +991,7 @@ static int ieee80211_config_bw(struct ieee80211_link_data *link, struct ieee80211_channel *channel = link->conf->chanreq.oper.chan; struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_chan_req chanreq = {}; + struct cfg80211_chan_def ap_chandef; enum ieee80211_conn_mode ap_mode; u32 vht_cap_info = 0; u16 ht_opmode; @@ -805,7 +1007,7 @@ static int ieee80211_config_bw(struct ieee80211_link_data *link, ap_mode = ieee80211_determine_ap_chan(sdata, channel, vht_cap_info, elems, true, &link->u.mgd.conn, - &chanreq.ap); + &ap_chandef); if (ap_mode != link->u.mgd.conn.mode) { link_info(link, @@ -815,10 +1017,9 @@ static int ieee80211_config_bw(struct ieee80211_link_data *link, return -EINVAL; } - chanreq.oper = chanreq.ap; - if (link->u.mgd.conn.mode < IEEE80211_CONN_MODE_EHT || - sdata->vif.driver_flags & IEEE80211_VIF_IGNORE_OFDMA_WIDER_BW) - chanreq.ap.chan = NULL; + chanreq.oper = ap_chandef; + ieee80211_set_chanreq_ap(sdata, &chanreq, &link->u.mgd.conn, + &ap_chandef); /* * if HT operation mode changed store the new one - @@ -843,6 +1044,16 @@ static int ieee80211_config_bw(struct ieee80211_link_data *link, ieee80211_min_bw_limit_from_chandef(&chanreq.oper)) ieee80211_chandef_downgrade(&chanreq.oper, NULL); + if (ap_chandef.chan->band == NL80211_BAND_6GHZ && + link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE) { + ieee80211_rearrange_tpe(&elems->tpe, &ap_chandef, + &chanreq.oper); + if 
(memcmp(&link->conf->tpe, &elems->tpe, sizeof(elems->tpe))) { + link->conf->tpe = elems->tpe; + *changed |= BSS_CHANGED_TPE; + } + } + if (ieee80211_chanreq_identical(&chanreq, &link->conf->chanreq)) return 0; @@ -1862,12 +2073,12 @@ void ieee80211_send_4addr_nullfunc(struct ieee80211_local *local, } /* spectrum management related things */ -static void ieee80211_chswitch_work(struct wiphy *wiphy, - struct wiphy_work *work) +static void ieee80211_csa_switch_work(struct wiphy *wiphy, + struct wiphy_work *work) { struct ieee80211_link_data *link = container_of(work, struct ieee80211_link_data, - u.mgd.chswitch_work.work); + u.mgd.csa.switch_work.work); struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; @@ -1884,6 +2095,18 @@ static void ieee80211_chswitch_work(struct wiphy *wiphy, if (!link->conf->csa_active) return; + /* + * If the link isn't active (now), we cannot wait for beacons, won't + * have a reserved chanctx, etc. Just switch over the chandef and + * update cfg80211 directly. + */ + if (!ieee80211_vif_link_active(&sdata->vif, link->link_id)) { + link->conf->chanreq = link->csa.chanreq; + cfg80211_ch_switch_notify(sdata->dev, &link->csa.chanreq.oper, + link->link_id); + return; + } + /* * using reservation isn't immediate as it may be deferred until later * with multi-vif. once reservation is complete it will re-schedule the @@ -1902,9 +2125,9 @@ static void ieee80211_chswitch_work(struct wiphy *wiphy, ret = ieee80211_link_use_reserved_context(link); if (ret) { - sdata_info(sdata, - "failed to use reserved channel context, disconnecting (err=%d)\n", - ret); + link_info(link, + "failed to use reserved channel context, disconnecting (err=%d)\n", + ret); wiphy_work_queue(sdata->local->hw.wiphy, &ifmgd->csa_connection_drop_work); } @@ -1912,15 +2135,29 @@ static void ieee80211_chswitch_work(struct wiphy *wiphy, } if (!ieee80211_chanreq_identical(&link->conf->chanreq, - &link->csa_chanreq)) { - sdata_info(sdata, - "failed to finalize channel switch, disconnecting\n"); + &link->csa.chanreq)) { + link_info(link, + "failed to finalize channel switch, disconnecting\n"); wiphy_work_queue(sdata->local->hw.wiphy, &ifmgd->csa_connection_drop_work); return; } - link->u.mgd.csa_waiting_bcn = true; + link->u.mgd.csa.waiting_bcn = true; + + /* apply new TPE restrictions immediately on the new channel */ + if (link->u.mgd.csa.ap_chandef.chan->band == NL80211_BAND_6GHZ && + link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE) { + ieee80211_rearrange_tpe(&link->u.mgd.csa.tpe, + &link->u.mgd.csa.ap_chandef, + &link->conf->chanreq.oper); + if (memcmp(&link->conf->tpe, &link->u.mgd.csa.tpe, + sizeof(link->u.mgd.csa.tpe))) { + link->conf->tpe = link->u.mgd.csa.tpe; + ieee80211_link_info_change_notify(sdata, link, + BSS_CHANGED_TPE); + } + } ieee80211_sta_reset_beacon_monitor(sdata); ieee80211_sta_reset_conn_monitor(sdata); @@ -1929,7 +2166,6 @@ static void ieee80211_chswitch_work(struct wiphy *wiphy, static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link) { struct ieee80211_sub_if_data *sdata = link->sdata; - struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; int ret; @@ -1937,26 +2173,22 @@ static void ieee80211_chswitch_post_beacon(struct ieee80211_link_data *link) WARN_ON(!link->conf->csa_active); - if (sdata->csa_blocked_queues) { - ieee80211_wake_vif_queues(local, sdata, - IEEE80211_QUEUE_STOP_REASON_CSA); - sdata->csa_blocked_queues = false; 
- } + ieee80211_vif_unblock_queues_csa(sdata); link->conf->csa_active = false; - link->u.mgd.csa_blocked_tx = false; - link->u.mgd.csa_waiting_bcn = false; + link->u.mgd.csa.blocked_tx = false; + link->u.mgd.csa.waiting_bcn = false; ret = drv_post_channel_switch(link); if (ret) { - sdata_info(sdata, - "driver post channel switch failed, disconnecting\n"); + link_info(link, + "driver post channel switch failed, disconnecting\n"); wiphy_work_queue(sdata->local->hw.wiphy, &ifmgd->csa_connection_drop_work); return; } - cfg80211_ch_switch_notify(sdata->dev, &link->reserved.oper, + cfg80211_ch_switch_notify(sdata->dev, &link->conf->chanreq.oper, link->link_id); } @@ -1971,7 +2203,8 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success, if (!success) { sdata_info(sdata, - "driver channel switch failed, disconnecting\n"); + "driver channel switch failed (link %d), disconnecting\n", + link_id); wiphy_work_queue(sdata->local->hw.wiphy, &sdata->u.mgd.csa_connection_drop_work); } else { @@ -1984,7 +2217,7 @@ void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success, } wiphy_delayed_work_queue(sdata->local->hw.wiphy, - &link->u.mgd.chswitch_work, 0); + &link->u.mgd.csa.switch_work, 0); } rcu_read_unlock(); @@ -2004,126 +2237,308 @@ ieee80211_sta_abort_chanswitch(struct ieee80211_link_data *link) ieee80211_link_unreserve_chanctx(link); - if (sdata->csa_blocked_queues) { - ieee80211_wake_vif_queues(local, sdata, - IEEE80211_QUEUE_STOP_REASON_CSA); - sdata->csa_blocked_queues = false; - } + ieee80211_vif_unblock_queues_csa(sdata); link->conf->csa_active = false; - link->u.mgd.csa_blocked_tx = false; + link->u.mgd.csa.blocked_tx = false; drv_abort_channel_switch(link); } +struct sta_csa_rnr_iter_data { + struct ieee80211_link_data *link; + struct ieee80211_channel *chan; + u8 mld_id; +}; + +static enum cfg80211_rnr_iter_ret +ieee80211_sta_csa_rnr_iter(void *_data, u8 type, + const struct ieee80211_neighbor_ap_info *info, + const u8 *tbtt_info, u8 tbtt_info_len) +{ + struct sta_csa_rnr_iter_data *data = _data; + struct ieee80211_link_data *link = data->link; + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + const struct ieee80211_tbtt_info_ge_11 *ti; + enum nl80211_band band; + unsigned int center_freq; + int link_id; + + if (type != IEEE80211_TBTT_INFO_TYPE_TBTT) + return RNR_ITER_CONTINUE; + + if (tbtt_info_len < sizeof(*ti)) + return RNR_ITER_CONTINUE; + + ti = (const void *)tbtt_info; + + if (ti->mld_params.mld_id != data->mld_id) + return RNR_ITER_CONTINUE; + + link_id = le16_get_bits(ti->mld_params.params, + IEEE80211_RNR_MLD_PARAMS_LINK_ID); + if (link_id != data->link->link_id) + return RNR_ITER_CONTINUE; + + /* we found the entry for our link! */ + + /* this AP is confused, it had this right before ... 
just disconnect */ + if (!ieee80211_operating_class_to_band(info->op_class, &band)) { + link_info(link, + "AP now has invalid operating class in RNR, disconnect\n"); + wiphy_work_queue(sdata->local->hw.wiphy, + &ifmgd->csa_connection_drop_work); + return RNR_ITER_BREAK; + } + + center_freq = ieee80211_channel_to_frequency(info->channel, band); + data->chan = ieee80211_get_channel(sdata->local->hw.wiphy, center_freq); + + return RNR_ITER_BREAK; +} + +static void +ieee80211_sta_other_link_csa_disappeared(struct ieee80211_link_data *link, + struct ieee802_11_elems *elems) +{ + struct ieee80211_sub_if_data *sdata = link->sdata; + struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; + struct sta_csa_rnr_iter_data data = { + .link = link, + }; + + /* + * If we get here, we see a beacon from another link without + * CSA still being reported for it, so now we have to check + * if the CSA was aborted or completed. This may not even be + * perfectly possible if the CSA was only done for changing + * the puncturing, but in that case if the link is inactive + * we don't really care, and if it's an active link (or when + * it's activated later) we'll get a beacon and adjust. + */ + + if (WARN_ON(!elems->ml_basic)) + return; + + data.mld_id = ieee80211_mle_get_mld_id((const void *)elems->ml_basic); + + /* + * So in order to do this, iterate the RNR element(s) and see + * what channel is reported now. + */ + cfg80211_iter_rnr(elems->ie_start, elems->total_len, + ieee80211_sta_csa_rnr_iter, &data); + + if (!data.chan) { + link_info(link, + "couldn't find (valid) channel in RNR for CSA, disconnect\n"); + wiphy_work_queue(sdata->local->hw.wiphy, + &ifmgd->csa_connection_drop_work); + return; + } + + /* + * If it doesn't match the CSA, then assume it aborted. This + * may erroneously detect that it was _not_ aborted when it + * was in fact aborted, but only changed the bandwidth or the + * puncturing configuration, but we don't have enough data to + * detect that.
+ */ + if (data.chan != link->csa.chanreq.oper.chan) + ieee80211_sta_abort_chanswitch(link); +} + +enum ieee80211_csa_source { + IEEE80211_CSA_SOURCE_BEACON, + IEEE80211_CSA_SOURCE_OTHER_LINK, + IEEE80211_CSA_SOURCE_PROT_ACTION, + IEEE80211_CSA_SOURCE_UNPROT_ACTION, +}; + static void ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link, u64 timestamp, u32 device_timestamp, - struct ieee802_11_elems *elems, - bool beacon) + struct ieee802_11_elems *full_elems, + struct ieee802_11_elems *csa_elems, + enum ieee80211_csa_source source) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - struct cfg80211_bss *cbss = link->conf->bss; + struct ieee80211_chanctx *chanctx = NULL; struct ieee80211_chanctx_conf *conf; - struct ieee80211_chanctx *chanctx; - enum nl80211_band current_band; - struct ieee80211_csa_ie csa_ie; + struct ieee80211_csa_ie csa_ie = {}; struct ieee80211_channel_switch ch_switch = { .link_id = link->link_id, + .timestamp = timestamp, + .device_timestamp = device_timestamp, }; - struct ieee80211_bss *bss; - unsigned long timeout; + unsigned long now; int res; lockdep_assert_wiphy(local->hw.wiphy); - if (!cbss) - return; + if (csa_elems) { + struct cfg80211_bss *cbss = link->conf->bss; + enum nl80211_band current_band; + struct ieee80211_bss *bss; - current_band = cbss->channel->band; - bss = (void *)cbss->priv; - res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band, - bss->vht_cap_info, - &link->u.mgd.conn, - link->u.mgd.bssid, &csa_ie); + if (WARN_ON(!cbss)) + return; - if (!res) { - ch_switch.timestamp = timestamp; - ch_switch.device_timestamp = device_timestamp; - ch_switch.block_tx = csa_ie.mode; - ch_switch.chandef = csa_ie.chanreq.oper; - ch_switch.count = csa_ie.count; - ch_switch.delay = csa_ie.max_switch_time; + current_band = cbss->channel->band; + bss = (void *)cbss->priv; + + res = ieee80211_parse_ch_switch_ie(sdata, csa_elems, + current_band, + bss->vht_cap_info, + &link->u.mgd.conn, + link->u.mgd.bssid, + source == IEEE80211_CSA_SOURCE_UNPROT_ACTION, + &csa_ie); + if (res == 0) { + ch_switch.block_tx = csa_ie.mode; + ch_switch.chandef = csa_ie.chanreq.oper; + ch_switch.count = csa_ie.count; + ch_switch.delay = csa_ie.max_switch_time; + } + + link->u.mgd.csa.tpe = csa_elems->csa_tpe; + } else { + /* + * If there was no per-STA profile for this link, we + * get called with csa_elems == NULL. This of course means + * there are no CSA elements, so set res=1 indicating + * no more CSA. + */ + res = 1; } - if (res < 0) + if (res < 0) { + /* ignore this case, not a protected frame */ + if (source == IEEE80211_CSA_SOURCE_UNPROT_ACTION) + return; goto drop_connection; + } if (link->conf->csa_active) { - /* already processing - disregard action frames */ - if (!beacon) + switch (source) { + case IEEE80211_CSA_SOURCE_PROT_ACTION: + case IEEE80211_CSA_SOURCE_UNPROT_ACTION: + /* already processing - disregard action frames */ return; + case IEEE80211_CSA_SOURCE_BEACON: + if (link->u.mgd.csa.waiting_bcn) { + ieee80211_chswitch_post_beacon(link); + /* + * If the CSA is still present after the switch + * we need to consider it as a new CSA (possibly + * to self). This happens by not returning here + * so we'll get to the check below. 
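+				 * (For reference: res == 0 here means the
+				 * beacon still carries CSA elements, res > 0
+				 * means they are gone again.)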
+ */ + } else if (res) { + ieee80211_sta_abort_chanswitch(link); + return; + } else { + drv_channel_switch_rx_beacon(sdata, &ch_switch); + return; + } + break; + case IEEE80211_CSA_SOURCE_OTHER_LINK: + /* active link: we want to see the beacon to continue */ + if (ieee80211_vif_link_active(&sdata->vif, + link->link_id)) + return; - if (link->u.mgd.csa_waiting_bcn) { - ieee80211_chswitch_post_beacon(link); - /* - * If the CSA IE is still present in the beacon after - * the switch, we need to consider it as a new CSA - * (possibly to self) - this happens by not returning - * here so we'll get to the check below. - */ - } else if (res) { - ieee80211_sta_abort_chanswitch(link); - return; - } else { - drv_channel_switch_rx_beacon(sdata, &ch_switch); + /* switch work ran, so just complete the process */ + if (link->u.mgd.csa.waiting_bcn) { + ieee80211_chswitch_post_beacon(link); + /* + * If the CSA is still present after the switch + * we need to consider it as a new CSA (possibly + * to self). This happens by not returning here + * so we'll get to the check below. + */ + break; + } + + /* link still has CSA but we already know, do nothing */ + if (!res) + return; + + /* check in the RNR if the CSA aborted */ + ieee80211_sta_other_link_csa_disappeared(link, + full_elems); return; } } - /* nothing to do at all - no active CSA nor a new one */ - if (res) + /* no active CSA nor a new one */ + if (res) { + /* + * However, we may have stopped queues when receiving a public + * action frame that couldn't be protected, if it had the quiet + * bit set. This is a trade-off, we want to be quiet as soon as + * possible, but also don't trust the public action frame much, + * as it can't be protected. + */ + if (unlikely(link->u.mgd.csa.blocked_tx)) { + link->u.mgd.csa.blocked_tx = false; + ieee80211_vif_unblock_queues_csa(sdata); + } return; + } + + /* + * We don't really trust public action frames, but block queues (go to + * quiet mode) for them anyway, we should get a beacon soon to either + * know what the CSA really is, or figure out the public action frame + * was actually an attack. 
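+	 * For example, a forged unprotected ECSA announcement with the
+	 * mode bit set can at worst quiet our TX until the next genuine
+	 * beacon either confirms the switch or takes the 'no CSA' path
+	 * above, which clears csa.blocked_tx and wakes the queues again.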
+ */ + if (source == IEEE80211_CSA_SOURCE_UNPROT_ACTION) { + if (csa_ie.mode) { + link->u.mgd.csa.blocked_tx = true; + ieee80211_vif_block_queues_csa(sdata); + } + return; + } if (link->conf->chanreq.oper.chan->band != csa_ie.chanreq.oper.chan->band) { - sdata_info(sdata, - "AP %pM switches to different band (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n", - link->u.mgd.bssid, - csa_ie.chanreq.oper.chan->center_freq, - csa_ie.chanreq.oper.width, - csa_ie.chanreq.oper.center_freq1, - csa_ie.chanreq.oper.center_freq2); + link_info(link, + "AP %pM switches to different band (%d MHz, width:%d, CF1/2: %d/%d MHz), disconnecting\n", + link->u.mgd.bssid, + csa_ie.chanreq.oper.chan->center_freq, + csa_ie.chanreq.oper.width, + csa_ie.chanreq.oper.center_freq1, + csa_ie.chanreq.oper.center_freq2); goto drop_connection; } if (!cfg80211_chandef_usable(local->hw.wiphy, &csa_ie.chanreq.oper, IEEE80211_CHAN_DISABLED)) { - sdata_info(sdata, - "AP %pM switches to unsupported channel " - "(%d.%03d MHz, width:%d, CF1/2: %d.%03d/%d MHz), " - "disconnecting\n", - link->u.mgd.bssid, - csa_ie.chanreq.oper.chan->center_freq, - csa_ie.chanreq.oper.chan->freq_offset, - csa_ie.chanreq.oper.width, - csa_ie.chanreq.oper.center_freq1, - csa_ie.chanreq.oper.freq1_offset, - csa_ie.chanreq.oper.center_freq2); + link_info(link, + "AP %pM switches to unsupported channel (%d.%03d MHz, width:%d, CF1/2: %d.%03d/%d MHz), disconnecting\n", + link->u.mgd.bssid, + csa_ie.chanreq.oper.chan->center_freq, + csa_ie.chanreq.oper.chan->freq_offset, + csa_ie.chanreq.oper.width, + csa_ie.chanreq.oper.center_freq1, + csa_ie.chanreq.oper.freq1_offset, + csa_ie.chanreq.oper.center_freq2); goto drop_connection; } if (cfg80211_chandef_identical(&csa_ie.chanreq.oper, &link->conf->chanreq.oper) && - (!csa_ie.mode || !beacon)) { - if (link->u.mgd.csa_ignored_same_chan) + (!csa_ie.mode || source != IEEE80211_CSA_SOURCE_BEACON)) { + if (link->u.mgd.csa.ignored_same_chan) return; - sdata_info(sdata, - "AP %pM tries to chanswitch to same channel, ignore\n", - link->u.mgd.bssid); - link->u.mgd.csa_ignored_same_chan = true; + link_info(link, + "AP %pM tries to chanswitch to same channel, ignore\n", + link->u.mgd.bssid); + link->u.mgd.csa.ignored_same_chan = true; return; } @@ -2138,64 +2553,78 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link, conf = rcu_dereference_protected(link->conf->chanctx_conf, lockdep_is_held(&local->hw.wiphy->mtx)); - if (!conf) { - sdata_info(sdata, - "no channel context assigned to vif?, disconnecting\n"); + if (ieee80211_vif_link_active(&sdata->vif, link->link_id) && !conf) { + link_info(link, + "no channel context assigned to vif?, disconnecting\n"); goto drop_connection; } - chanctx = container_of(conf, struct ieee80211_chanctx, conf); + if (conf) + chanctx = container_of(conf, struct ieee80211_chanctx, conf); if (!ieee80211_hw_check(&local->hw, CHANCTX_STA_CSA)) { - sdata_info(sdata, - "driver doesn't support chan-switch with channel contexts\n"); + link_info(link, + "driver doesn't support chan-switch with channel contexts\n"); goto drop_connection; } if (drv_pre_channel_switch(sdata, &ch_switch)) { - sdata_info(sdata, - "preparing for channel switch failed, disconnecting\n"); + link_info(link, + "preparing for channel switch failed, disconnecting\n"); goto drop_connection; } - res = ieee80211_link_reserve_chanctx(link, &csa_ie.chanreq, - chanctx->mode, false); - if (res) { - sdata_info(sdata, - "failed to reserve channel context for channel switch, disconnecting (err=%d)\n", - res); - goto 
drop_connection; + link->u.mgd.csa.ap_chandef = csa_ie.chanreq.ap; + + link->csa.chanreq.oper = csa_ie.chanreq.oper; + ieee80211_set_chanreq_ap(sdata, &link->csa.chanreq, &link->u.mgd.conn, + &csa_ie.chanreq.ap); + + if (chanctx) { + res = ieee80211_link_reserve_chanctx(link, &link->csa.chanreq, + chanctx->mode, false); + if (res) { + link_info(link, + "failed to reserve channel context for channel switch, disconnecting (err=%d)\n", + res); + goto drop_connection; + } } link->conf->csa_active = true; - link->csa_chanreq = csa_ie.chanreq; - link->u.mgd.csa_ignored_same_chan = false; + link->u.mgd.csa.ignored_same_chan = false; link->u.mgd.beacon_crc_valid = false; - link->u.mgd.csa_blocked_tx = csa_ie.mode; + link->u.mgd.csa.blocked_tx = csa_ie.mode; - if (csa_ie.mode && - !ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA)) { - ieee80211_stop_vif_queues(local, sdata, - IEEE80211_QUEUE_STOP_REASON_CSA); - sdata->csa_blocked_queues = true; - } + if (csa_ie.mode) + ieee80211_vif_block_queues_csa(sdata); cfg80211_ch_switch_started_notify(sdata->dev, &csa_ie.chanreq.oper, link->link_id, csa_ie.count, csa_ie.mode); - if (local->ops->channel_switch) { - /* use driver's channel switch callback */ + /* we may have to handle timeout for deactivated link in software */ + now = jiffies; + link->u.mgd.csa.time = now + + TU_TO_JIFFIES((max_t(int, csa_ie.count, 1) - 1) * + link->conf->beacon_int); + + if (ieee80211_vif_link_active(&sdata->vif, link->link_id) && + local->ops->channel_switch) { + /* + * Use driver's channel switch callback, the driver will + * later call ieee80211_chswitch_done(). It may deactivate + * the link as well, we handle that elsewhere and queue + * the csa.switch_work for the calculated time then. + */ drv_channel_switch(local, sdata, &ch_switch); return; } /* channel switch handled in software */ - timeout = TU_TO_JIFFIES((max_t(int, csa_ie.count, 1) - 1) * - cbss->beacon_interval); wiphy_delayed_work_queue(local->hw.wiphy, - &link->u.mgd.chswitch_work, - timeout); + &link->u.mgd.csa.switch_work, + link->u.mgd.csa.time - now); return; drop_connection: /* @@ -2206,7 +2635,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_link_data *link, * reset when the disconnection worker runs. 
*/ link->conf->csa_active = true; - link->u.mgd.csa_blocked_tx = csa_ie.mode; + link->u.mgd.csa.blocked_tx = csa_ie.mode; sdata->csa_blocked_queues = csa_ie.mode && !ieee80211_hw_check(&local->hw, HANDLES_QUIET_CSA); @@ -2602,16 +3031,15 @@ void ieee80211_dynamic_ps_timer(struct timer_list *t) void ieee80211_dfs_cac_timer_work(struct wiphy *wiphy, struct wiphy_work *work) { - struct ieee80211_link_data *link = - container_of(work, struct ieee80211_link_data, + struct ieee80211_sub_if_data *sdata = + container_of(work, struct ieee80211_sub_if_data, dfs_cac_timer_work.work); - struct cfg80211_chan_def chandef = link->conf->chanreq.oper; - struct ieee80211_sub_if_data *sdata = link->sdata; + struct cfg80211_chan_def chandef = sdata->vif.bss_conf.chanreq.oper; lockdep_assert_wiphy(sdata->local->hw.wiphy); if (sdata->wdev.cac_started) { - ieee80211_link_release_channel(link); + ieee80211_link_release_channel(&sdata->deflink); cfg80211_cac_event(sdata->dev, &chandef, NL80211_RADAR_CAC_FINISHED, GFP_KERNEL); @@ -3093,6 +3521,8 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, u64 changed = 0; struct ieee80211_prep_tx_info info = { .subtype = stype, + .was_assoc = true, + .link_id = ffs(sdata->vif.active_links) - 1, }; lockdep_assert_wiphy(local->hw.wiphy); @@ -3141,29 +3571,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, /* deauthenticate/disassociate now */ if (tx || frame_buf) { - /* - * In multi channel scenarios guarantee that the virtual - * interface is granted immediate airtime to transmit the - * deauthentication frame by calling mgd_prepare_tx, if the - * driver requested so. - */ - if (ieee80211_hw_check(&local->hw, DEAUTH_NEED_MGD_TX_PREP)) { - for (link_id = 0; link_id < ARRAY_SIZE(sdata->link); - link_id++) { - struct ieee80211_link_data *link; - - link = sdata_dereference(sdata->link[link_id], - sdata); - if (!link) - continue; - if (link->u.mgd.have_beacon) - break; - } - if (link_id == IEEE80211_MLD_MAX_NUM_LINKS) { - info.link_id = ffs(sdata->vif.active_links) - 1; - drv_mgd_prepare_tx(sdata->local, sdata, &info); - } - } + drv_mgd_prepare_tx(sdata->local, sdata, &info); ieee80211_send_deauth_disassoc(sdata, sdata->vif.cfg.ap_addr, sdata->vif.cfg.ap_addr, stype, @@ -3260,14 +3668,10 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, } sdata->vif.bss_conf.csa_active = false; - sdata->deflink.u.mgd.csa_blocked_tx = false; - sdata->deflink.u.mgd.csa_waiting_bcn = false; - sdata->deflink.u.mgd.csa_ignored_same_chan = false; - if (sdata->csa_blocked_queues) { - ieee80211_wake_vif_queues(local, sdata, - IEEE80211_QUEUE_STOP_REASON_CSA); - sdata->csa_blocked_queues = false; - } + sdata->deflink.u.mgd.csa.blocked_tx = false; + sdata->deflink.u.mgd.csa.waiting_bcn = false; + sdata->deflink.u.mgd.csa.ignored_same_chan = false; + ieee80211_vif_unblock_queues_csa(sdata); /* existing TX TSPEC sessions no longer exist */ memset(ifmgd->tx_tspec, 0, sizeof(ifmgd->tx_tspec)); @@ -3275,9 +3679,7 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, sdata->vif.bss_conf.power_type = IEEE80211_REG_UNSET_AP; sdata->vif.bss_conf.pwr_reduction = 0; - sdata->vif.bss_conf.tx_pwr_env_num = 0; - memset(sdata->vif.bss_conf.tx_pwr_env, 0, - sizeof(sdata->vif.bss_conf.tx_pwr_env)); + ieee80211_clear_tpe(&sdata->vif.bss_conf.tpe); sdata->vif.cfg.eml_cap = 0; sdata->vif.cfg.eml_med_sync_delay = 0; @@ -3287,8 +3689,17 @@ static void ieee80211_set_disassoc(struct ieee80211_sub_if_data *sdata, 
sizeof(sdata->u.mgd.ttlm_info)); wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work); + memset(&sdata->vif.neg_ttlm, 0, sizeof(sdata->vif.neg_ttlm)); wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->neg_ttlm_timeout_work); + + sdata->u.mgd.removed_links = 0; + wiphy_delayed_work_cancel(sdata->local->hw.wiphy, + &sdata->u.mgd.ml_reconf_work); + + wiphy_work_cancel(sdata->local->hw.wiphy, + &ifmgd->teardown_ttlm_work); + ieee80211_vif_set_links(sdata, 0, 0); ifmgd->mcast_seq_last = IEEE80211_SN_MODULO; @@ -3592,7 +4003,7 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) if (WARN_ON_ONCE(!link)) continue; - if (link->u.mgd.csa_blocked_tx) + if (link->u.mgd.csa.blocked_tx) continue; tx = true; @@ -3629,13 +4040,9 @@ static void __ieee80211_disconnect(struct ieee80211_sub_if_data *sdata) tx, frame_buf); /* the other links will be destroyed */ sdata->vif.bss_conf.csa_active = false; - sdata->deflink.u.mgd.csa_waiting_bcn = false; - sdata->deflink.u.mgd.csa_blocked_tx = false; - if (sdata->csa_blocked_queues) { - ieee80211_wake_vif_queues(local, sdata, - IEEE80211_QUEUE_STOP_REASON_CSA); - sdata->csa_blocked_queues = false; - } + sdata->deflink.u.mgd.csa.waiting_bcn = false; + sdata->deflink.u.mgd.csa.blocked_tx = false; + ieee80211_vif_unblock_queues_csa(sdata); ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx, WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY, @@ -4445,40 +4852,12 @@ static bool ieee80211_assoc_config_link(struct ieee80211_link_data *link, if (elems->he_operation && link->u.mgd.conn.mode >= IEEE80211_CONN_MODE_HE && elems->he_cap) { - const struct ieee80211_he_6ghz_oper *he_6ghz_oper; - ieee80211_he_cap_ie_to_sta_he_cap(sdata, sband, elems->he_cap, elems->he_cap_len, elems->he_6ghz_capa, link_sta); - he_6ghz_oper = ieee80211_he_6ghz_oper(elems->he_operation); - - if (is_6ghz && he_6ghz_oper) { - switch (u8_get_bits(he_6ghz_oper->control, - IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) { - case IEEE80211_6GHZ_CTRL_REG_LPI_AP: - case IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP: - bss_conf->power_type = IEEE80211_REG_LPI_AP; - break; - case IEEE80211_6GHZ_CTRL_REG_SP_AP: - case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP: - bss_conf->power_type = IEEE80211_REG_SP_AP; - break; - case IEEE80211_6GHZ_CTRL_REG_VLP_AP: - bss_conf->power_type = IEEE80211_REG_VLP_AP; - break; - default: - bss_conf->power_type = IEEE80211_REG_UNSET_AP; - break; - } - } else if (is_6ghz) { - link_info(link, - "HE 6 GHz operation missing (on %d MHz), expect issues\n", - bss_conf->chanreq.oper.chan->center_freq); - } - bss_conf->he_support = link_sta->pub->he_cap.has_he; if (elems->rsnx && elems->rsnx_len && (elems->rsnx[0] & WLAN_RSNX_CAPA_PROTECTED_TWT) && @@ -5020,6 +5399,23 @@ ieee80211_determine_our_sta_mode_assoc(struct ieee80211_sub_if_data *sdata, conn->bw_limit, tmp.bw_limit); } +static enum ieee80211_ap_reg_power +ieee80211_ap_power_type(u8 control) +{ + switch (u8_get_bits(control, IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) { + case IEEE80211_6GHZ_CTRL_REG_LPI_AP: + case IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP: + return IEEE80211_REG_LPI_AP; + case IEEE80211_6GHZ_CTRL_REG_SP_AP: + case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP: + return IEEE80211_REG_SP_AP; + case IEEE80211_6GHZ_CTRL_REG_VLP_AP: + return IEEE80211_REG_VLP_AP; + default: + return IEEE80211_REG_UNSET_AP; + } +} + static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, struct ieee80211_link_data *link, int link_id, @@ -5029,15 +5425,15 @@ static int ieee80211_prep_channel(struct 
ieee80211_sub_if_data *sdata, struct ieee80211_local *local = sdata->local; bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ; struct ieee80211_chan_req chanreq = {}; + struct cfg80211_chan_def ap_chandef; struct ieee802_11_elems *elems; int ret; - u32 i; lockdep_assert_wiphy(local->hw.wiphy); rcu_read_lock(); elems = ieee80211_determine_chan_mode(sdata, conn, cbss, link_id, - &chanreq); + &chanreq, &ap_chandef); if (IS_ERR(elems)) { rcu_read_unlock(); @@ -5052,26 +5448,23 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, } if (link && is_6ghz && conn->mode >= IEEE80211_CONN_MODE_HE) { - struct ieee80211_bss_conf *bss_conf; - u8 j = 0; - - bss_conf = link->conf; + const struct ieee80211_he_6ghz_oper *he_6ghz_oper; if (elems->pwr_constr_elem) - bss_conf->pwr_reduction = *elems->pwr_constr_elem; + link->conf->pwr_reduction = *elems->pwr_constr_elem; - BUILD_BUG_ON(ARRAY_SIZE(bss_conf->tx_pwr_env) != - ARRAY_SIZE(elems->tx_pwr_env)); + he_6ghz_oper = ieee80211_he_6ghz_oper(elems->he_operation); + if (he_6ghz_oper) + link->conf->power_type = + ieee80211_ap_power_type(he_6ghz_oper->control); + else + link_info(link, + "HE 6 GHz operation missing (on %d MHz), expect issues\n", + cbss->channel->center_freq); - for (i = 0; i < elems->tx_pwr_env_num; i++) { - if (elems->tx_pwr_env_len[i] > sizeof(bss_conf->tx_pwr_env[j])) - continue; - - bss_conf->tx_pwr_env_num++; - memcpy(&bss_conf->tx_pwr_env[j], elems->tx_pwr_env[i], - elems->tx_pwr_env_len[i]); - j++; - } + link->conf->tpe = elems->tpe; + ieee80211_rearrange_tpe(&link->conf->tpe, &ap_chandef, + &chanreq.oper); } rcu_read_unlock(); /* the element data was RCU protected so no longer valid anyway */ @@ -6150,13 +6543,140 @@ static void ieee80211_process_adv_ttlm(struct ieee80211_sub_if_data *sdata, } } +static void +ieee80211_mgd_check_cross_link_csa(struct ieee80211_sub_if_data *sdata, + int reporting_link_id, + struct ieee802_11_elems *elems) +{ + const struct element *sta_profiles[IEEE80211_MLD_MAX_NUM_LINKS] = {}; + ssize_t sta_profiles_len[IEEE80211_MLD_MAX_NUM_LINKS] = {}; + const struct element *sub; + const u8 *subelems; + size_t subelems_len; + u8 common_size; + int link_id; + + if (!ieee80211_mle_size_ok((u8 *)elems->ml_basic, elems->ml_basic_len)) + return; + + common_size = ieee80211_mle_common_size((u8 *)elems->ml_basic); + subelems = (u8 *)elems->ml_basic + common_size; + subelems_len = elems->ml_basic_len - common_size; + + for_each_element_id(sub, IEEE80211_MLE_SUBELEM_PER_STA_PROFILE, + subelems, subelems_len) { + struct ieee80211_mle_per_sta_profile *prof = (void *)sub->data; + struct ieee80211_link_data *link; + ssize_t len; + + if (!ieee80211_mle_basic_sta_prof_size_ok(sub->data, + sub->datalen)) + continue; + + link_id = le16_get_bits(prof->control, + IEEE80211_MLE_STA_CONTROL_LINK_ID); + /* need a valid link ID, but also not our own, both AP bugs */ + if (link_id == reporting_link_id || + link_id >= IEEE80211_MLD_MAX_NUM_LINKS) + continue; + + link = sdata_dereference(sdata->link[link_id], sdata); + if (!link) + continue; + + len = cfg80211_defragment_element(sub, subelems, subelems_len, + NULL, 0, + IEEE80211_MLE_SUBELEM_FRAGMENT); + if (WARN_ON(len < 0)) + continue; + + sta_profiles[link_id] = sub; + sta_profiles_len[link_id] = len; + } + + for (link_id = 0; link_id < IEEE80211_MLD_MAX_NUM_LINKS; link_id++) { + struct ieee80211_mle_per_sta_profile *prof; + struct ieee802_11_elems *prof_elems; + struct ieee80211_link_data *link; + ssize_t len; + + if (link_id == reporting_link_id) + continue; + 
+ link = sdata_dereference(sdata->link[link_id], sdata); + if (!link) + continue; + + if (!sta_profiles[link_id]) { + prof_elems = NULL; + goto handle; + } + + /* we can defragment in-place, won't use the buffer again */ + len = cfg80211_defragment_element(sta_profiles[link_id], + subelems, subelems_len, + (void *)sta_profiles[link_id], + sta_profiles_len[link_id], + IEEE80211_MLE_SUBELEM_FRAGMENT); + if (WARN_ON(len != sta_profiles_len[link_id])) + continue; + + prof = (void *)sta_profiles[link_id]; + prof_elems = ieee802_11_parse_elems(prof->variable + + (prof->sta_info_len - 1), + len - + (prof->sta_info_len - 1), + false, NULL); + + /* memory allocation failed - let's hope that's transient */ + if (!prof_elems) + continue; + +handle: + /* + * FIXME: the timings here are obviously incorrect, + * but only older Intel drivers seem to care, and + * those don't have MLO. If you really need this, + * the problem is having to calculate it with the + * TSF offset etc. The device_timestamp is still + * correct, of course. + */ + ieee80211_sta_process_chanswitch(link, 0, 0, elems, prof_elems, + IEEE80211_CSA_SOURCE_OTHER_LINK); + kfree(prof_elems); + } +} + +static bool ieee80211_mgd_ssid_mismatch(struct ieee80211_sub_if_data *sdata, + const struct ieee802_11_elems *elems) +{ + struct ieee80211_vif_cfg *cfg = &sdata->vif.cfg; + static u8 zero_ssid[IEEE80211_MAX_SSID_LEN]; + + if (!elems->ssid) + return false; + + /* hidden SSID: zero length */ + if (elems->ssid_len == 0) + return false; + + if (elems->ssid_len != cfg->ssid_len) + return true; + + /* hidden SSID: zeroed out */ + if (!memcmp(elems->ssid, zero_ssid, elems->ssid_len)) + return false; + + return memcmp(elems->ssid, cfg->ssid, cfg->ssid_len); +} + static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, struct ieee80211_hdr *hdr, size_t len, struct ieee80211_rx_status *rx_status) { struct ieee80211_sub_if_data *sdata = link->sdata; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - struct ieee80211_bss_conf *bss_conf = &sdata->vif.bss_conf; + struct ieee80211_bss_conf *bss_conf = link->conf; struct ieee80211_vif_cfg *vif_cfg = &sdata->vif.cfg; struct ieee80211_mgmt *mgmt = (void *) hdr; size_t baselen; @@ -6200,7 +6720,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, parse_params.len = len - baselen; rcu_read_lock(); - chanctx_conf = rcu_dereference(link->conf->chanctx_conf); + chanctx_conf = rcu_dereference(bss_conf->chanctx_conf); if (!chanctx_conf) { rcu_read_unlock(); return; @@ -6230,11 +6750,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, ifmgd->assoc_data->need_beacon = false; if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) && !ieee80211_is_s1g_beacon(hdr->frame_control)) { - link->conf->sync_tsf = + bss_conf->sync_tsf = le64_to_cpu(mgmt->u.beacon.timestamp); - link->conf->sync_device_ts = + bss_conf->sync_device_ts = rx_status->device_timestamp; - link->conf->sync_dtim_count = elems->dtim_count; + bss_conf->sync_dtim_count = elems->dtim_count; } if (elems->mbssid_config_ie) @@ -6258,7 +6778,7 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, } if (!ifmgd->associated || - !ieee80211_rx_our_beacon(bssid, link->conf->bss)) + !ieee80211_rx_our_beacon(bssid, bss_conf->bss)) return; bssid = link->u.mgd.bssid; @@ -6285,12 +6805,21 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, */ if (!ieee80211_is_s1g_beacon(hdr->frame_control)) ncrc = crc32_be(0, (void *)&mgmt->u.beacon.beacon_int, 4); - parse_params.bss
= link->conf->bss; + parse_params.bss = bss_conf->bss; parse_params.filter = care_about_ies; parse_params.crc = ncrc; elems = ieee802_11_parse_elems_full(&parse_params); if (!elems) return; + + if (rx_status->flag & RX_FLAG_DECRYPTED && + ieee80211_mgd_ssid_mismatch(sdata, elems)) { + sdata_info(sdata, "SSID mismatch for AP %pM, disconnect\n", + sdata->vif.cfg.ap_addr); + __ieee80211_disconnect(sdata); + return; + } + ncrc = elems->crc; if (ieee80211_hw_check(&local->hw, PS_NULLFUNC_STACK) && @@ -6357,11 +6886,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, */ if (ieee80211_hw_check(&local->hw, TIMING_BEACON_ONLY) && !ieee80211_is_s1g_beacon(hdr->frame_control)) { - link->conf->sync_tsf = + bss_conf->sync_tsf = le64_to_cpu(mgmt->u.beacon.timestamp); - link->conf->sync_device_ts = + bss_conf->sync_device_ts = rx_status->device_timestamp; - link->conf->sync_dtim_count = elems->dtim_count; + bss_conf->sync_dtim_count = elems->dtim_count; } if ((ncrc == link->u.mgd.beacon_crc && link->u.mgd.beacon_crc_valid) || @@ -6374,7 +6903,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, ieee80211_sta_process_chanswitch(link, rx_status->mactime, rx_status->device_timestamp, - elems, true); + elems, elems, + IEEE80211_CSA_SOURCE_BEACON); + + /* note that after this elems->ml_basic can no longer be used fully */ + ieee80211_mgd_check_cross_link_csa(sdata, rx_status->link_id, elems); if (!link->u.mgd.disable_wmm_tracking && ieee80211_sta_wmm_params(local, link, elems->wmm_param, @@ -6420,10 +6953,10 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_link_data *link, goto free; } - if (WARN_ON(!link->conf->chanreq.oper.chan)) + if (WARN_ON(!bss_conf->chanreq.oper.chan)) goto free; - sband = local->hw.wiphy->bands[link->conf->chanreq.oper.chan->band]; + sband = local->hw.wiphy->bands[bss_conf->chanreq.oper.chan->band]; changed |= ieee80211_recalc_twt_req(sdata, sband, link, link_sta, elems); @@ -6834,7 +7367,7 @@ static void ieee80211_teardown_ttlm_work(struct wiphy *wiphy, u16 new_dormant_links; struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, - u.mgd.neg_ttlm_timeout_work.work); + u.mgd.teardown_ttlm_work); if (!sdata->vif.neg_ttlm.valid) return; @@ -6908,6 +7441,7 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, { struct ieee80211_link_data *link = &sdata->deflink; struct ieee80211_rx_status *rx_status; + struct ieee802_11_elems *elems; struct ieee80211_mgmt *mgmt; u16 fc; int ies_len; @@ -6951,9 +7485,8 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, !ether_addr_equal(mgmt->bssid, sdata->vif.cfg.ap_addr)) break; - if (mgmt->u.action.category == WLAN_CATEGORY_SPECTRUM_MGMT) { - struct ieee802_11_elems *elems; - + switch (mgmt->u.action.category) { + case WLAN_CATEGORY_SPECTRUM_MGMT: ies_len = skb->len - offsetof(struct ieee80211_mgmt, u.action.u.chan_switch.variable); @@ -6966,15 +7499,20 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, mgmt->u.action.u.chan_switch.variable, ies_len, true, NULL); - if (elems && !elems->parse_error) + if (elems && !elems->parse_error) { + enum ieee80211_csa_source src = + IEEE80211_CSA_SOURCE_PROT_ACTION; + ieee80211_sta_process_chanswitch(link, rx_status->mactime, rx_status->device_timestamp, - elems, false); + elems, elems, + src); + } kfree(elems); - } else if (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC) { - struct ieee802_11_elems *elems; - + break; + case WLAN_CATEGORY_PUBLIC: + case 
WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION: ies_len = skb->len - offsetof(struct ieee80211_mgmt, u.action.u.ext_chan_switch.variable); @@ -6991,6 +7529,14 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, ies_len, true, NULL); if (elems && !elems->parse_error) { + enum ieee80211_csa_source src; + + if (mgmt->u.action.category == + WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION) + src = IEEE80211_CSA_SOURCE_PROT_ACTION; + else + src = IEEE80211_CSA_SOURCE_UNPROT_ACTION; + /* for the handling code pretend it was an IE */ elems->ext_chansw_ie = &mgmt->u.action.u.ext_chan_switch.data; @@ -6998,10 +7544,12 @@ void ieee80211_sta_rx_queued_mgmt(struct ieee80211_sub_if_data *sdata, ieee80211_sta_process_chanswitch(link, rx_status->mactime, rx_status->device_timestamp, - elems, false); + elems, elems, + src); } kfree(elems); + break; } break; } @@ -7321,7 +7869,7 @@ static void ieee80211_sta_bcn_mon_timer(struct timer_list *t) return; if (sdata->vif.bss_conf.csa_active && - !sdata->deflink.u.mgd.csa_waiting_bcn) + !sdata->deflink.u.mgd.csa.waiting_bcn) return; if (sdata->vif.driver_flags & IEEE80211_VIF_BEACON_FILTER) @@ -7345,7 +7893,7 @@ static void ieee80211_sta_conn_mon_timer(struct timer_list *t) return; if (sdata->vif.bss_conf.csa_active && - !sdata->deflink.u.mgd.csa_waiting_bcn) + !sdata->deflink.u.mgd.csa.waiting_bcn) return; sta = sta_info_get(sdata, sdata->vif.cfg.ap_addr); @@ -7556,8 +8104,10 @@ void ieee80211_mgd_setup_link(struct ieee80211_link_data *link) else link->u.mgd.req_smps = IEEE80211_SMPS_OFF; - wiphy_delayed_work_init(&link->u.mgd.chswitch_work, - ieee80211_chswitch_work); + wiphy_delayed_work_init(&link->u.mgd.csa.switch_work, + ieee80211_csa_switch_work); + + ieee80211_clear_tpe(&link->conf->tpe); if (sdata->u.mgd.assoc_data) ether_addr_copy(link->conf->addr, @@ -8686,7 +9236,7 @@ void ieee80211_mgd_stop_link(struct ieee80211_link_data *link) wiphy_work_cancel(link->sdata->local->hw.wiphy, &link->u.mgd.recalc_smps); wiphy_delayed_work_cancel(link->sdata->local->hw.wiphy, - &link->u.mgd.chswitch_work); + &link->u.mgd.csa.switch_work); } void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) @@ -8704,15 +9254,8 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata) &ifmgd->beacon_connection_loss_work); wiphy_work_cancel(sdata->local->hw.wiphy, &ifmgd->csa_connection_drop_work); - wiphy_work_cancel(sdata->local->hw.wiphy, - &ifmgd->teardown_ttlm_work); wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->tdls_peer_del_work); - wiphy_delayed_work_cancel(sdata->local->hw.wiphy, - &ifmgd->ml_reconf_work); - wiphy_delayed_work_cancel(sdata->local->hw.wiphy, &ifmgd->ttlm_work); - wiphy_delayed_work_cancel(sdata->local->hw.wiphy, - &ifmgd->neg_ttlm_timeout_work); if (ifmgd->assoc_data) ieee80211_destroy_assoc_data(sdata, ASSOC_TIMEOUT); diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index 65e1e9e971fd..28d03196ef75 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -8,7 +8,7 @@ * Copyright 2006-2007 Jiri Benc * Copyright 2007, Michael Wu * Copyright 2009 Johannes Berg - * Copyright (C) 2019, 2022-2023 Intel Corporation + * Copyright (C) 2019, 2022-2024 Intel Corporation */ #include #include @@ -413,6 +413,39 @@ void ieee80211_start_next_roc(struct ieee80211_local *local) } } +void ieee80211_reconfig_roc(struct ieee80211_local *local) +{ + struct ieee80211_roc_work *roc, *tmp; + + /* + * In the software implementation we can just continue with the + * interruption due to reconfig, roc_work is still queued if
* needed. + */ + if (!local->ops->remain_on_channel) + return; + + /* flush work so nothing from the driver is still pending */ + wiphy_work_flush(local->hw.wiphy, &local->hw_roc_start); + wiphy_work_flush(local->hw.wiphy, &local->hw_roc_done); + + list_for_each_entry_safe(roc, tmp, &local->roc_list, list) { + if (!roc->started) + break; + + if (!roc->hw_begun) { + /* it didn't start in HW yet, so we can restart it */ + roc->started = false; + continue; + } + + /* otherwise destroy it and tell userspace */ + ieee80211_roc_notify_destroy(roc); + } + + ieee80211_start_next_roc(local); +} + static void __ieee80211_roc_work(struct ieee80211_local *local) { struct ieee80211_roc_work *roc; diff --git a/net/mac80211/parse.c b/net/mac80211/parse.c index 055a60e90979..279c5143b335 100644 --- a/net/mac80211/parse.c +++ b/net/mac80211/parse.c @@ -187,6 +187,84 @@ ieee80211_parse_extension_element(u32 *crc, *crc = crc32_be(*crc, (void *)elem, elem->datalen + 2); } +static void ieee80211_parse_tpe(struct ieee80211_parsed_tpe *tpe, + const u8 *data, u8 len) +{ + const struct ieee80211_tx_pwr_env *env = (const void *)data; + u8 count, interpret, category; + u8 *out, N, *cnt_out = NULL, *N_out = NULL; + + if (!ieee80211_valid_tpe_element(data, len)) + return; + + count = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_COUNT); + interpret = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_INTERPRET); + category = u8_get_bits(env->info, IEEE80211_TX_PWR_ENV_INFO_CATEGORY); + + switch (interpret) { + case IEEE80211_TPE_LOCAL_EIRP: + out = tpe->max_local[category].power; + cnt_out = &tpe->max_local[category].count; + tpe->max_local[category].valid = true; + break; + case IEEE80211_TPE_REG_CLIENT_EIRP: + out = tpe->max_reg_client[category].power; + cnt_out = &tpe->max_reg_client[category].count; + tpe->max_reg_client[category].valid = true; + break; + case IEEE80211_TPE_LOCAL_EIRP_PSD: + out = tpe->psd_local[category].power; + cnt_out = &tpe->psd_local[category].count; + N_out = &tpe->psd_local[category].n; + tpe->psd_local[category].valid = true; + break; + case IEEE80211_TPE_REG_CLIENT_EIRP_PSD: + out = tpe->psd_reg_client[category].power; + cnt_out = &tpe->psd_reg_client[category].count; + N_out = &tpe->psd_reg_client[category].n; + tpe->psd_reg_client[category].valid = true; + break; + } + + switch (interpret) { + case IEEE80211_TPE_LOCAL_EIRP: + case IEEE80211_TPE_REG_CLIENT_EIRP: + /* count was validated <= 3, plus 320 MHz */ + BUILD_BUG_ON(IEEE80211_TPE_EIRP_ENTRIES_320MHZ < 5); + memcpy(out, env->variable, count + 1); + *cnt_out = count + 1; + /* separately take 320 MHz if present */ + if (count == 3 && len > sizeof(*env) + count + 1) { + out[4] = env->variable[4]; + *cnt_out = 5; + } + break; + case IEEE80211_TPE_LOCAL_EIRP_PSD: + case IEEE80211_TPE_REG_CLIENT_EIRP_PSD: + if (!count) { + memset(out, env->variable[0], + IEEE80211_TPE_PSD_ENTRIES_320MHZ); + *cnt_out = IEEE80211_TPE_PSD_ENTRIES_320MHZ; + break; + } + + N = 1 << (count - 1); + memcpy(out, env->variable, N); + *cnt_out = N; + *N_out = N; + + if (len > sizeof(*env) + N) { + int K = u8_get_bits(env->variable[N], + IEEE80211_TX_PWR_ENV_EXT_COUNT); + + K = min(K, IEEE80211_TPE_PSD_ENTRIES_320MHZ - N); + memcpy(out + N, env->variable + N + 1, K); + (*cnt_out) += K; + } + break; + } +} + static u32 _ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params, struct ieee80211_elems_parse *elems_parse, @@ -529,6 +607,13 @@ _ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params, elem_parse_failed = 
IEEE80211_PARSE_ERR_BAD_ELEM_SIZE; } + + subelem = cfg80211_find_ext_elem(WLAN_EID_TX_POWER_ENVELOPE, + pos, elen); + if (subelem) + ieee80211_parse_tpe(&elems->csa_tpe, + subelem->data + 1, + subelem->datalen - 1); break; case WLAN_EID_COUNTRY: elems->country_elem = pos; @@ -593,16 +678,9 @@ _ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params, elems->rsnx_len = elen; break; case WLAN_EID_TX_POWER_ENVELOPE: - if (elen < 1 || - elen > sizeof(struct ieee80211_tx_pwr_env)) + if (params->mode < IEEE80211_CONN_MODE_HE) break; - - if (elems->tx_pwr_env_num >= ARRAY_SIZE(elems->tx_pwr_env)) - break; - - elems->tx_pwr_env[elems->tx_pwr_env_num] = (void *)pos; - elems->tx_pwr_env_len[elems->tx_pwr_env_num] = elen; - elems->tx_pwr_env_num++; + ieee80211_parse_tpe(&elems->tpe, pos, elen); break; case WLAN_EID_EXTENSION: ieee80211_parse_extension_element(calc_crc ? @@ -889,6 +967,10 @@ ieee802_11_parse_elems_full(struct ieee80211_elems_parse_params *params) elems->ie_start = params->start; elems->total_len = params->len; + /* set all TPE entries to unlimited (but invalid) */ + ieee80211_clear_tpe(&elems->tpe); + ieee80211_clear_tpe(&elems->csa_tpe); + nontransmitted_profile = elems_parse->scratch_pos; nontransmitted_profile_len = ieee802_11_find_bssid_profile(params->start, params->len, diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c index c1fa26e09479..d823d58303e8 100644 --- a/net/mac80211/pm.c +++ b/net/mac80211/pm.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * Portions - * Copyright (C) 2020-2021, 2023 Intel Corporation + * Copyright (C) 2020-2021, 2023-2024 Intel Corporation */ #include #include @@ -171,7 +171,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) WARN_ON(!list_empty(&local->chanctx_list)); /* stop hardware - this must stop RX */ - ieee80211_stop_device(local); + ieee80211_stop_device(local, true); suspend: local->suspended = true; diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 4914692750e5..59ad24a71141 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -3358,6 +3358,7 @@ static void ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx) { struct ieee80211_mgmt *mgmt = (void *)rx->skb->data; + struct ieee80211_bss_conf *bss_conf; const struct element *ie; size_t baselen; @@ -3368,7 +3369,9 @@ ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx) if (ieee80211_hw_check(&rx->local->hw, DETECTS_COLOR_COLLISION)) return; - if (rx->link->conf->csa_active) + bss_conf = rx->link->conf; + if (bss_conf->csa_active || bss_conf->color_change_active || + !bss_conf->he_bss_color.enabled) return; baselen = mgmt->u.beacon.variable - rx->skb->data; @@ -3380,7 +3383,6 @@ ieee80211_rx_check_bss_color_collision(struct ieee80211_rx_data *rx) rx->skb->len - baselen); if (ie && ie->datalen >= sizeof(struct ieee80211_he_operation) && ie->datalen >= ieee80211_he_oper_size(ie->data + 1)) { - struct ieee80211_bss_conf *bss_conf = rx->link->conf; const struct ieee80211_he_operation *he_oper; u8 color; @@ -3617,6 +3619,7 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx) break; case WLAN_CATEGORY_PUBLIC: + case WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION: if (len < IEEE80211_MIN_ACTION_SIZE + 1) goto invalid; if (sdata->vif.type != NL80211_IFTYPE_STATION) diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c index b2de4c6fb808..073ff9e0f397 100644 --- a/net/mac80211/spectmgmt.c +++ b/net/mac80211/spectmgmt.c @@ -9,7 +9,7 @@ * Copyright 2007, Michael Wu * Copyright 2007-2008, Intel 
Corporation * Copyright 2008, Johannes Berg - * Copyright (C) 2018, 2020, 2022-2023 Intel Corporation + * Copyright (C) 2018, 2020, 2022-2024 Intel Corporation */ #include @@ -223,7 +223,7 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, enum nl80211_band current_band, u32 vht_cap_info, struct ieee80211_conn_settings *conn, - u8 *bssid, + u8 *bssid, bool unprot_action, struct ieee80211_csa_ie *csa_ie) { enum nl80211_band new_band = current_band; @@ -258,8 +258,10 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, if (!ieee80211_operating_class_to_band(new_op_class, &new_band)) { new_op_class = 0; - sdata_info(sdata, "cannot understand ECSA IE operating class, %d, ignoring\n", - ext_chansw_elem->new_operating_class); + if (!unprot_action) + sdata_info(sdata, + "cannot understand ECSA IE operating class, %d, ignoring\n", + ext_chansw_elem->new_operating_class); } else { new_chan_no = ext_chansw_elem->new_ch_num; csa_ie->count = ext_chansw_elem->count; @@ -293,9 +295,10 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band); new_chan = ieee80211_get_channel(sdata->local->hw.wiphy, new_freq); if (!new_chan || new_chan->flags & IEEE80211_CHAN_DISABLED) { - sdata_info(sdata, - "BSS %pM switches to unsupported channel (%d MHz), disconnecting\n", - bssid, new_freq); + if (!unprot_action) + sdata_info(sdata, + "BSS %pM switches to unsupported channel (%d MHz), disconnecting\n", + bssid, new_freq); return -EINVAL; } @@ -340,6 +343,9 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, break; } + /* capture the AP configuration */ + csa_ie->chanreq.ap = csa_ie->chanreq.oper; + /* parse one of the Elements to build a new chandef */ memset(&new_chandef, 0, sizeof(new_chandef)); new_chandef.chan = new_chan; @@ -368,6 +374,9 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, /* if data is there validate the bandwidth & use it */ if (new_chandef.chan) { + /* capture the AP chandef before (potential) downgrading */ + csa_ie->chanreq.ap = new_chandef; + if (conn->bw_limit < IEEE80211_CONN_BW_LIMIT_320 && new_chandef.width == NL80211_CHAN_WIDTH_320) ieee80211_chandef_downgrade(&new_chandef, NULL); diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h index bd5e2f7146f6..9195d5a2de0a 100644 --- a/net/mac80211/sta_info.h +++ b/net/mac80211/sta_info.h @@ -727,6 +727,12 @@ struct sta_info { struct ieee80211_sta sta; }; +static inline int ieee80211_tdls_sta_link_id(struct sta_info *sta) +{ + /* TDLS STA can only have a single link */ + return sta->sta.valid_links ? 
__ffs(sta->sta.valid_links) : 0; +} + static inline enum nl80211_plink_state sta_plink_state(struct sta_info *sta) { #ifdef CONFIG_MAC80211_MESH diff --git a/net/mac80211/tests/Makefile b/net/mac80211/tests/Makefile index 4fdaf3feaca3..511dfa226699 100644 --- a/net/mac80211/tests/Makefile +++ b/net/mac80211/tests/Makefile @@ -1,3 +1,3 @@ -mac80211-tests-y += module.o elems.o mfp.o +mac80211-tests-y += module.o elems.o mfp.o tpe.o obj-$(CONFIG_MAC80211_KUNIT_TEST) += mac80211-tests.o diff --git a/net/mac80211/tests/tpe.c b/net/mac80211/tests/tpe.c new file mode 100644 index 000000000000..dd63303a2985 --- /dev/null +++ b/net/mac80211/tests/tpe.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * KUnit tests for TPE element handling + * + * Copyright (C) 2024 Intel Corporation + */ +#include +#include "../ieee80211_i.h" + +MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); + +static struct ieee80211_channel chan6g_1 = { + .band = NL80211_BAND_6GHZ, + .center_freq = 5955, +}; + +static struct ieee80211_channel chan6g_33 = { + .band = NL80211_BAND_6GHZ, + .center_freq = 6115, +}; + +static struct ieee80211_channel chan6g_61 = { + .band = NL80211_BAND_6GHZ, + .center_freq = 6255, +}; + +static const struct subchan_test_case { + const char *desc; + struct cfg80211_chan_def c; + u8 n; + int expect; +} subchan_offset_cases[] = { + { + .desc = "identical 20 MHz", + .c.width = NL80211_CHAN_WIDTH_20, + .c.chan = &chan6g_1, + .c.center_freq1 = 5955, + .n = 1, + .expect = 0, + }, + { + .desc = "identical 40 MHz", + .c.width = NL80211_CHAN_WIDTH_40, + .c.chan = &chan6g_1, + .c.center_freq1 = 5965, + .n = 2, + .expect = 0, + }, + { + .desc = "identical 80+80 MHz", + /* not really valid? doesn't matter for the test */ + .c.width = NL80211_CHAN_WIDTH_80P80, + .c.chan = &chan6g_1, + .c.center_freq1 = 5985, + .c.center_freq2 = 6225, + .n = 16, + .expect = 0, + }, + { + .desc = "identical 320 MHz", + .c.width = NL80211_CHAN_WIDTH_320, + .c.chan = &chan6g_1, + .c.center_freq1 = 6105, + .n = 16, + .expect = 0, + }, + { + .desc = "lower 160 MHz of 320 MHz", + .c.width = NL80211_CHAN_WIDTH_320, + .c.chan = &chan6g_1, + .c.center_freq1 = 6105, + .n = 8, + .expect = 0, + }, + { + .desc = "upper 160 MHz of 320 MHz", + .c.width = NL80211_CHAN_WIDTH_320, + .c.chan = &chan6g_61, + .c.center_freq1 = 6105, + .n = 8, + .expect = 8, + }, + { + .desc = "upper 160 MHz of 320 MHz, go to 40", + .c.width = NL80211_CHAN_WIDTH_320, + .c.chan = &chan6g_61, + .c.center_freq1 = 6105, + .n = 2, + .expect = 8 + 4 + 2, + }, + { + .desc = "secondary 80 above primary in 80+80 MHz", + /* not really valid? doesn't matter for the test */ + .c.width = NL80211_CHAN_WIDTH_80P80, + .c.chan = &chan6g_1, + .c.center_freq1 = 5985, + .c.center_freq2 = 6225, + .n = 4, + .expect = 0, + }, + { + .desc = "secondary 80 below primary in 80+80 MHz", + /* not really valid? doesn't matter for the test */ + .c.width = NL80211_CHAN_WIDTH_80P80, + .c.chan = &chan6g_61, + .c.center_freq1 = 6225, + .c.center_freq2 = 5985, + .n = 4, + .expect = 4, + }, + { + .desc = "secondary 80 below primary in 80+80 MHz, go to 20", + /* not really valid?
doesn't matter for the test */ + .c.width = NL80211_CHAN_WIDTH_80P80, + .c.chan = &chan6g_61, + .c.center_freq1 = 6225, + .c.center_freq2 = 5985, + .n = 1, + .expect = 7, + }, +}; + +KUNIT_ARRAY_PARAM_DESC(subchan_offset, subchan_offset_cases, desc); + +static void subchan_offset(struct kunit *test) +{ + const struct subchan_test_case *params = test->param_value; + int offset; + + KUNIT_ASSERT_EQ(test, cfg80211_chandef_valid(&params->c), true); + + offset = ieee80211_calc_chandef_subchan_offset(&params->c, params->n); + + KUNIT_EXPECT_EQ(test, params->expect, offset); +} + +static const struct psd_reorder_test_case { + const char *desc; + struct cfg80211_chan_def ap, used; + struct ieee80211_parsed_tpe_psd psd, out; +} psd_reorder_cases[] = { + { + .desc = "no changes, 320 MHz", + + .ap.width = NL80211_CHAN_WIDTH_320, + .ap.chan = &chan6g_1, + .ap.center_freq1 = 6105, + + .used.width = NL80211_CHAN_WIDTH_320, + .used.chan = &chan6g_1, + .used.center_freq1 = 6105, + + .psd.valid = true, + .psd.count = 16, + .psd.n = 8, + .psd.power = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + + .out.valid = true, + .out.count = 16, + .out.n = 8, + .out.power = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + }, + { + .desc = "no changes, 320 MHz, 160 MHz used, n=0", + + .ap.width = NL80211_CHAN_WIDTH_320, + .ap.chan = &chan6g_1, + .ap.center_freq1 = 6105, + + .used.width = NL80211_CHAN_WIDTH_160, + .used.chan = &chan6g_1, + .used.center_freq1 = 6025, + + .psd.valid = true, + .psd.count = 16, + .psd.n = 0, + .psd.power = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }, + + .out.valid = true, + .out.count = 8, + .out.n = 0, + .out.power = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }, + }, + { + .desc = "320 MHz, HE is 80, used 160, all lower", + + .ap.width = NL80211_CHAN_WIDTH_320, + .ap.chan = &chan6g_1, + .ap.center_freq1 = 6105, + + .used.width = NL80211_CHAN_WIDTH_160, + .used.chan = &chan6g_1, + .used.center_freq1 = 6025, + + .psd.valid = true, + .psd.count = 16, + .psd.n = 4, + .psd.power = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + + .out.valid = true, + .out.count = 8, + .out.n = 4, + .out.power = { 0, 1, 2, 3, 4, 5, 6, 7, 127, 127, 127, 127, 127, 127, 127, 127}, + }, + { + .desc = "320 MHz, HE is 80, used 160, all upper", + /* + * EHT: | | | | | | | | | | | | | | | | | + * HE: | | | | | + * used: | | | | | | | | | + */ + + .ap.width = NL80211_CHAN_WIDTH_320, + .ap.chan = &chan6g_61, + .ap.center_freq1 = 6105, + + .used.width = NL80211_CHAN_WIDTH_160, + .used.chan = &chan6g_61, + .used.center_freq1 = 6185, + + .psd.valid = true, + .psd.count = 16, + .psd.n = 4, + .psd.power = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + + .out.valid = true, + .out.count = 8, + .out.n = 4, + .out.power = { 12, 13, 14, 15, 0, 1, 2, 3, 127, 127, 127, 127, 127, 127, 127, 127}, + }, + { + .desc = "320 MHz, HE is 80, used 160, split", + /* + * EHT: | | | | | | | | | | | | | | | | | + * HE: | | | | | + * used: | | | | | | | | | + */ + + .ap.width = NL80211_CHAN_WIDTH_320, + .ap.chan = &chan6g_33, + .ap.center_freq1 = 6105, + + .used.width = NL80211_CHAN_WIDTH_160, + .used.chan = &chan6g_33, + .used.center_freq1 = 6185, + + .psd.valid = true, + .psd.count = 16, + .psd.n = 4, + .psd.power = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 }, + + .out.valid = true, + .out.count = 8, + .out.n = 4, + .out.power = { 0, 1, 2, 3, 12, 13, 14, 15, 127, 127, 127, 127, 127, 127, 127, 127}, + }, +}; + +KUNIT_ARRAY_PARAM_DESC(psd_reorder, psd_reorder_cases, desc); +
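The expect values in subchan_offset_cases, and the way PSD entries move around in psd_reorder_cases, both reduce to one piece of arithmetic: number the AP bandwidth in 20 MHz subchannels from its lowest frequency, then align the operating (primary) channel's subchannel index down to the n-subchannel block containing it. The sketch below illustrates that calculation for the contiguous-bandwidth cases; example_subchan_offset is a hypothetical standalone helper written for this illustration, not the kernel's ieee80211_calc_chandef_subchan_offset (which, as the 80+80 cases show, additionally orders the two segments of an 80+80 chandef by frequency).

	/* Illustrative sketch only, not kernel code: subchannel offset for
	 * contiguous chandefs, matching the 'expect' values in the tests.
	 */
	#include <assert.h>

	static int example_subchan_offset(int ap_center_mhz, int ap_width_mhz,
					  int prim_center_mhz, int n)
	{
		int lower_edge = ap_center_mhz - ap_width_mhz / 2;
		/* index of the primary 20 MHz subchannel in the AP bandwidth */
		int prim_idx = (prim_center_mhz - 10 - lower_edge) / 20;

		/* align down to the n-subchannel block holding the primary */
		return (prim_idx / n) * n;
	}

	int main(void)
	{
		/* "upper 160 MHz of 320 MHz": channel 61 (6255), AP at 6105 */
		assert(example_subchan_offset(6105, 320, 6255, 8) == 8);
		/* "upper 160 MHz of 320 MHz, go to 40" */
		assert(example_subchan_offset(6105, 320, 6255, 2) == 8 + 4 + 2);
		/* "lower 160 MHz of 320 MHz": channel 1 (5955) */
		assert(example_subchan_offset(6105, 320, 5955, 8) == 0);
		return 0;
	}

The psd_reorder expectations then follow from these offsets, as the cases above suggest: the first n PSD entries cover the HE block at the computed offset, the remaining entries fill the rest of the AP bandwidth from its low edge upward, and the output keeps, in frequency order, only the entries that fall inside the used bandwidth, padding the rest of the array with the no-limit value (127).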
+static void psd_reorder(struct kunit *test) +{ + const struct psd_reorder_test_case *params = test->param_value; + struct ieee80211_parsed_tpe_psd tmp = params->psd; + + KUNIT_ASSERT_EQ(test, cfg80211_chandef_valid(&params->ap), true); + KUNIT_ASSERT_EQ(test, cfg80211_chandef_valid(&params->used), true); + + ieee80211_rearrange_tpe_psd(&tmp, &params->ap, &params->used); + KUNIT_EXPECT_MEMEQ(test, &tmp, &params->out, sizeof(tmp)); +} + +static struct kunit_case tpe_test_cases[] = { + KUNIT_CASE_PARAM(subchan_offset, subchan_offset_gen_params), + KUNIT_CASE_PARAM(psd_reorder, psd_reorder_gen_params), + {} +}; + +static struct kunit_suite tpe = { + .name = "mac80211-tpe", + .test_cases = tpe_test_cases, +}; + +kunit_test_suite(tpe); diff --git a/net/mac80211/trace.h b/net/mac80211/trace.h index b26aacfbc622..dc498cd8cd91 100644 --- a/net/mac80211/trace.h +++ b/net/mac80211/trace.h @@ -328,9 +328,18 @@ TRACE_EVENT(drv_set_wakeup, TP_printk(LOCAL_PR_FMT " enabled:%d", LOCAL_PR_ARG, __entry->enabled) ); -DEFINE_EVENT(local_only_evt, drv_stop, - TP_PROTO(struct ieee80211_local *local), - TP_ARGS(local) +TRACE_EVENT(drv_stop, + TP_PROTO(struct ieee80211_local *local, bool suspend), + TP_ARGS(local, suspend), + TP_STRUCT__entry( + LOCAL_ENTRY + __field(bool, suspend) + ), + TP_fast_assign( + LOCAL_ASSIGN; + __entry->suspend = suspend; + ), + TP_printk(LOCAL_PR_FMT " suspend:%d", LOCAL_PR_ARG, __entry->suspend) ); DEFINE_EVENT(local_sdata_addr_evt, drv_add_interface, diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index f861d99e5f05..72a9ba8bc5fd 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2774,8 +2774,7 @@ static struct sk_buff *ieee80211_build_hdr(struct ieee80211_sub_if_data *sdata, if (tdls_peer) { /* For TDLS only one link can be valid with peer STA */ - int tdls_link_id = sta->sta.valid_links ? - __ffs(sta->sta.valid_links) : 0; + int tdls_link_id = ieee80211_tdls_sta_link_id(sta); struct ieee80211_link_data *link; /* DA SA BSSID */ @@ -3101,8 +3100,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta) case NL80211_IFTYPE_STATION: if (test_sta_flag(sta, WLAN_STA_TDLS_PEER)) { /* For TDLS only one link can be valid with peer STA */ - int tdls_link_id = sta->sta.valid_links ?
- __ffs(sta->sta.valid_links) : 0; + int tdls_link_id = ieee80211_tdls_sta_link_id(sta); struct ieee80211_link_data *link; /* DA SA BSSID */ diff --git a/net/mac80211/util.c b/net/mac80211/util.c index 771c05640aa3..ced19ce7c51a 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -1565,7 +1565,7 @@ u32 ieee80211_sta_get_rates(struct ieee80211_sub_if_data *sdata, return supp_rates; } -void ieee80211_stop_device(struct ieee80211_local *local) +void ieee80211_stop_device(struct ieee80211_local *local, bool suspend) { local_bh_disable(); ieee80211_handle_queued_frames(local); @@ -1578,7 +1578,7 @@ void ieee80211_stop_device(struct ieee80211_local *local) flush_workqueue(local->workqueue); wiphy_work_flush(local->hw.wiphy, NULL); - drv_stop(local); + drv_stop(local, suspend); } static void ieee80211_flush_completed_scan(struct ieee80211_local *local, @@ -2179,8 +2179,7 @@ int ieee80211_reconfig(struct ieee80211_local *local) local->in_reconfig = false; barrier(); - /* Restart deferred ROCs */ - ieee80211_start_next_roc(local); + ieee80211_reconfig_roc(local); /* Requeue all works */ list_for_each_entry(sdata, &local->interfaces, list) @@ -2337,7 +2336,7 @@ void ieee80211_recalc_min_chandef(struct ieee80211_sub_if_data *sdata, chanctx = container_of(chanctx_conf, struct ieee80211_chanctx, conf); - ieee80211_recalc_chanctx_min_def(local, chanctx, NULL); + ieee80211_recalc_chanctx_min_def(local, chanctx, NULL, false); } } @@ -3460,12 +3459,8 @@ void ieee80211_dfs_cac_cancel(struct ieee80211_local *local) lockdep_assert_wiphy(local->hw.wiphy); list_for_each_entry(sdata, &local->interfaces, list) { - /* it might be waiting for the local->mtx, but then - * by the time it gets it, sdata->wdev.cac_started - * will no longer be true - */ wiphy_delayed_work_cancel(local->hw.wiphy, - &sdata->deflink.dfs_cac_timer_work); + &sdata->dfs_cac_timer_work); if (sdata->wdev.cac_started) { chandef = sdata->vif.bss_conf.chanreq.oper; @@ -3937,19 +3932,103 @@ static u8 ieee80211_chanctx_radar_detect(struct ieee80211_local *local, return radar_detect; } +static u32 +__ieee80211_get_radio_mask(struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_bss_conf *link_conf; + struct ieee80211_chanctx_conf *conf; + unsigned int link_id; + u32 mask = 0; + + for_each_vif_active_link(&sdata->vif, link_conf, link_id) { + conf = sdata_dereference(link_conf->chanctx_conf, sdata); + if (!conf || conf->radio_idx < 0) + continue; + + mask |= BIT(conf->radio_idx); + } + + return mask; +} + +u32 ieee80211_get_radio_mask(struct wiphy *wiphy, struct net_device *dev) +{ + struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); + + return __ieee80211_get_radio_mask(sdata); +} + +static bool +ieee80211_sdata_uses_radio(struct ieee80211_sub_if_data *sdata, int radio_idx) +{ + if (radio_idx < 0) + return true; + + return __ieee80211_get_radio_mask(sdata) & BIT(radio_idx); +} + +static int +ieee80211_fill_ifcomb_params(struct ieee80211_local *local, + struct iface_combination_params *params, + const struct cfg80211_chan_def *chandef, + struct ieee80211_sub_if_data *sdata) +{ + struct ieee80211_sub_if_data *sdata_iter; + struct ieee80211_chanctx *ctx; + int total = !!sdata; + + list_for_each_entry(ctx, &local->chanctx_list, list) { + if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) + continue; + + if (params->radio_idx >= 0 && + ctx->conf.radio_idx != params->radio_idx) + continue; + + params->radar_detect |= + ieee80211_chanctx_radar_detect(local, ctx); + + if (chandef && ctx->mode != 
IEEE80211_CHANCTX_EXCLUSIVE && + cfg80211_chandef_compatible(chandef, &ctx->conf.def)) + continue; + + params->num_different_channels++; + } + + list_for_each_entry(sdata_iter, &local->interfaces, list) { + struct wireless_dev *wdev_iter; + + wdev_iter = &sdata_iter->wdev; + + if (sdata_iter == sdata || + !ieee80211_sdata_running(sdata_iter) || + cfg80211_iftype_allowed(local->hw.wiphy, + wdev_iter->iftype, 0, 1)) + continue; + + if (!ieee80211_sdata_uses_radio(sdata_iter, params->radio_idx)) + continue; + + params->iftype_num[wdev_iter->iftype]++; + total++; + } + + return total; +} + int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, const struct cfg80211_chan_def *chandef, enum ieee80211_chanctx_mode chanmode, - u8 radar_detect) + u8 radar_detect, int radio_idx) { + bool shared = chanmode == IEEE80211_CHANCTX_SHARED; struct ieee80211_local *local = sdata->local; - struct ieee80211_sub_if_data *sdata_iter; enum nl80211_iftype iftype = sdata->wdev.iftype; - struct ieee80211_chanctx *ctx; - int total = 1; struct iface_combination_params params = { .radar_detect = radar_detect, + .radio_idx = radio_idx, }; + int total; lockdep_assert_wiphy(local->hw.wiphy); @@ -3986,37 +4065,9 @@ int ieee80211_check_combinations(struct ieee80211_sub_if_data *sdata, if (iftype != NL80211_IFTYPE_UNSPECIFIED) params.iftype_num[iftype] = 1; - list_for_each_entry(ctx, &local->chanctx_list, list) { - if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) - continue; - params.radar_detect |= - ieee80211_chanctx_radar_detect(local, ctx); - if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) { - params.num_different_channels++; - continue; - } - if (chandef && chanmode == IEEE80211_CHANCTX_SHARED && - cfg80211_chandef_compatible(chandef, - &ctx->conf.def)) - continue; - params.num_different_channels++; - } - - list_for_each_entry_rcu(sdata_iter, &local->interfaces, list) { - struct wireless_dev *wdev_iter; - - wdev_iter = &sdata_iter->wdev; - - if (sdata_iter == sdata || - !ieee80211_sdata_running(sdata_iter) || - cfg80211_iftype_allowed(local->hw.wiphy, - wdev_iter->iftype, 0, 1)) - continue; - - params.iftype_num[wdev_iter->iftype]++; - total++; - } - + total = ieee80211_fill_ifcomb_params(local, &params, + shared ?
chandef : NULL, + sdata); if (total == 1 && !params.radar_detect) return 0; @@ -4033,28 +4084,17 @@ ieee80211_iter_max_chans(const struct ieee80211_iface_combination *c, c->num_different_channels); } -int ieee80211_max_num_channels(struct ieee80211_local *local) +int ieee80211_max_num_channels(struct ieee80211_local *local, int radio_idx) { - struct ieee80211_sub_if_data *sdata; - struct ieee80211_chanctx *ctx; u32 max_num_different_channels = 1; int err; - struct iface_combination_params params = {0}; + struct iface_combination_params params = { + .radio_idx = radio_idx, + }; lockdep_assert_wiphy(local->hw.wiphy); - list_for_each_entry(ctx, &local->chanctx_list, list) { - if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) - continue; - - params.num_different_channels++; - - params.radar_detect |= - ieee80211_chanctx_radar_detect(local, ctx); - } - - list_for_each_entry_rcu(sdata, &local->interfaces, list) - params.iftype_num[sdata->wdev.iftype]++; + ieee80211_fill_ifcomb_params(local, &params, NULL, NULL); err = cfg80211_iter_combinations(local->hw.wiphy, &params, ieee80211_iter_max_chans, @@ -4342,3 +4382,28 @@ ieee80211_min_bw_limit_from_chandef(struct cfg80211_chan_def *chandef) return IEEE80211_CONN_BW_LIMIT_20; } } + +void ieee80211_clear_tpe(struct ieee80211_parsed_tpe *tpe) +{ + for (int i = 0; i < 2; i++) { + tpe->max_local[i].valid = false; + memset(tpe->max_local[i].power, + IEEE80211_TPE_MAX_TX_PWR_NO_CONSTRAINT, + sizeof(tpe->max_local[i].power)); + + tpe->max_reg_client[i].valid = false; + memset(tpe->max_reg_client[i].power, + IEEE80211_TPE_MAX_TX_PWR_NO_CONSTRAINT, + sizeof(tpe->max_reg_client[i].power)); + + tpe->psd_local[i].valid = false; + memset(tpe->psd_local[i].power, + IEEE80211_TPE_PSD_NO_LIMIT, + sizeof(tpe->psd_local[i].power)); + + tpe->psd_reg_client[i].valid = false; + memset(tpe->psd_reg_client[i].power, + IEEE80211_TPE_PSD_NO_LIMIT, + sizeof(tpe->psd_reg_client[i].power)); + } +} diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index 642891cafbaf..bf6ef45af757 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -351,7 +351,8 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, /* FIXME: move this to some better location - parses HE/EHT now */ enum ieee80211_sta_rx_bandwidth -ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta) +_ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta, + struct cfg80211_chan_def *chandef) { unsigned int link_id = link_sta->link_id; struct ieee80211_sub_if_data *sdata = link_sta->sta->sdata; @@ -361,44 +362,43 @@ ieee80211_sta_cap_rx_bw(struct link_sta_info *link_sta) u32 cap_width; if (he_cap->has_he) { - struct ieee80211_bss_conf *link_conf; - enum ieee80211_sta_rx_bandwidth ret; + enum nl80211_band band; u8 info; - rcu_read_lock(); - link_conf = rcu_dereference(sdata->vif.link_conf[link_id]); + if (chandef) { + band = chandef->chan->band; + } else { + struct ieee80211_bss_conf *link_conf; - if (eht_cap->has_eht && - link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) { + rcu_read_lock(); + link_conf = rcu_dereference(sdata->vif.link_conf[link_id]); + band = link_conf->chanreq.oper.chan->band; + rcu_read_unlock(); + } + + if (eht_cap->has_eht && band == NL80211_BAND_6GHZ) { info = eht_cap->eht_cap_elem.phy_cap_info[0]; - if (info & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) { - ret = IEEE80211_STA_RX_BW_320; - goto out; - } + if (info & IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ) + return IEEE80211_STA_RX_BW_320; } info = he_cap->he_cap_elem.phy_cap_info[0]; - if
(link_conf->chanreq.oper.chan->band == NL80211_BAND_2GHZ) { + if (band == NL80211_BAND_2GHZ) { if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G) - ret = IEEE80211_STA_RX_BW_40; - else - ret = IEEE80211_STA_RX_BW_20; - goto out; + return IEEE80211_STA_RX_BW_40; + return IEEE80211_STA_RX_BW_20; } if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G || info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G) - ret = IEEE80211_STA_RX_BW_160; - else if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G) - ret = IEEE80211_STA_RX_BW_80; - else - ret = IEEE80211_STA_RX_BW_20; -out: - rcu_read_unlock(); + return IEEE80211_STA_RX_BW_160; - return ret; + if (info & IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G) + return IEEE80211_STA_RX_BW_80; + + return IEEE80211_STA_RX_BW_20; } if (!vht_cap->vht_supported) @@ -503,22 +503,29 @@ ieee80211_chan_width_to_rx_bw(enum nl80211_chan_width width) /* FIXME: rename/move - this deals with everything not just VHT */ enum ieee80211_sta_rx_bandwidth -ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta) +_ieee80211_sta_cur_vht_bw(struct link_sta_info *link_sta, + struct cfg80211_chan_def *chandef) { struct sta_info *sta = link_sta->sta; - struct ieee80211_bss_conf *link_conf; enum nl80211_chan_width bss_width; enum ieee80211_sta_rx_bandwidth bw; - rcu_read_lock(); - link_conf = rcu_dereference(sta->sdata->vif.link_conf[link_sta->link_id]); - if (WARN_ON(!link_conf)) - bss_width = NL80211_CHAN_WIDTH_20_NOHT; - else - bss_width = link_conf->chanreq.oper.width; - rcu_read_unlock(); + if (chandef) { + bss_width = chandef->width; + } else { + struct ieee80211_bss_conf *link_conf; - bw = ieee80211_sta_cap_rx_bw(link_sta); + rcu_read_lock(); + link_conf = rcu_dereference(sta->sdata->vif.link_conf[link_sta->link_id]); + if (WARN_ON_ONCE(!link_conf)) { + rcu_read_unlock(); + return IEEE80211_STA_RX_BW_20; + } + bss_width = link_conf->chanreq.oper.width; + rcu_read_unlock(); + } + + bw = _ieee80211_sta_cap_rx_bw(link_sta, chandef); bw = min(bw, link_sta->cur_max_bandwidth); /* Don't consider AP's bandwidth for TDLS peers, section 11.23.1 of diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index bb7dca8aa2d9..a26c2c840fd9 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -2040,13 +2040,13 @@ static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied) do_div(grow, msk->rcvq_space.space); rcvwin += (grow << 1); - rcvbuf = min_t(u64, __tcp_space_from_win(scaling_ratio, rcvwin), + rcvbuf = min_t(u64, mptcp_space_from_win(sk, rcvwin), READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_rmem[2])); if (rcvbuf > sk->sk_rcvbuf) { u32 window_clamp; - window_clamp = __tcp_win_from_space(scaling_ratio, rcvbuf); + window_clamp = mptcp_win_from_space(sk, rcvbuf); WRITE_ONCE(sk->sk_rcvbuf, rcvbuf); /* Make subflows follow along. If we do not do this, we @@ -2202,7 +2202,7 @@ static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, if (skb_queue_empty(&msk->receive_queue) && __mptcp_move_skbs(msk)) continue; - /* only the master socket status is relevant here. The exit + /* only the MPTCP socket status is relevant here. 
The exit * conditions mirror closely tcp_recvmsg() */ if (copied >= target) @@ -3526,7 +3526,7 @@ void mptcp_subflow_process_delegated(struct sock *ssk, long status) static int mptcp_hash(struct sock *sk) { /* should never be called, - * we hash the TCP subflows not the master socket + * we hash the TCP subflows not the MPTCP socket */ WARN_ON_ONCE(1); return 0; diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 7aa47e2dd52b..b11a4e50d52b 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -386,6 +386,11 @@ static inline int mptcp_win_from_space(const struct sock *sk, int space) return __tcp_win_from_space(mptcp_sk(sk)->scaling_ratio, space); } +static inline int mptcp_space_from_win(const struct sock *sk, int win) +{ + return __tcp_space_from_win(mptcp_sk(sk)->scaling_ratio, win); +} + static inline int __mptcp_space(const struct sock *sk) { return mptcp_win_from_space(sk, READ_ONCE(sk->sk_rcvbuf) - __mptcp_rmem(sk)); diff --git a/net/mptcp/sockopt.c b/net/mptcp/sockopt.c index f9a4fb17b5b7..2026a9a36f80 100644 --- a/net/mptcp/sockopt.c +++ b/net/mptcp/sockopt.c @@ -1579,7 +1579,7 @@ int mptcp_set_rcvlowat(struct sock *sk, int val) if (sk->sk_userlocks & SOCK_RCVBUF_LOCK) return 0; - space = __tcp_space_from_win(mptcp_sk(sk)->scaling_ratio, val); + space = mptcp_space_from_win(sk, val); if (space <= sk->sk_rcvbuf) return 0; diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 612c38570a64..39e2cbdf3801 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -1719,7 +1719,7 @@ int mptcp_subflow_create_socket(struct sock *sk, unsigned short family, mptcp_sockopt_sync_locked(mptcp_sk(sk), sf->sk); release_sock(sf->sk); - /* the newly created socket really belongs to the owning MPTCP master + /* the newly created socket really belongs to the owning MPTCP * socket, even if for additional subflows the allocation is performed * by a kernel workqueue. 
Adjust inode references, so that the * procfs/diag interfaces really show this one belonging to the correct diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile index 614815a3ed73..f0aa4d7ef499 100644 --- a/net/netfilter/Makefile +++ b/net/netfilter/Makefile @@ -142,8 +142,13 @@ obj-$(CONFIG_NFT_FWD_NETDEV) += nft_fwd_netdev.o # flow table infrastructure obj-$(CONFIG_NF_FLOW_TABLE) += nf_flow_table.o nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o \ - nf_flow_table_offload.o + nf_flow_table_offload.o nf_flow_table_xdp.o nf_flow_table-$(CONFIG_NF_FLOW_TABLE_PROCFS) += nf_flow_table_procfs.o +ifeq ($(CONFIG_NF_FLOW_TABLE),m) +nf_flow_table-$(CONFIG_DEBUG_INFO_BTF_MODULES) += nf_flow_table_bpf.o +else ifeq ($(CONFIG_NF_FLOW_TABLE),y) +nf_flow_table-$(CONFIG_DEBUG_INFO_BTF) += nf_flow_table_bpf.o +endif obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index b6d0dcf3a5c3..78a1cc72dc38 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1924,7 +1924,8 @@ proc_do_sync_ports(struct ctl_table *table, int write, return rc; } -static int ipvs_proc_est_cpumask_set(struct ctl_table *table, void *buffer) +static int ipvs_proc_est_cpumask_set(const struct ctl_table *table, + void *buffer) { struct netns_ipvs *ipvs = table->extra2; cpumask_var_t *valp = table->data; @@ -1962,8 +1963,8 @@ out: return ret; } -static int ipvs_proc_est_cpumask_get(struct ctl_table *table, void *buffer, - size_t size) +static int ipvs_proc_est_cpumask_get(const struct ctl_table *table, + void *buffer, size_t size) { struct netns_ipvs *ipvs = table->extra2; cpumask_var_t *valp = table->data; diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index 1e689c714127..83e452916403 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c @@ -126,7 +126,7 @@ sctp_snat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, if (sctph->source != cp->vport || payload_csum || skb->ip_summed == CHECKSUM_PARTIAL) { sctph->source = cp->vport; - if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb)) + if (!skb_is_gso(skb)) sctp_nat_csum(skb, sctph, sctphoff); } else { skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -175,7 +175,7 @@ sctp_dnat_handler(struct sk_buff *skb, struct ip_vs_protocol *pp, (skb->ip_summed == CHECKSUM_PARTIAL && !(skb_dst(skb)->dev->features & NETIF_F_SCTP_CRC))) { sctph->dest = cp->dport; - if (!skb_is_gso(skb) || !skb_is_gso_sctp(skb)) + if (!skb_is_gso(skb)) sctp_nat_csum(skb, sctph, sctphoff); } else if (skb->ip_summed != CHECKSUM_PARTIAL) { skb->ip_summed = CHECKSUM_UNNECESSARY; diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c index 8715617b02fe..34ba14e59e95 100644 --- a/net/netfilter/nf_conncount.c +++ b/net/netfilter/nf_conncount.c @@ -321,7 +321,6 @@ insert_tree(struct net *net, struct nf_conncount_rb *rbconn; struct nf_conncount_tuple *conn; unsigned int count = 0, gc_count = 0; - u8 keylen = data->keylen; bool do_gc = true; spin_lock_bh(&nf_conncount_locks[hash]); @@ -333,7 +332,7 @@ restart: rbconn = rb_entry(*rbnode, struct nf_conncount_rb, node); parent = *rbnode; - diff = key_diff(key, rbconn->key, keylen); + diff = key_diff(key, rbconn->key, data->keylen); if (diff < 0) { rbnode = &((*rbnode)->rb_left); } else if (diff > 0) { @@ -378,7 +377,7 @@ restart: conn->tuple = *tuple; conn->zone = *zone; - memcpy(rbconn->key, key, sizeof(u32) * keylen); + memcpy(rbconn->key, key, sizeof(u32) 
* data->keylen); nf_conncount_list_init(&rbconn->list); list_add(&conn->node, &rbconn->list.head); @@ -403,7 +402,6 @@ count_tree(struct net *net, struct rb_node *parent; struct nf_conncount_rb *rbconn; unsigned int hash; - u8 keylen = data->keylen; hash = jhash2(key, data->keylen, conncount_rnd) % CONNCOUNT_SLOTS; root = &data->root[hash]; @@ -414,7 +412,7 @@ count_tree(struct net *net, rbconn = rb_entry(parent, struct nf_conncount_rb, node); - diff = key_diff(key, rbconn->key, keylen); + diff = key_diff(key, rbconn->key, data->keylen); if (diff < 0) { parent = rcu_dereference_raw(parent->rb_left); } else if (diff > 0) { diff --git a/net/netfilter/nf_conntrack_bpf.c b/net/netfilter/nf_conntrack_bpf.c index d2492d050fe6..4a136fc3a9c0 100644 --- a/net/netfilter/nf_conntrack_bpf.c +++ b/net/netfilter/nf_conntrack_bpf.c @@ -32,7 +32,9 @@ * -EINVAL - Passed NULL for bpf_tuple pointer * -EINVAL - opts->reserved is not 0 * -EINVAL - netns_id is less than -1 - * -EINVAL - opts__sz isn't NF_BPF_CT_OPTS_SZ (12) + * -EINVAL - opts__sz isn't NF_BPF_CT_OPTS_SZ (16) or 12 + * -EINVAL - opts->ct_zone_id set when + opts__sz isn't NF_BPF_CT_OPTS_SZ (16) * -EPROTO - l4proto isn't one of IPPROTO_TCP or IPPROTO_UDP * -ENONET - No network namespace found for netns_id * -ENOENT - Conntrack lookup could not find entry for tuple @@ -42,6 +44,8 @@ * Values: * IPPROTO_TCP, IPPROTO_UDP * @dir: - connection tracking tuple direction. + * @ct_zone_id - connection tracking zone id. + * @ct_zone_dir - connection tracking zone direction. * @reserved - Reserved member, will be reused for more options in future * Values: * 0 @@ -51,11 +55,13 @@ struct bpf_ct_opts { s32 error; u8 l4proto; u8 dir; - u8 reserved[2]; + u16 ct_zone_id; + u8 ct_zone_dir; + u8 reserved[3]; }; enum { - NF_BPF_CT_OPTS_SZ = 12, + NF_BPF_CT_OPTS_SZ = 16, }; static int bpf_nf_ct_tuple_parse(struct bpf_sock_tuple *bpf_tuple, @@ -104,12 +110,21 @@ __bpf_nf_ct_alloc_entry(struct net *net, struct bpf_sock_tuple *bpf_tuple, u32 timeout) { struct nf_conntrack_tuple otuple, rtuple; + struct nf_conntrack_zone ct_zone; struct nf_conn *ct; int err; - if (!opts || !bpf_tuple || opts->reserved[0] || opts->reserved[1] || - opts_len != NF_BPF_CT_OPTS_SZ) + if (!opts || !bpf_tuple) return ERR_PTR(-EINVAL); + if (!(opts_len == NF_BPF_CT_OPTS_SZ || opts_len == 12)) + return ERR_PTR(-EINVAL); + if (opts_len == NF_BPF_CT_OPTS_SZ) { + if (opts->reserved[0] || opts->reserved[1] || opts->reserved[2]) + return ERR_PTR(-EINVAL); + } else { + if (opts->ct_zone_id) + return ERR_PTR(-EINVAL); + } if (unlikely(opts->netns_id < BPF_F_CURRENT_NETNS)) return ERR_PTR(-EINVAL); @@ -130,7 +145,16 @@ __bpf_nf_ct_alloc_entry(struct net *net, struct bpf_sock_tuple *bpf_tuple, return ERR_PTR(-ENONET); } - ct = nf_conntrack_alloc(net, &nf_ct_zone_dflt, &otuple, &rtuple, + if (opts_len == NF_BPF_CT_OPTS_SZ) { + if (opts->ct_zone_dir == 0) + opts->ct_zone_dir = NF_CT_DEFAULT_ZONE_DIR; + nf_ct_zone_init(&ct_zone, + opts->ct_zone_id, opts->ct_zone_dir, 0); + } else { + ct_zone = nf_ct_zone_dflt; + } + + ct = nf_conntrack_alloc(net, &ct_zone, &otuple, &rtuple, GFP_ATOMIC); if (IS_ERR(ct)) goto out; @@ -152,12 +176,21 @@ static struct nf_conn *__bpf_nf_ct_lookup(struct net *net, { struct nf_conntrack_tuple_hash *hash; struct nf_conntrack_tuple tuple; + struct nf_conntrack_zone ct_zone; struct nf_conn *ct; int err; - if (!opts || !bpf_tuple || opts->reserved[0] || opts->reserved[1] || - opts_len != NF_BPF_CT_OPTS_SZ) + if (!opts || !bpf_tuple) return ERR_PTR(-EINVAL); + if (!(opts_len == 
NF_BPF_CT_OPTS_SZ || opts_len == 12)) + return ERR_PTR(-EINVAL); + if (opts_len == NF_BPF_CT_OPTS_SZ) { + if (opts->reserved[0] || opts->reserved[1] || opts->reserved[2]) + return ERR_PTR(-EINVAL); + } else { + if (opts->ct_zone_id) + return ERR_PTR(-EINVAL); + } if (unlikely(opts->l4proto != IPPROTO_TCP && opts->l4proto != IPPROTO_UDP)) return ERR_PTR(-EPROTO); if (unlikely(opts->netns_id < BPF_F_CURRENT_NETNS)) @@ -174,7 +207,16 @@ static struct nf_conn *__bpf_nf_ct_lookup(struct net *net, return ERR_PTR(-ENONET); } - hash = nf_conntrack_find_get(net, &nf_ct_zone_dflt, &tuple); + if (opts_len == NF_BPF_CT_OPTS_SZ) { + if (opts->ct_zone_dir == 0) + opts->ct_zone_dir = NF_CT_DEFAULT_ZONE_DIR; + nf_ct_zone_init(&ct_zone, + opts->ct_zone_id, opts->ct_zone_dir, 0); + } else { + ct_zone = nf_ct_zone_dflt; + } + + hash = nf_conntrack_find_get(net, &ct_zone, &tuple); if (opts->netns_id >= 0) put_net(net); if (!hash) @@ -245,7 +287,7 @@ __bpf_kfunc_start_defs(); * @opts - Additional options for allocation (documented above) * Cannot be NULL * @opts__sz - Length of the bpf_ct_opts structure - * Must be NF_BPF_CT_OPTS_SZ (12) + * Must be NF_BPF_CT_OPTS_SZ (16) or 12 */ __bpf_kfunc struct nf_conn___init * bpf_xdp_ct_alloc(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple, @@ -279,7 +321,7 @@ bpf_xdp_ct_alloc(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple, * @opts - Additional options for lookup (documented above) * Cannot be NULL * @opts__sz - Length of the bpf_ct_opts structure - * Must be NF_BPF_CT_OPTS_SZ (12) + * Must be NF_BPF_CT_OPTS_SZ (16) or 12 */ __bpf_kfunc struct nf_conn * bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple, @@ -312,7 +354,7 @@ bpf_xdp_ct_lookup(struct xdp_md *xdp_ctx, struct bpf_sock_tuple *bpf_tuple, * @opts - Additional options for allocation (documented above) * Cannot be NULL * @opts__sz - Length of the bpf_ct_opts structure - * Must be NF_BPF_CT_OPTS_SZ (12) + * Must be NF_BPF_CT_OPTS_SZ (16) or 12 */ __bpf_kfunc struct nf_conn___init * bpf_skb_ct_alloc(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple, @@ -347,7 +389,7 @@ bpf_skb_ct_alloc(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple, * @opts - Additional options for lookup (documented above) * Cannot be NULL * @opts__sz - Length of the bpf_ct_opts structure - * Must be NF_BPF_CT_OPTS_SZ (12) + * Must be NF_BPF_CT_OPTS_SZ (16) or 12 */ __bpf_kfunc struct nf_conn * bpf_skb_ct_lookup(struct __sk_buff *skb_ctx, struct bpf_sock_tuple *bpf_tuple, diff --git a/net/netfilter/nf_flow_table_bpf.c b/net/netfilter/nf_flow_table_bpf.c new file mode 100644 index 000000000000..4a5f5195f2d2 --- /dev/null +++ b/net/netfilter/nf_flow_table_bpf.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Unstable Flow Table Helpers for XDP hook + * + * These are called from the XDP programs. + * Note that it is allowed to break compatibility for these functions since + * the interface they are exposed through to BPF programs is explicitly + * unstable. 
+ */ + +#include +#include +#include +#include +#include +#include +#include + +/* bpf_flowtable_opts - options for bpf flowtable helpers + * @error: out parameter, set for any encountered error + */ +struct bpf_flowtable_opts { + s32 error; +}; + +enum { + NF_BPF_FLOWTABLE_OPTS_SZ = 4, +}; + +__diag_push(); +__diag_ignore_all("-Wmissing-prototypes", + "Global functions as their definitions will be in nf_flow_table BTF"); + +__bpf_kfunc_start_defs(); + +static struct flow_offload_tuple_rhash * +bpf_xdp_flow_tuple_lookup(struct net_device *dev, + struct flow_offload_tuple *tuple, __be16 proto) +{ + struct flow_offload_tuple_rhash *tuplehash; + struct nf_flowtable *nf_flow_table; + struct flow_offload *nf_flow; + + nf_flow_table = nf_flowtable_by_dev(dev); + if (!nf_flow_table) + return ERR_PTR(-ENOENT); + + tuplehash = flow_offload_lookup(nf_flow_table, tuple); + if (!tuplehash) + return ERR_PTR(-ENOENT); + + nf_flow = container_of(tuplehash, struct flow_offload, + tuplehash[tuplehash->tuple.dir]); + flow_offload_refresh(nf_flow_table, nf_flow, false); + + return tuplehash; +} + +__bpf_kfunc struct flow_offload_tuple_rhash * +bpf_xdp_flow_lookup(struct xdp_md *ctx, struct bpf_fib_lookup *fib_tuple, + struct bpf_flowtable_opts *opts, u32 opts_len) +{ + struct xdp_buff *xdp = (struct xdp_buff *)ctx; + struct flow_offload_tuple tuple = { + .iifidx = fib_tuple->ifindex, + .l3proto = fib_tuple->family, + .l4proto = fib_tuple->l4_protocol, + .src_port = fib_tuple->sport, + .dst_port = fib_tuple->dport, + }; + struct flow_offload_tuple_rhash *tuplehash; + __be16 proto; + + if (opts_len != NF_BPF_FLOWTABLE_OPTS_SZ) { + opts->error = -EINVAL; + return NULL; + } + + switch (fib_tuple->family) { + case AF_INET: + tuple.src_v4.s_addr = fib_tuple->ipv4_src; + tuple.dst_v4.s_addr = fib_tuple->ipv4_dst; + proto = htons(ETH_P_IP); + break; + case AF_INET6: + tuple.src_v6 = *(struct in6_addr *)&fib_tuple->ipv6_src; + tuple.dst_v6 = *(struct in6_addr *)&fib_tuple->ipv6_dst; + proto = htons(ETH_P_IPV6); + break; + default: + opts->error = -EAFNOSUPPORT; + return NULL; + } + + tuplehash = bpf_xdp_flow_tuple_lookup(xdp->rxq->dev, &tuple, proto); + if (IS_ERR(tuplehash)) { + opts->error = PTR_ERR(tuplehash); + return NULL; + } + + return tuplehash; +} + +__diag_pop() + +__bpf_kfunc_end_defs(); + +BTF_KFUNCS_START(nf_ft_kfunc_set) +BTF_ID_FLAGS(func, bpf_xdp_flow_lookup, KF_TRUSTED_ARGS | KF_RET_NULL) +BTF_KFUNCS_END(nf_ft_kfunc_set) + +static const struct btf_kfunc_id_set nf_flow_kfunc_set = { + .owner = THIS_MODULE, + .set = &nf_ft_kfunc_set, +}; + +int nf_flow_register_bpf(void) +{ + return register_btf_kfunc_id_set(BPF_PROG_TYPE_XDP, + &nf_flow_kfunc_set); +} +EXPORT_SYMBOL_GPL(nf_flow_register_bpf); diff --git a/net/netfilter/nf_flow_table_inet.c b/net/netfilter/nf_flow_table_inet.c index 6eef15648b7b..88787b45e30d 100644 --- a/net/netfilter/nf_flow_table_inet.c +++ b/net/netfilter/nf_flow_table_inet.c @@ -98,7 +98,7 @@ static int __init nf_flow_inet_module_init(void) nft_register_flowtable_type(&flowtable_ipv6); nft_register_flowtable_type(&flowtable_inet); - return 0; + return nf_flow_register_bpf(); } static void __exit nf_flow_inet_module_exit(void) diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c index a010b25076ca..ff1a4e36c2b5 100644 --- a/net/netfilter/nf_flow_table_offload.c +++ b/net/netfilter/nf_flow_table_offload.c @@ -1192,7 +1192,7 @@ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable, int err; if (!nf_flowtable_hw_offload(flowtable)) - 
return 0; + return nf_flow_offload_xdp_setup(flowtable, dev, cmd); if (dev->netdev_ops->ndo_setup_tc) err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd, diff --git a/net/netfilter/nf_flow_table_xdp.c b/net/netfilter/nf_flow_table_xdp.c new file mode 100644 index 000000000000..e1252d042699 --- /dev/null +++ b/net/netfilter/nf_flow_table_xdp.c @@ -0,0 +1,147 @@ +// SPDX-License-Identifier: GPL-2.0-only +#include +#include +#include +#include +#include +#include +#include + +struct flow_offload_xdp_ft { + struct list_head head; + struct nf_flowtable *ft; + struct rcu_head rcuhead; +}; + +struct flow_offload_xdp { + struct hlist_node hnode; + unsigned long net_device_addr; + struct list_head head; +}; + +#define NF_XDP_HT_BITS 4 +static DEFINE_HASHTABLE(nf_xdp_hashtable, NF_XDP_HT_BITS); +static DEFINE_MUTEX(nf_xdp_hashtable_lock); + +/* caller must hold rcu read lock */ +struct nf_flowtable *nf_flowtable_by_dev(const struct net_device *dev) +{ + unsigned long key = (unsigned long)dev; + struct flow_offload_xdp *iter; + + hash_for_each_possible_rcu(nf_xdp_hashtable, iter, hnode, key) { + if (key == iter->net_device_addr) { + struct flow_offload_xdp_ft *ft_elem; + + /* The user is supposed to insert a given net_device + * just into a single nf_flowtable so we always return + * the first element here. + */ + ft_elem = list_first_or_null_rcu(&iter->head, + struct flow_offload_xdp_ft, + head); + return ft_elem ? ft_elem->ft : NULL; + } + } + + return NULL; +} + +static int nf_flowtable_by_dev_insert(struct nf_flowtable *ft, + const struct net_device *dev) +{ + struct flow_offload_xdp *iter, *elem = NULL; + unsigned long key = (unsigned long)dev; + struct flow_offload_xdp_ft *ft_elem; + + ft_elem = kzalloc(sizeof(*ft_elem), GFP_KERNEL_ACCOUNT); + if (!ft_elem) + return -ENOMEM; + + ft_elem->ft = ft; + + mutex_lock(&nf_xdp_hashtable_lock); + + hash_for_each_possible(nf_xdp_hashtable, iter, hnode, key) { + if (key == iter->net_device_addr) { + elem = iter; + break; + } + } + + if (!elem) { + elem = kzalloc(sizeof(*elem), GFP_KERNEL_ACCOUNT); + if (!elem) + goto err_unlock; + + elem->net_device_addr = key; + INIT_LIST_HEAD(&elem->head); + hash_add_rcu(nf_xdp_hashtable, &elem->hnode, key); + } + list_add_tail_rcu(&ft_elem->head, &elem->head); + + mutex_unlock(&nf_xdp_hashtable_lock); + + return 0; + +err_unlock: + mutex_unlock(&nf_xdp_hashtable_lock); + kfree(ft_elem); + + return -ENOMEM; +} + +static void nf_flowtable_by_dev_remove(struct nf_flowtable *ft, + const struct net_device *dev) +{ + struct flow_offload_xdp *iter, *elem = NULL; + unsigned long key = (unsigned long)dev; + + mutex_lock(&nf_xdp_hashtable_lock); + + hash_for_each_possible(nf_xdp_hashtable, iter, hnode, key) { + if (key == iter->net_device_addr) { + elem = iter; + break; + } + } + + if (elem) { + struct flow_offload_xdp_ft *ft_elem, *ft_next; + + list_for_each_entry_safe(ft_elem, ft_next, &elem->head, head) { + if (ft_elem->ft == ft) { + list_del_rcu(&ft_elem->head); + kfree_rcu(ft_elem, rcuhead); + } + } + + if (list_empty(&elem->head)) + hash_del_rcu(&elem->hnode); + else + elem = NULL; + } + + mutex_unlock(&nf_xdp_hashtable_lock); + + if (elem) { + synchronize_rcu(); + kfree(elem); + } +} + +int nf_flow_offload_xdp_setup(struct nf_flowtable *flowtable, + struct net_device *dev, + enum flow_block_command cmd) +{ + switch (cmd) { + case FLOW_BLOCK_BIND: + return nf_flowtable_by_dev_insert(flowtable, dev); + case FLOW_BLOCK_UNBIND: + nf_flowtable_by_dev_remove(flowtable, dev); + return 0; + } + + WARN_ON_ONCE(1); + return 
0; +} diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 91cc3a81ba8f..481ee78e77bc 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -153,14 +153,18 @@ static struct nft_trans *nft_trans_alloc_gfp(const struct nft_ctx *ctx, { struct nft_trans *trans; - trans = kzalloc(sizeof(struct nft_trans) + size, gfp); + trans = kzalloc(size, gfp); if (trans == NULL) return NULL; INIT_LIST_HEAD(&trans->list); - INIT_LIST_HEAD(&trans->binding_list); trans->msg_type = msg_type; - trans->ctx = *ctx; + + trans->net = ctx->net; + trans->table = ctx->table; + trans->seq = ctx->seq; + trans->flags = ctx->flags; + trans->report = ctx->report; return trans; } @@ -171,10 +175,26 @@ static struct nft_trans *nft_trans_alloc(const struct nft_ctx *ctx, return nft_trans_alloc_gfp(ctx, msg_type, size, GFP_KERNEL); } +static struct nft_trans_binding *nft_trans_get_binding(struct nft_trans *trans) +{ + switch (trans->msg_type) { + case NFT_MSG_NEWCHAIN: + case NFT_MSG_NEWSET: + return container_of(trans, struct nft_trans_binding, nft_trans); + } + + return NULL; +} + static void nft_trans_list_del(struct nft_trans *trans) { + struct nft_trans_binding *trans_binding; + list_del(&trans->list); - list_del(&trans->binding_list); + + trans_binding = nft_trans_get_binding(trans); + if (trans_binding) + list_del(&trans_binding->binding_list); } static void nft_trans_destroy(struct nft_trans *trans) @@ -236,7 +256,7 @@ static void __nft_chain_trans_bind(const struct nft_ctx *ctx, nft_trans_chain_bound(trans) = bind; break; case NFT_MSG_NEWRULE: - if (trans->ctx.chain == chain) + if (nft_trans_rule_chain(trans) == chain) nft_trans_rule_bound(trans) = bind; break; } @@ -372,21 +392,26 @@ static void nf_tables_unregister_hook(struct net *net, static void nft_trans_commit_list_add_tail(struct net *net, struct nft_trans *trans) { struct nftables_pernet *nft_net = nft_pernet(net); + struct nft_trans_binding *binding; + + list_add_tail(&trans->list, &nft_net->commit_list); + + binding = nft_trans_get_binding(trans); + if (!binding) + return; switch (trans->msg_type) { case NFT_MSG_NEWSET: if (!nft_trans_set_update(trans) && nft_set_is_anonymous(nft_trans_set(trans))) - list_add_tail(&trans->binding_list, &nft_net->binding_list); + list_add_tail(&binding->binding_list, &nft_net->binding_list); break; case NFT_MSG_NEWCHAIN: if (!nft_trans_chain_update(trans) && nft_chain_binding(nft_trans_chain(trans))) - list_add_tail(&trans->binding_list, &nft_net->binding_list); + list_add_tail(&binding->binding_list, &nft_net->binding_list); break; } - - list_add_tail(&trans->list, &nft_net->commit_list); } static int nft_trans_table_add(struct nft_ctx *ctx, int msg_type) @@ -416,11 +441,28 @@ static int nft_deltable(struct nft_ctx *ctx) return err; } +static struct nft_trans * +nft_trans_alloc_chain(const struct nft_ctx *ctx, int msg_type) +{ + struct nft_trans_chain *trans_chain; + struct nft_trans *trans; + + trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain)); + if (!trans) + return NULL; + + trans_chain = nft_trans_container_chain(trans); + INIT_LIST_HEAD(&trans_chain->nft_trans_binding.binding_list); + trans_chain->chain = ctx->chain; + + return trans; +} + static struct nft_trans *nft_trans_chain_add(struct nft_ctx *ctx, int msg_type) { struct nft_trans *trans; - trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_chain)); + trans = nft_trans_alloc_chain(ctx, msg_type); if (trans == NULL) return ERR_PTR(-ENOMEM); @@ -432,7 +474,6 @@ static struct 
nft_trans *nft_trans_chain_add(struct nft_ctx *ctx, int msg_type) ntohl(nla_get_be32(ctx->nla[NFTA_CHAIN_ID])); } } - nft_trans_chain(trans) = ctx->chain; nft_trans_commit_list_add_tail(ctx->net, trans); return trans; @@ -505,6 +546,7 @@ static struct nft_trans *nft_trans_rule_add(struct nft_ctx *ctx, int msg_type, ntohl(nla_get_be32(ctx->nla[NFTA_RULE_ID])); } nft_trans_rule(trans) = rule; + nft_trans_rule_chain(trans) = ctx->chain; nft_trans_commit_list_add_tail(ctx->net, trans); return trans; @@ -560,12 +602,16 @@ static int __nft_trans_set_add(const struct nft_ctx *ctx, int msg_type, struct nft_set *set, const struct nft_set_desc *desc) { + struct nft_trans_set *trans_set; struct nft_trans *trans; trans = nft_trans_alloc(ctx, msg_type, sizeof(struct nft_trans_set)); if (trans == NULL) return -ENOMEM; + trans_set = nft_trans_container_set(trans); + INIT_LIST_HEAD(&trans_set->nft_trans_binding.binding_list); + if (msg_type == NFT_MSG_NEWSET && ctx->nla[NFTA_SET_ID] && !desc) { nft_trans_set_id(trans) = ntohl(nla_get_be32(ctx->nla[NFTA_SET_ID])); @@ -1217,11 +1263,11 @@ static bool nft_table_pending_update(const struct nft_ctx *ctx) return true; list_for_each_entry(trans, &nft_net->commit_list, list) { - if (trans->ctx.table == ctx->table && + if (trans->table == ctx->table && ((trans->msg_type == NFT_MSG_NEWCHAIN && nft_trans_chain_update(trans)) || (trans->msg_type == NFT_MSG_DELCHAIN && - nft_is_base_chain(trans->ctx.chain)))) + nft_is_base_chain(nft_trans_chain(trans))))) return true; } @@ -1615,15 +1661,15 @@ static int nf_tables_deltable(struct sk_buff *skb, const struct nfnl_info *info, return nft_flush_table(&ctx); } -static void nf_tables_table_destroy(struct nft_ctx *ctx) +static void nf_tables_table_destroy(struct nft_table *table) { - if (WARN_ON(ctx->table->use > 0)) + if (WARN_ON(table->use > 0)) return; - rhltable_destroy(&ctx->table->chains_ht); - kfree(ctx->table->name); - kfree(ctx->table->udata); - kfree(ctx->table); + rhltable_destroy(&table->chains_ht); + kfree(table->name); + kfree(table->udata); + kfree(table); } void nft_register_chain_type(const struct nft_chain_type *ctype) @@ -2049,18 +2095,19 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr) return newstats; } -static void nft_chain_stats_replace(struct nft_trans *trans) +static void nft_chain_stats_replace(struct nft_trans_chain *trans) { - struct nft_base_chain *chain = nft_base_chain(trans->ctx.chain); + const struct nft_trans *t = &trans->nft_trans_binding.nft_trans; + struct nft_base_chain *chain = nft_base_chain(trans->chain); - if (!nft_trans_chain_stats(trans)) + if (!trans->stats) return; - nft_trans_chain_stats(trans) = - rcu_replace_pointer(chain->stats, nft_trans_chain_stats(trans), - lockdep_commit_lock_is_held(trans->ctx.net)); + trans->stats = + rcu_replace_pointer(chain->stats, trans->stats, + lockdep_commit_lock_is_held(t->net)); - if (!nft_trans_chain_stats(trans)) + if (!trans->stats) static_branch_inc(&nft_counters_enabled); } @@ -2078,9 +2125,9 @@ static void nf_tables_chain_free_chain_rules(struct nft_chain *chain) kvfree(chain->blob_next); } -void nf_tables_chain_destroy(struct nft_ctx *ctx) +void nf_tables_chain_destroy(struct nft_chain *chain) { - struct nft_chain *chain = ctx->chain; + const struct nft_table *table = chain->table; struct nft_hook *hook, *next; if (WARN_ON(chain->use > 0)) @@ -2092,7 +2139,7 @@ void nf_tables_chain_destroy(struct nft_ctx *ctx) if (nft_is_base_chain(chain)) { struct nft_base_chain *basechain = nft_base_chain(chain); - if 
(nft_base_chain_netdev(ctx->family, basechain->ops.hooknum)) { + if (nft_base_chain_netdev(table->family, basechain->ops.hooknum)) { list_for_each_entry_safe(hook, next, &basechain->hook_list, list) { list_del_rcu(&hook->list); @@ -2581,7 +2628,7 @@ err_chain_add: err_trans: nft_use_dec_restore(&table->use); err_destroy_chain: - nf_tables_chain_destroy(ctx); + nf_tables_chain_destroy(chain); return err; } @@ -2698,8 +2745,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, } err = -ENOMEM; - trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN, - sizeof(struct nft_trans_chain)); + trans = nft_trans_alloc_chain(ctx, NFT_MSG_NEWCHAIN); if (trans == NULL) goto err_trans; @@ -2725,7 +2771,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, err = -EEXIST; list_for_each_entry(tmp, &nft_net->commit_list, list) { if (tmp->msg_type == NFT_MSG_NEWCHAIN && - tmp->ctx.table == table && + tmp->table == table && nft_trans_chain_update(tmp) && nft_trans_chain_name(tmp) && strcmp(name, nft_trans_chain_name(tmp)) == 0) { @@ -2774,13 +2820,11 @@ static struct nft_chain *nft_chain_lookup_byid(const struct net *net, struct nft_trans *trans; list_for_each_entry(trans, &nft_net->commit_list, list) { - struct nft_chain *chain = trans->ctx.chain; - if (trans->msg_type == NFT_MSG_NEWCHAIN && - chain->table == table && + nft_trans_chain(trans)->table == table && id == nft_trans_chain_id(trans) && - nft_active_genmask(chain, genmask)) - return chain; + nft_active_genmask(nft_trans_chain(trans), genmask)) + return nft_trans_chain(trans); } return ERR_PTR(-ENOENT); } @@ -2915,8 +2959,7 @@ static int nft_delchain_hook(struct nft_ctx *ctx, list_move(&hook->list, &chain_del_list); } - trans = nft_trans_alloc(ctx, NFT_MSG_DELCHAIN, - sizeof(struct nft_trans_chain)); + trans = nft_trans_alloc_chain(ctx, NFT_MSG_DELCHAIN); if (!trans) { err = -ENOMEM; goto err_chain_del_hook; @@ -4200,7 +4243,7 @@ static struct nft_rule *nft_rule_lookup_byid(const struct net *net, list_for_each_entry(trans, &nft_net->commit_list, list) { if (trans->msg_type == NFT_MSG_NEWRULE && - trans->ctx.chain == chain && + nft_trans_rule_chain(trans) == chain && id == nft_trans_rule_id(trans)) return nft_trans_rule(trans); } @@ -9429,51 +9472,53 @@ static int nf_tables_validate(struct net *net) * * We defer the drop policy until the transaction has been finalized. 
*/ -static void nft_chain_commit_drop_policy(struct nft_trans *trans) +static void nft_chain_commit_drop_policy(struct nft_trans_chain *trans) { struct nft_base_chain *basechain; - if (nft_trans_chain_policy(trans) != NF_DROP) + if (trans->policy != NF_DROP) return; - if (!nft_is_base_chain(trans->ctx.chain)) + if (!nft_is_base_chain(trans->chain)) return; - basechain = nft_base_chain(trans->ctx.chain); + basechain = nft_base_chain(trans->chain); basechain->policy = NF_DROP; } -static void nft_chain_commit_update(struct nft_trans *trans) +static void nft_chain_commit_update(struct nft_trans_chain *trans) { + struct nft_table *table = trans->nft_trans_binding.nft_trans.table; struct nft_base_chain *basechain; - if (nft_trans_chain_name(trans)) { - rhltable_remove(&trans->ctx.table->chains_ht, - &trans->ctx.chain->rhlhead, + if (trans->name) { + rhltable_remove(&table->chains_ht, + &trans->chain->rhlhead, nft_chain_ht_params); - swap(trans->ctx.chain->name, nft_trans_chain_name(trans)); - rhltable_insert_key(&trans->ctx.table->chains_ht, - trans->ctx.chain->name, - &trans->ctx.chain->rhlhead, + swap(trans->chain->name, trans->name); + rhltable_insert_key(&table->chains_ht, + trans->chain->name, + &trans->chain->rhlhead, nft_chain_ht_params); } - if (!nft_is_base_chain(trans->ctx.chain)) + if (!nft_is_base_chain(trans->chain)) return; nft_chain_stats_replace(trans); - basechain = nft_base_chain(trans->ctx.chain); + basechain = nft_base_chain(trans->chain); - switch (nft_trans_chain_policy(trans)) { + switch (trans->policy) { case NF_DROP: case NF_ACCEPT: - basechain->policy = nft_trans_chain_policy(trans); + basechain->policy = trans->policy; break; } } -static void nft_obj_commit_update(struct nft_trans *trans) +static void nft_obj_commit_update(const struct nft_ctx *ctx, + struct nft_trans *trans) { struct nft_object *newobj; struct nft_object *obj; @@ -9485,15 +9530,21 @@ static void nft_obj_commit_update(struct nft_trans *trans) return; obj->ops->update(obj, newobj); - nft_obj_destroy(&trans->ctx, newobj); + nft_obj_destroy(ctx, newobj); } static void nft_commit_release(struct nft_trans *trans) { + struct nft_ctx ctx = { + .net = trans->net, + }; + + nft_ctx_update(&ctx, trans); + switch (trans->msg_type) { case NFT_MSG_DELTABLE: case NFT_MSG_DESTROYTABLE: - nf_tables_table_destroy(&trans->ctx); + nf_tables_table_destroy(trans->table); break; case NFT_MSG_NEWCHAIN: free_percpu(nft_trans_chain_stats(trans)); @@ -9504,25 +9555,25 @@ static void nft_commit_release(struct nft_trans *trans) if (nft_trans_chain_update(trans)) nft_hooks_destroy(&nft_trans_chain_hooks(trans)); else - nf_tables_chain_destroy(&trans->ctx); + nf_tables_chain_destroy(nft_trans_chain(trans)); break; case NFT_MSG_DELRULE: case NFT_MSG_DESTROYRULE: - nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); + nf_tables_rule_destroy(&ctx, nft_trans_rule(trans)); break; case NFT_MSG_DELSET: case NFT_MSG_DESTROYSET: - nft_set_destroy(&trans->ctx, nft_trans_set(trans)); + nft_set_destroy(&ctx, nft_trans_set(trans)); break; case NFT_MSG_DELSETELEM: case NFT_MSG_DESTROYSETELEM: - nf_tables_set_elem_destroy(&trans->ctx, + nf_tables_set_elem_destroy(&ctx, nft_trans_elem_set(trans), nft_trans_elem_priv(trans)); break; case NFT_MSG_DELOBJ: case NFT_MSG_DESTROYOBJ: - nft_obj_destroy(&trans->ctx, nft_trans_obj(trans)); + nft_obj_destroy(&ctx, nft_trans_obj(trans)); break; case NFT_MSG_DELFLOWTABLE: case NFT_MSG_DESTROYFLOWTABLE: @@ -9534,7 +9585,7 @@ static void nft_commit_release(struct nft_trans *trans) } if (trans->put_net) - 
put_net(trans->ctx.net); + put_net(trans->net); kfree(trans); } @@ -9653,10 +9704,10 @@ static void nf_tables_commit_chain_prepare_cancel(struct net *net) struct nft_trans *trans, *next; list_for_each_entry_safe(trans, next, &nft_net->commit_list, list) { - struct nft_chain *chain = trans->ctx.chain; - if (trans->msg_type == NFT_MSG_NEWRULE || trans->msg_type == NFT_MSG_DELRULE) { + struct nft_chain *chain = nft_trans_rule_chain(trans); + kvfree(chain->blob_next); chain->blob_next = NULL; } @@ -10014,7 +10065,7 @@ static void nf_tables_commit_release(struct net *net) trans = list_last_entry(&nft_net->commit_list, struct nft_trans, list); - get_net(trans->ctx.net); + get_net(trans->net); WARN_ON_ONCE(trans->put_net); trans->put_net = true; @@ -10158,12 +10209,15 @@ static void nft_gc_seq_end(struct nftables_pernet *nft_net, unsigned int gc_seq) static int nf_tables_commit(struct net *net, struct sk_buff *skb) { struct nftables_pernet *nft_net = nft_pernet(net); + const struct nlmsghdr *nlh = nlmsg_hdr(skb); + struct nft_trans_binding *trans_binding; struct nft_trans *trans, *next; unsigned int base_seq, gc_seq; LIST_HEAD(set_update_list); struct nft_trans_elem *te; struct nft_chain *chain; struct nft_table *table; + struct nft_ctx ctx; LIST_HEAD(adl); int err; @@ -10172,7 +10226,10 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) return 0; } - list_for_each_entry(trans, &nft_net->binding_list, binding_list) { + nft_ctx_init(&ctx, net, skb, nlh, NFPROTO_UNSPEC, NULL, NULL, NULL); + + list_for_each_entry(trans_binding, &nft_net->binding_list, binding_list) { + trans = &trans_binding->nft_trans; switch (trans->msg_type) { case NFT_MSG_NEWSET: if (!nft_trans_set_update(trans) && @@ -10190,6 +10247,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) return -EINVAL; } break; + default: + WARN_ONCE(1, "Unhandled bind type %d", trans->msg_type); + break; } } @@ -10205,9 +10265,10 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) /* 1. 
Allocate space for next generation rules_gen_X[] */ list_for_each_entry_safe(trans, next, &nft_net->commit_list, list) { + struct nft_table *table = trans->table; int ret; - ret = nf_tables_commit_audit_alloc(&adl, trans->ctx.table); + ret = nf_tables_commit_audit_alloc(&adl, table); if (ret) { nf_tables_commit_chain_prepare_cancel(net); nf_tables_commit_audit_free(&adl); @@ -10215,7 +10276,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) } if (trans->msg_type == NFT_MSG_NEWRULE || trans->msg_type == NFT_MSG_DELRULE) { - chain = trans->ctx.chain; + chain = nft_trans_rule_chain(trans); ret = nf_tables_commit_chain_prepare(net, chain); if (ret < 0) { @@ -10248,70 +10309,71 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) net->nft.gencursor = nft_gencursor_next(net); list_for_each_entry_safe(trans, next, &nft_net->commit_list, list) { - nf_tables_commit_audit_collect(&adl, trans->ctx.table, - trans->msg_type); + struct nft_table *table = trans->table; + + nft_ctx_update(&ctx, trans); + + nf_tables_commit_audit_collect(&adl, table, trans->msg_type); switch (trans->msg_type) { case NFT_MSG_NEWTABLE: if (nft_trans_table_update(trans)) { - if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) { + if (!(table->flags & __NFT_TABLE_F_UPDATE)) { nft_trans_destroy(trans); break; } - if (trans->ctx.table->flags & NFT_TABLE_F_DORMANT) - nf_tables_table_disable(net, trans->ctx.table); + if (table->flags & NFT_TABLE_F_DORMANT) + nf_tables_table_disable(net, table); - trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE; + table->flags &= ~__NFT_TABLE_F_UPDATE; } else { - nft_clear(net, trans->ctx.table); + nft_clear(net, table); } - nf_tables_table_notify(&trans->ctx, NFT_MSG_NEWTABLE); + nf_tables_table_notify(&ctx, NFT_MSG_NEWTABLE); nft_trans_destroy(trans); break; case NFT_MSG_DELTABLE: case NFT_MSG_DESTROYTABLE: - list_del_rcu(&trans->ctx.table->list); - nf_tables_table_notify(&trans->ctx, trans->msg_type); + list_del_rcu(&table->list); + nf_tables_table_notify(&ctx, trans->msg_type); break; case NFT_MSG_NEWCHAIN: if (nft_trans_chain_update(trans)) { - nft_chain_commit_update(trans); - nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN, + nft_chain_commit_update(nft_trans_container_chain(trans)); + nf_tables_chain_notify(&ctx, NFT_MSG_NEWCHAIN, &nft_trans_chain_hooks(trans)); list_splice(&nft_trans_chain_hooks(trans), &nft_trans_basechain(trans)->hook_list); /* trans destroyed after rcu grace period */ } else { - nft_chain_commit_drop_policy(trans); - nft_clear(net, trans->ctx.chain); - nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN, NULL); + nft_chain_commit_drop_policy(nft_trans_container_chain(trans)); + nft_clear(net, nft_trans_chain(trans)); + nf_tables_chain_notify(&ctx, NFT_MSG_NEWCHAIN, NULL); nft_trans_destroy(trans); } break; case NFT_MSG_DELCHAIN: case NFT_MSG_DESTROYCHAIN: if (nft_trans_chain_update(trans)) { - nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN, + nf_tables_chain_notify(&ctx, NFT_MSG_DELCHAIN, &nft_trans_chain_hooks(trans)); - if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT)) { + if (!(table->flags & NFT_TABLE_F_DORMANT)) { nft_netdev_unregister_hooks(net, &nft_trans_chain_hooks(trans), true); } } else { - nft_chain_del(trans->ctx.chain); - nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN, + nft_chain_del(nft_trans_chain(trans)); + nf_tables_chain_notify(&ctx, NFT_MSG_DELCHAIN, NULL); - nf_tables_unregister_hook(trans->ctx.net, - trans->ctx.table, - trans->ctx.chain); + nf_tables_unregister_hook(ctx.net, ctx.table, 
+ nft_trans_chain(trans)); } break; case NFT_MSG_NEWRULE: - nft_clear(trans->ctx.net, nft_trans_rule(trans)); - nf_tables_rule_notify(&trans->ctx, - nft_trans_rule(trans), + nft_clear(net, nft_trans_rule(trans)); + nf_tables_rule_notify(&ctx, nft_trans_rule(trans), NFT_MSG_NEWRULE); - if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) + if (nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD) nft_flow_rule_destroy(nft_trans_flow_rule(trans)); nft_trans_destroy(trans); @@ -10319,14 +10381,12 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) case NFT_MSG_DELRULE: case NFT_MSG_DESTROYRULE: list_del_rcu(&nft_trans_rule(trans)->list); - nf_tables_rule_notify(&trans->ctx, - nft_trans_rule(trans), + nf_tables_rule_notify(&ctx, nft_trans_rule(trans), trans->msg_type); - nft_rule_expr_deactivate(&trans->ctx, - nft_trans_rule(trans), + nft_rule_expr_deactivate(&ctx, nft_trans_rule(trans), NFT_TRANS_COMMIT); - if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) + if (nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD) nft_flow_rule_destroy(nft_trans_flow_rule(trans)); break; case NFT_MSG_NEWSET: @@ -10345,9 +10405,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) */ if (nft_set_is_anonymous(nft_trans_set(trans)) && !list_empty(&nft_trans_set(trans)->bindings)) - nft_use_dec(&trans->ctx.table->use); + nft_use_dec(&table->use); } - nf_tables_set_notify(&trans->ctx, nft_trans_set(trans), + nf_tables_set_notify(&ctx, nft_trans_set(trans), NFT_MSG_NEWSET, GFP_KERNEL); nft_trans_destroy(trans); break; @@ -10355,14 +10415,14 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) case NFT_MSG_DESTROYSET: nft_trans_set(trans)->dead = 1; list_del_rcu(&nft_trans_set(trans)->list); - nf_tables_set_notify(&trans->ctx, nft_trans_set(trans), + nf_tables_set_notify(&ctx, nft_trans_set(trans), trans->msg_type, GFP_KERNEL); break; case NFT_MSG_NEWSETELEM: - te = (struct nft_trans_elem *)trans->data; + te = nft_trans_container_elem(trans); nft_setelem_activate(net, te->set, te->elem_priv); - nf_tables_setelem_notify(&trans->ctx, te->set, + nf_tables_setelem_notify(&ctx, te->set, te->elem_priv, NFT_MSG_NEWSETELEM); if (te->set->ops->commit && @@ -10374,9 +10434,9 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) break; case NFT_MSG_DELSETELEM: case NFT_MSG_DESTROYSETELEM: - te = (struct nft_trans_elem *)trans->data; + te = nft_trans_container_elem(trans); - nf_tables_setelem_notify(&trans->ctx, te->set, + nf_tables_setelem_notify(&ctx, te->set, te->elem_priv, trans->msg_type); nft_setelem_remove(net, te->set, te->elem_priv); @@ -10392,13 +10452,13 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) break; case NFT_MSG_NEWOBJ: if (nft_trans_obj_update(trans)) { - nft_obj_commit_update(trans); - nf_tables_obj_notify(&trans->ctx, + nft_obj_commit_update(&ctx, trans); + nf_tables_obj_notify(&ctx, nft_trans_obj(trans), NFT_MSG_NEWOBJ); } else { nft_clear(net, nft_trans_obj(trans)); - nf_tables_obj_notify(&trans->ctx, + nf_tables_obj_notify(&ctx, nft_trans_obj(trans), NFT_MSG_NEWOBJ); nft_trans_destroy(trans); @@ -10407,14 +10467,14 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) case NFT_MSG_DELOBJ: case NFT_MSG_DESTROYOBJ: nft_obj_del(nft_trans_obj(trans)); - nf_tables_obj_notify(&trans->ctx, nft_trans_obj(trans), + nf_tables_obj_notify(&ctx, nft_trans_obj(trans), trans->msg_type); break; case NFT_MSG_NEWFLOWTABLE: if (nft_trans_flowtable_update(trans)) { nft_trans_flowtable(trans)->data.flags = 
nft_trans_flowtable_flags(trans); - nf_tables_flowtable_notify(&trans->ctx, + nf_tables_flowtable_notify(&ctx, nft_trans_flowtable(trans), &nft_trans_flowtable_hooks(trans), NFT_MSG_NEWFLOWTABLE); @@ -10422,7 +10482,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) &nft_trans_flowtable(trans)->hook_list); } else { nft_clear(net, nft_trans_flowtable(trans)); - nf_tables_flowtable_notify(&trans->ctx, + nf_tables_flowtable_notify(&ctx, nft_trans_flowtable(trans), NULL, NFT_MSG_NEWFLOWTABLE); @@ -10432,7 +10492,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) case NFT_MSG_DELFLOWTABLE: case NFT_MSG_DESTROYFLOWTABLE: if (nft_trans_flowtable_update(trans)) { - nf_tables_flowtable_notify(&trans->ctx, + nf_tables_flowtable_notify(&ctx, nft_trans_flowtable(trans), &nft_trans_flowtable_hooks(trans), trans->msg_type); @@ -10440,7 +10500,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) &nft_trans_flowtable_hooks(trans)); } else { list_del_rcu(&nft_trans_flowtable(trans)->list); - nf_tables_flowtable_notify(&trans->ctx, + nf_tables_flowtable_notify(&ctx, nft_trans_flowtable(trans), NULL, trans->msg_type); @@ -10482,28 +10542,32 @@ static void nf_tables_module_autoload(struct net *net) static void nf_tables_abort_release(struct nft_trans *trans) { + struct nft_ctx ctx = { }; + + nft_ctx_update(&ctx, trans); + switch (trans->msg_type) { case NFT_MSG_NEWTABLE: - nf_tables_table_destroy(&trans->ctx); + nf_tables_table_destroy(trans->table); break; case NFT_MSG_NEWCHAIN: if (nft_trans_chain_update(trans)) nft_hooks_destroy(&nft_trans_chain_hooks(trans)); else - nf_tables_chain_destroy(&trans->ctx); + nf_tables_chain_destroy(nft_trans_chain(trans)); break; case NFT_MSG_NEWRULE: - nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); + nf_tables_rule_destroy(&ctx, nft_trans_rule(trans)); break; case NFT_MSG_NEWSET: - nft_set_destroy(&trans->ctx, nft_trans_set(trans)); + nft_set_destroy(&ctx, nft_trans_set(trans)); break; case NFT_MSG_NEWSETELEM: nft_set_elem_destroy(nft_trans_elem_set(trans), nft_trans_elem_priv(trans), true); break; case NFT_MSG_NEWOBJ: - nft_obj_destroy(&trans->ctx, nft_trans_obj(trans)); + nft_obj_destroy(&ctx, nft_trans_obj(trans)); break; case NFT_MSG_NEWFLOWTABLE: if (nft_trans_flowtable_update(trans)) @@ -10535,6 +10599,9 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) struct nft_trans *trans, *next; LIST_HEAD(set_update_list); struct nft_trans_elem *te; + struct nft_ctx ctx = { + .net = net, + }; int err = 0; if (action == NFNL_ABORT_VALIDATE && @@ -10543,37 +10610,41 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) list_for_each_entry_safe_reverse(trans, next, &nft_net->commit_list, list) { + struct nft_table *table = trans->table; + + nft_ctx_update(&ctx, trans); + switch (trans->msg_type) { case NFT_MSG_NEWTABLE: if (nft_trans_table_update(trans)) { - if (!(trans->ctx.table->flags & __NFT_TABLE_F_UPDATE)) { + if (!(table->flags & __NFT_TABLE_F_UPDATE)) { nft_trans_destroy(trans); break; } - if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_DORMANT) { - nf_tables_table_disable(net, trans->ctx.table); - trans->ctx.table->flags |= NFT_TABLE_F_DORMANT; - } else if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_AWAKEN) { - trans->ctx.table->flags &= ~NFT_TABLE_F_DORMANT; + if (table->flags & __NFT_TABLE_F_WAS_DORMANT) { + nf_tables_table_disable(net, table); + table->flags |= NFT_TABLE_F_DORMANT; + } else if (table->flags & __NFT_TABLE_F_WAS_AWAKEN) { + 
table->flags &= ~NFT_TABLE_F_DORMANT; } - if (trans->ctx.table->flags & __NFT_TABLE_F_WAS_ORPHAN) { - trans->ctx.table->flags &= ~NFT_TABLE_F_OWNER; - trans->ctx.table->nlpid = 0; + if (table->flags & __NFT_TABLE_F_WAS_ORPHAN) { + table->flags &= ~NFT_TABLE_F_OWNER; + table->nlpid = 0; } - trans->ctx.table->flags &= ~__NFT_TABLE_F_UPDATE; + table->flags &= ~__NFT_TABLE_F_UPDATE; nft_trans_destroy(trans); } else { - list_del_rcu(&trans->ctx.table->list); + list_del_rcu(&table->list); } break; case NFT_MSG_DELTABLE: case NFT_MSG_DESTROYTABLE: - nft_clear(trans->ctx.net, trans->ctx.table); + nft_clear(trans->net, table); nft_trans_destroy(trans); break; case NFT_MSG_NEWCHAIN: if (nft_trans_chain_update(trans)) { - if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT)) { + if (!(table->flags & NFT_TABLE_F_DORMANT)) { nft_netdev_unregister_hooks(net, &nft_trans_chain_hooks(trans), true); @@ -10586,11 +10657,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_trans_destroy(trans); break; } - nft_use_dec_restore(&trans->ctx.table->use); - nft_chain_del(trans->ctx.chain); - nf_tables_unregister_hook(trans->ctx.net, - trans->ctx.table, - trans->ctx.chain); + nft_use_dec_restore(&table->use); + nft_chain_del(nft_trans_chain(trans)); + nf_tables_unregister_hook(trans->net, table, + nft_trans_chain(trans)); } break; case NFT_MSG_DELCHAIN: @@ -10599,8 +10669,8 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) list_splice(&nft_trans_chain_hooks(trans), &nft_trans_basechain(trans)->hook_list); } else { - nft_use_inc_restore(&trans->ctx.table->use); - nft_clear(trans->ctx.net, trans->ctx.chain); + nft_use_inc_restore(&table->use); + nft_clear(trans->net, nft_trans_chain(trans)); } nft_trans_destroy(trans); break; @@ -10609,20 +10679,20 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_trans_destroy(trans); break; } - nft_use_dec_restore(&trans->ctx.chain->use); + nft_use_dec_restore(&nft_trans_rule_chain(trans)->use); list_del_rcu(&nft_trans_rule(trans)->list); - nft_rule_expr_deactivate(&trans->ctx, + nft_rule_expr_deactivate(&ctx, nft_trans_rule(trans), NFT_TRANS_ABORT); - if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) + if (nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD) nft_flow_rule_destroy(nft_trans_flow_rule(trans)); break; case NFT_MSG_DELRULE: case NFT_MSG_DESTROYRULE: - nft_use_inc_restore(&trans->ctx.chain->use); - nft_clear(trans->ctx.net, nft_trans_rule(trans)); - nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans)); - if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) + nft_use_inc_restore(&nft_trans_rule_chain(trans)->use); + nft_clear(trans->net, nft_trans_rule(trans)); + nft_rule_expr_activate(&ctx, nft_trans_rule(trans)); + if (nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD) nft_flow_rule_destroy(nft_trans_flow_rule(trans)); nft_trans_destroy(trans); @@ -10632,7 +10702,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_trans_destroy(trans); break; } - nft_use_dec_restore(&trans->ctx.table->use); + nft_use_dec_restore(&table->use); if (nft_trans_set_bound(trans)) { nft_trans_destroy(trans); break; @@ -10642,10 +10712,10 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) break; case NFT_MSG_DELSET: case NFT_MSG_DESTROYSET: - nft_use_inc_restore(&trans->ctx.table->use); - nft_clear(trans->ctx.net, nft_trans_set(trans)); + nft_use_inc_restore(&table->use); + nft_clear(trans->net, 
nft_trans_set(trans)); if (nft_trans_set(trans)->flags & (NFT_SET_MAP | NFT_SET_OBJECT)) - nft_map_activate(&trans->ctx, nft_trans_set(trans)); + nft_map_activate(&ctx, nft_trans_set(trans)); nft_trans_destroy(trans); break; @@ -10654,7 +10724,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_trans_destroy(trans); break; } - te = (struct nft_trans_elem *)trans->data; + te = nft_trans_container_elem(trans); nft_setelem_remove(net, te->set, te->elem_priv); if (!nft_setelem_is_catchall(te->set, te->elem_priv)) atomic_dec(&te->set->nelems); @@ -10667,7 +10737,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) break; case NFT_MSG_DELSETELEM: case NFT_MSG_DESTROYSETELEM: - te = (struct nft_trans_elem *)trans->data; + te = nft_trans_container_elem(trans); if (!nft_setelem_active_next(net, te->set, te->elem_priv)) { nft_setelem_data_activate(net, te->set, te->elem_priv); @@ -10685,17 +10755,17 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) break; case NFT_MSG_NEWOBJ: if (nft_trans_obj_update(trans)) { - nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans)); + nft_obj_destroy(&ctx, nft_trans_obj_newobj(trans)); nft_trans_destroy(trans); } else { - nft_use_dec_restore(&trans->ctx.table->use); + nft_use_dec_restore(&table->use); nft_obj_del(nft_trans_obj(trans)); } break; case NFT_MSG_DELOBJ: case NFT_MSG_DESTROYOBJ: - nft_use_inc_restore(&trans->ctx.table->use); - nft_clear(trans->ctx.net, nft_trans_obj(trans)); + nft_use_inc_restore(&table->use); + nft_clear(trans->net, nft_trans_obj(trans)); nft_trans_destroy(trans); break; case NFT_MSG_NEWFLOWTABLE: @@ -10703,7 +10773,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_unregister_flowtable_net_hooks(net, &nft_trans_flowtable_hooks(trans)); } else { - nft_use_dec_restore(&trans->ctx.table->use); + nft_use_dec_restore(&table->use); list_del_rcu(&nft_trans_flowtable(trans)->list); nft_unregister_flowtable_net_hooks(net, &nft_trans_flowtable(trans)->hook_list); @@ -10715,8 +10785,8 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) list_splice(&nft_trans_flowtable_hooks(trans), &nft_trans_flowtable(trans)->hook_list); } else { - nft_use_inc_restore(&trans->ctx.table->use); - nft_clear(trans->ctx.net, nft_trans_flowtable(trans)); + nft_use_inc_restore(&table->use); + nft_clear(trans->net, nft_trans_flowtable(trans)); } nft_trans_destroy(trans); break; @@ -11233,7 +11303,7 @@ int __nft_release_basechain(struct nft_ctx *ctx) } nft_chain_del(ctx->chain); nft_use_dec(&ctx->table->use); - nf_tables_chain_destroy(ctx); + nf_tables_chain_destroy(ctx->chain); return 0; } @@ -11308,12 +11378,11 @@ static void __nft_release_table(struct net *net, struct nft_table *table) nft_obj_destroy(&ctx, obj); } list_for_each_entry_safe(chain, nc, &table->chains, list) { - ctx.chain = chain; nft_chain_del(chain); nft_use_dec(&table->use); - nf_tables_chain_destroy(&ctx); + nf_tables_chain_destroy(chain); } - nf_tables_table_destroy(&ctx); + nf_tables_table_destroy(table); } static void __nft_release_tables(struct net *net) @@ -11455,6 +11524,14 @@ static int __init nf_tables_module_init(void) { int err; + BUILD_BUG_ON(offsetof(struct nft_trans_table, nft_trans) != 0); + BUILD_BUG_ON(offsetof(struct nft_trans_chain, nft_trans_binding.nft_trans) != 0); + BUILD_BUG_ON(offsetof(struct nft_trans_rule, nft_trans) != 0); + BUILD_BUG_ON(offsetof(struct nft_trans_set, nft_trans_binding.nft_trans) != 0); + 
BUILD_BUG_ON(offsetof(struct nft_trans_elem, nft_trans) != 0); + BUILD_BUG_ON(offsetof(struct nft_trans_obj, nft_trans) != 0); + BUILD_BUG_ON(offsetof(struct nft_trans_flowtable, nft_trans) != 0); + err = register_pernet_subsys(&nf_tables_net_ops); if (err < 0) return err; diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c index 12ab78fa5d84..64675f1c7f29 100644 --- a/net/netfilter/nf_tables_offload.c +++ b/net/netfilter/nf_tables_offload.c @@ -513,38 +513,38 @@ static void nft_flow_rule_offload_abort(struct net *net, int err = 0; list_for_each_entry_continue_reverse(trans, &nft_net->commit_list, list) { - if (trans->ctx.family != NFPROTO_NETDEV) + if (trans->table->family != NFPROTO_NETDEV) continue; switch (trans->msg_type) { case NFT_MSG_NEWCHAIN: - if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) || + if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD) || nft_trans_chain_update(trans)) continue; - err = nft_flow_offload_chain(trans->ctx.chain, NULL, + err = nft_flow_offload_chain(nft_trans_chain(trans), NULL, FLOW_BLOCK_UNBIND); break; case NFT_MSG_DELCHAIN: - if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) + if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)) continue; - err = nft_flow_offload_chain(trans->ctx.chain, NULL, + err = nft_flow_offload_chain(nft_trans_chain(trans), NULL, FLOW_BLOCK_BIND); break; case NFT_MSG_NEWRULE: - if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) + if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)) continue; - err = nft_flow_offload_rule(trans->ctx.chain, + err = nft_flow_offload_rule(nft_trans_rule_chain(trans), nft_trans_rule(trans), NULL, FLOW_CLS_DESTROY); break; case NFT_MSG_DELRULE: - if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) + if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)) continue; - err = nft_flow_offload_rule(trans->ctx.chain, + err = nft_flow_offload_rule(nft_trans_rule_chain(trans), nft_trans_rule(trans), nft_trans_flow_rule(trans), FLOW_CLS_REPLACE); @@ -564,46 +564,46 @@ int nft_flow_rule_offload_commit(struct net *net) u8 policy; list_for_each_entry(trans, &nft_net->commit_list, list) { - if (trans->ctx.family != NFPROTO_NETDEV) + if (trans->table->family != NFPROTO_NETDEV) continue; switch (trans->msg_type) { case NFT_MSG_NEWCHAIN: - if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) || + if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD) || nft_trans_chain_update(trans)) continue; policy = nft_trans_chain_policy(trans); - err = nft_flow_offload_chain(trans->ctx.chain, &policy, + err = nft_flow_offload_chain(nft_trans_chain(trans), &policy, FLOW_BLOCK_BIND); break; case NFT_MSG_DELCHAIN: - if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) + if (!(nft_trans_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)) continue; policy = nft_trans_chain_policy(trans); - err = nft_flow_offload_chain(trans->ctx.chain, &policy, + err = nft_flow_offload_chain(nft_trans_chain(trans), &policy, FLOW_BLOCK_UNBIND); break; case NFT_MSG_NEWRULE: - if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) + if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)) continue; - if (trans->ctx.flags & NLM_F_REPLACE || - !(trans->ctx.flags & NLM_F_APPEND)) { + if (trans->flags & NLM_F_REPLACE || + !(trans->flags & NLM_F_APPEND)) { err = -EOPNOTSUPP; break; } - err = nft_flow_offload_rule(trans->ctx.chain, + err = nft_flow_offload_rule(nft_trans_rule_chain(trans), nft_trans_rule(trans), nft_trans_flow_rule(trans), FLOW_CLS_REPLACE); 
break; case NFT_MSG_DELRULE: - if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD)) + if (!(nft_trans_rule_chain(trans)->flags & NFT_CHAIN_HW_OFFLOAD)) continue; - err = nft_flow_offload_rule(trans->ctx.chain, + err = nft_flow_offload_rule(nft_trans_rule_chain(trans), nft_trans_rule(trans), NULL, FLOW_CLS_DESTROY); break; diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c index a83637e3f455..580c55268f65 100644 --- a/net/netfilter/nf_tables_trace.c +++ b/net/netfilter/nf_tables_trace.c @@ -317,7 +317,7 @@ void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt, net_get_random_once(&trace_key, sizeof(trace_key)); info->skbid = (u32)siphash_3u32(hash32_ptr(skb), - skb_get_hash(skb), + skb_get_hash_net(nft_net(pkt), skb), skb->skb_iif, &trace_key); } diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c index f466af4f8531..eab4f476b47f 100644 --- a/net/netfilter/nfnetlink_cttimeout.c +++ b/net/netfilter/nfnetlink_cttimeout.c @@ -366,8 +366,7 @@ static int cttimeout_default_set(struct sk_buff *skb, __u8 l4num; int ret; - if (!cda[CTA_TIMEOUT_L3PROTO] || - !cda[CTA_TIMEOUT_L4PROTO] || + if (!cda[CTA_TIMEOUT_L4PROTO] || !cda[CTA_TIMEOUT_DATA]) return -EINVAL; diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c index 92d47e469204..868d68302d22 100644 --- a/net/netfilter/nft_hash.c +++ b/net/netfilter/nft_hash.c @@ -51,7 +51,8 @@ static void nft_symhash_eval(const struct nft_expr *expr, struct sk_buff *skb = pkt->skb; u32 h; - h = reciprocal_scale(__skb_get_hash_symmetric(skb), priv->modulus); + h = reciprocal_scale(__skb_get_hash_symmetric_net(nft_net(pkt), skb), + priv->modulus); regs->data[priv->dreg] = h + priv->offset; } diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 6475c7abc1fe..ac2422c215e5 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c @@ -221,7 +221,7 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx, list_del(&rule->list); nf_tables_rule_destroy(&chain_ctx, rule); } - nf_tables_chain_destroy(&chain_ctx); + nf_tables_chain_destroy(chain); break; default: break; diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c index ef93e0d3bee0..588a5e6ad899 100644 --- a/net/netfilter/xt_recent.c +++ b/net/netfilter/xt_recent.c @@ -59,9 +59,9 @@ MODULE_PARM_DESC(ip_list_gid, "default owning group of /proc/net/xt_recent/* fil /* retained for backwards compatibility */ static unsigned int ip_pkt_list_tot __read_mostly; module_param(ip_pkt_list_tot, uint, 0400); -MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 255)"); +MODULE_PARM_DESC(ip_pkt_list_tot, "number of packets per IP address to remember (max. 
65535)"); -#define XT_RECENT_MAX_NSTAMPS 256 +#define XT_RECENT_MAX_NSTAMPS 65536 struct recent_entry { struct list_head list; @@ -69,7 +69,7 @@ struct recent_entry { union nf_inet_addr addr; u_int16_t family; u_int8_t ttl; - u_int8_t index; + u_int16_t index; u_int16_t nstamps; unsigned long stamps[]; }; @@ -80,7 +80,7 @@ struct recent_table { union nf_inet_addr mask; unsigned int refcnt; unsigned int entries; - u8 nstamps_max_mask; + u_int16_t nstamps_max_mask; struct list_head lru_list; struct list_head iphash[]; }; diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index fa9c090cf629..0b7a89db3ab7 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -636,8 +636,7 @@ static struct proto netlink_proto = { }; static int __netlink_create(struct net *net, struct socket *sock, - struct mutex *dump_cb_mutex, int protocol, - int kern) + int protocol, int kern) { struct sock *sk; struct netlink_sock *nlk; @@ -655,7 +654,6 @@ static int __netlink_create(struct net *net, struct socket *sock, lockdep_set_class_and_name(&nlk->nl_cb_mutex, nlk_cb_mutex_keys + protocol, nlk_cb_mutex_key_strings[protocol]); - nlk->dump_cb_mutex = dump_cb_mutex; init_waitqueue_head(&nlk->wait); sk->sk_destruct = netlink_sock_destruct; @@ -667,7 +665,6 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol, int kern) { struct module *module = NULL; - struct mutex *cb_mutex; struct netlink_sock *nlk; int (*bind)(struct net *net, int group); void (*unbind)(struct net *net, int group); @@ -696,7 +693,6 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol, module = nl_table[protocol].module; else err = -EPROTONOSUPPORT; - cb_mutex = nl_table[protocol].cb_mutex; bind = nl_table[protocol].bind; unbind = nl_table[protocol].unbind; release = nl_table[protocol].release; @@ -705,7 +701,7 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol, if (err < 0) goto out; - err = __netlink_create(net, sock, cb_mutex, protocol, kern); + err = __netlink_create(net, sock, protocol, kern); if (err < 0) goto out_module; @@ -2016,7 +2012,6 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module, struct sock *sk; struct netlink_sock *nlk; struct listeners *listeners = NULL; - struct mutex *cb_mutex = cfg ? 
cfg->cb_mutex : NULL; unsigned int groups; BUG_ON(!nl_table); @@ -2027,7 +2022,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module, if (sock_create_lite(PF_NETLINK, SOCK_DGRAM, unit, &sock)) return NULL; - if (__netlink_create(net, sock, cb_mutex, unit, 1) < 0) + if (__netlink_create(net, sock, unit, 1) < 0) goto out_sock_release_nosk; sk = sock->sk; @@ -2055,7 +2050,6 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module, if (!nl_table[unit].registered) { nl_table[unit].groups = groups; rcu_assign_pointer(nl_table[unit].listeners, listeners); - nl_table[unit].cb_mutex = cb_mutex; nl_table[unit].module = module; if (cfg) { nl_table[unit].bind = cfg->bind; @@ -2326,17 +2320,9 @@ static int netlink_dump(struct sock *sk, bool lock_taken) netlink_skb_set_owner_r(skb, sk); if (nlk->dump_done_errno > 0) { - struct mutex *extra_mutex = nlk->dump_cb_mutex; - cb->extack = &extack; - if (cb->flags & RTNL_FLAG_DUMP_UNLOCKED) - extra_mutex = NULL; - if (extra_mutex) - mutex_lock(extra_mutex); nlk->dump_done_errno = cb->dump(skb, cb); - if (extra_mutex) - mutex_unlock(extra_mutex); /* EMSGSIZE plus something already in the skb means * that there's more to dump but current skb has filled up. diff --git a/net/openvswitch/Kconfig b/net/openvswitch/Kconfig index 29a7081858cd..2535f3f9f462 100644 --- a/net/openvswitch/Kconfig +++ b/net/openvswitch/Kconfig @@ -10,6 +10,7 @@ config OPENVSWITCH (NF_CONNTRACK && ((!NF_DEFRAG_IPV6 || NF_DEFRAG_IPV6) && \ (!NF_NAT || NF_NAT) && \ (!NETFILTER_CONNCOUNT || NETFILTER_CONNCOUNT))) + depends on PSAMPLE || !PSAMPLE select LIBCRC32C select MPLS select NET_MPLS_GSO diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c index 964225580824..101f9a23792c 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -24,6 +24,11 @@ #include #include #include + +#if IS_ENABLED(CONFIG_PSAMPLE) +#include +#endif + #include #include "datapath.h" @@ -1043,12 +1048,15 @@ static int sample(struct datapath *dp, struct sk_buff *skb, struct nlattr *sample_arg; int rem = nla_len(attr); const struct sample_arg *arg; + u32 init_probability; bool clone_flow_key; + int err; /* The first action is always 'OVS_SAMPLE_ATTR_ARG'. */ sample_arg = nla_data(attr); arg = nla_data(sample_arg); actions = nla_next(sample_arg, &rem); + init_probability = OVS_CB(skb)->probability; if ((arg->probability != U32_MAX) && (!arg->probability || get_random_u32() > arg->probability)) { @@ -1057,9 +1065,16 @@ static int sample(struct datapath *dp, struct sk_buff *skb, return 0; } + OVS_CB(skb)->probability = arg->probability; + clone_flow_key = !arg->exec; - return clone_execute(dp, skb, key, 0, actions, rem, last, - clone_flow_key); + err = clone_execute(dp, skb, key, 0, actions, rem, last, + clone_flow_key); + + if (!last) + OVS_CB(skb)->probability = init_probability; + + return err; } /* When 'last' is true, clone() should always consume the 'skb'. 
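For illustration: the execute_psample() hunk just below reuses psample's sample rate to carry a probability. OVS_CB(skb)->probability (populated by the sample() hunk above) is passed as the rate with md.rate_as_probability set, and a probability of 0 (the packet never went through sample()) maps to U32_MAX, i.e. 100%. A listener should therefore read PSAMPLE_ATTR_SAMPLE_RATE as a probability whenever the PSAMPLE_ATTR_SAMPLE_PROBABILITY flag is present (see the psample hunks later in this series). A minimal user-space sketch of that conversion follows; the netlink attribute parsing around it is assumed rather than shown:

    #include <stdint.h>
    #include <stdio.h>

    /* Effective sampling probability from PSAMPLE_ATTR_SAMPLE_RATE,
     * depending on whether PSAMPLE_ATTR_SAMPLE_PROBABILITY was set.
     */
    static double psample_probability(uint32_t rate, int rate_is_probability)
    {
        if (rate_is_probability) /* rate is scaled to U32_MAX */
            return (double)rate / (double)UINT32_MAX;
        return rate ? 1.0 / (double)rate : 0.0; /* classic 1-in-N rate */
    }

    int main(void)
    {
        printf("%f\n", psample_probability(UINT32_MAX, 1));     /* 1.0, OVS "unsampled" */
        printf("%f\n", psample_probability(UINT32_MAX / 2, 1)); /* ~0.5 */
        printf("%f\n", psample_probability(256, 0));            /* 1-in-256 */
        return 0;
    }

Restoring init_probability when 'last' is false keeps nested sample() actions from leaking their probability into sibling actions that run on the same skb.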
@@ -1299,6 +1314,44 @@ static int execute_dec_ttl(struct sk_buff *skb, struct sw_flow_key *key) return 0; } +#if IS_ENABLED(CONFIG_PSAMPLE) +static void execute_psample(struct datapath *dp, struct sk_buff *skb, + const struct nlattr *attr) +{ + struct psample_group psample_group = {}; + struct psample_metadata md = {}; + const struct nlattr *a; + u32 rate; + int rem; + + nla_for_each_attr(a, nla_data(attr), nla_len(attr), rem) { + switch (nla_type(a)) { + case OVS_PSAMPLE_ATTR_GROUP: + psample_group.group_num = nla_get_u32(a); + break; + + case OVS_PSAMPLE_ATTR_COOKIE: + md.user_cookie = nla_data(a); + md.user_cookie_len = nla_len(a); + break; + } + } + + psample_group.net = ovs_dp_get_net(dp); + md.in_ifindex = OVS_CB(skb)->input_vport->dev->ifindex; + md.trunc_size = skb->len - OVS_CB(skb)->cutlen; + md.rate_as_probability = 1; + + rate = OVS_CB(skb)->probability ? OVS_CB(skb)->probability : U32_MAX; + + psample_sample_packet(&psample_group, skb, rate, &md); +} +#else +static void execute_psample(struct datapath *dp, struct sk_buff *skb, + const struct nlattr *attr) +{} +#endif + /* Execute a list of actions against 'skb'. */ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, struct sw_flow_key *key, @@ -1502,6 +1555,15 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb, ovs_kfree_skb_reason(skb, reason); return 0; } + + case OVS_ACTION_ATTR_PSAMPLE: + execute_psample(dp, skb, a); + OVS_CB(skb)->cutlen = 0; + if (nla_is_last(a, rem)) { + consume_skb(skb); + return 0; + } + break; } if (unlikely(err)) { diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c index 3b980bf2770b..8eb1d644b741 100644 --- a/net/openvswitch/conntrack.c +++ b/net/openvswitch/conntrack.c @@ -679,6 +679,8 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key, action |= BIT(NF_NAT_MANIP_DST); err = nf_ct_nat(skb, ct, ctinfo, &action, &info->range, info->commit); + if (err != NF_ACCEPT) + return err; if (action & BIT(NF_NAT_MANIP_SRC)) ovs_nat_update_key(key, skb, NF_NAT_MANIP_SRC); @@ -697,6 +699,22 @@ static int ovs_ct_nat(struct net *net, struct sw_flow_key *key, } #endif +static int verdict_to_errno(unsigned int verdict) +{ + switch (verdict & NF_VERDICT_MASK) { + case NF_ACCEPT: + return 0; + case NF_DROP: + return -EINVAL; + case NF_STOLEN: + return -EINPROGRESS; + default: + break; + } + + return -EINVAL; +} + /* Pass 'skb' through conntrack in 'net', using zone configured in 'info', if * not done already. Update key with new CT state after passing the packet * through conntrack. @@ -735,7 +753,7 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key, err = nf_conntrack_in(skb, &state); if (err != NF_ACCEPT) - return -ENOENT; + return verdict_to_errno(err); /* Clear CT state NAT flags to mark that we have not yet done * NAT after the nf_conntrack_in() call. We can actually clear @@ -762,9 +780,12 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key, * the key->ct_state. 
*/ if (info->nat && !(key->ct_state & OVS_CS_F_NAT_MASK) && - (nf_ct_is_confirmed(ct) || info->commit) && - ovs_ct_nat(net, key, info, skb, ct, ctinfo) != NF_ACCEPT) { - return -EINVAL; + (nf_ct_is_confirmed(ct) || info->commit)) { + int err = ovs_ct_nat(net, key, info, skb, ct, ctinfo); + + err = verdict_to_errno(err); + if (err) + return err; } /* Userspace may decide to perform a ct lookup without a helper @@ -795,9 +816,12 @@ static int __ovs_ct_lookup(struct net *net, struct sw_flow_key *key, * - When committing an unconfirmed connection. */ if ((nf_ct_is_confirmed(ct) ? !cached || add_helper : - info->commit) && - nf_ct_helper(skb, ct, ctinfo, info->family) != NF_ACCEPT) { - return -EINVAL; + info->commit)) { + int err = nf_ct_helper(skb, ct, ctinfo, info->family); + + err = verdict_to_errno(err); + if (err) + return err; } if (nf_ct_protonum(ct) == IPPROTO_TCP && @@ -1001,10 +1025,9 @@ static int ovs_ct_commit(struct net *net, struct sw_flow_key *key, /* This will take care of sending queued events even if the connection * is already confirmed. */ - if (nf_conntrack_confirm(skb) != NF_ACCEPT) - return -EINVAL; + err = nf_conntrack_confirm(skb); - return 0; + return verdict_to_errno(err); } /* Returns 0 on success, -EINPROGRESS if 'skb' is stolen, or other nonzero @@ -1039,6 +1062,10 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb, else err = ovs_ct_lookup(net, key, info, skb); + /* conntrack core returned NF_STOLEN */ + if (err == -EINPROGRESS) + return err; + skb_push_rcsum(skb, nh_ofs); if (err) ovs_kfree_skb_reason(skb, OVS_DROP_CONNTRACK); diff --git a/net/openvswitch/datapath.h b/net/openvswitch/datapath.h index 0cd29971a907..9ca6231ea647 100644 --- a/net/openvswitch/datapath.h +++ b/net/openvswitch/datapath.h @@ -115,12 +115,15 @@ struct datapath { * fragmented. * @acts_origlen: The netlink size of the flow actions applied to this skb. * @cutlen: The number of bytes from the packet end to be removed. + * @probability: The sampling probability that was applied to this skb; 0 means + * no sampling has occurred; U32_MAX means 100% probability. */ struct ovs_skb_cb { struct vport *input_vport; u16 mru; u16 acts_origlen; u32 cutlen; + u32 probability; }; #define OVS_CB(skb) ((struct ovs_skb_cb *)(skb)->cb) diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c index f224d9bcea5e..c92bdc4dfe19 100644 --- a/net/openvswitch/flow_netlink.c +++ b/net/openvswitch/flow_netlink.c @@ -64,6 +64,7 @@ static bool actions_may_change_flow(const struct nlattr *actions) case OVS_ACTION_ATTR_TRUNC: case OVS_ACTION_ATTR_USERSPACE: case OVS_ACTION_ATTR_DROP: + case OVS_ACTION_ATTR_PSAMPLE: break; case OVS_ACTION_ATTR_CT: @@ -2409,7 +2410,7 @@ static void ovs_nla_free_nested_actions(const struct nlattr *actions, int len) /* Whenever new actions are added, the need to update this * function should be considered. 
*/ - BUILD_BUG_ON(OVS_ACTION_ATTR_MAX != 24); + BUILD_BUG_ON(OVS_ACTION_ATTR_MAX != 25); if (!actions) return; @@ -3157,6 +3158,28 @@ static int validate_and_copy_check_pkt_len(struct net *net, return 0; } +static int validate_psample(const struct nlattr *attr) +{ + static const struct nla_policy policy[OVS_PSAMPLE_ATTR_MAX + 1] = { + [OVS_PSAMPLE_ATTR_GROUP] = { .type = NLA_U32 }, + [OVS_PSAMPLE_ATTR_COOKIE] = { + .type = NLA_BINARY, + .len = OVS_PSAMPLE_COOKIE_MAX_SIZE, + }, + }; + struct nlattr *a[OVS_PSAMPLE_ATTR_MAX + 1]; + int err; + + if (!IS_ENABLED(CONFIG_PSAMPLE)) + return -EOPNOTSUPP; + + err = nla_parse_nested(a, OVS_PSAMPLE_ATTR_MAX, attr, policy, NULL); + if (err) + return err; + + return a[OVS_PSAMPLE_ATTR_GROUP] ? 0 : -EINVAL; +} + static int copy_action(const struct nlattr *from, struct sw_flow_actions **sfa, bool log) { @@ -3212,6 +3235,7 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, [OVS_ACTION_ATTR_ADD_MPLS] = sizeof(struct ovs_action_add_mpls), [OVS_ACTION_ATTR_DEC_TTL] = (u32)-1, [OVS_ACTION_ATTR_DROP] = sizeof(u32), + [OVS_ACTION_ATTR_PSAMPLE] = (u32)-1, }; const struct ovs_action_push_vlan *vlan; int type = nla_type(a); @@ -3490,6 +3514,12 @@ static int __ovs_nla_copy_actions(struct net *net, const struct nlattr *attr, return -EINVAL; break; + case OVS_ACTION_ATTR_PSAMPLE: + err = validate_psample(a); + if (err) + return err; + break; + default: OVS_NLERR(log, "Unknown Action type %d", type); return -EINVAL; diff --git a/net/openvswitch/vport-internal_dev.c b/net/openvswitch/vport-internal_dev.c index 74c88a6baa43..4b33133cbdff 100644 --- a/net/openvswitch/vport-internal_dev.c +++ b/net/openvswitch/vport-internal_dev.c @@ -85,7 +85,6 @@ static const struct net_device_ops internal_dev_netdev_ops = { .ndo_stop = internal_dev_stop, .ndo_start_xmit = internal_dev_xmit, .ndo_set_mac_address = eth_mac_addr, - .ndo_get_stats64 = dev_get_tstats64, }; static struct rtnl_link_ops internal_dev_link_ops __read_mostly = { @@ -140,11 +139,7 @@ static struct vport *internal_dev_create(const struct vport_parms *parms) err = -ENOMEM; goto error_free_vport; } - vport->dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); - if (!vport->dev->tstats) { - err = -ENOMEM; - goto error_free_netdev; - } + dev->pcpu_stat_type = NETDEV_PCPU_STAT_TSTATS; dev_net_set(vport->dev, ovs_dp_get_net(vport->dp)); dev->ifindex = parms->desired_ifindex; @@ -169,8 +164,6 @@ static struct vport *internal_dev_create(const struct vport_parms *parms) error_unlock: rtnl_unlock(); - free_percpu(dev->tstats); -error_free_netdev: free_netdev(dev); error_free_vport: ovs_vport_free(vport); @@ -186,7 +179,6 @@ static void internal_dev_destroy(struct vport *vport) /* unregister_netdevice() waits for an RCU grace period. 
*/ unregister_netdevice(vport->dev); - free_percpu(vport->dev->tstats); rtnl_unlock(); } diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 972ae01a70f7..8732f6e51ae5 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -500,6 +500,7 @@ int ovs_vport_receive(struct vport *vport, struct sk_buff *skb, OVS_CB(skb)->input_vport = vport; OVS_CB(skb)->mru = 0; OVS_CB(skb)->cutlen = 0; + OVS_CB(skb)->probability = 0; if (unlikely(dev_net(skb->dev) != ovs_dp_get_net(vport->dp))) { u32 mark; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ea3ebc160e25..4a364cdd445e 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -538,6 +538,61 @@ static void *packet_current_frame(struct packet_sock *po, return packet_lookup_frame(po, rb, rb->head, status); } +static u16 vlan_get_tci(struct sk_buff *skb, struct net_device *dev) +{ + u8 *skb_orig_data = skb->data; + int skb_orig_len = skb->len; + struct vlan_hdr vhdr, *vh; + unsigned int header_len; + + if (!dev) + return 0; + + /* In the SOCK_DGRAM scenario, skb data starts at the network + * protocol, which is after the VLAN headers. The outer VLAN + * header is at the hard_header_len offset in non-variable + * length link layer headers. If it's a VLAN device, the + * min_header_len should be used to exclude the VLAN header + * size. + */ + if (dev->min_header_len == dev->hard_header_len) + header_len = dev->hard_header_len; + else if (is_vlan_dev(dev)) + header_len = dev->min_header_len; + else + return 0; + + skb_push(skb, skb->data - skb_mac_header(skb)); + vh = skb_header_pointer(skb, header_len, sizeof(vhdr), &vhdr); + if (skb_orig_data != skb->data) { + skb->data = skb_orig_data; + skb->len = skb_orig_len; + } + if (unlikely(!vh)) + return 0; + + return ntohs(vh->h_vlan_TCI); +} + +static __be16 vlan_get_protocol_dgram(struct sk_buff *skb) +{ + __be16 proto = skb->protocol; + + if (unlikely(eth_type_vlan(proto))) { + u8 *skb_orig_data = skb->data; + int skb_orig_len = skb->len; + + skb_push(skb, skb->data - skb_mac_header(skb)); + proto = __vlan_get_protocol(skb, proto, NULL); + if (skb_orig_data != skb->data) { + skb->data = skb_orig_data; + skb->len = skb_orig_len; + } + } + + return proto; +} + static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc) { del_timer_sync(&pkc->retire_blk_timer); @@ -1007,10 +1062,16 @@ static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc, static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc, struct tpacket3_hdr *ppd) { + struct packet_sock *po = container_of(pkc, struct packet_sock, rx_ring.prb_bdqc); + if (skb_vlan_tag_present(pkc->skb)) { ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb); ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto); ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; + } else if (unlikely(po->sk.sk_type == SOCK_DGRAM && eth_type_vlan(pkc->skb->protocol))) { + ppd->hv1.tp_vlan_tci = vlan_get_tci(pkc->skb, pkc->skb->dev); + ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->protocol); + ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { ppd->hv1.tp_vlan_tci = 0; ppd->hv1.tp_vlan_tpid = 0; @@ -2056,8 +2117,7 @@ retry: skb->dev = dev; skb->priority = READ_ONCE(sk->sk_priority); skb->mark = READ_ONCE(sk->sk_mark); - skb->tstamp = sockc.transmit_time; - + skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, sk->sk_clockid); skb_setup_tx_timestamp(skb, sockc.tsflags); if (unlikely(extra_len == 4)) @@ -2122,7 +2182,7 @@ static int packet_rcv(struct sk_buff *skb, struct 
net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { enum skb_drop_reason drop_reason = SKB_CONSUMED; - struct sock *sk; + struct sock *sk = NULL; struct sockaddr_ll *sll; struct packet_sock *po; u8 *skb_head = skb->data; @@ -2227,7 +2287,7 @@ drop_n_restore: skb->len = skb_len; } drop: - kfree_skb_reason(skb, drop_reason); + sk_skb_reason_drop(sk, skb, drop_reason); return 0; } @@ -2235,7 +2295,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { enum skb_drop_reason drop_reason = SKB_CONSUMED; - struct sock *sk; + struct sock *sk = NULL; struct packet_sock *po; struct sockaddr_ll *sll; union tpacket_uhdr h; @@ -2428,6 +2488,10 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, h.h2->tp_vlan_tci = skb_vlan_tag_get(skb); h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto); status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; + } else if (unlikely(sk->sk_type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) { + h.h2->tp_vlan_tci = vlan_get_tci(skb, skb->dev); + h.h2->tp_vlan_tpid = ntohs(skb->protocol); + status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; } else { h.h2->tp_vlan_tci = 0; h.h2->tp_vlan_tpid = 0; @@ -2457,7 +2521,8 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev, sll->sll_halen = dev_parse_header(skb, sll->sll_addr); sll->sll_family = AF_PACKET; sll->sll_hatype = dev->type; - sll->sll_protocol = skb->protocol; + sll->sll_protocol = (sk->sk_type == SOCK_DGRAM) ? + vlan_get_protocol_dgram(skb) : skb->protocol; sll->sll_pkttype = skb->pkt_type; if (unlikely(packet_sock_flag(po, PACKET_SOCK_ORIGDEV))) sll->sll_ifindex = orig_dev->ifindex; @@ -2495,7 +2560,7 @@ drop_n_restore: skb->len = skb_len; } drop: - kfree_skb_reason(skb, drop_reason); + sk_skb_reason_drop(sk, skb, drop_reason); return 0; drop_n_account: @@ -2504,7 +2569,7 @@ drop_n_account: drop_reason = SKB_DROP_REASON_PACKET_SOCK_ERROR; sk->sk_data_ready(sk); - kfree_skb_reason(copy_skb, drop_reason); + sk_skb_reason_drop(sk, copy_skb, drop_reason); goto drop_n_restore; } @@ -2584,7 +2649,7 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb, skb->dev = dev; skb->priority = READ_ONCE(po->sk.sk_priority); skb->mark = READ_ONCE(po->sk.sk_mark); - skb->tstamp = sockc->transmit_time; + skb_set_delivery_type_by_clockid(skb, sockc->transmit_time, po->sk.sk_clockid); skb_setup_tx_timestamp(skb, sockc->tsflags); skb_zcopy_set_nouarg(skb, ph.raw); @@ -3062,7 +3127,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) skb->dev = dev; skb->priority = READ_ONCE(sk->sk_priority); skb->mark = sockc.mark; - skb->tstamp = sockc.transmit_time; + skb_set_delivery_type_by_clockid(skb, sockc.transmit_time, sk->sk_clockid); if (unlikely(extra_len == 4)) skb->no_fcs = 1; @@ -3482,7 +3547,8 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, /* Original length was stored in sockaddr_ll fields */ origlen = PACKET_SKB_CB(skb)->sa.origlen; sll->sll_family = AF_PACKET; - sll->sll_protocol = skb->protocol; + sll->sll_protocol = (sock->type == SOCK_DGRAM) ? 
+ vlan_get_protocol_dgram(skb) : skb->protocol; } sock_recv_cmsgs(msg, sk, skb); @@ -3539,6 +3605,21 @@ static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, aux.tp_vlan_tci = skb_vlan_tag_get(skb); aux.tp_vlan_tpid = ntohs(skb->vlan_proto); aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; + } else if (unlikely(sock->type == SOCK_DGRAM && eth_type_vlan(skb->protocol))) { + struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll; + struct net_device *dev; + + rcu_read_lock(); + dev = dev_get_by_index_rcu(sock_net(sk), sll->sll_ifindex); + if (dev) { + aux.tp_vlan_tci = vlan_get_tci(skb, dev); + aux.tp_vlan_tpid = ntohs(skb->protocol); + aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID; + } else { + aux.tp_vlan_tci = 0; + aux.tp_vlan_tpid = 0; + } + rcu_read_unlock(); } else { aux.tp_vlan_tci = 0; aux.tp_vlan_tpid = 0; diff --git a/net/psample/psample.c b/net/psample/psample.c index a5d9b8446f77..a0ddae8a65f9 100644 --- a/net/psample/psample.c +++ b/net/psample/psample.c @@ -360,8 +360,9 @@ static int psample_tunnel_meta_len(struct ip_tunnel_info *tun_info) } #endif -void psample_sample_packet(struct psample_group *group, struct sk_buff *skb, - u32 sample_rate, const struct psample_metadata *md) +void psample_sample_packet(struct psample_group *group, + const struct sk_buff *skb, u32 sample_rate, + const struct psample_metadata *md) { ktime_t tstamp = ktime_get_real(); int out_ifindex = md->out_ifindex; @@ -376,6 +377,10 @@ void psample_sample_packet(struct psample_group *group, struct sk_buff *skb, void *data; int ret; + if (!genl_has_listeners(&psample_nl_family, group->net, + PSAMPLE_NL_MCGRP_SAMPLE)) + return; + meta_len = (in_ifindex ? nla_total_size(sizeof(u16)) : 0) + (out_ifindex ? nla_total_size(sizeof(u16)) : 0) + (md->out_tc_valid ? nla_total_size(sizeof(u16)) : 0) + @@ -386,7 +391,9 @@ void psample_sample_packet(struct psample_group *group, struct sk_buff *skb, nla_total_size(sizeof(u32)) + /* group_num */ nla_total_size(sizeof(u32)) + /* seq */ nla_total_size_64bit(sizeof(u64)) + /* timestamp */ - nla_total_size(sizeof(u16)); /* protocol */ + nla_total_size(sizeof(u16)) + /* protocol */ + (md->user_cookie_len ? 
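Taken together, the af_packet changes above surface VLAN metadata for SOCK_DGRAM sockets on all three receive paths: the TPACKET_V3 ring (prb_fill_vlan_info()), the TPACKET_V2 ring (tpacket_rcv()) and plain recvmsg() via PACKET_AUXDATA. A minimal user-space consumer, assuming a socket created with socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL)) and PACKET_AUXDATA enabled with setsockopt() beforehand (an illustrative sketch, not part of this series):

#include <linux/if_packet.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <stdio.h>
#include <string.h>

static void read_one(int fd)
{
	char data[2048], cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov, .msg_iovlen = 1,
		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;

	if (recvmsg(fd, &msg, 0) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		struct tpacket_auxdata aux;

		if (cmsg->cmsg_level != SOL_PACKET ||
		    cmsg->cmsg_type != PACKET_AUXDATA)
			continue;

		memcpy(&aux, CMSG_DATA(cmsg), sizeof(aux));
		/* Now also set for VLAN packets seen on SOCK_DGRAM sockets */
		if (aux.tp_status & TP_STATUS_VLAN_VALID)
			printf("VLAN id %u\n", aux.tp_vlan_tci & 0x0fff);
	}
}
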
+ nla_total_size(md->user_cookie_len) : 0); /* user cookie */ #ifdef CONFIG_INET tun_info = skb_tunnel_info(skb); @@ -486,6 +493,14 @@ void psample_sample_packet(struct psample_group *group, struct sk_buff *skb, } #endif + if (md->user_cookie && md->user_cookie_len && + nla_put(nl_skb, PSAMPLE_ATTR_USER_COOKIE, md->user_cookie_len, + md->user_cookie)) + goto error; + + if (md->rate_as_probability) + nla_put_flag(nl_skb, PSAMPLE_ATTR_SAMPLE_PROBABILITY); + genlmsg_end(nl_skb, data); genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0, PSAMPLE_NL_MCGRP_SAMPLE, GFP_ATOMIC); diff --git a/net/qrtr/ns.c b/net/qrtr/ns.c index 654a3cc0d347..3de9350cbf30 100644 --- a/net/qrtr/ns.c +++ b/net/qrtr/ns.c @@ -132,8 +132,8 @@ static int service_announce_new(struct sockaddr_qrtr *dest, return kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); } -static int service_announce_del(struct sockaddr_qrtr *dest, - struct qrtr_server *srv) +static void service_announce_del(struct sockaddr_qrtr *dest, + struct qrtr_server *srv) { struct qrtr_ctrl_pkt pkt; struct msghdr msg = { }; @@ -157,10 +157,10 @@ static int service_announce_del(struct sockaddr_qrtr *dest, msg.msg_namelen = sizeof(*dest); ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); - if (ret < 0) + if (ret < 0 && ret != -ENODEV) pr_err("failed to announce del service\n"); - return ret; + return; } static void lookup_notify(struct sockaddr_qrtr *to, struct qrtr_server *srv, @@ -188,7 +188,7 @@ static void lookup_notify(struct sockaddr_qrtr *to, struct qrtr_server *srv, msg.msg_namelen = sizeof(*to); ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); - if (ret < 0) + if (ret < 0 && ret != -ENODEV) pr_err("failed to send lookup notification\n"); } @@ -207,6 +207,9 @@ static int announce_servers(struct sockaddr_qrtr *sq) xa_for_each(&node->servers, index, srv) { ret = service_announce_new(sq, srv); if (ret < 0) { + if (ret == -ENODEV) + continue; + pr_err("failed to announce new service\n"); return ret; } @@ -369,7 +372,7 @@ static int ctrl_cmd_bye(struct sockaddr_qrtr *from) msg.msg_namelen = sizeof(sq); ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); - if (ret < 0) { + if (ret < 0 && ret != -ENODEV) { pr_err("failed to send bye cmd\n"); return ret; } @@ -443,7 +446,7 @@ static int ctrl_cmd_del_client(struct sockaddr_qrtr *from, msg.msg_namelen = sizeof(sq); ret = kernel_sendmsg(qrtr_ns.sock, &msg, &iv, 1, sizeof(pkt)); - if (ret < 0) { + if (ret < 0 && ret != -ENODEV) { pr_err("failed to send del client cmd\n"); return ret; } diff --git a/net/rds/tcp.c b/net/rds/tcp.c index d8111ac83bb6..3dc6956f66f8 100644 --- a/net/rds/tcp.c +++ b/net/rds/tcp.c @@ -719,9 +719,7 @@ static int __init rds_tcp_init(void) { int ret; - rds_tcp_conn_slab = kmem_cache_create("rds_tcp_connection", - sizeof(struct rds_tcp_connection), - 0, 0, NULL); + rds_tcp_conn_slab = KMEM_CACHE(rds_tcp_connection, 0); if (!rds_tcp_conn_slab) { ret = -ENOMEM; goto out; diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c index c00f04a1a534..7997a19d1da3 100644 --- a/net/rds/tcp_recv.c +++ b/net/rds/tcp_recv.c @@ -337,9 +337,7 @@ out: int rds_tcp_recv_init(void) { - rds_tcp_incoming_slab = kmem_cache_create("rds_tcp_incoming", - sizeof(struct rds_tcp_incoming), - 0, 0, NULL); + rds_tcp_incoming_slab = KMEM_CACHE(rds_tcp_incoming, 0); if (!rds_tcp_incoming_slab) return -ENOMEM; return 0; diff --git a/net/rfkill/core.c b/net/rfkill/core.c index c3feb4f49d09..7a5367628c05 100644 --- a/net/rfkill/core.c +++ b/net/rfkill/core.c @@ -546,10 
+546,10 @@ bool rfkill_set_hw_state_reason(struct rfkill *rfkill, BUG_ON(!rfkill); - if (WARN(reason & - ~(RFKILL_HARD_BLOCK_SIGNAL | RFKILL_HARD_BLOCK_NOT_OWNER), - "hw_state reason not supported: 0x%lx", reason)) - return blocked; + if (WARN(reason & ~(RFKILL_HARD_BLOCK_SIGNAL | + RFKILL_HARD_BLOCK_NOT_OWNER), + "hw_state reason not supported: 0x%lx", reason)) + return rfkill_blocked(rfkill); spin_lock_irqsave(&rfkill->lock, flags); prev = !!(rfkill->hard_block_reasons & reason); diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 2520708b06a1..2714c4ed928e 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -62,7 +62,7 @@ static void tcf_set_action_cookie(struct tc_cookie __rcu **old_cookie, { struct tc_cookie *old; - old = xchg((__force struct tc_cookie **)old_cookie, new_cookie); + old = unrcu_pointer(xchg(old_cookie, RCU_INITIALIZER(new_cookie))); if (old) call_rcu(&old->rcu, tcf_free_cookie_rcu); } diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c index 0e3cf11ae5fc..396b576390d0 100644 --- a/net/sched/act_bpf.c +++ b/net/sched/act_bpf.c @@ -54,8 +54,8 @@ TC_INDIRECT_SCOPE int tcf_bpf_act(struct sk_buff *skb, bpf_compute_data_pointers(skb); filter_res = bpf_prog_run(filter, skb); } - if (unlikely(!skb->tstamp && skb->mono_delivery_time)) - skb->mono_delivery_time = 0; + if (unlikely(!skb->tstamp && skb->tstamp_type)) + skb->tstamp_type = SKB_CLOCK_REALTIME; if (skb_sk_is_prefetched(skb) && filter_res != TC_ACT_OK) skb_orphan(skb); diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c index 6fa3cca87d34..113b907da0f7 100644 --- a/net/sched/act_ct.c +++ b/net/sched/act_ct.c @@ -944,6 +944,8 @@ static int tcf_ct_act_nat(struct sk_buff *skb, action |= BIT(NF_NAT_MANIP_DST); err = nf_ct_nat(skb, ct, ctinfo, &action, range, commit); + if (err != NF_ACCEPT) + return err & NF_VERDICT_MASK; if (action & BIT(NF_NAT_MANIP_SRC)) tc_skb_cb(skb)->post_ct_snat = 1; @@ -1035,7 +1037,7 @@ TC_INDIRECT_SCOPE int tcf_ct_act(struct sk_buff *skb, const struct tc_action *a, state.pf = family; err = nf_conntrack_in(skb, &state); if (err != NF_ACCEPT) - goto out_push; + goto nf_error; } do_nat: @@ -1047,7 +1049,7 @@ do_nat: err = tcf_ct_act_nat(skb, ct, ctinfo, p->ct_action, &p->range, commit); if (err != NF_ACCEPT) - goto drop; + goto nf_error; if (!nf_ct_is_confirmed(ct) && commit && p->helper && !nfct_help(ct)) { err = __nf_ct_try_assign_helper(ct, p->tmpl, GFP_ATOMIC); @@ -1061,8 +1063,9 @@ do_nat: } if (nf_ct_is_confirmed(ct) ? ((!cached && !skip_add) || add_helper) : commit) { - if (nf_ct_helper(skb, ct, ctinfo, family) != NF_ACCEPT) - goto drop; + err = nf_ct_helper(skb, ct, ctinfo, family); + if (err != NF_ACCEPT) + goto nf_error; } if (commit) { @@ -1075,8 +1078,9 @@ do_nat: /* This will take care of sending queued events * even if the connection is already confirmed. */ - if (nf_conntrack_confirm(skb) != NF_ACCEPT) - goto drop; + err = nf_conntrack_confirm(skb); + if (err != NF_ACCEPT) + goto nf_error; /* The ct may be dropped if a clash has been resolved, * so it's necessary to retrieve it from skb again to @@ -1108,6 +1112,21 @@ out_frag: drop: tcf_action_inc_drop_qstats(&c->common); return TC_ACT_SHOT; + +nf_error: + /* some verdicts store extra data in upper bits, such + * as errno or queue number. 
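+ * For example, NF_DROP_ERR() in include/linux/netfilter.h encodes a + * negative errno above the verdict bits as ((-err) << 16) | NF_DROP, + * which is why only the low NF_VERDICT_MASK bits are switched on here.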
+ */ + switch (err & NF_VERDICT_MASK) { + case NF_DROP: + goto drop; + case NF_STOLEN: + tcf_action_inc_drop_qstats(&c->common); + return TC_ACT_CONSUMED; + default: + DEBUG_NET_WARN_ON_ONCE(1); + goto drop; + } } static const struct nla_policy ct_policy[TCA_CT_MAX + 1] = { diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c index a69b53d54039..2ceb4d141b71 100644 --- a/net/sched/act_sample.c +++ b/net/sched/act_sample.c @@ -167,7 +167,9 @@ TC_INDIRECT_SCOPE int tcf_sample_act(struct sk_buff *skb, { struct tcf_sample *s = to_sample(a); struct psample_group *psample_group; + u8 cookie_data[TC_COOKIE_MAX_SIZE]; struct psample_metadata md = {}; + struct tc_cookie *user_cookie; int retval; tcf_lastuse_update(&s->tcf_tm); @@ -189,6 +191,16 @@ TC_INDIRECT_SCOPE int tcf_sample_act(struct sk_buff *skb, if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev)) skb_push(skb, skb->mac_len); + rcu_read_lock(); + user_cookie = rcu_dereference(a->user_cookie); + if (user_cookie) { + memcpy(cookie_data, user_cookie->data, + user_cookie->len); + md.user_cookie = cookie_data; + md.user_cookie_len = user_cookie->len; + } + rcu_read_unlock(); + md.trunc_size = s->truncate ? s->trunc_size : skb->len; psample_sample_packet(psample_group, skb, s->rate, &md); diff --git a/net/sched/act_skbmod.c b/net/sched/act_skbmod.c index cd0accaf844a..dc0229693461 100644 --- a/net/sched/act_skbmod.c +++ b/net/sched/act_skbmod.c @@ -246,7 +246,7 @@ static int tcf_skbmod_dump(struct sk_buff *skb, struct tc_action *a, memset(&opt, 0, sizeof(opt)); opt.index = d->tcf_index; - opt.refcnt = refcount_read(&d->tcf_refcnt) - ref, + opt.refcnt = refcount_read(&d->tcf_refcnt) - ref; opt.bindcnt = atomic_read(&d->tcf_bindcnt) - bind; spin_lock_bh(&d->tcf_lock); opt.action = d->tcf_action; diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c index 5e83e890f6a4..1941ebec23ff 100644 --- a/net/sched/cls_bpf.c +++ b/net/sched/cls_bpf.c @@ -104,8 +104,8 @@ TC_INDIRECT_SCOPE int cls_bpf_classify(struct sk_buff *skb, bpf_compute_data_pointers(skb); filter_res = bpf_prog_run(prog->filter, skb); } - if (unlikely(!skb->tstamp && skb->mono_delivery_time)) - skb->mono_delivery_time = 0; + if (unlikely(!skb->tstamp && skb->tstamp_type)) + skb->tstamp_type = SKB_CLOCK_REALTIME; if (prog->exts_integrated) { res->class = 0; diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index fd9a6f20b60b..e280c27cb9f9 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -41,6 +41,16 @@ #define TCA_FLOWER_KEY_CT_FLAGS_MASK \ (TCA_FLOWER_KEY_CT_FLAGS_MAX - 1) +#define TCA_FLOWER_KEY_FLAGS_POLICY_MASK \ + (TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT | \ + TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST) + +#define TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK \ + (TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM | \ + TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT | \ + TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM | \ + TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT) + struct fl_flow_key { struct flow_dissector_key_meta meta; struct flow_dissector_key_control control; @@ -669,8 +679,10 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { [TCA_FLOWER_KEY_ENC_UDP_SRC_PORT_MASK] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_ENC_UDP_DST_PORT] = { .type = NLA_U16 }, [TCA_FLOWER_KEY_ENC_UDP_DST_PORT_MASK] = { .type = NLA_U16 }, - [TCA_FLOWER_KEY_FLAGS] = { .type = NLA_U32 }, - [TCA_FLOWER_KEY_FLAGS_MASK] = { .type = NLA_U32 }, + [TCA_FLOWER_KEY_FLAGS] = NLA_POLICY_MASK(NLA_BE32, + TCA_FLOWER_KEY_FLAGS_POLICY_MASK), + [TCA_FLOWER_KEY_FLAGS_MASK] = NLA_POLICY_MASK(NLA_BE32, + 
TCA_FLOWER_KEY_FLAGS_POLICY_MASK), [TCA_FLOWER_KEY_ICMPV4_TYPE] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ICMPV4_TYPE_MASK] = { .type = NLA_U8 }, [TCA_FLOWER_KEY_ICMPV4_CODE] = { .type = NLA_U8 }, @@ -732,6 +744,10 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = { [TCA_FLOWER_KEY_SPI_MASK] = { .type = NLA_U32 }, [TCA_FLOWER_L2_MISS] = NLA_POLICY_MAX(NLA_U8, 1), [TCA_FLOWER_KEY_CFM] = { .type = NLA_NESTED }, + [TCA_FLOWER_KEY_ENC_FLAGS] = NLA_POLICY_MASK(NLA_BE32, + TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK), + [TCA_FLOWER_KEY_ENC_FLAGS_MASK] = NLA_POLICY_MASK(NLA_BE32, + TCA_FLOWER_KEY_ENC_FLAGS_POLICY_MASK), }; static const struct nla_policy @@ -1155,19 +1171,29 @@ static void fl_set_key_flag(u32 flower_key, u32 flower_mask, } } -static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key, - u32 *flags_mask, struct netlink_ext_ack *extack) +static int fl_set_key_flags(struct nlattr *tca_opts, struct nlattr **tb, + bool encap, u32 *flags_key, u32 *flags_mask, + struct netlink_ext_ack *extack) { + int fl_key, fl_mask; u32 key, mask; + if (encap) { + fl_key = TCA_FLOWER_KEY_ENC_FLAGS; + fl_mask = TCA_FLOWER_KEY_ENC_FLAGS_MASK; + } else { + fl_key = TCA_FLOWER_KEY_FLAGS; + fl_mask = TCA_FLOWER_KEY_FLAGS_MASK; + } + /* mask is mandatory for flags */ - if (!tb[TCA_FLOWER_KEY_FLAGS_MASK]) { + if (NL_REQ_ATTR_CHECK(extack, tca_opts, tb, fl_mask)) { NL_SET_ERR_MSG(extack, "Missing flags mask"); return -EINVAL; } - key = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS])); - mask = be32_to_cpu(nla_get_be32(tb[TCA_FLOWER_KEY_FLAGS_MASK])); + key = be32_to_cpu(nla_get_be32(tb[fl_key])); + mask = be32_to_cpu(nla_get_be32(tb[fl_mask])); *flags_key = 0; *flags_mask = 0; @@ -1178,6 +1204,21 @@ static int fl_set_key_flags(struct nlattr **tb, u32 *flags_key, TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST, FLOW_DIS_FIRST_FRAG); + fl_set_key_flag(key, mask, flags_key, flags_mask, + TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM, + FLOW_DIS_F_TUNNEL_CSUM); + + fl_set_key_flag(key, mask, flags_key, flags_mask, + TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT, + FLOW_DIS_F_TUNNEL_DONT_FRAGMENT); + + fl_set_key_flag(key, mask, flags_key, flags_mask, + TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM, FLOW_DIS_F_TUNNEL_OAM); + + fl_set_key_flag(key, mask, flags_key, flags_mask, + TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT, + FLOW_DIS_F_TUNNEL_CRIT_OPT); + return 0; } @@ -1825,9 +1866,9 @@ static int fl_set_key_cfm(struct nlattr **tb, return 0; } -static int fl_set_key(struct net *net, struct nlattr **tb, - struct fl_flow_key *key, struct fl_flow_key *mask, - struct netlink_ext_ack *extack) +static int fl_set_key(struct net *net, struct nlattr *tca_opts, + struct nlattr **tb, struct fl_flow_key *key, + struct fl_flow_key *mask, struct netlink_ext_ack *extack) { __be16 ethertype; int ret = 0; @@ -2059,9 +2100,18 @@ static int fl_set_key(struct net *net, struct nlattr **tb, if (ret) return ret; - if (tb[TCA_FLOWER_KEY_FLAGS]) - ret = fl_set_key_flags(tb, &key->control.flags, + if (tb[TCA_FLOWER_KEY_FLAGS]) { + ret = fl_set_key_flags(tca_opts, tb, false, + &key->control.flags, &mask->control.flags, extack); + if (ret) + return ret; + } + + if (tb[TCA_FLOWER_KEY_ENC_FLAGS]) + ret = fl_set_key_flags(tca_opts, tb, true, + &key->enc_control.flags, + &mask->enc_control.flags, extack); return ret; } @@ -2152,7 +2202,8 @@ static void fl_init_dissector(struct flow_dissector *dissector, FL_KEY_SET_IF_MASKED(mask, keys, cnt, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, enc_ipv6); if (FL_KEY_IS_MASKED(mask, enc_ipv4) || - FL_KEY_IS_MASKED(mask, enc_ipv6)) + 
FL_KEY_IS_MASKED(mask, enc_ipv6) || + FL_KEY_IS_MASKED(mask, enc_control)) FL_KEY_SET(keys, cnt, FLOW_DISSECTOR_KEY_ENC_CONTROL, enc_control); FL_KEY_SET_IF_MASKED(mask, keys, cnt, @@ -2310,6 +2361,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, { struct cls_fl_head *head = fl_head_dereference(tp); bool rtnl_held = !(flags & TCA_ACT_FLAGS_NO_RTNL); + struct nlattr *tca_opts = tca[TCA_OPTIONS]; struct cls_fl_filter *fold = *arg; bool bound_to_filter = false; struct cls_fl_filter *fnew; @@ -2318,7 +2370,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, bool in_ht; int err; - if (!tca[TCA_OPTIONS]) { + if (!tca_opts) { err = -EINVAL; goto errout_fold; } @@ -2336,7 +2388,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, } err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX, - tca[TCA_OPTIONS], fl_policy, NULL); + tca_opts, fl_policy, NULL); if (err < 0) goto errout_tb; @@ -2412,7 +2464,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb, bound_to_filter = true; } - err = fl_set_key(net, tb, &fnew->key, &mask->key, extack); + err = fl_set_key(net, tca_opts, tb, &fnew->key, &mask->key, extack); if (err) goto unbind_filter; @@ -2752,18 +2804,19 @@ static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain, struct nlattr **tca, struct netlink_ext_ack *extack) { + struct nlattr *tca_opts = tca[TCA_OPTIONS]; struct fl_flow_tmplt *tmplt; struct nlattr **tb; int err; - if (!tca[TCA_OPTIONS]) + if (!tca_opts) return ERR_PTR(-EINVAL); tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL); if (!tb) return ERR_PTR(-ENOBUFS); err = nla_parse_nested_deprecated(tb, TCA_FLOWER_MAX, - tca[TCA_OPTIONS], fl_policy, NULL); + tca_opts, fl_policy, NULL); if (err) goto errout_tb; @@ -2773,7 +2826,8 @@ static void *fl_tmplt_create(struct net *net, struct tcf_chain *chain, goto errout_tb; } tmplt->chain = chain; - err = fl_set_key(net, tb, &tmplt->dummy_key, &tmplt->mask, extack); + err = fl_set_key(net, tca_opts, tb, &tmplt->dummy_key, + &tmplt->mask, extack); if (err) goto errout_tmplt; @@ -3049,12 +3103,22 @@ static void fl_get_key_flag(u32 dissector_key, u32 dissector_mask, } } -static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask) +static int fl_dump_key_flags(struct sk_buff *skb, bool encap, + u32 flags_key, u32 flags_mask) { - u32 key, mask; + int fl_key, fl_mask; __be32 _key, _mask; + u32 key, mask; int err; + if (encap) { + fl_key = TCA_FLOWER_KEY_ENC_FLAGS; + fl_mask = TCA_FLOWER_KEY_ENC_FLAGS_MASK; + } else { + fl_key = TCA_FLOWER_KEY_FLAGS; + fl_mask = TCA_FLOWER_KEY_FLAGS_MASK; + } + if (!memchr_inv(&flags_mask, 0, sizeof(flags_mask))) return 0; @@ -3067,14 +3131,29 @@ static int fl_dump_key_flags(struct sk_buff *skb, u32 flags_key, u32 flags_mask) TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST, FLOW_DIS_FIRST_FRAG); + fl_get_key_flag(flags_key, flags_mask, &key, &mask, + TCA_FLOWER_KEY_FLAGS_TUNNEL_CSUM, + FLOW_DIS_F_TUNNEL_CSUM); + + fl_get_key_flag(flags_key, flags_mask, &key, &mask, + TCA_FLOWER_KEY_FLAGS_TUNNEL_DONT_FRAGMENT, + FLOW_DIS_F_TUNNEL_DONT_FRAGMENT); + + fl_get_key_flag(flags_key, flags_mask, &key, &mask, + TCA_FLOWER_KEY_FLAGS_TUNNEL_OAM, FLOW_DIS_F_TUNNEL_OAM); + + fl_get_key_flag(flags_key, flags_mask, &key, &mask, + TCA_FLOWER_KEY_FLAGS_TUNNEL_CRIT_OPT, + FLOW_DIS_F_TUNNEL_CRIT_OPT); + _key = cpu_to_be32(key); _mask = cpu_to_be32(mask); - err = nla_put(skb, TCA_FLOWER_KEY_FLAGS, 4, &_key); + err = nla_put(skb, fl_key, 4, &_key); if (err) return err; - return nla_put(skb, 
TCA_FLOWER_KEY_FLAGS_MASK, 4, &_mask); + return nla_put(skb, fl_mask, 4, &_mask); } static int fl_dump_key_geneve_opt(struct sk_buff *skb, @@ -3581,7 +3660,8 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net, if (fl_dump_key_ct(skb, &key->ct, &mask->ct)) goto nla_put_failure; - if (fl_dump_key_flags(skb, key->control.flags, mask->control.flags)) + if (fl_dump_key_flags(skb, false, key->control.flags, + mask->control.flags)) goto nla_put_failure; if (fl_dump_key_val(skb, &key->hash.hash, TCA_FLOWER_KEY_HASH, @@ -3592,6 +3672,10 @@ static int fl_dump_key(struct sk_buff *skb, struct net *net, if (fl_dump_key_cfm(skb, &key->cfm, &mask->cfm)) goto nla_put_failure; + if (fl_dump_key_flags(skb, true, key->enc_control.flags, + mask->enc_control.flags)) + goto nla_put_failure; + return 0; nla_put_failure: diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index e22ff003d52e..2af24547a82c 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -633,6 +633,7 @@ EXPORT_SYMBOL_GPL(netif_carrier_event); static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc, struct sk_buff **to_free) { + dev_core_stats_tx_dropped_inc(skb->dev); __qdisc_drop(skb, to_free); return NET_XMIT_CN; } diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index b284a06b5a75..cc2df9f8c14a 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -1610,7 +1610,7 @@ static int taprio_parse_clockid(struct Qdisc *sch, struct nlattr **tb, if (FULL_OFFLOAD_IS_ENABLED(q->flags)) { const struct ethtool_ops *ops = dev->ethtool_ops; - struct ethtool_ts_info info = { + struct kernel_ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO, .phc_index = -1, }; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index c009383369b2..32f76f1298da 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -4834,10 +4834,14 @@ int sctp_inet_connect(struct socket *sock, struct sockaddr *uaddr, return sctp_connect(sock->sk, uaddr, addr_len, flags); } -/* FIXME: Write comments. */ +/* Only called when shutting down a listening SCTP socket.
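+ * Setting RCV_SHUTDOWN here makes pending and later accept() calls + * fail with -EINVAL (see the checks added to sctp_accept() and + * sctp_wait_for_accept() below) instead of blocking forever.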
*/ static int sctp_disconnect(struct sock *sk, int flags) { - return -EOPNOTSUPP; /* STUB */ + if (!sctp_style(sk, TCP)) + return -EOPNOTSUPP; + + sk->sk_shutdown |= RCV_SHUTDOWN; + return 0; } /* 4.1.4 accept() - TCP Style Syntax @@ -4866,7 +4870,8 @@ static struct sock *sctp_accept(struct sock *sk, struct proto_accept_arg *arg) goto out; } - if (!sctp_sstate(sk, LISTENING)) { + if (!sctp_sstate(sk, LISTENING) || + (sk->sk_shutdown & RCV_SHUTDOWN)) { error = -EINVAL; goto out; } @@ -9393,7 +9398,8 @@ static int sctp_wait_for_accept(struct sock *sk, long timeo) } err = -EINVAL; - if (!sctp_sstate(sk, LISTENING)) + if (!sctp_sstate(sk, LISTENING) || + (sk->sk_shutdown & RCV_SHUTDOWN)) break; err = 0; diff --git a/net/smc/Makefile b/net/smc/Makefile index 2c510d543058..60f1c87d5212 100644 --- a/net/smc/Makefile +++ b/net/smc/Makefile @@ -4,6 +4,6 @@ obj-$(CONFIG_SMC) += smc.o obj-$(CONFIG_SMC_DIAG) += smc_diag.o smc-y := af_smc.o smc_pnet.o smc_ib.o smc_clc.o smc_core.o smc_wr.o smc_llc.o smc-y += smc_cdc.o smc_tx.o smc_rx.o smc_close.o smc_ism.o smc_netlink.o smc_stats.o -smc-y += smc_tracepoint.o +smc-y += smc_tracepoint.o smc_inet.o smc-$(CONFIG_SYSCTL) += smc_sysctl.o smc-$(CONFIG_SMC_LO) += smc_loopback.o diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index c5f98c6b2561..73a875573e7a 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -54,6 +54,7 @@ #include "smc_tracepoint.h" #include "smc_sysctl.h" #include "smc_loopback.h" +#include "smc_inet.h" static DEFINE_MUTEX(smc_server_lgr_pending); /* serialize link group * creation on server @@ -170,15 +171,15 @@ static bool smc_hs_congested(const struct sock *sk) return false; } -static struct smc_hashinfo smc_v4_hashinfo = { +struct smc_hashinfo smc_v4_hashinfo = { .lock = __RW_LOCK_UNLOCKED(smc_v4_hashinfo.lock), }; -static struct smc_hashinfo smc_v6_hashinfo = { +struct smc_hashinfo smc_v6_hashinfo = { .lock = __RW_LOCK_UNLOCKED(smc_v6_hashinfo.lock), }; -static int smc_hash_sk(struct sock *sk) +int smc_hash_sk(struct sock *sk) { struct smc_hashinfo *h = sk->sk_prot->h.smc_hash; struct hlist_head *head; @@ -193,7 +194,7 @@ static int smc_hash_sk(struct sock *sk) return 0; } -static void smc_unhash_sk(struct sock *sk) +void smc_unhash_sk(struct sock *sk) { struct smc_hashinfo *h = sk->sk_prot->h.smc_hash; @@ -207,7 +208,7 @@ static void smc_unhash_sk(struct sock *sk) * work which we didn't do because of user hold the sock_lock in the * BH context */ -static void smc_release_cb(struct sock *sk) +void smc_release_cb(struct sock *sk) { struct smc_sock *smc = smc_sk(sk); @@ -307,7 +308,7 @@ static int __smc_release(struct smc_sock *smc) return rc; } -static int smc_release(struct socket *sock) +int smc_release(struct socket *sock) { struct sock *sk = sock->sk; struct smc_sock *smc; @@ -361,25 +362,15 @@ static void smc_destruct(struct sock *sk) return; } -static struct sock *smc_sock_alloc(struct net *net, struct socket *sock, - int protocol) +void smc_sk_init(struct net *net, struct sock *sk, int protocol) { - struct smc_sock *smc; - struct proto *prot; - struct sock *sk; + struct smc_sock *smc = smc_sk(sk); - prot = (protocol == SMCPROTO_SMC6) ? 
&smc_proto6 : &smc_proto; - sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0); - if (!sk) - return NULL; - - sock_init_data(sock, sk); /* sets sk_refcnt to 1 */ sk->sk_state = SMC_INIT; sk->sk_destruct = smc_destruct; sk->sk_protocol = protocol; WRITE_ONCE(sk->sk_sndbuf, 2 * READ_ONCE(net->smc.sysctl_wmem)); WRITE_ONCE(sk->sk_rcvbuf, 2 * READ_ONCE(net->smc.sysctl_rmem)); - smc = smc_sk(sk); INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work); INIT_WORK(&smc->connect_work, smc_connect_work); INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work); @@ -389,12 +380,30 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock, sk->sk_prot->hash(sk); mutex_init(&smc->clcsock_release_lock); smc_init_saved_callbacks(smc); + smc->limit_smc_hs = net->smc.limit_smc_hs; + smc->use_fallback = false; /* assume rdma capability first */ + smc->fallback_rsn = 0; +} + +static struct sock *smc_sock_alloc(struct net *net, struct socket *sock, + int protocol) +{ + struct proto *prot; + struct sock *sk; + + prot = (protocol == SMCPROTO_SMC6) ? &smc_proto6 : &smc_proto; + sk = sk_alloc(net, PF_SMC, GFP_KERNEL, prot, 0); + if (!sk) + return NULL; + + sock_init_data(sock, sk); /* sets sk_refcnt to 1 */ + smc_sk_init(net, sk, protocol); return sk; } -static int smc_bind(struct socket *sock, struct sockaddr *uaddr, - int addr_len) +int smc_bind(struct socket *sock, struct sockaddr *uaddr, + int addr_len) { struct sockaddr_in *addr = (struct sockaddr_in *)uaddr; struct sock *sk = sock->sk; @@ -1623,8 +1632,8 @@ out: release_sock(&smc->sk); } -static int smc_connect(struct socket *sock, struct sockaddr *addr, - int alen, int flags) +int smc_connect(struct socket *sock, struct sockaddr *addr, + int alen, int flags) { struct sock *sk = sock->sk; struct smc_sock *smc; @@ -2605,7 +2614,7 @@ out: read_unlock_bh(&listen_clcsock->sk_callback_lock); } -static int smc_listen(struct socket *sock, int backlog) +int smc_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; struct smc_sock *smc; @@ -2670,8 +2679,8 @@ out: return rc; } -static int smc_accept(struct socket *sock, struct socket *new_sock, - struct proto_accept_arg *arg) +int smc_accept(struct socket *sock, struct socket *new_sock, + struct proto_accept_arg *arg) { struct sock *sk = sock->sk, *nsk; DECLARE_WAITQUEUE(wait, current); @@ -2740,8 +2749,8 @@ out: return rc; } -static int smc_getname(struct socket *sock, struct sockaddr *addr, - int peer) +int smc_getname(struct socket *sock, struct sockaddr *addr, + int peer) { struct smc_sock *smc; @@ -2754,7 +2763,7 @@ static int smc_getname(struct socket *sock, struct sockaddr *addr, return smc->clcsock->ops->getname(smc->clcsock, addr, peer); } -static int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) +int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct smc_sock *smc; @@ -2792,8 +2801,8 @@ out: return rc; } -static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, - int flags) +int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags) { struct sock *sk = sock->sk; struct smc_sock *smc; @@ -2842,8 +2851,8 @@ static __poll_t smc_accept_poll(struct sock *parent) return mask; } -static __poll_t smc_poll(struct file *file, struct socket *sock, - poll_table *wait) +__poll_t smc_poll(struct file *file, struct socket *sock, + poll_table *wait) { struct sock *sk = sock->sk; struct smc_sock *smc; @@ -2895,7 +2904,7 @@ static __poll_t smc_poll(struct file *file, struct socket *sock, 
return mask; } -static int smc_shutdown(struct socket *sock, int how) +int smc_shutdown(struct socket *sock, int how) { struct sock *sk = sock->sk; bool do_shutdown = true; @@ -3035,8 +3044,8 @@ static int __smc_setsockopt(struct socket *sock, int level, int optname, return rc; } -static int smc_setsockopt(struct socket *sock, int level, int optname, - sockptr_t optval, unsigned int optlen) +int smc_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen) { struct sock *sk = sock->sk; struct smc_sock *smc; @@ -3122,8 +3131,8 @@ out: return rc; } -static int smc_getsockopt(struct socket *sock, int level, int optname, - char __user *optval, int __user *optlen) +int smc_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) { struct smc_sock *smc; int rc; @@ -3148,8 +3157,8 @@ static int smc_getsockopt(struct socket *sock, int level, int optname, return rc; } -static int smc_ioctl(struct socket *sock, unsigned int cmd, - unsigned long arg) +int smc_ioctl(struct socket *sock, unsigned int cmd, + unsigned long arg) { union smc_host_cursor cons, urg; struct smc_connection *conn; @@ -3235,9 +3244,9 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd, * Note that subsequent recv() calls have to wait till all splice() processing * completed. */ -static ssize_t smc_splice_read(struct socket *sock, loff_t *ppos, - struct pipe_inode_info *pipe, size_t len, - unsigned int flags) +ssize_t smc_splice_read(struct socket *sock, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, + unsigned int flags) { struct sock *sk = sock->sk; struct smc_sock *smc; @@ -3303,6 +3312,31 @@ static const struct proto_ops smc_sock_ops = { .splice_read = smc_splice_read, }; +int smc_create_clcsk(struct net *net, struct sock *sk, int family) +{ + struct smc_sock *smc = smc_sk(sk); + int rc; + + rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, + &smc->clcsock); + if (rc) { + sk_common_release(sk); + return rc; + } + + /* smc_clcsock_release() does not wait for smc->clcsock->sk's + * destruction; its sk_state might not be TCP_CLOSE after + * smc->sk is close()d, and TCP timers can be fired later, + * which need net ref. + */ + sk = smc->clcsock->sk; + __netns_tracker_free(net, &sk->ns_tracker, false); + sk->sk_net_refcnt = 1; + get_net_track(net, &sk->ns_tracker, GFP_KERNEL); + sock_inuse_add(net, 1); + return 0; +} + static int __smc_create(struct net *net, struct socket *sock, int protocol, int kern, struct socket *clcsock) { @@ -3328,35 +3362,12 @@ static int __smc_create(struct net *net, struct socket *sock, int protocol, /* create internal TCP socket for CLC handshake and fallback */ smc = smc_sk(sk); - smc->use_fallback = false; /* assume rdma capability first */ - smc->fallback_rsn = 0; - - /* default behavior from limit_smc_hs in every net namespace */ - smc->limit_smc_hs = net->smc.limit_smc_hs; rc = 0; - if (!clcsock) { - rc = sock_create_kern(net, family, SOCK_STREAM, IPPROTO_TCP, - &smc->clcsock); - if (rc) { - sk_common_release(sk); - goto out; - } - - /* smc_clcsock_release() does not wait smc->clcsock->sk's - * destruction; its sk_state might not be TCP_CLOSE after - * smc->sk is close()d, and TCP timers can be fired later, - * which need net ref.
- */ - sk = smc->clcsock->sk; - __netns_tracker_free(net, &sk->ns_tracker, false); - sk->sk_net_refcnt = 1; - get_net_track(net, &sk->ns_tracker, GFP_KERNEL); - sock_inuse_add(net, 1); - } else { + if (clcsock) smc->clcsock = clcsock; - } - + else + rc = smc_create_clcsk(net, sk, family); out: return rc; } @@ -3565,10 +3576,15 @@ static int __init smc_init(void) pr_err("%s: tcp_ulp_register fails with %d\n", __func__, rc); goto out_lo; } - + rc = smc_inet_init(); + if (rc) { + pr_err("%s: smc_inet_init fails with %d\n", __func__, rc); + goto out_ulp; + } static_branch_enable(&tcp_have_smc); return 0; - +out_ulp: + tcp_unregister_ulp(&smc_ulp_ops); out_lo: smc_loopback_exit(); out_ib: @@ -3605,6 +3621,7 @@ out_pernet_subsys: static void __exit smc_exit(void) { static_branch_disable(&tcp_have_smc); + smc_inet_exit(); tcp_unregister_ulp(&smc_ulp_ops); sock_unregister(PF_SMC); smc_core_exit(); @@ -3632,4 +3649,9 @@ MODULE_DESCRIPTION("smc socket address family"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_SMC); MODULE_ALIAS_TCP_ULP("smc"); +/* 256 for IPPROTO_SMC and 1 for SOCK_STREAM */ +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET, 256, 1); +#if IS_ENABLED(CONFIG_IPV6) +MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 256, 1); +#endif /* CONFIG_IPV6 */ MODULE_ALIAS_GENL_FAMILY(SMC_GENL_FAMILY_NAME); diff --git a/net/smc/smc.h b/net/smc/smc.h index 18c8b7870198..34b781e463c4 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -34,6 +34,44 @@ extern struct proto smc_proto; extern struct proto smc_proto6; +extern struct smc_hashinfo smc_v4_hashinfo; +extern struct smc_hashinfo smc_v6_hashinfo; + +int smc_hash_sk(struct sock *sk); +void smc_unhash_sk(struct sock *sk); +void smc_release_cb(struct sock *sk); + +int smc_release(struct socket *sock); +int smc_bind(struct socket *sock, struct sockaddr *uaddr, + int addr_len); +int smc_connect(struct socket *sock, struct sockaddr *addr, + int alen, int flags); +int smc_accept(struct socket *sock, struct socket *new_sock, + struct proto_accept_arg *arg); +int smc_getname(struct socket *sock, struct sockaddr *addr, + int peer); +__poll_t smc_poll(struct file *file, struct socket *sock, + poll_table *wait); +int smc_ioctl(struct socket *sock, unsigned int cmd, + unsigned long arg); +int smc_listen(struct socket *sock, int backlog); +int smc_shutdown(struct socket *sock, int how); +int smc_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen); +int smc_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen); +int smc_sendmsg(struct socket *sock, struct msghdr *msg, size_t len); +int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags); +ssize_t smc_splice_read(struct socket *sock, loff_t *ppos, + struct pipe_inode_info *pipe, size_t len, + unsigned int flags); + +/* smc sock initialization */ +void smc_sk_init(struct net *net, struct sock *sk, int protocol); +/* clcsock initialization */ +int smc_create_clcsk(struct net *net, struct sock *sk, int family); + #ifdef ATOMIC64_INIT #define KERNEL_HAS_ATOMIC64 #endif diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index fafdb97adfad..3b95828d9976 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -2006,7 +2006,7 @@ out: } #define SMCD_DMBE_SIZES 6 /* 0 -> 16KB, 1 -> 32KB, .. 6 -> 1MB */ -#define SMCR_RMBE_SIZES 5 /* 0 -> 16KB, 1 -> 32KB, .. 5 -> 512KB */ +#define SMCR_RMBE_SIZES 15 /* 0 -> 16KB, 1 -> 32KB, .. 
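each step doubling (16KB << i), so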
15 -> 512MB */ /* convert the RMB size into the compressed notation (minimum 16K, see * SMCD/R_DMBE_SIZES. @@ -2015,7 +2015,6 @@ out: */ static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb) { - const unsigned int max_scat = SG_MAX_SINGLE_ALLOC * PAGE_SIZE; u8 compressed; if (size <= SMC_BUF_MIN_SIZE) @@ -2025,9 +2024,11 @@ static u8 smc_compress_bufsize(int size, bool is_smcd, bool is_rmb) compressed = min_t(u8, ilog2(size) + 1, is_smcd ? SMCD_DMBE_SIZES : SMCR_RMBE_SIZES); +#ifdef CONFIG_ARCH_NO_SG_CHAIN if (!is_smcd && is_rmb) /* RMBs are backed by & limited to max size of scatterlists */ - compressed = min_t(u8, compressed, ilog2(max_scat >> 14)); + compressed = min_t(u8, compressed, ilog2((SG_MAX_SINGLE_ALLOC * PAGE_SIZE) >> 14)); +#endif return compressed; } diff --git a/net/smc/smc_inet.c b/net/smc/smc_inet.c new file mode 100644 index 000000000000..bece346dd8e9 --- /dev/null +++ b/net/smc/smc_inet.c @@ -0,0 +1,159 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Definitions for the IPPROTO_SMC (socket related) + * + * Copyright IBM Corp. 2016, 2018 + * Copyright (c) 2024, Alibaba Inc. + * + * Author: D. Wythe + */ + +#include <net/protocol.h> +#include <net/sock.h> + +#include "smc_inet.h" +#include "smc.h" + +static int smc_inet_init_sock(struct sock *sk); + +static struct proto smc_inet_prot = { + .name = "INET_SMC", + .owner = THIS_MODULE, + .init = smc_inet_init_sock, + .hash = smc_hash_sk, + .unhash = smc_unhash_sk, + .release_cb = smc_release_cb, + .obj_size = sizeof(struct smc_sock), + .h.smc_hash = &smc_v4_hashinfo, + .slab_flags = SLAB_TYPESAFE_BY_RCU, +}; + +static const struct proto_ops smc_inet_stream_ops = { + .family = PF_INET, + .owner = THIS_MODULE, + .release = smc_release, + .bind = smc_bind, + .connect = smc_connect, + .socketpair = sock_no_socketpair, + .accept = smc_accept, + .getname = smc_getname, + .poll = smc_poll, + .ioctl = smc_ioctl, + .listen = smc_listen, + .shutdown = smc_shutdown, + .setsockopt = smc_setsockopt, + .getsockopt = smc_getsockopt, + .sendmsg = smc_sendmsg, + .recvmsg = smc_recvmsg, + .mmap = sock_no_mmap, + .splice_read = smc_splice_read, +}; + +static struct inet_protosw smc_inet_protosw = { + .type = SOCK_STREAM, + .protocol = IPPROTO_SMC, + .prot = &smc_inet_prot, + .ops = &smc_inet_stream_ops, + .flags = INET_PROTOSW_ICSK, +}; + +#if IS_ENABLED(CONFIG_IPV6) +static struct proto smc_inet6_prot = { + .name = "INET6_SMC", + .owner = THIS_MODULE, + .init = smc_inet_init_sock, + .hash = smc_hash_sk, + .unhash = smc_unhash_sk, + .release_cb = smc_release_cb, + .obj_size = sizeof(struct smc_sock), + .h.smc_hash = &smc_v6_hashinfo, + .slab_flags = SLAB_TYPESAFE_BY_RCU, +}; + +static const struct proto_ops smc_inet6_stream_ops = { + .family = PF_INET6, + .owner = THIS_MODULE, + .release = smc_release, + .bind = smc_bind, + .connect = smc_connect, + .socketpair = sock_no_socketpair, + .accept = smc_accept, + .getname = smc_getname, + .poll = smc_poll, + .ioctl = smc_ioctl, + .listen = smc_listen, + .shutdown = smc_shutdown, + .setsockopt = smc_setsockopt, + .getsockopt = smc_getsockopt, + .sendmsg = smc_sendmsg, + .recvmsg = smc_recvmsg, + .mmap = sock_no_mmap, + .splice_read = smc_splice_read, +}; + +static struct inet_protosw smc_inet6_protosw = { + .type = SOCK_STREAM, + .protocol = IPPROTO_SMC, + .prot = &smc_inet6_prot, + .ops = &smc_inet6_stream_ops, + .flags = INET_PROTOSW_ICSK, +}; +#endif /* CONFIG_IPV6 */ + +static int smc_inet_init_sock(struct sock *sk) +{ + struct net *net =
sock_net(sk); + + /* init common smc sock */ + smc_sk_init(net, sk, IPPROTO_SMC); + /* create clcsock */ + return smc_create_clcsk(net, sk, sk->sk_family); +} + +int __init smc_inet_init(void) +{ + int rc; + + rc = proto_register(&smc_inet_prot, 1); + if (rc) { + pr_err("%s: proto_register smc_inet_prot fails with %d\n", + __func__, rc); + return rc; + } + /* no return value */ + inet_register_protosw(&smc_inet_protosw); + +#if IS_ENABLED(CONFIG_IPV6) + rc = proto_register(&smc_inet6_prot, 1); + if (rc) { + pr_err("%s: proto_register smc_inet6_prot fails with %d\n", + __func__, rc); + goto out_inet6_prot; + } + rc = inet6_register_protosw(&smc_inet6_protosw); + if (rc) { + pr_err("%s: inet6_register_protosw smc_inet6_protosw fails with %d\n", + __func__, rc); + goto out_inet6_protosw; + } + return rc; +out_inet6_protosw: + proto_unregister(&smc_inet6_prot); +out_inet6_prot: + inet_unregister_protosw(&smc_inet_protosw); + proto_unregister(&smc_inet_prot); +#endif /* CONFIG_IPV6 */ + return rc; +} + +void smc_inet_exit(void) +{ +#if IS_ENABLED(CONFIG_IPV6) + inet6_unregister_protosw(&smc_inet6_protosw); + proto_unregister(&smc_inet6_prot); +#endif /* CONFIG_IPV6 */ + inet_unregister_protosw(&smc_inet_protosw); + proto_unregister(&smc_inet_prot); +} diff --git a/net/smc/smc_inet.h b/net/smc/smc_inet.h new file mode 100644 index 000000000000..a489c8a2b8ef --- /dev/null +++ b/net/smc/smc_inet.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Shared Memory Communications over RDMA (SMC-R) and RoCE + * + * Definitions for the IPPROTO_SMC (socket related) + + * Copyright IBM Corp. 2016 + * Copyright (c) 2024, Alibaba Inc. + * + * Author: D. Wythe + */ +#ifndef __INET_SMC +#define __INET_SMC + +/* Initialize protocol registration on IPPROTO_SMC, + * @return 0 on success + */ +int smc_inet_init(void); + +void smc_inet_exit(void); + +#endif /* __INET_SMC */ diff --git a/net/tipc/core.h b/net/tipc/core.h index 7eccd97e0609..7f3fe3401c45 100644 --- a/net/tipc/core.h +++ b/net/tipc/core.h @@ -72,7 +72,6 @@ struct tipc_node; struct tipc_bearer; struct tipc_bc_base; struct tipc_link; -struct tipc_name_table; struct tipc_topsrv; struct tipc_monitor; #ifdef CONFIG_TIPC_CRYPTO diff --git a/net/tipc/link.c b/net/tipc/link.c index 0716eb5c8a31..5c2088a469ce 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -241,13 +241,6 @@ enum { LINK_SYNCHING = 0xc << 24 }; -/* Link FSM state checking routines - */ -static int link_is_up(struct tipc_link *l) -{ - return l->state & (LINK_ESTABLISHED | LINK_SYNCHING); -} - static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, struct sk_buff_head *xmitq); static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, @@ -274,7 +267,7 @@ static void tipc_link_update_cwin(struct tipc_link *l, int released, */ bool tipc_link_is_up(struct tipc_link *l) { - return link_is_up(l); + return l->state & (LINK_ESTABLISHED | LINK_SYNCHING); } bool tipc_link_peer_is_down(struct tipc_link *l) @@ -1790,7 +1783,7 @@ int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb, rcv_nxt = l->rcv_nxt; win_lim = rcv_nxt + TIPC_MAX_LINK_WIN; - if (unlikely(!link_is_up(l))) { + if (unlikely(!tipc_link_is_up(l))) { if (l->state == LINK_ESTABLISHING) rc = TIPC_LINK_UP_EVT; kfree_skb(skb); @@ -1848,7 +1841,7 @@ static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe, struct tipc_link *bcl = l->bc_rcvlink; struct tipc_msg *hdr; struct sk_buff *skb; - bool node_up = link_is_up(bcl); + bool node_up = tipc_link_is_up(bcl); u16 
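With the registration above in place, IPPROTO_SMC sockets are created directly through the inet families rather than via AF_SMC, and smc_inet_init_sock() wires up the CLC TCP socket at creation time. A minimal user-space sketch (the value 256 matches the MODULE_ALIAS_NET_PF_PROTO_TYPE entries added in af_smc.c above; illustrative, not part of the patch, and the fallback define is only needed where netinet/in.h does not yet know IPPROTO_SMC):

#include <sys/socket.h>
#include <netinet/in.h>
#include <stdio.h>

#ifndef IPPROTO_SMC
#define IPPROTO_SMC 256	/* matches the module alias above */
#endif

int main(void)
{
	/* Behaves like a TCP stream socket from user space; the kernel
	 * attempts the SMC handshake and falls back to TCP if the peer
	 * cannot do SMC.
	 */
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_SMC);

	if (fd < 0)
		perror("socket(IPPROTO_SMC)");
	return 0;
}
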
glen = 0, bc_rcvgap = 0; int dlen = 0; void *data; @@ -2163,7 +2156,7 @@ bool tipc_link_validate_msg(struct tipc_link *l, struct tipc_msg *hdr) if (session != curr_session) return false; /* Extra sanity check */ - if (!link_is_up(l) && msg_ack(hdr)) + if (!tipc_link_is_up(l) && msg_ack(hdr)) return false; if (!(l->peer_caps & TIPC_LINK_PROTO_SEQNO)) return true; @@ -2261,7 +2254,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, } /* ACTIVATE_MSG serves as PEER_RESET if link is already down */ - if (mtyp == RESET_MSG || !link_is_up(l)) + if (mtyp == RESET_MSG || !tipc_link_is_up(l)) rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT); /* ACTIVATE_MSG takes up link if it was already locally reset */ @@ -2300,7 +2293,7 @@ static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb, if (msg_probe(hdr)) l->stats.recv_probes++; - if (!link_is_up(l)) { + if (!tipc_link_is_up(l)) { if (l->state == LINK_ESTABLISHING) rc = TIPC_LINK_UP_EVT; break; @@ -2387,7 +2380,7 @@ void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr) int mtyp = msg_type(hdr); u16 peers_snd_nxt = msg_bc_snd_nxt(hdr); - if (link_is_up(l)) + if (tipc_link_is_up(l)) return; if (msg_user(hdr) == BCAST_PROTOCOL) { @@ -2415,7 +2408,7 @@ int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr, u16 peers_snd_nxt = msg_bc_snd_nxt(hdr); int rc = 0; - if (!link_is_up(l)) + if (!tipc_link_is_up(l)) return rc; if (!msg_peer_node_is_up(hdr)) @@ -2475,7 +2468,7 @@ int tipc_link_bc_ack_rcv(struct tipc_link *r, u16 acked, u16 gap, bool unused = false; int rc = 0; - if (!link_is_up(r) || !r->bc_peer_is_up) + if (!tipc_link_is_up(r) || !r->bc_peer_is_up) return 0; if (gap) { @@ -2873,7 +2866,7 @@ void tipc_link_set_tolerance(struct tipc_link *l, u32 tol, l->tolerance = tol; if (l->bc_rcvlink) l->bc_rcvlink->tolerance = tol; - if (link_is_up(l)) + if (tipc_link_is_up(l)) tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, tol, 0, xmitq); } diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index ab6e694f7bc2..dc063c2c7950 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -231,14 +231,10 @@ static void tls_device_resync_tx(struct sock *sk, struct tls_context *tls_ctx, u32 seq) { struct net_device *netdev; - struct sk_buff *skb; int err = 0; u8 *rcd_sn; - skb = tcp_write_queue_tail(sk); - if (skb) - TCP_SKB_CB(skb)->eor = 1; - + tcp_write_collapse_fence(sk); rcd_sn = tls_ctx->tx.rec_seq; trace_tls_device_tx_resync_send(sk, seq, rcd_sn); @@ -1067,7 +1063,6 @@ int tls_set_device_offload(struct sock *sk) struct tls_prot_info *prot; struct net_device *netdev; struct tls_context *ctx; - struct sk_buff *skb; char *iv, *rec_seq; int rc; @@ -1138,9 +1133,7 @@ int tls_set_device_offload(struct sock *sk) * SKBs where only part of the payload needs to be encrypted. * So mark the last skb in the write queue as end of record. 
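The tls_device.c hunks here replace the open-coded EOR marking with tcp_write_collapse_fence(). Judging purely from the removed lines, the helper (added to include/net/tcp.h elsewhere in this series; a sketch under that assumption, not verified against the header) amounts to:

static inline void tcp_write_collapse_fence(struct sock *sk)
{
	struct sk_buff *skb = tcp_write_queue_tail(sk);

	/* End-of-record on the tail skb keeps TCP from collapsing
	 * later data into it, fencing off the bytes queued so far.
	 */
	if (skb)
		TCP_SKB_CB(skb)->eor = 1;
}
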
*/ - skb = tcp_write_queue_tail(sk); - if (skb) - TCP_SKB_CB(skb)->eor = 1; + tcp_write_collapse_fence(sk); /* Avoid offloading if the device is down * We don't want to offload new flows after diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c index 90b7f253d363..6b4b9f2749a6 100644 --- a/net/tls/tls_main.c +++ b/net/tls/tls_main.c @@ -616,6 +616,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval, struct tls_crypto_info *alt_crypto_info; struct tls_context *ctx = tls_get_ctx(sk); const struct tls_cipher_desc *cipher_desc; + union tls_crypto_context *crypto_ctx; int rc = 0; int conf; @@ -623,13 +624,15 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval, return -EINVAL; if (tx) { - crypto_info = &ctx->crypto_send.info; + crypto_ctx = &ctx->crypto_send; alt_crypto_info = &ctx->crypto_recv.info; } else { - crypto_info = &ctx->crypto_recv.info; + crypto_ctx = &ctx->crypto_recv; alt_crypto_info = &ctx->crypto_send.info; } + crypto_info = &crypto_ctx->info; + /* Currently we don't support set crypto info more than one time */ if (TLS_CRYPTO_INFO_READY(crypto_info)) return -EBUSY; @@ -710,7 +713,7 @@ static int do_tls_setsockopt_conf(struct sock *sk, sockptr_t optval, return 0; err_crypto_info: - memzero_explicit(crypto_info, sizeof(union tls_crypto_context)); + memzero_explicit(crypto_ctx, sizeof(*crypto_ctx)); return rc; } diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 142f56770b77..b0a4c6d08e0a 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -126,6 +126,81 @@ static spinlock_t bsd_socket_locks[UNIX_HASH_SIZE / 2]; * hash table is protected with spinlock. * each socket state is protected by separate spinlock. */ +#ifdef CONFIG_PROVE_LOCKING +#define cmp_ptr(l, r) (((l) > (r)) - ((l) < (r))) + +static int unix_table_lock_cmp_fn(const struct lockdep_map *a, + const struct lockdep_map *b) +{ + return cmp_ptr(a, b); +} + +static int unix_state_lock_cmp_fn(const struct lockdep_map *_a, + const struct lockdep_map *_b) +{ + const struct unix_sock *a, *b; + + a = container_of(_a, struct unix_sock, lock.dep_map); + b = container_of(_b, struct unix_sock, lock.dep_map); + + if (a->sk.sk_state == TCP_LISTEN) { + /* unix_stream_connect(): Before the 2nd unix_state_lock(), + * + * 1. a is TCP_LISTEN. + * 2. b is not a. + * 3. concurrent connect(b -> a) must fail. + * + * Except for 2. & 3., the b's state can be any possible + * value due to concurrent connect() or listen(). + * + * 2. is detected in debug_spin_lock_before(), and 3. cannot + * be expressed as lock_cmp_fn. + */ + switch (b->sk.sk_state) { + case TCP_CLOSE: + case TCP_ESTABLISHED: + case TCP_LISTEN: + return -1; + default: + /* Invalid case. */ + return 0; + } + } + + /* Should never happen. Just to be symmetric. */ + if (b->sk.sk_state == TCP_LISTEN) { + switch (b->sk.sk_state) { + case TCP_CLOSE: + case TCP_ESTABLISHED: + return 1; + default: + return 0; + } + } + + /* unix_state_double_lock(): ascending address order. */ + return cmp_ptr(a, b); +} + +static int unix_recvq_lock_cmp_fn(const struct lockdep_map *_a, + const struct lockdep_map *_b) +{ + const struct sock *a, *b; + + a = container_of(_a, struct sock, sk_receive_queue.lock.dep_map); + b = container_of(_b, struct sock, sk_receive_queue.lock.dep_map); + + /* unix_collect_skb(): listener -> embryo order. */ + if (a->sk_state == TCP_LISTEN && unix_sk(b)->listener == a) + return -1; + + /* Should never happen. Just to be symmetric. 
*/ + if (b->sk_state == TCP_LISTEN && unix_sk(a)->listener == b) + return 1; + + return 0; +} +#endif static unsigned int unix_unbound_hash(struct sock *sk) { @@ -168,7 +243,7 @@ static void unix_table_double_lock(struct net *net, swap(hash1, hash2); spin_lock(&net->unx.table.locks[hash1]); - spin_lock_nested(&net->unx.table.locks[hash2], SINGLE_DEPTH_NESTING); + spin_lock(&net->unx.table.locks[hash2]); } static void unix_table_double_unlock(struct net *net, @@ -647,8 +722,8 @@ static void unix_release_sock(struct sock *sk, int embrion) while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { if (state == TCP_LISTEN) unix_release_sock(skb->sk, 1); + /* passed fds are erased in the kfree_skb hook */ - UNIXCB(skb).consumed = skb->len; kfree_skb(skb); } @@ -675,6 +750,12 @@ static void unix_release_sock(struct sock *sk, int embrion) } static void init_peercred(struct sock *sk) +{ + sk->sk_peer_pid = get_pid(task_tgid(current)); + sk->sk_peer_cred = get_current_cred(); +} + +static void update_peercred(struct sock *sk) { const struct cred *old_cred; struct pid *old_pid; @@ -682,8 +763,7 @@ static void init_peercred(struct sock *sk) spin_lock(&sk->sk_peer_lock); old_pid = sk->sk_peer_pid; old_cred = sk->sk_peer_cred; - sk->sk_peer_pid = get_pid(task_tgid(current)); - sk->sk_peer_cred = get_current_cred(); + init_peercred(sk); spin_unlock(&sk->sk_peer_lock); put_pid(old_pid); @@ -692,26 +772,12 @@ static void init_peercred(struct sock *sk) static void copy_peercred(struct sock *sk, struct sock *peersk) { - const struct cred *old_cred; - struct pid *old_pid; + lockdep_assert_held(&unix_sk(peersk)->lock); - if (sk < peersk) { - spin_lock(&sk->sk_peer_lock); - spin_lock_nested(&peersk->sk_peer_lock, SINGLE_DEPTH_NESTING); - } else { - spin_lock(&peersk->sk_peer_lock); - spin_lock_nested(&sk->sk_peer_lock, SINGLE_DEPTH_NESTING); - } - old_pid = sk->sk_peer_pid; - old_cred = sk->sk_peer_cred; - sk->sk_peer_pid = get_pid(peersk->sk_peer_pid); + spin_lock(&sk->sk_peer_lock); + sk->sk_peer_pid = get_pid(peersk->sk_peer_pid); sk->sk_peer_cred = get_cred(peersk->sk_peer_cred); - spin_unlock(&sk->sk_peer_lock); - spin_unlock(&peersk->sk_peer_lock); - - put_pid(old_pid); - put_cred(old_cred); } static int unix_listen(struct socket *sock, int backlog) @@ -735,7 +801,7 @@ static int unix_listen(struct socket *sock, int backlog) WRITE_ONCE(sk->sk_state, TCP_LISTEN); /* set credentials so connect can copy them */ - init_peercred(sk); + update_peercred(sk); err = 0; out_unlock: @@ -972,12 +1038,15 @@ static struct sock *unix_create1(struct net *net, struct socket *sock, int kern, sk->sk_write_space = unix_write_space; sk->sk_max_ack_backlog = READ_ONCE(net->unx.sysctl_max_dgram_qlen); sk->sk_destruct = unix_sock_destructor; + lock_set_cmp_fn(&sk->sk_receive_queue.lock, unix_recvq_lock_cmp_fn, NULL); + u = unix_sk(sk); u->listener = NULL; u->vertex = NULL; u->path.dentry = NULL; u->path.mnt = NULL; spin_lock_init(&u->lock); + lock_set_cmp_fn(&u->lock, unix_state_lock_cmp_fn, NULL); mutex_init(&u->iolock); /* single task reading lock */ mutex_init(&u->bindlock); /* single task binding lock */ init_waitqueue_head(&u->peer_wait); @@ -1326,11 +1395,12 @@ static void unix_state_double_lock(struct sock *sk1, struct sock *sk2) unix_state_lock(sk1); return; } + if (sk1 > sk2) swap(sk1, sk2); unix_state_lock(sk1); - unix_state_lock_nested(sk2, U_LOCK_SECOND); + unix_state_lock(sk2); } static void unix_state_double_unlock(struct sock *sk1, struct sock *sk2) @@ -1473,6 +1543,7 @@ static int unix_stream_connect(struct 
socket *sock, struct sockaddr *uaddr, struct unix_sock *u = unix_sk(sk), *newu, *otheru; struct net *net = sock_net(sk); struct sk_buff *skb = NULL; + unsigned char state; long timeo; int err; @@ -1523,7 +1594,6 @@ restart: goto out; } - /* Latch state of peer */ unix_state_lock(other); /* Apparently VFS overslept socket death. Retry. */ @@ -1553,37 +1623,21 @@ restart: goto restart; } - /* Latch our state. - - It is tricky place. We need to grab our state lock and cannot - drop lock on peer. It is dangerous because deadlock is - possible. Connect to self case and simultaneous - attempt to connect are eliminated by checking socket - state. other is TCP_LISTEN, if sk is TCP_LISTEN we - check this before attempt to grab lock. - - Well, and we have to recheck the state after socket locked. + /* self connect and simultaneous connect are eliminated + * by rejecting TCP_LISTEN socket to avoid deadlock. */ - switch (READ_ONCE(sk->sk_state)) { - case TCP_CLOSE: - /* This is ok... continue with connect */ - break; - case TCP_ESTABLISHED: - /* Socket is already connected */ - err = -EISCONN; - goto out_unlock; - default: - err = -EINVAL; + state = READ_ONCE(sk->sk_state); + if (unlikely(state != TCP_CLOSE)) { + err = state == TCP_ESTABLISHED ? -EISCONN : -EINVAL; goto out_unlock; } - unix_state_lock_nested(sk, U_LOCK_SECOND); + unix_state_lock(sk); - if (sk->sk_state != TCP_CLOSE) { + if (unlikely(sk->sk_state != TCP_CLOSE)) { + err = sk->sk_state == TCP_ESTABLISHED ? -EISCONN : -EINVAL; unix_state_unlock(sk); - unix_state_unlock(other); - sock_put(other); - goto restart; + goto out_unlock; } err = security_unix_stream_connect(sk, other, newsk); @@ -2717,9 +2771,8 @@ static int unix_stream_read_generic(struct unix_stream_read_state *state, skip = max(sk_peek_offset(sk, flags), 0); do { - int chunk; - bool drop_skb; struct sk_buff *skb, *last; + int chunk; redo: unix_state_lock(sk); @@ -2815,11 +2868,7 @@ unlock: } chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size); - skb_get(skb); chunk = state->recv_actor(skb, skip, chunk, state); - drop_skb = !unix_skb_len(skb); - /* skb is only safe to use if !drop_skb */ - consume_skb(skb); if (chunk < 0) { if (copied == 0) copied = -EFAULT; @@ -2828,18 +2877,6 @@ unlock: copied += chunk; size -= chunk; - if (drop_skb) { - /* the skb was touched by a concurrent reader; - * we should not expect anything from this skb - * anymore and assume it invalid - we can be - * sure it was dropped from the socket queue - * - * let's report a short read - */ - err = 0; - break; - } - /* Mark read part of skb as used */ if (!(flags & MSG_PEEK)) { UNIXCB(skb).consumed += chunk; @@ -3620,6 +3657,7 @@ static int __net_init unix_net_init(struct net *net) for (i = 0; i < UNIX_HASH_SIZE; i++) { spin_lock_init(&net->unx.table.locks[i]); + lock_set_cmp_fn(&net->unx.table.locks[i], unix_table_lock_cmp_fn, NULL); INIT_HLIST_HEAD(&net->unx.table.buckets[i]); } diff --git a/net/unix/diag.c b/net/unix/diag.c index 937edf4afed4..9138af8b465e 100644 --- a/net/unix/diag.c +++ b/net/unix/diag.c @@ -47,9 +47,7 @@ static int sk_diag_dump_peer(struct sock *sk, struct sk_buff *nlskb) peer = unix_peer_get(sk); if (peer) { - unix_state_lock(peer); ino = sock_i_ino(peer); - unix_state_unlock(peer); sock_put(peer); return nla_put_u32(nlskb, UNIX_DIAG_PEER, ino); @@ -75,20 +73,9 @@ static int sk_diag_dump_icons(struct sock *sk, struct sk_buff *nlskb) buf = nla_data(attr); i = 0; - skb_queue_walk(&sk->sk_receive_queue, skb) { - struct sock *req, *peer; + skb_queue_walk(&sk->sk_receive_queue, 
skb) + buf[i++] = sock_i_ino(unix_peer(skb->sk)); - req = skb->sk; - /* - * The state lock is outer for the same sk's - * queue lock. With the other's queue locked it's - * OK to lock the state. - */ - unix_state_lock_nested(req, U_LOCK_DIAG); - peer = unix_sk(req)->peer; - buf[i++] = (peer ? sock_i_ino(peer) : 0); - unix_state_unlock(req); - } spin_unlock(&sk->sk_receive_queue.lock); } @@ -180,22 +167,6 @@ out_nlmsg_trim: return -EMSGSIZE; } -static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, - struct user_namespace *user_ns, - u32 portid, u32 seq, u32 flags) -{ - int sk_ino; - - unix_state_lock(sk); - sk_ino = sock_i_ino(sk); - unix_state_unlock(sk); - - if (!sk_ino) - return 0; - - return sk_diag_fill(sk, skb, req, user_ns, portid, seq, flags, sk_ino); -} - static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); @@ -213,14 +184,22 @@ static int unix_diag_dump(struct sk_buff *skb, struct netlink_callback *cb) num = 0; spin_lock(&net->unx.table.locks[slot]); sk_for_each(sk, &net->unx.table.buckets[slot]) { + int sk_ino; + if (num < s_num) goto next; + if (!(req->udiag_states & (1 << READ_ONCE(sk->sk_state)))) goto next; + + sk_ino = sock_i_ino(sk); + if (!sk_ino) + goto next; + + if (sk_diag_fill(sk, skb, req, sk_user_ns(skb->sk), NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, - NLM_F_MULTI) < 0) { + NLM_F_MULTI, sk_ino) < 0) { spin_unlock(&net->unx.table.locks[slot]); goto done; } diff --git a/net/unix/garbage.c b/net/unix/garbage.c index 23efb78fe9ef..06d94ad999e9 100644 --- a/net/unix/garbage.c +++ b/net/unix/garbage.c @@ -337,11 +337,6 @@ static bool unix_vertex_dead(struct unix_vertex *vertex) return true; } -enum unix_recv_queue_lock_class { - U_RECVQ_LOCK_NORMAL, - U_RECVQ_LOCK_EMBRYO, -}; - static void unix_collect_queue(struct unix_sock *u, struct sk_buff_head *hitlist) { skb_queue_splice_init(&u->sk.sk_receive_queue, hitlist); @@ -375,8 +370,7 @@ static void unix_collect_skb(struct list_head *scc, struct sk_buff_head *hitlist skb_queue_walk(queue, skb) { struct sk_buff_head *embryo_queue = &skb->sk->sk_receive_queue; - /* listener -> embryo order, the inversion never happens. */ - spin_lock_nested(&embryo_queue->lock, U_RECVQ_LOCK_EMBRYO); + spin_lock(&embryo_queue->lock); unix_collect_queue(unix_sk(skb->sk), hitlist); spin_unlock(&embryo_queue->lock); } diff --git a/net/wireless/chan.c b/net/wireless/chan.c index 3414b2c3abcc..e579d7e1425f 100644 --- a/net/wireless/chan.c +++ b/net/wireless/chan.c @@ -263,6 +263,37 @@ static int cfg80211_chandef_get_width(const struct cfg80211_chan_def *c) return nl80211_chan_width_to_mhz(c->width); } +static bool cfg80211_valid_center_freq(u32 center, + enum nl80211_chan_width width) +{ + int bw; + int step; + + /* We only do strict verification on 6 GHz */ + if (center < 5955 || center > 7115) + return true; + + bw = nl80211_chan_width_to_mhz(width); + if (bw < 0) + return false; + + /* Validate that the channel's bw is entirely within the 6 GHz band */ + if (center - bw / 2 < 5945 || center + bw / 2 > 7125) + return false; + + /* With 320 MHz the permitted channels overlap */ + if (bw == 320) + step = 160; + else + step = bw; + + /* + * Valid channels are packed from lowest frequency towards higher ones. + * So test that the lower frequency aligns with one of these steps.
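+ * For example, valid 80 MHz centres run 5985, 6065, 6145, ... MHz: + * (6065 - 40 - 5945) % 80 == 0, whereas a centre of 6105 MHz leaves + * remainder 40 and is rejected.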
+ */ + return (center - bw / 2 - 5945) % step == 0; +} + bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef) { u32 control_freq, oper_freq; @@ -374,6 +405,13 @@ bool cfg80211_chandef_valid(const struct cfg80211_chan_def *chandef) return false; } + if (!cfg80211_valid_center_freq(chandef->center_freq1, chandef->width)) + return false; + + if (chandef->width == NL80211_CHAN_WIDTH_80P80 && + !cfg80211_valid_center_freq(chandef->center_freq2, chandef->width)) + return false; + /* channel 14 is only for IEEE 802.11b */ if (chandef->center_freq1 == 2484 && chandef->width != NL80211_CHAN_WIDTH_20_NOHT) @@ -1145,7 +1183,8 @@ EXPORT_SYMBOL(cfg80211_chandef_dfs_cac_time); static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy, u32 center_freq, u32 bandwidth, - u32 prohibited_flags, bool monitor) + u32 prohibited_flags, + u32 permitting_flags) { struct ieee80211_channel *c; u32 freq, start_freq, end_freq; @@ -1157,7 +1196,7 @@ static bool cfg80211_secondary_chans_ok(struct wiphy *wiphy, c = ieee80211_get_channel_khz(wiphy, freq); if (!c) return false; - if (monitor && c->flags & IEEE80211_CHAN_CAN_MONITOR) + if (c->flags & permitting_flags) continue; if (c->flags & prohibited_flags) return false; @@ -1221,7 +1260,8 @@ static bool cfg80211_edmg_usable(struct wiphy *wiphy, u8 edmg_channels, bool _cfg80211_chandef_usable(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef, - u32 prohibited_flags, bool monitor) + u32 prohibited_flags, + u32 permitting_flags) { struct ieee80211_sta_ht_cap *ht_cap; struct ieee80211_sta_vht_cap *vht_cap; @@ -1383,22 +1423,23 @@ bool _cfg80211_chandef_usable(struct wiphy *wiphy, if (!cfg80211_secondary_chans_ok(wiphy, ieee80211_chandef_to_khz(chandef), - width, prohibited_flags, monitor)) + width, prohibited_flags, + permitting_flags)) return false; if (!chandef->center_freq2) return true; return cfg80211_secondary_chans_ok(wiphy, MHZ_TO_KHZ(chandef->center_freq2), - width, prohibited_flags, monitor); + width, prohibited_flags, + permitting_flags); } bool cfg80211_chandef_usable(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef, u32 prohibited_flags) { - return _cfg80211_chandef_usable(wiphy, chandef, prohibited_flags, - false); + return _cfg80211_chandef_usable(wiphy, chandef, prohibited_flags, 0); } EXPORT_SYMBOL(cfg80211_chandef_usable); @@ -1520,49 +1561,50 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy, static bool _cfg80211_reg_can_beacon(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, enum nl80211_iftype iftype, - bool check_no_ir) + u32 prohibited_flags, + u32 permitting_flags) { - bool res; - u32 prohibited_flags = IEEE80211_CHAN_DISABLED; + bool res, check_radar; int dfs_required; - trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir); + trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype, + prohibited_flags, + permitting_flags); - if (check_no_ir) - prohibited_flags |= IEEE80211_CHAN_NO_IR; + if (!_cfg80211_chandef_usable(wiphy, chandef, + IEEE80211_CHAN_DISABLED, 0)) + return false; dfs_required = cfg80211_chandef_dfs_required(wiphy, chandef, iftype); - if (dfs_required != 0) - prohibited_flags |= IEEE80211_CHAN_RADAR; + check_radar = dfs_required != 0; if (dfs_required > 0 && cfg80211_chandef_dfs_available(wiphy, chandef)) { /* We can skip IEEE80211_CHAN_NO_IR if chandef dfs available */ - prohibited_flags = IEEE80211_CHAN_DISABLED; + prohibited_flags &= ~IEEE80211_CHAN_NO_IR; + check_radar = false; } - res = cfg80211_chandef_usable(wiphy, chandef, prohibited_flags); + if 
(check_radar && + !_cfg80211_chandef_usable(wiphy, chandef, + IEEE80211_CHAN_RADAR, 0)) + return false; + + res = _cfg80211_chandef_usable(wiphy, chandef, + prohibited_flags, + permitting_flags); trace_cfg80211_return_bool(res); return res; } -bool cfg80211_reg_can_beacon(struct wiphy *wiphy, - struct cfg80211_chan_def *chandef, - enum nl80211_iftype iftype) -{ - return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, true); -} -EXPORT_SYMBOL(cfg80211_reg_can_beacon); - -bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, - struct cfg80211_chan_def *chandef, - enum nl80211_iftype iftype) +bool cfg80211_reg_check_beaconing(struct wiphy *wiphy, + struct cfg80211_chan_def *chandef, + struct cfg80211_beaconing_check_config *cfg) { struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); - bool check_no_ir; - - lockdep_assert_held(&rdev->wiphy.mtx); + u32 permitting_flags = 0; + bool check_no_ir = true; /* * Under certain conditions suggested by some regulatory bodies a @@ -1570,12 +1612,20 @@ bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy, * only if such relaxations are not enabled and the conditions are not * met. */ - check_no_ir = !cfg80211_ir_permissive_chan(wiphy, iftype, - chandef->chan); + if (cfg->relax) { + lockdep_assert_held(&rdev->wiphy.mtx); + check_no_ir = !cfg80211_ir_permissive_chan(wiphy, cfg->iftype, + chandef->chan); + } - return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir); + if (cfg->reg_power == IEEE80211_REG_VLP_AP) + permitting_flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP; + + return _cfg80211_reg_can_beacon(wiphy, chandef, cfg->iftype, + check_no_ir ? IEEE80211_CHAN_NO_IR : 0, + permitting_flags); } -EXPORT_SYMBOL(cfg80211_reg_can_beacon_relax); +EXPORT_SYMBOL(cfg80211_reg_check_beaconing); int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev, struct cfg80211_chan_def *chandef) diff --git a/net/wireless/core.c b/net/wireless/core.c index 4b1f45e3070e..4d5d351bd0b5 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -421,6 +421,8 @@ static void cfg80211_wiphy_work(struct work_struct *work) rdev = container_of(work, struct cfg80211_registered_device, wiphy_work); + trace_wiphy_work_worker_start(&rdev->wiphy); + wiphy_lock(&rdev->wiphy); if (rdev->suspended) goto out; @@ -434,6 +436,7 @@ static void cfg80211_wiphy_work(struct work_struct *work) queue_work(system_unbound_wq, work); spin_unlock_irq(&rdev->wiphy_work_lock); + trace_wiphy_work_run(&rdev->wiphy, wk); wk->func(&rdev->wiphy, wk); } else { spin_unlock_irq(&rdev->wiphy_work_lock); @@ -1066,6 +1069,7 @@ void cfg80211_process_wiphy_works(struct cfg80211_registered_device *rdev, list_del_init(&wk->entry); spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); + trace_wiphy_work_run(&rdev->wiphy, wk); wk->func(&rdev->wiphy, wk); spin_lock_irqsave(&rdev->wiphy_work_lock, flags); @@ -1141,7 +1145,8 @@ void wiphy_unregister(struct wiphy *wiphy) flush_work(&rdev->background_cac_abort_wk); cfg80211_rdev_free_wowlan(rdev); - cfg80211_rdev_free_coalesce(rdev); + cfg80211_free_coalesce(rdev->coalesce); + rdev->coalesce = NULL; } EXPORT_SYMBOL(wiphy_unregister); @@ -1610,6 +1615,8 @@ void wiphy_work_queue(struct wiphy *wiphy, struct wiphy_work *work) struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); unsigned long flags; + trace_wiphy_work_queue(wiphy, work); + spin_lock_irqsave(&rdev->wiphy_work_lock, flags); if (list_empty(&work->entry)) list_add_tail(&work->entry, &rdev->wiphy_work_list); @@ -1626,6 +1633,8 @@ void wiphy_work_cancel(struct wiphy 
*wiphy, struct wiphy_work *work) lockdep_assert_held(&wiphy->mtx); + trace_wiphy_work_cancel(wiphy, work); + spin_lock_irqsave(&rdev->wiphy_work_lock, flags); if (!list_empty(&work->entry)) list_del_init(&work->entry); @@ -1639,6 +1648,8 @@ void wiphy_work_flush(struct wiphy *wiphy, struct wiphy_work *work) unsigned long flags; bool run; + trace_wiphy_work_flush(wiphy, work); + spin_lock_irqsave(&rdev->wiphy_work_lock, flags); run = !work || !list_empty(&work->entry); spin_unlock_irqrestore(&rdev->wiphy_work_lock, flags); @@ -1660,6 +1671,8 @@ void wiphy_delayed_work_queue(struct wiphy *wiphy, struct wiphy_delayed_work *dwork, unsigned long delay) { + trace_wiphy_delayed_work_queue(wiphy, &dwork->work, delay); + if (!delay) { del_timer(&dwork->timer); wiphy_work_queue(wiphy, &dwork->work); diff --git a/net/wireless/core.h b/net/wireless/core.h index 118f2f619828..41c8c0e3ba2e 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -494,7 +494,8 @@ bool cfg80211_wdev_on_sub_chan(struct wireless_dev *wdev, bool primary_only); bool _cfg80211_chandef_usable(struct wiphy *wiphy, const struct cfg80211_chan_def *chandef, - u32 prohibited_flags, bool monitor); + u32 prohibited_flags, + u32 permitting_flags); static inline unsigned int elapsed_jiffies_msecs(unsigned long start) { @@ -532,6 +533,10 @@ struct cfg80211_internal_bss * cfg80211_bss_update(struct cfg80211_registered_device *rdev, struct cfg80211_internal_bss *tmp, bool signal_valid, unsigned long ts); + +enum ieee80211_ap_reg_power +cfg80211_get_6ghz_power_type(const u8 *elems, size_t elems_len); + #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS #define CFG80211_DEV_WARN_ON(cond) WARN_ON(cond) #else diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c index 9f02ee5f08be..34e5acff3935 100644 --- a/net/wireless/ibss.c +++ b/net/wireless/ibss.c @@ -3,7 +3,7 @@ * Some IBSS support code for cfg80211. 
* * Copyright 2009 Johannes Berg - * Copyright (C) 2020-2023 Intel Corporation + * Copyright (C) 2020-2024 Intel Corporation */ #include @@ -94,6 +94,9 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev, lockdep_assert_held(&rdev->wiphy.mtx); + if (wdev->cac_started) + return -EBUSY; + if (wdev->u.ibss.ssid_len) return -EALREADY; diff --git a/net/wireless/mesh.c b/net/wireless/mesh.c index 83306979fbe2..aaca65b66af4 100644 --- a/net/wireless/mesh.c +++ b/net/wireless/mesh.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* * Portions - * Copyright (C) 2022-2023 Intel Corporation + * Copyright (C) 2022-2024 Intel Corporation */ #include #include @@ -127,6 +127,9 @@ int __cfg80211_join_mesh(struct cfg80211_registered_device *rdev, if (!rdev->ops->join_mesh) return -EOPNOTSUPP; + if (wdev->cac_started) + return -EBUSY; + if (!setup->chandef.chan) { /* if no channel explicitly given, use preset channel */ setup->chandef = wdev->u.mesh.preset_chandef; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 72c7bf558581..7397a372c78e 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -315,8 +315,7 @@ nl80211_pmsr_ftm_req_attr_policy[NL80211_PMSR_FTM_REQ_ATTR_MAX + 1] = { [NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD] = { .type = NLA_U16 }, [NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION] = NLA_POLICY_MAX(NLA_U8, 15), - [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] = - NLA_POLICY_MAX(NLA_U8, 31), + [NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST] = { .type = NLA_U8 }, [NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES] = { .type = NLA_U8 }, [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC] = { .type = NLA_FLAG }, @@ -1208,6 +1207,12 @@ static int nl80211_msg_put_channel(struct sk_buff *msg, struct wiphy *wiphy, if ((chan->flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT) && nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_6GHZ_AFC_CLIENT)) goto nla_put_failure; + if ((chan->flags & IEEE80211_CHAN_CAN_MONITOR) && + nla_put_flag(msg, NL80211_FREQUENCY_ATTR_CAN_MONITOR)) + goto nla_put_failure; + if ((chan->flags & IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP) && + nla_put_flag(msg, NL80211_FREQUENCY_ATTR_ALLOW_6GHZ_VLP_AP)) + goto nla_put_failure; } if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, @@ -1626,71 +1631,87 @@ nla_put_failure: return -ENOBUFS; } +static int nl80211_put_ifcomb_data(struct sk_buff *msg, bool large, int idx, + const struct ieee80211_iface_combination *c, + u16 nested) +{ + struct nlattr *nl_combi, *nl_limits; + int i; + + nl_combi = nla_nest_start_noflag(msg, idx | nested); + if (!nl_combi) + goto nla_put_failure; + + nl_limits = nla_nest_start_noflag(msg, NL80211_IFACE_COMB_LIMITS | + nested); + if (!nl_limits) + goto nla_put_failure; + + for (i = 0; i < c->n_limits; i++) { + struct nlattr *nl_limit; + + nl_limit = nla_nest_start_noflag(msg, i + 1); + if (!nl_limit) + goto nla_put_failure; + if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX, c->limits[i].max)) + goto nla_put_failure; + if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES, + c->limits[i].types)) + goto nla_put_failure; + nla_nest_end(msg, nl_limit); + } + + nla_nest_end(msg, nl_limits); + + if (c->beacon_int_infra_match && + nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH)) + goto nla_put_failure; + if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS, + c->num_different_channels) || + nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM, + c->max_interfaces)) + goto nla_put_failure; + if (large && + (nla_put_u32(msg, 
NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS, + c->radar_detect_widths) || + nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS, + c->radar_detect_regions))) + goto nla_put_failure; + if (c->beacon_int_min_gcd && + nla_put_u32(msg, NL80211_IFACE_COMB_BI_MIN_GCD, + c->beacon_int_min_gcd)) + goto nla_put_failure; + + nla_nest_end(msg, nl_combi); + + return 0; +nla_put_failure: + return -ENOBUFS; +} + static int nl80211_put_iface_combinations(struct wiphy *wiphy, struct sk_buff *msg, - bool large) + int attr, int radio, + bool large, u16 nested) { + const struct ieee80211_iface_combination *c; struct nlattr *nl_combis; - int i, j; + int i, n; - nl_combis = nla_nest_start_noflag(msg, - NL80211_ATTR_INTERFACE_COMBINATIONS); + nl_combis = nla_nest_start_noflag(msg, attr | nested); if (!nl_combis) goto nla_put_failure; - for (i = 0; i < wiphy->n_iface_combinations; i++) { - const struct ieee80211_iface_combination *c; - struct nlattr *nl_combi, *nl_limits; - - c = &wiphy->iface_combinations[i]; - - nl_combi = nla_nest_start_noflag(msg, i + 1); - if (!nl_combi) - goto nla_put_failure; - - nl_limits = nla_nest_start_noflag(msg, - NL80211_IFACE_COMB_LIMITS); - if (!nl_limits) - goto nla_put_failure; - - for (j = 0; j < c->n_limits; j++) { - struct nlattr *nl_limit; - - nl_limit = nla_nest_start_noflag(msg, j + 1); - if (!nl_limit) - goto nla_put_failure; - if (nla_put_u32(msg, NL80211_IFACE_LIMIT_MAX, - c->limits[j].max)) - goto nla_put_failure; - if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES, - c->limits[j].types)) - goto nla_put_failure; - nla_nest_end(msg, nl_limit); - } - - nla_nest_end(msg, nl_limits); - - if (c->beacon_int_infra_match && - nla_put_flag(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH)) - goto nla_put_failure; - if (nla_put_u32(msg, NL80211_IFACE_COMB_NUM_CHANNELS, - c->num_different_channels) || - nla_put_u32(msg, NL80211_IFACE_COMB_MAXNUM, - c->max_interfaces)) - goto nla_put_failure; - if (large && - (nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_WIDTHS, - c->radar_detect_widths) || - nla_put_u32(msg, NL80211_IFACE_COMB_RADAR_DETECT_REGIONS, - c->radar_detect_regions))) - goto nla_put_failure; - if (c->beacon_int_min_gcd && - nla_put_u32(msg, NL80211_IFACE_COMB_BI_MIN_GCD, - c->beacon_int_min_gcd)) - goto nla_put_failure; - - nla_nest_end(msg, nl_combi); + if (radio >= 0) { + c = wiphy->radio[radio].iface_combinations; + n = wiphy->radio[radio].n_iface_combinations; + } else { + c = wiphy->iface_combinations; + n = wiphy->n_iface_combinations; } + for (i = 0; i < n; i++) + if (nl80211_put_ifcomb_data(msg, large, i + 1, &c[i], nested)) + goto nla_put_failure; nla_nest_end(msg, nl_combis); @@ -2396,6 +2417,80 @@ fail: return -ENOBUFS; } +static int nl80211_put_radio(struct wiphy *wiphy, struct sk_buff *msg, int idx) +{ + const struct wiphy_radio *r = &wiphy->radio[idx]; + struct nlattr *radio, *freq; + int i; + + radio = nla_nest_start(msg, idx); + if (!radio) + return -ENOBUFS; + + if (nla_put_u32(msg, NL80211_WIPHY_RADIO_ATTR_INDEX, idx)) + goto nla_put_failure; + + for (i = 0; i < r->n_freq_range; i++) { + const struct wiphy_radio_freq_range *range = &r->freq_range[i]; + + freq = nla_nest_start(msg, NL80211_WIPHY_RADIO_ATTR_FREQ_RANGE); + if (!freq) + goto nla_put_failure; + + if (nla_put_u32(msg, NL80211_WIPHY_RADIO_FREQ_ATTR_START, + range->start_freq) || + nla_put_u32(msg, NL80211_WIPHY_RADIO_FREQ_ATTR_END, + range->end_freq)) + goto nla_put_failure; + + nla_nest_end(msg, freq); + } + + for (i = 0; i < r->n_iface_combinations; i++) + if (nl80211_put_ifcomb_data(msg, true, + 
NL80211_WIPHY_RADIO_ATTR_INTERFACE_COMBINATION, + &r->iface_combinations[i], + NLA_F_NESTED)) + goto nla_put_failure; + + nla_nest_end(msg, radio); + + return 0; + +nla_put_failure: + return -ENOBUFS; +} + +static int nl80211_put_radios(struct wiphy *wiphy, struct sk_buff *msg) +{ + struct nlattr *radios; + int i; + + if (!wiphy->n_radio) + return 0; + + radios = nla_nest_start(msg, NL80211_ATTR_WIPHY_RADIOS); + if (!radios) + return -ENOBUFS; + + for (i = 0; i < wiphy->n_radio; i++) + if (nl80211_put_radio(wiphy, msg, i)) + goto fail; + + nla_nest_end(msg, radios); + + if (nl80211_put_iface_combinations(wiphy, msg, + NL80211_ATTR_WIPHY_INTERFACE_COMBINATIONS, + -1, true, NLA_F_NESTED)) + return -ENOBUFS; + + return 0; + +fail: + nla_nest_cancel(msg, radios); + return -ENOBUFS; +} + struct nl80211_dump_wiphy_state { s64 filter_wiphy; long start; @@ -2691,7 +2786,9 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, goto nla_put_failure; if (nl80211_put_iface_combinations(&rdev->wiphy, msg, - state->split)) + NL80211_ATTR_INTERFACE_COMBINATIONS, + rdev->wiphy.n_radio ? 0 : -1, + state->split, 0)) goto nla_put_failure; state->split_start++; @@ -3005,6 +3102,12 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev, rdev->wiphy.hw_timestamp_max_peers)) goto nla_put_failure; + state->split_start++; + break; + case 17: + if (nl80211_put_radios(&rdev->wiphy, msg)) + goto nla_put_failure; + /* done */ state->split_start = 0; break; @@ -3348,7 +3451,7 @@ static int _nl80211_parse_chandef(struct cfg80211_registered_device *rdev, if (!_cfg80211_chandef_usable(&rdev->wiphy, chandef, IEEE80211_CHAN_DISABLED, - monitor)) { + monitor ? IEEE80211_CHAN_CAN_MONITOR : 0)) { NL_SET_ERR_MSG(extack, "(extension) channel is disabled"); return -EINVAL; } @@ -3419,6 +3522,33 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, if (chandef.chan != cur_chan) return -EBUSY; + /* only allow this for regular channel widths */ + switch (wdev->links[link_id].ap.chandef.width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_40: + case NL80211_CHAN_WIDTH_80: + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + case NL80211_CHAN_WIDTH_320: + break; + default: + return -EINVAL; + } + + switch (chandef.width) { + case NL80211_CHAN_WIDTH_20_NOHT: + case NL80211_CHAN_WIDTH_20: + case NL80211_CHAN_WIDTH_40: + case NL80211_CHAN_WIDTH_80: + case NL80211_CHAN_WIDTH_80P80: + case NL80211_CHAN_WIDTH_160: + case NL80211_CHAN_WIDTH_320: + break; + default: + return -EINVAL; + } + result = rdev_set_ap_chanwidth(rdev, dev, link_id, &chandef); if (result) @@ -4455,10 +4585,7 @@ static void get_key_callback(void *c, struct key_params *params) struct nlattr *key; struct get_key_cookie *cookie = c; - if ((params->key && - nla_put(cookie->msg, NL80211_ATTR_KEY_DATA, - params->key_len, params->key)) || - (params->seq && + if ((params->seq && nla_put(cookie->msg, NL80211_ATTR_KEY_SEQ, params->seq_len, params->seq)) || (params->cipher && @@ -4470,10 +4597,7 @@ static void get_key_callback(void *c, struct key_params *params) if (!key) goto nla_put_failure; - if ((params->key && - nla_put(cookie->msg, NL80211_KEY_DATA, - params->key_len, params->key)) || - (params->seq && + if ((params->seq && nla_put(cookie->msg, NL80211_KEY_SEQ, params->seq_len, params->seq)) || (params->cipher && @@ -5928,6 +6052,7 @@ static int nl80211_validate_ap_phy_operation(struct cfg80211_ap_settings *params static int nl80211_start_ap(struct sk_buff 
*skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct cfg80211_beaconing_check_config beacon_check = {}; unsigned int link_id = nl80211_link_id(info->attrs); struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; @@ -5941,6 +6066,9 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) if (!rdev->ops->start_ap) return -EOPNOTSUPP; + if (wdev->cac_started) + return -EBUSY; + if (wdev->links[link_id].ap.beacon_interval) return -EALREADY; @@ -6074,8 +6202,13 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info) goto out; } - if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, ¶ms->chandef, - wdev->iftype)) { + beacon_check.iftype = wdev->iftype; + beacon_check.relax = true; + beacon_check.reg_power = + cfg80211_get_6ghz_power_type(params->beacon.tail, + params->beacon.tail_len); + if (!cfg80211_reg_check_beaconing(&rdev->wiphy, ¶ms->chandef, + &beacon_check)) { err = -EINVAL; goto out; } @@ -6232,6 +6365,7 @@ out: static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; + struct cfg80211_beaconing_check_config beacon_check = {}; unsigned int link_id = nl80211_link_id(info->attrs); struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; @@ -6258,6 +6392,19 @@ static int nl80211_set_beacon(struct sk_buff *skb, struct genl_info *info) if (err) goto out; + /* recheck beaconing is permitted with possibly changed power type */ + beacon_check.iftype = wdev->iftype; + beacon_check.relax = true; + beacon_check.reg_power = + cfg80211_get_6ghz_power_type(params->beacon.tail, + params->beacon.tail_len); + if (!cfg80211_reg_check_beaconing(&rdev->wiphy, + &wdev->links[link_id].ap.chandef, + &beacon_check)) { + err = -EINVAL; + goto out; + } + attr = info->attrs[NL80211_ATTR_FILS_DISCOVERY]; if (attr) { err = nl80211_parse_fils_discovery(rdev, attr, @@ -9933,6 +10080,17 @@ static int nl80211_start_radar_detection(struct sk_buff *skb, flush_delayed_work(&rdev->dfs_update_channels_wk); + switch (wdev->iftype) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_MESH_POINT: + case NL80211_IFTYPE_ADHOC: + break; + default: + /* caution - see cfg80211_beaconing_iface_active() below */ + return -EINVAL; + } + wiphy_lock(wiphy); dfs_region = reg_get_dfs_region(wiphy); @@ -9963,12 +10121,7 @@ static int nl80211_start_radar_detection(struct sk_buff *skb, goto unlock; } - if (netif_carrier_ok(dev)) { - err = -EBUSY; - goto unlock; - } - - if (wdev->cac_started) { + if (cfg80211_beaconing_iface_active(wdev) || wdev->cac_started) { err = -EBUSY; goto unlock; } @@ -13865,9 +14018,8 @@ nla_put_failure: return -ENOBUFS; } -void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev) +void cfg80211_free_coalesce(struct cfg80211_coalesce *coalesce) { - struct cfg80211_coalesce *coalesce = rdev->coalesce; int i, j; struct cfg80211_coalesce_rules *rule; @@ -13876,13 +14028,13 @@ void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev) for (i = 0; i < coalesce->n_rules; i++) { rule = &coalesce->rules[i]; + if (!rule) + continue; for (j = 0; j < rule->n_patterns; j++) kfree(rule->patterns[j].mask); kfree(rule->patterns); } - kfree(coalesce->rules); kfree(coalesce); - rdev->coalesce = NULL; } static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev, @@ -13980,17 +14132,16 @@ static int nl80211_set_coalesce(struct 
sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; const struct wiphy_coalesce_support *coalesce = rdev->wiphy.coalesce; - struct cfg80211_coalesce new_coalesce = {}; - struct cfg80211_coalesce *n_coalesce; - int err, rem_rule, n_rules = 0, i, j; + struct cfg80211_coalesce *new_coalesce; + int err, rem_rule, n_rules = 0, i; struct nlattr *rule; - struct cfg80211_coalesce_rules *tmp_rule; if (!rdev->wiphy.coalesce || !rdev->ops->set_coalesce) return -EOPNOTSUPP; if (!info->attrs[NL80211_ATTR_COALESCE_RULE]) { - cfg80211_rdev_free_coalesce(rdev); + cfg80211_free_coalesce(rdev->coalesce); + rdev->coalesce = NULL; rdev_set_coalesce(rdev, NULL); return 0; } @@ -14001,47 +14152,34 @@ static int nl80211_set_coalesce(struct sk_buff *skb, struct genl_info *info) if (n_rules > coalesce->n_rules) return -EINVAL; - new_coalesce.rules = kcalloc(n_rules, sizeof(new_coalesce.rules[0]), - GFP_KERNEL); - if (!new_coalesce.rules) + new_coalesce = kzalloc(struct_size(new_coalesce, rules, n_rules), + GFP_KERNEL); + if (!new_coalesce) return -ENOMEM; - new_coalesce.n_rules = n_rules; + new_coalesce->n_rules = n_rules; i = 0; nla_for_each_nested(rule, info->attrs[NL80211_ATTR_COALESCE_RULE], rem_rule) { err = nl80211_parse_coalesce_rule(rdev, rule, - &new_coalesce.rules[i]); + &new_coalesce->rules[i]); if (err) goto error; i++; } - err = rdev_set_coalesce(rdev, &new_coalesce); + err = rdev_set_coalesce(rdev, new_coalesce); if (err) goto error; - n_coalesce = kmemdup(&new_coalesce, sizeof(new_coalesce), GFP_KERNEL); - if (!n_coalesce) { - err = -ENOMEM; - goto error; - } - cfg80211_rdev_free_coalesce(rdev); - rdev->coalesce = n_coalesce; + cfg80211_free_coalesce(rdev->coalesce); + rdev->coalesce = new_coalesce; return 0; error: - for (i = 0; i < new_coalesce.n_rules; i++) { - tmp_rule = &new_coalesce.rules[i]; - if (!tmp_rule) - continue; - for (j = 0; j < tmp_rule->n_patterns; j++) - kfree(tmp_rule->patterns[j].mask); - kfree(tmp_rule->patterns); - } - kfree(new_coalesce.rules); + cfg80211_free_coalesce(new_coalesce); return err; } diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h index 6376f3a87f8a..ffaab9a92e5b 100644 --- a/net/wireless/nl80211.h +++ b/net/wireless/nl80211.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* * Portions of this file - * Copyright (C) 2018, 2020-2022 Intel Corporation + * Copyright (C) 2018, 2020-2024 Intel Corporation */ #ifndef __NET_WIRELESS_NL80211_H #define __NET_WIRELESS_NL80211_H @@ -119,7 +119,7 @@ nl80211_radar_notify(struct cfg80211_registered_device *rdev, void nl80211_send_ap_stopped(struct wireless_dev *wdev, unsigned int link_id); -void cfg80211_rdev_free_coalesce(struct cfg80211_registered_device *rdev); +void cfg80211_free_coalesce(struct cfg80211_coalesce *coalesce); /* peer measurement */ int nl80211_pmsr_start(struct sk_buff *skb, struct genl_info *info); diff --git a/net/wireless/pmsr.c b/net/wireless/pmsr.c index c569c37da317..0396fa19bdf1 100644 --- a/net/wireless/pmsr.c +++ b/net/wireless/pmsr.c @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * Copyright (C) 2018 - 2021, 2023 Intel Corporation + * Copyright (C) 2018 - 2021, 2023 - 2024 Intel Corporation */ #include #include "core.h" @@ -148,6 +148,14 @@ static int pmsr_parse_ftm(struct cfg80211_registered_device *rdev, return -EINVAL; } + if (out->ftm.ftms_per_burst > 31 && !out->ftm.non_trigger_based && + !out->ftm.trigger_based) { + NL_SET_ERR_MSG_ATTR(info->extack, + tb[NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST], + "FTM: 
FTMs per burst must be set lower than 31"); + return -ERANGE; + } + if ((out->ftm.trigger_based || out->ftm.non_trigger_based) && out->ftm.preamble != NL80211_PREAMBLE_HE) { NL_SET_ERR_MSG_ATTR(info->extack, diff --git a/net/wireless/rdev-ops.h b/net/wireless/rdev-ops.h index 755af47b88b9..ec3f4aa1c807 100644 --- a/net/wireless/rdev-ops.h +++ b/net/wireless/rdev-ops.h @@ -578,13 +578,11 @@ static inline int rdev_leave_ibss(struct cfg80211_registered_device *rdev, static inline int rdev_set_wiphy_params(struct cfg80211_registered_device *rdev, u32 changed) { - int ret; - - if (!rdev->ops->set_wiphy_params) - return -EOPNOTSUPP; + int ret = -EOPNOTSUPP; trace_rdev_set_wiphy_params(&rdev->wiphy, changed); - ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed); + if (rdev->ops->set_wiphy_params) + ret = rdev->ops->set_wiphy_params(&rdev->wiphy, changed); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } @@ -1425,13 +1423,11 @@ rdev_set_radar_background(struct cfg80211_registered_device *rdev, struct cfg80211_chan_def *chandef) { struct wiphy *wiphy = &rdev->wiphy; - int ret; - - if (!rdev->ops->set_radar_background) - return -EOPNOTSUPP; + int ret = -EOPNOTSUPP; trace_rdev_set_radar_background(wiphy, chandef); - ret = rdev->ops->set_radar_background(wiphy, chandef); + if (rdev->ops->set_radar_background) + ret = rdev->ops->set_radar_background(wiphy, chandef); trace_rdev_return_int(wiphy, ret); return ret; @@ -1468,13 +1464,11 @@ rdev_add_link_station(struct cfg80211_registered_device *rdev, struct net_device *dev, struct link_station_parameters *params) { - int ret; - - if (!rdev->ops->add_link_station) - return -EOPNOTSUPP; + int ret = -EOPNOTSUPP; trace_rdev_add_link_station(&rdev->wiphy, dev, params); - ret = rdev->ops->add_link_station(&rdev->wiphy, dev, params); + if (rdev->ops->add_link_station) + ret = rdev->ops->add_link_station(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } @@ -1484,13 +1478,11 @@ rdev_mod_link_station(struct cfg80211_registered_device *rdev, struct net_device *dev, struct link_station_parameters *params) { - int ret; - - if (!rdev->ops->mod_link_station) - return -EOPNOTSUPP; + int ret = -EOPNOTSUPP; trace_rdev_mod_link_station(&rdev->wiphy, dev, params); - ret = rdev->ops->mod_link_station(&rdev->wiphy, dev, params); + if (rdev->ops->mod_link_station) + ret = rdev->ops->mod_link_station(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } @@ -1500,13 +1492,11 @@ rdev_del_link_station(struct cfg80211_registered_device *rdev, struct net_device *dev, struct link_station_del_parameters *params) { - int ret; - - if (!rdev->ops->del_link_station) - return -EOPNOTSUPP; + int ret = -EOPNOTSUPP; trace_rdev_del_link_station(&rdev->wiphy, dev, params); - ret = rdev->ops->del_link_station(&rdev->wiphy, dev, params); + if (rdev->ops->del_link_station) + ret = rdev->ops->del_link_station(&rdev->wiphy, dev, params); trace_rdev_return_int(&rdev->wiphy, ret); return ret; } @@ -1517,13 +1507,11 @@ rdev_set_hw_timestamp(struct cfg80211_registered_device *rdev, struct cfg80211_set_hw_timestamp *hwts) { struct wiphy *wiphy = &rdev->wiphy; - int ret; - - if (!rdev->ops->set_hw_timestamp) - return -EOPNOTSUPP; + int ret = -EOPNOTSUPP; trace_rdev_set_hw_timestamp(wiphy, dev, hwts); - ret = rdev->ops->set_hw_timestamp(wiphy, dev, hwts); + if (rdev->ops->set_hw_timestamp) + ret = rdev->ops->set_hw_timestamp(wiphy, dev, hwts); trace_rdev_return_int(wiphy, ret); return ret; @@ -1535,15 +1523,25 @@ 
rdev_set_ttlm(struct cfg80211_registered_device *rdev, struct cfg80211_ttlm_params *params) { struct wiphy *wiphy = &rdev->wiphy; - int ret; - - if (!rdev->ops->set_ttlm) - return -EOPNOTSUPP; + int ret = -EOPNOTSUPP; trace_rdev_set_ttlm(wiphy, dev, params); - ret = rdev->ops->set_ttlm(wiphy, dev, params); + if (rdev->ops->set_ttlm) + ret = rdev->ops->set_ttlm(wiphy, dev, params); trace_rdev_return_int(wiphy, ret); return ret; } + +static inline u32 +rdev_get_radio_mask(struct cfg80211_registered_device *rdev, + struct net_device *dev) +{ + struct wiphy *wiphy = &rdev->wiphy; + + if (!rdev->ops->get_radio_mask) + return 0; + + return rdev->ops->get_radio_mask(wiphy, dev); +} #endif /* __CFG80211_RDEV_OPS */ diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 3cef0021a3db..4a27f3823e25 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1600,6 +1600,8 @@ static u32 map_regdom_flags(u32 rd_flags) channel_flags |= IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT; if (rd_flags & NL80211_RRF_PSD) channel_flags |= IEEE80211_CHAN_PSD; + if (rd_flags & NL80211_RRF_ALLOW_6GHZ_VLP_AP) + channel_flags |= IEEE80211_CHAN_ALLOW_6GHZ_VLP_AP; return channel_flags; } diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 0222ede0feb6..d99319d82205 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -1604,7 +1604,7 @@ struct cfg80211_bss *__cfg80211_get_bss(struct wiphy *wiphy, } EXPORT_SYMBOL(__cfg80211_get_bss); -static void rb_insert_bss(struct cfg80211_registered_device *rdev, +static bool rb_insert_bss(struct cfg80211_registered_device *rdev, struct cfg80211_internal_bss *bss) { struct rb_node **p = &rdev->bss_tree.rb_node; @@ -1620,7 +1620,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *rdev, if (WARN_ON(!cmp)) { /* will sort of leak this BSS */ - return; + return false; } if (cmp < 0) @@ -1631,6 +1631,7 @@ static void rb_insert_bss(struct cfg80211_registered_device *rdev, rb_link_node(&bss->rbn, parent, p); rb_insert_color(&bss->rbn, &rdev->bss_tree); + return true; } static struct cfg80211_internal_bss * @@ -1657,6 +1658,34 @@ rb_find_bss(struct cfg80211_registered_device *rdev, return NULL; } +static void cfg80211_insert_bss(struct cfg80211_registered_device *rdev, + struct cfg80211_internal_bss *bss) +{ + lockdep_assert_held(&rdev->bss_lock); + + if (!rb_insert_bss(rdev, bss)) + return; + list_add_tail(&bss->list, &rdev->bss_list); + rdev->bss_entries++; +} + +static void cfg80211_rehash_bss(struct cfg80211_registered_device *rdev, + struct cfg80211_internal_bss *bss) +{ + lockdep_assert_held(&rdev->bss_lock); + + rb_erase(&bss->rbn, &rdev->bss_tree); + if (!rb_insert_bss(rdev, bss)) { + list_del(&bss->list); + if (!list_empty(&bss->hidden_list)) + list_del_init(&bss->hidden_list); + if (!list_empty(&bss->pub.nontrans_list)) + list_del_init(&bss->pub.nontrans_list); + rdev->bss_entries--; + } + rdev->bss_generation++; +} + static bool cfg80211_combine_bsses(struct cfg80211_registered_device *rdev, struct cfg80211_internal_bss *new) { @@ -1969,9 +1998,7 @@ __cfg80211_bss_update(struct cfg80211_registered_device *rdev, bss_ref_get(rdev, bss_from_pub(tmp->pub.transmitted_bss)); } - list_add_tail(&new->list, &rdev->bss_list); - rdev->bss_entries++; - rb_insert_bss(rdev, new); + cfg80211_insert_bss(rdev, new); found = new; } @@ -2136,38 +2163,53 @@ struct cfg80211_inform_single_bss_data { u64 cannot_use_reasons; }; -static bool cfg80211_6ghz_power_type_valid(const u8 *ie, size_t ielen, +enum ieee80211_ap_reg_power +cfg80211_get_6ghz_power_type(const u8 *elems, size_t 
elems_len) +{ + const struct ieee80211_he_6ghz_oper *he_6ghz_oper; + struct ieee80211_he_operation *he_oper; + const struct element *tmp; + + tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, + elems, elems_len); + if (!tmp || tmp->datalen < sizeof(*he_oper) + 1 || + tmp->datalen < ieee80211_he_oper_size(tmp->data + 1)) + return IEEE80211_REG_UNSET_AP; + + he_oper = (void *)&tmp->data[1]; + he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper); + + if (!he_6ghz_oper) + return IEEE80211_REG_UNSET_AP; + + switch (u8_get_bits(he_6ghz_oper->control, + IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) { + case IEEE80211_6GHZ_CTRL_REG_LPI_AP: + case IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP: + return IEEE80211_REG_LPI_AP; + case IEEE80211_6GHZ_CTRL_REG_SP_AP: + case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP: + return IEEE80211_REG_SP_AP; + case IEEE80211_6GHZ_CTRL_REG_VLP_AP: + return IEEE80211_REG_VLP_AP; + default: + return IEEE80211_REG_UNSET_AP; + } +} + +static bool cfg80211_6ghz_power_type_valid(const u8 *elems, size_t elems_len, const u32 flags) { - const struct element *tmp; - struct ieee80211_he_operation *he_oper; - - tmp = cfg80211_find_ext_elem(WLAN_EID_EXT_HE_OPERATION, ie, ielen); - if (tmp && tmp->datalen >= sizeof(*he_oper) + 1 && - tmp->datalen >= ieee80211_he_oper_size(tmp->data + 1)) { - const struct ieee80211_he_6ghz_oper *he_6ghz_oper; - - he_oper = (void *)&tmp->data[1]; - he_6ghz_oper = ieee80211_he_6ghz_oper(he_oper); - - if (!he_6ghz_oper) - return false; - - switch (u8_get_bits(he_6ghz_oper->control, - IEEE80211_HE_6GHZ_OPER_CTRL_REG_INFO)) { - case IEEE80211_6GHZ_CTRL_REG_LPI_AP: - case IEEE80211_6GHZ_CTRL_REG_INDOOR_LPI_AP: - return true; - case IEEE80211_6GHZ_CTRL_REG_SP_AP: - case IEEE80211_6GHZ_CTRL_REG_INDOOR_SP_AP: - return !(flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT); - case IEEE80211_6GHZ_CTRL_REG_VLP_AP: - return !(flags & IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT); - default: - return false; - } + switch (cfg80211_get_6ghz_power_type(elems, elems_len)) { + case IEEE80211_REG_LPI_AP: + return true; + case IEEE80211_REG_SP_AP: + return !(flags & IEEE80211_CHAN_NO_6GHZ_AFC_CLIENT); + case IEEE80211_REG_VLP_AP: + return !(flags & IEEE80211_CHAN_NO_6GHZ_VLP_CLIENT); + default: + return false; } - return false; } /* Returned bss is reference counted and must be cleaned up appropriately. */ @@ -3349,19 +3391,14 @@ void cfg80211_update_assoc_bss_entry(struct wireless_dev *wdev, if (!WARN_ON(!__cfg80211_unlink_bss(rdev, new))) rdev->bss_generation++; } - - rb_erase(&cbss->rbn, &rdev->bss_tree); - rb_insert_bss(rdev, cbss); - rdev->bss_generation++; + cfg80211_rehash_bss(rdev, cbss); list_for_each_entry_safe(nontrans_bss, tmp, &cbss->pub.nontrans_list, nontrans_list) { bss = bss_from_pub(nontrans_bss); bss->pub.channel = chan; - rb_erase(&bss->rbn, &rdev->bss_tree); - rb_insert_bss(rdev, bss); - rdev->bss_generation++; + cfg80211_rehash_bss(rdev, bss); } done: diff --git a/net/wireless/sme.c b/net/wireless/sme.c index a8ad55f11133..e419aa8c4a5a 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -5,7 +5,7 @@ * (for nl80211's connect() and wext) * * Copyright 2009 Johannes Berg - * Copyright (C) 2009, 2020, 2022-2023 Intel Corporation. All rights reserved. + * Copyright (C) 2009, 2020, 2022-2024 Intel Corporation. All rights reserved. 
* Copyright 2017 Intel Deutschland GmbH */ @@ -130,7 +130,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev) rdev->scan_req = request; - err = rdev_scan(rdev, request); + err = cfg80211_scan(rdev); if (!err) { wdev->conn->state = CFG80211_CONN_SCANNING; nl80211_send_scan_start(rdev, wdev); diff --git a/net/wireless/tests/chan.c b/net/wireless/tests/chan.c index d02258ac2dab..74bbee25085f 100644 --- a/net/wireless/tests/chan.c +++ b/net/wireless/tests/chan.c @@ -113,16 +113,16 @@ static const struct chandef_compat_case { }, }, { - .desc = "different primary 160 MHz", + .desc = "different primary 320 MHz", .c1 = { .width = NL80211_CHAN_WIDTH_320, .chan = &chan_6ghz_105, - .center_freq1 = 6475 + 150, + .center_freq1 = 6475 + 110, }, .c2 = { .width = NL80211_CHAN_WIDTH_320, .chan = &chan_6ghz_105, - .center_freq1 = 6475 - 10, + .center_freq1 = 6475 - 50, }, }, { @@ -131,12 +131,12 @@ static const struct chandef_compat_case { .c1 = { .width = NL80211_CHAN_WIDTH_160, .chan = &chan_6ghz_105, - .center_freq1 = 6475 + 70, + .center_freq1 = 6475 + 30, }, .c2 = { .width = NL80211_CHAN_WIDTH_320, .chan = &chan_6ghz_105, - .center_freq1 = 6475 - 10, + .center_freq1 = 6475 - 50, }, .compat = true, }, @@ -145,12 +145,12 @@ static const struct chandef_compat_case { .c1 = { .width = NL80211_CHAN_WIDTH_160, .chan = &chan_6ghz_105, - .center_freq1 = 6475 + 70, + .center_freq1 = 6475 + 30, }, .c2 = { .width = NL80211_CHAN_WIDTH_320, .chan = &chan_6ghz_105, - .center_freq1 = 6475 - 10, + .center_freq1 = 6475 - 50, .punctured = 0xf, }, .compat = true, @@ -160,13 +160,13 @@ static const struct chandef_compat_case { .c1 = { .width = NL80211_CHAN_WIDTH_160, .chan = &chan_6ghz_105, - .center_freq1 = 6475 + 70, + .center_freq1 = 6475 + 30, .punctured = 0xc0, }, .c2 = { .width = NL80211_CHAN_WIDTH_320, .chan = &chan_6ghz_105, - .center_freq1 = 6475 - 10, + .center_freq1 = 6475 - 50, .punctured = 0xc000, }, .compat = true, @@ -176,13 +176,13 @@ static const struct chandef_compat_case { .c1 = { .width = NL80211_CHAN_WIDTH_160, .chan = &chan_6ghz_105, - .center_freq1 = 6475 + 70, + .center_freq1 = 6475 + 30, .punctured = 0x80, }, .c2 = { .width = NL80211_CHAN_WIDTH_320, .chan = &chan_6ghz_105, - .center_freq1 = 6475 - 10, + .center_freq1 = 6475 - 50, .punctured = 0xc000, }, }, diff --git a/net/wireless/trace.h b/net/wireless/trace.h index 87986170d1b1..5c26f065bd68 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -242,6 +242,80 @@ } \ } while (0) +/************************************************************* + * wiphy work traces * + *************************************************************/ + +DECLARE_EVENT_CLASS(wiphy_work_event, + TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work), + TP_ARGS(wiphy, work), + TP_STRUCT__entry( + WIPHY_ENTRY + __field(void *, instance) + __field(void *, func) + ), + TP_fast_assign( + WIPHY_ASSIGN; + __entry->instance = work; + __entry->func = work ? 
work->func : NULL; + ), + TP_printk(WIPHY_PR_FMT " instance=%p func=%pS", + WIPHY_PR_ARG, __entry->instance, __entry->func) +); + +DEFINE_EVENT(wiphy_work_event, wiphy_work_queue, + TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work), + TP_ARGS(wiphy, work) +); + +DEFINE_EVENT(wiphy_work_event, wiphy_work_run, + TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work), + TP_ARGS(wiphy, work) +); + +DEFINE_EVENT(wiphy_work_event, wiphy_work_cancel, + TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work), + TP_ARGS(wiphy, work) +); + +DEFINE_EVENT(wiphy_work_event, wiphy_work_flush, + TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work), + TP_ARGS(wiphy, work) +); + +TRACE_EVENT(wiphy_delayed_work_queue, + TP_PROTO(struct wiphy *wiphy, struct wiphy_work *work, + unsigned long delay), + TP_ARGS(wiphy, work, delay), + TP_STRUCT__entry( + WIPHY_ENTRY + __field(void *, instance) + __field(void *, func) + __field(unsigned long, delay) + ), + TP_fast_assign( + WIPHY_ASSIGN; + __entry->instance = work; + __entry->func = work->func; + __entry->delay = delay; + ), + TP_printk(WIPHY_PR_FMT " instance=%p func=%pS delay=%ld", + WIPHY_PR_ARG, __entry->instance, __entry->func, + __entry->delay) +); + +TRACE_EVENT(wiphy_work_worker_start, + TP_PROTO(struct wiphy *wiphy), + TP_ARGS(wiphy), + TP_STRUCT__entry( + WIPHY_ENTRY + ), + TP_fast_assign( + WIPHY_ASSIGN; + ), + TP_printk(WIPHY_PR_FMT, WIPHY_PR_ARG) +); + /************************************************************* * rdev->ops traces * *************************************************************/ @@ -2889,6 +2963,75 @@ DEFINE_EVENT(wiphy_wdev_link_evt, rdev_del_intf_link, TP_ARGS(wiphy, wdev, link_id) ); +TRACE_EVENT(rdev_del_link_station, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + struct link_station_del_parameters *params), + TP_ARGS(wiphy, netdev, params), + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + __array(u8, mld_mac, 6) + __field(u32, link_id) + ), + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + memset(__entry->mld_mac, 0, 6); + if (params->mld_mac) + memcpy(__entry->mld_mac, params->mld_mac, 6); + __entry->link_id = params->link_id; + ), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM" + ", link id: %u", + WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mld_mac, + __entry->link_id) +); + +TRACE_EVENT(rdev_set_hw_timestamp, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_set_hw_timestamp *hwts), + + TP_ARGS(wiphy, netdev, hwts), + + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + MAC_ENTRY(macaddr) + __field(bool, enable) + ), + + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + MAC_ASSIGN(macaddr, hwts->macaddr); + __entry->enable = hwts->enable; + ), + + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mac %pM, enable: %u", + WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->macaddr, + __entry->enable) +); + +TRACE_EVENT(rdev_set_ttlm, + TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_ttlm_params *params), + TP_ARGS(wiphy, netdev, params), + TP_STRUCT__entry( + WIPHY_ENTRY + NETDEV_ENTRY + __array(u8, dlink, sizeof(u16) * 8) + __array(u8, ulink, sizeof(u16) * 8) + ), + TP_fast_assign( + WIPHY_ASSIGN; + NETDEV_ASSIGN; + memcpy(__entry->dlink, params->dlink, sizeof(params->dlink)); + memcpy(__entry->ulink, params->ulink, sizeof(params->ulink)); + ), + TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, + WIPHY_PR_ARG, NETDEV_PR_ARG) +); + /************************************************************* * cfg80211 exported functions traces * 
*************************************************************/ @@ -3246,23 +3389,26 @@ TRACE_EVENT(cfg80211_cqm_rssi_notify, TRACE_EVENT(cfg80211_reg_can_beacon, TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, - enum nl80211_iftype iftype, bool check_no_ir), - TP_ARGS(wiphy, chandef, iftype, check_no_ir), + enum nl80211_iftype iftype, u32 prohibited_flags, + u32 permitting_flags), + TP_ARGS(wiphy, chandef, iftype, prohibited_flags, permitting_flags), TP_STRUCT__entry( WIPHY_ENTRY CHAN_DEF_ENTRY __field(enum nl80211_iftype, iftype) - __field(bool, check_no_ir) + __field(u32, prohibited_flags) + __field(u32, permitting_flags) ), TP_fast_assign( WIPHY_ASSIGN; CHAN_DEF_ASSIGN(chandef); __entry->iftype = iftype; - __entry->check_no_ir = check_no_ir; + __entry->prohibited_flags = prohibited_flags; + __entry->permitting_flags = permitting_flags; ), - TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d check_no_ir=%s", + TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d prohibited_flags=0x%x permitting_flags=0x%x", WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype, - BOOL_TO_STR(__entry->check_no_ir)) + __entry->prohibited_flags, __entry->permitting_flags) ); TRACE_EVENT(cfg80211_chandef_dfs_required, @@ -3923,55 +4069,6 @@ DEFINE_EVENT(link_station_add_mod, rdev_mod_link_station, TP_ARGS(wiphy, netdev, params) ); -TRACE_EVENT(rdev_del_link_station, - TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, - struct link_station_del_parameters *params), - TP_ARGS(wiphy, netdev, params), - TP_STRUCT__entry( - WIPHY_ENTRY - NETDEV_ENTRY - __array(u8, mld_mac, 6) - __field(u32, link_id) - ), - TP_fast_assign( - WIPHY_ASSIGN; - NETDEV_ASSIGN; - memset(__entry->mld_mac, 0, 6); - if (params->mld_mac) - memcpy(__entry->mld_mac, params->mld_mac, 6); - __entry->link_id = params->link_id; - ), - TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", station mac: %pM" - ", link id: %u", - WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->mld_mac, - __entry->link_id) -); - -TRACE_EVENT(rdev_set_hw_timestamp, - TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, - struct cfg80211_set_hw_timestamp *hwts), - - TP_ARGS(wiphy, netdev, hwts), - - TP_STRUCT__entry( - WIPHY_ENTRY - NETDEV_ENTRY - MAC_ENTRY(macaddr) - __field(bool, enable) - ), - - TP_fast_assign( - WIPHY_ASSIGN; - NETDEV_ASSIGN; - MAC_ASSIGN(macaddr, hwts->macaddr); - __entry->enable = hwts->enable; - ), - - TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", mac %pM, enable: %u", - WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->macaddr, - __entry->enable) -); - TRACE_EVENT(cfg80211_links_removed, TP_PROTO(struct net_device *netdev, u16 link_mask), TP_ARGS(netdev, link_mask), @@ -3987,26 +4084,6 @@ TRACE_EVENT(cfg80211_links_removed, __entry->link_mask) ); -TRACE_EVENT(rdev_set_ttlm, - TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, - struct cfg80211_ttlm_params *params), - TP_ARGS(wiphy, netdev, params), - TP_STRUCT__entry( - WIPHY_ENTRY - NETDEV_ENTRY - __array(u8, dlink, sizeof(u16) * 8) - __array(u8, ulink, sizeof(u16) * 8) - ), - TP_fast_assign( - WIPHY_ASSIGN; - NETDEV_ASSIGN; - memcpy(__entry->dlink, params->dlink, sizeof(params->dlink)); - memcpy(__entry->ulink, params->ulink, sizeof(params->ulink)); - ), - TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT, - WIPHY_PR_ARG, NETDEV_PR_ARG) -); - #endif /* !__RDEV_OPS_TRACE || TRACE_HEADER_MULTI_READ */ #undef TRACE_INCLUDE_PATH diff --git a/net/wireless/util.c b/net/wireless/util.c index 082c6f9c5416..9a7c3adc8a3b 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -1504,7 +1504,7 @@ static 
u32 cfg80211_calculate_bitrate_he(struct rate_info *rate) 5120, /* 0.833333... */ }; u32 rates_160M[3] = { 960777777, 907400000, 816666666 }; - u32 rates_969[3] = { 480388888, 453700000, 408333333 }; + u32 rates_996[3] = { 480388888, 453700000, 408333333 }; u32 rates_484[3] = { 229411111, 216666666, 195000000 }; u32 rates_242[3] = { 114711111, 108333333, 97500000 }; u32 rates_106[3] = { 40000000, 37777777, 34000000 }; @@ -1524,12 +1524,14 @@ static u32 cfg80211_calculate_bitrate_he(struct rate_info *rate) if (WARN_ON_ONCE(rate->nss < 1 || rate->nss > 8)) return 0; - if (rate->bw == RATE_INFO_BW_160) + if (rate->bw == RATE_INFO_BW_160 || + (rate->bw == RATE_INFO_BW_HE_RU && + rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_2x996)) result = rates_160M[rate->he_gi]; else if (rate->bw == RATE_INFO_BW_80 || (rate->bw == RATE_INFO_BW_HE_RU && rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_996)) - result = rates_969[rate->he_gi]; + result = rates_996[rate->he_gi]; else if (rate->bw == RATE_INFO_BW_40 || (rate->bw == RATE_INFO_BW_HE_RU && rate->he_ru_alloc == NL80211_RATE_INFO_HE_RU_ALLOC_484)) @@ -2305,13 +2307,16 @@ static int cfg80211_wdev_bi(struct wireless_dev *wdev) static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int, u32 *beacon_int_gcd, - bool *beacon_int_different) + bool *beacon_int_different, + int radio_idx) { + struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; *beacon_int_gcd = 0; *beacon_int_different = false; + rdev = wiphy_to_rdev(wiphy); list_for_each_entry(wdev, &wiphy->wdev_list, list) { int wdev_bi; @@ -2319,6 +2324,11 @@ static void cfg80211_calculate_bi_data(struct wiphy *wiphy, u32 new_beacon_int, if (wdev->valid_links) continue; + /* skip wdevs not active on the given wiphy radio */ + if (radio_idx >= 0 && + !(rdev_get_radio_mask(rdev, wdev->netdev) & BIT(radio_idx))) + continue; + wdev_bi = cfg80211_wdev_bi(wdev); if (!wdev_bi) @@ -2366,14 +2376,19 @@ int cfg80211_iter_combinations(struct wiphy *wiphy, void *data), void *data) { + const struct wiphy_radio *radio = NULL; + const struct ieee80211_iface_combination *c, *cs; const struct ieee80211_regdomain *regdom; enum nl80211_dfs_regions region = 0; - int i, j, iftype; + int i, j, n, iftype; int num_interfaces = 0; u32 used_iftypes = 0; u32 beacon_int_gcd; bool beacon_int_different; + if (params->radio_idx >= 0) + radio = &wiphy->radio[params->radio_idx]; + /* * This is a bit strange, since the iteration used to rely only on * the data given by the driver, but here it now relies on context, @@ -2385,7 +2400,8 @@ int cfg80211_iter_combinations(struct wiphy *wiphy, * interfaces (while being brought up) and channel/radar data. 
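 *
 * Illustrative note: cfg80211_calculate_bi_data() computes the GCD of
 * the beacon intervals of the currently active wdevs (e.g. two wdevs
 * beaconing at 100 and 150 TU yield beacon_int_gcd == 50 and mark the
 * intervals as different); with the radio_idx parameter added below,
 * a radio_idx >= 0 restricts this to wdevs whose rdev_get_radio_mask()
 * covers that radio.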
*/ cfg80211_calculate_bi_data(wiphy, params->new_beacon_int, - &beacon_int_gcd, &beacon_int_different); + &beacon_int_gcd, &beacon_int_different, + params->radio_idx); if (params->radar_detect) { rcu_read_lock(); @@ -2402,13 +2418,18 @@ int cfg80211_iter_combinations(struct wiphy *wiphy, used_iftypes |= BIT(iftype); } - for (i = 0; i < wiphy->n_iface_combinations; i++) { - const struct ieee80211_iface_combination *c; + if (radio) { + cs = radio->iface_combinations; + n = radio->n_iface_combinations; + } else { + cs = wiphy->iface_combinations; + n = wiphy->n_iface_combinations; + } + for (i = 0; i < n; i++) { struct ieee80211_iface_limit *limits; u32 all_iftypes = 0; - c = &wiphy->iface_combinations[i]; - + c = &cs[i]; if (num_interfaces > c->max_interfaces) continue; if (params->num_different_channels > c->num_different_channels) @@ -2865,3 +2886,38 @@ cfg80211_get_iftype_ext_capa(struct wiphy *wiphy, enum nl80211_iftype type) return NULL; } EXPORT_SYMBOL(cfg80211_get_iftype_ext_capa); + +static bool +ieee80211_radio_freq_range_valid(const struct wiphy_radio *radio, + u32 freq, u32 width) +{ + const struct wiphy_radio_freq_range *r; + int i; + + for (i = 0; i < radio->n_freq_range; i++) { + r = &radio->freq_range[i]; + if (freq - width / 2 >= r->start_freq && + freq + width / 2 <= r->end_freq) + return true; + } + + return false; +} + +bool cfg80211_radio_chandef_valid(const struct wiphy_radio *radio, + const struct cfg80211_chan_def *chandef) +{ + u32 freq, width; + + freq = ieee80211_chandef_to_khz(chandef); + width = nl80211_chan_width_to_mhz(chandef->width); + if (!ieee80211_radio_freq_range_valid(radio, freq, width)) + return false; + + freq = MHZ_TO_KHZ(chandef->center_freq2); + if (freq && !ieee80211_radio_freq_range_valid(radio, freq, width)) + return false; + + return true; +} +EXPORT_SYMBOL(cfg80211_radio_chandef_valid); diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index 7d1c0986f9bb..7e16336044b2 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -35,8 +35,6 @@ #define TX_BATCH_SIZE 32 #define MAX_PER_SOCKET_BUDGET (TX_BATCH_SIZE) -static DEFINE_PER_CPU(struct list_head, xskmap_flush_list); - void xsk_set_rx_need_wakeup(struct xsk_buff_pool *pool) { if (pool->cached_need_wakeup & XDP_WAKEUP_RX) @@ -372,22 +370,23 @@ static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp) int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp) { - struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list); int err; err = xsk_rcv(xs, xdp); if (err) return err; - if (!xs->flush_node.prev) + if (!xs->flush_node.prev) { + struct list_head *flush_list = bpf_net_ctx_get_xskmap_flush_list(); + list_add(&xs->flush_node, flush_list); + } return 0; } -void __xsk_map_flush(void) +void __xsk_map_flush(struct list_head *flush_list) { - struct list_head *flush_list = this_cpu_ptr(&xskmap_flush_list); struct xdp_sock *xs, *tmp; list_for_each_entry_safe(xs, tmp, flush_list, flush_node) { @@ -396,16 +395,6 @@ void __xsk_map_flush(void) } } -#ifdef CONFIG_DEBUG_NET -bool xsk_map_check_flush(void) -{ - if (list_empty(this_cpu_ptr(&xskmap_flush_list))) - return false; - __xsk_map_flush(); - return true; -} -#endif - void xsk_tx_completed(struct xsk_buff_pool *pool, u32 nb_entries) { xskq_prod_submit_n(pool->cq, nb_entries); @@ -1772,7 +1761,7 @@ static struct pernet_operations xsk_net_ops = { static int __init xsk_init(void) { - int err, cpu; + int err; err = proto_register(&xsk_proto, 0 /* no slab */); if (err) @@ -1790,8 +1779,6 @@ static int __init xsk_init(void) if (err) goto out_pernet; - 
for_each_possible_cpu(cpu) - INIT_LIST_HEAD(&per_cpu(xskmap_flush_list, cpu)); return 0; out_pernet: diff --git a/net/xfrm/Makefile b/net/xfrm/Makefile index 547cec77ba03..512e0b2f8514 100644 --- a/net/xfrm/Makefile +++ b/net/xfrm/Makefile @@ -13,7 +13,8 @@ endif obj-$(CONFIG_XFRM) := xfrm_policy.o xfrm_state.o xfrm_hash.o \ xfrm_input.o xfrm_output.o \ - xfrm_sysctl.o xfrm_replay.o xfrm_device.o + xfrm_sysctl.o xfrm_replay.o xfrm_device.o \ + xfrm_nat_keepalive.o obj-$(CONFIG_XFRM_STATISTICS) += xfrm_proc.o obj-$(CONFIG_XFRM_ALGO) += xfrm_algo.o obj-$(CONFIG_XFRM_USER) += xfrm_user.o diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c index 703d4172c7d7..91357ccaf4af 100644 --- a/net/xfrm/xfrm_compat.c +++ b/net/xfrm/xfrm_compat.c @@ -131,6 +131,7 @@ static const struct nla_policy compat_policy[XFRMA_MAX+1] = { [XFRMA_IF_ID] = { .type = NLA_U32 }, [XFRMA_MTIMER_THRESH] = { .type = NLA_U32 }, [XFRMA_SA_DIR] = NLA_POLICY_RANGE(NLA_U8, XFRM_SA_DIR_IN, XFRM_SA_DIR_OUT), + [XFRMA_NAT_KEEPALIVE_INTERVAL] = { .type = NLA_U32 }, }; static struct nlmsghdr *xfrm_nlmsg_put_compat(struct sk_buff *skb, @@ -280,9 +281,10 @@ static int xfrm_xlate64_attr(struct sk_buff *dst, const struct nlattr *src) case XFRMA_IF_ID: case XFRMA_MTIMER_THRESH: case XFRMA_SA_DIR: + case XFRMA_NAT_KEEPALIVE_INTERVAL: return xfrm_nla_cpy(dst, src, nla_len(src)); default: - BUILD_BUG_ON(XFRMA_MAX != XFRMA_SA_DIR); + BUILD_BUG_ON(XFRMA_MAX != XFRMA_NAT_KEEPALIVE_INTERVAL); pr_warn_once("unsupported nla_type %d\n", src->nla_type); return -EOPNOTSUPP; } @@ -437,7 +439,7 @@ static int xfrm_xlate32_attr(void *dst, const struct nlattr *nla, int err; if (type > XFRMA_MAX) { - BUILD_BUG_ON(XFRMA_MAX != XFRMA_SA_DIR); + BUILD_BUG_ON(XFRMA_MAX != XFRMA_NAT_KEEPALIVE_INTERVAL); NL_SET_ERR_MSG(extack, "Bad attribute"); return -EOPNOTSUPP; } diff --git a/net/xfrm/xfrm_device.c b/net/xfrm/xfrm_device.c index 2455a76a1cff..9a44d363ba62 100644 --- a/net/xfrm/xfrm_device.c +++ b/net/xfrm/xfrm_device.c @@ -261,9 +261,9 @@ int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, is_packet_offload = xuo->flags & XFRM_OFFLOAD_PACKET; - /* We don't yet support UDP encapsulation and TFC padding. */ - if ((!is_packet_offload && x->encap) || x->tfcpad) { - NL_SET_ERR_MSG(extack, "Encapsulation and TFC padding can't be offloaded"); + /* We don't yet support TFC padding. 
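+ * UDP encapsulation (ESP in UDP, as used for NAT traversal) is no
+ * longer rejected here: the old encap check was dropped, so only a
+ * nonzero x->tfcpad still fails validation below.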
*/ + if (x->tfcpad) { + NL_SET_ERR_MSG(extack, "TFC padding can't be offloaded"); return -EINVAL; } diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c index d2ea18dcb0cb..749e7eea99e4 100644 --- a/net/xfrm/xfrm_input.c +++ b/net/xfrm/xfrm_input.c @@ -471,14 +471,10 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) struct xfrm_offload *xo = xfrm_offload(skb); struct sec_path *sp; - if (encap_type < 0 || (xo && xo->flags & XFRM_GRO)) { + if (encap_type < 0 || (xo && (xo->flags & XFRM_GRO || encap_type == 0 || + encap_type == UDP_ENCAP_ESPINUDP))) { x = xfrm_input_state(skb); - if (unlikely(x->dir && x->dir != XFRM_SA_DIR_IN)) { - XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEDIRERROR); - goto drop; - } - if (unlikely(x->km.state != XFRM_STATE_VALID)) { if (x->km.state == XFRM_STATE_ACQ) XFRM_INC_STATS(net, LINUX_MIB_XFRMACQUIREERROR); @@ -585,8 +581,11 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type) } if (unlikely(x->dir && x->dir != XFRM_SA_DIR_IN)) { + secpath_reset(skb); XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEDIRERROR); + xfrm_audit_state_notfound(skb, family, spi, seq); xfrm_state_put(x); + x = NULL; goto drop; } diff --git a/net/xfrm/xfrm_nat_keepalive.c b/net/xfrm/xfrm_nat_keepalive.c new file mode 100644 index 000000000000..82f0a301683f --- /dev/null +++ b/net/xfrm/xfrm_nat_keepalive.c @@ -0,0 +1,292 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * xfrm_nat_keepalive.c + * + * (c) 2024 Eyal Birger + */ + +#include +#include +#include + +static DEFINE_PER_CPU(struct sock *, nat_keepalive_sk_ipv4); +#if IS_ENABLED(CONFIG_IPV6) +static DEFINE_PER_CPU(struct sock *, nat_keepalive_sk_ipv6); +#endif + +struct nat_keepalive { + struct net *net; + u16 family; + xfrm_address_t saddr; + xfrm_address_t daddr; + __be16 encap_sport; + __be16 encap_dport; + __u32 smark; +}; + +static void nat_keepalive_init(struct nat_keepalive *ka, struct xfrm_state *x) +{ + ka->net = xs_net(x); + ka->family = x->props.family; + ka->saddr = x->props.saddr; + ka->daddr = x->id.daddr; + ka->encap_sport = x->encap->encap_sport; + ka->encap_dport = x->encap->encap_dport; + ka->smark = xfrm_smark_get(0, x); +} + +static int nat_keepalive_send_ipv4(struct sk_buff *skb, + struct nat_keepalive *ka) +{ + struct net *net = ka->net; + struct flowi4 fl4; + struct rtable *rt; + struct sock *sk; + __u8 tos = 0; + int err; + + flowi4_init_output(&fl4, 0 /* oif */, skb->mark, tos, + RT_SCOPE_UNIVERSE, IPPROTO_UDP, 0, + ka->daddr.a4, ka->saddr.a4, ka->encap_dport, + ka->encap_sport, sock_net_uid(net, NULL)); + + rt = ip_route_output_key(net, &fl4); + if (IS_ERR(rt)) + return PTR_ERR(rt); + + skb_dst_set(skb, &rt->dst); + + sk = *this_cpu_ptr(&nat_keepalive_sk_ipv4); + sock_net_set(sk, net); + err = ip_build_and_send_pkt(skb, sk, fl4.saddr, fl4.daddr, NULL, tos); + sock_net_set(sk, &init_net); + return err; +} + +#if IS_ENABLED(CONFIG_IPV6) +static int nat_keepalive_send_ipv6(struct sk_buff *skb, + struct nat_keepalive *ka, + struct udphdr *uh) +{ + struct net *net = ka->net; + struct dst_entry *dst; + struct flowi6 fl6; + struct sock *sk; + __wsum csum; + int err; + + csum = skb_checksum(skb, 0, skb->len, 0); + uh->check = csum_ipv6_magic(&ka->saddr.in6, &ka->daddr.in6, + skb->len, IPPROTO_UDP, csum); + if (uh->check == 0) + uh->check = CSUM_MANGLED_0; + + memset(&fl6, 0, sizeof(fl6)); + fl6.flowi6_mark = skb->mark; + fl6.saddr = ka->saddr.in6; + fl6.daddr = ka->daddr.in6; + fl6.flowi6_proto = IPPROTO_UDP; + fl6.fl6_sport = ka->encap_sport; + fl6.fl6_dport = 
ka->encap_dport; + + sk = *this_cpu_ptr(&nat_keepalive_sk_ipv6); + sock_net_set(sk, net); + dst = ipv6_stub->ipv6_dst_lookup_flow(net, sk, &fl6, NULL); + if (IS_ERR(dst)) + return PTR_ERR(dst); + + skb_dst_set(skb, dst); + err = ipv6_stub->ip6_xmit(sk, skb, &fl6, skb->mark, NULL, 0, 0); + sock_net_set(sk, &init_net); + return err; +} +#endif + +static void nat_keepalive_send(struct nat_keepalive *ka) +{ + const int nat_ka_hdrs_len = max(sizeof(struct iphdr), + sizeof(struct ipv6hdr)) + + sizeof(struct udphdr); + const u8 nat_ka_payload = 0xFF; + int err = -EAFNOSUPPORT; + struct sk_buff *skb; + struct udphdr *uh; + + skb = alloc_skb(nat_ka_hdrs_len + sizeof(nat_ka_payload), GFP_ATOMIC); + if (unlikely(!skb)) + return; + + skb_reserve(skb, nat_ka_hdrs_len); + + skb_put_u8(skb, nat_ka_payload); + + uh = skb_push(skb, sizeof(*uh)); + uh->source = ka->encap_sport; + uh->dest = ka->encap_dport; + uh->len = htons(skb->len); + uh->check = 0; + + skb->mark = ka->smark; + + switch (ka->family) { + case AF_INET: + err = nat_keepalive_send_ipv4(skb, ka); + break; +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: + err = nat_keepalive_send_ipv6(skb, ka, uh); + break; +#endif + } + if (err) + kfree_skb(skb); +} + +struct nat_keepalive_work_ctx { + time64_t next_run; + time64_t now; +}; + +static int nat_keepalive_work_single(struct xfrm_state *x, int count, void *ptr) +{ + struct nat_keepalive_work_ctx *ctx = ptr; + bool send_keepalive = false; + struct nat_keepalive ka; + time64_t next_run; + u32 interval; + int delta; + + interval = x->nat_keepalive_interval; + if (!interval) + return 0; + + spin_lock(&x->lock); + + delta = (int)(ctx->now - x->lastused); + if (delta < interval) { + x->nat_keepalive_expiration = ctx->now + interval - delta; + next_run = x->nat_keepalive_expiration; + } else if (x->nat_keepalive_expiration > ctx->now) { + next_run = x->nat_keepalive_expiration; + } else { + next_run = ctx->now + interval; + nat_keepalive_init(&ka, x); + send_keepalive = true; + } + + spin_unlock(&x->lock); + + if (send_keepalive) + nat_keepalive_send(&ka); + + if (!ctx->next_run || next_run < ctx->next_run) + ctx->next_run = next_run; + return 0; +} + +static void nat_keepalive_work(struct work_struct *work) +{ + struct nat_keepalive_work_ctx ctx; + struct xfrm_state_walk walk; + struct net *net; + + ctx.next_run = 0; + ctx.now = ktime_get_real_seconds(); + + net = container_of(work, struct net, xfrm.nat_keepalive_work.work); + xfrm_state_walk_init(&walk, IPPROTO_ESP, NULL); + xfrm_state_walk(net, &walk, nat_keepalive_work_single, &ctx); + xfrm_state_walk_done(&walk, net); + if (ctx.next_run) + schedule_delayed_work(&net->xfrm.nat_keepalive_work, + (ctx.next_run - ctx.now) * HZ); +} + +static int nat_keepalive_sk_init(struct sock * __percpu *socks, + unsigned short family) +{ + struct sock *sk; + int err, i; + + for_each_possible_cpu(i) { + err = inet_ctl_sock_create(&sk, family, SOCK_RAW, IPPROTO_UDP, + &init_net); + if (err < 0) + goto err; + + *per_cpu_ptr(socks, i) = sk; + } + + return 0; +err: + for_each_possible_cpu(i) + inet_ctl_sock_destroy(*per_cpu_ptr(socks, i)); + return err; +} + +static void nat_keepalive_sk_fini(struct sock * __percpu *socks) +{ + int i; + + for_each_possible_cpu(i) + inet_ctl_sock_destroy(*per_cpu_ptr(socks, i)); +} + +void xfrm_nat_keepalive_state_updated(struct xfrm_state *x) +{ + struct net *net; + + if (!x->nat_keepalive_interval) + return; + + net = xs_net(x); + schedule_delayed_work(&net->xfrm.nat_keepalive_work, 0); +} + +int __net_init 
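nat_keepalive_work_single() sends only when the SA has been idle for at least a full interval; otherwise it records when the current idle period would end so the delayed work can be rescheduled exactly once. For example, with interval = 20 and an SA last used 5 seconds ago, nothing is sent and the next run lands 15 seconds out. A standalone restatement of that decision, times in seconds (sketch, not kernel API):

    /* returns when the work should run next; *send says whether to probe now */
    static long long next_keepalive_run(long long now, long long lastused,
                                        long long expiration,
                                        unsigned int interval, int *send)
    {
        long long delta = now - lastused;

        *send = 0;
        if (delta < interval)          /* SA carried traffic recently */
            return now + interval - delta;
        if (expiration > now)          /* idle, but a deadline is already set */
            return expiration;
        *send = 1;                     /* idle a full interval: transmit */
        return now + interval;
    }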
xfrm_nat_keepalive_net_init(struct net *net) +{ + INIT_DELAYED_WORK(&net->xfrm.nat_keepalive_work, nat_keepalive_work); + return 0; +} + +int xfrm_nat_keepalive_net_fini(struct net *net) +{ + cancel_delayed_work_sync(&net->xfrm.nat_keepalive_work); + return 0; +} + +int xfrm_nat_keepalive_init(unsigned short family) +{ + int err = -EAFNOSUPPORT; + + switch (family) { + case AF_INET: + err = nat_keepalive_sk_init(&nat_keepalive_sk_ipv4, PF_INET); + break; +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: + err = nat_keepalive_sk_init(&nat_keepalive_sk_ipv6, PF_INET6); + break; +#endif + } + + if (err) + pr_err("xfrm nat keepalive init: failed to init err:%d\n", err); + return err; +} +EXPORT_SYMBOL_GPL(xfrm_nat_keepalive_init); + +void xfrm_nat_keepalive_fini(unsigned short family) +{ + switch (family) { + case AF_INET: + nat_keepalive_sk_fini(&nat_keepalive_sk_ipv4); + break; +#if IS_ENABLED(CONFIG_IPV6) + case AF_INET6: + nat_keepalive_sk_fini(&nat_keepalive_sk_ipv6); + break; +#endif + } +} +EXPORT_SYMBOL_GPL(xfrm_nat_keepalive_fini); diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c index 66e07de2de35..c56c61b0c12e 100644 --- a/net/xfrm/xfrm_policy.c +++ b/net/xfrm/xfrm_policy.c @@ -452,6 +452,8 @@ EXPORT_SYMBOL(xfrm_policy_destroy); static void xfrm_policy_kill(struct xfrm_policy *policy) { + xfrm_dev_policy_delete(policy); + write_lock_bh(&policy->lock); policy->walk.dead = 1; write_unlock_bh(&policy->lock); @@ -1850,7 +1852,6 @@ again: __xfrm_policy_unlink(pol, dir); spin_unlock_bh(&net->xfrm.xfrm_policy_lock); - xfrm_dev_policy_delete(pol); cnt++; xfrm_audit_policy_delete(pol, 1, task_valid); xfrm_policy_kill(pol); @@ -1891,7 +1892,6 @@ again: __xfrm_policy_unlink(pol, dir); spin_unlock_bh(&net->xfrm.xfrm_policy_lock); - xfrm_dev_policy_delete(pol); cnt++; xfrm_audit_policy_delete(pol, 1, task_valid); xfrm_policy_kill(pol); @@ -2342,7 +2342,6 @@ int xfrm_policy_delete(struct xfrm_policy *pol, int dir) pol = __xfrm_policy_unlink(pol, dir); spin_unlock_bh(&net->xfrm.xfrm_policy_lock); if (pol) { - xfrm_dev_policy_delete(pol); xfrm_policy_kill(pol); return 0; } @@ -3718,12 +3717,15 @@ int __xfrm_policy_check(struct sock *sk, int dir, struct sk_buff *skb, pol = xfrm_in_fwd_icmp(skb, &fl, family, if_id); if (!pol) { + const bool is_crypto_offload = sp && + (xfrm_input_state(skb)->xso.type == XFRM_DEV_OFFLOAD_CRYPTO); + if (net->xfrm.policy_default[dir] == XFRM_USERPOLICY_BLOCK) { XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS); return 0; } - if (sp && secpath_has_nontransport(sp, 0, &xerr_idx)) { + if (sp && secpath_has_nontransport(sp, 0, &xerr_idx) && !is_crypto_offload) { xfrm_secpath_reject(xerr_idx, skb, &fl); XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOPOLS); return 0; @@ -4284,8 +4286,14 @@ static int __net_init xfrm_net_init(struct net *net) if (rv < 0) goto out_sysctl; + rv = xfrm_nat_keepalive_net_init(net); + if (rv < 0) + goto out_nat_keepalive; + return 0; +out_nat_keepalive: + xfrm_sysctl_fini(net); out_sysctl: xfrm_policy_fini(net); out_policy: @@ -4298,6 +4306,7 @@ out_statistics: static void __net_exit xfrm_net_exit(struct net *net) { + xfrm_nat_keepalive_net_fini(net); xfrm_sysctl_fini(net); xfrm_policy_fini(net); xfrm_state_fini(net); @@ -4359,6 +4368,7 @@ void __init xfrm_init(void) #endif register_xfrm_state_bpf(); + xfrm_nat_keepalive_init(AF_INET); } #ifdef CONFIG_AUDITSYSCALL diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 649bb739df0d..37478d36a8df 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -49,6 +49,7 @@ static struct 
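The probe assembled by nat_keepalive_send() is the NAT-T keepalive of RFC 3948, section 2.3: a UDP datagram from the encap source port to the encap destination port carrying the single octet 0xFF, which the peer's UDP encapsulation code recognizes and silently discards. For comparison, a user-space IKE daemon sending the same probe over its ESP-in-UDP socket needs only this (sketch; fd is assumed to be a UDP socket connected to the peer's NAT-T port, typically 4500):

    #include <sys/socket.h>

    static int send_nat_keepalive(int fd)
    {
        static const unsigned char marker = 0xff;    /* RFC 3948, section 2.3 */

        return send(fd, &marker, sizeof(marker), 0) == 1 ? 0 : -1;
    }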
kmem_cache *xfrm_state_cache __ro_after_init; static DECLARE_WORK(xfrm_state_gc_work, xfrm_state_gc_task); static HLIST_HEAD(xfrm_state_gc_list); +static HLIST_HEAD(xfrm_state_dev_gc_list); static inline bool xfrm_state_hold_rcu(struct xfrm_state __rcu *x) { @@ -214,6 +215,7 @@ static DEFINE_SPINLOCK(xfrm_state_afinfo_lock); static struct xfrm_state_afinfo __rcu *xfrm_state_afinfo[NPROTO]; static DEFINE_SPINLOCK(xfrm_state_gc_lock); +static DEFINE_SPINLOCK(xfrm_state_dev_gc_lock); int __xfrm_state_delete(struct xfrm_state *x); @@ -683,6 +685,41 @@ struct xfrm_state *xfrm_state_alloc(struct net *net) } EXPORT_SYMBOL(xfrm_state_alloc); +#ifdef CONFIG_XFRM_OFFLOAD +void xfrm_dev_state_delete(struct xfrm_state *x) +{ + struct xfrm_dev_offload *xso = &x->xso; + struct net_device *dev = READ_ONCE(xso->dev); + + if (dev) { + dev->xfrmdev_ops->xdo_dev_state_delete(x); + spin_lock_bh(&xfrm_state_dev_gc_lock); + hlist_add_head(&x->dev_gclist, &xfrm_state_dev_gc_list); + spin_unlock_bh(&xfrm_state_dev_gc_lock); + } +} +EXPORT_SYMBOL_GPL(xfrm_dev_state_delete); + +void xfrm_dev_state_free(struct xfrm_state *x) +{ + struct xfrm_dev_offload *xso = &x->xso; + struct net_device *dev = READ_ONCE(xso->dev); + + if (dev && dev->xfrmdev_ops) { + spin_lock_bh(&xfrm_state_dev_gc_lock); + if (!hlist_unhashed(&x->dev_gclist)) + hlist_del(&x->dev_gclist); + spin_unlock_bh(&xfrm_state_dev_gc_lock); + + if (dev->xfrmdev_ops->xdo_dev_state_free) + dev->xfrmdev_ops->xdo_dev_state_free(x); + WRITE_ONCE(xso->dev, NULL); + xso->type = XFRM_DEV_OFFLOAD_UNSPECIFIED; + netdev_put(dev, &xso->dev_tracker); + } +} +#endif + void __xfrm_state_destroy(struct xfrm_state *x, bool sync) { WARN_ON(x->km.state != XFRM_STATE_DEAD); @@ -715,6 +752,7 @@ int __xfrm_state_delete(struct xfrm_state *x) if (x->id.spi) hlist_del_rcu(&x->byspi); net->xfrm.state_num--; + xfrm_nat_keepalive_state_updated(x); spin_unlock(&net->xfrm.xfrm_state_lock); if (x->encap_sk) @@ -848,6 +886,9 @@ EXPORT_SYMBOL(xfrm_state_flush); int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid) { + struct xfrm_state *x; + struct hlist_node *tmp; + struct xfrm_dev_offload *xso; int i, err = 0, cnt = 0; spin_lock_bh(&net->xfrm.xfrm_state_lock); @@ -857,8 +898,6 @@ int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_vali err = -ESRCH; for (i = 0; i <= net->xfrm.state_hmask; i++) { - struct xfrm_state *x; - struct xfrm_dev_offload *xso; restart: hlist_for_each_entry(x, net->xfrm.state_bydst+i, bydst) { xso = &x->xso; @@ -868,6 +907,8 @@ restart: spin_unlock_bh(&net->xfrm.xfrm_state_lock); err = xfrm_state_delete(x); + xfrm_dev_state_free(x); + xfrm_audit_state_delete(x, err ? 
0 : 1, task_valid); xfrm_state_put(x); @@ -884,6 +925,24 @@ restart: out: spin_unlock_bh(&net->xfrm.xfrm_state_lock); + + spin_lock_bh(&xfrm_state_dev_gc_lock); +restart_gc: + hlist_for_each_entry_safe(x, tmp, &xfrm_state_dev_gc_list, dev_gclist) { + xso = &x->xso; + + if (xso->dev == dev) { + spin_unlock_bh(&xfrm_state_dev_gc_lock); + xfrm_dev_state_free(x); + spin_lock_bh(&xfrm_state_dev_gc_lock); + goto restart_gc; + } + + } + spin_unlock_bh(&xfrm_state_dev_gc_lock); + + xfrm_flush_gc(); + return err; } EXPORT_SYMBOL(xfrm_dev_state_flush); @@ -1273,8 +1332,7 @@ found: xso->dev = xdo->dev; xso->real_dev = xdo->real_dev; xso->flags = XFRM_DEV_OFFLOAD_FLAG_ACQ; - netdev_tracker_alloc(xso->dev, &xso->dev_tracker, - GFP_ATOMIC); + netdev_hold(xso->dev, &xso->dev_tracker, GFP_ATOMIC); error = xso->dev->xfrmdev_ops->xdo_dev_state_add(x, NULL); if (error) { xso->dir = 0; @@ -1453,6 +1511,7 @@ static void __xfrm_state_insert(struct xfrm_state *x) net->xfrm.state_num++; xfrm_hash_grow_check(net, x->bydst.next != NULL); + xfrm_nat_keepalive_state_updated(x); } /* net->xfrm.xfrm_state_lock is held */ @@ -2871,6 +2930,21 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload, goto error; } + if (x->nat_keepalive_interval) { + if (x->dir != XFRM_SA_DIR_OUT) { + NL_SET_ERR_MSG(extack, "NAT keepalive is only supported for outbound SAs"); + err = -EINVAL; + goto error; + } + + if (!x->encap || x->encap->encap_type != UDP_ENCAP_ESPINUDP) { + NL_SET_ERR_MSG(extack, + "NAT keepalive is only supported for UDP encapsulation"); + err = -EINVAL; + goto error; + } + } + error: return err; } diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index e83c687bd64e..55f039ec3d59 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -833,6 +833,10 @@ static struct xfrm_state *xfrm_state_construct(struct net *net, if (attrs[XFRMA_SA_DIR]) x->dir = nla_get_u8(attrs[XFRMA_SA_DIR]); + if (attrs[XFRMA_NAT_KEEPALIVE_INTERVAL]) + x->nat_keepalive_interval = + nla_get_u32(attrs[XFRMA_NAT_KEEPALIVE_INTERVAL]); + err = __xfrm_init_state(x, false, attrs[XFRMA_OFFLOAD_DEV], extack); if (err) goto error; @@ -1288,6 +1292,13 @@ static int copy_to_user_state_extra(struct xfrm_state *x, } if (x->dir) ret = nla_put_u8(skb, XFRMA_SA_DIR, x->dir); + + if (x->nat_keepalive_interval) { + ret = nla_put_u32(skb, XFRMA_NAT_KEEPALIVE_INTERVAL, + x->nat_keepalive_interval); + if (ret) + goto out; + } out: return ret; } @@ -2455,7 +2466,6 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh, NETLINK_CB(skb).portid); } } else { - xfrm_dev_policy_delete(xp); xfrm_audit_policy_delete(xp, err ? 
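The two __xfrm_init_state() checks added above collapse into one predicate: a keepalive interval only makes sense on an SA that originates traffic and that actually uses ESP-in-UDP encapsulation, because the sole purpose of the probe is to keep the UDP NAT mapping open. As a condensed sketch:

    /* sketch: the acceptance rule enforced by __xfrm_init_state() above */
    static bool nat_keepalive_config_ok(const struct xfrm_state *x)
    {
        if (!x->nat_keepalive_interval)
            return true;    /* feature not requested */
        return x->dir == XFRM_SA_DIR_OUT &&
               x->encap && x->encap->encap_type == UDP_ENCAP_ESPINUDP;
    }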
0 : 1, true); if (err != 0) @@ -3165,6 +3175,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_IF_ID] = { .type = NLA_U32 }, [XFRMA_MTIMER_THRESH] = { .type = NLA_U32 }, [XFRMA_SA_DIR] = NLA_POLICY_RANGE(NLA_U8, XFRM_SA_DIR_IN, XFRM_SA_DIR_OUT), + [XFRMA_NAT_KEEPALIVE_INTERVAL] = { .type = NLA_U32 }, }; EXPORT_SYMBOL_GPL(xfrma_policy); @@ -3474,6 +3485,9 @@ static inline unsigned int xfrm_sa_len(struct xfrm_state *x) if (x->dir) l += nla_total_size(sizeof(x->dir)); + if (x->nat_keepalive_interval) + l += nla_total_size(sizeof(x->nat_keepalive_interval)); + return l; } diff --git a/samples/bpf/cpustat_kern.c b/samples/bpf/cpustat_kern.c index 944f13fe164a..7ec7143e2757 100644 --- a/samples/bpf/cpustat_kern.c +++ b/samples/bpf/cpustat_kern.c @@ -211,7 +211,7 @@ int bpf_prog1(struct cpu_args *ctx) SEC("tracepoint/power/cpu_frequency") int bpf_prog2(struct cpu_args *ctx) { - u64 *pts, *cstate, *pstate, prev_state, cur_ts, delta; + u64 *pts, *cstate, *pstate, cur_ts, delta; u32 key, cpu, pstate_idx; u64 *val; @@ -232,7 +232,6 @@ int bpf_prog2(struct cpu_args *ctx) if (!cstate) return 0; - prev_state = *pstate; *pstate = ctx->state; if (!*pts) { diff --git a/scripts/Makefile.btf b/scripts/Makefile.btf index 2d6e5ed9081e..b75f09f3f424 100644 --- a/scripts/Makefile.btf +++ b/scripts/Makefile.btf @@ -14,17 +14,20 @@ pahole-flags-$(call test-ge, $(pahole-ver), 121) += --btf_gen_floats pahole-flags-$(call test-ge, $(pahole-ver), 122) += -j -ifeq ($(pahole-ver), 125) -pahole-flags-y += --skip_encoding_btf_inconsistent_proto --btf_gen_optimized -endif +pahole-flags-$(call test-ge, $(pahole-ver), 125) += --skip_encoding_btf_inconsistent_proto --btf_gen_optimized else # Switch to using --btf_features for v1.26 and later. -pahole-flags-$(call test-ge, $(pahole-ver), 126) = -j --btf_features=encode_force,var,float,enum64,decl_tag,type_tag,optimized_func,consistent_func +pahole-flags-$(call test-ge, $(pahole-ver), 126) = -j --btf_features=encode_force,var,float,enum64,decl_tag,type_tag,optimized_func,consistent_func,decl_tag_kfuncs + +ifneq ($(KBUILD_EXTMOD),) +module-pahole-flags-$(call test-ge, $(pahole-ver), 126) += --btf_features=distilled_base +endif endif pahole-flags-$(CONFIG_PAHOLE_HAS_LANG_EXCLUDE) += --lang_exclude=rust export PAHOLE_FLAGS := $(pahole-flags-y) +export MODULE_PAHOLE_FLAGS := $(module-pahole-flags-y) diff --git a/scripts/Makefile.modfinal b/scripts/Makefile.modfinal index 3bec9043e4f3..1fa98b5e952b 100644 --- a/scripts/Makefile.modfinal +++ b/scripts/Makefile.modfinal @@ -41,7 +41,7 @@ quiet_cmd_btf_ko = BTF [M] $@ if [ ! 
-f vmlinux ]; then \ printf "Skipping BTF generation for %s due to unavailability of vmlinux\n" $@ 1>&2; \ else \ - LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J $(PAHOLE_FLAGS) --btf_base vmlinux $@; \ + LLVM_OBJCOPY="$(OBJCOPY)" $(PAHOLE) -J $(PAHOLE_FLAGS) $(MODULE_PAHOLE_FLAGS) --btf_base vmlinux $@; \ $(RESOLVE_BTFIDS) -b vmlinux $@; \ fi; diff --git a/tools/bpf/bpftool/Documentation/bpftool-btf.rst b/tools/bpf/bpftool/Documentation/bpftool-btf.rst index eaba24320fb2..3f6bca03ad2e 100644 --- a/tools/bpf/bpftool/Documentation/bpftool-btf.rst +++ b/tools/bpf/bpftool/Documentation/bpftool-btf.rst @@ -28,7 +28,7 @@ BTF COMMANDS | **bpftool** **btf help** | | *BTF_SRC* := { **id** *BTF_ID* | **prog** *PROG* | **map** *MAP* [{**key** | **value** | **kv** | **all**}] | **file** *FILE* } -| *FORMAT* := { **raw** | **c** } +| *FORMAT* := { **raw** | **c** [**unsorted**] } | *MAP* := { **id** *MAP_ID* | **pinned** *FILE* } | *PROG* := { **id** *PROG_ID* | **pinned** *FILE* | **tag** *PROG_TAG* | **name** *PROG_NAME* } @@ -63,7 +63,9 @@ bpftool btf dump *BTF_SRC* pahole. **format** option can be used to override default (raw) output format. Raw - (**raw**) or C-syntax (**c**) output formats are supported. + (**raw**) or C-syntax (**c**) output formats are supported. With C-style + formatting, the output is sorted by default. Use the **unsorted** option + to avoid sorting the output. bpftool btf help Print short help message. diff --git a/tools/bpf/bpftool/Makefile b/tools/bpf/bpftool/Makefile index dfa4f1bebbb3..ba927379eb20 100644 --- a/tools/bpf/bpftool/Makefile +++ b/tools/bpf/bpftool/Makefile @@ -204,10 +204,11 @@ ifeq ($(feature-clang-bpf-co-re),1) BUILD_BPF_SKELS := 1 -$(OUTPUT)vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL_BOOTSTRAP) ifeq ($(VMLINUX_H),) +$(OUTPUT)vmlinux.h: $(VMLINUX_BTF) $(BPFTOOL_BOOTSTRAP) $(QUIET_GEN)$(BPFTOOL_BOOTSTRAP) btf dump file $< format c > $@ else +$(OUTPUT)vmlinux.h: $(VMLINUX_H) $(Q)cp "$(VMLINUX_H)" $@ endif diff --git a/tools/bpf/bpftool/bash-completion/bpftool b/tools/bpf/bpftool/bash-completion/bpftool index 04afe2ac2228..be99d49b8714 100644 --- a/tools/bpf/bpftool/bash-completion/bpftool +++ b/tools/bpf/bpftool/bash-completion/bpftool @@ -930,6 +930,9 @@ _bpftool() format) COMPREPLY=( $( compgen -W "c raw" -- "$cur" ) ) ;; + c) + COMPREPLY=( $( compgen -W "unsorted" -- "$cur" ) ) + ;; *) # emit extra options case ${words[3]} in diff --git a/tools/bpf/bpftool/btf.c b/tools/bpf/bpftool/btf.c index 91fcb75babe3..6789c7a4d5ca 100644 --- a/tools/bpf/bpftool/btf.c +++ b/tools/bpf/bpftool/btf.c @@ -20,6 +20,8 @@ #include "json_writer.h" #include "main.h" +#define KFUNC_DECL_TAG "bpf_kfunc" + static const char * const btf_kind_str[NR_BTF_KINDS] = { [BTF_KIND_UNKN] = "UNKNOWN", [BTF_KIND_INT] = "INT", @@ -43,6 +45,13 @@ static const char * const btf_kind_str[NR_BTF_KINDS] = { [BTF_KIND_ENUM64] = "ENUM64", }; +struct sort_datum { + int index; + int type_rank; + const char *sort_name; + const char *own_name; +}; + static const char *btf_int_enc_str(__u8 encoding) { switch (encoding) { @@ -454,15 +463,171 @@ static int dump_btf_raw(const struct btf *btf, return 0; } +static int dump_btf_kfuncs(struct btf_dump *d, const struct btf *btf) +{ + LIBBPF_OPTS(btf_dump_emit_type_decl_opts, opts); + int cnt = btf__type_cnt(btf); + int i; + + printf("\n/* BPF kfuncs */\n"); + printf("#ifndef BPF_NO_KFUNC_PROTOTYPES\n"); + + for (i = 1; i < cnt; i++) { + const struct btf_type *t = btf__type_by_id(btf, i); + const char *name; + int err; + + if (!btf_is_decl_tag(t)) + continue; + + if 
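In use, the new keyword is a trailing option: `bpftool btf dump file /sys/kernel/btf/vmlinux format c` now produces sorted output, which keeps a generated vmlinux.h stable across otherwise-equivalent builds, while `... format c unsorted` restores the previous BTF-id order. The kfunc prototypes emitted by dump_btf_kfuncs() land in that header shaped roughly like this (the kfunc shown is illustrative):

    /* BPF kfuncs */
    #ifndef BPF_NO_KFUNC_PROTOTYPES
    extern struct task_struct *bpf_task_from_pid(s32 pid) __weak __ksym;
    #endif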
(btf_decl_tag(t)->component_idx != -1) + continue; + + name = btf__name_by_offset(btf, t->name_off); + if (strncmp(name, KFUNC_DECL_TAG, sizeof(KFUNC_DECL_TAG))) + continue; + + t = btf__type_by_id(btf, t->type); + if (!btf_is_func(t)) + continue; + + printf("extern "); + + opts.field_name = btf__name_by_offset(btf, t->name_off); + err = btf_dump__emit_type_decl(d, t->type, &opts); + if (err) + return err; + + printf(" __weak __ksym;\n"); + } + + printf("#endif\n\n"); + + return 0; +} + static void __printf(2, 0) btf_dump_printf(void *ctx, const char *fmt, va_list args) { vfprintf(stdout, fmt, args); } -static int dump_btf_c(const struct btf *btf, - __u32 *root_type_ids, int root_type_cnt) +static int btf_type_rank(const struct btf *btf, __u32 index, bool has_name) { + const struct btf_type *t = btf__type_by_id(btf, index); + const int kind = btf_kind(t); + const int max_rank = 10; + + if (t->name_off) + has_name = true; + + switch (kind) { + case BTF_KIND_ENUM: + case BTF_KIND_ENUM64: + return has_name ? 1 : 0; + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + return 2; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + return has_name ? 3 : max_rank; + case BTF_KIND_FUNC_PROTO: + return has_name ? 4 : max_rank; + case BTF_KIND_ARRAY: + if (has_name) + return btf_type_rank(btf, btf_array(t)->type, has_name); + return max_rank; + case BTF_KIND_TYPE_TAG: + case BTF_KIND_CONST: + case BTF_KIND_PTR: + case BTF_KIND_VOLATILE: + case BTF_KIND_RESTRICT: + case BTF_KIND_TYPEDEF: + case BTF_KIND_DECL_TAG: + if (has_name) + return btf_type_rank(btf, t->type, has_name); + return max_rank; + default: + return max_rank; + } +} + +static const char *btf_type_sort_name(const struct btf *btf, __u32 index, bool from_ref) +{ + const struct btf_type *t = btf__type_by_id(btf, index); + + switch (btf_kind(t)) { + case BTF_KIND_ENUM: + case BTF_KIND_ENUM64: { + int name_off = t->name_off; + + /* Use name of the first element for anonymous enums if allowed */ + if (!from_ref && !t->name_off && btf_vlen(t)) + name_off = btf_enum(t)->name_off; + + return btf__name_by_offset(btf, name_off); + } + case BTF_KIND_ARRAY: + return btf_type_sort_name(btf, btf_array(t)->type, true); + case BTF_KIND_TYPE_TAG: + case BTF_KIND_CONST: + case BTF_KIND_PTR: + case BTF_KIND_VOLATILE: + case BTF_KIND_RESTRICT: + case BTF_KIND_TYPEDEF: + case BTF_KIND_DECL_TAG: + return btf_type_sort_name(btf, t->type, true); + default: + return btf__name_by_offset(btf, t->name_off); + } + return NULL; +} + +static int btf_type_compare(const void *left, const void *right) +{ + const struct sort_datum *d1 = (const struct sort_datum *)left; + const struct sort_datum *d2 = (const struct sort_datum *)right; + int r; + + if (d1->type_rank != d2->type_rank) + return d1->type_rank < d2->type_rank ? 
-1 : 1; + + r = strcmp(d1->sort_name, d2->sort_name); + if (r) + return r; + + return strcmp(d1->own_name, d2->own_name); +} + +static struct sort_datum *sort_btf_c(const struct btf *btf) +{ + struct sort_datum *datums; + int n; + + n = btf__type_cnt(btf); + datums = malloc(sizeof(struct sort_datum) * n); + if (!datums) + return NULL; + + for (int i = 0; i < n; ++i) { + struct sort_datum *d = datums + i; + const struct btf_type *t = btf__type_by_id(btf, i); + + d->index = i; + d->type_rank = btf_type_rank(btf, i, false); + d->sort_name = btf_type_sort_name(btf, i, false); + d->own_name = btf__name_by_offset(btf, t->name_off); + } + + qsort(datums, n, sizeof(struct sort_datum), btf_type_compare); + + return datums; +} + +static int dump_btf_c(const struct btf *btf, + __u32 *root_type_ids, int root_type_cnt, bool sort_dump) +{ + struct sort_datum *datums = NULL; struct btf_dump *d; int err = 0, i; @@ -476,6 +641,12 @@ static int dump_btf_c(const struct btf *btf, printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n"); printf("#pragma clang attribute push (__attribute__((preserve_access_index)), apply_to = record)\n"); printf("#endif\n\n"); + printf("#ifndef __ksym\n"); + printf("#define __ksym __attribute__((section(\".ksyms\")))\n"); + printf("#endif\n\n"); + printf("#ifndef __weak\n"); + printf("#define __weak __attribute__((weak))\n"); + printf("#endif\n\n"); if (root_type_cnt) { for (i = 0; i < root_type_cnt; i++) { @@ -486,11 +657,19 @@ static int dump_btf_c(const struct btf *btf, } else { int cnt = btf__type_cnt(btf); + if (sort_dump) + datums = sort_btf_c(btf); for (i = 1; i < cnt; i++) { - err = btf_dump__dump_type(d, i); + int idx = datums ? datums[i].index : i; + + err = btf_dump__dump_type(d, idx); if (err) goto done; } + + err = dump_btf_kfuncs(d, btf); + if (err) + goto done; } printf("#ifndef BPF_NO_PRESERVE_ACCESS_INDEX\n"); @@ -500,6 +679,7 @@ static int dump_btf_c(const struct btf *btf, printf("#endif /* __VMLINUX_H__ */\n"); done: + free(datums); btf_dump__free(d); return err; } @@ -549,10 +729,10 @@ static bool btf_is_kernel_module(__u32 btf_id) static int do_dump(int argc, char **argv) { + bool dump_c = false, sort_dump_c = true; struct btf *btf = NULL, *base = NULL; __u32 root_type_ids[2]; int root_type_cnt = 0; - bool dump_c = false; __u32 btf_id = -1; const char *src; int fd = -1; @@ -663,6 +843,9 @@ static int do_dump(int argc, char **argv) goto done; } NEXT_ARG(); + } else if (is_prefix(*argv, "unsorted")) { + sort_dump_c = false; + NEXT_ARG(); } else { p_err("unrecognized option: '%s'", *argv); err = -EINVAL; @@ -691,7 +874,7 @@ static int do_dump(int argc, char **argv) err = -ENOTSUP; goto done; } - err = dump_btf_c(btf, root_type_ids, root_type_cnt); + err = dump_btf_c(btf, root_type_ids, root_type_cnt, sort_dump_c); } else { err = dump_btf_raw(btf, root_type_ids, root_type_cnt); } @@ -1063,7 +1246,7 @@ static int do_help(int argc, char **argv) " %1$s %2$s help\n" "\n" " BTF_SRC := { id BTF_ID | prog PROG | map MAP [{key | value | kv | all}] | file FILE }\n" - " FORMAT := { raw | c }\n" + " FORMAT := { raw | c [unsorted] }\n" " " HELP_SPEC_MAP "\n" " " HELP_SPEC_PROGRAM "\n" " " HELP_SPEC_OPTIONS " |\n" diff --git a/tools/bpf/bpftool/cgroup.c b/tools/bpf/bpftool/cgroup.c index af6898c0f388..9af426d43299 100644 --- a/tools/bpf/bpftool/cgroup.c +++ b/tools/bpf/bpftool/cgroup.c @@ -19,6 +19,38 @@ #include "main.h" +static const int cgroup_attach_types[] = { + BPF_CGROUP_INET_INGRESS, + BPF_CGROUP_INET_EGRESS, + BPF_CGROUP_INET_SOCK_CREATE, + BPF_CGROUP_INET_SOCK_RELEASE, + 
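The effect of btf_type_rank() and btf_type_sort_name() above is a rank-then-name ordering: named enums first, then ints and floats, then named structs, unions and prototypes, with anonymous or unrankable types pushed to max_rank at the end; ties fall back to the referenced type's name and finally the type's own name. A self-contained toy of the comparator's shape (ranks hard-coded for brevity):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct item { int rank; const char *name; };

    static int cmp(const void *a, const void *b)
    {
        const struct item *x = a, *y = b;

        if (x->rank != y->rank)
            return x->rank < y->rank ? -1 : 1;
        return strcmp(x->name, y->name);
    }

    int main(void)
    {
        struct item v[] = {
            { 3, "task_struct" },   /* named struct */
            { 2, "int" },           /* int/float */
            { 1, "bpf_map_type" },  /* named enum */
            { 10, "" },             /* anonymous: pushed last */
        };

        qsort(v, sizeof(v) / sizeof(v[0]), sizeof(v[0]), cmp);
        for (size_t i = 0; i < sizeof(v) / sizeof(v[0]); i++)
            printf("%d %s\n", v[i].rank, v[i].name);
        return 0;
    }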
BPF_CGROUP_INET4_BIND, + BPF_CGROUP_INET6_BIND, + BPF_CGROUP_INET4_POST_BIND, + BPF_CGROUP_INET6_POST_BIND, + BPF_CGROUP_INET4_CONNECT, + BPF_CGROUP_INET6_CONNECT, + BPF_CGROUP_UNIX_CONNECT, + BPF_CGROUP_INET4_GETPEERNAME, + BPF_CGROUP_INET6_GETPEERNAME, + BPF_CGROUP_UNIX_GETPEERNAME, + BPF_CGROUP_INET4_GETSOCKNAME, + BPF_CGROUP_INET6_GETSOCKNAME, + BPF_CGROUP_UNIX_GETSOCKNAME, + BPF_CGROUP_UDP4_SENDMSG, + BPF_CGROUP_UDP6_SENDMSG, + BPF_CGROUP_UNIX_SENDMSG, + BPF_CGROUP_UDP4_RECVMSG, + BPF_CGROUP_UDP6_RECVMSG, + BPF_CGROUP_UNIX_RECVMSG, + BPF_CGROUP_SOCK_OPS, + BPF_CGROUP_DEVICE, + BPF_CGROUP_SYSCTL, + BPF_CGROUP_GETSOCKOPT, + BPF_CGROUP_SETSOCKOPT, + BPF_LSM_CGROUP +}; + #define HELP_SPEC_ATTACH_FLAGS \ "ATTACH_FLAGS := { multi | override }" @@ -183,13 +215,13 @@ static int count_attached_bpf_progs(int cgroup_fd, enum bpf_attach_type type) static int cgroup_has_attached_progs(int cgroup_fd) { - enum bpf_attach_type type; + unsigned int i = 0; bool no_prog = true; - for (type = 0; type < __MAX_BPF_ATTACH_TYPE; type++) { - int count = count_attached_bpf_progs(cgroup_fd, type); + for (i = 0; i < ARRAY_SIZE(cgroup_attach_types); i++) { + int count = count_attached_bpf_progs(cgroup_fd, cgroup_attach_types[i]); - if (count < 0 && errno != EINVAL) + if (count < 0) return -1; if (count > 0) { diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c index 958e92acca8e..9b75639434b8 100644 --- a/tools/bpf/bpftool/common.c +++ b/tools/bpf/bpftool/common.c @@ -410,7 +410,7 @@ void get_prog_full_name(const struct bpf_prog_info *prog_info, int prog_fd, { const char *prog_name = prog_info->name; const struct btf_type *func_type; - const struct bpf_func_info finfo = {}; + struct bpf_func_info finfo = {}; struct bpf_prog_info info = {}; __u32 info_len = sizeof(info); struct btf *prog_btf = NULL; diff --git a/tools/bpf/bpftool/gen.c b/tools/bpf/bpftool/gen.c index b3979ddc0189..5a4d3240689e 100644 --- a/tools/bpf/bpftool/gen.c +++ b/tools/bpf/bpftool/gen.c @@ -848,28 +848,45 @@ out: } static void -codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped) +codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped, bool populate_links) { struct bpf_map *map; char ident[256]; - size_t i; + size_t i, map_sz; if (!map_cnt) return; + /* for backward compatibility with old libbpf versions that don't + * handle new BPF skeleton with new struct bpf_map_skeleton definition + * that includes link field, avoid specifying new increased size, + * unless we absolutely have to (i.e., if there are struct_ops maps + * present) + */ + map_sz = offsetof(struct bpf_map_skeleton, link); + if (populate_links) { + bpf_object__for_each_map(map, obj) { + if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) { + map_sz = sizeof(struct bpf_map_skeleton); + break; + } + } + } + codegen("\ \n\ - \n\ + \n\ /* maps */ \n\ s->map_cnt = %zu; \n\ - s->map_skel_sz = sizeof(*s->maps); \n\ - s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt, s->map_skel_sz);\n\ + s->map_skel_sz = %zu; \n\ + s->maps = (struct bpf_map_skeleton *)calloc(s->map_cnt,\n\ + sizeof(*s->maps) > %zu ? 
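cgroup_has_attached_progs() above now probes only attach types that are actually cgroup-scoped, so a query failure can be treated as a real error rather than the EINVAL noise that probing every enum value used to produce on kernels without newer types. Each probe is one BPF_PROG_QUERY call; in isolation (sketch):

    #include <bpf/bpf.h>

    /* count programs of a single attach type on a cgroup fd, the same
     * BPF_PROG_QUERY call that count_attached_bpf_progs() wraps */
    static int count_progs(int cgroup_fd, enum bpf_attach_type type)
    {
        __u32 cnt = 0;

        if (bpf_prog_query(cgroup_fd, type, 0, NULL, NULL, &cnt) < 0)
            return -1;
        return (int)cnt;
    }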
sizeof(*s->maps) : %zu);\n\ if (!s->maps) { \n\ err = -ENOMEM; \n\ goto err; \n\ } \n\ ", - map_cnt + map_cnt, map_sz, map_sz, map_sz ); i = 0; bpf_object__for_each_map(map, obj) { @@ -878,15 +895,22 @@ codegen_maps_skeleton(struct bpf_object *obj, size_t map_cnt, bool mmaped) codegen("\ \n\ - \n\ - s->maps[%zu].name = \"%s\"; \n\ - s->maps[%zu].map = &obj->maps.%s; \n\ + \n\ + map = (struct bpf_map_skeleton *)((char *)s->maps + %zu * s->map_skel_sz);\n\ + map->name = \"%s\"; \n\ + map->map = &obj->maps.%s; \n\ ", - i, bpf_map__name(map), i, ident); + i, bpf_map__name(map), ident); /* memory-mapped internal maps */ if (mmaped && is_mmapable_map(map, ident, sizeof(ident))) { - printf("\ts->maps[%zu].mmaped = (void **)&obj->%s;\n", - i, ident); + printf("\tmap->mmaped = (void **)&obj->%s;\n", ident); + } + + if (populate_links && bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) { + codegen("\ + \n\ + map->link = &obj->links.%s; \n\ + ", ident); } i++; } @@ -1141,7 +1165,7 @@ static void gen_st_ops_shadow_init(struct btf *btf, struct bpf_object *obj) static int do_skeleton(int argc, char **argv) { char header_guard[MAX_OBJ_NAME_LEN + sizeof("__SKEL_H__")]; - size_t map_cnt = 0, prog_cnt = 0, file_sz, mmap_sz; + size_t map_cnt = 0, prog_cnt = 0, attach_map_cnt = 0, file_sz, mmap_sz; DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts); char obj_name[MAX_OBJ_NAME_LEN] = "", *obj_data; struct bpf_object *obj = NULL; @@ -1225,6 +1249,10 @@ static int do_skeleton(int argc, char **argv) bpf_map__name(map)); continue; } + + if (bpf_map__type(map) == BPF_MAP_TYPE_STRUCT_OPS) + attach_map_cnt++; + map_cnt++; } bpf_object__for_each_program(prog, obj) { @@ -1260,6 +1288,8 @@ static int do_skeleton(int argc, char **argv) #include <stdlib.h> \n\ #include <bpf/libbpf.h> \n\ \n\ + #define BPF_SKEL_SUPPORTS_MAP_AUTO_ATTACH 1 \n\ + \n\ struct %1$s { \n\ struct bpf_object_skeleton *skeleton; \n\ struct bpf_object *obj; \n\ @@ -1297,6 +1327,9 @@ static int do_skeleton(int argc, char **argv) bpf_program__name(prog)); } printf("\t} progs;\n"); + } + + if (prog_cnt + attach_map_cnt) { printf("\tstruct {\n"); bpf_object__for_each_program(prog, obj) { if (use_loader) @@ -1306,6 +1339,19 @@ static int do_skeleton(int argc, char **argv) printf("\t\tstruct bpf_link *%s;\n", bpf_program__name(prog)); } + + bpf_object__for_each_map(map, obj) { + if (!get_map_ident(map, ident, sizeof(ident))) + continue; + if (bpf_map__type(map) != BPF_MAP_TYPE_STRUCT_OPS) + continue; + + if (use_loader) + printf("\t\tint %s_fd;\n", ident); + else + printf("\t\tstruct bpf_link *%s;\n", ident); + } + printf("\t} links;\n"); } @@ -1433,6 +1479,7 @@ static int do_skeleton(int argc, char **argv) %1$s__create_skeleton(struct %1$s *obj) \n\ { \n\ struct bpf_object_skeleton *s; \n\ + struct bpf_map_skeleton *map __attribute__((unused));\n\ int err; \n\ \n\ s = (struct bpf_object_skeleton *)calloc(1, sizeof(*s));\n\ @@ -1448,7 +1495,7 @@ static int do_skeleton(int argc, char **argv) obj_name ); - codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/); + codegen_maps_skeleton(obj, map_cnt, true /*mmaped*/, true /*links*/); codegen_progs_skeleton(obj, prog_cnt, true /*populate_links*/); codegen("\ @@ -1723,6 +1770,7 @@ static int do_subskeleton(int argc, char **argv) { \n\ struct %1$s *obj; \n\ struct bpf_object_subskeleton *s; \n\ + struct bpf_map_skeleton *map __attribute__((unused));\n\ int err; \n\ \n\ obj = (struct %1$s *)calloc(1, sizeof(*obj)); \n\ @@ -1786,7 +1834,7 @@ static int do_subskeleton(int argc, char **argv) } } - codegen_maps_skeleton(obj, map_cnt, false
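With struct_ops maps now represented in the links section and wired up via map->link in codegen_maps_skeleton(), an application built from a new-enough skeleton can, given a matching libbpf, expect <name>__attach() to attach its struct_ops maps as well, and can detach one selectively through the generated link. A consumer-side sketch (the skeleton name myprog and map name my_ops are placeholders):

    #include "myprog.skel.h"    /* hypothetical generated skeleton */

    int main(void)
    {
        struct myprog *skel;
        int err;

        skel = myprog__open_and_load();
        if (!skel)
            return 1;

        err = myprog__attach(skel);    /* attaches struct_ops maps too */
        if (!err) {
            /* generated field: one bpf_link per struct_ops map */
            bpf_link__destroy(skel->links.my_ops);
            skel->links.my_ops = NULL;
        }
        myprog__destroy(skel);
        return err ? 1 : 0;
    }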
/*mmaped*/); + codegen_maps_skeleton(obj, map_cnt, false /*mmaped*/, false /*links*/); codegen_progs_skeleton(obj, prog_cnt, false /*links*/); codegen("\ @@ -2379,15 +2427,6 @@ out: return err; } -static int btfgen_remap_id(__u32 *type_id, void *ctx) -{ - unsigned int *ids = ctx; - - *type_id = ids[*type_id]; - - return 0; -} - /* Generate BTF from relocation information previously recorded */ static struct btf *btfgen_get_btf(struct btfgen_info *info) { @@ -2467,10 +2506,15 @@ static struct btf *btfgen_get_btf(struct btfgen_info *info) /* second pass: fix up type ids */ for (i = 1; i < btf__type_cnt(btf_new); i++) { struct btf_type *btf_type = (struct btf_type *) btf__type_by_id(btf_new, i); + struct btf_field_iter it; + __u32 *type_id; - err = btf_type_visit_type_ids(btf_type, btfgen_remap_id, ids); + err = btf_field_iter_init(&it, btf_type, BTF_FIELD_ITER_IDS); if (err) goto err_out; + + while ((type_id = btf_field_iter_next(&it))) + *type_id = ids[*type_id]; } free(ids); diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c index 1a501cf09e78..40ea743d139f 100644 --- a/tools/bpf/bpftool/prog.c +++ b/tools/bpf/bpftool/prog.c @@ -1813,6 +1813,10 @@ offload_dev: } if (pinmaps) { + err = create_and_mount_bpffs_dir(pinmaps); + if (err) + goto err_unpin; + err = bpf_object__pin_maps(obj, pinmaps); if (err) { p_err("failed to pin all maps"); diff --git a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c index 7bdbcac3cf62..948dde25034e 100644 --- a/tools/bpf/bpftool/skeleton/pid_iter.bpf.c +++ b/tools/bpf/bpftool/skeleton/pid_iter.bpf.c @@ -29,6 +29,7 @@ enum bpf_link_type___local { }; extern const void bpf_link_fops __ksym; +extern const void bpf_link_fops_poll __ksym __weak; extern const void bpf_map_fops __ksym; extern const void bpf_prog_fops __ksym; extern const void btf_fops __ksym; @@ -84,7 +85,11 @@ int iter(struct bpf_iter__task_file *ctx) fops = &btf_fops; break; case BPF_OBJ_LINK: - fops = &bpf_link_fops; + if (&bpf_link_fops_poll && + file->f_op == &bpf_link_fops_poll) + fops = &bpf_link_fops_poll; + else + fops = &bpf_link_fops; break; default: return 0; diff --git a/tools/bpf/bpftool/skeleton/profiler.bpf.c b/tools/bpf/bpftool/skeleton/profiler.bpf.c index 2f80edc682f1..f48c783cb9f7 100644 --- a/tools/bpf/bpftool/skeleton/profiler.bpf.c +++ b/tools/bpf/bpftool/skeleton/profiler.bpf.c @@ -40,17 +40,17 @@ struct { const volatile __u32 num_cpu = 1; const volatile __u32 num_metric = 1; -#define MAX_NUM_MATRICS 4 +#define MAX_NUM_METRICS 4 SEC("fentry/XXX") int BPF_PROG(fentry_XXX) { - struct bpf_perf_event_value___local *ptrs[MAX_NUM_MATRICS]; + struct bpf_perf_event_value___local *ptrs[MAX_NUM_METRICS]; u32 key = bpf_get_smp_processor_id(); u32 i; /* look up before reading, to reduce error */ - for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) { + for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) { u32 flag = i; ptrs[i] = bpf_map_lookup_elem(&fentry_readings, &flag); @@ -58,7 +58,7 @@ int BPF_PROG(fentry_XXX) return 0; } - for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) { + for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) { struct bpf_perf_event_value___local reading; int err; @@ -99,14 +99,14 @@ fexit_update_maps(u32 id, struct bpf_perf_event_value___local *after) SEC("fexit/XXX") int BPF_PROG(fexit_XXX) { - struct bpf_perf_event_value___local readings[MAX_NUM_MATRICS]; + struct bpf_perf_event_value___local readings[MAX_NUM_METRICS]; u32 cpu = bpf_get_smp_processor_id(); u32 i, zero = 0; int err; u64 *count; 
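The pid_iter change above uses the standard pattern for referencing a kernel symbol that may not exist on the running kernel: declare the ksym __weak so the program still loads when the symbol is absent, and test its address before comparing against it. The idiom in isolation (symbol name illustrative):

    extern const void some_optional_symbol __ksym __weak;

    static __always_inline bool has_optional_symbol(void)
    {
        /* an unresolved weak ksym has address zero */
        return &some_optional_symbol != NULL;
    }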
/* read all events before updating the maps, to reduce error */ - for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) { + for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) { err = bpf_perf_event_read_value(&events, cpu + i * num_cpu, (void *)(readings + i), sizeof(*readings)); @@ -116,7 +116,7 @@ int BPF_PROG(fexit_XXX) count = bpf_map_lookup_elem(&counts, &zero); if (count) { *count += 1; - for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) + for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) fexit_update_maps(i, &readings[i]); } return 0; diff --git a/tools/bpf/resolve_btfids/main.c b/tools/bpf/resolve_btfids/main.c index af393c7dee1f..936ef95c3d32 100644 --- a/tools/bpf/resolve_btfids/main.c +++ b/tools/bpf/resolve_btfids/main.c @@ -409,6 +409,14 @@ static int elf_collect(struct object *obj) obj->efile.idlist = data; obj->efile.idlist_shndx = idx; obj->efile.idlist_addr = sh.sh_addr; + } else if (!strcmp(name, BTF_BASE_ELF_SEC)) { + /* If a .BTF.base section is found, do not resolve + * BTF ids relative to vmlinux; resolve relative + * to the .BTF.base section instead. btf__parse_split() + * will take care of this once the base BTF it is + * passed is NULL. + */ + obj->base_btf_path = NULL; } if (compressed_section_fix(elf, scn, &sh)) diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 90706a47f6ff..35bcf52dbc65 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -1425,6 +1425,8 @@ enum { #define BPF_F_TEST_RUN_ON_CPU (1U << 0) /* If set, XDP frames will be transmitted after processing */ #define BPF_F_TEST_XDP_LIVE_FRAMES (1U << 1) +/* If set, apply CHECKSUM_COMPLETE to skb and validate the checksum */ +#define BPF_F_TEST_SKB_CHECKSUM_COMPLETE (1U << 2) /* type for BPF_ENABLE_STATS */ enum bpf_stats_type { @@ -6207,12 +6209,17 @@ union { \ __u64 :64; \ } __attribute__((aligned(8))) +/* The enum used in skb->tstamp_type. It specifies the clock type + * of the time stored in the skb->tstamp. + */ enum { - BPF_SKB_TSTAMP_UNSPEC, - BPF_SKB_TSTAMP_DELIVERY_MONO, /* tstamp has mono delivery time */ - /* For any BPF_SKB_TSTAMP_* that the bpf prog cannot handle, - * the bpf prog should handle it like BPF_SKB_TSTAMP_UNSPEC - * and try to deduce it by ingress, egress or skb->sk->sk_clockid. + BPF_SKB_TSTAMP_UNSPEC = 0, /* DEPRECATED */ + BPF_SKB_TSTAMP_DELIVERY_MONO = 1, /* DEPRECATED */ + BPF_SKB_CLOCK_REALTIME = 0, + BPF_SKB_CLOCK_MONOTONIC = 1, + BPF_SKB_CLOCK_TAI = 2, + /* For any future BPF_SKB_CLOCK_* that the bpf prog cannot handle, + * the bpf prog can try to deduce it by ingress/egress/skb->sk->sk_clockid. 
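With the renamed constants, a program that stamps packets can name the clock explicitly; bpf_skb_set_tstamp() and bpf_ktime_get_tai_ns() are existing helpers, and this series widens the clock types the former accepts. A minimal tc-program sketch (vmlinux.h and bpf_helpers.h includes assumed):

    SEC("tc")
    int stamp_egress(struct __sk_buff *skb)
    {
        /* valid clock types are now REALTIME, MONOTONIC and TAI */
        bpf_skb_set_tstamp(skb, bpf_ktime_get_tai_ns(), BPF_SKB_CLOCK_TAI);
        return 0;    /* TC_ACT_OK */
    }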
*/ }; diff --git a/tools/lib/bpf/Build b/tools/lib/bpf/Build index b6619199a706..e2cd558ca0b4 100644 --- a/tools/lib/bpf/Build +++ b/tools/lib/bpf/Build @@ -1,4 +1,4 @@ libbpf-y := libbpf.o bpf.o nlattr.o btf.o libbpf_errno.o str_error.o \ netlink.o bpf_prog_linfo.o libbpf_probes.o hashmap.o \ btf_dump.o ringbuf.o strset.o linker.o gen_loader.o relo_core.o \ - usdt.o zip.o elf.o features.o + usdt.o zip.o elf.o features.o btf_iter.o btf_relocate.o diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c index 2d0840ef599a..32c00db3b91b 100644 --- a/tools/lib/bpf/btf.c +++ b/tools/lib/bpf/btf.c @@ -116,6 +116,9 @@ struct btf { /* whether strings are already deduplicated */ bool strs_deduped; + /* whether base_btf should be freed in btf_free for this instance */ + bool owns_base; + /* BTF object FD, if loaded into kernel */ int fd; @@ -598,7 +601,7 @@ static int btf_sanity_check(const struct btf *btf) __u32 i, n = btf__type_cnt(btf); int err; - for (i = 1; i < n; i++) { + for (i = btf->start_id; i < n; i++) { t = btf_type_by_id(btf, i); err = btf_validate_type(btf, t, i); if (err) @@ -969,6 +972,8 @@ void btf__free(struct btf *btf) free(btf->raw_data); free(btf->raw_data_swapped); free(btf->type_offs); + if (btf->owns_base) + btf__free(btf->base_btf); free(btf); } @@ -1084,16 +1089,86 @@ struct btf *btf__new_split(const void *data, __u32 size, struct btf *base_btf) return libbpf_ptr(btf_new(data, size, base_btf)); } +struct btf_elf_secs { + Elf_Data *btf_data; + Elf_Data *btf_ext_data; + Elf_Data *btf_base_data; +}; + +static int btf_find_elf_sections(Elf *elf, const char *path, struct btf_elf_secs *secs) +{ + Elf_Scn *scn = NULL; + Elf_Data *data; + GElf_Ehdr ehdr; + size_t shstrndx; + int idx = 0; + + if (!gelf_getehdr(elf, &ehdr)) { + pr_warn("failed to get EHDR from %s\n", path); + goto err; + } + + if (elf_getshdrstrndx(elf, &shstrndx)) { + pr_warn("failed to get section names section index for %s\n", + path); + goto err; + } + + if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) { + pr_warn("failed to get e_shstrndx from %s\n", path); + goto err; + } + + while ((scn = elf_nextscn(elf, scn)) != NULL) { + Elf_Data **field; + GElf_Shdr sh; + char *name; + + idx++; + if (gelf_getshdr(scn, &sh) != &sh) { + pr_warn("failed to get section(%d) header from %s\n", + idx, path); + goto err; + } + name = elf_strptr(elf, shstrndx, sh.sh_name); + if (!name) { + pr_warn("failed to get section(%d) name from %s\n", + idx, path); + goto err; + } + + if (strcmp(name, BTF_ELF_SEC) == 0) + field = &secs->btf_data; + else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) + field = &secs->btf_ext_data; + else if (strcmp(name, BTF_BASE_ELF_SEC) == 0) + field = &secs->btf_base_data; + else + continue; + + data = elf_getdata(scn, 0); + if (!data) { + pr_warn("failed to get section(%d, %s) data from %s\n", + idx, name, path); + goto err; + } + *field = data; + } + + return 0; + +err: + return -LIBBPF_ERRNO__FORMAT; +} + static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, struct btf_ext **btf_ext) { - Elf_Data *btf_data = NULL, *btf_ext_data = NULL; - int err = 0, fd = -1, idx = 0; + struct btf_elf_secs secs = {}; + struct btf *dist_base_btf = NULL; struct btf *btf = NULL; - Elf_Scn *scn = NULL; + int err = 0, fd = -1; Elf *elf = NULL; - GElf_Ehdr ehdr; - size_t shstrndx; if (elf_version(EV_CURRENT) == EV_NONE) { pr_warn("failed to init libelf for %s\n", path); @@ -1107,73 +1182,48 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, return ERR_PTR(err); } - err = 
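The new owns_base flag answers the lifetime question that .BTF.base sections introduce: when parsing manufactures a distilled base internally, no caller holds a reference to it, so freeing the split BTF must free that base too. Usage-wise (sketch; the path is illustrative):

    /* parsing standalone: the internal .BTF.base-derived base is owned
     * by the returned split BTF */
    struct btf *btf = btf__parse("module.ko", NULL);

    if (btf)
        btf__free(btf);    /* frees the owned distilled base as well */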
-LIBBPF_ERRNO__FORMAT; - elf = elf_begin(fd, ELF_C_READ, NULL); if (!elf) { pr_warn("failed to open %s as ELF file\n", path); goto done; } - if (!gelf_getehdr(elf, &ehdr)) { - pr_warn("failed to get EHDR from %s\n", path); + + err = btf_find_elf_sections(elf, path, &secs); + if (err) goto done; - } - if (elf_getshdrstrndx(elf, &shstrndx)) { - pr_warn("failed to get section names section index for %s\n", - path); - goto done; - } - - if (!elf_rawdata(elf_getscn(elf, shstrndx), NULL)) { - pr_warn("failed to get e_shstrndx from %s\n", path); - goto done; - } - - while ((scn = elf_nextscn(elf, scn)) != NULL) { - GElf_Shdr sh; - char *name; - - idx++; - if (gelf_getshdr(scn, &sh) != &sh) { - pr_warn("failed to get section(%d) header from %s\n", - idx, path); - goto done; - } - name = elf_strptr(elf, shstrndx, sh.sh_name); - if (!name) { - pr_warn("failed to get section(%d) name from %s\n", - idx, path); - goto done; - } - if (strcmp(name, BTF_ELF_SEC) == 0) { - btf_data = elf_getdata(scn, 0); - if (!btf_data) { - pr_warn("failed to get section(%d, %s) data from %s\n", - idx, name, path); - goto done; - } - continue; - } else if (btf_ext && strcmp(name, BTF_EXT_ELF_SEC) == 0) { - btf_ext_data = elf_getdata(scn, 0); - if (!btf_ext_data) { - pr_warn("failed to get section(%d, %s) data from %s\n", - idx, name, path); - goto done; - } - continue; - } - } - - if (!btf_data) { + if (!secs.btf_data) { pr_warn("failed to find '%s' ELF section in %s\n", BTF_ELF_SEC, path); err = -ENODATA; goto done; } - btf = btf_new(btf_data->d_buf, btf_data->d_size, base_btf); - err = libbpf_get_error(btf); - if (err) + + if (secs.btf_base_data) { + dist_base_btf = btf_new(secs.btf_base_data->d_buf, secs.btf_base_data->d_size, + NULL); + if (IS_ERR(dist_base_btf)) { + err = PTR_ERR(dist_base_btf); + dist_base_btf = NULL; + goto done; + } + } + + btf = btf_new(secs.btf_data->d_buf, secs.btf_data->d_size, + dist_base_btf ?: base_btf); + if (IS_ERR(btf)) { + err = PTR_ERR(btf); goto done; + } + if (dist_base_btf && base_btf) { + err = btf__relocate(btf, base_btf); + if (err) + goto done; + btf__free(dist_base_btf); + dist_base_btf = NULL; + } + + if (dist_base_btf) + btf->owns_base = true; switch (gelf_getclass(elf)) { case ELFCLASS32: @@ -1187,11 +1237,12 @@ static struct btf *btf_parse_elf(const char *path, struct btf *base_btf, break; } - if (btf_ext && btf_ext_data) { - *btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size); - err = libbpf_get_error(*btf_ext); - if (err) + if (btf_ext && secs.btf_ext_data) { + *btf_ext = btf_ext__new(secs.btf_ext_data->d_buf, secs.btf_ext_data->d_size); + if (IS_ERR(*btf_ext)) { + err = PTR_ERR(*btf_ext); goto done; + } } else if (btf_ext) { *btf_ext = NULL; } @@ -1205,6 +1256,7 @@ done: if (btf_ext) btf_ext__free(*btf_ext); + btf__free(dist_base_btf); btf__free(btf); return ERR_PTR(err); @@ -1739,9 +1791,8 @@ struct btf_pipe { struct hashmap *str_off_map; /* map string offsets from src to dst */ }; -static int btf_rewrite_str(__u32 *str_off, void *ctx) +static int btf_rewrite_str(struct btf_pipe *p, __u32 *str_off) { - struct btf_pipe *p = ctx; long mapped_off; int off, err; @@ -1771,10 +1822,11 @@ static int btf_rewrite_str(__u32 *str_off, void *ctx) return 0; } -int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type) +static int btf_add_type(struct btf_pipe *p, const struct btf_type *src_type) { - struct btf_pipe p = { .src = src_btf, .dst = btf }; + struct btf_field_iter it; struct btf_type *t; + __u32 *str_off; int sz, err; sz = 
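The rewritten btf_parse_elf() gives split BTF three base configurations: a caller-supplied base, a distilled base taken from .BTF.base, or both, in which case the BTF is parsed against the distilled base and immediately re-parented onto the caller's. From the API side this is invisible (sketch; the .ko path is illustrative):

    #include <bpf/btf.h>

    /* if module.ko carries a .BTF.base section, the parsed split BTF is
     * relocated onto the supplied vmlinux BTF automatically; the vmlinux
     * BTF must then outlive the returned split BTF */
    struct btf *vmlinux_btf = btf__load_vmlinux_btf();
    struct btf *mod_btf = btf__parse_split("module.ko", vmlinux_btf);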
btf_type_size(src_type); @@ -1782,35 +1834,33 @@ int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_t return libbpf_err(sz); /* deconstruct BTF, if necessary, and invalidate raw_data */ - if (btf_ensure_modifiable(btf)) + if (btf_ensure_modifiable(p->dst)) return libbpf_err(-ENOMEM); - t = btf_add_type_mem(btf, sz); + t = btf_add_type_mem(p->dst, sz); if (!t) return libbpf_err(-ENOMEM); memcpy(t, src_type, sz); - err = btf_type_visit_str_offs(t, btf_rewrite_str, &p); + err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS); if (err) return libbpf_err(err); - return btf_commit_type(btf, sz); + while ((str_off = btf_field_iter_next(&it))) { + err = btf_rewrite_str(p, str_off); + if (err) + return libbpf_err(err); + } + + return btf_commit_type(p->dst, sz); } -static int btf_rewrite_type_ids(__u32 *type_id, void *ctx) +int btf__add_type(struct btf *btf, const struct btf *src_btf, const struct btf_type *src_type) { - struct btf *btf = ctx; + struct btf_pipe p = { .src = src_btf, .dst = btf }; - if (!*type_id) /* nothing to do for VOID references */ - return 0; - - /* we haven't updated btf's type count yet, so - * btf->start_id + btf->nr_types - 1 is the type ID offset we should - * add to all newly added BTF types - */ - *type_id += btf->start_id + btf->nr_types - 1; - return 0; + return btf_add_type(&p, src_type); } static size_t btf_dedup_identity_hash_fn(long key, void *ctx); @@ -1858,6 +1908,9 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf) memcpy(t, src_btf->types_data, data_sz); for (i = 0; i < cnt; i++) { + struct btf_field_iter it; + __u32 *type_id, *str_off; + sz = btf_type_size(t); if (sz < 0) { /* unlikely, has to be corrupted src_btf */ @@ -1869,14 +1922,30 @@ int btf__add_btf(struct btf *btf, const struct btf *src_btf) *off = t - btf->types_data; /* add, dedup, and remap strings referenced by this BTF type */ - err = btf_type_visit_str_offs(t, btf_rewrite_str, &p); + err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS); + if (err) + goto err_out; + while ((str_off = btf_field_iter_next(&it))) { + err = btf_rewrite_str(&p, str_off); + if (err) + goto err_out; + } + + /* remap all type IDs referenced from this BTF type */ + err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); if (err) goto err_out; - /* remap all type IDs referenced from this BTF type */ - err = btf_type_visit_type_ids(t, btf_rewrite_type_ids, btf); - if (err) - goto err_out; + while ((type_id = btf_field_iter_next(&it))) { + if (!*type_id) /* nothing to do for VOID references */ + continue; + + /* we haven't updated btf's type count yet, so + * btf->start_id + btf->nr_types - 1 is the type ID offset we should + * add to all newly added BTF types + */ + *type_id += btf->start_id + btf->nr_types - 1; + } /* go to next type data and type offset index entry */ t += sz; @@ -3453,11 +3522,19 @@ static int btf_for_each_str_off(struct btf_dedup *d, str_off_visit_fn fn, void * int i, r; for (i = 0; i < d->btf->nr_types; i++) { + struct btf_field_iter it; struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i); + __u32 *str_off; - r = btf_type_visit_str_offs(t, fn, ctx); + r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS); if (r) return r; + + while ((str_off = btf_field_iter_next(&it))) { + r = fn(str_off, ctx); + if (r) + return r; + } } if (!d->btf_ext) @@ -4919,10 +4996,23 @@ static int btf_dedup_remap_types(struct btf_dedup *d) for (i = 0; i < d->btf->nr_types; i++) { struct btf_type *t = btf_type_by_id(d->btf, d->btf->start_id + i); + struct btf_field_iter 
it; + __u32 *type_id; - r = btf_type_visit_type_ids(t, btf_dedup_remap_type_id, d); + r = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); if (r) return r; + + while ((type_id = btf_field_iter_next(&it))) { + __u32 resolved_id, new_id; + + resolved_id = resolve_type_id(d, *type_id); + new_id = d->hypot_map[resolved_id]; + if (new_id > BTF_MAX_NR_TYPES) + return -EINVAL; + + *type_id = new_id; + } } if (!d->btf_ext) @@ -5003,136 +5093,6 @@ struct btf *btf__load_module_btf(const char *module_name, struct btf *vmlinux_bt return btf__parse_split(path, vmlinux_btf); } -int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx) -{ - int i, n, err; - - switch (btf_kind(t)) { - case BTF_KIND_INT: - case BTF_KIND_FLOAT: - case BTF_KIND_ENUM: - case BTF_KIND_ENUM64: - return 0; - - case BTF_KIND_FWD: - case BTF_KIND_CONST: - case BTF_KIND_VOLATILE: - case BTF_KIND_RESTRICT: - case BTF_KIND_PTR: - case BTF_KIND_TYPEDEF: - case BTF_KIND_FUNC: - case BTF_KIND_VAR: - case BTF_KIND_DECL_TAG: - case BTF_KIND_TYPE_TAG: - return visit(&t->type, ctx); - - case BTF_KIND_ARRAY: { - struct btf_array *a = btf_array(t); - - err = visit(&a->type, ctx); - err = err ?: visit(&a->index_type, ctx); - return err; - } - - case BTF_KIND_STRUCT: - case BTF_KIND_UNION: { - struct btf_member *m = btf_members(t); - - for (i = 0, n = btf_vlen(t); i < n; i++, m++) { - err = visit(&m->type, ctx); - if (err) - return err; - } - return 0; - } - - case BTF_KIND_FUNC_PROTO: { - struct btf_param *m = btf_params(t); - - err = visit(&t->type, ctx); - if (err) - return err; - for (i = 0, n = btf_vlen(t); i < n; i++, m++) { - err = visit(&m->type, ctx); - if (err) - return err; - } - return 0; - } - - case BTF_KIND_DATASEC: { - struct btf_var_secinfo *m = btf_var_secinfos(t); - - for (i = 0, n = btf_vlen(t); i < n; i++, m++) { - err = visit(&m->type, ctx); - if (err) - return err; - } - return 0; - } - - default: - return -EINVAL; - } -} - -int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx) -{ - int i, n, err; - - err = visit(&t->name_off, ctx); - if (err) - return err; - - switch (btf_kind(t)) { - case BTF_KIND_STRUCT: - case BTF_KIND_UNION: { - struct btf_member *m = btf_members(t); - - for (i = 0, n = btf_vlen(t); i < n; i++, m++) { - err = visit(&m->name_off, ctx); - if (err) - return err; - } - break; - } - case BTF_KIND_ENUM: { - struct btf_enum *m = btf_enum(t); - - for (i = 0, n = btf_vlen(t); i < n; i++, m++) { - err = visit(&m->name_off, ctx); - if (err) - return err; - } - break; - } - case BTF_KIND_ENUM64: { - struct btf_enum64 *m = btf_enum64(t); - - for (i = 0, n = btf_vlen(t); i < n; i++, m++) { - err = visit(&m->name_off, ctx); - if (err) - return err; - } - break; - } - case BTF_KIND_FUNC_PROTO: { - struct btf_param *m = btf_params(t); - - for (i = 0, n = btf_vlen(t); i < n; i++, m++) { - err = visit(&m->name_off, ctx); - if (err) - return err; - } - break; - } - default: - break; - } - - return 0; -} - int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx) { const struct btf_ext_info *seg; @@ -5212,3 +5172,325 @@ int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void return 0; } + +struct btf_distill { + struct btf_pipe pipe; + int *id_map; + unsigned int split_start_id; + unsigned int split_start_str; + int diff_id; +}; + +static int btf_add_distilled_type_ids(struct btf_distill *dist, __u32 i) +{ + struct btf_type *split_t = btf_type_by_id(dist->pipe.src, i); + struct btf_field_iter it; + __u32 *id; + int 
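The visitor callbacks removed above are superseded throughout by the btf_field_iter API added in btf_iter.c later in this series: callers initialize an iterator over a type's ID or string-offset fields and mutate the returned pointers in place, with no callback or context plumbing. The canonical loop shape (remap() stands in for whatever mapping the caller applies):

    struct btf_field_iter it;
    __u32 *type_id;
    int err;

    err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS);
    if (err)
        return err;
    /* next() yields a pointer to each type-ID field, NULL when done */
    while ((type_id = btf_field_iter_next(&it))) {
        if (*type_id)
            *type_id = remap(*type_id);
    }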
err; + + err = btf_field_iter_init(&it, split_t, BTF_FIELD_ITER_IDS); + if (err) + return err; + while ((id = btf_field_iter_next(&it))) { + struct btf_type *base_t; + + if (!*id) + continue; + /* split BTF id, not needed */ + if (*id >= dist->split_start_id) + continue; + /* already added ? */ + if (dist->id_map[*id] > 0) + continue; + + /* only a subset of base BTF types should be referenced from + * split BTF; ensure nothing unexpected is referenced. + */ + base_t = btf_type_by_id(dist->pipe.src, *id); + switch (btf_kind(base_t)) { + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + case BTF_KIND_FWD: + case BTF_KIND_ARRAY: + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + case BTF_KIND_TYPEDEF: + case BTF_KIND_ENUM: + case BTF_KIND_ENUM64: + case BTF_KIND_PTR: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + case BTF_KIND_VOLATILE: + case BTF_KIND_FUNC_PROTO: + case BTF_KIND_TYPE_TAG: + dist->id_map[*id] = *id; + break; + default: + pr_warn("unexpected reference to base type[%u] of kind [%u] when creating distilled base BTF.\n", + *id, btf_kind(base_t)); + return -EINVAL; + } + /* If a base type is used, ensure types it refers to are + * marked as used also; so for example if we find a PTR to INT + * we need both the PTR and INT. + * + * The only exception is named struct/unions, since distilled + * base BTF composite types have no members. + */ + if (btf_is_composite(base_t) && base_t->name_off) + continue; + err = btf_add_distilled_type_ids(dist, *id); + if (err) + return err; + } + return 0; +} + +static int btf_add_distilled_types(struct btf_distill *dist) +{ + bool adding_to_base = dist->pipe.dst->start_id == 1; + int id = btf__type_cnt(dist->pipe.dst); + struct btf_type *t; + int i, err = 0; + + + /* Add types for each of the required references to either distilled + * base or split BTF, depending on type characteristics. + */ + for (i = 1; i < dist->split_start_id; i++) { + const char *name; + int kind; + + if (!dist->id_map[i]) + continue; + t = btf_type_by_id(dist->pipe.src, i); + kind = btf_kind(t); + name = btf__name_by_offset(dist->pipe.src, t->name_off); + + switch (kind) { + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + case BTF_KIND_FWD: + /* Named int, float, fwd are added to base. */ + if (!adding_to_base) + continue; + err = btf_add_type(&dist->pipe, t); + break; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + /* Named struct/union are added to base as 0-vlen + * struct/union of same size. Anonymous struct/unions + * are added to split BTF as-is. + */ + if (adding_to_base) { + if (!t->name_off) + continue; + err = btf_add_composite(dist->pipe.dst, kind, name, t->size); + } else { + if (t->name_off) + continue; + err = btf_add_type(&dist->pipe, t); + } + break; + case BTF_KIND_ENUM: + case BTF_KIND_ENUM64: + /* Named enum[64]s are added to base as a sized + * enum; relocation will match with appropriately-named + * and sized enum or enum64. + * + * Anonymous enums are added to split BTF as-is. + */ + if (adding_to_base) { + if (!t->name_off) + continue; + err = btf__add_enum(dist->pipe.dst, name, t->size); + } else { + if (t->name_off) + continue; + err = btf_add_type(&dist->pipe, t); + } + break; + case BTF_KIND_ARRAY: + case BTF_KIND_TYPEDEF: + case BTF_KIND_PTR: + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + case BTF_KIND_VOLATILE: + case BTF_KIND_FUNC_PROTO: + case BTF_KIND_TYPE_TAG: + /* All other types are added to split BTF. 
*/ + if (adding_to_base) + continue; + err = btf_add_type(&dist->pipe, t); + break; + default: + pr_warn("unexpected kind when adding base type '%s'[%u] of kind [%u] to distilled base BTF.\n", + name, i, kind); + return -EINVAL; + + } + if (err < 0) + break; + dist->id_map[i] = id++; + } + return err; +} + +/* Split BTF ids without a mapping will be shifted downwards since distilled + * base BTF is smaller than the original base BTF. For those that have a + * mapping (either to base or updated split BTF), update the id based on + * that mapping. + */ +static int btf_update_distilled_type_ids(struct btf_distill *dist, __u32 i) +{ + struct btf_type *t = btf_type_by_id(dist->pipe.dst, i); + struct btf_field_iter it; + __u32 *id; + int err; + + err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); + if (err) + return err; + while ((id = btf_field_iter_next(&it))) { + if (dist->id_map[*id]) + *id = dist->id_map[*id]; + else if (*id >= dist->split_start_id) + *id -= dist->diff_id; + } + return 0; +} + +/* Create updated split BTF with distilled base BTF; distilled base BTF + * consists of BTF information required to clarify the types that split + * BTF refers to, omitting unneeded details. Specifically it will contain + * base types and memberless definitions of named structs, unions and enumerated + * types. Associated reference types like pointers, arrays and anonymous + * structs, unions and enumerated types will be added to split BTF. + * Size is recorded for named struct/unions to help guide matching to the + * target base BTF during later relocation. + * + * The only case where structs, unions or enumerated types are fully represented + * is when they are anonymous; in such cases, the anonymous type is added to + * split BTF in full. + * + * We return newly-created split BTF where the split BTF refers to a newly-created + * distilled base BTF. Both must be freed separately by the caller. + */ +int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf, + struct btf **new_split_btf) +{ + struct btf *new_base = NULL, *new_split = NULL; + const struct btf *old_base; + unsigned int n = btf__type_cnt(src_btf); + struct btf_distill dist = {}; + struct btf_type *t; + int i, err = 0; + + /* src BTF must be split BTF. */ + old_base = btf__base_btf(src_btf); + if (!new_base_btf || !new_split_btf || !old_base) + return libbpf_err(-EINVAL); + + new_base = btf__new_empty(); + if (!new_base) + return libbpf_err(-ENOMEM); + dist.id_map = calloc(n, sizeof(*dist.id_map)); + if (!dist.id_map) { + err = -ENOMEM; + goto done; + } + dist.pipe.src = src_btf; + dist.pipe.dst = new_base; + dist.pipe.str_off_map = hashmap__new(btf_dedup_identity_hash_fn, btf_dedup_equal_fn, NULL); + if (IS_ERR(dist.pipe.str_off_map)) { + err = -ENOMEM; + goto done; + } + dist.split_start_id = btf__type_cnt(old_base); + dist.split_start_str = old_base->hdr->str_len; + + /* Pass over src split BTF; generate the list of base BTF type ids it + * references; these will constitute our distilled BTF set to be + * distributed over base and split BTF as appropriate. + */ + for (i = src_btf->start_id; i < n; i++) { + err = btf_add_distilled_type_ids(&dist, i); + if (err < 0) + goto done; + } + /* Next add types for each of the required references to base BTF and split BTF + * in turn. 
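Distillation and relocation are designed to bracket a module's build and load steps; the expected calling sequence, with ELF reading and writing elided and all variable names ours, is roughly:

    struct btf *dist_base = NULL, *dist_split = NULL;
    int err;

    /* build time: shrink the module's base BTF to what it references */
    err = btf__distill_base(module_btf, &dist_base, &dist_split);
    if (!err) {
        /* ... emit dist_split as .BTF and dist_base as .BTF.base ... */
        btf__free(dist_split);
        btf__free(dist_base);
    }

    /* load time, possibly on a changed kernel: re-parent the split BTF
     * (parsed together with its .BTF.base) onto the live vmlinux BTF */
    err = btf__relocate(split_btf, vmlinux_btf);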
+ */ + err = btf_add_distilled_types(&dist); + if (err < 0) + goto done; + + /* Create new split BTF with distilled base BTF as its base; the final + * state is split BTF with distilled base BTF that represents enough + * about its base references to allow it to be relocated with the base + * BTF available. + */ + new_split = btf__new_empty_split(new_base); + if (!new_split) { + err = -errno; + goto done; + } + dist.pipe.dst = new_split; + /* First add all split types */ + for (i = src_btf->start_id; i < n; i++) { + t = btf_type_by_id(src_btf, i); + err = btf_add_type(&dist.pipe, t); + if (err < 0) + goto done; + } + /* Now add distilled types to split BTF that are not added to base. */ + err = btf_add_distilled_types(&dist); + if (err < 0) + goto done; + + /* All split BTF ids will be shifted downwards since there are fewer base + * BTF ids in distilled base BTF. + */ + dist.diff_id = dist.split_start_id - btf__type_cnt(new_base); + + n = btf__type_cnt(new_split); + /* Now update base/split BTF ids. */ + for (i = 1; i < n; i++) { + err = btf_update_distilled_type_ids(&dist, i); + if (err < 0) + break; + } +done: + free(dist.id_map); + hashmap__free(dist.pipe.str_off_map); + if (err) { + btf__free(new_split); + btf__free(new_base); + return libbpf_err(err); + } + *new_base_btf = new_base; + *new_split_btf = new_split; + + return 0; +} + +const struct btf_header *btf_header(const struct btf *btf) +{ + return btf->hdr; +} + +void btf_set_base_btf(struct btf *btf, const struct btf *base_btf) +{ + btf->base_btf = (struct btf *)base_btf; + btf->start_id = btf__type_cnt(base_btf); + btf->start_str_off = base_btf->hdr->str_len; +} + +int btf__relocate(struct btf *btf, const struct btf *base_btf) +{ + int err = btf_relocate(btf, base_btf, NULL); + + if (!err) + btf->owns_base = false; + return libbpf_err(err); +} diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h index 8e6880d91c84..b68d216837a9 100644 --- a/tools/lib/bpf/btf.h +++ b/tools/lib/bpf/btf.h @@ -18,6 +18,7 @@ extern "C" { #define BTF_ELF_SEC ".BTF" #define BTF_EXT_ELF_SEC ".BTF.ext" +#define BTF_BASE_ELF_SEC ".BTF.base" #define MAPS_ELF_SEC ".maps" struct btf; @@ -107,6 +108,27 @@ LIBBPF_API struct btf *btf__new_empty(void); */ LIBBPF_API struct btf *btf__new_empty_split(struct btf *base_btf); +/** + * @brief **btf__distill_base()** creates new versions of the split BTF + * *src_btf* and its base BTF. The new base BTF will only contain the types + * needed to improve robustness of the split BTF to small changes in base BTF. + * When that split BTF is loaded against a (possibly changed) base, this + * distilled base BTF will help update references to that (possibly changed) + * base BTF. + * + * Both the new split and its associated new base BTF must be freed by + * the caller. + * + * If successful, 0 is returned and **new_base_btf** and **new_split_btf** + * will point at new base/split BTF. + * + * A negative value is returned on error and the thread-local `errno` variable + * is set to the error code as well.
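+ * + * A minimal usage sketch (illustrative only, not part of the API contract; assumes *split_btf* was obtained earlier, e.g. via **btf__parse_split()**, and elides error handling): + * + * struct btf *new_base = NULL, *new_split = NULL; + * + * if (!btf__distill_base(split_btf, &new_base, &new_split)) { + * ... use or serialize new_split and new_base ... + * btf__free(new_split); + * btf__free(new_base); + * }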
+ */ +LIBBPF_API int btf__distill_base(const struct btf *src_btf, struct btf **new_base_btf, + struct btf **new_split_btf); + LIBBPF_API struct btf *btf__parse(const char *path, struct btf_ext **btf_ext); LIBBPF_API struct btf *btf__parse_split(const char *path, struct btf *base_btf); LIBBPF_API struct btf *btf__parse_elf(const char *path, struct btf_ext **btf_ext); @@ -231,6 +253,20 @@ struct btf_dedup_opts { LIBBPF_API int btf__dedup(struct btf *btf, const struct btf_dedup_opts *opts); +/** + * @brief **btf__relocate()** will check the split BTF *btf* for references + * to base BTF kinds, and verify those references are compatible with + * *base_btf*; if they are, *btf* is adjusted such that it is re-parented to + * *base_btf* and type ids and strings are adjusted to accommodate this. + * + * If successful, 0 is returned and **btf** now has **base_btf** as its + * base. + * + * A negative value is returned on error and the thread-local `errno` variable + * is set to the error code as well. + */ +LIBBPF_API int btf__relocate(struct btf *btf, const struct btf *base_btf); + struct btf_dump; struct btf_dump_opts { diff --git a/tools/lib/bpf/btf_iter.c b/tools/lib/bpf/btf_iter.c new file mode 100644 index 000000000000..9a6c822c2294 --- /dev/null +++ b/tools/lib/bpf/btf_iter.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) +/* Copyright (c) 2021 Facebook */ +/* Copyright (c) 2024, Oracle and/or its affiliates. */ + +#ifdef __KERNEL__ +#include +#include + +#define btf_var_secinfos(t) (struct btf_var_secinfo *)btf_type_var_secinfo(t) + +#else +#include "btf.h" +#include "libbpf_internal.h" +#endif + +int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, + enum btf_field_iter_kind iter_kind) +{ + it->p = NULL; + it->m_idx = -1; + it->off_idx = 0; + it->vlen = 0; + + switch (iter_kind) { + case BTF_FIELD_ITER_IDS: + switch (btf_kind(t)) { + case BTF_KIND_UNKN: + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + case BTF_KIND_ENUM: + case BTF_KIND_ENUM64: + it->desc = (struct btf_field_desc) {}; + break; + case BTF_KIND_FWD: + case BTF_KIND_CONST: + case BTF_KIND_VOLATILE: + case BTF_KIND_RESTRICT: + case BTF_KIND_PTR: + case BTF_KIND_TYPEDEF: + case BTF_KIND_FUNC: + case BTF_KIND_VAR: + case BTF_KIND_DECL_TAG: + case BTF_KIND_TYPE_TAG: + it->desc = (struct btf_field_desc) { 1, {offsetof(struct btf_type, type)} }; + break; + case BTF_KIND_ARRAY: + it->desc = (struct btf_field_desc) { + 2, {sizeof(struct btf_type) + offsetof(struct btf_array, type), + sizeof(struct btf_type) + offsetof(struct btf_array, index_type)} + }; + break; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + it->desc = (struct btf_field_desc) { + 0, {}, + sizeof(struct btf_member), + 1, {offsetof(struct btf_member, type)} + }; + break; + case BTF_KIND_FUNC_PROTO: + it->desc = (struct btf_field_desc) { + 1, {offsetof(struct btf_type, type)}, + sizeof(struct btf_param), + 1, {offsetof(struct btf_param, type)} + }; + break; + case BTF_KIND_DATASEC: + it->desc = (struct btf_field_desc) { + 0, {}, + sizeof(struct btf_var_secinfo), + 1, {offsetof(struct btf_var_secinfo, type)} + }; + break; + default: + return -EINVAL; + } + break; + case BTF_FIELD_ITER_STRS: + switch (btf_kind(t)) { + case BTF_KIND_UNKN: + it->desc = (struct btf_field_desc) {}; + break; + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + case BTF_KIND_FWD: + case BTF_KIND_ARRAY: + case BTF_KIND_CONST: + case BTF_KIND_VOLATILE: + case BTF_KIND_RESTRICT: + case BTF_KIND_PTR: + case BTF_KIND_TYPEDEF: + case BTF_KIND_FUNC: + case 
BTF_KIND_VAR: + case BTF_KIND_DECL_TAG: + case BTF_KIND_TYPE_TAG: + case BTF_KIND_DATASEC: + it->desc = (struct btf_field_desc) { + 1, {offsetof(struct btf_type, name_off)} + }; + break; + case BTF_KIND_ENUM: + it->desc = (struct btf_field_desc) { + 1, {offsetof(struct btf_type, name_off)}, + sizeof(struct btf_enum), + 1, {offsetof(struct btf_enum, name_off)} + }; + break; + case BTF_KIND_ENUM64: + it->desc = (struct btf_field_desc) { + 1, {offsetof(struct btf_type, name_off)}, + sizeof(struct btf_enum64), + 1, {offsetof(struct btf_enum64, name_off)} + }; + break; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + it->desc = (struct btf_field_desc) { + 1, {offsetof(struct btf_type, name_off)}, + sizeof(struct btf_member), + 1, {offsetof(struct btf_member, name_off)} + }; + break; + case BTF_KIND_FUNC_PROTO: + it->desc = (struct btf_field_desc) { + 1, {offsetof(struct btf_type, name_off)}, + sizeof(struct btf_param), + 1, {offsetof(struct btf_param, name_off)} + }; + break; + default: + return -EINVAL; + } + break; + default: + return -EINVAL; + } + + if (it->desc.m_sz) + it->vlen = btf_vlen(t); + + it->p = t; + return 0; +} + +__u32 *btf_field_iter_next(struct btf_field_iter *it) +{ + if (!it->p) + return NULL; + + if (it->m_idx < 0) { + if (it->off_idx < it->desc.t_off_cnt) + return it->p + it->desc.t_offs[it->off_idx++]; + /* move to per-member iteration */ + it->m_idx = 0; + it->p += sizeof(struct btf_type); + it->off_idx = 0; + } + + /* if type doesn't have members, stop */ + if (it->desc.m_sz == 0) { + it->p = NULL; + return NULL; + } + + if (it->off_idx >= it->desc.m_off_cnt) { + /* exhausted this member's fields, go to the next member */ + it->m_idx++; + it->p += it->desc.m_sz; + it->off_idx = 0; + } + + if (it->m_idx < it->vlen) + return it->p + it->desc.m_offs[it->off_idx++]; + + it->p = NULL; + return NULL; +} diff --git a/tools/lib/bpf/btf_relocate.c b/tools/lib/bpf/btf_relocate.c new file mode 100644 index 000000000000..17f8b32f94a0 --- /dev/null +++ b/tools/lib/bpf/btf_relocate.c @@ -0,0 +1,519 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024, Oracle and/or its affiliates. */ + +#ifndef _GNU_SOURCE +#define _GNU_SOURCE +#endif + +#ifdef __KERNEL__ +#include +#include +#include +#include +#include +#include + +#define btf_type_by_id (struct btf_type *)btf_type_by_id +#define btf__type_cnt btf_nr_types +#define btf__base_btf btf_base_btf +#define btf__name_by_offset btf_name_by_offset +#define btf__str_by_offset btf_str_by_offset +#define btf_kflag btf_type_kflag + +#define calloc(nmemb, sz) kvcalloc(nmemb, sz, GFP_KERNEL | __GFP_NOWARN) +#define free(ptr) kvfree(ptr) +#define qsort(base, num, sz, cmp) sort(base, num, sz, cmp, NULL) + +#else + +#include "btf.h" +#include "bpf.h" +#include "libbpf.h" +#include "libbpf_internal.h" + +#endif /* __KERNEL__ */ + +struct btf; + +struct btf_relocate { + struct btf *btf; + const struct btf *base_btf; + const struct btf *dist_base_btf; + unsigned int nr_base_types; + unsigned int nr_split_types; + unsigned int nr_dist_base_types; + int dist_str_len; + int base_str_len; + __u32 *id_map; + __u32 *str_map; +}; + +/* Set temporarily in relocation id_map if distilled base struct/union is + * embedded in a split BTF struct/union; in such a case, size information must + * match between distilled base BTF and base BTF representation of type. + */ +#define BTF_IS_EMBEDDED ((__u32)-1) + +/* triple used in sorting/searching distilled base BTF. 
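Entries are sorted by name (and by size where a size match is required) so that base BTF candidates can be located via binary search.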
*/ +struct btf_name_info { + const char *name; + /* set when search requires a size match */ + bool needs_size: 1; + unsigned int size: 31; + __u32 id; +}; + +static int btf_relocate_rewrite_type_id(struct btf_relocate *r, __u32 i) +{ + struct btf_type *t = btf_type_by_id(r->btf, i); + struct btf_field_iter it; + __u32 *id; + int err; + + err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); + if (err) + return err; + + while ((id = btf_field_iter_next(&it))) + *id = r->id_map[*id]; + return 0; +} + +/* Simple string comparison used for sorting within BTF, since all distilled + * types are named. If strings match and a size comparison is required for + * both elements, fall back to using size for ordering. + */ +static int cmp_btf_name_size(const void *n1, const void *n2) +{ + const struct btf_name_info *ni1 = n1; + const struct btf_name_info *ni2 = n2; + int name_diff = strcmp(ni1->name, ni2->name); + + if (!name_diff && ni1->needs_size && ni2->needs_size) + return ni2->size - ni1->size; + return name_diff; +} + +/* Binary search with a small twist; find leftmost element that matches + * so that we can then iterate through all exact matches. So for example + * searching { "a", "bb", "bb", "c" } we would always match on the + * leftmost "bb". + */ +static struct btf_name_info *search_btf_name_size(struct btf_name_info *key, + struct btf_name_info *vals, + int nelems) +{ + struct btf_name_info *ret = NULL; + int high = nelems - 1; + int low = 0; + + while (low <= high) { + int mid = (low + high)/2; + struct btf_name_info *val = &vals[mid]; + int diff = cmp_btf_name_size(key, val); + + if (diff == 0) + ret = val; + /* even if found, keep searching for leftmost match */ + if (diff <= 0) + high = mid - 1; + else + low = mid + 1; + } + return ret; +} + +/* If a member of a split BTF struct/union refers to a base BTF + * struct/union, mark that struct/union id temporarily in the id_map + * with BTF_IS_EMBEDDED. Members can be const/restrict/volatile/typedef + * reference types, but if a pointer is encountered, the type is no longer + * considered embedded. + */ +static int btf_mark_embedded_composite_type_ids(struct btf_relocate *r, __u32 i) +{ + struct btf_type *t = btf_type_by_id(r->btf, i); + struct btf_field_iter it; + __u32 *id; + int err; + + if (!btf_is_composite(t)) + return 0; + + err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); + if (err) + return err; + + while ((id = btf_field_iter_next(&it))) { + __u32 next_id = *id; + + while (next_id) { + t = btf_type_by_id(r->btf, next_id); + switch (btf_kind(t)) { + case BTF_KIND_CONST: + case BTF_KIND_RESTRICT: + case BTF_KIND_VOLATILE: + case BTF_KIND_TYPEDEF: + case BTF_KIND_TYPE_TAG: + next_id = t->type; + break; + case BTF_KIND_ARRAY: { + struct btf_array *a = btf_array(t); + + next_id = a->type; + break; + } + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + if (next_id < r->nr_dist_base_types) + r->id_map[next_id] = BTF_IS_EMBEDDED; + next_id = 0; + break; + default: + next_id = 0; + break; + } + } + } + + return 0; +} + +/* Build a map from distilled base BTF ids to base BTF ids. To do so, iterate + * through base BTF looking up distilled type (using binary search) equivalents. + */ +static int btf_relocate_map_distilled_base(struct btf_relocate *r) +{ + struct btf_name_info *info, *info_end; + struct btf_type *base_t, *dist_t; + __u8 *base_name_cnt = NULL; + int err = 0; + __u32 id; + + /* generate a sort index array of name/type ids sorted by name for + * distilled base BTF to speed name-based lookups.
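+ * The array is ordered with cmp_btf_name_size() so that search_btf_name_size() below can binary-search it for the leftmost candidate match.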
+ */ + info = calloc(r->nr_dist_base_types, sizeof(*info)); + if (!info) { + err = -ENOMEM; + goto done; + } + info_end = info + r->nr_dist_base_types; + for (id = 0; id < r->nr_dist_base_types; id++) { + dist_t = btf_type_by_id(r->dist_base_btf, id); + info[id].name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off); + info[id].id = id; + info[id].size = dist_t->size; + info[id].needs_size = true; + } + qsort(info, r->nr_dist_base_types, sizeof(*info), cmp_btf_name_size); + + /* Mark distilled base struct/union members of split BTF structs/unions + * in id_map with BTF_IS_EMBEDDED; this signals that these types + * need to match both name and size, otherwise embedding the base + * struct/union in the split type is invalid. + */ + for (id = r->nr_dist_base_types; id < r->nr_dist_base_types + r->nr_split_types; id++) { + err = btf_mark_embedded_composite_type_ids(r, id); + if (err) + goto done; + } + + /* Collect name counts for composite types in base BTF. If multiple + * instances of a struct/union of the same name exist, we need to use + * size to determine which to map to since name alone is ambiguous. + */ + base_name_cnt = calloc(r->base_str_len, sizeof(*base_name_cnt)); + if (!base_name_cnt) { + err = -ENOMEM; + goto done; + } + for (id = 1; id < r->nr_base_types; id++) { + base_t = btf_type_by_id(r->base_btf, id); + if (!btf_is_composite(base_t) || !base_t->name_off) + continue; + if (base_name_cnt[base_t->name_off] < 255) + base_name_cnt[base_t->name_off]++; + } + + /* Now search base BTF for matching distilled base BTF types. */ + for (id = 1; id < r->nr_base_types; id++) { + struct btf_name_info *dist_info, base_info = {}; + int dist_kind, base_kind; + + base_t = btf_type_by_id(r->base_btf, id); + /* distilled base consists of named types only. */ + if (!base_t->name_off) + continue; + base_kind = btf_kind(base_t); + base_info.id = id; + base_info.name = btf__name_by_offset(r->base_btf, base_t->name_off); + switch (base_kind) { + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + case BTF_KIND_ENUM: + case BTF_KIND_ENUM64: + /* These types should match both name and size */ + base_info.needs_size = true; + base_info.size = base_t->size; + break; + case BTF_KIND_FWD: + /* No size considerations for fwds. */ + break; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + /* Size only needs to be used for struct/union if there + * are multiple types in base BTF with the same name. + * If there are multiple _distilled_ types with the same + * name (a very unlikely scenario), that doesn't matter + * unless corresponding _base_ types to match them are + * missing. + */ + base_info.needs_size = base_name_cnt[base_t->name_off] > 1; + base_info.size = base_t->size; + break; + default: + continue; + } + /* iterate over all matching distilled base types */ + for (dist_info = search_btf_name_size(&base_info, info, r->nr_dist_base_types); + dist_info != NULL && dist_info < info_end && + cmp_btf_name_size(&base_info, dist_info) == 0; + dist_info++) { + if (!dist_info->id || dist_info->id >= r->nr_dist_base_types) { + pr_warn("base BTF id [%d] maps to invalid distilled base BTF id [%d]\n", + id, dist_info->id); + err = -EINVAL; + goto done; + } + dist_t = btf_type_by_id(r->dist_base_btf, dist_info->id); + dist_kind = btf_kind(dist_t); + + /* Validate that the found distilled type is compatible. + * Do not error out on mismatch as another match may + * occur for an identically-named type.
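+ * For example, a distilled FWD may be compared against both a struct and a union of the same name; the kflag checks below determine which of the two is acceptable.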
+ */ + switch (dist_kind) { + case BTF_KIND_FWD: + switch (base_kind) { + case BTF_KIND_FWD: + if (btf_kflag(dist_t) != btf_kflag(base_t)) + continue; + break; + case BTF_KIND_STRUCT: + if (btf_kflag(dist_t)) + continue; + break; + case BTF_KIND_UNION: + if (!btf_kflag(dist_t)) + continue; + break; + default: + continue; + } + break; + case BTF_KIND_INT: + if (dist_kind != base_kind || + btf_int_encoding(base_t) != btf_int_encoding(dist_t)) + continue; + break; + case BTF_KIND_FLOAT: + if (dist_kind != base_kind) + continue; + break; + case BTF_KIND_ENUM: + /* ENUM and ENUM64 are encoded as sized ENUM in + * distilled base BTF. + */ + if (base_kind != dist_kind && base_kind != BTF_KIND_ENUM64) + continue; + break; + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + /* size verification is required for embedded + * struct/unions. + */ + if (r->id_map[dist_info->id] == BTF_IS_EMBEDDED && + base_t->size != dist_t->size) + continue; + break; + default: + continue; + } + if (r->id_map[dist_info->id] && + r->id_map[dist_info->id] != BTF_IS_EMBEDDED) { + /* we already have a match; this tells us that + * multiple base types of the same name + * have the same size, since for cases where + * multiple types have the same name we match + * on name and size. In this case, we have + * no way of determining which to relocate + * to in base BTF, so error out. + */ + pr_warn("distilled base BTF type '%s' [%u], size %u has multiple candidates of the same size (ids [%u, %u]) in base BTF\n", + base_info.name, dist_info->id, + base_t->size, id, r->id_map[dist_info->id]); + err = -EINVAL; + goto done; + } + /* map id and name */ + r->id_map[dist_info->id] = id; + r->str_map[dist_t->name_off] = base_t->name_off; + } + } + /* ensure all distilled BTF ids now have a mapping... */ + for (id = 1; id < r->nr_dist_base_types; id++) { + const char *name; + + if (r->id_map[id] && r->id_map[id] != BTF_IS_EMBEDDED) + continue; + dist_t = btf_type_by_id(r->dist_base_btf, id); + name = btf__name_by_offset(r->dist_base_btf, dist_t->name_off); + pr_warn("distilled base BTF type '%s' [%d] is not mapped to a base BTF id\n", + name, id); + err = -EINVAL; + break; + } +done: + free(base_name_cnt); + free(info); + return err; +} + +/* distilled base should only have named int/float/enum/fwd/struct/union types.
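Reference types (pointers, arrays, typedefs, qualifiers) and anonymous types remain on the split BTF side, as btf__distill_base() never places them in the distilled base.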
*/ +static int btf_relocate_validate_distilled_base(struct btf_relocate *r) +{ + unsigned int i; + + for (i = 1; i < r->nr_dist_base_types; i++) { + struct btf_type *t = btf_type_by_id(r->dist_base_btf, i); + int kind = btf_kind(t); + + switch (kind) { + case BTF_KIND_INT: + case BTF_KIND_FLOAT: + case BTF_KIND_ENUM: + case BTF_KIND_STRUCT: + case BTF_KIND_UNION: + case BTF_KIND_FWD: + if (t->name_off) + break; + pr_warn("type [%d], kind [%d] is invalid for distilled base BTF; it is anonymous\n", + i, kind); + return -EINVAL; + default: + pr_warn("type [%d] in distilled base BTF has unexpected kind [%d]\n", + i, kind); + return -EINVAL; + } + } + return 0; +} + +static int btf_relocate_rewrite_strs(struct btf_relocate *r, __u32 i) +{ + struct btf_type *t = btf_type_by_id(r->btf, i); + struct btf_field_iter it; + __u32 *str_off; + int off, err; + + err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS); + if (err) + return err; + + while ((str_off = btf_field_iter_next(&it))) { + if (!*str_off) + continue; + if (*str_off >= r->dist_str_len) { + *str_off += r->base_str_len - r->dist_str_len; + } else { + off = r->str_map[*str_off]; + if (!off) { + pr_warn("string '%s' [offset %u] is not mapped to base BTF\n", + btf__str_by_offset(r->btf, *str_off), *str_off); + return -ENOENT; + } + *str_off = off; + } + } + return 0; +} + +/* If successful, output of relocation is updated BTF with base BTF pointing + * at base_btf, and type ids, strings adjusted accordingly. + */ +int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map) +{ + unsigned int nr_types = btf__type_cnt(btf); + const struct btf_header *dist_base_hdr; + const struct btf_header *base_hdr; + struct btf_relocate r = {}; + int err = 0; + __u32 id, i; + + r.dist_base_btf = btf__base_btf(btf); + if (!r.dist_base_btf || !base_btf || r.dist_base_btf == base_btf) + return -EINVAL; + + r.nr_dist_base_types = btf__type_cnt(r.dist_base_btf); + r.nr_base_types = btf__type_cnt(base_btf); + r.nr_split_types = nr_types - r.nr_dist_base_types; + r.btf = btf; + r.base_btf = base_btf; + + r.id_map = calloc(nr_types, sizeof(*r.id_map)); + r.str_map = calloc(btf_header(r.dist_base_btf)->str_len, sizeof(*r.str_map)); + dist_base_hdr = btf_header(r.dist_base_btf); + base_hdr = btf_header(r.base_btf); + r.dist_str_len = dist_base_hdr->str_len; + r.base_str_len = base_hdr->str_len; + if (!r.id_map || !r.str_map) { + err = -ENOMEM; + goto err_out; + } + + err = btf_relocate_validate_distilled_base(&r); + if (err) + goto err_out; + + /* Split BTF ids need to be adjusted as base and distilled base + * have different numbers of types, changing the start id of split + * BTF. + */ + for (id = r.nr_dist_base_types; id < nr_types; id++) + r.id_map[id] = id + r.nr_base_types - r.nr_dist_base_types; + + /* Build a map from distilled base ids to actual base BTF ids; it is used + * to update split BTF id references. Also build a str_map mapping from + * distilled base BTF names to base BTF names. + */ + err = btf_relocate_map_distilled_base(&r); + if (err) + goto err_out; + + /* Next, rewrite type ids in split BTF, replacing split ids with updated + * ids based on number of types in base BTF, and base ids with + * relocated ids from base_btf. + */ + for (i = 0, id = r.nr_dist_base_types; i < r.nr_split_types; i++, id++) { + err = btf_relocate_rewrite_type_id(&r, id); + if (err) + goto err_out; + } + /* String offsets now need to be updated using the str_map.
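Offsets at or beyond the distilled base's string section length belong to split BTF and are shifted by the size difference between the base and distilled-base string sections; smaller offsets are remapped via str_map.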
*/ + for (i = 0; i < r.nr_split_types; i++) { + err = btf_relocate_rewrite_strs(&r, i + r.nr_dist_base_types); + if (err) + goto err_out; + } + /* Finally reset base BTF to be base_btf */ + btf_set_base_btf(btf, base_btf); + + if (id_map) { + *id_map = r.id_map; + r.id_map = NULL; + } +err_out: + free(r.id_map); + free(r.str_map); + return err; +} diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c index 5401f2df463d..a3be6f8fac09 100644 --- a/tools/lib/bpf/libbpf.c +++ b/tools/lib/bpf/libbpf.c @@ -229,7 +229,30 @@ static const char * const prog_type_name[] = { static int __base_pr(enum libbpf_print_level level, const char *format, va_list args) { - if (level == LIBBPF_DEBUG) + const char *env_var = "LIBBPF_LOG_LEVEL"; + static enum libbpf_print_level min_level = LIBBPF_INFO; + static bool initialized; + + if (!initialized) { + char *verbosity; + + initialized = true; + verbosity = getenv(env_var); + if (verbosity) { + if (strcasecmp(verbosity, "warn") == 0) + min_level = LIBBPF_WARN; + else if (strcasecmp(verbosity, "debug") == 0) + min_level = LIBBPF_DEBUG; + else if (strcasecmp(verbosity, "info") == 0) + min_level = LIBBPF_INFO; + else + fprintf(stderr, "libbpf: unrecognized '%s' envvar value: '%s', should be one of 'warn', 'debug', or 'info'.\n", + env_var, verbosity); + } + } + + /* if too verbose, skip logging */ + if (level > min_level) return 0; return vfprintf(stderr, format, args); @@ -549,6 +572,7 @@ struct bpf_map { bool pinned; bool reused; bool autocreate; + bool autoattach; __u64 map_extra; }; @@ -1377,6 +1401,7 @@ static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name, map->def.value_size = type->size; map->def.max_entries = 1; map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0; + map->autoattach = true; map->st_ops = calloc(1, sizeof(*map->st_ops)); if (!map->st_ops) @@ -4796,6 +4821,20 @@ int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate) return 0; } +int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach) +{ + if (!bpf_map__is_struct_ops(map)) + return libbpf_err(-EINVAL); + + map->autoattach = autoattach; + return 0; +} + +bool bpf_map__autoattach(const struct bpf_map *map) +{ + return map->autoattach; +} + int bpf_map__reuse_fd(struct bpf_map *map, int fd) { struct bpf_map_info info; @@ -10336,7 +10375,7 @@ __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i) struct bpf_map * bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev) { - if (prev == NULL) + if (prev == NULL && obj != NULL) return obj->maps; return __bpf_map__iter(prev, obj, 1); @@ -10345,7 +10384,7 @@ bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev) struct bpf_map * bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next) { - if (next == NULL) { + if (next == NULL && obj != NULL) { if (!obj->nr_maps) return NULL; return obj->maps + obj->nr_maps - 1; @@ -12877,8 +12916,10 @@ struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map) __u32 zero = 0; int err, fd; - if (!bpf_map__is_struct_ops(map)) + if (!bpf_map__is_struct_ops(map)) { + pr_warn("map '%s': can't attach non-struct_ops map\n", map->name); return libbpf_err_ptr(-EINVAL); + } if (map->fd < 0) { pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name); @@ -13671,14 +13712,15 @@ int libbpf_num_possible_cpus(void) static int populate_skeleton_maps(const struct bpf_object *obj, struct bpf_map_skeleton *maps, - size_t map_cnt) + 
size_t map_cnt, size_t map_skel_sz) { int i; for (i = 0; i < map_cnt; i++) { - struct bpf_map **map = maps[i].map; - const char *name = maps[i].name; - void **mmaped = maps[i].mmaped; + struct bpf_map_skeleton *map_skel = (void *)maps + i * map_skel_sz; + struct bpf_map **map = map_skel->map; + const char *name = map_skel->name; + void **mmaped = map_skel->mmaped; *map = bpf_object__find_map_by_name(obj, name); if (!*map) { @@ -13695,13 +13737,14 @@ static int populate_skeleton_maps(const struct bpf_object *obj, static int populate_skeleton_progs(const struct bpf_object *obj, struct bpf_prog_skeleton *progs, - size_t prog_cnt) + size_t prog_cnt, size_t prog_skel_sz) { int i; for (i = 0; i < prog_cnt; i++) { - struct bpf_program **prog = progs[i].prog; - const char *name = progs[i].name; + struct bpf_prog_skeleton *prog_skel = (void *)progs + i * prog_skel_sz; + struct bpf_program **prog = prog_skel->prog; + const char *name = prog_skel->name; *prog = bpf_object__find_program_by_name(obj, name); if (!*prog) { @@ -13742,13 +13785,13 @@ int bpf_object__open_skeleton(struct bpf_object_skeleton *s, } *s->obj = obj; - err = populate_skeleton_maps(obj, s->maps, s->map_cnt); + err = populate_skeleton_maps(obj, s->maps, s->map_cnt, s->map_skel_sz); if (err) { pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err); return libbpf_err(err); } - err = populate_skeleton_progs(obj, s->progs, s->prog_cnt); + err = populate_skeleton_progs(obj, s->progs, s->prog_cnt, s->prog_skel_sz); if (err) { pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err); return libbpf_err(err); @@ -13778,20 +13821,20 @@ int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s) return libbpf_err(-errno); } - err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt); + err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt, s->map_skel_sz); if (err) { pr_warn("failed to populate subskeleton maps: %d\n", err); return libbpf_err(err); } - err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt); + err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt, s->prog_skel_sz); if (err) { pr_warn("failed to populate subskeleton maps: %d\n", err); return libbpf_err(err); } for (var_idx = 0; var_idx < s->var_cnt; var_idx++) { - var_skel = &s->vars[var_idx]; + var_skel = (void *)s->vars + var_idx * s->var_skel_sz; map = *var_skel->map; map_type_id = bpf_map__btf_value_type_id(map); map_type = btf__type_by_id(btf, map_type_id); @@ -13838,10 +13881,11 @@ int bpf_object__load_skeleton(struct bpf_object_skeleton *s) } for (i = 0; i < s->map_cnt; i++) { - struct bpf_map *map = *s->maps[i].map; + struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; + struct bpf_map *map = *map_skel->map; size_t mmap_sz = bpf_map_mmap_sz(map); int prot, map_fd = map->fd; - void **mmaped = s->maps[i].mmaped; + void **mmaped = map_skel->mmaped; if (!mmaped) continue; @@ -13889,8 +13933,9 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) int i, err; for (i = 0; i < s->prog_cnt; i++) { - struct bpf_program *prog = *s->progs[i].prog; - struct bpf_link **link = s->progs[i].link; + struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz; + struct bpf_program *prog = *prog_skel->prog; + struct bpf_link **link = prog_skel->link; if (!prog->autoload || !prog->autoattach) continue; @@ -13922,6 +13967,38 @@ int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) */ } + + for (i = 0; i < s->map_cnt; i++) { + struct bpf_map_skeleton *map_skel = (void *)s->maps 
+ i * s->map_skel_sz; + struct bpf_map *map = *map_skel->map; + struct bpf_link **link; + + if (!map->autocreate || !map->autoattach) + continue; + + /* only struct_ops maps can be attached */ + if (!bpf_map__is_struct_ops(map)) + continue; + + /* skeleton was created with an earlier version of bpftool; notify user */ + if (s->map_skel_sz < offsetofend(struct bpf_map_skeleton, link)) { + pr_warn("map '%s': BPF skeleton version is old, skipping map auto-attachment...\n", + bpf_map__name(map)); + continue; + } + + link = map_skel->link; + if (*link) + continue; + + *link = bpf_map__attach_struct_ops(map); + if (!*link) { + err = -errno; + pr_warn("map '%s': failed to auto-attach: %d\n", bpf_map__name(map), err); + return libbpf_err(err); + } + } + return 0; } @@ -13930,11 +14007,25 @@ void bpf_object__detach_skeleton(struct bpf_object_skeleton *s) { int i; for (i = 0; i < s->prog_cnt; i++) { - struct bpf_link **link = s->progs[i].link; + struct bpf_prog_skeleton *prog_skel = (void *)s->progs + i * s->prog_skel_sz; + struct bpf_link **link = prog_skel->link; bpf_link__destroy(*link); *link = NULL; } + + if (s->map_skel_sz < sizeof(struct bpf_map_skeleton)) + return; + + for (i = 0; i < s->map_cnt; i++) { + struct bpf_map_skeleton *map_skel = (void *)s->maps + i * s->map_skel_sz; + struct bpf_link **link = map_skel->link; + + if (link) { + bpf_link__destroy(*link); + *link = NULL; + } + } } void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s) @@ -13942,8 +14033,7 @@ void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s) { if (!s) return; - if (s->progs) - bpf_object__detach_skeleton(s); + bpf_object__detach_skeleton(s); if (s->obj) bpf_object__close(*s->obj); free(s->maps); diff --git a/tools/lib/bpf/libbpf.h b/tools/lib/bpf/libbpf.h index c3f77d9260fe..64a6a3d323e3 100644 --- a/tools/lib/bpf/libbpf.h +++ b/tools/lib/bpf/libbpf.h @@ -98,7 +98,10 @@ typedef int (*libbpf_print_fn_t)(enum libbpf_print_level level, /** * @brief **libbpf_set_print()** sets user-provided log callback function to - * be used for libbpf warnings and informational messages. + * be used for libbpf warnings and informational messages. If the user callback + * is not set, messages are logged to stderr by default. The verbosity of these + * messages can be controlled by setting the environment variable + * LIBBPF_LOG_LEVEL to either warn, info, or debug. * @param fn The log print function. If NULL, libbpf won't print anything. * @return Pointer to old print function. * @@ -975,6 +978,23 @@ bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *map); LIBBPF_API int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate); LIBBPF_API bool bpf_map__autocreate(const struct bpf_map *map); +/** + * @brief **bpf_map__set_autoattach()** sets whether libbpf should auto-attach + * the map during the BPF skeleton attach phase. + * @param map the BPF map instance + * @param autoattach whether to auto-attach the map during the BPF skeleton attach phase + * @return 0 on success; negative error code, otherwise + */ +LIBBPF_API int bpf_map__set_autoattach(struct bpf_map *map, bool autoattach); + +/** + * @brief **bpf_map__autoattach()** returns whether BPF map is configured to + * auto-attach during BPF skeleton attach phase.
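+ * Auto-attach currently applies only to struct_ops maps and is enabled for them by default.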
+ * @param map the BPF map instance + * @return true if map is set to auto-attach during skeleton attach phase; false, otherwise + */ +LIBBPF_API bool bpf_map__autoattach(const struct bpf_map *map); + /** * @brief **bpf_map__fd()** gets the file descriptor of the passed * BPF map @@ -1669,6 +1689,7 @@ struct bpf_map_skeleton { const char *name; struct bpf_map **map; void **mmaped; + struct bpf_link **link; }; struct bpf_prog_skeleton { diff --git a/tools/lib/bpf/libbpf.map b/tools/lib/bpf/libbpf.map index c1ce8aa3520b..8f0d9ea3b1b4 100644 --- a/tools/lib/bpf/libbpf.map +++ b/tools/lib/bpf/libbpf.map @@ -419,6 +419,10 @@ LIBBPF_1.4.0 { LIBBPF_1.5.0 { global: + btf__distill_base; + btf__relocate; + bpf_map__autoattach; + bpf_map__set_autoattach; bpf_program__attach_sockmap; ring__consume_n; ring_buffer__consume_n; diff --git a/tools/lib/bpf/libbpf_internal.h b/tools/lib/bpf/libbpf_internal.h index a0dcfb82e455..408df59e0771 100644 --- a/tools/lib/bpf/libbpf_internal.h +++ b/tools/lib/bpf/libbpf_internal.h @@ -234,6 +234,9 @@ struct btf_type; struct btf_type *btf_type_by_id(const struct btf *btf, __u32 type_id); const char *btf_kind_str(const struct btf_type *t); const struct btf_type *skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id); +const struct btf_header *btf_header(const struct btf *btf); +void btf_set_base_btf(struct btf *btf, const struct btf *base_btf); +int btf_relocate(struct btf *btf, const struct btf *base_btf, __u32 **id_map); static inline enum btf_func_linkage btf_func_linkage(const struct btf_type *t) { @@ -508,11 +511,33 @@ struct bpf_line_info_min { __u32 line_col; }; +enum btf_field_iter_kind { + BTF_FIELD_ITER_IDS, + BTF_FIELD_ITER_STRS, +}; + +struct btf_field_desc { + /* once-per-type offsets */ + int t_off_cnt, t_offs[2]; + /* member struct size, or zero, if no members */ + int m_sz; + /* repeated per-member offsets */ + int m_off_cnt, m_offs[1]; +}; + +struct btf_field_iter { + struct btf_field_desc desc; + void *p; + int m_idx; + int off_idx; + int vlen; +}; + +int btf_field_iter_init(struct btf_field_iter *it, struct btf_type *t, enum btf_field_iter_kind iter_kind); +__u32 *btf_field_iter_next(struct btf_field_iter *it); typedef int (*type_id_visit_fn)(__u32 *type_id, void *ctx); typedef int (*str_off_visit_fn)(__u32 *str_off, void *ctx); -int btf_type_visit_type_ids(struct btf_type *t, type_id_visit_fn visit, void *ctx); -int btf_type_visit_str_offs(struct btf_type *t, str_off_visit_fn visit, void *ctx); int btf_ext_visit_type_ids(struct btf_ext *btf_ext, type_id_visit_fn visit, void *ctx); int btf_ext_visit_str_offs(struct btf_ext *btf_ext, str_off_visit_fn visit, void *ctx); __s32 btf__find_by_name_kind_own(const struct btf *btf, const char *type_name, @@ -597,13 +622,9 @@ static inline int ensure_good_fd(int fd) return fd; } -static inline int sys_dup2(int oldfd, int newfd) +static inline int sys_dup3(int oldfd, int newfd, int flags) { -#ifdef __NR_dup2 - return syscall(__NR_dup2, oldfd, newfd); -#else - return syscall(__NR_dup3, oldfd, newfd, 0); -#endif + return syscall(__NR_dup3, oldfd, newfd, flags); } /* Point *fixed_fd* to the same file that *tmp_fd* points to. @@ -614,7 +635,7 @@ static inline int reuse_fd(int fixed_fd, int tmp_fd) { int err; - err = sys_dup2(tmp_fd, fixed_fd); + err = sys_dup3(tmp_fd, fixed_fd, O_CLOEXEC); err = err < 0 ? 
-errno : 0; close(tmp_fd); /* clean up temporary FD */ return err; diff --git a/tools/lib/bpf/linker.c b/tools/lib/bpf/linker.c index 0d4be829551b..9cd3d4109788 100644 --- a/tools/lib/bpf/linker.c +++ b/tools/lib/bpf/linker.c @@ -957,19 +957,33 @@ static int check_btf_str_off(__u32 *str_off, void *ctx) static int linker_sanity_check_btf(struct src_obj *obj) { struct btf_type *t; - int i, n, err = 0; + int i, n, err; if (!obj->btf) return 0; n = btf__type_cnt(obj->btf); for (i = 1; i < n; i++) { + struct btf_field_iter it; + __u32 *type_id, *str_off; + t = btf_type_by_id(obj->btf, i); - err = err ?: btf_type_visit_type_ids(t, check_btf_type_id, obj->btf); - err = err ?: btf_type_visit_str_offs(t, check_btf_str_off, obj->btf); + err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_IDS); if (err) return err; + while ((type_id = btf_field_iter_next(&it))) { + if (*type_id >= n) + return -EINVAL; + } + + err = btf_field_iter_init(&it, t, BTF_FIELD_ITER_STRS); + if (err) + return err; + while ((str_off = btf_field_iter_next(&it))) { + if (!btf__str_by_offset(obj->btf, *str_off)) + return -EINVAL; + } } return 0; @@ -2213,10 +2227,17 @@ static int linker_fixup_btf(struct src_obj *obj) vi = btf_var_secinfos(t); for (j = 0, m = btf_vlen(t); j < m; j++, vi++) { const struct btf_type *vt = btf__type_by_id(obj->btf, vi->type); - const char *var_name = btf__str_by_offset(obj->btf, vt->name_off); - int var_linkage = btf_var(vt)->linkage; + const char *var_name; + int var_linkage; Elf64_Sym *sym; + /* could be a variable or function */ + if (!btf_is_var(vt)) + continue; + + var_name = btf__str_by_offset(obj->btf, vt->name_off); + var_linkage = btf_var(vt)->linkage; + /* no need to patch up static or extern vars */ if (var_linkage != BTF_VAR_GLOBAL_ALLOCATED) continue; @@ -2234,26 +2255,10 @@ static int linker_fixup_btf(struct src_obj *obj) return 0; } -static int remap_type_id(__u32 *type_id, void *ctx) -{ - int *id_map = ctx; - int new_id = id_map[*type_id]; - - /* Error out if the type wasn't remapped. Ignore VOID which stays VOID. */ - if (new_id == 0 && *type_id != 0) { - pr_warn("failed to find new ID mapping for original BTF type ID %u\n", *type_id); - return -EINVAL; - } - - *type_id = id_map[*type_id]; - - return 0; -} - static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj) { const struct btf_type *t; - int i, j, n, start_id, id; + int i, j, n, start_id, id, err; const char *name; if (!obj->btf) @@ -2324,9 +2329,25 @@ static int linker_append_btf(struct bpf_linker *linker, struct src_obj *obj) n = btf__type_cnt(linker->btf); for (i = start_id; i < n; i++) { struct btf_type *dst_t = btf_type_by_id(linker->btf, i); + struct btf_field_iter it; + __u32 *type_id; - if (btf_type_visit_type_ids(dst_t, remap_type_id, obj->btf_type_map)) - return -EINVAL; + err = btf_field_iter_init(&it, dst_t, BTF_FIELD_ITER_IDS); + if (err) + return err; + + while ((type_id = btf_field_iter_next(&it))) { + int new_id = obj->btf_type_map[*type_id]; + + /* Error out if the type wasn't remapped. Ignore VOID which stays VOID. 
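(A btf_type_map entry of 0 is only legitimate for VOID, which is always id 0.)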
*/ + if (new_id == 0 && *type_id != 0) { + pr_warn("failed to find new ID mapping for original BTF type ID %u\n", + *type_id); + return -EINVAL; + } + + *type_id = obj->btf_type_map[*type_id]; + } } /* Rewrite VAR/FUNC underlying types (i.e., FUNC's FUNC_PROTO and VAR's diff --git a/tools/net/ynl/Makefile b/tools/net/ynl/Makefile index 8e9e09d84e26..d1cdf2a8f826 100644 --- a/tools/net/ynl/Makefile +++ b/tools/net/ynl/Makefile @@ -2,9 +2,12 @@ SUBDIRS = lib generated samples -all: $(SUBDIRS) +all: $(SUBDIRS) libynl.a samples: | lib generated +libynl.a: | lib generated + @echo -e "\tAR $@" + @ar rcs $@ lib/ynl.o generated/*-user.o $(SUBDIRS): @if [ -f "$@/Makefile" ] ; then \ @@ -17,5 +20,6 @@ clean distclean: $(MAKE) -C $$dir $@; \ fi \ done + rm -f libynl.a .PHONY: all clean distclean $(SUBDIRS) diff --git a/tools/net/ynl/Makefile.deps b/tools/net/ynl/Makefile.deps index f4e8eb79c1b8..0712b5e82eb7 100644 --- a/tools/net/ynl/Makefile.deps +++ b/tools/net/ynl/Makefile.deps @@ -16,7 +16,8 @@ get_hdr_inc=-D$(1) -include $(UAPI_PATH)/linux/$(2) CFLAGS_devlink:=$(call get_hdr_inc,_LINUX_DEVLINK_H_,devlink.h) CFLAGS_dpll:=$(call get_hdr_inc,_LINUX_DPLL_H,dpll.h) -CFLAGS_ethtool:=$(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_H_,ethtool_netlink.h) +CFLAGS_ethtool:=$(call get_hdr_inc,_LINUX_ETHTOOL_H,ethtool.h) \ + $(call get_hdr_inc,_LINUX_ETHTOOL_NETLINK_H_,ethtool_netlink.h) CFLAGS_handshake:=$(call get_hdr_inc,_LINUX_HANDSHAKE_H,handshake.h) CFLAGS_mptcp_pm:=$(call get_hdr_inc,_LINUX_MPTCP_PM_H,mptcp_pm.h) CFLAGS_netdev:=$(call get_hdr_inc,_LINUX_NETDEV_H,netdev.h) @@ -25,3 +26,4 @@ CFLAGS_nfsd:=$(call get_hdr_inc,_LINUX_NFSD_NETLINK_H,nfsd_netlink.h) CFLAGS_ovs_datapath:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h) CFLAGS_ovs_flow:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h) CFLAGS_ovs_vport:=$(call get_hdr_inc,__LINUX_OPENVSWITCH_H,openvswitch.h) +CFLAGS_tcp_metrics:=$(call get_hdr_inc,_LINUX_TCP_METRICS_H,tcp_metrics.h) diff --git a/tools/net/ynl/lib/Makefile b/tools/net/ynl/lib/Makefile index dfff3ecd1cba..2887cc5de530 100644 --- a/tools/net/ynl/lib/Makefile +++ b/tools/net/ynl/lib/Makefile @@ -14,7 +14,9 @@ include $(wildcard *.d) all: ynl.a ynl.a: $(OBJS) - ar rcs $@ $(OBJS) + @echo -e "\tAR $@" + @ar rcs $@ $(OBJS) + clean: rm -f *.o *.d *~ rm -rf __pycache__ diff --git a/tools/net/ynl/lib/ynl-priv.h b/tools/net/ynl/lib/ynl-priv.h index 6cf890080dc0..3c09a7bbfba5 100644 --- a/tools/net/ynl/lib/ynl-priv.h +++ b/tools/net/ynl/lib/ynl-priv.h @@ -45,17 +45,17 @@ struct ynl_policy_attr { enum ynl_policy_type type; unsigned int len; const char *name; - struct ynl_policy_nest *nest; + const struct ynl_policy_nest *nest; }; struct ynl_policy_nest { unsigned int max_attr; - struct ynl_policy_attr *table; + const struct ynl_policy_attr *table; }; struct ynl_parse_arg { struct ynl_sock *ys; - struct ynl_policy_nest *rsp_policy; + const struct ynl_policy_nest *rsp_policy; void *data; }; @@ -79,7 +79,7 @@ static inline void *ynl_dump_obj_next(void *obj) struct ynl_dump_list_type *list; uptr -= offsetof(struct ynl_dump_list_type, data); - list = (void *)uptr; + list = (struct ynl_dump_list_type *)uptr; uptr = (unsigned long)list->next; uptr += offsetof(struct ynl_dump_list_type, data); @@ -119,7 +119,7 @@ struct ynl_dump_state { }; struct ynl_ntf_info { - struct ynl_policy_nest *policy; + const struct ynl_policy_nest *policy; ynl_parse_cb_t cb; size_t alloc_sz; void (*free)(struct ynl_ntf_base_type *ntf); @@ -139,7 +139,7 @@ int ynl_error_parse(struct ynl_parse_arg *yarg, const char 
*msg); static inline struct nlmsghdr *ynl_nlmsg_put_header(void *buf) { - struct nlmsghdr *nlh = buf; + struct nlmsghdr *nlh = (struct nlmsghdr *)buf; memset(nlh, 0, sizeof(*nlh)); nlh->nlmsg_len = NLMSG_HDRLEN; @@ -196,7 +196,7 @@ static inline void *ynl_attr_data(const struct nlattr *attr) static inline void *ynl_attr_data_end(const struct nlattr *attr) { - return ynl_attr_data(attr) + ynl_attr_data_len(attr); + return (char *)ynl_attr_data(attr) + ynl_attr_data_len(attr); } #define ynl_attr_for_each(attr, nlh, fixed_hdr_sz) \ @@ -228,7 +228,7 @@ ynl_attr_next(const void *end, const struct nlattr *prev) { struct nlattr *attr; - attr = (void *)((char *)prev + NLA_ALIGN(prev->nla_len)); + attr = (struct nlattr *)((char *)prev + NLA_ALIGN(prev->nla_len)); return ynl_attr_if_good(end, attr); } @@ -237,8 +237,8 @@ ynl_attr_first(const void *start, size_t len, size_t skip) { struct nlattr *attr; - attr = (void *)((char *)start + NLMSG_ALIGN(skip)); - return ynl_attr_if_good(start + len, attr); + attr = (struct nlattr *)((char *)start + NLMSG_ALIGN(skip)); + return ynl_attr_if_good((char *)start + len, attr); } static inline bool @@ -262,9 +262,9 @@ ynl_attr_nest_start(struct nlmsghdr *nlh, unsigned int attr_type) struct nlattr *attr; if (__ynl_attr_put_overflow(nlh, 0)) - return ynl_nlmsg_end_addr(nlh) - NLA_HDRLEN; + return (struct nlattr *)ynl_nlmsg_end_addr(nlh) - 1; - attr = ynl_nlmsg_end_addr(nlh); + attr = (struct nlattr *)ynl_nlmsg_end_addr(nlh); attr->nla_type = attr_type | NLA_F_NESTED; nlh->nlmsg_len += NLA_HDRLEN; @@ -286,7 +286,7 @@ ynl_attr_put(struct nlmsghdr *nlh, unsigned int attr_type, if (__ynl_attr_put_overflow(nlh, size)) return; - attr = ynl_nlmsg_end_addr(nlh); + attr = (struct nlattr *)ynl_nlmsg_end_addr(nlh); attr->nla_type = attr_type; attr->nla_len = NLA_HDRLEN + size; @@ -305,10 +305,10 @@ ynl_attr_put_str(struct nlmsghdr *nlh, unsigned int attr_type, const char *str) if (__ynl_attr_put_overflow(nlh, len)) return; - attr = ynl_nlmsg_end_addr(nlh); + attr = (struct nlattr *)ynl_nlmsg_end_addr(nlh); attr->nla_type = attr_type; - strcpy(ynl_attr_data(attr), str); + strcpy((char *)ynl_attr_data(attr), str); attr->nla_len = NLA_HDRLEN + NLA_ALIGN(len); nlh->nlmsg_len += NLMSG_ALIGN(attr->nla_len); diff --git a/tools/net/ynl/lib/ynl.c b/tools/net/ynl/lib/ynl.c index 4b9c091fc86b..fcb18a5a6d70 100644 --- a/tools/net/ynl/lib/ynl.c +++ b/tools/net/ynl/lib/ynl.c @@ -46,7 +46,7 @@ /* -- Netlink boiler plate */ static int -ynl_err_walk_report_one(struct ynl_policy_nest *policy, unsigned int type, +ynl_err_walk_report_one(const struct ynl_policy_nest *policy, unsigned int type, char *str, int str_sz, int *n) { if (!policy) { @@ -75,8 +75,8 @@ ynl_err_walk_report_one(struct ynl_policy_nest *policy, unsigned int type, static int ynl_err_walk(struct ynl_sock *ys, void *start, void *end, unsigned int off, - struct ynl_policy_nest *policy, char *str, int str_sz, - struct ynl_policy_nest **nest_pol) + const struct ynl_policy_nest *policy, char *str, int str_sz, + const struct ynl_policy_nest **nest_pol) { unsigned int astart_off, aend_off; const struct nlattr *attr; @@ -206,7 +206,7 @@ ynl_ext_ack_check(struct ynl_sock *ys, const struct nlmsghdr *nlh, bad_attr[n] = '\0'; } if (tb[NLMSGERR_ATTR_MISS_TYPE]) { - struct ynl_policy_nest *nest_pol = NULL; + const struct ynl_policy_nest *nest_pol = NULL; unsigned int n, off, type; void *start, *end; int n2; @@ -296,7 +296,7 @@ static int ynl_cb_done(const struct nlmsghdr *nlh, struct ynl_parse_arg *yarg) int ynl_attr_validate(struct 
ynl_parse_arg *yarg, const struct nlattr *attr) { - struct ynl_policy_attr *policy; + const struct ynl_policy_attr *policy; unsigned int type, len; unsigned char *data; diff --git a/tools/net/ynl/lib/ynl.h b/tools/net/ynl/lib/ynl.h index eef7c6324ed4..6cd570b283ea 100644 --- a/tools/net/ynl/lib/ynl.h +++ b/tools/net/ynl/lib/ynl.h @@ -76,7 +76,7 @@ struct ynl_sock { struct ynl_ntf_base_type **ntf_last_next; struct nlmsghdr *nlh; - struct ynl_policy_nest *req_policy; + const struct ynl_policy_nest *req_policy; unsigned char *tx_buf; unsigned char *rx_buf; unsigned char raw_buf[]; diff --git a/tools/net/ynl/lib/ynl.py b/tools/net/ynl/lib/ynl.py index 35e666928119..d42c1d605969 100644 --- a/tools/net/ynl/lib/ynl.py +++ b/tools/net/ynl/lib/ynl.py @@ -743,6 +743,8 @@ class YnlFamily(SpecFamily): decoded = attr.as_scalar(attr_spec['type'], attr_spec.byte_order) if 'enum' in attr_spec: decoded = self._decode_enum(decoded, attr_spec) + elif attr_spec.display_hint: + decoded = self._formatted_string(decoded, attr_spec.display_hint) elif attr_spec["type"] == 'indexed-array': decoded = self._decode_array_attr(attr, attr_spec) elif attr_spec["type"] == 'bitfield32': diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py index a42d62b23ee0..51529fabd517 100755 --- a/tools/net/ynl/ynl-gen-c.py +++ b/tools/net/ynl/ynl-gen-c.py @@ -59,9 +59,9 @@ class Type(SpecAttr): if 'nested-attributes' in attr: self.nested_attrs = attr['nested-attributes'] if self.nested_attrs == family.name: - self.nested_render_name = c_lower(f"{family.name}") + self.nested_render_name = c_lower(f"{family.ident_name}") else: - self.nested_render_name = c_lower(f"{family.name}_{self.nested_attrs}") + self.nested_render_name = c_lower(f"{family.ident_name}_{self.nested_attrs}") if self.nested_attrs in self.family.consts: self.nested_struct_type = 'struct ' + self.nested_render_name + '_' @@ -693,9 +693,9 @@ class Struct: self.nested = type_list is None if family.name == c_lower(space_name): - self.render_name = c_lower(family.name) + self.render_name = c_lower(family.ident_name) else: - self.render_name = c_lower(family.name + '-' + space_name) + self.render_name = c_lower(family.ident_name + '-' + space_name) self.struct_name = 'struct ' + self.render_name if self.nested and space_name in family.consts: self.struct_name += '_' @@ -761,7 +761,7 @@ class EnumEntry(SpecEnumEntry): class EnumSet(SpecEnumSet): def __init__(self, family, yaml): - self.render_name = c_lower(family.name + '-' + yaml['name']) + self.render_name = c_lower(family.ident_name + '-' + yaml['name']) if 'enum-name' in yaml: if yaml['enum-name']: @@ -777,7 +777,7 @@ class EnumSet(SpecEnumSet): else: self.user_type = 'int' - self.value_pfx = yaml.get('name-prefix', f"{family.name}-{yaml['name']}-") + self.value_pfx = yaml.get('name-prefix', f"{family.ident_name}-{yaml['name']}-") super().__init__(family, yaml) @@ -802,9 +802,9 @@ class AttrSet(SpecAttrSet): if 'name-prefix' in yaml: pfx = yaml['name-prefix'] elif self.name == family.name: - pfx = family.name + '-a-' + pfx = family.ident_name + '-a-' else: - pfx = f"{family.name}-a-{self.name}-" + pfx = f"{family.ident_name}-a-{self.name}-" self.name_prefix = c_upper(pfx) self.max_name = c_upper(self.yaml.get('attr-max-name', f"{self.name_prefix}max")) self.cnt_name = c_upper(self.yaml.get('attr-cnt-name', f"__{self.name_prefix}max")) @@ -861,7 +861,7 @@ class Operation(SpecOperation): def __init__(self, family, yaml, req_value, rsp_value): super().__init__(family, yaml, req_value, rsp_value) - 
self.render_name = c_lower(family.name + '_' + self.name) + self.render_name = c_lower(family.ident_name + '_' + self.name) self.dual_policy = ('do' in yaml and 'request' in yaml['do']) and \ ('dump' in yaml and 'request' in yaml['dump']) @@ -911,11 +911,11 @@ class Family(SpecFamily): if 'uapi-header' in self.yaml: self.uapi_header = self.yaml['uapi-header'] else: - self.uapi_header = f"linux/{self.name}.h" + self.uapi_header = f"linux/{self.ident_name}.h" if self.uapi_header.startswith("linux/") and self.uapi_header.endswith('.h'): self.uapi_header_name = self.uapi_header[6:-2] else: - self.uapi_header_name = self.name + self.uapi_header_name = self.ident_name def resolve(self): self.resolve_up(super()) @@ -923,7 +923,7 @@ class Family(SpecFamily): if self.yaml.get('protocol', 'genetlink') not in {'genetlink', 'genetlink-c', 'genetlink-legacy'}: raise Exception("Codegen only supported for genetlink") - self.c_name = c_lower(self.name) + self.c_name = c_lower(self.ident_name) if 'name-prefix' in self.yaml['operations']: self.op_prefix = c_upper(self.yaml['operations']['name-prefix']) else: @@ -1507,12 +1507,12 @@ def print_dump_prototype(ri): def put_typol_fwd(cw, struct): - cw.p(f'extern struct ynl_policy_nest {struct.render_name}_nest;') + cw.p(f'extern const struct ynl_policy_nest {struct.render_name}_nest;') def put_typol(cw, struct): type_max = struct.attr_set.max_name - cw.block_start(line=f'struct ynl_policy_attr {struct.render_name}_policy[{type_max} + 1] =') + cw.block_start(line=f'const struct ynl_policy_attr {struct.render_name}_policy[{type_max} + 1] =') for _, arg in struct.member_list(): arg.attr_typol(cw) @@ -1520,7 +1520,7 @@ def put_typol(cw, struct): cw.block_end(line=';') cw.nl() - cw.block_start(line=f'struct ynl_policy_nest {struct.render_name}_nest =') + cw.block_start(line=f'const struct ynl_policy_nest {struct.render_name}_nest =') cw.p(f'.max_attr = {type_max},') cw.p(f'.table = {struct.render_name}_policy,') cw.block_end(line=';') @@ -2173,7 +2173,7 @@ def print_kernel_op_table_fwd(family, cw, terminate): exported = not kernel_can_gen_family_struct(family) if not terminate or exported: - cw.p(f"/* Ops table for {family.name} */") + cw.p(f"/* Ops table for {family.ident_name} */") pol_to_struct = {'global': 'genl_small_ops', 'per-op': 'genl_ops', @@ -2225,12 +2225,12 @@ def print_kernel_op_table_fwd(family, cw, terminate): continue if 'do' in op: - name = c_lower(f"{family.name}-nl-{op_name}-doit") + name = c_lower(f"{family.ident_name}-nl-{op_name}-doit") cw.write_func_prot('int', name, ['struct sk_buff *skb', 'struct genl_info *info'], suffix=';') if 'dump' in op: - name = c_lower(f"{family.name}-nl-{op_name}-dumpit") + name = c_lower(f"{family.ident_name}-nl-{op_name}-dumpit") cw.write_func_prot('int', name, ['struct sk_buff *skb', 'struct netlink_callback *cb'], suffix=';') cw.nl() @@ -2256,13 +2256,13 @@ def print_kernel_op_table(family, cw): for x in op['dont-validate']])), ) for op_mode in ['do', 'dump']: if op_mode in op: - name = c_lower(f"{family.name}-nl-{op_name}-{op_mode}it") + name = c_lower(f"{family.ident_name}-nl-{op_name}-{op_mode}it") members.append((op_mode + 'it', name)) if family.kernel_policy == 'per-op': struct = Struct(family, op['attribute-set'], type_list=op['do']['request']['attributes']) - name = c_lower(f"{family.name}-{op_name}-nl-policy") + name = c_lower(f"{family.ident_name}-{op_name}-nl-policy") members.append(('policy', name)) members.append(('maxattr', struct.attr_max_val.enum_name)) if 'flags' in op: @@ -2294,7 +2294,7 @@ def 
print_kernel_op_table(family, cw): members.append(('validate', ' | '.join([c_upper('genl-dont-validate-' + x) for x in dont_validate])), ) - name = c_lower(f"{family.name}-nl-{op_name}-{op_mode}it") + name = c_lower(f"{family.ident_name}-nl-{op_name}-{op_mode}it") if 'pre' in op[op_mode]: members.append((cb_names[op_mode]['pre'], c_lower(op[op_mode]['pre']))) members.append((op_mode + 'it', name)) @@ -2305,9 +2305,9 @@ def print_kernel_op_table(family, cw): type_list=op[op_mode]['request']['attributes']) if op.dual_policy: - name = c_lower(f"{family.name}-{op_name}-{op_mode}-nl-policy") + name = c_lower(f"{family.ident_name}-{op_name}-{op_mode}-nl-policy") else: - name = c_lower(f"{family.name}-{op_name}-nl-policy") + name = c_lower(f"{family.ident_name}-{op_name}-nl-policy") members.append(('policy', name)) members.append(('maxattr', struct.attr_max_val.enum_name)) flags = (op['flags'] if 'flags' in op else []) + ['cmd-cap-' + op_mode] @@ -2326,7 +2326,7 @@ def print_kernel_mcgrp_hdr(family, cw): cw.block_start('enum') for grp in family.mcgrps['list']: - grp_id = c_upper(f"{family.name}-nlgrp-{grp['name']},") + grp_id = c_upper(f"{family.ident_name}-nlgrp-{grp['name']},") cw.p(grp_id) cw.block_end(';') cw.nl() @@ -2339,7 +2339,7 @@ def print_kernel_mcgrp_src(family, cw): cw.block_start('static const struct genl_multicast_group ' + family.c_name + '_nl_mcgrps[] =') for grp in family.mcgrps['list']: name = grp['name'] - grp_id = c_upper(f"{family.name}-nlgrp-{name}") + grp_id = c_upper(f"{family.ident_name}-nlgrp-{name}") cw.p('[' + grp_id + '] = { "' + name + '", },') cw.block_end(';') cw.nl() @@ -2361,7 +2361,7 @@ def print_kernel_family_struct_src(family, cw): if not kernel_can_gen_family_struct(family): return - cw.block_start(f"struct genl_family {family.name}_nl_family __ro_after_init =") + cw.block_start(f"struct genl_family {family.ident_name}_nl_family __ro_after_init =") cw.p('.name\t\t= ' + family.fam_key + ',') cw.p('.version\t= ' + family.ver_key + ',') cw.p('.netnsok\t= true,') @@ -2429,7 +2429,7 @@ def render_uapi(family, cw): cw.p(' */') uapi_enum_start(family, cw, const, 'name') - name_pfx = const.get('name-prefix', f"{family.name}-{const['name']}-") + name_pfx = const.get('name-prefix', f"{family.ident_name}-{const['name']}-") for entry in enum.entries.values(): suffix = ',' if entry.value_change: @@ -2451,7 +2451,7 @@ def render_uapi(family, cw): cw.nl() elif const['type'] == 'const': defines.append([c_upper(family.get('c-define-name', - f"{family.name}-{const['name']}")), + f"{family.ident_name}-{const['name']}")), const['value']]) if defines: @@ -2529,7 +2529,7 @@ def render_uapi(family, cw): defines = [] for grp in family.mcgrps['list']: name = grp['name'] - defines.append([c_upper(grp.get('c-define-name', f"{family.name}-mcgrp-{name}")), + defines.append([c_upper(grp.get('c-define-name', f"{family.ident_name}-mcgrp-{name}")), f'{name}']) cw.nl() if defines: diff --git a/tools/net/ynl/ynl-gen-rst.py b/tools/net/ynl/ynl-gen-rst.py index 657e881d2ea4..6c56d0d726b4 100755 --- a/tools/net/ynl/ynl-gen-rst.py +++ b/tools/net/ynl/ynl-gen-rst.py @@ -49,7 +49,7 @@ def inline(text: str) -> str: def sanitize(text: str) -> str: """Remove newlines and multiple spaces""" # This is useful for some fields that are spread across multiple lines - return str(text).replace("\n", "").strip() + return str(text).replace("\n", " ").strip() def rst_fields(key: str, value: str, level: int = 0) -> str: @@ -156,7 +156,10 @@ def parse_do(do_dict: Dict[str, Any], level: int = 0) -> str: lines = [] 
for key in do_dict.keys(): lines.append(rst_paragraph(bold(key), level + 1)) - lines.append(parse_do_attributes(do_dict[key], level + 1) + "\n") + if key in ['request', 'reply']: + lines.append(parse_do_attributes(do_dict[key], level + 1) + "\n") + else: + lines.append(headroom(level + 2) + do_dict[key] + "\n") return "\n".join(lines) @@ -172,13 +175,13 @@ def parse_do_attributes(attrs: Dict[str, Any], level: int = 0) -> str: def parse_operations(operations: List[Dict[str, Any]], namespace: str) -> str: """Parse operations block""" - preprocessed = ["name", "doc", "title", "do", "dump"] + preprocessed = ["name", "doc", "title", "do", "dump", "flags"] linkable = ["fixed-header", "attribute-set"] lines = [] for operation in operations: lines.append(rst_section(namespace, 'operation', operation["name"])) - lines.append(rst_paragraph(sanitize(operation["doc"])) + "\n") + lines.append(rst_paragraph(operation["doc"]) + "\n") for key in operation.keys(): if key in preprocessed: @@ -188,6 +191,8 @@ def parse_operations(operations: List[Dict[str, Any]], namespace: str) -> str: if key in linkable: value = rst_ref(namespace, key, value) lines.append(rst_fields(key, value, 0)) + if 'flags' in operation: + lines.append(rst_fields('flags', rst_list_inline(operation['flags']))) if "do" in operation: lines.append(rst_paragraph(":do:", 0)) diff --git a/tools/testing/selftests/bpf/DENYLIST.aarch64 b/tools/testing/selftests/bpf/DENYLIST.aarch64 index 0445ac38bc07..3c7c3e79aa93 100644 --- a/tools/testing/selftests/bpf/DENYLIST.aarch64 +++ b/tools/testing/selftests/bpf/DENYLIST.aarch64 @@ -6,6 +6,7 @@ kprobe_multi_test # needs CONFIG_FPROBE module_attach # prog 'kprobe_multi': failed to auto-attach: -95 fentry_test/fentry_many_args # fentry_many_args:FAIL:fentry_many_args_attach unexpected error: -524 fexit_test/fexit_many_args # fexit_many_args:FAIL:fexit_many_args_attach unexpected error: -524 +tracing_struct/struct_many_args # struct_many_args:FAIL:tracing_struct_many_args__attach unexpected error: -524 fill_link_info/kprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95 fill_link_info/kretprobe_multi_link_info # bpf_program__attach_kprobe_multi_opts unexpected error: -95 fill_link_info/kprobe_multi_invalid_ubuff # bpf_program__attach_kprobe_multi_opts unexpected error: -95 diff --git a/tools/testing/selftests/bpf/DENYLIST.s390x b/tools/testing/selftests/bpf/DENYLIST.s390x index c34adf39eeb2..3ebd77206f98 100644 --- a/tools/testing/selftests/bpf/DENYLIST.s390x +++ b/tools/testing/selftests/bpf/DENYLIST.s390x @@ -1,9 +1,5 @@ # TEMPORARY # Alphabetical order -exceptions # JIT does not support calling kfunc bpf_throw (exceptions) get_stack_raw_tp # user_stack corrupted user stack (no backchain userspace) stacktrace_build_id # compare_map_keys stackid_hmap vs. stackmap err -2 errno 2 (?) verifier_iterating_callbacks -verifier_arena # JIT does not support arena -arena_htab # JIT does not support arena -arena_atomics diff --git a/tools/testing/selftests/bpf/bpf_arena_common.h b/tools/testing/selftests/bpf/bpf_arena_common.h index 567491f3e1b5..68a51dcc0669 100644 --- a/tools/testing/selftests/bpf/bpf_arena_common.h +++ b/tools/testing/selftests/bpf/bpf_arena_common.h @@ -34,10 +34,12 @@ #if defined(__BPF_FEATURE_ADDR_SPACE_CAST) && !defined(BPF_ARENA_FORCE_ASM) #define __arena __attribute__((address_space(1))) +#define __arena_global __attribute__((address_space(1))) #define cast_kern(ptr) /* nop for bpf prog. emitted by LLVM */ #define cast_user(ptr) /* nop for bpf prog. 
emitted by LLVM */ #else #define __arena +#define __arena_global SEC(".addr_space.1") #define cast_kern(ptr) bpf_addr_space_cast(ptr, 0, 1) #define cast_user(ptr) bpf_addr_space_cast(ptr, 1, 0) #endif diff --git a/tools/testing/selftests/bpf/bpf_experimental.h b/tools/testing/selftests/bpf/bpf_experimental.h index 3d9e4b8c6b81..828556cdc2f0 100644 --- a/tools/testing/selftests/bpf/bpf_experimental.h +++ b/tools/testing/selftests/bpf/bpf_experimental.h @@ -163,7 +163,7 @@ struct bpf_iter_task_vma; extern int bpf_iter_task_vma_new(struct bpf_iter_task_vma *it, struct task_struct *task, - unsigned long addr) __ksym; + __u64 addr) __ksym; extern struct vm_area_struct *bpf_iter_task_vma_next(struct bpf_iter_task_vma *it) __ksym; extern void bpf_iter_task_vma_destroy(struct bpf_iter_task_vma *it) __ksym; @@ -351,6 +351,7 @@ l_true: \ l_continue:; \ }) #else +#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ #define can_loop \ ({ __label__ l_break, l_continue; \ bool ret = true; \ @@ -376,6 +377,33 @@ l_true: \ l_break: break; \ l_continue:; \ }) +#else +#define can_loop \ + ({ __label__ l_break, l_continue; \ + bool ret = true; \ + asm volatile goto("1:.byte 0xe5; \ + .byte 0; \ + .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \ + .short 0" \ + :::: l_break); \ + goto l_continue; \ + l_break: ret = false; \ + l_continue:; \ + ret; \ + }) + +#define cond_break \ + ({ __label__ l_break, l_continue; \ + asm volatile goto("1:.byte 0xe5; \ + .byte 0; \ + .long (((%l[l_break] - 1b - 8) / 8) & 0xffff) << 16; \ + .short 0" \ + :::: l_break); \ + goto l_continue; \ + l_break: break; \ + l_continue:; \ + }) +#endif #endif #ifndef bpf_nop_mov @@ -524,7 +552,7 @@ extern void bpf_iter_css_destroy(struct bpf_iter_css *it) __weak __ksym; extern int bpf_wq_init(struct bpf_wq *wq, void *p__map, unsigned int flags) __weak __ksym; extern int bpf_wq_start(struct bpf_wq *wq, unsigned int flags) __weak __ksym; extern int bpf_wq_set_callback_impl(struct bpf_wq *wq, - int (callback_fn)(void *map, int *key, struct bpf_wq *wq), + int (callback_fn)(void *map, int *key, void *value), unsigned int flags__k, void *aux__ign) __ksym; #define bpf_wq_set_callback(timer, cb, flags) \ bpf_wq_set_callback_impl(timer, cb, flags, NULL) diff --git a/tools/testing/selftests/bpf/bpf_kfuncs.h b/tools/testing/selftests/bpf/bpf_kfuncs.h index be91a6919315..3b6675ab4086 100644 --- a/tools/testing/selftests/bpf/bpf_kfuncs.h +++ b/tools/testing/selftests/bpf/bpf_kfuncs.h @@ -77,5 +77,5 @@ extern int bpf_verify_pkcs7_signature(struct bpf_dynptr *data_ptr, struct bpf_key *trusted_keyring) __ksym; extern bool bpf_session_is_return(void) __ksym __weak; -extern long *bpf_session_cookie(void) __ksym __weak; +extern __u64 *bpf_session_cookie(void) __ksym __weak; #endif diff --git a/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c b/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c index b1dd889d5d7d..948eb3962732 100644 --- a/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c +++ b/tools/testing/selftests/bpf/bpf_test_no_cfi/bpf_test_no_cfi.c @@ -22,12 +22,12 @@ static int dummy_init_member(const struct btf_type *t, return 0; } -static int dummy_reg(void *kdata) +static int dummy_reg(void *kdata, struct bpf_link *link) { return 0; } -static void dummy_unreg(void *kdata) +static void dummy_unreg(void *kdata, struct bpf_link *link) { } diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c index 2a18bd320e92..f8962a1dd397 100644 --- 
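Context for the bpf_experimental.h hunk above: the new #else branch hand-assembles the same may_goto instruction (opcode byte 0xe5) as the little-endian variant, but shifts the 16-bit jump offset into the position a big-endian target expects. Usage is unchanged on both byte orders; a minimal sketch (loop bodies illustrative):

    /* can_loop/cond_break lower to may_goto, which gives the verifier a
     * bounded exit from an otherwise unbounded loop.
     */
    int i = 0, sum = 0;

    while (can_loop)        /* exits once the may_goto budget is exhausted */
            sum += i++;

    for (i = 0; i < (1 << 20); i++) {
            sum += i;
            cond_break;     /* same mechanism, expressed as an explicit break */
    }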
a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod.c @@ -53,6 +53,13 @@ struct bpf_testmod_struct_arg_4 { int b; }; +struct bpf_testmod_struct_arg_5 { + char a; + short b; + int c; + long d; +}; + __bpf_hook_start(); noinline int @@ -110,6 +117,15 @@ bpf_testmod_test_struct_arg_8(u64 a, void *b, short c, int d, void *e, return bpf_testmod_test_struct_arg_result; } +noinline int +bpf_testmod_test_struct_arg_9(u64 a, void *b, short c, int d, void *e, char f, + short g, struct bpf_testmod_struct_arg_5 h, long i) +{ + bpf_testmod_test_struct_arg_result = a + (long)b + c + d + (long)e + + f + g + h.a + h.b + h.c + h.d + i; + return bpf_testmod_test_struct_arg_result; +} + noinline int bpf_testmod_test_arg_ptr_to_struct(struct bpf_testmod_struct_arg_1 *a) { bpf_testmod_test_struct_arg_result = a->a; @@ -154,6 +170,42 @@ __bpf_kfunc void bpf_kfunc_common_test(void) { } +__bpf_kfunc void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr, + struct bpf_dynptr *ptr__nullable) +{ +} + +__bpf_kfunc struct bpf_testmod_ctx * +bpf_testmod_ctx_create(int *err) +{ + struct bpf_testmod_ctx *ctx; + + ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); + if (!ctx) { + *err = -ENOMEM; + return NULL; + } + refcount_set(&ctx->usage, 1); + + return ctx; +} + +static void testmod_free_cb(struct rcu_head *head) +{ + struct bpf_testmod_ctx *ctx; + + ctx = container_of(head, struct bpf_testmod_ctx, rcu); + kfree(ctx); +} + +__bpf_kfunc void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx) +{ + if (!ctx) + return; + if (refcount_dec_and_test(&ctx->usage)) + call_rcu(&ctx->rcu, testmod_free_cb); +} + struct bpf_testmod_btf_type_tag_1 { int a; }; @@ -269,6 +321,7 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, struct bpf_testmod_struct_arg_2 struct_arg2 = {2, 3}; struct bpf_testmod_struct_arg_3 *struct_arg3; struct bpf_testmod_struct_arg_4 struct_arg4 = {21, 22}; + struct bpf_testmod_struct_arg_5 struct_arg5 = {23, 24, 25, 26}; int i = 1; while (bpf_testmod_return_ptr(i)) @@ -283,6 +336,8 @@ bpf_testmod_test_read(struct file *file, struct kobject *kobj, (void *)20, struct_arg4); (void)bpf_testmod_test_struct_arg_8(16, (void *)17, 18, 19, (void *)20, struct_arg4, 23); + (void)bpf_testmod_test_struct_arg_9(16, (void *)17, 18, 19, (void *)20, + 21, 22, struct_arg5, 27); (void)bpf_testmod_test_arg_ptr_to_struct(&struct_arg1_2); @@ -363,8 +418,15 @@ BTF_ID_FLAGS(func, bpf_iter_testmod_seq_new, KF_ITER_NEW) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_next, KF_ITER_NEXT | KF_RET_NULL) BTF_ID_FLAGS(func, bpf_iter_testmod_seq_destroy, KF_ITER_DESTROY) BTF_ID_FLAGS(func, bpf_kfunc_common_test) +BTF_ID_FLAGS(func, bpf_kfunc_dynptr_test) +BTF_ID_FLAGS(func, bpf_testmod_ctx_create, KF_ACQUIRE | KF_RET_NULL) +BTF_ID_FLAGS(func, bpf_testmod_ctx_release, KF_RELEASE) BTF_KFUNCS_END(bpf_testmod_common_kfunc_ids) +BTF_ID_LIST(bpf_testmod_dtor_ids) +BTF_ID(struct, bpf_testmod_ctx) +BTF_ID(func, bpf_testmod_ctx_release) + static const struct btf_kfunc_id_set bpf_testmod_common_kfunc_set = { .owner = THIS_MODULE, .set = &bpf_testmod_common_kfunc_ids, @@ -820,7 +882,7 @@ static const struct bpf_verifier_ops bpf_testmod_verifier_ops = { .is_valid_access = bpf_testmod_ops_is_valid_access, }; -static int bpf_dummy_reg(void *kdata) +static int bpf_dummy_reg(void *kdata, struct bpf_link *link) { struct bpf_testmod_ops *ops = kdata; @@ -835,7 +897,7 @@ static int bpf_dummy_reg(void *kdata) return 0; } -static void bpf_dummy_unreg(void *kdata) +static void bpf_dummy_unreg(void *kdata, 
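The bpf_testmod_ctx_create()/bpf_testmod_ctx_release() pair added above is flagged KF_ACQUIRE | KF_RET_NULL and KF_RELEASE, with the release side deferring the actual kfree() through call_rcu(). A hedged sketch of a BPF-side caller (program type and body are illustrative, not taken from this series):

    #include <bpf/bpf_helpers.h>
    #include "../bpf_testmod/bpf_testmod_kfunc.h"

    SEC("syscall")
    int use_testmod_ctx(void *ctx)
    {
            struct bpf_testmod_ctx *tctx;
            int err = 0;

            tctx = bpf_testmod_ctx_create(&err);    /* KF_RET_NULL: may be NULL */
            if (!tctx)
                    return err;

            /* ... the verifier requires the acquired reference to be
             * released on every path ...
             */
            bpf_testmod_ctx_release(tctx);          /* KF_RELEASE */
            return 0;
    }

    char _license[] SEC("license") = "GPL";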
struct bpf_link *link) { } @@ -871,7 +933,7 @@ struct bpf_struct_ops bpf_bpf_testmod_ops = { .owner = THIS_MODULE, }; -static int bpf_dummy_reg2(void *kdata) +static int bpf_dummy_reg2(void *kdata, struct bpf_link *link) { struct bpf_testmod_ops2 *ops = kdata; @@ -898,6 +960,12 @@ extern int bpf_fentry_test1(int a); static int bpf_testmod_init(void) { + const struct btf_id_dtor_kfunc bpf_testmod_dtors[] = { + { + .btf_id = bpf_testmod_dtor_ids[0], + .kfunc_btf_id = bpf_testmod_dtor_ids[1] + }, + }; int ret; ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_UNSPEC, &bpf_testmod_common_kfunc_set); @@ -906,6 +974,9 @@ static int bpf_testmod_init(void) ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_testmod_kfunc_set); ret = ret ?: register_bpf_struct_ops(&bpf_bpf_testmod_ops, bpf_testmod_ops); ret = ret ?: register_bpf_struct_ops(&bpf_testmod_ops2, bpf_testmod_ops2); + ret = ret ?: register_btf_id_dtor_kfuncs(bpf_testmod_dtors, + ARRAY_SIZE(bpf_testmod_dtors), + THIS_MODULE); if (ret < 0) return ret; if (bpf_fentry_test1(0) < 0) diff --git a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h index b0d586a6751f..e587a79f2239 100644 --- a/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h +++ b/tools/testing/selftests/bpf/bpf_testmod/bpf_testmod_kfunc.h @@ -80,6 +80,11 @@ struct sendmsg_args { int msglen; }; +struct bpf_testmod_ctx { + struct callback_head rcu; + refcount_t usage; +}; + struct prog_test_ref_kfunc * bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr) __ksym; void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym; @@ -134,4 +139,9 @@ int bpf_kfunc_call_sock_sendmsg(struct sendmsg_args *args) __ksym; int bpf_kfunc_call_kernel_getsockname(struct addr_args *args) __ksym; int bpf_kfunc_call_kernel_getpeername(struct addr_args *args) __ksym; +void bpf_kfunc_dynptr_test(struct bpf_dynptr *ptr, struct bpf_dynptr *ptr__nullable) __ksym; + +struct bpf_testmod_ctx *bpf_testmod_ctx_create(int *err) __ksym; +void bpf_testmod_ctx_release(struct bpf_testmod_ctx *ctx) __ksym; + #endif /* _BPF_TESTMOD_KFUNC_H */ diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config index 98b6b6a886ce..4ca84c8d9116 100644 --- a/tools/testing/selftests/bpf/config +++ b/tools/testing/selftests/bpf/config @@ -83,8 +83,22 @@ CONFIG_NETFILTER_XT_TARGET_CT=y CONFIG_NETKIT=y CONFIG_NF_CONNTRACK=y CONFIG_NF_CONNTRACK_MARK=y +CONFIG_NF_CONNTRACK_ZONES=y CONFIG_NF_DEFRAG_IPV4=y CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_TABLES=y +CONFIG_NF_TABLES_INET=y +CONFIG_NF_TABLES_NETDEV=y +CONFIG_NF_TABLES_IPV4=y +CONFIG_NF_TABLES_IPV6=y +CONFIG_NETFILTER_INGRESS=y +CONFIG_NF_FLOW_TABLE=y +CONFIG_NF_FLOW_TABLE_INET=y +CONFIG_NETFILTER_NETLINK=y +CONFIG_NFT_FLOW_OFFLOAD=y +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_FILTER=y CONFIG_NF_NAT=y CONFIG_RC_CORE=y CONFIG_SECURITY=y diff --git a/tools/testing/selftests/bpf/network_helpers.c b/tools/testing/selftests/bpf/network_helpers.c index 35250e6cde7f..e0cba4178e41 100644 --- a/tools/testing/selftests/bpf/network_helpers.c +++ b/tools/testing/selftests/bpf/network_helpers.c @@ -94,7 +94,8 @@ static int __start_server(int type, const struct sockaddr *addr, socklen_t addrl if (settimeo(fd, opts->timeout_ms)) goto error_close; - if (opts->post_socket_cb && opts->post_socket_cb(fd, NULL)) { + if (opts->post_socket_cb && + opts->post_socket_cb(fd, opts->cb_opts)) { log_err("Failed to call post_socket_cb"); goto error_close; } @@ -105,7 
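On the reg()/unreg() signature change applied to bpf_test_no_cfi.c and bpf_testmod.c above: struct_ops registration callbacks now receive the attaching BPF link. The dummy implementations here ignore it; a subsystem that wants it could do something like the following (names and the stashing policy are hypothetical, shown only to illustrate the extra parameter):

    static struct bpf_link *example_saved_link;

    static int example_reg(void *kdata, struct bpf_link *link)
    {
            example_saved_link = link;      /* illustrative only */
            return 0;
    }

    static void example_unreg(void *kdata, struct bpf_link *link)
    {
            example_saved_link = NULL;
    }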
+106,7 @@ static int __start_server(int type, const struct sockaddr *addr, socklen_t addrl } if (type == SOCK_STREAM) { - if (listen(fd, 1) < 0) { + if (listen(fd, opts->backlog ? MAX(opts->backlog, 0) : 1) < 0) { log_err("Failed to listed on socket"); goto error_close; } @@ -118,22 +119,32 @@ error_close: return -1; } +int start_server_str(int family, int type, const char *addr_str, __u16 port, + const struct network_helper_opts *opts) +{ + struct sockaddr_storage addr; + socklen_t addrlen; + + if (!opts) + opts = &default_opts; + + if (make_sockaddr(family, addr_str, port, &addr, &addrlen)) + return -1; + + return __start_server(type, (struct sockaddr *)&addr, addrlen, opts); +} + int start_server(int family, int type, const char *addr_str, __u16 port, int timeout_ms) { struct network_helper_opts opts = { .timeout_ms = timeout_ms, }; - struct sockaddr_storage addr; - socklen_t addrlen; - if (make_sockaddr(family, addr_str, port, &addr, &addrlen)) - return -1; - - return __start_server(type, (struct sockaddr *)&addr, addrlen, &opts); + return start_server_str(family, type, addr_str, port, &opts); } -static int reuseport_cb(int fd, const struct post_socket_opts *opts) +static int reuseport_cb(int fd, void *opts) { int on = 1; @@ -238,6 +249,34 @@ error_close: return -1; } +int client_socket(int family, int type, + const struct network_helper_opts *opts) +{ + int fd; + + if (!opts) + opts = &default_opts; + + fd = socket(family, type, opts->proto); + if (fd < 0) { + log_err("Failed to create client socket"); + return -1; + } + + if (settimeo(fd, opts->timeout_ms)) + goto error_close; + + if (opts->post_socket_cb && + opts->post_socket_cb(fd, opts->cb_opts)) + goto error_close; + + return fd; + +error_close: + save_errno_close(fd); + return -1; +} + static int connect_fd_to_addr(int fd, const struct sockaddr_storage *addr, socklen_t addrlen, const bool must_fail) @@ -273,15 +312,12 @@ int connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t add if (!opts) opts = &default_opts; - fd = socket(addr->ss_family, type, opts->proto); + fd = client_socket(addr->ss_family, type, opts); if (fd < 0) { log_err("Failed to create client socket"); return -1; } - if (settimeo(fd, opts->timeout_ms)) - goto error_close; - if (connect_fd_to_addr(fd, addr, addrlen, opts->must_fail)) goto error_close; @@ -292,66 +328,21 @@ error_close: return -1; } -int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts) +int connect_to_fd_opts(int server_fd, int type, const struct network_helper_opts *opts) { struct sockaddr_storage addr; - struct sockaddr_in *addr_in; - socklen_t addrlen, optlen; - int fd, type, protocol; + socklen_t addrlen; if (!opts) opts = &default_opts; - optlen = sizeof(type); - - if (opts->type) { - type = opts->type; - } else { - if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) { - log_err("getsockopt(SOL_TYPE)"); - return -1; - } - } - - if (opts->proto) { - protocol = opts->proto; - } else { - if (getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen)) { - log_err("getsockopt(SOL_PROTOCOL)"); - return -1; - } - } - addrlen = sizeof(addr); if (getsockname(server_fd, (struct sockaddr *)&addr, &addrlen)) { log_err("Failed to get server addr"); return -1; } - addr_in = (struct sockaddr_in *)&addr; - fd = socket(addr_in->sin_family, type, protocol); - if (fd < 0) { - log_err("Failed to create client socket"); - return -1; - } - - if (settimeo(fd, opts->timeout_ms)) - goto error_close; - - if (opts->cc && opts->cc[0] && - setsockopt(fd, 
SOL_TCP, TCP_CONGESTION, opts->cc, - strlen(opts->cc) + 1)) - goto error_close; - - if (!opts->noconnect) - if (connect_fd_to_addr(fd, &addr, addrlen, opts->must_fail)) - goto error_close; - - return fd; - -error_close: - save_errno_close(fd); - return -1; + return connect_to_addr(type, &addr, addrlen, opts); } int connect_to_fd(int server_fd, int timeout_ms) @@ -359,8 +350,23 @@ int connect_to_fd(int server_fd, int timeout_ms) struct network_helper_opts opts = { .timeout_ms = timeout_ms, }; + int type, protocol; + socklen_t optlen; - return connect_to_fd_opts(server_fd, &opts); + optlen = sizeof(type); + if (getsockopt(server_fd, SOL_SOCKET, SO_TYPE, &type, &optlen)) { + log_err("getsockopt(SOL_TYPE)"); + return -1; + } + + optlen = sizeof(protocol); + if (getsockopt(server_fd, SOL_SOCKET, SO_PROTOCOL, &protocol, &optlen)) { + log_err("getsockopt(SOL_PROTOCOL)"); + return -1; + } + opts.proto = protocol; + + return connect_to_fd_opts(server_fd, type, &opts); } int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms) diff --git a/tools/testing/selftests/bpf/network_helpers.h b/tools/testing/selftests/bpf/network_helpers.h index 883c7ea9d8d5..aac5b94d6379 100644 --- a/tools/testing/selftests/bpf/network_helpers.h +++ b/tools/testing/selftests/bpf/network_helpers.h @@ -21,16 +21,22 @@ typedef __u16 __sum16; #define VIP_NUM 5 #define MAGIC_BYTES 123 -struct post_socket_opts {}; - struct network_helper_opts { - const char *cc; int timeout_ms; bool must_fail; - bool noconnect; - int type; int proto; - int (*post_socket_cb)(int fd, const struct post_socket_opts *opts); + /* +ve: Passed to listen() as-is. + * 0: Default when the test does not set + * a particular value during the struct init. + * It is changed to 1 before passing to listen(). + * Most tests only have one on-going connection. + * -ve: It is changed to 0 before passing to listen(). + * It is useful to force syncookie without + * changing the "tcp_syncookies" sysctl from 1 to 2. 
+ */ + int backlog; + int (*post_socket_cb)(int fd, void *opts); + void *cb_opts; }; /* ipv4 test vector */ @@ -50,6 +56,8 @@ struct ipv6_packet { extern struct ipv6_packet pkt_v6; int settimeo(int fd, int timeout_ms); +int start_server_str(int family, int type, const char *addr_str, __u16 port, + const struct network_helper_opts *opts); int start_server(int family, int type, const char *addr, __u16 port, int timeout_ms); int *start_reuseport_server(int family, int type, const char *addr_str, @@ -58,10 +66,12 @@ int *start_reuseport_server(int family, int type, const char *addr_str, int start_server_addr(int type, const struct sockaddr_storage *addr, socklen_t len, const struct network_helper_opts *opts); void free_fds(int *fds, unsigned int nr_close_fds); +int client_socket(int family, int type, + const struct network_helper_opts *opts); int connect_to_addr(int type, const struct sockaddr_storage *addr, socklen_t len, const struct network_helper_opts *opts); int connect_to_fd(int server_fd, int timeout_ms); -int connect_to_fd_opts(int server_fd, const struct network_helper_opts *opts); +int connect_to_fd_opts(int server_fd, int type, const struct network_helper_opts *opts); int connect_fd_to_fd(int client_fd, int server_fd, int timeout_ms); int fastopen_connect(int server_fd, const char *data, unsigned int data_len, int timeout_ms); diff --git a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c index 0807a48a58ee..26e7c06c6cb4 100644 --- a/tools/testing/selftests/bpf/prog_tests/arena_atomics.c +++ b/tools/testing/selftests/bpf/prog_tests/arena_atomics.c @@ -146,6 +146,22 @@ static void test_xchg(struct arena_atomics *skel) ASSERT_EQ(skel->arena->xchg32_result, 1, "xchg32_result"); } +static void test_uaf(struct arena_atomics *skel) +{ + LIBBPF_OPTS(bpf_test_run_opts, topts); + int err, prog_fd; + + /* No need to attach it, just run it directly */ + prog_fd = bpf_program__fd(skel->progs.uaf); + err = bpf_prog_test_run_opts(prog_fd, &topts); + if (!ASSERT_OK(err, "test_run_opts err")) + return; + if (!ASSERT_OK(topts.retval, "test_run_opts retval")) + return; + + ASSERT_EQ(skel->arena->uaf_recovery_fails, 0, "uaf_recovery_fails"); +} + void test_arena_atomics(void) { struct arena_atomics *skel; @@ -180,6 +196,8 @@ void test_arena_atomics(void) test_cmpxchg(skel); if (test__start_subtest("xchg")) test_xchg(skel); + if (test__start_subtest("uaf")) + test_uaf(skel); cleanup: arena_atomics__destroy(skel); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c index 4407ea428e77..070c52c312e5 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_cookie.c @@ -451,7 +451,7 @@ static void pe_subtest(struct test_bpf_cookie *skel) attr.type = PERF_TYPE_SOFTWARE; attr.config = PERF_COUNT_SW_CPU_CLOCK; attr.freq = 1; - attr.sample_freq = 1000; + attr.sample_freq = 10000; pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, PERF_FLAG_FD_CLOEXEC); if (!ASSERT_GE(pfd, 0, "perf_fd")) goto cleanup; diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c index b30ff6b3b81a..a4a1f93878d4 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_nf.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_nf.c @@ -104,6 +104,7 @@ static void test_bpf_nf_ct(int mode) ASSERT_EQ(skel->bss->test_einval_bpf_tuple, -EINVAL, "Test EINVAL for NULL bpf_tuple"); 
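Stepping back to the network_helpers rework summarized in network_helpers.h above: congestion-control setup (the old .cc field) and other per-socket tweaks now go through post_socket_cb with an opaque cb_opts argument, and the listen() backlog is tunable. A sketch of a typical caller under those assumptions (test body hypothetical):

    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <netinet/tcp.h>
    #include "network_helpers.h"

    static int set_cc_cb(int fd, void *opts)
    {
            const char *cc = opts;

            /* a nonzero return makes __start_server()/client_socket() bail out */
            return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, cc, strlen(cc) + 1);
    }

    static void example_server(void)
    {
            char cc[] = "cubic";
            struct network_helper_opts opts = {
                    .timeout_ms     = 1000,
                    .backlog        = 64,           /* positive: passed to listen() as-is */
                    .post_socket_cb = set_cc_cb,
                    .cb_opts        = cc,
            };
            int srv_fd;

            srv_fd = start_server_str(AF_INET6, SOCK_STREAM, "::1", 0, &opts);
            if (srv_fd >= 0)
                    close(srv_fd);
    }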
ASSERT_EQ(skel->bss->test_einval_reserved, -EINVAL, "Test EINVAL for reserved not set to 0"); + ASSERT_EQ(skel->bss->test_einval_reserved_new, -EINVAL, "Test EINVAL for reserved in new struct not set to 0"); ASSERT_EQ(skel->bss->test_einval_netns_id, -EINVAL, "Test EINVAL for netns_id < -1"); ASSERT_EQ(skel->bss->test_einval_len_opts, -EINVAL, "Test EINVAL for len__opts != NF_BPF_CT_OPTS_SZ"); ASSERT_EQ(skel->bss->test_eproto_l4proto, -EPROTO, "Test EPROTO for l4proto != TCP or UDP"); @@ -122,6 +123,12 @@ static void test_bpf_nf_ct(int mode) ASSERT_EQ(skel->bss->test_exist_lookup_mark, 43, "Test existing connection lookup ctmark"); ASSERT_EQ(skel->data->test_snat_addr, 0, "Test for source natting"); ASSERT_EQ(skel->data->test_dnat_addr, 0, "Test for destination natting"); + ASSERT_EQ(skel->data->test_ct_zone_id_alloc_entry, 0, "Test for alloc new entry in specified ct zone"); + ASSERT_EQ(skel->data->test_ct_zone_id_insert_entry, 0, "Test for insert new entry in specified ct zone"); + ASSERT_EQ(skel->data->test_ct_zone_id_succ_lookup, 0, "Test for successful lookup in specified ct_zone"); + ASSERT_EQ(skel->bss->test_ct_zone_dir_enoent_lookup, -ENOENT, "Test ENOENT for lookup with wrong ct zone dir"); + ASSERT_EQ(skel->bss->test_ct_zone_id_enoent_lookup, -ENOENT, "Test ENOENT for lookup in wrong ct zone"); + end: if (client_fd != -1) close(client_fd); diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c index 0aca02532794..63422f4f3896 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_tcp_ca.c @@ -23,6 +23,11 @@ static const unsigned int total_bytes = 10 * 1024 * 1024; static int expected_stg = 0xeB9F; +struct cb_opts { + const char *cc; + int map_fd; +}; + static int settcpca(int fd, const char *tcp_ca) { int err; @@ -34,55 +39,66 @@ static int settcpca(int fd, const char *tcp_ca) return 0; } -static void do_test(const char *tcp_ca, const struct bpf_map *sk_stg_map) +static bool start_test(char *addr_str, + const struct network_helper_opts *srv_opts, + const struct network_helper_opts *cli_opts, + int *srv_fd, int *cli_fd) { - int lfd = -1, fd = -1; - int err; - - lfd = start_server(AF_INET6, SOCK_STREAM, NULL, 0, 0); - if (!ASSERT_NEQ(lfd, -1, "socket")) - return; - - fd = socket(AF_INET6, SOCK_STREAM, 0); - if (!ASSERT_NEQ(fd, -1, "socket")) { - close(lfd); - return; - } - - if (settcpca(lfd, tcp_ca) || settcpca(fd, tcp_ca)) - goto done; - - if (sk_stg_map) { - err = bpf_map_update_elem(bpf_map__fd(sk_stg_map), &fd, - &expected_stg, BPF_NOEXIST); - if (!ASSERT_OK(err, "bpf_map_update_elem(sk_stg_map)")) - goto done; - } + *srv_fd = start_server_str(AF_INET6, SOCK_STREAM, addr_str, 0, srv_opts); + if (!ASSERT_NEQ(*srv_fd, -1, "start_server_str")) + goto err; /* connect to server */ - err = connect_fd_to_fd(fd, lfd, 0); - if (!ASSERT_NEQ(err, -1, "connect")) - goto done; + *cli_fd = connect_to_fd_opts(*srv_fd, SOCK_STREAM, cli_opts); + if (!ASSERT_NEQ(*cli_fd, -1, "connect_to_fd_opts")) + goto err; - if (sk_stg_map) { - int tmp_stg; + return true; - err = bpf_map_lookup_elem(bpf_map__fd(sk_stg_map), &fd, - &tmp_stg); - if (!ASSERT_ERR(err, "bpf_map_lookup_elem(sk_stg_map)") || - !ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem(sk_stg_map)")) - goto done; +err: + if (*srv_fd != -1) { + close(*srv_fd); + *srv_fd = -1; } + if (*cli_fd != -1) { + close(*cli_fd); + *cli_fd = -1; + } + return false; +} + +static void do_test(const struct network_helper_opts *opts) +{ + 
int lfd = -1, fd = -1; + + if (!start_test(NULL, opts, opts, &lfd, &fd)) + goto done; ASSERT_OK(send_recv_data(lfd, fd, total_bytes), "send_recv_data"); done: - close(lfd); - close(fd); + if (lfd != -1) + close(lfd); + if (fd != -1) + close(fd); +} + +static int cc_cb(int fd, void *opts) +{ + struct cb_opts *cb_opts = (struct cb_opts *)opts; + + return settcpca(fd, cb_opts->cc); } static void test_cubic(void) { + struct cb_opts cb_opts = { + .cc = "bpf_cubic", + }; + struct network_helper_opts opts = { + .post_socket_cb = cc_cb, + .cb_opts = &cb_opts, + }; struct bpf_cubic *cubic_skel; struct bpf_link *link; @@ -96,7 +112,7 @@ static void test_cubic(void) return; } - do_test("bpf_cubic", NULL); + do_test(&opts); ASSERT_EQ(cubic_skel->bss->bpf_cubic_acked_called, 1, "pkts_acked called"); @@ -104,8 +120,37 @@ static void test_cubic(void) bpf_cubic__destroy(cubic_skel); } +static int stg_post_socket_cb(int fd, void *opts) +{ + struct cb_opts *cb_opts = (struct cb_opts *)opts; + int err; + + err = settcpca(fd, cb_opts->cc); + if (err) + return err; + + err = bpf_map_update_elem(cb_opts->map_fd, &fd, + &expected_stg, BPF_NOEXIST); + if (!ASSERT_OK(err, "bpf_map_update_elem(sk_stg_map)")) + return err; + + return 0; +} + static void test_dctcp(void) { + struct cb_opts cb_opts = { + .cc = "bpf_dctcp", + }; + struct network_helper_opts opts = { + .post_socket_cb = cc_cb, + .cb_opts = &cb_opts, + }; + struct network_helper_opts cli_opts = { + .post_socket_cb = stg_post_socket_cb, + .cb_opts = &cb_opts, + }; + int lfd = -1, fd = -1, tmp_stg, err; struct bpf_dctcp *dctcp_skel; struct bpf_link *link; @@ -119,11 +164,58 @@ static void test_dctcp(void) return; } - do_test("bpf_dctcp", dctcp_skel->maps.sk_stg_map); + cb_opts.map_fd = bpf_map__fd(dctcp_skel->maps.sk_stg_map); + if (!start_test(NULL, &opts, &cli_opts, &lfd, &fd)) + goto done; + + err = bpf_map_lookup_elem(cb_opts.map_fd, &fd, &tmp_stg); + if (!ASSERT_ERR(err, "bpf_map_lookup_elem(sk_stg_map)") || + !ASSERT_EQ(errno, ENOENT, "bpf_map_lookup_elem(sk_stg_map)")) + goto done; + + ASSERT_OK(send_recv_data(lfd, fd, total_bytes), "send_recv_data"); ASSERT_EQ(dctcp_skel->bss->stg_result, expected_stg, "stg_result"); +done: bpf_link__destroy(link); bpf_dctcp__destroy(dctcp_skel); + if (lfd != -1) + close(lfd); + if (fd != -1) + close(fd); +} + +static void test_dctcp_autoattach_map(void) +{ + struct cb_opts cb_opts = { + .cc = "bpf_dctcp", + }; + struct network_helper_opts opts = { + .post_socket_cb = cc_cb, + .cb_opts = &cb_opts, + }; + struct bpf_dctcp *dctcp_skel; + struct bpf_link *link; + + dctcp_skel = bpf_dctcp__open_and_load(); + if (!ASSERT_OK_PTR(dctcp_skel, "bpf_dctcp__open_and_load")) + return; + + bpf_map__set_autoattach(dctcp_skel->maps.dctcp, true); + bpf_map__set_autoattach(dctcp_skel->maps.dctcp_nouse, false); + + if (!ASSERT_OK(bpf_dctcp__attach(dctcp_skel), "bpf_dctcp__attach")) + goto destroy; + + /* struct_ops is auto-attached */ + link = dctcp_skel->links.dctcp; + if (!ASSERT_OK_PTR(link, "link")) + goto destroy; + + do_test(&opts); + +destroy: + bpf_dctcp__destroy(dctcp_skel); } static char *err_str; @@ -171,11 +263,22 @@ static void test_invalid_license(void) static void test_dctcp_fallback(void) { int err, lfd = -1, cli_fd = -1, srv_fd = -1; - struct network_helper_opts opts = { - .cc = "cubic", - }; struct bpf_dctcp *dctcp_skel; struct bpf_link *link = NULL; + struct cb_opts dctcp = { + .cc = "bpf_dctcp", + }; + struct network_helper_opts srv_opts = { + .post_socket_cb = cc_cb, + .cb_opts = &dctcp, + }; + struct cb_opts 
cubic = { + .cc = "cubic", + }; + struct network_helper_opts cli_opts = { + .post_socket_cb = cc_cb, + .cb_opts = &cubic, + }; char srv_cc[16]; socklen_t cc_len = sizeof(srv_cc); @@ -190,13 +293,7 @@ static void test_dctcp_fallback(void) if (!ASSERT_OK_PTR(link, "dctcp link")) goto done; - lfd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0); - if (!ASSERT_GE(lfd, 0, "lfd") || - !ASSERT_OK(settcpca(lfd, "bpf_dctcp"), "lfd=>bpf_dctcp")) - goto done; - - cli_fd = connect_to_fd_opts(lfd, &opts); - if (!ASSERT_GE(cli_fd, 0, "cli_fd")) + if (!start_test("::1", &srv_opts, &cli_opts, &lfd, &cli_fd)) goto done; srv_fd = accept(lfd, NULL, 0); @@ -297,6 +394,13 @@ static void test_unsupp_cong_op(void) static void test_update_ca(void) { + struct cb_opts cb_opts = { + .cc = "tcp_ca_update", + }; + struct network_helper_opts opts = { + .post_socket_cb = cc_cb, + .cb_opts = &cb_opts, + }; struct tcp_ca_update *skel; struct bpf_link *link; int saved_ca1_cnt; @@ -307,25 +411,34 @@ static void test_update_ca(void) return; link = bpf_map__attach_struct_ops(skel->maps.ca_update_1); - ASSERT_OK_PTR(link, "attach_struct_ops"); + if (!ASSERT_OK_PTR(link, "attach_struct_ops")) + goto out; - do_test("tcp_ca_update", NULL); + do_test(&opts); saved_ca1_cnt = skel->bss->ca1_cnt; ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt"); err = bpf_link__update_map(link, skel->maps.ca_update_2); ASSERT_OK(err, "update_map"); - do_test("tcp_ca_update", NULL); + do_test(&opts); ASSERT_EQ(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt"); ASSERT_GT(skel->bss->ca2_cnt, 0, "ca2_ca2_cnt"); bpf_link__destroy(link); +out: tcp_ca_update__destroy(skel); } static void test_update_wrong(void) { + struct cb_opts cb_opts = { + .cc = "tcp_ca_update", + }; + struct network_helper_opts opts = { + .post_socket_cb = cc_cb, + .cb_opts = &cb_opts, + }; struct tcp_ca_update *skel; struct bpf_link *link; int saved_ca1_cnt; @@ -336,24 +449,33 @@ static void test_update_wrong(void) return; link = bpf_map__attach_struct_ops(skel->maps.ca_update_1); - ASSERT_OK_PTR(link, "attach_struct_ops"); + if (!ASSERT_OK_PTR(link, "attach_struct_ops")) + goto out; - do_test("tcp_ca_update", NULL); + do_test(&opts); saved_ca1_cnt = skel->bss->ca1_cnt; ASSERT_GT(saved_ca1_cnt, 0, "ca1_ca1_cnt"); err = bpf_link__update_map(link, skel->maps.ca_wrong); ASSERT_ERR(err, "update_map"); - do_test("tcp_ca_update", NULL); + do_test(&opts); ASSERT_GT(skel->bss->ca1_cnt, saved_ca1_cnt, "ca2_ca1_cnt"); bpf_link__destroy(link); +out: tcp_ca_update__destroy(skel); } static void test_mixed_links(void) { + struct cb_opts cb_opts = { + .cc = "tcp_ca_update", + }; + struct network_helper_opts opts = { + .post_socket_cb = cc_cb, + .cb_opts = &cb_opts, + }; struct tcp_ca_update *skel; struct bpf_link *link, *link_nl; int err; @@ -363,12 +485,13 @@ static void test_mixed_links(void) return; link_nl = bpf_map__attach_struct_ops(skel->maps.ca_no_link); - ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl"); + if (!ASSERT_OK_PTR(link_nl, "attach_struct_ops_nl")) + goto out; link = bpf_map__attach_struct_ops(skel->maps.ca_update_1); ASSERT_OK_PTR(link, "attach_struct_ops"); - do_test("tcp_ca_update", NULL); + do_test(&opts); ASSERT_GT(skel->bss->ca1_cnt, 0, "ca1_ca1_cnt"); err = bpf_link__update_map(link, skel->maps.ca_no_link); @@ -376,6 +499,7 @@ static void test_mixed_links(void) bpf_link__destroy(link); bpf_link__destroy(link_nl); +out: tcp_ca_update__destroy(skel); } @@ -418,7 +542,8 @@ static void test_link_replace(void) bpf_link__destroy(link); link = 
bpf_map__attach_struct_ops(skel->maps.ca_update_2); - ASSERT_OK_PTR(link, "attach_struct_ops_2nd"); + if (!ASSERT_OK_PTR(link, "attach_struct_ops_2nd")) + goto out; /* BPF_F_REPLACE with a wrong old map Fd. It should fail! * @@ -441,6 +566,7 @@ static void test_link_replace(void) bpf_link__destroy(link); +out: tcp_ca_update__destroy(skel); } @@ -455,6 +581,13 @@ static void test_tcp_ca_kfunc(void) static void test_cc_cubic(void) { + struct cb_opts cb_opts = { + .cc = "bpf_cc_cubic", + }; + struct network_helper_opts opts = { + .post_socket_cb = cc_cb, + .cb_opts = &cb_opts, + }; struct bpf_cc_cubic *cc_cubic_skel; struct bpf_link *link; @@ -468,7 +601,7 @@ static void test_cc_cubic(void) return; } - do_test("bpf_cc_cubic", NULL); + do_test(&opts); bpf_link__destroy(link); bpf_cc_cubic__destroy(cc_cubic_skel); @@ -506,4 +639,6 @@ void test_bpf_tcp_ca(void) test_tcp_ca_kfunc(); if (test__start_subtest("cc_cubic")) test_cc_cubic(); + if (test__start_subtest("dctcp_autoattach_map")) + test_dctcp_autoattach_map(); } diff --git a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c index 4c6ada5b270b..73f669014b69 100644 --- a/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c +++ b/tools/testing/selftests/bpf/prog_tests/bpf_verif_scale.c @@ -45,12 +45,6 @@ err_out: return err; } -struct scale_test_def { - const char *file; - enum bpf_prog_type attach_type; - bool fails; -}; - static void scale_test(const char *file, enum bpf_prog_type attach_type, bool should_fail) diff --git a/tools/testing/selftests/bpf/prog_tests/btf_distill.c b/tools/testing/selftests/bpf/prog_tests/btf_distill.c new file mode 100644 index 000000000000..bfbe795823a2 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf_distill.c @@ -0,0 +1,552 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024, Oracle and/or its affiliates. 
*/ + +#include <test_progs.h> +#include <bpf/btf.h> +#include "btf_helpers.h" + +/* Fabricate base, split BTF with references to base types needed; then create + * split BTF with distilled base BTF and ensure expectations are met: + * - only referenced base types from split BTF are present + * - struct/union/enum are represented as empty unless anonymous, when they + * are represented in full in split BTF + */ +static void test_distilled_base(void) +{ + struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_ptr(btf1, 1); /* [2] ptr to int */ + btf__add_struct(btf1, "s1", 8); /* [3] struct s1 { */ + btf__add_field(btf1, "f1", 2, 0, 0); /* int *f1; */ + /* } */ + btf__add_struct(btf1, "", 12); /* [4] struct { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + btf__add_field(btf1, "f2", 3, 32, 0); /* struct s1 f2; */ + /* } */ + btf__add_int(btf1, "unsigned int", 4, 0); /* [5] unsigned int */ + btf__add_union(btf1, "u1", 12); /* [6] union u1 { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + btf__add_field(btf1, "f2", 2, 0, 0); /* int *f2; */ + /* } */ + btf__add_union(btf1, "", 4); /* [7] union { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + /* } */ + btf__add_enum(btf1, "e1", 4); /* [8] enum e1 { */ + btf__add_enum_value(btf1, "v1", 1); /* v1 = 1; */ + /* } */ + btf__add_enum(btf1, "", 4); /* [9] enum { */ + btf__add_enum_value(btf1, "av1", 2); /* av1 = 2; */ + /* } */ + btf__add_enum64(btf1, "e641", 8, true); /* [10] enum64 { */ + btf__add_enum64_value(btf1, "v1", 1024); /* v1 = 1024; */ + /* } */ + btf__add_enum64(btf1, "", 8, true); /* [11] enum64 { */ + btf__add_enum64_value(btf1, "v1", 1025); /* v1 = 1025; */ + /* } */ + btf__add_struct(btf1, "unneeded", 4); /* [12] struct unneeded { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + /* } */ + btf__add_struct(btf1, "embedded", 4); /* [13] struct embedded { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + /* } */ + btf__add_func_proto(btf1, 1); /* [14] int (*)(int *p1); */ + btf__add_func_param(btf1, "p1", 1); + + btf__add_array(btf1, 1, 1, 3); /* [15] int [3]; */ + + btf__add_struct(btf1, "from_proto", 4); /* [16] struct from_proto { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + /* } */ + btf__add_union(btf1, "u1", 4); /* [17] union u1 { */ + btf__add_field(btf1, "f1", 1, 0, 0); /* int f1; */ + /* } */ + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] STRUCT 's1' size=8 vlen=1\n" + "\t'f1' type_id=2 bits_offset=0", + "[4] STRUCT '(anon)' size=12 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=3 bits_offset=32", + "[5] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)", + "[6] UNION 'u1' size=12 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=2 bits_offset=0", + "[7] UNION '(anon)' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[8] ENUM 'e1' encoding=UNSIGNED size=4 vlen=1\n" + "\t'v1' val=1", + "[9] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n" + "\t'av1' val=2", + "[10] ENUM64 'e641' encoding=SIGNED size=8 vlen=1\n" + "\t'v1' val=1024", + "[11] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n" + "\t'v1' val=1025", + "[12] STRUCT 'unneeded' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[13] STRUCT 'embedded' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[14] FUNC_PROTO
'(anon)' ret_type_id=1 vlen=1\n" + "\t'p1' type_id=1", + "[15] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3", + "[16] STRUCT 'from_proto' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[17] UNION 'u1' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0"); + + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + + btf__add_ptr(btf2, 3); /* [18] ptr to struct s1 */ + /* add ptr to struct anon */ + btf__add_ptr(btf2, 4); /* [19] ptr to struct (anon) */ + btf__add_const(btf2, 6); /* [20] const union u1 */ + btf__add_restrict(btf2, 7); /* [21] restrict union (anon) */ + btf__add_volatile(btf2, 8); /* [22] volatile enum e1 */ + btf__add_typedef(btf2, "et", 9); /* [23] typedef enum (anon) */ + btf__add_const(btf2, 10); /* [24] const enum64 e641 */ + btf__add_ptr(btf2, 11); /* [25] restrict enum64 (anon) */ + btf__add_struct(btf2, "with_embedded", 4); /* [26] struct with_embedded { */ + btf__add_field(btf2, "f1", 13, 0, 0); /* struct embedded f1; */ + /* } */ + btf__add_func(btf2, "fn", BTF_FUNC_STATIC, 14); /* [27] int fn(int p1); */ + btf__add_typedef(btf2, "arraytype", 15); /* [28] typedef int[3] foo; */ + btf__add_func_proto(btf2, 1); /* [29] int (*)(struct from proto p1); */ + btf__add_func_param(btf2, "p1", 16); + + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] STRUCT 's1' size=8 vlen=1\n" + "\t'f1' type_id=2 bits_offset=0", + "[4] STRUCT '(anon)' size=12 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=3 bits_offset=32", + "[5] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)", + "[6] UNION 'u1' size=12 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=2 bits_offset=0", + "[7] UNION '(anon)' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[8] ENUM 'e1' encoding=UNSIGNED size=4 vlen=1\n" + "\t'v1' val=1", + "[9] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n" + "\t'av1' val=2", + "[10] ENUM64 'e641' encoding=SIGNED size=8 vlen=1\n" + "\t'v1' val=1024", + "[11] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n" + "\t'v1' val=1025", + "[12] STRUCT 'unneeded' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[13] STRUCT 'embedded' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[14] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p1' type_id=1", + "[15] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3", + "[16] STRUCT 'from_proto' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[17] UNION 'u1' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[18] PTR '(anon)' type_id=3", + "[19] PTR '(anon)' type_id=4", + "[20] CONST '(anon)' type_id=6", + "[21] RESTRICT '(anon)' type_id=7", + "[22] VOLATILE '(anon)' type_id=8", + "[23] TYPEDEF 'et' type_id=9", + "[24] CONST '(anon)' type_id=10", + "[25] PTR '(anon)' type_id=11", + "[26] STRUCT 'with_embedded' size=4 vlen=1\n" + "\t'f1' type_id=13 bits_offset=0", + "[27] FUNC 'fn' type_id=14 linkage=static", + "[28] TYPEDEF 'arraytype' type_id=15", + "[29] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p1' type_id=16"); + + if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4), + "distilled_base") || + !ASSERT_OK_PTR(btf3, "distilled_base") || + !ASSERT_OK_PTR(btf4, "distilled_split") || + !ASSERT_EQ(8, btf__type_cnt(btf3), "distilled_base_type_cnt")) + goto cleanup; + + VALIDATE_RAW_BTF( + btf4, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] STRUCT 's1' size=8 vlen=0", + "[3] UNION 'u1' size=12 
vlen=0", + "[4] ENUM 'e1' encoding=UNSIGNED size=4 vlen=0", + "[5] ENUM 'e641' encoding=UNSIGNED size=8 vlen=0", + "[6] STRUCT 'embedded' size=4 vlen=0", + "[7] STRUCT 'from_proto' size=4 vlen=0", + /* split BTF; these types should match split BTF above from 17-28, with + * updated type id references + */ + "[8] PTR '(anon)' type_id=2", + "[9] PTR '(anon)' type_id=20", + "[10] CONST '(anon)' type_id=3", + "[11] RESTRICT '(anon)' type_id=21", + "[12] VOLATILE '(anon)' type_id=4", + "[13] TYPEDEF 'et' type_id=22", + "[14] CONST '(anon)' type_id=5", + "[15] PTR '(anon)' type_id=23", + "[16] STRUCT 'with_embedded' size=4 vlen=1\n" + "\t'f1' type_id=6 bits_offset=0", + "[17] FUNC 'fn' type_id=24 linkage=static", + "[18] TYPEDEF 'arraytype' type_id=25", + "[19] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p1' type_id=7", + /* split BTF types added from original base BTF below */ + "[20] STRUCT '(anon)' size=12 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=2 bits_offset=32", + "[21] UNION '(anon)' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[22] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n" + "\t'av1' val=2", + "[23] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n" + "\t'v1' val=1025", + "[24] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p1' type_id=1", + "[25] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3"); + + if (!ASSERT_EQ(btf__relocate(btf4, btf1), 0, "relocate_split")) + goto cleanup; + + VALIDATE_RAW_BTF( + btf4, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1", + "[3] STRUCT 's1' size=8 vlen=1\n" + "\t'f1' type_id=2 bits_offset=0", + "[4] STRUCT '(anon)' size=12 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=3 bits_offset=32", + "[5] INT 'unsigned int' size=4 bits_offset=0 nr_bits=32 encoding=(none)", + "[6] UNION 'u1' size=12 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=2 bits_offset=0", + "[7] UNION '(anon)' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[8] ENUM 'e1' encoding=UNSIGNED size=4 vlen=1\n" + "\t'v1' val=1", + "[9] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n" + "\t'av1' val=2", + "[10] ENUM64 'e641' encoding=SIGNED size=8 vlen=1\n" + "\t'v1' val=1024", + "[11] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n" + "\t'v1' val=1025", + "[12] STRUCT 'unneeded' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[13] STRUCT 'embedded' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[14] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p1' type_id=1", + "[15] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3", + "[16] STRUCT 'from_proto' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[17] UNION 'u1' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[18] PTR '(anon)' type_id=3", + "[19] PTR '(anon)' type_id=30", + "[20] CONST '(anon)' type_id=6", + "[21] RESTRICT '(anon)' type_id=31", + "[22] VOLATILE '(anon)' type_id=8", + "[23] TYPEDEF 'et' type_id=32", + "[24] CONST '(anon)' type_id=10", + "[25] PTR '(anon)' type_id=33", + "[26] STRUCT 'with_embedded' size=4 vlen=1\n" + "\t'f1' type_id=13 bits_offset=0", + "[27] FUNC 'fn' type_id=34 linkage=static", + "[28] TYPEDEF 'arraytype' type_id=35", + "[29] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p1' type_id=16", + /* below here are (duplicate) anon base types added by distill + * process to split BTF. 
+ */ + "[30] STRUCT '(anon)' size=12 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=3 bits_offset=32", + "[31] UNION '(anon)' size=4 vlen=1\n" + "\t'f1' type_id=1 bits_offset=0", + "[32] ENUM '(anon)' encoding=UNSIGNED size=4 vlen=1\n" + "\t'av1' val=2", + "[33] ENUM64 '(anon)' encoding=SIGNED size=8 vlen=1\n" + "\t'v1' val=1025", + "[34] FUNC_PROTO '(anon)' ret_type_id=1 vlen=1\n" + "\t'p1' type_id=1", + "[35] ARRAY '(anon)' type_id=1 index_type_id=1 nr_elems=3"); + +cleanup: + btf__free(btf4); + btf__free(btf3); + btf__free(btf2); + btf__free(btf1); +} + +/* ensure we can cope with multiple types with the same name in + * distilled base BTF. In this case because sizes are different, + * we can still disambiguate them. + */ +static void test_distilled_base_multi(void) +{ + struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_int(btf1, "int", 8, BTF_INT_SIGNED); /* [2] int */ + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED"); + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + btf__add_ptr(btf2, 1); + btf__add_const(btf2, 2); + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED", + "[3] PTR '(anon)' type_id=1", + "[4] CONST '(anon)' type_id=2"); + if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4), + "distilled_base") || + !ASSERT_OK_PTR(btf3, "distilled_base") || + !ASSERT_OK_PTR(btf4, "distilled_split") || + !ASSERT_EQ(3, btf__type_cnt(btf3), "distilled_base_type_cnt")) + goto cleanup; + VALIDATE_RAW_BTF( + btf3, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED"); + if (!ASSERT_EQ(btf__relocate(btf4, btf1), 0, "relocate_split")) + goto cleanup; + + VALIDATE_RAW_BTF( + btf4, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED", + "[3] PTR '(anon)' type_id=1", + "[4] CONST '(anon)' type_id=2"); + +cleanup: + btf__free(btf4); + btf__free(btf3); + btf__free(btf2); + btf__free(btf1); +} + +/* If a needed type is not present in the base BTF we wish to relocate + * with, btf__relocate() should error our. 
+ */ +static void test_distilled_base_missing_err(void) +{ + struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL, *btf5 = NULL; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_int(btf1, "int", 8, BTF_INT_SIGNED); /* [2] int */ + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED"); + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + btf__add_ptr(btf2, 1); + btf__add_const(btf2, 2); + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED", + "[3] PTR '(anon)' type_id=1", + "[4] CONST '(anon)' type_id=2"); + if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4), + "distilled_base") || + !ASSERT_OK_PTR(btf3, "distilled_base") || + !ASSERT_OK_PTR(btf4, "distilled_split") || + !ASSERT_EQ(3, btf__type_cnt(btf3), "distilled_base_type_cnt")) + goto cleanup; + VALIDATE_RAW_BTF( + btf3, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED"); + btf5 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf")) + return; + btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */ + VALIDATE_RAW_BTF( + btf5, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED"); + ASSERT_EQ(btf__relocate(btf4, btf5), -EINVAL, "relocate_split"); + +cleanup: + btf__free(btf5); + btf__free(btf4); + btf__free(btf3); + btf__free(btf2); + btf__free(btf1); +} + +/* With 2 types of same size in distilled base BTF, relocation should + * fail as we have no means to choose between them. + */ +static void test_distilled_base_multi_err(void) +{ + struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [2] int */ + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED"); + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + btf__add_ptr(btf2, 1); + btf__add_const(btf2, 2); + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[3] PTR '(anon)' type_id=1", + "[4] CONST '(anon)' type_id=2"); + if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4), + "distilled_base") || + !ASSERT_OK_PTR(btf3, "distilled_base") || + !ASSERT_OK_PTR(btf4, "distilled_split") || + !ASSERT_EQ(3, btf__type_cnt(btf3), "distilled_base_type_cnt")) + goto cleanup; + VALIDATE_RAW_BTF( + btf3, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED"); + ASSERT_EQ(btf__relocate(btf4, btf1), -EINVAL, "relocate_split"); +cleanup: + btf__free(btf4); + btf__free(btf3); + btf__free(btf2); + btf__free(btf1); +} + +/* With 2 types of same size in base BTF, relocation should + * fail as we have no means to choose between them. 
+ */ +static void test_distilled_base_multi_err2(void) +{ + struct btf *btf1 = NULL, *btf2 = NULL, *btf3 = NULL, *btf4 = NULL, *btf5 = NULL; + + btf1 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf1, "empty_main_btf")) + return; + btf__add_int(btf1, "int", 4, BTF_INT_SIGNED); /* [1] int */ + VALIDATE_RAW_BTF( + btf1, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED"); + btf2 = btf__new_empty_split(btf1); + if (!ASSERT_OK_PTR(btf2, "empty_split_btf")) + goto cleanup; + btf__add_ptr(btf2, 1); + VALIDATE_RAW_BTF( + btf2, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] PTR '(anon)' type_id=1"); + if (!ASSERT_EQ(0, btf__distill_base(btf2, &btf3, &btf4), + "distilled_base") || + !ASSERT_OK_PTR(btf3, "distilled_base") || + !ASSERT_OK_PTR(btf4, "distilled_split") || + !ASSERT_EQ(2, btf__type_cnt(btf3), "distilled_base_type_cnt")) + goto cleanup; + VALIDATE_RAW_BTF( + btf3, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED"); + btf5 = btf__new_empty(); + if (!ASSERT_OK_PTR(btf5, "empty_reloc_btf")) + return; + btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_int(btf5, "int", 4, BTF_INT_SIGNED); /* [2] int */ + VALIDATE_RAW_BTF( + btf5, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED"); + ASSERT_EQ(btf__relocate(btf4, btf5), -EINVAL, "relocate_split"); +cleanup: + btf__free(btf5); + btf__free(btf4); + btf__free(btf3); + btf__free(btf2); + btf__free(btf1); +} + +/* create split reference BTF from vmlinux + split BTF with a few type references; + * ensure the resultant split reference BTF is as expected, containing only types + * needed to disambiguate references from split BTF. + */ +static void test_distilled_base_vmlinux(void) +{ + struct btf *split_btf = NULL, *vmlinux_btf = btf__load_vmlinux_btf(); + struct btf *split_dist = NULL, *base_dist = NULL; + __s32 int_id, myint_id; + + if (!ASSERT_OK_PTR(vmlinux_btf, "load_vmlinux")) + return; + int_id = btf__find_by_name_kind(vmlinux_btf, "int", BTF_KIND_INT); + if (!ASSERT_GT(int_id, 0, "find_int")) + goto cleanup; + split_btf = btf__new_empty_split(vmlinux_btf); + if (!ASSERT_OK_PTR(split_btf, "new_split")) + goto cleanup; + myint_id = btf__add_typedef(split_btf, "myint", int_id); + btf__add_ptr(split_btf, myint_id); + + if (!ASSERT_EQ(btf__distill_base(split_btf, &base_dist, &split_dist), 0, + "distill_vmlinux_base")) + goto cleanup; + + if (!ASSERT_OK_PTR(split_dist, "split_distilled") || + !ASSERT_OK_PTR(base_dist, "base_dist")) + goto cleanup; + VALIDATE_RAW_BTF( + split_dist, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] TYPEDEF 'myint' type_id=1", + "[3] PTR '(anon)' type_id=2"); + +cleanup: + btf__free(split_dist); + btf__free(base_dist); + btf__free(split_btf); + btf__free(vmlinux_btf); +} + +void test_btf_distill(void) +{ + if (test__start_subtest("distilled_base")) + test_distilled_base(); + if (test__start_subtest("distilled_base_multi")) + test_distilled_base_multi(); + if (test__start_subtest("distilled_base_missing_err")) + test_distilled_base_missing_err(); + if (test__start_subtest("distilled_base_multi_err")) + test_distilled_base_multi_err(); + if (test__start_subtest("distilled_base_multi_err2")) + test_distilled_base_multi_err2(); + if (test__start_subtest("distilled_base_vmlinux")) + test_distilled_base_vmlinux(); +} diff --git a/tools/testing/selftests/bpf/prog_tests/btf_field_iter.c b/tools/testing/selftests/bpf/prog_tests/btf_field_iter.c new 
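Taken together, the btf_distill tests above pin down the contract of the two new libbpf entry points: btf__distill_base() produces a minimal base BTF plus an adjusted split BTF, and btf__relocate() later rebases that split BTF onto a full base, returning 0 on success and a negative errno (the tests check -EINVAL) when a needed base type is missing or ambiguous. A condensed sketch of the round trip (error paths trimmed; not the tests' exact code):

    #include <bpf/btf.h>

    static int distill_round_trip(struct btf *split_btf, struct btf *full_base)
    {
            struct btf *base_dist = NULL, *split_dist = NULL;
            int err;

            err = btf__distill_base(split_btf, &base_dist, &split_dist);
            if (err)
                    return err;

            /* base_dist/split_dist can be stored compactly; later, rebase
             * the split BTF against a (possibly different) full base BTF:
             */
            err = btf__relocate(split_dist, full_base);

            btf__free(split_dist);
            btf__free(base_dist);
            return err;
    }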
file mode 100644 index 000000000000..32159d3eb281 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/btf_field_iter.c @@ -0,0 +1,161 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024, Oracle and/or its affiliates. */ + +#include <test_progs.h> +#include <bpf/btf.h> +#include "btf_helpers.h" +#include "bpf/libbpf_internal.h" + +struct field_data { + __u32 ids[5]; + const char *strs[5]; +} fields[] = { + { .ids = {}, .strs = {} }, + { .ids = {}, .strs = { "int" } }, + { .ids = {}, .strs = { "int64" } }, + { .ids = { 1 }, .strs = { "" } }, + { .ids = { 2, 1 }, .strs = { "" } }, + { .ids = { 3, 1 }, .strs = { "s1", "f1", "f2" } }, + { .ids = { 1, 5 }, .strs = { "u1", "f1", "f2" } }, + { .ids = {}, .strs = { "e1", "v1", "v2" } }, + { .ids = {}, .strs = { "fw1" } }, + { .ids = { 1 }, .strs = { "t" } }, + { .ids = { 2 }, .strs = { "" } }, + { .ids = { 1 }, .strs = { "" } }, + { .ids = { 3 }, .strs = { "" } }, + { .ids = { 1, 1, 3 }, .strs = { "", "p1", "p2" } }, + { .ids = { 13 }, .strs = { "func" } }, + { .ids = { 1 }, .strs = { "var1" } }, + { .ids = { 3 }, .strs = { "var2" } }, + { .ids = {}, .strs = { "float" } }, + { .ids = { 11 }, .strs = { "decltag" } }, + { .ids = { 6 }, .strs = { "typetag" } }, + { .ids = {}, .strs = { "e64", "eval1", "eval2", "eval3" } }, + { .ids = { 15, 16 }, .strs = { "datasec1" } } + +}; + +/* Fabricate BTF with various types and check BTF field iteration finds types, + * strings expected. + */ +void test_btf_field_iter(void) +{ + struct btf *btf = NULL; + int id; + + btf = btf__new_empty(); + if (!ASSERT_OK_PTR(btf, "empty_btf")) + return; + + btf__add_int(btf, "int", 4, BTF_INT_SIGNED); /* [1] int */ + btf__add_int(btf, "int64", 8, BTF_INT_SIGNED); /* [2] int64 */ + btf__add_ptr(btf, 1); /* [3] int * */ + btf__add_array(btf, 1, 2, 3); /* [4] int64[3] */ + btf__add_struct(btf, "s1", 12); /* [5] struct s1 { */ + btf__add_field(btf, "f1", 3, 0, 0); /* int *f1; */ + btf__add_field(btf, "f2", 1, 0, 0); /* int f2; */ + /* } */ + btf__add_union(btf, "u1", 12); /* [6] union u1 { */ + btf__add_field(btf, "f1", 1, 0, 0); /* int f1; */ + btf__add_field(btf, "f2", 5, 0, 0); /* struct s1 f2; */ + /* } */ + btf__add_enum(btf, "e1", 4); /* [7] enum e1 { */ + btf__add_enum_value(btf, "v1", 1); /* v1 = 1; */ + btf__add_enum_value(btf, "v2", 2); /* v2 = 2; */ + /* } */ + + btf__add_fwd(btf, "fw1", BTF_FWD_STRUCT); /* [8] struct fw1; */ + btf__add_typedef(btf, "t", 1); /* [9] typedef int t; */ + btf__add_volatile(btf, 2); /* [10] volatile int64; */ + btf__add_const(btf, 1); /* [11] const int; */ + btf__add_restrict(btf, 3); /* [12] restrict int *; */ + btf__add_func_proto(btf, 1); /* [13] int (*)(int p1, int *p2); */ + btf__add_func_param(btf, "p1", 1); + btf__add_func_param(btf, "p2", 3); + + btf__add_func(btf, "func", BTF_FUNC_GLOBAL, 13);/* [14] int func(int p1, int *p2); */ + btf__add_var(btf, "var1", BTF_VAR_STATIC, 1); /* [15] static int var1; */ + btf__add_var(btf, "var2", BTF_VAR_STATIC, 3); /* [16] static int *var2; */ + btf__add_float(btf, "float", 4); /* [17] float; */ + btf__add_decl_tag(btf, "decltag", 11, -1); /* [18] decltag const int; */ + btf__add_type_tag(btf, "typetag", 6); /* [19] typetag union u1; */ + btf__add_enum64(btf, "e64", 8, true); /* [20] enum { */ + btf__add_enum64_value(btf, "eval1", 1000); /* eval1 = 1000, */ + btf__add_enum64_value(btf, "eval2", 2000); /* eval2 = 2000, */ + btf__add_enum64_value(btf, "eval3", 3000); /* eval3 = 3000 */ + /* } */ + btf__add_datasec(btf, "datasec1", 12); /* [21] datasec datasec1 */ + btf__add_datasec_var_info(btf, 15, 0, 4);
+ btf__add_datasec_var_info(btf, 16, 4, 8); + + VALIDATE_RAW_BTF( + btf, + "[1] INT 'int' size=4 bits_offset=0 nr_bits=32 encoding=SIGNED", + "[2] INT 'int64' size=8 bits_offset=0 nr_bits=64 encoding=SIGNED", + "[3] PTR '(anon)' type_id=1", + "[4] ARRAY '(anon)' type_id=2 index_type_id=1 nr_elems=3", + "[5] STRUCT 's1' size=12 vlen=2\n" + "\t'f1' type_id=3 bits_offset=0\n" + "\t'f2' type_id=1 bits_offset=0", + "[6] UNION 'u1' size=12 vlen=2\n" + "\t'f1' type_id=1 bits_offset=0\n" + "\t'f2' type_id=5 bits_offset=0", + "[7] ENUM 'e1' encoding=UNSIGNED size=4 vlen=2\n" + "\t'v1' val=1\n" + "\t'v2' val=2", + "[8] FWD 'fw1' fwd_kind=struct", + "[9] TYPEDEF 't' type_id=1", + "[10] VOLATILE '(anon)' type_id=2", + "[11] CONST '(anon)' type_id=1", + "[12] RESTRICT '(anon)' type_id=3", + "[13] FUNC_PROTO '(anon)' ret_type_id=1 vlen=2\n" + "\t'p1' type_id=1\n" + "\t'p2' type_id=3", + "[14] FUNC 'func' type_id=13 linkage=global", + "[15] VAR 'var1' type_id=1, linkage=static", + "[16] VAR 'var2' type_id=3, linkage=static", + "[17] FLOAT 'float' size=4", + "[18] DECL_TAG 'decltag' type_id=11 component_idx=-1", + "[19] TYPE_TAG 'typetag' type_id=6", + "[20] ENUM64 'e64' encoding=SIGNED size=8 vlen=3\n" + "\t'eval1' val=1000\n" + "\t'eval2' val=2000\n" + "\t'eval3' val=3000", + "[21] DATASEC 'datasec1' size=12 vlen=2\n" + "\ttype_id=15 offset=0 size=4\n" + "\ttype_id=16 offset=4 size=8"); + + for (id = 1; id < btf__type_cnt(btf); id++) { + struct btf_type *t = btf_type_by_id(btf, id); + struct btf_field_iter it_strs, it_ids; + int str_idx = 0, id_idx = 0; + __u32 *next_str, *next_id; + + if (!ASSERT_OK_PTR(t, "btf_type_by_id")) + break; + if (!ASSERT_OK(btf_field_iter_init(&it_strs, t, BTF_FIELD_ITER_STRS), + "iter_init_strs")) + break; + if (!ASSERT_OK(btf_field_iter_init(&it_ids, t, BTF_FIELD_ITER_IDS), + "iter_init_ids")) + break; + while ((next_str = btf_field_iter_next(&it_strs))) { + const char *str = btf__str_by_offset(btf, *next_str); + + if (!ASSERT_OK(strcmp(fields[id].strs[str_idx], str), "field_str_match")) + break; + str_idx++; + } + /* ensure no more strings are expected */ + ASSERT_EQ(fields[id].strs[str_idx], NULL, "field_str_cnt"); + + while ((next_id = btf_field_iter_next(&it_ids))) { + if (!ASSERT_EQ(*next_id, fields[id].ids[id_idx], "field_id_match")) + break; + id_idx++; + } + /* ensure no more ids are expected */ + ASSERT_EQ(fields[id].ids[id_idx], 0, "field_id_cnt"); + } + btf__free(btf); +} diff --git a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c index addf720428f7..9709c8db7275 100644 --- a/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c +++ b/tools/testing/selftests/bpf/prog_tests/cgroup_v1v2.c @@ -32,7 +32,7 @@ static int run_test(int cgroup_fd, int server_fd, bool classid) goto out; } - fd = connect_to_fd_opts(server_fd, &opts); + fd = connect_to_fd_opts(server_fd, SOCK_STREAM, &opts); if (fd < 0) err = -1; else @@ -52,7 +52,7 @@ void test_cgroup_v1v2(void) server_fd = start_server(AF_INET, SOCK_STREAM, NULL, port, 0); if (!ASSERT_GE(server_fd, 0, "server_fd")) return; - client_fd = connect_to_fd_opts(server_fd, &opts); + client_fd = connect_to_fd_opts(server_fd, SOCK_STREAM, &opts); if (!ASSERT_GE(client_fd, 0, "client_fd")) { close(server_fd); return; diff --git a/tools/testing/selftests/bpf/prog_tests/cpumask.c b/tools/testing/selftests/bpf/prog_tests/cpumask.c index ecf89df78109..2570bd4b0cb2 100644 --- a/tools/testing/selftests/bpf/prog_tests/cpumask.c +++ 
b/tools/testing/selftests/bpf/prog_tests/cpumask.c @@ -18,6 +18,11 @@ static const char * const cpumask_success_testcases[] = { "test_insert_leave", "test_insert_remove_release", "test_global_mask_rcu", + "test_global_mask_array_one_rcu", + "test_global_mask_array_rcu", + "test_global_mask_array_l2_rcu", + "test_global_mask_nested_rcu", + "test_global_mask_nested_deep_rcu", "test_cpumask_weight", }; diff --git a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c index 3b7c57fe55a5..08b6391f2f56 100644 --- a/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c +++ b/tools/testing/selftests/bpf/prog_tests/ctx_rewrite.c @@ -69,15 +69,17 @@ static struct test_case test_cases[] = { { N(SCHED_CLS, struct __sk_buff, tstamp), .read = "r11 = *(u8 *)($ctx + sk_buff::__mono_tc_offset);" - "w11 &= 3;" - "if w11 != 0x3 goto pc+2;" + "if w11 & 0x4 goto pc+1;" + "goto pc+4;" + "if w11 & 0x3 goto pc+1;" + "goto pc+2;" "$dst = 0;" "goto pc+1;" "$dst = *(u64 *)($ctx + sk_buff::tstamp);", .write = "r11 = *(u8 *)($ctx + sk_buff::__mono_tc_offset);" - "if w11 & 0x2 goto pc+1;" + "if w11 & 0x4 goto pc+1;" "goto pc+2;" - "w11 &= -2;" + "w11 &= -4;" "*(u8 *)($ctx + sk_buff::__mono_tc_offset) = r11;" "*(u64 *)($ctx + sk_buff::tstamp) = $src;", }, diff --git a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c index 596536def43d..49b1ffc9af1f 100644 --- a/tools/testing/selftests/bpf/prog_tests/fexit_stress.c +++ b/tools/testing/selftests/bpf/prog_tests/fexit_stress.c @@ -50,9 +50,9 @@ void serial_test_fexit_stress(void) out: for (i = 0; i < bpf_max_tramp_links; i++) { - if (link_fd[i]) + if (link_fd[i] > 0) close(link_fd[i]); - if (fexit_fd[i]) + if (fexit_fd[i] > 0) close(fexit_fd[i]); } free(fd); diff --git a/tools/testing/selftests/bpf/prog_tests/find_vma.c b/tools/testing/selftests/bpf/prog_tests/find_vma.c index 5165b38f0e59..f7619e0ade10 100644 --- a/tools/testing/selftests/bpf/prog_tests/find_vma.c +++ b/tools/testing/selftests/bpf/prog_tests/find_vma.c @@ -29,8 +29,8 @@ static int open_pe(void) /* create perf event */ attr.size = sizeof(attr); - attr.type = PERF_TYPE_HARDWARE; - attr.config = PERF_COUNT_HW_CPU_CYCLES; + attr.type = PERF_TYPE_SOFTWARE; + attr.config = PERF_COUNT_SW_CPU_CLOCK; attr.freq = 1; attr.sample_freq = 1000; pfd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC); diff --git a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c index 284764e7179f..4ddb8a5fece8 100644 --- a/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c +++ b/tools/testing/selftests/bpf/prog_tests/ip_check_defrag.c @@ -158,15 +158,13 @@ static int send_frags6(int client) void test_bpf_ip_check_defrag_ok(bool ipv6) { + int family = ipv6 ? AF_INET6 : AF_INET; struct network_helper_opts rx_opts = { .timeout_ms = 1000, - .noconnect = true, }; struct network_helper_opts tx_ops = { .timeout_ms = 1000, - .type = SOCK_RAW, .proto = IPPROTO_RAW, - .noconnect = true, }; struct sockaddr_storage caddr; struct ip_check_defrag *skel; @@ -192,7 +190,7 @@ void test_bpf_ip_check_defrag_ok(bool ipv6) nstoken = open_netns(NS1); if (!ASSERT_OK_PTR(nstoken, "setns ns1")) goto out; - srv_fd = start_server(ipv6 ? 
AF_INET6 : AF_INET, SOCK_DGRAM, NULL, SERVER_PORT, 0); + srv_fd = start_server(family, SOCK_DGRAM, NULL, SERVER_PORT, 0); close_netns(nstoken); if (!ASSERT_GE(srv_fd, 0, "start_server")) goto out; @@ -201,18 +199,18 @@ void test_bpf_ip_check_defrag_ok(bool ipv6) nstoken = open_netns(NS0); if (!ASSERT_OK_PTR(nstoken, "setns ns0")) goto out; - client_tx_fd = connect_to_fd_opts(srv_fd, &tx_ops); + client_tx_fd = client_socket(family, SOCK_RAW, &tx_ops); close_netns(nstoken); - if (!ASSERT_GE(client_tx_fd, 0, "connect_to_fd_opts")) + if (!ASSERT_GE(client_tx_fd, 0, "client_socket")) goto out; /* Open rx socket in ns0 */ nstoken = open_netns(NS0); if (!ASSERT_OK_PTR(nstoken, "setns ns0")) goto out; - client_rx_fd = connect_to_fd_opts(srv_fd, &rx_opts); + client_rx_fd = client_socket(family, SOCK_DGRAM, &rx_opts); close_netns(nstoken); - if (!ASSERT_GE(client_rx_fd, 0, "connect_to_fd_opts")) + if (!ASSERT_GE(client_rx_fd, 0, "client_socket")) goto out; /* Bind rx socket to a premeditated port */ diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c index 2eb71559713c..5b743212292f 100644 --- a/tools/testing/selftests/bpf/prog_tests/kfunc_call.c +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_call.c @@ -78,6 +78,7 @@ static struct kfunc_test_params kfunc_tests[] = { SYSCALL_TEST(kfunc_syscall_test, 0), SYSCALL_NULL_CTX_TEST(kfunc_syscall_test_null, 0), TC_TEST(kfunc_call_test_static_unused_arg, 0), + TC_TEST(kfunc_call_ctx, 0), }; struct syscall_test_args { diff --git a/tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c b/tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c new file mode 100644 index 000000000000..c8f4dcaac7c7 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/kfunc_param_nullable.c @@ -0,0 +1,11 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (c) 2024 Meta Platforms, Inc */ + +#include <test_progs.h> +#include "test_kfunc_param_nullable.skel.h" + +void test_kfunc_param_nullable(void) +{ + RUN_TESTS(test_kfunc_param_nullable); +} diff --git a/tools/testing/selftests/bpf/prog_tests/linked_list.c b/tools/testing/selftests/bpf/prog_tests/linked_list.c index 2fb89de63bd2..77d07e0a4a55 100644 --- a/tools/testing/selftests/bpf/prog_tests/linked_list.c +++ b/tools/testing/selftests/bpf/prog_tests/linked_list.c @@ -183,6 +183,18 @@ static void test_linked_list_success(int mode, bool leave_in_map) if (!leave_in_map) clear_fields(skel->maps.bss_A); + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_push_pop_nested), &opts); + ASSERT_OK(ret, "global_list_push_pop_nested"); + ASSERT_OK(opts.retval, "global_list_push_pop_nested retval"); + if (!leave_in_map) + clear_fields(skel->maps.bss_A); + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.global_list_array_push_pop), &opts); + ASSERT_OK(ret, "global_list_array_push_pop"); + ASSERT_OK(opts.retval, "global_list_array_push_pop retval"); + if (!leave_in_map) + clear_fields(skel->maps.bss_A); + if (mode == PUSH_POP) goto end; diff --git a/tools/testing/selftests/bpf/prog_tests/mptcp.c b/tools/testing/selftests/bpf/prog_tests/mptcp.c index 274d2e033e39..d2ca32fa3b21 100644 --- a/tools/testing/selftests/bpf/prog_tests/mptcp.c +++ b/tools/testing/selftests/bpf/prog_tests/mptcp.c @@ -89,13 +89,8 @@ static int start_mptcp_server(int family, const char *addr_str, __u16 port, .timeout_ms = timeout_ms, .proto = IPPROTO_MPTCP, }; - struct sockaddr_storage addr; - socklen_t addrlen; - if (make_sockaddr(family, addr_str, port, &addr,
&addrlen)) - return -1; - - return start_server_addr(SOCK_STREAM, &addr, addrlen, &opts); + return start_server_str(family, SOCK_STREAM, addr_str, port, &opts); } static int verify_tsk(int map_fd, int client_fd) diff --git a/tools/testing/selftests/bpf/prog_tests/rbtree.c b/tools/testing/selftests/bpf/prog_tests/rbtree.c index e9300c96607d..9818f06c97c5 100644 --- a/tools/testing/selftests/bpf/prog_tests/rbtree.c +++ b/tools/testing/selftests/bpf/prog_tests/rbtree.c @@ -31,6 +31,28 @@ static void test_rbtree_add_nodes(void) rbtree__destroy(skel); } +static void test_rbtree_add_nodes_nested(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct rbtree *skel; + int ret; + + skel = rbtree__open_and_load(); + if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load")) + return; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_nodes_nested), &opts); + ASSERT_OK(ret, "rbtree_add_nodes_nested run"); + ASSERT_OK(opts.retval, "rbtree_add_nodes_nested retval"); + ASSERT_EQ(skel->data->less_callback_ran, 1, "rbtree_add_nodes_nested less_callback_ran"); + + rbtree__destroy(skel); +} + static void test_rbtree_add_and_remove(void) { LIBBPF_OPTS(bpf_test_run_opts, opts, @@ -53,6 +75,27 @@ static void test_rbtree_add_and_remove(void) rbtree__destroy(skel); } +static void test_rbtree_add_and_remove_array(void) +{ + LIBBPF_OPTS(bpf_test_run_opts, opts, + .data_in = &pkt_v4, + .data_size_in = sizeof(pkt_v4), + .repeat = 1, + ); + struct rbtree *skel; + int ret; + + skel = rbtree__open_and_load(); + if (!ASSERT_OK_PTR(skel, "rbtree__open_and_load")) + return; + + ret = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.rbtree_add_and_remove_array), &opts); + ASSERT_OK(ret, "rbtree_add_and_remove_array"); + ASSERT_OK(opts.retval, "rbtree_add_and_remove_array retval"); + + rbtree__destroy(skel); +} + static void test_rbtree_first_and_remove(void) { LIBBPF_OPTS(bpf_test_run_opts, opts, @@ -104,8 +147,12 @@ void test_rbtree_success(void) { if (test__start_subtest("rbtree_add_nodes")) test_rbtree_add_nodes(); + if (test__start_subtest("rbtree_add_nodes_nested")) + test_rbtree_add_nodes_nested(); if (test__start_subtest("rbtree_add_and_remove")) test_rbtree_add_and_remove(); + if (test__start_subtest("rbtree_add_and_remove_array")) + test_rbtree_add_and_remove_array(); if (test__start_subtest("rbtree_first_and_remove")) test_rbtree_first_and_remove(); if (test__start_subtest("rbtree_api_release_aliasing")) diff --git a/tools/testing/selftests/bpf/prog_tests/send_signal.c b/tools/testing/selftests/bpf/prog_tests/send_signal.c index 920aee41bd58..6cc69900b310 100644 --- a/tools/testing/selftests/bpf/prog_tests/send_signal.c +++ b/tools/testing/selftests/bpf/prog_tests/send_signal.c @@ -156,7 +156,8 @@ static void test_send_signal_tracepoint(bool signal_thread) static void test_send_signal_perf(bool signal_thread) { struct perf_event_attr attr = { - .sample_period = 1, + .freq = 1, + .sample_freq = 1000, .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_CLOCK, }; diff --git a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c index 597d0467a926..ae87c00867ba 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_lookup.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_lookup.c @@ -77,6 +77,12 @@ struct test { bool reuseport_has_conns; /* Add a connected socket to reuseport group */ }; +struct cb_opts { + int family; + int sotype; + bool reuseport; +}; + static __u32 
duration; /* for CHECK macro */ static bool is_ipv6(const char *ip) @@ -142,19 +148,14 @@ static int make_socket(int sotype, const char *ip, int port, return fd; } -static int make_server(int sotype, const char *ip, int port, - struct bpf_program *reuseport_prog) +static int setsockopts(int fd, void *opts) { - struct sockaddr_storage addr = {0}; + struct cb_opts *co = (struct cb_opts *)opts; const int one = 1; - int err, fd = -1; - - fd = make_socket(sotype, ip, port, &addr); - if (fd < 0) - return -1; + int err = 0; /* Enabled for UDPv6 sockets for IPv4-mapped IPv6 to work. */ - if (sotype == SOCK_DGRAM) { + if (co->sotype == SOCK_DGRAM) { err = setsockopt(fd, SOL_IP, IP_RECVORIGDSTADDR, &one, sizeof(one)); if (CHECK(err, "setsockopt(IP_RECVORIGDSTADDR)", "failed\n")) { @@ -163,7 +164,7 @@ static int make_server(int sotype, const char *ip, int port, } } - if (sotype == SOCK_DGRAM && addr.ss_family == AF_INET6) { + if (co->sotype == SOCK_DGRAM && co->family == AF_INET6) { err = setsockopt(fd, SOL_IPV6, IPV6_RECVORIGDSTADDR, &one, sizeof(one)); if (CHECK(err, "setsockopt(IPV6_RECVORIGDSTADDR)", "failed\n")) { @@ -172,7 +173,7 @@ static int make_server(int sotype, const char *ip, int port, } } - if (sotype == SOCK_STREAM) { + if (co->sotype == SOCK_STREAM) { err = setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)); if (CHECK(err, "setsockopt(SO_REUSEADDR)", "failed\n")) { @@ -181,7 +182,7 @@ static int make_server(int sotype, const char *ip, int port, } } - if (reuseport_prog) { + if (co->reuseport) { err = setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one)); if (CHECK(err, "setsockopt(SO_REUSEPORT)", "failed\n")) { @@ -190,19 +191,28 @@ static int make_server(int sotype, const char *ip, int port, } } - err = bind(fd, (void *)&addr, inetaddr_len(&addr)); - if (CHECK(err, "bind", "failed\n")) { - log_err("failed to bind listen socket"); - goto fail; - } +fail: + return err; +} - if (sotype == SOCK_STREAM) { - err = listen(fd, SOMAXCONN); - if (CHECK(err, "make_server", "listen")) { - log_err("failed to listen on port %d", port); - goto fail; - } - } +static int make_server(int sotype, const char *ip, int port, + struct bpf_program *reuseport_prog) +{ + struct cb_opts cb_opts = { + .family = is_ipv6(ip) ? AF_INET6 : AF_INET, + .sotype = sotype, + .reuseport = reuseport_prog, + }; + struct network_helper_opts opts = { + .backlog = SOMAXCONN, + .post_socket_cb = setsockopts, + .cb_opts = &cb_opts, + }; + int err, fd; + + fd = start_server_str(cb_opts.family, sotype, ip, port, &opts); + if (!ASSERT_OK_FD(fd, "start_server_str")) + return -1; /* Late attach reuseport prog so we can have one init path */ if (reuseport_prog) { @@ -406,18 +416,12 @@ static int udp_recv_send(int server_fd) } /* Reply from original destination address. */ - fd = socket(dst_addr->ss_family, SOCK_DGRAM, 0); - if (CHECK(fd < 0, "socket", "failed\n")) { + fd = start_server_addr(SOCK_DGRAM, dst_addr, sizeof(*dst_addr), NULL); + if (!ASSERT_OK_FD(fd, "start_server_addr")) { log_err("failed to create tx socket"); return -1; } - ret = bind(fd, (struct sockaddr *)dst_addr, sizeof(*dst_addr)); - if (CHECK(ret, "bind", "failed\n")) { - log_err("failed to bind tx socket"); - goto out; - } - msg.msg_control = NULL; msg.msg_controllen = 0; n = sendmsg(fd, &msg, 0); @@ -629,9 +633,6 @@ static void run_lookup_prog(const struct test *t) * BPF socket lookup. 
*/ if (t->reuseport_has_conns) { - struct sockaddr_storage addr = {}; - socklen_t len = sizeof(addr); - /* Add an extra socket to reuseport group */ reuse_conn_fd = make_server(t->sotype, t->listen_at.ip, t->listen_at.port, @@ -639,12 +640,9 @@ if (reuse_conn_fd < 0) goto close; - /* Connect the extra socket to itself */ - err = getsockname(reuse_conn_fd, (void *)&addr, &len); - if (CHECK(err, "getsockname", "errno %d\n", errno)) - goto close; - err = connect(reuse_conn_fd, (void *)&addr, len); - if (CHECK(err, "connect", "errno %d\n", errno)) + /* Connect the extra socket to itself */ + err = connect_fd_to_fd(reuse_conn_fd, reuse_conn_fd, 0); + if (!ASSERT_OK(err, "connect_fd_to_fd")) goto close; } @@ -994,7 +992,7 @@ static void drop_on_reuseport(const struct test *t) err = update_lookup_map(t->sock_map, SERVER_A, server1); if (err) - goto detach; + goto close_srv1; /* second server on destination address we should never reach */ server2 = make_server(t->sotype, t->connect_to.ip, t->connect_to.port, diff --git a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c index 1d3a20f01b60..7cd8be2780ca 100644 --- a/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c +++ b/tools/testing/selftests/bpf/prog_tests/sockopt_inherit.c @@ -70,7 +70,7 @@ static void *server_thread(void *arg) return (void *)(long)err; } -static int custom_cb(int fd, const struct post_socket_opts *opts) +static int custom_cb(int fd, void *opts) { char buf; int err; diff --git a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c index b1073d36d77a..327d51f59142 100644 --- a/tools/testing/selftests/bpf/prog_tests/tc_redirect.c +++ b/tools/testing/selftests/bpf/prog_tests/tc_redirect.c @@ -890,9 +890,6 @@ static void test_udp_dtime(struct test_tc_dtime *skel, int family, bool bpf_fwd) ASSERT_EQ(dtimes[INGRESS_FWDNS_P100], 0, dtime_cnt_str(t, INGRESS_FWDNS_P100)); - /* non mono delivery time is not forwarded */ - ASSERT_EQ(dtimes[INGRESS_FWDNS_P101], 0, - dtime_cnt_str(t, INGRESS_FWDNS_P101)); for (i = EGRESS_FWDNS_P100; i < SET_DTIME; i++) ASSERT_GT(dtimes[i], 0, dtime_cnt_str(t, i)); diff --git a/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c index ae93411fd582..09ca13bdf6ca 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c +++ b/tools/testing/selftests/bpf/prog_tests/test_skb_pkt_end.c @@ -11,6 +11,7 @@ static int sanity_run(struct bpf_program *prog) .data_in = &pkt_v4, .data_size_in = sizeof(pkt_v4), .repeat = 1, + .flags = BPF_F_TEST_SKB_CHECKSUM_COMPLETE, ); prog_fd = bpf_program__fd(prog); diff --git a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c index 29e183a80f49..bbcf12696a6b 100644 --- a/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c +++ b/tools/testing/selftests/bpf/prog_tests/test_struct_ops_module.c @@ -3,9 +3,12 @@ #include <test_progs.h> #include <time.h> +#include <sys/epoll.h> + #include "struct_ops_module.skel.h" #include "struct_ops_nulled_out_cb.skel.h" #include "struct_ops_forgotten_cb.skel.h" +#include "struct_ops_detach.skel.h" static void check_map_info(struct bpf_map_info *info) { @@ -242,6 +245,58 @@ cleanup: struct_ops_forgotten_cb__destroy(skel); } +/* Detach a link from a user space program */ +static void test_detach_link(void) +{ + struct epoll_event ev, events[2]; +
struct struct_ops_detach *skel; + struct bpf_link *link = NULL; + int fd, epollfd = -1, nfds; + int err; + + skel = struct_ops_detach__open_and_load(); + if (!ASSERT_OK_PTR(skel, "struct_ops_detach__open_and_load")) + return; + + link = bpf_map__attach_struct_ops(skel->maps.testmod_do_detach); + if (!ASSERT_OK_PTR(link, "attach_struct_ops")) + goto cleanup; + + fd = bpf_link__fd(link); + if (!ASSERT_GE(fd, 0, "link_fd")) + goto cleanup; + + epollfd = epoll_create1(0); + if (!ASSERT_GE(epollfd, 0, "epoll_create1")) + goto cleanup; + + ev.events = EPOLLHUP; + ev.data.fd = fd; + err = epoll_ctl(epollfd, EPOLL_CTL_ADD, fd, &ev); + if (!ASSERT_OK(err, "epoll_ctl")) + goto cleanup; + + err = bpf_link__detach(link); + if (!ASSERT_OK(err, "detach_link")) + goto cleanup; + + /* Wait for EPOLLHUP */ + nfds = epoll_wait(epollfd, events, 2, 500); + if (!ASSERT_EQ(nfds, 1, "epoll_wait")) + goto cleanup; + + if (!ASSERT_EQ(events[0].data.fd, fd, "epoll_wait_fd")) + goto cleanup; + if (!ASSERT_TRUE(events[0].events & EPOLLHUP, "events[0].events")) + goto cleanup; + +cleanup: + if (epollfd >= 0) + close(epollfd); + bpf_link__destroy(link); + struct_ops_detach__destroy(skel); +} + void serial_test_struct_ops_module(void) { if (test__start_subtest("struct_ops_load")) @@ -254,5 +309,7 @@ void serial_test_struct_ops_module(void) test_struct_ops_nulled_out_cb(); if (test__start_subtest("struct_ops_forgotten_cb")) test_struct_ops_forgotten_cb(); + if (test__start_subtest("test_detach_link")) + test_detach_link(); } diff --git a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c index fe0fb0c9849a..19e68d4b3532 100644 --- a/tools/testing/selftests/bpf/prog_tests/tracing_struct.c +++ b/tools/testing/selftests/bpf/prog_tests/tracing_struct.c @@ -3,8 +3,9 @@ #include <test_progs.h> #include "tracing_struct.skel.h" +#include "tracing_struct_many_args.skel.h" -static void test_fentry(void) +static void test_struct_args(void) { struct tracing_struct *skel; int err; @@ -55,6 +56,25 @@ ASSERT_EQ(skel->bss->t6, 1, "t6 ret"); +destroy_skel: + tracing_struct__destroy(skel); +} + +static void test_struct_many_args(void) +{ + struct tracing_struct_many_args *skel; + int err; + + skel = tracing_struct_many_args__open_and_load(); + if (!ASSERT_OK_PTR(skel, "tracing_struct_many_args__open_and_load")) + return; + + err = tracing_struct_many_args__attach(skel); + if (!ASSERT_OK(err, "tracing_struct_many_args__attach")) + goto destroy_skel; + + ASSERT_OK(trigger_module_test_read(256), "trigger_read"); + ASSERT_EQ(skel->bss->t7_a, 16, "t7:a"); ASSERT_EQ(skel->bss->t7_b, 17, "t7:b"); ASSERT_EQ(skel->bss->t7_c, 18, "t7:c"); @@ -74,12 +94,28 @@ ASSERT_EQ(skel->bss->t8_g, 23, "t8:g"); ASSERT_EQ(skel->bss->t8_ret, 156, "t8 ret"); - tracing_struct__detach(skel); + ASSERT_EQ(skel->bss->t9_a, 16, "t9:a"); + ASSERT_EQ(skel->bss->t9_b, 17, "t9:b"); + ASSERT_EQ(skel->bss->t9_c, 18, "t9:c"); + ASSERT_EQ(skel->bss->t9_d, 19, "t9:d"); + ASSERT_EQ(skel->bss->t9_e, 20, "t9:e"); + ASSERT_EQ(skel->bss->t9_f, 21, "t9:f"); + ASSERT_EQ(skel->bss->t9_g, 22, "t9:g"); + ASSERT_EQ(skel->bss->t9_h_a, 23, "t9:h.a"); + ASSERT_EQ(skel->bss->t9_h_b, 24, "t9:h.b"); + ASSERT_EQ(skel->bss->t9_h_c, 25, "t9:h.c"); + ASSERT_EQ(skel->bss->t9_h_d, 26, "t9:h.d"); + ASSERT_EQ(skel->bss->t9_i, 27, "t9:i"); + ASSERT_EQ(skel->bss->t9_ret, 258, "t9 ret"); + destroy_skel: - tracing_struct__destroy(skel); + tracing_struct_many_args__destroy(skel); } void
test_tracing_struct(void) { - test_fentry(); + if (test__start_subtest("struct_args")) + test_struct_args(); + if (test__start_subtest("struct_many_args")) + test_struct_many_args(); } diff --git a/tools/testing/selftests/bpf/prog_tests/verifier.c b/tools/testing/selftests/bpf/prog_tests/verifier.c index 98ef39efa77e..9dc3687bc406 100644 --- a/tools/testing/selftests/bpf/prog_tests/verifier.c +++ b/tools/testing/selftests/bpf/prog_tests/verifier.c @@ -87,6 +87,7 @@ #include "verifier_xadd.skel.h" #include "verifier_xdp.skel.h" #include "verifier_xdp_direct_packet_access.skel.h" +#include "verifier_bits_iter.skel.h" #define MAX_ENTRIES 11 @@ -204,6 +205,7 @@ void test_verifier_var_off(void) { RUN(verifier_var_off); } void test_verifier_xadd(void) { RUN(verifier_xadd); } void test_verifier_xdp(void) { RUN(verifier_xdp); } void test_verifier_xdp_direct_packet_access(void) { RUN(verifier_xdp_direct_packet_access); } +void test_verifier_bits_iter(void) { RUN(verifier_bits_iter); } static int init_test_val_map(struct bpf_object *obj, char *map_name) { diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c index f09505f8b038..53d6ad8c2257 100644 --- a/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c +++ b/tools/testing/selftests/bpf/prog_tests/xdp_adjust_tail.c @@ -222,7 +222,7 @@ static void test_xdp_adjust_frags_tail_grow(void) prog = bpf_object__next_program(obj, NULL); if (bpf_object__load(obj)) - return; + goto out; prog_fd = bpf_program__fd(prog); diff --git a/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c b/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c new file mode 100644 index 000000000000..e1bf141d3401 --- /dev/null +++ b/tools/testing/selftests/bpf/prog_tests/xdp_flowtable.c @@ -0,0 +1,168 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include +#include +#include +#include +#include + +#include "xdp_flowtable.skel.h" + +#define TX_NETNS_NAME "ns0" +#define RX_NETNS_NAME "ns1" + +#define TX_NAME "v0" +#define FORWARD_NAME "v1" +#define RX_NAME "d0" + +#define TX_MAC "00:00:00:00:00:01" +#define FORWARD_MAC "00:00:00:00:00:02" +#define RX_MAC "00:00:00:00:00:03" +#define DST_MAC "00:00:00:00:00:04" + +#define TX_ADDR "10.0.0.1" +#define FORWARD_ADDR "10.0.0.2" +#define RX_ADDR "20.0.0.1" +#define DST_ADDR "20.0.0.2" + +#define PREFIX_LEN "8" +#define N_PACKETS 10 +#define UDP_PORT 12345 +#define UDP_PORT_STR "12345" + +static int send_udp_traffic(void) +{ + struct sockaddr_storage addr; + int i, sock; + + if (make_sockaddr(AF_INET, DST_ADDR, UDP_PORT, &addr, NULL)) + return -EINVAL; + + sock = socket(AF_INET, SOCK_DGRAM, 0); + if (sock < 0) + return sock; + + for (i = 0; i < N_PACKETS; i++) { + unsigned char buf[] = { 0xaa, 0xbb, 0xcc }; + int n; + + n = sendto(sock, buf, sizeof(buf), MSG_NOSIGNAL | MSG_CONFIRM, + (struct sockaddr *)&addr, sizeof(addr)); + if (n != sizeof(buf)) { + close(sock); + return -EINVAL; + } + + usleep(50000); /* 50ms */ + } + close(sock); + + return 0; +} + +void test_xdp_flowtable(void) +{ + struct xdp_flowtable *skel = NULL; + struct nstoken *tok = NULL; + int iifindex, stats_fd; + __u32 value, key = 0; + struct bpf_link *link; + + if (SYS_NOFAIL("nft -v")) { + fprintf(stdout, "Missing required nft tool\n"); + test__skip(); + return; + } + + SYS(out, "ip netns add " TX_NETNS_NAME); + SYS(out, "ip netns add " RX_NETNS_NAME); + + tok = open_netns(RX_NETNS_NAME); + if (!ASSERT_OK_PTR(tok, "setns")) + goto out; + + SYS(out, "sysctl -qw 
net.ipv4.conf.all.forwarding=1"); + + SYS(out, "ip link add " TX_NAME " type veth peer " FORWARD_NAME); + SYS(out, "ip link set " TX_NAME " netns " TX_NETNS_NAME); + SYS(out, "ip link set dev " FORWARD_NAME " address " FORWARD_MAC); + SYS(out, + "ip addr add " FORWARD_ADDR "/" PREFIX_LEN " dev " FORWARD_NAME); + SYS(out, "ip link set dev " FORWARD_NAME " up"); + + SYS(out, "ip link add " RX_NAME " type dummy"); + SYS(out, "ip link set dev " RX_NAME " address " RX_MAC); + SYS(out, "ip addr add " RX_ADDR "/" PREFIX_LEN " dev " RX_NAME); + SYS(out, "ip link set dev " RX_NAME " up"); + + /* configure the flowtable */ + SYS(out, "nft add table ip filter"); + SYS(out, + "nft add flowtable ip filter f { hook ingress priority 0\\; " + "devices = { " FORWARD_NAME ", " RX_NAME " }\\; }"); + SYS(out, + "nft add chain ip filter forward " + "{ type filter hook forward priority 0\\; }"); + SYS(out, + "nft add rule ip filter forward ip protocol udp th dport " + UDP_PORT_STR " flow add @f"); + + /* Avoid ARP calls */ + SYS(out, + "ip -4 neigh add " DST_ADDR " lladdr " DST_MAC " dev " RX_NAME); + + close_netns(tok); + tok = open_netns(TX_NETNS_NAME); + if (!ASSERT_OK_PTR(tok, "setns")) + goto out; + + SYS(out, "ip addr add " TX_ADDR "/" PREFIX_LEN " dev " TX_NAME); + SYS(out, "ip link set dev " TX_NAME " address " TX_MAC); + SYS(out, "ip link set dev " TX_NAME " up"); + SYS(out, "ip route add default via " FORWARD_ADDR); + + close_netns(tok); + tok = open_netns(RX_NETNS_NAME); + if (!ASSERT_OK_PTR(tok, "setns")) + goto out; + + iifindex = if_nametoindex(FORWARD_NAME); + if (!ASSERT_NEQ(iifindex, 0, "iifindex")) + goto out; + + skel = xdp_flowtable__open_and_load(); + if (!ASSERT_OK_PTR(skel, "skel")) + goto out; + + link = bpf_program__attach_xdp(skel->progs.xdp_flowtable_do_lookup, + iifindex); + if (!ASSERT_OK_PTR(link, "prog_attach")) + goto out; + + close_netns(tok); + tok = open_netns(TX_NETNS_NAME); + if (!ASSERT_OK_PTR(tok, "setns")) + goto out; + + if (!ASSERT_OK(send_udp_traffic(), "send udp")) + goto out; + + close_netns(tok); + tok = open_netns(RX_NETNS_NAME); + if (!ASSERT_OK_PTR(tok, "setns")) + goto out; + + stats_fd = bpf_map__fd(skel->maps.stats); + if (!ASSERT_OK(bpf_map_lookup_elem(stats_fd, &key, &value), + "bpf_map_update_elem stats")) + goto out; + + ASSERT_GE(value, N_PACKETS - 2, "bpf_xdp_flow_lookup failed"); +out: + xdp_flowtable__destroy(skel); + if (tok) + close_netns(tok); + SYS_NOFAIL("ip netns del " TX_NETNS_NAME); + SYS_NOFAIL("ip netns del " RX_NETNS_NAME); +} diff --git a/tools/testing/selftests/bpf/progs/arena_atomics.c b/tools/testing/selftests/bpf/progs/arena_atomics.c index 55f10563208d..bb0acd79d28a 100644 --- a/tools/testing/selftests/bpf/progs/arena_atomics.c +++ b/tools/testing/selftests/bpf/progs/arena_atomics.c @@ -25,20 +25,13 @@ bool skip_tests = true; __u32 pid = 0; -#undef __arena -#if defined(__BPF_FEATURE_ADDR_SPACE_CAST) -#define __arena __attribute__((address_space(1))) -#else -#define __arena SEC(".addr_space.1") -#endif - -__u64 __arena add64_value = 1; -__u64 __arena add64_result = 0; -__u32 __arena add32_value = 1; -__u32 __arena add32_result = 0; -__u64 __arena add_stack_value_copy = 0; -__u64 __arena add_stack_result = 0; -__u64 __arena add_noreturn_value = 1; +__u64 __arena_global add64_value = 1; +__u64 __arena_global add64_result = 0; +__u32 __arena_global add32_value = 1; +__u32 __arena_global add32_result = 0; +__u64 __arena_global add_stack_value_copy = 0; +__u64 __arena_global add_stack_result = 0; +__u64 __arena_global add_noreturn_value = 1; 
SEC("raw_tp/sys_enter") int add(const void *ctx) @@ -58,13 +51,13 @@ int add(const void *ctx) return 0; } -__s64 __arena sub64_value = 1; -__s64 __arena sub64_result = 0; -__s32 __arena sub32_value = 1; -__s32 __arena sub32_result = 0; -__s64 __arena sub_stack_value_copy = 0; -__s64 __arena sub_stack_result = 0; -__s64 __arena sub_noreturn_value = 1; +__s64 __arena_global sub64_value = 1; +__s64 __arena_global sub64_result = 0; +__s32 __arena_global sub32_value = 1; +__s32 __arena_global sub32_result = 0; +__s64 __arena_global sub_stack_value_copy = 0; +__s64 __arena_global sub_stack_result = 0; +__s64 __arena_global sub_noreturn_value = 1; SEC("raw_tp/sys_enter") int sub(const void *ctx) @@ -84,8 +77,8 @@ int sub(const void *ctx) return 0; } -__u64 __arena and64_value = (0x110ull << 32); -__u32 __arena and32_value = 0x110; +__u64 __arena_global and64_value = (0x110ull << 32); +__u32 __arena_global and32_value = 0x110; SEC("raw_tp/sys_enter") int and(const void *ctx) @@ -101,8 +94,8 @@ int and(const void *ctx) return 0; } -__u32 __arena or32_value = 0x110; -__u64 __arena or64_value = (0x110ull << 32); +__u32 __arena_global or32_value = 0x110; +__u64 __arena_global or64_value = (0x110ull << 32); SEC("raw_tp/sys_enter") int or(const void *ctx) @@ -117,8 +110,8 @@ int or(const void *ctx) return 0; } -__u64 __arena xor64_value = (0x110ull << 32); -__u32 __arena xor32_value = 0x110; +__u64 __arena_global xor64_value = (0x110ull << 32); +__u32 __arena_global xor32_value = 0x110; SEC("raw_tp/sys_enter") int xor(const void *ctx) @@ -133,12 +126,12 @@ int xor(const void *ctx) return 0; } -__u32 __arena cmpxchg32_value = 1; -__u32 __arena cmpxchg32_result_fail = 0; -__u32 __arena cmpxchg32_result_succeed = 0; -__u64 __arena cmpxchg64_value = 1; -__u64 __arena cmpxchg64_result_fail = 0; -__u64 __arena cmpxchg64_result_succeed = 0; +__u32 __arena_global cmpxchg32_value = 1; +__u32 __arena_global cmpxchg32_result_fail = 0; +__u32 __arena_global cmpxchg32_result_succeed = 0; +__u64 __arena_global cmpxchg64_value = 1; +__u64 __arena_global cmpxchg64_result_fail = 0; +__u64 __arena_global cmpxchg64_result_succeed = 0; SEC("raw_tp/sys_enter") int cmpxchg(const void *ctx) @@ -156,10 +149,10 @@ int cmpxchg(const void *ctx) return 0; } -__u64 __arena xchg64_value = 1; -__u64 __arena xchg64_result = 0; -__u32 __arena xchg32_value = 1; -__u32 __arena xchg32_result = 0; +__u64 __arena_global xchg64_value = 1; +__u64 __arena_global xchg64_result = 0; +__u32 __arena_global xchg32_value = 1; +__u32 __arena_global xchg32_result = 0; SEC("raw_tp/sys_enter") int xchg(const void *ctx) @@ -176,3 +169,79 @@ int xchg(const void *ctx) return 0; } + +__u64 __arena_global uaf_sink; +volatile __u64 __arena_global uaf_recovery_fails; + +SEC("syscall") +int uaf(const void *ctx) +{ + if (pid != (bpf_get_current_pid_tgid() >> 32)) + return 0; +#if defined(ENABLE_ATOMICS_TESTS) && !defined(__TARGET_ARCH_arm64) && \ + !defined(__TARGET_ARCH_x86) + __u32 __arena *page32; + __u64 __arena *page64; + void __arena *page; + + page = bpf_arena_alloc_pages(&arena, NULL, 1, NUMA_NO_NODE, 0); + bpf_arena_free_pages(&arena, page, 1); + uaf_recovery_fails = 24; + + page32 = (__u32 __arena *)page; + uaf_sink += __sync_fetch_and_add(page32, 1); + uaf_recovery_fails -= 1; + __sync_add_and_fetch(page32, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_fetch_and_sub(page32, 1); + uaf_recovery_fails -= 1; + __sync_sub_and_fetch(page32, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_fetch_and_and(page32, 1); + uaf_recovery_fails -= 1; + 
__sync_and_and_fetch(page32, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_fetch_and_or(page32, 1); + uaf_recovery_fails -= 1; + __sync_or_and_fetch(page32, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_fetch_and_xor(page32, 1); + uaf_recovery_fails -= 1; + __sync_xor_and_fetch(page32, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_val_compare_and_swap(page32, 0, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_lock_test_and_set(page32, 1); + uaf_recovery_fails -= 1; + + page64 = (__u64 __arena *)page; + uaf_sink += __sync_fetch_and_add(page64, 1); + uaf_recovery_fails -= 1; + __sync_add_and_fetch(page64, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_fetch_and_sub(page64, 1); + uaf_recovery_fails -= 1; + __sync_sub_and_fetch(page64, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_fetch_and_and(page64, 1); + uaf_recovery_fails -= 1; + __sync_and_and_fetch(page64, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_fetch_and_or(page64, 1); + uaf_recovery_fails -= 1; + __sync_or_and_fetch(page64, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_fetch_and_xor(page64, 1); + uaf_recovery_fails -= 1; + __sync_xor_and_fetch(page64, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_val_compare_and_swap(page64, 0, 1); + uaf_recovery_fails -= 1; + uaf_sink += __sync_lock_test_and_set(page64, 1); + uaf_recovery_fails -= 1; +#endif + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/arena_htab.c b/tools/testing/selftests/bpf/progs/arena_htab.c index 1e6ac187a6a0..81eaa94afeb0 100644 --- a/tools/testing/selftests/bpf/progs/arena_htab.c +++ b/tools/testing/selftests/bpf/progs/arena_htab.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#define BPF_NO_KFUNC_PROTOTYPES #include #include #include @@ -18,25 +19,35 @@ void __arena *htab_for_user; bool skip = false; int zero = 0; +char __arena arr1[100000]; +char arr2[1000]; SEC("syscall") int arena_htab_llvm(void *ctx) { #if defined(__BPF_FEATURE_ADDR_SPACE_CAST) || defined(BPF_ARENA_FORCE_ASM) struct htab __arena *htab; + char __arena *arr = arr1; __u64 i; htab = bpf_alloc(sizeof(*htab)); cast_kern(htab); htab_init(htab); - /* first run. No old elems in the table */ - for (i = zero; i < 1000; i++) - htab_update_elem(htab, i, i); + cast_kern(arr); - /* should replace all elems with new ones */ - for (i = zero; i < 1000; i++) + /* first run. No old elems in the table */ + for (i = zero; i < 100000 && can_loop; i++) { htab_update_elem(htab, i, i); + arr[i] = i; + } + + /* should replace some elems with new ones */ + for (i = zero; i < 1000 && can_loop; i++) { + htab_update_elem(htab, i, i); + /* Access mem to make the verifier use bounded loop logic */ + arr2[i] = i; + } cast_user(htab); htab_for_user = htab; #else diff --git a/tools/testing/selftests/bpf/progs/arena_list.c b/tools/testing/selftests/bpf/progs/arena_list.c index 93bd0600eba0..3a2ddcacbea6 100644 --- a/tools/testing/selftests/bpf/progs/arena_list.c +++ b/tools/testing/selftests/bpf/progs/arena_list.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ +#define BPF_NO_KFUNC_PROTOTYPES #include #include #include diff --git a/tools/testing/selftests/bpf/progs/bpf_dctcp.c b/tools/testing/selftests/bpf/progs/bpf_dctcp.c index 3c9ffe340312..02f552e7fd4d 100644 --- a/tools/testing/selftests/bpf/progs/bpf_dctcp.c +++ b/tools/testing/selftests/bpf/progs/bpf_dctcp.c @@ -65,7 +65,7 @@ static void dctcp_reset(const struct tcp_sock *tp, struct bpf_dctcp *ca) } SEC("struct_ops") -void BPF_PROG(dctcp_init, struct sock *sk) +void BPF_PROG(bpf_dctcp_init, struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); struct bpf_dctcp *ca = inet_csk_ca(sk); @@ -77,7 +77,7 @@ void BPF_PROG(dctcp_init, struct sock *sk) (void *)fallback, sizeof(fallback)) == -EBUSY) ebusy_cnt++; - /* Switch back to myself and the recurred dctcp_init() + /* Switch back to myself and the recurred bpf_dctcp_init() * will get -EBUSY for all bpf_setsockopt(TCP_CONGESTION), * except the last "cdg" one. */ @@ -112,7 +112,7 @@ void BPF_PROG(dctcp_init, struct sock *sk) } SEC("struct_ops") -__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk) +__u32 BPF_PROG(bpf_dctcp_ssthresh, struct sock *sk) { struct bpf_dctcp *ca = inet_csk_ca(sk); struct tcp_sock *tp = tcp_sk(sk); @@ -122,7 +122,7 @@ __u32 BPF_PROG(dctcp_ssthresh, struct sock *sk) } SEC("struct_ops") -void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags) +void BPF_PROG(bpf_dctcp_update_alpha, struct sock *sk, __u32 flags) { const struct tcp_sock *tp = tcp_sk(sk); struct bpf_dctcp *ca = inet_csk_ca(sk); @@ -161,12 +161,12 @@ static void dctcp_react_to_loss(struct sock *sk) } SEC("struct_ops") -void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state) +void BPF_PROG(bpf_dctcp_state, struct sock *sk, __u8 new_state) { if (new_state == TCP_CA_Recovery && new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state)) dctcp_react_to_loss(sk); - /* We handle RTO in dctcp_cwnd_event to ensure that we perform only + /* We handle RTO in bpf_dctcp_cwnd_event to ensure that we perform only * one loss-adjustment per RTT. 
*/ } @@ -208,7 +208,7 @@ static void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt, } SEC("struct_ops") -void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev) +void BPF_PROG(bpf_dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev) { struct bpf_dctcp *ca = inet_csk_ca(sk); @@ -227,7 +227,7 @@ void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev) } SEC("struct_ops") -__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk) +__u32 BPF_PROG(bpf_dctcp_cwnd_undo, struct sock *sk) { const struct bpf_dctcp *ca = inet_csk_ca(sk); @@ -237,28 +237,28 @@ __u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk) extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym; SEC("struct_ops") -void BPF_PROG(dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked) +void BPF_PROG(bpf_dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked) { tcp_reno_cong_avoid(sk, ack, acked); } SEC(".struct_ops") struct tcp_congestion_ops dctcp_nouse = { - .init = (void *)dctcp_init, - .set_state = (void *)dctcp_state, + .init = (void *)bpf_dctcp_init, + .set_state = (void *)bpf_dctcp_state, .flags = TCP_CONG_NEEDS_ECN, .name = "bpf_dctcp_nouse", }; SEC(".struct_ops") struct tcp_congestion_ops dctcp = { - .init = (void *)dctcp_init, - .in_ack_event = (void *)dctcp_update_alpha, - .cwnd_event = (void *)dctcp_cwnd_event, - .ssthresh = (void *)dctcp_ssthresh, - .cong_avoid = (void *)dctcp_cong_avoid, - .undo_cwnd = (void *)dctcp_cwnd_undo, - .set_state = (void *)dctcp_state, + .init = (void *)bpf_dctcp_init, + .in_ack_event = (void *)bpf_dctcp_update_alpha, + .cwnd_event = (void *)bpf_dctcp_cwnd_event, + .ssthresh = (void *)bpf_dctcp_ssthresh, + .cong_avoid = (void *)bpf_dctcp_cong_avoid, + .undo_cwnd = (void *)bpf_dctcp_cwnd_undo, + .set_state = (void *)bpf_dctcp_state, .flags = TCP_CONG_NEEDS_ECN, .name = "bpf_dctcp", }; diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c index c5969ca6f26b..564835ba7d51 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_array_map.c @@ -6,12 +6,6 @@ char _license[] SEC("license") = "GPL"; -struct key_t { - int a; - int b; - int c; -}; - struct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(max_entries, 3); diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c index 85fa710fad90..9f0e0705b2bf 100644 --- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c +++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_percpu_array_map.c @@ -6,12 +6,6 @@ char _license[] SEC("license") = "GPL"; -struct key_t { - int a; - int b; - int c; -}; - struct { __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); __uint(max_entries, 3); diff --git a/tools/testing/selftests/bpf/progs/bpf_misc.h b/tools/testing/selftests/bpf/progs/bpf_misc.h index fb2f5513e29e..81097a3f15eb 100644 --- a/tools/testing/selftests/bpf/progs/bpf_misc.h +++ b/tools/testing/selftests/bpf/progs/bpf_misc.h @@ -7,9 +7,9 @@ * * The test_loader sequentially loads each program in a skeleton. * Programs could be loaded in privileged and unprivileged modes. - * - __success, __failure, __msg imply privileged mode; - * - __success_unpriv, __failure_unpriv, __msg_unpriv imply - * unprivileged mode. 
+ * - __success, __failure, __msg, __regex imply privileged mode; + * - __success_unpriv, __failure_unpriv, __msg_unpriv, __regex_unpriv + * imply unprivileged mode. * If combination of privileged and unprivileged attributes is present * both modes are used. If none are present privileged mode is implied. * @@ -24,6 +24,9 @@ * Multiple __msg attributes could be specified. * __msg_unpriv Same as __msg but for unprivileged mode. * + * __regex Same as __msg, but using a regular expression. + * __regex_unpriv Same as __msg_unpriv but using a regular expression. + * * __success Expect program load success in privileged mode. * __success_unpriv Expect program load success in unprivileged mode. * @@ -59,10 +62,12 @@ * __auxiliary_unpriv Same, but load program in unprivileged mode. */ #define __msg(msg) __attribute__((btf_decl_tag("comment:test_expect_msg=" msg))) +#define __regex(regex) __attribute__((btf_decl_tag("comment:test_expect_regex=" regex))) #define __failure __attribute__((btf_decl_tag("comment:test_expect_failure"))) #define __success __attribute__((btf_decl_tag("comment:test_expect_success"))) #define __description(desc) __attribute__((btf_decl_tag("comment:test_description=" desc))) #define __msg_unpriv(msg) __attribute__((btf_decl_tag("comment:test_expect_msg_unpriv=" msg))) +#define __regex_unpriv(regex) __attribute__((btf_decl_tag("comment:test_expect_regex_unpriv=" regex))) #define __failure_unpriv __attribute__((btf_decl_tag("comment:test_expect_failure_unpriv"))) #define __success_unpriv __attribute__((btf_decl_tag("comment:test_expect_success_unpriv"))) #define __log_level(lvl) __attribute__((btf_decl_tag("comment:test_log_level="#lvl))) @@ -135,4 +140,8 @@ /* make it look to compiler like value is read and written */ #define __sink(expr) asm volatile("" : "+g"(expr)) +#ifndef ARRAY_SIZE +#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) +#endif + #endif diff --git a/tools/testing/selftests/bpf/progs/cpumask_success.c b/tools/testing/selftests/bpf/progs/cpumask_success.c index 7a1e64c6c065..fd8106831c32 100644 --- a/tools/testing/selftests/bpf/progs/cpumask_success.c +++ b/tools/testing/selftests/bpf/progs/cpumask_success.c @@ -12,6 +12,31 @@ char _license[] SEC("license") = "GPL"; int pid, nr_cpus; +struct kptr_nested { + struct bpf_cpumask __kptr * mask; +}; + +struct kptr_nested_pair { + struct bpf_cpumask __kptr * mask_1; + struct bpf_cpumask __kptr * mask_2; +}; + +struct kptr_nested_mid { + int dummy; + struct kptr_nested m; +}; + +struct kptr_nested_deep { + struct kptr_nested_mid ptrs[2]; + struct kptr_nested_pair ptr_pairs[3]; +}; + +private(MASK) static struct bpf_cpumask __kptr * global_mask_array[2]; +private(MASK) static struct bpf_cpumask __kptr * global_mask_array_l2[2][1]; +private(MASK) static struct bpf_cpumask __kptr * global_mask_array_one[1]; +private(MASK) static struct kptr_nested global_mask_nested[2]; +private(MASK_DEEP) static struct kptr_nested_deep global_mask_nested_deep; + static bool is_test_task(void) { int cur_pid = bpf_get_current_pid_tgid() >> 32; @@ -460,6 +485,152 @@ int BPF_PROG(test_global_mask_rcu, struct task_struct *task, u64 clone_flags) return 0; } +SEC("tp_btf/task_newtask") +int BPF_PROG(test_global_mask_array_one_rcu, struct task_struct *task, u64 clone_flags) +{ + struct bpf_cpumask *local, *prev; + + if (!is_test_task()) + return 0; + + /* Kptr arrays with one element are special cased, being treated + * just like a single pointer. 
+ */ + + local = create_cpumask(); + if (!local) + return 0; + + prev = bpf_kptr_xchg(&global_mask_array_one[0], local); + if (prev) { + bpf_cpumask_release(prev); + err = 3; + return 0; + } + + bpf_rcu_read_lock(); + local = global_mask_array_one[0]; + if (!local) { + err = 4; + bpf_rcu_read_unlock(); + return 0; + } + + bpf_rcu_read_unlock(); + + return 0; +} + +static int _global_mask_array_rcu(struct bpf_cpumask **mask0, + struct bpf_cpumask **mask1) +{ + struct bpf_cpumask *local; + + if (!is_test_task()) + return 0; + + /* Check that the two kptrs in the array work independently */ + + local = create_cpumask(); + if (!local) + return 0; + + bpf_rcu_read_lock(); + + local = bpf_kptr_xchg(mask0, local); + if (local) { + err = 1; + goto err_exit; + } + + /* [<mask 0>, NULL] */ + if (!*mask0 || *mask1) { + err = 2; + goto err_exit; + } + + local = create_cpumask(); + if (!local) { + err = 9; + goto err_exit; + } + + local = bpf_kptr_xchg(mask1, local); + if (local) { + err = 10; + goto err_exit; + } + + /* [<mask 0>, <mask 1>] */ + if (!*mask0 || !*mask1 || *mask0 == *mask1) { + err = 11; + goto err_exit; + } + +err_exit: + if (local) + bpf_cpumask_release(local); + bpf_rcu_read_unlock(); + return 0; +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_global_mask_array_rcu, struct task_struct *task, u64 clone_flags) +{ + return _global_mask_array_rcu(&global_mask_array[0], &global_mask_array[1]); +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_global_mask_array_l2_rcu, struct task_struct *task, u64 clone_flags) +{ + return _global_mask_array_rcu(&global_mask_array_l2[0][0], &global_mask_array_l2[1][0]); +} + +SEC("tp_btf/task_newtask") +int BPF_PROG(test_global_mask_nested_rcu, struct task_struct *task, u64 clone_flags) +{ + return _global_mask_array_rcu(&global_mask_nested[0].mask, &global_mask_nested[1].mask); +} + +/* Ensure that field->offset has been correctly advanced from one + * nested struct or array sub-tree to another. In the case of + * kptr_nested_deep, it comprises two sub-trees: ptrs and ptr_pairs. By + * calling bpf_kptr_xchg() on every single kptr in both nested sub-trees, + * the verifier should reject the program if the field->offset of any kptr + * is incorrect. + * + * For instance, if we have 10 kptrs in a nested struct and a program that + * accesses each kptr individually with bpf_kptr_xchg(), the compiler + * should emit instructions for 10 different offsets if it works + * correctly. If the field->offset values of any pair of them were + * incorrectly the same, the number of unique offsets in the btf_record + * for this nested struct would be less than 10, and the verifier would + * fail to find a btf_field for some of the offsets the compiler emitted. + * + * Even if the field->offset values of the kptrs are not duplicated, the + * verifier should still fail to find a btf_field for an instruction + * accessing a kptr if the corresponding field->offset points to a random + * incorrect offset.
+ */ +SEC("tp_btf/task_newtask") +int BPF_PROG(test_global_mask_nested_deep_rcu, struct task_struct *task, u64 clone_flags) +{ + int r, i; + + r = _global_mask_array_rcu(&global_mask_nested_deep.ptrs[0].m.mask, + &global_mask_nested_deep.ptrs[1].m.mask); + if (r) + return r; + + for (i = 0; i < 3; i++) { + r = _global_mask_array_rcu(&global_mask_nested_deep.ptr_pairs[i].mask_1, + &global_mask_nested_deep.ptr_pairs[i].mask_2); + if (r) + return r; + } + return 0; +} + SEC("tp_btf/task_newtask") int BPF_PROG(test_cpumask_weight, struct task_struct *task, u64 clone_flags) { diff --git a/tools/testing/selftests/bpf/progs/crypto_bench.c b/tools/testing/selftests/bpf/progs/crypto_bench.c index e61fe0882293..4ac956b26240 100644 --- a/tools/testing/selftests/bpf/progs/crypto_bench.c +++ b/tools/testing/selftests/bpf/progs/crypto_bench.c @@ -57,7 +57,7 @@ int crypto_encrypt(struct __sk_buff *skb) { struct __crypto_ctx_value *v; struct bpf_crypto_ctx *ctx; - struct bpf_dynptr psrc, pdst, iv; + struct bpf_dynptr psrc, pdst; v = crypto_ctx_value_lookup(); if (!v) { @@ -73,9 +73,8 @@ int crypto_encrypt(struct __sk_buff *skb) bpf_dynptr_from_skb(skb, 0, &psrc); bpf_dynptr_from_mem(dst, len, 0, &pdst); - bpf_dynptr_from_mem(dst, 0, 0, &iv); - status = bpf_crypto_encrypt(ctx, &psrc, &pdst, &iv); + status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL); __sync_add_and_fetch(&hits, 1); return 0; @@ -84,7 +83,7 @@ int crypto_encrypt(struct __sk_buff *skb) SEC("tc") int crypto_decrypt(struct __sk_buff *skb) { - struct bpf_dynptr psrc, pdst, iv; + struct bpf_dynptr psrc, pdst; struct __crypto_ctx_value *v; struct bpf_crypto_ctx *ctx; @@ -98,9 +97,8 @@ int crypto_decrypt(struct __sk_buff *skb) bpf_dynptr_from_skb(skb, 0, &psrc); bpf_dynptr_from_mem(dst, len, 0, &pdst); - bpf_dynptr_from_mem(dst, 0, 0, &iv); - status = bpf_crypto_decrypt(ctx, &psrc, &pdst, &iv); + status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL); __sync_add_and_fetch(&hits, 1); return 0; diff --git a/tools/testing/selftests/bpf/progs/crypto_sanity.c b/tools/testing/selftests/bpf/progs/crypto_sanity.c index 1be0a3fa5efd..645be6cddf36 100644 --- a/tools/testing/selftests/bpf/progs/crypto_sanity.c +++ b/tools/testing/selftests/bpf/progs/crypto_sanity.c @@ -89,7 +89,7 @@ int decrypt_sanity(struct __sk_buff *skb) { struct __crypto_ctx_value *v; struct bpf_crypto_ctx *ctx; - struct bpf_dynptr psrc, pdst, iv; + struct bpf_dynptr psrc, pdst; int err; err = skb_dynptr_validate(skb, &psrc); @@ -114,12 +114,8 @@ int decrypt_sanity(struct __sk_buff *skb) * production code, a percpu map should be used to store the result. */ bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst); - /* iv dynptr has to be initialized with 0 size, but proper memory region - * has to be provided anyway - */ - bpf_dynptr_from_mem(dst, 0, 0, &iv); - status = bpf_crypto_decrypt(ctx, &psrc, &pdst, &iv); + status = bpf_crypto_decrypt(ctx, &psrc, &pdst, NULL); return TC_ACT_SHOT; } @@ -129,7 +125,7 @@ int encrypt_sanity(struct __sk_buff *skb) { struct __crypto_ctx_value *v; struct bpf_crypto_ctx *ctx; - struct bpf_dynptr psrc, pdst, iv; + struct bpf_dynptr psrc, pdst; int err; status = 0; @@ -156,12 +152,8 @@ int encrypt_sanity(struct __sk_buff *skb) * production code, a percpu map should be used to store the result. 
*/ bpf_dynptr_from_mem(dst, sizeof(dst), 0, &pdst); - /* iv dynptr has to be initialized with 0 size, but proper memory region - * has to be provided anyway - */ - bpf_dynptr_from_mem(dst, 0, 0, &iv); - status = bpf_crypto_encrypt(ctx, &psrc, &pdst, &iv); + status = bpf_crypto_encrypt(ctx, &psrc, &pdst, NULL); return TC_ACT_SHOT; } diff --git a/tools/testing/selftests/bpf/progs/dynptr_fail.c b/tools/testing/selftests/bpf/progs/dynptr_fail.c index 66a60bfb5867..e35bc1eac52a 100644 --- a/tools/testing/selftests/bpf/progs/dynptr_fail.c +++ b/tools/testing/selftests/bpf/progs/dynptr_fail.c @@ -964,7 +964,7 @@ int dynptr_invalidate_slice_reinit(void *ctx) * mem_or_null pointers. */ SEC("?raw_tp") -__failure __msg("R1 type=scalar expected=percpu_ptr_") +__failure __regex("R[0-9]+ type=scalar expected=percpu_ptr_") int dynptr_invalidate_slice_or_null(void *ctx) { struct bpf_dynptr ptr; @@ -982,7 +982,7 @@ int dynptr_invalidate_slice_or_null(void *ctx) /* Destruction of a dynptr should also invalidate any slices obtained from it */ SEC("?raw_tp") -__failure __msg("R7 invalid mem access 'scalar'") +__failure __regex("R[0-9]+ invalid mem access 'scalar'") int dynptr_invalidate_slice_failure(void *ctx) { struct bpf_dynptr ptr1; @@ -1069,7 +1069,7 @@ int dynptr_read_into_slot(void *ctx) /* bpf_dynptr_slice()s are read-only and cannot be written to */ SEC("?tc") -__failure __msg("R0 cannot write into rdonly_mem") +__failure __regex("R[0-9]+ cannot write into rdonly_mem") int skb_invalid_slice_write(struct __sk_buff *skb) { struct bpf_dynptr ptr; @@ -1686,3 +1686,27 @@ int test_dynptr_skb_small_buff(struct __sk_buff *skb) return !!data; } + +__noinline long global_call_bpf_dynptr(const struct bpf_dynptr *dynptr) +{ + long ret = 0; + /* Keep this global function non-empty so that the compiler does not + * optimize away the call to it. + */ + __sink(ret); + return ret; +} + +SEC("?raw_tp") +__failure __msg("arg#1 expected pointer to stack or const struct bpf_dynptr") +int test_dynptr_reg_type(void *ctx) +{ + struct task_struct *current = NULL; + /* R1 should be holding a PTR_TO_BTF_ID, so this shouldn't be a + * reg->type that can be passed to a function accepting a + * ARG_PTR_TO_DYNPTR | MEM_RDONLY. process_dynptr_func() should catch + * this.
+ */ + global_call_bpf_dynptr((const struct bpf_dynptr *)current); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/get_func_ip_test.c b/tools/testing/selftests/bpf/progs/get_func_ip_test.c index 8956eb78a226..2011cacdeb18 100644 --- a/tools/testing/selftests/bpf/progs/get_func_ip_test.c +++ b/tools/testing/selftests/bpf/progs/get_func_ip_test.c @@ -5,13 +5,12 @@ char _license[] SEC("license") = "GPL"; -extern const void bpf_fentry_test1 __ksym; +extern int bpf_fentry_test1(int a) __ksym; +extern int bpf_modify_return_test(int a, int *b) __ksym; + extern const void bpf_fentry_test2 __ksym; extern const void bpf_fentry_test3 __ksym; extern const void bpf_fentry_test4 __ksym; -extern const void bpf_modify_return_test __ksym; -extern const void bpf_fentry_test6 __ksym; -extern const void bpf_fentry_test7 __ksym; extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak; diff --git a/tools/testing/selftests/bpf/progs/ip_check_defrag.c b/tools/testing/selftests/bpf/progs/ip_check_defrag.c index 1c2b6c1616b0..645b2c9f7867 100644 --- a/tools/testing/selftests/bpf/progs/ip_check_defrag.c +++ b/tools/testing/selftests/bpf/progs/ip_check_defrag.c @@ -12,7 +12,7 @@ #define IP_OFFSET 0x1FFF #define NEXTHDR_FRAGMENT 44 -extern int bpf_dynptr_from_skb(struct sk_buff *skb, __u64 flags, +extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags, struct bpf_dynptr *ptr__uninit) __ksym; extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset, void *buffer, uint32_t buffer__sz) __ksym; @@ -42,7 +42,7 @@ static bool is_frag_v6(struct ipv6hdr *ip6h) return ip6h->nexthdr == NEXTHDR_FRAGMENT; } -static int handle_v4(struct sk_buff *skb) +static int handle_v4(struct __sk_buff *skb) { struct bpf_dynptr ptr; u8 iph_buf[20] = {}; @@ -64,7 +64,7 @@ static int handle_v4(struct sk_buff *skb) return NF_ACCEPT; } -static int handle_v6(struct sk_buff *skb) +static int handle_v6(struct __sk_buff *skb) { struct bpf_dynptr ptr; struct ipv6hdr *ip6h; @@ -89,9 +89,9 @@ static int handle_v6(struct sk_buff *skb) SEC("netfilter") int defrag(struct bpf_nf_ctx *ctx) { - struct sk_buff *skb = ctx->skb; + struct __sk_buff *skb = (struct __sk_buff *)ctx->skb; - switch (bpf_ntohs(skb->protocol)) { + switch (bpf_ntohs(ctx->skb->protocol)) { case ETH_P_IP: return handle_v4(skb); case ETH_P_IPV6: diff --git a/tools/testing/selftests/bpf/progs/iters.c b/tools/testing/selftests/bpf/progs/iters.c index fe65e0952a1e..16bdc3e25591 100644 --- a/tools/testing/selftests/bpf/progs/iters.c +++ b/tools/testing/selftests/bpf/progs/iters.c @@ -7,8 +7,6 @@ #include "bpf_misc.h" #include "bpf_compiler.h" -#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0])) - static volatile int zero = 0; int my_pid; diff --git a/tools/testing/selftests/bpf/progs/kfunc_call_test.c b/tools/testing/selftests/bpf/progs/kfunc_call_test.c index cf68d1e48a0f..f502f755f567 100644 --- a/tools/testing/selftests/bpf/progs/kfunc_call_test.c +++ b/tools/testing/selftests/bpf/progs/kfunc_call_test.c @@ -177,4 +177,41 @@ int kfunc_call_test_static_unused_arg(struct __sk_buff *skb) return actual != expected ? 
-1 : 0; } +struct ctx_val { + struct bpf_testmod_ctx __kptr *ctx; +}; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __uint(max_entries, 1); + __type(key, int); + __type(value, struct ctx_val); +} ctx_map SEC(".maps"); + +SEC("tc") +int kfunc_call_ctx(struct __sk_buff *skb) +{ + struct bpf_testmod_ctx *ctx; + int err = 0; + + ctx = bpf_testmod_ctx_create(&err); + if (!ctx && !err) + err = -1; + if (ctx) { + int key = 0; + struct ctx_val *ctx_val = bpf_map_lookup_elem(&ctx_map, &key); + + /* Transfer ctx to map to be freed via implicit dtor call + * on cleanup. + */ + if (ctx_val) + ctx = bpf_kptr_xchg(&ctx_val->ctx, ctx); + if (ctx) { + bpf_testmod_ctx_release(ctx); + err = -1; + } + } + return err; +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_session.c b/tools/testing/selftests/bpf/progs/kprobe_multi_session.c index bbba9eb46551..bd8b7fb7061e 100644 --- a/tools/testing/selftests/bpf/progs/kprobe_multi_session.c +++ b/tools/testing/selftests/bpf/progs/kprobe_multi_session.c @@ -4,8 +4,7 @@ #include #include #include "bpf_kfuncs.h" - -#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0])) +#include "bpf_misc.h" char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c b/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c index d49070803e22..0835b5edf685 100644 --- a/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c +++ b/tools/testing/selftests/bpf/progs/kprobe_multi_session_cookie.c @@ -25,7 +25,7 @@ int BPF_PROG(trigger) static int check_cookie(__u64 val, __u64 *result) { - long *cookie; + __u64 *cookie; if (bpf_get_current_pid_tgid() >> 32 != pid) return 1; diff --git a/tools/testing/selftests/bpf/progs/linked_list.c b/tools/testing/selftests/bpf/progs/linked_list.c index 26205ca80679..421f40835acd 100644 --- a/tools/testing/selftests/bpf/progs/linked_list.c +++ b/tools/testing/selftests/bpf/progs/linked_list.c @@ -4,13 +4,26 @@ #include #include #include "bpf_experimental.h" - -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof((x)[0])) -#endif +#include "bpf_misc.h" #include "linked_list.h" +struct head_nested_inner { + struct bpf_spin_lock lock; + struct bpf_list_head head __contains(foo, node2); +}; + +struct head_nested { + int dummy; + struct head_nested_inner inner; +}; + +private(C) struct bpf_spin_lock glock_c; +private(C) struct bpf_list_head ghead_array[2] __contains(foo, node2); +private(C) struct bpf_list_head ghead_array_one[1] __contains(foo, node2); + +private(D) struct head_nested ghead_nested; + static __always_inline int list_push_pop(struct bpf_spin_lock *lock, struct bpf_list_head *head, bool leave_in_map) { @@ -309,6 +322,32 @@ int global_list_push_pop(void *ctx) return test_list_push_pop(&glock, &ghead); } +SEC("tc") +int global_list_push_pop_nested(void *ctx) +{ + return test_list_push_pop(&ghead_nested.inner.lock, &ghead_nested.inner.head); +} + +SEC("tc") +int global_list_array_push_pop(void *ctx) +{ + int r; + + r = test_list_push_pop(&glock_c, &ghead_array[0]); + if (r) + return r; + + r = test_list_push_pop(&glock_c, &ghead_array[1]); + if (r) + return r; + + /* Arrays with only one element are a special case, being treated + * just like a bpf_list_head variable by the verifier, not an + * array.
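+ * (A descriptive aside: the rbtree tests later in this patch rely on the + * same rule for their single-element groot_array_one root.)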
+ */ + return test_list_push_pop(&glock_c, &ghead_array_one[0]); +} + SEC("tc") int map_list_push_pop_multiple(void *ctx) { diff --git a/tools/testing/selftests/bpf/progs/map_percpu_stats.c b/tools/testing/selftests/bpf/progs/map_percpu_stats.c index 10b2325c1720..63245785eb69 100644 --- a/tools/testing/selftests/bpf/progs/map_percpu_stats.c +++ b/tools/testing/selftests/bpf/progs/map_percpu_stats.c @@ -7,7 +7,7 @@ __u32 target_id; -__s64 bpf_map_sum_elem_count(struct bpf_map *map) __ksym; +__s64 bpf_map_sum_elem_count(const struct bpf_map *map) __ksym; SEC("iter/bpf_map") int dump_bpf_map(struct bpf_iter__bpf_map *ctx) diff --git a/tools/testing/selftests/bpf/progs/nested_trust_common.h b/tools/testing/selftests/bpf/progs/nested_trust_common.h index 83d33931136e..1784b496be2e 100644 --- a/tools/testing/selftests/bpf/progs/nested_trust_common.h +++ b/tools/testing/selftests/bpf/progs/nested_trust_common.h @@ -7,6 +7,6 @@ #include bool bpf_cpumask_test_cpu(unsigned int cpu, const struct cpumask *cpumask) __ksym; -bool bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym; +__u32 bpf_cpumask_first_zero(const struct cpumask *cpumask) __ksym; #endif /* _NESTED_TRUST_COMMON_H */ diff --git a/tools/testing/selftests/bpf/progs/nested_trust_failure.c b/tools/testing/selftests/bpf/progs/nested_trust_failure.c index ea39497f11ed..3568ec450100 100644 --- a/tools/testing/selftests/bpf/progs/nested_trust_failure.c +++ b/tools/testing/selftests/bpf/progs/nested_trust_failure.c @@ -31,14 +31,6 @@ int BPF_PROG(test_invalid_nested_user_cpus, struct task_struct *task, u64 clone_ return 0; } -SEC("tp_btf/task_newtask") -__failure __msg("R1 must have zero offset when passed to release func or trusted arg to kfunc") -int BPF_PROG(test_invalid_nested_offset, struct task_struct *task, u64 clone_flags) -{ - bpf_cpumask_first_zero(&task->cpus_mask); - return 0; -} - /* Although R2 is of type sk_buff, sock_common is expected; we will hit the untrusted ptr first.
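* That is, the expected load error below flags R2 as untrusted_ptr_ rather than complaining about the missing sock_common.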
*/ SEC("tp_btf/tcp_probe") __failure __msg("R2 type=untrusted_ptr_ expected=ptr_, trusted_ptr_, rcu_ptr_") diff --git a/tools/testing/selftests/bpf/progs/nested_trust_success.c b/tools/testing/selftests/bpf/progs/nested_trust_success.c index 833840bffd3b..2b66953ca82e 100644 --- a/tools/testing/selftests/bpf/progs/nested_trust_success.c +++ b/tools/testing/selftests/bpf/progs/nested_trust_success.c @@ -32,3 +32,11 @@ int BPF_PROG(test_skb_field, struct sock *sk, struct sk_buff *skb) bpf_sk_storage_get(&sk_storage_map, skb->sk, 0, 0); return 0; } + +SEC("tp_btf/task_newtask") +__success +int BPF_PROG(test_nested_offset, struct task_struct *task, u64 clone_flags) +{ + bpf_cpumask_first_zero(&task->cpus_mask); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/netif_receive_skb.c b/tools/testing/selftests/bpf/progs/netif_receive_skb.c index c0062645fc68..9e067dcbf607 100644 --- a/tools/testing/selftests/bpf/progs/netif_receive_skb.c +++ b/tools/testing/selftests/bpf/progs/netif_receive_skb.c @@ -5,6 +5,7 @@ #include #include #include +#include "bpf_misc.h" #include @@ -23,10 +24,6 @@ bool skip = false; #define BADPTR 0 #endif -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - struct { __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY); __uint(max_entries, 1); diff --git a/tools/testing/selftests/bpf/progs/profiler.inc.h b/tools/testing/selftests/bpf/progs/profiler.inc.h index 6957d9f2805e..8bd1ebd7d6af 100644 --- a/tools/testing/selftests/bpf/progs/profiler.inc.h +++ b/tools/testing/selftests/bpf/progs/profiler.inc.h @@ -9,6 +9,7 @@ #include "err.h" #include "bpf_experimental.h" #include "bpf_compiler.h" +#include "bpf_misc.h" #ifndef NULL #define NULL 0 @@ -133,10 +134,6 @@ struct { __uint(max_entries, 16); } disallowed_exec_inodes SEC(".maps"); -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(arr) (int)(sizeof(arr) / sizeof(arr[0])) -#endif - static INLINE bool IS_ERR(const void* ptr) { return IS_ERR_VALUE((unsigned long)ptr); diff --git a/tools/testing/selftests/bpf/progs/rbtree.c b/tools/testing/selftests/bpf/progs/rbtree.c index b09f4fffe57c..a3620c15c136 100644 --- a/tools/testing/selftests/bpf/progs/rbtree.c +++ b/tools/testing/selftests/bpf/progs/rbtree.c @@ -13,6 +13,15 @@ struct node_data { struct bpf_rb_node node; }; +struct root_nested_inner { + struct bpf_spin_lock glock; + struct bpf_rb_root root __contains(node_data, node); +}; + +struct root_nested { + struct root_nested_inner inner; +}; + long less_callback_ran = -1; long removed_key = -1; long first_data[2] = {-1, -1}; @@ -20,6 +29,9 @@ long first_data[2] = {-1, -1}; #define private(name) SEC(".data." 
#name) __hidden __attribute__((aligned(8))) private(A) struct bpf_spin_lock glock; private(A) struct bpf_rb_root groot __contains(node_data, node); +private(A) struct bpf_rb_root groot_array[2] __contains(node_data, node); +private(A) struct bpf_rb_root groot_array_one[1] __contains(node_data, node); +private(B) struct root_nested groot_nested; static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) { @@ -71,6 +83,12 @@ long rbtree_add_nodes(void *ctx) return __add_three(&groot, &glock); } +SEC("tc") +long rbtree_add_nodes_nested(void *ctx) +{ + return __add_three(&groot_nested.inner.root, &groot_nested.inner.glock); +} + SEC("tc") long rbtree_add_and_remove(void *ctx) { @@ -109,6 +127,65 @@ err_out: return 1; } +SEC("tc") +long rbtree_add_and_remove_array(void *ctx) +{ + struct bpf_rb_node *res1 = NULL, *res2 = NULL, *res3 = NULL; + struct node_data *nodes[3][2] = {{NULL, NULL}, {NULL, NULL}, {NULL, NULL}}; + struct node_data *n; + long k1 = -1, k2 = -1, k3 = -1; + int i, j; + + for (i = 0; i < 3; i++) { + for (j = 0; j < 2; j++) { + nodes[i][j] = bpf_obj_new(typeof(*nodes[i][j])); + if (!nodes[i][j]) + goto err_out; + nodes[i][j]->key = i * 2 + j; + } + } + + bpf_spin_lock(&glock); + for (i = 0; i < 2; i++) + for (j = 0; j < 2; j++) + bpf_rbtree_add(&groot_array[i], &nodes[i][j]->node, less); + for (j = 0; j < 2; j++) + bpf_rbtree_add(&groot_array_one[0], &nodes[2][j]->node, less); + res1 = bpf_rbtree_remove(&groot_array[0], &nodes[0][0]->node); + res2 = bpf_rbtree_remove(&groot_array[1], &nodes[1][0]->node); + res3 = bpf_rbtree_remove(&groot_array_one[0], &nodes[2][0]->node); + bpf_spin_unlock(&glock); + + if (res1) { + n = container_of(res1, struct node_data, node); + k1 = n->key; + bpf_obj_drop(n); + } + if (res2) { + n = container_of(res2, struct node_data, node); + k2 = n->key; + bpf_obj_drop(n); + } + if (res3) { + n = container_of(res3, struct node_data, node); + k3 = n->key; + bpf_obj_drop(n); + } + if (k1 != 0 || k2 != 2 || k3 != 4) + return 2; + + return 0; + +err_out: + for (i = 0; i < 3; i++) { + for (j = 0; j < 2; j++) { + if (nodes[i][j]) + bpf_obj_drop(nodes[i][j]); + } + } + return 1; +} + SEC("tc") long rbtree_first_and_remove(void *ctx) { diff --git a/tools/testing/selftests/bpf/progs/rbtree_fail.c b/tools/testing/selftests/bpf/progs/rbtree_fail.c index 3fecf1c6dfe5..b722a1e1ddef 100644 --- a/tools/testing/selftests/bpf/progs/rbtree_fail.c +++ b/tools/testing/selftests/bpf/progs/rbtree_fail.c @@ -105,7 +105,7 @@ long rbtree_api_remove_unadded_node(void *ctx) } SEC("?tc") -__failure __msg("Unreleased reference id=3 alloc_insn=10") +__failure __regex("Unreleased reference id=3 alloc_insn=[0-9]+") long rbtree_api_remove_no_drop(void *ctx) { struct bpf_rb_node *res; diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c index 1553b9c16aa7..f8d4b7cfcd68 100644 --- a/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c +++ b/tools/testing/selftests/bpf/progs/refcounted_kptr_fail.c @@ -32,7 +32,7 @@ static bool less(struct bpf_rb_node *a, const struct bpf_rb_node *b) } SEC("?tc") -__failure __msg("Unreleased reference id=4 alloc_insn=21") +__failure __regex("Unreleased reference id=4 alloc_insn=[0-9]+") long rbtree_refcounted_node_ref_escapes(void *ctx) { struct node_acquire *n, *m; @@ -73,7 +73,7 @@ long refcount_acquire_maybe_null(void *ctx) } SEC("?tc") -__failure __msg("Unreleased reference id=3 alloc_insn=9") +__failure __regex("Unreleased reference id=3 alloc_insn=[0-9]+") long 
rbtree_refcounted_node_ref_escapes_owning_input(void *ctx) { struct node_acquire *n, *m; diff --git a/tools/testing/selftests/bpf/progs/setget_sockopt.c b/tools/testing/selftests/bpf/progs/setget_sockopt.c index 7a438600ae98..60518aed1ffc 100644 --- a/tools/testing/selftests/bpf/progs/setget_sockopt.c +++ b/tools/testing/selftests/bpf/progs/setget_sockopt.c @@ -6,10 +6,7 @@ #include #include #include - -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif +#include "bpf_misc.h" extern unsigned long CONFIG_HZ __kconfig; diff --git a/tools/testing/selftests/bpf/progs/skb_pkt_end.c b/tools/testing/selftests/bpf/progs/skb_pkt_end.c index db4abd2682fc..3bb4451524a1 100644 --- a/tools/testing/selftests/bpf/progs/skb_pkt_end.c +++ b/tools/testing/selftests/bpf/progs/skb_pkt_end.c @@ -33,6 +33,8 @@ int main_prog(struct __sk_buff *skb) struct iphdr *ip = NULL; struct tcphdr *tcp; __u8 proto = 0; + int urg_ptr; + u32 offset; if (!(ip = get_iphdr(skb))) goto out; @@ -48,7 +50,14 @@ int main_prog(struct __sk_buff *skb) if (!tcp) goto out; - return tcp->urg_ptr; + urg_ptr = tcp->urg_ptr; + + /* Checksum validation part */ + proto++; + offset = sizeof(struct ethhdr) + offsetof(struct iphdr, protocol); + bpf_skb_store_bytes(skb, offset, &proto, sizeof(proto), BPF_F_RECOMPUTE_CSUM); + + return urg_ptr; out: return -1; } diff --git a/tools/testing/selftests/bpf/progs/struct_ops_detach.c b/tools/testing/selftests/bpf/progs/struct_ops_detach.c new file mode 100644 index 000000000000..56b787a89876 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/struct_ops_detach.c @@ -0,0 +1,10 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#include +#include +#include "../bpf_testmod/bpf_testmod.h" + +char _license[] SEC("license") = "GPL"; + +SEC(".struct_ops.link") +struct bpf_testmod_ops testmod_do_detach; diff --git a/tools/testing/selftests/bpf/progs/test_bpf_ma.c b/tools/testing/selftests/bpf/progs/test_bpf_ma.c index 3494ca30fa7f..4a4e0b8d9b72 100644 --- a/tools/testing/selftests/bpf/progs/test_bpf_ma.c +++ b/tools/testing/selftests/bpf/progs/test_bpf_ma.c @@ -7,10 +7,6 @@ #include "bpf_experimental.h" #include "bpf_misc.h" -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - struct generic_map_value { void *data; }; diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf.c b/tools/testing/selftests/bpf/progs/test_bpf_nf.c index 77ad8adf68da..f7b330ddd007 100644 --- a/tools/testing/selftests/bpf/progs/test_bpf_nf.c +++ b/tools/testing/selftests/bpf/progs/test_bpf_nf.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#define BPF_NO_KFUNC_PROTOTYPES #include #include #include @@ -9,10 +10,14 @@ #define EINVAL 22 #define ENOENT 2 +#define NF_CT_ZONE_DIR_ORIG (1 << IP_CT_DIR_ORIGINAL) +#define NF_CT_ZONE_DIR_REPL (1 << IP_CT_DIR_REPLY) + extern unsigned long CONFIG_HZ __kconfig; int test_einval_bpf_tuple = 0; int test_einval_reserved = 0; +int test_einval_reserved_new = 0; int test_einval_netns_id = 0; int test_einval_len_opts = 0; int test_eproto_l4proto = 0; @@ -22,6 +27,11 @@ int test_eafnosupport = 0; int test_alloc_entry = -EINVAL; int test_insert_entry = -EAFNOSUPPORT; int test_succ_lookup = -ENOENT; +int test_ct_zone_id_alloc_entry = -EINVAL; +int test_ct_zone_id_insert_entry = -EAFNOSUPPORT; +int test_ct_zone_id_succ_lookup = -ENOENT; +int test_ct_zone_dir_enoent_lookup = 0; +int test_ct_zone_id_enoent_lookup = 0; u32 test_delta_timeout = 0; u32 test_status = 0; u32 test_insert_lookup_mark = 
0; @@ -45,6 +55,17 @@ struct bpf_ct_opts___local { s32 netns_id; s32 error; u8 l4proto; + u8 dir; + u8 reserved[2]; +}; + +struct bpf_ct_opts___new { + s32 netns_id; + s32 error; + u8 l4proto; + u8 dir; + u16 ct_zone_id; + u8 ct_zone_dir; u8 reserved[3]; } __attribute__((preserve_access_index)); @@ -220,10 +241,97 @@ nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32, } } +static __always_inline void +nf_ct_opts_new_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32, + struct bpf_ct_opts___new *, u32), + struct nf_conn *(*alloc_fn)(void *, struct bpf_sock_tuple *, u32, + struct bpf_ct_opts___new *, u32), + void *ctx) +{ + struct bpf_ct_opts___new opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 }; + struct bpf_sock_tuple bpf_tuple; + struct nf_conn *ct; + + __builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4)); + + opts_def.reserved[0] = 1; + ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, + sizeof(opts_def)); + opts_def.reserved[0] = 0; + if (ct) + bpf_ct_release(ct); + else + test_einval_reserved_new = opts_def.error; + + bpf_tuple.ipv4.saddr = bpf_get_prandom_u32(); /* src IP */ + bpf_tuple.ipv4.daddr = bpf_get_prandom_u32(); /* dst IP */ + bpf_tuple.ipv4.sport = bpf_get_prandom_u32(); /* src port */ + bpf_tuple.ipv4.dport = bpf_get_prandom_u32(); /* dst port */ + + /* use non-default ct zone */ + opts_def.ct_zone_id = 10; + opts_def.ct_zone_dir = NF_CT_ZONE_DIR_ORIG; + ct = alloc_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def, + sizeof(opts_def)); + if (ct) { + __u16 sport = bpf_get_prandom_u32(); + __u16 dport = bpf_get_prandom_u32(); + union nf_inet_addr saddr = {}; + union nf_inet_addr daddr = {}; + struct nf_conn *ct_ins; + + bpf_ct_set_timeout(ct, 10000); + + /* snat */ + saddr.ip = bpf_get_prandom_u32(); + bpf_ct_set_nat_info(ct, &saddr, sport, NF_NAT_MANIP_SRC___local); + /* dnat */ + daddr.ip = bpf_get_prandom_u32(); + bpf_ct_set_nat_info(ct, &daddr, dport, NF_NAT_MANIP_DST___local); + + ct_ins = bpf_ct_insert_entry(ct); + if (ct_ins) { + struct nf_conn *ct_lk; + + /* entry should exist in same ct zone we inserted it */ + ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), + &opts_def, sizeof(opts_def)); + if (ct_lk) { + bpf_ct_release(ct_lk); + test_ct_zone_id_succ_lookup = 0; + } + + /* entry should not exist with wrong direction */ + opts_def.ct_zone_dir = NF_CT_ZONE_DIR_REPL; + ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), + &opts_def, sizeof(opts_def)); + opts_def.ct_zone_dir = NF_CT_ZONE_DIR_ORIG; + if (ct_lk) + bpf_ct_release(ct_lk); + else + test_ct_zone_dir_enoent_lookup = opts_def.error; + + /* entry should not exist in default ct zone */ + opts_def.ct_zone_id = 0; + ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), + &opts_def, sizeof(opts_def)); + if (ct_lk) + bpf_ct_release(ct_lk); + else + test_ct_zone_id_enoent_lookup = opts_def.error; + + bpf_ct_release(ct_ins); + test_ct_zone_id_insert_entry = 0; + } + test_ct_zone_id_alloc_entry = 0; + } +} + SEC("xdp") int nf_xdp_ct_test(struct xdp_md *ctx) { nf_ct_test((void *)bpf_xdp_ct_lookup, (void *)bpf_xdp_ct_alloc, ctx); + nf_ct_opts_new_test((void *)bpf_xdp_ct_lookup, (void *)bpf_xdp_ct_alloc, ctx); return 0; } @@ -231,6 +339,7 @@ SEC("tc") int nf_skb_ct_test(struct __sk_buff *ctx) { nf_ct_test((void *)bpf_skb_ct_lookup, (void *)bpf_skb_ct_alloc, ctx); + nf_ct_opts_new_test((void *)bpf_skb_ct_lookup, (void *)bpf_skb_ct_alloc, ctx); return 0; } diff --git a/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c 
b/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c index 0e4759ab38ff..a586f087ffeb 100644 --- a/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c +++ b/tools/testing/selftests/bpf/progs/test_bpf_nf_fail.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#define BPF_NO_KFUNC_PROTOTYPES #include #include #include diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c index 2dde8e3fe4c9..e68667aec6a6 100644 --- a/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c +++ b/tools/testing/selftests/bpf/progs/test_kfunc_dynptr_param.c @@ -45,7 +45,7 @@ int BPF_PROG(not_valid_dynptr, int cmd, union bpf_attr *attr, unsigned int size) } SEC("?lsm.s/bpf") -__failure __msg("arg#0 expected pointer to stack or dynptr_ptr") +__failure __msg("arg#1 expected pointer to stack or const struct bpf_dynptr") int BPF_PROG(not_ptr_to_stack, int cmd, union bpf_attr *attr, unsigned int size) { unsigned long val = 0; diff --git a/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c new file mode 100644 index 000000000000..7ac7e1de34d8 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_kfunc_param_nullable.c @@ -0,0 +1,43 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Copyright (c) 2024 Meta Platforms, Inc */ +#include +#include +#include "bpf_misc.h" +#include "bpf_kfuncs.h" +#include "../bpf_testmod/bpf_testmod_kfunc.h" + +SEC("tc") +int kfunc_dynptr_nullable_test1(struct __sk_buff *skb) +{ + struct bpf_dynptr data; + + bpf_dynptr_from_skb(skb, 0, &data); + bpf_kfunc_dynptr_test(&data, NULL); + + return 0; +} + +SEC("tc") +int kfunc_dynptr_nullable_test2(struct __sk_buff *skb) +{ + struct bpf_dynptr data; + + bpf_dynptr_from_skb(skb, 0, &data); + bpf_kfunc_dynptr_test(&data, &data); + + return 0; +} + +SEC("tc") +__failure __msg("expected pointer to stack or const struct bpf_dynptr") +int kfunc_dynptr_nullable_test3(struct __sk_buff *skb) +{ + struct bpf_dynptr data; + + bpf_dynptr_from_skb(skb, 0, &data); + bpf_kfunc_dynptr_test(NULL, &data); + + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h index 99d2ea9fb658..f48f85f1bd70 100644 --- a/tools/testing/selftests/bpf/progs/test_sockmap_kern.h +++ b/tools/testing/selftests/bpf/progs/test_sockmap_kern.h @@ -92,7 +92,7 @@ struct { __uint(value_size, sizeof(int)); } tls_sock_map SEC(".maps"); -SEC("sk_skb1") +SEC("sk_skb/stream_parser") int bpf_prog1(struct __sk_buff *skb) { int *f, two = 2; @@ -104,7 +104,7 @@ int bpf_prog1(struct __sk_buff *skb) return skb->len; } -SEC("sk_skb2") +SEC("sk_skb/stream_verdict") int bpf_prog2(struct __sk_buff *skb) { __u32 lport = skb->local_port; @@ -151,7 +151,7 @@ static inline void bpf_write_pass(struct __sk_buff *skb, int offset) memcpy(c + offset, "PASS", 4); } -SEC("sk_skb3") +SEC("sk_skb/stream_verdict") int bpf_prog3(struct __sk_buff *skb) { int err, *f, ret = SK_PASS; @@ -177,9 +177,6 @@ int bpf_prog3(struct __sk_buff *skb) return bpf_sk_redirect_hash(skb, &tls_sock_map, &ret, flags); #endif } - f = bpf_map_lookup_elem(&sock_skb_opts, &one); - if (f && *f) - ret = SK_DROP; err = bpf_skb_adjust_room(skb, 4, 0, 0); if (err) return SK_DROP; @@ -233,7 +230,7 @@ int bpf_sockmap(struct bpf_sock_ops *skops) return 0; } -SEC("sk_msg1") +SEC("sk_msg") int bpf_prog4(struct sk_msg_md *msg) { int *bytes, zero = 0, one = 1, two = 2, three 
= 3, four = 4, five = 5; @@ -263,7 +260,7 @@ int bpf_prog4(struct sk_msg_md *msg) return SK_PASS; } -SEC("sk_msg2") +SEC("sk_msg") int bpf_prog6(struct sk_msg_md *msg) { int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0; @@ -308,7 +305,7 @@ int bpf_prog6(struct sk_msg_md *msg) #endif } -SEC("sk_msg3") +SEC("sk_msg") int bpf_prog8(struct sk_msg_md *msg) { void *data_end = (void *)(long) msg->data_end; @@ -329,7 +326,8 @@ int bpf_prog8(struct sk_msg_md *msg) return SK_PASS; } -SEC("sk_msg4") + +SEC("sk_msg") int bpf_prog9(struct sk_msg_md *msg) { void *data_end = (void *)(long) msg->data_end; @@ -347,7 +345,7 @@ int bpf_prog9(struct sk_msg_md *msg) return SK_PASS; } -SEC("sk_msg5") +SEC("sk_msg") int bpf_prog10(struct sk_msg_md *msg) { int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop; diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c index 7f74077d6622..548660e299a5 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop1.c @@ -10,10 +10,7 @@ #include #include "bpf_compiler.h" - -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif +#include "bpf_misc.h" /* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */ #define TCP_MEM_LOOPS 28 /* because 30 doesn't fit into 512 bytes of stack */ diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c index 68a75436e8af..81249d119a8b 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_loop2.c @@ -10,10 +10,7 @@ #include #include "bpf_compiler.h" - -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif +#include "bpf_misc.h" /* tcp_mem sysctl has only 3 ints, but this test is doing TCP_MEM_LOOPS */ #define TCP_MEM_LOOPS 20 /* because 30 doesn't fit into 512 bytes of stack */ diff --git a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c index efc3c61f7852..bbdd08764789 100644 --- a/tools/testing/selftests/bpf/progs/test_sysctl_prog.c +++ b/tools/testing/selftests/bpf/progs/test_sysctl_prog.c @@ -10,6 +10,7 @@ #include #include "bpf_compiler.h" +#include "bpf_misc.h" /* Max supported length of a string with unsigned long in base 10 (pow2 - 1). */ #define MAX_ULONG_STR_LEN 0xF @@ -17,10 +18,6 @@ /* Max supported length of sysctl value string (pow2). 
*/ #define MAX_VALUE_STR_LEN 0x40 -#ifndef ARRAY_SIZE -#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) -#endif - const char tcp_mem_name[] = "net/ipv4/tcp_mem"; static __always_inline int is_tcp_mem(struct bpf_sysctl *ctx) { diff --git a/tools/testing/selftests/bpf/progs/test_tc_dtime.c b/tools/testing/selftests/bpf/progs/test_tc_dtime.c index 74ec09f040b7..ca8e8734d901 100644 --- a/tools/testing/selftests/bpf/progs/test_tc_dtime.c +++ b/tools/testing/selftests/bpf/progs/test_tc_dtime.c @@ -222,16 +222,20 @@ int egress_host(struct __sk_buff *skb) return TC_ACT_OK; if (skb_proto(skb_type) == IPPROTO_TCP) { - if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO && + if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC && + skb->tstamp) + inc_dtimes(EGRESS_ENDHOST); + else + inc_errs(EGRESS_ENDHOST); + } else if (skb_proto(skb_type) == IPPROTO_UDP) { + if (skb->tstamp_type == BPF_SKB_CLOCK_TAI && skb->tstamp) inc_dtimes(EGRESS_ENDHOST); else inc_errs(EGRESS_ENDHOST); } else { - if (skb->tstamp_type == BPF_SKB_TSTAMP_UNSPEC && + if (skb->tstamp_type == BPF_SKB_CLOCK_REALTIME && skb->tstamp) - inc_dtimes(EGRESS_ENDHOST); - else inc_errs(EGRESS_ENDHOST); } @@ -252,7 +256,7 @@ int ingress_host(struct __sk_buff *skb) if (!skb_type) return TC_ACT_OK; - if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO && + if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC && skb->tstamp == EGRESS_FWDNS_MAGIC) inc_dtimes(INGRESS_ENDHOST); else @@ -315,7 +319,6 @@ int egress_fwdns_prio100(struct __sk_buff *skb) SEC("tc") int ingress_fwdns_prio101(struct __sk_buff *skb) { - __u64 expected_dtime = EGRESS_ENDHOST_MAGIC; int skb_type; skb_type = skb_get_type(skb); @@ -323,29 +326,24 @@ int ingress_fwdns_prio101(struct __sk_buff *skb) /* Should have handled in prio100 */ return TC_ACT_SHOT; - if (skb_proto(skb_type) == IPPROTO_UDP) - expected_dtime = 0; - if (skb->tstamp_type) { if (fwdns_clear_dtime() || - skb->tstamp_type != BPF_SKB_TSTAMP_DELIVERY_MONO || - skb->tstamp != expected_dtime) + (skb->tstamp_type != BPF_SKB_CLOCK_MONOTONIC && + skb->tstamp_type != BPF_SKB_CLOCK_TAI) || + skb->tstamp != EGRESS_ENDHOST_MAGIC) inc_errs(INGRESS_FWDNS_P101); else inc_dtimes(INGRESS_FWDNS_P101); } else { - if (!fwdns_clear_dtime() && expected_dtime) + if (!fwdns_clear_dtime()) inc_errs(INGRESS_FWDNS_P101); } - if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO) { + if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC) { skb->tstamp = INGRESS_FWDNS_MAGIC; } else { if (bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC, - BPF_SKB_TSTAMP_DELIVERY_MONO)) - inc_errs(SET_DTIME); - if (!bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC, - BPF_SKB_TSTAMP_UNSPEC)) + BPF_SKB_CLOCK_MONOTONIC)) inc_errs(SET_DTIME); } @@ -370,7 +368,7 @@ int egress_fwdns_prio101(struct __sk_buff *skb) if (skb->tstamp_type) { if (fwdns_clear_dtime() || - skb->tstamp_type != BPF_SKB_TSTAMP_DELIVERY_MONO || + skb->tstamp_type != BPF_SKB_CLOCK_MONOTONIC || skb->tstamp != INGRESS_FWDNS_MAGIC) inc_errs(EGRESS_FWDNS_P101); else @@ -380,14 +378,11 @@ int egress_fwdns_prio101(struct __sk_buff *skb) inc_errs(EGRESS_FWDNS_P101); } - if (skb->tstamp_type == BPF_SKB_TSTAMP_DELIVERY_MONO) { + if (skb->tstamp_type == BPF_SKB_CLOCK_MONOTONIC) { skb->tstamp = EGRESS_FWDNS_MAGIC; } else { if (bpf_skb_set_tstamp(skb, EGRESS_FWDNS_MAGIC, - BPF_SKB_TSTAMP_DELIVERY_MONO)) - inc_errs(SET_DTIME); - if (!bpf_skb_set_tstamp(skb, INGRESS_FWDNS_MAGIC, - BPF_SKB_TSTAMP_UNSPEC)) + BPF_SKB_CLOCK_MONOTONIC)) inc_errs(SET_DTIME); } diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c 
b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c index c8e4553648bf..44ee0d037f95 100644 --- a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c +++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.c @@ -9,6 +9,7 @@ #include "bpf_kfuncs.h" #include "test_siphash.h" #include "test_tcp_custom_syncookie.h" +#include "bpf_misc.h" #define MAX_PACKET_OFF 0xffff diff --git a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h index 29a6a53cf229..f8b1b7e68d2e 100644 --- a/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h +++ b/tools/testing/selftests/bpf/progs/test_tcp_custom_syncookie.h @@ -7,8 +7,6 @@ #define __packed __attribute__((__packed__)) #define __force -#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) - #define swap(a, b) \ do { \ typeof(a) __tmp = (a); \ diff --git a/tools/testing/selftests/bpf/progs/tracing_struct.c b/tools/testing/selftests/bpf/progs/tracing_struct.c index 515daef3c84b..c435a3a8328a 100644 --- a/tools/testing/selftests/bpf/progs/tracing_struct.c +++ b/tools/testing/selftests/bpf/progs/tracing_struct.c @@ -18,11 +18,6 @@ struct bpf_testmod_struct_arg_3 { int b[]; }; -struct bpf_testmod_struct_arg_4 { - u64 a; - int b; -}; - long t1_a_a, t1_a_b, t1_b, t1_c, t1_ret, t1_nregs; __u64 t1_reg0, t1_reg1, t1_reg2, t1_reg3; long t2_a, t2_b_a, t2_b_b, t2_c, t2_ret; @@ -30,9 +25,6 @@ long t3_a, t3_b, t3_c_a, t3_c_b, t3_ret; long t4_a_a, t4_b, t4_c, t4_d, t4_e_a, t4_e_b, t4_ret; long t5_ret; int t6; -long t7_a, t7_b, t7_c, t7_d, t7_e, t7_f_a, t7_f_b, t7_ret; -long t8_a, t8_b, t8_c, t8_d, t8_e, t8_f_a, t8_f_b, t8_g, t8_ret; - SEC("fentry/bpf_testmod_test_struct_arg_1") int BPF_PROG2(test_struct_arg_1, struct bpf_testmod_struct_arg_2, a, int, b, int, c) @@ -138,50 +130,4 @@ int BPF_PROG2(test_struct_arg_11, struct bpf_testmod_struct_arg_3 *, a) return 0; } -SEC("fentry/bpf_testmod_test_struct_arg_7") -int BPF_PROG2(test_struct_arg_12, __u64, a, void *, b, short, c, int, d, - void *, e, struct bpf_testmod_struct_arg_4, f) -{ - t7_a = a; - t7_b = (long)b; - t7_c = c; - t7_d = d; - t7_e = (long)e; - t7_f_a = f.a; - t7_f_b = f.b; - return 0; -} - -SEC("fexit/bpf_testmod_test_struct_arg_7") -int BPF_PROG2(test_struct_arg_13, __u64, a, void *, b, short, c, int, d, - void *, e, struct bpf_testmod_struct_arg_4, f, int, ret) -{ - t7_ret = ret; - return 0; -} - -SEC("fentry/bpf_testmod_test_struct_arg_8") -int BPF_PROG2(test_struct_arg_14, __u64, a, void *, b, short, c, int, d, - void *, e, struct bpf_testmod_struct_arg_4, f, int, g) -{ - t8_a = a; - t8_b = (long)b; - t8_c = c; - t8_d = d; - t8_e = (long)e; - t8_f_a = f.a; - t8_f_b = f.b; - t8_g = g; - return 0; -} - -SEC("fexit/bpf_testmod_test_struct_arg_8") -int BPF_PROG2(test_struct_arg_15, __u64, a, void *, b, short, c, int, d, - void *, e, struct bpf_testmod_struct_arg_4, f, int, g, - int, ret) -{ - t8_ret = ret; - return 0; -} - char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c b/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c new file mode 100644 index 000000000000..4742012ace06 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/tracing_struct_many_args.c @@ -0,0 +1,95 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include +#include + +struct bpf_testmod_struct_arg_4 { + u64 a; + int b; +}; + +struct bpf_testmod_struct_arg_5 { + char a; + short b; + int c; + long d; +}; + +long t7_a, t7_b, t7_c, t7_d, t7_e, t7_f_a, 
t7_f_b, t7_ret; +long t8_a, t8_b, t8_c, t8_d, t8_e, t8_f_a, t8_f_b, t8_g, t8_ret; +long t9_a, t9_b, t9_c, t9_d, t9_e, t9_f, t9_g, t9_h_a, t9_h_b, t9_h_c, t9_h_d, t9_i, t9_ret; + +SEC("fentry/bpf_testmod_test_struct_arg_7") +int BPF_PROG2(test_struct_many_args_1, __u64, a, void *, b, short, c, int, d, + void *, e, struct bpf_testmod_struct_arg_4, f) +{ + t7_a = a; + t7_b = (long)b; + t7_c = c; + t7_d = d; + t7_e = (long)e; + t7_f_a = f.a; + t7_f_b = f.b; + return 0; +} + +SEC("fexit/bpf_testmod_test_struct_arg_7") +int BPF_PROG2(test_struct_many_args_2, __u64, a, void *, b, short, c, int, d, + void *, e, struct bpf_testmod_struct_arg_4, f, int, ret) +{ + t7_ret = ret; + return 0; +} + +SEC("fentry/bpf_testmod_test_struct_arg_8") +int BPF_PROG2(test_struct_many_args_3, __u64, a, void *, b, short, c, int, d, + void *, e, struct bpf_testmod_struct_arg_4, f, int, g) +{ + t8_a = a; + t8_b = (long)b; + t8_c = c; + t8_d = d; + t8_e = (long)e; + t8_f_a = f.a; + t8_f_b = f.b; + t8_g = g; + return 0; +} + +SEC("fexit/bpf_testmod_test_struct_arg_8") +int BPF_PROG2(test_struct_many_args_4, __u64, a, void *, b, short, c, int, d, + void *, e, struct bpf_testmod_struct_arg_4, f, int, g, + int, ret) +{ + t8_ret = ret; + return 0; +} + +SEC("fentry/bpf_testmod_test_struct_arg_9") +int BPF_PROG2(test_struct_many_args_5, __u64, a, void *, b, short, c, int, d, void *, e, + char, f, short, g, struct bpf_testmod_struct_arg_5, h, long, i) +{ + t9_a = a; + t9_b = (long)b; + t9_c = c; + t9_d = d; + t9_e = (long)e; + t9_f = f; + t9_g = g; + t9_h_a = h.a; + t9_h_b = h.b; + t9_h_c = h.c; + t9_h_d = h.d; + t9_i = i; + return 0; +} + +SEC("fexit/bpf_testmod_test_struct_arg_9") +int BPF_PROG2(test_struct_many_args_6, __u64, a, void *, b, short, c, int, d, void *, e, + char, f, short, g, struct bpf_testmod_struct_arg_5, h, long, i, int, ret) +{ + t9_ret = ret; + return 0; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c index 11ab25c42c36..54de0389f878 100644 --- a/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c +++ b/tools/testing/selftests/bpf/progs/user_ringbuf_fail.c @@ -221,3 +221,25 @@ int user_ringbuf_callback_reinit_dynptr_ringbuf(void *ctx) bpf_user_ringbuf_drain(&user_ringbuf, try_reinit_dynptr_ringbuf, NULL, 0); return 0; } + +__noinline long global_call_bpf_dynptr_data(struct bpf_dynptr *dynptr) +{ + bpf_dynptr_data(dynptr, 0xA, 0xA); + return 0; +} + +static long callback_adjust_bpf_dynptr_reg_off(struct bpf_dynptr *dynptr, + void *ctx) +{ + global_call_bpf_dynptr_data(dynptr += 1024); + return 0; +} + +SEC("?raw_tp") +__failure __msg("dereference of modified dynptr_ptr ptr R1 off=16384 disallowed") +int user_ringbuf_callback_const_ptr_to_dynptr_reg_off(void *ctx) +{ + bpf_user_ringbuf_drain(&user_ringbuf, + callback_adjust_bpf_dynptr_reg_off, NULL, 0); + return 0; +} diff --git a/tools/testing/selftests/bpf/progs/verifier_arena.c b/tools/testing/selftests/bpf/progs/verifier_arena.c index 93144ae6df74..67509c5d3982 100644 --- a/tools/testing/selftests/bpf/progs/verifier_arena.c +++ b/tools/testing/selftests/bpf/progs/verifier_arena.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. 
*/ +#define BPF_NO_KFUNC_PROTOTYPES #include #include #include diff --git a/tools/testing/selftests/bpf/progs/verifier_arena_large.c b/tools/testing/selftests/bpf/progs/verifier_arena_large.c index ef66ea460264..6065f862d964 100644 --- a/tools/testing/selftests/bpf/progs/verifier_arena_large.c +++ b/tools/testing/selftests/bpf/progs/verifier_arena_large.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ +#define BPF_NO_KFUNC_PROTOTYPES #include #include #include diff --git a/tools/testing/selftests/bpf/progs/verifier_bits_iter.c b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c new file mode 100644 index 000000000000..716113c2bce2 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/verifier_bits_iter.c @@ -0,0 +1,153 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Copyright (c) 2024 Yafang Shao */ + +#include "vmlinux.h" +#include +#include + +#include "bpf_misc.h" +#include "task_kfunc_common.h" + +char _license[] SEC("license") = "GPL"; + +int bpf_iter_bits_new(struct bpf_iter_bits *it, const u64 *unsafe_ptr__ign, + u32 nr_bits) __ksym __weak; +int *bpf_iter_bits_next(struct bpf_iter_bits *it) __ksym __weak; +void bpf_iter_bits_destroy(struct bpf_iter_bits *it) __ksym __weak; + +SEC("iter.s/cgroup") +__description("bits iter without destroy") +__failure __msg("Unreleased reference") +int BPF_PROG(no_destroy, struct bpf_iter_meta *meta, struct cgroup *cgrp) +{ + struct bpf_iter_bits it; + u64 data = 1; + + bpf_iter_bits_new(&it, &data, 1); + bpf_iter_bits_next(&it); + return 0; +} + +SEC("iter/cgroup") +__description("uninitialized iter in ->next()") +__failure __msg("expected an initialized iter_bits as arg #1") +int BPF_PROG(next_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp) +{ + struct bpf_iter_bits *it = NULL; + + bpf_iter_bits_next(it); + return 0; +} + +SEC("iter/cgroup") +__description("uninitialized iter in ->destroy()") +__failure __msg("expected an initialized iter_bits as arg #1") +int BPF_PROG(destroy_uninit, struct bpf_iter_meta *meta, struct cgroup *cgrp) +{ + struct bpf_iter_bits it = {}; + + bpf_iter_bits_destroy(&it); + return 0; +} + +SEC("syscall") +__description("null pointer") +__success __retval(0) +int null_pointer(void) +{ + int nr = 0; + int *bit; + + bpf_for_each(bits, bit, NULL, 1) + nr++; + return nr; +} + +SEC("syscall") +__description("bits copy") +__success __retval(10) +int bits_copy(void) +{ + u64 data = 0xf7310UL; /* 4 + 3 + 2 + 1 + 0*/ + int nr = 0; + int *bit; + + bpf_for_each(bits, bit, &data, 1) + nr++; + return nr; +} + +SEC("syscall") +__description("bits memalloc") +__success __retval(64) +int bits_memalloc(void) +{ + u64 data[2]; + int nr = 0; + int *bit; + + __builtin_memset(&data, 0xf0, sizeof(data)); /* 4 * 16 */ + bpf_for_each(bits, bit, &data[0], sizeof(data) / sizeof(u64)) + nr++; + return nr; +} + +SEC("syscall") +__description("bit index") +__success __retval(8) +int bit_index(void) +{ + u64 data = 0x100; + int bit_idx = 0; + int *bit; + + bpf_for_each(bits, bit, &data, 1) { + if (*bit == 0) + continue; + bit_idx = *bit; + } + return bit_idx; +} + +SEC("syscall") +__description("bits nomem") +__success __retval(0) +int bits_nomem(void) +{ + u64 data[4]; + int nr = 0; + int *bit; + + __builtin_memset(&data, 0xff, sizeof(data)); + bpf_for_each(bits, bit, &data[0], 513) /* Be greater than 512 */ + nr++; + return nr; +} + +SEC("syscall") +__description("fewer words") +__success __retval(1) +int fewer_words(void) +{ + u64 data[2] = {0x1, 0xff}; + int nr = 0; + 
int *bit; + + bpf_for_each(bits, bit, &data[0], 1) + nr++; + return nr; +} + +SEC("syscall") +__description("zero words") +__success __retval(0) +int zero_words(void) +{ + u64 data[2] = {0x1, 0xff}; + int nr = 0; + int *bit; + + bpf_for_each(bits, bit, &data[0], 0) + nr++; + return nr; +} diff --git a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c index 80c737b6d340..e54bb5385bc1 100644 --- a/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c +++ b/tools/testing/selftests/bpf/progs/verifier_iterating_callbacks.c @@ -551,4 +551,240 @@ int cond_break5(const void *ctx) return cnt1 > 1 && cnt2 > 1 ? 1 : 0; } +#define ARR2_SZ 1000 +SEC(".data.arr2") +char arr2[ARR2_SZ]; + +SEC("socket") +__success __flag(BPF_F_TEST_STATE_FREQ) +int loop_inside_iter(const void *ctx) +{ + struct bpf_iter_num it; + int *v, sum = 0; + __u64 i = 0; + + bpf_iter_num_new(&it, 0, ARR2_SZ); + while ((v = bpf_iter_num_next(&it))) { + if (i < ARR2_SZ) + sum += arr2[i++]; + } + bpf_iter_num_destroy(&it); + return sum; +} + +SEC("socket") +__success __flag(BPF_F_TEST_STATE_FREQ) +int loop_inside_iter_signed(const void *ctx) +{ + struct bpf_iter_num it; + int *v, sum = 0; + long i = 0; + + bpf_iter_num_new(&it, 0, ARR2_SZ); + while ((v = bpf_iter_num_next(&it))) { + if (i < ARR2_SZ && i >= 0) + sum += arr2[i++]; + } + bpf_iter_num_destroy(&it); + return sum; +} + +volatile const int limit = ARR2_SZ; + +SEC("socket") +__success __flag(BPF_F_TEST_STATE_FREQ) +int loop_inside_iter_volatile_limit(const void *ctx) +{ + struct bpf_iter_num it; + int *v, sum = 0; + __u64 i = 0; + + bpf_iter_num_new(&it, 0, ARR2_SZ); + while ((v = bpf_iter_num_next(&it))) { + if (i < limit) + sum += arr2[i++]; + } + bpf_iter_num_destroy(&it); + return sum; +} + +#define ARR_LONG_SZ 1000 + +SEC(".data.arr_long") +long arr_long[ARR_LONG_SZ]; + +SEC("socket") +__success +int test1(const void *ctx) +{ + long i; + + for (i = 0; i < ARR_LONG_SZ && can_loop; i++) + arr_long[i] = i; + return 0; +} + +SEC("socket") +__success +int test2(const void *ctx) +{ + __u64 i; + + for (i = zero; i < ARR_LONG_SZ && can_loop; i++) { + barrier_var(i); + arr_long[i] = i; + } + return 0; +} + +SEC(".data.arr_foo") +struct { + int a; + int b; +} arr_foo[ARR_LONG_SZ]; + +SEC("socket") +__success +int test3(const void *ctx) +{ + __u64 i; + + for (i = zero; i < ARR_LONG_SZ && can_loop; i++) { + barrier_var(i); + arr_foo[i].a = i; + arr_foo[i].b = i; + } + return 0; +} + +SEC("socket") +__success +int test4(const void *ctx) +{ + long i; + + for (i = zero + ARR_LONG_SZ - 1; i < ARR_LONG_SZ && i >= 0 && can_loop; i--) { + barrier_var(i); + arr_foo[i].a = i; + arr_foo[i].b = i; + } + return 0; +} + +char buf[10] SEC(".data.buf"); + +SEC("socket") +__description("check add const") +__success +__naked void check_add_const(void) +{ + /* typical LLVM generated loop with may_goto */ + asm volatile (" \ + call %[bpf_ktime_get_ns]; \ + if r0 > 9 goto l1_%=; \ +l0_%=: r1 = %[buf]; \ + r2 = r0; \ + r1 += r2; \ + r3 = *(u8 *)(r1 +0); \ + .byte 0xe5; /* may_goto */ \ + .byte 0; /* regs */ \ + .short 4; /* off of l1_%=: */ \ + .long 0; /* imm */ \ + r0 = r2; \ + r0 += 1; \ + if r2 < 9 goto l0_%=; \ + exit; \ +l1_%=: r0 = 0; \ + exit; \ +" : + : __imm(bpf_ktime_get_ns), + __imm_ptr(buf) + : __clobber_common); +} + +SEC("socket") +__failure +__msg("*(u8 *)(r7 +0) = r0") +__msg("invalid access to map value, value_size=10 off=10 size=1") +__naked void check_add_const_3regs(void) +{ + asm volatile ( 
+ "r6 = %[buf];" + "r7 = %[buf];" + "call %[bpf_ktime_get_ns];" + "r1 = r0;" /* link r0.id == r1.id == r2.id */ + "r2 = r0;" + "r1 += 1;" /* r1 == r0+1 */ + "r2 += 2;" /* r2 == r0+2 */ + "if r0 > 8 goto 1f;" /* r0 range [0, 8] */ + "r6 += r1;" /* r1 range [1, 9] */ + "r7 += r2;" /* r2 range [2, 10] */ + "*(u8 *)(r6 +0) = r0;" /* safe, within bounds */ + "*(u8 *)(r7 +0) = r0;" /* unsafe, out of bounds */ + "1: exit;" + : + : __imm(bpf_ktime_get_ns), + __imm_ptr(buf) + : __clobber_common); +} + +SEC("socket") +__failure +__msg("*(u8 *)(r8 -1) = r0") +__msg("invalid access to map value, value_size=10 off=10 size=1") +__naked void check_add_const_3regs_2if(void) +{ + asm volatile ( + "r6 = %[buf];" + "r7 = %[buf];" + "r8 = %[buf];" + "call %[bpf_ktime_get_ns];" + "if r0 < 2 goto 1f;" + "r1 = r0;" /* link r0.id == r1.id == r2.id */ + "r2 = r0;" + "r1 += 1;" /* r1 == r0+1 */ + "r2 += 2;" /* r2 == r0+2 */ + "if r2 > 11 goto 1f;" /* r2 range [0, 11] -> r0 range [-2, 9]; r1 range [-1, 10] */ + "if r0 s< 0 goto 1f;" /* r0 range [0, 9] -> r1 range [1, 10]; r2 range [2, 11]; */ + "r6 += r0;" /* r0 range [0, 9] */ + "r7 += r1;" /* r1 range [1, 10] */ + "r8 += r2;" /* r2 range [2, 11] */ + "*(u8 *)(r6 +0) = r0;" /* safe, within bounds */ + "*(u8 *)(r7 -1) = r0;" /* safe */ + "*(u8 *)(r8 -1) = r0;" /* unsafe */ + "1: exit;" + : + : __imm(bpf_ktime_get_ns), + __imm_ptr(buf) + : __clobber_common); +} + +SEC("socket") +__failure +__flag(BPF_F_TEST_STATE_FREQ) +__naked void check_add_const_regsafe_off(void) +{ + asm volatile ( + "r8 = %[buf];" + "call %[bpf_ktime_get_ns];" + "r6 = r0;" + "call %[bpf_ktime_get_ns];" + "r7 = r0;" + "call %[bpf_ktime_get_ns];" + "r1 = r0;" /* same ids for r1 and r0 */ + "if r6 > r7 goto 1f;" /* this jump can't be predicted */ + "r1 += 1;" /* r1.off == +1 */ + "goto 2f;" + "1: r1 += 100;" /* r1.off == +100 */ + "goto +0;" /* verify r1.off in regsafe() after this insn */ + "2: if r0 > 8 goto 3f;" /* r0 range [0,8], r1 range either [1,9] or [100,108]*/ + "r8 += r1;" + "*(u8 *)(r8 +0) = r0;" /* potentially unsafe, buf size is 10 */ + "3: exit;" + : + : __imm(bpf_ktime_get_ns), + __imm_ptr(buf) + : __clobber_common); +} + char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c b/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c index 65bba330e7e5..ab9f9f2620ed 100644 --- a/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c +++ b/tools/testing/selftests/bpf/progs/verifier_netfilter_ctx.c @@ -79,7 +79,7 @@ int with_invalid_ctx_access_test5(struct bpf_nf_ctx *ctx) return NF_ACCEPT; } -extern int bpf_dynptr_from_skb(struct sk_buff *skb, __u64 flags, +extern int bpf_dynptr_from_skb(struct __sk_buff *skb, __u64 flags, struct bpf_dynptr *ptr__uninit) __ksym; extern void *bpf_dynptr_slice(const struct bpf_dynptr *ptr, uint32_t offset, void *buffer, uint32_t buffer__sz) __ksym; @@ -90,8 +90,8 @@ __success __failure_unpriv __retval(0) int with_valid_ctx_access_test6(struct bpf_nf_ctx *ctx) { + struct __sk_buff *skb = (struct __sk_buff *)ctx->skb; const struct nf_hook_state *state = ctx->state; - struct sk_buff *skb = ctx->skb; const struct iphdr *iph; const struct tcphdr *th; u8 buffer_iph[20] = {}; @@ -99,7 +99,7 @@ int with_valid_ctx_access_test6(struct bpf_nf_ctx *ctx) struct bpf_dynptr ptr; uint8_t ihl; - if (skb->len <= 20 || bpf_dynptr_from_skb(skb, 0, &ptr)) + if (ctx->skb->len <= 20 || bpf_dynptr_from_skb(skb, 0, &ptr)) return NF_ACCEPT; iph = bpf_dynptr_slice(&ptr, 0, buffer_iph, sizeof(buffer_iph)); diff --git 
a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c index 4a58e0398e72..6a6fad625f7e 100644 --- a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c +++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c @@ -8,8 +8,6 @@ #include "bpf_misc.h" #include <../../../tools/include/linux/filter.h> -#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0])) - int vals[] SEC(".data.vals") = {1, 2, 3, 4}; __naked __noinline __used diff --git a/tools/testing/selftests/bpf/progs/wq.c b/tools/testing/selftests/bpf/progs/wq.c index 49e712acbf60..f8d3ae0c29ae 100644 --- a/tools/testing/selftests/bpf/progs/wq.c +++ b/tools/testing/selftests/bpf/progs/wq.c @@ -32,6 +32,7 @@ struct { } hmap_malloc SEC(".maps"); struct elem { + int ok_offset; struct bpf_wq w; }; @@ -53,7 +54,7 @@ __u32 ok; __u32 ok_sleepable; static int test_elem_callback(void *map, int *key, - int (callback_fn)(void *map, int *key, struct bpf_wq *wq)) + int (callback_fn)(void *map, int *key, void *value)) { struct elem init = {}, *val; struct bpf_wq *wq; @@ -70,6 +71,8 @@ static int test_elem_callback(void *map, int *key, if (!val) return -2; + val->ok_offset = *key; + wq = &val->w; if (bpf_wq_init(wq, map, 0) != 0) return -3; @@ -84,7 +87,7 @@ static int test_elem_callback(void *map, int *key, } static int test_hmap_elem_callback(void *map, int *key, - int (callback_fn)(void *map, int *key, struct bpf_wq *wq)) + int (callback_fn)(void *map, int *key, void *value)) { struct hmap_elem init = {}, *val; struct bpf_wq *wq; @@ -114,7 +117,7 @@ static int test_hmap_elem_callback(void *map, int *key, } /* callback for non sleepable workqueue */ -static int wq_callback(void *map, int *key, struct bpf_wq *work) +static int wq_callback(void *map, int *key, void *value) { bpf_kfunc_common_test(); ok |= (1 << *key); @@ -122,10 +125,16 @@ static int wq_callback(void *map, int *key, struct bpf_wq *work) } /* callback for sleepable workqueue */ -static int wq_cb_sleepable(void *map, int *key, struct bpf_wq *work) +static int wq_cb_sleepable(void *map, int *key, void *value) { + struct elem *data = (struct elem *)value; + int offset = data->ok_offset; + + if (*key != offset) + return 0; + bpf_kfunc_call_test_sleepable(); - ok_sleepable |= (1 << *key); + ok_sleepable |= (1 << offset); return 0; } diff --git a/tools/testing/selftests/bpf/progs/wq_failures.c b/tools/testing/selftests/bpf/progs/wq_failures.c index 4cbdb425f223..25b51a72fe0f 100644 --- a/tools/testing/selftests/bpf/progs/wq_failures.c +++ b/tools/testing/selftests/bpf/progs/wq_failures.c @@ -28,14 +28,14 @@ struct { } lru SEC(".maps"); /* callback for non sleepable workqueue */ -static int wq_callback(void *map, int *key, struct bpf_wq *work) +static int wq_callback(void *map, int *key, void *value) { bpf_kfunc_common_test(); return 0; } /* callback for sleepable workqueue */ -static int wq_cb_sleepable(void *map, int *key, struct bpf_wq *work) +static int wq_cb_sleepable(void *map, int *key, void *value) { bpf_kfunc_call_test_sleepable(); return 0; diff --git a/tools/testing/selftests/bpf/progs/xdp_flowtable.c b/tools/testing/selftests/bpf/progs/xdp_flowtable.c new file mode 100644 index 000000000000..7fdc7b23ee74 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/xdp_flowtable.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0 +#define BPF_NO_KFUNC_PROTOTYPES +#include +#include +#include + +#define ETH_P_IP 0x0800 +#define ETH_P_IPV6 0x86dd +#define IP_MF 0x2000 /* "More Fragments" */ 
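+/* IP_MF and IP_OFFSET mirror the kernel's own ip.h values: ORed together and + * byte-swapped with bpf_htons() they mask the MF flag plus the 13-bit fragment + * offset, so a nonzero (frag_off & bpf_htons(IP_MF | IP_OFFSET)) marks any + * fragmented datagram; this is the check in xdp_flowtable_offload_check_iphdr() + * below. + */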
+#define IP_OFFSET 0x1fff /* "Fragment Offset" */ +#define AF_INET 2 +#define AF_INET6 10 + +struct bpf_flowtable_opts___local { + s32 error; +}; + +struct flow_offload_tuple_rhash * +bpf_xdp_flow_lookup(struct xdp_md *, struct bpf_fib_lookup *, + struct bpf_flowtable_opts___local *, u32) __ksym; + +struct { + __uint(type, BPF_MAP_TYPE_ARRAY); + __type(key, __u32); + __type(value, __u32); + __uint(max_entries, 1); +} stats SEC(".maps"); + +static bool xdp_flowtable_offload_check_iphdr(struct iphdr *iph) +{ + /* ip fragmented traffic */ + if (iph->frag_off & bpf_htons(IP_MF | IP_OFFSET)) + return false; + + /* ip options */ + if (iph->ihl * 4 != sizeof(*iph)) + return false; + + if (iph->ttl <= 1) + return false; + + return true; +} + +static bool xdp_flowtable_offload_check_tcp_state(void *ports, void *data_end, + u8 proto) +{ + if (proto == IPPROTO_TCP) { + struct tcphdr *tcph = ports; + + if (tcph + 1 > data_end) + return false; + + if (tcph->fin || tcph->rst) + return false; + } + + return true; +} + +struct flow_ports___local { + __be16 source, dest; +} __attribute__((preserve_access_index)); + +SEC("xdp.frags") +int xdp_flowtable_do_lookup(struct xdp_md *ctx) +{ + void *data_end = (void *)(long)ctx->data_end; + struct bpf_flowtable_opts___local opts = {}; + struct flow_offload_tuple_rhash *tuplehash; + struct bpf_fib_lookup tuple = { + .ifindex = ctx->ingress_ifindex, + }; + void *data = (void *)(long)ctx->data; + struct ethhdr *eth = data; + struct flow_ports___local *ports; + __u32 *val, key = 0; + + if (eth + 1 > data_end) + return XDP_DROP; + + switch (eth->h_proto) { + case bpf_htons(ETH_P_IP): { + struct iphdr *iph = data + sizeof(*eth); + + ports = (struct flow_ports___local *)(iph + 1); + if (ports + 1 > data_end) + return XDP_PASS; + + /* sanity check on ip header */ + if (!xdp_flowtable_offload_check_iphdr(iph)) + return XDP_PASS; + + if (!xdp_flowtable_offload_check_tcp_state(ports, data_end, + iph->protocol)) + return XDP_PASS; + + tuple.family = AF_INET; + tuple.tos = iph->tos; + tuple.l4_protocol = iph->protocol; + tuple.tot_len = bpf_ntohs(iph->tot_len); + tuple.ipv4_src = iph->saddr; + tuple.ipv4_dst = iph->daddr; + tuple.sport = ports->source; + tuple.dport = ports->dest; + break; + } + case bpf_htons(ETH_P_IPV6): { + struct in6_addr *src = (struct in6_addr *)tuple.ipv6_src; + struct in6_addr *dst = (struct in6_addr *)tuple.ipv6_dst; + struct ipv6hdr *ip6h = data + sizeof(*eth); + + ports = (struct flow_ports___local *)(ip6h + 1); + if (ports + 1 > data_end) + return XDP_PASS; + + if (ip6h->hop_limit <= 1) + return XDP_PASS; + + if (!xdp_flowtable_offload_check_tcp_state(ports, data_end, + ip6h->nexthdr)) + return XDP_PASS; + + tuple.family = AF_INET6; + tuple.l4_protocol = ip6h->nexthdr; + tuple.tot_len = bpf_ntohs(ip6h->payload_len); + *src = ip6h->saddr; + *dst = ip6h->daddr; + tuple.sport = ports->source; + tuple.dport = ports->dest; + break; + } + default: + return XDP_PASS; + } + + tuplehash = bpf_xdp_flow_lookup(ctx, &tuple, &opts, sizeof(opts)); + if (!tuplehash) + return XDP_PASS; + + val = bpf_map_lookup_elem(&stats, &key); + if (val) + __sync_add_and_fetch(val, 1); + + return XDP_PASS; +} + +char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c index 7ea9785738b5..f8f5dc9f72b8 100644 --- a/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c +++ b/tools/testing/selftests/bpf/progs/xdp_synproxy_kern.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: 
LGPL-2.1 OR BSD-2-Clause /* Copyright (c) 2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ +#define BPF_NO_KFUNC_PROTOTYPES #include "vmlinux.h" #include diff --git a/tools/testing/selftests/bpf/progs/xfrm_info.c b/tools/testing/selftests/bpf/progs/xfrm_info.c index f6a501fbba2b..a1d9f106c3f0 100644 --- a/tools/testing/selftests/bpf/progs/xfrm_info.c +++ b/tools/testing/selftests/bpf/progs/xfrm_info.c @@ -1,4 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 +#define BPF_NO_KFUNC_PROTOTYPES #include "vmlinux.h" #include "bpf_tracing_net.h" #include diff --git a/tools/testing/selftests/bpf/test_loader.c b/tools/testing/selftests/bpf/test_loader.c index 524c38e9cde4..f14e10b0de96 100644 --- a/tools/testing/selftests/bpf/test_loader.c +++ b/tools/testing/selftests/bpf/test_loader.c @@ -2,6 +2,7 @@ /* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */ #include #include +#include #include #include @@ -17,9 +18,11 @@ #define TEST_TAG_EXPECT_FAILURE "comment:test_expect_failure" #define TEST_TAG_EXPECT_SUCCESS "comment:test_expect_success" #define TEST_TAG_EXPECT_MSG_PFX "comment:test_expect_msg=" +#define TEST_TAG_EXPECT_REGEX_PFX "comment:test_expect_regex=" #define TEST_TAG_EXPECT_FAILURE_UNPRIV "comment:test_expect_failure_unpriv" #define TEST_TAG_EXPECT_SUCCESS_UNPRIV "comment:test_expect_success_unpriv" #define TEST_TAG_EXPECT_MSG_PFX_UNPRIV "comment:test_expect_msg_unpriv=" +#define TEST_TAG_EXPECT_REGEX_PFX_UNPRIV "comment:test_expect_regex_unpriv=" #define TEST_TAG_LOG_LEVEL_PFX "comment:test_log_level=" #define TEST_TAG_PROG_FLAGS_PFX "comment:test_prog_flags=" #define TEST_TAG_DESCRIPTION_PFX "comment:test_description=" @@ -46,10 +49,16 @@ enum mode { UNPRIV = 2 }; +struct expect_msg { + const char *substr; /* substring match */ + const char *regex_str; /* regex-based match */ + regex_t regex; +}; + struct test_subspec { char *name; bool expect_failure; - const char **expect_msgs; + struct expect_msg *expect_msgs; size_t expect_msg_cnt; int retval; bool execute; @@ -89,6 +98,16 @@ void test_loader_fini(struct test_loader *tester) static void free_test_spec(struct test_spec *spec) { + int i; + + /* Deallocate expect_msgs arrays. 
*/ + for (i = 0; i < spec->priv.expect_msg_cnt; i++) + if (spec->priv.expect_msgs[i].regex_str) + regfree(&spec->priv.expect_msgs[i].regex); + for (i = 0; i < spec->unpriv.expect_msg_cnt; i++) + if (spec->unpriv.expect_msgs[i].regex_str) + regfree(&spec->unpriv.expect_msgs[i].regex); + free(spec->priv.name); free(spec->unpriv.name); free(spec->priv.expect_msgs); @@ -100,18 +119,38 @@ static void free_test_spec(struct test_spec *spec) spec->unpriv.expect_msgs = NULL; } -static int push_msg(const char *msg, struct test_subspec *subspec) +static int push_msg(const char *substr, const char *regex_str, struct test_subspec *subspec) { void *tmp; + int regcomp_res; + char error_msg[100]; + struct expect_msg *msg; - tmp = realloc(subspec->expect_msgs, (1 + subspec->expect_msg_cnt) * sizeof(void *)); + tmp = realloc(subspec->expect_msgs, + (1 + subspec->expect_msg_cnt) * sizeof(struct expect_msg)); if (!tmp) { ASSERT_FAIL("failed to realloc memory for messages\n"); return -ENOMEM; } subspec->expect_msgs = tmp; - subspec->expect_msgs[subspec->expect_msg_cnt++] = msg; + msg = &subspec->expect_msgs[subspec->expect_msg_cnt]; + if (substr) { + msg->substr = substr; + msg->regex_str = NULL; + } else { + msg->regex_str = regex_str; + msg->substr = NULL; + regcomp_res = regcomp(&msg->regex, regex_str, REG_EXTENDED|REG_NEWLINE); + if (regcomp_res != 0) { + regerror(regcomp_res, &msg->regex, error_msg, sizeof(error_msg)); + PRINT_FAIL("Regexp compilation error in '%s': '%s'\n", + regex_str, error_msg); + return -EINVAL; + } + } + + subspec->expect_msg_cnt += 1; return 0; } @@ -233,13 +272,25 @@ static int parse_test_spec(struct test_loader *tester, spec->mode_mask |= UNPRIV; } else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX)) { msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX) - 1; - err = push_msg(msg, &spec->priv); + err = push_msg(msg, NULL, &spec->priv); if (err) goto cleanup; spec->mode_mask |= PRIV; } else if (str_has_pfx(s, TEST_TAG_EXPECT_MSG_PFX_UNPRIV)) { msg = s + sizeof(TEST_TAG_EXPECT_MSG_PFX_UNPRIV) - 1; - err = push_msg(msg, &spec->unpriv); + err = push_msg(msg, NULL, &spec->unpriv); + if (err) + goto cleanup; + spec->mode_mask |= UNPRIV; + } else if (str_has_pfx(s, TEST_TAG_EXPECT_REGEX_PFX)) { + msg = s + sizeof(TEST_TAG_EXPECT_REGEX_PFX) - 1; + err = push_msg(NULL, msg, &spec->priv); + if (err) + goto cleanup; + spec->mode_mask |= PRIV; + } else if (str_has_pfx(s, TEST_TAG_EXPECT_REGEX_PFX_UNPRIV)) { + msg = s + sizeof(TEST_TAG_EXPECT_REGEX_PFX_UNPRIV) - 1; + err = push_msg(NULL, msg, &spec->unpriv); if (err) goto cleanup; spec->mode_mask |= UNPRIV; @@ -337,16 +388,13 @@ static int parse_test_spec(struct test_loader *tester, } if (!spec->unpriv.expect_msgs) { - size_t sz = spec->priv.expect_msg_cnt * sizeof(void *); + for (i = 0; i < spec->priv.expect_msg_cnt; i++) { + struct expect_msg *msg = &spec->priv.expect_msgs[i]; - spec->unpriv.expect_msgs = malloc(sz); - if (!spec->unpriv.expect_msgs) { - PRINT_FAIL("failed to allocate memory for unpriv.expect_msgs\n"); - err = -ENOMEM; - goto cleanup; + err = push_msg(msg->substr, msg->regex_str, &spec->unpriv); + if (err) + goto cleanup; } - memcpy(spec->unpriv.expect_msgs, spec->priv.expect_msgs, sz); - spec->unpriv.expect_msg_cnt = spec->priv.expect_msg_cnt; } } @@ -402,27 +450,40 @@ static void validate_case(struct test_loader *tester, struct bpf_program *prog, int load_err) { - int i, j; + int i, j, err; + char *match; + regmatch_t reg_match[1]; for (i = 0; i < subspec->expect_msg_cnt; i++) { - char *match; - const char *expect_msg; + struct 
expect_msg *msg = &subspec->expect_msgs[i]; - expect_msg = subspec->expect_msgs[i]; - - match = strstr(tester->log_buf + tester->next_match_pos, expect_msg); - if (!ASSERT_OK_PTR(match, "expect_msg")) { - /* if we are in verbose mode, we've already emitted log */ - if (env.verbosity == VERBOSE_NONE) - emit_verifier_log(tester->log_buf, true /*force*/); - for (j = 0; j < i; j++) - fprintf(stderr, - "MATCHED MSG: '%s'\n", subspec->expect_msgs[j]); - fprintf(stderr, "EXPECTED MSG: '%s'\n", expect_msg); - return; + if (msg->substr) { + match = strstr(tester->log_buf + tester->next_match_pos, msg->substr); + if (match) + tester->next_match_pos = match - tester->log_buf + strlen(msg->substr); + } else { + err = regexec(&msg->regex, + tester->log_buf + tester->next_match_pos, 1, reg_match, 0); + if (err == 0) { + match = tester->log_buf + tester->next_match_pos + reg_match[0].rm_so; + tester->next_match_pos += reg_match[0].rm_eo; + } else { + match = NULL; + } } - tester->next_match_pos = match - tester->log_buf + strlen(expect_msg); + if (!ASSERT_OK_PTR(match, "expect_msg")) { + if (env.verbosity == VERBOSE_NONE) + emit_verifier_log(tester->log_buf, true /*force*/); + for (j = 0; j <= i; j++) { + msg = &subspec->expect_msgs[j]; + fprintf(stderr, "%s %s: '%s'\n", + j < i ? "MATCHED " : "EXPECTED", + msg->substr ? "SUBSTR" : " REGEX", + msg->substr ?: msg->regex_str); + } + return; + } } } diff --git a/tools/testing/selftests/bpf/test_progs.h b/tools/testing/selftests/bpf/test_progs.h index 0ba5a20b19ba..51341d50213b 100644 --- a/tools/testing/selftests/bpf/test_progs.h +++ b/tools/testing/selftests/bpf/test_progs.h @@ -377,6 +377,15 @@ int test__join_cgroup(const char *path); ___ok; \ }) +#define ASSERT_OK_FD(fd, name) ({ \ + static int duration = 0; \ + int ___fd = (fd); \ + bool ___ok = ___fd >= 0; \ + CHECK(!___ok, (name), "unexpected fd: %d (errno %d)\n", \ + ___fd, errno); \ + ___ok; \ +}) + #define SYS(goto_label, fmt, ...) 
\ ({ \ char cmd[1024]; \ diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c index 92752f5eeded..3e02d7267de8 100644 --- a/tools/testing/selftests/bpf/test_sockmap.c +++ b/tools/testing/selftests/bpf/test_sockmap.c @@ -63,7 +63,8 @@ int passed; int failed; int map_fd[9]; struct bpf_map *maps[9]; -int prog_fd[11]; +struct bpf_program *progs[9]; +struct bpf_link *links[9]; int txmsg_pass; int txmsg_redir; @@ -680,7 +681,8 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt, } } - s->bytes_recvd += recv; + if (recv > 0) + s->bytes_recvd += recv; if (opt->check_recved_len && s->bytes_recvd > total_bytes) { errno = EMSGSIZE; @@ -952,7 +954,8 @@ enum { static int run_options(struct sockmap_options *options, int cg_fd, int test) { - int i, key, next_key, err, tx_prog_fd = -1, zero = 0; + int i, key, next_key, err, zero = 0; + struct bpf_program *tx_prog; /* If base test skip BPF setup */ if (test == BASE || test == BASE_SENDPAGE) @@ -960,48 +963,44 @@ static int run_options(struct sockmap_options *options, int cg_fd, int test) /* Attach programs to sockmap */ if (!txmsg_omit_skb_parser) { - err = bpf_prog_attach(prog_fd[0], map_fd[0], - BPF_SK_SKB_STREAM_PARSER, 0); - if (err) { + links[0] = bpf_program__attach_sockmap(progs[0], map_fd[0]); + if (!links[0]) { fprintf(stderr, - "ERROR: bpf_prog_attach (sockmap %i->%i): %d (%s)\n", - prog_fd[0], map_fd[0], err, strerror(errno)); - return err; + "ERROR: bpf_program__attach_sockmap (sockmap %i->%i): (%s)\n", + bpf_program__fd(progs[0]), map_fd[0], strerror(errno)); + return -1; } } - err = bpf_prog_attach(prog_fd[1], map_fd[0], - BPF_SK_SKB_STREAM_VERDICT, 0); - if (err) { - fprintf(stderr, "ERROR: bpf_prog_attach (sockmap): %d (%s)\n", - err, strerror(errno)); - return err; + links[1] = bpf_program__attach_sockmap(progs[1], map_fd[0]); + if (!links[1]) { + fprintf(stderr, "ERROR: bpf_program__attach_sockmap (sockmap): (%s)\n", + strerror(errno)); + return -1; } /* Attach programs to TLS sockmap */ if (txmsg_ktls_skb) { if (!txmsg_omit_skb_parser) { - err = bpf_prog_attach(prog_fd[0], map_fd[8], - BPF_SK_SKB_STREAM_PARSER, 0); - if (err) { + links[2] = bpf_program__attach_sockmap(progs[0], map_fd[8]); + if (!links[2]) { fprintf(stderr, - "ERROR: bpf_prog_attach (TLS sockmap %i->%i): %d (%s)\n", - prog_fd[0], map_fd[8], err, strerror(errno)); - return err; + "ERROR: bpf_program__attach_sockmap (TLS sockmap %i->%i): (%s)\n", + bpf_program__fd(progs[0]), map_fd[8], strerror(errno)); + return -1; } } - err = bpf_prog_attach(prog_fd[2], map_fd[8], - BPF_SK_SKB_STREAM_VERDICT, 0); - if (err) { - fprintf(stderr, "ERROR: bpf_prog_attach (TLS sockmap): %d (%s)\n", - err, strerror(errno)); - return err; + links[3] = bpf_program__attach_sockmap(progs[2], map_fd[8]); + if (!links[3]) { + fprintf(stderr, "ERROR: bpf_program__attach_sockmap (TLS sockmap): (%s)\n", + strerror(errno)); + return -1; } } /* Attach to cgroups */ - err = bpf_prog_attach(prog_fd[3], cg_fd, BPF_CGROUP_SOCK_OPS, 0); + err = bpf_prog_attach(bpf_program__fd(progs[3]), cg_fd, BPF_CGROUP_SOCK_OPS, 0); if (err) { fprintf(stderr, "ERROR: bpf_prog_attach (groups): %d (%s)\n", err, strerror(errno)); @@ -1017,30 +1016,31 @@ run: /* Attach txmsg program to sockmap */ if (txmsg_pass) - tx_prog_fd = prog_fd[4]; + tx_prog = progs[4]; else if (txmsg_redir) - tx_prog_fd = prog_fd[5]; + tx_prog = progs[5]; else if (txmsg_apply) - tx_prog_fd = prog_fd[6]; + tx_prog = progs[6]; else if (txmsg_cork) - tx_prog_fd = prog_fd[7]; + tx_prog = 
progs[7]; else if (txmsg_drop) - tx_prog_fd = prog_fd[8]; + tx_prog = progs[8]; else - tx_prog_fd = 0; + tx_prog = NULL; - if (tx_prog_fd) { - int redir_fd, i = 0; + if (tx_prog) { + int redir_fd; - err = bpf_prog_attach(tx_prog_fd, - map_fd[1], BPF_SK_MSG_VERDICT, 0); - if (err) { + links[4] = bpf_program__attach_sockmap(tx_prog, map_fd[1]); + if (!links[4]) { fprintf(stderr, - "ERROR: bpf_prog_attach (txmsg): %d (%s)\n", - err, strerror(errno)); + "ERROR: bpf_program__attach_sockmap (txmsg): (%s)\n", + strerror(errno)); + err = -1; goto out; } + i = 0; err = bpf_map_update_elem(map_fd[1], &i, &c1, BPF_ANY); if (err) { fprintf(stderr, @@ -1279,16 +1279,14 @@ run: fprintf(stderr, "unknown test\n"); out: /* Detatch and zero all the maps */ - bpf_prog_detach2(prog_fd[3], cg_fd, BPF_CGROUP_SOCK_OPS); - bpf_prog_detach2(prog_fd[0], map_fd[0], BPF_SK_SKB_STREAM_PARSER); - bpf_prog_detach2(prog_fd[1], map_fd[0], BPF_SK_SKB_STREAM_VERDICT); - bpf_prog_detach2(prog_fd[0], map_fd[8], BPF_SK_SKB_STREAM_PARSER); - bpf_prog_detach2(prog_fd[2], map_fd[8], BPF_SK_SKB_STREAM_VERDICT); + bpf_prog_detach2(bpf_program__fd(progs[3]), cg_fd, BPF_CGROUP_SOCK_OPS); - if (tx_prog_fd >= 0) - bpf_prog_detach2(tx_prog_fd, map_fd[1], BPF_SK_MSG_VERDICT); + for (i = 0; i < ARRAY_SIZE(links); i++) { + if (links[i]) + bpf_link__detach(links[i]); + } - for (i = 0; i < 8; i++) { + for (i = 0; i < ARRAY_SIZE(map_fd); i++) { key = next_key = 0; bpf_map_update_elem(map_fd[i], &key, &zero, BPF_ANY); while (bpf_map_get_next_key(map_fd[i], &key, &next_key) == 0) { @@ -1783,34 +1781,6 @@ char *map_names[] = { "tls_sock_map", }; -int prog_attach_type[] = { - BPF_SK_SKB_STREAM_PARSER, - BPF_SK_SKB_STREAM_VERDICT, - BPF_SK_SKB_STREAM_VERDICT, - BPF_CGROUP_SOCK_OPS, - BPF_SK_MSG_VERDICT, - BPF_SK_MSG_VERDICT, - BPF_SK_MSG_VERDICT, - BPF_SK_MSG_VERDICT, - BPF_SK_MSG_VERDICT, - BPF_SK_MSG_VERDICT, - BPF_SK_MSG_VERDICT, -}; - -int prog_type[] = { - BPF_PROG_TYPE_SK_SKB, - BPF_PROG_TYPE_SK_SKB, - BPF_PROG_TYPE_SK_SKB, - BPF_PROG_TYPE_SOCK_OPS, - BPF_PROG_TYPE_SK_MSG, - BPF_PROG_TYPE_SK_MSG, - BPF_PROG_TYPE_SK_MSG, - BPF_PROG_TYPE_SK_MSG, - BPF_PROG_TYPE_SK_MSG, - BPF_PROG_TYPE_SK_MSG, - BPF_PROG_TYPE_SK_MSG, -}; - static int populate_progs(char *bpf_file) { struct bpf_program *prog; @@ -1829,17 +1799,10 @@ static int populate_progs(char *bpf_file) return -1; } - bpf_object__for_each_program(prog, obj) { - bpf_program__set_type(prog, prog_type[i]); - bpf_program__set_expected_attach_type(prog, - prog_attach_type[i]); - i++; - } - i = bpf_object__load(obj); i = 0; bpf_object__for_each_program(prog, obj) { - prog_fd[i] = bpf_program__fd(prog); + progs[i] = prog; i++; } @@ -1853,6 +1816,9 @@ static int populate_progs(char *bpf_file) } } + for (i = 0; i < ARRAY_SIZE(links); i++) + links[i] = NULL; + return 0; } @@ -1970,7 +1936,6 @@ static void test_selftests_ktls(int cg_fd, struct sockmap_options *opt) static int test_selftest(int cg_fd, struct sockmap_options *opt) { - test_selftests_sockmap(cg_fd, opt); test_selftests_sockhash(cg_fd, opt); test_selftests_ktls(cg_fd, opt); diff --git a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c index 7b5fc98838cd..3844f9b8232a 100644 --- a/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c +++ b/tools/testing/selftests/bpf/test_tcp_check_syncookie_user.c @@ -139,14 +139,14 @@ out: return ret; } -static int v6only_true(int fd, const struct post_socket_opts *opts) +static int v6only_true(int fd, void *opts) { int mode = 
true; return setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &mode, sizeof(mode)); }
-static int v6only_false(int fd, const struct post_socket_opts *opts)
+static int v6only_false(int fd, void *opts)
{ int mode = false;
@@ -156,10 +156,6 @@ static int v6only_false(int fd, const struct post_socket_opts *opts)
int main(int argc, char **argv) { struct network_helper_opts opts = { 0 };
- struct sockaddr_in addr4;
- struct sockaddr_in6 addr6;
- struct sockaddr_in addr4dual;
- struct sockaddr_in6 addr6dual;
int server = -1; int server_v6 = -1; int server_dual = -1;
@@ -181,36 +177,17 @@ int main(int argc, char **argv) goto err; }
- memset(&addr4, 0, sizeof(addr4));
- addr4.sin_family = AF_INET;
- addr4.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- addr4.sin_port = 0;
- memcpy(&addr4dual, &addr4, sizeof(addr4dual));
-
- memset(&addr6, 0, sizeof(addr6));
- addr6.sin6_family = AF_INET6;
- addr6.sin6_addr = in6addr_loopback;
- addr6.sin6_port = 0;
-
- memset(&addr6dual, 0, sizeof(addr6dual));
- addr6dual.sin6_family = AF_INET6;
- addr6dual.sin6_addr = in6addr_any;
- addr6dual.sin6_port = 0;
-
- server = start_server_addr(SOCK_STREAM, (struct sockaddr_storage *)&addr4,
- sizeof(addr4), NULL);
+ server = start_server_str(AF_INET, SOCK_STREAM, "127.0.0.1", 0, NULL);
if (server == -1) goto err;
opts.post_socket_cb = v6only_true;
- server_v6 = start_server_addr(SOCK_STREAM, (struct sockaddr_storage *)&addr6,
- sizeof(addr6), &opts);
+ server_v6 = start_server_str(AF_INET6, SOCK_STREAM, "::1", 0, &opts);
if (server_v6 == -1) goto err;
opts.post_socket_cb = v6only_false;
- server_dual = start_server_addr(SOCK_STREAM, (struct sockaddr_storage *)&addr6dual,
- sizeof(addr6dual), &opts);
+ server_dual = start_server_str(AF_INET6, SOCK_STREAM, "::0", 0, &opts);
if (server_dual == -1) goto err;
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index df04bda1c927..610392dfc4fb 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -1237,11 +1237,6 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, fixup_prog_kfuncs(prog, fd_array, test->fixup_kfunc_btf_id); }
-struct libcap {
- struct __user_cap_header_struct hdr;
- struct __user_cap_data_struct data[2];
-};
-
static int set_admin(bool admin) { int err;
diff --git a/tools/testing/selftests/bpf/trace_helpers.c b/tools/testing/selftests/bpf/trace_helpers.c
index 70e29f316fe7..465d196c7165 100644
--- a/tools/testing/selftests/bpf/trace_helpers.c
+++ b/tools/testing/selftests/bpf/trace_helpers.c
@@ -211,7 +211,7 @@ long ksym_get_addr(const char *name) */ int kallsyms_find(const char *sym, unsigned long long *addr) {
- char type, name[500];
+ char type, name[500], *match;
unsigned long long value; int err = 0; FILE *f;
@@ -221,6 +221,17 @@ int kallsyms_find(const char *sym, unsigned long long *addr) return -EINVAL;
while (fscanf(f, "%llx %c %499s%*[^\n]\n", &value, &type, name) > 0) {
+ /* If CONFIG_LTO_CLANG_THIN is enabled, static variable/function
+ * symbols could be promoted to global due to cross-file inlining.
+ * For such cases, clang compiler will add .llvm.<hash> suffix
+ * to those symbols to avoid potential naming conflict.
+ * Let us ignore .llvm.<hash> suffix during symbol comparison.
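+ * Only data symbols (kallsyms type 'd') are handled below; the suffix
+ * is stripped by terminating the name at the start of the ".llvm." marker.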
+ */ + if (type == 'd') { + match = strstr(name, ".llvm."); + if (match) + *match = '\0'; + } if (strcmp(name, sym) == 0) { *addr = value; goto out; diff --git a/tools/testing/selftests/bpf/verifier/calls.c b/tools/testing/selftests/bpf/verifier/calls.c index ab25a81fd3a1..d0cdd156cd55 100644 --- a/tools/testing/selftests/bpf/verifier/calls.c +++ b/tools/testing/selftests/bpf/verifier/calls.c @@ -76,7 +76,7 @@ }, .prog_type = BPF_PROG_TYPE_SCHED_CLS, .result = REJECT, - .errstr = "R1 must have zero offset when passed to release func or trusted arg to kfunc", + .errstr = "arg#0 expected pointer to ctx, but got PTR", .fixup_kfunc_btf_id = { { "bpf_kfunc_call_test_pass_ctx", 2 }, }, @@ -275,6 +275,19 @@ .result_unpriv = REJECT, .result = ACCEPT, }, +{ + "calls: invalid kfunc call: must provide (attach_prog_fd, btf_id) pair when freplace", + .insns = { + BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, BPF_PSEUDO_KFUNC_CALL, 0, 0), + BPF_EXIT_INSN(), + }, + .prog_type = BPF_PROG_TYPE_EXT, + .result = REJECT, + .errstr = "Tracing programs must provide btf_id", + .fixup_kfunc_btf_id = { + { "bpf_dynptr_from_skb", 0 }, + }, +}, { "calls: basic sanity", .insns = { diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c index 0a9293a57211..90643ccc221d 100644 --- a/tools/testing/selftests/bpf/verifier/precise.c +++ b/tools/testing/selftests/bpf/verifier/precise.c @@ -39,12 +39,12 @@ .result = VERBOSE_ACCEPT, .errstr = "mark_precise: frame0: last_idx 26 first_idx 20\ - mark_precise: frame0: regs=r2 stack= before 25\ - mark_precise: frame0: regs=r2 stack= before 24\ - mark_precise: frame0: regs=r2 stack= before 23\ - mark_precise: frame0: regs=r2 stack= before 22\ - mark_precise: frame0: regs=r2 stack= before 20\ - mark_precise: frame0: parent state regs=r2 stack=:\ + mark_precise: frame0: regs=r2,r9 stack= before 25\ + mark_precise: frame0: regs=r2,r9 stack= before 24\ + mark_precise: frame0: regs=r2,r9 stack= before 23\ + mark_precise: frame0: regs=r2,r9 stack= before 22\ + mark_precise: frame0: regs=r2,r9 stack= before 20\ + mark_precise: frame0: parent state regs=r2,r9 stack=:\ mark_precise: frame0: last_idx 19 first_idx 10\ mark_precise: frame0: regs=r2,r9 stack= before 19\ mark_precise: frame0: regs=r9 stack= before 18\ @@ -100,11 +100,11 @@ .errstr = "26: (85) call bpf_probe_read_kernel#113\ mark_precise: frame0: last_idx 26 first_idx 22\ - mark_precise: frame0: regs=r2 stack= before 25\ - mark_precise: frame0: regs=r2 stack= before 24\ - mark_precise: frame0: regs=r2 stack= before 23\ - mark_precise: frame0: regs=r2 stack= before 22\ - mark_precise: frame0: parent state regs=r2 stack=:\ + mark_precise: frame0: regs=r2,r9 stack= before 25\ + mark_precise: frame0: regs=r2,r9 stack= before 24\ + mark_precise: frame0: regs=r2,r9 stack= before 23\ + mark_precise: frame0: regs=r2,r9 stack= before 22\ + mark_precise: frame0: parent state regs=r2,r9 stack=:\ mark_precise: frame0: last_idx 20 first_idx 20\ mark_precise: frame0: regs=r2,r9 stack= before 20\ mark_precise: frame0: parent state regs=r2,r9 stack=:\ diff --git a/tools/testing/selftests/bpf/xskxceiver.c b/tools/testing/selftests/bpf/xskxceiver.c index 2eac0895b0a1..8144fd145237 100644 --- a/tools/testing/selftests/bpf/xskxceiver.c +++ b/tools/testing/selftests/bpf/xskxceiver.c @@ -196,6 +196,12 @@ static int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem }; int ret; + if (umem->fill_size) + cfg.fill_size = umem->fill_size; + + if (umem->comp_size) + cfg.comp_size = 
umem->comp_size; + if (umem->unaligned_mode) cfg.flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG; @@ -265,6 +271,10 @@ static int __xsk_configure_socket(struct xsk_socket_info *xsk, struct xsk_umem_i cfg.bind_flags |= XDP_SHARED_UMEM; if (ifobject->mtu > MAX_ETH_PKT_SIZE) cfg.bind_flags |= XDP_USE_SG; + if (umem->comp_size) + cfg.tx_size = umem->comp_size; + if (umem->fill_size) + cfg.rx_size = umem->fill_size; txr = ifobject->tx_on ? &xsk->tx : NULL; rxr = ifobject->rx_on ? &xsk->rx : NULL; @@ -1616,7 +1626,7 @@ static void xsk_populate_fill_ring(struct xsk_umem_info *umem, struct pkt_stream if (umem->num_frames < XSK_RING_PROD__DEFAULT_NUM_DESCS) buffers_to_fill = umem->num_frames; else - buffers_to_fill = XSK_RING_PROD__DEFAULT_NUM_DESCS; + buffers_to_fill = umem->fill_size; ret = xsk_ring_prod__reserve(&umem->fq, buffers_to_fill, &idx); if (ret != buffers_to_fill) @@ -1899,11 +1909,15 @@ static int testapp_validate_traffic(struct test_spec *test) } if (test->set_ring) { - if (ifobj_tx->hw_ring_size_supp) - return set_ring_size(ifobj_tx); - - ksft_test_result_skip("Changing HW ring size not supported.\n"); - return TEST_SKIP; + if (ifobj_tx->hw_ring_size_supp) { + if (set_ring_size(ifobj_tx)) { + ksft_test_result_skip("Failed to change HW ring size.\n"); + return TEST_FAILURE; + } + } else { + ksft_test_result_skip("Changing HW ring size not supported.\n"); + return TEST_SKIP; + } } xsk_attach_xdp_progs(test, ifobj_rx, ifobj_tx); @@ -2441,7 +2455,7 @@ static int testapp_hw_sw_min_ring_size(struct test_spec *test) static int testapp_hw_sw_max_ring_size(struct test_spec *test) { - u32 max_descs = XSK_RING_PROD__DEFAULT_NUM_DESCS * 2; + u32 max_descs = XSK_RING_PROD__DEFAULT_NUM_DESCS * 4; int ret; test->set_ring = true; @@ -2449,7 +2463,8 @@ static int testapp_hw_sw_max_ring_size(struct test_spec *test) test->ifobj_tx->ring.tx_pending = test->ifobj_tx->ring.tx_max_pending; test->ifobj_tx->ring.rx_pending = test->ifobj_tx->ring.rx_max_pending; test->ifobj_rx->umem->num_frames = max_descs; - test->ifobj_rx->xsk->rxqsize = max_descs; + test->ifobj_rx->umem->fill_size = max_descs; + test->ifobj_rx->umem->comp_size = max_descs; test->ifobj_tx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; test->ifobj_rx->xsk->batch_size = XSK_RING_PROD__DEFAULT_NUM_DESCS; @@ -2457,9 +2472,12 @@ static int testapp_hw_sw_max_ring_size(struct test_spec *test) if (ret) return ret; - /* Set batch_size to 4095 */ - test->ifobj_tx->xsk->batch_size = max_descs - 1; - test->ifobj_rx->xsk->batch_size = max_descs - 1; + /* Set batch_size to 8152 for testing, as the ice HW ignores the 3 lowest bits when + * updating the Rx HW tail register. 
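+	 * (8152 = tx_max_pending - 8; with ice's 8160-descriptor maximum this
+	 * keeps the batch a multiple of 8, so every tail update is visible.)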
+	 */
+	test->ifobj_tx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
+	test->ifobj_rx->xsk->batch_size = test->ifobj_tx->ring.tx_max_pending - 8;
+
+	pkt_stream_replace(test, max_descs, MIN_PKT_SIZE);
return testapp_validate_traffic(test); }
diff --git a/tools/testing/selftests/bpf/xskxceiver.h b/tools/testing/selftests/bpf/xskxceiver.h
index 906de5fab7a3..885c948c5d83 100644
--- a/tools/testing/selftests/bpf/xskxceiver.h
+++ b/tools/testing/selftests/bpf/xskxceiver.h
@@ -80,6 +80,8 @@ struct xsk_umem_info { void *buffer; u32 frame_size; u32 base_addr;
+ u32 fill_size;
+ u32 comp_size;
bool unaligned_mode; };
diff --git a/tools/testing/selftests/drivers/net/hw/Makefile b/tools/testing/selftests/drivers/net/hw/Makefile
index 4933d045ab66..c9f2f48fc30f 100644
--- a/tools/testing/selftests/drivers/net/hw/Makefile
+++ b/tools/testing/selftests/drivers/net/hw/Makefile
@@ -11,6 +11,7 @@ TEST_PROGS = \ hw_stats_l3_gre.sh \ loopback.sh \ pp_alloc_fail.py \
+ rss_ctx.py \
#
TEST_FILES := \
diff --git a/tools/testing/selftests/drivers/net/hw/rss_ctx.py b/tools/testing/selftests/drivers/net/hw/rss_ctx.py
new file mode 100755
index 000000000000..931dbc36ca43
--- /dev/null
+++ b/tools/testing/selftests/drivers/net/hw/rss_ctx.py
@@ -0,0 +1,522 @@
+#!/usr/bin/env python3
+# SPDX-License-Identifier: GPL-2.0
+
+import datetime
+import random
+from lib.py import ksft_run, ksft_pr, ksft_exit, ksft_eq, ksft_ge, ksft_lt
+from lib.py import NetDrvEpEnv
+from lib.py import EthtoolFamily, NetdevFamily
+from lib.py import KsftSkipEx, KsftFailEx
+from lib.py import rand_port
+from lib.py import ethtool, ip, defer, GenerateTraffic, CmdExitFailure
+
+
+def _rss_key_str(key):
+ return ":".join(["{:02x}".format(x) for x in key])
+
+
+def _rss_key_rand(length):
+ return [random.randint(0, 255) for _ in range(length)]
+
+
+def get_rss(cfg, context=0):
+ return ethtool(f"-x {cfg.ifname} context {context}", json=True)[0]
+
+
+def get_drop_err_sum(cfg):
+ stats = ip("-s -s link show dev " + cfg.ifname, json=True)[0]
+ cnt = 0
+ for key in ['errors', 'dropped', 'over_errors', 'fifo_errors',
+ 'length_errors', 'crc_errors', 'missed_errors',
+ 'frame_errors']:
+ cnt += stats["stats64"]["rx"][key]
+ return cnt, stats["stats64"]["tx"]["carrier_changes"]
+
+
+def ethtool_create(cfg, act, opts):
+ output = ethtool(f"{act} {cfg.ifname} {opts}").stdout
+ # Output will be something like: "New RSS context is 1" or
+ # "Added rule with ID 7", we want the integer from the end
+ return int(output.split()[-1])
+
+
+def require_ntuple(cfg):
+ features = ethtool(f"-k {cfg.ifname}", json=True)[0]
+ if not features["ntuple-filters"]["active"]:
+ # ntuple is more of a capability than a config knob, don't bother
+ # trying to enable it (until some driver actually needs it).
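+ # Skipping (rather than failing) keeps the results meaningful on
+ # NICs that simply do not support ntuple filters.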
+ raise KsftSkipEx("Ntuple filters not enabled on the device: " + str(features["ntuple-filters"]))
+
+
+# Get Rx packet counts for all queues, as a simple list of integers
+# If @prev is specified, the prev counts will be subtracted
+def _get_rx_cnts(cfg, prev=None):
+ cfg.wait_hw_stats_settle()
+ data = cfg.netdevnl.qstats_get({"ifindex": cfg.ifindex, "scope": ["queue"]}, dump=True)
+ data = [x for x in data if x['queue-type'] == "rx"]
+ max_q = max([x["queue-id"] for x in data])
+ queue_stats = [0] * (max_q + 1)
+ for q in data:
+ queue_stats[q["queue-id"]] = q["rx-packets"]
+ if prev and q["queue-id"] < len(prev):
+ queue_stats[q["queue-id"]] -= prev[q["queue-id"]]
+ return queue_stats
+
+
+def _send_traffic_check(cfg, port, name, params):
+ # params is a dict with 3 possible keys:
+ # - "target": required, which queues we expect to get iperf traffic
+ # - "empty": optional, which queues should see no traffic at all
+ # - "noise": optional, which queues we expect to see low traffic;
+ # used for queues of the main context, since some background
+ # OS activity may use those queues while we're testing
+ # The value for each is a list, or some other iterable containing queue ids.
+
+ cnts = _get_rx_cnts(cfg)
+ GenerateTraffic(cfg, port=port).wait_pkts_and_stop(20000)
+ cnts = _get_rx_cnts(cfg, prev=cnts)
+
+ directed = sum(cnts[i] for i in params['target'])
+
+ ksft_ge(directed, 20000, f"traffic on {name}: " + str(cnts))
+ if params.get('noise'):
+ ksft_lt(sum(cnts[i] for i in params['noise']), directed / 2,
+ "traffic on other queues: " + str(cnts))
+ if params.get('empty'):
+ ksft_eq(sum(cnts[i] for i in params['empty']), 0,
+ "traffic on inactive queues: " + str(cnts))
+
+
+def test_rss_key_indir(cfg):
+ """Test basics like updating the main RSS key and indirection table."""
+
+ if len(_get_rx_cnts(cfg)) < 2:
+ raise KsftSkipEx("Device has only one queue (or doesn't support queue stats)")
+
+ data = get_rss(cfg)
+ want_keys = ['rss-hash-key', 'rss-hash-function', 'rss-indirection-table']
+ for k in want_keys:
+ if k not in data:
+ raise KsftFailEx("ethtool results missing key: " + k)
+ if not data[k]:
+ raise KsftFailEx(f"ethtool results empty for '{k}': {data[k]}")
+
+ key_len = len(data['rss-hash-key'])
+
+ # Set the key
+ key = _rss_key_rand(key_len)
+ ethtool(f"-X {cfg.ifname} hkey " + _rss_key_str(key))
+
+ data = get_rss(cfg)
+ ksft_eq(key, data['rss-hash-key'])
+
+ # Set the indirection table
+ ethtool(f"-X {cfg.ifname} equal 2")
+ reset_indir = defer(ethtool, f"-X {cfg.ifname} default")
+ data = get_rss(cfg)
+ ksft_eq(0, min(data['rss-indirection-table']))
+ ksft_eq(1, max(data['rss-indirection-table']))
+
+ # Check we only get traffic on the first 2 queues
+ cnts = _get_rx_cnts(cfg)
+ GenerateTraffic(cfg).wait_pkts_and_stop(20000)
+ cnts = _get_rx_cnts(cfg, prev=cnts)
+ # 2 queues, 20k packets, must be at least 5k per queue
+ ksft_ge(cnts[0], 5000, "traffic on main context (1/2): " + str(cnts))
+ ksft_ge(cnts[1], 5000, "traffic on main context (2/2): " + str(cnts))
+ # The other queues should be unused
+ ksft_eq(sum(cnts[2:]), 0, "traffic on unused queues: " + str(cnts))
+
+ # Restore, and check traffic gets spread again
+ reset_indir.exec()
+
+ cnts = _get_rx_cnts(cfg)
+ GenerateTraffic(cfg).wait_pkts_and_stop(20000)
+ cnts = _get_rx_cnts(cfg, prev=cnts)
+ # First two queues get less traffic than all the rest
+ ksft_lt(sum(cnts[:2]), sum(cnts[2:]), "traffic distributed: " + str(cnts))
+
+
+def test_rss_queue_reconfigure(cfg, main_ctx=True):
+ """Make sure queue changes can't override
requested RSS config. + + By default main RSS table should change to include all queues. + When user sets a specific RSS config the driver should preserve it, + even when queue count changes. Driver should refuse to deactivate + queues used in the user-set RSS config. + """ + + if not main_ctx: + require_ntuple(cfg) + + # Start with 4 queues, an arbitrary known number. + try: + qcnt = len(_get_rx_cnts(cfg)) + ethtool(f"-L {cfg.ifname} combined 4") + defer(ethtool, f"-L {cfg.ifname} combined {qcnt}") + except: + raise KsftSkipEx("Not enough queues for the test or qstat not supported") + + if main_ctx: + ctx_id = 0 + ctx_ref = "" + else: + ctx_id = ethtool_create(cfg, "-X", "context new") + ctx_ref = f"context {ctx_id}" + defer(ethtool, f"-X {cfg.ifname} {ctx_ref} delete") + + # Indirection table should be distributing to all queues. + data = get_rss(cfg, context=ctx_id) + ksft_eq(0, min(data['rss-indirection-table'])) + ksft_eq(3, max(data['rss-indirection-table'])) + + # Increase queues, indirection table should be distributing to all queues. + # It's unclear whether tables of additional contexts should be reset, too. + if main_ctx: + ethtool(f"-L {cfg.ifname} combined 5") + data = get_rss(cfg) + ksft_eq(0, min(data['rss-indirection-table'])) + ksft_eq(4, max(data['rss-indirection-table'])) + ethtool(f"-L {cfg.ifname} combined 4") + + # Configure the table explicitly + port = rand_port() + ethtool(f"-X {cfg.ifname} {ctx_ref} weight 1 0 0 1") + if main_ctx: + other_key = 'empty' + defer(ethtool, f"-X {cfg.ifname} default") + else: + other_key = 'noise' + flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {ctx_id}" + ntuple = ethtool_create(cfg, "-N", flow) + defer(ethtool, f"-N {cfg.ifname} delete {ntuple}") + + _send_traffic_check(cfg, port, ctx_ref, { 'target': (0, 3), + other_key: (1, 2) }) + + # We should be able to increase queues, but table should be left untouched + ethtool(f"-L {cfg.ifname} combined 5") + data = get_rss(cfg, context=ctx_id) + ksft_eq({0, 3}, set(data['rss-indirection-table'])) + + _send_traffic_check(cfg, port, ctx_ref, { 'target': (0, 3), + other_key: (1, 2, 4) }) + + # Setting queue count to 3 should fail, queue 3 is used + try: + ethtool(f"-L {cfg.ifname} combined 3") + except CmdExitFailure: + pass + else: + raise Exception(f"Driver didn't prevent us from deactivating a used queue (context {ctx_id})") + + +def test_rss_resize(cfg): + """Test resizing of the RSS table. + + Some devices dynamically increase and decrease the size of the RSS + indirection table based on the number of enabled queues. + When that happens driver must maintain the balance of entries + (preferably duplicating the smaller table). + """ + + channels = cfg.ethnl.channels_get({'header': {'dev-index': cfg.ifindex}}) + ch_max = channels['combined-max'] + qcnt = channels['combined-count'] + + if ch_max < 2: + raise KsftSkipEx(f"Not enough queues for the test: {ch_max}") + + ethtool(f"-L {cfg.ifname} combined 2") + defer(ethtool, f"-L {cfg.ifname} combined {qcnt}") + + ethtool(f"-X {cfg.ifname} weight 1 7") + defer(ethtool, f"-X {cfg.ifname} default") + + ethtool(f"-L {cfg.ifname} combined {ch_max}") + data = get_rss(cfg) + ksft_eq(0, min(data['rss-indirection-table'])) + ksft_eq(1, max(data['rss-indirection-table'])) + + ksft_eq(7, + data['rss-indirection-table'].count(1) / + data['rss-indirection-table'].count(0), + f"Table imbalance after resize: {data['rss-indirection-table']}") + + +def test_hitless_key_update(cfg): + """Test that flows may be rehashed without impacting traffic. 
+
+ Some workloads may want to rehash the flows in response to an imbalance.
+ The most effective way to do that is changing the RSS key. Check that changing
+ the key does not cause link flaps or traffic disruption.
+
+ Disrupting traffic for key update is not a bug, but makes the key
+ update unusable for rehashing under load.
+ """
+ data = get_rss(cfg)
+ key_len = len(data['rss-hash-key'])
+
+ key = _rss_key_rand(key_len)
+
+ tgen = GenerateTraffic(cfg)
+ try:
+ errors0, carrier0 = get_drop_err_sum(cfg)
+ t0 = datetime.datetime.now()
+ ethtool(f"-X {cfg.ifname} hkey " + _rss_key_str(key))
+ t1 = datetime.datetime.now()
+ errors1, carrier1 = get_drop_err_sum(cfg)
+ finally:
+ tgen.wait_pkts_and_stop(5000)
+
+ ksft_lt((t1 - t0).total_seconds(), 0.2)
+ ksft_eq(errors1 - errors0, 0)
+ ksft_eq(carrier1 - carrier0, 0)
+
+
+def test_rss_context(cfg, ctx_cnt=1, create_with_cfg=None):
+ """
+ Test separating traffic into RSS contexts.
+ The queues will be allocated 2 for each context:
+ ctx0 ctx1 ctx2 ctx3
+ [0 1] [2 3] [4 5] [6 7] ...
+ """
+
+ require_ntuple(cfg)
+
+ requested_ctx_cnt = ctx_cnt
+
+ # Try to allocate more queues when necessary
+ qcnt = len(_get_rx_cnts(cfg))
+ if qcnt < 2 + 2 * ctx_cnt:
+ try:
+ ksft_pr(f"Increasing queue count {qcnt} -> {2 + 2 * ctx_cnt}")
+ ethtool(f"-L {cfg.ifname} combined {2 + 2 * ctx_cnt}")
+ defer(ethtool, f"-L {cfg.ifname} combined {qcnt}")
+ except:
+ raise KsftSkipEx("Not enough queues for the test")
+
+ ports = []
+
+ # Use queues 0 and 1 for normal traffic
+ ethtool(f"-X {cfg.ifname} equal 2")
+ defer(ethtool, f"-X {cfg.ifname} default")
+
+ for i in range(ctx_cnt):
+ want_cfg = f"start {2 + i * 2} equal 2"
+ create_cfg = want_cfg if create_with_cfg else ""
+
+ try:
+ ctx_id = ethtool_create(cfg, "-X", f"context new {create_cfg}")
+ defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete")
+ except CmdExitFailure:
+ # try to carry on and skip at the end
+ if i == 0:
+ raise
+ ksft_pr(f"Failed to create context {i + 1}, trying to test what we got")
+ ctx_cnt = i
+ break
+
+ if not create_with_cfg:
+ ethtool(f"-X {cfg.ifname} context {ctx_id} {want_cfg}")
+
+ # Sanity check the context we just created
+ data = get_rss(cfg, ctx_id)
+ ksft_eq(min(data['rss-indirection-table']), 2 + i * 2, "Unexpected context cfg: " + str(data))
+ ksft_eq(max(data['rss-indirection-table']), 2 + i * 2 + 1, "Unexpected context cfg: " + str(data))
+
+ ports.append(rand_port())
+ flow = f"flow-type tcp{cfg.addr_ipver} dst-port {ports[i]} context {ctx_id}"
+ ntuple = ethtool_create(cfg, "-N", flow)
+ defer(ethtool, f"-N {cfg.ifname} delete {ntuple}")
+
+ for i in range(ctx_cnt):
+ _send_traffic_check(cfg, ports[i], f"context {i}",
+ { 'target': (2+i*2, 3+i*2),
+ 'noise': (0, 1),
+ 'empty': list(range(2, 2+i*2)) + list(range(4+i*2, 2+2*ctx_cnt)) })
+
+ if requested_ctx_cnt != ctx_cnt:
+ raise KsftSkipEx(f"Tested only {ctx_cnt} contexts, wanted {requested_ctx_cnt}")
+
+
+def test_rss_context4(cfg):
+ test_rss_context(cfg, 4)
+
+
+def test_rss_context32(cfg):
+ test_rss_context(cfg, 32)
+
+
+def test_rss_context4_create_with_cfg(cfg):
+ test_rss_context(cfg, 4, create_with_cfg=True)
+
+
+def test_rss_context_queue_reconfigure(cfg):
+ test_rss_queue_reconfigure(cfg, main_ctx=False)
+
+
+def test_rss_context_out_of_order(cfg, ctx_cnt=4):
+ """
+ Test separating traffic into RSS contexts.
+ Contexts are removed in semi-random order, and steering re-tested
+ to make sure removal doesn't break steering to surviving contexts.
+ Test requires 3 contexts to work.
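+ Queue layout matches test_rss_context: queues 0 and 1 carry the main
+ context's traffic, and context i owns the pair starting at queue 2 + 2*i.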
+ """ + + require_ntuple(cfg) + + requested_ctx_cnt = ctx_cnt + + # Try to allocate more queues when necessary + qcnt = len(_get_rx_cnts(cfg)) + if qcnt < 2 + 2 * ctx_cnt: + try: + ksft_pr(f"Increasing queue count {qcnt} -> {2 + 2 * ctx_cnt}") + ethtool(f"-L {cfg.ifname} combined {2 + 2 * ctx_cnt}") + defer(ethtool, f"-L {cfg.ifname} combined {qcnt}") + except: + raise KsftSkipEx("Not enough queues for the test") + + ntuple = [] + ctx = [] + ports = [] + + def remove_ctx(idx): + ntuple[idx].exec() + ntuple[idx] = None + ctx[idx].exec() + ctx[idx] = None + + def check_traffic(): + for i in range(ctx_cnt): + if ctx[i]: + expected = { + 'target': (2+i*2, 3+i*2), + 'noise': (0, 1), + 'empty': list(range(2, 2+i*2)) + list(range(4+i*2, 2+2*ctx_cnt)) + } + else: + expected = { + 'target': (0, 1), + 'empty': range(2, 2+2*ctx_cnt) + } + + _send_traffic_check(cfg, ports[i], f"context {i}", expected) + + # Use queues 0 and 1 for normal traffic + ethtool(f"-X {cfg.ifname} equal 2") + defer(ethtool, f"-X {cfg.ifname} default") + + for i in range(ctx_cnt): + ctx_id = ethtool_create(cfg, "-X", f"context new start {2 + i * 2} equal 2") + ctx.append(defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete")) + + ports.append(rand_port()) + flow = f"flow-type tcp{cfg.addr_ipver} dst-port {ports[i]} context {ctx_id}" + ntuple_id = ethtool_create(cfg, "-N", flow) + ntuple.append(defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}")) + + check_traffic() + + # Remove middle context + remove_ctx(ctx_cnt // 2) + check_traffic() + + # Remove first context + remove_ctx(0) + check_traffic() + + # Remove last context + remove_ctx(-1) + check_traffic() + + if requested_ctx_cnt != ctx_cnt: + raise KsftSkipEx(f"Tested only {ctx_cnt} contexts, wanted {requested_ctx_cnt}") + + +def test_rss_context_overlap(cfg, other_ctx=0): + """ + Test contexts overlapping with each other. + Use 4 queues for the main context, but only queues 2 and 3 for context 1. 
+ """ + + require_ntuple(cfg) + + queue_cnt = len(_get_rx_cnts(cfg)) + if queue_cnt < 4: + try: + ksft_pr(f"Increasing queue count {queue_cnt} -> 4") + ethtool(f"-L {cfg.ifname} combined 4") + defer(ethtool, f"-L {cfg.ifname} combined {queue_cnt}") + except: + raise KsftSkipEx("Not enough queues for the test") + + if other_ctx == 0: + ethtool(f"-X {cfg.ifname} equal 4") + defer(ethtool, f"-X {cfg.ifname} default") + else: + other_ctx = ethtool_create(cfg, "-X", "context new") + ethtool(f"-X {cfg.ifname} context {other_ctx} equal 4") + defer(ethtool, f"-X {cfg.ifname} context {other_ctx} delete") + + ctx_id = ethtool_create(cfg, "-X", "context new") + ethtool(f"-X {cfg.ifname} context {ctx_id} start 2 equal 2") + defer(ethtool, f"-X {cfg.ifname} context {ctx_id} delete") + + port = rand_port() + if other_ctx: + flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {other_ctx}" + ntuple_id = ethtool_create(cfg, "-N", flow) + ntuple = defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}") + + # Test the main context + cnts = _get_rx_cnts(cfg) + GenerateTraffic(cfg, port=port).wait_pkts_and_stop(20000) + cnts = _get_rx_cnts(cfg, prev=cnts) + + ksft_ge(sum(cnts[ :4]), 20000, "traffic on main context: " + str(cnts)) + ksft_ge(sum(cnts[ :2]), 7000, "traffic on main context (1/2): " + str(cnts)) + ksft_ge(sum(cnts[2:4]), 7000, "traffic on main context (2/2): " + str(cnts)) + if other_ctx == 0: + ksft_eq(sum(cnts[4: ]), 0, "traffic on other queues: " + str(cnts)) + + # Now create a rule for context 1 and make sure traffic goes to a subset + if other_ctx: + ntuple.exec() + flow = f"flow-type tcp{cfg.addr_ipver} dst-port {port} context {ctx_id}" + ntuple_id = ethtool_create(cfg, "-N", flow) + defer(ethtool, f"-N {cfg.ifname} delete {ntuple_id}") + + cnts = _get_rx_cnts(cfg) + GenerateTraffic(cfg, port=port).wait_pkts_and_stop(20000) + cnts = _get_rx_cnts(cfg, prev=cnts) + + directed = sum(cnts[2:4]) + ksft_lt(sum(cnts[ :2]), directed / 2, "traffic on main context: " + str(cnts)) + ksft_ge(directed, 20000, "traffic on extra context: " + str(cnts)) + if other_ctx == 0: + ksft_eq(sum(cnts[4: ]), 0, "traffic on other queues: " + str(cnts)) + + +def test_rss_context_overlap2(cfg): + test_rss_context_overlap(cfg, True) + + +def main() -> None: + with NetDrvEpEnv(__file__, nsim_test=False) as cfg: + cfg.ethnl = EthtoolFamily() + cfg.netdevnl = NetdevFamily() + + ksft_run([test_rss_key_indir, test_rss_queue_reconfigure, + test_rss_resize, test_hitless_key_update, + test_rss_context, test_rss_context4, test_rss_context32, + test_rss_context_queue_reconfigure, + test_rss_context_overlap, test_rss_context_overlap2, + test_rss_context_out_of_order, test_rss_context4_create_with_cfg], + args=(cfg, )) + ksft_exit() + + +if __name__ == "__main__": + main() diff --git a/tools/testing/selftests/drivers/net/lib/py/env.py b/tools/testing/selftests/drivers/net/lib/py/env.py index edcedd7bffab..a5e800b8f103 100644 --- a/tools/testing/selftests/drivers/net/lib/py/env.py +++ b/tools/testing/selftests/drivers/net/lib/py/env.py @@ -1,9 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 import os +import time from pathlib import Path from lib.py import KsftSkipEx, KsftXfailEx -from lib.py import cmd, ip +from lib.py import cmd, ethtool, ip from lib.py import NetNS, NetdevSimDev from .remote import Remote @@ -82,6 +83,8 @@ class NetDrvEpEnv: self.env = _load_env_file(src_path) + self._stats_settle_time = None + # Things we try to destroy self.remote = None # These are for local testing state @@ -222,3 +225,17 @@ class 
NetDrvEpEnv: if remote: if not self._require_cmd(comm, "remote"): raise KsftSkipEx("Test requires (remote) command: " + comm)
+
+ def wait_hw_stats_settle(self):
+ """
+ Wait for HW stats to become consistent; some devices DMA HW stats
+ periodically, so events won't be reflected until the next sync.
+ Good drivers will tell us via ethtool what their sync period is.
+ """
+ if self._stats_settle_time is None:
+ data = ethtool("-c " + self.ifname, json=True)[0]
+
+ self._stats_settle_time = 0.025 + \
+ data.get('stats-block-usecs', 0) / 1000 / 1000
+
+ time.sleep(self._stats_settle_time)
diff --git a/tools/testing/selftests/drivers/net/lib/py/load.py b/tools/testing/selftests/drivers/net/lib/py/load.py
index abdb677bdb1c..d9c10613ae67 100644
--- a/tools/testing/selftests/drivers/net/lib/py/load.py
+++ b/tools/testing/selftests/drivers/net/lib/py/load.py
@@ -5,28 +5,45 @@ import time from lib.py import ksft_pr, cmd, ip, rand_port, wait_port_listen class GenerateTraffic:
- def __init__(self, env):
+ def __init__(self, env, port=None):
env.require_cmd("iperf3", remote=True) self.env = env
- port = rand_port()
- self._iperf_server = cmd(f"iperf3 -s -p {port}", background=True)
+ if port is None:
+ port = rand_port()
+ self._iperf_server = cmd(f"iperf3 -s -1 -p {port}", background=True)
wait_port_listen(port) time.sleep(0.1) self._iperf_client = cmd(f"iperf3 -c {env.addr} -P 16 -p {port} -t 86400", background=True, host=env.remote)
# Wait for traffic to ramp up
- pkt = ip("-s link show dev " + env.ifname, json=True)[0]["stats64"]["rx"]["packets"]
+ if not self._wait_pkts(pps=1000):
+ self.stop(verbose=True)
+ raise Exception("iperf3 traffic did not ramp up")
+
+ def _wait_pkts(self, pkt_cnt=None, pps=None):
+ """
+ Wait until we've seen pkt_cnt or until traffic ramps up to pps.
+ Only one of pkt_cnt or pps can be specified.
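+ Polls the interface's rx packet counter every 100ms, giving up after
+ 50 polls (roughly five seconds).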
+ """ + pkt_start = ip("-s link show dev " + self.env.ifname, json=True)[0]["stats64"]["rx"]["packets"] for _ in range(50): time.sleep(0.1) - now = ip("-s link show dev " + env.ifname, json=True)[0]["stats64"]["rx"]["packets"] - if now - pkt > 1000: - return - pkt = now - self.stop(verbose=True) - raise Exception("iperf3 traffic did not ramp up") + pkt_now = ip("-s link show dev " + self.env.ifname, json=True)[0]["stats64"]["rx"]["packets"] + if pps: + if pkt_now - pkt_start > pps / 10: + return True + pkt_start = pkt_now + elif pkt_cnt: + if pkt_now - pkt_start > pkt_cnt: + return True + return False + + def wait_pkts_and_stop(self, pkt_cnt): + failed = not self._wait_pkts(pkt_cnt=pkt_cnt) + self.stop(verbose=failed) def stop(self, verbose=None): self._iperf_client.process(terminate=True) diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh index 76f1ab4898d9..e1ad623146d7 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre.sh @@ -15,6 +15,13 @@ source $lib_dir/mirror_lib.sh source $lib_dir/mirror_gre_lib.sh source $lib_dir/mirror_gre_topo_lib.sh +ALL_TESTS=" + test_keyful + test_soft + test_tos_fixed + test_ttl_inherit +" + setup_keyful() { tunnel_create gt6-key ip6gretap 2001:db8:3::1 2001:db8:3::2 \ @@ -118,15 +125,15 @@ test_span_gre_ttl_inherit() RET=0 ip link set dev $tundev type $type ttl inherit - mirror_install $swp1 ingress $tundev "matchall $tcflags" - fail_test_span_gre_dir $tundev ingress + mirror_install $swp1 ingress $tundev "matchall" + fail_test_span_gre_dir $tundev ip link set dev $tundev type $type ttl 100 - quick_test_span_gre_dir $tundev ingress + quick_test_span_gre_dir $tundev mirror_uninstall $swp1 ingress - log_test "$what: no offload on TTL of inherit ($tcflags)" + log_test "$what: no offload on TTL of inherit" } test_span_gre_tos_fixed() @@ -138,61 +145,49 @@ test_span_gre_tos_fixed() RET=0 ip link set dev $tundev type $type tos 0x10 - mirror_install $swp1 ingress $tundev "matchall $tcflags" - fail_test_span_gre_dir $tundev ingress + mirror_install $swp1 ingress $tundev "matchall" + fail_test_span_gre_dir $tundev ip link set dev $tundev type $type tos inherit - quick_test_span_gre_dir $tundev ingress + quick_test_span_gre_dir $tundev mirror_uninstall $swp1 ingress - log_test "$what: no offload on a fixed TOS ($tcflags)" + log_test "$what: no offload on a fixed TOS" } test_span_failable() { - local should_fail=$1; shift local tundev=$1; shift local what=$1; shift RET=0 - mirror_install $swp1 ingress $tundev "matchall $tcflags" - if ((should_fail)); then - fail_test_span_gre_dir $tundev ingress - else - quick_test_span_gre_dir $tundev ingress - fi + mirror_install $swp1 ingress $tundev "matchall" + fail_test_span_gre_dir $tundev mirror_uninstall $swp1 ingress - log_test "$what: should_fail=$should_fail ($tcflags)" + log_test "fail $what" } -test_failable() +test_keyful() { - local should_fail=$1; shift - - test_span_failable $should_fail gt6-key "mirror to keyful gretap" - test_span_failable $should_fail gt6-soft "mirror to gretap w/ soft underlay" + test_span_failable gt6-key "mirror to keyful gretap" } -test_sw() +test_soft() { - slow_path_trap_install $swp1 ingress - slow_path_trap_install $swp1 egress - - test_failable 0 - - slow_path_trap_uninstall $swp1 egress - slow_path_trap_uninstall $swp1 ingress + test_span_failable gt6-soft "mirror to gretap w/ soft underlay" } -test_hw() +test_tos_fixed() { - 
test_failable 1 - test_span_gre_tos_fixed gt4 gretap "mirror to gretap" test_span_gre_tos_fixed gt6 ip6gretap "mirror to ip6gretap" +} + +test_ttl_inherit() +{ test_span_gre_ttl_inherit gt4 gretap "mirror to gretap" test_span_gre_ttl_inherit gt6 ip6gretap "mirror to ip6gretap" } @@ -202,16 +197,6 @@ trap cleanup EXIT setup_prepare setup_wait -if ! tc_offload_check; then - check_err 1 "Could not test offloaded functionality" - log_test "mlxsw-specific tests for mirror to gretap" - exit -fi - -tcflags="skip_hw" -test_sw - -tcflags="skip_sw" -test_hw +tests_run exit $EXIT_STATUS diff --git a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh index e5589e2fca85..d43093310e23 100644 --- a/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/mirror_gre_scale.sh @@ -79,7 +79,7 @@ mirror_gre_tunnels_create() cat >> $MIRROR_GRE_BATCH_FILE <<-EOF filter add dev $swp1 ingress pref 1000 \ protocol ipv6 \ - flower $tcflags dst_ip $match_dip \ + flower skip_sw dst_ip $match_dip \ action mirred egress mirror dev $tun EOF done @@ -107,7 +107,7 @@ mirror_gre_tunnels_destroy() done } -__mirror_gre_test() +mirror_gre_test() { local count=$1; shift local should_fail=$1; shift @@ -131,20 +131,6 @@ __mirror_gre_test() done } -mirror_gre_test() -{ - local count=$1; shift - local should_fail=$1; shift - - if ! tc_offload_check $TC_FLOWER_NUM_NETIFS; then - check_err 1 "Could not test offloaded functionality" - return - fi - - tcflags="skip_sw" - __mirror_gre_test $count $should_fail -} - mirror_gre_setup_prepare() { h1=${NETIFS[p1]} diff --git a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh index 31252bc8775e..4994bea5daf8 100755 --- a/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh +++ b/tools/testing/selftests/drivers/net/mlxsw/spectrum-2/tc_flower.sh @@ -11,7 +11,7 @@ ALL_TESTS="single_mask_test identical_filters_test two_masks_test \ multiple_masks_test ctcam_edge_cases_test delta_simple_test \ delta_two_masks_one_key_test delta_simple_rehash_test \ bloom_simple_test bloom_complex_test bloom_delta_test \ - max_erp_entries_test max_group_size_test" + max_erp_entries_test max_group_size_test collision_test" NUM_NETIFS=2 source $lib_dir/lib.sh source $lib_dir/tc_common.sh @@ -457,7 +457,7 @@ delta_two_masks_one_key_test() { # If 2 keys are the same and only differ in mask in a way that # they belong under the same ERP (second is delta of the first), - # there should be no C-TCAM spill. + # there should be C-TCAM spill. RET=0 @@ -474,8 +474,8 @@ delta_two_masks_one_key_test() tp_record "mlxsw:*" "tc filter add dev $h2 ingress protocol ip \ pref 2 handle 102 flower $tcflags dst_ip 192.0.2.2 \ action drop" - tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 0 - check_err $? "incorrect C-TCAM spill while inserting the second rule" + tp_check_hits "mlxsw:mlxsw_sp_acl_atcam_entry_add_ctcam_spill" 1 + check_err $? "C-TCAM spill did not happen while inserting the second rule" $MZ $h1 -c 1 -p 64 -a $h1mac -b $h2mac -A 192.0.2.1 -B 192.0.2.2 \ -t ip -q @@ -1087,6 +1087,53 @@ max_group_size_test() log_test "max ACL group size test ($tcflags). max size $max_size" } +collision_test() +{ + # Filters cannot share an eRP if in the common unmasked part (i.e., + # without the delta bits) they have the same values. 
If the driver does + # not prevent such configuration (by spilling into the C-TCAM), then + # multiple entries will be present in the device with the same key, + # leading to collisions and a reduced scale. + # + # Create such a scenario and make sure all the filters are successfully + # added. + + RET=0 + + local ret + + if [[ "$tcflags" != "skip_sw" ]]; then + return 0; + fi + + # Add a single dst_ip/24 filter and multiple dst_ip/32 filters that all + # have the same values in the common unmasked part (dst_ip/24). + + tc filter add dev $h2 ingress pref 1 proto ipv4 handle 101 \ + flower $tcflags dst_ip 198.51.100.0/24 \ + action drop + + for i in {0..255}; do + tc filter add dev $h2 ingress pref 2 proto ipv4 \ + handle $((102 + i)) \ + flower $tcflags dst_ip 198.51.100.${i}/32 \ + action drop + ret=$? + [[ $ret -ne 0 ]] && break + done + + check_err $ret "failed to add all the filters" + + for i in {255..0}; do + tc filter del dev $h2 ingress pref 2 proto ipv4 \ + handle $((102 + i)) flower + done + + tc filter del dev $h2 ingress pref 1 proto ipv4 handle 101 flower + + log_test "collision test ($tcflags)" +} + setup_prepare() { h1=${NETIFS[p1]} diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile index d9393569d03a..bc3925200637 100644 --- a/tools/testing/selftests/net/Makefile +++ b/tools/testing/selftests/net/Makefile @@ -55,6 +55,7 @@ TEST_PROGS += bind_bhash.sh TEST_PROGS += ip_local_port_range.sh TEST_PROGS += rps_default_mask.sh TEST_PROGS += big_tcp.sh +TEST_PROGS += netns-sysctl.sh TEST_PROGS_EXTENDED := toeplitz_client.sh toeplitz.sh TEST_GEN_FILES = socket nettest TEST_GEN_FILES += psock_fanout psock_tpacket msg_zerocopy reuseport_addr_any diff --git a/tools/testing/selftests/net/amt.sh b/tools/testing/selftests/net/amt.sh index 7e7ed6c558da..d458b45c775b 100755 --- a/tools/testing/selftests/net/amt.sh +++ b/tools/testing/selftests/net/amt.sh @@ -1,4 +1,4 @@ -#!/bin/sh +#!/bin/bash # SPDX-License-Identifier: GPL-2.0 # Author: Taehee Yoo diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config index d4891f7a2bfa..5b9baf708950 100644 --- a/tools/testing/selftests/net/config +++ b/tools/testing/selftests/net/config @@ -26,7 +26,6 @@ CONFIG_INET_ESP=y CONFIG_INET_ESP_OFFLOAD=y CONFIG_NET_FOU=y CONFIG_NET_FOU_IP_TUNNELS=y -CONFIG_IP_GRE=m CONFIG_NETFILTER=y CONFIG_NETFILTER_ADVANCED=y CONFIG_NF_CONNTRACK=m @@ -75,7 +74,12 @@ CONFIG_NET_SCH_ETF=m CONFIG_NET_SCH_NETEM=y CONFIG_NET_SCH_PRIO=m CONFIG_NFT_COMPAT=m +CONFIG_NF_CONNTRACK_OVS=y CONFIG_NF_FLOW_TABLE=m +CONFIG_OPENVSWITCH=m +CONFIG_OPENVSWITCH_GENEVE=m +CONFIG_OPENVSWITCH_GRE=m +CONFIG_OPENVSWITCH_VXLAN=m CONFIG_PSAMPLE=m CONFIG_TCP_MD5SIG=y CONFIG_TEST_BLACKHOLE_DEV=m diff --git a/tools/testing/selftests/net/forwarding/Makefile b/tools/testing/selftests/net/forwarding/Makefile index fa7b59ff4029..224346426ef2 100644 --- a/tools/testing/selftests/net/forwarding/Makefile +++ b/tools/testing/selftests/net/forwarding/Makefile @@ -39,6 +39,7 @@ TEST_PROGS = bridge_fdb_learning_limit.sh \ ipip_hier_gre.sh \ lib_sh_test.sh \ local_termination.sh \ + min_max_mtu.sh \ mirror_gre_bound.sh \ mirror_gre_bridge_1d.sh \ mirror_gre_bridge_1d_vlan.sh \ @@ -70,6 +71,7 @@ TEST_PROGS = bridge_fdb_learning_limit.sh \ router_broadcast.sh \ router_mpath_nh_res.sh \ router_mpath_nh.sh \ + router_mpath_seed.sh \ router_multicast.sh \ router_multipath.sh \ router_nh.sh \ diff --git a/tools/testing/selftests/net/forwarding/devlink_lib.sh 
b/tools/testing/selftests/net/forwarding/devlink_lib.sh index f1de525cfa55..62a05bca1e82 100644 --- a/tools/testing/selftests/net/forwarding/devlink_lib.sh +++ b/tools/testing/selftests/net/forwarding/devlink_lib.sh @@ -122,6 +122,8 @@ devlink_reload() still_pending=$(devlink resource show "$DEVLINK_DEV" | \ grep -c "size_new") check_err $still_pending "Failed reload - There are still unset sizes" + + udevadm settle } declare -A DEVLINK_ORIG diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index eabbdf00d8ca..ff96bb7535ff 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -1134,12 +1134,19 @@ bridge_ageing_time_get() } declare -A SYSCTL_ORIG +sysctl_save() +{ + local key=$1; shift + + SYSCTL_ORIG[$key]=$(sysctl -n $key) +} + sysctl_set() { local key=$1; shift local value=$1; shift - SYSCTL_ORIG[$key]=$(sysctl -n $key) + sysctl_save "$key" sysctl -qw $key="$value" } @@ -1218,22 +1225,6 @@ trap_uninstall() tc filter del dev $dev $direction pref 1 flower } -slow_path_trap_install() -{ - # For slow-path testing, we need to install a trap to get to - # slow path the packets that would otherwise be switched in HW. - if [ "${tcflags/skip_hw}" != "$tcflags" ]; then - trap_install "$@" - fi -} - -slow_path_trap_uninstall() -{ - if [ "${tcflags/skip_hw}" != "$tcflags" ]; then - trap_uninstall "$@" - fi -} - __icmp_capture_add_del() { local add_del=$1; shift @@ -1250,22 +1241,34 @@ __icmp_capture_add_del() icmp_capture_install() { - __icmp_capture_add_del add 100 "" "$@" + local tundev=$1; shift + local filter=$1; shift + + __icmp_capture_add_del add 100 "" "$tundev" "$filter" } icmp_capture_uninstall() { - __icmp_capture_add_del del 100 "" "$@" + local tundev=$1; shift + local filter=$1; shift + + __icmp_capture_add_del del 100 "" "$tundev" "$filter" } icmp6_capture_install() { - __icmp_capture_add_del add 100 v6 "$@" + local tundev=$1; shift + local filter=$1; shift + + __icmp_capture_add_del add 100 v6 "$tundev" "$filter" } icmp6_capture_uninstall() { - __icmp_capture_add_del del 100 v6 "$@" + local tundev=$1; shift + local filter=$1; shift + + __icmp_capture_add_del del 100 v6 "$tundev" "$filter" } __vlan_capture_add_del() @@ -1283,12 +1286,18 @@ __vlan_capture_add_del() vlan_capture_install() { - __vlan_capture_add_del add 100 "$@" + local dev=$1; shift + local filter=$1; shift + + __vlan_capture_add_del add 100 "$dev" "$filter" } vlan_capture_uninstall() { - __vlan_capture_add_del del 100 "$@" + local dev=$1; shift + local filter=$1; shift + + __vlan_capture_add_del del 100 "$dev" "$filter" } __dscp_capture_add_del() @@ -1648,34 +1657,61 @@ __start_traffic() local sip=$1; shift local dip=$1; shift local dmac=$1; shift + local -a mz_args=("$@") $MZ $h_in -p $pktsize -A $sip -B $dip -c 0 \ - -a own -b $dmac -t "$proto" -q "$@" & + -a own -b $dmac -t "$proto" -q "${mz_args[@]}" & sleep 1 } start_traffic_pktsize() { local pktsize=$1; shift + local h_in=$1; shift + local sip=$1; shift + local dip=$1; shift + local dmac=$1; shift + local -a mz_args=("$@") - __start_traffic $pktsize udp "$@" + __start_traffic $pktsize udp "$h_in" "$sip" "$dip" "$dmac" \ + "${mz_args[@]}" } start_tcp_traffic_pktsize() { local pktsize=$1; shift + local h_in=$1; shift + local sip=$1; shift + local dip=$1; shift + local dmac=$1; shift + local -a mz_args=("$@") - __start_traffic $pktsize tcp "$@" + __start_traffic $pktsize tcp "$h_in" "$sip" "$dip" "$dmac" \ + "${mz_args[@]}" } start_traffic() { - 
start_traffic_pktsize 8000 "$@" + local h_in=$1; shift + local sip=$1; shift + local dip=$1; shift + local dmac=$1; shift + local -a mz_args=("$@") + + start_traffic_pktsize 8000 "$h_in" "$sip" "$dip" "$dmac" \ + "${mz_args[@]}" } start_tcp_traffic() { - start_tcp_traffic_pktsize 8000 "$@" + local h_in=$1; shift + local sip=$1; shift + local dip=$1; shift + local dmac=$1; shift + local -a mz_args=("$@") + + start_tcp_traffic_pktsize 8000 "$h_in" "$sip" "$dip" "$dmac" \ + "${mz_args[@]}" } stop_traffic() diff --git a/tools/testing/selftests/net/forwarding/min_max_mtu.sh b/tools/testing/selftests/net/forwarding/min_max_mtu.sh new file mode 100755 index 000000000000..97bb8b221bed --- /dev/null +++ b/tools/testing/selftests/net/forwarding/min_max_mtu.sh @@ -0,0 +1,283 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# +--------------------+ +# | H1 | +# | | +# | $h1.10 + | +# | 192.0.2.2/24 | | +# | 2001:db8:1::2/64 | | +# | | | +# | $h1 + | +# | | | +# +------------------|-+ +# | +# +------------------|-+ +# | SW | | +# | $swp1 + | +# | | | +# | $swp1.10 + | +# | 192.0.2.1/24 | +# | 2001:db8:1::1/64 | +# | | +# +--------------------+ + +ALL_TESTS=" + ping_ipv4 + ping_ipv6 + max_mtu_config_test + max_mtu_traffic_test + min_mtu_config_test + min_mtu_traffic_test +" + +NUM_NETIFS=2 +source lib.sh + +h1_create() +{ + simple_if_init $h1 + vlan_create $h1 10 v$h1 192.0.2.2/24 2001:db8:1::2/64 +} + +h1_destroy() +{ + vlan_destroy $h1 10 192.0.2.2/24 2001:db8:1::2/64 + simple_if_fini $h1 +} + +switch_create() +{ + ip li set dev $swp1 up + vlan_create $swp1 10 "" 192.0.2.1/24 2001:db8:1::1/64 +} + +switch_destroy() +{ + ip li set dev $swp1 down + vlan_destroy $swp1 10 +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + swp1=${NETIFS[p2]} + + vrf_prepare + + h1_create + + switch_create + + forwarding_enable +} + +cleanup() +{ + pre_cleanup + + forwarding_restore + + switch_destroy + + h1_destroy + + vrf_cleanup +} + +ping_ipv4() +{ + ping_test $h1.10 192.0.2.1 +} + +ping_ipv6() +{ + ping6_test $h1.10 2001:db8:1::1 +} + +min_max_mtu_get_if() +{ + local dev=$1; shift + local min_max=$1; shift + + ip -d -j link show $dev | jq ".[].$min_max" +} + +ensure_compatible_min_max_mtu() +{ + local min_max=$1; shift + + local mtu=$(min_max_mtu_get_if ${NETIFS[p1]} $min_max) + local i + + for ((i = 2; i <= NUM_NETIFS; ++i)); do + local current_mtu=$(min_max_mtu_get_if ${NETIFS[p$i]} $min_max) + + if [ $current_mtu -ne $mtu ]; then + return 1 + fi + done +} + +mtu_set_if() +{ + local dev=$1; shift + local mtu=$1; shift + local should_fail=${1:-0}; shift + + mtu_set $dev $mtu 2>/dev/null + check_err_fail $should_fail $? "Set MTU $mtu for $dev" +} + +mtu_set_all_if() +{ + local mtu=$1; shift + local i + + for ((i = 1; i <= NUM_NETIFS; ++i)); do + mtu_set_if ${NETIFS[p$i]} $mtu + mtu_set_if ${NETIFS[p$i]}.10 $mtu + done +} + +mtu_restore_all_if() +{ + local i + + for ((i = 1; i <= NUM_NETIFS; ++i)); do + mtu_restore ${NETIFS[p$i]}.10 + mtu_restore ${NETIFS[p$i]} + done +} + +mtu_test_ping4() +{ + local mtu=$1; shift + local should_fail=$1; shift + + # Ping adds 8 bytes for ICMP header and 20 bytes for IP header + local ping_headers_len=$((20 + 8)) + local pkt_size=$((mtu - ping_headers_len)) + + ping_do $h1.10 192.0.2.1 "-s $pkt_size -M do" + check_err_fail $should_fail $? 
"Ping, packet size: $pkt_size" +} + +mtu_test_ping6() +{ + local mtu=$1; shift + local should_fail=$1; shift + + # Ping adds 8 bytes for ICMP header and 40 bytes for IPv6 header + local ping6_headers_len=$((40 + 8)) + local pkt_size=$((mtu - ping6_headers_len)) + + ping6_do $h1.10 2001:db8:1::1 "-s $pkt_size -M do" + check_err_fail $should_fail $? "Ping6, packet size: $pkt_size" +} + +max_mtu_config_test() +{ + local i + + RET=0 + + for ((i = 1; i <= NUM_NETIFS; ++i)); do + local dev=${NETIFS[p$i]} + local max_mtu=$(min_max_mtu_get_if $dev "max_mtu") + local should_fail + + should_fail=0 + mtu_set_if $dev $max_mtu $should_fail + mtu_restore $dev + + should_fail=1 + mtu_set_if $dev $((max_mtu + 1)) $should_fail + mtu_restore $dev + done + + log_test "Test maximum MTU configuration" +} + +max_mtu_traffic_test() +{ + local should_fail + local max_mtu + + RET=0 + + if ! ensure_compatible_min_max_mtu "max_mtu"; then + log_test_xfail "Topology has incompatible maximum MTU values" + return + fi + + max_mtu=$(min_max_mtu_get_if ${NETIFS[p1]} "max_mtu") + + should_fail=0 + mtu_set_all_if $max_mtu + mtu_test_ping4 $max_mtu $should_fail + mtu_test_ping6 $max_mtu $should_fail + mtu_restore_all_if + + should_fail=1 + mtu_set_all_if $((max_mtu - 1)) + mtu_test_ping4 $max_mtu $should_fail + mtu_test_ping6 $max_mtu $should_fail + mtu_restore_all_if + + log_test "Test traffic, packet size is maximum MTU" +} + +min_mtu_config_test() +{ + local i + + RET=0 + + for ((i = 1; i <= NUM_NETIFS; ++i)); do + local dev=${NETIFS[p$i]} + local min_mtu=$(min_max_mtu_get_if $dev "min_mtu") + local should_fail + + should_fail=0 + mtu_set_if $dev $min_mtu $should_fail + mtu_restore $dev + + should_fail=1 + mtu_set_if $dev $((min_mtu - 1)) $should_fail + mtu_restore $dev + done + + log_test "Test minimum MTU configuration" +} + +min_mtu_traffic_test() +{ + local should_fail=0 + local min_mtu + + RET=0 + + if ! ensure_compatible_min_max_mtu "min_mtu"; then + log_test_xfail "Topology has incompatible minimum MTU values" + return + fi + + min_mtu=$(min_max_mtu_get_if ${NETIFS[p1]} "min_mtu") + mtu_set_all_if $min_mtu + mtu_test_ping4 $min_mtu $should_fail + # Do not test minimum MTU with IPv6, as IPv6 requires higher MTU. 
+ + mtu_restore_all_if + + log_test "Test traffic, packet size is minimum MTU" +} + +trap cleanup EXIT + +setup_prepare +setup_wait + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/mirror_gre.sh b/tools/testing/selftests/net/forwarding/mirror_gre.sh index 0266443601bc..921c733ee04f 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre.sh @@ -74,7 +74,7 @@ test_span_gre_mac() RET=0 - mirror_install $swp1 $direction $tundev "matchall $tcflags" + mirror_install $swp1 $direction $tundev "matchall" icmp_capture_install h3-${tundev} "src_mac $src_mac dst_mac $dst_mac" mirror_test v$h1 192.0.2.1 192.0.2.2 h3-${tundev} 100 10 @@ -82,29 +82,29 @@ test_span_gre_mac() icmp_capture_uninstall h3-${tundev} mirror_uninstall $swp1 $direction - log_test "$direction $what: envelope MAC ($tcflags)" + log_test "$direction $what: envelope MAC" } test_two_spans() { RET=0 - mirror_install $swp1 ingress gt4 "matchall $tcflags" - mirror_install $swp1 egress gt6 "matchall $tcflags" - quick_test_span_gre_dir gt4 ingress - quick_test_span_gre_dir gt6 egress + mirror_install $swp1 ingress gt4 "matchall" + mirror_install $swp1 egress gt6 "matchall" + quick_test_span_gre_dir gt4 8 0 + quick_test_span_gre_dir gt6 0 8 mirror_uninstall $swp1 ingress - fail_test_span_gre_dir gt4 ingress - quick_test_span_gre_dir gt6 egress + fail_test_span_gre_dir gt4 8 0 + quick_test_span_gre_dir gt6 0 8 - mirror_install $swp1 ingress gt4 "matchall $tcflags" + mirror_install $swp1 ingress gt4 "matchall" mirror_uninstall $swp1 egress - quick_test_span_gre_dir gt4 ingress - fail_test_span_gre_dir gt6 egress + quick_test_span_gre_dir gt4 8 0 + fail_test_span_gre_dir gt6 0 8 mirror_uninstall $swp1 ingress - log_test "two simultaneously configured mirrors ($tcflags)" + log_test "two simultaneously configured mirrors" } test_gretap() @@ -131,30 +131,11 @@ test_ip6gretap_mac() test_span_gre_mac gt6 egress "mirror to ip6gretap" } -test_all() -{ - slow_path_trap_install $swp1 ingress - slow_path_trap_install $swp1 egress - - tests_run - - slow_path_trap_uninstall $swp1 egress - slow_path_trap_uninstall $swp1 ingress -} - trap cleanup EXIT setup_prepare setup_wait -tcflags="skip_hw" -test_all - -if ! tc_offload_check; then - echo "WARN: Could not test offloaded functionality" -else - tcflags="skip_sw" - test_all -fi +tests_run exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh index 6c257ec03756..e3cd48e18eeb 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_bound.sh @@ -196,32 +196,11 @@ test_ip6gretap() full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap w/ UL" } -test_all() -{ - RET=0 - - slow_path_trap_install $swp1 ingress - slow_path_trap_install $swp1 egress - - tests_run - - slow_path_trap_uninstall $swp1 egress - slow_path_trap_uninstall $swp1 ingress -} - trap cleanup EXIT setup_prepare setup_wait -tcflags="skip_hw" -test_all - -if ! 
tc_offload_check; then - echo "WARN: Could not test offloaded functionality" -else - tcflags="skip_sw" - test_all -fi +tests_run exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh index 04fd14b0a9b7..6c7bd33332c2 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d.sh @@ -108,30 +108,11 @@ test_ip6gretap() full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap" } -test_all() -{ - slow_path_trap_install $swp1 ingress - slow_path_trap_install $swp1 egress - - tests_run - - slow_path_trap_uninstall $swp1 egress - slow_path_trap_uninstall $swp1 ingress -} - trap cleanup EXIT setup_prepare setup_wait -tcflags="skip_hw" -test_all - -if ! tc_offload_check; then - echo "WARN: Could not test offloaded functionality" -else - tcflags="skip_sw" - test_all -fi +tests_run exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh index f35313c76fac..909ec956a5e5 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1d_vlan.sh @@ -104,30 +104,11 @@ test_ip6gretap_stp() full_test_span_gre_stp gt6 $swp3.555 "mirror to ip6gretap" } -test_all() -{ - slow_path_trap_install $swp1 ingress - slow_path_trap_install $swp1 egress - - tests_run - - slow_path_trap_uninstall $swp1 egress - slow_path_trap_uninstall $swp1 ingress -} - trap cleanup EXIT setup_prepare setup_wait -tcflags="skip_hw" -test_all - -if ! tc_offload_check; then - echo "WARN: Could not test offloaded functionality" -else - tcflags="skip_sw" - test_all -fi +tests_run exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh index 0cf4c47a46f9..40ac9dd3aff1 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q.sh @@ -104,30 +104,11 @@ test_ip6gretap() full_test_span_gre_dir gt6 egress 0 8 "mirror to ip6gretap" } -tests() -{ - slow_path_trap_install $swp1 ingress - slow_path_trap_install $swp1 egress - - tests_run - - slow_path_trap_uninstall $swp1 egress - slow_path_trap_uninstall $swp1 ingress -} - trap cleanup EXIT setup_prepare setup_wait -tcflags="skip_hw" -tests - -if ! tc_offload_check; then - echo "WARN: Could not test offloaded functionality" -else - tcflags="skip_sw" - tests -fi +tests_run exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh index c53148b1dc63..fe4d7c906a70 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_bridge_1q_lag.sh @@ -227,10 +227,10 @@ test_lag_slave() RET=0 tc filter add dev $swp1 ingress pref 999 \ - proto 802.1q flower vlan_ethtype arp $tcflags \ + proto 802.1q flower vlan_ethtype arp \ action pass mirror_install $swp1 ingress gt4 \ - "proto 802.1q flower vlan_id 333 $tcflags" + "proto 802.1q flower vlan_id 333" # Test connectivity through $up_dev when $down_dev is set down. 
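	# (Note on the mirror_test expectation used below; an annotation,
	# not part of the patch: with the mirror_lib.sh change later in
	# this series, a bare numeric count N means "exactly N packets",
	# while a quoted comparison is applied verbatim. Both call styles
	# in a sketch:
	#
	#   mirror_test vrf-h1 192.0.2.1 192.0.2.18 $host_dev 1 10        # delta == 10
	#   mirror_test vrf-h1 192.0.2.1 192.0.2.18 $host_dev 1 ">= 10"   # delta >= 10
	# )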
ip link set dev $down_dev down @@ -239,7 +239,7 @@ test_lag_slave() setup_wait_dev $host_dev $ARPING -I br1 192.0.2.130 -qfc 1 sleep 2 - mirror_test vrf-h1 192.0.2.1 192.0.2.18 $host_dev 1 10 + mirror_test vrf-h1 192.0.2.1 192.0.2.18 $host_dev 1 ">= 10" # Test lack of connectivity when both slaves are down. ip link set dev $up_dev down @@ -252,7 +252,7 @@ test_lag_slave() mirror_uninstall $swp1 ingress tc filter del dev $swp1 ingress pref 999 - log_test "$what ($tcflags)" + log_test "$what" } test_mirror_gretap_first() @@ -265,30 +265,11 @@ test_mirror_gretap_second() test_lag_slave $h4 $swp4 $swp3 "mirror to gretap: LAG second slave" } -test_all() -{ - slow_path_trap_install $swp1 ingress - slow_path_trap_install $swp1 egress - - tests_run - - slow_path_trap_uninstall $swp1 egress - slow_path_trap_uninstall $swp1 ingress -} - trap cleanup EXIT setup_prepare setup_wait -tcflags="skip_hw" -test_all - -if ! tc_offload_check; then - echo "WARN: Could not test offloaded functionality" -else - tcflags="skip_sw" - test_all -fi +tests_run exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh index 5ea9d63915f7..65ae9d960c18 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh @@ -73,7 +73,7 @@ test_span_gre_ttl() RET=0 mirror_install $swp1 ingress $tundev \ - "prot ip flower $tcflags ip_prot icmp" + "prot ip flower ip_prot icmp" tc filter add dev $h3 ingress pref 77 prot $prot \ flower skip_hw ip_ttl 50 action pass @@ -81,13 +81,13 @@ test_span_gre_ttl() ip link set dev $tundev type $type ttl 50 sleep 2 - mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 10 + mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 ">= 10" ip link set dev $tundev type $type ttl 100 tc filter del dev $h3 ingress pref 77 mirror_uninstall $swp1 ingress - log_test "$what: TTL change ($tcflags)" + log_test "$what: TTL change" } test_span_gre_tun_up() @@ -98,15 +98,15 @@ test_span_gre_tun_up() RET=0 ip link set dev $tundev down - mirror_install $swp1 ingress $tundev "matchall $tcflags" - fail_test_span_gre_dir $tundev ingress + mirror_install $swp1 ingress $tundev "matchall" + fail_test_span_gre_dir $tundev ip link set dev $tundev up - quick_test_span_gre_dir $tundev ingress + quick_test_span_gre_dir $tundev mirror_uninstall $swp1 ingress - log_test "$what: tunnel down/up ($tcflags)" + log_test "$what: tunnel down/up" } test_span_gre_egress_up() @@ -118,8 +118,8 @@ test_span_gre_egress_up() RET=0 ip link set dev $swp3 down - mirror_install $swp1 ingress $tundev "matchall $tcflags" - fail_test_span_gre_dir $tundev ingress + mirror_install $swp1 ingress $tundev "matchall" + fail_test_span_gre_dir $tundev # After setting the device up, wait for neighbor to get resolved so that # we can expect mirroring to work. 
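(Annotation, not part of the patch: across this series the
quick_/fail_test_span_gre_dir helpers lose their direction argument and
instead take optional forward/backward ICMP types, defaulting to echo
request (8) on the way out and echo reply (0) on the way back, per the
mirror_gre_lib.sh hunks further down. A sketch of the resulting calls:

    quick_test_span_gre_dir $tundev        # ingress mirror, defaults to 8 0
    quick_test_span_gre_dir $tundev 0 8    # egress mirror: replies forward
)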
@@ -127,10 +127,10 @@ test_span_gre_egress_up() setup_wait_dev $swp3 ping -c 1 -I $swp3 $remote_ip &>/dev/null - quick_test_span_gre_dir $tundev ingress + quick_test_span_gre_dir $tundev mirror_uninstall $swp1 ingress - log_test "$what: egress down/up ($tcflags)" + log_test "$what: egress down/up" } test_span_gre_remote_ip() @@ -144,14 +144,14 @@ test_span_gre_remote_ip() RET=0 ip link set dev $tundev type $type remote $wrong_ip - mirror_install $swp1 ingress $tundev "matchall $tcflags" - fail_test_span_gre_dir $tundev ingress + mirror_install $swp1 ingress $tundev "matchall" + fail_test_span_gre_dir $tundev ip link set dev $tundev type $type remote $correct_ip - quick_test_span_gre_dir $tundev ingress + quick_test_span_gre_dir $tundev mirror_uninstall $swp1 ingress - log_test "$what: remote address change ($tcflags)" + log_test "$what: remote address change" } test_span_gre_tun_del() @@ -165,10 +165,10 @@ test_span_gre_tun_del() RET=0 - mirror_install $swp1 ingress $tundev "matchall $tcflags" - quick_test_span_gre_dir $tundev ingress + mirror_install $swp1 ingress $tundev "matchall" + quick_test_span_gre_dir $tundev ip link del dev $tundev - fail_test_span_gre_dir $tundev ingress + fail_test_span_gre_dir $tundev tunnel_create $tundev $type $local_ip $remote_ip \ ttl 100 tos inherit $flags @@ -176,11 +176,11 @@ test_span_gre_tun_del() # Recreating the tunnel doesn't reestablish mirroring, so reinstall it # and verify it works for the follow-up tests. mirror_uninstall $swp1 ingress - mirror_install $swp1 ingress $tundev "matchall $tcflags" - quick_test_span_gre_dir $tundev ingress + mirror_install $swp1 ingress $tundev "matchall" + quick_test_span_gre_dir $tundev mirror_uninstall $swp1 ingress - log_test "$what: tunnel deleted ($tcflags)" + log_test "$what: tunnel deleted" } test_span_gre_route_del() @@ -192,18 +192,18 @@ test_span_gre_route_del() RET=0 - mirror_install $swp1 ingress $tundev "matchall $tcflags" - quick_test_span_gre_dir $tundev ingress + mirror_install $swp1 ingress $tundev "matchall" + quick_test_span_gre_dir $tundev ip route del $route dev $edev - fail_test_span_gre_dir $tundev ingress + fail_test_span_gre_dir $tundev ip route add $route dev $edev - quick_test_span_gre_dir $tundev ingress + quick_test_span_gre_dir $tundev mirror_uninstall $swp1 ingress - log_test "$what: underlay route removal ($tcflags)" + log_test "$what: underlay route removal" } test_ttl() @@ -244,30 +244,11 @@ test_route_del() test_span_gre_route_del gt6 $swp3 2001:db8:2::/64 "mirror to ip6gretap" } -test_all() -{ - slow_path_trap_install $swp1 ingress - slow_path_trap_install $swp1 egress - - tests_run - - slow_path_trap_uninstall $swp1 egress - slow_path_trap_uninstall $swp1 ingress -} - trap cleanup EXIT setup_prepare setup_wait -tcflags="skip_hw" -test_all - -if ! 
tc_offload_check; then - echo "WARN: Could not test offloaded functionality" -else - tcflags="skip_sw" - test_all -fi +tests_run exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh b/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh index 09389f3b9369..3a84f3ab5856 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_flower.sh @@ -64,12 +64,19 @@ cleanup() test_span_gre_dir_acl() { - test_span_gre_dir_ips "$@" 192.0.2.3 192.0.2.4 + local tundev=$1; shift + local forward_type=$1; shift + local backward_type=$1; shift + + test_span_gre_dir_ips "$tundev" "$forward_type" \ + "$backward_type" 192.0.2.3 192.0.2.4 } fail_test_span_gre_dir_acl() { - fail_test_span_gre_dir_ips "$@" 192.0.2.3 192.0.2.4 + local tundev=$1; shift + + fail_test_span_gre_dir_ips "$tundev" 192.0.2.3 192.0.2.4 } full_test_span_gre_dir_acl() @@ -84,16 +91,15 @@ full_test_span_gre_dir_acl() RET=0 mirror_install $swp1 $direction $tundev \ - "protocol ip flower $tcflags dst_ip $match_dip" - fail_test_span_gre_dir $tundev $direction - test_span_gre_dir_acl "$tundev" "$direction" \ - "$forward_type" "$backward_type" + "protocol ip flower dst_ip $match_dip" + fail_test_span_gre_dir $tundev + test_span_gre_dir_acl "$tundev" "$forward_type" "$backward_type" mirror_uninstall $swp1 $direction # Test lack of mirroring after ACL mirror is uninstalled. - fail_test_span_gre_dir_acl "$tundev" "$direction" + fail_test_span_gre_dir_acl "$tundev" - log_test "$direction $what ($tcflags)" + log_test "$direction $what" } test_gretap() @@ -108,30 +114,11 @@ test_ip6gretap() full_test_span_gre_dir_acl gt6 egress 0 8 192.0.2.3 "ACL mirror to ip6gretap" } -test_all() -{ - slow_path_trap_install $swp1 ingress - slow_path_trap_install $swp1 egress - - tests_run - - slow_path_trap_uninstall $swp1 egress - slow_path_trap_uninstall $swp1 ingress -} - trap cleanup EXIT setup_prepare setup_wait -tcflags="skip_hw" -test_all - -if ! 
tc_offload_check; then - echo "WARN: Could not test offloaded functionality" -else - tcflags="skip_sw" - test_all -fi +tests_run exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh b/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh index 9edf4cb104a8..1261e6f46e34 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_lag_lacp.sh @@ -37,8 +37,14 @@ # | \ / | # | \____________________________________________/ | # | | | -# | + lag2 (team) | -# | 192.0.2.130/28 | +# | + lag2 (team) ------> + gt4-dst (gretap) | +# | 192.0.2.130/28 loc=192.0.2.130 | +# | rem=192.0.2.129 | +# | ttl=100 | +# | tos=inherit | +# | | +# | | +# | | # | | # +---------------------------------------------------------------------------+ @@ -50,9 +56,6 @@ ALL_TESTS=" NUM_NETIFS=6 source lib.sh source mirror_lib.sh -source mirror_gre_lib.sh - -require_command $ARPING vlan_host_create() { @@ -122,16 +125,21 @@ h3_create() { vrf_create vrf-h3 ip link set dev vrf-h3 up - tc qdisc add dev $h3 clsact - tc qdisc add dev $h4 clsact h3_create_team + + tunnel_create gt4-dst gretap 192.0.2.130 192.0.2.129 \ + ttl 100 tos inherit + ip link set dev gt4-dst master vrf-h3 + tc qdisc add dev gt4-dst clsact } h3_destroy() { + tc qdisc del dev gt4-dst clsact + ip link set dev gt4-dst nomaster + tunnel_destroy gt4-dst + h3_destroy_team - tc qdisc del dev $h4 clsact - tc qdisc del dev $h3 clsact ip link set dev vrf-h3 down vrf_destroy vrf-h3 } @@ -188,18 +196,12 @@ setup_prepare() h2_create h3_create switch_create - - trap_install $h3 ingress - trap_install $h4 ingress } cleanup() { pre_cleanup - trap_uninstall $h4 ingress - trap_uninstall $h3 ingress - switch_destroy h3_destroy h2_destroy @@ -218,7 +220,8 @@ test_lag_slave() RET=0 mirror_install $swp1 ingress gt4 \ - "proto 802.1q flower vlan_id 333 $tcflags" + "proto 802.1q flower vlan_id 333" + vlan_capture_install gt4-dst "vlan_ethtype ipv4 ip_proto icmp type 8" # Move $down_dev away from the team. That will prompt change in # txability of the connected device, without changing its upness. The @@ -226,13 +229,14 @@ test_lag_slave() # other slave. ip link set dev $down_dev nomaster sleep 2 - mirror_test vrf-h1 192.0.2.1 192.0.2.18 $up_dev 1 10 + mirror_test vrf-h1 192.0.2.1 192.0.2.18 gt4-dst 100 10 # Test lack of connectivity when neither slave is txable. ip link set dev $up_dev nomaster sleep 2 - mirror_test vrf-h1 192.0.2.1 192.0.2.18 $h3 1 0 - mirror_test vrf-h1 192.0.2.1 192.0.2.18 $h4 1 0 + mirror_test vrf-h1 192.0.2.1 192.0.2.18 gt4-dst 100 0 + + vlan_capture_uninstall gt4-dst mirror_uninstall $swp1 ingress # Recreate H3's team device, because mlxsw, which this test is @@ -243,7 +247,7 @@ test_lag_slave() # Wait for ${h,swp}{3,4}. setup_wait - log_test "$what ($tcflags)" + log_test "$what" } test_mirror_gretap_first() @@ -256,30 +260,11 @@ test_mirror_gretap_second() test_lag_slave $h4 $h3 "mirror to gretap: LAG second slave" } -test_all() -{ - slow_path_trap_install $swp1 ingress - slow_path_trap_install $swp1 egress - - tests_run - - slow_path_trap_uninstall $swp1 egress - slow_path_trap_uninstall $swp1 ingress -} - trap cleanup EXIT setup_prepare setup_wait -tcflags="skip_hw" -test_all - -if ! 
tc_offload_check; then - echo "WARN: Could not test offloaded functionality" -else - tcflags="skip_sw" - test_all -fi +tests_run exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh index 0c36546e131e..20078cc55f24 100644 --- a/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_lib.sh @@ -5,22 +5,34 @@ source "$net_forwarding_dir/mirror_lib.sh" quick_test_span_gre_dir_ips() { local tundev=$1; shift + local ip1=$1; shift + local ip2=$1; shift + local forward_type=$1; shift + local backward_type=$1; shift - do_test_span_dir_ips 10 h3-$tundev "$@" + do_test_span_dir_ips 10 h3-$tundev "$ip1" "$ip2" \ + "$forward_type" "$backward_type" } fail_test_span_gre_dir_ips() { local tundev=$1; shift + local ip1=$1; shift + local ip2=$1; shift - do_test_span_dir_ips 0 h3-$tundev "$@" + do_test_span_dir_ips 0 h3-$tundev "$ip1" "$ip2" } test_span_gre_dir_ips() { local tundev=$1; shift + local forward_type=$1; shift + local backward_type=$1; shift + local ip1=$1; shift + local ip2=$1; shift - test_span_dir_ips h3-$tundev "$@" + test_span_dir_ips h3-$tundev "$forward_type" \ + "$backward_type" "$ip1" "$ip2" } full_test_span_gre_dir_ips() @@ -35,12 +47,12 @@ full_test_span_gre_dir_ips() RET=0 - mirror_install $swp1 $direction $tundev "matchall $tcflags" - test_span_dir_ips "h3-$tundev" "$direction" "$forward_type" \ + mirror_install $swp1 $direction $tundev "matchall" + test_span_dir_ips "h3-$tundev" "$forward_type" \ "$backward_type" "$ip1" "$ip2" mirror_uninstall $swp1 $direction - log_test "$direction $what ($tcflags)" + log_test "$direction $what" } full_test_span_gre_dir_vlan_ips() @@ -56,45 +68,63 @@ full_test_span_gre_dir_vlan_ips() RET=0 - mirror_install $swp1 $direction $tundev "matchall $tcflags" + mirror_install $swp1 $direction $tundev "matchall" - test_span_dir_ips "h3-$tundev" "$direction" "$forward_type" \ + test_span_dir_ips "h3-$tundev" "$forward_type" \ "$backward_type" "$ip1" "$ip2" tc filter add dev $h3 ingress pref 77 prot 802.1q \ flower $vlan_match \ action pass - mirror_test v$h1 $ip1 $ip2 $h3 77 10 + mirror_test v$h1 $ip1 $ip2 $h3 77 '>= 10' tc filter del dev $h3 ingress pref 77 mirror_uninstall $swp1 $direction - log_test "$direction $what ($tcflags)" + log_test "$direction $what" } quick_test_span_gre_dir() { - quick_test_span_gre_dir_ips "$@" 192.0.2.1 192.0.2.2 + local tundev=$1; shift + local forward_type=${1-8}; shift + local backward_type=${1-0}; shift + + quick_test_span_gre_dir_ips "$tundev" 192.0.2.1 192.0.2.2 \ + "$forward_type" "$backward_type" } fail_test_span_gre_dir() { - fail_test_span_gre_dir_ips "$@" 192.0.2.1 192.0.2.2 -} + local tundev=$1; shift -test_span_gre_dir() -{ - test_span_gre_dir_ips "$@" 192.0.2.1 192.0.2.2 + fail_test_span_gre_dir_ips "$tundev" 192.0.2.1 192.0.2.2 } full_test_span_gre_dir() { - full_test_span_gre_dir_ips "$@" 192.0.2.1 192.0.2.2 + local tundev=$1; shift + local direction=$1; shift + local forward_type=$1; shift + local backward_type=$1; shift + local what=$1; shift + + full_test_span_gre_dir_ips "$tundev" "$direction" "$forward_type" \ + "$backward_type" "$what" 192.0.2.1 192.0.2.2 } full_test_span_gre_dir_vlan() { - full_test_span_gre_dir_vlan_ips "$@" 192.0.2.1 192.0.2.2 + local tundev=$1; shift + local direction=$1; shift + local vlan_match=$1; shift + local forward_type=$1; shift + local backward_type=$1; shift + local what=$1; shift + + full_test_span_gre_dir_vlan_ips "$tundev" 
"$direction" "$vlan_match" \ + "$forward_type" "$backward_type" \ + "$what" 192.0.2.1 192.0.2.2 } full_test_span_gre_stp_ips() @@ -104,27 +134,39 @@ full_test_span_gre_stp_ips() local what=$1; shift local ip1=$1; shift local ip2=$1; shift + local forward_type=$1; shift + local backward_type=$1; shift local h3mac=$(mac_get $h3) RET=0 - mirror_install $swp1 ingress $tundev "matchall $tcflags" - quick_test_span_gre_dir_ips $tundev ingress $ip1 $ip2 + mirror_install $swp1 ingress $tundev "matchall" + quick_test_span_gre_dir_ips $tundev $ip1 $ip2 \ + "$forward_type" "$backward_type" bridge link set dev $nbpdev state disabled sleep 1 - fail_test_span_gre_dir_ips $tundev ingress $ip1 $ip2 + fail_test_span_gre_dir_ips $tundev $ip1 $ip2 bridge link set dev $nbpdev state forwarding sleep 1 - quick_test_span_gre_dir_ips $tundev ingress $ip1 $ip2 + quick_test_span_gre_dir_ips $tundev $ip1 $ip2 \ + "$forward_type" "$backward_type" mirror_uninstall $swp1 ingress - log_test "$what: STP state ($tcflags)" + log_test "$what: STP state" } full_test_span_gre_stp() { - full_test_span_gre_stp_ips "$@" 192.0.2.1 192.0.2.2 + local tundev=$1; shift + local nbpdev=$1; shift + local what=$1; shift + local forward_type=${1-8}; shift + local backward_type=${1-0}; shift + + full_test_span_gre_stp_ips "$tundev" "$nbpdev" "$what" \ + 192.0.2.1 192.0.2.2 \ + "$forward_type" "$backward_type" } diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_neigh.sh b/tools/testing/selftests/net/forwarding/mirror_gre_neigh.sh index fc0508e40fca..2cbfbecf25c8 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_neigh.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_neigh.sh @@ -60,41 +60,32 @@ test_span_gre_neigh() local addr=$1; shift local tundev=$1; shift local direction=$1; shift + local forward_type=$1; shift + local backward_type=$1; shift local what=$1; shift RET=0 ip neigh replace dev $swp3 $addr lladdr 00:11:22:33:44:55 - mirror_install $swp1 $direction $tundev "matchall $tcflags" - fail_test_span_gre_dir $tundev ingress + mirror_install $swp1 $direction $tundev "matchall" + fail_test_span_gre_dir $tundev "$forward_type" "$backward_type" ip neigh del dev $swp3 $addr - quick_test_span_gre_dir $tundev ingress + quick_test_span_gre_dir $tundev "$forward_type" "$backward_type" mirror_uninstall $swp1 $direction - log_test "$direction $what: neighbor change ($tcflags)" + log_test "$direction $what: neighbor change" } test_gretap() { - test_span_gre_neigh 192.0.2.130 gt4 ingress "mirror to gretap" - test_span_gre_neigh 192.0.2.130 gt4 egress "mirror to gretap" + test_span_gre_neigh 192.0.2.130 gt4 ingress 8 0 "mirror to gretap" + test_span_gre_neigh 192.0.2.130 gt4 egress 0 8 "mirror to gretap" } test_ip6gretap() { - test_span_gre_neigh 2001:db8:2::2 gt6 ingress "mirror to ip6gretap" - test_span_gre_neigh 2001:db8:2::2 gt6 egress "mirror to ip6gretap" -} - -test_all() -{ - slow_path_trap_install $swp1 ingress - slow_path_trap_install $swp1 egress - - tests_run - - slow_path_trap_uninstall $swp1 egress - slow_path_trap_uninstall $swp1 ingress + test_span_gre_neigh 2001:db8:2::2 gt6 ingress 8 0 "mirror to ip6gretap" + test_span_gre_neigh 2001:db8:2::2 gt6 egress 0 8 "mirror to ip6gretap" } trap cleanup EXIT @@ -102,14 +93,6 @@ trap cleanup EXIT setup_prepare setup_wait -tcflags="skip_hw" -test_all - -if ! 
tc_offload_check; then - echo "WARN: Could not test offloaded functionality" -else - tcflags="skip_sw" - test_all -fi +tests_run exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh b/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh index 6f9ef1820e93..34bc646938e3 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_nh.sh @@ -75,42 +75,31 @@ cleanup() test_gretap() { RET=0 - mirror_install $swp1 ingress gt4 "matchall $tcflags" + mirror_install $swp1 ingress gt4 "matchall" # For IPv4, test that there's no mirroring without the route directing # the traffic to tunnel remote address. Then add it and test that # mirroring starts. For IPv6 we can't test this due to the limitation # that routes for locally-specified IPv6 addresses can't be added. - fail_test_span_gre_dir gt4 ingress + fail_test_span_gre_dir gt4 ip route add 192.0.2.130/32 via 192.0.2.162 - quick_test_span_gre_dir gt4 ingress + quick_test_span_gre_dir gt4 ip route del 192.0.2.130/32 via 192.0.2.162 mirror_uninstall $swp1 ingress - log_test "mirror to gre with next-hop remote ($tcflags)" + log_test "mirror to gre with next-hop remote" } test_ip6gretap() { RET=0 - mirror_install $swp1 ingress gt6 "matchall $tcflags" - quick_test_span_gre_dir gt6 ingress + mirror_install $swp1 ingress gt6 "matchall" + quick_test_span_gre_dir gt6 mirror_uninstall $swp1 ingress - log_test "mirror to ip6gre with next-hop remote ($tcflags)" -} - -test_all() -{ - slow_path_trap_install $swp1 ingress - slow_path_trap_install $swp1 egress - - tests_run - - slow_path_trap_uninstall $swp1 egress - slow_path_trap_uninstall $swp1 ingress + log_test "mirror to ip6gre with next-hop remote" } trap cleanup EXIT @@ -118,14 +107,6 @@ trap cleanup EXIT setup_prepare setup_wait -tcflags="skip_hw" -test_all - -if ! tc_offload_check; then - echo "WARN: Could not test offloaded functionality" -else - tcflags="skip_sw" - test_all -fi +tests_run exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan.sh index 88cecdb9a861..63689928cb51 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan.sh @@ -63,30 +63,11 @@ test_gretap() full_test_span_gre_dir gt4 egress 0 8 "mirror to gretap" } -test_all() -{ - slow_path_trap_install $swp1 ingress - slow_path_trap_install $swp1 egress - - tests_run - - slow_path_trap_uninstall $swp1 egress - slow_path_trap_uninstall $swp1 ingress -} - trap cleanup EXIT setup_prepare setup_wait -tcflags="skip_hw" -test_all - -if ! tc_offload_check; then - echo "WARN: Could not test offloaded functionality" -else - tcflags="skip_sw" - test_all -fi +tests_run exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh index c8a9b5bd841f..1b902cc579f6 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_vlan_bridge_1q.sh @@ -153,21 +153,21 @@ test_span_gre_forbidden_cpu() RET=0 # Run the pass-test first, to prime neighbor table. - mirror_install $swp1 ingress $tundev "matchall $tcflags" - quick_test_span_gre_dir $tundev ingress + mirror_install $swp1 ingress $tundev "matchall" + quick_test_span_gre_dir $tundev # Now forbid the VLAN at the bridge and see it fail. 
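	# ("self" addresses the bridge device itself, i.e. the CPU port,
	# rather than one of its member ports; an annotation, not part of
	# the patch. The pair of operations used below, in a sketch:
	#
	#   bridge vlan del dev br1 vid 555 self   # forbid 555 towards the local stack
	#   bridge vlan add dev br1 vid 555 self   # re-allow it
	# )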
bridge vlan del dev br1 vid 555 self sleep 1 - fail_test_span_gre_dir $tundev ingress + fail_test_span_gre_dir $tundev bridge vlan add dev br1 vid 555 self sleep 1 - quick_test_span_gre_dir $tundev ingress + quick_test_span_gre_dir $tundev mirror_uninstall $swp1 ingress - log_test "$what: vlan forbidden at a bridge ($tcflags)" + log_test "$what: vlan forbidden at a bridge" } test_gretap_forbidden_cpu() @@ -187,22 +187,22 @@ test_span_gre_forbidden_egress() RET=0 - mirror_install $swp1 ingress $tundev "matchall $tcflags" - quick_test_span_gre_dir $tundev ingress + mirror_install $swp1 ingress $tundev "matchall" + quick_test_span_gre_dir $tundev bridge vlan del dev $swp3 vid 555 sleep 1 - fail_test_span_gre_dir $tundev ingress + fail_test_span_gre_dir $tundev bridge vlan add dev $swp3 vid 555 # Re-prime FDB $ARPING -I br1.555 192.0.2.130 -fqc 1 sleep 1 - quick_test_span_gre_dir $tundev ingress + quick_test_span_gre_dir $tundev mirror_uninstall $swp1 ingress - log_test "$what: vlan forbidden at a bridge egress ($tcflags)" + log_test "$what: vlan forbidden at a bridge egress" } test_gretap_forbidden_egress() @@ -223,30 +223,30 @@ test_span_gre_untagged_egress() RET=0 - mirror_install $swp1 ingress $tundev "matchall $tcflags" + mirror_install $swp1 ingress $tundev "matchall" - quick_test_span_gre_dir $tundev ingress - quick_test_span_vlan_dir $h3 555 ingress "$ul_proto" + quick_test_span_gre_dir $tundev + quick_test_span_vlan_dir $h3 555 "$ul_proto" h3_addr_add_del del $h3.555 bridge vlan add dev $swp3 vid 555 pvid untagged h3_addr_add_del add $h3 sleep 5 - quick_test_span_gre_dir $tundev ingress - fail_test_span_vlan_dir $h3 555 ingress "$ul_proto" + quick_test_span_gre_dir $tundev + fail_test_span_vlan_dir $h3 555 "$ul_proto" h3_addr_add_del del $h3 bridge vlan add dev $swp3 vid 555 h3_addr_add_del add $h3.555 sleep 5 - quick_test_span_gre_dir $tundev ingress - quick_test_span_vlan_dir $h3 555 ingress "$ul_proto" + quick_test_span_gre_dir $tundev + quick_test_span_vlan_dir $h3 555 "$ul_proto" mirror_uninstall $swp1 ingress - log_test "$what: vlan untagged at a bridge egress ($tcflags)" + log_test "$what: vlan untagged at a bridge egress" } test_gretap_untagged_egress() @@ -267,19 +267,19 @@ test_span_gre_fdb_roaming() RET=0 - mirror_install $swp1 ingress $tundev "matchall $tcflags" - quick_test_span_gre_dir $tundev ingress + mirror_install $swp1 ingress $tundev "matchall" + quick_test_span_gre_dir $tundev while ((RET == 0)); do bridge fdb del dev $swp3 $h3mac vlan 555 master 2>/dev/null bridge fdb add dev $swp2 $h3mac vlan 555 master static sleep 1 - fail_test_span_gre_dir $tundev ingress + fail_test_span_gre_dir $tundev if ! bridge fdb sh dev $swp2 vlan 555 master \ | grep -q $h3mac; then printf "TEST: %-60s [RETRY]\n" \ - "$what: MAC roaming ($tcflags)" + "$what: MAC roaming" # ARP or ND probably reprimed the FDB while the test # was running. We would get a spurious failure. 
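	# (Shape of the retry loop above, sketched with hypothetical
	# helpers standing in for the fdb del/add and grep steps; not part
	# of the patch:
	#
	#   while ((RET == 0)); do
	#       force_roam                            # fdb del on $swp3, add on $swp2
	#       fail_test_span_gre_dir $tundev        # mirroring must stop
	#       still_roamed || { RET=0; continue; }  # FDB reprimed: retry
	#       break                                 # clean iteration: verdict stands
	#   done
	# )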
RET=0 @@ -292,11 +292,11 @@ test_span_gre_fdb_roaming() # Re-prime FDB $ARPING -I br1.555 192.0.2.130 -fqc 1 sleep 1 - quick_test_span_gre_dir $tundev ingress + quick_test_span_gre_dir $tundev mirror_uninstall $swp1 ingress - log_test "$what: MAC roaming ($tcflags)" + log_test "$what: MAC roaming" } test_gretap_fdb_roaming() @@ -319,30 +319,11 @@ test_ip6gretap_stp() full_test_span_gre_stp gt6 $swp3 "mirror to ip6gretap" } -test_all() -{ - slow_path_trap_install $swp1 ingress - slow_path_trap_install $swp1 egress - - tests_run - - slow_path_trap_uninstall $swp1 egress - slow_path_trap_uninstall $swp1 ingress -} - trap cleanup EXIT setup_prepare setup_wait -tcflags="skip_hw" -test_all - -if ! tc_offload_check; then - echo "WARN: Could not test offloaded functionality" -else - tcflags="skip_sw" - test_all -fi +tests_run exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/mirror_lib.sh b/tools/testing/selftests/net/forwarding/mirror_lib.sh index 3e8ebeff3019..6bf9d5ae933c 100644 --- a/tools/testing/selftests/net/forwarding/mirror_lib.sh +++ b/tools/testing/selftests/net/forwarding/mirror_lib.sh @@ -44,14 +44,17 @@ mirror_test() local type="icmp echoreq" fi + if [[ -z ${expect//[[:digit:]]/} ]]; then + expect="== $expect" + fi + local t0=$(tc_rule_stats_get $dev $pref) $MZ $proto $vrf_name ${sip:+-A $sip} -B $dip -a own -b bc -q \ -c 10 -d 100msec -t $type sleep 0.5 local t1=$(tc_rule_stats_get $dev $pref) local delta=$((t1 - t0)) - # Tolerate a couple stray extra packets. - ((expect <= delta && delta <= expect + 2)) + ((delta $expect)) check_err $? "Expected to capture $expect packets, got $delta." } @@ -59,36 +62,42 @@ do_test_span_dir_ips() { local expect=$1; shift local dev=$1; shift - local direction=$1; shift local ip1=$1; shift local ip2=$1; shift + local forward_type=${1-8}; shift + local backward_type=${1-0}; shift - icmp_capture_install $dev + icmp_capture_install $dev "type $forward_type" mirror_test v$h1 $ip1 $ip2 $dev 100 $expect + icmp_capture_uninstall $dev + + icmp_capture_install $dev "type $backward_type" mirror_test v$h2 $ip2 $ip1 $dev 100 $expect icmp_capture_uninstall $dev } quick_test_span_dir_ips() { - do_test_span_dir_ips 10 "$@" -} + local dev=$1; shift + local ip1=$1; shift + local ip2=$1; shift + local forward_type=${1-8}; shift + local backward_type=${1-0}; shift -fail_test_span_dir_ips() -{ - do_test_span_dir_ips 0 "$@" + do_test_span_dir_ips 10 "$dev" "$ip1" "$ip2" \ + "$forward_type" "$backward_type" } test_span_dir_ips() { local dev=$1; shift - local direction=$1; shift local forward_type=$1; shift local backward_type=$1; shift local ip1=$1; shift local ip2=$1; shift - quick_test_span_dir_ips "$dev" "$direction" "$ip1" "$ip2" + quick_test_span_dir_ips "$dev" "$ip1" "$ip2" \ + "$forward_type" "$backward_type" icmp_capture_install $dev "type $forward_type" mirror_test v$h1 $ip1 $ip2 $dev 100 10 @@ -99,14 +108,14 @@ test_span_dir_ips() icmp_capture_uninstall $dev } -fail_test_span_dir() -{ - fail_test_span_dir_ips "$@" 192.0.2.1 192.0.2.2 -} - test_span_dir() { - test_span_dir_ips "$@" 192.0.2.1 192.0.2.2 + local dev=$1; shift + local forward_type=$1; shift + local backward_type=$1; shift + + test_span_dir_ips "$dev" "$forward_type" "$backward_type" \ + 192.0.2.1 192.0.2.2 } do_test_span_vlan_dir_ips() @@ -114,7 +123,6 @@ do_test_span_vlan_dir_ips() local expect=$1; shift local dev=$1; shift local vid=$1; shift - local direction=$1; shift local ul_proto=$1; shift local ip1=$1; shift local ip2=$1; shift @@ -123,27 +131,50 @@ 
do_test_span_vlan_dir_ips() # The traffic is meant for local box anyway, so will be trapped to # kernel. vlan_capture_install $dev "skip_hw vlan_id $vid vlan_ethtype $ul_proto" - mirror_test v$h1 $ip1 $ip2 $dev 100 $expect - mirror_test v$h2 $ip2 $ip1 $dev 100 $expect + mirror_test v$h1 $ip1 $ip2 $dev 100 "$expect" + mirror_test v$h2 $ip2 $ip1 $dev 100 "$expect" vlan_capture_uninstall $dev } quick_test_span_vlan_dir_ips() { - do_test_span_vlan_dir_ips 10 "$@" + local dev=$1; shift + local vid=$1; shift + local ul_proto=$1; shift + local ip1=$1; shift + local ip2=$1; shift + + do_test_span_vlan_dir_ips '>= 10' "$dev" "$vid" "$ul_proto" \ + "$ip1" "$ip2" } fail_test_span_vlan_dir_ips() { - do_test_span_vlan_dir_ips 0 "$@" + local dev=$1; shift + local vid=$1; shift + local ul_proto=$1; shift + local ip1=$1; shift + local ip2=$1; shift + + do_test_span_vlan_dir_ips 0 "$dev" "$vid" "$ul_proto" "$ip1" "$ip2" } quick_test_span_vlan_dir() { - quick_test_span_vlan_dir_ips "$@" 192.0.2.1 192.0.2.2 + local dev=$1; shift + local vid=$1; shift + local ul_proto=$1; shift + + quick_test_span_vlan_dir_ips "$dev" "$vid" "$ul_proto" \ + 192.0.2.1 192.0.2.2 } fail_test_span_vlan_dir() { - fail_test_span_vlan_dir_ips "$@" 192.0.2.1 192.0.2.2 + local dev=$1; shift + local vid=$1; shift + local ul_proto=$1; shift + + fail_test_span_vlan_dir_ips "$dev" "$vid" "$ul_proto" \ + 192.0.2.1 192.0.2.2 } diff --git a/tools/testing/selftests/net/forwarding/mirror_vlan.sh b/tools/testing/selftests/net/forwarding/mirror_vlan.sh index 0b44e148235e..2f150a414d38 100755 --- a/tools/testing/selftests/net/forwarding/mirror_vlan.sh +++ b/tools/testing/selftests/net/forwarding/mirror_vlan.sh @@ -40,12 +40,16 @@ setup_prepare() vlan_create $h2 111 v$h2 192.0.2.18/28 bridge vlan add dev $swp2 vid 111 + + trap_install $h3 ingress } cleanup() { pre_cleanup + trap_uninstall $h3 ingress + vlan_destroy $h2 111 vlan_destroy $h1 111 vlan_destroy $h3 555 @@ -63,11 +67,11 @@ test_vlan_dir() RET=0 - mirror_install $swp1 $direction $swp3.555 "matchall $tcflags" - test_span_dir "$h3.555" "$direction" "$forward_type" "$backward_type" + mirror_install $swp1 $direction $swp3.555 "matchall" + test_span_dir "$h3.555" "$forward_type" "$backward_type" mirror_uninstall $swp1 $direction - log_test "$direction mirror to vlan ($tcflags)" + log_test "$direction mirror to vlan" } test_vlan() @@ -84,14 +88,12 @@ test_tagged_vlan_dir() RET=0 - mirror_install $swp1 $direction $swp3.555 "matchall $tcflags" - do_test_span_vlan_dir_ips 10 "$h3.555" 111 "$direction" ip \ - 192.0.2.17 192.0.2.18 - do_test_span_vlan_dir_ips 0 "$h3.555" 555 "$direction" ip \ - 192.0.2.17 192.0.2.18 + mirror_install $swp1 $direction $swp3.555 "matchall" + do_test_span_vlan_dir_ips '>= 10' "$h3.555" 111 ip 192.0.2.17 192.0.2.18 + do_test_span_vlan_dir_ips 0 "$h3.555" 555 ip 192.0.2.17 192.0.2.18 mirror_uninstall $swp1 $direction - log_test "$direction mirror tagged to vlan ($tcflags)" + log_test "$direction mirror tagged to vlan" } test_tagged_vlan() @@ -100,32 +102,11 @@ test_tagged_vlan() test_tagged_vlan_dir egress 0 8 } -test_all() -{ - slow_path_trap_install $swp1 ingress - slow_path_trap_install $swp1 egress - trap_install $h3 ingress - - tests_run - - trap_uninstall $h3 ingress - slow_path_trap_uninstall $swp1 egress - slow_path_trap_uninstall $swp1 ingress -} - trap cleanup EXIT setup_prepare setup_wait -tcflags="skip_hw" -test_all - -if ! 
tc_offload_check; then - echo "WARN: Could not test offloaded functionality" -else - tcflags="skip_sw" - test_all -fi +tests_run exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/router_mpath_seed.sh b/tools/testing/selftests/net/forwarding/router_mpath_seed.sh new file mode 100755 index 000000000000..314cb906c1eb --- /dev/null +++ b/tools/testing/selftests/net/forwarding/router_mpath_seed.sh @@ -0,0 +1,333 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 + +# +-------------------------+ +-------------------------+ +# | H1 | | H2 | +# | $h1 + | | + $h2 | +# | 192.0.2.1/28 | | | | 192.0.2.34/28 | +# | 2001:db8:1::1/64 | | | | 2001:db8:3::2/64 | +# +-------------------|-----+ +-|-----------------------+ +# | | +# +-------------------|-----+ +-|-----------------------+ +# | R1 | | | | R2 | +# | $rp11 + | | + $rp21 | +# | 192.0.2.2/28 | | 192.0.2.33/28 | +# | 2001:db8:1::2/64 | | 2001:db8:3::1/64 | +# | | | | +# | $rp12 + | | + $rp22 | +# | 192.0.2.17/28 | | | | 192.0.2.18..27/28 | +# | 2001:db8:2::17/64 | | | | 2001:db8:2::18..27/64 | +# +-------------------|-----+ +-|-----------------------+ +# | | +# `----------' + +ALL_TESTS=" + ping_ipv4 + ping_ipv6 + test_mpath_seed_stability_ipv4 + test_mpath_seed_stability_ipv6 + test_mpath_seed_get + test_mpath_seed_ipv4 + test_mpath_seed_ipv6 +" +NUM_NETIFS=6 +source lib.sh + +h1_create() +{ + simple_if_init $h1 192.0.2.1/28 2001:db8:1::1/64 + ip -4 route add 192.0.2.32/28 vrf v$h1 nexthop via 192.0.2.2 + ip -6 route add 2001:db8:3::/64 vrf v$h1 nexthop via 2001:db8:1::2 +} + +h1_destroy() +{ + ip -6 route del 2001:db8:3::/64 vrf v$h1 nexthop via 2001:db8:1::2 + ip -4 route del 192.0.2.32/28 vrf v$h1 nexthop via 192.0.2.2 + simple_if_fini $h1 192.0.2.1/28 2001:db8:1::1/64 +} + +h2_create() +{ + simple_if_init $h2 192.0.2.34/28 2001:db8:3::2/64 + ip -4 route add 192.0.2.0/28 vrf v$h2 nexthop via 192.0.2.33 + ip -6 route add 2001:db8:1::/64 vrf v$h2 nexthop via 2001:db8:3::1 +} + +h2_destroy() +{ + ip -6 route del 2001:db8:1::/64 vrf v$h2 nexthop via 2001:db8:3::1 + ip -4 route del 192.0.2.0/28 vrf v$h2 nexthop via 192.0.2.33 + simple_if_fini $h2 192.0.2.34/28 2001:db8:3::2/64 +} + +router1_create() +{ + simple_if_init $rp11 192.0.2.2/28 2001:db8:1::2/64 + __simple_if_init $rp12 v$rp11 192.0.2.17/28 2001:db8:2::17/64 +} + +router1_destroy() +{ + __simple_if_fini $rp12 192.0.2.17/28 2001:db8:2::17/64 + simple_if_fini $rp11 192.0.2.2/28 2001:db8:1::2/64 +} + +router2_create() +{ + simple_if_init $rp21 192.0.2.33/28 2001:db8:3::1/64 + __simple_if_init $rp22 v$rp21 192.0.2.18/28 2001:db8:2::18/64 + ip -4 route add 192.0.2.0/28 vrf v$rp21 nexthop via 192.0.2.17 + ip -6 route add 2001:db8:1::/64 vrf v$rp21 nexthop via 2001:db8:2::17 +} + +router2_destroy() +{ + ip -6 route del 2001:db8:1::/64 vrf v$rp21 nexthop via 2001:db8:2::17 + ip -4 route del 192.0.2.0/28 vrf v$rp21 nexthop via 192.0.2.17 + __simple_if_fini $rp22 192.0.2.18/28 2001:db8:2::18/64 + simple_if_fini $rp21 192.0.2.33/28 2001:db8:3::1/64 +} + +nexthops_create() +{ + local i + for i in $(seq 10); do + ip nexthop add id $((1000 + i)) via 192.0.2.18 dev $rp12 + ip nexthop add id $((2000 + i)) via 2001:db8:2::18 dev $rp12 + done + + ip nexthop add id 1000 group $(seq -s / 1001 1010) hw_stats on + ip nexthop add id 2000 group $(seq -s / 2001 2010) hw_stats on + ip -4 route add 192.0.2.32/28 vrf v$rp11 nhid 1000 + ip -6 route add 2001:db8:3::/64 vrf v$rp11 nhid 2000 +} + +nexthops_destroy() +{ + local i + + ip -6 route del 2001:db8:3::/64 vrf v$rp11 nhid 2000 + ip -4 route del 
192.0.2.32/28 vrf v$rp11 nhid 1000 + ip nexthop del id 2000 + ip nexthop del id 1000 + + for i in $(seq 10 -1 1); do + ip nexthop del id $((2000 + i)) + ip nexthop del id $((1000 + i)) + done +} + +setup_prepare() +{ + h1=${NETIFS[p1]} + rp11=${NETIFS[p2]} + + rp12=${NETIFS[p3]} + rp22=${NETIFS[p4]} + + rp21=${NETIFS[p5]} + h2=${NETIFS[p6]} + + sysctl_save net.ipv4.fib_multipath_hash_seed + + vrf_prepare + + h1_create + h2_create + router1_create + router2_create + + forwarding_enable +} + +cleanup() +{ + pre_cleanup + + forwarding_restore + + nexthops_destroy + router2_destroy + router1_destroy + h2_destroy + h1_destroy + + vrf_cleanup + + sysctl_restore net.ipv4.fib_multipath_hash_seed +} + +ping_ipv4() +{ + ping_test $h1 192.0.2.34 +} + +ping_ipv6() +{ + ping6_test $h1 2001:db8:3::2 +} + +test_mpath_seed_get() +{ + RET=0 + + local i + for ((i = 0; i < 100; i++)); do + local seed_w=$((999331 * i)) + sysctl -qw net.ipv4.fib_multipath_hash_seed=$seed_w + local seed_r=$(sysctl -n net.ipv4.fib_multipath_hash_seed) + ((seed_r == seed_w)) + check_err $? "mpath seed written as $seed_w, but read as $seed_r" + done + + log_test "mpath seed set/get" +} + +nh_stats_snapshot() +{ + local group_id=$1; shift + + ip -j -s -s nexthop show id $group_id | + jq -c '[.[].group_stats | sort_by(.id) | .[].packets]' +} + +get_active_nh() +{ + local s0=$1; shift + local s1=$1; shift + + jq -n --argjson s0 "$s0" --argjson s1 "$s1" -f /dev/stdin <<-"EOF" + [range($s0 | length)] | + map($s1[.] - $s0[.]) | + map(if . > 8 then 1 else 0 end) | + index(1) + EOF +} + +probe_nh() +{ + local group_id=$1; shift + local -a mz=("$@") + + local s0=$(nh_stats_snapshot $group_id) + "${mz[@]}" + local s1=$(nh_stats_snapshot $group_id) + + get_active_nh "$s0" "$s1" +} + +probe_seed() +{ + local group_id=$1; shift + local seed=$1; shift + local -a mz=("$@") + + sysctl -qw net.ipv4.fib_multipath_hash_seed=$seed + probe_nh "$group_id" "${mz[@]}" +} + +test_mpath_seed() +{ + local group_id=$1; shift + local what=$1; shift + local -a mz=("$@") + local ii + + RET=0 + + local -a tally=(0 0 0 0 0 0 0 0 0 0) + for ((ii = 0; ii < 100; ii++)); do + local act=$(probe_seed $group_id $((999331 * ii)) "${mz[@]}") + ((tally[act]++)) + done + + local tally_str="${tally[@]}" + for ((ii = 0; ii < ${#tally[@]}; ii++)); do + ((tally[ii] > 0)) + check_err $? "NH #$ii not hit, tally='$tally_str'" + done + + log_test "mpath seed $what" + sysctl -qw net.ipv4.fib_multipath_hash_seed=0 +} + +test_mpath_seed_ipv4() +{ + test_mpath_seed 1000 IPv4 \ + $MZ $h1 -A 192.0.2.1 -B 192.0.2.34 -q \ + -p 64 -d 0 -c 10 -t udp +} + +test_mpath_seed_ipv6() +{ + test_mpath_seed 2000 IPv6 \ + $MZ -6 $h1 -A 2001:db8:1::1 -B 2001:db8:3::2 -q \ + -p 64 -d 0 -c 10 -t udp +} + +check_mpath_seed_stability() +{ + local seed=$1; shift + local act_0=$1; shift + local act_1=$1; shift + + ((act_0 == act_1)) + check_err $? "seed $seed: active NH moved from $act_0 to $act_1 after seed change" +} + +test_mpath_seed_stability() +{ + local group_id=$1; shift + local what=$1; shift + local -a mz=("$@") + + RET=0 + + local seed_0=0 + local seed_1=3221338814 + local seed_2=3735928559 + + # Initial active NH before touching the seed at all. 
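	# (How a probe resolves to a nexthop index; an annotation, not part
	# of the test: nh_stats_snapshot() dumps the group's per-member
	# packet counters as a JSON array, and get_active_nh() diffs two
	# snapshots. A standalone sketch with made-up counters:
	#
	#   s0='[0,0,0]'; s1='[0,10,0]'
	#   jq -n --argjson s0 "$s0" --argjson s1 "$s1" \
	#      '[range($s0|length)] | map($s1[.] - $s0[.])
	#       | map(if . > 8 then 1 else 0 end) | index(1)'   # prints 1
	# )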
+ local act_ini=$(probe_nh $group_id "${mz[@]}") + + local act_0_0=$(probe_seed $group_id $seed_0 "${mz[@]}") + local act_1_0=$(probe_seed $group_id $seed_1 "${mz[@]}") + local act_2_0=$(probe_seed $group_id $seed_2 "${mz[@]}") + + local act_0_1=$(probe_seed $group_id $seed_0 "${mz[@]}") + local act_1_1=$(probe_seed $group_id $seed_1 "${mz[@]}") + local act_2_1=$(probe_seed $group_id $seed_2 "${mz[@]}") + + check_mpath_seed_stability initial $act_ini $act_0_0 + check_mpath_seed_stability $seed_0 $act_0_0 $act_0_1 + check_mpath_seed_stability $seed_1 $act_1_0 $act_1_1 + check_mpath_seed_stability $seed_2 $act_2_0 $act_2_1 + + log_test "mpath seed stability $what" + sysctl -qw net.ipv4.fib_multipath_hash_seed=0 +} + +test_mpath_seed_stability_ipv4() +{ + test_mpath_seed_stability 1000 IPv4 \ + $MZ $h1 -A 192.0.2.1 -B 192.0.2.34 -q \ + -p 64 -d 0 -c 10 -t udp +} + +test_mpath_seed_stability_ipv6() +{ + test_mpath_seed_stability 2000 IPv6 \ + $MZ -6 $h1 -A 2001:db8:1::1 -B 2001:db8:3::2 -q \ + -p 64 -d 0 -c 10 -t udp +} + +trap cleanup EXIT + +setup_prepare +setup_wait +nexthops_create + +tests_run + +exit $EXIT_STATUS diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh index 6f0a2e452ba1..3f9d50f1ef9e 100755 --- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh +++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh @@ -680,9 +680,9 @@ test_learning() local mac=de:ad:be:ef:13:37 local dst=192.0.2.100 - # Enable learning on the VxLAN device and set ageing time to 10 seconds - ip link set dev br1 type bridge ageing_time 1000 - ip link set dev vx1 type vxlan ageing 10 + # Enable learning on the VxLAN device and set ageing time to 30 seconds + ip link set dev br1 type bridge ageing_time 3000 + ip link set dev vx1 type vxlan ageing 30 ip link set dev vx1 type vxlan learning reapply_config @@ -740,7 +740,7 @@ test_learning() vxlan_flood_test $mac $dst 0 10 0 - sleep 20 + sleep 60 bridge fdb show brport vx1 | grep $mac | grep -q self check_fail $? 
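A note on the ageing hunk above: the two knobs are in different units.
The bridge's ageing_time is expressed in centiseconds (3000 == 30 s),
while the VxLAN device's ageing is in plain seconds, which is why the
values differ by a factor of 100. A minimal sketch of driving both from
one value in seconds (the helper name is our own, not part of the patch):

    # Hypothetical helper: keep bridge and vxlan ageing in lockstep.
    vxlan_ageing_set()
    {
        local br=$1; shift
        local vx=$1; shift
        local secs=$1; shift

        ip link set dev "$br" type bridge ageing_time $((secs * 100))
        ip link set dev "$vx" type vxlan ageing "$secs"
    }

    vxlan_ageing_set br1 vx1 30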
diff --git a/tools/testing/selftests/net/hsr/hsr_ping.sh b/tools/testing/selftests/net/hsr/hsr_ping.sh index 3684b813b0f6..f5d207fc770a 100755 --- a/tools/testing/selftests/net/hsr/hsr_ping.sh +++ b/tools/testing/selftests/net/hsr/hsr_ping.sh @@ -152,6 +152,15 @@ setup_hsr_interfaces() ip -net "$ns3" addr add 100.64.0.3/24 dev hsr3 ip -net "$ns3" addr add dead:beef:1::3/64 dev hsr3 nodad + ip -net "$ns1" link set address 00:11:22:00:01:01 dev ns1eth1 + ip -net "$ns1" link set address 00:11:22:00:01:02 dev ns1eth2 + + ip -net "$ns2" link set address 00:11:22:00:02:01 dev ns2eth1 + ip -net "$ns2" link set address 00:11:22:00:02:02 dev ns2eth2 + + ip -net "$ns3" link set address 00:11:22:00:03:01 dev ns3eth1 + ip -net "$ns3" link set address 00:11:22:00:03:02 dev ns3eth2 + # All Links up ip -net "$ns1" link set ns1eth1 up ip -net "$ns1" link set ns1eth2 up diff --git a/tools/testing/selftests/net/hsr/hsr_redbox.sh b/tools/testing/selftests/net/hsr/hsr_redbox.sh index 1f36785347c0..998103502d5d 100755 --- a/tools/testing/selftests/net/hsr/hsr_redbox.sh +++ b/tools/testing/selftests/net/hsr/hsr_redbox.sh @@ -96,6 +96,21 @@ setup_hsr_interfaces() ip -n "${ns4}" link set ns4eth1 up ip -n "${ns5}" link set ns5eth1 up + ip -net "$ns1" link set address 00:11:22:00:01:01 dev ns1eth1 + ip -net "$ns1" link set address 00:11:22:00:01:02 dev ns1eth2 + + ip -net "$ns2" link set address 00:11:22:00:02:01 dev ns2eth1 + ip -net "$ns2" link set address 00:11:22:00:02:02 dev ns2eth2 + ip -net "$ns2" link set address 00:11:22:00:02:03 dev ns2eth3 + + ip -net "$ns3" link set address 00:11:22:00:03:11 dev ns3eth1 + ip -net "$ns3" link set address 00:11:22:00:03:11 dev ns3eth2 + ip -net "$ns3" link set address 00:11:22:00:03:11 dev ns3eth3 + ip -net "$ns3" link set address 00:11:22:00:03:11 dev ns3br1 + + ip -net "$ns4" link set address 00:11:22:00:04:01 dev ns4eth1 + ip -net "$ns5" link set address 00:11:22:00:05:01 dev ns5eth1 + ip -net "${ns1}" link add name hsr1 type hsr slave1 ns1eth1 slave2 ns1eth2 supervision 45 version ${HSRv} proto 0 ip -net "${ns2}" link add name hsr2 type hsr slave1 ns2eth1 slave2 ns2eth2 interlink ns2eth3 supervision 45 version ${HSRv} proto 0 diff --git a/tools/testing/selftests/net/lib.sh b/tools/testing/selftests/net/lib.sh index 9155c914c064..d0219032f773 100644 --- a/tools/testing/selftests/net/lib.sh +++ b/tools/testing/selftests/net/lib.sh @@ -125,28 +125,36 @@ slowwait_for_counter() slowwait "$timeout" until_counter_is ">= $((base + delta))" "$@" } +remove_ns_list() +{ + local item=$1 + local ns + local ns_list=("${NS_LIST[@]}") + NS_LIST=() + + for ns in "${ns_list[@]}"; do + if [ "${ns}" != "${item}" ]; then + NS_LIST+=("${ns}") + fi + done +} + cleanup_ns() { local ns="" - local errexit=0 local ret=0 - # disable errexit temporary - if [[ $- =~ "e" ]]; then - errexit=1 - set +e - fi - for ns in "$@"; do [ -z "${ns}" ] && continue - ip netns delete "${ns}" &> /dev/null + ip netns delete "${ns}" &> /dev/null || true if ! 
busywait $BUSYWAIT_TIMEOUT ip netns list \| grep -vq "^$ns$" &> /dev/null; then echo "Warn: Failed to remove namespace $ns" ret=1 + else + remove_ns_list "${ns}" fi done - [ $errexit -eq 1 ] && set -e return $ret } @@ -159,29 +167,30 @@ cleanup_all_ns() # setup_ns local remote setup_ns() { - local ns="" local ns_name="" local ns_list=() - local ns_exist= for ns_name in "$@"; do - # Some test may setup/remove same netns multi times - if unset ${ns_name} 2> /dev/null; then - ns="${ns_name,,}-$(mktemp -u XXXXXX)" - eval readonly ${ns_name}="$ns" - ns_exist=false - else - eval ns='$'${ns_name} - cleanup_ns "$ns" - ns_exist=true + # avoid conflicts with local var: internal error + if [ "${ns_name}" = "ns_name" ]; then + echo "Failed to setup namespace '${ns_name}': invalid name" + cleanup_ns "${ns_list[@]}" + exit $ksft_fail fi - if ! ip netns add "$ns"; then + # Some test may setup/remove same netns multi times + if [ -z "${!ns_name}" ]; then + eval "${ns_name}=${ns_name,,}-$(mktemp -u XXXXXX)" + else + cleanup_ns "${!ns_name}" + fi + + if ! ip netns add "${!ns_name}"; then echo "Failed to create namespace $ns_name" cleanup_ns "${ns_list[@]}" return $ksft_skip fi - ip -n "$ns" link set lo up - ! $ns_exist && ns_list+=("$ns") + ip -n "${!ns_name}" link set lo up + ns_list+=("${!ns_name}") done NS_LIST+=("${ns_list[@]}") } @@ -190,10 +199,10 @@ tc_rule_stats_get() { local dev=$1; shift local pref=$1; shift - local dir=$1; shift + local dir=${1:-ingress}; shift local selector=${1:-.packets}; shift - tc -j -s filter show dev $dev ${dir:-ingress} pref $pref \ + tc -j -s filter show dev $dev $dir pref $pref \ | jq ".[1].options.actions[].stats$selector" } diff --git a/tools/testing/selftests/net/lib/py/ksft.py b/tools/testing/selftests/net/lib/py/ksft.py index 4769b4eb1ea1..f26c20df9db4 100644 --- a/tools/testing/selftests/net/lib/py/ksft.py +++ b/tools/testing/selftests/net/lib/py/ksft.py @@ -6,6 +6,7 @@ import sys import time import traceback from .consts import KSFT_MAIN_NAME +from .utils import global_defer_queue KSFT_RESULT = None KSFT_RESULT_ALL = True @@ -57,6 +58,11 @@ def ksft_ge(a, b, comment=""): _fail("Check failed", a, "<", b, comment) +def ksft_lt(a, b, comment=""): + if a >= b: + _fail("Check failed", a, ">=", b, comment) + + class ksft_raises: def __init__(self, expected_type): self.exception = None @@ -103,6 +109,24 @@ def ktap_result(ok, cnt=1, case="", comment=""): print(res) +def ksft_flush_defer(): + global KSFT_RESULT + + i = 0 + qlen_start = len(global_defer_queue) + while global_defer_queue: + i += 1 + entry = global_defer_queue.pop() + try: + entry.exec_only() + except: + ksft_pr(f"Exception while handling defer / cleanup (callback {i} of {qlen_start})!") + tb = traceback.format_exc() + for line in tb.strip().split('\n'): + ksft_pr("Defer Exception|", line) + KSFT_RESULT = False + + def ksft_run(cases=None, globs=None, case_pfx=None, args=()): cases = cases or [] @@ -122,32 +146,41 @@ def ksft_run(cases=None, globs=None, case_pfx=None, args=()): global KSFT_RESULT cnt = 0 + stop = False for case in cases: KSFT_RESULT = True cnt += 1 + comment = "" + cnt_key = "" + try: case(*args) except KsftSkipEx as e: - ktap_result(True, cnt, case, comment="SKIP " + str(e)) - totals['skip'] += 1 - continue + comment = "SKIP " + str(e) + cnt_key = 'skip' except KsftXfailEx as e: - ktap_result(True, cnt, case, comment="XFAIL " + str(e)) - totals['xfail'] += 1 - continue - except Exception as e: + comment = "XFAIL " + str(e) + cnt_key = 'xfail' + except BaseException as e: + stop |= 
isinstance(e, KeyboardInterrupt) tb = traceback.format_exc() for line in tb.strip().split('\n'): ksft_pr("Exception|", line) - ktap_result(False, cnt, case) - totals['fail'] += 1 - continue + if stop: + ksft_pr("Stopping tests due to KeyboardInterrupt.") + KSFT_RESULT = False + cnt_key = 'fail' - ktap_result(KSFT_RESULT, cnt, case) - if KSFT_RESULT: - totals['pass'] += 1 - else: - totals['fail'] += 1 + ksft_flush_defer() + + if not cnt_key: + cnt_key = 'pass' if KSFT_RESULT else 'fail' + + ktap_result(KSFT_RESULT, cnt, case, comment=comment) + totals[cnt_key] += 1 + + if stop: + break print( f"# Totals: pass:{totals['pass']} fail:{totals['fail']} xfail:{totals['xfail']} xpass:0 skip:{totals['skip']} error:0" diff --git a/tools/testing/selftests/net/lib/py/utils.py b/tools/testing/selftests/net/lib/py/utils.py index 0540ea24921d..72590c3f90f1 100644 --- a/tools/testing/selftests/net/lib/py/utils.py +++ b/tools/testing/selftests/net/lib/py/utils.py @@ -1,12 +1,18 @@ # SPDX-License-Identifier: GPL-2.0 +import errno import json as _json import random import re +import socket import subprocess import time +class CmdExitFailure(Exception): + pass + + class cmd: def __init__(self, comm, shell=True, fail=True, ns=None, background=False, host=None, timeout=5): if ns: @@ -41,8 +47,8 @@ class cmd: if self.proc.returncode != 0 and fail: if len(stderr) > 0 and stderr[-1] == "\n": stderr = stderr[:-1] - raise Exception("Command failed: %s\nSTDOUT: %s\nSTDERR: %s" % - (self.proc.args, stdout, stderr)) + raise CmdExitFailure("Command failed: %s\nSTDOUT: %s\nSTDERR: %s" % + (self.proc.args, stdout, stderr)) class bkg(cmd): @@ -60,6 +66,40 @@ class bkg(cmd): return self.process(terminate=self.terminate, fail=self.check_fail) +global_defer_queue = [] + + +class defer: + def __init__(self, func, *args, **kwargs): + global global_defer_queue + + if not callable(func): + raise Exception("defer created with un-callable object, did you call the function instead of passing its name?") + + self.func = func + self.args = args + self.kwargs = kwargs + + self._queue = global_defer_queue + self._queue.append(self) + + def __enter__(self): + return self + + def __exit__(self, ex_type, ex_value, ex_tb): + return self.exec() + + def exec_only(self): + self.func(*self.args, **self.kwargs) + + def cancel(self): + self._queue.remove(self) + + def exec(self): + self.cancel() + self.exec_only() + + def tool(name, args, json=None, ns=None, host=None): cmd_str = name + ' ' if json: @@ -77,11 +117,24 @@ def ip(args, json=None, ns=None, host=None): return tool('ip', args, json=json, host=host) +def ethtool(args, json=None, ns=None, host=None): + return tool('ethtool', args, json=json, ns=ns, host=host) + + def rand_port(): """ - Get unprivileged port, for now just random, one day we may decide to check if used. + Get a random unprivileged port, try to make sure it's not already used. 
""" - return random.randint(10000, 65535) + for _ in range(1000): + port = random.randint(10000, 65535) + try: + with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s: + s.bind(("", port)) + return port + except OSError as e: + if e.errno != errno.EADDRINUSE: + raise + raise Exception("Can't find any free unprivileged port") def wait_port_listen(port, proto="tcp", ns=None, host=None, sleep=0.005, deadline=5): diff --git a/tools/testing/selftests/net/mptcp/mptcp_lib.sh b/tools/testing/selftests/net/mptcp/mptcp_lib.sh index 6ffa9b7a3260..438280e68434 100644 --- a/tools/testing/selftests/net/mptcp/mptcp_lib.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_lib.sh @@ -1,6 +1,9 @@ #! /bin/bash # SPDX-License-Identifier: GPL-2.0 +. "$(dirname "${0}")/../lib.sh" +. "$(dirname "${0}")/../net_helper.sh" + readonly KSFT_PASS=0 readonly KSFT_FAIL=1 readonly KSFT_SKIP=4 @@ -361,20 +364,7 @@ mptcp_lib_check_transfer() { # $1: ns, $2: port mptcp_lib_wait_local_port_listen() { - local listener_ns="${1}" - local port="${2}" - - local port_hex - port_hex="$(printf "%04X" "${port}")" - - local _ - for _ in $(seq 10); do - ip netns exec "${listener_ns}" cat /proc/net/tcp* | \ - awk "BEGIN {rc=1} {if (\$2 ~ /:${port_hex}\$/ && \$4 ~ /0A/) \ - {rc=0; exit}} END {exit rc}" && - break - sleep 0.1 - done + wait_local_port_listen "${@}" "tcp" } mptcp_lib_check_output() { @@ -438,17 +428,13 @@ mptcp_lib_check_tools() { } mptcp_lib_ns_init() { - local sec rndh - - sec=$(date +%s) - rndh=$(printf %x "${sec}")-$(mktemp -u XXXXXX) + if ! setup_ns "${@}"; then + mptcp_lib_pr_fail "Failed to setup namespaces ${*}" + exit ${KSFT_FAIL} + fi local netns for netns in "${@}"; do - eval "${netns}=${netns}-${rndh}" - - ip netns add "${!netns}" || exit ${KSFT_SKIP} - ip -net "${!netns}" link set lo up ip netns exec "${!netns}" sysctl -q net.mptcp.enabled=1 ip netns exec "${!netns}" sysctl -q net.ipv4.conf.all.rp_filter=0 ip netns exec "${!netns}" sysctl -q net.ipv4.conf.default.rp_filter=0 @@ -456,9 +442,10 @@ mptcp_lib_ns_init() { } mptcp_lib_ns_exit() { + cleanup_ns "${@}" + local netns for netns in "${@}"; do - ip netns del "${netns}" rm -f /tmp/"${netns}".{nstat,out} done } diff --git a/tools/testing/selftests/net/netfilter/nft_queue.sh b/tools/testing/selftests/net/netfilter/nft_queue.sh index 8538f08c64c2..c61d23a8c88d 100755 --- a/tools/testing/selftests/net/netfilter/nft_queue.sh +++ b/tools/testing/selftests/net/netfilter/nft_queue.sh @@ -375,6 +375,42 @@ EOF wait 2>/dev/null } +test_queue_removal() +{ + read tainted_then < /proc/sys/kernel/tainted + + ip netns exec "$ns1" nft -f - </dev/null + kill $nfqpid + + ip netns exec "$ns1" nft flush ruleset + + if [ "$tainted_then" -ne 0 ];then + return + fi + + read tainted_now < /proc/sys/kernel/tainted + if [ "$tainted_now" -eq 0 ];then + echo "PASS: queue program exiting while packets queued" + else + echo "TAINT: queue program exiting while packets queued" + ret=1 + fi +} + ip netns exec "$nsrouter" sysctl net.ipv6.conf.all.forwarding=1 > /dev/null ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null ip netns exec "$nsrouter" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null @@ -413,5 +449,6 @@ test_tcp_localhost test_tcp_localhost_connectclose test_tcp_localhost_requeue test_icmp_vrf +test_queue_removal exit $ret diff --git a/tools/testing/selftests/net/netns-sysctl.sh b/tools/testing/selftests/net/netns-sysctl.sh new file mode 100755 index 000000000000..45c34a3b9aae --- /dev/null +++ b/tools/testing/selftests/net/netns-sysctl.sh @@ -0,0 
+1,40 @@ +#!/bin/bash -e +# SPDX-License-Identifier: GPL-2.0 +# +# This test checks that the network buffer sysctls are present +# in a network namespace, and that they are read-only. + +source lib.sh + +cleanup() { + cleanup_ns $test_ns +} + +trap cleanup EXIT + +fail() { + echo "ERROR: $*" >&2 + exit 1 +} + +setup_ns test_ns + +for sc in {r,w}mem_{default,max}; do + # check that this is writable in the init netns + [ -w "/proc/sys/net/core/$sc" ] || + fail "$sc isn't writable in the init netns!" + + # change the value in the host netns + sysctl -qw "net.core.$sc=300000" || + fail "Can't write $sc in init netns!" + + # check that the value is read from the init netns + [ "$(ip netns exec $test_ns sysctl -n "net.core.$sc")" -eq 300000 ] || + fail "Value for $sc mismatch!" + + # check that this isn't writable in a netns + ip netns exec $test_ns [ -w "/proc/sys/net/core/$sc" ] && + fail "$sc is writable in a netns!" +done + +echo 'Test passed OK' diff --git a/tools/testing/selftests/net/openvswitch/openvswitch.sh b/tools/testing/selftests/net/openvswitch/openvswitch.sh index 15bca0708717..cc0bfae2bafa 100755 --- a/tools/testing/selftests/net/openvswitch/openvswitch.sh +++ b/tools/testing/selftests/net/openvswitch/openvswitch.sh @@ -11,6 +11,11 @@ ksft_skip=4 PAUSE_ON_FAIL=no VERBOSE=0 TRACING=0 +WAIT_TIMEOUT=5 + +if test "X$KSFT_MACHINE_SLOW" == "Xyes"; then + WAIT_TIMEOUT=10 +fi tests=" arp_ping eth-arp: Basic arp ping between two NS @@ -20,10 +25,37 @@ tests=" nat_related_v4 ip4-nat-related: ICMP related matches work with SNAT netlink_checks ovsnl: validate netlink attrs and settings upcall_interfaces ovs: test the upcall interfaces - drop_reason drop: test drop reasons are emitted" + drop_reason drop: test drop reasons are emitted + psample psample: Sampling packets with psample" info() { - [ $VERBOSE = 0 ] || echo $* + [ "${ovs_dir}" != "" ] && + echo "`date +"[%m-%d %H:%M:%S]"` $*" >> ${ovs_dir}/debug.log + [ $VERBOSE = 0 ] || echo $* +} + +ovs_wait() { + info "waiting $WAIT_TIMEOUT s for: $@" + + if "$@" ; then + info "wait succeeded immediately" + return 0 + fi + + # A quick re-check helps speed up small races in fast systems. + # However, fractional sleeps might not necessarily work. + local start=0 + sleep 0.1 || { sleep 1; start=1; } + + for (( i=start; i<WAIT_TIMEOUT; i++ )); do + if "$@" ; then + info "wait succeeded after $i seconds" + return 0 + fi + sleep 1 + done + + info "wait failed after $i seconds" + return 1 +} + ovs_sbx() { if test "X$2" != X; then - (ovs_setenv $1; shift; "$@" >> ${ovs_dir}/debug.log) + (ovs_setenv $1; shift; + info "run cmd: $@"; "$@" >> ${ovs_dir}/debug.log) else ovs_setenv $1 fi @@ -102,12 +135,21 @@ ovs_netns_spawn_daemon() { shift netns=$1 shift - info "spawning cmd: $*" - ip netns exec $netns $* >> $ovs_dir/stdout 2>> $ovs_dir/stderr & + if [ "$netns" == "_default" ]; then + $* >> $ovs_dir/stdout 2>> $ovs_dir/stderr & + else + ip netns exec $netns $* >> $ovs_dir/stdout 2>> $ovs_dir/stderr & + fi pid=$! ovs_sbx "$sbx" on_exit "kill -TERM $pid 2>/dev/null" } +ovs_spawn_daemon() { + sbx=$1 + shift + ovs_netns_spawn_daemon $sbx "_default" $* +} + ovs_add_netns_and_veths () { info "Adding netns attached: sbx:$1 dp:$2 {$3, $4, $5}" ovs_sbx "$1" ip netns add "$3" || return 1 @@ -139,7 +181,7 @@ ovs_add_flow () { info "Adding flow to DP: sbx:$1 br:$2 flow:$3 act:$4" ovs_sbx "$1" python3 $ovs_base/ovs-dpctl.py add-flow "$2" "$3" "$4" if [ $?
-ne 0 ]; then - echo "Flow [ $3 : $4 ] failed" >> ${ovs_dir}/debug.log + info "Flow [ $3 : $4 ] failed" return 1 fi return 0 @@ -170,6 +212,19 @@ ovs_drop_reason_count() return `echo "$perf_output" | grep "$pattern" | wc -l` } +ovs_test_flow_fails () { + ERR_MSG="Flow actions may not be safe on all matching packets" + + PRE_TEST=$(dmesg | grep -c "${ERR_MSG}") + ovs_add_flow $@ &> /dev/null && return 1 + POST_TEST=$(dmesg | grep -c "${ERR_MSG}") + + if [ "$PRE_TEST" == "$POST_TEST" ]; then + return 1 + fi + return 0 +} + usage() { echo echo "$0 [OPTIONS] [TEST]..." @@ -184,6 +239,91 @@ usage() { exit 1 } + +# psample test +# - use psample to observe packets +test_psample() { + sbx_add "test_psample" || return $? + + # Add a datapath with per-vport dispatching. + ovs_add_dp "test_psample" psample -V 2:1 || return 1 + + info "create namespaces" + ovs_add_netns_and_veths "test_psample" "psample" \ + client c0 c1 172.31.110.10/24 -u || return 1 + ovs_add_netns_and_veths "test_psample" "psample" \ + server s0 s1 172.31.110.20/24 -u || return 1 + + # Check if psample actions can be configured. + ovs_add_flow "test_psample" psample \ + 'in_port(1),eth(),eth_type(0x0806),arp()' 'psample(group=1)' &> /dev/null + if [ $? == 1 ]; then + info "no support for psample - skipping" + ovs_exit_sig + return $ksft_skip + fi + + ovs_del_flows "test_psample" psample + + # Test action verification. + OLDIFS=$IFS + IFS='*' + min_key='in_port(1),eth(),eth_type(0x0800),ipv4()' + for testcase in \ + "cookie too large"*"psample(group=1,cookie=1615141312111009080706050403020100)" \ + "no group with cookie"*"psample(cookie=abcd)" \ + "no group"*"psample()"; + do + set -- $testcase; + ovs_test_flow_fails "test_psample" psample $min_key $2 + if [ $? == 1 ]; then + info "failed - $1" + return 1 + fi + done + IFS=$OLDIFS + + ovs_del_flows "test_psample" psample + # Allow ARP + ovs_add_flow "test_psample" psample \ + 'in_port(1),eth(),eth_type(0x0806),arp()' '2' || return 1 + ovs_add_flow "test_psample" psample \ + 'in_port(2),eth(),eth_type(0x0806),arp()' '1' || return 1 + + # Sample first 14 bytes of all traffic. + ovs_add_flow "test_psample" psample \ + "in_port(1),eth(),eth_type(0x0800),ipv4()" \ + "trunc(14),psample(group=1,cookie=c0ffee),2" + + # Sample all traffic. In this case, use a sample() action with both + # psample and an upcall emulating simultaneous local sampling and + # sFlow / IPFIX. + nlpid=$(grep -E "listening on upcall packet handler" \ + $ovs_dir/s0.out | cut -d ":" -f 2 | tr -d ' ') + + ovs_add_flow "test_psample" psample \ + "in_port(2),eth(),eth_type(0x0800),ipv4()" \ + "sample(sample=100%,actions(psample(group=2,cookie=eeff0c),userspace(pid=${nlpid},userdata=eeff0c))),1" + + # Record psample data. + ovs_spawn_daemon "test_psample" python3 $ovs_base/ovs-dpctl.py psample-events + ovs_wait grep -q "listening for psample events" ${ovs_dir}/stdout + + # Send a single ping. + ovs_sbx "test_psample" ip netns exec client ping -I c1 172.31.110.20 -c 1 || return 1 + + # We should have received one userspace action upcall and 2 psample packets. + ovs_wait grep -q "userspace action command" $ovs_dir/s0.out || return 1 + + # client -> server samples should only contain the first 14 bytes of the packet.
+ ovs_wait grep -qE "rate:4294967295,group:1,cookie:c0ffee data:[0-9a-f]{28}$" \ + $ovs_dir/stdout || return 1 + + ovs_wait grep -q "rate:4294967295,group:2,cookie:eeff0c" $ovs_dir/stdout || return 1 + + return 0 +} + # drop_reason test # - drop packets and verify the right drop reason is reported test_drop_reason() { @@ -599,7 +739,8 @@ test_upcall_interfaces() { ovs_add_netns_and_veths "test_upcall_interfaces" ui0 upc left0 l0 \ 172.31.110.1/24 -u || return 1 - sleep 1 + ovs_wait grep -q "listening on upcall packet handler" ${ovs_dir}/left0.out + info "sending arping" ip netns exec upc arping -I l0 172.31.110.20 -c 1 \ >$ovs_dir/arping.stdout 2>$ovs_dir/arping.stderr @@ -613,16 +754,20 @@ run_test() { tname="$1" tdesc="$2" - if ! lsmod | grep openvswitch >/dev/null 2>&1; then - stdbuf -o0 printf "TEST: %-60s [NOMOD]\n" "${tdesc}" - return $ksft_skip - fi - if python3 ovs-dpctl.py -h 2>&1 | \ grep -E "Need to (install|upgrade) the python" >/dev/null 2>&1; then stdbuf -o0 printf "TEST: %-60s [PYLIB]\n" "${tdesc}" return $ksft_skip fi + + python3 ovs-dpctl.py show >/dev/null 2>&1 || \ + echo "[DPCTL] show exception." + + if ! lsmod | grep openvswitch >/dev/null 2>&1; then + stdbuf -o0 printf "TEST: %-60s [NOMOD]\n" "${tdesc}" + return $ksft_skip + fi + printf "TEST: %-60s [START]\n" "${tname}" unset IFS diff --git a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py index 9f8dec2f6539..8a0396bfaf99 100644 --- a/tools/testing/selftests/net/openvswitch/ovs-dpctl.py +++ b/tools/testing/selftests/net/openvswitch/ovs-dpctl.py @@ -8,8 +8,10 @@ import argparse import errno import ipaddress import logging +import math import multiprocessing import re +import socket import struct import sys import time @@ -26,13 +28,16 @@ try: from pyroute2.netlink import genlmsg from pyroute2.netlink import nla from pyroute2.netlink import nlmsg_atoms + from pyroute2.netlink.event import EventSocket from pyroute2.netlink.exceptions import NetlinkError from pyroute2.netlink.generic import GenericNetlinkSocket + from pyroute2.netlink.nlsocket import Marshal import pyroute2 + import pyroute2.iproute except ModuleNotFoundError: print("Need to install the python pyroute2 package >= 0.6.") - sys.exit(0) + sys.exit(1) OVS_DATAPATH_FAMILY = "ovs_datapath" @@ -58,6 +63,7 @@ OVS_FLOW_CMD_DEL = 2 OVS_FLOW_CMD_GET = 3 OVS_FLOW_CMD_SET = 4 +UINT32_MAX = 0xFFFFFFFF def macstr(mac): outstr = ":".join(["%02X" % i for i in mac]) @@ -198,6 +204,18 @@ def convert_ipv4(data): return int(ipaddress.IPv4Address(ip)), int(ipaddress.IPv4Address(mask)) +def convert_ipv6(data): + ip, _, mask = data.partition('/') + + if not ip: + ip = mask = 0 + elif not mask: + mask = 'ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff' + elif mask.isdigit(): + mask = ipaddress.IPv6Network("::/" + mask).hostmask + + return ipaddress.IPv6Address(ip).packed, ipaddress.IPv6Address(mask).packed + def convert_int(size): def convert_int_sized(data): value, _, mask = data.partition('/') @@ -267,6 +285,75 @@ def parse_extract_field( return str_skipped, data +def parse_attrs(actstr, attr_desc): + """Parses the given action string and returns a list of netlink + attributes based on a list of attribute descriptions. + + Each element in the attribute description list is a tuple such as: + (name, attr_name, parse_func) + where: + name: is the string representing the attribute + attr_name: is the name of the attribute as defined in the uAPI. 
+ parse_func: is a callable accepting a string and returning either + a single object (the parsed attribute value) or a tuple of + two values (the parsed attribute value and the remaining string) + + Returns a list of attributes and the remaining string. + """ + def parse_attr(actstr, key, func): + actstr = actstr[len(key) :] + + if not func: + return None, actstr + + delim = actstr[0] + actstr = actstr[1:] + + if delim == "=": + pos = strcspn(actstr, ",)") + ret = func(actstr[:pos]) + else: + ret = func(actstr) + + if isinstance(ret, tuple): + (datum, actstr) = ret + else: + datum = ret + actstr = actstr[strcspn(actstr, ",)"):] + + if delim == "(": + if not actstr or actstr[0] != ")": + raise ValueError("Action contains unbalanced parentheses") + + actstr = actstr[1:] + + actstr = actstr[strspn(actstr, ", ") :] + + return datum, actstr + + attrs = [] + attr_desc = list(attr_desc) + while actstr and actstr[0] != ")" and attr_desc: + found = False + for i, (key, attr, func) in enumerate(attr_desc): + if actstr.startswith(key): + datum, actstr = parse_attr(actstr, key, func) + attrs.append([attr, datum]) + found = True + del attr_desc[i] + + if not found: + raise ValueError("Unknown attribute: '%s'" % actstr) + + actstr = actstr[strspn(actstr, ", ") :] + + if actstr[0] != ")": + raise ValueError("Action string contains extra garbage or has " + "unbalanced parenthesis: '%s'" % actstr) + + return attrs, actstr[1:] + + class ovs_dp_msg(genlmsg): # include the OVS version # We need a custom header rather than just being able to rely on @@ -282,15 +369,15 @@ class ovsactions(nla): ("OVS_ACTION_ATTR_UNSPEC", "none"), ("OVS_ACTION_ATTR_OUTPUT", "uint32"), ("OVS_ACTION_ATTR_USERSPACE", "userspace"), - ("OVS_ACTION_ATTR_SET", "none"), + ("OVS_ACTION_ATTR_SET", "ovskey"), ("OVS_ACTION_ATTR_PUSH_VLAN", "none"), ("OVS_ACTION_ATTR_POP_VLAN", "flag"), - ("OVS_ACTION_ATTR_SAMPLE", "none"), + ("OVS_ACTION_ATTR_SAMPLE", "sample"), ("OVS_ACTION_ATTR_RECIRC", "uint32"), ("OVS_ACTION_ATTR_HASH", "none"), ("OVS_ACTION_ATTR_PUSH_MPLS", "none"), ("OVS_ACTION_ATTR_POP_MPLS", "flag"), - ("OVS_ACTION_ATTR_SET_MASKED", "none"), + ("OVS_ACTION_ATTR_SET_MASKED", "ovskey"), ("OVS_ACTION_ATTR_CT", "ctact"), ("OVS_ACTION_ATTR_TRUNC", "uint32"), ("OVS_ACTION_ATTR_PUSH_ETH", "none"), @@ -304,8 +391,85 @@ class ovsactions(nla): ("OVS_ACTION_ATTR_ADD_MPLS", "none"), ("OVS_ACTION_ATTR_DEC_TTL", "none"), ("OVS_ACTION_ATTR_DROP", "uint32"), + ("OVS_ACTION_ATTR_PSAMPLE", "psample"), ) + class psample(nla): + nla_flags = NLA_F_NESTED + + nla_map = ( + ("OVS_PSAMPLE_ATTR_UNSPEC", "none"), + ("OVS_PSAMPLE_ATTR_GROUP", "uint32"), + ("OVS_PSAMPLE_ATTR_COOKIE", "array(uint8)"), + ) + + def dpstr(self, more=False): + args = "group=%d" % self.get_attr("OVS_PSAMPLE_ATTR_GROUP") + + cookie = self.get_attr("OVS_PSAMPLE_ATTR_COOKIE") + if cookie: + args += ",cookie(%s)" % \ + "".join(format(x, "02x") for x in cookie) + + return "psample(%s)" % args + + def parse(self, actstr): + desc = ( + ("group", "OVS_PSAMPLE_ATTR_GROUP", int), + ("cookie", "OVS_PSAMPLE_ATTR_COOKIE", + lambda x: list(bytearray.fromhex(x))) + ) + + attrs, actstr = parse_attrs(actstr, desc) + + for attr in attrs: + self["attrs"].append(attr) + + return actstr + + class sample(nla): + nla_flags = NLA_F_NESTED + + nla_map = ( + ("OVS_SAMPLE_ATTR_UNSPEC", "none"), + ("OVS_SAMPLE_ATTR_PROBABILITY", "uint32"), + ("OVS_SAMPLE_ATTR_ACTIONS", "ovsactions"), + ) + + def dpstr(self, more=False): + args = [] + + args.append("sample={:.2f}%".format( + 100 * 
self.get_attr("OVS_SAMPLE_ATTR_PROBABILITY") / + UINT32_MAX)) + + actions = self.get_attr("OVS_SAMPLE_ATTR_ACTIONS") + if actions: + args.append("actions(%s)" % actions.dpstr(more)) + + return "sample(%s)" % ",".join(args) + + def parse(self, actstr): + def parse_nested_actions(actstr): + subacts = ovsactions() + parsed_len = subacts.parse(actstr) + return subacts, actstr[parsed_len :] + + def percent_to_rate(percent): + percent = float(percent.strip('%')) + return int(math.floor(UINT32_MAX * (percent / 100.0) + .5)) + + desc = ( + ("sample", "OVS_SAMPLE_ATTR_PROBABILITY", percent_to_rate), + ("actions", "OVS_SAMPLE_ATTR_ACTIONS", parse_nested_actions), + ) + attrs, actstr = parse_attrs(actstr, desc) + + for attr in attrs: + self["attrs"].append(attr) + + return actstr + class ctact(nla): nla_flags = NLA_F_NESTED @@ -427,50 +591,77 @@ class ovsactions(nla): print_str += "userdata=" for f in self.get_attr("OVS_USERSPACE_ATTR_USERDATA"): print_str += "%x." % f - if self.get_attr("OVS_USERSPACE_ATTR_TUN_PORT") is not None: + if self.get_attr("OVS_USERSPACE_ATTR_EGRESS_TUN_PORT") is not None: print_str += "egress_tun_port=%d" % self.get_attr( - "OVS_USERSPACE_ATTR_TUN_PORT" + "OVS_USERSPACE_ATTR_EGRESS_TUN_PORT" ) print_str += ")" return print_str + def parse(self, actstr): + attrs_desc = ( + ("pid", "OVS_USERSPACE_ATTR_PID", int), + ("userdata", "OVS_USERSPACE_ATTR_USERDATA", + lambda x: list(bytearray.fromhex(x))), + ("egress_tun_port", "OVS_USERSPACE_ATTR_EGRESS_TUN_PORT", int) + ) + + attrs, actstr = parse_attrs(actstr, attrs_desc) + for attr in attrs: + self["attrs"].append(attr) + + return actstr + def dpstr(self, more=False): print_str = "" - for field in self.nla_map: + for field in self["attrs"]: if field[1] == "none" or self.get_attr(field[0]) is None: continue if print_str != "": print_str += "," - if field[1] == "uint32": - if field[0] == "OVS_ACTION_ATTR_OUTPUT": - print_str += "%d" % int(self.get_attr(field[0])) - elif field[0] == "OVS_ACTION_ATTR_RECIRC": - print_str += "recirc(0x%x)" % int(self.get_attr(field[0])) - elif field[0] == "OVS_ACTION_ATTR_TRUNC": - print_str += "trunc(%d)" % int(self.get_attr(field[0])) - elif field[0] == "OVS_ACTION_ATTR_DROP": - print_str += "drop(%d)" % int(self.get_attr(field[0])) - elif field[1] == "flag": - if field[0] == "OVS_ACTION_ATTR_CT_CLEAR": - print_str += "ct_clear" - elif field[0] == "OVS_ACTION_ATTR_POP_VLAN": - print_str += "pop_vlan" - elif field[0] == "OVS_ACTION_ATTR_POP_ETH": - print_str += "pop_eth" - elif field[0] == "OVS_ACTION_ATTR_POP_NSH": - print_str += "pop_nsh" - elif field[0] == "OVS_ACTION_ATTR_POP_MPLS": - print_str += "pop_mpls" + if field[0] == "OVS_ACTION_ATTR_OUTPUT": + print_str += "%d" % int(self.get_attr(field[0])) + elif field[0] == "OVS_ACTION_ATTR_RECIRC": + print_str += "recirc(0x%x)" % int(self.get_attr(field[0])) + elif field[0] == "OVS_ACTION_ATTR_TRUNC": + print_str += "trunc(%d)" % int(self.get_attr(field[0])) + elif field[0] == "OVS_ACTION_ATTR_DROP": + print_str += "drop(%d)" % int(self.get_attr(field[0])) + elif field[0] == "OVS_ACTION_ATTR_CT_CLEAR": + print_str += "ct_clear" + elif field[0] == "OVS_ACTION_ATTR_POP_VLAN": + print_str += "pop_vlan" + elif field[0] == "OVS_ACTION_ATTR_POP_ETH": + print_str += "pop_eth" + elif field[0] == "OVS_ACTION_ATTR_POP_NSH": + print_str += "pop_nsh" + elif field[0] == "OVS_ACTION_ATTR_POP_MPLS": + print_str += "pop_mpls" else: datum = self.get_attr(field[0]) if field[0] == "OVS_ACTION_ATTR_CLONE": print_str += "clone(" print_str += datum.dpstr(more) print_str 
+= ")" + elif field[0] == "OVS_ACTION_ATTR_SET" or \ + field[0] == "OVS_ACTION_ATTR_SET_MASKED": + print_str += "set" + field = datum + mask = None + if field[0] == "OVS_ACTION_ATTR_SET_MASKED": + print_str += "_masked" + field = datum[0] + mask = datum[1] + print_str += "(" + print_str += field.dpstr(mask, more) + print_str += ")" else: - print_str += datum.dpstr(more) + try: + print_str += datum.dpstr(more) + except: + print_str += "{ATTR: %s not decoded}" % field[0] return print_str @@ -544,6 +735,25 @@ class ovsactions(nla): self["attrs"].append(("OVS_ACTION_ATTR_CLONE", subacts)) actstr = actstr[parsedLen:] parsed = True + elif parse_starts_block(actstr, "set(", False): + parencount += 1 + k = ovskey() + actstr = actstr[len("set("):] + actstr = k.parse(actstr, None) + self["attrs"].append(("OVS_ACTION_ATTR_SET", k)) + if not actstr.startswith(")"): + actstr = ")" + actstr + parsed = True + elif parse_starts_block(actstr, "set_masked(", False): + parencount += 1 + k = ovskey() + m = ovskey() + actstr = actstr[len("set_masked("):] + actstr = k.parse(actstr, m) + self["attrs"].append(("OVS_ACTION_ATTR_SET_MASKED", [k, m])) + if not actstr.startswith(")"): + actstr = ")" + actstr + parsed = True elif parse_starts_block(actstr, "ct(", False): parencount += 1 actstr = actstr[len("ct(") :] @@ -637,6 +847,37 @@ class ovsactions(nla): self["attrs"].append(["OVS_ACTION_ATTR_CT", ctact]) parsed = True + elif parse_starts_block(actstr, "sample(", False): + sampleact = self.sample() + actstr = sampleact.parse(actstr[len("sample(") : ]) + self["attrs"].append(["OVS_ACTION_ATTR_SAMPLE", sampleact]) + parsed = True + + elif parse_starts_block(actstr, "psample(", False): + psampleact = self.psample() + actstr = psampleact.parse(actstr[len("psample(") : ]) + self["attrs"].append(["OVS_ACTION_ATTR_PSAMPLE", psampleact]) + parsed = True + + elif parse_starts_block(actstr, "userspace(", False): + uact = self.userspace() + actstr = uact.parse(actstr[len("userspace(") : ]) + self["attrs"].append(["OVS_ACTION_ATTR_USERSPACE", uact]) + parsed = True + + elif parse_starts_block(actstr, "trunc(", False): + parencount += 1 + actstr, val = parse_extract_field( + actstr, + "trunc(", + r"([0-9]+)", + int, + False, + None, + ) + self["attrs"].append(["OVS_ACTION_ATTR_TRUNC", val]) + parsed = True + actstr = actstr[strspn(actstr, ", ") :] while parencount > 0: parencount -= 1 @@ -675,7 +916,7 @@ class ovskey(nla): ("OVS_KEY_ATTR_ARP", "ovs_key_arp"), ("OVS_KEY_ATTR_ND", "ovs_key_nd"), ("OVS_KEY_ATTR_SKB_MARK", "uint32"), - ("OVS_KEY_ATTR_TUNNEL", "none"), + ("OVS_KEY_ATTR_TUNNEL", "ovs_key_tunnel"), ("OVS_KEY_ATTR_SCTP", "ovs_key_sctp"), ("OVS_KEY_ATTR_TCP_FLAGS", "be16"), ("OVS_KEY_ATTR_DP_HASH", "uint32"), @@ -907,21 +1148,21 @@ class ovskey(nla): "src", "src", lambda x: str(ipaddress.IPv6Address(x)), - lambda x: int.from_bytes(x, "big"), - lambda x: ipaddress.IPv6Address(x), + lambda x: ipaddress.IPv6Address(x).packed if x else 0, + convert_ipv6, ), ( "dst", "dst", lambda x: str(ipaddress.IPv6Address(x)), - lambda x: int.from_bytes(x, "big"), - lambda x: ipaddress.IPv6Address(x), + lambda x: ipaddress.IPv6Address(x).packed if x else 0, + convert_ipv6, ), - ("label", "label", "%d", int), - ("proto", "proto", "%d", int), - ("tclass", "tclass", "%d", int), - ("hlimit", "hlimit", "%d", int), - ("frag", "frag", "%d", int), + ("label", "label", "%d", lambda x: int(x) if x else 0), + ("proto", "proto", "%d", lambda x: int(x) if x else 0), + ("tclass", "tclass", "%d", lambda x: int(x) if x else 0), + ("hlimit", "hlimit", 
"%d", lambda x: int(x) if x else 0), + ("frag", "frag", "%d", lambda x: int(x) if x else 0), ) def __init__( @@ -1119,7 +1360,7 @@ class ovskey(nla): "target", "target", lambda x: str(ipaddress.IPv6Address(x)), - lambda x: int.from_bytes(x, "big"), + convert_ipv6, ), ("sll", "sll", macstr, lambda x: int.from_bytes(x, "big")), ("tll", "tll", macstr, lambda x: int.from_bytes(x, "big")), @@ -1204,13 +1445,13 @@ class ovskey(nla): "src", "src", lambda x: str(ipaddress.IPv6Address(x)), - lambda x: int.from_bytes(x, "big", convertmac), + convert_ipv6, ), ( "dst", "dst", lambda x: str(ipaddress.IPv6Address(x)), - lambda x: int.from_bytes(x, "big"), + convert_ipv6, ), ("tp_src", "tp_src", "%d", int), ("tp_dst", "tp_dst", "%d", int), @@ -1235,6 +1476,163 @@ class ovskey(nla): init=init, ) + class ovs_key_tunnel(nla): + nla_flags = NLA_F_NESTED + + nla_map = ( + ("OVS_TUNNEL_KEY_ATTR_ID", "be64"), + ("OVS_TUNNEL_KEY_ATTR_IPV4_SRC", "ipaddr"), + ("OVS_TUNNEL_KEY_ATTR_IPV4_DST", "ipaddr"), + ("OVS_TUNNEL_KEY_ATTR_TOS", "uint8"), + ("OVS_TUNNEL_KEY_ATTR_TTL", "uint8"), + ("OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT", "flag"), + ("OVS_TUNNEL_KEY_ATTR_CSUM", "flag"), + ("OVS_TUNNEL_KEY_ATTR_OAM", "flag"), + ("OVS_TUNNEL_KEY_ATTR_GENEVE_OPTS", "array(uint32)"), + ("OVS_TUNNEL_KEY_ATTR_TP_SRC", "be16"), + ("OVS_TUNNEL_KEY_ATTR_TP_DST", "be16"), + ("OVS_TUNNEL_KEY_ATTR_VXLAN_OPTS", "none"), + ("OVS_TUNNEL_KEY_ATTR_IPV6_SRC", "ipaddr"), + ("OVS_TUNNEL_KEY_ATTR_IPV6_DST", "ipaddr"), + ("OVS_TUNNEL_KEY_ATTR_PAD", "none"), + ("OVS_TUNNEL_KEY_ATTR_ERSPAN_OPTS", "none"), + ("OVS_TUNNEL_KEY_ATTR_IPV4_INFO_BRIDGE", "flag"), + ) + + def parse(self, flowstr, mask=None): + if not flowstr.startswith("tunnel("): + return None, None + + k = ovskey.ovs_key_tunnel() + if mask is not None: + mask = ovskey.ovs_key_tunnel() + + flowstr = flowstr[len("tunnel("):] + + v6_address = None + + fields = [ + ("tun_id=", r"(\d+)", int, "OVS_TUNNEL_KEY_ATTR_ID", + 0xffffffffffffffff, None, None), + + ("src=", r"([0-9a-fA-F\.]+)", str, + "OVS_TUNNEL_KEY_ATTR_IPV4_SRC", "255.255.255.255", "0.0.0.0", + False), + ("dst=", r"([0-9a-fA-F\.]+)", str, + "OVS_TUNNEL_KEY_ATTR_IPV4_DST", "255.255.255.255", "0.0.0.0", + False), + + ("ipv6_src=", r"([0-9a-fA-F:]+)", str, + "OVS_TUNNEL_KEY_ATTR_IPV6_SRC", + "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", "::", True), + ("ipv6_dst=", r"([0-9a-fA-F:]+)", str, + "OVS_TUNNEL_KEY_ATTR_IPV6_DST", + "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff", "::", True), + + ("tos=", r"(\d+)", int, "OVS_TUNNEL_KEY_ATTR_TOS", 255, 0, + None), + ("ttl=", r"(\d+)", int, "OVS_TUNNEL_KEY_ATTR_TTL", 255, 0, + None), + + ("tp_src=", r"(\d+)", int, "OVS_TUNNEL_KEY_ATTR_TP_SRC", + 65535, 0, None), + ("tp_dst=", r"(\d+)", int, "OVS_TUNNEL_KEY_ATTR_TP_DST", + 65535, 0, None), + ] + + forced_include = ["OVS_TUNNEL_KEY_ATTR_TTL"] + + for prefix, regex, typ, attr_name, mask_val, default_val, v46_flag in fields: + flowstr, value = parse_extract_field(flowstr, prefix, regex, typ, False) + if not attr_name: + raise Exception("Bad list value in tunnel fields") + + if value is None and attr_name in forced_include: + value = default_val + mask_val = default_val + + if value is not None: + if v46_flag is not None: + if v6_address is None: + v6_address = v46_flag + if v46_flag != v6_address: + raise ValueError("Cannot mix v6 and v4 addresses") + k["attrs"].append([attr_name, value]) + if mask is not None: + mask["attrs"].append([attr_name, mask_val]) + else: + if v46_flag is not None: + if v6_address is None or v46_flag != v6_address: + continue + if mask is 
not None: + mask["attrs"].append([attr_name, default_val]) + + if k["attrs"][0][0] != "OVS_TUNNEL_KEY_ATTR_ID": + raise ValueError("Needs a tunid set") + + if flowstr.startswith("flags("): + flowstr = flowstr[len("flags("):] + flagspos = flowstr.find(")") + flags = flowstr[:flagspos] + flowstr = flowstr[flagspos + 1:] + + flag_attrs = { + "df": "OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT", + "csum": "OVS_TUNNEL_KEY_ATTR_CSUM", + "oam": "OVS_TUNNEL_KEY_ATTR_OAM" + } + + for flag in flags.split("|"): + if flag in flag_attrs: + k["attrs"].append([flag_attrs[flag], True]) + if mask is not None: + mask["attrs"].append([flag_attrs[flag], True]) + + flowstr = flowstr[strspn(flowstr, ", ") :] + return flowstr, k, mask + + def dpstr(self, mask=None, more=False): + print_str = "tunnel(" + + flagsattrs = [] + for k in self["attrs"]: + noprint = False + if k[0] == "OVS_TUNNEL_KEY_ATTR_ID": + print_str += "tun_id=%d" % k[1] + elif k[0] == "OVS_TUNNEL_KEY_ATTR_IPV4_SRC": + print_str += "src=%s" % k[1] + elif k[0] == "OVS_TUNNEL_KEY_ATTR_IPV4_DST": + print_str += "dst=%s" % k[1] + elif k[0] == "OVS_TUNNEL_KEY_ATTR_IPV6_SRC": + print_str += "ipv6_src=%s" % k[1] + elif k[0] == "OVS_TUNNEL_KEY_ATTR_IPV6_DST": + print_str += "ipv6_dst=%s" % k[1] + elif k[0] == "OVS_TUNNEL_KEY_ATTR_TOS": + print_str += "tos=%d" % k[1] + elif k[0] == "OVS_TUNNEL_KEY_ATTR_TTL": + print_str += "ttl=%d" % k[1] + elif k[0] == "OVS_TUNNEL_KEY_ATTR_TP_SRC": + print_str += "tp_src=%d" % k[1] + elif k[0] == "OVS_TUNNEL_KEY_ATTR_TP_DST": + print_str += "tp_dst=%d" % k[1] + elif k[0] == "OVS_TUNNEL_KEY_ATTR_DONT_FRAGMENT": + noprint = True + flagsattrs.append("df") + elif k[0] == "OVS_TUNNEL_KEY_ATTR_CSUM": + noprint = True + flagsattrs.append("csum") + elif k[0] == "OVS_TUNNEL_KEY_ATTR_OAM": + noprint = True + flagsattrs.append("oam") + + if not noprint: + print_str += "," + + if len(flagsattrs): + print_str += "flags(" + "|".join(flagsattrs) + ")" + print_str += ")" + return print_str + class ovs_key_mpls(nla): fields = (("lse", ">I"),) @@ -1243,6 +1641,7 @@ class ovskey(nla): ("OVS_KEY_ATTR_PRIORITY", "skb_priority", intparse), ("OVS_KEY_ATTR_SKB_MARK", "skb_mark", intparse), ("OVS_KEY_ATTR_RECIRC_ID", "recirc_id", intparse), + ("OVS_KEY_ATTR_TUNNEL", "tunnel", ovskey.ovs_key_tunnel), ("OVS_KEY_ATTR_DP_HASH", "dp_hash", intparse), ("OVS_KEY_ATTR_CT_STATE", "ct_state", parse_ct_state), ("OVS_KEY_ATTR_CT_ZONE", "ct_zone", intparse), @@ -1309,7 +1708,7 @@ class ovskey(nla): mask["attrs"].append([field[0], m]) self["attrs"].append([field[0], k]) - flowstr = flowstr[strspn(flowstr, "),") :] + flowstr = flowstr[strspn(flowstr, "), ") :] return flowstr @@ -1345,6 +1744,13 @@ class ovskey(nla): lambda x: False, True, ), + ( + "OVS_KEY_ATTR_TUNNEL", + "tunnel", + None, + False, + False, + ), ( "OVS_KEY_ATTR_CT_STATE", "ct_state", @@ -1617,7 +2023,7 @@ class OvsVport(GenericNetlinkSocket): ("OVS_VPORT_ATTR_PORT_NO", "uint32"), ("OVS_VPORT_ATTR_TYPE", "uint32"), ("OVS_VPORT_ATTR_NAME", "asciiz"), - ("OVS_VPORT_ATTR_OPTIONS", "none"), + ("OVS_VPORT_ATTR_OPTIONS", "vportopts"), ("OVS_VPORT_ATTR_UPCALL_PID", "array(uint32)"), ("OVS_VPORT_ATTR_STATS", "vportstats"), ("OVS_VPORT_ATTR_PAD", "none"), @@ -1625,6 +2031,13 @@ class OvsVport(GenericNetlinkSocket): ("OVS_VPORT_ATTR_NETNSID", "uint32"), ) + class vportopts(nla): + nla_map = ( + ("OVS_TUNNEL_ATTR_UNSPEC", "none"), + ("OVS_TUNNEL_ATTR_DST_PORT", "uint16"), + ("OVS_TUNNEL_ATTR_EXTENSION", "none"), + ) + class vportstats(nla): fields = ( ("rx_packets", "=Q"), @@ -1693,7 +2106,7 @@ class 
OvsVport(GenericNetlinkSocket): raise ne return reply - def attach(self, dpindex, vport_ifname, ptype): + def attach(self, dpindex, vport_ifname, ptype, dport, lwt): msg = OvsVport.ovs_vport_msg() msg["cmd"] = OVS_VPORT_CMD_NEW @@ -1702,12 +2115,43 @@ class OvsVport(GenericNetlinkSocket): msg["dpifindex"] = dpindex port_type = OvsVport.str_to_type(ptype) - msg["attrs"].append(["OVS_VPORT_ATTR_TYPE", port_type]) msg["attrs"].append(["OVS_VPORT_ATTR_NAME", vport_ifname]) msg["attrs"].append( ["OVS_VPORT_ATTR_UPCALL_PID", [self.upcall_packet.epid]] ) + TUNNEL_DEFAULTS = [("geneve", 6081), + ("vxlan", 4789)] + + for tnl in TUNNEL_DEFAULTS: + if ptype == tnl[0]: + if not dport: + dport = tnl[1] + + if not lwt: + vportopt = OvsVport.ovs_vport_msg.vportopts() + vportopt["attrs"].append( + ["OVS_TUNNEL_ATTR_DST_PORT", socket.htons(dport)] + ) + msg["attrs"].append( + ["OVS_VPORT_ATTR_OPTIONS", vportopt] + ) + else: + port_type = OvsVport.OVS_VPORT_TYPE_NETDEV + ipr = pyroute2.iproute.IPRoute() + + if tnl[0] == "geneve": + ipr.link("add", ifname=vport_ifname, kind=tnl[0], + geneve_port=dport, + geneve_collect_metadata=True, + geneve_udp_zero_csum6_rx=1) + elif tnl[0] == "vxlan": + ipr.link("add", ifname=vport_ifname, kind=tnl[0], + vxlan_learning=0, vxlan_collect_metadata=1, + vxlan_udp_zero_csum6_rx=1, vxlan_port=dport) + break + msg["attrs"].append(["OVS_VPORT_ATTR_TYPE", port_type]) + try: reply = self.nlm_request( msg, msg_type=self.prid, msg_flags=NLM_F_REQUEST | NLM_F_ACK @@ -2018,10 +2462,71 @@ class OvsFlow(GenericNetlinkSocket): print("MISS upcall[%d/%s]: %s" % (seq, pktpres, keystr), flush=True) def execute(self, packetmsg): - print("userspace execute command") + print("userspace execute command", flush=True) def action(self, packetmsg): - print("userspace action command") + print("userspace action command", flush=True) + + +class psample_sample(genlmsg): + nla_map = ( + ("PSAMPLE_ATTR_IIFINDEX", "none"), + ("PSAMPLE_ATTR_OIFINDEX", "none"), + ("PSAMPLE_ATTR_ORIGSIZE", "none"), + ("PSAMPLE_ATTR_SAMPLE_GROUP", "uint32"), + ("PSAMPLE_ATTR_GROUP_SEQ", "none"), + ("PSAMPLE_ATTR_SAMPLE_RATE", "uint32"), + ("PSAMPLE_ATTR_DATA", "array(uint8)"), + ("PSAMPLE_ATTR_GROUP_REFCOUNT", "none"), + ("PSAMPLE_ATTR_TUNNEL", "none"), + ("PSAMPLE_ATTR_PAD", "none"), + ("PSAMPLE_ATTR_OUT_TC", "none"), + ("PSAMPLE_ATTR_OUT_TC_OCC", "none"), + ("PSAMPLE_ATTR_LATENCY", "none"), + ("PSAMPLE_ATTR_TIMESTAMP", "none"), + ("PSAMPLE_ATTR_PROTO", "none"), + ("PSAMPLE_ATTR_USER_COOKIE", "array(uint8)"), + ) + + def dpstr(self): + fields = [] + data = "" + for (attr, value) in self["attrs"]: + if attr == "PSAMPLE_ATTR_SAMPLE_GROUP": + fields.append("group:%d" % value) + if attr == "PSAMPLE_ATTR_SAMPLE_RATE": + fields.append("rate:%d" % value) + if attr == "PSAMPLE_ATTR_USER_COOKIE": + value = "".join(format(x, "02x") for x in value) + fields.append("cookie:%s" % value) + if attr == "PSAMPLE_ATTR_DATA" and len(value) > 0: + data = "data:%s" % "".join(format(x, "02x") for x in value) + + return ("%s %s" % (",".join(fields), data)).strip() + + +class psample_msg(Marshal): + PSAMPLE_CMD_SAMPLE = 0 + PSAMPLE_CMD_GET_GROUP = 1 + PSAMPLE_CMD_NEW_GROUP = 2 + PSAMPLE_CMD_DEL_GROUP = 3 + PSAMPLE_CMD_SET_FILTER = 4 + msg_map = {PSAMPLE_CMD_SAMPLE: psample_sample} + + +class PsampleEvent(EventSocket): + genl_family = "psample" + mcast_groups = ["packets"] + marshal_class = psample_msg + + def read_samples(self): + print("listening for psample events", flush=True) + while True: + try: + for msg in self.get(): + print(msg.dpstr(), 
flush=True) + except NetlinkError as ne: + raise ne def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=NDB(), vpl=OvsVport()): @@ -2053,12 +2558,19 @@ def print_ovsdp_full(dp_lookup_rep, ifindex, ndb=NDB(), vpl=OvsVport()): for iface in ndb.interfaces: rep = vpl.info(iface.ifname, ifindex) if rep is not None: + opts = "" + vpo = rep.get_attr("OVS_VPORT_ATTR_OPTIONS") + if vpo: + dpo = vpo.get_attr("OVS_TUNNEL_ATTR_DST_PORT") + if dpo: + opts += " tnl-dport:%s" % socket.ntohs(dpo) print( - " port %d: %s (%s)" + " port %d: %s (%s%s)" % ( rep.get_attr("OVS_VPORT_ATTR_PORT_NO"), rep.get_attr("OVS_VPORT_ATTR_NAME"), OvsVport.type_to_str(rep.get_attr("OVS_VPORT_ATTR_TYPE")), + opts, ) ) @@ -2081,7 +2593,7 @@ def main(argv): help="Increment 'verbose' output counter.", default=0, ) - subparsers = parser.add_subparsers() + subparsers = parser.add_subparsers(dest="subcommand") showdpcmd = subparsers.add_parser("show") showdpcmd.add_argument( @@ -2120,12 +2632,30 @@ def main(argv): "--ptype", type=str, default="netdev", - choices=["netdev", "internal"], + choices=["netdev", "internal", "geneve", "vxlan"], help="Interface type (default netdev)", ) + addifcmd.add_argument( + "-p", + "--dport", + type=int, + default=0, + help="Destination port (0 for default)" + ) + addifcmd.add_argument( + "-l", + "--lwt", + type=bool, + default=True, + help="Use LWT infrastructure instead of vport (default true)." + ) delifcmd = subparsers.add_parser("del-if") delifcmd.add_argument("dpname", help="Datapath Name") delifcmd.add_argument("delif", help="Interface name to delete") + delifcmd.add_argument("-d", + "--dellink", + type=bool, default=False, + help="Delete the link as well.") dumpflcmd = subparsers.add_parser("dump-flows") dumpflcmd.add_argument("dumpdp", help="Datapath Name") @@ -2138,6 +2668,8 @@ def main(argv): delfscmd = subparsers.add_parser("del-flows") delfscmd.add_argument("flsbr", help="Datapath name") + subparsers.add_parser("psample-events") + args = parser.parse_args() if args.verbose > 0: @@ -2152,6 +2684,9 @@ def main(argv): sys.setrecursionlimit(100000) + if args.subcommand == "psample-events": + PsampleEvent().read_samples() + if hasattr(args, "showdp"): found = False for iface in ndb.interfaces: @@ -2186,7 +2721,8 @@ def main(argv): print("DP '%s' not found." % args.dpname) return 1 dpindex = rep["dpifindex"] - rep = ovsvp.attach(rep["dpifindex"], args.addif, args.ptype) + rep = ovsvp.attach(rep["dpifindex"], args.addif, args.ptype, + args.dport, args.lwt) msg = "vport '%s'" % args.addif if rep and rep["header"]["error"] is None: msg += " added." @@ -2207,6 +2743,9 @@ def main(argv): msg += " removed." else: msg += " failed to remove."
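+ # if -d/--dellink was given, also delete the net device that backed the vport (e.g. a geneve/vxlan link created by add-if in LWT mode)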
+ if args.dellink: + ipr = pyroute2.iproute.IPRoute() + ipr.link("del", index=ipr.link_lookup(ifname=args.delif)[0]) elif hasattr(args, "dumpdp"): rep = ovsdp.info(args.dumpdp, 0) if rep is None: diff --git a/tools/testing/selftests/net/openvswitch/settings b/tools/testing/selftests/net/openvswitch/settings new file mode 100644 index 000000000000..e2206265f67c --- /dev/null +++ b/tools/testing/selftests/net/openvswitch/settings @@ -0,0 +1 @@ +timeout=900 diff --git a/tools/testing/selftests/net/pmtu.sh b/tools/testing/selftests/net/pmtu.sh index cfc84958025a..5175c0c83a23 100755 --- a/tools/testing/selftests/net/pmtu.sh +++ b/tools/testing/selftests/net/pmtu.sh @@ -842,25 +842,97 @@ setup_bridge() { run_cmd ${ns_a} ip link set veth_A-C master br0 } -setup_ovs_vxlan_or_geneve() { +setup_ovs_via_internal_utility() { type="${1}" a_addr="${2}" b_addr="${3}" + dport="${4}" - if [ "${type}" = "vxlan" ]; then - opts="${opts} ttl 64 dstport 4789" - opts_b="local ${b_addr}" + run_cmd python3 ./openvswitch/ovs-dpctl.py add-if ovs_br0 ${type}_a -t ${type} || return 1 + + ports=$(python3 ./openvswitch/ovs-dpctl.py show) + br0_port=$(echo "$ports" | grep -E "\sovs_br0" | sed -e 's@port @@' | cut -d: -f1 | xargs) + type_a_port=$(echo "$ports" | grep ${type}_a | sed -e 's@port @@' | cut -d: -f1 | xargs) + veth_a_port=$(echo "$ports" | grep veth_A | sed -e 's@port @@' | cut -d: -f1 | xargs) + + v4_a_tun="${prefix4}.${a_r1}.1" + v4_b_tun="${prefix4}.${b_r1}.1" + + v6_a_tun="${prefix6}:${a_r1}::1" + v6_b_tun="${prefix6}:${b_r1}::1" + + if [ "${v4_a_tun}" = "${a_addr}" ]; then + run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \ + "recirc_id(0),in_port(${veth_a_port}),eth(),eth_type(0x0800),ipv4()" \ + "set(tunnel(tun_id=1,dst=${v4_b_tun},ttl=64,tp_dst=${dport},flags(df|csum))),${type_a_port}" + run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \ + "recirc_id(0),in_port(${veth_a_port}),eth(),eth_type(0x86dd),ipv6()" \ + "set(tunnel(tun_id=1,dst=${v4_b_tun},ttl=64,tp_dst=${dport},flags(df|csum))),${type_a_port}" + run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \ + "recirc_id(0),tunnel(tun_id=1,src=${v4_b_tun},dst=${v4_a_tun}),in_port(${type_a_port}),eth(),eth_type(0x0800),ipv4()" \ + "${veth_a_port}" + run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \ + "recirc_id(0),tunnel(tun_id=1,src=${v4_b_tun},dst=${v4_a_tun}),in_port(${type_a_port}),eth(),eth_type(0x86dd),ipv6()" \ + "${veth_a_port}" + run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \ + "recirc_id(0),tunnel(tun_id=1,src=${v4_b_tun},dst=${v4_a_tun}),in_port(${type_a_port}),eth(),eth_type(0x0806),arp()" \ + "${veth_a_port}" + run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \ + "recirc_id(0),in_port(${veth_a_port}),eth(),eth_type(0x0806),arp(sip=${veth4_c_addr},tip=${tunnel4_b_addr})" \ + "set(tunnel(tun_id=1,dst=${v4_b_tun},ttl=64,tp_dst=${dport},flags(df|csum))),${type_a_port}" + else + run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \ + "recirc_id(0),in_port(${veth_a_port}),eth(),eth_type(0x0800),ipv4()" \ + "set(tunnel(tun_id=1,ipv6_dst=${v6_b_tun},ttl=64,tp_dst=${dport},flags(df|csum))),${type_a_port}" + run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \ + "recirc_id(0),in_port(${veth_a_port}),eth(),eth_type(0x86dd),ipv6()" \ + "set(tunnel(tun_id=1,ipv6_dst=${v6_b_tun},ttl=64,tp_dst=${dport},flags(df|csum))),${type_a_port}" + run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \ + 
"recirc_id(0),tunnel(tun_id=1,ipv6_src=${v6_b_tun},ipv6_dst=${v6_a_tun}),in_port(${type_a_port}),eth(),eth_type(0x0800),ipv4()" \ + "${veth_a_port}" + run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \ + "recirc_id(0),tunnel(tun_id=1,ipv6_src=${v6_b_tun},ipv6_dst=${v6_a_tun}),in_port(${type_a_port}),eth(),eth_type(0x86dd),ipv6()" \ + "${veth_a_port}" + run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \ + "recirc_id(0),tunnel(tun_id=1,ipv6_src=${v6_b_tun},ipv6_dst=${v6_a_tun}),in_port(${type_a_port}),eth(),eth_type(0x0806),arp()" \ + "${veth_a_port}" + run_cmd python3 ./openvswitch/ovs-dpctl.py add-flow ovs_br0 \ + "recirc_id(0),in_port(${veth_a_port}),eth(),eth_type(0x0806),arp(sip=${veth4_c_addr},tip=${tunnel4_b_addr})" \ + "set(tunnel(tun_id=1,ipv6_dst=${v6_b_tun},ttl=64,tp_dst=${dport},flags(df|csum))),${type_a_port}" fi +} + +setup_ovs_via_vswitchd() { + type="${1}" + b_addr="${2}" run_cmd ovs-vsctl add-port ovs_br0 ${type}_a -- \ set interface ${type}_a type=${type} \ options:remote_ip=${b_addr} options:key=1 options:csum=true || return 1 +} + +setup_ovs_vxlan_or_geneve() { + type="${1}" + a_addr="${2}" + b_addr="${3}" + dport="6081" + + if [ "${type}" = "vxlan" ]; then + dport="4789" + opts="${opts} ttl 64 dstport 4789" + opts_b="local ${b_addr}" + fi + + setup_ovs_via_internal_utility "${type}" "${a_addr}" "${b_addr}" \ + "${dport}" || \ + setup_ovs_via_vswitchd "${type}" "${b_addr}" || return 1 run_cmd ${ns_b} ip link add ${type}_b type ${type} id 1 ${opts_b} remote ${a_addr} ${opts} || return 1 run_cmd ${ns_b} ip addr add ${tunnel4_b_addr}/${tunnel4_mask} dev ${type}_b run_cmd ${ns_b} ip addr add ${tunnel6_b_addr}/${tunnel6_mask} dev ${type}_b + run_cmd ip link set ${type}_a up run_cmd ${ns_b} ip link set ${type}_b up } @@ -880,8 +952,24 @@ setup_ovs_vxlan6() { setup_ovs_vxlan_or_geneve vxlan ${prefix6}:${a_r1}::1 ${prefix6}:${b_r1}::1 } +setup_ovs_br_internal() { + run_cmd python3 ./openvswitch/ovs-dpctl.py add-dp ovs_br0 || \ + return 1 +} + +setup_ovs_br_vswitchd() { + run_cmd ovs-vsctl add-br ovs_br0 || return 1 +} + +setup_ovs_add_if() { + ifname="${1}" + run_cmd python3 ./openvswitch/ovs-dpctl.py add-if ovs_br0 \ + "${ifname}" || \ + run_cmd ovs-vsctl add-port ovs_br0 "${ifname}" +} + setup_ovs_bridge() { - run_cmd ovs-vsctl add-br ovs_br0 || return $ksft_skip + setup_ovs_br_internal || setup_ovs_br_vswitchd || return $ksft_skip run_cmd ip link set ovs_br0 up run_cmd ${ns_c} ip link add veth_C-A type veth peer name veth_A-C @@ -891,7 +979,7 @@ setup_ovs_bridge() { run_cmd ${ns_c} ip link set veth_C-A up run_cmd ${ns_c} ip addr add ${veth4_c_addr}/${veth4_mask} dev veth_C-A run_cmd ${ns_c} ip addr add ${veth6_c_addr}/${veth6_mask} dev veth_C-A - run_cmd ovs-vsctl add-port ovs_br0 veth_A-C + setup_ovs_add_if veth_A-C # Move veth_A-R1 to init run_cmd ${ns_a} ip link set veth_A-R1 netns 1 @@ -922,6 +1010,18 @@ trace() { sleep 1 } +cleanup_del_ovs_internal() { + # squelch the output of the del-if commands since it can be wordy + python3 ./openvswitch/ovs-dpctl.py del-if ovs_br0 -d true vxlan_a >/dev/null 2>&1 + python3 ./openvswitch/ovs-dpctl.py del-if ovs_br0 -d true geneve_a >/dev/null 2>&1 + python3 ./openvswitch/ovs-dpctl.py del-dp ovs_br0 >/dev/null 2>&1 +} + +cleanup_del_ovs_vswitchd() { + ovs-vsctl --if-exists del-port vxlan_a 2>/dev/null + ovs-vsctl --if-exists del-br ovs_br0 2>/dev/null +} + cleanup() { for pid in ${tcpdump_pids}; do kill ${pid} @@ -940,10 +1040,10 @@ cleanup() { cleanup_all_ns - ip link del veth_A-C 2>/dev/null - ip link del 
veth_A-R1 2>/dev/null - ovs-vsctl --if-exists del-port vxlan_a 2>/dev/null - ovs-vsctl --if-exists del-br ovs_br0 2>/dev/null + ip link del veth_A-C 2>/dev/null + ip link del veth_A-R1 2>/dev/null + cleanup_del_ovs_internal + cleanup_del_ovs_vswitchd rm -f "$tmpoutfile" } @@ -1397,6 +1497,12 @@ test_pmtu_ipvX_over_ovs_vxlanY_or_geneveY_exception() { outer_family=${3} ll_mtu=4000 + if [ "${type}" = "vxlan" ]; then + tun_a="vxlan_sys_4789" + elif [ "${type}" = "geneve" ]; then + tun_a="genev_sys_6081" + fi + if [ ${outer_family} -eq 4 ]; then setup namespaces routing ovs_bridge ovs_${type}4 || return $ksft_skip # IPv4 header UDP header VXLAN/GENEVE header Ethernet header @@ -1407,17 +1513,11 @@ exp_mtu=$((${ll_mtu} - 40 - 8 - 8 - 14)) fi - if [ "${type}" = "vxlan" ]; then - tun_a="vxlan_sys_4789" - elif [ "${type}" = "geneve" ]; then - tun_a="genev_sys_6081" - fi - - trace "" "${tun_a}" "${ns_b}" ${type}_b \ - "" veth_A-R1 "${ns_r1}" veth_R1-A \ - "${ns_b}" veth_B-R1 "${ns_r1}" veth_R1-B \ - "" ovs_br0 "" veth-A-C \ - "${ns_c}" veth_C-A + trace "" ${type}_a "${ns_b}" ${type}_b \ + "" veth_A-R1 "${ns_r1}" veth_R1-A \ + "${ns_b}" veth_B-R1 "${ns_r1}" veth_R1-B \ + "" ovs_br0 "" veth_A-C \ + "${ns_c}" veth_C-A "" "${tun_a}" if [ ${family} -eq 4 ]; then ping=ping @@ -1436,8 +1536,9 @@ test_pmtu_ipvX_over_ovs_vxlanY_or_geneveY_exception() { mtu "${ns_b}" veth_B-R1 ${ll_mtu} mtu "${ns_r1}" veth_R1-B ${ll_mtu} - mtu "" ${tun_a} $((${ll_mtu} + 1000)) - mtu "${ns_b}" ${type}_b $((${ll_mtu} + 1000)) + mtu "" ${tun_a} $((${ll_mtu} + 1000)) 2>/dev/null || \ + mtu "" ${type}_a $((${ll_mtu} + 1000)) 2>/dev/null + mtu "${ns_b}" ${type}_b $((${ll_mtu} + 1000)) run_cmd ${ns_c} ${ping} -q -M want -i 0.1 -c 20 -s $((${ll_mtu} + 500)) ${dst} || return 1 diff --git a/tools/testing/selftests/net/tcp_ao/self-connect.c b/tools/testing/selftests/net/tcp_ao/self-connect.c index e154d9e198a9..a5698b0a3718 100644 --- a/tools/testing/selftests/net/tcp_ao/self-connect.c +++ b/tools/testing/selftests/net/tcp_ao/self-connect.c @@ -30,8 +30,6 @@ static void setup_lo_intf(const char *lo_intf) static void tcp_self_connect(const char *tst, unsigned int port, bool different_keyids, bool check_restore) { - uint64_t before_challenge_ack, after_challenge_ack; - uint64_t before_syn_challenge, after_syn_challenge; struct tcp_ao_counters before_ao, after_ao; uint64_t before_aogood, after_aogood; struct netstat *ns_before, *ns_after; @@ -62,8 +60,6 @@ static void tcp_self_connect(const char *tst, unsigned int port, ns_before = netstat_read(); before_aogood = netstat_get(ns_before, "TCPAOGood", NULL); - before_challenge_ack = netstat_get(ns_before, "TCPChallengeACK", NULL); - before_syn_challenge = netstat_get(ns_before, "TCPSYNChallenge", NULL); if (test_get_tcp_ao_counters(sk, &before_ao)) test_error("test_get_tcp_ao_counters()"); @@ -82,8 +78,6 @@ static void tcp_self_connect(const char *tst, unsigned int port, ns_after = netstat_read(); after_aogood = netstat_get(ns_after, "TCPAOGood", NULL); - after_challenge_ack = netstat_get(ns_after, "TCPChallengeACK", NULL); - after_syn_challenge = netstat_get(ns_after, "TCPSYNChallenge", NULL); if (test_get_tcp_ao_counters(sk, &after_ao)) test_error("test_get_tcp_ao_counters()"); if (!check_restore) { @@ -98,18 +92,6 @@ static void tcp_self_connect(const char *tst, unsigned int port, close(sk); return; } - if (after_challenge_ack <= before_challenge_ack || - after_syn_challenge <= before_syn_challenge) { - /* - * It's also meant to test
simultaneous open, so check - * these counters as well. - */ - test_fail("%s: Didn't challenge SYN or ACK: %zu <= %zu OR %zu <= %zu", - tst, after_challenge_ack, before_challenge_ack, - after_syn_challenge, before_syn_challenge); - close(sk); - return; - } if (test_tcp_ao_counters_cmp(tst, &before_ao, &after_ao, TEST_CNT_GOOD)) { close(sk); diff --git a/tools/testing/selftests/net/udpgso.c b/tools/testing/selftests/net/udpgso.c index 85b3baa3f7f3..3e74cfa1a2bf 100644 --- a/tools/testing/selftests/net/udpgso.c +++ b/tools/testing/selftests/net/udpgso.c @@ -53,6 +53,7 @@ static bool cfg_do_ipv6; static bool cfg_do_connected; static bool cfg_do_connectionless; static bool cfg_do_msgmore; +static bool cfg_do_recv = true; static bool cfg_do_setsockopt; static int cfg_specific_test_id = -1; @@ -414,6 +415,9 @@ static void run_one(struct testcase *test, int fdt, int fdr, if (!sent) return; + if (!cfg_do_recv) + return; + if (test->gso_len) mss = test->gso_len; else @@ -464,8 +468,10 @@ static void run_test(struct sockaddr *addr, socklen_t alen) if (fdr == -1) error(1, errno, "socket r"); - if (bind(fdr, addr, alen)) - error(1, errno, "bind"); + if (cfg_do_recv) { + if (bind(fdr, addr, alen)) + error(1, errno, "bind"); + } /* Have tests fail quickly instead of hang */ if (setsockopt(fdr, SOL_SOCKET, SO_RCVTIMEO, &tv, sizeof(tv))) @@ -524,7 +530,7 @@ static void parse_opts(int argc, char **argv) { int c; - while ((c = getopt(argc, argv, "46cCmst:")) != -1) { + while ((c = getopt(argc, argv, "46cCmRst:")) != -1) { switch (c) { case '4': cfg_do_ipv4 = true; @@ -541,6 +547,9 @@ static void parse_opts(int argc, char **argv) case 'm': cfg_do_msgmore = true; break; + case 'R': + cfg_do_recv = false; + break; case 's': cfg_do_setsockopt = true; break; diff --git a/tools/testing/selftests/net/udpgso.sh b/tools/testing/selftests/net/udpgso.sh index 6c63178086b0..85d1fa3c1ff7 100755 --- a/tools/testing/selftests/net/udpgso.sh +++ b/tools/testing/selftests/net/udpgso.sh @@ -27,6 +27,31 @@ test_route_mtu() { ip route add local fd00::1/128 table local dev lo mtu 1500 } +setup_dummy_sink() { + ip link add name sink mtu 1500 type dummy + ip addr add dev sink 10.0.0.0/24 + ip addr add dev sink fd00::2/64 nodad + ip link set dev sink up +} + +test_hw_gso_hw_csum() { + setup_dummy_sink + ethtool -K sink tx-checksum-ip-generic on >/dev/null + ethtool -K sink tx-udp-segmentation on >/dev/null +} + +test_sw_gso_hw_csum() { + setup_dummy_sink + ethtool -K sink tx-checksum-ip-generic on >/dev/null + ethtool -K sink tx-udp-segmentation off >/dev/null +} + +test_sw_gso_sw_csum() { + setup_dummy_sink + ethtool -K sink tx-checksum-ip-generic off >/dev/null + ethtool -K sink tx-udp-segmentation off >/dev/null +} + if [ "$#" -gt 0 ]; then "$1" shift 2 # pop "test_*" arg and "--" delimiter @@ -56,3 +81,21 @@ echo "ipv4 msg_more" echo "ipv6 msg_more" ./in_netns.sh "$0" test_dev_mtu -- ./udpgso -6 -C -m + +echo "ipv4 hw-gso hw-csum" +./in_netns.sh "$0" test_hw_gso_hw_csum -- ./udpgso -4 -C -R + +echo "ipv6 hw-gso hw-csum" +./in_netns.sh "$0" test_hw_gso_hw_csum -- ./udpgso -6 -C -R + +echo "ipv4 sw-gso hw-csum" +./in_netns.sh "$0" test_sw_gso_hw_csum -- ./udpgso -4 -C -R + +echo "ipv6 sw-gso hw-csum" +./in_netns.sh "$0" test_sw_gso_hw_csum -- ./udpgso -6 -C -R + +echo "ipv4 sw-gso sw-csum" +./in_netns.sh "$0" test_sw_gso_sw_csum -- ./udpgso -4 -C -R + +echo "ipv6 sw-gso sw-csum" +./in_netns.sh "$0" test_sw_gso_sw_csum -- ./udpgso -6 -C -R diff --git a/tools/testing/selftests/net/vrf_route_leaking.sh 
b/tools/testing/selftests/net/vrf_route_leaking.sh index 2da32f4c479b..152171fb1fc8 100755 --- a/tools/testing/selftests/net/vrf_route_leaking.sh +++ b/tools/testing/selftests/net/vrf_route_leaking.sh @@ -59,6 +59,7 @@ # while it is forwarded between different vrfs. source lib.sh +PATH=$PWD:$PWD/tools/testing/selftests/net:$PATH VERBOSE=0 PAUSE_ON_FAIL=no DEFAULT_TTYPE=sym @@ -533,6 +534,86 @@ ipv6_ping_frag_asym() ipv6_ping_frag asym } +ipv4_ping_local() +{ + log_section "IPv4 (sym route): VRF ICMP local error route lookup ping" + + setup_sym + + check_connectivity || return + + run_cmd ip netns exec $r1 ip vrf exec blue ping -c1 -w1 ${H2_N2_IP} + log_test $? 0 "VRF ICMP local IPv4" +} + +ipv4_tcp_local() +{ + log_section "IPv4 (sym route): VRF tcp local connection" + + setup_sym + + check_connectivity || return + + run_cmd nettest -s -O "$h2" -l ${H2_N2_IP} -I eth0 -3 eth0 & + sleep 1 + run_cmd nettest -N "$r1" -d blue -r ${H2_N2_IP} + log_test $? 0 "VRF tcp local connection IPv4" +} + +ipv4_udp_local() +{ + log_section "IPv4 (sym route): VRF udp local connection" + + setup_sym + + check_connectivity || return + + run_cmd nettest -s -D -O "$h2" -l ${H2_N2_IP} -I eth0 -3 eth0 & + sleep 1 + run_cmd nettest -D -N "$r1" -d blue -r ${H2_N2_IP} + log_test $? 0 "VRF udp local connection IPv4" +} + +ipv6_ping_local() +{ + log_section "IPv6 (sym route): VRF ICMP local error route lookup ping" + + setup_sym + + check_connectivity6 || return + + run_cmd ip netns exec $r1 ip vrf exec blue ${ping6} -c1 -w1 ${H2_N2_IP6} + log_test $? 0 "VRF ICMP local IPv6" +} + +ipv6_tcp_local() +{ + log_section "IPv6 (sym route): VRF tcp local connection" + + setup_sym + + check_connectivity6 || return + + run_cmd nettest -s -6 -O "$h2" -l ${H2_N2_IP6} -I eth0 -3 eth0 & + sleep 1 + run_cmd nettest -6 -N "$r1" -d blue -r ${H2_N2_IP6} + log_test $? 0 "VRF tcp local connection IPv6" +} + +ipv6_udp_local() +{ + log_section "IPv6 (sym route): VRF udp local connection" + + setup_sym + + check_connectivity6 || return + + run_cmd nettest -s -6 -D -O "$h2" -l ${H2_N2_IP6} -I eth0 -3 eth0 & + sleep 1 + run_cmd nettest -6 -D -N "$r1" -d blue -r ${H2_N2_IP6} + log_test $? 
0 "VRF udp local connection IPv6" +} + ################################################################################ # usage @@ -555,8 +636,10 @@ EOF # Some systems don't have a ping6 binary anymore command -v ping6 > /dev/null 2>&1 && ping6=$(command -v ping6) || ping6=$(command -v ping) -TESTS_IPV4="ipv4_ping_ttl ipv4_traceroute ipv4_ping_frag ipv4_ping_ttl_asym ipv4_traceroute_asym" -TESTS_IPV6="ipv6_ping_ttl ipv6_traceroute ipv6_ping_ttl_asym ipv6_traceroute_asym" +TESTS_IPV4="ipv4_ping_ttl ipv4_traceroute ipv4_ping_frag ipv4_ping_local ipv4_tcp_local +ipv4_udp_local ipv4_ping_ttl_asym ipv4_traceroute_asym" +TESTS_IPV6="ipv6_ping_ttl ipv6_traceroute ipv6_ping_local ipv6_tcp_local ipv6_udp_local +ipv6_ping_ttl_asym ipv6_traceroute_asym" ret=0 nsuccess=0 @@ -594,12 +677,18 @@ do ipv4_traceroute|traceroute) ipv4_traceroute;;& ipv4_traceroute_asym|traceroute) ipv4_traceroute_asym;;& ipv4_ping_frag|ping) ipv4_ping_frag;;& + ipv4_ping_local|ping) ipv4_ping_local;;& + ipv4_tcp_local) ipv4_tcp_local;;& + ipv4_udp_local) ipv4_udp_local;;& ipv6_ping_ttl|ping) ipv6_ping_ttl;;& ipv6_ping_ttl_asym|ping) ipv6_ping_ttl_asym;;& ipv6_traceroute|traceroute) ipv6_traceroute;;& ipv6_traceroute_asym|traceroute) ipv6_traceroute_asym;;& ipv6_ping_frag|ping) ipv6_ping_frag;;& + ipv6_ping_local|ping) ipv6_ping_local;;& + ipv6_tcp_local) ipv6_tcp_local;;& + ipv6_udp_local) ipv6_udp_local;;& # setup namespaces and config, but do not run any tests setup_sym|setup) setup_sym; exit 0;; diff --git a/tools/testing/selftests/net/ynl.mk b/tools/testing/selftests/net/ynl.mk new file mode 100644 index 000000000000..59cb26cf3f73 --- /dev/null +++ b/tools/testing/selftests/net/ynl.mk @@ -0,0 +1,21 @@ +# SPDX-License-Identifier: GPL-2.0 + +# YNL selftest build snippet + +# Inputs: +# +# YNL_GENS: families we need in the selftests +# YNL_PROGS: TEST_PROGS which need YNL (TODO, none exist, yet) +# YNL_GEN_FILES: TEST_GEN_FILES which need YNL + +YNL_OUTPUTS := $(patsubst %,$(OUTPUT)/%,$(YNL_GEN_FILES)) + +$(YNL_OUTPUTS): $(OUTPUT)/libynl.a +$(YNL_OUTPUTS): CFLAGS += \ + -I$(top_srcdir)/usr/include/ $(KHDR_INCLUDES) \ + -I$(top_srcdir)/tools/net/ynl/lib/ \ + -I$(top_srcdir)/tools/net/ynl/generated/ + +$(OUTPUT)/libynl.a: + $(Q)$(MAKE) -C $(top_srcdir)/tools/net/ynl GENS="$(YNL_GENS)" libynl.a + $(Q)cp $(top_srcdir)/tools/net/ynl/libynl.a $(OUTPUT)/libynl.a diff --git a/tools/testing/vsock/Makefile b/tools/testing/vsock/Makefile index a7f56a09ca9f..6e0b4e95e230 100644 --- a/tools/testing/vsock/Makefile +++ b/tools/testing/vsock/Makefile @@ -13,3 +13,16 @@ CFLAGS += -g -O2 -Werror -Wall -I. -I../../include -I../../../usr/include -Wno-p clean: ${RM} *.o *.d vsock_test vsock_diag_test vsock_perf vsock_uring_test -include *.d + +VSOCK_INSTALL_PATH ?= + +install: all +ifdef VSOCK_INSTALL_PATH + mkdir -p $(VSOCK_INSTALL_PATH) + install -m 744 vsock_test $(VSOCK_INSTALL_PATH) + install -m 744 vsock_perf $(VSOCK_INSTALL_PATH) + install -m 744 vsock_diag_test $(VSOCK_INSTALL_PATH) + install -m 744 vsock_uring_test $(VSOCK_INSTALL_PATH) +else + $(error Error: set VSOCK_INSTALL_PATH to use install) +endif