Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Cross-merge networking fixes after downstream PR.

Conflicts:

net/sched/sch_taprio.c
  d636fc5dd692 ("net: sched: add rcu annotations around qdisc->qdisc_sleeping")
  dced11ef84fb ("net/sched: taprio: don't overwrite "sch" variable in taprio_dump_class_stats()")

net/ipv4/sysctl_net_ipv4.c
  e209fee4118f ("net/ipv4: ping_group_range: allow GID from 2147483648 to 4294967294")
  ccce324dabfe ("tcp: make the first N SYN RTO backoffs linear")
https://lore.kernel.org/all/20230605100816.08d41a7b@canb.auug.org.au/

No adjacent changes.

Signed-off-by: Jakub Kicinski <kuba@kernel.org>
commit 449f6bc17a
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Lattice Slave SPI sysCONFIG FPGA manager
 
 maintainers:
-  - Ivan Bornyakov <i.bornyakov@metrotek.ru>
+  - Vladimir Georgiev <v.georgiev@metrotek.ru>
 
 description: |
   Lattice sysCONFIG port, which is used for FPGA configuration, among others,
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Microchip Polarfire FPGA manager.
 
 maintainers:
-  - Ivan Bornyakov <i.bornyakov@metrotek.ru>
+  - Vladimir Georgiev <v.georgiev@metrotek.ru>
 
 description:
   Device Tree Bindings for Microchip Polarfire FPGA Manager using slave SPI to
@@ -39,6 +39,12 @@ properties:
   power-domains:
     maxItems: 1
 
+  vref-supply:
+    description: |
+      External ADC reference voltage supply on VREFH pad. If VERID[MVI] is
+      set, there are additional, internal reference voltages selectable.
+      VREFH1 is always from VREFH pad.
+
   "#io-channel-cells":
     const: 1
 
@@ -72,6 +78,7 @@ examples:
             assigned-clocks = <&clk IMX_SC_R_ADC_0>;
             assigned-clock-rates = <24000000>;
             power-domains = <&pd IMX_SC_R_ADC_0>;
+            vref-supply = <&reg_1v8>;
             #io-channel-cells = <1>;
         };
     };
@@ -90,7 +90,7 @@ patternProperties:
           of the MAX chips to the GyroADC, while MISO line of each Maxim
           ADC connects to a shared input pin of the GyroADC.
         enum:
-          - adi,7476
+          - adi,ad7476
           - fujitsu,mb88101a
          - maxim,max1162
          - maxim,max11100
@@ -70,6 +70,7 @@ properties:
   dsr-gpios: true
   rng-gpios: true
   dcd-gpios: true
+  rs485-rts-active-high: true
   rts-gpio: true
   power-domains: true
   clock-frequency: true
@@ -287,7 +287,7 @@ properties:
     description:
       High-Speed PHY interface selection between UTMI+ and ULPI when the
      DWC_USB3_HSPHY_INTERFACE has value 3.
-    $ref: /schemas/types.yaml#/definitions/uint8
+    $ref: /schemas/types.yaml#/definitions/string
     enum: [utmi, ulpi]
 
  snps,quirk-frame-length-adjustment:
@@ -52,3 +52,22 @@ Build kernel with:
 
 Optionally, build kernel with PAGE_TABLE_CHECK_ENFORCED in order to have page
 table support without extra kernel parameter.
+
+Implementation notes
+====================
+
+We specifically decided not to use VMA information in order to avoid relying on
+MM states (except for limited "struct page" info). The page table check is a
+separate from Linux-MM state machine that verifies that the user accessible
+pages are not falsely shared.
+
+PAGE_TABLE_CHECK depends on EXCLUSIVE_SYSTEM_RAM. The reason is that without
+EXCLUSIVE_SYSTEM_RAM, users are allowed to map arbitrary physical memory
+regions into the userspace via /dev/mem. At the same time, pages may change
+their properties (e.g., from anonymous pages to named pages) while they are
+still being mapped in the userspace, leading to "corruption" detected by the
+page table check.
+
+Even with EXCLUSIVE_SYSTEM_RAM, I/O pages may be still allowed to be mapped via
+/dev/mem. However, these pages are always considered as named pages, so they
+won't break the logic used in the page table check.
@@ -223,7 +223,7 @@ attribute-sets:
         name: tx-min-frag-size
         type: u32
       -
-        name: tx-min-frag-size
+        name: rx-min-frag-size
         type: u32
       -
         name: verify-enabled
@@ -294,7 +294,7 @@ attribute-sets:
         name: master-slave-state
         type: u8
       -
-        name: master-slave-lanes
+        name: lanes
         type: u32
       -
         name: rate-matching
@@ -322,7 +322,7 @@ attribute-sets:
         name: ext-substate
         type: u8
       -
-        name: down-cnt
+        name: ext-down-cnt
         type: u32
       -
         name: debug
@@ -577,7 +577,7 @@ attribute-sets:
         name: phc-index
         type: u32
   -
-    name: cable-test-nft-nest-result
+    name: cable-test-ntf-nest-result
     attributes:
       -
         name: pair
@@ -586,7 +586,7 @@ attribute-sets:
         name: code
         type: u8
   -
-    name: cable-test-nft-nest-fault-length
+    name: cable-test-ntf-nest-fault-length
     attributes:
       -
         name: pair
@@ -595,16 +595,16 @@ attribute-sets:
         name: cm
         type: u32
   -
-    name: cable-test-nft-nest
+    name: cable-test-ntf-nest
     attributes:
       -
         name: result
         type: nest
-        nested-attributes: cable-test-nft-nest-result
+        nested-attributes: cable-test-ntf-nest-result
       -
         name: fault-length
         type: nest
-        nested-attributes: cable-test-nft-nest-fault-length
+        nested-attributes: cable-test-ntf-nest-fault-length
   -
     name: cable-test
     attributes:
@@ -618,7 +618,7 @@ attribute-sets:
       -
         name: nest
         type: nest
-        nested-attributes: cable-test-nft-nest
+        nested-attributes: cable-test-ntf-nest
   -
     name: cable-test-tdr-cfg
     attributes:
@@ -776,7 +776,7 @@ attribute-sets:
         name: hist-bkt-hi
         type: u32
       -
-        name: hist-bkt-val
+        name: hist-val
         type: u64
       -
         name: stats
@@ -965,7 +965,7 @@ operations:
             - duplex
             - master-slave-cfg
             - master-slave-state
-            - master-slave-lanes
+            - lanes
             - rate-matching
       dump: *linkmodes-get-op
     -
@@ -999,7 +999,7 @@ operations:
             - sqi-max
             - ext-state
             - ext-substate
-            - down-cnt
+            - ext-down-cnt
       dump: *linkstate-get-op
     -
       name: debug-get
@@ -1351,7 +1351,7 @@ operations:
        reply:
          attributes:
            - header
-            - cable-test-nft-nest
+            - cable-test-ntf-nest
     -
       name: cable-test-tdr-act
       doc: Cable test TDR.
@@ -1539,7 +1539,7 @@ operations:
            - hkey
       dump: *rss-get-op
     -
-      name: plca-get
+      name: plca-get-cfg
       doc: Get PLCA params.
 
       attribute-set: plca
@@ -1561,7 +1561,7 @@ operations:
            - burst-tmr
       dump: *plca-get-op
     -
-      name: plca-set
+      name: plca-set-cfg
       doc: Set PLCA params.
 
       attribute-set: plca
@@ -1585,7 +1585,7 @@ operations:
     -
       name: plca-ntf
       doc: Notification for change in PLCA params.
-      notify: plca-get
+      notify: plca-get-cfg
     -
       name: mm-get
       doc: Get MAC Merge configuration and state
@@ -1363,8 +1363,8 @@ ping_group_range - 2 INTEGERS
	Restrict ICMP_PROTO datagram sockets to users in the group range.
	The default is "1 0", meaning, that nobody (not even root) may
	create ping sockets. Setting it to "100 100" would grant permissions
-	to the single group. "0 4294967295" would enable it for the world, "100
-	4294967295" would enable it for the users, but not daemons.
+	to the single group. "0 4294967294" would enable it for the world, "100
+	4294967294" would enable it for the users, but not daemons.
 
 tcp_early_demux - BOOLEAN
	Enable early demux for established TCP sockets.
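A usage sketch for the range semantics documented above (illustrative only; assumes standard sysctl tooling):

	# grant the single group 100 permission to create ICMP datagram sockets
	sysctl -w net.ipv4.ping_group_range="100 100"
	# everyone except the reserved GID 4294967295
	sysctl -w net.ipv4.ping_group_range="0 4294967294"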
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1594,7 +1594,7 @@ F: drivers/media/i2c/ar0521.c
 
 ARASAN NAND CONTROLLER DRIVER
 M:	Miquel Raynal <miquel.raynal@bootlin.com>
-M:	Naga Sureshkumar Relli <nagasure@xilinx.com>
+R:	Michal Simek <michal.simek@amd.com>
 L:	linux-mtd@lists.infradead.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/mtd/arasan,nand-controller.yaml
@@ -1757,7 +1757,7 @@ F: include/linux/amba/mmci.h
 
 ARM PRIMECELL PL35X NAND CONTROLLER DRIVER
 M:	Miquel Raynal <miquel.raynal@bootlin.com>
-M:	Naga Sureshkumar Relli <nagasure@xilinx.com>
+R:	Michal Simek <michal.simek@amd.com>
 L:	linux-mtd@lists.infradead.org
 S:	Maintained
 F:	Documentation/devicetree/bindings/mtd/arm,pl353-nand-r2p1.yaml
@@ -1765,7 +1765,7 @@ F: drivers/mtd/nand/raw/pl35x-nand-controller.c
 
 ARM PRIMECELL PL35X SMC DRIVER
 M:	Miquel Raynal <miquel.raynal@bootlin.com>
-M:	Naga Sureshkumar Relli <nagasure@xilinx.com>
+R:	Michal Simek <michal.simek@amd.com>
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	Documentation/devicetree/bindings/memory-controllers/arm,pl35x-smc.yaml
@@ -5721,6 +5721,14 @@ F: include/linux/tfrc.h
 F:	include/uapi/linux/dccp.h
 F:	net/dccp/
 
+DEBUGOBJECTS:
+M:	Thomas Gleixner <tglx@linutronix.de>
+L:	linux-kernel@vger.kernel.org
+S:	Maintained
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git core/debugobjects
+F:	lib/debugobjects.c
+F:	include/linux/debugobjects.h
+
 DECSTATION PLATFORM SUPPORT
 M:	"Maciej W. Rozycki" <macro@orcam.me.uk>
 L:	linux-mips@vger.kernel.org
@@ -10114,7 +10122,7 @@ S: Maintained
 F:	Documentation/process/kernel-docs.rst
 
 INDUSTRY PACK SUBSYSTEM (IPACK)
-M:	Samuel Iglesias Gonsalvez <siglesias@igalia.com>
+M:	Vaibhav Gupta <vaibhavgupta40@gmail.com>
 M:	Jens Taprogge <jens.taprogge@taprogge.org>
 M:	Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:	industrypack-devel@lists.sourceforge.net
@@ -13842,7 +13850,7 @@ F: drivers/tty/serial/8250/8250_pci1xxxx.c
 
 MICROCHIP POLARFIRE FPGA DRIVERS
 M:	Conor Dooley <conor.dooley@microchip.com>
-R:	Ivan Bornyakov <i.bornyakov@metrotek.ru>
+R:	Vladimir Georgiev <v.georgiev@metrotek.ru>
 L:	linux-fpga@vger.kernel.org
 S:	Supported
 F:	Documentation/devicetree/bindings/fpga/microchip,mpf-spi-fpga-mgr.yaml
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 4
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
@@ -632,9 +632,9 @@ int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size);
  *
  * The walker will walk the page-table entries corresponding to the input
  * address range specified, visiting entries according to the walker flags.
- * Invalid entries are treated as leaf entries. Leaf entries are reloaded
- * after invoking the walker callback, allowing the walker to descend into
- * a newly installed table.
+ * Invalid entries are treated as leaf entries. The visited page table entry is
+ * reloaded after invoking the walker callback, allowing the walker to descend
+ * into a newly installed table.
  *
  * Returning a negative error code from the walker callback function will
  * terminate the walk immediately with the same error code.
@@ -115,8 +115,14 @@
 #define SB_BARRIER_INSN		__SYS_BARRIER_INSN(0, 7, 31)
 
 #define SYS_DC_ISW		sys_insn(1, 0, 7, 6, 2)
+#define SYS_DC_IGSW		sys_insn(1, 0, 7, 6, 4)
+#define SYS_DC_IGDSW		sys_insn(1, 0, 7, 6, 6)
 #define SYS_DC_CSW		sys_insn(1, 0, 7, 10, 2)
+#define SYS_DC_CGSW		sys_insn(1, 0, 7, 10, 4)
+#define SYS_DC_CGDSW		sys_insn(1, 0, 7, 10, 6)
 #define SYS_DC_CISW		sys_insn(1, 0, 7, 14, 2)
+#define SYS_DC_CIGSW		sys_insn(1, 0, 7, 14, 4)
+#define SYS_DC_CIGDSW		sys_insn(1, 0, 7, 14, 6)
 
 /*
  * Automatically generated definitions for system registers, the
@@ -412,17 +412,21 @@ static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
	return false;
 }
 
-static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
	if (!__populate_fault_info(vcpu))
		return true;
 
	return false;
 }
+static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+	__alias(kvm_hyp_handle_memory_fault);
+static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
+	__alias(kvm_hyp_handle_memory_fault);
 
 static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
-	if (!__populate_fault_info(vcpu))
+	if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
		return true;
 
	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
@@ -575,7 +575,7 @@ struct pkvm_mem_donation {
 
 struct check_walk_data {
	enum pkvm_page_state	desired;
-	enum pkvm_page_state	(*get_page_state)(kvm_pte_t pte);
+	enum pkvm_page_state	(*get_page_state)(kvm_pte_t pte, u64 addr);
 };
 
 static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
@@ -583,10 +583,7 @@ static int __check_page_state_visitor(const struct kvm_pgtable_visit_ctx *ctx,
 {
	struct check_walk_data *d = ctx->arg;
 
-	if (kvm_pte_valid(ctx->old) && !addr_is_allowed_memory(kvm_pte_to_phys(ctx->old)))
-		return -EINVAL;
-
-	return d->get_page_state(ctx->old) == d->desired ? 0 : -EPERM;
+	return d->get_page_state(ctx->old, ctx->addr) == d->desired ? 0 : -EPERM;
 }
 
 static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
@@ -601,8 +598,11 @@ static int check_page_state_range(struct kvm_pgtable *pgt, u64 addr, u64 size,
	return kvm_pgtable_walk(pgt, addr, size, &walker);
 }
 
-static enum pkvm_page_state host_get_page_state(kvm_pte_t pte)
+static enum pkvm_page_state host_get_page_state(kvm_pte_t pte, u64 addr)
 {
+	if (!addr_is_allowed_memory(addr))
+		return PKVM_NOPAGE;
+
	if (!kvm_pte_valid(pte) && pte)
		return PKVM_NOPAGE;
 
@@ -709,7 +709,7 @@ static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx
	return host_stage2_set_owner_locked(addr, size, host_id);
 }
 
-static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
+static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte, u64 addr)
 {
	if (!kvm_pte_valid(pte))
		return PKVM_NOPAGE;
@@ -186,6 +186,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
+	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 };
 
@@ -196,6 +197,7 @@ static const exit_handler_fn pvm_exit_handlers[] = {
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
+	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 };
@@ -209,14 +209,26 @@ static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
		.flags	= flags,
	};
	int ret = 0;
+	bool reload = false;
	kvm_pteref_t childp;
	bool table = kvm_pte_table(ctx.old, level);
 
-	if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE))
+	if (table && (ctx.flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
		ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_TABLE_PRE);
+		reload = true;
+	}
 
	if (!table && (ctx.flags & KVM_PGTABLE_WALK_LEAF)) {
		ret = kvm_pgtable_visitor_cb(data, &ctx, KVM_PGTABLE_WALK_LEAF);
+		reload = true;
+	}
+
+	/*
+	 * Reload the page table after invoking the walker callback for leaf
+	 * entries or after pre-order traversal, to allow the walker to descend
+	 * into a newly installed or replaced table.
+	 */
+	if (reload) {
		ctx.old = READ_ONCE(*ptep);
		table = kvm_pte_table(ctx.old, level);
	}
@@ -1320,4 +1332,7 @@ void kvm_pgtable_stage2_free_removed(struct kvm_pgtable_mm_ops *mm_ops, void *pg
	};
 
	WARN_ON(__kvm_pgtable_walk(&data, mm_ops, ptep, level + 1));
+
+	WARN_ON(mm_ops->page_count(pgtable) != 1);
+	mm_ops->put_page(pgtable);
 }
@@ -110,6 +110,7 @@ static const exit_handler_fn hyp_exit_handlers[] = {
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
+	[ESR_ELx_EC_WATCHPT_LOW]	= kvm_hyp_handle_watchpt_low,
	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
 };
@@ -694,45 +694,23 @@ out_unlock:
 
 static struct arm_pmu *kvm_pmu_probe_armpmu(void)
 {
-	struct perf_event_attr attr = { };
-	struct perf_event *event;
-	struct arm_pmu *pmu = NULL;
+	struct arm_pmu *tmp, *pmu = NULL;
+	struct arm_pmu_entry *entry;
+	int cpu;
 
-	/*
-	 * Create a dummy event that only counts user cycles. As we'll never
-	 * leave this function with the event being live, it will never
-	 * count anything. But it allows us to probe some of the PMU
-	 * details. Yes, this is terrible.
-	 */
-	attr.type = PERF_TYPE_RAW;
-	attr.size = sizeof(attr);
-	attr.pinned = 1;
-	attr.disabled = 0;
-	attr.exclude_user = 0;
-	attr.exclude_kernel = 1;
-	attr.exclude_hv = 1;
-	attr.exclude_host = 1;
-	attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
-	attr.sample_period = GENMASK(63, 0);
+	mutex_lock(&arm_pmus_lock);
 
-	event = perf_event_create_kernel_counter(&attr, -1, current,
-						 kvm_pmu_perf_overflow, &attr);
+	cpu = smp_processor_id();
+	list_for_each_entry(entry, &arm_pmus, entry) {
+		tmp = entry->arm_pmu;
 
-	if (IS_ERR(event)) {
-		pr_err_once("kvm: pmu event creation failed %ld\n",
-			    PTR_ERR(event));
-		return NULL;
+		if (cpumask_test_cpu(cpu, &tmp->supported_cpus)) {
+			pmu = tmp;
+			break;
+		}
	}
 
-	if (event->pmu) {
-		pmu = to_arm_pmu(event->pmu);
-		if (pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_NI ||
-		    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
-			pmu = NULL;
-	}
-
-	perf_event_disable(event);
-	perf_event_release_kernel(event);
+	mutex_unlock(&arm_pmus_lock);
 
	return pmu;
 }
@@ -912,7 +890,17 @@ int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
		return -EBUSY;
 
	if (!kvm->arch.arm_pmu) {
-		/* No PMU set, get the default one */
+		/*
+		 * No PMU set, get the default one.
+		 *
+		 * The observant among you will notice that the supported_cpus
+		 * mask does not get updated for the default PMU even though it
+		 * is quite possible the selected instance supports only a
+		 * subset of cores in the system. This is intentional, and
+		 * upholds the preexisting behavior on heterogeneous systems
+		 * where vCPUs can be scheduled on any core but the guest
+		 * counters could stop working.
+		 */
		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
		if (!kvm->arch.arm_pmu)
			return -ENODEV;
@@ -211,6 +211,19 @@ static bool access_dcsw(struct kvm_vcpu *vcpu,
	return true;
 }
 
+static bool access_dcgsw(struct kvm_vcpu *vcpu,
+			 struct sys_reg_params *p,
+			 const struct sys_reg_desc *r)
+{
+	if (!kvm_has_mte(vcpu->kvm)) {
+		kvm_inject_undefined(vcpu);
+		return false;
+	}
+
+	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
+	return access_dcsw(vcpu, p, r);
+}
+
 static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
 {
	switch (r->aarch32_map) {
@@ -1756,8 +1769,14 @@ static bool access_spsr(struct kvm_vcpu *vcpu,
  */
 static const struct sys_reg_desc sys_reg_descs[] = {
	{ SYS_DESC(SYS_DC_ISW), access_dcsw },
+	{ SYS_DESC(SYS_DC_IGSW), access_dcgsw },
+	{ SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CSW), access_dcsw },
+	{ SYS_DESC(SYS_DC_CGSW), access_dcgsw },
+	{ SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
	{ SYS_DESC(SYS_DC_CISW), access_dcsw },
+	{ SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
+	{ SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
 
	DBG_BCR_BVR_WCR_WVR_EL1(0),
	DBG_BCR_BVR_WCR_WVR_EL1(1),
@@ -235,9 +235,9 @@ int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu)
	 * KVM io device for the redistributor that belongs to this VCPU.
	 */
	if (dist->vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
-		mutex_lock(&vcpu->kvm->arch.config_lock);
+		mutex_lock(&vcpu->kvm->slots_lock);
		ret = vgic_register_redist_iodev(vcpu);
-		mutex_unlock(&vcpu->kvm->arch.config_lock);
+		mutex_unlock(&vcpu->kvm->slots_lock);
	}
	return ret;
 }
@@ -406,7 +406,7 @@ void kvm_vgic_destroy(struct kvm *kvm)
 
 /**
  * vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
- * is a GICv2. A GICv3 must be explicitly initialized by the guest using the
+ * is a GICv2. A GICv3 must be explicitly initialized by userspace using the
  * KVM_DEV_ARM_VGIC_GRP_CTRL KVM_DEVICE group.
  * @kvm: kvm struct pointer
  */
@@ -446,11 +446,13 @@ int vgic_lazy_init(struct kvm *kvm)
 int kvm_vgic_map_resources(struct kvm *kvm)
 {
	struct vgic_dist *dist = &kvm->arch.vgic;
+	gpa_t dist_base;
	int ret = 0;
 
	if (likely(vgic_ready(kvm)))
		return 0;
 
+	mutex_lock(&kvm->slots_lock);
	mutex_lock(&kvm->arch.config_lock);
	if (vgic_ready(kvm))
		goto out;
@@ -463,13 +465,26 @@ int kvm_vgic_map_resources(struct kvm *kvm)
	else
		ret = vgic_v3_map_resources(kvm);
 
-	if (ret)
+	if (ret) {
		__kvm_vgic_destroy(kvm);
-	else
-		dist->ready = true;
+		goto out;
+	}
+	dist->ready = true;
+	dist_base = dist->vgic_dist_base;
	mutex_unlock(&kvm->arch.config_lock);
 
+	ret = vgic_register_dist_iodev(kvm, dist_base,
+				       kvm_vgic_global_state.type);
+	if (ret) {
+		kvm_err("Unable to register VGIC dist MMIO regions\n");
+		kvm_vgic_destroy(kvm);
+	}
+	mutex_unlock(&kvm->slots_lock);
+	return ret;
+
 out:
	mutex_unlock(&kvm->arch.config_lock);
+	mutex_unlock(&kvm->slots_lock);
	return ret;
 }
@@ -1936,6 +1936,7 @@ void vgic_lpi_translation_cache_destroy(struct kvm *kvm)
 
 static int vgic_its_create(struct kvm_device *dev, u32 type)
 {
+	int ret;
	struct vgic_its *its;
 
	if (type != KVM_DEV_TYPE_ARM_VGIC_ITS)
@@ -1945,9 +1946,12 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
	if (!its)
		return -ENOMEM;
 
+	mutex_lock(&dev->kvm->arch.config_lock);
+
	if (vgic_initialized(dev->kvm)) {
-		int ret = vgic_v4_init(dev->kvm);
+		ret = vgic_v4_init(dev->kvm);
		if (ret < 0) {
+			mutex_unlock(&dev->kvm->arch.config_lock);
			kfree(its);
			return ret;
		}
@@ -1960,12 +1964,10 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
 
	/* Yep, even more trickery for lock ordering... */
 #ifdef CONFIG_LOCKDEP
-	mutex_lock(&dev->kvm->arch.config_lock);
	mutex_lock(&its->cmd_lock);
	mutex_lock(&its->its_lock);
	mutex_unlock(&its->its_lock);
	mutex_unlock(&its->cmd_lock);
-	mutex_unlock(&dev->kvm->arch.config_lock);
 #endif
 
	its->vgic_its_base = VGIC_ADDR_UNDEF;
@@ -1986,7 +1988,11 @@ static int vgic_its_create(struct kvm_device *dev, u32 type)
 
	dev->private = its;
 
-	return vgic_its_set_abi(its, NR_ITS_ABIS - 1);
+	ret = vgic_its_set_abi(its, NR_ITS_ABIS - 1);
+
+	mutex_unlock(&dev->kvm->arch.config_lock);
+
+	return ret;
 }
 
 static void vgic_its_destroy(struct kvm_device *kvm_dev)
@@ -102,7 +102,11 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
		if (get_user(addr, uaddr))
			return -EFAULT;
 
-	mutex_lock(&kvm->arch.config_lock);
+	/*
+	 * Since we can't hold config_lock while registering the redistributor
+	 * iodevs, take the slots_lock immediately.
+	 */
+	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_VGIC_V2_ADDR_TYPE_DIST:
		r = vgic_check_type(kvm, KVM_DEV_TYPE_ARM_VGIC_V2);
@@ -182,6 +186,7 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
	if (r)
		goto out;
 
+	mutex_lock(&kvm->arch.config_lock);
	if (write) {
		r = vgic_check_iorange(kvm, *addr_ptr, addr, alignment, size);
		if (!r)
@@ -189,9 +194,10 @@ static int kvm_vgic_addr(struct kvm *kvm, struct kvm_device_attr *attr, bool wri
	} else {
		addr = *addr_ptr;
	}
+	mutex_unlock(&kvm->arch.config_lock);
 
 out:
-	mutex_unlock(&kvm->arch.config_lock);
+	mutex_unlock(&kvm->slots_lock);
 
	if (!r && !write)
		r = put_user(addr, uaddr);
@@ -769,10 +769,13 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
	struct vgic_io_device *rd_dev = &vcpu->arch.vgic_cpu.rd_iodev;
	struct vgic_redist_region *rdreg;
	gpa_t rd_base;
-	int ret;
+	int ret = 0;
 
+	lockdep_assert_held(&kvm->slots_lock);
+	mutex_lock(&kvm->arch.config_lock);
 
	if (!IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr))
-		return 0;
+		goto out_unlock;
 
	/*
	 * We may be creating VCPUs before having set the base address for the
@@ -782,10 +785,12 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
	 */
	rdreg = vgic_v3_rdist_free_slot(&vgic->rd_regions);
	if (!rdreg)
-		return 0;
+		goto out_unlock;
 
-	if (!vgic_v3_check_base(kvm))
-		return -EINVAL;
+	if (!vgic_v3_check_base(kvm)) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
 
	vgic_cpu->rdreg = rdreg;
	vgic_cpu->rdreg_index = rdreg->free_index;
@@ -799,16 +804,20 @@ int vgic_register_redist_iodev(struct kvm_vcpu *vcpu)
	rd_dev->nr_regions = ARRAY_SIZE(vgic_v3_rd_registers);
	rd_dev->redist_vcpu = vcpu;
 
-	mutex_lock(&kvm->slots_lock);
+	mutex_unlock(&kvm->arch.config_lock);
+
	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, rd_base,
				      2 * SZ_64K, &rd_dev->dev);
-	mutex_unlock(&kvm->slots_lock);
-
	if (ret)
		return ret;
 
+	/* Protected by slots_lock */
	rdreg->free_index++;
	return 0;
+
+out_unlock:
+	mutex_unlock(&kvm->arch.config_lock);
+	return ret;
 }
 
 static void vgic_unregister_redist_iodev(struct kvm_vcpu *vcpu)
@@ -834,12 +843,10 @@ static int vgic_register_all_redist_iodevs(struct kvm *kvm)
		/* The current c failed, so iterate over the previous ones. */
		int i;
 
-		mutex_lock(&kvm->slots_lock);
		for (i = 0; i < c; i++) {
			vcpu = kvm_get_vcpu(kvm, i);
			vgic_unregister_redist_iodev(vcpu);
		}
-		mutex_unlock(&kvm->slots_lock);
	}
 
	return ret;
@@ -938,7 +945,9 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
 {
	int ret;
 
+	mutex_lock(&kvm->arch.config_lock);
	ret = vgic_v3_alloc_redist_region(kvm, index, addr, count);
+	mutex_unlock(&kvm->arch.config_lock);
	if (ret)
		return ret;
 
@@ -950,8 +959,10 @@ int vgic_v3_set_redist_base(struct kvm *kvm, u32 index, u64 addr, u32 count)
	if (ret) {
		struct vgic_redist_region *rdreg;
 
+		mutex_lock(&kvm->arch.config_lock);
		rdreg = vgic_v3_rdist_region_from_index(kvm, index);
		vgic_v3_free_redist_region(rdreg);
+		mutex_unlock(&kvm->arch.config_lock);
		return ret;
	}
@@ -1096,7 +1096,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
			     enum vgic_type type)
 {
	struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev;
-	int ret = 0;
	unsigned int len;
 
	switch (type) {
@@ -1114,10 +1113,6 @@ int vgic_register_dist_iodev(struct kvm *kvm, gpa_t dist_base_address,
	io_device->iodev_type = IODEV_DIST;
	io_device->redist_vcpu = NULL;
 
-	mutex_lock(&kvm->slots_lock);
-	ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
-				      len, &io_device->dev);
-	mutex_unlock(&kvm->slots_lock);
-
-	return ret;
+	return kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, dist_base_address,
+				       len, &io_device->dev);
 }
@@ -312,12 +312,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
		return ret;
	}
 
-	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V2);
-	if (ret) {
-		kvm_err("Unable to register VGIC MMIO regions\n");
-		return ret;
-	}
-
	if (!static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		ret = kvm_phys_addr_ioremap(kvm, dist->vgic_cpu_base,
					    kvm_vgic_global_state.vcpu_base,
@@ -539,7 +539,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
 {
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
-	int ret = 0;
	unsigned long c;
 
	kvm_for_each_vcpu(c, vcpu, kvm) {
@@ -569,12 +568,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
		return -EBUSY;
	}
 
-	ret = vgic_register_dist_iodev(kvm, dist->vgic_dist_base, VGIC_V3);
-	if (ret) {
-		kvm_err("Unable to register VGICv3 dist MMIO regions\n");
-		return ret;
-	}
-
	if (kvm_vgic_global_state.has_gicv4_1)
		vgic_v4_configure_vsgis(kvm);
 
@@ -184,13 +184,14 @@ static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
	}
 }
 
-/* Must be called with the kvm lock held */
 void vgic_v4_configure_vsgis(struct kvm *kvm)
 {
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct kvm_vcpu *vcpu;
	unsigned long i;
 
+	lockdep_assert_held(&kvm->arch.config_lock);
+
	kvm_arm_halt_guest(kvm);
 
	kvm_for_each_vcpu(i, vcpu, kvm) {
@@ -22,15 +22,15 @@ sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o
 sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
 crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o
 crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o
-aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp8-ppc.o aesp8-ppc.o
+aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o
 
 quiet_cmd_perl = PERL    $@
       cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@
 
-targets += aesp8-ppc.S ghashp8-ppc.S
+targets += aesp10-ppc.S ghashp10-ppc.S
 
-$(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
+$(obj)/aesp10-ppc.S $(obj)/ghashp10-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
	$(call if_changed,perl)
 
-OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y
-OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y
+OBJECT_FILES_NON_STANDARD_aesp10-ppc.o := y
+OBJECT_FILES_NON_STANDARD_ghashp10-ppc.o := y
@@ -30,15 +30,15 @@ MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS_CRYPTO("aes");
 
-asmlinkage int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
+asmlinkage int aes_p10_set_encrypt_key(const u8 *userKey, const int bits,
				      void *key);
-asmlinkage void aes_p8_encrypt(const u8 *in, u8 *out, const void *key);
+asmlinkage void aes_p10_encrypt(const u8 *in, u8 *out, const void *key);
 asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len,
				    void *rkey, u8 *iv, void *Xi);
 asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
				    void *rkey, u8 *iv, void *Xi);
 asmlinkage void gcm_init_htable(unsigned char htable[256], unsigned char Xi[16]);
-asmlinkage void gcm_ghash_p8(unsigned char *Xi, unsigned char *Htable,
+asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable,
		unsigned char *aad, unsigned int alen);
 
 struct aes_key {
@@ -93,7 +93,7 @@ static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash,
	gctx->aadLen = alen;
	i = alen & ~0xf;
	if (i) {
-		gcm_ghash_p8(nXi, hash->Htable+32, aad, i);
+		gcm_ghash_p10(nXi, hash->Htable+32, aad, i);
		aad += i;
		alen -= i;
	}
@@ -102,7 +102,7 @@ static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash,
			nXi[i] ^= aad[i];
 
		memset(gctx->aad_hash, 0, 16);
-		gcm_ghash_p8(gctx->aad_hash, hash->Htable+32, nXi, 16);
+		gcm_ghash_p10(gctx->aad_hash, hash->Htable+32, nXi, 16);
	} else {
		memcpy(gctx->aad_hash, nXi, 16);
	}
@@ -115,7 +115,7 @@ static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey,
 {
	__be32 counter = cpu_to_be32(1);
 
-	aes_p8_encrypt(hash->H, hash->H, rdkey);
+	aes_p10_encrypt(hash->H, hash->H, rdkey);
	set_subkey(hash->H);
	gcm_init_htable(hash->Htable+32, hash->H);
 
@@ -126,7 +126,7 @@ static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey,
	/*
	 * Encrypt counter vector as iv tag and increment counter.
	 */
-	aes_p8_encrypt(iv, gctx->ivtag, rdkey);
+	aes_p10_encrypt(iv, gctx->ivtag, rdkey);
 
	counter = cpu_to_be32(2);
	*((__be32 *)(iv+12)) = counter;
@@ -160,7 +160,7 @@ static void finish_tag(struct gcm_ctx *gctx, struct Hash_ctx *hash, int len)
	/*
	 * hash (AAD len and len)
	 */
-	gcm_ghash_p8(hash->Htable, hash->Htable+32, aclen, 16);
+	gcm_ghash_p10(hash->Htable, hash->Htable+32, aclen, 16);
 
	for (i = 0; i < 16; i++)
		hash->Htable[i] ^= gctx->ivtag[i];
@@ -192,7 +192,7 @@ static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
	int ret;
 
	vsx_begin();
-	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+	ret = aes_p10_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
	vsx_end();
 
	return ret ? -EINVAL : 0;
@@ -110,7 +110,7 @@ die "can't locate ppc-xlate.pl";
 open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
 
 $FRAME=8*$SIZE_T;
-$prefix="aes_p8";
+$prefix="aes_p10";
 
 $sp="r1";
 $vrsave="r12";
@@ -64,7 +64,7 @@ $code=<<___;
 
 .text
 
-.globl	.gcm_init_p8
+.globl	.gcm_init_p10
	lis	r0,0xfff0
	li	r8,0x10
	mfspr	$vrsave,256
@@ -110,7 +110,7 @@ $code=<<___;
	.long	0
	.byte	0,12,0x14,0,0,0,2,0
	.long	0
-.size	.gcm_init_p8,.-.gcm_init_p8
+.size	.gcm_init_p10,.-.gcm_init_p10
 
 .globl	.gcm_init_htable
	lis	r0,0xfff0
@@ -237,7 +237,7 @@ $code=<<___;
	.long	0
 .size	.gcm_init_htable,.-.gcm_init_htable
 
-.globl	.gcm_gmult_p8
+.globl	.gcm_gmult_p10
	lis	r0,0xfff8
	li	r8,0x10
	mfspr	$vrsave,256
@@ -283,9 +283,9 @@ $code=<<___;
	.long	0
	.byte	0,12,0x14,0,0,0,2,0
	.long	0
-.size	.gcm_gmult_p8,.-.gcm_gmult_p8
+.size	.gcm_gmult_p10,.-.gcm_gmult_p10
 
-.globl	.gcm_ghash_p8
+.globl	.gcm_ghash_p10
	lis	r0,0xfff8
	li	r8,0x10
	mfspr	$vrsave,256
@@ -350,7 +350,7 @@ Loop:
	.long	0
	.byte	0,12,0x14,0,0,0,4,0
	.long	0
-.size	.gcm_ghash_p8,.-.gcm_ghash_p8
+.size	.gcm_ghash_p10,.-.gcm_ghash_p10
 
 .asciz	"GHASH for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
 .align	2
@@ -317,13 +317,22 @@ static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
 static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
 {
	u64 rc;
+	long rpages = npages;
+	unsigned long limit;
 
	if (!firmware_has_feature(FW_FEATURE_STUFF_TCE))
		return tce_free_pSeriesLP(tbl->it_index, tcenum,
					  tbl->it_page_shift, npages);
 
-	rc = plpar_tce_stuff((u64)tbl->it_index,
-			     (u64)tcenum << tbl->it_page_shift, 0, npages);
+	do {
+		limit = min_t(unsigned long, rpages, 512);
+
+		rc = plpar_tce_stuff((u64)tbl->it_index,
+				     (u64)tcenum << tbl->it_page_shift, 0, limit);
+
+		rpages -= limit;
+		tcenum += limit;
+	} while (rpages > 0 && !rc);
 
	if (rc && printk_ratelimit()) {
		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
@@ -88,7 +88,7 @@ static unsigned long ndump = 64;
 static unsigned long nidump = 16;
 static unsigned long ncsum = 4096;
 static int termch;
-static char tmpstr[128];
+static char tmpstr[KSYM_NAME_LEN];
 static int tracing_enabled;
 
 static long bus_error_jmp[JMP_BUF_LEN];
@@ -799,8 +799,11 @@ menu "Power management options"
 
 source "kernel/power/Kconfig"
 
+# Hibernation is only possible on systems where the SBI implementation has
+# marked its reserved memory as not accessible from, or does not run
+# from the same memory as, Linux
 config ARCH_HIBERNATION_POSSIBLE
-	def_bool y
+	def_bool NONPORTABLE
 
 config ARCH_HIBERNATION_HEADER
	def_bool HIBERNATION
@@ -1,2 +1,6 @@
+ifdef CONFIG_RELOCATABLE
+KBUILD_CFLAGS += -fno-pie
+endif
+
 obj-$(CONFIG_ERRATA_SIFIVE) += sifive/
 obj-$(CONFIG_ERRATA_THEAD) += thead/
@@ -36,6 +36,9 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep,
			       pte_t pte, int dirty);
 
+#define __HAVE_ARCH_HUGE_PTEP_GET
+pte_t huge_ptep_get(pte_t *ptep);
+
 pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
 #define arch_make_huge_pte arch_make_huge_pte
 
@@ -10,4 +10,11 @@
 
 #include <linux/perf_event.h>
 #define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs
+
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+	(regs)->epc = (__ip); \
+	(regs)->s0 = (unsigned long) __builtin_frame_address(0); \
+	(regs)->sp = current_stack_pointer; \
+	(regs)->status = SR_PP; \
+}
 #endif /* _ASM_RISCV_PERF_EVENT_H */
@@ -23,6 +23,10 @@ ifdef CONFIG_FTRACE
 CFLAGS_REMOVE_alternative.o = $(CC_FLAGS_FTRACE)
 CFLAGS_REMOVE_cpufeature.o = $(CC_FLAGS_FTRACE)
 endif
+ifdef CONFIG_RELOCATABLE
+CFLAGS_alternative.o += -fno-pie
+CFLAGS_cpufeature.o += -fno-pie
+endif
 ifdef CONFIG_KASAN
 KASAN_SANITIZE_alternative.o := n
 KASAN_SANITIZE_cpufeature.o := n
@@ -3,6 +3,30 @@
 #include <linux/err.h>
 
 #ifdef CONFIG_RISCV_ISA_SVNAPOT
+pte_t huge_ptep_get(pte_t *ptep)
+{
+	unsigned long pte_num;
+	int i;
+	pte_t orig_pte = ptep_get(ptep);
+
+	if (!pte_present(orig_pte) || !pte_napot(orig_pte))
+		return orig_pte;
+
+	pte_num = napot_pte_num(napot_cont_order(orig_pte));
+
+	for (i = 0; i < pte_num; i++, ptep++) {
+		pte_t pte = ptep_get(ptep);
+
+		if (pte_dirty(pte))
+			orig_pte = pte_mkdirty(orig_pte);
+
+		if (pte_young(pte))
+			orig_pte = pte_mkyoung(orig_pte);
+	}
+
+	return orig_pte;
+}
+
 pte_t *huge_pte_alloc(struct mm_struct *mm,
		      struct vm_area_struct *vma,
		      unsigned long addr,
@@ -218,6 +242,7 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
 {
	pte_t pte = ptep_get(ptep);
	unsigned long order;
+	pte_t orig_pte;
	int i, pte_num;
 
	if (!pte_napot(pte)) {
@@ -228,9 +253,12 @@ void huge_ptep_set_wrprotect(struct mm_struct *mm,
	order = napot_cont_order(pte);
	pte_num = napot_pte_num(order);
	ptep = huge_pte_offset(mm, addr, napot_cont_size(order));
+	orig_pte = get_clear_contig_flush(mm, addr, ptep, pte_num);
+
+	orig_pte = pte_wrprotect(orig_pte);
 
	for (i = 0; i < pte_num; i++, addr += PAGE_SIZE, ptep++)
		ptep_set_wrprotect(mm, addr, ptep);
+
+	set_pte_at(mm, addr, ptep, orig_pte);
 }
 
 pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
@@ -922,9 +922,9 @@ static void __init create_kernel_page_table(pgd_t *pgdir, bool early)
 static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
					       uintptr_t dtb_pa)
 {
+#ifndef CONFIG_BUILTIN_DTB
	uintptr_t pa = dtb_pa & ~(PMD_SIZE - 1);
 
-#ifndef CONFIG_BUILTIN_DTB
	/* Make sure the fdt fixmap address is always aligned on PMD size */
	BUILD_BUG_ON(FIX_FDT % (PMD_SIZE / PAGE_SIZE));
 
@@ -228,6 +228,23 @@ static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
	u32 xapic_id = kvm_xapic_id(apic);
	u32 physical_id;
 
+	/*
+	 * For simplicity, KVM always allocates enough space for all possible
+	 * xAPIC IDs. Yell, but don't kill the VM, as KVM can continue on
+	 * without the optimized map.
+	 */
+	if (WARN_ON_ONCE(xapic_id > new->max_apic_id))
+		return -EINVAL;
+
+	/*
+	 * Bail if a vCPU was added and/or enabled its APIC between allocating
+	 * the map and doing the actual calculations for the map. Note, KVM
+	 * hardcodes the x2APIC ID to vcpu_id, i.e. there's no TOCTOU bug if
+	 * the compiler decides to reload x2apic_id after this check.
+	 */
+	if (x2apic_id > new->max_apic_id)
+		return -E2BIG;
+
	/*
	 * Deliberately truncate the vCPU ID when detecting a mismatched APIC
	 * ID to avoid false positives if the vCPU ID, i.e. x2APIC ID, is a
@@ -253,8 +270,7 @@ static int kvm_recalculate_phys_map(struct kvm_apic_map *new,
	 */
	if (vcpu->kvm->arch.x2apic_format) {
		/* See also kvm_apic_match_physical_addr(). */
-		if ((apic_x2apic_mode(apic) || x2apic_id > 0xff) &&
-		    x2apic_id <= new->max_apic_id)
+		if (apic_x2apic_mode(apic) || x2apic_id > 0xff)
			new->phys_map[x2apic_id] = apic;
 
		if (!apic_x2apic_mode(apic) && !new->phys_map[xapic_id])
@@ -7091,7 +7091,10 @@ static void kvm_recover_nx_huge_pages(struct kvm *kvm)
		 */
		slot = NULL;
		if (atomic_read(&kvm->nr_memslots_dirty_logging)) {
-			slot = gfn_to_memslot(kvm, sp->gfn);
+			struct kvm_memslots *slots;
+
+			slots = kvm_memslots_for_spte_role(kvm, sp->role);
+			slot = __gfn_to_memslot(slots, sp->gfn);
			WARN_ON_ONCE(!slot);
		}
 
@@ -3510,7 +3510,7 @@ static bool svm_is_vnmi_pending(struct kvm_vcpu *vcpu)
	if (!is_vnmi_enabled(svm))
		return false;
 
-	return !!(svm->vmcb->control.int_ctl & V_NMI_BLOCKING_MASK);
+	return !!(svm->vmcb->control.int_ctl & V_NMI_PENDING_MASK);
 }
 
 static bool svm_set_vnmi_pending(struct kvm_vcpu *vcpu)
@@ -10758,6 +10758,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
			exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
			break;
		}
+
+		/* Note, VM-Exits that go down the "slow" path are accounted below. */
+		++vcpu->stat.exits;
	}
 
	/*
@@ -915,6 +915,7 @@ static bool disk_has_partitions(struct gendisk *disk)
 void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
 {
	struct request_queue *q = disk->queue;
+	unsigned int old_model = q->limits.zoned;
 
	switch (model) {
	case BLK_ZONED_HM:
@@ -952,7 +953,7 @@ void disk_set_zoned(struct gendisk *disk, enum blk_zoned_model model)
		 */
		blk_queue_zone_write_granularity(q,
						queue_logical_block_size(q));
-	} else {
+	} else if (old_model != BLK_ZONED_NONE) {
		disk_clear_zone_settings(disk);
	}
 }
@@ -380,9 +380,10 @@ int public_key_verify_signature(const struct public_key *pkey,
	struct crypto_wait cwait;
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
-	struct scatterlist src_sg[2];
+	struct scatterlist src_sg;
	char alg_name[CRYPTO_MAX_ALG_NAME];
-	char *key, *ptr;
+	char *buf, *ptr;
+	size_t buf_len;
	int ret;
 
	pr_devel("==>%s()\n", __func__);
@@ -420,34 +421,37 @@ int public_key_verify_signature(const struct public_key *pkey,
	if (!req)
		goto error_free_tfm;
 
-	key = kmalloc(pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
-		      GFP_KERNEL);
-	if (!key)
+	buf_len = max_t(size_t, pkey->keylen + sizeof(u32) * 2 + pkey->paramlen,
+			sig->s_size + sig->digest_size);
+
+	buf = kmalloc(buf_len, GFP_KERNEL);
+	if (!buf)
		goto error_free_req;
 
-	memcpy(key, pkey->key, pkey->keylen);
-	ptr = key + pkey->keylen;
+	memcpy(buf, pkey->key, pkey->keylen);
+	ptr = buf + pkey->keylen;
	ptr = pkey_pack_u32(ptr, pkey->algo);
	ptr = pkey_pack_u32(ptr, pkey->paramlen);
	memcpy(ptr, pkey->params, pkey->paramlen);
 
	if (pkey->key_is_private)
-		ret = crypto_akcipher_set_priv_key(tfm, key, pkey->keylen);
+		ret = crypto_akcipher_set_priv_key(tfm, buf, pkey->keylen);
	else
-		ret = crypto_akcipher_set_pub_key(tfm, key, pkey->keylen);
+		ret = crypto_akcipher_set_pub_key(tfm, buf, pkey->keylen);
	if (ret)
-		goto error_free_key;
+		goto error_free_buf;
 
	if (strcmp(pkey->pkey_algo, "sm2") == 0 && sig->data_size) {
		ret = cert_sig_digest_update(sig, tfm);
		if (ret)
-			goto error_free_key;
+			goto error_free_buf;
	}
 
-	sg_init_table(src_sg, 2);
-	sg_set_buf(&src_sg[0], sig->s, sig->s_size);
-	sg_set_buf(&src_sg[1], sig->digest, sig->digest_size);
-	akcipher_request_set_crypt(req, src_sg, NULL, sig->s_size,
+	memcpy(buf, sig->s, sig->s_size);
+	memcpy(buf + sig->s_size, sig->digest, sig->digest_size);
+
+	sg_init_one(&src_sg, buf, sig->s_size + sig->digest_size);
+	akcipher_request_set_crypt(req, &src_sg, NULL, sig->s_size,
				   sig->digest_size);
	crypto_init_wait(&cwait);
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
@@ -455,8 +459,8 @@ int public_key_verify_signature(const struct public_key *pkey,
				      crypto_req_done, &cwait);
	ret = crypto_wait_req(crypto_akcipher_verify(req), &cwait);
 
-error_free_key:
-	kfree(key);
+error_free_buf:
+	kfree(buf);
 error_free_req:
	akcipher_request_free(req);
 error_free_tfm:
@@ -7,7 +7,6 @@
 #ifndef APEI_INTERNAL_H
 #define APEI_INTERNAL_H
 
-#include <linux/cper.h>
 #include <linux/acpi.h>
 
 struct apei_exec_context;
@@ -130,10 +129,5 @@ static inline u32 cper_estatus_len(struct acpi_hest_generic_status *estatus)
	return sizeof(*estatus) + estatus->data_length;
 }
 
-void cper_estatus_print(const char *pfx,
-			const struct acpi_hest_generic_status *estatus);
-int cper_estatus_check_header(const struct acpi_hest_generic_status *estatus);
-int cper_estatus_check(const struct acpi_hest_generic_status *estatus);
-
 int apei_osc_setup(void);
 #endif
@@ -23,6 +23,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
+#include <linux/cper.h>
 #include <linux/io.h>
 
 #include "apei-internal.h"
@@ -388,6 +388,16 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
			continue;/* skip if itself or no cacheinfo */
		for (sib_index = 0; sib_index < cache_leaves(i); sib_index++) {
			sib_leaf = per_cpu_cacheinfo_idx(i, sib_index);
+
+			/*
+			 * Comparing cache IDs only makes sense if the leaves
+			 * belong to the same cache level of same type. Skip
+			 * the check if level and type do not match.
+			 */
+			if (sib_leaf->level != this_leaf->level ||
+			    sib_leaf->type != this_leaf->type)
+				continue;
+
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
@@ -400,11 +410,14 @@ static int cache_shared_cpu_map_setup(unsigned int cpu)
			coherency_max_size = this_leaf->coherency_line_size;
	}
 
+	/* shared_cpu_map is now populated for the cpu */
+	this_cpu_ci->cpu_map_populated = true;
	return 0;
 }
 
 static void cache_shared_cpu_map_remove(unsigned int cpu)
 {
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sib_leaf;
	unsigned int sibling, index, sib_index;
 
@@ -419,6 +432,16 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
 
		for (sib_index = 0; sib_index < cache_leaves(sibling); sib_index++) {
			sib_leaf = per_cpu_cacheinfo_idx(sibling, sib_index);
+
+			/*
+			 * Comparing cache IDs only makes sense if the leaves
+			 * belong to the same cache level of same type. Skip
+			 * the check if level and type do not match.
+			 */
+			if (sib_leaf->level != this_leaf->level ||
+			    sib_leaf->type != this_leaf->type)
+				continue;
+
			if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
				cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
				cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
@@ -427,6 +450,9 @@ static void cache_shared_cpu_map_remove(unsigned int cpu)
			}
		}
	}
+
+	/* cpu is no longer populated in the shared map */
+	this_cpu_ci->cpu_map_populated = false;
 }
 
 static void free_cache_attributes(unsigned int cpu)
@@ -812,7 +812,7 @@ static void fw_log_firmware_info(const struct firmware *fw, const char *name, st
	char *outbuf;
 
	alg = crypto_alloc_shash("sha256", 0, 0);
-	if (!alg)
+	if (IS_ERR(alg))
		return;
 
	sha256buf = kmalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
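Context for the one-line fix above: crypto_alloc_shash() reports failure with an ERR_PTR()-encoded pointer, never NULL, so the old `if (!alg)` test could not catch an allocation error. The usual calling pattern, as a minimal sketch independent of this driver:

	struct crypto_shash *alg = crypto_alloc_shash("sha256", 0, 0);

	if (IS_ERR(alg))
		return PTR_ERR(alg);	/* e.g. -ENOMEM or -ENOENT */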
@@ -78,7 +78,8 @@ enum qca_flags {
	QCA_HW_ERROR_EVENT,
	QCA_SSR_TRIGGERED,
	QCA_BT_OFF,
-	QCA_ROM_FW
+	QCA_ROM_FW,
+	QCA_DEBUGFS_CREATED,
 };
 
 enum qca_capabilities {
@@ -635,6 +636,9 @@ static void qca_debugfs_init(struct hci_dev *hdev)
	if (!hdev->debugfs)
		return;
 
+	if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags))
+		return;
+
	ibs_dir = debugfs_create_dir("ibs", hdev->debugfs);
 
	/* read only */
@@ -84,10 +84,10 @@ enum tis_defaults {
 #define ILB_REMAP_SIZE		0x100
 
 enum tpm_tis_flags {
-	TPM_TIS_ITPM_WORKAROUND		= BIT(0),
-	TPM_TIS_INVALID_STATUS		= BIT(1),
-	TPM_TIS_DEFAULT_CANCELLATION	= BIT(2),
-	TPM_TIS_IRQ_TESTED		= BIT(3),
+	TPM_TIS_ITPM_WORKAROUND		= 0,
+	TPM_TIS_INVALID_STATUS		= 1,
+	TPM_TIS_DEFAULT_CANCELLATION	= 2,
+	TPM_TIS_IRQ_TESTED		= 3,
 };
 
 struct tpm_tis_data {
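Why the hunk above drops BIT(): these flags are passed to the bitops helpers, which take a bit number indexing into an unsigned long bitmap rather than a mask. A minimal sketch (assuming priv->flags is the unsigned long bitmap in struct tpm_tis_data):

	set_bit(TPM_TIS_IRQ_TESTED, &priv->flags);	/* expects a bit number */
	if (test_bit(TPM_TIS_INVALID_STATUS, &priv->flags))
		/* quirk handling here */;
	/* with the old mask values, set_bit(BIT(3), ...) would have set bit 8 */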
@@ -32,7 +32,8 @@ zboot-size-len-$(CONFIG_KERNEL_GZIP) := 0
 $(obj)/vmlinuz: $(obj)/vmlinux.bin FORCE
	$(call if_changed,$(zboot-method-y))
 
-OBJCOPYFLAGS_vmlinuz.o := -I binary -O $(EFI_ZBOOT_BFD_TARGET) $(EFI_ZBOOT_OBJCOPY_FLAGS) \
+# avoid eager evaluation to prevent references to non-existent build artifacts
+OBJCOPYFLAGS_vmlinuz.o = -I binary -O $(EFI_ZBOOT_BFD_TARGET) $(EFI_ZBOOT_OBJCOPY_FLAGS) \
			 --rename-section .data=.gzdata,load,alloc,readonly,contents
 $(obj)/vmlinuz.o: $(obj)/vmlinuz FORCE
	$(call if_changed,objcopy)
@@ -1133,4 +1133,7 @@ const u8 *__efi_get_smbios_string(const struct efi_smbios_record *record,
 void efi_remap_image(unsigned long image_base, unsigned alloc_size,
		     unsigned long code_size);
 
+asmlinkage efi_status_t __efiapi
+efi_zboot_entry(efi_handle_t handle, efi_system_table_t *systab);
+
 #endif
@@ -593,6 +593,8 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
	case IP_VERSION(9, 3, 0):
	/* GC 10.3.7 */
	case IP_VERSION(10, 3, 7):
+	/* GC 11.0.1 */
+	case IP_VERSION(11, 0, 1):
		if (amdgpu_tmz == 0) {
			adev->gmc.tmz_enabled = false;
			dev_info(adev->dev,
@@ -616,7 +618,6 @@ void amdgpu_gmc_tmz_set(struct amdgpu_device *adev)
	case IP_VERSION(10, 3, 1):
	/* YELLOW_CARP*/
	case IP_VERSION(10, 3, 3):
-	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 4):
		/* Don't enable it by default yet.
		 */
@@ -241,6 +241,31 @@ int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
	return 0;
 }
 
+int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+	int r, i;
+
+	r = amdgpu_ras_block_late_init(adev, ras_block);
+	if (r)
+		return r;
+
+	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
+		for (i = 0; i < adev->jpeg.num_jpeg_inst; ++i) {
+			if (adev->jpeg.harvest_config & (1 << i))
+				continue;
+
+			r = amdgpu_irq_get(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
+			if (r)
+				goto late_fini;
+		}
+	}
+	return 0;
+
+late_fini:
+	amdgpu_ras_block_late_fini(adev, ras_block);
+	return r;
+}
+
 int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
 {
	int err;
@@ -262,7 +287,7 @@ int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev)
	adev->jpeg.ras_if = &ras->ras_block.ras_comm;
 
	if (!ras->ras_block.ras_late_init)
-		ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+		ras->ras_block.ras_late_init = amdgpu_jpeg_ras_late_init;
 
	return 0;
 }
@@ -38,6 +38,7 @@ struct amdgpu_jpeg_reg{
 struct amdgpu_jpeg_inst {
	struct amdgpu_ring ring_dec;
	struct amdgpu_irq_src irq;
+	struct amdgpu_irq_src ras_poison_irq;
	struct amdgpu_jpeg_reg external;
 };
 
@@ -72,6 +73,8 @@ int amdgpu_jpeg_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout);
 int amdgpu_jpeg_process_poison_irq(struct amdgpu_device *adev,
				struct amdgpu_irq_src *source,
				struct amdgpu_iv_entry *entry);
+int amdgpu_jpeg_ras_late_init(struct amdgpu_device *adev,
+				struct ras_common_if *ras_block);
 int amdgpu_jpeg_ras_sw_init(struct amdgpu_device *adev);
 
 #endif /*__AMDGPU_JPEG_H__*/
@@ -1181,6 +1181,31 @@ int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
	return 0;
 }
 
+int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block)
+{
+	int r, i;
+
+	r = amdgpu_ras_block_late_init(adev, ras_block);
+	if (r)
+		return r;
+
+	if (amdgpu_ras_is_supported(adev, ras_block->block)) {
+		for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+			if (adev->vcn.harvest_config & (1 << i))
+				continue;
+
+			r = amdgpu_irq_get(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
+			if (r)
+				goto late_fini;
+		}
+	}
+	return 0;
+
+late_fini:
+	amdgpu_ras_block_late_fini(adev, ras_block);
+	return r;
+}
+
 int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
 {
	int err;
@@ -1202,7 +1227,7 @@ int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev)
	adev->vcn.ras_if = &ras->ras_block.ras_comm;
 
	if (!ras->ras_block.ras_late_init)
-		ras->ras_block.ras_late_init = amdgpu_ras_block_late_init;
+		ras->ras_block.ras_late_init = amdgpu_vcn_ras_late_init;
 
	return 0;
 }
@@ -234,6 +234,7 @@ struct amdgpu_vcn_inst {
    struct amdgpu_ring ring_enc[AMDGPU_VCN_MAX_ENC_RINGS];
    atomic_t sched_score;
    struct amdgpu_irq_src irq;
    struct amdgpu_irq_src ras_poison_irq;
    struct amdgpu_vcn_reg external;
    struct amdgpu_bo *dpg_sram_bo;
    struct dpg_pause_state pause_state;
@@ -400,6 +401,8 @@ void amdgpu_debugfs_vcn_fwlog_init(struct amdgpu_device *adev,
int amdgpu_vcn_process_poison_irq(struct amdgpu_device *adev,
        struct amdgpu_irq_src *source,
        struct amdgpu_iv_entry *entry);
int amdgpu_vcn_ras_late_init(struct amdgpu_device *adev,
        struct ras_common_if *ras_block);
int amdgpu_vcn_ras_sw_init(struct amdgpu_device *adev);

#endif

@@ -102,13 +102,13 @@ static int jpeg_v2_5_sw_init(void *handle)

        /* JPEG DJPEG POISON EVENT */
        r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
            VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].irq);
            VCN_2_6__SRCID_DJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq);
        if (r)
            return r;

        /* JPEG EJPEG POISON EVENT */
        r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_jpeg[i],
            VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].irq);
            VCN_2_6__SRCID_EJPEG0_POISON, &adev->jpeg.inst[i].ras_poison_irq);
        if (r)
            return r;
    }
@@ -221,6 +221,9 @@ static int jpeg_v2_5_hw_fini(void *handle)
        if (adev->jpeg.cur_state != AMD_PG_STATE_GATE &&
            RREG32_SOC15(JPEG, i, mmUVD_JRBC_STATUS))
            jpeg_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);

        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
            amdgpu_irq_put(adev, &adev->jpeg.inst[i].ras_poison_irq, 0);
    }

    return 0;
@@ -569,6 +572,14 @@ static int jpeg_v2_5_set_interrupt_state(struct amdgpu_device *adev,
    return 0;
}

static int jpeg_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
        struct amdgpu_irq_src *source,
        unsigned int type,
        enum amdgpu_interrupt_state state)
{
    return 0;
}

static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
        struct amdgpu_irq_src *source,
        struct amdgpu_iv_entry *entry)
@@ -593,10 +604,6 @@ static int jpeg_v2_5_process_interrupt(struct amdgpu_device *adev,
    case VCN_2_0__SRCID__JPEG_DECODE:
        amdgpu_fence_process(&adev->jpeg.inst[ip_instance].ring_dec);
        break;
    case VCN_2_6__SRCID_DJPEG0_POISON:
    case VCN_2_6__SRCID_EJPEG0_POISON:
        amdgpu_jpeg_process_poison_irq(adev, source, entry);
        break;
    default:
        DRM_ERROR("Unhandled interrupt: %d %d\n",
            entry->src_id, entry->src_data[0]);
@@ -725,6 +732,11 @@ static const struct amdgpu_irq_src_funcs jpeg_v2_5_irq_funcs = {
    .process = jpeg_v2_5_process_interrupt,
};

static const struct amdgpu_irq_src_funcs jpeg_v2_6_ras_irq_funcs = {
    .set = jpeg_v2_6_set_ras_interrupt_state,
    .process = amdgpu_jpeg_process_poison_irq,
};

static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
    int i;
@@ -735,6 +747,9 @@ static void jpeg_v2_5_set_irq_funcs(struct amdgpu_device *adev)

        adev->jpeg.inst[i].irq.num_types = 1;
        adev->jpeg.inst[i].irq.funcs = &jpeg_v2_5_irq_funcs;

        adev->jpeg.inst[i].ras_poison_irq.num_types = 1;
        adev->jpeg.inst[i].ras_poison_irq.funcs = &jpeg_v2_6_ras_irq_funcs;
    }
}

@@ -800,6 +815,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v2_6_ras_hw_ops = {
static struct amdgpu_jpeg_ras jpeg_v2_6_ras = {
    .ras_block = {
        .hw_ops = &jpeg_v2_6_ras_hw_ops,
        .ras_late_init = amdgpu_jpeg_ras_late_init,
    },
};

@@ -87,13 +87,13 @@ static int jpeg_v4_0_sw_init(void *handle)

    /* JPEG DJPEG POISON EVENT */
    r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
        VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->irq);
        VCN_4_0__SRCID_DJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
    if (r)
        return r;

    /* JPEG EJPEG POISON EVENT */
    r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_VCN,
        VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->irq);
        VCN_4_0__SRCID_EJPEG0_POISON, &adev->jpeg.inst->ras_poison_irq);
    if (r)
        return r;

@@ -202,7 +202,8 @@ static int jpeg_v4_0_hw_fini(void *handle)
        RREG32_SOC15(JPEG, 0, regUVD_JRBC_STATUS))
        jpeg_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
    }
    amdgpu_irq_put(adev, &adev->jpeg.inst->irq, 0);
    if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__JPEG))
        amdgpu_irq_put(adev, &adev->jpeg.inst->ras_poison_irq, 0);

    return 0;
}
@@ -670,6 +671,14 @@ static int jpeg_v4_0_set_interrupt_state(struct amdgpu_device *adev,
    return 0;
}

static int jpeg_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
        struct amdgpu_irq_src *source,
        unsigned int type,
        enum amdgpu_interrupt_state state)
{
    return 0;
}

static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
        struct amdgpu_irq_src *source,
        struct amdgpu_iv_entry *entry)
@@ -680,10 +689,6 @@ static int jpeg_v4_0_process_interrupt(struct amdgpu_device *adev,
    case VCN_4_0__SRCID__JPEG_DECODE:
        amdgpu_fence_process(&adev->jpeg.inst->ring_dec);
        break;
    case VCN_4_0__SRCID_DJPEG0_POISON:
    case VCN_4_0__SRCID_EJPEG0_POISON:
        amdgpu_jpeg_process_poison_irq(adev, source, entry);
        break;
    default:
        DRM_DEV_ERROR(adev->dev, "Unhandled interrupt: %d %d\n",
            entry->src_id, entry->src_data[0]);
@@ -753,10 +758,18 @@ static const struct amdgpu_irq_src_funcs jpeg_v4_0_irq_funcs = {
    .process = jpeg_v4_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs jpeg_v4_0_ras_irq_funcs = {
    .set = jpeg_v4_0_set_ras_interrupt_state,
    .process = amdgpu_jpeg_process_poison_irq,
};

static void jpeg_v4_0_set_irq_funcs(struct amdgpu_device *adev)
{
    adev->jpeg.inst->irq.num_types = 1;
    adev->jpeg.inst->irq.funcs = &jpeg_v4_0_irq_funcs;

    adev->jpeg.inst->ras_poison_irq.num_types = 1;
    adev->jpeg.inst->ras_poison_irq.funcs = &jpeg_v4_0_ras_irq_funcs;
}

const struct amdgpu_ip_block_version jpeg_v4_0_ip_block = {
@@ -811,6 +824,7 @@ const struct amdgpu_ras_block_hw_ops jpeg_v4_0_ras_hw_ops = {
static struct amdgpu_jpeg_ras jpeg_v4_0_ras = {
    .ras_block = {
        .hw_ops = &jpeg_v4_0_ras_hw_ops,
        .ras_late_init = amdgpu_jpeg_ras_late_init,
    },
};

@@ -143,7 +143,7 @@ static int vcn_v2_5_sw_init(void *handle)

        /* VCN POISON TRAP */
        r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[j],
            VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].irq);
            VCN_2_6__SRCID_UVD_POISON, &adev->vcn.inst[j].ras_poison_irq);
        if (r)
            return r;
    }
@@ -354,6 +354,9 @@ static int vcn_v2_5_hw_fini(void *handle)
            (adev->vcn.cur_state != AMD_PG_STATE_GATE &&
             RREG32_SOC15(VCN, i, mmUVD_STATUS)))
            vcn_v2_5_set_powergating_state(adev, AMD_PG_STATE_GATE);

        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
            amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
    }

    return 0;
@@ -1807,6 +1810,14 @@ static int vcn_v2_5_set_interrupt_state(struct amdgpu_device *adev,
    return 0;
}

static int vcn_v2_6_set_ras_interrupt_state(struct amdgpu_device *adev,
        struct amdgpu_irq_src *source,
        unsigned int type,
        enum amdgpu_interrupt_state state)
{
    return 0;
}

static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
        struct amdgpu_irq_src *source,
        struct amdgpu_iv_entry *entry)
@@ -1837,9 +1848,6 @@ static int vcn_v2_5_process_interrupt(struct amdgpu_device *adev,
    case VCN_2_0__SRCID__UVD_ENC_LOW_LATENCY:
        amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[1]);
        break;
    case VCN_2_6__SRCID_UVD_POISON:
        amdgpu_vcn_process_poison_irq(adev, source, entry);
        break;
    default:
        DRM_ERROR("Unhandled interrupt: %d %d\n",
            entry->src_id, entry->src_data[0]);
@@ -1854,6 +1862,11 @@ static const struct amdgpu_irq_src_funcs vcn_v2_5_irq_funcs = {
    .process = vcn_v2_5_process_interrupt,
};

static const struct amdgpu_irq_src_funcs vcn_v2_6_ras_irq_funcs = {
    .set = vcn_v2_6_set_ras_interrupt_state,
    .process = amdgpu_vcn_process_poison_irq,
};

static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
{
    int i;
@@ -1863,6 +1876,9 @@ static void vcn_v2_5_set_irq_funcs(struct amdgpu_device *adev)
            continue;
        adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
        adev->vcn.inst[i].irq.funcs = &vcn_v2_5_irq_funcs;

        adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
        adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v2_6_ras_irq_funcs;
    }
}

@@ -1965,6 +1981,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v2_6_ras_hw_ops = {
static struct amdgpu_vcn_ras vcn_v2_6_ras = {
    .ras_block = {
        .hw_ops = &vcn_v2_6_ras_hw_ops,
        .ras_late_init = amdgpu_vcn_ras_late_init,
    },
};

@@ -139,7 +139,7 @@ static int vcn_v4_0_sw_init(void *handle)

        /* VCN POISON TRAP */
        r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_vcns[i],
            VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].irq);
            VCN_4_0__SRCID_UVD_POISON, &adev->vcn.inst[i].ras_poison_irq);
        if (r)
            return r;

@@ -305,8 +305,8 @@ static int vcn_v4_0_hw_fini(void *handle)
                vcn_v4_0_set_powergating_state(adev, AMD_PG_STATE_GATE);
            }
        }

        amdgpu_irq_put(adev, &adev->vcn.inst[i].irq, 0);
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__VCN))
            amdgpu_irq_put(adev, &adev->vcn.inst[i].ras_poison_irq, 0);
    }

    return 0;
@@ -1975,6 +1975,24 @@ static int vcn_v4_0_set_interrupt_state(struct amdgpu_device *adev, struct amdgp
    return 0;
}

/**
 * vcn_v4_0_set_ras_interrupt_state - set VCN block RAS interrupt state
 *
 * @adev: amdgpu_device pointer
 * @source: interrupt sources
 * @type: interrupt types
 * @state: interrupt states
 *
 * Set VCN block RAS interrupt state
 */
static int vcn_v4_0_set_ras_interrupt_state(struct amdgpu_device *adev,
        struct amdgpu_irq_src *source,
        unsigned int type,
        enum amdgpu_interrupt_state state)
{
    return 0;
}

/**
 * vcn_v4_0_process_interrupt - process VCN block interrupt
 *
@@ -2007,9 +2025,6 @@ static int vcn_v4_0_process_interrupt(struct amdgpu_device *adev, struct amdgpu_
    case VCN_4_0__SRCID__UVD_ENC_GENERAL_PURPOSE:
        amdgpu_fence_process(&adev->vcn.inst[ip_instance].ring_enc[0]);
        break;
    case VCN_4_0__SRCID_UVD_POISON:
        amdgpu_vcn_process_poison_irq(adev, source, entry);
        break;
    default:
        DRM_ERROR("Unhandled interrupt: %d %d\n",
            entry->src_id, entry->src_data[0]);
@@ -2024,6 +2039,11 @@ static const struct amdgpu_irq_src_funcs vcn_v4_0_irq_funcs = {
    .process = vcn_v4_0_process_interrupt,
};

static const struct amdgpu_irq_src_funcs vcn_v4_0_ras_irq_funcs = {
    .set = vcn_v4_0_set_ras_interrupt_state,
    .process = amdgpu_vcn_process_poison_irq,
};

/**
 * vcn_v4_0_set_irq_funcs - set VCN block interrupt irq functions
 *
@@ -2041,6 +2061,9 @@ static void vcn_v4_0_set_irq_funcs(struct amdgpu_device *adev)

        adev->vcn.inst[i].irq.num_types = adev->vcn.num_enc_rings + 1;
        adev->vcn.inst[i].irq.funcs = &vcn_v4_0_irq_funcs;

        adev->vcn.inst[i].ras_poison_irq.num_types = adev->vcn.num_enc_rings + 1;
        adev->vcn.inst[i].ras_poison_irq.funcs = &vcn_v4_0_ras_irq_funcs;
    }
}

@@ -2114,6 +2137,7 @@ const struct amdgpu_ras_block_hw_ops vcn_v4_0_ras_hw_ops = {
static struct amdgpu_vcn_ras vcn_v4_0_ras = {
    .ras_block = {
        .hw_ops = &vcn_v4_0_ras_hw_ops,
        .ras_late_init = amdgpu_vcn_ras_late_init,
    },
};

@@ -2113,15 +2113,6 @@ void dcn20_optimize_bandwidth(
    if (hubbub->funcs->program_compbuf_size)
        hubbub->funcs->program_compbuf_size(hubbub, context->bw_ctx.bw.dcn.compbuf_size_kb, true);

    if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) {
        dc_dmub_srv_p_state_delegate(dc,
            true, context);
        context->bw_ctx.bw.dcn.clk.p_state_change_support = true;
        dc->clk_mgr->clks.fw_based_mclk_switching = true;
    } else {
        dc->clk_mgr->clks.fw_based_mclk_switching = false;
    }

    dc->clk_mgr->funcs->update_clocks(
        dc->clk_mgr,
        context,

@@ -983,36 +983,13 @@ void dcn30_set_disp_pattern_generator(const struct dc *dc,
}

void dcn30_prepare_bandwidth(struct dc *dc,
    struct dc_state *context)
    struct dc_state *context)
{
    bool p_state_change_support = context->bw_ctx.bw.dcn.clk.p_state_change_support;
    /* Any transition into an FPO config should disable MCLK switching first to avoid
     * driver and FW P-State synchronization issues.
     */
    if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
        dc->optimized_required = true;
        context->bw_ctx.bw.dcn.clk.p_state_change_support = false;
    }

    if (dc->clk_mgr->dc_mode_softmax_enabled)
        if (dc->clk_mgr->clks.dramclk_khz <= dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000 &&
            context->bw_ctx.bw.dcn.clk.dramclk_khz > dc->clk_mgr->bw_params->dc_mode_softmax_memclk * 1000)
            dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);

    dcn20_prepare_bandwidth(dc, context);
    /*
     * enabled -> enabled: do not disable
     * enabled -> disabled: disable
     * disabled -> enabled: don't care
     * disabled -> disabled: don't care
     */
    if (!context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching)
        dc_dmub_srv_p_state_delegate(dc, false, context);

    if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching || dc->clk_mgr->clks.fw_based_mclk_switching) {
        /* After disabling P-State, restore the original value to ensure we get the correct P-State
         * on the next optimize. */
        context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
    }
}

@@ -6925,23 +6925,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
    return 0;
}

static int si_set_temperature_range(struct amdgpu_device *adev)
{
    int ret;

    ret = si_thermal_enable_alert(adev, false);
    if (ret)
        return ret;
    ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
    if (ret)
        return ret;
    ret = si_thermal_enable_alert(adev, true);
    if (ret)
        return ret;

    return ret;
}

static void si_dpm_disable(struct amdgpu_device *adev)
{
    struct rv7xx_power_info *pi = rv770_get_pi(adev);
@@ -7626,18 +7609,6 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,

static int si_dpm_late_init(void *handle)
{
    int ret;
    struct amdgpu_device *adev = (struct amdgpu_device *)handle;

    if (!adev->pm.dpm_enabled)
        return 0;

    ret = si_set_temperature_range(adev);
    if (ret)
        return ret;
#if 0 //TODO ?
    si_dpm_powergate_uvd(adev, true);
#endif
    return 0;
}

@@ -582,7 +582,7 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
    DpmClocks_t *clk_table = smu->smu_table.clocks_table;
    SmuMetrics_legacy_t metrics;
    struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
    int i, size = 0, ret = 0;
    int i, idx, size = 0, ret = 0;
    uint32_t cur_value = 0, value = 0, count = 0;
    bool cur_value_match_level = false;

@@ -656,7 +656,8 @@ static int vangogh_print_legacy_clk_levels(struct smu_context *smu,
    case SMU_MCLK:
    case SMU_FCLK:
        for (i = 0; i < count; i++) {
            ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
            idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
            ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
            if (ret)
                return ret;
            if (!value)
@@ -683,7 +684,7 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
    DpmClocks_t *clk_table = smu->smu_table.clocks_table;
    SmuMetrics_t metrics;
    struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
    int i, size = 0, ret = 0;
    int i, idx, size = 0, ret = 0;
    uint32_t cur_value = 0, value = 0, count = 0;
    bool cur_value_match_level = false;
    uint32_t min, max;
@@ -765,7 +766,8 @@ static int vangogh_print_clk_levels(struct smu_context *smu,
    case SMU_MCLK:
    case SMU_FCLK:
        for (i = 0; i < count; i++) {
            ret = vangogh_get_dpm_clk_limited(smu, clk_type, i, &value);
            idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
            ret = vangogh_get_dpm_clk_limited(smu, clk_type, idx, &value);
            if (ret)
                return ret;
            if (!value)

@@ -494,7 +494,7 @@ static int renoir_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
static int renoir_print_clk_levels(struct smu_context *smu,
        enum smu_clk_type clk_type, char *buf)
{
    int i, size = 0, ret = 0;
    int i, idx, size = 0, ret = 0;
    uint32_t cur_value = 0, value = 0, count = 0, min = 0, max = 0;
    SmuMetrics_t metrics;
    struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
@@ -594,7 +594,8 @@ static int renoir_print_clk_levels(struct smu_context *smu,
    case SMU_VCLK:
    case SMU_DCLK:
        for (i = 0; i < count; i++) {
            ret = renoir_get_dpm_clk_limited(smu, clk_type, i, &value);
            idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
            ret = renoir_get_dpm_clk_limited(smu, clk_type, idx, &value);
            if (ret)
                return ret;
            if (!value)

@@ -478,7 +478,7 @@ static int smu_v13_0_4_get_dpm_level_count(struct smu_context *smu,
static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
        enum smu_clk_type clk_type, char *buf)
{
    int i, size = 0, ret = 0;
    int i, idx, size = 0, ret = 0;
    uint32_t cur_value = 0, value = 0, count = 0;
    uint32_t min, max;

@@ -512,7 +512,8 @@ static int smu_v13_0_4_print_clk_levels(struct smu_context *smu,
        break;

        for (i = 0; i < count; i++) {
            ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, i, &value);
            idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
            ret = smu_v13_0_4_get_dpm_freq_by_index(smu, clk_type, idx, &value);
            if (ret)
                break;

@@ -866,7 +866,7 @@ out:
static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
        enum smu_clk_type clk_type, char *buf)
{
    int i, size = 0, ret = 0;
    int i, idx, size = 0, ret = 0;
    uint32_t cur_value = 0, value = 0, count = 0;
    uint32_t min = 0, max = 0;

@@ -898,7 +898,8 @@ static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
        goto print_clk_out;

        for (i = 0; i < count; i++) {
            ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, i, &value);
            idx = (clk_type == SMU_MCLK) ? (count - i - 1) : i;
            ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, idx, &value);
            if (ret)
                goto print_clk_out;

@@ -1000,7 +1000,7 @@ out:
static int yellow_carp_print_clk_levels(struct smu_context *smu,
        enum smu_clk_type clk_type, char *buf)
{
    int i, size = 0, ret = 0;
    int i, idx, size = 0, ret = 0;
    uint32_t cur_value = 0, value = 0, count = 0;
    uint32_t min, max;

@@ -1033,7 +1033,8 @@ static int yellow_carp_print_clk_levels(struct smu_context *smu,
        goto print_clk_out;

        for (i = 0; i < count; i++) {
            ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, i, &value);
            idx = (clk_type == SMU_FCLK || clk_type == SMU_MCLK) ? (count - i - 1) : i;
            ret = yellow_carp_get_dpm_freq_by_index(smu, clk_type, idx, &value);
            if (ret)
                goto print_clk_out;

@@ -877,12 +877,17 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
            stream->oa_buffer.last_ctx_id = ctx_id;
        }

        /*
         * Clear out the report id and timestamp as a means to detect unlanded
         * reports.
         */
        oa_report_id_clear(stream, report32);
        oa_timestamp_clear(stream, report32);
        if (is_power_of_2(report_size)) {
            /*
             * Clear out the report id and timestamp as a means
             * to detect unlanded reports.
             */
            oa_report_id_clear(stream, report32);
            oa_timestamp_clear(stream, report32);
        } else {
            /* Zero out the entire report */
            memset(report32, 0, report_size);
        }
    }

    if (start_offset != *offset) {

@@ -286,7 +286,7 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
        struct hidpp_report *message,
        struct hidpp_report *response)
{
    int ret;
    int ret = -1;
    int max_retries = 3;

    mutex_lock(&hidpp->send_mutex);
@@ -300,13 +300,13 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
     */
    *response = *message;

    for (; max_retries != 0; max_retries--) {
    for (; max_retries != 0 && ret; max_retries--) {
        ret = __hidpp_send_report(hidpp->hid_dev, message);

        if (ret) {
            dbg_hid("__hidpp_send_report returned err: %d\n", ret);
            memset(response, 0, sizeof(struct hidpp_report));
            goto exit;
            break;
        }

        if (!wait_event_timeout(hidpp->wait, hidpp->answer_available,
@@ -314,14 +314,14 @@ static int hidpp_send_message_sync(struct hidpp_device *hidpp,
            dbg_hid("%s:timeout waiting for response\n", __func__);
            memset(response, 0, sizeof(struct hidpp_report));
            ret = -ETIMEDOUT;
            goto exit;
            break;
        }

        if (response->report_id == REPORT_ID_HIDPP_SHORT &&
            response->rap.sub_id == HIDPP_ERROR) {
            ret = response->rap.params[1];
            dbg_hid("%s:got hidpp error %02X\n", __func__, ret);
            goto exit;
            break;
        }

        if ((response->report_id == REPORT_ID_HIDPP_LONG ||
@@ -330,13 +330,12 @@
            ret = response->fap.params[1];
            if (ret != HIDPP20_ERROR_BUSY) {
                dbg_hid("%s:got hidpp 2.0 error %02X\n", __func__, ret);
                goto exit;
                break;
            }
            dbg_hid("%s:got busy hidpp 2.0 error %02X, retrying\n", __func__, ret);
        }
    }

exit:
    mutex_unlock(&hidpp->send_mutex);
    return ret;

@@ -1048,7 +1048,7 @@ int kx022a_probe_internal(struct device *dev)
        data->ien_reg = KX022A_REG_INC4;
    } else {
        irq = fwnode_irq_get_byname(fwnode, "INT2");
        if (irq <= 0)
        if (irq < 0)
            return dev_err_probe(dev, irq, "No suitable IRQ\n");

        data->inc_reg = KX022A_REG_INC5;

@@ -1291,12 +1291,12 @@ static int apply_acpi_orientation(struct iio_dev *indio_dev)

    adev = ACPI_COMPANION(indio_dev->dev.parent);
    if (!adev)
        return 0;
        return -ENXIO;

    /* Read _ONT data, which should be a package of 6 integers. */
    status = acpi_evaluate_object(adev->handle, "_ONT", NULL, &buffer);
    if (status == AE_NOT_FOUND) {
        return 0;
        return -ENXIO;
    } else if (ACPI_FAILURE(status)) {
        dev_warn(&indio_dev->dev, "failed to execute _ONT: %d\n",
            status);

@@ -1817,6 +1817,11 @@ static const struct clk_ops ad4130_int_clk_ops = {
    .unprepare = ad4130_int_clk_unprepare,
};

static void ad4130_clk_del_provider(void *of_node)
{
    of_clk_del_provider(of_node);
}

static int ad4130_setup_int_clk(struct ad4130_state *st)
{
    struct device *dev = &st->spi->dev;
@@ -1824,6 +1829,7 @@ static int ad4130_setup_int_clk(struct ad4130_state *st)
    struct clk_init_data init;
    const char *clk_name;
    struct clk *clk;
    int ret;

    if (st->int_pin_sel == AD4130_INT_PIN_CLK ||
        st->mclk_sel != AD4130_MCLK_76_8KHZ)
@@ -1843,7 +1849,11 @@ static int ad4130_setup_int_clk(struct ad4130_state *st)
    if (IS_ERR(clk))
        return PTR_ERR(clk);

    return of_clk_add_provider(of_node, of_clk_src_simple_get, clk);
    ret = of_clk_add_provider(of_node, of_clk_src_simple_get, clk);
    if (ret)
        return ret;

    return devm_add_action_or_reset(dev, ad4130_clk_del_provider, of_node);
}

static int ad4130_setup(struct iio_dev *indio_dev)

@@ -897,10 +897,6 @@ static const struct iio_info ad7195_info = {
    __AD719x_CHANNEL(_si, _channel1, -1, _address, NULL, IIO_VOLTAGE, \
        BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info)

#define AD719x_SHORTED_CHANNEL(_si, _channel1, _address) \
    __AD719x_CHANNEL(_si, _channel1, -1, _address, "shorted", IIO_VOLTAGE, \
        BIT(IIO_CHAN_INFO_SCALE), ad7192_calibsys_ext_info)

#define AD719x_TEMP_CHANNEL(_si, _address) \
    __AD719x_CHANNEL(_si, 0, -1, _address, NULL, IIO_TEMP, 0, NULL)

@@ -908,7 +904,7 @@ static const struct iio_chan_spec ad7192_channels[] = {
    AD719x_DIFF_CHANNEL(0, 1, 2, AD7192_CH_AIN1P_AIN2M),
    AD719x_DIFF_CHANNEL(1, 3, 4, AD7192_CH_AIN3P_AIN4M),
    AD719x_TEMP_CHANNEL(2, AD7192_CH_TEMP),
    AD719x_SHORTED_CHANNEL(3, 2, AD7192_CH_AIN2P_AIN2M),
    AD719x_DIFF_CHANNEL(3, 2, 2, AD7192_CH_AIN2P_AIN2M),
    AD719x_CHANNEL(4, 1, AD7192_CH_AIN1),
    AD719x_CHANNEL(5, 2, AD7192_CH_AIN2),
    AD719x_CHANNEL(6, 3, AD7192_CH_AIN3),
@@ -922,7 +918,7 @@ static const struct iio_chan_spec ad7193_channels[] = {
    AD719x_DIFF_CHANNEL(2, 5, 6, AD7193_CH_AIN5P_AIN6M),
    AD719x_DIFF_CHANNEL(3, 7, 8, AD7193_CH_AIN7P_AIN8M),
    AD719x_TEMP_CHANNEL(4, AD7193_CH_TEMP),
    AD719x_SHORTED_CHANNEL(5, 2, AD7193_CH_AIN2P_AIN2M),
    AD719x_DIFF_CHANNEL(5, 2, 2, AD7193_CH_AIN2P_AIN2M),
    AD719x_CHANNEL(6, 1, AD7193_CH_AIN1),
    AD719x_CHANNEL(7, 2, AD7193_CH_AIN2),
    AD719x_CHANNEL(8, 3, AD7193_CH_AIN3),

@@ -584,6 +584,10 @@ static int devm_ad_sd_probe_trigger(struct device *dev, struct iio_dev *indio_de
    init_completion(&sigma_delta->completion);

    sigma_delta->irq_dis = true;

    /* the IRQ core clears IRQ_DISABLE_UNLAZY flag when freeing an IRQ */
    irq_set_status_flags(sigma_delta->spi->irq, IRQ_DISABLE_UNLAZY);

    ret = devm_request_irq(dev, sigma_delta->spi->irq,
        ad_sd_data_rdy_trig_poll,
        sigma_delta->info->irq_flags | IRQF_NO_AUTOEN,

@@ -236,8 +236,7 @@ static int imx93_adc_read_raw(struct iio_dev *indio_dev,
{
    struct imx93_adc *adc = iio_priv(indio_dev);
    struct device *dev = adc->dev;
    long ret;
    u32 vref_uv;
    int ret;

    switch (mask) {
    case IIO_CHAN_INFO_RAW:
@@ -253,10 +252,10 @@ static int imx93_adc_read_raw(struct iio_dev *indio_dev,
        return IIO_VAL_INT;

    case IIO_CHAN_INFO_SCALE:
        ret = vref_uv = regulator_get_voltage(adc->vref);
        ret = regulator_get_voltage(adc->vref);
        if (ret < 0)
            return ret;
        *val = vref_uv / 1000;
        *val = ret / 1000;
        *val2 = 12;
        return IIO_VAL_FRACTIONAL_LOG2;

@@ -19,6 +19,7 @@

#include <dt-bindings/iio/adc/mediatek,mt6370_adc.h>

#define MT6370_REG_DEV_INFO      0x100
#define MT6370_REG_CHG_CTRL3     0x113
#define MT6370_REG_CHG_CTRL7     0x117
#define MT6370_REG_CHG_ADC       0x121
@@ -27,6 +28,7 @@
#define MT6370_ADC_START_MASK    BIT(0)
#define MT6370_ADC_IN_SEL_MASK   GENMASK(7, 4)
#define MT6370_AICR_ICHG_MASK    GENMASK(7, 2)
#define MT6370_VENID_MASK        GENMASK(7, 4)

#define MT6370_AICR_100_mA       0x0
#define MT6370_AICR_150_mA       0x1
@@ -47,6 +49,10 @@
#define ADC_CONV_TIME_MS         35
#define ADC_CONV_POLLING_TIME_US 1000

#define MT6370_VID_RT5081        0x8
#define MT6370_VID_RT5081A       0xA
#define MT6370_VID_MT6370        0xE

struct mt6370_adc_data {
    struct device *dev;
    struct regmap *regmap;
@@ -55,6 +61,7 @@ struct mt6370_adc_data {
     * from being read at the same time.
     */
    struct mutex adc_lock;
    unsigned int vid;
};

static int mt6370_adc_read_channel(struct mt6370_adc_data *priv, int chan,
@@ -98,6 +105,30 @@ adc_unlock:
    return ret;
}

static int mt6370_adc_get_ibus_scale(struct mt6370_adc_data *priv)
{
    switch (priv->vid) {
    case MT6370_VID_RT5081:
    case MT6370_VID_RT5081A:
    case MT6370_VID_MT6370:
        return 3350;
    default:
        return 3875;
    }
}

static int mt6370_adc_get_ibat_scale(struct mt6370_adc_data *priv)
{
    switch (priv->vid) {
    case MT6370_VID_RT5081:
    case MT6370_VID_RT5081A:
    case MT6370_VID_MT6370:
        return 2680;
    default:
        return 3870;
    }
}

static int mt6370_adc_read_scale(struct mt6370_adc_data *priv,
        int chan, int *val1, int *val2)
{
@@ -123,7 +154,7 @@ static int mt6370_adc_read_scale(struct mt6370_adc_data *priv,
    case MT6370_AICR_250_mA:
    case MT6370_AICR_300_mA:
    case MT6370_AICR_350_mA:
        *val1 = 3350;
        *val1 = mt6370_adc_get_ibus_scale(priv);
        break;
    default:
        *val1 = 5000;
@@ -150,7 +181,7 @@ static int mt6370_adc_read_scale(struct mt6370_adc_data *priv,
    case MT6370_ICHG_600_mA:
    case MT6370_ICHG_700_mA:
    case MT6370_ICHG_800_mA:
        *val1 = 2680;
        *val1 = mt6370_adc_get_ibat_scale(priv);
        break;
    default:
        *val1 = 5000;
@@ -251,6 +282,20 @@ static const struct iio_chan_spec mt6370_adc_channels[] = {
    MT6370_ADC_CHAN(TEMP_JC, IIO_TEMP, 12, BIT(IIO_CHAN_INFO_OFFSET)),
};

static int mt6370_get_vendor_info(struct mt6370_adc_data *priv)
{
    unsigned int dev_info;
    int ret;

    ret = regmap_read(priv->regmap, MT6370_REG_DEV_INFO, &dev_info);
    if (ret)
        return ret;

    priv->vid = FIELD_GET(MT6370_VENID_MASK, dev_info);

    return 0;
}

static int mt6370_adc_probe(struct platform_device *pdev)
{
    struct device *dev = &pdev->dev;
@@ -272,6 +317,10 @@ static int mt6370_adc_probe(struct platform_device *pdev)
    priv->regmap = regmap;
    mutex_init(&priv->adc_lock);

    ret = mt6370_get_vendor_info(priv);
    if (ret)
        return dev_err_probe(dev, ret, "Failed to get vid\n");

    ret = regmap_write(priv->regmap, MT6370_REG_CHG_ADC, 0);
    if (ret)
        return dev_err_probe(dev, ret, "Failed to reset ADC\n");

@@ -757,13 +757,13 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)

    ret = mxs_lradc_adc_trigger_init(iio);
    if (ret)
        goto err_trig;
        return ret;

    ret = iio_triggered_buffer_setup(iio, &iio_pollfunc_store_time,
        &mxs_lradc_adc_trigger_handler,
        &mxs_lradc_adc_buffer_ops);
    if (ret)
        return ret;
        goto err_trig;

    adc->vref_mv = mxs_lradc_adc_vref_mv[lradc->soc];

@@ -801,9 +801,9 @@ static int mxs_lradc_adc_probe(struct platform_device *pdev)

err_dev:
    mxs_lradc_adc_hw_stop(adc);
    mxs_lradc_adc_trigger_remove(iio);
err_trig:
    iio_triggered_buffer_cleanup(iio);
err_trig:
    mxs_lradc_adc_trigger_remove(iio);
    return ret;
}

@@ -814,8 +814,8 @@ static int mxs_lradc_adc_remove(struct platform_device *pdev)

    iio_device_unregister(iio);
    mxs_lradc_adc_hw_stop(adc);
    mxs_lradc_adc_trigger_remove(iio);
    iio_triggered_buffer_cleanup(iio);
    mxs_lradc_adc_trigger_remove(iio);

    return 0;
}

@@ -547,7 +547,7 @@ static int palmas_gpadc_read_raw(struct iio_dev *indio_dev,
    int adc_chan = chan->channel;
    int ret = 0;

    if (adc_chan > PALMAS_ADC_CH_MAX)
    if (adc_chan >= PALMAS_ADC_CH_MAX)
        return -EINVAL;

    mutex_lock(&adc->lock);
@@ -595,7 +595,7 @@ static int palmas_gpadc_read_event_config(struct iio_dev *indio_dev,
    int adc_chan = chan->channel;
    int ret = 0;

    if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
    if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
        return -EINVAL;

    mutex_lock(&adc->lock);
@@ -684,7 +684,7 @@ static int palmas_gpadc_write_event_config(struct iio_dev *indio_dev,
    int adc_chan = chan->channel;
    int ret;

    if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
    if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
        return -EINVAL;

    mutex_lock(&adc->lock);
@@ -710,7 +710,7 @@ static int palmas_gpadc_read_event_value(struct iio_dev *indio_dev,
    int adc_chan = chan->channel;
    int ret;

    if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
    if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
        return -EINVAL;

    mutex_lock(&adc->lock);
@@ -744,7 +744,7 @@ static int palmas_gpadc_write_event_value(struct iio_dev *indio_dev,
    int old;
    int ret;

    if (adc_chan > PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
    if (adc_chan >= PALMAS_ADC_CH_MAX || type != IIO_EV_TYPE_THRESH)
        return -EINVAL;

    mutex_lock(&adc->lock);

@@ -2006,16 +2006,15 @@ static int stm32_adc_get_legacy_chan_count(struct iio_dev *indio_dev, struct stm
     * to get the *real* number of channels.
     */
    ret = device_property_count_u32(dev, "st,adc-diff-channels");
    if (ret < 0)
        return ret;

    ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32));
    if (ret > adc_info->max_channels) {
        dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n");
        return -EINVAL;
    } else if (ret > 0) {
        adc->num_diff = ret;
        num_channels += ret;
    if (ret > 0) {
        ret /= (int)(sizeof(struct stm32_adc_diff_channel) / sizeof(u32));
        if (ret > adc_info->max_channels) {
            dev_err(&indio_dev->dev, "Bad st,adc-diff-channels?\n");
            return -EINVAL;
        } else if (ret > 0) {
            adc->num_diff = ret;
            num_channels += ret;
        }
    }

    /* Optional sample time is provided either for each, or all channels */
@@ -2037,6 +2036,7 @@ static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev,
    struct stm32_adc_diff_channel diff[STM32_ADC_CH_MAX];
    struct device *dev = &indio_dev->dev;
    u32 num_diff = adc->num_diff;
    int num_se = nchans - num_diff;
    int size = num_diff * sizeof(*diff) / sizeof(u32);
    int scan_index = 0, ret, i, c;
    u32 smp = 0, smps[STM32_ADC_CH_MAX], chans[STM32_ADC_CH_MAX];
@@ -2063,29 +2063,32 @@ static int stm32_adc_legacy_chan_init(struct iio_dev *indio_dev,
            scan_index++;
        }
    }

    ret = device_property_read_u32_array(dev, "st,adc-channels", chans,
        nchans);
    if (ret)
        return ret;

    for (c = 0; c < nchans; c++) {
        if (chans[c] >= adc_info->max_channels) {
            dev_err(&indio_dev->dev, "Invalid channel %d\n",
                chans[c]);
            return -EINVAL;
    if (num_se > 0) {
        ret = device_property_read_u32_array(dev, "st,adc-channels", chans, num_se);
        if (ret) {
            dev_err(&indio_dev->dev, "Failed to get st,adc-channels %d\n", ret);
            return ret;
        }

        /* Channel can't be configured both as single-ended & diff */
        for (i = 0; i < num_diff; i++) {
            if (chans[c] == diff[i].vinp) {
                dev_err(&indio_dev->dev, "channel %d misconfigured\n", chans[c]);
        for (c = 0; c < num_se; c++) {
            if (chans[c] >= adc_info->max_channels) {
                dev_err(&indio_dev->dev, "Invalid channel %d\n",
                    chans[c]);
                return -EINVAL;
            }

            /* Channel can't be configured both as single-ended & diff */
            for (i = 0; i < num_diff; i++) {
                if (chans[c] == diff[i].vinp) {
                    dev_err(&indio_dev->dev, "channel %d misconfigured\n",
                        chans[c]);
                    return -EINVAL;
                }
            }
            stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
                chans[c], 0, scan_index, false);
            scan_index++;
        }
        stm32_adc_chan_init_one(indio_dev, &channels[scan_index],
            chans[c], 0, scan_index, false);
        scan_index++;
    }

    if (adc->nsmps > 0) {
@@ -2306,7 +2309,7 @@ static int stm32_adc_chan_fw_init(struct iio_dev *indio_dev, bool timestamping)

    if (legacy)
        ret = stm32_adc_legacy_chan_init(indio_dev, adc, channels,
            num_channels);
            timestamping ? num_channels - 1 : num_channels);
    else
        ret = stm32_adc_generic_chan_init(indio_dev, adc, channels);
    if (ret < 0)

@@ -1007,7 +1007,7 @@ static int ad74413r_read_raw(struct iio_dev *indio_dev,

        ret = ad74413r_get_single_adc_result(indio_dev, chan->channel,
            val);
        if (ret)
        if (ret < 0)
            return ret;

        ad74413r_adc_to_resistance_result(*val, val);

@@ -17,7 +17,7 @@ obj-$(CONFIG_AD5592R_BASE) += ad5592r-base.o
obj-$(CONFIG_AD5592R) += ad5592r.o
obj-$(CONFIG_AD5593R) += ad5593r.o
obj-$(CONFIG_AD5755) += ad5755.o
obj-$(CONFIG_AD5755) += ad5758.o
obj-$(CONFIG_AD5758) += ad5758.o
obj-$(CONFIG_AD5761) += ad5761.o
obj-$(CONFIG_AD5764) += ad5764.o
obj-$(CONFIG_AD5766) += ad5766.o

@@ -47,12 +47,18 @@ static int mcp4725_suspend(struct device *dev)
    struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
        to_i2c_client(dev)));
    u8 outbuf[2];
    int ret;

    outbuf[0] = (data->powerdown_mode + 1) << 4;
    outbuf[1] = 0;
    data->powerdown = true;

    return i2c_master_send(data->client, outbuf, 2);
    ret = i2c_master_send(data->client, outbuf, 2);
    if (ret < 0)
        return ret;
    else if (ret != 2)
        return -EIO;
    return 0;
}

static int mcp4725_resume(struct device *dev)
@@ -60,13 +66,19 @@ static int mcp4725_resume(struct device *dev)
    struct mcp4725_data *data = iio_priv(i2c_get_clientdata(
        to_i2c_client(dev)));
    u8 outbuf[2];
    int ret;

    /* restore previous DAC value */
    outbuf[0] = (data->dac_value >> 8) & 0xf;
    outbuf[1] = data->dac_value & 0xff;
    data->powerdown = false;

    return i2c_master_send(data->client, outbuf, 2);
    ret = i2c_master_send(data->client, outbuf, 2);
    if (ret < 0)
        return ret;
    else if (ret != 2)
        return -EIO;
    return 0;
}
static DEFINE_SIMPLE_DEV_PM_OPS(mcp4725_pm_ops, mcp4725_suspend,
    mcp4725_resume);

@@ -275,9 +275,14 @@ static int inv_icm42600_buffer_preenable(struct iio_dev *indio_dev)
{
    struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
    struct device *dev = regmap_get_device(st->map);
    struct inv_icm42600_timestamp *ts = iio_priv(indio_dev);

    pm_runtime_get_sync(dev);

    mutex_lock(&st->lock);
    inv_icm42600_timestamp_reset(ts);
    mutex_unlock(&st->lock);

    return 0;
}

@@ -375,7 +380,6 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
    struct device *dev = regmap_get_device(st->map);
    unsigned int sensor;
    unsigned int *watermark;
    struct inv_icm42600_timestamp *ts;
    struct inv_icm42600_sensor_conf conf = INV_ICM42600_SENSOR_CONF_INIT;
    unsigned int sleep_temp = 0;
    unsigned int sleep_sensor = 0;
@@ -385,11 +389,9 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
    if (indio_dev == st->indio_gyro) {
        sensor = INV_ICM42600_SENSOR_GYRO;
        watermark = &st->fifo.watermark.gyro;
        ts = iio_priv(st->indio_gyro);
    } else if (indio_dev == st->indio_accel) {
        sensor = INV_ICM42600_SENSOR_ACCEL;
        watermark = &st->fifo.watermark.accel;
        ts = iio_priv(st->indio_accel);
    } else {
        return -EINVAL;
    }
@@ -417,8 +419,6 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
    if (!st->fifo.on)
        ret = inv_icm42600_set_temp_conf(st, false, &sleep_temp);

    inv_icm42600_timestamp_reset(ts);

out_unlock:
    mutex_unlock(&st->lock);

@@ -337,6 +337,17 @@ free_gains:
    return ret;
}

static void iio_gts_us_to_int_micro(int *time_us, int *int_micro_times,
        int num_times)
{
    int i;

    for (i = 0; i < num_times; i++) {
        int_micro_times[i * 2] = time_us[i] / 1000000;
        int_micro_times[i * 2 + 1] = time_us[i] % 1000000;
    }
}

/**
 * iio_gts_build_avail_time_table - build table of available integration times
 * @gts: Gain time scale descriptor
@@ -351,7 +362,7 @@ free_gains:
 */
static int iio_gts_build_avail_time_table(struct iio_gts *gts)
{
    int *times, i, j, idx = 0;
    int *times, i, j, idx = 0, *int_micro_times;

    if (!gts->num_itime)
        return 0;
@@ -378,13 +389,24 @@
            }
        }
    }
    gts->avail_time_tables = times;
    /*
     * This is just to survive a unlikely corner-case where times in the
     * given time table were not unique. Else we could just trust the
     * gts->num_itime.
     */
    gts->num_avail_time_tables = idx;

    /* create a list of times formatted as list of IIO_VAL_INT_PLUS_MICRO */
    int_micro_times = kcalloc(idx, sizeof(int) * 2, GFP_KERNEL);
    if (int_micro_times) {
        /*
         * This is just to survive a unlikely corner-case where times in
         * the given time table were not unique. Else we could just
         * trust the gts->num_itime.
         */
        gts->num_avail_time_tables = idx;
        iio_gts_us_to_int_micro(times, int_micro_times, idx);
    }

    gts->avail_time_tables = int_micro_times;
    kfree(times);

    if (!int_micro_times)
        return -ENOMEM;

    return 0;
}
@@ -683,8 +705,8 @@ int iio_gts_avail_times(struct iio_gts *gts, const int **vals, int *type,
        return -EINVAL;

    *vals = gts->avail_time_tables;
    *type = IIO_VAL_INT;
    *length = gts->num_avail_time_tables;
    *type = IIO_VAL_INT_PLUS_MICRO;
    *length = gts->num_avail_time_tables * 2;

    return IIO_AVAIL_LIST;
}

@@ -231,6 +231,9 @@ struct bu27034_result {

static const struct regmap_range bu27034_volatile_ranges[] = {
    {
        .range_min = BU27034_REG_SYSTEM_CONTROL,
        .range_max = BU27034_REG_SYSTEM_CONTROL,
    }, {
        .range_min = BU27034_REG_MODE_CONTROL4,
        .range_max = BU27034_REG_MODE_CONTROL4,
    }, {
@@ -1167,11 +1170,12 @@ static int bu27034_read_raw(struct iio_dev *idev,

    switch (mask) {
    case IIO_CHAN_INFO_INT_TIME:
        *val = bu27034_get_int_time(data);
        if (*val < 0)
            return *val;
        *val = 0;
        *val2 = bu27034_get_int_time(data);
        if (*val2 < 0)
            return *val2;

        return IIO_VAL_INT;
        return IIO_VAL_INT_PLUS_MICRO;

    case IIO_CHAN_INFO_SCALE:
        return bu27034_get_scale(data, chan->channel, val, val2);
@@ -1229,7 +1233,10 @@ static int bu27034_write_raw(struct iio_dev *idev,
        ret = bu27034_set_scale(data, chan->channel, val, val2);
        break;
    case IIO_CHAN_INFO_INT_TIME:
        ret = bu27034_try_set_int_time(data, val);
        if (!val)
            ret = bu27034_try_set_int_time(data, val2);
        else
            ret = -EINVAL;
        break;
    default:
        ret = -EINVAL;
@@ -1268,12 +1275,19 @@ static int bu27034_chip_init(struct bu27034_data *data)
    int ret, sel;

    /* Reset */
    ret = regmap_update_bits(data->regmap, BU27034_REG_SYSTEM_CONTROL,
    ret = regmap_write_bits(data->regmap, BU27034_REG_SYSTEM_CONTROL,
        BU27034_MASK_SW_RESET, BU27034_MASK_SW_RESET);
    if (ret)
        return dev_err_probe(data->dev, ret, "Sensor reset failed\n");

    msleep(1);

    ret = regmap_reinit_cache(data->regmap, &bu27034_regmap);
    if (ret) {
        dev_err(data->dev, "Failed to reinit reg cache\n");
        return ret;
    }

    /*
     * Read integration time here to ensure it is in regmap cache. We do
     * this to speed-up the int-time acquisition in the start of the buffer

@@ -8,6 +8,7 @@
 * TODO: Proximity
 */
#include <linux/bitops.h>
#include <linux/bitfield.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
@@ -42,6 +43,7 @@
#define VCNL4035_ALS_PERS_MASK     GENMASK(3, 2)
#define VCNL4035_INT_ALS_IF_H_MASK BIT(12)
#define VCNL4035_INT_ALS_IF_L_MASK BIT(13)
#define VCNL4035_DEV_ID_MASK       GENMASK(7, 0)

/* Default values */
#define VCNL4035_MODE_ALS_ENABLE   BIT(0)
@@ -413,6 +415,7 @@ static int vcnl4035_init(struct vcnl4035_data *data)
        return ret;
    }

    id = FIELD_GET(VCNL4035_DEV_ID_MASK, id);
    if (id != VCNL4035_DEV_ID_VAL) {
        dev_err(&data->client->dev, "Wrong id, got %x, expected %x\n",
            id, VCNL4035_DEV_ID_VAL);

@@ -296,12 +296,13 @@ static int tmag5273_read_raw(struct iio_dev *indio_dev,
            return ret;

        ret = tmag5273_get_measure(data, &t, &x, &y, &z, &angle, &magnitude);
        if (ret)
            return ret;

        pm_runtime_mark_last_busy(data->dev);
        pm_runtime_put_autosuspend(data->dev);

        if (ret)
            return ret;

        switch (chan->address) {
        case TEMPERATURE:
            *val = t;

@@ -703,7 +703,7 @@ void input_close_device(struct input_handle *handle)

    __input_release_device(handle);

    if (!dev->inhibited && !--dev->users) {
    if (!--dev->users && !dev->inhibited) {
        if (dev->poller)
            input_dev_poller_stop(dev->poller);
        if (dev->close)

@@ -281,7 +281,6 @@ static const struct xpad_device {
    { 0x1430, 0xf801, "RedOctane Controller", 0, XTYPE_XBOX360 },
    { 0x146b, 0x0601, "BigBen Interactive XBOX 360 Controller", 0, XTYPE_XBOX360 },
    { 0x146b, 0x0604, "Bigben Interactive DAIJA Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOX360 },
    { 0x1532, 0x0037, "Razer Sabertooth", 0, XTYPE_XBOX360 },
    { 0x1532, 0x0a00, "Razer Atrox Arcade Stick", MAP_TRIGGERS_TO_BUTTONS, XTYPE_XBOXONE },
    { 0x1532, 0x0a03, "Razer Wildcat", 0, XTYPE_XBOXONE },
    { 0x15e4, 0x3f00, "Power A Mini Pro Elite", 0, XTYPE_XBOX360 },

@@ -108,6 +108,27 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = {
    {} /* Terminating entry */
};

/*
 * Some devices have a wrong entry which points to a GPIO which is
 * required in another driver, so this driver must not claim it.
 */
static const struct dmi_system_id dmi_invalid_acpi_index[] = {
    {
        /*
         * Lenovo Yoga Book X90F / X90L, the PNP0C40 home button entry
         * points to a GPIO which is not a home button and which is
         * required by the lenovo-yogabook driver.
         */
        .matches = {
            DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
            DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
            DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
        },
        .driver_data = (void *)1l,
    },
    {} /* Terminating entry */
};

/*
 * Get the Nth GPIO number from the ACPI object.
 */
@@ -137,6 +158,8 @@ soc_button_device_create(struct platform_device *pdev,
    struct platform_device *pd;
    struct gpio_keys_button *gpio_keys;
    struct gpio_keys_platform_data *gpio_keys_pdata;
    const struct dmi_system_id *dmi_id;
    int invalid_acpi_index = -1;
    int error, gpio, irq;
    int n_buttons = 0;

@@ -154,10 +177,17 @@ soc_button_device_create(struct platform_device *pdev,
    gpio_keys = (void *)(gpio_keys_pdata + 1);
    n_buttons = 0;

    dmi_id = dmi_first_match(dmi_invalid_acpi_index);
    if (dmi_id)
        invalid_acpi_index = (long)dmi_id->driver_data;

    for (info = button_info; info->name; info++) {
        if (info->autorepeat != autorepeat)
            continue;

        if (info->acpi_index == invalid_acpi_index)
            continue;

        error = soc_button_lookup_gpio(&pdev->dev, info->acpi_index, &gpio, &irq);
        if (error || irq < 0) {
            /*

@@ -674,10 +674,11 @@ static void process_packet_head_v4(struct psmouse *psmouse)
    struct input_dev *dev = psmouse->dev;
    struct elantech_data *etd = psmouse->private;
    unsigned char *packet = psmouse->packet;
    int id = ((packet[3] & 0xe0) >> 5) - 1;
    int id;
    int pres, traces;

    if (id < 0)
    id = ((packet[3] & 0xe0) >> 5) - 1;
    if (id < 0 || id >= ETP_MAX_FINGERS)
        return;

    etd->mt[id].x = ((packet[1] & 0x0f) << 8) | packet[2];
@@ -707,7 +708,7 @@ static void process_packet_motion_v4(struct psmouse *psmouse)
    int id, sid;

    id = ((packet[0] & 0xe0) >> 5) - 1;
    if (id < 0)
    if (id < 0 || id >= ETP_MAX_FINGERS)
        return;

    sid = ((packet[3] & 0xe0) >> 5) - 1;
@@ -728,7 +729,7 @@ static void process_packet_motion_v4(struct psmouse *psmouse)
    input_report_abs(dev, ABS_MT_POSITION_X, etd->mt[id].x);
    input_report_abs(dev, ABS_MT_POSITION_Y, etd->mt[id].y);

    if (sid >= 0) {
    if (sid >= 0 && sid < ETP_MAX_FINGERS) {
        etd->mt[sid].x += delta_x2 * weight;
        etd->mt[sid].y -= delta_y2 * weight;
        input_mt_slot(dev, sid);

@@ -560,7 +560,7 @@ static int cyttsp5_hid_output_get_sysinfo(struct cyttsp5 *ts)
static int cyttsp5_hid_output_bl_launch_app(struct cyttsp5 *ts)
{
    int rc;
    u8 cmd[HID_OUTPUT_BL_LAUNCH_APP];
    u8 cmd[HID_OUTPUT_BL_LAUNCH_APP_SIZE];
    u16 crc;

    put_unaligned_le16(HID_OUTPUT_BL_LAUNCH_APP_SIZE, cmd);

@@ -282,6 +282,7 @@ config EXYNOS_IOMMU_DEBUG
config IPMMU_VMSA
    bool "Renesas VMSA-compatible IPMMU"
    depends on ARCH_RENESAS || COMPILE_TEST
    depends on ARM || ARM64 || COMPILE_TEST
    depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE
    select IOMMU_API
    select IOMMU_IO_PGTABLE_LPAE

@@ -15,9 +15,7 @@ extern irqreturn_t amd_iommu_int_thread(int irq, void *data);
extern irqreturn_t amd_iommu_int_handler(int irq, void *data);
extern void amd_iommu_apply_erratum_63(struct amd_iommu *iommu, u16 devid);
extern void amd_iommu_restart_event_logging(struct amd_iommu *iommu);
extern int amd_iommu_init_devices(void);
extern void amd_iommu_uninit_devices(void);
extern void amd_iommu_init_notifier(void);
extern void amd_iommu_restart_ga_log(struct amd_iommu *iommu);
extern void amd_iommu_set_rlookup_table(struct amd_iommu *iommu, u16 devid);

#ifdef CONFIG_AMD_IOMMU_DEBUGFS

@@ -758,6 +758,30 @@ void amd_iommu_restart_event_logging(struct amd_iommu *iommu)
    iommu_feature_enable(iommu, CONTROL_EVT_LOG_EN);
}

/*
 * This function restarts event logging in case the IOMMU experienced
 * an GA log overflow.
 */
void amd_iommu_restart_ga_log(struct amd_iommu *iommu)
{
    u32 status;

    status = readl(iommu->mmio_base + MMIO_STATUS_OFFSET);
    if (status & MMIO_STATUS_GALOG_RUN_MASK)
        return;

    pr_info_ratelimited("IOMMU GA Log restarting\n");

    iommu_feature_disable(iommu, CONTROL_GALOG_EN);
    iommu_feature_disable(iommu, CONTROL_GAINT_EN);

    writel(MMIO_STATUS_GALOG_OVERFLOW_MASK,
           iommu->mmio_base + MMIO_STATUS_OFFSET);

    iommu_feature_enable(iommu, CONTROL_GAINT_EN);
    iommu_feature_enable(iommu, CONTROL_GALOG_EN);
}

/*
 * This function resets the command buffer if the IOMMU stopped fetching
 * commands from it.
Some files were not shown because too many files have changed in this diff.