Merge branch 'linus' into timers/core
Pick up upstream fixes for pending changes.
commit 6808acb57a
@@ -56,6 +56,18 @@ model features for SVE is included in Appendix A.
 is to connect to a target process first and then attempt a
 ptrace(PTRACE_GETREGSET, pid, NT_ARM_SVE, &iov).
 
+* Whenever SVE scalable register values (Zn, Pn, FFR) are exchanged in memory
+  between userspace and the kernel, the register value is encoded in memory in
+  an endianness-invariant layout, with bits [(8 * i + 7) : (8 * i)] encoded at
+  byte offset i from the start of the memory representation.  This affects for
+  example the signal frame (struct sve_context) and ptrace interface
+  (struct user_sve_header) and associated data.
+
+  Beware that on big-endian systems this results in a different byte order than
+  for the FPSIMD V-registers, which are stored as single host-endian 128-bit
+  values, with bits [(127 - 8 * i) : (120 - 8 * i)] of the register encoded at
+  byte offset i.  (struct fpsimd_context, struct user_fpsimd_state).
+
 
 2. Vector length terminology
 -----------------------------
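
For illustration only (a sketch, not code from this commit, assuming a
GCC/clang-style `unsigned __int128`): the two layouts contrasted above can be
produced in user space like this, with one 16-byte buffer per 128-bit value.

	#include <stdint.h>
	#include <string.h>

	/* SVE scalable registers (Zn, Pn, FFR): endianness-invariant layout.
	 * Byte i holds bits [(8 * i + 7) : (8 * i)] of the value, i.e.
	 * little-endian byte order on every host.
	 */
	static void encode_sve_invariant(uint8_t out[16], unsigned __int128 v)
	{
		for (int i = 0; i < 16; i++)
			out[i] = (uint8_t)(v >> (8 * i));
	}

	/* FPSIMD V-registers: a single host-endian 128-bit value; on a
	 * big-endian host, byte i then holds bits
	 * [(127 - 8 * i) : (120 - 8 * i)] instead.
	 */
	static void encode_fpsimd_host(uint8_t out[16], unsigned __int128 v)
	{
		memcpy(out, &v, sizeof(v));
	}

On a little-endian host both encodings coincide; on a big-endian host they
are byte-reversed relative to each other, which is exactly the difference the
documentation text above is warning about.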
@@ -124,6 +136,10 @@ the SVE instruction set architecture.
   size and layout.  Macros SVE_SIG_* are defined [1] to facilitate access to
   the members.
 
+* Each scalable register (Zn, Pn, FFR) is stored in an endianness-invariant
+  layout, with bits [(8 * i + 7) : (8 * i)] stored at byte offset i from the
+  start of the register's representation in memory.
+
 * If the SVE context is too big to fit in sigcontext.__reserved[], then extra
   space is allocated on the stack, an extra_context record is written in
   __reserved[] referencing this space.  sve_context is then written in the
@@ -13,11 +13,9 @@ you can do so by typing:
 
 # mount none /sys -t sysfs
 
-As of the Linux 2.6.10 kernel, it is now possible to change the
-IO scheduler for a given block device on the fly (thus making it possible,
-for instance, to set the CFQ scheduler for the system default, but
-set a specific device to use the deadline or noop schedulers - which
-can improve that device's throughput).
+It is possible to change the IO scheduler for a given block device on
+the fly to select one of mq-deadline, none, bfq, or kyber schedulers -
+which can improve that device's throughput.
 
 To set a specific scheduler, simply do this:
 
@@ -30,8 +28,8 @@ The list of defined schedulers can be found by simply doing
 a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
 will be displayed, with the currently selected scheduler in brackets:
 
-# cat /sys/block/hda/queue/scheduler
-noop deadline [cfq]
-# echo deadline > /sys/block/hda/queue/scheduler
-# cat /sys/block/hda/queue/scheduler
-noop [deadline] cfq
+# cat /sys/block/sda/queue/scheduler
+[mq-deadline] kyber bfq none
+# echo none >/sys/block/sda/queue/scheduler
+# cat /sys/block/sda/queue/scheduler
+[none] mq-deadline kyber bfq
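
For illustration (a minimal sketch, not part of this patch), the same switch
can be performed from C by writing the scheduler name into the sysfs file;
/dev/sda and the "none" scheduler are assumptions here.

	#include <stdio.h>

	int main(void)
	{
		/* Equivalent of "echo none >/sys/block/sda/queue/scheduler";
		 * any name reported by reading this file is accepted.
		 */
		FILE *f = fopen("/sys/block/sda/queue/scheduler", "w");

		if (!f)
			return 1;
		fputs("none", f);
		return fclose(f) ? 1 : 0;
	}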
@@ -8,61 +8,13 @@ both at leaf nodes as well as at intermediate nodes in a storage hierarchy.
 Plan is to use the same cgroup based management interface for blkio controller
 and based on user options switch IO policies in the background.
 
-Currently two IO control policies are implemented. First one is proportional
-weight time based division of disk policy. It is implemented in CFQ. Hence
-this policy takes effect only on leaf nodes when CFQ is being used. The second
-one is throttling policy which can be used to specify upper IO rate limits
-on devices. This policy is implemented in generic block layer and can be
-used on leaf nodes as well as higher level logical devices like device mapper.
+One IO control policy is throttling policy which can be used to
+specify upper IO rate limits on devices. This policy is implemented in
+generic block layer and can be used on leaf nodes as well as higher
+level logical devices like device mapper.
 
 HOWTO
 =====
-Proportional Weight division of bandwidth
------------------------------------------
-You can do a very simple testing of running two dd threads in two different
-cgroups. Here is what you can do.
-
-- Enable Block IO controller
-	CONFIG_BLK_CGROUP=y
-
-- Enable group scheduling in CFQ
-	CONFIG_CFQ_GROUP_IOSCHED=y
-
-- Compile and boot into kernel and mount IO controller (blkio); see
-  cgroups.txt, Why are cgroups needed?.
-
-	mount -t tmpfs cgroup_root /sys/fs/cgroup
-	mkdir /sys/fs/cgroup/blkio
-	mount -t cgroup -o blkio none /sys/fs/cgroup/blkio
-
-- Create two cgroups
-	mkdir -p /sys/fs/cgroup/blkio/test1/ /sys/fs/cgroup/blkio/test2
-
-- Set weights of group test1 and test2
-	echo 1000 > /sys/fs/cgroup/blkio/test1/blkio.weight
-	echo 500 > /sys/fs/cgroup/blkio/test2/blkio.weight
-
-- Create two same size files (say 512MB each) on same disk (file1, file2) and
-  launch two dd threads in different cgroup to read those files.
-
-	sync
-	echo 3 > /proc/sys/vm/drop_caches
-
-	dd if=/mnt/sdb/zerofile1 of=/dev/null &
-	echo $! > /sys/fs/cgroup/blkio/test1/tasks
-	cat /sys/fs/cgroup/blkio/test1/tasks
-
-	dd if=/mnt/sdb/zerofile2 of=/dev/null &
-	echo $! > /sys/fs/cgroup/blkio/test2/tasks
-	cat /sys/fs/cgroup/blkio/test2/tasks
-
-- At macro level, first dd should finish first. To get more precise data, keep
-  on looking at (with the help of script), at blkio.disk_time and
-  blkio.disk_sectors files of both test1 and test2 groups. This will tell how
-  much disk time (in milliseconds), each group got and how many sectors each
-  group dispatched to the disk. We provide fairness in terms of disk time, so
-  ideally io.disk_time of cgroups should be in proportion to the weight.
 
 Throttling/Upper Limit policy
 -----------------------------
 - Enable Block IO controller
@@ -94,7 +46,7 @@ Throttling/Upper Limit policy
 Hierarchical Cgroups
 ====================
 
-Both CFQ and throttling implement hierarchy support; however,
+Throttling implements hierarchy support; however,
 throttling's hierarchy support is enabled iff "sane_behavior" is
 enabled from cgroup side, which currently is a development option and
 not publicly available.
@@ -107,9 +59,8 @@ If somebody created a hierarchy like as follows.
 			|
 		      test3
 
-CFQ by default and throttling with "sane_behavior" will handle the
-hierarchy correctly. For details on CFQ hierarchy support, refer to
-Documentation/block/cfq-iosched.txt. For throttling, all limits apply
+Throttling with "sane_behavior" will handle the
+hierarchy correctly. For throttling, all limits apply
 to the whole subtree while all statistics are local to the IOs
 directly generated by tasks in that cgroup.
 
@@ -130,10 +81,6 @@ CONFIG_DEBUG_BLK_CGROUP
 	- Debug help. Right now some additional stats file show up in cgroup
 	  if this option is enabled.
 
-CONFIG_CFQ_GROUP_IOSCHED
-	- Enables group scheduling in CFQ. Currently only 1 level of group
-	  creation is allowed.
-
 CONFIG_BLK_DEV_THROTTLING
 	- Enable block device throttling support in block layer.
 
@@ -344,32 +291,3 @@ Common files among various policies
 - blkio.reset_stats
 	- Writing an int to this file will result in resetting all the stats
 	  for that cgroup.
-
-CFQ sysfs tunable
-=================
-/sys/block/<disk>/queue/iosched/slice_idle
-------------------------------------------
-On a faster hardware CFQ can be slow, especially with sequential workload.
-This happens because CFQ idles on a single queue and single queue might not
-drive deeper request queue depths to keep the storage busy. In such scenarios
-one can try setting slice_idle=0 and that would switch CFQ to IOPS
-(IO operations per second) mode on NCQ supporting hardware.
-
-That means CFQ will not idle between cfq queues of a cfq group and hence be
-able to driver higher queue depth and achieve better throughput. That also
-means that cfq provides fairness among groups in terms of IOPS and not in
-terms of disk time.
-
-/sys/block/<disk>/queue/iosched/group_idle
-------------------------------------------
-If one disables idling on individual cfq queues and cfq service trees by
-setting slice_idle=0, group_idle kicks in. That means CFQ will still idle
-on the group in an attempt to provide fairness among groups.
-
-By default group_idle is same as slice_idle and does not do anything if
-slice_idle is enabled.
-
-One can experience an overall throughput drop if you have created multiple
-groups and put applications in that group which are not driving enough
-IO to keep disk busy. In that case set group_idle=0, and CFQ will not idle
-on individual groups and throughput should improve.
@@ -32,14 +32,18 @@ Brief summary of control files
 hugetlb.<hugepagesize>.usage_in_bytes     # show current usage for "hugepagesize" hugetlb
 hugetlb.<hugepagesize>.failcnt            # show the number of allocation failure due to HugeTLB limit
 
-For a system supporting two hugepage size (16M and 16G) the control
+For a system supporting three hugepage sizes (64k, 32M and 1G), the control
 files include:
 
-hugetlb.16GB.limit_in_bytes
-hugetlb.16GB.max_usage_in_bytes
-hugetlb.16GB.usage_in_bytes
-hugetlb.16GB.failcnt
-hugetlb.16MB.limit_in_bytes
-hugetlb.16MB.max_usage_in_bytes
-hugetlb.16MB.usage_in_bytes
-hugetlb.16MB.failcnt
+hugetlb.1GB.limit_in_bytes
+hugetlb.1GB.max_usage_in_bytes
+hugetlb.1GB.usage_in_bytes
+hugetlb.1GB.failcnt
+hugetlb.64KB.limit_in_bytes
+hugetlb.64KB.max_usage_in_bytes
+hugetlb.64KB.usage_in_bytes
+hugetlb.64KB.failcnt
+hugetlb.32MB.limit_in_bytes
+hugetlb.32MB.max_usage_in_bytes
+hugetlb.32MB.usage_in_bytes
+hugetlb.32MB.failcnt
@@ -2,7 +2,7 @@
 VERSION = 5
 PATCHLEVEL = 2
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Golden Lions
 
 # *DOCUMENTATION*
@@ -51,7 +51,7 @@ endif
 
 KBUILD_CFLAGS	+= -mgeneral-regs-only $(lseinstr) $(brokengasinst)
 KBUILD_CFLAGS	+= -fno-asynchronous-unwind-tables
-KBUILD_CFLAGS	+= -Wno-psabi
+KBUILD_CFLAGS	+= $(call cc-disable-warning, psabi)
 KBUILD_AFLAGS	+= $(lseinstr) $(brokengasinst)
 
 KBUILD_CFLAGS	+= $(call cc-option,-mabi=lp64)
@@ -195,6 +195,9 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	unsigned long asid = ASID(vma->vm_mm);
 	unsigned long addr;
 
+	start = round_down(start, stride);
+	end = round_up(end, stride);
+
 	if ((end - start) >= (MAX_TLBI_OPS * stride)) {
 		flush_tlb_mm(vma->vm_mm);
 		return;
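
A worked example of the added rounding (illustrative values, not from this
commit): with a 2MiB stride,

	/*
	 *   start = round_down(0x40210000, 0x200000) = 0x40200000
	 *   end   = round_up(0x40610000, 0x200000)   = 0x40800000
	 *
	 * so the walk below issues (end - start) / stride = 3 TLBI
	 * operations and covers the whole, now stride-aligned, range.
	 */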
@@ -260,6 +260,13 @@ struct kvm_vcpu_events {
 					 KVM_REG_SIZE_U256 |		\
 					 ((i) & (KVM_ARM64_SVE_MAX_SLICES - 1)))
 
+/*
+ * Register values for KVM_REG_ARM64_SVE_ZREG(), KVM_REG_ARM64_SVE_PREG() and
+ * KVM_REG_ARM64_SVE_FFR() are represented in memory in an endianness-
+ * invariant layout which differs from the layout used for the FPSIMD
+ * V-registers on big-endian systems: see sigcontext.h for more explanation.
+ */
+
 #define KVM_ARM64_SVE_VQ_MIN __SVE_VQ_MIN
 #define KVM_ARM64_SVE_VQ_MAX __SVE_VQ_MAX
 
@@ -176,6 +176,10 @@ struct user_sve_header {
  *	FPCR	uint32_t	FPCR
  *
  * Additional data might be appended in the future.
+ *
+ * The Z-, P- and FFR registers are represented in memory in an endianness-
+ * invariant layout which differs from the layout used for the FPSIMD
+ * V-registers on big-endian systems: see sigcontext.h for more explanation.
  */
 
 #define SVE_PT_SVE_ZREG_SIZE(vq)	__SVE_ZREG_SIZE(vq)
@@ -77,6 +77,15 @@ struct fpsimd_context {
 	__uint128_t vregs[32];
 };
 
+/*
+ * Note: similarly to all other integer fields, each V-register is stored in an
+ * endianness-dependent format, with the byte at offset i from the start of the
+ * in-memory representation of the register value containing
+ *
+ * bits [(7 + 8 * i) : (8 * i)] of the register on little-endian hosts; or
+ * bits [(127 - 8 * i) : (120 - 8 * i)] on big-endian hosts.
+ */
+
 /* ESR_EL1 context */
 #define ESR_MAGIC	0x45535201
 
@@ -204,6 +213,11 @@ struct sve_context {
  *	FFR	uint16_t[vq]	first-fault status register
  *
  * Additional data might be appended in the future.
+ *
+ * Unlike vregs[] in fpsimd_context, each SVE scalable register (Z-, P- or FFR)
+ * is encoded in memory in an endianness-invariant format, with the byte at
+ * offset i from the start of the in-memory representation containing bits
+ * [(7 + 8 * i) : (8 * i)] of the register value.
  */
 
 #define SVE_SIG_ZREG_SIZE(vq)	__SVE_ZREG_SIZE(vq)
@@ -39,6 +39,7 @@
 #include <linux/slab.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
+#include <linux/swab.h>
 
 #include <asm/esr.h>
 #include <asm/fpsimd.h>
@@ -352,6 +353,23 @@ static int __init sve_sysctl_init(void) { return 0; }
 #define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
 	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
 
+#ifdef CONFIG_CPU_BIG_ENDIAN
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+	u64 a = swab64(x);
+	u64 b = swab64(x >> 64);
+
+	return ((__uint128_t)a << 64) | b;
+}
+#else
+static __uint128_t arm64_cpu_to_le128(__uint128_t x)
+{
+	return x;
+}
+#endif
+
+#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
+
 /*
  * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
  * task->thread.sve_state.
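
A worked example of the big-endian helper above (arbitrary value, not from
this commit); note that swab64() takes a u64, so passing the 128-bit x
byte-swaps only its low half:

	/*
	 *   x = 0x00112233445566778899aabbccddeeff
	 *   a = swab64(x)       = swab64(0x8899aabbccddeeff) = 0xffeeddccbbaa9988
	 *   b = swab64(x >> 64) = swab64(0x0011223344556677) = 0x7766554433221100
	 *   ((__uint128_t)a << 64) | b = 0xffeeddccbbaa99887766554433221100
	 *
	 * i.e. a full byte reversal, turning the host-endian value into the
	 * endianness-invariant (little-endian) byte order kept in sve_state.
	 */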
@@ -369,14 +387,16 @@ static void fpsimd_to_sve(struct task_struct *task)
 	void *sst = task->thread.sve_state;
 	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
 	unsigned int i;
+	__uint128_t *p;
 
 	if (!system_supports_sve())
 		return;
 
 	vq = sve_vq_from_vl(task->thread.sve_vl);
-	for (i = 0; i < 32; ++i)
-		memcpy(ZREG(sst, vq, i), &fst->vregs[i],
-		       sizeof(fst->vregs[i]));
+	for (i = 0; i < 32; ++i) {
+		p = (__uint128_t *)ZREG(sst, vq, i);
+		*p = arm64_cpu_to_le128(fst->vregs[i]);
+	}
 }
 
 /*
@@ -395,14 +415,16 @@ static void sve_to_fpsimd(struct task_struct *task)
 	void const *sst = task->thread.sve_state;
 	struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
 	unsigned int i;
+	__uint128_t const *p;
 
 	if (!system_supports_sve())
 		return;
 
 	vq = sve_vq_from_vl(task->thread.sve_vl);
-	for (i = 0; i < 32; ++i)
-		memcpy(&fst->vregs[i], ZREG(sst, vq, i),
-		       sizeof(fst->vregs[i]));
+	for (i = 0; i < 32; ++i) {
+		p = (__uint128_t const *)ZREG(sst, vq, i);
+		fst->vregs[i] = arm64_le128_to_cpu(*p);
+	}
 }
 
 #ifdef CONFIG_ARM64_SVE
@@ -491,6 +513,7 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
 	void *sst = task->thread.sve_state;
 	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
 	unsigned int i;
+	__uint128_t *p;
 
 	if (!test_tsk_thread_flag(task, TIF_SVE))
 		return;
@@ -499,9 +522,10 @@ void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
 
 	memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
 
-	for (i = 0; i < 32; ++i)
-		memcpy(ZREG(sst, vq, i), &fst->vregs[i],
-		       sizeof(fst->vregs[i]));
+	for (i = 0; i < 32; ++i) {
+		p = (__uint128_t *)ZREG(sst, vq, i);
+		*p = arm64_cpu_to_le128(fst->vregs[i]);
+	}
 }
 
 int sve_set_vector_length(struct task_struct *task,
@@ -876,6 +876,23 @@ static inline int pmd_present(pmd_t pmd)
 	return false;
 }
 
+static inline int pmd_is_serializing(pmd_t pmd)
+{
+	/*
+	 * If the pmd is undergoing a split, the _PAGE_PRESENT bit is clear
+	 * and _PAGE_INVALID is set (see pmd_present, pmdp_invalidate).
+	 *
+	 * This condition may also occur when flushing a pmd while flushing
+	 * it (see ptep_modify_prot_start), so callers must ensure this
+	 * case is fine as well.
+	 */
+	if ((pmd_raw(pmd) & cpu_to_be64(_PAGE_PRESENT | _PAGE_INVALID)) ==
+	    cpu_to_be64(_PAGE_INVALID))
+		return true;
+
+	return false;
+}
+
 static inline int pmd_bad(pmd_t pmd)
 {
 	if (radix_enabled())
@@ -1092,6 +1109,19 @@ static inline int pmd_protnone(pmd_t pmd)
 #define pmd_access_permitted pmd_access_permitted
 static inline bool pmd_access_permitted(pmd_t pmd, bool write)
 {
+	/*
+	 * pmdp_invalidate sets this combination (which is not caught by
+	 * !pte_present() check in pte_access_permitted), to prevent
+	 * lock-free lookups, as part of the serialize_against_pte_lookup()
+	 * synchronisation.
+	 *
+	 * This also catches the case where the PTE's hardware PRESENT bit is
+	 * cleared while TLB is flushed, which is suboptimal but should not
+	 * be frequent.
+	 */
+	if (pmd_is_serializing(pmd))
+		return false;
+
 	return pte_access_permitted(pmd_pte(pmd), write);
 }
 
@@ -13,7 +13,11 @@ extern void btext_update_display(unsigned long phys, int width, int height,
 				 int depth, int pitch);
 extern void btext_setup_display(int width, int height, int depth, int pitch,
 				unsigned long address);
+#ifdef CONFIG_PPC32
 extern void btext_prepare_BAT(void);
+#else
+static inline void btext_prepare_BAT(void) { }
+#endif
 extern void btext_map(void);
 extern void btext_unmap(void);
 
@@ -94,6 +94,9 @@ static inline bool kdump_in_progress(void)
 	return crashing_cpu >= 0;
 }
 
+void relocate_new_kernel(unsigned long indirection_page, unsigned long reboot_code_buffer,
+			 unsigned long start_address) __noreturn;
+
 #ifdef CONFIG_KEXEC_FILE
 extern const struct kexec_file_ops kexec_elf64_ops;
 
|
@ -30,7 +30,6 @@ typedef void (*relocate_new_kernel_t)(
|
|||||||
*/
|
*/
|
||||||
void default_machine_kexec(struct kimage *image)
|
void default_machine_kexec(struct kimage *image)
|
||||||
{
|
{
|
||||||
extern const unsigned char relocate_new_kernel[];
|
|
||||||
extern const unsigned int relocate_new_kernel_size;
|
extern const unsigned int relocate_new_kernel_size;
|
||||||
unsigned long page_list;
|
unsigned long page_list;
|
||||||
unsigned long reboot_code_buffer, reboot_code_buffer_phys;
|
unsigned long reboot_code_buffer, reboot_code_buffer_phys;
|
||||||
@ -58,6 +57,9 @@ void default_machine_kexec(struct kimage *image)
|
|||||||
reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
|
reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE);
|
||||||
printk(KERN_INFO "Bye!\n");
|
printk(KERN_INFO "Bye!\n");
|
||||||
|
|
||||||
|
if (!IS_ENABLED(CONFIG_FSL_BOOKE) && !IS_ENABLED(CONFIG_44x))
|
||||||
|
relocate_new_kernel(page_list, reboot_code_buffer_phys, image->start);
|
||||||
|
|
||||||
/* now call it */
|
/* now call it */
|
||||||
rnk = (relocate_new_kernel_t) reboot_code_buffer;
|
rnk = (relocate_new_kernel_t) reboot_code_buffer;
|
||||||
(*rnk)(page_list, reboot_code_buffer_phys, image->start);
|
(*rnk)(page_list, reboot_code_buffer_phys, image->start);
|
||||||
|
@@ -2336,6 +2336,7 @@ static void __init prom_check_displays(void)
 			prom_printf("W=%d H=%d LB=%d addr=0x%x\n",
 				    width, height, pitch, addr);
 			btext_setup_display(width, height, 8, pitch, addr);
+			btext_prepare_BAT();
 		}
 #endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
 	}
|
@ -24,7 +24,7 @@ fi
|
|||||||
WHITELIST="add_reloc_offset __bss_start __bss_stop copy_and_flush
|
WHITELIST="add_reloc_offset __bss_start __bss_stop copy_and_flush
|
||||||
_end enter_prom $MEM_FUNCS reloc_offset __secondary_hold
|
_end enter_prom $MEM_FUNCS reloc_offset __secondary_hold
|
||||||
__secondary_hold_acknowledge __secondary_hold_spinloop __start
|
__secondary_hold_acknowledge __secondary_hold_spinloop __start
|
||||||
logo_linux_clut224
|
logo_linux_clut224 btext_prepare_BAT
|
||||||
reloc_got2 kernstart_addr memstart_addr linux_banner _stext
|
reloc_got2 kernstart_addr memstart_addr linux_banner _stext
|
||||||
__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."
|
__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."
|
||||||
|
|
||||||
|
@ -112,6 +112,9 @@ pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
|
|||||||
/*
|
/*
|
||||||
* This ensures that generic code that rely on IRQ disabling
|
* This ensures that generic code that rely on IRQ disabling
|
||||||
* to prevent a parallel THP split work as expected.
|
* to prevent a parallel THP split work as expected.
|
||||||
|
*
|
||||||
|
* Marking the entry with _PAGE_INVALID && ~_PAGE_PRESENT requires
|
||||||
|
* a special case check in pmd_access_permitted.
|
||||||
*/
|
*/
|
||||||
serialize_against_pte_lookup(vma->vm_mm);
|
serialize_against_pte_lookup(vma->vm_mm);
|
||||||
return __pmd(old_pmd);
|
return __pmd(old_pmd);
|
||||||
|
@@ -368,13 +368,25 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 		pdshift = PMD_SHIFT;
 		pmdp = pmd_offset(&pud, ea);
 		pmd = READ_ONCE(*pmdp);
 
 		/*
-		 * A hugepage collapse is captured by pmd_none, because
-		 * it mark the pmd none and do a hpte invalidate.
+		 * A hugepage collapse is captured by this condition, see
+		 * pmdp_collapse_flush.
 		 */
 		if (pmd_none(pmd))
 			return NULL;
 
+#ifdef CONFIG_PPC_BOOK3S_64
+		/*
+		 * A hugepage split is captured by this condition, see
+		 * pmdp_invalidate.
+		 *
+		 * Huge page modification can be caught here too.
+		 */
+		if (pmd_is_serializing(pmd))
+			return NULL;
+#endif
+
 		if (pmd_trans_huge(pmd) || pmd_devmap(pmd)) {
 			if (is_thp)
 				*is_thp = true;
@@ -536,7 +536,7 @@ static inline void __fpregs_load_activate(void)
 	struct fpu *fpu = &current->thread.fpu;
 	int cpu = smp_processor_id();
 
-	if (WARN_ON_ONCE(current->mm == NULL))
+	if (WARN_ON_ONCE(current->flags & PF_KTHREAD))
 		return;
 
 	if (!fpregs_state_valid(fpu, cpu)) {
@@ -567,11 +567,11 @@ static inline void __fpregs_load_activate(void)
  * otherwise.
  *
  * The FPU context is only stored/restored for a user task and
- * ->mm is used to distinguish between kernel and user threads.
+ * PF_KTHREAD is used to distinguish between kernel and user threads.
  */
 static inline void switch_fpu_prepare(struct fpu *old_fpu, int cpu)
 {
-	if (static_cpu_has(X86_FEATURE_FPU) && current->mm) {
+	if (static_cpu_has(X86_FEATURE_FPU) && !(current->flags & PF_KTHREAD)) {
 		if (!copy_fpregs_to_fpstate(old_fpu))
 			old_fpu->last_cpu = -1;
 		else
@@ -52,6 +52,9 @@
 
 #define INTEL_FAM6_CANNONLAKE_MOBILE	0x66
 
+#define INTEL_FAM6_ICELAKE_X		0x6A
+#define INTEL_FAM6_ICELAKE_XEON_D	0x6C
+#define INTEL_FAM6_ICELAKE_DESKTOP	0x7D
 #define INTEL_FAM6_ICELAKE_MOBILE	0x7E
 
 /* "Small Core" Processors (Atom) */
@@ -872,7 +872,7 @@ int __init microcode_init(void)
 		goto out_ucode_group;
 
 	register_syscore_ops(&mc_syscore_ops);
-	cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "x86/microcode:online",
+	cpuhp_setup_state_nocalls(CPUHP_AP_MICROCODE_LOADER, "x86/microcode:online",
 				  mc_cpu_online, mc_cpu_down_prep);
 
 	pr_info("Microcode Update Driver: v%s.", DRIVER_VERSION);
@@ -360,6 +360,9 @@ static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
 	struct list_head *head;
 	struct rdtgroup *entry;
 
+	if (!is_mbm_local_enabled())
+		return;
+
 	r_mba = &rdt_resources_all[RDT_RESOURCE_MBA];
 	closid = rgrp->closid;
 	rmid = rgrp->mon.rmid;
@@ -2534,7 +2534,12 @@ static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
 		if (closid_allocated(i) && i != closid) {
 			mode = rdtgroup_mode_by_closid(i);
 			if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
-				break;
+				/*
+				 * ctrl values for locksetup aren't relevant
+				 * until the schemata is written, and the mode
+				 * becomes RDT_MODE_PSEUDO_LOCKED.
+				 */
+				continue;
 			/*
 			 * If CDP is active include peer domain's
 			 * usage to ensure there is no overlap
@@ -102,7 +102,7 @@ static void __kernel_fpu_begin(void)
 
 	kernel_fpu_disable();
 
-	if (current->mm) {
+	if (!(current->flags & PF_KTHREAD)) {
 		if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
 			set_thread_flag(TIF_NEED_FPU_LOAD);
 			/*
@@ -5,6 +5,7 @@
 
 #include <linux/compat.h>
 #include <linux/cpu.h>
+#include <linux/pagemap.h>
 
 #include <asm/fpu/internal.h>
 #include <asm/fpu/signal.h>
@@ -61,6 +62,11 @@ static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
 	struct user_i387_ia32_struct env;
 	struct _fpstate_32 __user *fp = buf;
 
+	fpregs_lock();
+	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
+		copy_fxregs_to_kernel(&tsk->thread.fpu);
+	fpregs_unlock();
+
 	convert_from_fxsr(&env, tsk);
 
 	if (__copy_to_user(buf, &env, sizeof(env)) ||
@@ -189,15 +195,7 @@ retry:
 	fpregs_unlock();
 
 	if (ret) {
-		int aligned_size;
-		int nr_pages;
-
-		aligned_size = offset_in_page(buf_fx) + fpu_user_xstate_size;
-		nr_pages = DIV_ROUND_UP(aligned_size, PAGE_SIZE);
-
-		ret = get_user_pages_unlocked((unsigned long)buf_fx, nr_pages,
-					      NULL, FOLL_WRITE);
-		if (ret == nr_pages)
+		if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size))
 			goto retry;
 		return -EFAULT;
 	}
@@ -758,7 +758,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
 		BREAK_INSTR_SIZE);
 	bpt->type = BP_POKE_BREAKPOINT;
 
-	return err;
+	return 0;
 }
 
 int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
@@ -199,7 +199,7 @@ static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
 	if (!pgtable_l5_enabled())
 		return (p4d_t *)pgd;
 
-	p4d = __pa_nodebug(pgd_val(*pgd)) & PTE_PFN_MASK;
+	p4d = pgd_val(*pgd) & PTE_PFN_MASK;
 	p4d += __START_KERNEL_map - phys_base;
 	return (p4d_t *)p4d + p4d_index(addr);
 }
@@ -52,7 +52,7 @@ static __initdata struct kaslr_memory_region {
 } kaslr_regions[] = {
 	{ &page_offset_base, 0 },
 	{ &vmalloc_base, 0 },
-	{ &vmemmap_base, 1 },
+	{ &vmemmap_base, 0 },
 };
 
 /* Get size in bytes used by the memory region */
@@ -78,6 +78,7 @@ void __init kernel_randomize_memory(void)
 	unsigned long rand, memory_tb;
 	struct rnd_state rand_state;
 	unsigned long remain_entropy;
+	unsigned long vmemmap_size;
 
 	vaddr_start = pgtable_l5_enabled() ? __PAGE_OFFSET_BASE_L5 : __PAGE_OFFSET_BASE_L4;
 	vaddr = vaddr_start;
@@ -109,6 +110,14 @@ void __init kernel_randomize_memory(void)
 	if (memory_tb < kaslr_regions[0].size_tb)
 		kaslr_regions[0].size_tb = memory_tb;
 
+	/*
+	 * Calculate the vmemmap region size in TBs, aligned to a TB
+	 * boundary.
+	 */
+	vmemmap_size = (kaslr_regions[0].size_tb << (TB_SHIFT - PAGE_SHIFT)) *
+			sizeof(struct page);
+	kaslr_regions[2].size_tb = DIV_ROUND_UP(vmemmap_size, 1UL << TB_SHIFT);
+
 	/* Calculate entropy available between regions */
 	remain_entropy = vaddr_end - vaddr_start;
 	for (i = 0; i < ARRAY_SIZE(kaslr_regions); i++)
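
A worked example of the vmemmap sizing added above (the 64-byte struct page
and 4-level-paging figures are assumptions; both are configuration-dependent):

	/*
	 *   kaslr_regions[0].size_tb = 64, TB_SHIFT = 40, PAGE_SHIFT = 12
	 *
	 *   vmemmap_size = (64 << (40 - 12)) * 64
	 *                = 2^6 * 2^28 * 2^6 = 2^40 bytes
	 *   kaslr_regions[2].size_tb = DIV_ROUND_UP(2^40, 2^40) = 1
	 *
	 * i.e. a 64 TB direct map needs a 1 TB vmemmap region, rather than
	 * the fixed size previously assumed.
	 */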
@@ -73,6 +73,7 @@ config BLK_DEV_INTEGRITY
 
 config BLK_DEV_ZONED
 	bool "Zoned block device support"
+	select MQ_IOSCHED_DEADLINE
 	---help---
 	Block layer zoned block device support. This option enables
 	support for ZAC/ZBC host-managed and host-aware zoned block devices.
@@ -1046,8 +1046,7 @@ struct blkcg_policy blkcg_policy_bfq = {
 struct cftype bfq_blkcg_legacy_files[] = {
 	{
 		.name = "bfq.weight",
-		.link_name = "weight",
-		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_SYMLINKED,
+		.flags = CFTYPE_NOT_ON_ROOT,
 		.seq_show = bfq_io_show_weight,
 		.write_u64 = bfq_io_set_weight_legacy,
 	},
@@ -1167,8 +1166,7 @@ struct cftype bfq_blkcg_legacy_files[] = {
 struct cftype bfq_blkg_files[] = {
 	{
 		.name = "bfq.weight",
-		.link_name = "weight",
-		.flags = CFTYPE_NOT_ON_ROOT | CFTYPE_SYMLINKED,
+		.flags = CFTYPE_NOT_ON_ROOT,
 		.seq_show = bfq_io_show_weight,
 		.write = bfq_io_set_weight,
 	},
|
@ -821,38 +821,28 @@ static const struct blk_mq_debugfs_attr blk_mq_debugfs_ctx_attrs[] = {
|
|||||||
{},
|
{},
|
||||||
};
|
};
|
||||||
|
|
||||||
static bool debugfs_create_files(struct dentry *parent, void *data,
|
static void debugfs_create_files(struct dentry *parent, void *data,
|
||||||
const struct blk_mq_debugfs_attr *attr)
|
const struct blk_mq_debugfs_attr *attr)
|
||||||
{
|
{
|
||||||
if (IS_ERR_OR_NULL(parent))
|
if (IS_ERR_OR_NULL(parent))
|
||||||
return false;
|
return;
|
||||||
|
|
||||||
d_inode(parent)->i_private = data;
|
d_inode(parent)->i_private = data;
|
||||||
|
|
||||||
for (; attr->name; attr++) {
|
for (; attr->name; attr++)
|
||||||
if (!debugfs_create_file(attr->name, attr->mode, parent,
|
debugfs_create_file(attr->name, attr->mode, parent,
|
||||||
(void *)attr, &blk_mq_debugfs_fops))
|
(void *)attr, &blk_mq_debugfs_fops);
|
||||||
return false;
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int blk_mq_debugfs_register(struct request_queue *q)
|
void blk_mq_debugfs_register(struct request_queue *q)
|
||||||
{
|
{
|
||||||
struct blk_mq_hw_ctx *hctx;
|
struct blk_mq_hw_ctx *hctx;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
if (!blk_debugfs_root)
|
|
||||||
return -ENOENT;
|
|
||||||
|
|
||||||
q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
|
q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
|
||||||
blk_debugfs_root);
|
blk_debugfs_root);
|
||||||
if (!q->debugfs_dir)
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
if (!debugfs_create_files(q->debugfs_dir, q,
|
debugfs_create_files(q->debugfs_dir, q, blk_mq_debugfs_queue_attrs);
|
||||||
blk_mq_debugfs_queue_attrs))
|
|
||||||
goto err;
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
|
* blk_mq_init_sched() attempted to do this already, but q->debugfs_dir
|
||||||
@ -864,11 +854,10 @@ int blk_mq_debugfs_register(struct request_queue *q)
|
|||||||
|
|
||||||
/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
|
/* Similarly, blk_mq_init_hctx() couldn't do this previously. */
|
||||||
queue_for_each_hw_ctx(q, hctx, i) {
|
queue_for_each_hw_ctx(q, hctx, i) {
|
||||||
if (!hctx->debugfs_dir && blk_mq_debugfs_register_hctx(q, hctx))
|
if (!hctx->debugfs_dir)
|
||||||
goto err;
|
blk_mq_debugfs_register_hctx(q, hctx);
|
||||||
if (q->elevator && !hctx->sched_debugfs_dir &&
|
if (q->elevator && !hctx->sched_debugfs_dir)
|
||||||
blk_mq_debugfs_register_sched_hctx(q, hctx))
|
blk_mq_debugfs_register_sched_hctx(q, hctx);
|
||||||
goto err;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if (q->rq_qos) {
|
if (q->rq_qos) {
|
||||||
@ -879,12 +868,6 @@ int blk_mq_debugfs_register(struct request_queue *q)
|
|||||||
rqos = rqos->next;
|
rqos = rqos->next;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
err:
|
|
||||||
blk_mq_debugfs_unregister(q);
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void blk_mq_debugfs_unregister(struct request_queue *q)
|
void blk_mq_debugfs_unregister(struct request_queue *q)
|
||||||
@ -894,52 +877,32 @@ void blk_mq_debugfs_unregister(struct request_queue *q)
|
|||||||
q->debugfs_dir = NULL;
|
q->debugfs_dir = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
static int blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
|
static void blk_mq_debugfs_register_ctx(struct blk_mq_hw_ctx *hctx,
|
||||||
struct blk_mq_ctx *ctx)
|
struct blk_mq_ctx *ctx)
|
||||||
{
|
{
|
||||||
struct dentry *ctx_dir;
|
struct dentry *ctx_dir;
|
||||||
char name[20];
|
char name[20];
|
||||||
|
|
||||||
snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
|
snprintf(name, sizeof(name), "cpu%u", ctx->cpu);
|
||||||
ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
|
ctx_dir = debugfs_create_dir(name, hctx->debugfs_dir);
|
||||||
if (!ctx_dir)
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
if (!debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs))
|
debugfs_create_files(ctx_dir, ctx, blk_mq_debugfs_ctx_attrs);
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
int blk_mq_debugfs_register_hctx(struct request_queue *q,
|
void blk_mq_debugfs_register_hctx(struct request_queue *q,
|
||||||
struct blk_mq_hw_ctx *hctx)
|
struct blk_mq_hw_ctx *hctx)
|
||||||
{
|
{
|
||||||
struct blk_mq_ctx *ctx;
|
struct blk_mq_ctx *ctx;
|
||||||
char name[20];
|
char name[20];
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
if (!q->debugfs_dir)
|
|
||||||
return -ENOENT;
|
|
||||||
|
|
||||||
snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
|
snprintf(name, sizeof(name), "hctx%u", hctx->queue_num);
|
||||||
hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
|
hctx->debugfs_dir = debugfs_create_dir(name, q->debugfs_dir);
|
||||||
if (!hctx->debugfs_dir)
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
if (!debugfs_create_files(hctx->debugfs_dir, hctx,
|
debugfs_create_files(hctx->debugfs_dir, hctx, blk_mq_debugfs_hctx_attrs);
|
||||||
blk_mq_debugfs_hctx_attrs))
|
|
||||||
goto err;
|
|
||||||
|
|
||||||
hctx_for_each_ctx(hctx, ctx, i) {
|
hctx_for_each_ctx(hctx, ctx, i)
|
||||||
if (blk_mq_debugfs_register_ctx(hctx, ctx))
|
blk_mq_debugfs_register_ctx(hctx, ctx);
|
||||||
goto err;
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
err:
|
|
||||||
blk_mq_debugfs_unregister_hctx(hctx);
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
|
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
|
||||||
@ -949,17 +912,13 @@ void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
|
|||||||
hctx->debugfs_dir = NULL;
|
hctx->debugfs_dir = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
int blk_mq_debugfs_register_hctxs(struct request_queue *q)
|
void blk_mq_debugfs_register_hctxs(struct request_queue *q)
|
||||||
{
|
{
|
||||||
struct blk_mq_hw_ctx *hctx;
|
struct blk_mq_hw_ctx *hctx;
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
queue_for_each_hw_ctx(q, hctx, i) {
|
queue_for_each_hw_ctx(q, hctx, i)
|
||||||
if (blk_mq_debugfs_register_hctx(q, hctx))
|
blk_mq_debugfs_register_hctx(q, hctx);
|
||||||
return -ENOMEM;
|
|
||||||
}
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
|
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
|
||||||
@ -971,29 +930,16 @@ void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
|
|||||||
blk_mq_debugfs_unregister_hctx(hctx);
|
blk_mq_debugfs_unregister_hctx(hctx);
|
||||||
}
|
}
|
||||||
|
|
||||||
int blk_mq_debugfs_register_sched(struct request_queue *q)
|
void blk_mq_debugfs_register_sched(struct request_queue *q)
|
||||||
{
|
{
|
||||||
struct elevator_type *e = q->elevator->type;
|
struct elevator_type *e = q->elevator->type;
|
||||||
|
|
||||||
if (!q->debugfs_dir)
|
|
||||||
return -ENOENT;
|
|
||||||
|
|
||||||
if (!e->queue_debugfs_attrs)
|
if (!e->queue_debugfs_attrs)
|
||||||
return 0;
|
return;
|
||||||
|
|
||||||
q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
|
q->sched_debugfs_dir = debugfs_create_dir("sched", q->debugfs_dir);
|
||||||
if (!q->sched_debugfs_dir)
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
if (!debugfs_create_files(q->sched_debugfs_dir, q,
|
debugfs_create_files(q->sched_debugfs_dir, q, e->queue_debugfs_attrs);
|
||||||
e->queue_debugfs_attrs))
|
|
||||||
goto err;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
err:
|
|
||||||
blk_mq_debugfs_unregister_sched(q);
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void blk_mq_debugfs_unregister_sched(struct request_queue *q)
|
void blk_mq_debugfs_unregister_sched(struct request_queue *q)
|
||||||
@ -1008,36 +954,22 @@ void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
|
|||||||
rqos->debugfs_dir = NULL;
|
rqos->debugfs_dir = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
int blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
|
void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
|
||||||
{
|
{
|
||||||
struct request_queue *q = rqos->q;
|
struct request_queue *q = rqos->q;
|
||||||
const char *dir_name = rq_qos_id_to_name(rqos->id);
|
const char *dir_name = rq_qos_id_to_name(rqos->id);
|
||||||
|
|
||||||
if (!q->debugfs_dir)
|
|
||||||
return -ENOENT;
|
|
||||||
|
|
||||||
if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
|
if (rqos->debugfs_dir || !rqos->ops->debugfs_attrs)
|
||||||
return 0;
|
return;
|
||||||
|
|
||||||
if (!q->rqos_debugfs_dir) {
|
if (!q->rqos_debugfs_dir)
|
||||||
q->rqos_debugfs_dir = debugfs_create_dir("rqos",
|
q->rqos_debugfs_dir = debugfs_create_dir("rqos",
|
||||||
q->debugfs_dir);
|
q->debugfs_dir);
|
||||||
if (!q->rqos_debugfs_dir)
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
|
||||||
|
|
||||||
rqos->debugfs_dir = debugfs_create_dir(dir_name,
|
rqos->debugfs_dir = debugfs_create_dir(dir_name,
|
||||||
rqos->q->rqos_debugfs_dir);
|
rqos->q->rqos_debugfs_dir);
|
||||||
if (!rqos->debugfs_dir)
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
if (!debugfs_create_files(rqos->debugfs_dir, rqos,
|
debugfs_create_files(rqos->debugfs_dir, rqos, rqos->ops->debugfs_attrs);
|
||||||
rqos->ops->debugfs_attrs))
|
|
||||||
goto err;
|
|
||||||
return 0;
|
|
||||||
err:
|
|
||||||
blk_mq_debugfs_unregister_rqos(rqos);
|
|
||||||
return -ENOMEM;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
|
void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
|
||||||
@ -1046,27 +978,18 @@ void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q)
|
|||||||
q->rqos_debugfs_dir = NULL;
|
q->rqos_debugfs_dir = NULL;
|
||||||
}
|
}
|
||||||
|
|
||||||
int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
|
void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
|
||||||
struct blk_mq_hw_ctx *hctx)
|
struct blk_mq_hw_ctx *hctx)
|
||||||
{
|
{
|
||||||
struct elevator_type *e = q->elevator->type;
|
struct elevator_type *e = q->elevator->type;
|
||||||
|
|
||||||
if (!hctx->debugfs_dir)
|
|
||||||
return -ENOENT;
|
|
||||||
|
|
||||||
if (!e->hctx_debugfs_attrs)
|
if (!e->hctx_debugfs_attrs)
|
||||||
return 0;
|
return;
|
||||||
|
|
||||||
hctx->sched_debugfs_dir = debugfs_create_dir("sched",
|
hctx->sched_debugfs_dir = debugfs_create_dir("sched",
|
||||||
hctx->debugfs_dir);
|
hctx->debugfs_dir);
|
||||||
if (!hctx->sched_debugfs_dir)
|
debugfs_create_files(hctx->sched_debugfs_dir, hctx,
|
||||||
return -ENOMEM;
|
e->hctx_debugfs_attrs);
|
||||||
|
|
||||||
if (!debugfs_create_files(hctx->sched_debugfs_dir, hctx,
|
|
||||||
e->hctx_debugfs_attrs))
|
|
||||||
return -ENOMEM;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
|
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
|
||||||
|
@ -18,74 +18,68 @@ struct blk_mq_debugfs_attr {
|
|||||||
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
|
int __blk_mq_debugfs_rq_show(struct seq_file *m, struct request *rq);
|
||||||
int blk_mq_debugfs_rq_show(struct seq_file *m, void *v);
|
int blk_mq_debugfs_rq_show(struct seq_file *m, void *v);
|
||||||
|
|
||||||
int blk_mq_debugfs_register(struct request_queue *q);
|
void blk_mq_debugfs_register(struct request_queue *q);
|
||||||
void blk_mq_debugfs_unregister(struct request_queue *q);
|
void blk_mq_debugfs_unregister(struct request_queue *q);
|
||||||
int blk_mq_debugfs_register_hctx(struct request_queue *q,
|
void blk_mq_debugfs_register_hctx(struct request_queue *q,
|
||||||
struct blk_mq_hw_ctx *hctx);
|
struct blk_mq_hw_ctx *hctx);
|
||||||
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
|
void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx);
|
||||||
int blk_mq_debugfs_register_hctxs(struct request_queue *q);
|
void blk_mq_debugfs_register_hctxs(struct request_queue *q);
|
||||||
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
|
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
|
||||||
|
|
||||||
int blk_mq_debugfs_register_sched(struct request_queue *q);
|
void blk_mq_debugfs_register_sched(struct request_queue *q);
|
||||||
void blk_mq_debugfs_unregister_sched(struct request_queue *q);
|
void blk_mq_debugfs_unregister_sched(struct request_queue *q);
|
||||||
int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
|
void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
|
||||||
struct blk_mq_hw_ctx *hctx);
|
struct blk_mq_hw_ctx *hctx);
|
||||||
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
|
void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx);
|
||||||
|
|
||||||
int blk_mq_debugfs_register_rqos(struct rq_qos *rqos);
|
void blk_mq_debugfs_register_rqos(struct rq_qos *rqos);
|
||||||
void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos);
|
void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos);
|
||||||
void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q);
|
void blk_mq_debugfs_unregister_queue_rqos(struct request_queue *q);
|
||||||
#else
|
#else
|
||||||
static inline int blk_mq_debugfs_register(struct request_queue *q)
|
static inline void blk_mq_debugfs_register(struct request_queue *q)
|
||||||
{
|
{
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void blk_mq_debugfs_unregister(struct request_queue *q)
|
static inline void blk_mq_debugfs_unregister(struct request_queue *q)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int blk_mq_debugfs_register_hctx(struct request_queue *q,
|
static inline void blk_mq_debugfs_register_hctx(struct request_queue *q,
|
||||||
struct blk_mq_hw_ctx *hctx)
|
struct blk_mq_hw_ctx *hctx)
|
||||||
{
|
{
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
|
static inline void blk_mq_debugfs_unregister_hctx(struct blk_mq_hw_ctx *hctx)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q)
|
static inline void blk_mq_debugfs_register_hctxs(struct request_queue *q)
|
||||||
{
|
{
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
|
static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int blk_mq_debugfs_register_sched(struct request_queue *q)
|
static inline void blk_mq_debugfs_register_sched(struct request_queue *q)
|
||||||
{
|
{
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void blk_mq_debugfs_unregister_sched(struct request_queue *q)
|
static inline void blk_mq_debugfs_unregister_sched(struct request_queue *q)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
|
static inline void blk_mq_debugfs_register_sched_hctx(struct request_queue *q,
|
||||||
struct blk_mq_hw_ctx *hctx)
|
struct blk_mq_hw_ctx *hctx)
|
||||||
{
|
{
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
|
static inline void blk_mq_debugfs_unregister_sched_hctx(struct blk_mq_hw_ctx *hctx)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
|
static inline void blk_mq_debugfs_register_rqos(struct rq_qos *rqos)
|
||||||
{
|
{
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
|
static inline void blk_mq_debugfs_unregister_rqos(struct rq_qos *rqos)
|
||||||
|
@@ -555,7 +555,6 @@ void blk_mq_sched_free_requests(struct request_queue *q)
 	int i;
 
 	lockdep_assert_held(&q->sysfs_lock);
-	WARN_ON(!q->elevator);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
 		if (hctx->sched_tags)
@@ -4460,9 +4460,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
 	{ "ST3320[68]13AS",	"SD1[5-9]",	ATA_HORKAGE_NONCQ |
 						ATA_HORKAGE_FIRMWARE_WARN },
 
-	/* drives which fail FPDMA_AA activation (some may freeze afterwards) */
-	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
-	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA },
+	/* drives which fail FPDMA_AA activation (some may freeze afterwards)
+	   the ST disks also have LPM issues */
+	{ "ST1000LM024 HN-M101MBB", "2AR10001",	ATA_HORKAGE_BROKEN_FPDMA_AA |
+						ATA_HORKAGE_NOLPM, },
+	{ "ST1000LM024 HN-M101MBB", "2BA30001",	ATA_HORKAGE_BROKEN_FPDMA_AA |
+						ATA_HORKAGE_NOLPM, },
 	{ "VB0250EAVER",	"HPG7",		ATA_HORKAGE_BROKEN_FPDMA_AA },
 
 	/* Blacklist entries taken from Silicon Image 3124/3132
@@ -755,10 +755,32 @@ void devm_remove_action(struct device *dev, void (*action)(void *), void *data)
 
 	WARN_ON(devres_destroy(dev, devm_action_release, devm_action_match,
 			       &devres));
 
 }
 EXPORT_SYMBOL_GPL(devm_remove_action);
 
+/**
+ * devm_release_action() - release previously added custom action
+ * @dev: Device that owns the action
+ * @action: Function implementing the action
+ * @data: Pointer to data passed to @action implementation
+ *
+ * Releases and removes instance of @action previously added by
+ * devm_add_action(). Both action and data should match one of the
+ * existing entries.
+ */
+void devm_release_action(struct device *dev, void (*action)(void *), void *data)
+{
+	struct action_devres devres = {
+		.data = data,
+		.action = action,
+	};
+
+	WARN_ON(devres_release(dev, devm_action_release, devm_action_match,
+			       &devres));
+
+}
+EXPORT_SYMBOL_GPL(devm_release_action);
 
 /*
  * Managed kmalloc/kfree
  */
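The devm_release_action() added above pairs with devm_add_action(): it fires the action immediately and drops it from the devres list, whereas devm_remove_action() only drops it. A minimal usage sketch, assuming a hypothetical driver and a made-up example_power_off() action:

	#include <linux/device.h>

	static void example_power_off(void *data)
	{
		struct device *dev = data;

		dev_info(dev, "powered off\n");	/* placeholder action */
	}

	static int example_probe(struct device *dev)
	{
		int ret;

		/* Schedule example_power_off(dev) to run on driver detach. */
		ret = devm_add_action_or_reset(dev, example_power_off, dev);
		if (ret)
			return ret;

		/* Later, to run the action early and unregister it: */
		devm_release_action(dev, example_power_off, dev);
		return 0;
	}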
@@ -74,10 +74,6 @@ int null_zone_report(struct gendisk *disk, sector_t sector,
 	struct nullb_device *dev = nullb->dev;
 	unsigned int zno, nrz = 0;
 
-	if (!dev->zoned)
-		/* Not a zoned null device */
-		return -EOPNOTSUPP;
-
 	zno = null_zone_no(dev, sector);
 	if (zno < dev->nr_zones) {
 		nrz = min_t(unsigned int, *nr_zones, dev->nr_zones - zno);
@@ -767,7 +767,7 @@ static int ps3vram_probe(struct ps3_system_bus_device *dev)
 	strlcpy(gendisk->disk_name, DEVICE_NAME, sizeof(gendisk->disk_name));
 	set_capacity(gendisk, priv->size >> 9);
 
-	dev_info(&dev->core, "%s: Using %lu MiB of GPU memory\n",
+	dev_info(&dev->core, "%s: Using %llu MiB of GPU memory\n",
 		 gendisk->disk_name, get_capacity(gendisk) >> 11);
 
 	device_add_disk(&dev->core, gendisk, NULL);
@@ -149,22 +149,22 @@ u32 arch_timer_reg_read(int access, enum arch_timer_reg reg,
 	return val;
 }
 
-static u64 arch_counter_get_cntpct_stable(void)
+static notrace u64 arch_counter_get_cntpct_stable(void)
 {
 	return __arch_counter_get_cntpct_stable();
 }
 
-static u64 arch_counter_get_cntpct(void)
+static notrace u64 arch_counter_get_cntpct(void)
 {
 	return __arch_counter_get_cntpct();
 }
 
-static u64 arch_counter_get_cntvct_stable(void)
+static notrace u64 arch_counter_get_cntvct_stable(void)
 {
 	return __arch_counter_get_cntvct_stable();
 }
 
-static u64 arch_counter_get_cntvct(void)
+static notrace u64 arch_counter_get_cntvct(void)
 {
 	return __arch_counter_get_cntvct();
 }
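For context: these accessors can end up inside the tracer's own timestamping path, so letting ftrace instrument them could recurse. The notrace annotation (provided by the kernel's compiler headers) keeps a function out of the function tracer; a sketch of the pattern with a hypothetical counter read:

	#include <linux/compiler.h>
	#include <linux/types.h>

	/* May run inside the tracer itself, so it must never be traced. */
	static notrace u64 example_read_counter(void)
	{
		u64 cnt = 0;	/* read the hardware counter here */

		return cnt;
	}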
@@ -896,7 +896,7 @@ static int omap_dm_timer_remove(struct platform_device *pdev)
 	return ret;
 }
 
-const static struct omap_dm_timer_ops dmtimer_ops = {
+static const struct omap_dm_timer_ops dmtimer_ops = {
 	.request_by_node = omap_dm_timer_request_by_node,
 	.request_specific = omap_dm_timer_request_specific,
 	.request = omap_dm_timer_request,
@@ -27,9 +27,8 @@ static void dev_dax_percpu_release(struct percpu_ref *ref)
 	complete(&dev_dax->cmp);
 }
 
-static void dev_dax_percpu_exit(void *data)
+static void dev_dax_percpu_exit(struct percpu_ref *ref)
 {
-	struct percpu_ref *ref = data;
 	struct dev_dax *dev_dax = ref_to_dev_dax(ref);
 
 	dev_dbg(&dev_dax->dev, "%s\n", __func__);
@@ -466,18 +465,12 @@ int dev_dax_probe(struct device *dev)
 	if (rc)
 		return rc;
 
-	rc = devm_add_action_or_reset(dev, dev_dax_percpu_exit, &dev_dax->ref);
-	if (rc)
-		return rc;
-
 	dev_dax->pgmap.ref = &dev_dax->ref;
 	dev_dax->pgmap.kill = dev_dax_percpu_kill;
+	dev_dax->pgmap.cleanup = dev_dax_percpu_exit;
 	addr = devm_memremap_pages(dev, &dev_dax->pgmap);
-	if (IS_ERR(addr)) {
-		devm_remove_action(dev, dev_dax_percpu_exit, &dev_dax->ref);
-		percpu_ref_exit(&dev_dax->ref);
+	if (IS_ERR(addr))
 		return PTR_ERR(addr);
-	}
 
 	inode = dax_inode(dax_dev);
 	cdev = inode->i_cdev;
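The probe path above moves percpu_ref teardown out of a devres action and into the pgmap's cleanup callback, so devm_memremap_pages() controls when the ref is torn down. The underlying lifecycle the kill/cleanup callbacks implement is the usual percpu_ref shutdown sequence; a hedged sketch with hypothetical names:

	#include <linux/percpu-refcount.h>
	#include <linux/completion.h>

	static void example_shutdown(struct percpu_ref *ref,
				     struct completion *cmp)
	{
		percpu_ref_kill(ref);		/* refuse new references */
		wait_for_completion(cmp);	/* ->release ran: last ref gone */
		percpu_ref_exit(ref);		/* free the percpu counters */
	}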
@@ -305,7 +305,8 @@ static const struct regmap_config pca953x_i2c_regmap = {
 	.volatile_reg = pca953x_volatile_register,
 
 	.cache_type = REGCACHE_RBTREE,
-	.max_register = 0x7f,
+	/* REVISIT: should be 0x7f but some 24 bit chips use REG_ADDR_AI */
+	.max_register = 0xff,
 };
 
 static u8 pca953x_recalc_addr(struct pca953x_chip *chip, int reg, int off,
@@ -2492,7 +2492,7 @@ void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
 
 int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
 {
-	int r = -EINVAL;
+	int r;
 
 	if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
 		r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
@@ -2502,7 +2502,7 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio
 		}
 		*smu_version = adev->pm.fw_version;
 	}
-	return r;
+	return 0;
 }
 
 int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
@@ -172,6 +172,8 @@ static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev,
 {
 	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
 
+	if (block >= AMDGPU_RAS_BLOCK_COUNT)
+		return 0;
 	return ras && (ras->supported & (1 << block));
 }
 
@@ -594,7 +594,7 @@ error:
 int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	uint32_t rptr = amdgpu_ring_get_rptr(ring);
+	uint32_t rptr;
 	unsigned i;
 	int r;
 
@@ -602,6 +602,8 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
 	if (r)
 		return r;
 
+	rptr = amdgpu_ring_get_rptr(ring);
+
 	amdgpu_ring_write(ring, VCN_ENC_CMD_END);
 	amdgpu_ring_commit(ring);
 
@@ -170,13 +170,16 @@ static void uvd_v6_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
 static int uvd_v6_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	uint32_t rptr = amdgpu_ring_get_rptr(ring);
+	uint32_t rptr;
 	unsigned i;
 	int r;
 
 	r = amdgpu_ring_alloc(ring, 16);
 	if (r)
 		return r;
 
+	rptr = amdgpu_ring_get_rptr(ring);
+
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
 	amdgpu_ring_commit(ring);
 
@@ -175,7 +175,7 @@ static void uvd_v7_0_enc_ring_set_wptr(struct amdgpu_ring *ring)
 static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
-	uint32_t rptr = amdgpu_ring_get_rptr(ring);
+	uint32_t rptr;
 	unsigned i;
 	int r;
 
@@ -185,6 +185,9 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring)
 	r = amdgpu_ring_alloc(ring, 16);
 	if (r)
 		return r;
 
+	rptr = amdgpu_ring_get_rptr(ring);
+
 	amdgpu_ring_write(ring, HEVC_ENC_CMD_END);
 	amdgpu_ring_commit(ring);
 
@@ -1570,6 +1570,50 @@ static void connector_bad_edid(struct drm_connector *connector,
 	}
 }
 
+/* Get override or firmware EDID */
+static struct edid *drm_get_override_edid(struct drm_connector *connector)
+{
+	struct edid *override = NULL;
+
+	if (connector->override_edid)
+		override = drm_edid_duplicate(connector->edid_blob_ptr->data);
+
+	if (!override)
+		override = drm_load_edid_firmware(connector);
+
+	return IS_ERR(override) ? NULL : override;
+}
+
+/**
+ * drm_add_override_edid_modes - add modes from override/firmware EDID
+ * @connector: connector we're probing
+ *
+ * Add modes from the override/firmware EDID, if available. Only to be used from
+ * drm_helper_probe_single_connector_modes() as a fallback for when DDC probe
+ * failed during drm_get_edid() and caused the override/firmware EDID to be
+ * skipped.
+ *
+ * Return: The number of modes added or 0 if we couldn't find any.
+ */
+int drm_add_override_edid_modes(struct drm_connector *connector)
+{
+	struct edid *override;
+	int num_modes = 0;
+
+	override = drm_get_override_edid(connector);
+	if (override) {
+		drm_connector_update_edid_property(connector, override);
+		num_modes = drm_add_edid_modes(connector, override);
+		kfree(override);
+
+		DRM_DEBUG_KMS("[CONNECTOR:%d:%s] adding %d modes via fallback override/firmware EDID\n",
+			      connector->base.id, connector->name, num_modes);
+	}
+
+	return num_modes;
+}
+EXPORT_SYMBOL(drm_add_override_edid_modes);
+
 /**
  * drm_do_get_edid - get EDID data using a custom EDID block read function
  * @connector: connector we're probing
@@ -1597,15 +1641,10 @@ struct edid *drm_do_get_edid(struct drm_connector *connector,
 {
 	int i, j = 0, valid_extensions = 0;
 	u8 *edid, *new;
-	struct edid *override = NULL;
+	struct edid *override;
 
-	if (connector->override_edid)
-		override = drm_edid_duplicate(connector->edid_blob_ptr->data);
-
-	if (!override)
-		override = drm_load_edid_firmware(connector);
-
-	if (!IS_ERR_OR_NULL(override))
+	override = drm_get_override_edid(connector);
+	if (override)
 		return override;
 
 	if ((edid = kmalloc(EDID_LENGTH, GFP_KERNEL)) == NULL)
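One detail worth noting in the refactor above: drm_load_edid_firmware() can hand back an ERR_PTR() value, and drm_get_override_edid() folds that into NULL so callers only ever see "an EDID or nothing". The normalization idiom, as a generic sketch:

	#include <linux/err.h>

	/* Collapse ERR_PTR() results to NULL when the errno is not needed. */
	static inline void *example_ptr_or_null(void *p)
	{
		return IS_ERR(p) ? NULL : p;
	}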
@@ -255,7 +255,8 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
 	if (obj->import_attach)
 		shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
 	else
-		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT, VM_MAP, PAGE_KERNEL);
+		shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
+				    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
 
 	if (!shmem->vaddr) {
 		DRM_DEBUG_KMS("Failed to vmap pages\n");
@@ -42,6 +42,14 @@ static const struct drm_dmi_panel_orientation_data asus_t100ha = {
 	.orientation = DRM_MODE_PANEL_ORIENTATION_LEFT_UP,
 };
 
+static const struct drm_dmi_panel_orientation_data gpd_micropc = {
+	.width = 720,
+	.height = 1280,
+	.bios_dates = (const char * const []){ "04/26/2019",
+		NULL },
+	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
 static const struct drm_dmi_panel_orientation_data gpd_pocket = {
 	.width = 1200,
 	.height = 1920,
@@ -50,6 +58,14 @@ static const struct drm_dmi_panel_orientation_data gpd_pocket = {
 	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
 };
 
+static const struct drm_dmi_panel_orientation_data gpd_pocket2 = {
+	.width = 1200,
+	.height = 1920,
+	.bios_dates = (const char * const []){ "06/28/2018", "08/28/2018",
+		"12/07/2018", NULL },
+	.orientation = DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
+};
+
 static const struct drm_dmi_panel_orientation_data gpd_win = {
 	.width = 720,
 	.height = 1280,
@@ -99,6 +115,14 @@ static const struct dmi_system_id orientation_data[] = {
 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100HAN"),
 		},
 		.driver_data = (void *)&asus_t100ha,
+	}, {	/* GPD MicroPC (generic strings, also match on bios date) */
+		.matches = {
+		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
+		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
+		  DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
+		  DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+		},
+		.driver_data = (void *)&gpd_micropc,
 	}, {	/*
 		 * GPD Pocket, note that the the DMI data is less generic then
 		 * it seems, devices with a board-vendor of "AMI Corporation"
@@ -112,6 +136,14 @@ static const struct dmi_system_id orientation_data[] = {
 		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
 		},
 		.driver_data = (void *)&gpd_pocket,
+	}, {	/* GPD Pocket 2 (generic strings, also match on bios date) */
+		.matches = {
+		  DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
+		  DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Default string"),
+		  DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Default string"),
+		  DMI_EXACT_MATCH(DMI_BOARD_NAME, "Default string"),
+		},
+		.driver_data = (void *)&gpd_pocket2,
 	}, {	/* GPD Win (same note on DMI match as GPD Pocket) */
 		.matches = {
 		  DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
@@ -479,6 +479,13 @@ retry:
 
 	count = (*connector_funcs->get_modes)(connector);
 
+	/*
+	 * Fallback for when DDC probe failed in drm_get_edid() and thus skipped
+	 * override/firmware EDID.
+	 */
+	if (count == 0 && connector->status == connector_status_connected)
+		count = drm_add_override_edid_modes(connector);
+
 	if (count == 0 && connector->status == connector_status_connected)
 		count = drm_add_modes_noedid(connector, 1024, 768);
 	count += drm_helper_probe_add_cmdline_mode(connector);
@@ -3005,6 +3005,7 @@ static bool gen8_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
 static bool gen10_is_valid_mux_addr(struct drm_i915_private *dev_priv, u32 addr)
 {
 	return gen8_is_valid_mux_addr(dev_priv, addr) ||
+		addr == i915_mmio_reg_offset(GEN10_NOA_WRITE_HIGH) ||
 		(addr >= i915_mmio_reg_offset(OA_PERFCNT3_LO) &&
 		 addr <= i915_mmio_reg_offset(OA_PERFCNT4_HI));
 }
@@ -1062,6 +1062,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
 
 #define NOA_DATA	_MMIO(0x986C)
 #define NOA_WRITE	_MMIO(0x9888)
+#define GEN10_NOA_WRITE_HIGH	_MMIO(0x9884)
 
 #define _GEN7_PIPEA_DE_LOAD_SL	0x70068
 #define _GEN7_PIPEB_DE_LOAD_SL	0x71068
@@ -303,10 +303,17 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
 	u32 dmc_offset = CSR_DEFAULT_FW_OFFSET, readcount = 0, nbytes;
 	u32 i;
 	u32 *dmc_payload;
+	size_t fsize;
 
 	if (!fw)
 		return NULL;
 
+	fsize = sizeof(struct intel_css_header) +
+		sizeof(struct intel_package_header) +
+		sizeof(struct intel_dmc_header);
+	if (fsize > fw->size)
+		goto error_truncated;
+
 	/* Extract CSS Header information*/
 	css_header = (struct intel_css_header *)fw->data;
 	if (sizeof(struct intel_css_header) !=
@@ -366,6 +373,9 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
 	/* Convert dmc_offset into number of bytes. By default it is in dwords*/
 	dmc_offset *= 4;
 	readcount += dmc_offset;
+	fsize += dmc_offset;
+	if (fsize > fw->size)
+		goto error_truncated;
 
 	/* Extract dmc_header information. */
 	dmc_header = (struct intel_dmc_header *)&fw->data[readcount];
@@ -397,6 +407,10 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
 
 	/* fw_size is in dwords, so multiplied by 4 to convert into bytes. */
 	nbytes = dmc_header->fw_size * 4;
+	fsize += nbytes;
+	if (fsize > fw->size)
+		goto error_truncated;
+
 	if (nbytes > csr->max_fw_size) {
 		DRM_ERROR("DMC FW too big (%u bytes)\n", nbytes);
 		return NULL;
@@ -410,6 +424,10 @@ static u32 *parse_csr_fw(struct drm_i915_private *dev_priv,
 	}
 
 	return memcpy(dmc_payload, &fw->data[readcount], nbytes);
+
+error_truncated:
+	DRM_ERROR("Truncated DMC firmware, rejecting.\n");
+	return NULL;
 }
 
 static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
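The hunks above all follow one pattern: keep a running fsize of everything the parser is about to dereference and compare it against fw->size before touching the next header or payload. A self-contained sketch of that bounds-check style (struct and field names are made up):

	#include <stddef.h>
	#include <stdint.h>

	struct example_hdr { uint32_t payload_dwords; };

	static const uint8_t *example_parse(const uint8_t *data, size_t size)
	{
		size_t fsize = sizeof(struct example_hdr);
		const struct example_hdr *hdr;

		if (fsize > size)		/* header would overrun */
			return NULL;
		hdr = (const struct example_hdr *)data;

		fsize += (size_t)hdr->payload_dwords * 4;
		if (fsize > size)		/* payload would overrun */
			return NULL;

		return data + sizeof(*hdr);	/* safe to read payload */
	}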
@@ -2432,10 +2432,14 @@ static unsigned int intel_fb_modifier_to_tiling(u64 fb_modifier)
  * main surface.
  */
 static const struct drm_format_info ccs_formats[] = {
-	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
-	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
-	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
-	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2, .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+	{ .format = DRM_FORMAT_XRGB8888, .depth = 24, .num_planes = 2,
+	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+	{ .format = DRM_FORMAT_XBGR8888, .depth = 24, .num_planes = 2,
+	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, },
+	{ .format = DRM_FORMAT_ARGB8888, .depth = 32, .num_planes = 2,
+	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
+	{ .format = DRM_FORMAT_ABGR8888, .depth = 32, .num_planes = 2,
+	  .cpp = { 4, 1, }, .hsub = 8, .vsub = 16, .has_alpha = true, },
 };
 
 static const struct drm_format_info *
@@ -11942,7 +11946,7 @@ encoder_retry:
 	return 0;
 }
 
-static bool intel_fuzzy_clock_check(int clock1, int clock2)
+bool intel_fuzzy_clock_check(int clock1, int clock2)
 {
 	int diff;
 
@@ -1742,6 +1742,7 @@ int vlv_force_pll_on(struct drm_i915_private *dev_priv, enum pipe pipe,
 		      const struct dpll *dpll);
 void vlv_force_pll_off(struct drm_i915_private *dev_priv, enum pipe pipe);
 int lpt_get_iclkip(struct drm_i915_private *dev_priv);
+bool intel_fuzzy_clock_check(int clock1, int clock2);
 
 /* modesetting asserts */
 void assert_panel_unlocked(struct drm_i915_private *dev_priv,
@@ -853,6 +853,17 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
 	if (mipi_config->target_burst_mode_freq) {
 		u32 bitrate = intel_dsi_bitrate(intel_dsi);
 
+		/*
+		 * Sometimes the VBT contains a slightly lower clock,
+		 * then the bitrate we have calculated, in this case
+		 * just replace it with the calculated bitrate.
+		 */
+		if (mipi_config->target_burst_mode_freq < bitrate &&
+		    intel_fuzzy_clock_check(
+					mipi_config->target_burst_mode_freq,
+					bitrate))
+			mipi_config->target_burst_mode_freq = bitrate;
+
 		if (mipi_config->target_burst_mode_freq < bitrate) {
 			DRM_ERROR("Burst mode freq is less than computed\n");
 			return false;
@@ -916,6 +916,13 @@ static bool intel_sdvo_set_colorimetry(struct intel_sdvo *intel_sdvo,
 	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
 }
 
+static bool intel_sdvo_set_audio_state(struct intel_sdvo *intel_sdvo,
+				       u8 audio_state)
+{
+	return intel_sdvo_set_value(intel_sdvo, SDVO_CMD_SET_AUDIO_STAT,
+				    &audio_state, 1);
+}
+
 #if 0
 static void intel_sdvo_dump_hdmi_buf(struct intel_sdvo *intel_sdvo)
 {
@@ -1487,11 +1494,6 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
 	else
 		sdvox |= SDVO_PIPE_SEL(crtc->pipe);
 
-	if (crtc_state->has_audio) {
-		WARN_ON_ONCE(INTEL_GEN(dev_priv) < 4);
-		sdvox |= SDVO_AUDIO_ENABLE;
-	}
-
 	if (INTEL_GEN(dev_priv) >= 4) {
 		/* done in crtc_mode_set as the dpll_md reg must be written early */
 	} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
@@ -1635,8 +1637,13 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 	if (sdvox & HDMI_COLOR_RANGE_16_235)
 		pipe_config->limited_color_range = true;
 
-	if (sdvox & SDVO_AUDIO_ENABLE)
-		pipe_config->has_audio = true;
+	if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_AUDIO_STAT,
+				 &val, 1)) {
+		u8 mask = SDVO_AUDIO_ELD_VALID | SDVO_AUDIO_PRESENCE_DETECT;
+
+		if ((val & mask) == mask)
+			pipe_config->has_audio = true;
+	}
 
 	if (intel_sdvo_get_value(intel_sdvo, SDVO_CMD_GET_ENCODE,
 				 &val, 1)) {
@@ -1647,6 +1654,32 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
 			intel_sdvo_get_avi_infoframe(intel_sdvo, pipe_config);
 }
 
+static void intel_sdvo_disable_audio(struct intel_sdvo *intel_sdvo)
+{
+	intel_sdvo_set_audio_state(intel_sdvo, 0);
+}
+
+static void intel_sdvo_enable_audio(struct intel_sdvo *intel_sdvo,
+				    const struct intel_crtc_state *crtc_state,
+				    const struct drm_connector_state *conn_state)
+{
+	const struct drm_display_mode *adjusted_mode =
+		&crtc_state->base.adjusted_mode;
+	struct drm_connector *connector = conn_state->connector;
+	u8 *eld = connector->eld;
+
+	eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
+
+	intel_sdvo_set_audio_state(intel_sdvo, 0);
+
+	intel_sdvo_write_infoframe(intel_sdvo, SDVO_HBUF_INDEX_ELD,
+				   SDVO_HBUF_TX_DISABLED,
+				   eld, drm_eld_size(eld));
+
+	intel_sdvo_set_audio_state(intel_sdvo, SDVO_AUDIO_ELD_VALID |
+				   SDVO_AUDIO_PRESENCE_DETECT);
+}
+
 static void intel_disable_sdvo(struct intel_encoder *encoder,
 			       const struct intel_crtc_state *old_crtc_state,
 			       const struct drm_connector_state *conn_state)
@@ -1656,6 +1689,9 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
 	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->base.crtc);
 	u32 temp;
 
+	if (old_crtc_state->has_audio)
+		intel_sdvo_disable_audio(intel_sdvo);
+
 	intel_sdvo_set_active_outputs(intel_sdvo, 0);
 	if (0)
 		intel_sdvo_set_encoder_power_state(intel_sdvo,
@@ -1741,6 +1777,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
 		intel_sdvo_set_encoder_power_state(intel_sdvo,
 						   DRM_MODE_DPMS_ON);
 	intel_sdvo_set_active_outputs(intel_sdvo, intel_sdvo->attached_output);
+
+	if (pipe_config->has_audio)
+		intel_sdvo_enable_audio(intel_sdvo, pipe_config, conn_state);
 }
 
 static enum drm_mode_status
@@ -2603,7 +2642,6 @@ static bool
 intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 {
 	struct drm_encoder *encoder = &intel_sdvo->base.base;
-	struct drm_i915_private *dev_priv = to_i915(encoder->dev);
 	struct drm_connector *connector;
 	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
 	struct intel_connector *intel_connector;
@@ -2640,9 +2678,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
 	encoder->encoder_type = DRM_MODE_ENCODER_TMDS;
 	connector->connector_type = DRM_MODE_CONNECTOR_DVID;
 
-	/* gen3 doesn't do the hdmi bits in the SDVO register */
-	if (INTEL_GEN(dev_priv) >= 4 &&
-	    intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
+	if (intel_sdvo_is_hdmi_connector(intel_sdvo, device)) {
 		connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
 		intel_sdvo_connector->is_hdmi = true;
 	}
@@ -707,6 +707,9 @@ struct intel_sdvo_enhancements_arg {
 #define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER	0x90
 #define SDVO_CMD_SET_AUDIO_STAT		0x91
 #define SDVO_CMD_GET_AUDIO_STAT		0x92
+  #define SDVO_AUDIO_ELD_VALID			(1 << 0)
+  #define SDVO_AUDIO_PRESENCE_DETECT		(1 << 1)
+  #define SDVO_AUDIO_CP_READY			(1 << 2)
 #define SDVO_CMD_SET_HBUF_INDEX		0x93
   #define SDVO_HBUF_INDEX_ELD			0
   #define SDVO_HBUF_INDEX_AVI_IF		1
@@ -90,10 +90,6 @@ static void mtk_drm_finish_page_flip(struct mtk_drm_crtc *mtk_crtc)
 static void mtk_drm_crtc_destroy(struct drm_crtc *crtc)
 {
 	struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
-	int i;
-
-	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
-		clk_unprepare(mtk_crtc->ddp_comp[i]->clk);
 
 	mtk_disp_mutex_put(mtk_crtc->mutex);
 
@@ -186,7 +182,7 @@ static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
 
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
-		ret = clk_enable(mtk_crtc->ddp_comp[i]->clk);
+		ret = clk_prepare_enable(mtk_crtc->ddp_comp[i]->clk);
 		if (ret) {
 			DRM_ERROR("Failed to enable clock %d: %d\n", i, ret);
 			goto err;
@@ -196,7 +192,7 @@ static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
 	return 0;
 err:
 	while (--i >= 0)
-		clk_disable(mtk_crtc->ddp_comp[i]->clk);
+		clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
 	return ret;
 }
 
@@ -206,7 +202,7 @@ static void mtk_crtc_ddp_clk_disable(struct mtk_drm_crtc *mtk_crtc)
 
 	DRM_DEBUG_DRIVER("%s\n", __func__);
 	for (i = 0; i < mtk_crtc->ddp_comp_nr; i++)
-		clk_disable(mtk_crtc->ddp_comp[i]->clk);
+		clk_disable_unprepare(mtk_crtc->ddp_comp[i]->clk);
 }
 
 static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
@@ -577,15 +573,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 		if (!comp) {
 			dev_err(dev, "Component %pOF not initialized\n", node);
 			ret = -ENODEV;
-			goto unprepare;
-		}
-
-		ret = clk_prepare(comp->clk);
-		if (ret) {
-			dev_err(dev,
-				"Failed to prepare clock for component %pOF: %d\n",
-				node, ret);
-			goto unprepare;
+			return ret;
 		}
 
 		mtk_crtc->ddp_comp[i] = comp;
@@ -603,23 +591,17 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
 		ret = mtk_plane_init(drm_dev, &mtk_crtc->planes[zpos],
 				     BIT(pipe), type);
 		if (ret)
-			goto unprepare;
+			return ret;
 	}
 
 	ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
 				mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
 				NULL, pipe);
 	if (ret < 0)
-		goto unprepare;
+		return ret;
 	drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
 	drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, false, MTK_LUT_SIZE);
 	priv->num_pipes++;
 
 	return 0;
-
-unprepare:
-	while (--i >= 0)
-		clk_unprepare(mtk_crtc->ddp_comp[i]->clk);
-
-	return ret;
 }
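The clock changes above rely on the split in the common clock framework: clk_enable()/clk_disable() must be callable from atomic context and only toggle the gate, while clk_prepare()/clk_unprepare() may sleep. Since this enable path can sleep, the two halves fold into one call and the separate prepare bookkeeping in create/destroy goes away. The combined helpers, in a minimal sketch:

	#include <linux/clk.h>

	static int example_power_on(struct clk *clk)
	{
		/* May sleep: prepare (e.g. PLL lock) plus enable (gate). */
		return clk_prepare_enable(clk);
	}

	static void example_power_off(struct clk *clk)
	{
		clk_disable_unprepare(clk);
	}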
@@ -303,6 +303,7 @@ err_config_cleanup:
 static void mtk_drm_kms_deinit(struct drm_device *drm)
 {
 	drm_kms_helper_poll_fini(drm);
+	drm_atomic_helper_shutdown(drm);
 
 	component_unbind_all(drm->dev, drm);
 	drm_mode_config_cleanup(drm);
@@ -389,7 +390,9 @@ static void mtk_drm_unbind(struct device *dev)
 	struct mtk_drm_private *private = dev_get_drvdata(dev);
 
 	drm_dev_unregister(private->drm);
+	mtk_drm_kms_deinit(private->drm);
 	drm_dev_put(private->drm);
+	private->num_pipes = 0;
 	private->drm = NULL;
 }
 
@@ -560,13 +563,8 @@ err_node:
 static int mtk_drm_remove(struct platform_device *pdev)
 {
 	struct mtk_drm_private *private = platform_get_drvdata(pdev);
-	struct drm_device *drm = private->drm;
 	int i;
 
-	drm_dev_unregister(drm);
-	mtk_drm_kms_deinit(drm);
-	drm_dev_put(drm);
-
 	component_master_del(&pdev->dev, &mtk_drm_ops);
 	pm_runtime_disable(&pdev->dev);
 	of_node_put(private->mutex_node);
@@ -136,7 +136,6 @@ static int mtk_drm_gem_object_mmap(struct drm_gem_object *obj,
 	 * VM_PFNMAP flag that was set by drm_gem_mmap_obj()/drm_gem_mmap().
 	 */
 	vma->vm_flags &= ~VM_PFNMAP;
-	vma->vm_pgoff = 0;
 
 	ret = dma_mmap_attrs(priv->dma_dev, vma, mtk_gem->cookie,
 			     mtk_gem->dma_addr, obj->size, mtk_gem->dma_attrs);
@@ -168,6 +167,12 @@ int mtk_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 
 	obj = vma->vm_private_data;
 
+	/*
+	 * Set vm_pgoff (used as a fake buffer offset by DRM) to 0 and map the
+	 * whole buffer from the start.
+	 */
+	vma->vm_pgoff = 0;
+
 	return mtk_drm_gem_object_mmap(obj, vma);
 }
 
@@ -622,6 +622,15 @@ static void mtk_dsi_poweroff(struct mtk_dsi *dsi)
 	if (--dsi->refcount != 0)
 		return;
 
+	/*
+	 * mtk_dsi_stop() and mtk_dsi_start() is asymmetric, since
+	 * mtk_dsi_stop() should be called after mtk_drm_crtc_atomic_disable(),
+	 * which needs irq for vblank, and mtk_dsi_stop() will disable irq.
+	 * mtk_dsi_start() needs to be called in mtk_output_dsi_enable(),
+	 * after dsi is fully set.
+	 */
+	mtk_dsi_stop(dsi);
+
 	if (!mtk_dsi_switch_to_cmd_mode(dsi, VM_DONE_INT_FLAG, 500)) {
 		if (dsi->panel) {
 			if (drm_panel_unprepare(dsi->panel)) {
@@ -688,7 +697,6 @@ static void mtk_output_dsi_disable(struct mtk_dsi *dsi)
 		}
 	}
 
-	mtk_dsi_stop(dsi);
 	mtk_dsi_poweroff(dsi);
 
 	dsi->enabled = false;
@@ -836,6 +844,8 @@ static void mtk_dsi_destroy_conn_enc(struct mtk_dsi *dsi)
 	/* Skip connector cleanup if creation was delegated to the bridge */
 	if (dsi->conn.dev)
 		drm_connector_cleanup(&dsi->conn);
+	if (dsi->panel)
+		drm_panel_detach(dsi->panel);
 }
 
 static void mtk_dsi_ddp_start(struct mtk_ddp_comp *comp)
@@ -107,8 +107,6 @@ static void meson_g12a_crtc_atomic_enable(struct drm_crtc *crtc,
 		       priv->io_base + _REG(VPP_OUT_H_V_SIZE));
 
 	drm_crtc_vblank_on(crtc);
-
-	priv->viu.osd1_enabled = true;
 }
 
 static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -137,8 +135,6 @@ static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
 			    priv->io_base + _REG(VPP_MISC));
 
 	drm_crtc_vblank_on(crtc);
-
-	priv->viu.osd1_enabled = true;
 }
 
 static void meson_g12a_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -256,6 +252,8 @@ static void meson_g12a_crtc_enable_osd1(struct meson_drm *priv)
 	writel_relaxed(priv->viu.osb_blend1_size,
 		       priv->io_base +
 		       _REG(VIU_OSD_BLEND_BLEND1_SIZE));
+	writel_bits_relaxed(3 << 8, 3 << 8,
+			    priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
 }
 
 static void meson_crtc_enable_vd1(struct meson_drm *priv)
@@ -305,6 +305,8 @@ static void meson_plane_atomic_update(struct drm_plane *plane,
 		meson_plane->enabled = true;
 	}
 
+	priv->viu.osd1_enabled = true;
+
 	spin_unlock_irqrestore(&priv->drm->event_lock, flags);
 }
 
@@ -316,14 +318,14 @@ static void meson_plane_atomic_disable(struct drm_plane *plane,
 
 	/* Disable OSD1 */
 	if (meson_vpu_is_compatible(priv, "amlogic,meson-g12a-vpu"))
-		writel_bits_relaxed(BIT(0) | BIT(21), 0,
-				    priv->io_base + _REG(VIU_OSD1_CTRL_STAT));
+		writel_bits_relaxed(3 << 8, 0,
+				    priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
 	else
 		writel_bits_relaxed(VPP_OSD1_POSTBLEND, 0,
 				    priv->io_base + _REG(VPP_MISC));
 
 	meson_plane->enabled = false;
+	priv->viu.osd1_enabled = false;
 }
 
 static const struct drm_plane_helper_funcs meson_plane_helper_funcs = {
@@ -503,8 +503,17 @@ void meson_hdmi_pll_set_params(struct meson_drm *priv, unsigned int m,
 
 	/* G12A HDMI PLL Needs specific parameters for 5.4GHz */
 	if (m >= 0xf7) {
-		regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4, 0xea68dc00);
-		regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5, 0x65771290);
+		if (frac < 0x10000) {
+			regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4,
+				     0x6a685c00);
+			regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5,
+				     0x11551293);
+		} else {
+			regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL4,
+				     0xea68dc00);
+			regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL5,
+				     0x65771290);
+		}
 		regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL6, 0x39272000);
 		regmap_write(priv->hhi, HHI_HDMI_PLL_CNTL7, 0x55540000);
 	} else {
@@ -405,8 +405,7 @@ void meson_viu_init(struct meson_drm *priv)
 			0 << 16 |
 			1,
 			priv->io_base + _REG(VIU_OSD_BLEND_CTRL));
-	writel_relaxed(3 << 8 |
-			1 << 20,
+	writel_relaxed(1 << 20,
 			priv->io_base + _REG(OSD1_BLEND_SRC_CTRL));
 	writel_relaxed(1 << 20,
 			priv->io_base + _REG(OSD2_BLEND_SRC_CTRL));
@@ -10,6 +10,7 @@ config DRM_PANFROST
 	select IOMMU_IO_PGTABLE_LPAE
 	select DRM_GEM_SHMEM_HELPER
 	select PM_DEVFREQ
+	select DEVFREQ_GOV_SIMPLE_ONDEMAND
 	help
 	  DRM driver for ARM Mali Midgard (T6xx, T7xx, T8xx) and
 	  Bifrost (G3x, G5x, G7x) GPUs.
@@ -140,7 +140,9 @@ int panfrost_devfreq_init(struct panfrost_device *pfdev)
 		return 0;
 
 	ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
-	if (ret)
+	if (ret == -ENODEV) /* Optional, continue without devfreq */
+		return 0;
+	else if (ret)
 		return ret;
 
 	panfrost_devfreq_reset(pfdev);
@@ -170,6 +172,9 @@ void panfrost_devfreq_resume(struct panfrost_device *pfdev)
 {
 	int i;
 
+	if (!pfdev->devfreq.devfreq)
+		return;
+
 	panfrost_devfreq_reset(pfdev);
 	for (i = 0; i < NUM_JOB_SLOTS; i++)
 		pfdev->devfreq.slot[i].busy = false;
@@ -179,6 +184,9 @@ void panfrost_devfreq_resume(struct panfrost_device *pfdev)
 
 void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
 {
+	if (!pfdev->devfreq.devfreq)
+		return;
+
 	devfreq_suspend_device(pfdev->devfreq.devfreq);
 }
 
@@ -188,6 +196,9 @@ static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, i
 	ktime_t now;
 	ktime_t last;
 
+	if (!pfdev->devfreq.devfreq)
+		return;
+
 	now = ktime_get();
 	last = pfdev->devfreq.slot[slot].time_last_update;
 
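dev_pm_opp_of_add_table() returns -ENODEV when the device tree provides no OPP table, and the driver now treats that as "run without devfreq", with the resume/suspend/utilization hooks bailing out when devfreq was never created. The optional-feature shape, sketched with a hypothetical init:

	#include <linux/pm_opp.h>

	static int example_devfreq_init(struct device *dev)
	{
		int ret = dev_pm_opp_of_add_table(dev);

		if (ret == -ENODEV)	/* no OPPs in DT: feature is optional */
			return 0;
		if (ret)
			return ret;	/* a real error */

		/* ... set up devfreq here ... */
		return 0;
	}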
@@ -35,8 +35,10 @@ static int a4_input_mapped(struct hid_device *hdev, struct hid_input *hi,
 {
 	struct a4tech_sc *a4 = hid_get_drvdata(hdev);
 
-	if (usage->type == EV_REL && usage->code == REL_WHEEL)
+	if (usage->type == EV_REL && usage->code == REL_WHEEL_HI_RES) {
 		set_bit(REL_HWHEEL, *bit);
+		set_bit(REL_HWHEEL_HI_RES, *bit);
+	}
 
 	if ((a4->quirks & A4_2WHEEL_MOUSE_HACK_7) && usage->hid == 0x00090007)
 		return -1;
@@ -57,7 +59,7 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
 	input = field->hidinput->input;
 
 	if (a4->quirks & A4_2WHEEL_MOUSE_HACK_B8) {
-		if (usage->type == EV_REL && usage->code == REL_WHEEL) {
+		if (usage->type == EV_REL && usage->code == REL_WHEEL_HI_RES) {
 			a4->delayed_value = value;
 			return 1;
 		}
@@ -65,6 +67,8 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
 		if (usage->hid == 0x000100b8) {
 			input_event(input, EV_REL, value ? REL_HWHEEL :
 					REL_WHEEL, a4->delayed_value);
+			input_event(input, EV_REL, value ? REL_HWHEEL_HI_RES :
+					REL_WHEEL_HI_RES, a4->delayed_value * 120);
 			return 1;
 		}
 	}
@@ -74,8 +78,9 @@ static int a4_event(struct hid_device *hdev, struct hid_field *field,
 		return 1;
 	}
 
-	if (usage->code == REL_WHEEL && a4->hw_wheel) {
+	if (usage->code == REL_WHEEL_HI_RES && a4->hw_wheel) {
 		input_event(input, usage->type, REL_HWHEEL, value);
+		input_event(input, usage->type, REL_HWHEEL_HI_RES, value * 120);
 		return 1;
 	}
 
|
|||||||
#include <linux/vmalloc.h>
|
#include <linux/vmalloc.h>
|
||||||
#include <linux/sched.h>
|
#include <linux/sched.h>
|
||||||
#include <linux/semaphore.h>
|
#include <linux/semaphore.h>
|
||||||
#include <linux/async.h>
|
|
||||||
|
|
||||||
#include <linux/hid.h>
|
#include <linux/hid.h>
|
||||||
#include <linux/hiddev.h>
|
#include <linux/hiddev.h>
|
||||||
@ -1311,10 +1310,10 @@ static u32 __extract(u8 *report, unsigned offset, int n)
|
|||||||
u32 hid_field_extract(const struct hid_device *hid, u8 *report,
|
u32 hid_field_extract(const struct hid_device *hid, u8 *report,
|
||||||
unsigned offset, unsigned n)
|
unsigned offset, unsigned n)
|
||||||
{
|
{
|
||||||
if (n > 256) {
|
if (n > 32) {
|
||||||
hid_warn(hid, "hid_field_extract() called with n (%d) > 256! (%s)\n",
|
hid_warn(hid, "hid_field_extract() called with n (%d) > 32! (%s)\n",
|
||||||
n, current->comm);
|
n, current->comm);
|
||||||
n = 256;
|
n = 32;
|
||||||
}
|
}
|
||||||
|
|
||||||
return __extract(report, offset, n);
|
return __extract(report, offset, n);
|
||||||
@ -2362,15 +2361,6 @@ int hid_add_device(struct hid_device *hdev)
|
|||||||
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
|
dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
|
||||||
hdev->vendor, hdev->product, atomic_inc_return(&id));
|
hdev->vendor, hdev->product, atomic_inc_return(&id));
|
||||||
|
|
||||||
/*
|
|
||||||
* Try loading the module for the device before the add, so that we do
|
|
||||||
* not first have hid-generic binding only to have it replaced
|
|
||||||
* immediately afterwards with a specialized driver.
|
|
||||||
*/
|
|
||||||
if (!current_is_async())
|
|
||||||
request_module("hid:b%04Xg%04Xv%08Xp%08X", hdev->bus,
|
|
||||||
hdev->group, hdev->vendor, hdev->product);
|
|
||||||
|
|
||||||
hid_debug_register(hdev, dev_name(&hdev->dev));
|
hid_debug_register(hdev, dev_name(&hdev->dev));
|
||||||
ret = device_add(&hdev->dev);
|
ret = device_add(&hdev->dev);
|
||||||
if (!ret)
|
if (!ret)
|
||||||
|
@ -606,5 +606,7 @@ static void __exit mousevsc_exit(void)
|
|||||||
}
|
}
|
||||||
|
|
||||||
MODULE_LICENSE("GPL");
|
MODULE_LICENSE("GPL");
|
||||||
|
MODULE_DESCRIPTION("Microsoft Hyper-V Synthetic HID Driver");
|
||||||
|
|
||||||
module_init(mousevsc_init);
|
module_init(mousevsc_init);
|
||||||
module_exit(mousevsc_exit);
|
module_exit(mousevsc_exit);
|
||||||
|
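The new bound matches the return type: hid_field_extract() hands back a u32, so anything wider than 32 bits was silently truncated even when the old 256-bit report-size limit allowed it. Illustration, assuming a hypothetical caller:

	#include <linux/hid.h>

	static u32 example_read_field(struct hid_device *hid, u8 *report)
	{
		/*
		 * A request wider than 32 bits cannot fit the u32 return
		 * value; it now warns and is clamped to 32 bits.
		 */
		return hid_field_extract(hid, report, 0, 32);
	}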
@@ -1086,6 +1086,7 @@
 #define USB_DEVICE_ID_SYNAPTICS_HD	0x0ac3
 #define USB_DEVICE_ID_SYNAPTICS_QUAD_HD	0x1ac3
 #define USB_DEVICE_ID_SYNAPTICS_TP_V103	0x5710
+#define USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5	0x81a7
 
 #define USB_VENDOR_ID_TEXAS_INSTRUMENTS	0x2047
 #define USB_DEVICE_ID_TEXAS_INSTRUMENTS_LENOVO_YOGA	0x0855
@ -113,6 +113,7 @@ enum recvr_type {
|
|||||||
recvr_type_dj,
|
recvr_type_dj,
|
||||||
recvr_type_hidpp,
|
recvr_type_hidpp,
|
||||||
recvr_type_gaming_hidpp,
|
recvr_type_gaming_hidpp,
|
||||||
|
recvr_type_mouse_only,
|
||||||
recvr_type_27mhz,
|
recvr_type_27mhz,
|
||||||
recvr_type_bluetooth,
|
recvr_type_bluetooth,
|
||||||
};
|
};
|
||||||
@ -864,9 +865,12 @@ static void logi_dj_recv_queue_notification(struct dj_receiver_dev *djrcv_dev,
|
|||||||
schedule_work(&djrcv_dev->work);
|
schedule_work(&djrcv_dev->work);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void logi_hidpp_dev_conn_notif_equad(struct hidpp_event *hidpp_report,
|
static void logi_hidpp_dev_conn_notif_equad(struct hid_device *hdev,
|
||||||
|
struct hidpp_event *hidpp_report,
|
||||||
struct dj_workitem *workitem)
|
struct dj_workitem *workitem)
|
||||||
{
|
{
|
||||||
|
struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
|
||||||
|
|
||||||
workitem->type = WORKITEM_TYPE_PAIRED;
|
workitem->type = WORKITEM_TYPE_PAIRED;
|
||||||
workitem->device_type = hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] &
|
workitem->device_type = hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] &
|
||||||
HIDPP_DEVICE_TYPE_MASK;
|
HIDPP_DEVICE_TYPE_MASK;
|
||||||
@ -880,6 +884,8 @@ static void logi_hidpp_dev_conn_notif_equad(struct hidpp_event *hidpp_report,
|
|||||||
break;
|
break;
|
||||||
case REPORT_TYPE_MOUSE:
|
case REPORT_TYPE_MOUSE:
|
||||||
workitem->reports_supported |= STD_MOUSE | HIDPP;
|
workitem->reports_supported |= STD_MOUSE | HIDPP;
|
||||||
|
if (djrcv_dev->type == recvr_type_mouse_only)
|
||||||
|
workitem->reports_supported |= MULTIMEDIA;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -923,7 +929,7 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
|
|||||||
case 0x01:
|
case 0x01:
|
||||||
device_type = "Bluetooth";
|
device_type = "Bluetooth";
|
||||||
/* Bluetooth connect packet contents is the same as (e)QUAD */
|
/* Bluetooth connect packet contents is the same as (e)QUAD */
|
||||||
logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
|
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
|
||||||
if (!(hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] &
|
if (!(hidpp_report->params[HIDPP_PARAM_DEVICE_INFO] &
|
||||||
HIDPP_MANUFACTURER_MASK)) {
|
HIDPP_MANUFACTURER_MASK)) {
|
||||||
hid_info(hdev, "Non Logitech device connected on slot %d\n",
|
hid_info(hdev, "Non Logitech device connected on slot %d\n",
|
||||||
@ -937,18 +943,18 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
|
|||||||
break;
|
break;
|
||||||
case 0x03:
|
case 0x03:
|
||||||
device_type = "QUAD or eQUAD";
|
device_type = "QUAD or eQUAD";
|
||||||
logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
|
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
|
||||||
break;
|
break;
|
||||||
case 0x04:
|
case 0x04:
|
||||||
device_type = "eQUAD step 4 DJ";
|
device_type = "eQUAD step 4 DJ";
|
||||||
logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
|
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
|
||||||
break;
|
break;
|
||||||
case 0x05:
|
case 0x05:
|
||||||
device_type = "DFU Lite";
|
device_type = "DFU Lite";
|
||||||
break;
|
break;
|
||||||
case 0x06:
|
case 0x06:
|
||||||
device_type = "eQUAD step 4 Lite";
|
device_type = "eQUAD step 4 Lite";
|
||||||
logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
|
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
|
||||||
break;
|
break;
|
||||||
case 0x07:
|
case 0x07:
|
||||||
device_type = "eQUAD step 4 Gaming";
|
device_type = "eQUAD step 4 Gaming";
|
||||||
@ -958,11 +964,11 @@ static void logi_hidpp_recv_queue_notif(struct hid_device *hdev,
|
|||||||
break;
|
break;
|
||||||
case 0x0a:
|
case 0x0a:
|
||||||
device_type = "eQUAD nano Lite";
|
device_type = "eQUAD nano Lite";
|
||||||
logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
|
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
|
||||||
break;
|
break;
|
||||||
case 0x0c:
|
case 0x0c:
|
||||||
device_type = "eQUAD Lightspeed";
|
device_type = "eQUAD Lightspeed";
|
||||||
logi_hidpp_dev_conn_notif_equad(hidpp_report, &workitem);
|
logi_hidpp_dev_conn_notif_equad(hdev, hidpp_report, &workitem);
|
||||||
workitem.reports_supported |= STD_KEYBOARD;
|
workitem.reports_supported |= STD_KEYBOARD;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@ -1313,7 +1319,8 @@ static int logi_dj_ll_parse(struct hid_device *hid)
|
|||||||
if (djdev->reports_supported & STD_MOUSE) {
|
if (djdev->reports_supported & STD_MOUSE) {
|
||||||
dbg_hid("%s: sending a mouse descriptor, reports_supported: %llx\n",
|
dbg_hid("%s: sending a mouse descriptor, reports_supported: %llx\n",
|
||||||
__func__, djdev->reports_supported);
|
__func__, djdev->reports_supported);
|
||||||
if (djdev->dj_receiver_dev->type == recvr_type_gaming_hidpp)
|
if (djdev->dj_receiver_dev->type == recvr_type_gaming_hidpp ||
|
||||||
|
djdev->dj_receiver_dev->type == recvr_type_mouse_only)
|
||||||
rdcat(rdesc, &rsize, mse_high_res_descriptor,
|
rdcat(rdesc, &rsize, mse_high_res_descriptor,
|
||||||
sizeof(mse_high_res_descriptor));
|
sizeof(mse_high_res_descriptor));
|
||||||
else if (djdev->dj_receiver_dev->type == recvr_type_27mhz)
|
else if (djdev->dj_receiver_dev->type == recvr_type_27mhz)
|
||||||
@ -1556,15 +1563,19 @@ static int logi_dj_raw_event(struct hid_device *hdev,
|
|||||||
data[0] = data[1];
|
data[0] = data[1];
|
||||||
data[1] = 0;
|
data[1] = 0;
|
||||||
}
|
}
|
||||||
/* The 27 MHz mouse-only receiver sends unnumbered mouse data */
|
/*
|
||||||
|
* Mouse-only receivers send unnumbered mouse data. The 27 MHz
|
||||||
|
* receiver uses 6 byte packets, the nano receiver 8 bytes.
|
||||||
|
*/
|
||||||
if (djrcv_dev->unnumbered_application == HID_GD_MOUSE &&
|
if (djrcv_dev->unnumbered_application == HID_GD_MOUSE &&
|
||||||
size == 6) {
|
size <= 8) {
|
||||||
u8 mouse_report[7];
|
u8 mouse_report[9];
|
||||||
|
|
||||||
/* Prepend report id */
|
/* Prepend report id */
|
||||||
mouse_report[0] = REPORT_TYPE_MOUSE;
|
mouse_report[0] = REPORT_TYPE_MOUSE;
|
||||||
memcpy(mouse_report + 1, data, 6);
|
memcpy(mouse_report + 1, data, size);
|
||||||
logi_dj_recv_forward_input_report(hdev, mouse_report, 7);
|
logi_dj_recv_forward_input_report(hdev, mouse_report,
|
||||||
|
size + 1);
|
||||||
}
|
}
|
||||||
|
|
||||||
return false;
|
return false;
|
||||||
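
The raw-event hunk above generalizes the old 27 MHz-only fixup: any mouse-only receiver's unnumbered packet (6 bytes on the 27 MHz receiver, 8 on the nano) gets the mouse report ID prepended before being forwarded. A standalone sketch of that repacking, with illustrative names (the demo define mirrors the driver's REPORT_TYPE_MOUSE but its value here is an assumption):

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    #define DEMO_REPORT_TYPE_MOUSE 0x02   /* assumed value, for illustration */

    /* out must hold at least size + 1 bytes; returns the forwarded length */
    static int repack_mouse_report(const uint8_t *data, size_t size,
                                   uint8_t *out)
    {
        if (size > 8)                     /* 6-byte (27 MHz) or 8-byte (nano) */
            return -1;
        out[0] = DEMO_REPORT_TYPE_MOUSE;  /* prepend the report id */
        memcpy(out + 1, data, size);      /* copy the whole payload */
        return (int)size + 1;             /* length to forward upstream */
    }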
@ -1635,6 +1646,7 @@ static int logi_dj_probe(struct hid_device *hdev,
|
|||||||
case recvr_type_dj: no_dj_interfaces = 3; break;
|
case recvr_type_dj: no_dj_interfaces = 3; break;
|
||||||
case recvr_type_hidpp: no_dj_interfaces = 2; break;
|
case recvr_type_hidpp: no_dj_interfaces = 2; break;
|
||||||
case recvr_type_gaming_hidpp: no_dj_interfaces = 3; break;
|
case recvr_type_gaming_hidpp: no_dj_interfaces = 3; break;
|
||||||
|
case recvr_type_mouse_only: no_dj_interfaces = 2; break;
|
||||||
case recvr_type_27mhz: no_dj_interfaces = 2; break;
|
case recvr_type_27mhz: no_dj_interfaces = 2; break;
|
||||||
case recvr_type_bluetooth: no_dj_interfaces = 2; break;
|
case recvr_type_bluetooth: no_dj_interfaces = 2; break;
|
||||||
}
|
}
|
||||||
@ -1808,10 +1820,10 @@ static const struct hid_device_id logi_dj_receivers[] = {
|
|||||||
{HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
|
{HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
|
||||||
USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2),
|
USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2),
|
||||||
.driver_data = recvr_type_dj},
|
.driver_data = recvr_type_dj},
|
||||||
{ /* Logitech Nano (non DJ) receiver */
|
{ /* Logitech Nano mouse only receiver */
|
||||||
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
|
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
|
||||||
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER),
|
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER),
|
||||||
.driver_data = recvr_type_hidpp},
|
.driver_data = recvr_type_mouse_only},
|
||||||
{ /* Logitech Nano (non DJ) receiver */
|
{ /* Logitech Nano (non DJ) receiver */
|
||||||
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
|
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
|
||||||
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2),
|
USB_DEVICE_ID_LOGITECH_NANO_RECEIVER_2),
|
||||||
@ -1836,6 +1848,14 @@ static const struct hid_device_id logi_dj_receivers[] = {
|
|||||||
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
|
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
|
||||||
0xc70a),
|
0xc70a),
|
||||||
.driver_data = recvr_type_bluetooth},
|
.driver_data = recvr_type_bluetooth},
|
||||||
|
{ /* Logitech MX5500 HID++ / bluetooth receiver keyboard intf. */
|
||||||
|
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
|
||||||
|
0xc71b),
|
||||||
|
.driver_data = recvr_type_bluetooth},
|
||||||
|
{ /* Logitech MX5500 HID++ / bluetooth receiver mouse intf. */
|
||||||
|
HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
|
||||||
|
0xc71c),
|
||||||
|
.driver_data = recvr_type_bluetooth},
|
||||||
{}
|
{}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -3728,6 +3728,9 @@ static const struct hid_device_id hidpp_devices[] = {
|
|||||||
{ /* Keyboard MX5000 (Bluetooth-receiver in HID proxy mode) */
|
{ /* Keyboard MX5000 (Bluetooth-receiver in HID proxy mode) */
|
||||||
LDJ_DEVICE(0xb305),
|
LDJ_DEVICE(0xb305),
|
||||||
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
|
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
|
||||||
|
{ /* Keyboard MX5500 (Bluetooth-receiver in HID proxy mode) */
|
||||||
|
LDJ_DEVICE(0xb30b),
|
||||||
|
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
|
||||||
|
|
||||||
{ LDJ_DEVICE(HID_ANY_ID) },
|
{ LDJ_DEVICE(HID_ANY_ID) },
|
||||||
|
|
||||||
@ -3740,6 +3743,9 @@ static const struct hid_device_id hidpp_devices[] = {
|
|||||||
{ /* Keyboard MX3200 (Y-RAV80) */
|
{ /* Keyboard MX3200 (Y-RAV80) */
|
||||||
L27MHZ_DEVICE(0x005c),
|
L27MHZ_DEVICE(0x005c),
|
||||||
.driver_data = HIDPP_QUIRK_KBD_ZOOM_WHEEL },
|
.driver_data = HIDPP_QUIRK_KBD_ZOOM_WHEEL },
|
||||||
|
{ /* S510 Media Remote */
|
||||||
|
L27MHZ_DEVICE(0x00fe),
|
||||||
|
.driver_data = HIDPP_QUIRK_KBD_SCROLL_WHEEL },
|
||||||
|
|
||||||
{ L27MHZ_DEVICE(HID_ANY_ID) },
|
{ L27MHZ_DEVICE(HID_ANY_ID) },
|
||||||
|
|
||||||
@ -3756,6 +3762,9 @@ static const struct hid_device_id hidpp_devices[] = {
|
|||||||
{ /* MX5000 keyboard over Bluetooth */
|
{ /* MX5000 keyboard over Bluetooth */
|
||||||
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305),
|
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb305),
|
||||||
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
|
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
|
||||||
|
{ /* MX5500 keyboard over Bluetooth */
|
||||||
|
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b),
|
||||||
|
.driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS },
|
||||||
{}
|
{}
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -637,6 +637,13 @@ static void mt_store_field(struct hid_device *hdev,
|
|||||||
if (*target != DEFAULT_TRUE &&
|
if (*target != DEFAULT_TRUE &&
|
||||||
*target != DEFAULT_FALSE &&
|
*target != DEFAULT_FALSE &&
|
||||||
*target != DEFAULT_ZERO) {
|
*target != DEFAULT_ZERO) {
|
||||||
|
if (usage->contactid == DEFAULT_ZERO ||
|
||||||
|
usage->x == DEFAULT_ZERO ||
|
||||||
|
usage->y == DEFAULT_ZERO) {
|
||||||
|
hid_dbg(hdev,
|
||||||
|
"ignoring duplicate usage on incomplete");
|
||||||
|
return;
|
||||||
|
}
|
||||||
usage = mt_allocate_usage(hdev, application);
|
usage = mt_allocate_usage(hdev, application);
|
||||||
if (!usage)
|
if (!usage)
|
||||||
return;
|
return;
|
||||||
|
@ -35,6 +35,7 @@
|
|||||||
/* device flags */
|
/* device flags */
|
||||||
#define RMI_DEVICE BIT(0)
|
#define RMI_DEVICE BIT(0)
|
||||||
#define RMI_DEVICE_HAS_PHYS_BUTTONS BIT(1)
|
#define RMI_DEVICE_HAS_PHYS_BUTTONS BIT(1)
|
||||||
|
#define RMI_DEVICE_OUTPUT_SET_REPORT BIT(2)
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* retrieve the ctrl registers
|
* retrieve the ctrl registers
|
||||||
@ -163,9 +164,19 @@ static int rmi_set_mode(struct hid_device *hdev, u8 mode)
|
|||||||
|
|
||||||
static int rmi_write_report(struct hid_device *hdev, u8 *report, int len)
|
static int rmi_write_report(struct hid_device *hdev, u8 *report, int len)
|
||||||
{
|
{
|
||||||
|
struct rmi_data *data = hid_get_drvdata(hdev);
|
||||||
int ret;
|
int ret;
|
||||||
|
|
||||||
ret = hid_hw_output_report(hdev, (void *)report, len);
|
if (data->device_flags & RMI_DEVICE_OUTPUT_SET_REPORT) {
|
||||||
|
/*
|
||||||
|
* Talk to device by using SET_REPORT requests instead.
|
||||||
|
*/
|
||||||
|
ret = hid_hw_raw_request(hdev, report[0], report,
|
||||||
|
len, HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
|
||||||
|
} else {
|
||||||
|
ret = hid_hw_output_report(hdev, (void *)report, len);
|
||||||
|
}
|
||||||
|
|
||||||
if (ret < 0) {
|
if (ret < 0) {
|
||||||
dev_err(&hdev->dev, "failed to write hid report (%d)\n", ret);
|
dev_err(&hdev->dev, "failed to write hid report (%d)\n", ret);
|
||||||
return ret;
|
return ret;
|
||||||
@ -747,6 +758,8 @@ static const struct hid_device_id rmi_id[] = {
|
|||||||
.driver_data = RMI_DEVICE_HAS_PHYS_BUTTONS },
|
.driver_data = RMI_DEVICE_HAS_PHYS_BUTTONS },
|
||||||
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
|
{ HID_USB_DEVICE(USB_VENDOR_ID_LENOVO, USB_DEVICE_ID_LENOVO_X1_COVER) },
|
||||||
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_REZEL) },
|
{ HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_REZEL) },
|
||||||
|
{ HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS, USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5),
|
||||||
|
.driver_data = RMI_DEVICE_OUTPUT_SET_REPORT },
|
||||||
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_RMI, HID_ANY_ID, HID_ANY_ID) },
|
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_RMI, HID_ANY_ID, HID_ANY_ID) },
|
||||||
{ }
|
{ }
|
||||||
};
|
};
|
||||||
|
@ -354,6 +354,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
|
|||||||
},
|
},
|
||||||
.driver_data = (void *)&sipodev_desc
|
.driver_data = (void *)&sipodev_desc
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
.ident = "iBall Aer3",
|
||||||
|
.matches = {
|
||||||
|
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "iBall"),
|
||||||
|
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Aer3"),
|
||||||
|
},
|
||||||
|
.driver_data = (void *)&sipodev_desc
|
||||||
|
},
|
||||||
{ } /* Terminate list */
|
{ } /* Terminate list */
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -1232,13 +1232,13 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
|
|||||||
/* Add back in missing bits of ID for non-USI pens */
|
/* Add back in missing bits of ID for non-USI pens */
|
||||||
wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF;
|
wacom->id[0] |= (wacom->serial[0] >> 32) & 0xFFFFF;
|
||||||
}
|
}
|
||||||
wacom->tool[0] = wacom_intuos_get_tool_type(wacom_intuos_id_mangle(wacom->id[0]));
|
|
||||||
|
|
||||||
for (i = 0; i < pen_frames; i++) {
|
for (i = 0; i < pen_frames; i++) {
|
||||||
unsigned char *frame = &data[i*pen_frame_len + 1];
|
unsigned char *frame = &data[i*pen_frame_len + 1];
|
||||||
bool valid = frame[0] & 0x80;
|
bool valid = frame[0] & 0x80;
|
||||||
bool prox = frame[0] & 0x40;
|
bool prox = frame[0] & 0x40;
|
||||||
bool range = frame[0] & 0x20;
|
bool range = frame[0] & 0x20;
|
||||||
|
bool invert = frame[0] & 0x10;
|
||||||
|
|
||||||
if (!valid)
|
if (!valid)
|
||||||
continue;
|
continue;
|
||||||
@ -1247,9 +1247,24 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
|
|||||||
wacom->shared->stylus_in_proximity = false;
|
wacom->shared->stylus_in_proximity = false;
|
||||||
wacom_exit_report(wacom);
|
wacom_exit_report(wacom);
|
||||||
input_sync(pen_input);
|
input_sync(pen_input);
|
||||||
|
|
||||||
|
wacom->tool[0] = 0;
|
||||||
|
wacom->id[0] = 0;
|
||||||
|
wacom->serial[0] = 0;
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (range) {
|
if (range) {
|
||||||
|
if (!wacom->tool[0]) { /* first in range */
|
||||||
|
/* Going into range select tool */
|
||||||
|
if (invert)
|
||||||
|
wacom->tool[0] = BTN_TOOL_RUBBER;
|
||||||
|
else if (wacom->id[0])
|
||||||
|
wacom->tool[0] = wacom_intuos_get_tool_type(wacom->id[0]);
|
||||||
|
else
|
||||||
|
wacom->tool[0] = BTN_TOOL_PEN;
|
||||||
|
}
|
||||||
|
|
||||||
input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
|
input_report_abs(pen_input, ABS_X, get_unaligned_le16(&frame[1]));
|
||||||
input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
|
input_report_abs(pen_input, ABS_Y, get_unaligned_le16(&frame[3]));
|
||||||
|
|
||||||
@ -1271,24 +1286,27 @@ static void wacom_intuos_pro2_bt_pen(struct wacom_wac *wacom)
|
|||||||
get_unaligned_le16(&frame[11]));
|
get_unaligned_le16(&frame[11]));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
|
|
||||||
if (wacom->features.type == INTUOSP2_BT) {
|
if (wacom->tool[0]) {
|
||||||
input_report_abs(pen_input, ABS_DISTANCE,
|
input_report_abs(pen_input, ABS_PRESSURE, get_unaligned_le16(&frame[5]));
|
||||||
range ? frame[13] : wacom->features.distance_max);
|
if (wacom->features.type == INTUOSP2_BT) {
|
||||||
} else {
|
input_report_abs(pen_input, ABS_DISTANCE,
|
||||||
input_report_abs(pen_input, ABS_DISTANCE,
|
range ? frame[13] : wacom->features.distance_max);
|
||||||
range ? frame[7] : wacom->features.distance_max);
|
} else {
|
||||||
|
input_report_abs(pen_input, ABS_DISTANCE,
|
||||||
|
range ? frame[7] : wacom->features.distance_max);
|
||||||
|
}
|
||||||
|
|
||||||
|
input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x09);
|
||||||
|
input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
|
||||||
|
input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
|
||||||
|
|
||||||
|
input_report_key(pen_input, wacom->tool[0], prox);
|
||||||
|
input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
|
||||||
|
input_report_abs(pen_input, ABS_MISC,
|
||||||
|
wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
|
||||||
}
|
}
|
||||||
|
|
||||||
input_report_key(pen_input, BTN_TOUCH, frame[0] & 0x01);
|
|
||||||
input_report_key(pen_input, BTN_STYLUS, frame[0] & 0x02);
|
|
||||||
input_report_key(pen_input, BTN_STYLUS2, frame[0] & 0x04);
|
|
||||||
|
|
||||||
input_report_key(pen_input, wacom->tool[0], prox);
|
|
||||||
input_event(pen_input, EV_MSC, MSC_SERIAL, wacom->serial[0]);
|
|
||||||
input_report_abs(pen_input, ABS_MISC,
|
|
||||||
wacom_intuos_id_mangle(wacom->id[0])); /* report tool id */
|
|
||||||
|
|
||||||
wacom->shared->stylus_in_proximity = prox;
|
wacom->shared->stylus_in_proximity = prox;
|
||||||
|
|
||||||
input_sync(pen_input);
|
input_sync(pen_input);
|
||||||
@ -1349,11 +1367,17 @@ static void wacom_intuos_pro2_bt_touch(struct wacom_wac *wacom)
|
|||||||
if (wacom->num_contacts_left <= 0) {
|
if (wacom->num_contacts_left <= 0) {
|
||||||
wacom->num_contacts_left = 0;
|
wacom->num_contacts_left = 0;
|
||||||
wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom);
|
wacom->shared->touch_down = wacom_wac_finger_count_touches(wacom);
|
||||||
|
input_sync(touch_input);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7));
|
if (wacom->num_contacts_left == 0) {
|
||||||
input_sync(touch_input);
|
// Be careful that we don't accidentally call input_sync with
|
||||||
|
// only a partial set of fingers of processed
|
||||||
|
input_report_switch(touch_input, SW_MUTE_DEVICE, !(data[281] >> 7));
|
||||||
|
input_sync(touch_input);
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
|
static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
|
||||||
@ -1361,7 +1385,7 @@ static void wacom_intuos_pro2_bt_pad(struct wacom_wac *wacom)
|
|||||||
struct input_dev *pad_input = wacom->pad_input;
|
struct input_dev *pad_input = wacom->pad_input;
|
||||||
unsigned char *data = wacom->data;
|
unsigned char *data = wacom->data;
|
||||||
|
|
||||||
int buttons = (data[282] << 1) | ((data[281] >> 6) & 0x01);
|
int buttons = data[282] | ((data[281] & 0x40) << 2);
|
||||||
int ring = data[285] & 0x7F;
|
int ring = data[285] & 0x7F;
|
||||||
bool ringstatus = data[285] & 0x80;
|
bool ringstatus = data[285] & 0x80;
|
||||||
bool prox = buttons || ringstatus;
|
bool prox = buttons || ringstatus;
|
||||||
@ -3810,7 +3834,7 @@ static void wacom_24hd_update_leds(struct wacom *wacom, int mask, int group)
|
|||||||
static bool wacom_is_led_toggled(struct wacom *wacom, int button_count,
|
static bool wacom_is_led_toggled(struct wacom *wacom, int button_count,
|
||||||
int mask, int group)
|
int mask, int group)
|
||||||
{
|
{
|
||||||
int button_per_group;
|
int group_button;
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* 21UX2 has LED group 1 to the left and LED group 0
|
* 21UX2 has LED group 1 to the left and LED group 0
|
||||||
@ -3820,9 +3844,12 @@ static bool wacom_is_led_toggled(struct wacom *wacom, int button_count,
|
|||||||
if (wacom->wacom_wac.features.type == WACOM_21UX2)
|
if (wacom->wacom_wac.features.type == WACOM_21UX2)
|
||||||
group = 1 - group;
|
group = 1 - group;
|
||||||
|
|
||||||
button_per_group = button_count/wacom->led.count;
|
group_button = group * (button_count/wacom->led.count);
|
||||||
|
|
||||||
return mask & (1 << (group * button_per_group));
|
if (wacom->wacom_wac.features.type == INTUOSP2_BT)
|
||||||
|
group_button = 8;
|
||||||
|
|
||||||
|
return mask & (1 << group_button);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void wacom_update_led(struct wacom *wacom, int button_count, int mask,
|
static void wacom_update_led(struct wacom *wacom, int button_count, int mask,
|
||||||
|
@ -81,6 +81,7 @@ static struct i2c_algo_bit_data ioc_data = {
|
|||||||
|
|
||||||
static struct i2c_adapter ioc_ops = {
|
static struct i2c_adapter ioc_ops = {
|
||||||
.nr = 0,
|
.nr = 0,
|
||||||
|
.name = "ioc",
|
||||||
.algo_data = &ioc_data,
|
.algo_data = &ioc_data,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -21,7 +21,6 @@
|
|||||||
#include <linux/platform_device.h>
|
#include <linux/platform_device.h>
|
||||||
#include <linux/i2c-algo-pca.h>
|
#include <linux/i2c-algo-pca.h>
|
||||||
#include <linux/platform_data/i2c-pca-platform.h>
|
#include <linux/platform_data/i2c-pca-platform.h>
|
||||||
#include <linux/gpio.h>
|
|
||||||
#include <linux/gpio/consumer.h>
|
#include <linux/gpio/consumer.h>
|
||||||
#include <linux/io.h>
|
#include <linux/io.h>
|
||||||
#include <linux/of.h>
|
#include <linux/of.h>
|
||||||
@ -173,7 +172,7 @@ static int i2c_pca_pf_probe(struct platform_device *pdev)
|
|||||||
i2c->adap.dev.parent = &pdev->dev;
|
i2c->adap.dev.parent = &pdev->dev;
|
||||||
i2c->adap.dev.of_node = np;
|
i2c->adap.dev.of_node = np;
|
||||||
|
|
||||||
i2c->gpio = devm_gpiod_get_optional(&pdev->dev, "reset-gpios", GPIOD_OUT_LOW);
|
i2c->gpio = devm_gpiod_get_optional(&pdev->dev, "reset", GPIOD_OUT_LOW);
|
||||||
if (IS_ERR(i2c->gpio))
|
if (IS_ERR(i2c->gpio))
|
||||||
return PTR_ERR(i2c->gpio);
|
return PTR_ERR(i2c->gpio);
|
||||||
|
|
||||||
|
@ -47,6 +47,15 @@
|
|||||||
|
|
||||||
#include "arm-smmu-regs.h"
|
#include "arm-smmu-regs.h"
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Apparently, some Qualcomm arm64 platforms which appear to expose their SMMU
|
||||||
|
* global register space are still, in fact, using a hypervisor to mediate it
|
||||||
|
* by trapping and emulating register accesses. Sadly, some deployed versions
|
||||||
|
* of said trapping code have bugs wherein they go horribly wrong for stores
|
||||||
|
* using r31 (i.e. XZR/WZR) as the source register.
|
||||||
|
*/
|
||||||
|
#define QCOM_DUMMY_VAL -1
|
||||||
|
|
||||||
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
|
#define ARM_MMU500_ACTLR_CPRE (1 << 1)
|
||||||
|
|
||||||
#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
|
#define ARM_MMU500_ACR_CACHE_LOCK (1 << 26)
|
||||||
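
For context on why a dummy value sidesteps the bug: on arm64 the MMIO write helpers constrain their data operand so that a constant zero can be emitted as a store from the zero register ("str wzr, [...]"), which is exactly the encoding the buggy trap code mishandles, while any non-zero constant forces a general-purpose source register. The values written to these invalidate/sync registers are ignored by the hardware, so -1 works as well as 0. A sketch under those assumptions (codegen is ultimately up to the compiler):

    static void tlb_sync_demo(void __iomem *sync)
    {
        /* may compile to "str wzr, [x0]" via the zero-register constraint:
         *	writel_relaxed(0, sync);
         */

        /* forces a real source register; the written value is ignored: */
        writel_relaxed(QCOM_DUMMY_VAL, sync);
    }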
@@ -411,7 +420,7 @@ static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu,
 {
 	unsigned int spin_cnt, delay;
 
-	writel_relaxed(0, sync);
+	writel_relaxed(QCOM_DUMMY_VAL, sync);
 	for (delay = 1; delay < TLB_LOOP_TIMEOUT; delay *= 2) {
 		for (spin_cnt = TLB_SPIN_COUNT; spin_cnt > 0; spin_cnt--) {
 			if (!(readl_relaxed(status) & sTLBGSTATUS_GSACTIVE))
@@ -1751,8 +1760,8 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 	}
 
 	/* Invalidate the TLB, just in case */
-	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH);
-	writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
+	writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLH);
+	writel_relaxed(QCOM_DUMMY_VAL, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH);
 
 	reg = readl_relaxed(ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0);
 
@@ -2504,6 +2504,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 		}
 	}
 
+	spin_lock(&iommu->lock);
 	spin_lock_irqsave(&device_domain_lock, flags);
 	if (dev)
 		found = find_domain(dev);
@@ -2519,17 +2520,16 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 
 	if (found) {
 		spin_unlock_irqrestore(&device_domain_lock, flags);
+		spin_unlock(&iommu->lock);
 		free_devinfo_mem(info);
 		/* Caller must free the original domain */
 		return found;
 	}
 
-	spin_lock(&iommu->lock);
 	ret = domain_attach_iommu(domain, iommu);
-	spin_unlock(&iommu->lock);
-
 	if (ret) {
 		spin_unlock_irqrestore(&device_domain_lock, flags);
+		spin_unlock(&iommu->lock);
 		free_devinfo_mem(info);
 		return NULL;
 	}
@@ -2539,6 +2539,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu,
 	if (dev)
 		dev->archdata.iommu = info;
 	spin_unlock_irqrestore(&device_domain_lock, flags);
+	spin_unlock(&iommu->lock);
 
 	/* PASID table is mandatory for a PCI device in scalable mode. */
 	if (dev && dev_is_pci(dev) && sm_supported(iommu)) {
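
Taken together, the three hunks above impose one consistent order on the two locks: iommu->lock is now acquired before device_domain_lock and released after it on every exit path, instead of being taken and dropped in the middle of the critical section. The resulting shape, reduced to a sketch with illustrative names:

    static void ordered_update(spinlock_t *iommu_lock, spinlock_t *domain_lock)
    {
        unsigned long flags;

        spin_lock(iommu_lock);                     /* outer: always first */
        spin_lock_irqsave(domain_lock, flags);     /* inner */

        /* ... lookup / attach / publish; every early return unlocks ... */

        spin_unlock_irqrestore(domain_lock, flags);  /* inner first */
        spin_unlock(iommu_lock);                     /* outer last */
    }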
@@ -389,7 +389,7 @@ static inline void pasid_set_present(struct pasid_entry *pe)
  */
 static inline void pasid_set_page_snoop(struct pasid_entry *pe, bool value)
 {
-	pasid_set_bits(&pe->val[1], 1 << 23, value);
+	pasid_set_bits(&pe->val[1], 1 << 23, value << 23);
 }
 
 /*
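
The one-liner above matters because the helper takes a mask and the new bits, not a mask and a boolean: the bits must already be positioned under the mask. A minimal model of the read-modify-write pattern (an assumption about the helper's shape, not the kernel's exact code):

    #include <stdint.h>

    /* Minimal model: clear the masked field, then OR in the new bits. */
    static inline void set_bits(uint64_t *ptr, uint64_t mask, uint64_t bits)
    {
        *ptr = (*ptr & ~mask) | (bits & mask);
    }

    /* With mask = 1 << 23, a bare bool lands at bit 0 and is masked away,
     * so the target bit can never be set:
     *	set_bits(&val, 1ull << 23, value);                  -- broken
     *	set_bits(&val, 1ull << 23, (uint64_t)value << 23);  -- fixed
     */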
@@ -329,7 +329,7 @@ static ssize_t iommu_group_show_type(struct iommu_group *group,
 			type = "unmanaged\n";
 			break;
 		case IOMMU_DOMAIN_DMA:
-			type = "DMA";
+			type = "DMA\n";
 			break;
 		}
 	}
@@ -887,12 +887,22 @@ unsigned int bch_btree_insert_key(struct btree_keys *b, struct bkey *k,
 	struct bset *i = bset_tree_last(b)->data;
 	struct bkey *m, *prev = NULL;
 	struct btree_iter iter;
+	struct bkey preceding_key_on_stack = ZERO_KEY;
+	struct bkey *preceding_key_p = &preceding_key_on_stack;
 
 	BUG_ON(b->ops->is_extents && !KEY_SIZE(k));
 
-	m = bch_btree_iter_init(b, &iter, b->ops->is_extents
-				? PRECEDING_KEY(&START_KEY(k))
-				: PRECEDING_KEY(k));
+	/*
+	 * If k has preceding key, preceding_key_p will be set to address
+	 * of k's preceding key; otherwise preceding_key_p will be set
+	 * to NULL inside preceding_key().
+	 */
+	if (b->ops->is_extents)
+		preceding_key(&START_KEY(k), &preceding_key_p);
+	else
+		preceding_key(k, &preceding_key_p);
+
+	m = bch_btree_iter_init(b, &iter, preceding_key_p);
 
 	if (b->ops->insert_fixup(b, k, &iter, replace_key))
 		return status;
@@ -434,20 +434,26 @@ static inline bool bch_cut_back(const struct bkey *where, struct bkey *k)
 	return __bch_cut_back(where, k);
 }
 
-#define PRECEDING_KEY(_k)					\
-({								\
-	struct bkey *_ret = NULL;				\
-								\
-	if (KEY_INODE(_k) || KEY_OFFSET(_k)) {			\
-		_ret = &KEY(KEY_INODE(_k), KEY_OFFSET(_k), 0);	\
-								\
-		if (!_ret->low)					\
-			_ret->high--;				\
-		_ret->low--;					\
-	}							\
-								\
-	_ret;							\
-})
+/*
+ * Pointer '*preceding_key_p' points to a memory object to store preceding
+ * key of k. If the preceding key does not exist, set '*preceding_key_p' to
+ * NULL. So the caller of preceding_key() needs to take care of memory
+ * which '*preceding_key_p' pointed to before calling preceding_key().
+ * Currently the only caller of preceding_key() is bch_btree_insert_key(),
+ * and it points to an on-stack variable, so the memory release is handled
+ * by stackframe itself.
+ */
+static inline void preceding_key(struct bkey *k, struct bkey **preceding_key_p)
+{
+	if (KEY_INODE(k) || KEY_OFFSET(k)) {
+		(**preceding_key_p) = KEY(KEY_INODE(k), KEY_OFFSET(k), 0);
+		if (!(*preceding_key_p)->low)
+			(*preceding_key_p)->high--;
+		(*preceding_key_p)->low--;
+	} else {
+		(*preceding_key_p) = NULL;
+	}
+}
 
 static inline bool bch_ptr_invalid(struct btree_keys *b, const struct bkey *k)
 {
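
The reason the macro could not stay: PRECEDING_KEY() took the address of the compound literal produced by KEY() inside a statement expression, and in C that literal's storage dies when the enclosing block ends, so the returned pointer dangled. The replacement writes through caller-owned storage instead. The hazard in miniature, with demo types standing in for struct bkey:

    struct demo_key { unsigned long high, low; };

    #define BAD_PRECEDING(h, l)                                    \
    ({                                                             \
        struct demo_key *_ret = &(struct demo_key){ (h), (l) };   \
        _ret;  /* dangling: the literal dies with this block */   \
    })

    /* Safe shape: the caller supplies the storage, as preceding_key()
     * now does with an on-stack struct bkey. */
    static inline void good_preceding(unsigned long h, unsigned long l,
                                      struct demo_key *out)
    {
        out->high = h;
        out->low = l;
    }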
@@ -431,8 +431,13 @@ STORE(bch_cached_dev)
 			bch_writeback_queue(dc);
 		}
 
+		/*
+		 * Only set BCACHE_DEV_WB_RUNNING when cached device attached to
+		 * a cache set, otherwise it doesn't make sense.
+		 */
 		if (attr == &sysfs_writeback_percent)
-			if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
+			if ((dc->disk.c != NULL) &&
+				(!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
 				schedule_delayed_work(&dc->writeback_rate_update,
 					dc->writeback_rate_update_seconds * HZ);
 
@@ -905,7 +905,7 @@ static void dvb_frontend_get_frequency_limits(struct dvb_frontend *fe,
 			 "DVB: adapter %i frontend %u frequency limits undefined - fix the driver\n",
 			 fe->dvb->num, fe->id);
 
-	dprintk("frequency interval: tuner: %u...%u, frontend: %u...%u",
+	dev_dbg(fe->dvb->device, "frequency interval: tuner: %u...%u, frontend: %u...%u",
 		tuner_min, tuner_max, frontend_min, frontend_max);
 
 	/* If the standard is for satellite, convert frequencies to kHz */
@@ -560,7 +560,7 @@ struct hfi_capability {
 
 struct hfi_capabilities {
 	u32 num_capabilities;
-	struct hfi_capability *data;
+	struct hfi_capability data[];
 };
 
 #define HFI_DEBUG_MSG_LOW	0x01
@@ -717,7 +717,7 @@ struct hfi_profile_level {
 
 struct hfi_profile_level_supported {
 	u32 profile_count;
-	struct hfi_profile_level *profile_level;
+	struct hfi_profile_level profile_level[];
 };
 
 struct hfi_quality_vs_speed {
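
Both struct changes above swap a pointer field for a C99 flexible array member. These structs overlay variable-length data coming back from the firmware, where the entries physically follow the count in the same buffer; a trailing unsized array maps that layout directly, while a pointer field would merely reinterpret whatever bytes sit there as a host address. The idea, with hypothetical names:

    #include <stddef.h>
    #include <stdint.h>

    struct demo_capability { uint32_t type, min, max, step; };

    struct demo_capabilities {
        uint32_t num_capabilities;
        struct demo_capability data[];  /* entries follow in the same buffer */
    };

    /* Overlaying a message buffer received from the peer: */
    static const struct demo_capability *first_cap(const void *buf)
    {
        const struct demo_capabilities *caps = buf;

        return caps->num_capabilities ? &caps->data[0] : NULL;
    }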
@@ -303,11 +303,19 @@ static const struct attribute_group *pmem_attribute_groups[] = {
 	NULL,
 };
 
-static void pmem_release_queue(void *q)
+static void __pmem_release_queue(struct percpu_ref *ref)
 {
+	struct request_queue *q;
+
+	q = container_of(ref, typeof(*q), q_usage_counter);
 	blk_cleanup_queue(q);
 }
 
+static void pmem_release_queue(void *ref)
+{
+	__pmem_release_queue(ref);
+}
+
 static void pmem_freeze_queue(struct percpu_ref *ref)
 {
 	struct request_queue *q;
@@ -399,12 +407,10 @@ static int pmem_attach_disk(struct device *dev,
 	if (!q)
 		return -ENOMEM;
 
-	if (devm_add_action_or_reset(dev, pmem_release_queue, q))
-		return -ENOMEM;
-
 	pmem->pfn_flags = PFN_DEV;
 	pmem->pgmap.ref = &q->q_usage_counter;
 	pmem->pgmap.kill = pmem_freeze_queue;
+	pmem->pgmap.cleanup = __pmem_release_queue;
 	if (is_nd_pfn(dev)) {
 		if (setup_pagemap_fsdax(dev, &pmem->pgmap))
 			return -ENOMEM;
@@ -425,6 +431,9 @@ static int pmem_attach_disk(struct device *dev,
 		pmem->pfn_flags |= PFN_MAP;
 		memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
 	} else {
+		if (devm_add_action_or_reset(dev, pmem_release_queue,
+					&q->q_usage_counter))
+			return -ENOMEM;
 		addr = devm_memremap(dev, pmem->phys_addr,
 				pmem->size, ARCH_MEMREMAP_PMEM);
 		memcpy(&bb_res, &nsio->res, sizeof(bb_res));
@@ -20,12 +20,16 @@
 #include <linux/seq_buf.h>
 
 struct pci_p2pdma {
-	struct percpu_ref devmap_ref;
-	struct completion devmap_ref_done;
 	struct gen_pool *pool;
 	bool p2pmem_published;
 };
 
+struct p2pdma_pagemap {
+	struct dev_pagemap pgmap;
+	struct percpu_ref ref;
+	struct completion ref_done;
+};
+
 static ssize_t size_show(struct device *dev, struct device_attribute *attr,
 			 char *buf)
 {
@@ -74,41 +78,45 @@ static const struct attribute_group p2pmem_group = {
 	.name = "p2pmem",
 };
 
+static struct p2pdma_pagemap *to_p2p_pgmap(struct percpu_ref *ref)
+{
+	return container_of(ref, struct p2pdma_pagemap, ref);
+}
+
 static void pci_p2pdma_percpu_release(struct percpu_ref *ref)
 {
-	struct pci_p2pdma *p2p =
-		container_of(ref, struct pci_p2pdma, devmap_ref);
+	struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref);
 
-	complete_all(&p2p->devmap_ref_done);
+	complete(&p2p_pgmap->ref_done);
 }
 
 static void pci_p2pdma_percpu_kill(struct percpu_ref *ref)
 {
-	/*
-	 * pci_p2pdma_add_resource() may be called multiple times
-	 * by a driver and may register the percpu_kill devm action multiple
-	 * times. We only want the first action to actually kill the
-	 * percpu_ref.
-	 */
-	if (percpu_ref_is_dying(ref))
-		return;
-
 	percpu_ref_kill(ref);
 }
 
+static void pci_p2pdma_percpu_cleanup(struct percpu_ref *ref)
+{
+	struct p2pdma_pagemap *p2p_pgmap = to_p2p_pgmap(ref);
+
+	wait_for_completion(&p2p_pgmap->ref_done);
+	percpu_ref_exit(&p2p_pgmap->ref);
+}
+
 static void pci_p2pdma_release(void *data)
 {
 	struct pci_dev *pdev = data;
+	struct pci_p2pdma *p2pdma = pdev->p2pdma;
 
-	if (!pdev->p2pdma)
+	if (!p2pdma)
 		return;
 
-	wait_for_completion(&pdev->p2pdma->devmap_ref_done);
-	percpu_ref_exit(&pdev->p2pdma->devmap_ref);
-
-	gen_pool_destroy(pdev->p2pdma->pool);
-	sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
+	/* Flush and disable pci_alloc_p2p_mem() */
 	pdev->p2pdma = NULL;
+	synchronize_rcu();
+
+	gen_pool_destroy(p2pdma->pool);
+	sysfs_remove_group(&pdev->dev.kobj, &p2pmem_group);
 }
 
 static int pci_p2pdma_setup(struct pci_dev *pdev)
@@ -124,12 +132,6 @@ static int pci_p2pdma_setup(struct pci_dev *pdev)
 	if (!p2p->pool)
 		goto out;
 
-	init_completion(&p2p->devmap_ref_done);
-	error = percpu_ref_init(&p2p->devmap_ref,
-			pci_p2pdma_percpu_release, 0, GFP_KERNEL);
-	if (error)
-		goto out_pool_destroy;
-
 	error = devm_add_action_or_reset(&pdev->dev, pci_p2pdma_release, pdev);
 	if (error)
 		goto out_pool_destroy;
@@ -163,6 +165,7 @@ out:
 int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
 			    u64 offset)
 {
+	struct p2pdma_pagemap *p2p_pgmap;
 	struct dev_pagemap *pgmap;
 	void *addr;
 	int error;
@@ -185,18 +188,27 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
 		return error;
 	}
 
-	pgmap = devm_kzalloc(&pdev->dev, sizeof(*pgmap), GFP_KERNEL);
-	if (!pgmap)
+	p2p_pgmap = devm_kzalloc(&pdev->dev, sizeof(*p2p_pgmap), GFP_KERNEL);
+	if (!p2p_pgmap)
 		return -ENOMEM;
 
+	init_completion(&p2p_pgmap->ref_done);
+	error = percpu_ref_init(&p2p_pgmap->ref,
+			pci_p2pdma_percpu_release, 0, GFP_KERNEL);
+	if (error)
+		goto pgmap_free;
+
+	pgmap = &p2p_pgmap->pgmap;
+
 	pgmap->res.start = pci_resource_start(pdev, bar) + offset;
 	pgmap->res.end = pgmap->res.start + size - 1;
 	pgmap->res.flags = pci_resource_flags(pdev, bar);
-	pgmap->ref = &pdev->p2pdma->devmap_ref;
+	pgmap->ref = &p2p_pgmap->ref;
 	pgmap->type = MEMORY_DEVICE_PCI_P2PDMA;
 	pgmap->pci_p2pdma_bus_offset = pci_bus_address(pdev, bar) -
 		pci_resource_start(pdev, bar);
 	pgmap->kill = pci_p2pdma_percpu_kill;
+	pgmap->cleanup = pci_p2pdma_percpu_cleanup;
 
 	addr = devm_memremap_pages(&pdev->dev, pgmap);
 	if (IS_ERR(addr)) {
@@ -204,19 +216,22 @@ int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
 		goto pgmap_free;
 	}
 
-	error = gen_pool_add_virt(pdev->p2pdma->pool, (unsigned long)addr,
-			pci_bus_address(pdev, bar) + offset,
-			resource_size(&pgmap->res), dev_to_node(&pdev->dev));
+	error = gen_pool_add_owner(pdev->p2pdma->pool, (unsigned long)addr,
+			pci_bus_address(pdev, bar) + offset,
+			resource_size(&pgmap->res), dev_to_node(&pdev->dev),
+			&p2p_pgmap->ref);
 	if (error)
-		goto pgmap_free;
+		goto pages_free;
 
 	pci_info(pdev, "added peer-to-peer DMA memory %pR\n",
		 &pgmap->res);
 
 	return 0;
 
+pages_free:
+	devm_memunmap_pages(&pdev->dev, pgmap);
 pgmap_free:
-	devm_kfree(&pdev->dev, pgmap);
+	devm_kfree(&pdev->dev, p2p_pgmap);
 	return error;
 }
 EXPORT_SYMBOL_GPL(pci_p2pdma_add_resource);
@@ -585,19 +600,30 @@ EXPORT_SYMBOL_GPL(pci_p2pmem_find_many);
  */
 void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
 {
-	void *ret;
+	void *ret = NULL;
+	struct percpu_ref *ref;
 
+	/*
+	 * Pairs with synchronize_rcu() in pci_p2pdma_release() to
+	 * ensure pdev->p2pdma is non-NULL for the duration of the
+	 * read-lock.
+	 */
+	rcu_read_lock();
 	if (unlikely(!pdev->p2pdma))
-		return NULL;
-
-	if (unlikely(!percpu_ref_tryget_live(&pdev->p2pdma->devmap_ref)))
-		return NULL;
-
-	ret = (void *)gen_pool_alloc(pdev->p2pdma->pool, size);
-
-	if (unlikely(!ret))
-		percpu_ref_put(&pdev->p2pdma->devmap_ref);
+		goto out;
+
+	ret = (void *)gen_pool_alloc_owner(pdev->p2pdma->pool, size,
+			(void **) &ref);
+	if (!ret)
+		goto out;
+
+	if (unlikely(!percpu_ref_tryget_live(ref))) {
+		gen_pool_free(pdev->p2pdma->pool, (unsigned long) ret, size);
+		ret = NULL;
+		goto out;
+	}
+out:
+	rcu_read_unlock();
 	return ret;
 }
 EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
@@ -610,8 +636,11 @@ EXPORT_SYMBOL_GPL(pci_alloc_p2pmem);
  */
 void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size)
 {
-	gen_pool_free(pdev->p2pdma->pool, (uintptr_t)addr, size);
-	percpu_ref_put(&pdev->p2pdma->devmap_ref);
+	struct percpu_ref *ref;
+
+	gen_pool_free_owner(pdev->p2pdma->pool, (uintptr_t)addr, size,
+			(void **) &ref);
+	percpu_ref_put(ref);
 }
 EXPORT_SYMBOL_GPL(pci_free_p2pmem);
 
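
The p2pdma hunks above implement a publish/unpublish lifecycle: readers look up pdev->p2pdma under rcu_read_lock() and pin the backing resource with a per-pagemap percpu_ref; teardown NULLs the pointer, and synchronize_rcu() then guarantees no reader can still be dereferencing it before the pool is destroyed. The skeleton of that pattern, as an illustrative sketch rather than the driver's code:

    struct demo_resource {
        struct percpu_ref ref;
        /* ... pool, bookkeeping ... */
    };

    static struct demo_resource *demo_published;   /* the shared pointer */

    static bool demo_tryget(void)
    {
        bool ok = false;

        rcu_read_lock();
        if (demo_published)
            ok = percpu_ref_tryget_live(&demo_published->ref);
        rcu_read_unlock();   /* resource now pinned by the ref, not RCU */
        return ok;
    }

    static void demo_teardown(struct demo_resource *res)
    {
        demo_published = NULL;   /* unpublish */
        synchronize_rcu();       /* wait out every demo_tryget() section */
        /* no new refs possible; drain the percpu_ref, then free res */
    }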
@@ -694,6 +694,7 @@ static int mlxreg_hotplug_remove(struct platform_device *pdev)
 
 	/* Clean interrupts setup. */
 	mlxreg_hotplug_unset_irq(priv);
+	devm_free_irq(&pdev->dev, priv->irq, priv);
 
 	return 0;
 }
@@ -65,10 +65,12 @@ static bool asus_q500a_i8042_filter(unsigned char data, unsigned char str,
 
 static struct quirk_entry quirk_asus_unknown = {
 	.wapf = 0,
+	.wmi_backlight_set_devstate = true,
 };
 
 static struct quirk_entry quirk_asus_q500a = {
 	.i8042_filter = asus_q500a_i8042_filter,
+	.wmi_backlight_set_devstate = true,
 };
 
 /*
@@ -79,26 +81,32 @@ static struct quirk_entry quirk_asus_q500a = {
 static struct quirk_entry quirk_asus_x55u = {
 	.wapf = 4,
 	.wmi_backlight_power = true,
+	.wmi_backlight_set_devstate = true,
 	.no_display_toggle = true,
 };
 
 static struct quirk_entry quirk_asus_wapf4 = {
 	.wapf = 4,
+	.wmi_backlight_set_devstate = true,
 };
 
 static struct quirk_entry quirk_asus_x200ca = {
 	.wapf = 2,
+	.wmi_backlight_set_devstate = true,
 };
 
 static struct quirk_entry quirk_asus_ux303ub = {
 	.wmi_backlight_native = true,
+	.wmi_backlight_set_devstate = true,
 };
 
 static struct quirk_entry quirk_asus_x550lb = {
+	.wmi_backlight_set_devstate = true,
 	.xusb2pr = 0x01D9,
 };
 
 static struct quirk_entry quirk_asus_forceals = {
+	.wmi_backlight_set_devstate = true,
 	.wmi_force_als_set = true,
 };
 
@@ -2146,7 +2146,7 @@ static int asus_wmi_add(struct platform_device *pdev)
 		err = asus_wmi_backlight_init(asus);
 		if (err && err != -ENODEV)
 			goto fail_backlight;
-	} else
+	} else if (asus->driver->quirks->wmi_backlight_set_devstate)
 		err = asus_wmi_set_devstate(ASUS_WMI_DEVID_BACKLIGHT, 2, NULL);
 
 	if (asus_wmi_has_fnlock_key(asus)) {
@@ -31,6 +31,7 @@ struct quirk_entry {
 	bool store_backlight_power;
 	bool wmi_backlight_power;
 	bool wmi_backlight_native;
+	bool wmi_backlight_set_devstate;
 	bool wmi_force_als_set;
 	int wapf;
 	/*
@@ -76,12 +76,24 @@ static void notify_handler(acpi_handle handle, u32 event, void *context)
 	struct platform_device *device = context;
 	struct intel_vbtn_priv *priv = dev_get_drvdata(&device->dev);
 	unsigned int val = !(event & 1); /* Even=press, Odd=release */
-	const struct key_entry *ke_rel;
+	const struct key_entry *ke, *ke_rel;
 	bool autorelease;
 
 	if (priv->wakeup_mode) {
-		if (sparse_keymap_entry_from_scancode(priv->input_dev, event)) {
+		ke = sparse_keymap_entry_from_scancode(priv->input_dev, event);
+		if (ke) {
 			pm_wakeup_hard_event(&device->dev);
+
+			/*
+			 * Switch events like tablet mode will wake the device
+			 * and report the new switch position to the input
+			 * subsystem.
+			 */
+			if (ke->type == KE_SW)
+				sparse_keymap_report_event(priv->input_dev,
+							   event,
+							   val,
+							   0);
 			return;
 		}
 		goto out_unknown;
@@ -2032,7 +2032,7 @@ static int __init mlxplat_init(void)
 
 	for (i = 0; i < ARRAY_SIZE(mlxplat_mux_data); i++) {
 		priv->pdev_mux[i] = platform_device_register_resndata(
-						&mlxplat_dev->dev,
+						&priv->pdev_i2c->dev,
 						"i2c-mux-reg", i, NULL,
 						0, &mlxplat_mux_data[i],
 						sizeof(mlxplat_mux_data[i]));
@@ -2,6 +2,7 @@
 #include <linux/mm.h>
 #include <linux/gfp.h>
 #include <linux/kernel.h>
+#include <linux/workqueue.h>
 
 #include <asm/mce.h>
 
@@ -123,16 +124,12 @@
 /* Amount of errors after which we offline */
 static unsigned int count_threshold = COUNT_MASK;
 
-/*
- * The timer "decays" element count each timer_interval which is 24hrs by
- * default.
- */
-
-#define CEC_TIMER_DEFAULT_INTERVAL	24 * 60 * 60	/* 24 hrs */
-#define CEC_TIMER_MIN_INTERVAL		 1 * 60 * 60	/* 1h */
-#define CEC_TIMER_MAX_INTERVAL	30 * 24 * 60 * 60	/* one month */
-static struct timer_list cec_timer;
-static u64 timer_interval = CEC_TIMER_DEFAULT_INTERVAL;
+/* Each element "decays" each decay_interval which is 24hrs by default. */
+#define CEC_DECAY_DEFAULT_INTERVAL	24 * 60 * 60	/* 24 hrs */
+#define CEC_DECAY_MIN_INTERVAL		 1 * 60 * 60	/* 1h */
+#define CEC_DECAY_MAX_INTERVAL	30 * 24 * 60 * 60	/* one month */
+static struct delayed_work cec_work;
+static u64 decay_interval = CEC_DECAY_DEFAULT_INTERVAL;
 
 /*
  * Decrement decay value. We're using DECAY_BITS bits to denote decay of an
@@ -160,20 +157,21 @@ static void do_spring_cleaning(struct ce_array *ca)
 /*
  * @interval in seconds
  */
-static void cec_mod_timer(struct timer_list *t, unsigned long interval)
+static void cec_mod_work(unsigned long interval)
 {
 	unsigned long iv;
 
-	iv = interval * HZ + jiffies;
-
-	mod_timer(t, round_jiffies(iv));
+	iv = interval * HZ;
+	mod_delayed_work(system_wq, &cec_work, round_jiffies(iv));
 }
 
-static void cec_timer_fn(struct timer_list *unused)
+static void cec_work_fn(struct work_struct *work)
 {
+	mutex_lock(&ce_mutex);
 	do_spring_cleaning(&ce_arr);
+	mutex_unlock(&ce_mutex);
 
-	cec_mod_timer(&cec_timer, timer_interval);
+	cec_mod_work(decay_interval);
 }
 
 /*
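
The conversion above trades a self-arming timer for a self-arming delayed work item, which is what permits the new mutex_lock(&ce_mutex) in the callback: timer callbacks run in atomic context and must not sleep, while work items run in process context. The re-arming pattern in isolation (a sketch; note that mod_delayed_work() takes a relative delay, which is why the old "+ jiffies" is dropped):

    static struct delayed_work demo_work;

    static void demo_mod_work(unsigned long interval)
    {
        unsigned long iv = interval * HZ;   /* relative, no "+ jiffies" */

        mod_delayed_work(system_wq, &demo_work, round_jiffies(iv));
    }

    static void demo_work_fn(struct work_struct *work)
    {
        /* sleeping locks are fine here, unlike in a timer callback */
        demo_mod_work(24 * 60 * 60);        /* re-arm for tomorrow */
    }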
@@ -183,32 +181,38 @@ static void cec_timer_fn(struct timer_list *unused)
  */
 static int __find_elem(struct ce_array *ca, u64 pfn, unsigned int *to)
 {
+	int min = 0, max = ca->n - 1;
 	u64 this_pfn;
-	int min = 0, max = ca->n;
 
-	while (min < max) {
-		int tmp = (max + min) >> 1;
+	while (min <= max) {
+		int i = (min + max) >> 1;
 
-		this_pfn = PFN(ca->array[tmp]);
+		this_pfn = PFN(ca->array[i]);
 
 		if (this_pfn < pfn)
-			min = tmp + 1;
+			min = i + 1;
 		else if (this_pfn > pfn)
-			max = tmp;
-		else {
-			min = tmp;
-			break;
+			max = i - 1;
+		else if (this_pfn == pfn) {
+			if (to)
+				*to = i;
+
+			return i;
 		}
 	}
 
+	/*
+	 * When the loop terminates without finding @pfn, min has the index of
+	 * the element slot where the new @pfn should be inserted. The loop
+	 * terminates when min > max, which means the min index points to the
+	 * bigger element while the max index to the smaller element, in-between
+	 * which the new @pfn belongs to.
+	 *
+	 * For more details, see exercise 1, Section 6.2.1 in TAOCP, vol. 3.
+	 */
 	if (to)
 		*to = min;
 
-	this_pfn = PFN(ca->array[min]);
-
-	if (this_pfn == pfn)
-		return min;
-
 	return -ENOKEY;
 }
 
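
The rewritten __find_elem() is the textbook closed-interval binary search: on a hit it returns the index immediately, and on a miss the loop exits with min > max, leaving min at the slot where the new pfn must be inserted to keep the array sorted, a property the previous open-interval variant did not reliably provide. A userspace rendition that can be compiled and tested directly:

    #include <stdio.h>

    static int find_elem(const unsigned long long *arr, int n,
                         unsigned long long val, unsigned int *to)
    {
        int min = 0, max = n - 1;

        while (min <= max) {
            int i = (min + max) >> 1;

            if (arr[i] < val)
                min = i + 1;
            else if (arr[i] > val)
                max = i - 1;
            else {
                if (to)
                    *to = i;
                return i;
            }
        }

        if (to)
            *to = min;   /* insertion slot that keeps arr sorted */
        return -1;
    }

    int main(void)
    {
        unsigned long long a[] = { 2, 5, 9, 13 };
        unsigned int to;

        find_elem(a, 4, 7, &to);
        printf("insert 7 at index %u\n", to);   /* prints 2: between 5 and 9 */
        return 0;
    }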
@@ -374,15 +378,15 @@ static int decay_interval_set(void *data, u64 val)
 {
 	*(u64 *)data = val;
 
-	if (val < CEC_TIMER_MIN_INTERVAL)
+	if (val < CEC_DECAY_MIN_INTERVAL)
 		return -EINVAL;
 
-	if (val > CEC_TIMER_MAX_INTERVAL)
+	if (val > CEC_DECAY_MAX_INTERVAL)
 		return -EINVAL;
 
-	timer_interval = val;
+	decay_interval = val;
 
-	cec_mod_timer(&cec_timer, timer_interval);
+	cec_mod_work(decay_interval);
 	return 0;
 }
 DEFINE_DEBUGFS_ATTRIBUTE(decay_interval_ops, u64_get, decay_interval_set, "%lld\n");
@@ -426,7 +430,7 @@ static int array_dump(struct seq_file *m, void *v)
 
 	seq_printf(m, "Flags: 0x%x\n", ca->flags);
 
-	seq_printf(m, "Timer interval: %lld seconds\n", timer_interval);
+	seq_printf(m, "Decay interval: %lld seconds\n", decay_interval);
 	seq_printf(m, "Decays: %lld\n", ca->decays_done);
 
 	seq_printf(m, "Action threshold: %d\n", count_threshold);
@@ -472,7 +476,7 @@ static int __init create_debugfs_nodes(void)
 	}
 
 	decay = debugfs_create_file("decay_interval", S_IRUSR | S_IWUSR, d,
-				    &timer_interval, &decay_interval_ops);
+				    &decay_interval, &decay_interval_ops);
 	if (!decay) {
 		pr_warn("Error creating decay_interval debugfs node!\n");
 		goto err;
@@ -508,8 +512,8 @@ void __init cec_init(void)
 	if (create_debugfs_nodes())
 		return;
 
-	timer_setup(&cec_timer, cec_timer_fn, 0);
-	cec_mod_timer(&cec_timer, CEC_TIMER_DEFAULT_INTERVAL);
+	INIT_DELAYED_WORK(&cec_work, cec_work_fn);
+	schedule_delayed_work(&cec_work, CEC_DECAY_DEFAULT_INTERVAL);
 
 	pr_info("Correctable Errors collector initialized.\n");
 }
@@ -403,12 +403,12 @@ static int tps6507x_pmic_probe(struct platform_device *pdev)
 	/* common for all regulators */
 	tps->mfd = tps6507x_dev;
 
-	for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++, init_data++) {
+	for (i = 0; i < TPS6507X_NUM_REGULATOR; i++, info++) {
 		/* Register the regulators */
 		tps->info[i] = info;
-		if (init_data && init_data->driver_data) {
+		if (init_data && init_data[i].driver_data) {
 			struct tps6507x_reg_platform_data *data =
-					init_data->driver_data;
+					init_data[i].driver_data;
 			info->defdcdc_default = data->defdcdc_default;
 		}
 
Some files were not shown because too many files have changed in this diff.