Linux 4.1-rc5
-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQEcBAABAgAGBQJVYnloAAoJEHm+PkMAQRiGCgkH/j3r2djOOm4h83FXrShaHORY
p8TBI3FNj4fzLk2PfzqbmiDw2T2CwygB+pxb2Ac9CE99epw8qPk2SRvPXBpdKR7t
lolhhwfzApLJMZbhzNLVywUCDUhFoiEWRhmPqIfA3WXFcIW3t5VNXAoIFjV5HFr6
sYUlaxSI1XiQ5tldVv8D6YSFHms41pisziBIZmzhIUg10P6Vv3D0FbE74fjAJwx0
+08zj3EO7yQMv7Aeeq8F8AJ3628142rcZf0NWF5ohlKLRK3gt0cl9jO5U4Co2dDt
29v03LP5EI6jDKkIbuWlqRMq9YxJz7N3wnkzV0EJiqXucoqPLFDqzbxB4gnS1pI=
=7vbA
-----END PGP SIGNATURE-----

Merge branch 'linus' into x86/fpu

Resolve semantic conflict in arch/x86/kvm/cpuid.c with:

  c447e76b4cab ("kvm/fpu: Enable eager restore kvm FPU for MPX")

by removing the FPU internal include files.

Signed-off-by: Ingo Molnar <mingo@kernel.org>
commit 3152657f10
@@ -17,7 +17,8 @@ Required properties:
 - #clock-cells: from common clock binding; shall be set to 1.
 - clocks: from common clock binding; list of parent clock
   handles, shall be xtal reference clock or xtal and clkin for
-  si5351c only.
+  si5351c only. Corresponding clock input names are "xtal" and
+  "clkin" respectively.
 - #address-cells: shall be set to 1.
 - #size-cells: shall be set to 0.
 
@@ -71,6 +72,7 @@ i2c-master-node {
 
         /* connect xtal input to 25MHz reference */
         clocks = <&ref25>;
+        clock-names = "xtal";
 
         /* connect xtal input as source of pll0 and pll1 */
         silabs,pll-source = <0 0>, <1 0>;

@@ -3,7 +3,8 @@
 Required properties:
 - compatible: Should be "cdns,[<chip>-]{emac}"
   Use "cdns,at91rm9200-emac" Atmel at91rm9200 SoC.
-  or the generic form: "cdns,emac".
+  Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
+  Or the generic form: "cdns,emac".
 - reg: Address and length of the register set for the device
 - interrupts: Should contain macb interrupt
 - phy-mode: see ethernet.txt file in the same directory.

@@ -169,6 +169,10 @@ Shadow pages contain the following information:
     Contains the value of cr4.smep && !cr0.wp for which the page is valid
     (pages for which this is true are different from other pages; see the
     treatment of cr0.wp=0 below).
+  role.smap_andnot_wp:
+    Contains the value of cr4.smap && !cr0.wp for which the page is valid
+    (pages for which this is true are different from other pages; see the
+    treatment of cr0.wp=0 below).
   gfn:
     Either the guest page table containing the translations shadowed by this
     page, or the base page frame for linear translations. See role.direct.
@@ -344,10 +348,16 @@ on fault type:
 
   (user write faults generate a #PF)
 
-In the first case there is an additional complication if CR4.SMEP is
-enabled: since we've turned the page into a kernel page, the kernel may now
-execute it.  We handle this by also setting spte.nx.  If we get a user
-fetch or read fault, we'll change spte.u=1 and spte.nx=gpte.nx back.
+In the first case there are two additional complications:
+- if CR4.SMEP is enabled: since we've turned the page into a kernel page,
+  the kernel may now execute it.  We handle this by also setting spte.nx.
+  If we get a user fetch or read fault, we'll change spte.u=1 and
+  spte.nx=gpte.nx back.
+- if CR4.SMAP is disabled: since the page has been changed to a kernel
+  page, it can not be reused when CR4.SMAP is enabled.  We set
+  CR4.SMAP && !CR0.WP into shadow page's role to avoid this case.  Note,
+  here we do not care the case that CR4.SMAP is enabled since KVM will
+  directly inject #PF to guest due to failed permission check.
 
 To prevent an spte that was converted into a kernel page with cr0.wp=0
 from being written by the kernel after cr0.wp has changed to 1, we make

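(Editor's note: the role bits described in the documentation above map directly to code elsewhere in this same merge. A minimal sketch of the derivation, lifted from the kvm_init_shadow_mmu hunk further down, not new API:

    /* smap_andnot_wp is recorded in the page role so that shadow pages
     * created under cr4.smap && !cr0.wp are never reused once SMAP
     * enforcement applies. */
    bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
    context->base_role.smap_andnot_wp = smap && !is_write_protection(vcpu);
)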
MAINTAINERS | 15

@@ -3825,10 +3825,11 @@ M: David Woodhouse <dwmw2@infradead.org>
 L: linux-embedded@vger.kernel.org
 S: Maintained
 
-EMULEX LPFC FC SCSI DRIVER
-M: James Smart <james.smart@emulex.com>
+EMULEX/AVAGO LPFC FC/FCOE SCSI DRIVER
+M: James Smart <james.smart@avagotech.com>
+M: Dick Kennedy <dick.kennedy@avagotech.com>
 L: linux-scsi@vger.kernel.org
-W: http://sourceforge.net/projects/lpfcxxxx
+W: http://www.avagotech.com
 S: Supported
 F: drivers/scsi/lpfc/
 
@@ -4536,7 +4537,7 @@ M: Jean Delvare <jdelvare@suse.de>
 M: Guenter Roeck <linux@roeck-us.net>
 L: lm-sensors@lm-sensors.org
 W: http://www.lm-sensors.org/
-T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
+T: quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
 T: git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
 S: Maintained
 F: Documentation/hwmon/
@@ -8829,9 +8830,11 @@ F: drivers/misc/phantom.c
 F: include/uapi/linux/phantom.h
 
 SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
-M: Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
+M: Jayamohan Kallickal <jayamohan.kallickal@avagotech.com>
+M: Minh Tran <minh.tran@avagotech.com>
+M: John Soni Jose <sony.john-n@avagotech.com>
 L: linux-scsi@vger.kernel.org
-W: http://www.emulex.com
+W: http://www.avagotech.com
 S: Supported
 F: drivers/scsi/be2iscsi/

Makefile | 2

@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc4
+EXTRAVERSION = -rc5
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*

@@ -193,7 +193,7 @@
        };
 
        gem0: ethernet@e000b000 {
-           compatible = "cdns,gem";
+           compatible = "cdns,zynq-gem";
            reg = <0xe000b000 0x1000>;
            status = "disabled";
            interrupts = <0 22 4>;
@@ -204,7 +204,7 @@
        };
 
        gem1: ethernet@e000c000 {
-           compatible = "cdns,gem";
+           compatible = "cdns,zynq-gem";
            reg = <0xe000c000 0x1000>;
            status = "disabled";
            interrupts = <0 45 4>;

@@ -272,6 +272,7 @@ void xen_arch_pre_suspend(void) { }
 void xen_arch_post_suspend(int suspend_cancelled) { }
 void xen_timer_resume(void) { }
 void xen_arch_resume(void) { }
+void xen_arch_suspend(void) { }
 
 
 /* In the hypervisor.S file. */

@@ -73,7 +73,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
            uint64_t nip, uint64_t addr)
 {
    uint64_t srr1;
-   int index = __this_cpu_inc_return(mce_nest_count);
+   int index = __this_cpu_inc_return(mce_nest_count) - 1;
    struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
 
    /*
@@ -184,7 +184,7 @@ void machine_check_queue_event(void)
    if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
        return;
 
-   index = __this_cpu_inc_return(mce_queue_count);
+   index = __this_cpu_inc_return(mce_queue_count) - 1;
    /* If queue is full, just return for now. */
    if (index >= MAX_MC_EVT) {
        __this_cpu_dec(mce_queue_count);

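(Editor's note: the two mce.c hunks above are the same off-by-one: __this_cpu_inc_return() returns the already-incremented counter, so the slot owned by the current event is one less than the returned value. A minimal sketch of the corrected pattern, mirroring the code above:

    /* inc_return yields the *new* count; index the per-CPU array at count - 1 */
    int index = __this_cpu_inc_return(mce_nest_count) - 1;
    struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
)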
@@ -213,6 +213,7 @@ SECTIONS
        *(.opd)
    }
 
+   . = ALIGN(256);
    .got : AT(ADDR(.got) - LOAD_OFFSET) {
        __toc_start = .;
 #ifndef CONFIG_RELOCATABLE

@@ -1952,7 +1952,7 @@ static void post_guest_process(struct kvmppc_vcore *vc)
  */
 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
-   struct kvm_vcpu *vcpu;
+   struct kvm_vcpu *vcpu, *vnext;
    int i;
    int srcu_idx;
 
@@ -1982,7 +1982,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
     */
    if ((threads_per_core > 1) &&
        ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
-       list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
+       list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
+                    arch.run_list) {
            vcpu->arch.ret = -EBUSY;
            kvmppc_remove_runnable(vc, vcpu);
            wake_up(&vcpu->arch.cpu_run);

@@ -689,27 +689,34 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
-   pte_t *ptep;
-   struct page *page;
+   pte_t *ptep, pte;
    unsigned shift;
    unsigned long mask, flags;
+   struct page *page = ERR_PTR(-EINVAL);
 
    local_irq_save(flags);
    ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
-
-   /* Verify it is a huge page else bail. */
-   if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep)) {
-       local_irq_restore(flags);
-       return ERR_PTR(-EINVAL);
+   if (!ptep)
+       goto no_page;
+   pte = READ_ONCE(*ptep);
+   /*
+    * Verify it is a huge page else bail.
+    * Transparent hugepages are handled by generic code. We can skip them
+    * here.
+    */
+   if (!shift || pmd_trans_huge(__pmd(pte_val(pte))))
+       goto no_page;
+
+   if (!pte_present(pte)) {
+       page = NULL;
+       goto no_page;
    }
    mask = (1UL << shift) - 1;
-   page = pte_page(*ptep);
+   page = pte_page(pte);
    if (page)
        page += (address & mask) / PAGE_SIZE;
 
+no_page:
    local_irq_restore(flags);
    return page;
 }

@@ -839,6 +839,17 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm,
     * hash fault look at them.
     */
    memset(pgtable, 0, PTE_FRAG_SIZE);
+   /*
+    * Serialize against find_linux_pte_or_hugepte which does lock-less
+    * lookup in page tables with local interrupts disabled. For huge pages
+    * it casts pmd_t to pte_t. Since format of pte_t is different from
+    * pmd_t we want to prevent transit from pmd pointing to page table
+    * to pmd pointing to huge page (and back) while interrupts are disabled.
+    * We clear pmd to possibly replace it with page table pointer in
+    * different code paths. So make sure we wait for the parallel
+    * find_linux_pte_or_hugepage to finish.
+    */
+   kick_all_cpus_sync();
    return old_pmd;
 }

@@ -16,11 +16,12 @@
 #define GHASH_DIGEST_SIZE 16
 
 struct ghash_ctx {
-   u8 icv[16];
-   u8 key[16];
+   u8 key[GHASH_BLOCK_SIZE];
 };
 
 struct ghash_desc_ctx {
+   u8 icv[GHASH_BLOCK_SIZE];
+   u8 key[GHASH_BLOCK_SIZE];
    u8 buffer[GHASH_BLOCK_SIZE];
    u32 bytes;
 };
@@ -28,8 +29,10 @@ struct ghash_desc_ctx {
 static int ghash_init(struct shash_desc *desc)
 {
    struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+   struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
 
    memset(dctx, 0, sizeof(*dctx));
+   memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
 
    return 0;
 }
@@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
    }
 
    memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
-   memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
 
    return 0;
 }
@@ -54,7 +56,6 @@ static int ghash_update(struct shash_desc *desc,
            const u8 *src, unsigned int srclen)
 {
    struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-   struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
    unsigned int n;
    u8 *buf = dctx->buffer;
    int ret;
@@ -70,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
        src += n;
 
        if (!dctx->bytes) {
-           ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
+           ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
                          GHASH_BLOCK_SIZE);
            if (ret != GHASH_BLOCK_SIZE)
                return -EIO;
@@ -79,7 +80,7 @@ static int ghash_update(struct shash_desc *desc,
 
    n = srclen & ~(GHASH_BLOCK_SIZE - 1);
    if (n) {
-       ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
+       ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
        if (ret != n)
            return -EIO;
        src += n;
@@ -94,7 +95,7 @@ static int ghash_update(struct shash_desc *desc,
    return 0;
 }
 
-static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+static int ghash_flush(struct ghash_desc_ctx *dctx)
 {
    u8 *buf = dctx->buffer;
    int ret;
@@ -104,24 +105,24 @@ static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
 
        memset(pos, 0, dctx->bytes);
 
-       ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
+       ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
        if (ret != GHASH_BLOCK_SIZE)
            return -EIO;
-
-       dctx->bytes = 0;
    }
 
+   dctx->bytes = 0;
    return 0;
 }
 
 static int ghash_final(struct shash_desc *desc, u8 *dst)
 {
    struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-   struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
    int ret;
 
-   ret = ghash_flush(ctx, dctx);
+   ret = ghash_flush(dctx);
    if (!ret)
-       memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
+       memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
    return ret;
 }

@@ -125,7 +125,7 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
        /* fill page with urandom bytes */
        get_random_bytes(pg, PAGE_SIZE);
        /* exor page with stckf values */
-       for (n = 0; n < sizeof(PAGE_SIZE/sizeof(u64)); n++) {
+       for (n = 0; n < PAGE_SIZE / sizeof(u64); n++) {
            u64 *p = ((u64 *)pg) + n;
            *p ^= get_tod_clock_fast();
        }

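(Editor's note: the prng.c fix above corrects a classic sizeof() slip: sizeof(PAGE_SIZE/sizeof(u64)) measures the type of the division result, not its value, so only a handful of words in the page were being mixed. The corrected loop, exactly as in the hunk:

    /* PAGE_SIZE / sizeof(u64) counts the 64-bit words in the page (512 for
     * 4 KiB pages); sizeof() of that expression would merely be 8. */
    for (n = 0; n < PAGE_SIZE / sizeof(u64); n++) {
        u64 *p = ((u64 *)pg) + n;
        *p ^= get_tod_clock_fast();
    }
)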
@@ -494,7 +494,7 @@ static inline int pmd_large(pmd_t pmd)
    return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
 }
 
-static inline int pmd_pfn(pmd_t pmd)
+static inline unsigned long pmd_pfn(pmd_t pmd)
 {
    unsigned long origin_mask;

@@ -443,8 +443,11 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
 
 /*
  * Compile one eBPF instruction into s390x code
+ *
+ * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
+ * stack space for the large switch statement.
  */
-static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
+static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
 {
    struct bpf_insn *insn = &fp->insnsi[i];
    int jmp_off, last, insn_count = 1;
@@ -588,8 +591,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
        EMIT4(0xb9160000, dst_reg, rc_reg);
        break;
    }
-   case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / (u32) src */
-   case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % (u32) src */
+   case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */
+   case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */
    {
        int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 
@@ -602,10 +605,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
        EMIT4_IMM(0xa7090000, REG_W0, 0);
        /* lgr %w1,%dst */
        EMIT4(0xb9040000, REG_W1, dst_reg);
-       /* llgfr %dst,%src (u32 cast) */
-       EMIT4(0xb9160000, dst_reg, src_reg);
        /* dlgr %w0,%dst */
-       EMIT4(0xb9870000, REG_W0, dst_reg);
+       EMIT4(0xb9870000, REG_W0, src_reg);
        /* lgr %dst,%rc */
        EMIT4(0xb9040000, dst_reg, rc_reg);
        break;
@@ -632,8 +633,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
        EMIT4(0xb9160000, dst_reg, rc_reg);
        break;
    }
-   case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / (u32) imm */
-   case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % (u32) imm */
+   case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */
+   case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */
    {
        int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 
@@ -649,7 +650,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
        EMIT4(0xb9040000, REG_W1, dst_reg);
        /* dlg %w0,<d(imm)>(%l) */
        EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
-                 EMIT_CONST_U64((u32) imm));
+                 EMIT_CONST_U64(imm));
        /* lgr %dst,%rc */
        EMIT4(0xb9040000, dst_reg, rc_reg);
        break;

@@ -207,6 +207,7 @@ union kvm_mmu_page_role {
        unsigned nxe:1;
        unsigned cr0_wp:1;
        unsigned smep_andnot_wp:1;
+       unsigned smap_andnot_wp:1;
    };
 };
 
@@ -400,6 +401,7 @@ struct kvm_vcpu_arch {
    struct kvm_mmu_memory_cache mmu_page_header_cache;
 
    struct fpu guest_fpu;
+   bool eager_fpu;
    u64 xcr0;
    u64 guest_supported_xcr0;
    u32 guest_xstate_size;
@@ -743,6 +745,7 @@ struct kvm_x86_ops {
    void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
    unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
    void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
+   void (*fpu_activate)(struct kvm_vcpu *vcpu);
    void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
 
    void (*tlb_flush)(struct kvm_vcpu *vcpu);

@@ -95,6 +95,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
    if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
        best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
+   vcpu->arch.eager_fpu = guest_cpuid_has_mpx(vcpu);
+
    /*
     * The existing code assumes virtual address is 48-bit in the canonical
     * address checks; exit if it is ever changed.

@@ -117,4 +117,12 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
    best = kvm_find_cpuid_entry(vcpu, 7, 0);
    return best && (best->ebx & bit(X86_FEATURE_RTM));
 }
+
+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+{
+   struct kvm_cpuid_entry2 *best;
+
+   best = kvm_find_cpuid_entry(vcpu, 7, 0);
+   return best && (best->ebx & bit(X86_FEATURE_MPX));
+}
 #endif

@@ -3736,8 +3736,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
    }
 }
 
-void update_permission_bitmask(struct kvm_vcpu *vcpu,
-       struct kvm_mmu *mmu, bool ept)
+static void update_permission_bitmask(struct kvm_vcpu *vcpu,
+       struct kvm_mmu *mmu, bool ept)
 {
    unsigned bit, byte, pfec;
    u8 map;
@@ -3918,6 +3918,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 {
    bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+   bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
    struct kvm_mmu *context = &vcpu->arch.mmu;
 
    MMU_WARN_ON(VALID_PAGE(context->root_hpa));
@@ -3936,6 +3937,8 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
    context->base_role.cr0_wp = is_write_protection(vcpu);
    context->base_role.smep_andnot_wp
        = smep && !is_write_protection(vcpu);
+   context->base_role.smap_andnot_wp
+       = smap && !is_write_protection(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 
@@ -4207,12 +4210,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
              const u8 *new, int bytes)
 {
    gfn_t gfn = gpa >> PAGE_SHIFT;
-   union kvm_mmu_page_role mask = { .word = 0 };
    struct kvm_mmu_page *sp;
    LIST_HEAD(invalid_list);
    u64 entry, gentry, *spte;
    int npte;
    bool remote_flush, local_flush, zap_page;
+   union kvm_mmu_page_role mask = (union kvm_mmu_page_role) {
+       .cr0_wp = 1,
+       .cr4_pae = 1,
+       .nxe = 1,
+       .smep_andnot_wp = 1,
+       .smap_andnot_wp = 1,
+   };
 
    /*
     * If we don't have indirect shadow pages, it means no page is
@@ -4238,7 +4247,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
    ++vcpu->kvm->stat.mmu_pte_write;
    kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
-   mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
    for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
        if (detect_write_misaligned(sp, gpa, bytes) ||
              detect_write_flooding(sp)) {

@@ -71,8 +71,6 @@ enum {
 int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
-void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-       bool ept);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
@@ -166,6 +164,8 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
    int index = (pfec >> 1) +
            (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
 
+   WARN_ON(pfec & PFERR_RSVD_MASK);
+
    return (mmu->permissions[index] >> pte_access) & 1;
 }

@@ -718,6 +718,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
                      mmu_is_nested(vcpu));
        if (likely(r != RET_MMIO_PF_INVALID))
            return r;
+
+       /*
+        * page fault with PFEC.RSVD = 1 is caused by shadow
+        * page fault, should not be used to walk guest page
+        * table.
+        */
+       error_code &= ~PFERR_RSVD_MASK;
    };
 
    r = mmu_topup_memory_caches(vcpu);

@@ -4381,6 +4381,7 @@ static struct kvm_x86_ops svm_x86_ops = {
    .cache_reg = svm_cache_reg,
    .get_rflags = svm_get_rflags,
    .set_rflags = svm_set_rflags,
+   .fpu_activate = svm_fpu_activate,
    .fpu_deactivate = svm_fpu_deactivate,
 
    .tlb_flush = svm_flush_tlb,

@@ -10184,6 +10184,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
    .cache_reg = vmx_cache_reg,
    .get_rflags = vmx_get_rflags,
    .set_rflags = vmx_set_rflags,
+   .fpu_activate = vmx_fpu_activate,
    .fpu_deactivate = vmx_fpu_deactivate,
 
    .tlb_flush = vmx_flush_tlb,

@@ -701,8 +701,9 @@ EXPORT_SYMBOL_GPL(kvm_set_xcr);
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
    unsigned long old_cr4 = kvm_read_cr4(vcpu);
-   unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
-                  X86_CR4_PAE | X86_CR4_SMEP;
+   unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
+                  X86_CR4_SMEP | X86_CR4_SMAP;
 
    if (cr4 & CR4_RESERVED_BITS)
        return 1;
 
@@ -743,9 +744,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
        kvm_mmu_reset_context(vcpu);
 
-   if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
-       update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
-
    if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
        kvm_update_cpuid(vcpu);
 
@@ -6196,6 +6194,8 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
        return;
 
    page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+   if (is_error_page(page))
+       return;
    kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
 
    /*
@@ -7045,7 +7045,9 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
    copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
    __kernel_fpu_end();
    ++vcpu->stat.fpu_reload;
-   kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
+   if (!vcpu->arch.eager_fpu)
+       kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
+
    trace_kvm_fpu(0);
 }
 
@@ -7060,11 +7062,21 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                        unsigned int id)
 {
+   struct kvm_vcpu *vcpu;
+
    if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
        printk_once(KERN_WARNING
        "kvm: SMP vm created on host with unstable TSC; "
        "guest TSC will not be reliable\n");
-   return kvm_x86_ops->vcpu_create(kvm, id);
+
+   vcpu = kvm_x86_ops->vcpu_create(kvm, id);
+
+   /*
+    * Activate fpu unconditionally in case the guest needs eager FPU. It will be
+    * deactivated soon if it doesn't.
+    */
+   kvm_x86_ops->fpu_activate(vcpu);
+   return vcpu;
 }
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)

@@ -734,6 +734,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 
+static void blk_queue_bio(struct request_queue *q, struct bio *bio);
+
 struct request_queue *
 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
             spinlock_t *lock)
@@ -1578,7 +1580,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
    blk_rq_bio_prep(req->q, req, bio);
 }
 
-void blk_queue_bio(struct request_queue *q, struct bio *bio)
+static void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
    const bool sync = !!(bio->bi_rw & REQ_SYNC);
    struct blk_plug *plug;
@@ -1686,7 +1688,6 @@ out_unlock:
        spin_unlock_irq(q->queue_lock);
    }
 }
-EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */
 
 /*
  * If bio->bi_dev is a partition, remap the location

@@ -33,7 +33,7 @@ struct aead_ctx {
 /*
  * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum
  * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
- * bytes
+ * pages
  */
 #define RSGL_MAX_ENTRIES ALG_MAX_PAGES
    struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];
@@ -435,11 +435,10 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
        if (err < 0)
            goto unlock;
        usedpages += err;
-       /* chain the new scatterlist with initial list */
+       /* chain the new scatterlist with previous one */
        if (cnt)
-           scatterwalk_crypto_chain(ctx->rsgl[0].sg,
-                   ctx->rsgl[cnt].sg, 1,
-                   sg_nents(ctx->rsgl[cnt-1].sg));
+           af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]);
 
        /* we do not need more iovecs as we have sufficient memory */
        if (outlen <= usedpages)
            break;

@@ -2257,7 +2257,8 @@ static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
    page_code = GET_INQ_PAGE_CODE(cmd);
    alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
 
-   inq_response = kmalloc(alloc_len, GFP_KERNEL);
+   inq_response = kmalloc(max(alloc_len, STANDARD_INQUIRY_LENGTH),
+               GFP_KERNEL);
    if (inq_response == NULL) {
        res = -ENOMEM;
        goto out_mem;

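(Editor's note: the nvme hunk above is a buffer-sizing fix. The INQUIRY response builder writes a full standard-length response regardless of how short a transfer the host requested, so allocating only alloc_len bytes could overflow. The pattern, as used above:

    /* allocate for the larger of the requested length and what the
     * response builder will actually write */
    inq_response = kmalloc(max(alloc_len, STANDARD_INQUIRY_LENGTH),
                           GFP_KERNEL);
)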
@@ -88,6 +88,7 @@ static const struct usb_device_id ath3k_table[] = {
    { USB_DEVICE(0x04CA, 0x3007) },
    { USB_DEVICE(0x04CA, 0x3008) },
    { USB_DEVICE(0x04CA, 0x300b) },
+   { USB_DEVICE(0x04CA, 0x300f) },
    { USB_DEVICE(0x04CA, 0x3010) },
    { USB_DEVICE(0x0930, 0x0219) },
    { USB_DEVICE(0x0930, 0x0220) },
@@ -104,6 +105,7 @@ static const struct usb_device_id ath3k_table[] = {
    { USB_DEVICE(0x0cf3, 0xe003) },
    { USB_DEVICE(0x0CF3, 0xE004) },
    { USB_DEVICE(0x0CF3, 0xE005) },
+   { USB_DEVICE(0x0CF3, 0xE006) },
    { USB_DEVICE(0x13d3, 0x3362) },
    { USB_DEVICE(0x13d3, 0x3375) },
    { USB_DEVICE(0x13d3, 0x3393) },
@@ -143,6 +145,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
    { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
+   { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -158,6 +161,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
    { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+   { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },

@@ -186,6 +186,7 @@ static const struct usb_device_id blacklist_table[] = {
    { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
+   { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -202,6 +203,7 @@ static const struct usb_device_id blacklist_table[] = {
    { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+   { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
    { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
@@ -218,6 +220,7 @@ static const struct usb_device_id blacklist_table[] = {
    { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
 
    /* QCA ROME chipset */
+   { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
    { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
    { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },

@@ -1128,13 +1128,6 @@ static int si5351_dt_parse(struct i2c_client *client,
    if (!pdata)
        return -ENOMEM;
 
-   pdata->clk_xtal = of_clk_get(np, 0);
-   if (!IS_ERR(pdata->clk_xtal))
-       clk_put(pdata->clk_xtal);
-   pdata->clk_clkin = of_clk_get(np, 1);
-   if (!IS_ERR(pdata->clk_clkin))
-       clk_put(pdata->clk_clkin);
-
    /*
     * property silabs,pll-source : <num src>, [<..>]
     * allow to selectively set pll source
@@ -1328,8 +1321,22 @@ static int si5351_i2c_probe(struct i2c_client *client,
    i2c_set_clientdata(client, drvdata);
    drvdata->client = client;
    drvdata->variant = variant;
-   drvdata->pxtal = pdata->clk_xtal;
-   drvdata->pclkin = pdata->clk_clkin;
+   drvdata->pxtal = devm_clk_get(&client->dev, "xtal");
+   drvdata->pclkin = devm_clk_get(&client->dev, "clkin");
+
+   if (PTR_ERR(drvdata->pxtal) == -EPROBE_DEFER ||
+       PTR_ERR(drvdata->pclkin) == -EPROBE_DEFER)
+       return -EPROBE_DEFER;
+
+   /*
+    * Check for valid parent clock: VARIANT_A and VARIANT_B need XTAL,
+    * VARIANT_C can have CLKIN instead.
+    */
+   if (IS_ERR(drvdata->pxtal) &&
+       (drvdata->variant != SI5351_VARIANT_C || IS_ERR(drvdata->pclkin))) {
+       dev_err(&client->dev, "missing parent clock\n");
+       return -EINVAL;
+   }
 
    drvdata->regmap = devm_regmap_init_i2c(client, &si5351_regmap_config);
    if (IS_ERR(drvdata->regmap)) {
@@ -1393,6 +1400,11 @@ static int si5351_i2c_probe(struct i2c_client *client,
        }
    }
 
+   if (!IS_ERR(drvdata->pxtal))
+       clk_prepare_enable(drvdata->pxtal);
+   if (!IS_ERR(drvdata->pclkin))
+       clk_prepare_enable(drvdata->pclkin);
+
    /* register xtal input clock gate */
    memset(&init, 0, sizeof(init));
    init.name = si5351_input_names[0];
@@ -1407,7 +1419,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
    clk = devm_clk_register(&client->dev, &drvdata->xtal);
    if (IS_ERR(clk)) {
        dev_err(&client->dev, "unable to register %s\n", init.name);
-       return PTR_ERR(clk);
+       ret = PTR_ERR(clk);
+       goto err_clk;
    }
 
    /* register clkin input clock gate */
@@ -1425,7 +1438,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
        if (IS_ERR(clk)) {
            dev_err(&client->dev, "unable to register %s\n",
                init.name);
-           return PTR_ERR(clk);
+           ret = PTR_ERR(clk);
+           goto err_clk;
        }
    }
 
@@ -1447,7 +1461,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
    clk = devm_clk_register(&client->dev, &drvdata->pll[0].hw);
    if (IS_ERR(clk)) {
        dev_err(&client->dev, "unable to register %s\n", init.name);
-       return -EINVAL;
+       ret = PTR_ERR(clk);
+       goto err_clk;
    }
 
    /* register PLLB or VXCO (Si5351B) */
@@ -1471,7 +1486,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
    clk = devm_clk_register(&client->dev, &drvdata->pll[1].hw);
    if (IS_ERR(clk)) {
        dev_err(&client->dev, "unable to register %s\n", init.name);
-       return -EINVAL;
+       ret = PTR_ERR(clk);
+       goto err_clk;
    }
 
    /* register clk multisync and clk out divider */
@@ -1492,8 +1508,10 @@ static int si5351_i2c_probe(struct i2c_client *client,
        num_clocks * sizeof(*drvdata->onecell.clks), GFP_KERNEL);
 
    if (WARN_ON(!drvdata->msynth || !drvdata->clkout ||
-           !drvdata->onecell.clks))
-       return -ENOMEM;
+           !drvdata->onecell.clks)) {
+       ret = -ENOMEM;
+       goto err_clk;
+   }
 
    for (n = 0; n < num_clocks; n++) {
        drvdata->msynth[n].num = n;
@@ -1511,7 +1529,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
        if (IS_ERR(clk)) {
            dev_err(&client->dev, "unable to register %s\n",
                init.name);
-           return -EINVAL;
+           ret = PTR_ERR(clk);
+           goto err_clk;
        }
    }
 
@@ -1538,7 +1557,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
        if (IS_ERR(clk)) {
            dev_err(&client->dev, "unable to register %s\n",
                init.name);
-           return -EINVAL;
+           ret = PTR_ERR(clk);
+           goto err_clk;
        }
        drvdata->onecell.clks[n] = clk;
 
@@ -1557,10 +1577,17 @@ static int si5351_i2c_probe(struct i2c_client *client,
                  &drvdata->onecell);
    if (ret) {
        dev_err(&client->dev, "unable to add clk provider\n");
-       return ret;
+       goto err_clk;
    }
 
    return 0;
+
+err_clk:
+   if (!IS_ERR(drvdata->pxtal))
+       clk_disable_unprepare(drvdata->pxtal);
+   if (!IS_ERR(drvdata->pclkin))
+       clk_disable_unprepare(drvdata->pclkin);
+   return ret;
 }
 
 static const struct i2c_device_id si5351_i2c_ids[] = {

@@ -1475,8 +1475,10 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
     */
    if (clk->prepare_count) {
        clk_core_prepare(parent);
+       flags = clk_enable_lock();
        clk_core_enable(parent);
        clk_core_enable(clk);
+       clk_enable_unlock(flags);
    }
 
    /* update the clk tree topology */
@@ -1491,13 +1493,17 @@ static void __clk_set_parent_after(struct clk_core *core,
                   struct clk_core *parent,
                   struct clk_core *old_parent)
 {
+   unsigned long flags;
+
    /*
     * Finish the migration of prepare state and undo the changes done
     * for preventing a race with clk_enable().
     */
    if (core->prepare_count) {
+       flags = clk_enable_lock();
        clk_core_disable(core);
        clk_core_disable(old_parent);
+       clk_enable_unlock(flags);
        clk_core_unprepare(old_parent);
    }
 }
@@ -1525,8 +1531,10 @@ static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
        clk_enable_unlock(flags);
 
        if (clk->prepare_count) {
+           flags = clk_enable_lock();
            clk_core_disable(clk);
            clk_core_disable(parent);
+           clk_enable_unlock(flags);
            clk_core_unprepare(parent);
        }
        return ret;

@@ -71,8 +71,8 @@ static const char *gcc_xo_gpll0_bimc[] = {
 static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2a_map[] = {
    { P_XO, 0 },
    { P_GPLL0_AUX, 3 },
-   { P_GPLL2_AUX, 2 },
    { P_GPLL1, 1 },
+   { P_GPLL2_AUX, 2 },
 };
 
 static const char *gcc_xo_gpll0a_gpll1_gpll2a[] = {
@@ -1115,7 +1115,7 @@ static struct clk_rcg2 usb_hs_system_clk_src = {
 static const struct freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = {
    F(100000000, P_GPLL0, 8, 0, 0),
    F(160000000, P_GPLL0, 5, 0, 0),
-   F(228570000, P_GPLL0, 5, 0, 0),
+   F(228570000, P_GPLL0, 3.5, 0, 0),
    { }
 };

@@ -10,7 +10,7 @@ obj-$(CONFIG_SOC_EXYNOS5250) += clk-exynos5250.o
 obj-$(CONFIG_SOC_EXYNOS5260) += clk-exynos5260.o
 obj-$(CONFIG_SOC_EXYNOS5410) += clk-exynos5410.o
 obj-$(CONFIG_SOC_EXYNOS5420) += clk-exynos5420.o
-obj-$(CONFIG_ARCH_EXYNOS5433) += clk-exynos5433.o
+obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos5433.o
 obj-$(CONFIG_SOC_EXYNOS5440) += clk-exynos5440.o
 obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-audss.o
 obj-$(CONFIG_ARCH_EXYNOS) += clk-exynos-clkout.o

@@ -271,6 +271,7 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = {
    { .offset = SRC_MASK_PERIC0, .value = 0x11111110, },
    { .offset = SRC_MASK_PERIC1, .value = 0x11111100, },
    { .offset = SRC_MASK_ISP, .value = 0x11111000, },
    { .offset = GATE_BUS_TOP, .value = 0xffffffff, },
+   { .offset = GATE_BUS_DISP1, .value = 0xffffffff, },
    { .offset = GATE_IP_PERIC, .value = 0xffffffff, },
 };

@@ -748,7 +748,7 @@ static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
    PLL_35XX_RATE(825000000U, 275, 4, 1),
    PLL_35XX_RATE(800000000U, 400, 6, 1),
    PLL_35XX_RATE(733000000U, 733, 12, 1),
-   PLL_35XX_RATE(700000000U, 360, 6, 1),
+   PLL_35XX_RATE(700000000U, 175, 3, 1),
    PLL_35XX_RATE(667000000U, 222, 4, 1),
    PLL_35XX_RATE(633000000U, 211, 4, 1),
    PLL_35XX_RATE(600000000U, 500, 5, 2),
@@ -760,14 +760,14 @@ static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
    PLL_35XX_RATE(444000000U, 370, 5, 2),
    PLL_35XX_RATE(420000000U, 350, 5, 2),
    PLL_35XX_RATE(400000000U, 400, 6, 2),
-   PLL_35XX_RATE(350000000U, 360, 6, 2),
+   PLL_35XX_RATE(350000000U, 350, 6, 2),
    PLL_35XX_RATE(333000000U, 222, 4, 2),
    PLL_35XX_RATE(300000000U, 500, 5, 3),
    PLL_35XX_RATE(266000000U, 532, 6, 3),
    PLL_35XX_RATE(200000000U, 400, 6, 3),
    PLL_35XX_RATE(166000000U, 332, 6, 3),
    PLL_35XX_RATE(160000000U, 320, 6, 3),
-   PLL_35XX_RATE(133000000U, 552, 6, 4),
+   PLL_35XX_RATE(133000000U, 532, 6, 4),
    PLL_35XX_RATE(100000000U, 400, 6, 4),
    { /* sentinel */ }
 };
@@ -1490,7 +1490,7 @@ static struct samsung_gate_clock mif_gate_clks[] __initdata = {
 
    /* ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT */
    GATE(CLK_PCLK_MONOTONIC_CNT, "pclk_monotonic_cnt", "div_aclk_mif_133",
-           ENABLE_PCLK_MIF_SECURE_RTC, 0, 0, 0),
+           ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT, 0, 0, 0),
 
    /* ENABLE_PCLK_MIF_SECURE_RTC */
    GATE(CLK_PCLK_RTC, "pclk_rtc", "div_aclk_mif_133",
@@ -3665,7 +3665,7 @@ static struct samsung_gate_clock apollo_gate_clks[] __initdata = {
            ENABLE_SCLK_APOLLO, 3, CLK_IGNORE_UNUSED, 0),
    GATE(CLK_SCLK_HPM_APOLLO, "sclk_hpm_apollo", "div_sclk_hpm_apollo",
            ENABLE_SCLK_APOLLO, 1, CLK_IGNORE_UNUSED, 0),
-   GATE(CLK_SCLK_APOLLO, "sclk_apollo", "div_apollo_pll",
+   GATE(CLK_SCLK_APOLLO, "sclk_apollo", "div_apollo2",
            ENABLE_SCLK_APOLLO, 0, CLK_IGNORE_UNUSED, 0),
 };
 
@@ -3927,7 +3927,7 @@ CLK_OF_DECLARE(exynos5433_cmu_atlas, "samsung,exynos5433-cmu-atlas",
 #define ENABLE_PCLK_MSCL 0x0900
 #define ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER0 0x0904
 #define ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER1 0x0908
-#define ENABLE_PCLK_MSCL_SECURE_SMMU_JPEG 0x000c
+#define ENABLE_PCLK_MSCL_SECURE_SMMU_JPEG 0x090c
 #define ENABLE_SCLK_MSCL 0x0a00
 #define ENABLE_IP_MSCL0 0x0b00
 #define ENABLE_IP_MSCL1 0x0b04

@@ -91,7 +91,7 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
 
 static void decon_clear_channel(struct decon_context *ctx)
 {
-   int win, ch_enabled = 0;
+   unsigned int win, ch_enabled = 0;
 
    DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -710,7 +710,7 @@ static void decon_dpms(struct exynos_drm_crtc *crtc, int mode)
    }
 }
 
-static struct exynos_drm_crtc_ops decon_crtc_ops = {
+static const struct exynos_drm_crtc_ops decon_crtc_ops = {
    .dpms = decon_dpms,
    .mode_fixup = decon_mode_fixup,
    .commit = decon_commit,

@@ -32,7 +32,6 @@
 #include <drm/bridge/ptn3460.h>
 
 #include "exynos_dp_core.h"
-#include "exynos_drm_fimd.h"
 
 #define ctx_from_connector(c) container_of(c, struct exynos_dp_device, \
                    connector)
@@ -196,7 +195,7 @@ static int exynos_dp_read_edid(struct exynos_dp_device *dp)
        }
    }
 
-   dev_err(dp->dev, "EDID Read success!\n");
+   dev_dbg(dp->dev, "EDID Read success!\n");
    return 0;
 }
 
@@ -1066,6 +1065,8 @@ static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
 
 static void exynos_dp_poweron(struct exynos_dp_device *dp)
 {
+   struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
+
    if (dp->dpms_mode == DRM_MODE_DPMS_ON)
        return;
 
@@ -1076,7 +1077,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
        }
    }
 
-   fimd_dp_clock_enable(dp_to_crtc(dp), true);
+   if (crtc->ops->clock_enable)
+       crtc->ops->clock_enable(dp_to_crtc(dp), true);
 
    clk_prepare_enable(dp->clock);
    exynos_dp_phy_init(dp);
@@ -1087,6 +1089,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
 
 static void exynos_dp_poweroff(struct exynos_dp_device *dp)
 {
+   struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
+
    if (dp->dpms_mode != DRM_MODE_DPMS_ON)
        return;
 
@@ -1102,7 +1106,8 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
    exynos_dp_phy_exit(dp);
    clk_disable_unprepare(dp->clock);
 
-   fimd_dp_clock_enable(dp_to_crtc(dp), false);
+   if (crtc->ops->clock_enable)
+       crtc->ops->clock_enable(dp_to_crtc(dp), false);
 
    if (dp->panel) {
        if (drm_panel_unprepare(dp->panel))

@@ -238,11 +238,11 @@ static struct drm_crtc_funcs exynos_crtc_funcs = {
 };
 
 struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
-                   struct drm_plane *plane,
-                   int pipe,
-                   enum exynos_drm_output_type type,
-                   struct exynos_drm_crtc_ops *ops,
-                   void *ctx)
+                   struct drm_plane *plane,
+                   int pipe,
+                   enum exynos_drm_output_type type,
+                   const struct exynos_drm_crtc_ops *ops,
+                   void *ctx)
 {
    struct exynos_drm_crtc *exynos_crtc;
    struct exynos_drm_private *private = drm_dev->dev_private;

@@ -18,11 +18,11 @@
 #include "exynos_drm_drv.h"
 
 struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
-                   struct drm_plane *plane,
-                   int pipe,
-                   enum exynos_drm_output_type type,
-                   struct exynos_drm_crtc_ops *ops,
-                   void *context);
+                   struct drm_plane *plane,
+                   int pipe,
+                   enum exynos_drm_output_type type,
+                   const struct exynos_drm_crtc_ops *ops,
+                   void *context);
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
 void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
 void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);

@@ -71,13 +71,6 @@ enum exynos_drm_output_type {
  * @dma_addr: array of bus(accessed by dma) address to the memory region
  *  allocated for a overlay.
  * @zpos: order of overlay layer(z position).
- * @index_color: if using color key feature then this value would be used
- *  as index color.
- * @default_win: a window to be enabled.
- * @color_key: color key on or off.
- * @local_path: in case of lcd type, local path mode on or off.
- * @transparency: transparency on or off.
- * @activated: activated or not.
  * @enabled: enabled or not.
  * @resume: to resume or not.
  *
@@ -108,13 +101,7 @@ struct exynos_drm_plane {
    uint32_t pixel_format;
    dma_addr_t dma_addr[MAX_FB_BUFFER];
    unsigned int zpos;
-   unsigned int index_color;
 
-   bool default_win:1;
-   bool color_key:1;
-   bool local_path:1;
-   bool transparency:1;
-   bool activated:1;
    bool enabled:1;
    bool resume:1;
 };
@@ -181,6 +168,10 @@ struct exynos_drm_display {
  * @win_disable: disable hardware specific overlay.
  * @te_handler: trigger to transfer video image at the tearing effect
  *  synchronization signal if there is a page flip request.
+ * @clock_enable: optional function enabling/disabling display domain clock,
+ *  called from exynos-dp driver before powering up (with
+ *  'enable' argument as true) and after powering down (with
+ *  'enable' as false).
  */
 struct exynos_drm_crtc;
 struct exynos_drm_crtc_ops {
@@ -195,6 +186,7 @@ struct exynos_drm_crtc_ops {
    void (*win_commit)(struct exynos_drm_crtc *crtc, unsigned int zpos);
    void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos);
    void (*te_handler)(struct exynos_drm_crtc *crtc);
+   void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable);
 };
 
 /*
@@ -221,7 +213,7 @@ struct exynos_drm_crtc {
    unsigned int dpms;
    wait_queue_head_t pending_flip_queue;
    struct drm_pending_vblank_event *event;
-   struct exynos_drm_crtc_ops *ops;
+   const struct exynos_drm_crtc_ops *ops;
    void *ctx;
 };

@@ -171,43 +171,6 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
    return &exynos_fb->fb;
 }
 
-static u32 exynos_drm_format_num_buffers(struct drm_mode_fb_cmd2 *mode_cmd)
-{
-   unsigned int cnt = 0;
-
-   if (mode_cmd->pixel_format != DRM_FORMAT_NV12)
-       return drm_format_num_planes(mode_cmd->pixel_format);
-
-   while (cnt != MAX_FB_BUFFER) {
-       if (!mode_cmd->handles[cnt])
-           break;
-       cnt++;
-   }
-
-   /*
-    * check if NV12 or NV12M.
-    *
-    * NV12
-    * handles[0] = base1, offsets[0] = 0
-    * handles[1] = base1, offsets[1] = Y_size
-    *
-    * NV12M
-    * handles[0] = base1, offsets[0] = 0
-    * handles[1] = base2, offsets[1] = 0
-    */
-   if (cnt == 2) {
-       /*
-        * in case of NV12 format, offsets[1] is not 0 and
-        * handles[0] is same as handles[1].
-        */
-       if (mode_cmd->offsets[1] &&
-           mode_cmd->handles[0] == mode_cmd->handles[1])
-           cnt = 1;
-   }
-
-   return cnt;
-}
-
 static struct drm_framebuffer *
 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
              struct drm_mode_fb_cmd2 *mode_cmd)
@@ -230,7 +193,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 
    drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
    exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
-   exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
+   exynos_fb->buf_cnt = drm_format_num_planes(mode_cmd->pixel_format);
 
    DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);

@@ -33,7 +33,6 @@
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_plane.h"
 #include "exynos_drm_iommu.h"
-#include "exynos_drm_fimd.h"
 
 /*
  * FIMD stands for Fully Interactive Mobile Display and
@@ -216,7 +215,7 @@ static void fimd_wait_for_vblank(struct exynos_drm_crtc *crtc)
        DRM_DEBUG_KMS("vblank wait timed out.\n");
 }
 
-static void fimd_enable_video_output(struct fimd_context *ctx, int win,
+static void fimd_enable_video_output(struct fimd_context *ctx, unsigned int win,
                     bool enable)
 {
    u32 val = readl(ctx->regs + WINCON(win));
@@ -229,7 +228,8 @@ static void fimd_enable_video_output(struct fimd_context *ctx, int win,
    writel(val, ctx->regs + WINCON(win));
 }
 
-static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, int win,
+static void fimd_enable_shadow_channel_path(struct fimd_context *ctx,
+                       unsigned int win,
                        bool enable)
 {
    u32 val = readl(ctx->regs + SHADOWCON);
@@ -244,7 +244,7 @@ static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, int win,
 
 static void fimd_clear_channel(struct fimd_context *ctx)
 {
-   int win, ch_enabled = 0;
+   unsigned int win, ch_enabled = 0;
 
    DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -946,7 +946,24 @@ static void fimd_te_handler(struct exynos_drm_crtc *crtc)
        drm_handle_vblank(ctx->drm_dev, ctx->pipe);
 }
 
-static struct exynos_drm_crtc_ops fimd_crtc_ops = {
+static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
+{
+   struct fimd_context *ctx = crtc->ctx;
+   u32 val;
+
+   /*
+    * Only Exynos 5250, 5260, 5410 and 542x requires enabling DP/MIE
+    * clock. On these SoCs the bootloader may enable it but any
+    * power domain off/on will reset it to disable state.
+    */
+   if (ctx->driver_data != &exynos5_fimd_driver_data)
+       return;
+
+   val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
+   writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON);
+}
+
+static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
    .dpms = fimd_dpms,
    .mode_fixup = fimd_mode_fixup,
    .commit = fimd_commit,
@@ -956,6 +973,7 @@ static struct exynos_drm_crtc_ops fimd_crtc_ops = {
    .win_commit = fimd_win_commit,
    .win_disable = fimd_win_disable,
    .te_handler = fimd_te_handler,
+   .clock_enable = fimd_dp_clock_enable,
 };
 
 static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
@@ -1025,12 +1043,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
    if (ctx->display)
        exynos_drm_create_enc_conn(drm_dev, ctx->display);
 
-   ret = fimd_iommu_attach_devices(ctx, drm_dev);
-   if (ret)
-       return ret;
-
-   return 0;
-
+   return fimd_iommu_attach_devices(ctx, drm_dev);
 }
 
 static void fimd_unbind(struct device *dev, struct device *master,
@@ -1192,24 +1205,6 @@ static int fimd_remove(struct platform_device *pdev)
    return 0;
 }
 
-void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
-{
-   struct fimd_context *ctx = crtc->ctx;
-   u32 val;
-
-   /*
-    * Only Exynos 5250, 5260, 5410 and 542x requires enabling DP/MIE
-    * clock. On these SoCs the bootloader may enable it but any
-    * power domain off/on will reset it to disable state.
-    */
-   if (ctx->driver_data != &exynos5_fimd_driver_data)
-       return;
-
-   val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
-   writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON);
-}
-EXPORT_SYMBOL_GPL(fimd_dp_clock_enable);
-
 struct platform_driver fimd_driver = {
    .probe = fimd_probe,
    .remove = fimd_remove,

@@ -1,15 +0,0 @@
-/*
- * Copyright (c) 2015 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_FIMD_H_
-#define _EXYNOS_DRM_FIMD_H_
-
-extern void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable);
-
-#endif /* _EXYNOS_DRM_FIMD_H_ */

@@ -76,7 +76,7 @@ int exynos_check_plane(struct drm_plane *plane, struct drm_framebuffer *fb)
            return -EFAULT;
        }
 
-       exynos_plane->dma_addr[i] = buffer->dma_addr;
+       exynos_plane->dma_addr[i] = buffer->dma_addr + fb->offsets[i];
 
        DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
                i, (unsigned long)exynos_plane->dma_addr[i]);

@@ -217,7 +217,7 @@ static int vidi_ctx_initialize(struct vidi_context *ctx,
    return 0;
 }
 
-static struct exynos_drm_crtc_ops vidi_crtc_ops = {
+static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
    .dpms = vidi_dpms,
    .enable_vblank = vidi_enable_vblank,
    .disable_vblank = vidi_disable_vblank,

@@ -44,6 +44,12 @@
 #define MIXER_WIN_NR 3
 #define MIXER_DEFAULT_WIN 0
 
+/* The pixelformats that are natively supported by the mixer. */
+#define MXR_FORMAT_RGB565 4
+#define MXR_FORMAT_ARGB1555 5
+#define MXR_FORMAT_ARGB4444 6
+#define MXR_FORMAT_ARGB8888 7
+
 struct mixer_resources {
    int irq;
    void __iomem *mixer_regs;
@@ -327,7 +333,8 @@ static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
    mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
 }
 
-static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
+static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win,
+               bool enable)
 {
    struct mixer_resources *res = &ctx->mixer_res;
    u32 val = enable ? ~0 : 0;
@@ -359,8 +366,6 @@ static void mixer_run(struct mixer_context *ctx)
    struct mixer_resources *res = &ctx->mixer_res;
 
    mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
-
-   mixer_regs_dump(ctx);
 }
 
 static void mixer_stop(struct mixer_context *ctx)
@@ -373,16 +378,13 @@ static void mixer_stop(struct mixer_context *ctx)
    while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
            --timeout)
        usleep_range(10000, 12000);
-
-   mixer_regs_dump(ctx);
 }
 
-static void vp_video_buffer(struct mixer_context *ctx, int win)
+static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
 {
    struct mixer_resources *res = &ctx->mixer_res;
    unsigned long flags;
    struct exynos_drm_plane *plane;
-   unsigned int buf_num = 1;
    dma_addr_t luma_addr[2], chroma_addr[2];
    bool tiled_mode = false;
    bool crcb_mode = false;
@@ -393,27 +395,18 @@ static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
    switch (plane->pixel_format) {
    case DRM_FORMAT_NV12:
        crcb_mode = false;
-       buf_num = 2;
        break;
-   /* TODO: single buffer format NV12, NV21 */
+   case DRM_FORMAT_NV21:
+       crcb_mode = true;
+       break;
    default:
-       /* ignore pixel format at disable time */
-       if (!plane->dma_addr[0])
-           break;
-
        DRM_ERROR("pixel format for vp is wrong [%d].\n",
                plane->pixel_format);
        return;
    }
 
-   if (buf_num == 2) {
-       luma_addr[0] = plane->dma_addr[0];
-       chroma_addr[0] = plane->dma_addr[1];
-   } else {
-       luma_addr[0] = plane->dma_addr[0];
-       chroma_addr[0] = plane->dma_addr[0]
-           + (plane->pitch * plane->fb_height);
-   }
+   luma_addr[0] = plane->dma_addr[0];
+   chroma_addr[0] = plane->dma_addr[1];
 
    if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) {
        ctx->interlace = true;
@@ -484,6 +477,7 @@ static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
    mixer_vsync_set_update(ctx, true);
    spin_unlock_irqrestore(&res->reg_slock, flags);
 
+   mixer_regs_dump(ctx);
    vp_regs_dump(ctx);
 }
 
@@ -518,7 +512,7 @@ fail:
    return -ENOTSUPP;
 }
 
-static void mixer_graph_buffer(struct mixer_context *ctx, int win)
+static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
 {
    struct mixer_resources *res = &ctx->mixer_res;
    unsigned long flags;
@@ -531,20 +525,27 @@ static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
 
    plane = &ctx->planes[win];
 
-#define RGB565 4
-#define ARGB1555 5
-#define ARGB4444 6
-#define ARGB8888 7
+   switch (plane->pixel_format) {
+   case DRM_FORMAT_XRGB4444:
+       fmt = MXR_FORMAT_ARGB4444;
+       break;
 
-   switch (plane->bpp) {
-   case 16:
-       fmt = ARGB4444;
+   case DRM_FORMAT_XRGB1555:
+       fmt = MXR_FORMAT_ARGB1555;
        break;
-   case 32:
-       fmt = ARGB8888;
+
+   case DRM_FORMAT_RGB565:
+       fmt = MXR_FORMAT_RGB565;
        break;
+
+   case DRM_FORMAT_XRGB8888:
+   case DRM_FORMAT_ARGB8888:
+       fmt = MXR_FORMAT_ARGB8888;
+       break;
+
    default:
-       fmt = ARGB8888;
+       DRM_DEBUG_KMS("pixelformat unsupported by mixer\n");
+       return;
    }
 
    /* check if mixer supports requested scaling setup */
@@ -617,6 +618,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
 
    mixer_vsync_set_update(ctx, true);
    spin_unlock_irqrestore(&res->reg_slock, flags);
+
+   mixer_regs_dump(ctx);
 }
 
 static void vp_win_reset(struct mixer_context *ctx)
@@ -1070,6 +1073,7 @@ static void mixer_poweroff(struct mixer_context *ctx)
    mutex_unlock(&ctx->mixer_mutex);
 
+   mixer_stop(ctx);
    mixer_regs_dump(ctx);
    mixer_window_suspend(ctx);
 
    ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
@@ -1126,7 +1130,7 @@ int mixer_check_mode(struct drm_display_mode *mode)
        return -EINVAL;
 }
 
-static struct exynos_drm_crtc_ops mixer_crtc_ops = {
+static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
    .dpms = mixer_dpms,
    .enable_vblank = mixer_enable_vblank,
    .disable_vblank = mixer_disable_vblank,
@@ -1156,7 +1160,7 @@ static struct mixer_drv_data exynos4210_mxr_drv_data = {
    .has_sclk = 1,
 };
 
-static struct platform_device_id mixer_driver_types[] = {
+static const struct platform_device_id mixer_driver_types[] = {
    {
        .name = "s5p-mixer",
        .driver_data = (unsigned long)&exynos4210_mxr_drv_data,

@ -2045,22 +2045,20 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);

if (crtc->primary->state->fb) {
p->pri.enabled = true;
if (crtc->primary->state->fb)
p->pri.bytes_per_pixel =
crtc->primary->state->fb->bits_per_pixel / 8;
} else {
p->pri.enabled = false;
p->pri.bytes_per_pixel = 0;
}
else
p->pri.bytes_per_pixel = 4;

p->cur.bytes_per_pixel = 4;
/*
* TODO: for now, assume primary and cursor planes are always enabled.
* Setting them to false makes the screen flicker.
*/
p->pri.enabled = true;
p->cur.enabled = true;

if (crtc->cursor->state->fb) {
p->cur.enabled = true;
p->cur.bytes_per_pixel = 4;
} else {
p->cur.enabled = false;
p->cur.bytes_per_pixel = 0;
}
p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;

@ -384,7 +384,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
if (gpu->memptrs_bo) {
if (gpu->memptrs_iova)
msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
drm_gem_object_unreference(gpu->memptrs_bo);
drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
}
release_firmware(gpu->pm4);
release_firmware(gpu->pfp);

@ -177,6 +177,11 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}

for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
encoders[i]->bridge = msm_dsi->bridge;
msm_dsi->encoders[i] = encoders[i];
}

msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id);
if (IS_ERR(msm_dsi->connector)) {
ret = PTR_ERR(msm_dsi->connector);

@ -185,11 +190,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}

for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
encoders[i]->bridge = msm_dsi->bridge;
msm_dsi->encoders[i] = encoders[i];
}

priv->bridges[priv->num_bridges++] = msm_dsi->bridge;
priv->connectors[priv->num_connectors++] = msm_dsi->connector;
@ -1023,7 +1023,7 @@ static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
*data = buf[1]; /* strip out dcs type */
return 1;
} else {
pr_err("%s: read data does not match with rx_buf len %d\n",
pr_err("%s: read data does not match with rx_buf len %zu\n",
__func__, msg->rx_len);
return -EINVAL;
}

@ -1040,7 +1040,7 @@ static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
data[1] = buf[2];
return 2;
} else {
pr_err("%s: read data does not match with rx_buf len %d\n",
pr_err("%s: read data does not match with rx_buf len %zu\n",
__func__, msg->rx_len);
return -EINVAL;
}

@ -1093,7 +1093,6 @@ static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
{
u32 *lp, *temp, data;
int i, j = 0, cnt;
bool ack_error = false;
u32 read_cnt;
u8 reg[16];
int repeated_bytes = 0;

@ -1105,15 +1104,10 @@ static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
if (cnt > 4)
cnt = 4; /* 4 x 32 bits registers only */

/* Calculate real read data count */
read_cnt = dsi_read(msm_host, 0x1d4) >> 16;

ack_error = (rx_byte == 4) ?
(read_cnt == 8) : /* short pkt + 4-byte error pkt */
(read_cnt == (pkt_size + 6 + 4)); /* long pkt+4-byte error pkt*/

if (ack_error)
read_cnt -= 4; /* Remove 4 byte error pkt */
if (rx_byte == 4)
read_cnt = 4;
else
read_cnt = pkt_size + 6;

/*
* In case of multiple reads from the panel, after the first read, there

@ -1215,7 +1209,7 @@ static void dsi_err_worker(struct work_struct *work)
container_of(work, struct msm_dsi_host, err_work);
u32 status = msm_host->err_work_state;

pr_err("%s: status=%x\n", __func__, status);
pr_err_ratelimited("%s: status=%x\n", __func__, status);
if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
dsi_sw_reset_restore(msm_host);

@ -1797,6 +1791,7 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
pr_err("%s: rx ACK_ERR_PACLAGE\n", __func__);
ret = 0;
break;
case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
ret = dsi_short_read1_resp(buf, msg);

@ -462,7 +462,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_connector *connector = NULL;
struct dsi_connector *dsi_connector;
int ret;
int ret, i;

dsi_connector = devm_kzalloc(msm_dsi->dev->dev,
sizeof(*dsi_connector), GFP_KERNEL);

@ -495,6 +495,10 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
if (ret)
goto fail;

for (i = 0; i < MSM_DSI_ENCODER_NUM; i++)
drm_mode_connector_attach_encoder(connector,
msm_dsi->encoders[i]);

return connector;

fail:

@ -132,7 +132,7 @@ ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, struct drm_dp_aux_msg *msg)
/* msg sanity check */
if ((native && (msg->size > AUX_CMD_NATIVE_MAX)) ||
(msg->size > AUX_CMD_I2C_MAX)) {
pr_err("%s: invalid msg: size(%d), request(%x)\n",
pr_err("%s: invalid msg: size(%zu), request(%x)\n",
__func__, msg->size, msg->request);
return -EINVAL;
}

@ -155,7 +155,7 @@ ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, struct drm_dp_aux_msg *msg)
*/
edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0);
msm_edp_aux_ctrl(aux, 1);
pr_err("%s: aux timeout, %d\n", __func__, ret);
pr_err("%s: aux timeout, %zd\n", __func__, ret);
goto unlock_exit;
}
DBG("completion");

@ -151,6 +151,8 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
if (ret)
goto fail;

drm_mode_connector_attach_encoder(connector, edp->encoder);

return connector;

fail:

@ -1149,12 +1149,13 @@ int msm_edp_ctrl_init(struct msm_edp *edp)
ctrl->aux = msm_edp_aux_init(dev, ctrl->base, &ctrl->drm_aux);
if (!ctrl->aux || !ctrl->drm_aux) {
pr_err("%s:failed to init aux\n", __func__);
return ret;
return -ENOMEM;
}

ctrl->phy = msm_edp_phy_init(dev, ctrl->base);
if (!ctrl->phy) {
pr_err("%s:failed to init phy\n", __func__);
ret = -ENOMEM;
goto err_destory_aux;
}
@ -72,14 +72,13 @@ const struct mdp5_cfg_hw msm8x74_config = {
.base = { 0x12d00, 0x12e00, 0x12f00 },
},
.intf = {
.count = 4,
.base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
},
.intfs = {
[0] = INTF_eDP,
[1] = INTF_DSI,
[2] = INTF_DSI,
[3] = INTF_HDMI,
.connect = {
[0] = INTF_eDP,
[1] = INTF_DSI,
[2] = INTF_DSI,
[3] = INTF_HDMI,
},
},
.max_clk = 200000000,
};

@ -142,14 +141,13 @@ const struct mdp5_cfg_hw apq8084_config = {
.base = { 0x12f00, 0x13000, 0x13100, 0x13200 },
},
.intf = {
.count = 5,
.base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
},
.intfs = {
[0] = INTF_eDP,
[1] = INTF_DSI,
[2] = INTF_DSI,
[3] = INTF_HDMI,
.connect = {
[0] = INTF_eDP,
[1] = INTF_DSI,
[2] = INTF_DSI,
[3] = INTF_HDMI,
},
},
.max_clk = 320000000,
};

@ -196,10 +194,12 @@ const struct mdp5_cfg_hw msm8x16_config = {

},
.intf = {
.count = 1, /* INTF_1 */
.base = { 0x6B800 },
.base = { 0x00000, 0x6b800 },
.connect = {
[0] = INTF_DISABLED,
[1] = INTF_DSI,
},
},
/* TODO enable .intfs[] with [1] = INTF_DSI, once DSI is implemented */
.max_clk = 320000000,
};

@ -59,6 +59,11 @@ struct mdp5_smp_block {

#define MDP5_INTF_NUM_MAX 5

struct mdp5_intf_block {
uint32_t base[MAX_BASES];
u32 connect[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */
};

struct mdp5_cfg_hw {
char *name;

@ -72,9 +77,7 @@ struct mdp5_cfg_hw {
struct mdp5_sub_block dspp;
struct mdp5_sub_block ad;
struct mdp5_sub_block pp;
struct mdp5_sub_block intf;

u32 intfs[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */
struct mdp5_intf_block intf;

uint32_t max_clk;
};

@ -206,8 +206,8 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,

static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
{
const int intf_cnt = hw_cfg->intf.count;
const u32 *intfs = hw_cfg->intfs;
const enum mdp5_intf_type *intfs = hw_cfg->intf.connect;
const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect);
int id = 0, i;

for (i = 0; i < intf_cnt; i++) {

@ -228,7 +228,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
struct msm_drm_private *priv = dev->dev_private;
const struct mdp5_cfg_hw *hw_cfg =
mdp5_cfg_get_hw_config(mdp5_kms->cfg);
enum mdp5_intf_type intf_type = hw_cfg->intfs[intf_num];
enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num];
struct drm_encoder *encoder;
int ret = 0;

@ -365,7 +365,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
/* Construct encoders and modeset initialize connector devices
* for each external display interface.
*/
for (i = 0; i < ARRAY_SIZE(hw_cfg->intfs); i++) {
for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
ret = modeset_init_intf(mdp5_kms, i);
if (ret)
goto fail;

@ -514,8 +514,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
*/
mdp5_enable(mdp5_kms);
for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
if (!config->hw->intf.base[i] ||
mdp5_cfg_intf_is_virtual(config->hw->intfs[i]))
if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
!config->hw->intf.base[i])
continue;
mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
}

@ -273,7 +273,7 @@ static void set_scanout_locked(struct drm_plane *plane,
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
msm_framebuffer_iova(fb, mdp5_kms->id, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
msm_framebuffer_iova(fb, mdp5_kms->id, 4));
msm_framebuffer_iova(fb, mdp5_kms->id, 3));

plane->fb = fb;
}
@ -21,9 +21,11 @@

static void msm_fb_output_poll_changed(struct drm_device *dev)
{
#ifdef CONFIG_DRM_MSM_FBDEV
struct msm_drm_private *priv = dev->dev_private;
if (priv->fbdev)
drm_fb_helper_hotplug_event(priv->fbdev);
#endif
}

static const struct drm_mode_config_funcs mode_config_funcs = {

@ -94,7 +96,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
}

if (reglog)
printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size);
printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);

return ptr;
}

@ -102,7 +104,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
void msm_writel(u32 data, void __iomem *addr)
{
if (reglog)
printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
writel(data, addr);
}

@ -110,7 +112,7 @@ u32 msm_readl(const void __iomem *addr)
{
u32 val = readl(addr);
if (reglog)
printk(KERN_ERR "IO:R %08x %08x\n", (u32)addr, val);
printk(KERN_ERR "IO:R %p %08x\n", addr, val);
return val;
}

@ -143,8 +145,8 @@ static int msm_unload(struct drm_device *dev)
if (gpu) {
mutex_lock(&dev->struct_mutex);
gpu->funcs->pm_suspend(gpu);
gpu->funcs->destroy(gpu);
mutex_unlock(&dev->struct_mutex);
gpu->funcs->destroy(gpu);
}

if (priv->vram.paddr) {

@ -177,7 +179,7 @@ static int get_mdp_ver(struct platform_device *pdev)
const struct of_device_id *match;
match = of_match_node(match_types, dev->of_node);
if (match)
return (int)match->data;
return (int)(unsigned long)match->data;
#endif
return 4;
}

@ -216,7 +218,7 @@ static int msm_init_vram(struct drm_device *dev)
if (ret)
return ret;
size = r.end - r.start;
DRM_INFO("using VRAM carveout: %lx@%08x\n", size, r.start);
DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
} else
#endif

@ -283,10 +285,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)

drm_mode_config_init(dev);

ret = msm_init_vram(dev);
if (ret)
goto fail;

platform_set_drvdata(pdev, dev);

/* Bind all our sub-components: */

@ -294,6 +292,10 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
if (ret)
return ret;

ret = msm_init_vram(dev);
if (ret)
goto fail;

switch (get_mdp_ver(pdev)) {
case 4:
kms = mdp4_kms_init(dev);

@ -419,9 +421,11 @@ static void msm_preclose(struct drm_device *dev, struct drm_file *file)

static void msm_lastclose(struct drm_device *dev)
{
#ifdef CONFIG_DRM_MSM_FBDEV
struct msm_drm_private *priv = dev->dev_private;
if (priv->fbdev)
drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
#endif
}

static irqreturn_t msm_irq(int irq, void *arg)

@ -172,8 +172,8 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
struct msm_framebuffer *msm_fb;
struct drm_framebuffer *fb = NULL;
struct msm_framebuffer *msm_fb = NULL;
struct drm_framebuffer *fb;
const struct msm_format *format;
int ret, i, n;
unsigned int hsub, vsub;

@ -239,8 +239,7 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
return fb;

fail:
if (fb)
msm_framebuffer_destroy(fb);
kfree(msm_fb);

return ERR_PTR(ret);
}

@ -483,7 +483,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
uint64_t off = drm_vma_node_start(&obj->vma_node);

WARN_ON(!mutex_is_locked(&dev->struct_mutex));
seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
msm_obj->read_fence, msm_obj->write_fence,
obj->name, obj->refcount.refcount.counter,

@ -60,7 +60,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
u32 pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;

VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);

ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)

@ -99,7 +99,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
if (unmapped < bytes)
return unmapped;

VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);

BUG_ON(!PAGE_ALIGNED(bytes));

@ -56,6 +56,6 @@ fail:
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
{
if (ring->bo)
drm_gem_object_unreference(ring->bo);
drm_gem_object_unreference_unlocked(ring->bo);
kfree(ring);
}
@ -421,19 +421,21 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
{
struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
u8 msg[DP_DPCD_SIZE];
int ret;
int ret, i;

ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
DP_DPCD_SIZE);
if (ret > 0) {
memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
for (i = 0; i < 7; i++) {
ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
DP_DPCD_SIZE);
if (ret == DP_DPCD_SIZE) {
memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);

DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
dig_connector->dpcd);
DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
dig_connector->dpcd);

radeon_dp_probe_oui(radeon_connector);
radeon_dp_probe_oui(radeon_connector);

return true;
return true;
}
}
dig_connector->dpcd[0] = 0;
return false;

@ -30,8 +30,6 @@
AUX_SW_RX_HPD_DISCON | \
AUX_SW_RX_PARTIAL_BYTE | \
AUX_SW_NON_AUX_MODE | \
AUX_SW_RX_MIN_COUNT_VIOL | \
AUX_SW_RX_INVALID_STOP | \
AUX_SW_RX_SYNC_INVALID_L | \
AUX_SW_RX_SYNC_INVALID_H | \
AUX_SW_RX_INVALID_START | \
@ -164,6 +164,7 @@
#define USB_DEVICE_ID_ATEN_2PORTKVM 0x2204
#define USB_DEVICE_ID_ATEN_4PORTKVM 0x2205
#define USB_DEVICE_ID_ATEN_4PORTKVMC 0x2208
#define USB_DEVICE_ID_ATEN_CS682 0x2213

#define USB_VENDOR_ID_ATMEL 0x03eb
#define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c

@ -44,7 +44,6 @@ MODULE_PARM_DESC(disable_raw_mode,
/* bits 1..20 are reserved for classes */
#define HIDPP_QUIRK_DELAYED_INIT BIT(21)
#define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS BIT(22)
#define HIDPP_QUIRK_MULTI_INPUT BIT(23)

/*
* There are two hidpp protocols in use, the first version hidpp10 is known

@ -706,12 +705,6 @@ static int wtp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
struct hidpp_device *hidpp = hid_get_drvdata(hdev);

if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) &&
(field->application == HID_GD_KEYBOARD))
return 0;

return -1;
}

@ -720,10 +713,6 @@ static void wtp_populate_input(struct hidpp_device *hidpp,
{
struct wtp_data *wd = hidpp->private_data;

if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) && origin_is_hid_core)
/* this is the generic hid-input call */
return;

__set_bit(EV_ABS, input_dev->evbit);
__set_bit(EV_KEY, input_dev->evbit);
__clear_bit(EV_REL, input_dev->evbit);

@ -1245,10 +1234,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
connect_mask &= ~HID_CONNECT_HIDINPUT;

/* Re-enable hidinput for multi-input devices */
if (hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT)
connect_mask |= HID_CONNECT_HIDINPUT;

ret = hid_hw_start(hdev, connect_mask);
if (ret) {
hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);

@ -1296,11 +1281,6 @@ static const struct hid_device_id hidpp_devices[] = {
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_T651),
.driver_data = HIDPP_QUIRK_CLASS_WTP },
{ /* Keyboard TK820 */
HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
USB_VENDOR_ID_LOGITECH, 0x4102),
.driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_MULTI_INPUT |
HIDPP_QUIRK_CLASS_WTP },

{ HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},

@ -294,7 +294,7 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
if (!report)
return -EINVAL;

mutex_lock(&hsdev->mutex);
mutex_lock(hsdev->mutex_ptr);
if (flag == SENSOR_HUB_SYNC) {
memset(&hsdev->pending, 0, sizeof(hsdev->pending));
init_completion(&hsdev->pending.ready);

@ -328,7 +328,7 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
kfree(hsdev->pending.raw_data);
hsdev->pending.status = false;
}
mutex_unlock(&hsdev->mutex);
mutex_unlock(hsdev->mutex_ptr);

return ret_val;
}

@ -667,7 +667,14 @@ static int sensor_hub_probe(struct hid_device *hdev,
hsdev->vendor_id = hdev->vendor;
hsdev->product_id = hdev->product;
hsdev->usage = collection->usage;
mutex_init(&hsdev->mutex);
hsdev->mutex_ptr = devm_kzalloc(&hdev->dev,
sizeof(struct mutex),
GFP_KERNEL);
if (!hsdev->mutex_ptr) {
ret = -ENOMEM;
goto err_stop_hw;
}
mutex_init(hsdev->mutex_ptr);
hsdev->start_collection_index = i;
if (last_hsdev)
last_hsdev->end_collection_index = i;

@ -862,6 +862,7 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
union acpi_object *obj;
struct acpi_device *adev;
acpi_handle handle;
int ret;

handle = ACPI_HANDLE(&client->dev);
if (!handle || acpi_bus_get_device(handle, &adev))

@ -877,7 +878,9 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
pdata->hid_descriptor_address = obj->integer.value;
ACPI_FREE(obj);

return acpi_dev_add_driver_gpios(adev, i2c_hid_acpi_gpios);
/* GPIOs are optional */
ret = acpi_dev_add_driver_gpios(adev, i2c_hid_acpi_gpios);
return ret < 0 && ret != -ENXIO ? ret : 0;
}

static const struct acpi_device_id i2c_hid_acpi_match[] = {

@ -61,6 +61,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },

@ -1072,6 +1072,9 @@ static int wacom_wac_finger_count_touches(struct wacom_wac *wacom)
int count = 0;
int i;

if (!touch_max)
return 0;

/* non-HID_GENERIC single touch input doesn't call this routine */
if ((touch_max == 1) && (wacom->features.type == HID_GENERIC))
return wacom->hid_data.tipswitch &&
@ -861,6 +861,7 @@ retest:
cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
break;
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
spin_unlock_irq(&cm_id_priv->lock);
ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,

@ -879,7 +880,6 @@ retest:
NULL, 0, NULL, 0);
}
break;
case IB_CM_MRA_REQ_RCVD:
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);

@ -845,18 +845,26 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
ib = (struct sockaddr_ib *) &id->route.addr.src_addr;
ib->sib_family = listen_ib->sib_family;
ib->sib_pkey = path->pkey;
ib->sib_flowinfo = path->flow_label;
memcpy(&ib->sib_addr, &path->sgid, 16);
if (path) {
ib->sib_pkey = path->pkey;
ib->sib_flowinfo = path->flow_label;
memcpy(&ib->sib_addr, &path->sgid, 16);
} else {
ib->sib_pkey = listen_ib->sib_pkey;
ib->sib_flowinfo = listen_ib->sib_flowinfo;
ib->sib_addr = listen_ib->sib_addr;
}
ib->sib_sid = listen_ib->sib_sid;
ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
ib->sib_scope_id = listen_ib->sib_scope_id;

ib = (struct sockaddr_ib *) &id->route.addr.dst_addr;
ib->sib_family = listen_ib->sib_family;
ib->sib_pkey = path->pkey;
ib->sib_flowinfo = path->flow_label;
memcpy(&ib->sib_addr, &path->dgid, 16);
if (path) {
ib = (struct sockaddr_ib *) &id->route.addr.dst_addr;
ib->sib_family = listen_ib->sib_family;
ib->sib_pkey = path->pkey;
ib->sib_flowinfo = path->flow_label;
memcpy(&ib->sib_addr, &path->dgid, 16);
}
}

static __be16 ss_get_port(const struct sockaddr_storage *ss)

@ -905,9 +913,11 @@ static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
{
struct cma_hdr *hdr;

if ((listen_id->route.addr.src_addr.ss_family == AF_IB) &&
(ib_event->event == IB_CM_REQ_RECEIVED)) {
cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
if (listen_id->route.addr.src_addr.ss_family == AF_IB) {
if (ib_event->event == IB_CM_REQ_RECEIVED)
cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
cma_save_ib_info(id, listen_id, NULL);
return 0;
}
@ -40,7 +40,7 @@
#include <be_roce.h>
#include "ocrdma_sli.h"

#define OCRDMA_ROCE_DRV_VERSION "10.4.205.0u"
#define OCRDMA_ROCE_DRV_VERSION "10.6.0.0"

#define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
#define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"

@ -515,6 +515,8 @@ static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
if (rdma_is_multicast_addr(&in6))
rdma_get_mcast_mac(&in6, mac_addr);
else if (rdma_link_local_addr(&in6))
rdma_get_ll_mac(&in6, mac_addr);
else
memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
return 0;

@ -56,7 +56,13 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
vlan_tag = attr->vlan_id;
if (!vlan_tag || (vlan_tag > 0xFFF))
vlan_tag = dev->pvid;
if (vlan_tag && (vlan_tag < 0x1000)) {
if (vlan_tag || dev->pfc_state) {
if (!vlan_tag) {
pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
dev->id);
pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
dev->id);
}
eth.eth_type = cpu_to_be16(0x8100);
eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;

@ -121,7 +127,9 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
goto av_conf_err;
}

if (pd->uctx) {
if ((pd->uctx) &&
(!rdma_is_multicast_addr((struct in6_addr *)attr->grh.dgid.raw)) &&
(!rdma_link_local_addr((struct in6_addr *)attr->grh.dgid.raw))) {
status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
attr->dmac, &attr->vlan_id);
if (status) {

@ -933,12 +933,18 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
struct ocrdma_eqe eqe;
struct ocrdma_eqe *ptr;
u16 cq_id;
u8 mcode;
int budget = eq->cq_cnt;

do {
ptr = ocrdma_get_eqe(eq);
eqe = *ptr;
ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
mcode = (eqe.id_valid & OCRDMA_EQE_MAJOR_CODE_MASK)
>> OCRDMA_EQE_MAJOR_CODE_SHIFT;
if (mcode == OCRDMA_MAJOR_CODE_SENTINAL)
pr_err("EQ full on eqid = 0x%x, eqe = 0x%x\n",
eq->q.id, eqe.id_valid);
if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
break;

@ -1434,27 +1440,30 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
struct ocrdma_alloc_pd_range_rsp *rsp;

/* Pre allocate the DPP PDs */
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
if (!cmd)
return -ENOMEM;
cmd->pd_count = dev->attr.max_dpp_pds;
cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (status)
goto mbx_err;
rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
if (dev->attr.max_dpp_pds) {
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE,
sizeof(*cmd));
if (!cmd)
return -ENOMEM;
cmd->pd_count = dev->attr.max_dpp_pds;
cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;

if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) {
dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
dev->pd_mgr->max_dpp_pd = rsp->pd_count;
pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
GFP_KERNEL);
if (!status && (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) &&
rsp->pd_count) {
dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
dev->pd_mgr->max_dpp_pd = rsp->pd_count;
pd_bitmap_size =
BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
GFP_KERNEL);
}
kfree(cmd);
}
kfree(cmd);

cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
if (!cmd)

@ -1462,10 +1471,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)

cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
if (status)
goto mbx_err;
rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
if (rsp->pd_count) {
if (!status && rsp->pd_count) {
dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
dev->pd_mgr->max_normal_pd = rsp->pd_count;

@ -1473,15 +1480,13 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
GFP_KERNEL);
}
kfree(cmd);

if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
/* Enable PD resource manager */
dev->pd_mgr->pd_prealloc_valid = true;
} else {
return -ENOMEM;
return 0;
}
mbx_err:
kfree(cmd);
return status;
}

@ -2406,7 +2411,7 @@ int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
struct ocrdma_query_qp *cmd;
struct ocrdma_query_qp_rsp *rsp;

cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*cmd));
cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*rsp));
if (!cmd)
return status;
cmd->qp_id = qp->id;

@ -2428,7 +2433,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
int status;
struct ib_ah_attr *ah_attr = &attrs->ah_attr;
union ib_gid sgid, zgid;
u32 vlan_id;
u32 vlan_id = 0xFFFF;
u8 mac_addr[6];
struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);

@ -2468,12 +2473,22 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
if (attr_mask & IB_QP_VID) {
vlan_id = attrs->vlan_id;
} else if (dev->pfc_state) {
vlan_id = 0;
pr_err("ocrdma%d:Using VLAN with PFC is recommended\n",
dev->id);
pr_err("ocrdma%d:Using VLAN 0 for this connection\n",
dev->id);
}

if (vlan_id < 0x1000) {
cmd->params.vlan_dmac_b4_to_b5 |=
vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
cmd->params.rnt_rc_sl_fl |=
(dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
}

return 0;
}

@ -2519,8 +2534,10 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
}
if (attr_mask & IB_QP_PATH_MTU) {
if (attrs->path_mtu < IB_MTU_256 ||
if (attrs->path_mtu < IB_MTU_512 ||
attrs->path_mtu > IB_MTU_4096) {
pr_err("ocrdma%d: IB MTU %d is not supported\n",
dev->id, ib_mtu_enum_to_int(attrs->path_mtu));
status = -EINVAL;
goto pmtu_err;
}

@ -3147,9 +3164,9 @@ void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
ocrdma_free_pd_pool(dev);
ocrdma_mbx_delete_ah_tbl(dev);

/* cleanup the eqs */
ocrdma_destroy_eqs(dev);

/* cleanup the control path */
ocrdma_destroy_mq(dev);

/* cleanup the eqs */
ocrdma_destroy_eqs(dev);
}

@ -1176,6 +1176,8 @@ struct ocrdma_query_qp_rsp {
struct ocrdma_mqe_hdr hdr;
struct ocrdma_mbx_rsp rsp;
struct ocrdma_qp_params params;
u32 dpp_credits_cqid;
u32 rbq_id;
};

enum {

@ -1624,12 +1626,19 @@ struct ocrdma_delete_ah_tbl_rsp {
enum {
OCRDMA_EQE_VALID_SHIFT = 0,
OCRDMA_EQE_VALID_MASK = BIT(0),
OCRDMA_EQE_MAJOR_CODE_MASK = 0x0E,
OCRDMA_EQE_MAJOR_CODE_SHIFT = 0x01,
OCRDMA_EQE_FOR_CQE_MASK = 0xFFFE,
OCRDMA_EQE_RESOURCE_ID_SHIFT = 16,
OCRDMA_EQE_RESOURCE_ID_MASK = 0xFFFF <<
OCRDMA_EQE_RESOURCE_ID_SHIFT,
};

enum major_code {
OCRDMA_MAJOR_CODE_COMPLETION = 0x00,
OCRDMA_MAJOR_CODE_SENTINAL = 0x01
};

struct ocrdma_eqe {
u32 id_valid;
};

@ -365,7 +365,7 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
if (!pd)
return ERR_PTR(-ENOMEM);

if (udata && uctx) {
if (udata && uctx && dev->attr.max_dpp_pds) {
pd->dpp_enabled =
ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
pd->num_dpp_qp =

@ -1721,18 +1721,20 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
struct ocrdma_qp *qp;
struct ocrdma_dev *dev;
struct ib_qp_attr attrs;
int attr_mask = IB_QP_STATE;
int attr_mask;
unsigned long flags;

qp = get_ocrdma_qp(ibqp);
dev = get_ocrdma_dev(ibqp->device);

attrs.qp_state = IB_QPS_ERR;
pd = qp->pd;

/* change the QP state to ERROR */
_ocrdma_modify_qp(ibqp, &attrs, attr_mask);

if (qp->state != OCRDMA_QPS_RST) {
attrs.qp_state = IB_QPS_ERR;
attr_mask = IB_QP_STATE;
_ocrdma_modify_qp(ibqp, &attrs, attr_mask);
}
/* ensure that CQEs for newly created QP (whose id may be same with
* one which just getting destroyed are same), dont get
* discarded until the old CQEs are discarded.
@ -747,6 +747,63 @@ static void joydev_cleanup(struct joydev *joydev)
input_close_device(handle);
}

static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
{
DECLARE_BITMAP(jd_scratch, KEY_CNT);

BUILD_BUG_ON(ABS_CNT > KEY_CNT || EV_CNT > KEY_CNT);

/*
* Virtualization (VMware, etc) and remote management (HP
* ILO2) solutions use absolute coordinates for their virtual
* pointing devices so that there is one-to-one relationship
* between pointer position on the host screen and virtual
* guest screen, and so their mice use ABS_X, ABS_Y and 3
* primary button events. This clashes with what joydev
* considers to be joysticks (a device with at minimum ABS_X
* axis).
*
* Here we are trying to separate absolute mice from
* joysticks. A device is, for joystick detection purposes,
* considered to be an absolute mouse if the following is
* true:
*
* 1) Event types are exactly EV_ABS, EV_KEY and EV_SYN.
* 2) Absolute events are exactly ABS_X and ABS_Y.
* 3) Keys are exactly BTN_LEFT, BTN_RIGHT and BTN_MIDDLE.
* 4) Device is not on "Amiga" bus.
*/

bitmap_zero(jd_scratch, EV_CNT);
__set_bit(EV_ABS, jd_scratch);
__set_bit(EV_KEY, jd_scratch);
__set_bit(EV_SYN, jd_scratch);
if (!bitmap_equal(jd_scratch, dev->evbit, EV_CNT))
return false;

bitmap_zero(jd_scratch, ABS_CNT);
__set_bit(ABS_X, jd_scratch);
__set_bit(ABS_Y, jd_scratch);
if (!bitmap_equal(dev->absbit, jd_scratch, ABS_CNT))
return false;

bitmap_zero(jd_scratch, KEY_CNT);
__set_bit(BTN_LEFT, jd_scratch);
__set_bit(BTN_RIGHT, jd_scratch);
__set_bit(BTN_MIDDLE, jd_scratch);

if (!bitmap_equal(dev->keybit, jd_scratch, KEY_CNT))
return false;

/*
* Amiga joystick (amijoy) historically uses left/middle/right
* button events.
*/
if (dev->id.bustype == BUS_AMIGA)
return false;

return true;
}

static bool joydev_match(struct input_handler *handler, struct input_dev *dev)
{

@ -758,6 +815,10 @@ static bool joydev_match(struct input_handler *handler, struct input_dev *dev)
if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_DIGI, dev->keybit))
return false;

/* Avoid absolute mice */
if (joydev_dev_is_absolute_mouse(dev))
return false;

return true;
}

@ -156,7 +156,7 @@ config MOUSE_PS2_VMMOUSE
Say Y here if you are running under control of VMware hypervisor
(ESXi, Workstation or Fusion). Also make sure that when you enable
this option, you remove the xf86-input-vmmouse user-space driver
or upgrade it to at least xf86-input-vmmouse 13.0.1, which doesn't
or upgrade it to at least xf86-input-vmmouse 13.1.0, which doesn't
load in the presence of an in-kernel vmmouse driver.

If unsure, say N.

@ -941,6 +941,11 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
case V7_PACKET_ID_TWO:
mt[1].x &= ~0x000F;
mt[1].y |= 0x000F;
/* Detect false-postive touches where x & y report max value */
if (mt[1].y == 0x7ff && mt[1].x == 0xff0) {
mt[1].x = 0;
/* y gets set to 0 at the end of this function */
}
break;

case V7_PACKET_ID_MULTI:

@ -315,7 +315,7 @@ static void elantech_report_semi_mt_data(struct input_dev *dev,
unsigned int x2, unsigned int y2)
{
elantech_set_slot(dev, 0, num_fingers != 0, x1, y1);
elantech_set_slot(dev, 1, num_fingers == 2, x2, y2);
elantech_set_slot(dev, 1, num_fingers >= 2, x2, y2);
}

/*

@ -164,7 +164,7 @@ static irqreturn_t stmpe_ts_handler(int irq, void *data)
STMPE_TSC_CTRL_TSC_EN, STMPE_TSC_CTRL_TSC_EN);

/* start polling for touch_det to detect release */
schedule_delayed_work(&ts->work, HZ / 50);
schedule_delayed_work(&ts->work, msecs_to_jiffies(50));

return IRQ_HANDLED;
}

@ -187,7 +187,7 @@ static int sx8654_probe(struct i2c_client *client,
return -ENOMEM;

input = devm_input_allocate_device(&client->dev);
if (!sx8654)
if (!input)
return -ENOMEM;

input->name = "SX8654 I2C Touchscreen";
@ -828,7 +828,14 @@ static int its_alloc_tables(struct its_node *its)
u64 typer = readq_relaxed(its->base + GITS_TYPER);
u32 ids = GITS_TYPER_DEVBITS(typer);

order = get_order((1UL << ids) * entry_size);
/*
* 'order' was initialized earlier to the default page
* granule of the the ITS. We can't have an allocation
* smaller than that. If the requested allocation
* is smaller, round up to the default page granule.
*/
order = max(get_order((1UL << ids) * entry_size),
order);
if (order >= MAX_ORDER) {
order = MAX_ORDER - 1;
pr_warn("%s: Device Table too large, reduce its page order to %u\n",

@ -177,11 +177,16 @@ static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mdde
* nr_pending is 0 and In_sync is clear, the entries we return will
* still be in the same position on the list when we re-enter
* list_for_each_entry_continue_rcu.
*
* Note that if entered with 'rdev == NULL' to start at the
* beginning, we temporarily assign 'rdev' to an address which
* isn't really an rdev, but which can be used by
* list_for_each_entry_continue_rcu() to find the first entry.
*/
rcu_read_lock();
if (rdev == NULL)
/* start at the beginning */
rdev = list_entry_rcu(&mddev->disks, struct md_rdev, same_set);
rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
else {
/* release the previous rdev and start from there. */
rdev_dec_pending(rdev, mddev);

@ -524,6 +524,9 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
? (sector & (chunk_sects-1))
: sector_div(sector, chunk_sects));

/* Restore due to sector_div */
sector = bio->bi_iter.bi_sector;

if (sectors < bio_sectors(bio)) {
split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
bio_chain(split, bio);

@ -531,7 +534,6 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
split = bio;
}

sector = bio->bi_iter.bi_sector;
zone = find_zone(mddev->private, &sector);
tmp_dev = map_sector(mddev, zone, sector, &sector);
split->bi_bdev = tmp_dev->bdev;

@ -1822,7 +1822,7 @@ again:
} else
init_async_submit(&submit, 0, tx, NULL, NULL,
to_addr_conv(sh, percpu, j));
async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit);
if (!last_stripe) {
j++;
sh = list_first_entry(&sh->batch_list, struct stripe_head,
@ -1304,7 +1304,7 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)

if (ios->clock) {
unsigned int clock_min = ~0U;
u32 clkdiv;
int clkdiv;

spin_lock_bh(&host->lock);
if (!host->mode_reg) {

@ -1328,7 +1328,12 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
/* Calculate clock divider */
if (host->caps.has_odd_clk_div) {
clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
if (clkdiv > 511) {
if (clkdiv < 0) {
dev_warn(&mmc->class_dev,
"clock %u too fast; using %lu\n",
clock_min, host->bus_hz / 2);
clkdiv = 0;
} else if (clkdiv > 511) {
dev_warn(&mmc->class_dev,
"clock %u too slow; using %lu\n",
clock_min, host->bus_hz / (511 + 2));

@ -624,7 +624,7 @@ int __bond_opt_set(struct bonding *bond,
out:
if (ret)
bond_opt_error_interpret(bond, opt, ret, val);
else
else if (bond->dev->reg_state == NETREG_REGISTERED)
call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev);

return ret;
@ -350,6 +350,9 @@ static int macb_mii_probe(struct net_device *dev)
else
phydev->supported &= PHY_BASIC_FEATURES;

if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
phydev->supported &= ~SUPPORTED_1000baseT_Half;

phydev->advertising = phydev->supported;

bp->link = 0;

@ -1037,6 +1040,12 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
* add that if/when we get our hands on a full-blown MII PHY.
*/

/* There is a hardware issue under heavy load where DMA can
* stop, this causes endless "used buffer descriptor read"
* interrupts but it can be cleared by re-enabling RX. See
* the at91 manual, section 41.3.1 or the Zynq manual
* section 16.7.4 for details.
*/
if (status & MACB_BIT(RXUBR)) {
ctrl = macb_readl(bp, NCR);
macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));

@ -2693,6 +2702,14 @@ static const struct macb_config emac_config = {
.init = at91ether_init,
};

static const struct macb_config zynq_config = {
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
MACB_CAPS_NO_GIGABIT_HALF,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
};

static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "cdns,at32ap7000-macb" },
{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },

@ -2703,6 +2720,7 @@ static const struct of_device_id macb_dt_ids[] = {
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
{ .compatible = "cdns,emac", .data = &emac_config },
{ .compatible = "cdns,zynq-gem", .data = &zynq_config },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);

@ -393,6 +393,7 @@
#define MACB_CAPS_ISR_CLEAR_ON_WRITE 0x00000001
#define MACB_CAPS_USRIO_HAS_CLKEN 0x00000002
#define MACB_CAPS_USRIO_DEFAULT_IS_MII 0x00000004
#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000

@ -3187,7 +3187,7 @@ int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
int cqn = vhcr->in_modifier;
struct mlx4_cq_context *cqc = inbox->buf;
int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
struct res_cq *cq;
struct res_cq *cq = NULL;
struct res_mtt *mtt;

err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);

@ -3223,7 +3223,7 @@ int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
{
int err;
int cqn = vhcr->in_modifier;
struct res_cq *cq;
struct res_cq *cq = NULL;

err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
if (err)

@ -3362,7 +3362,7 @@ int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
int err;
int srqn = vhcr->in_modifier;
struct res_mtt *mtt;
struct res_srq *srq;
struct res_srq *srq = NULL;
struct mlx4_srq_context *srqc = inbox->buf;
int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;

@ -3406,7 +3406,7 @@ int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
{
int err;
int srqn = vhcr->in_modifier;
struct res_srq *srq;
struct res_srq *srq = NULL;

err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
if (err)
@ -2921,10 +2921,11 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
int err = 0;

if (!n)
if (!n) {
n = neigh_create(&arp_tbl, &ip_addr, dev);
if (!n)
return -ENOMEM;
if (IS_ERR(n))
return IS_ERR(n);
}

/* If the neigh is already resolved, then go ahead and
* install the entry, otherwise start the ARP process to

@ -2936,6 +2937,7 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
else
neigh_event_send(n, NULL);

neigh_release(n);
return err;
}

@ -742,6 +742,9 @@ EXPORT_SYMBOL(phy_stop);
*/
void phy_start(struct phy_device *phydev)
{
bool do_resume = false;
int err = 0;

mutex_lock(&phydev->lock);

switch (phydev->state) {

@ -752,11 +755,22 @@ void phy_start(struct phy_device *phydev)
phydev->state = PHY_UP;
break;
case PHY_HALTED:
/* make sure interrupts are re-enabled for the PHY */
err = phy_enable_interrupts(phydev);
if (err < 0)
break;

phydev->state = PHY_RESUMING;
do_resume = true;
break;
default:
break;
}
mutex_unlock(&phydev->lock);

/* if phy was suspended, bring the physical link up again */
if (do_resume)
phy_resume(phydev);
}
EXPORT_SYMBOL(phy_start);

@ -769,7 +783,7 @@ void phy_state_machine(struct work_struct *work)
struct delayed_work *dwork = to_delayed_work(work);
struct phy_device *phydev =
container_of(dwork, struct phy_device, state_queue);
bool needs_aneg = false, do_suspend = false, do_resume = false;
bool needs_aneg = false, do_suspend = false;
int err = 0;

mutex_lock(&phydev->lock);

@ -888,14 +902,6 @@ void phy_state_machine(struct work_struct *work)
}
break;
case PHY_RESUMING:
err = phy_clear_interrupt(phydev);
if (err)
break;

err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
if (err)
break;

if (AUTONEG_ENABLE == phydev->autoneg) {
err = phy_aneg_done(phydev);
if (err < 0)

@ -933,7 +939,6 @@ void phy_state_machine(struct work_struct *work)
}
phydev->adjust_link(phydev->attached_dev);
}
do_resume = true;
break;
}

@ -943,8 +948,6 @@ void phy_state_machine(struct work_struct *work)
err = phy_start_aneg(phydev);
else if (do_suspend)
phy_suspend(phydev);
else if (do_resume)
phy_resume(phydev);

if (err < 0)
phy_error(phydev);

@ -1053,13 +1056,14 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
{
/* According to 802.3az,the EEE is supported only in full duplex-mode.
* Also EEE feature is active when core is operating with MII, GMII
* or RGMII. Internal PHYs are also allowed to proceed and should
* return an error if they do not support EEE.
* or RGMII (all kinds). Internal PHYs are also allowed to proceed and
* should return an error if they do not support EEE.
*/
if ((phydev->duplex == DUPLEX_FULL) &&
((phydev->interface == PHY_INTERFACE_MODE_MII) ||
(phydev->interface == PHY_INTERFACE_MODE_GMII) ||
(phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
(phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) ||
phy_is_internal(phydev))) {
int eee_lp, eee_cap, eee_adv;
u32 lp, cap, adv;
@ -1182,7 +1182,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
|
||||
* payload data instead.
|
||||
*/
|
||||
usbnet_set_skb_tx_stats(skb_out, n,
|
||||
ctx->tx_curr_frame_payload - skb_out->len);
|
||||
(long)ctx->tx_curr_frame_payload - skb_out->len);
|
||||
|
||||
return skb_out;
|
||||
|
||||
|
@ -2961,7 +2961,7 @@ static void __net_exit vxlan_exit_net(struct net *net)
|
||||
* to the list by the previous loop.
|
||||
*/
|
||||
if (!net_eq(dev_net(vxlan->dev), net))
|
||||
unregister_netdevice_queue(dev, &list);
|
||||
unregister_netdevice_queue(vxlan->dev, &list);
|
||||
}
|
||||
|
||||
unregister_netdevice_many(&list);
|
||||
|
@ -16,6 +16,7 @@
|
||||
#include <linux/mfd/syscon.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/of.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/pwm.h>
|
||||
#include <linux/regmap.h>
|
||||
@@ -38,7 +39,22 @@
 #define PERIP_PWM_PDM_CONTROL_CH_MASK		0x1
 #define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch)	((ch) * 4)
 
-#define MAX_TMBASE_STEPS			65536
+/*
+ * PWM period is specified with a timebase register,
+ * in number of step periods. The PWM duty cycle is also
+ * specified in step periods, in the [0, $timebase] range.
+ * In other words, the timebase imposes the duty cycle
+ * resolution. Therefore, let's constraint the timebase to
+ * a minimum value to allow a sane range of duty cycle values.
+ * Imposing a minimum timebase, will impose a maximum PWM frequency.
+ *
+ * The value chosen is completely arbitrary.
+ */
+#define MIN_TMBASE_STEPS			16
+
+struct img_pwm_soc_data {
+	u32 max_timebase;
+};
 
 struct img_pwm_chip {
 	struct device	*dev;
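The new comment ties duty-cycle resolution to the timebase, and the minimum timebase to a maximum output frequency. A worked check of that trade-off, with an assumed 40 MHz input clock (the rate is board-specific and not taken from this driver):

#include <stdio.h>

int main(void)
{
	unsigned long clk_hz = 40000000UL;	/* assumed input clock */
	unsigned int timebase = 16;		/* MIN_TMBASE_STEPS */

	/* Duty is programmed in [0, timebase] steps, so even the
	 * smallest allowed timebase keeps 17 distinct duty levels. */
	printf("duty levels:  %u\n", timebase + 1);

	/* One period lasts "timebase" step periods, so the minimum
	 * timebase caps the output frequency at clk / 16 = 2.5 MHz. */
	printf("max PWM freq: %lu Hz\n", clk_hz / timebase);
	return 0;
}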
@@ -47,6 +63,9 @@ struct img_pwm_chip {
 	struct clk	*sys_clk;
 	void __iomem	*base;
 	struct regmap	*periph_regs;
+	int		max_period_ns;
+	int		min_period_ns;
+	const struct img_pwm_soc_data	*data;
 };
 
 static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
@@ -72,24 +91,31 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
 	u32 val, div, duty, timebase;
 	unsigned long mul, output_clk_hz, input_clk_hz;
 	struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
+	unsigned int max_timebase = pwm_chip->data->max_timebase;
+
+	if (period_ns < pwm_chip->min_period_ns ||
+	    period_ns > pwm_chip->max_period_ns) {
+		dev_err(chip->dev, "configured period not in range\n");
+		return -ERANGE;
+	}
 
 	input_clk_hz = clk_get_rate(pwm_chip->pwm_clk);
 	output_clk_hz = DIV_ROUND_UP(NSEC_PER_SEC, period_ns);
 
 	mul = DIV_ROUND_UP(input_clk_hz, output_clk_hz);
-	if (mul <= MAX_TMBASE_STEPS) {
+	if (mul <= max_timebase) {
 		div = PWM_CTRL_CFG_NO_SUB_DIV;
 		timebase = DIV_ROUND_UP(mul, 1);
-	} else if (mul <= MAX_TMBASE_STEPS * 8) {
+	} else if (mul <= max_timebase * 8) {
 		div = PWM_CTRL_CFG_SUB_DIV0;
 		timebase = DIV_ROUND_UP(mul, 8);
-	} else if (mul <= MAX_TMBASE_STEPS * 64) {
+	} else if (mul <= max_timebase * 64) {
 		div = PWM_CTRL_CFG_SUB_DIV1;
 		timebase = DIV_ROUND_UP(mul, 64);
-	} else if (mul <= MAX_TMBASE_STEPS * 512) {
+	} else if (mul <= max_timebase * 512) {
 		div = PWM_CTRL_CFG_SUB_DIV0_DIV1;
 		timebase = DIV_ROUND_UP(mul, 512);
-	} else if (mul > MAX_TMBASE_STEPS * 512) {
+	} else if (mul > max_timebase * 512) {
 		dev_err(chip->dev,
 			"failed to configure timebase steps/divider value\n");
 		return -EINVAL;
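The cascade above picks the smallest sub-divider (1, 8, 64, 512) that brings the clock ratio within max_timebase steps. A standalone sketch of the same selection, with invented names; the 40 MHz clock and 100 us period are assumed inputs, and max_timebase = 255 mirrors the Pistachio SoC data:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	static const unsigned int subdivs[] = { 1, 8, 64, 512 };
	unsigned long input_clk_hz = 40000000UL;	/* assumed */
	unsigned long period_ns = 100000UL;		/* requested: 100 us */
	unsigned long output_clk_hz = DIV_ROUND_UP(1000000000UL, period_ns);
	unsigned long mul = DIV_ROUND_UP(input_clk_hz, output_clk_hz);
	unsigned int max_timebase = 255;
	unsigned int i;

	/* Try each divider in turn; the first that fits wins, keeping
	 * the timebase (and thus duty resolution) as large as possible. */
	for (i = 0; i < 4; i++) {
		if (mul <= (unsigned long)max_timebase * subdivs[i]) {
			printf("divider %u, timebase %lu steps\n",
			       subdivs[i], DIV_ROUND_UP(mul, subdivs[i]));
			return 0;
		}
	}
	printf("period too long for this clock\n");	/* the -EINVAL case */
	return 1;
}

With these inputs mul is 4000, which overflows timebases of 255 and 2040 but fits divider 64, yielding a 63-step timebase.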
@@ -143,11 +169,27 @@ static const struct pwm_ops img_pwm_ops = {
 	.owner = THIS_MODULE,
 };
 
+static const struct img_pwm_soc_data pistachio_pwm = {
+	.max_timebase = 255,
+};
+
+static const struct of_device_id img_pwm_of_match[] = {
+	{
+		.compatible = "img,pistachio-pwm",
+		.data = &pistachio_pwm,
+	},
+	{ }
+};
+MODULE_DEVICE_TABLE(of, img_pwm_of_match);
+
 static int img_pwm_probe(struct platform_device *pdev)
 {
 	int ret;
+	u64 val;
+	unsigned long clk_rate;
 	struct resource *res;
 	struct img_pwm_chip *pwm;
+	const struct of_device_id *of_dev_id;
 
 	pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
 	if (!pwm)
@@ -160,6 +202,11 @@ static int img_pwm_probe(struct platform_device *pdev)
 	if (IS_ERR(pwm->base))
 		return PTR_ERR(pwm->base);
 
+	of_dev_id = of_match_device(img_pwm_of_match, &pdev->dev);
+	if (!of_dev_id)
+		return -ENODEV;
+	pwm->data = of_dev_id->data;
+
 	pwm->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
 							   "img,cr-periph");
 	if (IS_ERR(pwm->periph_regs))
@@ -189,6 +236,17 @@ static int img_pwm_probe(struct platform_device *pdev)
 		goto disable_sysclk;
 	}
 
+	clk_rate = clk_get_rate(pwm->pwm_clk);
+
+	/* The maximum input clock divider is 512 */
+	val = (u64)NSEC_PER_SEC * 512 * pwm->data->max_timebase;
+	do_div(val, clk_rate);
+	pwm->max_period_ns = val;
+
+	val = (u64)NSEC_PER_SEC * MIN_TMBASE_STEPS;
+	do_div(val, clk_rate);
+	pwm->min_period_ns = val;
+
 	pwm->chip.dev = &pdev->dev;
 	pwm->chip.ops = &img_pwm_ops;
 	pwm->chip.base = -1;
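A quick numeric check of the period bounds computed above, again with an assumed 40 MHz PWM clock (clk_rate is board-specific):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t clk_rate = 40000000;	/* assumed */
	uint64_t max_timebase = 255;	/* pistachio_pwm.max_timebase */
	uint64_t min_steps = 16;	/* MIN_TMBASE_STEPS */

	/* Longest period: largest divider (512) times largest timebase,
	 * giving 3264000 ns (about 3.26 ms) here. */
	printf("max period: %llu ns\n", (unsigned long long)
	       (1000000000ULL * 512 * max_timebase / clk_rate));

	/* Shortest period: no divider, minimum timebase -> 400 ns. */
	printf("min period: %llu ns\n", (unsigned long long)
	       (1000000000ULL * min_steps / clk_rate));
	return 0;
}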
@@ -228,12 +286,6 @@ static int img_pwm_remove(struct platform_device *pdev)
 	return pwmchip_remove(&pwm_chip->chip);
 }
 
-static const struct of_device_id img_pwm_of_match[] = {
-	{ .compatible = "img,pistachio-pwm", },
-	{ }
-};
-MODULE_DEVICE_TABLE(of, img_pwm_of_match);
-
 static struct platform_driver img_pwm_driver = {
 	.driver = {
 		.name = "img-pwm",
@@ -1158,11 +1158,12 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
 	poll_timeout = time;
 	hr_time = ktime_set(0, poll_timeout);
 
-	if (!hrtimer_is_queued(&ap_poll_timer) ||
-	    !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) {
-		hrtimer_set_expires(&ap_poll_timer, hr_time);
-		hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
-	}
+	spin_lock_bh(&ap_poll_timer_lock);
+	hrtimer_cancel(&ap_poll_timer);
+	hrtimer_set_expires(&ap_poll_timer, hr_time);
+	hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
+	spin_unlock_bh(&ap_poll_timer_lock);
+
 	return count;
 }
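The new code serializes the rearm: cancel, set the new expiry, and restart under the same lock the scheduling path takes, so a concurrent writer cannot race the timer. A hedged userspace analogue using POSIX timers in place of hrtimers; poll_lock, poll_timer, and set_poll_timeout are invented names, not the driver's API (link with -lrt -lpthread):

#include <pthread.h>
#include <time.h>

static pthread_mutex_t poll_lock = PTHREAD_MUTEX_INITIALIZER;
static timer_t poll_timer;

static void set_poll_timeout(long timeout_ns)
{
	struct itimerspec its = {
		.it_value    = { .tv_nsec = timeout_ns },
		.it_interval = { .tv_nsec = timeout_ns },
	};

	pthread_mutex_lock(&poll_lock);
	/* timer_settime() replaces any pending expiry, mirroring the
	 * hrtimer_cancel() + hrtimer_start_expires() pair above. */
	timer_settime(poll_timer, 0, &its, NULL);
	pthread_mutex_unlock(&poll_lock);
}

int main(void)
{
	if (timer_create(CLOCK_MONOTONIC, NULL, &poll_timer))
		return 1;
	set_poll_timeout(250000);	/* 250 us poll interval (assumed) */
	return 0;
}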
@@ -1528,14 +1529,11 @@ static inline void __ap_schedule_poll_timer(void)
 	ktime_t hr_time;
 
 	spin_lock_bh(&ap_poll_timer_lock);
-	if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
-		goto out;
-	if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
+	if (!hrtimer_is_queued(&ap_poll_timer) && !ap_suspend_flag) {
 		hr_time = ktime_set(0, poll_timeout);
 		hrtimer_forward_now(&ap_poll_timer, hr_time);
 		hrtimer_restart(&ap_poll_timer);
 	}
-out:
 	spin_unlock_bh(&ap_poll_timer_lock);
 }
@@ -1952,7 +1950,7 @@ static void ap_reset_domain(void)
 {
 	int i;
 
-	if (ap_domain_index != -1)
+	if ((ap_domain_index != -1) && (ap_test_config_domain(ap_domain_index)))
 		for (i = 0; i < AP_DEVICES; i++)
 			ap_reset_queue(AP_MKQID(i, ap_domain_index));
 }
@@ -2097,7 +2095,6 @@ void ap_module_exit(void)
 	hrtimer_cancel(&ap_poll_timer);
 	destroy_workqueue(ap_work_queue);
 	tasklet_kill(&ap_tasklet);
-	root_device_unregister(ap_root_device);
 	while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
 		       __ap_match_all)))
 	{
@@ -2106,6 +2103,7 @@ void ap_module_exit(void)
 	}
 	for (i = 0; ap_bus_attrs[i]; i++)
 		bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
+	root_device_unregister(ap_root_device);
 	bus_unregister(&ap_bus_type);
 	unregister_reset_call(&ap_reset_call);
 	if (ap_using_interrupts())