Merge branch 'fixes' into next
Merge our fixes branch, in particular to bring in the change to arch/powerpc/boot/Makefile, which is depended upon by a subsequent series.
commit 4b668b3202
@@ -906,11 +906,17 @@ config DATA_SHIFT
 
 config ARCH_FORCE_MAX_ORDER
 	int "Order of maximal physically contiguous allocations"
+	range 7 8 if PPC64 && PPC_64K_PAGES
 	default "8" if PPC64 && PPC_64K_PAGES
+	range 12 12 if PPC64 && !PPC_64K_PAGES
 	default "12" if PPC64 && !PPC_64K_PAGES
+	range 8 10 if PPC32 && PPC_16K_PAGES
 	default "8" if PPC32 && PPC_16K_PAGES
+	range 6 10 if PPC32 && PPC_64K_PAGES
 	default "6" if PPC32 && PPC_64K_PAGES
+	range 4 10 if PPC32 && PPC_256K_PAGES
 	default "4" if PPC32 && PPC_256K_PAGES
+	range 10 10
 	default "10"
 	help
 	  The kernel page allocator limits the size of maximal physically
@@ -34,8 +34,6 @@ endif
 
 BOOTCFLAGS    := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
 		 -fno-strict-aliasing -O2 -msoft-float -mno-altivec -mno-vsx \
-		 $(call cc-option,-mno-prefixed) $(call cc-option,-mno-pcrel) \
-		 $(call cc-option,-mno-mma) \
 		 $(call cc-option,-mno-spe) $(call cc-option,-mspe=no) \
 		 -pipe -fomit-frame-pointer -fno-builtin -fPIC -nostdinc \
 		 $(LINUXINCLUDE)
@@ -71,6 +69,10 @@ BOOTAFLAGS	:= -D__ASSEMBLY__ $(BOOTCFLAGS) -nostdinc
 
 BOOTARFLAGS	:= -crD
 
+BOOTCFLAGS	+= $(call cc-option,-mno-prefixed) \
+		   $(call cc-option,-mno-pcrel) \
+		   $(call cc-option,-mno-mma)
+
 ifdef CONFIG_CC_IS_CLANG
 BOOTCFLAGS += $(CLANG_FLAGS)
 BOOTAFLAGS += $(CLANG_FLAGS)
@@ -96,7 +96,7 @@ config CRYPTO_AES_PPC_SPE
 
 config CRYPTO_AES_GCM_P10
 	tristate "Stitched AES/GCM acceleration support on P10 or later CPU (PPC)"
-	depends on PPC64 && CPU_LITTLE_ENDIAN
+	depends on PPC64 && CPU_LITTLE_ENDIAN && VSX
 	select CRYPTO_LIB_AES
 	select CRYPTO_ALGAPI
 	select CRYPTO_AEAD
@@ -22,15 +22,15 @@ sha1-ppc-spe-y := sha1-spe-asm.o sha1-spe-glue.o
 sha256-ppc-spe-y := sha256-spe-asm.o sha256-spe-glue.o
 crc32c-vpmsum-y := crc32c-vpmsum_asm.o crc32c-vpmsum_glue.o
 crct10dif-vpmsum-y := crct10dif-vpmsum_asm.o crct10dif-vpmsum_glue.o
-aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp8-ppc.o aesp8-ppc.o
+aes-gcm-p10-crypto-y := aes-gcm-p10-glue.o aes-gcm-p10.o ghashp10-ppc.o aesp10-ppc.o
 
 quiet_cmd_perl = PERL $@
       cmd_perl = $(PERL) $< $(if $(CONFIG_CPU_LITTLE_ENDIAN), linux-ppc64le, linux-ppc64) > $@
 
-targets += aesp8-ppc.S ghashp8-ppc.S
+targets += aesp10-ppc.S ghashp10-ppc.S
 
-$(obj)/aesp8-ppc.S $(obj)/ghashp8-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
+$(obj)/aesp10-ppc.S $(obj)/ghashp10-ppc.S: $(obj)/%.S: $(src)/%.pl FORCE
 	$(call if_changed,perl)
 
-OBJECT_FILES_NON_STANDARD_aesp8-ppc.o := y
-OBJECT_FILES_NON_STANDARD_ghashp8-ppc.o := y
+OBJECT_FILES_NON_STANDARD_aesp10-ppc.o := y
+OBJECT_FILES_NON_STANDARD_ghashp10-ppc.o := y
@@ -30,15 +30,15 @@ MODULE_AUTHOR("Danny Tsen <dtsen@linux.ibm.com");
 MODULE_LICENSE("GPL v2");
 MODULE_ALIAS_CRYPTO("aes");
 
-asmlinkage int aes_p8_set_encrypt_key(const u8 *userKey, const int bits,
+asmlinkage int aes_p10_set_encrypt_key(const u8 *userKey, const int bits,
 				      void *key);
-asmlinkage void aes_p8_encrypt(const u8 *in, u8 *out, const void *key);
+asmlinkage void aes_p10_encrypt(const u8 *in, u8 *out, const void *key);
 asmlinkage void aes_p10_gcm_encrypt(u8 *in, u8 *out, size_t len,
 				    void *rkey, u8 *iv, void *Xi);
 asmlinkage void aes_p10_gcm_decrypt(u8 *in, u8 *out, size_t len,
 				    void *rkey, u8 *iv, void *Xi);
 asmlinkage void gcm_init_htable(unsigned char htable[256], unsigned char Xi[16]);
-asmlinkage void gcm_ghash_p8(unsigned char *Xi, unsigned char *Htable,
+asmlinkage void gcm_ghash_p10(unsigned char *Xi, unsigned char *Htable,
 		unsigned char *aad, unsigned int alen);
 
 struct aes_key {
@@ -93,7 +93,7 @@ static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash,
 	gctx->aadLen = alen;
 	i = alen & ~0xf;
 	if (i) {
-		gcm_ghash_p8(nXi, hash->Htable+32, aad, i);
+		gcm_ghash_p10(nXi, hash->Htable+32, aad, i);
 		aad += i;
 		alen -= i;
 	}
@@ -102,7 +102,7 @@ static void set_aad(struct gcm_ctx *gctx, struct Hash_ctx *hash,
 			nXi[i] ^= aad[i];
 
 		memset(gctx->aad_hash, 0, 16);
-		gcm_ghash_p8(gctx->aad_hash, hash->Htable+32, nXi, 16);
+		gcm_ghash_p10(gctx->aad_hash, hash->Htable+32, nXi, 16);
 	} else {
 		memcpy(gctx->aad_hash, nXi, 16);
 	}
@@ -115,7 +115,7 @@ static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey,
 {
 	__be32 counter = cpu_to_be32(1);
 
-	aes_p8_encrypt(hash->H, hash->H, rdkey);
+	aes_p10_encrypt(hash->H, hash->H, rdkey);
 	set_subkey(hash->H);
 	gcm_init_htable(hash->Htable+32, hash->H);
 
@@ -126,7 +126,7 @@ static void gcmp10_init(struct gcm_ctx *gctx, u8 *iv, unsigned char *rdkey,
 	/*
 	 * Encrypt counter vector as iv tag and increment counter.
 	 */
-	aes_p8_encrypt(iv, gctx->ivtag, rdkey);
+	aes_p10_encrypt(iv, gctx->ivtag, rdkey);
 
 	counter = cpu_to_be32(2);
 	*((__be32 *)(iv+12)) = counter;
@@ -160,7 +160,7 @@ static void finish_tag(struct gcm_ctx *gctx, struct Hash_ctx *hash, int len)
 	/*
 	 * hash (AAD len and len)
 	 */
-	gcm_ghash_p8(hash->Htable, hash->Htable+32, aclen, 16);
+	gcm_ghash_p10(hash->Htable, hash->Htable+32, aclen, 16);
 
 	for (i = 0; i < 16; i++)
 		hash->Htable[i] ^= gctx->ivtag[i];
@@ -192,7 +192,7 @@ static int p10_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key,
 	int ret;
 
 	vsx_begin();
-	ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
+	ret = aes_p10_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
 	vsx_end();
 
 	return ret ? -EINVAL : 0;
@@ -110,7 +110,7 @@ die "can't locate ppc-xlate.pl";
 open STDOUT,"| $^X $xlate $flavour ".shift || die "can't call $xlate: $!";
 
 $FRAME=8*$SIZE_T;
-$prefix="aes_p8";
+$prefix="aes_p10";
 
 $sp="r1";
 $vrsave="r12";
@@ -64,7 +64,7 @@ $code=<<___;
 
 .text
 
-.globl	.gcm_init_p8
+.globl	.gcm_init_p10
 	lis	r0,0xfff0
 	li	r8,0x10
 	mfspr	$vrsave,256
@@ -110,7 +110,7 @@ $code=<<___;
 	.long	0
 	.byte	0,12,0x14,0,0,0,2,0
 	.long	0
-.size	.gcm_init_p8,.-.gcm_init_p8
+.size	.gcm_init_p10,.-.gcm_init_p10
 
 .globl	.gcm_init_htable
 	lis	r0,0xfff0
@@ -237,7 +237,7 @@ $code=<<___;
 	.long	0
 .size	.gcm_init_htable,.-.gcm_init_htable
 
-.globl	.gcm_gmult_p8
+.globl	.gcm_gmult_p10
 	lis	r0,0xfff8
 	li	r8,0x10
 	mfspr	$vrsave,256
@@ -283,9 +283,9 @@ $code=<<___;
 	.long	0
 	.byte	0,12,0x14,0,0,0,2,0
 	.long	0
-.size	.gcm_gmult_p8,.-.gcm_gmult_p8
+.size	.gcm_gmult_p10,.-.gcm_gmult_p10
 
-.globl	.gcm_ghash_p8
+.globl	.gcm_ghash_p10
 	lis	r0,0xfff8
 	li	r8,0x10
 	mfspr	$vrsave,256
@@ -350,7 +350,7 @@ Loop:
 	.long	0
 	.byte	0,12,0x14,0,0,0,4,0
 	.long	0
-.size	.gcm_ghash_p8,.-.gcm_ghash_p8
+.size	.gcm_ghash_p10,.-.gcm_ghash_p10
 
 .asciz	"GHASH for PowerISA 2.07, CRYPTOGAMS by <appro\@openssl.org>"
 .align	2
@@ -205,7 +205,6 @@ extern void iommu_register_group(struct iommu_table_group *table_group,
 				 int pci_domain_number, unsigned long pe_num);
 extern int iommu_add_device(struct iommu_table_group *table_group,
 		struct device *dev);
-extern void iommu_del_device(struct device *dev);
 extern long iommu_tce_xchg(struct mm_struct *mm, struct iommu_table *tbl,
 		unsigned long entry, unsigned long *hpa,
 		enum dma_data_direction *direction);
@@ -229,10 +228,6 @@ static inline int iommu_add_device(struct iommu_table_group *table_group,
 {
 	return 0;
 }
-
-static inline void iommu_del_device(struct device *dev)
-{
-}
 #endif /* !CONFIG_IOMMU_API */
 
 u64 dma_iommu_get_required_mask(struct device *dev);
@@ -144,7 +144,7 @@ static bool dma_iommu_bypass_supported(struct device *dev, u64 mask)
 /* We support DMA to/from any memory page via the iommu */
 int dma_iommu_dma_supported(struct device *dev, u64 mask)
 {
-	struct iommu_table *tbl = get_iommu_table_base(dev);
+	struct iommu_table *tbl;
 
 	if (dev_is_pci(dev) && dma_iommu_bypass_supported(dev, mask)) {
 		/*
@@ -162,6 +162,8 @@ int dma_iommu_dma_supported(struct device *dev, u64 mask)
 		return 1;
 	}
 
+	tbl = get_iommu_table_base(dev);
+
 	if (!tbl) {
 		dev_err(dev, "Warning: IOMMU dma not supported: mask 0x%08llx, table unavailable\n", mask);
 		return 0;
@@ -518,7 +518,7 @@ int ppc_iommu_map_sg(struct device *dev, struct iommu_table *tbl,
 		/* Convert entry to a dma_addr_t */
 		entry += tbl->it_offset;
 		dma_addr = entry << tbl->it_page_shift;
-		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));
+		dma_addr |= (vaddr & ~IOMMU_PAGE_MASK(tbl));
 
 		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
 			    npages, entry, dma_addr);
@@ -905,6 +905,7 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	unsigned int order;
 	unsigned int nio_pages, io_order;
 	struct page *page;
+	int tcesize = (1 << tbl->it_page_shift);
 
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
@@ -931,7 +932,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 	memset(ret, 0, size);
 
 	/* Set up tces to cover the allocated range */
-	nio_pages = size >> tbl->it_page_shift;
+	nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
+
 	io_order = get_iommu_order(size, tbl);
 	mapping = iommu_alloc(dev, tbl, ret, nio_pages, DMA_BIDIRECTIONAL,
 			      mask >> tbl->it_page_shift, io_order, 0);
@@ -939,7 +941,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl,
 		free_pages((unsigned long)ret, order);
 		return NULL;
 	}
-	*dma_handle = mapping;
+
+	*dma_handle = mapping | ((u64)ret & (tcesize - 1));
 	return ret;
 }
 
@@ -950,7 +953,7 @@ void iommu_free_coherent(struct iommu_table *tbl, size_t size,
 		unsigned int nio_pages;
 
 		size = PAGE_ALIGN(size);
-		nio_pages = size >> tbl->it_page_shift;
+		nio_pages = IOMMU_PAGE_ALIGN(size, tbl) >> tbl->it_page_shift;
 		iommu_free(tbl, dma_handle, nio_pages);
 		size = PAGE_ALIGN(size);
 		free_pages((unsigned long)vaddr, get_order(size));
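A minimal userspace sketch of the arithmetic the iommu_alloc_coherent() hunks above rely on, assuming a 2 MB IOMMU (TCE) page and 4 KB kernel pages; the names tce_align and make_dma_handle are illustrative stand-ins, not kernel API. It shows why the page count must be rounded up to whole IOMMU pages and why the buffer's offset within its TCE has to be folded back into the DMA handle.

/*
 * Sketch only: TCE pages larger than the kernel page size (assumed 2 MB vs 4 KB).
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12			/* 4 KB kernel pages (assumed) */
#define TCE_SHIFT	21			/* 2 MB IOMMU pages (assumed) */
#define TCE_SIZE	(1UL << TCE_SHIFT)

/* Round a byte count up to whole IOMMU pages, like IOMMU_PAGE_ALIGN(). */
static uint64_t tce_align(uint64_t size)
{
	return (size + TCE_SIZE - 1) & ~(TCE_SIZE - 1);
}

/*
 * The mapping handed back by the IOMMU allocator is TCE-page aligned; the
 * buffer itself is only kernel-page aligned, so its offset inside the first
 * TCE must be OR'd into the DMA handle.
 */
static uint64_t make_dma_handle(uint64_t mapping, uint64_t vaddr)
{
	return mapping | (vaddr & (TCE_SIZE - 1));
}

int main(void)
{
	uint64_t size = 64 * 1024;				/* a 64 KB coherent buffer */
	uint64_t vaddr = 0x100000ULL + 3 * (1UL << PAGE_SHIFT);	/* page aligned, not TCE aligned */
	uint64_t mapping = 0x80000000ULL;			/* pretend TCE-aligned window slot */

	/* Naive count: size >> TCE_SHIFT is 0 pages for a 64 KB buffer. */
	printf("naive nio_pages   = %llu\n", (unsigned long long)(size >> TCE_SHIFT));
	/* Fixed count: round up first, so at least one TCE covers the buffer. */
	printf("aligned nio_pages = %llu\n",
	       (unsigned long long)(tce_align(size) >> TCE_SHIFT));
	printf("dma_handle        = 0x%llx\n",
	       (unsigned long long)make_dma_handle(mapping, vaddr));
	return 0;
}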
@@ -1168,23 +1171,6 @@ int iommu_add_device(struct iommu_table_group *table_group, struct device *dev)
 }
 EXPORT_SYMBOL_GPL(iommu_add_device);
 
-void iommu_del_device(struct device *dev)
-{
-	/*
-	 * Some devices might not have IOMMU table and group
-	 * and we needn't detach them from the associated
-	 * IOMMU groups
-	 */
-	if (!device_iommu_mapped(dev)) {
-		pr_debug("iommu_tce: skipping device %s with no tbl\n",
-			 dev_name(dev));
-		return;
-	}
-
-	iommu_group_remove_device(dev);
-}
-EXPORT_SYMBOL_GPL(iommu_del_device);
-
 /*
  * A simple iommu_table_group_ops which only allows reusing the existing
  * iommu_table. This handles VFIO for POWER7 or the nested KVM.
@@ -93,11 +93,12 @@ static int process_ISA_OF_ranges(struct device_node *isa_node,
 	}
 
 inval_range:
-	if (!phb_io_base_phys) {
+	if (phb_io_base_phys) {
 		pr_err("no ISA IO ranges or unexpected isa range, mapping 64k\n");
 		remap_isa_base(phb_io_base_phys, 0x10000);
 		return 0;
 	}
-	return 0;
+
+	return -EINVAL;
 }
 
@@ -1040,8 +1040,8 @@ void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
 				  pte_t entry, unsigned long address, int psize)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED |
-					      _PAGE_RW | _PAGE_EXEC);
+	unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY |
+					      _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
 
 	unsigned long change = pte_val(entry) ^ pte_val(*ptep);
 	/*
@@ -101,6 +101,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
 		bpf_hdr = jit_data->header;
 		proglen = jit_data->proglen;
 		extra_pass = true;
+		/* During extra pass, ensure index is reset before repopulating extable entries */
+		cgctx.exentry_idx = 0;
 		goto skip_init_ctx;
 	}
 
@@ -265,6 +265,7 @@ config CPM2
 config FSL_ULI1575
 	bool "ULI1575 PCIe south bridge support"
 	depends on FSL_SOC_BOOKE || PPC_86xx
+	depends on PCI
 	select FSL_PCI
 	select GENERIC_ISA_DMA
 	help
@@ -865,28 +865,3 @@ void __init pnv_pci_init(void)
 	/* Configure IOMMU DMA hooks */
 	set_pci_dma_ops(&dma_iommu_ops);
 }
-
-static int pnv_tce_iommu_bus_notifier(struct notifier_block *nb,
-		unsigned long action, void *data)
-{
-	struct device *dev = data;
-
-	switch (action) {
-	case BUS_NOTIFY_DEL_DEVICE:
-		iommu_del_device(dev);
-		return 0;
-	default:
-		return 0;
-	}
-}
-
-static struct notifier_block pnv_tce_iommu_bus_nb = {
-	.notifier_call = pnv_tce_iommu_bus_notifier,
-};
-
-static int __init pnv_tce_iommu_bus_notifier_init(void)
-{
-	bus_register_notifier(&pci_bus_type, &pnv_tce_iommu_bus_nb);
-	return 0;
-}
-machine_subsys_initcall_sync(powernv, pnv_tce_iommu_bus_notifier_init);
@@ -91,19 +91,24 @@ static struct iommu_table_group *iommu_pseries_alloc_group(int node)
 static void iommu_pseries_free_group(struct iommu_table_group *table_group,
 		const char *node_name)
 {
-	struct iommu_table *tbl;
-
 	if (!table_group)
 		return;
 
-	tbl = table_group->tables[0];
 #ifdef CONFIG_IOMMU_API
 	if (table_group->group) {
 		iommu_group_put(table_group->group);
 		BUG_ON(table_group->group);
 	}
 #endif
-	iommu_tce_table_put(tbl);
+
+	/* Default DMA window table is at index 0, while DDW at 1. SR-IOV
+	 * adapters only have table on index 1.
+	 */
+	if (table_group->tables[0])
+		iommu_tce_table_put(table_group->tables[0]);
+
+	if (table_group->tables[1])
+		iommu_tce_table_put(table_group->tables[1]);
 
 	kfree(table_group);
 }
@@ -312,13 +317,22 @@ static void tce_free_pSeriesLP(unsigned long liobn, long tcenum, long tceshift,
 static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages)
 {
 	u64 rc;
+	long rpages = npages;
+	unsigned long limit;
 
 	if (!firmware_has_feature(FW_FEATURE_STUFF_TCE))
 		return tce_free_pSeriesLP(tbl->it_index, tcenum,
 					  tbl->it_page_shift, npages);
 
-	rc = plpar_tce_stuff((u64)tbl->it_index,
-			     (u64)tcenum << tbl->it_page_shift, 0, npages);
+	do {
+		limit = min_t(unsigned long, rpages, 512);
+
+		rc = plpar_tce_stuff((u64)tbl->it_index,
+				     (u64)tcenum << tbl->it_page_shift, 0, limit);
+
+		rpages -= limit;
+		tcenum += limit;
+	} while (rpages > 0 && !rc);
 
 	if (rc && printk_ratelimit()) {
 		printk("tce_freemulti_pSeriesLP: plpar_tce_stuff failed\n");
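A userspace sketch of the batching pattern the tce_freemulti_pSeriesLP() hunk above introduces: never hand the hypervisor more than 512 TCEs per H_STUFF_TCE call, and stop early on failure. Here stuff_tces() is a hypothetical stand-in for plpar_tce_stuff(), and the 512 limit mirrors the value used in the hunk.

/* Sketch only: chunk a large free request into bounded hypercall-sized batches. */
#include <stdio.h>

#define MAX_TCES_PER_CALL 512

/* Pretend hypercall: clear `count` TCEs starting at `tcenum`; returns 0 on success. */
static long stuff_tces(long tcenum, unsigned long count)
{
	printf("stuff %lu TCEs starting at %ld\n", count, tcenum);
	return 0;
}

static long free_tces(long tcenum, long npages)
{
	long rpages = npages;
	long rc = 0;

	do {
		/* Clamp each batch to the per-call limit. */
		unsigned long limit = rpages < MAX_TCES_PER_CALL ?
				      (unsigned long)rpages : MAX_TCES_PER_CALL;

		rc = stuff_tces(tcenum, limit);

		rpages -= limit;
		tcenum += limit;
	} while (rpages > 0 && !rc);

	return rc;
}

int main(void)
{
	/* 1300 pages -> calls of 512, 512 and 276 TCEs. */
	return (int)free_tces(0, 1300);
}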
@@ -1695,31 +1709,6 @@ static int __init disable_multitce(char *str)
 
 __setup("multitce=", disable_multitce);
 
-static int tce_iommu_bus_notifier(struct notifier_block *nb,
-		unsigned long action, void *data)
-{
-	struct device *dev = data;
-
-	switch (action) {
-	case BUS_NOTIFY_DEL_DEVICE:
-		iommu_del_device(dev);
-		return 0;
-	default:
-		return 0;
-	}
-}
-
-static struct notifier_block tce_iommu_bus_nb = {
-	.notifier_call = tce_iommu_bus_notifier,
-};
-
-static int __init tce_iommu_bus_notifier_init(void)
-{
-	bus_register_notifier(&pci_bus_type, &tce_iommu_bus_nb);
-	return 0;
-}
-machine_subsys_initcall_sync(pseries, tce_iommu_bus_notifier_init);
-
 #ifdef CONFIG_SPAPR_TCE_IOMMU
 struct iommu_group *pSeries_pci_device_group(struct pci_controller *hose,
 					     struct pci_dev *pdev)
@@ -88,7 +88,7 @@ static unsigned long ndump = 64;
 static unsigned long nidump = 16;
 static unsigned long ncsum = 4096;
 static int termch;
-static char tmpstr[128];
+static char tmpstr[KSYM_NAME_LEN];
 static int tracing_enabled;
 
 static long bus_error_jmp[JMP_BUF_LEN];