Merge branch 'akpm' (patches from Andrew)
Merge third patch-bomb from Andrew Morton:

 - even more of the rest of MM
 - lib/ updates
 - checkpatch updates
 - small changes to a few scruffy filesystems
 - kmod fixes/cleanups
 - kexec updates
 - a dma-mapping cleanup series from hch

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (81 commits)
  dma-mapping: consolidate dma_set_mask
  dma-mapping: consolidate dma_supported
  dma-mapping: cosolidate dma_mapping_error
  dma-mapping: consolidate dma_{alloc,free}_noncoherent
  dma-mapping: consolidate dma_{alloc,free}_{attrs,coherent}
  mm: use vma_is_anonymous() in create_huge_pmd() and wp_huge_pmd()
  mm: make sure all file VMAs have ->vm_ops set
  mm, mpx: add "vm_flags_t vm_flags" arg to do_mmap_pgoff()
  mm: mark most vm_operations_struct const
  namei: fix warning while make xmldocs caused by namei.c
  ipc: convert invalid scenarios to use WARN_ON
  zlib_deflate/deftree: remove bi_reverse()
  lib/decompress_unlzma: Do a NULL check for pointer
  lib/decompressors: use real out buf size for gunzip with kernel
  fs/affs: make root lookup from blkdev logical size
  sysctl: fix int -> unsigned long assignments in INT_MIN case
  kexec: export KERNEL_IMAGE_SIZE to vmcoreinfo
  kexec: align crash_notes allocation to make it be inside one physical page
  kexec: remove unnecessary test in kimage_alloc_crash_control_pages()
  kexec: split kexec_load syscall from kexec core code
  ...
commit 33e247c7e5
--- a/CREDITS
+++ b/CREDITS
@@ -2992,6 +2992,10 @@ S: 2200 Mission College Blvd
 S: Santa Clara, CA 95052
 S: USA
 
+N: Anil Ravindranath
+E: anil_ravindranath@pmc-sierra.com
+D: PMC-Sierra MaxRAID driver
+
 N: Eric S. Raymond
 E: esr@thyrsus.com
 W: http://www.tuxedo.org/~esr/

--- a/Documentation/vm/00-INDEX
+++ b/Documentation/vm/00-INDEX
@@ -14,6 +14,8 @@ hugetlbpage.txt
 	- a brief summary of hugetlbpage support in the Linux kernel.
 hwpoison.txt
 	- explains what hwpoison is
+idle_page_tracking.txt
+	- description of the idle page tracking feature.
 ksm.txt
 	- how to use the Kernel Samepage Merging feature.
 numa

--- /dev/null
+++ b/Documentation/vm/idle_page_tracking.txt (new file, 98 lines)
@@ -0,0 +1,98 @@
+MOTIVATION
+
+The idle page tracking feature allows to track which memory pages are being
+accessed by a workload and which are idle. This information can be useful for
+estimating the workload's working set size, which, in turn, can be taken into
+account when configuring the workload parameters, setting memory cgroup limits,
+or deciding where to place the workload within a compute cluster.
+
+It is enabled by CONFIG_IDLE_PAGE_TRACKING=y.
+
+USER API
+
+The idle page tracking API is located at /sys/kernel/mm/page_idle. Currently,
+it consists of the only read-write file, /sys/kernel/mm/page_idle/bitmap.
+
+The file implements a bitmap where each bit corresponds to a memory page. The
+bitmap is represented by an array of 8-byte integers, and the page at PFN #i is
+mapped to bit #i%64 of array element #i/64, byte order is native. When a bit is
+set, the corresponding page is idle.
+
+A page is considered idle if it has not been accessed since it was marked idle
+(for more details on what "accessed" actually means see the IMPLEMENTATION
+DETAILS section). To mark a page idle one has to set the bit corresponding to
+the page by writing to the file. A value written to the file is OR-ed with the
+current bitmap value.
+
+Only accesses to user memory pages are tracked. These are pages mapped to a
+process address space, page cache and buffer pages, swap cache pages. For other
+page types (e.g. SLAB pages) an attempt to mark a page idle is silently ignored,
+and hence such pages are never reported idle.
+
+For huge pages the idle flag is set only on the head page, so one has to read
+/proc/kpageflags in order to correctly count idle huge pages.
+
+Reading from or writing to /sys/kernel/mm/page_idle/bitmap will return
+-EINVAL if you are not starting the read/write on an 8-byte boundary, or
+if the size of the read/write is not a multiple of 8 bytes. Writing to
+this file beyond max PFN will return -ENXIO.
+
+That said, in order to estimate the amount of pages that are not used by a
+workload one should:
+
+ 1. Mark all the workload's pages as idle by setting corresponding bits in
+    /sys/kernel/mm/page_idle/bitmap. The pages can be found by reading
+    /proc/pid/pagemap if the workload is represented by a process, or by
+    filtering out alien pages using /proc/kpagecgroup in case the workload is
+    placed in a memory cgroup.
+
+ 2. Wait until the workload accesses its working set.
+
+ 3. Read /sys/kernel/mm/page_idle/bitmap and count the number of bits set. If
+    one wants to ignore certain types of pages, e.g. mlocked pages since they
+    are not reclaimable, he or she can filter them out using /proc/kpageflags.
+
+See Documentation/vm/pagemap.txt for more information about /proc/pid/pagemap,
+/proc/kpageflags, and /proc/kpagecgroup.
+
+IMPLEMENTATION DETAILS
+
+The kernel internally keeps track of accesses to user memory pages in order to
+reclaim unreferenced pages first on memory shortage conditions. A page is
+considered referenced if it has been recently accessed via a process address
+space, in which case one or more PTEs it is mapped to will have the Accessed bit
+set, or marked accessed explicitly by the kernel (see mark_page_accessed()). The
+latter happens when:
+
+ - a userspace process reads or writes a page using a system call (e.g. read(2)
+   or write(2))
+
+ - a page that is used for storing filesystem buffers is read or written,
+   because a process needs filesystem metadata stored in it (e.g. lists a
+   directory tree)
+
+ - a page is accessed by a device driver using get_user_pages()
+
+When a dirty page is written to swap or disk as a result of memory reclaim or
+exceeding the dirty memory limit, it is not marked referenced.
+
+The idle memory tracking feature adds a new page flag, the Idle flag. This flag
+is set manually, by writing to /sys/kernel/mm/page_idle/bitmap (see the USER API
+section), and cleared automatically whenever a page is referenced as defined
+above.
+
+When a page is marked idle, the Accessed bit must be cleared in all PTEs it is
+mapped to, otherwise we will not be able to detect accesses to the page coming
+from a process address space. To avoid interference with the reclaimer, which,
+as noted above, uses the Accessed bit to promote actively referenced pages, one
+more page flag is introduced, the Young flag. When the PTE Accessed bit is
+cleared as a result of setting or updating a page's Idle flag, the Young flag
+is set on the page. The reclaimer treats the Young flag as an extra PTE
+Accessed bit and therefore will consider such a page as referenced.
+
+Since the idle memory tracking feature is based on the memory reclaimer logic,
+it only works with pages that are on an LRU list, other pages are silently
+ignored. That means it will ignore a user memory page if it is isolated, but
+since there are usually not many of them, it should not affect the overall
+result noticeably. In order not to stall scanning of the idle page bitmap,
+locked pages may be skipped too.

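Given the layout documented above (the bit for PFN #i lives in bit i%64 of
64-bit word i/64, native byte order), user space marks one page idle with a
single aligned 8-byte write at byte offset (i/64)*8. A minimal user-space
sketch; the helper name and error handling are illustrative, not part of the
patch set:

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    /* Illustrative helper: set the idle bit for 'pfn'.  Writes must be
     * 8-byte aligned and 8-byte sized; the kernel ORs the written value
     * into the bitmap, so the other 63 bits of the word are unaffected. */
    static int mark_pfn_idle(uint64_t pfn)
    {
            uint64_t word = UINT64_C(1) << (pfn % 64);
            off_t offset = (off_t)(pfn / 64) * sizeof(word);
            int fd = open("/sys/kernel/mm/page_idle/bitmap", O_WRONLY);
            ssize_t ret;

            if (fd < 0)
                    return -1;
            ret = pwrite(fd, &word, sizeof(word), offset);
            close(fd);
            return ret == (ssize_t)sizeof(word) ? 0 : -1;
    }
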
--- a/Documentation/vm/pagemap.txt
+++ b/Documentation/vm/pagemap.txt
@@ -5,7 +5,7 @@ pagemap is a new (as of 2.6.25) set of interfaces in the kernel that allow
 userspace programs to examine the page tables and related information by
 reading files in /proc.
 
-There are three components to pagemap:
+There are four components to pagemap:
 
 * /proc/pid/pagemap.  This file lets a userspace process find out which
   physical frame each virtual page is mapped to.  It contains one 64-bit
@@ -70,6 +70,11 @@ There are three components to pagemap:
 22. THP
 23. BALLOON
 24. ZERO_PAGE
+25. IDLE
+
+ * /proc/kpagecgroup.  This file contains a 64-bit inode number of the
+   memory cgroup each page is charged to, indexed by PFN. Only available when
+   CONFIG_MEMCG is set.
 
 Short descriptions to the page flags:
 
@@ -116,6 +121,12 @@ Short descriptions to the page flags:
 24. ZERO_PAGE
     zero page for pfn_zero or huge_zero page
 
+25. IDLE
+    page has not been accessed since it was marked idle (see
+    Documentation/vm/idle_page_tracking.txt). Note that this flag may be
+    stale in case the page was accessed via a PTE. To make sure the flag
+    is up-to-date one has to read /sys/kernel/mm/page_idle/bitmap first.
+
 [IO related page flags]
  1. ERROR     IO error occurred
  3. UPTODATE  page has up-to-date data

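Step 1 of the idle-tracking recipe finds a workload's PFNs through
/proc/pid/pagemap. Per pagemap.txt, each entry is 64 bits, with the PFN in
bits 0-54 when bit 63 (page present) is set. A sketch of reading the entry
for one virtual address of the current process:

    #include <fcntl.h>
    #include <stdint.h>
    #include <unistd.h>

    /* Return the raw pagemap entry for 'vaddr', or 0 on error.  Bits 0-54
     * hold the PFN when bit 63 (page present) is set. */
    static uint64_t pagemap_entry(uintptr_t vaddr)
    {
            uint64_t entry = 0;
            long page_size = sysconf(_SC_PAGESIZE);
            int fd = open("/proc/self/pagemap", O_RDONLY);

            if (fd >= 0) {
                    off_t offset = (off_t)(vaddr / page_size) * sizeof(entry);

                    if (pread(fd, &entry, sizeof(entry), offset) != sizeof(entry))
                            entry = 0;
                    close(fd);
            }
            return entry;
    }
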
--- a/Documentation/vm/zswap.txt
+++ b/Documentation/vm/zswap.txt
@@ -32,7 +32,7 @@ can also be enabled and disabled at runtime using the sysfs interface.
 An example command to enable zswap at runtime, assuming sysfs is mounted
 at /sys, is:
 
-echo 1 > /sys/modules/zswap/parameters/enabled
+echo 1 > /sys/module/zswap/parameters/enabled
 
 When zswap is disabled at runtime it will stop storing pages that are
 being swapped out.  However, it will _not_ immediately write out or fault
@@ -49,14 +49,26 @@ Zswap receives pages for compression through the Frontswap API and is able to
 evict pages from its own compressed pool on an LRU basis and write them back to
 the backing swap device in the case that the compressed pool is full.
 
-Zswap makes use of zbud for the managing the compressed memory pool. Each
-allocation in zbud is not directly accessible by address. Rather, a handle is
+Zswap makes use of zpool for the managing the compressed memory pool. Each
+allocation in zpool is not directly accessible by address. Rather, a handle is
 returned by the allocation routine and that handle must be mapped before being
 accessed. The compressed memory pool grows on demand and shrinks as compressed
-pages are freed. The pool is not preallocated.
+pages are freed. The pool is not preallocated. By default, a zpool of type
+zbud is created, but it can be selected at boot time by setting the "zpool"
+attribute, e.g. zswap.zpool=zbud. It can also be changed at runtime using the
+sysfs "zpool" attribute, e.g.
+
+echo zbud > /sys/module/zswap/parameters/zpool
+
+The zbud type zpool allocates exactly 1 page to store 2 compressed pages, which
+means the compression ratio will always be 2:1 or worse (because of half-full
+zbud pages). The zsmalloc type zpool has a more complex compressed page
+storage method, and it can achieve greater storage densities. However,
+zsmalloc does not implement compressed page eviction, so once zswap fills it
+cannot evict the oldest page, it can only reject new pages.
 
 When a swap page is passed from frontswap to zswap, zswap maintains a mapping
-of the swap entry, a combination of the swap type and swap offset, to the zbud
+of the swap entry, a combination of the swap type and swap offset, to the zpool
 handle that references that compressed swap page.  This mapping is achieved
 with a red-black tree per swap type.  The swap offset is the search key for the
 tree nodes.
@@ -74,9 +86,17 @@ controlled policy:
 * max_pool_percent - The maximum percentage of memory that the compressed
   pool can occupy.
 
-Zswap allows the compressor to be selected at kernel boot time by setting the
-“compressor” attribute. The default compressor is lzo. e.g.
-zswap.compressor=deflate
+The default compressor is lzo, but it can be selected at boot time by setting
+the “compressor” attribute, e.g. zswap.compressor=lzo. It can also be changed
+at runtime using the sysfs "compressor" attribute, e.g.
+
+echo lzo > /sys/module/zswap/parameters/compressor
+
+When the zpool and/or compressor parameter is changed at runtime, any existing
+compressed pages are not modified; they are left in their own zpool. When a
+request is made for a page in an old zpool, it is uncompressed using its
+original compressor. Once all pages are removed from an old zpool, the zpool
+and its compressor are freed.
 
 A debugfs interface is provided for various statistic about pool size, number
 of pages stored, and various counters for the reasons pages are rejected.

--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -8199,10 +8199,9 @@ F: drivers/hwmon/pmbus/
 F:	include/linux/i2c/pmbus.h
 
 PMC SIERRA MaxRAID DRIVER
-M:	Anil Ravindranath <anil_ravindranath@pmc-sierra.com>
 L:	linux-scsi@vger.kernel.org
 W:	http://www.pmc-sierra.com/
-S:	Supported
+S:	Orphan
 F:	drivers/scsi/pmcraid.*
 
 PMC SIERRA PM8001 DRIVER

--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -2,6 +2,9 @@
 # General architecture dependent options
 #
 
+config KEXEC_CORE
+	bool
+
 config OPROFILE
 	tristate "OProfile system profiling"
 	depends on PROFILING

--- a/arch/alpha/include/asm/dma-mapping.h
+++ b/arch/alpha/include/asm/dma-mapping.h
@@ -12,42 +12,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 
 #include <asm-generic/dma-mapping-common.h>
 
-#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t gfp,
-				    struct dma_attrs *attrs)
-{
-	return get_dma_ops(dev)->alloc(dev, size, dma_handle, gfp, attrs);
-}
-
-#define dma_free_coherent(d,s,c,h)	dma_free_attrs(d,s,c,h,NULL)
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-				  void *vaddr, dma_addr_t dma_handle,
-				  struct dma_attrs *attrs)
-{
-	get_dma_ops(dev)->free(dev, size, vaddr, dma_handle, attrs);
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return get_dma_ops(dev)->mapping_error(dev, dma_addr);
-}
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	return get_dma_ops(dev)->dma_supported(dev, mask);
-}
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-	return get_dma_ops(dev)->set_dma_mask(dev, mask);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)
-
 #define dma_cache_sync(dev, va, size, dir)	((void)0)
 
 #endif /* _ALPHA_DMA_MAPPING_H */

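The per-architecture boilerplate being deleted in this series is replaced by
shared helpers in <asm-generic/dma-mapping-common.h>; architectures with a
non-default dma_supported() opt out via HAVE_ARCH_DMA_SUPPORTED, as the arm
and hexagon hunks below show. A simplified sketch of the consolidated mask
handling, not the verbatim header (which also wires up the coherent-pool and
dma-debug hooks):

    /* Sketch: architectures that keep their own dma_supported() define
     * HAVE_ARCH_DMA_SUPPORTED and provide an extern declaration instead. */
    #ifndef HAVE_ARCH_DMA_SUPPORTED
    static inline int dma_supported(struct device *dev, u64 mask)
    {
            struct dma_map_ops *ops = get_dma_ops(dev);

            if (!ops)
                    return 0;
            if (!ops->dma_supported)
                    return 1;
            return ops->dma_supported(dev, mask);
    }
    #endif

    static inline int dma_set_mask(struct device *dev, u64 mask)
    {
            struct dma_map_ops *ops = get_dma_ops(dev);

            /* Legacy per-bus hook, still honored where an arch sets it. */
            if (ops->set_dma_mask)
                    return ops->set_dma_mask(dev, mask);

            if (!dev->dma_mask || !dma_supported(dev, mask))
                    return -EIO;
            *dev->dma_mask = mask;
            return 0;
    }
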
--- a/arch/alpha/kernel/pci-noop.c
+++ b/arch/alpha/kernel/pci-noop.c
@@ -166,15 +166,6 @@ static int alpha_noop_supported(struct device *dev, u64 mask)
 	return mask < 0x00ffffffUL ? 0 : 1;
 }
 
-static int alpha_noop_set_mask(struct device *dev, u64 mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = mask;
-	return 0;
-}
-
 struct dma_map_ops alpha_noop_ops = {
 	.alloc			= alpha_noop_alloc_coherent,
 	.free			= alpha_noop_free_coherent,
@@ -182,7 +173,6 @@ struct dma_map_ops alpha_noop_ops = {
 	.map_sg			= alpha_noop_map_sg,
 	.mapping_error		= alpha_noop_mapping_error,
 	.dma_supported		= alpha_noop_supported,
-	.set_dma_mask		= alpha_noop_set_mask,
 };
 
 struct dma_map_ops *dma_ops = &alpha_noop_ops;

--- a/arch/alpha/kernel/pci_iommu.c
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -939,16 +939,6 @@ static int alpha_pci_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	return dma_addr == 0;
 }
 
-static int alpha_pci_set_mask(struct device *dev, u64 mask)
-{
-	if (!dev->dma_mask ||
-	    !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
-		return -EIO;
-
-	*dev->dma_mask = mask;
-	return 0;
-}
-
 struct dma_map_ops alpha_pci_ops = {
 	.alloc			= alpha_pci_alloc_coherent,
 	.free			= alpha_pci_free_coherent,
@@ -958,7 +948,6 @@ struct dma_map_ops alpha_pci_ops = {
 	.unmap_sg		= alpha_pci_unmap_sg,
 	.mapping_error		= alpha_pci_mapping_error,
 	.dma_supported		= alpha_pci_supported,
-	.set_dma_mask		= alpha_pci_set_mask,
 };
 
 struct dma_map_ops *dma_ops = &alpha_pci_ops;

--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -2020,6 +2020,7 @@ config KEXEC
 	bool "Kexec system call (EXPERIMENTAL)"
 	depends on (!SMP || PM_SLEEP_SMP)
 	depends on !CPU_V7M
+	select KEXEC_CORE
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot

--- a/arch/arm/boot/compressed/decompress.c
+++ b/arch/arm/boot/compressed/decompress.c
@@ -57,5 +57,5 @@ extern char * strstr(const char * s1, const char *s2);
 
 int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x))
 {
-	return decompress(input, len, NULL, NULL, output, NULL, error);
+	return __decompress(input, len, NULL, NULL, output, 0, NULL, error);
 }

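Throughout this merge the boot decompressors switch from decompress() to
__decompress(), which adds an explicit output-buffer length so gunzip can use
the real buffer size; callers pass 0 when the size is unknown, as here. A
sketch of the signature the converted call sites assume (each
lib/decompress_*.c supplies its own definition):

    /* Sketch of the new entry point.  The extra 'out_len' argument is the
     * real output buffer size; callers pass 0 when the size is not known. */
    int __decompress(unsigned char *inbuf, long len,
                     long (*fill)(void *, unsigned long),
                     long (*flush)(void *, unsigned long),
                     unsigned char *outbuf, long out_len,
                     long *posp,
                     void (*error)(char *x));
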
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -8,7 +8,6 @@
 #include <linux/dma-attrs.h>
 #include <linux/dma-debug.h>
 
-#include <asm-generic/dma-coherent.h>
 #include <asm/memory.h>
 
 #include <xen/xen.h>
@@ -39,12 +38,15 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
 	dev->archdata.dma_ops = ops;
 }
 
-#include <asm-generic/dma-mapping-common.h>
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-	return get_dma_ops(dev)->set_dma_mask(dev, mask);
-}
+#define HAVE_ARCH_DMA_SUPPORTED 1
+extern int dma_supported(struct device *dev, u64 mask);
+
+/*
+ * Note that while the generic code provides dummy dma_{alloc,free}_noncoherent
+ * implementations, we don't provide a dma_cache_sync function so drivers using
+ * this API are highlighted with build warnings.
+ */
+#include <asm-generic/dma-mapping-common.h>
 
 #ifdef __arch_page_to_dma
 #error Please update to __arch_pfn_to_dma
@@ -167,32 +169,6 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 
 static inline void dma_mark_clean(void *addr, size_t size) { }
 
-/*
- * DMA errors are defined by all-bits-set in the DMA address.
- */
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	debug_dma_mapping_error(dev, dma_addr);
-	return dma_addr == DMA_ERROR_CODE;
-}
-
-/*
- * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
- * function so drivers using this API are highlighted with build warnings.
- */
-static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
-		dma_addr_t *handle, gfp_t gfp)
-{
-	return NULL;
-}
-
-static inline void dma_free_noncoherent(struct device *dev, size_t size,
-		void *cpu_addr, dma_addr_t handle)
-{
-}
-
-extern int dma_supported(struct device *dev, u64 mask);
-
 extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
 
 /**
@@ -209,21 +185,6 @@ extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);
 extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 			   gfp_t gfp, struct dma_attrs *attrs);
 
-#define dma_alloc_coherent(d, s, h, f)	dma_alloc_attrs(d, s, h, f, NULL)
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t flag,
-				    struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	void *cpu_addr;
-	BUG_ON(!ops);
-
-	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
-	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
-	return cpu_addr;
-}
-
 /**
  * arm_dma_free - free memory allocated by arm_dma_alloc
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -241,19 +202,6 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size,
 extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 			 dma_addr_t handle, struct dma_attrs *attrs);
 
-#define dma_free_coherent(d, s, c, h)	dma_free_attrs(d, s, c, h, NULL)
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-				  void *cpu_addr, dma_addr_t dma_handle,
-				  struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	BUG_ON(!ops);
-
-	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-	ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
-
 /**
  * arm_dma_mmap - map a coherent DMA allocation into user space
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices

--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -676,10 +676,6 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		    gfp_t gfp, struct dma_attrs *attrs)
 {
 	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
-	void *memory;
-
-	if (dma_alloc_from_coherent(dev, size, handle, &memory))
-		return memory;
 
 	return __dma_alloc(dev, size, handle, gfp, prot, false,
 			   attrs, __builtin_return_address(0));
@@ -688,11 +684,6 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
 {
-	void *memory;
-
-	if (dma_alloc_from_coherent(dev, size, handle, &memory))
-		return memory;
-
 	return __dma_alloc(dev, size, handle, gfp, PAGE_KERNEL, true,
 			   attrs, __builtin_return_address(0));
 }
@@ -752,9 +743,6 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
 	bool want_vaddr = !dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs);
 
-	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
-		return;
-
 	size = PAGE_ALIGN(size);
 
 	if (nommu()) {

--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -22,8 +22,6 @@
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 
-#include <asm-generic/dma-coherent.h>
-
 #include <xen/xen.h>
 #include <asm/xen/hypervisor.h>
 
@@ -86,28 +84,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
 	return (phys_addr_t)dev_addr;
 }
 
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dev_addr)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	debug_dma_mapping_error(dev, dev_addr);
-	return ops->mapping_error(dev, dev_addr);
-}
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	return ops->dma_supported(dev, mask);
-}
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-	*dev->dma_mask = mask;
-
-	return 0;
-}
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)
@@ -120,50 +96,5 @@ static inline void dma_mark_clean(void *addr, size_t size)
 {
 }
 
-#define dma_alloc_coherent(d, s, h, f)	dma_alloc_attrs(d, s, h, f, NULL)
-#define dma_free_coherent(d, s, h, f)	dma_free_attrs(d, s, h, f, NULL)
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t flags,
-				    struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	void *vaddr;
-
-	if (dma_alloc_from_coherent(dev, size, dma_handle, &vaddr))
-		return vaddr;
-
-	vaddr = ops->alloc(dev, size, dma_handle, flags, attrs);
-	debug_dma_alloc_coherent(dev, size, *dma_handle, vaddr);
-	return vaddr;
-}
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-				  void *vaddr, dma_addr_t dev_addr,
-				  struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if (dma_release_from_coherent(dev, get_order(size), vaddr))
-		return;
-
-	debug_dma_free_coherent(dev, size, vaddr, dev_addr);
-	ops->free(dev, size, vaddr, dev_addr, attrs);
-}
-
-/*
- * There is no dma_cache_sync() implementation, so just return NULL here.
- */
-static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
-					  dma_addr_t *handle, gfp_t flags)
-{
-	return NULL;
-}
-
-static inline void dma_free_noncoherent(struct device *dev, size_t size,
-					void *cpu_addr, dma_addr_t handle)
-{
-}
-
 #endif /* __KERNEL__ */
 #endif /* __ASM_DMA_MAPPING_H */

--- a/arch/h8300/boot/compressed/misc.c
+++ b/arch/h8300/boot/compressed/misc.c
@@ -70,5 +70,5 @@ void decompress_kernel(void)
 	free_mem_ptr = (unsigned long)&_end;
 	free_mem_end_ptr = free_mem_ptr + HEAP_SIZE;
 
-	decompress(input_data, input_len, NULL, NULL, output, NULL, error);
+	__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
 }

--- a/arch/h8300/include/asm/dma-mapping.h
+++ b/arch/h8300/include/asm/dma-mapping.h
@@ -1,8 +1,6 @@
 #ifndef _H8300_DMA_MAPPING_H
 #define _H8300_DMA_MAPPING_H
 
-#include <asm-generic/dma-coherent.h>
-
 extern struct dma_map_ops h8300_dma_map_ops;
 
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
@@ -12,46 +10,4 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 
 #include <asm-generic/dma-mapping-common.h>
 
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	return 0;
-}
-
-static inline int dma_set_mask(struct device *dev, u64 mask)
-{
-	return 0;
-}
-
-#define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)
-
-#define dma_alloc_coherent(d, s, h, f)	dma_alloc_attrs(d, s, h, f, NULL)
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t flag,
-				    struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	void *memory;
-
-	memory = ops->alloc(dev, size, dma_handle, flag, attrs);
-	return memory;
-}
-
-#define dma_free_coherent(d, s, c, h)	dma_free_attrs(d, s, c, h, NULL)
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-				  void *cpu_addr, dma_addr_t dma_handle,
-				  struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	return 0;
-}
-
 #endif

--- a/arch/hexagon/include/asm/dma-mapping.h
+++ b/arch/hexagon/include/asm/dma-mapping.h
@@ -31,12 +31,10 @@
 
 struct device;
 extern int bad_dma_address;
+#define DMA_ERROR_CODE bad_dma_address
 
 extern struct dma_map_ops *dma_ops;
 
-#define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)
-
 static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 {
 	if (unlikely(dev == NULL))
@@ -45,8 +43,8 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return dma_ops;
 }
 
+#define HAVE_ARCH_DMA_SUPPORTED 1
 extern int dma_supported(struct device *dev, u64 mask);
-extern int dma_set_mask(struct device *dev, u64 mask);
 extern int dma_is_consistent(struct device *dev, dma_addr_t dma_handle);
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 			   enum dma_data_direction direction);
@@ -60,47 +58,4 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	return addr + size - 1 <= *dev->dma_mask;
 }
 
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-	if (dma_ops->mapping_error)
-		return dma_ops->mapping_error(dev, dma_addr);
-
-	return (dma_addr == bad_dma_address);
-}
-
-#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t flag,
-				    struct dma_attrs *attrs)
-{
-	void *ret;
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	ret = ops->alloc(dev, size, dma_handle, flag, attrs);
-
-	debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
-
-	return ret;
-}
-
-#define dma_free_coherent(d,s,c,h)	dma_free_attrs(d,s,c,h,NULL)
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-				  void *cpu_addr, dma_addr_t dma_handle,
-				  struct dma_attrs *attrs)
-{
-	struct dma_map_ops *dma_ops = get_dma_ops(dev);
-
-	BUG_ON(!dma_ops);
-
-	dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
-
-	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-}
-
 #endif

--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -44,17 +44,6 @@ int dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_supported);
 
-int dma_set_mask(struct device *dev, u64 mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	*dev->dma_mask = mask;
-
-	return 0;
-}
-EXPORT_SYMBOL(dma_set_mask);
-
 static struct gen_pool *coherent_pool;

--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -518,6 +518,7 @@ source "drivers/sn/Kconfig"
 config KEXEC
 	bool "kexec system call"
 	depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
+	select KEXEC_CORE
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot

--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -23,60 +23,10 @@ extern void machvec_dma_sync_single(struct device *, dma_addr_t, size_t,
 extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
 				enum dma_data_direction);
 
-#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-				    dma_addr_t *daddr, gfp_t gfp,
-				    struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-	void *caddr;
-
-	caddr = ops->alloc(dev, size, daddr, gfp, attrs);
-	debug_dma_alloc_coherent(dev, size, *daddr, caddr);
-	return caddr;
-}
-
-#define dma_free_coherent(d,s,c,h)	dma_free_attrs(d,s,c,h,NULL)
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-				  void *caddr, dma_addr_t daddr,
-				  struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-	debug_dma_free_coherent(dev, size, caddr, daddr);
-	ops->free(dev, size, caddr, daddr, attrs);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)
-
 #define get_dma_ops(dev) platform_dma_get_ops(dev)
 
 #include <asm-generic/dma-mapping-common.h>
 
-static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
-{
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-	debug_dma_mapping_error(dev, daddr);
-	return ops->mapping_error(dev, daddr);
-}
-
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	struct dma_map_ops *ops = platform_dma_get_ops(dev);
-	return ops->dma_supported(dev, mask);
-}
-
-static inline int
-dma_set_mask (struct device *dev, u64 mask)
-{
-	if (!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-	*dev->dma_mask = mask;
-	return 0;
-}
-
 static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 {
 	if (!dev->dma_mask)

--- a/arch/m32r/boot/compressed/misc.c
+++ b/arch/m32r/boot/compressed/misc.c
@@ -86,6 +86,7 @@ decompress_kernel(int mmu_on, unsigned char *zimage_data,
 	free_mem_end_ptr = free_mem_ptr + BOOT_HEAP_SIZE;
 
 	puts("\nDecompressing Linux... ");
-	decompress(input_data, input_len, NULL, NULL, output_data, NULL, error);
+	__decompress(input_data, input_len, NULL, NULL, output_data, 0,
+		     NULL, error);
 	puts("done.\nBooting the kernel.\n");
 }

--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -95,6 +95,7 @@ config MMU_SUN3
 config KEXEC
 	bool "kexec system call"
 	depends on M68KCLASSIC
+	select KEXEC_CORE
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot

--- a/arch/microblaze/include/asm/dma-mapping.h
+++ b/arch/microblaze/include/asm/dma-mapping.h
@@ -27,7 +27,6 @@
 #include <linux/dma-debug.h>
 #include <linux/dma-attrs.h>
 #include <asm/io.h>
-#include <asm-generic/dma-coherent.h>
 #include <asm/cacheflush.h>
 
 #define DMA_ERROR_CODE		(~(dma_addr_t)0x0)
@@ -45,31 +44,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
 	return &dma_direct_ops;
 }
 
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if (unlikely(!ops))
-		return 0;
-	if (!ops->dma_supported)
-		return 1;
-	return ops->dma_supported(dev, mask);
-}
-
-static inline int dma_set_mask(struct device *dev, u64 dma_mask)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if (unlikely(ops == NULL))
-		return -EIO;
-	if (ops->set_dma_mask)
-		return ops->set_dma_mask(dev, dma_mask);
-	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
-		return -EIO;
-	*dev->dma_mask = dma_mask;
-	return 0;
-}
-
 #include <asm-generic/dma-mapping-common.h>
 
 static inline void __dma_sync(unsigned long paddr,
@@ -88,50 +62,6 @@ static inline void __dma_sync(unsigned long paddr,
 	}
 }
 
-static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	debug_dma_mapping_error(dev, dma_addr);
-	if (ops->mapping_error)
-		return ops->mapping_error(dev, dma_addr);
-
-	return (dma_addr == DMA_ERROR_CODE);
-}
-
-#define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
-#define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)
-
-#define dma_alloc_coherent(d, s, h, f)	dma_alloc_attrs(d, s, h, f, NULL)
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t flag,
-				    struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	void *memory;
-
-	BUG_ON(!ops);
-
-	memory = ops->alloc(dev, size, dma_handle, flag, attrs);
-
-	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
-	return memory;
-}
-
-#define dma_free_coherent(d,s,c,h)	dma_free_attrs(d, s, c, h, NULL)
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-				  void *cpu_addr, dma_addr_t dma_handle,
-				  struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	BUG_ON(!ops);
-	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-	ops->free(dev, size, cpu_addr, dma_handle, attrs);
-}
-
 static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction)
 {

--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2597,6 +2597,7 @@ source "kernel/Kconfig.preempt"
 
 config KEXEC
 	bool "Kexec system call"
+	select KEXEC_CORE
 	help
 	  kexec is a system call that implements the ability to shutdown your
 	  current kernel, and to start another kernel.  It is like a reboot

--- a/arch/mips/boot/compressed/decompress.c
+++ b/arch/mips/boot/compressed/decompress.c
@@ -111,8 +111,8 @@ void decompress_kernel(unsigned long boot_heap_start)
 	puts("\n");
 
 	/* Decompress the kernel with according algorithm */
-	decompress((char *)zimage_start, zimage_size, 0, 0,
-		   (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, error);
+	__decompress((char *)zimage_start, zimage_size, 0, 0,
+		   (void *)VMLINUX_LOAD_ADDRESS_ULL, 0, 0, error);
 
 	/* FIXME: should we flush cache here? */
 	puts("Now, booting the kernel...\n");

--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -161,9 +161,6 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
 {
 	void *ret;
 
-	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
-		return ret;
-
 	/* ignore region specifiers */
 	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
 
@@ -194,11 +191,6 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
 static void octeon_dma_free_coherent(struct device *dev, size_t size,
 	void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
-	int order = get_order(size);
-
-	if (dma_release_from_coherent(dev, order, vaddr))
-		return;
-
 	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
 }
 

--- a/arch/mips/include/asm/dma-mapping.h
+++ b/arch/mips/include/asm/dma-mapping.h
@@ -4,7 +4,6 @@
 #include <linux/scatterlist.h>
 #include <asm/dma-coherence.h>
 #include <asm/cache.h>
-#include <asm-generic/dma-coherent.h>
 
 #ifndef CONFIG_SGI_IP27	/* Kludge to fix 2.6.39 build for IP27 */
 #include <dma-coherence.h>
@@ -32,73 +31,7 @@ static inline void dma_mark_clean(void *addr, size_t size) {}
 
 #include <asm-generic/dma-mapping-common.h>
 
-static inline int dma_supported(struct device *dev, u64 mask)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-	return ops->dma_supported(dev, mask);
-}
-
-static inline int dma_mapping_error(struct device *dev, u64 mask)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	debug_dma_mapping_error(dev, mask);
-	return ops->mapping_error(dev, mask);
-}
-
-static inline int
-dma_set_mask(struct device *dev, u64 mask)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	if(!dev->dma_mask || !dma_supported(dev, mask))
-		return -EIO;
-
-	if (ops->set_dma_mask)
-		return ops->set_dma_mask(dev, mask);
-
-	*dev->dma_mask = mask;
-
-	return 0;
-}
-
 extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	       enum dma_data_direction direction);
 
-#define dma_alloc_coherent(d,s,h,f)	dma_alloc_attrs(d,s,h,f,NULL)
-
-static inline void *dma_alloc_attrs(struct device *dev, size_t size,
-				    dma_addr_t *dma_handle, gfp_t gfp,
-				    struct dma_attrs *attrs)
-{
-	void *ret;
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
-
-	debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
-
-	return ret;
-}
-
-#define dma_free_coherent(d,s,c,h)	dma_free_attrs(d,s,c,h,NULL)
-
-static inline void dma_free_attrs(struct device *dev, size_t size,
-				  void *vaddr, dma_addr_t dma_handle,
-				  struct dma_attrs *attrs)
-{
-	struct dma_map_ops *ops = get_dma_ops(dev);
-
-	ops->free(dev, size, vaddr, dma_handle, attrs);
-
-	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
-}
-
-
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
-			   dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_noncoherent(struct device *dev, size_t size,
-			 void *vaddr, dma_addr_t dma_handle);
-
 #endif /* _ASM_DMA_MAPPING_H */

--- a/arch/mips/loongson64/common/dma-swiotlb.c
+++ b/arch/mips/loongson64/common/dma-swiotlb.c
@@ -14,9 +14,6 @@ static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
 {
 	void *ret;
 
-	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
-		return ret;
-
 	/* ignore region specifiers */
 	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
 
@@ -46,11 +43,6 @@ static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
 static void loongson_dma_free_coherent(struct device *dev, size_t size,
 	void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
-	int order = get_order(size);
-
-	if (dma_release_from_coherent(dev, order, vaddr))
-		return;
-
 	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
 }
 
@@ -93,6 +85,9 @@ static void loongson_dma_sync_sg_for_device(struct device *dev,
 
 static int loongson_dma_set_mask(struct device *dev, u64 mask)
 {
+	if (!dev->dma_mask || !dma_supported(dev, mask))
+		return -EIO;
+
 	if (mask > DMA_BIT_MASK(loongson_sysconf.dma_mask_bits)) {
 		*dev->dma_mask = DMA_BIT_MASK(loongson_sysconf.dma_mask_bits);
 		return -EIO;

--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -112,7 +112,7 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
 	return gfp | dma_flag;
 }
 
-void *dma_alloc_noncoherent(struct device *dev, size_t size,
+static void *mips_dma_alloc_noncoherent(struct device *dev, size_t size,
 	dma_addr_t * dma_handle, gfp_t gfp)
 {
 	void *ret;
@@ -128,7 +128,6 @@ void *dma_alloc_noncoherent(struct device *dev, size_t size,
 
 	return ret;
 }
-EXPORT_SYMBOL(dma_alloc_noncoherent);
 
 static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 	dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
@@ -137,8 +136,12 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 	struct page *page = NULL;
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 
-	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
-		return ret;
+	/*
+	 * XXX: seems like the coherent and non-coherent implementations could
+	 * be consolidated.
+	 */
+	if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+		return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp);
 
 	gfp = massage_gfp_flags(dev, gfp);
 
@@ -164,24 +167,24 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
 }
 
-void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
-	dma_addr_t dma_handle)
+static void mips_dma_free_noncoherent(struct device *dev, size_t size,
+	void *vaddr, dma_addr_t dma_handle)
 {
 	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
-EXPORT_SYMBOL(dma_free_noncoherent);
 
 static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 	dma_addr_t dma_handle, struct dma_attrs *attrs)
 {
 	unsigned long addr = (unsigned long) vaddr;
-	int order = get_order(size);
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	struct page *page = NULL;
 
-	if (dma_release_from_coherent(dev, order, vaddr))
+	if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+		mips_dma_free_noncoherent(dev, size, vaddr, dma_handle);
 		return;
+	}
 
 	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 

@ -47,11 +47,6 @@ static char *nlm_swiotlb;
|
|||||||
static void *nlm_dma_alloc_coherent(struct device *dev, size_t size,
|
static void *nlm_dma_alloc_coherent(struct device *dev, size_t size,
|
||||||
dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
|
dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
|
||||||
{
|
{
|
||||||
void *ret;
|
|
||||||
|
|
||||||
if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
/* ignore region specifiers */
|
/* ignore region specifiers */
|
||||||
gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
|
gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
|
||||||
|
|
||||||
@ -69,11 +64,6 @@ static void *nlm_dma_alloc_coherent(struct device *dev, size_t size,
|
|||||||
static void nlm_dma_free_coherent(struct device *dev, size_t size,
|
static void nlm_dma_free_coherent(struct device *dev, size_t size,
|
||||||
void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
|
void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
|
||||||
{
|
{
|
||||||
int order = get_order(size);
|
|
||||||
|
|
||||||
if (dma_release_from_coherent(dev, order, vaddr))
|
|
||||||
return;
|
|
||||||
|
|
||||||
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
|
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -23,7 +23,6 @@
|
|||||||
*/
|
*/
|
||||||
|
|
||||||
#include <linux/dma-debug.h>
|
#include <linux/dma-debug.h>
|
||||||
#include <asm-generic/dma-coherent.h>
|
|
||||||
#include <linux/kmemcheck.h>
|
#include <linux/kmemcheck.h>
|
||||||
#include <linux/dma-mapping.h>
|
#include <linux/dma-mapping.h>
|
||||||
|
|
||||||
@ -36,75 +35,13 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
|
|||||||
return &or1k_dma_map_ops;
|
return &or1k_dma_map_ops;
|
||||||
}
|
}
|
||||||
|
|
||||||
#include <asm-generic/dma-mapping-common.h>
|
#define HAVE_ARCH_DMA_SUPPORTED 1
|
||||||
|
|
||||||
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
|
|
||||||
|
|
||||||
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
|
|
||||||
dma_addr_t *dma_handle, gfp_t gfp,
|
|
||||||
struct dma_attrs *attrs)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
||||||
void *memory;
|
|
||||||
|
|
||||||
memory = ops->alloc(dev, size, dma_handle, gfp, attrs);
|
|
||||||
|
|
||||||
debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
|
|
||||||
|
|
||||||
return memory;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
|
|
||||||
|
|
||||||
static inline void dma_free_attrs(struct device *dev, size_t size,
|
|
||||||
void *cpu_addr, dma_addr_t dma_handle,
|
|
||||||
struct dma_attrs *attrs)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
|
|
||||||
|
|
||||||
ops->free(dev, size, cpu_addr, dma_handle, attrs);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
|
|
||||||
dma_addr_t *dma_handle, gfp_t gfp)
|
|
||||||
{
|
|
||||||
struct dma_attrs attrs;
|
|
||||||
|
|
||||||
dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
|
|
||||||
|
|
||||||
return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void dma_free_noncoherent(struct device *dev, size_t size,
|
|
||||||
void *cpu_addr, dma_addr_t dma_handle)
|
|
||||||
{
|
|
||||||
struct dma_attrs attrs;
|
|
||||||
|
|
||||||
dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
|
|
||||||
|
|
||||||
dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int dma_supported(struct device *dev, u64 dma_mask)
|
static inline int dma_supported(struct device *dev, u64 dma_mask)
|
||||||
{
|
{
|
||||||
/* Support 32 bit DMA mask exclusively */
|
/* Support 32 bit DMA mask exclusively */
|
||||||
return dma_mask == DMA_BIT_MASK(32);
|
return dma_mask == DMA_BIT_MASK(32);
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
#include <asm-generic/dma-mapping-common.h>
|
||||||
{
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
|
|
||||||
{
|
|
||||||
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
|
|
||||||
return -EIO;
|
|
||||||
|
|
||||||
*dev->dma_mask = dma_mask;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
#endif /* __ASM_OPENRISC_DMA_MAPPING_H */
|
#endif /* __ASM_OPENRISC_DMA_MAPPING_H */
|
||||||
|
@ -420,6 +420,7 @@ config PPC64_SUPPORTS_MEMORY_FAILURE
|
|||||||
config KEXEC
|
config KEXEC
|
||||||
bool "kexec system call"
|
bool "kexec system call"
|
||||||
depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
|
depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP))
|
||||||
|
select KEXEC_CORE
|
||||||
help
|
help
|
||||||
kexec is a system call that implements the ability to shutdown your
|
kexec is a system call that implements the ability to shutdown your
|
||||||
current kernel, and to start another kernel. It is like a reboot
|
current kernel, and to start another kernel. It is like a reboot
|
||||||
|
@ -18,7 +18,9 @@
|
|||||||
#include <asm/io.h>
|
#include <asm/io.h>
|
||||||
#include <asm/swiotlb.h>
|
#include <asm/swiotlb.h>
|
||||||
|
|
||||||
|
#ifdef CONFIG_PPC64
|
||||||
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
|
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
|
||||||
|
#endif
|
||||||
|
|
||||||
/* Some dma direct funcs must be visible for use in other dma_ops */
|
/* Some dma direct funcs must be visible for use in other dma_ops */
|
||||||
extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
|
extern void *__dma_direct_alloc_coherent(struct device *dev, size_t size,
|
||||||
@ -120,71 +122,14 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
|
|||||||
/* this will be removed soon */
|
/* this will be removed soon */
|
||||||
#define flush_write_buffers()
|
#define flush_write_buffers()
|
||||||
|
|
||||||
|
#define HAVE_ARCH_DMA_SET_MASK 1
|
||||||
|
extern int dma_set_mask(struct device *dev, u64 dma_mask);
|
||||||
|
|
||||||
#include <asm-generic/dma-mapping-common.h>
|
#include <asm-generic/dma-mapping-common.h>
|
||||||
|
|
||||||
static inline int dma_supported(struct device *dev, u64 mask)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *dma_ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
if (unlikely(dma_ops == NULL))
|
|
||||||
return 0;
|
|
||||||
if (dma_ops->dma_supported == NULL)
|
|
||||||
return 1;
|
|
||||||
return dma_ops->dma_supported(dev, mask);
|
|
||||||
}
|
|
||||||
|
|
||||||
extern int dma_set_mask(struct device *dev, u64 dma_mask);
|
|
||||||
extern int __dma_set_mask(struct device *dev, u64 dma_mask);
|
extern int __dma_set_mask(struct device *dev, u64 dma_mask);
|
||||||
extern u64 __dma_get_required_mask(struct device *dev);
|
extern u64 __dma_get_required_mask(struct device *dev);
|
||||||
|
|
||||||
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
|
|
||||||
|
|
||||||
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
|
|
||||||
dma_addr_t *dma_handle, gfp_t flag,
|
|
||||||
struct dma_attrs *attrs)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *dma_ops = get_dma_ops(dev);
|
|
||||||
void *cpu_addr;
|
|
||||||
|
|
||||||
BUG_ON(!dma_ops);
|
|
||||||
|
|
||||||
cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);
|
|
||||||
|
|
||||||
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
|
|
||||||
|
|
||||||
return cpu_addr;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
|
|
||||||
|
|
||||||
static inline void dma_free_attrs(struct device *dev, size_t size,
|
|
||||||
void *cpu_addr, dma_addr_t dma_handle,
|
|
||||||
struct dma_attrs *attrs)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *dma_ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
BUG_ON(!dma_ops);
|
|
||||||
|
|
||||||
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
|
|
||||||
|
|
||||||
dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *dma_ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
debug_dma_mapping_error(dev, dma_addr);
|
|
||||||
if (dma_ops->mapping_error)
|
|
||||||
return dma_ops->mapping_error(dev, dma_addr);
|
|
||||||
|
|
||||||
#ifdef CONFIG_PPC64
|
|
||||||
return (dma_addr == DMA_ERROR_CODE);
|
|
||||||
#else
|
|
||||||
return 0;
|
|
||||||
#endif
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
|
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
|
||||||
{
|
{
|
||||||
#ifdef CONFIG_SWIOTLB
|
#ifdef CONFIG_SWIOTLB
|
||||||
@ -210,9 +155,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
|
|||||||
return daddr - get_dma_offset(dev);
|
return daddr - get_dma_offset(dev);
|
||||||
}
|
}
|
||||||
|
|
||||||
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
|
|
||||||
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
|
|
||||||
|
|
||||||
#define ARCH_HAS_DMA_MMAP_COHERENT
|
#define ARCH_HAS_DMA_MMAP_COHERENT
|
||||||
|
|
||||||
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
|
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
|
||||||
|
@ -48,6 +48,7 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC
|
|||||||
|
|
||||||
config KEXEC
|
config KEXEC
|
||||||
def_bool y
|
def_bool y
|
||||||
|
select KEXEC_CORE
|
||||||
|
|
||||||
config AUDIT_ARCH
|
config AUDIT_ARCH
|
||||||
def_bool y
|
def_bool y
|
||||||
|
@ -167,7 +167,7 @@ unsigned long decompress_kernel(void)
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
puts("Uncompressing Linux... ");
|
puts("Uncompressing Linux... ");
|
||||||
decompress(input_data, input_len, NULL, NULL, output, NULL, error);
|
__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
|
||||||
puts("Ok, booting the kernel.\n");
|
puts("Ok, booting the kernel.\n");
|
||||||
return (unsigned long) output;
|
return (unsigned long) output;
|
||||||
}
|
}
|
||||||
|
@ -18,27 +18,13 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
|
|||||||
return &s390_dma_ops;
|
return &s390_dma_ops;
|
||||||
}
|
}
|
||||||
|
|
||||||
extern int dma_set_mask(struct device *dev, u64 mask);
|
|
||||||
|
|
||||||
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
|
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
|
||||||
enum dma_data_direction direction)
|
enum dma_data_direction direction)
|
||||||
{
|
{
|
||||||
}
|
}
|
||||||
|
|
||||||
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
|
|
||||||
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
|
|
||||||
|
|
||||||
#include <asm-generic/dma-mapping-common.h>
|
#include <asm-generic/dma-mapping-common.h>
|
||||||
|
|
||||||
static inline int dma_supported(struct device *dev, u64 mask)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *dma_ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
if (dma_ops->dma_supported == NULL)
|
|
||||||
return 1;
|
|
||||||
return dma_ops->dma_supported(dev, mask);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
|
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
|
||||||
{
|
{
|
||||||
if (!dev->dma_mask)
|
if (!dev->dma_mask)
|
||||||
@ -46,45 +32,4 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
|
|||||||
return addr + size - 1 <= *dev->dma_mask;
|
return addr + size - 1 <= *dev->dma_mask;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *dma_ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
debug_dma_mapping_error(dev, dma_addr);
|
|
||||||
if (dma_ops->mapping_error)
|
|
||||||
return dma_ops->mapping_error(dev, dma_addr);
|
|
||||||
return dma_addr == DMA_ERROR_CODE;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
|
|
||||||
|
|
||||||
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
|
|
||||||
dma_addr_t *dma_handle, gfp_t flags,
|
|
||||||
struct dma_attrs *attrs)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
||||||
void *cpu_addr;
|
|
||||||
|
|
||||||
BUG_ON(!ops);
|
|
||||||
|
|
||||||
cpu_addr = ops->alloc(dev, size, dma_handle, flags, attrs);
|
|
||||||
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
|
|
||||||
|
|
||||||
return cpu_addr;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
|
|
||||||
|
|
||||||
static inline void dma_free_attrs(struct device *dev, size_t size,
|
|
||||||
void *cpu_addr, dma_addr_t dma_handle,
|
|
||||||
struct dma_attrs *attrs)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
BUG_ON(!ops);
|
|
||||||
|
|
||||||
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
|
|
||||||
ops->free(dev, size, cpu_addr, dma_handle, attrs);
|
|
||||||
}
|
|
||||||
|
|
||||||
#endif /* _ASM_S390_DMA_MAPPING_H */
|
#endif /* _ASM_S390_DMA_MAPPING_H */
|
||||||
|
@ -262,16 +262,6 @@ out:
|
|||||||
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
|
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
|
||||||
}
|
}
|
||||||
|
|
||||||
int dma_set_mask(struct device *dev, u64 mask)
|
|
||||||
{
|
|
||||||
if (!dev->dma_mask || !dma_supported(dev, mask))
|
|
||||||
return -EIO;
|
|
||||||
|
|
||||||
*dev->dma_mask = mask;
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL_GPL(dma_set_mask);
|
|
||||||
|
|
||||||
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
|
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
|
||||||
unsigned long offset, size_t size,
|
unsigned long offset, size_t size,
|
||||||
enum dma_data_direction direction,
|
enum dma_data_direction direction,
|
||||||
|
@ -602,6 +602,7 @@ source kernel/Kconfig.hz
|
|||||||
config KEXEC
|
config KEXEC
|
||||||
bool "kexec system call (EXPERIMENTAL)"
|
bool "kexec system call (EXPERIMENTAL)"
|
||||||
depends on SUPERH32 && MMU
|
depends on SUPERH32 && MMU
|
||||||
|
select KEXEC_CORE
|
||||||
help
|
help
|
||||||
kexec is a system call that implements the ability to shutdown your
|
kexec is a system call that implements the ability to shutdown your
|
||||||
current kernel, and to start another kernel. It is like a reboot
|
current kernel, and to start another kernel. It is like a reboot
|
||||||
|
@ -132,7 +132,7 @@ void decompress_kernel(void)
|
|||||||
|
|
||||||
puts("Uncompressing Linux... ");
|
puts("Uncompressing Linux... ");
|
||||||
cache_control(CACHE_ENABLE);
|
cache_control(CACHE_ENABLE);
|
||||||
decompress(input_data, input_len, NULL, NULL, output, NULL, error);
|
__decompress(input_data, input_len, NULL, NULL, output, 0, NULL, error);
|
||||||
cache_control(CACHE_DISABLE);
|
cache_control(CACHE_DISABLE);
|
||||||
puts("Ok, booting the kernel.\n");
|
puts("Ok, booting the kernel.\n");
|
||||||
}
|
}
|
||||||
|
@ -9,86 +9,13 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
|
|||||||
return dma_ops;
|
return dma_ops;
|
||||||
}
|
}
|
||||||
|
|
||||||
#include <asm-generic/dma-coherent.h>
|
#define DMA_ERROR_CODE 0
|
||||||
|
|
||||||
#include <asm-generic/dma-mapping-common.h>
|
#include <asm-generic/dma-mapping-common.h>
|
||||||
|
|
||||||
static inline int dma_supported(struct device *dev, u64 mask)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
if (ops->dma_supported)
|
|
||||||
return ops->dma_supported(dev, mask);
|
|
||||||
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int dma_set_mask(struct device *dev, u64 mask)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
if (!dev->dma_mask || !dma_supported(dev, mask))
|
|
||||||
return -EIO;
|
|
||||||
if (ops->set_dma_mask)
|
|
||||||
return ops->set_dma_mask(dev, mask);
|
|
||||||
|
|
||||||
*dev->dma_mask = mask;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
|
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
|
||||||
enum dma_data_direction dir);
|
enum dma_data_direction dir);
|
||||||
|
|
||||||
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
|
|
||||||
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
|
|
||||||
|
|
||||||
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
debug_dma_mapping_error(dev, dma_addr);
|
|
||||||
if (ops->mapping_error)
|
|
||||||
return ops->mapping_error(dev, dma_addr);
|
|
||||||
|
|
||||||
return dma_addr == 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
|
|
||||||
|
|
||||||
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
|
|
||||||
dma_addr_t *dma_handle, gfp_t gfp,
|
|
||||||
struct dma_attrs *attrs)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
||||||
void *memory;
|
|
||||||
|
|
||||||
if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
|
|
||||||
return memory;
|
|
||||||
if (!ops->alloc)
|
|
||||||
return NULL;
|
|
||||||
|
|
||||||
memory = ops->alloc(dev, size, dma_handle, gfp, attrs);
|
|
||||||
debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
|
|
||||||
|
|
||||||
return memory;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
|
|
||||||
|
|
||||||
static inline void dma_free_attrs(struct device *dev, size_t size,
|
|
||||||
void *vaddr, dma_addr_t dma_handle,
|
|
||||||
struct dma_attrs *attrs)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
if (dma_release_from_coherent(dev, get_order(size), vaddr))
|
|
||||||
return;
|
|
||||||
|
|
||||||
debug_dma_free_coherent(dev, size, vaddr, dma_handle);
|
|
||||||
if (ops->free)
|
|
||||||
ops->free(dev, size, vaddr, dma_handle, attrs);
|
|
||||||
}
|
|
||||||
|
|
||||||
/* arch/sh/mm/consistent.c */
|
/* arch/sh/mm/consistent.c */
|
||||||
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
|
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
|
||||||
dma_addr_t *dma_addr, gfp_t flag,
|
dma_addr_t *dma_addr, gfp_t flag,
|
||||||
|
@ -7,11 +7,9 @@
|
|||||||
|
|
||||||
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
|
#define DMA_ERROR_CODE (~(dma_addr_t)0x0)
|
||||||
|
|
||||||
|
#define HAVE_ARCH_DMA_SUPPORTED 1
|
||||||
int dma_supported(struct device *dev, u64 mask);
|
int dma_supported(struct device *dev, u64 mask);
|
||||||
|
|
||||||
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
|
|
||||||
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
|
|
||||||
|
|
||||||
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
|
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
|
||||||
enum dma_data_direction dir)
|
enum dma_data_direction dir)
|
||||||
{
|
{
|
||||||
@ -39,39 +37,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
|
|||||||
return dma_ops;
|
return dma_ops;
|
||||||
}
|
}
|
||||||
|
|
||||||
#include <asm-generic/dma-mapping-common.h>
|
#define HAVE_ARCH_DMA_SET_MASK 1
|
||||||
|
|
||||||
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
|
|
||||||
|
|
||||||
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
|
|
||||||
dma_addr_t *dma_handle, gfp_t flag,
|
|
||||||
struct dma_attrs *attrs)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
||||||
void *cpu_addr;
|
|
||||||
|
|
||||||
cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
|
|
||||||
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
|
|
||||||
return cpu_addr;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
|
|
||||||
|
|
||||||
static inline void dma_free_attrs(struct device *dev, size_t size,
|
|
||||||
void *cpu_addr, dma_addr_t dma_handle,
|
|
||||||
struct dma_attrs *attrs)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
|
|
||||||
ops->free(dev, size, cpu_addr, dma_handle, attrs);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
|
||||||
{
|
|
||||||
debug_dma_mapping_error(dev, dma_addr);
|
|
||||||
return (dma_addr == DMA_ERROR_CODE);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int dma_set_mask(struct device *dev, u64 mask)
|
static inline int dma_set_mask(struct device *dev, u64 mask)
|
||||||
{
|
{
|
||||||
@ -86,4 +52,6 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
|
|||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#include <asm-generic/dma-mapping-common.h>
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
@ -205,6 +205,7 @@ source "kernel/Kconfig.hz"
|
|||||||
|
|
||||||
config KEXEC
|
config KEXEC
|
||||||
bool "kexec system call"
|
bool "kexec system call"
|
||||||
|
select KEXEC_CORE
|
||||||
---help---
|
---help---
|
||||||
kexec is a system call that implements the ability to shutdown your
|
kexec is a system call that implements the ability to shutdown your
|
||||||
current kernel, and to start another kernel. It is like a reboot
|
current kernel, and to start another kernel. It is like a reboot
|
||||||
|
@ -59,8 +59,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
|
|||||||
|
|
||||||
static inline void dma_mark_clean(void *addr, size_t size) {}
|
static inline void dma_mark_clean(void *addr, size_t size) {}
|
||||||
|
|
||||||
#include <asm-generic/dma-mapping-common.h>
|
|
||||||
|
|
||||||
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
|
static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
|
||||||
{
|
{
|
||||||
dev->archdata.dma_ops = ops;
|
dev->archdata.dma_ops = ops;
|
||||||
@ -74,18 +72,9 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
|
|||||||
return addr + size - 1 <= *dev->dma_mask;
|
return addr + size - 1 <= *dev->dma_mask;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int
|
#define HAVE_ARCH_DMA_SET_MASK 1
|
||||||
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
|
||||||
{
|
|
||||||
debug_dma_mapping_error(dev, dma_addr);
|
|
||||||
return get_dma_ops(dev)->mapping_error(dev, dma_addr);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int
|
#include <asm-generic/dma-mapping-common.h>
|
||||||
dma_supported(struct device *dev, u64 mask)
|
|
||||||
{
|
|
||||||
return get_dma_ops(dev)->dma_supported(dev, mask);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int
|
static inline int
|
||||||
dma_set_mask(struct device *dev, u64 mask)
|
dma_set_mask(struct device *dev, u64 mask)
|
||||||
@ -116,36 +105,6 @@ dma_set_mask(struct device *dev, u64 mask)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
|
|
||||||
dma_addr_t *dma_handle, gfp_t flag,
|
|
||||||
struct dma_attrs *attrs)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *dma_ops = get_dma_ops(dev);
|
|
||||||
void *cpu_addr;
|
|
||||||
|
|
||||||
cpu_addr = dma_ops->alloc(dev, size, dma_handle, flag, attrs);
|
|
||||||
|
|
||||||
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
|
|
||||||
|
|
||||||
return cpu_addr;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void dma_free_attrs(struct device *dev, size_t size,
|
|
||||||
void *cpu_addr, dma_addr_t dma_handle,
|
|
||||||
struct dma_attrs *attrs)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *dma_ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
|
|
||||||
|
|
||||||
dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
|
|
||||||
}
|
|
||||||
|
|
||||||
#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
|
|
||||||
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
|
|
||||||
#define dma_free_coherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)
|
|
||||||
#define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* dma_alloc_noncoherent() is #defined to return coherent memory,
|
* dma_alloc_noncoherent() is #defined to return coherent memory,
|
||||||
* so there's no need to do any flushing here.
|
* so there's no need to do any flushing here.
|
||||||
|
@ -119,8 +119,8 @@ unsigned long decompress_kernel(unsigned long output_start,
|
|||||||
output_ptr = get_unaligned_le32(tmp);
|
output_ptr = get_unaligned_le32(tmp);
|
||||||
|
|
||||||
arch_decomp_puts("Uncompressing Linux...");
|
arch_decomp_puts("Uncompressing Linux...");
|
||||||
decompress(input_data, input_data_end - input_data, NULL, NULL,
|
__decompress(input_data, input_data_end - input_data, NULL, NULL,
|
||||||
output_data, NULL, error);
|
output_data, 0, NULL, error);
|
||||||
arch_decomp_puts(" done, booting the kernel.\n");
|
arch_decomp_puts(" done, booting the kernel.\n");
|
||||||
return output_ptr;
|
return output_ptr;
|
||||||
}
|
}
|
||||||
|
@ -18,8 +18,6 @@
|
|||||||
#include <linux/scatterlist.h>
|
#include <linux/scatterlist.h>
|
||||||
#include <linux/swiotlb.h>
|
#include <linux/swiotlb.h>
|
||||||
|
|
||||||
#include <asm-generic/dma-coherent.h>
|
|
||||||
|
|
||||||
#include <asm/memory.h>
|
#include <asm/memory.h>
|
||||||
#include <asm/cacheflush.h>
|
#include <asm/cacheflush.h>
|
||||||
|
|
||||||
@ -30,26 +28,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
|
|||||||
return &swiotlb_dma_map_ops;
|
return &swiotlb_dma_map_ops;
|
||||||
}
|
}
|
||||||
|
|
||||||
static inline int dma_supported(struct device *dev, u64 mask)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *dma_ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
if (unlikely(dma_ops == NULL))
|
|
||||||
return 0;
|
|
||||||
|
|
||||||
return dma_ops->dma_supported(dev, mask);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *dma_ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
if (dma_ops->mapping_error)
|
|
||||||
return dma_ops->mapping_error(dev, dma_addr);
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
#include <asm-generic/dma-mapping-common.h>
|
#include <asm-generic/dma-mapping-common.h>
|
||||||
|
|
||||||
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
|
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
|
||||||
@ -72,41 +50,6 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
|
|||||||
|
|
||||||
static inline void dma_mark_clean(void *addr, size_t size) {}
|
static inline void dma_mark_clean(void *addr, size_t size) {}
|
||||||
|
|
||||||
static inline int dma_set_mask(struct device *dev, u64 dma_mask)
|
|
||||||
{
|
|
||||||
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
|
|
||||||
return -EIO;
|
|
||||||
|
|
||||||
*dev->dma_mask = dma_mask;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
|
|
||||||
|
|
||||||
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
|
|
||||||
dma_addr_t *dma_handle, gfp_t flag,
|
|
||||||
struct dma_attrs *attrs)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *dma_ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
return dma_ops->alloc(dev, size, dma_handle, flag, attrs);
|
|
||||||
}
|
|
||||||
|
|
||||||
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
|
|
||||||
|
|
||||||
static inline void dma_free_attrs(struct device *dev, size_t size,
|
|
||||||
void *cpu_addr, dma_addr_t dma_handle,
|
|
||||||
struct dma_attrs *attrs)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *dma_ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
dma_ops->free(dev, size, cpu_addr, dma_handle, attrs);
|
|
||||||
}
|
|
||||||
|
|
||||||
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
|
|
||||||
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
|
|
||||||
|
|
||||||
static inline void dma_cache_sync(struct device *dev, void *vaddr,
|
static inline void dma_cache_sync(struct device *dev, void *vaddr,
|
||||||
size_t size, enum dma_data_direction direction)
|
size_t size, enum dma_data_direction direction)
|
||||||
{
|
{
|
||||||
|
@ -1754,6 +1754,7 @@ source kernel/Kconfig.hz
|
|||||||
|
|
||||||
config KEXEC
|
config KEXEC
|
||||||
bool "kexec system call"
|
bool "kexec system call"
|
||||||
|
select KEXEC_CORE
|
||||||
---help---
|
---help---
|
||||||
kexec is a system call that implements the ability to shutdown your
|
kexec is a system call that implements the ability to shutdown your
|
||||||
current kernel, and to start another kernel. It is like a reboot
|
current kernel, and to start another kernel. It is like a reboot
|
||||||
@ -1770,8 +1771,8 @@ config KEXEC
|
|||||||
|
|
||||||
config KEXEC_FILE
|
config KEXEC_FILE
|
||||||
bool "kexec file based system call"
|
bool "kexec file based system call"
|
||||||
|
select KEXEC_CORE
|
||||||
select BUILD_BIN2C
|
select BUILD_BIN2C
|
||||||
depends on KEXEC
|
|
||||||
depends on X86_64
|
depends on X86_64
|
||||||
depends on CRYPTO=y
|
depends on CRYPTO=y
|
||||||
depends on CRYPTO_SHA256=y
|
depends on CRYPTO_SHA256=y
|
||||||
|
@ -448,7 +448,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
debug_putstr("\nDecompressing Linux... ");
|
debug_putstr("\nDecompressing Linux... ");
|
||||||
decompress(input_data, input_len, NULL, NULL, output, NULL, error);
|
__decompress(input_data, input_len, NULL, NULL, output, output_len,
|
||||||
|
NULL, error);
|
||||||
parse_elf(output);
|
parse_elf(output);
|
||||||
/*
|
/*
|
||||||
* 32-bit always performs relocations. 64-bit relocations are only
|
* 32-bit always performs relocations. 64-bit relocations are only
|
||||||
|
@ -414,7 +414,7 @@ xloadflags:
|
|||||||
# define XLF23 0
|
# define XLF23 0
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if defined(CONFIG_X86_64) && defined(CONFIG_EFI) && defined(CONFIG_KEXEC)
|
#if defined(CONFIG_X86_64) && defined(CONFIG_EFI) && defined(CONFIG_KEXEC_CORE)
|
||||||
# define XLF4 XLF_EFI_KEXEC
|
# define XLF4 XLF_EFI_KEXEC
|
||||||
#else
|
#else
|
||||||
# define XLF4 0
|
# define XLF4 0
|
||||||
|
@ -277,7 +277,7 @@ static const char *gate_vma_name(struct vm_area_struct *vma)
|
|||||||
{
|
{
|
||||||
return "[vsyscall]";
|
return "[vsyscall]";
|
||||||
}
|
}
|
||||||
static struct vm_operations_struct gate_vma_ops = {
|
static const struct vm_operations_struct gate_vma_ops = {
|
||||||
.name = gate_vma_name,
|
.name = gate_vma_name,
|
||||||
};
|
};
|
||||||
static struct vm_area_struct gate_vma = {
|
static struct vm_area_struct gate_vma = {
|
||||||
|
@ -12,7 +12,6 @@
|
|||||||
#include <linux/dma-attrs.h>
|
#include <linux/dma-attrs.h>
|
||||||
#include <asm/io.h>
|
#include <asm/io.h>
|
||||||
#include <asm/swiotlb.h>
|
#include <asm/swiotlb.h>
|
||||||
#include <asm-generic/dma-coherent.h>
|
|
||||||
#include <linux/dma-contiguous.h>
|
#include <linux/dma-contiguous.h>
|
||||||
|
|
||||||
#ifdef CONFIG_ISA
|
#ifdef CONFIG_ISA
|
||||||
@ -41,24 +40,13 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
|
|||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
#include <asm-generic/dma-mapping-common.h>
|
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp);
|
||||||
|
#define arch_dma_alloc_attrs arch_dma_alloc_attrs
|
||||||
/* Make sure we keep the same behaviour */
|
|
||||||
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
||||||
debug_dma_mapping_error(dev, dma_addr);
|
|
||||||
if (ops->mapping_error)
|
|
||||||
return ops->mapping_error(dev, dma_addr);
|
|
||||||
|
|
||||||
return (dma_addr == DMA_ERROR_CODE);
|
|
||||||
}
|
|
||||||
|
|
||||||
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
|
|
||||||
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
|
|
||||||
|
|
||||||
|
#define HAVE_ARCH_DMA_SUPPORTED 1
|
||||||
extern int dma_supported(struct device *hwdev, u64 mask);
|
extern int dma_supported(struct device *hwdev, u64 mask);
|
||||||
extern int dma_set_mask(struct device *dev, u64 mask);
|
|
||||||
|
#include <asm-generic/dma-mapping-common.h>
|
||||||
|
|
||||||
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
|
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
|
||||||
dma_addr_t *dma_addr, gfp_t flag,
|
dma_addr_t *dma_addr, gfp_t flag,
|
||||||
@ -125,16 +113,4 @@ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
|
|||||||
return gfp;
|
return gfp;
|
||||||
}
|
}
|
||||||
|
|
||||||
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
|
|
||||||
|
|
||||||
void *
|
|
||||||
dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
|
||||||
gfp_t gfp, struct dma_attrs *attrs);
|
|
||||||
|
|
||||||
#define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
|
|
||||||
|
|
||||||
void dma_free_attrs(struct device *dev, size_t size,
|
|
||||||
void *vaddr, dma_addr_t bus,
|
|
||||||
struct dma_attrs *attrs);
|
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
@ -29,7 +29,7 @@ extern void show_trace(struct task_struct *t, struct pt_regs *regs,
|
|||||||
extern void __show_regs(struct pt_regs *regs, int all);
|
extern void __show_regs(struct pt_regs *regs, int all);
|
||||||
extern unsigned long oops_begin(void);
|
extern unsigned long oops_begin(void);
|
||||||
extern void oops_end(unsigned long, struct pt_regs *, int signr);
|
extern void oops_end(unsigned long, struct pt_regs *, int signr);
|
||||||
#ifdef CONFIG_KEXEC
|
#ifdef CONFIG_KEXEC_CORE
|
||||||
extern int in_crash_kexec;
|
extern int in_crash_kexec;
|
||||||
#else
|
#else
|
||||||
/* no crash dump is ever in progress if no crash kernel can be kexec'd */
|
/* no crash dump is ever in progress if no crash kernel can be kexec'd */
|
||||||
|
@ -71,8 +71,8 @@ obj-$(CONFIG_LIVEPATCH) += livepatch.o
|
|||||||
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
|
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
|
||||||
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
|
obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
|
||||||
obj-$(CONFIG_X86_TSC) += trace_clock.o
|
obj-$(CONFIG_X86_TSC) += trace_clock.o
|
||||||
obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
|
obj-$(CONFIG_KEXEC_CORE) += machine_kexec_$(BITS).o
|
||||||
obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
|
obj-$(CONFIG_KEXEC_CORE) += relocate_kernel_$(BITS).o crash.o
|
||||||
obj-$(CONFIG_KEXEC_FILE) += kexec-bzimage64.o
|
obj-$(CONFIG_KEXEC_FILE) += kexec-bzimage64.o
|
||||||
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
|
obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
|
||||||
obj-y += kprobes/
|
obj-y += kprobes/
|
||||||
|
@ -200,7 +200,7 @@ static void kvm_setup_secondary_clock(void)
|
|||||||
* kind of shutdown from our side, we unregister the clock by writting anything
|
* kind of shutdown from our side, we unregister the clock by writting anything
|
||||||
* that does not have the 'enable' bit set in the msr
|
* that does not have the 'enable' bit set in the msr
|
||||||
*/
|
*/
|
||||||
#ifdef CONFIG_KEXEC
|
#ifdef CONFIG_KEXEC_CORE
|
||||||
static void kvm_crash_shutdown(struct pt_regs *regs)
|
static void kvm_crash_shutdown(struct pt_regs *regs)
|
||||||
{
|
{
|
||||||
native_write_msr(msr_kvm_system_time, 0, 0);
|
native_write_msr(msr_kvm_system_time, 0, 0);
|
||||||
@ -259,7 +259,7 @@ void __init kvmclock_init(void)
|
|||||||
x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
|
x86_platform.save_sched_clock_state = kvm_save_sched_clock_state;
|
||||||
x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
|
x86_platform.restore_sched_clock_state = kvm_restore_sched_clock_state;
|
||||||
machine_ops.shutdown = kvm_shutdown;
|
machine_ops.shutdown = kvm_shutdown;
|
||||||
#ifdef CONFIG_KEXEC
|
#ifdef CONFIG_KEXEC_CORE
|
||||||
machine_ops.crash_shutdown = kvm_crash_shutdown;
|
machine_ops.crash_shutdown = kvm_crash_shutdown;
|
||||||
#endif
|
#endif
|
||||||
kvm_get_preset_lpj();
|
kvm_get_preset_lpj();
|
||||||
|
@ -58,17 +58,6 @@ EXPORT_SYMBOL(x86_dma_fallback_dev);
|
|||||||
/* Number of entries preallocated for DMA-API debugging */
|
/* Number of entries preallocated for DMA-API debugging */
|
||||||
#define PREALLOC_DMA_DEBUG_ENTRIES 65536
|
#define PREALLOC_DMA_DEBUG_ENTRIES 65536
|
||||||
|
|
||||||
int dma_set_mask(struct device *dev, u64 mask)
|
|
||||||
{
|
|
||||||
if (!dev->dma_mask || !dma_supported(dev, mask))
|
|
||||||
return -EIO;
|
|
||||||
|
|
||||||
*dev->dma_mask = mask;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL(dma_set_mask);
|
|
||||||
|
|
||||||
void __init pci_iommu_alloc(void)
|
void __init pci_iommu_alloc(void)
|
||||||
{
|
{
|
||||||
struct iommu_table_entry *p;
|
struct iommu_table_entry *p;
|
||||||
@ -140,50 +129,19 @@ void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
|
|||||||
free_pages((unsigned long)vaddr, get_order(size));
|
free_pages((unsigned long)vaddr, get_order(size));
|
||||||
}
|
}
|
||||||
|
|
||||||
void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
|
bool arch_dma_alloc_attrs(struct device **dev, gfp_t *gfp)
|
||||||
gfp_t gfp, struct dma_attrs *attrs)
|
|
||||||
{
|
{
|
||||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
*gfp = dma_alloc_coherent_gfp_flags(*dev, *gfp);
|
||||||
void *memory;
|
*gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
|
||||||
|
|
||||||
gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
|
if (!*dev)
|
||||||
|
*dev = &x86_dma_fallback_dev;
|
||||||
|
if (!is_device_dma_capable(*dev))
|
||||||
|
return false;
|
||||||
|
return true;
|
||||||
|
|
||||||
if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
|
|
||||||
return memory;
|
|
||||||
|
|
||||||
if (!dev)
|
|
||||||
dev = &x86_dma_fallback_dev;
|
|
||||||
|
|
||||||
if (!is_device_dma_capable(dev))
|
|
||||||
return NULL;
|
|
||||||
|
|
||||||
if (!ops->alloc)
|
|
||||||
return NULL;
|
|
||||||
|
|
||||||
memory = ops->alloc(dev, size, dma_handle,
|
|
||||||
dma_alloc_coherent_gfp_flags(dev, gfp), attrs);
|
|
||||||
debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
|
|
||||||
|
|
||||||
return memory;
|
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(dma_alloc_attrs);
|
EXPORT_SYMBOL(arch_dma_alloc_attrs);
|
||||||
|
|
||||||
void dma_free_attrs(struct device *dev, size_t size,
|
|
||||||
void *vaddr, dma_addr_t bus,
|
|
||||||
struct dma_attrs *attrs)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
WARN_ON(irqs_disabled()); /* for portability */
|
|
||||||
|
|
||||||
if (dma_release_from_coherent(dev, get_order(size), vaddr))
|
|
||||||
return;
|
|
||||||
|
|
||||||
debug_dma_free_coherent(dev, size, vaddr, bus);
|
|
||||||
if (ops->free)
|
|
||||||
ops->free(dev, size, vaddr, bus, attrs);
|
|
||||||
}
|
|
||||||
EXPORT_SYMBOL(dma_free_attrs);
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
|
* See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
|
||||||
|
@ -673,7 +673,7 @@ struct machine_ops machine_ops = {
|
|||||||
.emergency_restart = native_machine_emergency_restart,
|
.emergency_restart = native_machine_emergency_restart,
|
||||||
.restart = native_machine_restart,
|
.restart = native_machine_restart,
|
||||||
.halt = native_machine_halt,
|
.halt = native_machine_halt,
|
||||||
#ifdef CONFIG_KEXEC
|
#ifdef CONFIG_KEXEC_CORE
|
||||||
.crash_shutdown = native_machine_crash_shutdown,
|
.crash_shutdown = native_machine_crash_shutdown,
|
||||||
#endif
|
#endif
|
||||||
};
|
};
|
||||||
@ -703,7 +703,7 @@ void machine_halt(void)
|
|||||||
machine_ops.halt();
|
machine_ops.halt();
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_KEXEC
|
#ifdef CONFIG_KEXEC_CORE
|
||||||
void machine_crash_shutdown(struct pt_regs *regs)
|
void machine_crash_shutdown(struct pt_regs *regs)
|
||||||
{
|
{
|
||||||
machine_ops.crash_shutdown(regs);
|
machine_ops.crash_shutdown(regs);
|
||||||
|
@ -478,7 +478,7 @@ static void __init memblock_x86_reserve_range_setup_data(void)
|
|||||||
* --------- Crashkernel reservation ------------------------------
|
* --------- Crashkernel reservation ------------------------------
|
||||||
*/
|
*/
|
||||||
|
|
||||||
#ifdef CONFIG_KEXEC
|
#ifdef CONFIG_KEXEC_CORE
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* Keep the crash kernel below this limit. On 32 bits earlier kernels
|
* Keep the crash kernel below this limit. On 32 bits earlier kernels
|
||||||
|
@ -364,7 +364,7 @@ INIT_PER_CPU(irq_stack_union);
|
|||||||
|
|
||||||
#endif /* CONFIG_X86_32 */
|
#endif /* CONFIG_X86_32 */
|
||||||
|
|
||||||
#ifdef CONFIG_KEXEC
|
#ifdef CONFIG_KEXEC_CORE
|
||||||
#include <asm/kexec.h>
|
#include <asm/kexec.h>
|
||||||
|
|
||||||
. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
|
. = ASSERT(kexec_control_code_size <= KEXEC_CONTROL_CODE_MAX_SIZE,
|
||||||
|
@ -1264,7 +1264,7 @@ static void vmcs_load(struct vmcs *vmcs)
|
|||||||
vmcs, phys_addr);
|
vmcs, phys_addr);
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_KEXEC
|
#ifdef CONFIG_KEXEC_CORE
|
||||||
/*
|
/*
|
||||||
* This bitmap is used to indicate whether the vmclear
|
* This bitmap is used to indicate whether the vmclear
|
||||||
* operation is enabled on all cpus. All disabled by
|
* operation is enabled on all cpus. All disabled by
|
||||||
@ -1302,7 +1302,7 @@ static void crash_vmclear_local_loaded_vmcss(void)
|
|||||||
#else
|
#else
|
||||||
static inline void crash_enable_local_vmclear(int cpu) { }
|
static inline void crash_enable_local_vmclear(int cpu) { }
|
||||||
static inline void crash_disable_local_vmclear(int cpu) { }
|
static inline void crash_disable_local_vmclear(int cpu) { }
|
||||||
#endif /* CONFIG_KEXEC */
|
#endif /* CONFIG_KEXEC_CORE */
|
||||||
|
|
||||||
static void __loaded_vmcs_clear(void *arg)
|
static void __loaded_vmcs_clear(void *arg)
|
||||||
{
|
{
|
||||||
@ -10411,7 +10411,7 @@ static int __init vmx_init(void)
|
|||||||
if (r)
|
if (r)
|
||||||
return r;
|
return r;
|
||||||
|
|
||||||
#ifdef CONFIG_KEXEC
|
#ifdef CONFIG_KEXEC_CORE
|
||||||
rcu_assign_pointer(crash_vmclear_loaded_vmcss,
|
rcu_assign_pointer(crash_vmclear_loaded_vmcss,
|
||||||
crash_vmclear_local_loaded_vmcss);
|
crash_vmclear_local_loaded_vmcss);
|
||||||
#endif
|
#endif
|
||||||
@ -10421,7 +10421,7 @@ static int __init vmx_init(void)
|
|||||||
|
|
||||||
static void __exit vmx_exit(void)
|
static void __exit vmx_exit(void)
|
||||||
{
|
{
|
||||||
#ifdef CONFIG_KEXEC
|
#ifdef CONFIG_KEXEC_CORE
|
||||||
RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
|
RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
|
||||||
synchronize_rcu();
|
synchronize_rcu();
|
||||||
#endif
|
#endif
|
||||||
|
@ -42,58 +42,21 @@ static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
|
|||||||
*/
|
*/
|
||||||
static unsigned long mpx_mmap(unsigned long len)
|
static unsigned long mpx_mmap(unsigned long len)
|
||||||
{
|
{
|
||||||
unsigned long ret;
|
|
||||||
unsigned long addr, pgoff;
|
|
||||||
struct mm_struct *mm = current->mm;
|
struct mm_struct *mm = current->mm;
|
||||||
vm_flags_t vm_flags;
|
unsigned long addr, populate;
|
||||||
struct vm_area_struct *vma;
|
|
||||||
|
|
||||||
/* Only bounds table can be allocated here */
|
/* Only bounds table can be allocated here */
|
||||||
if (len != mpx_bt_size_bytes(mm))
|
if (len != mpx_bt_size_bytes(mm))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
down_write(&mm->mmap_sem);
|
down_write(&mm->mmap_sem);
|
||||||
|
addr = do_mmap(NULL, 0, len, PROT_READ | PROT_WRITE,
|
||||||
/* Too many mappings? */
|
MAP_ANONYMOUS | MAP_PRIVATE, VM_MPX, 0, &populate);
|
||||||
if (mm->map_count > sysctl_max_map_count) {
|
|
||||||
ret = -ENOMEM;
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Obtain the address to map to. we verify (or select) it and ensure
|
|
||||||
* that it represents a valid section of the address space.
|
|
||||||
*/
|
|
||||||
addr = get_unmapped_area(NULL, 0, len, 0, MAP_ANONYMOUS | MAP_PRIVATE);
|
|
||||||
if (addr & ~PAGE_MASK) {
|
|
||||||
ret = addr;
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
vm_flags = VM_READ | VM_WRITE | VM_MPX |
|
|
||||||
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
|
|
||||||
|
|
||||||
/* Set pgoff according to addr for anon_vma */
|
|
||||||
pgoff = addr >> PAGE_SHIFT;
|
|
||||||
|
|
||||||
ret = mmap_region(NULL, addr, len, vm_flags, pgoff);
|
|
||||||
if (IS_ERR_VALUE(ret))
|
|
||||||
goto out;
|
|
||||||
|
|
||||||
vma = find_vma(mm, ret);
|
|
||||||
if (!vma) {
|
|
||||||
ret = -ENOMEM;
|
|
||||||
goto out;
|
|
||||||
}
|
|
||||||
|
|
||||||
if (vm_flags & VM_LOCKED) {
|
|
||||||
up_write(&mm->mmap_sem);
|
up_write(&mm->mmap_sem);
|
||||||
mm_populate(ret, len);
|
if (populate)
|
||||||
return ret;
|
mm_populate(addr, populate);
|
||||||
}
|
|
||||||
|
|
||||||
out:
|
return addr;
|
||||||
up_write(&mm->mmap_sem);
|
|
||||||
return ret;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
enum reg_type {
|
enum reg_type {
|
||||||
|
@ -650,7 +650,7 @@ static void __init get_systab_virt_addr(efi_memory_desc_t *md)
|
|||||||
|
|
||||||
static void __init save_runtime_map(void)
|
static void __init save_runtime_map(void)
|
||||||
{
|
{
|
||||||
#ifdef CONFIG_KEXEC
|
#ifdef CONFIG_KEXEC_CORE
|
||||||
efi_memory_desc_t *md;
|
efi_memory_desc_t *md;
|
||||||
void *tmp, *p, *q = NULL;
|
void *tmp, *p, *q = NULL;
|
||||||
int count = 0;
|
int count = 0;
|
||||||
@ -748,7 +748,7 @@ static void * __init efi_map_regions(int *count, int *pg_shift)
|
|||||||
|
|
||||||
static void __init kexec_enter_virtual_mode(void)
|
static void __init kexec_enter_virtual_mode(void)
|
||||||
{
|
{
|
||||||
#ifdef CONFIG_KEXEC
|
#ifdef CONFIG_KEXEC_CORE
|
||||||
efi_memory_desc_t *md;
|
efi_memory_desc_t *md;
|
||||||
void *p;
|
void *p;
|
||||||
|
|
||||||
|
@ -492,7 +492,7 @@ static void uv_nmi_touch_watchdogs(void)
|
|||||||
touch_nmi_watchdog();
|
touch_nmi_watchdog();
|
||||||
}
|
}
|
||||||
|
|
||||||
#if defined(CONFIG_KEXEC)
|
#if defined(CONFIG_KEXEC_CORE)
|
||||||
static atomic_t uv_nmi_kexec_failed;
|
static atomic_t uv_nmi_kexec_failed;
|
||||||
static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
|
static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
|
||||||
{
|
{
|
||||||
@ -519,13 +519,13 @@ static void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
|
|||||||
uv_nmi_sync_exit(0);
|
uv_nmi_sync_exit(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
#else /* !CONFIG_KEXEC */
|
#else /* !CONFIG_KEXEC_CORE */
|
||||||
static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
|
static inline void uv_nmi_kdump(int cpu, int master, struct pt_regs *regs)
|
||||||
{
|
{
|
||||||
if (master)
|
if (master)
|
||||||
pr_err("UV: NMI kdump: KEXEC not supported in this kernel\n");
|
pr_err("UV: NMI kdump: KEXEC not supported in this kernel\n");
|
||||||
}
|
}
|
||||||
#endif /* !CONFIG_KEXEC */
|
#endif /* !CONFIG_KEXEC_CORE */
|
||||||
|
|
||||||
#ifdef CONFIG_KGDB
|
#ifdef CONFIG_KGDB
|
||||||
#ifdef CONFIG_KGDB_KDB
|
#ifdef CONFIG_KGDB_KDB
|
||||||
|
@ -32,66 +32,6 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
|
|||||||
|
|
||||||
#include <asm-generic/dma-mapping-common.h>
|
#include <asm-generic/dma-mapping-common.h>
|
||||||
|
|
||||||
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
|
|
||||||
#define dma_free_noncoherent(d, s, v, h) dma_free_attrs(d, s, v, h, NULL)
|
|
||||||
#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
|
|
||||||
#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
|
|
||||||
|
|
||||||
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
|
|
||||||
dma_addr_t *dma_handle, gfp_t gfp,
|
|
||||||
struct dma_attrs *attrs)
|
|
||||||
{
|
|
||||||
void *ret;
|
|
||||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
|
|
||||||
return ret;
|
|
||||||
|
|
||||||
ret = ops->alloc(dev, size, dma_handle, gfp, attrs);
|
|
||||||
debug_dma_alloc_coherent(dev, size, *dma_handle, ret);
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline void dma_free_attrs(struct device *dev, size_t size,
|
|
||||||
void *vaddr, dma_addr_t dma_handle,
|
|
||||||
struct dma_attrs *attrs)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
if (dma_release_from_coherent(dev, get_order(size), vaddr))
|
|
||||||
return;
|
|
||||||
|
|
||||||
ops->free(dev, size, vaddr, dma_handle, attrs);
|
|
||||||
debug_dma_free_coherent(dev, size, vaddr, dma_handle);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int
|
|
||||||
dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
|
||||||
{
|
|
||||||
struct dma_map_ops *ops = get_dma_ops(dev);
|
|
||||||
|
|
||||||
debug_dma_mapping_error(dev, dma_addr);
|
|
||||||
return ops->mapping_error(dev, dma_addr);
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int
|
|
||||||
dma_supported(struct device *dev, u64 mask)
|
|
||||||
{
|
|
||||||
return 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
static inline int
|
|
||||||
dma_set_mask(struct device *dev, u64 mask)
|
|
||||||
{
|
|
||||||
if(!dev->dma_mask || !dma_supported(dev, mask))
|
|
||||||
return -EIO;
|
|
||||||
|
|
||||||
*dev->dma_mask = mask;
|
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
|
||||||
|
|
||||||
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
|
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
|
||||||
enum dma_data_direction direction);
|
enum dma_data_direction direction);
|
||||||
|
|
||||||
|
@ -2834,7 +2834,7 @@ static int binder_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
|||||||
return VM_FAULT_SIGBUS;
|
return VM_FAULT_SIGBUS;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct vm_operations_struct binder_vm_ops = {
|
static const struct vm_operations_struct binder_vm_ops = {
|
||||||
.open = binder_vma_open,
|
.open = binder_vma_open,
|
||||||
.close = binder_vma_close,
|
.close = binder_vma_close,
|
||||||
.fault = binder_vm_fault,
|
.fault = binder_vm_fault,
|
||||||
|
@ -86,9 +86,7 @@ static int adf_ring_show(struct seq_file *sfile, void *v)
|
|||||||
{
|
{
|
||||||
struct adf_etr_ring_data *ring = sfile->private;
|
struct adf_etr_ring_data *ring = sfile->private;
|
||||||
struct adf_etr_bank_data *bank = ring->bank;
|
struct adf_etr_bank_data *bank = ring->bank;
|
||||||
uint32_t *msg = v;
|
|
||||||
void __iomem *csr = ring->bank->csr_addr;
|
void __iomem *csr = ring->bank->csr_addr;
|
||||||
int i, x;
|
|
||||||
|
|
||||||
if (v == SEQ_START_TOKEN) {
|
if (v == SEQ_START_TOKEN) {
|
||||||
int head, tail, empty;
|
int head, tail, empty;
|
||||||
@ -113,18 +111,8 @@ static int adf_ring_show(struct seq_file *sfile, void *v)
|
|||||||
seq_puts(sfile, "----------- Ring data ------------\n");
|
seq_puts(sfile, "----------- Ring data ------------\n");
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
seq_printf(sfile, "%p:", msg);
|
seq_hex_dump(sfile, "", DUMP_PREFIX_ADDRESS, 32, 4,
|
||||||
x = 0;
|
v, ADF_MSG_SIZE_TO_BYTES(ring->msg_size), false);
|
||||||
i = 0;
|
|
||||||
for (; i < (ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2); i++) {
|
|
||||||
seq_printf(sfile, " %08X", *(msg + i));
|
|
||||||
if ((ADF_MSG_SIZE_TO_BYTES(ring->msg_size) >> 2) != i + 1 &&
|
|
||||||
(++x == 8)) {
|
|
||||||
seq_printf(sfile, "\n%p:", msg + i + 1);
|
|
||||||
x = 0;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
seq_puts(sfile, "\n");
|
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -43,7 +43,7 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE
|
|||||||
|
|
||||||
config EFI_RUNTIME_MAP
|
config EFI_RUNTIME_MAP
|
||||||
bool "Export efi runtime maps to sysfs"
|
bool "Export efi runtime maps to sysfs"
|
||||||
depends on X86 && EFI && KEXEC
|
depends on X86 && EFI && KEXEC_CORE
|
||||||
default y
|
default y
|
||||||
help
|
help
|
||||||
Export efi runtime memory maps to /sys/firmware/efi/runtime-map.
|
Export efi runtime memory maps to /sys/firmware/efi/runtime-map.
|
||||||
|
@ -125,7 +125,7 @@ static int vgem_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct vm_operations_struct vgem_gem_vm_ops = {
|
static const struct vm_operations_struct vgem_gem_vm_ops = {
|
||||||
.fault = vgem_gem_fault,
|
.fault = vgem_gem_fault,
|
||||||
.open = drm_gem_vm_open,
|
.open = drm_gem_vm_open,
|
||||||
.close = drm_gem_vm_close,
|
.close = drm_gem_vm_close,
|
||||||
|
@ -1110,7 +1110,7 @@ static int cs_char_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct vm_operations_struct cs_char_vm_ops = {
|
static const struct vm_operations_struct cs_char_vm_ops = {
|
||||||
.fault = cs_char_vma_fault,
|
.fault = cs_char_vma_fault,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -908,7 +908,7 @@ static int qib_file_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
|||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct vm_operations_struct qib_file_vm_ops = {
|
static const struct vm_operations_struct qib_file_vm_ops = {
|
||||||
.fault = qib_file_vma_fault,
|
.fault = qib_file_vma_fault,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -75,7 +75,7 @@ static void qib_vma_close(struct vm_area_struct *vma)
|
|||||||
kref_put(&ip->ref, qib_release_mmap_info);
|
kref_put(&ip->ref, qib_release_mmap_info);
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct vm_operations_struct qib_vm_ops = {
|
static const struct vm_operations_struct qib_vm_ops = {
|
||||||
.open = qib_vma_open,
|
.open = qib_vma_open,
|
||||||
.close = qib_vma_close,
|
.close = qib_vma_close,
|
||||||
};
|
};
|
||||||
|
@ -872,7 +872,7 @@ static void omap_vout_vm_close(struct vm_area_struct *vma)
|
|||||||
vout->mmap_count--;
|
vout->mmap_count--;
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct vm_operations_struct omap_vout_vm_ops = {
|
static const struct vm_operations_struct omap_vout_vm_ops = {
|
||||||
.open = omap_vout_vm_open,
|
.open = omap_vout_vm_open,
|
||||||
.close = omap_vout_vm_close,
|
.close = omap_vout_vm_close,
|
||||||
};
|
};
|
||||||
|
@@ -418,7 +418,7 @@ static void genwqe_vma_close(struct vm_area_struct *vma)
 	kfree(dma_map);
 }

-static struct vm_operations_struct genwqe_vma_ops = {
+static const struct vm_operations_struct genwqe_vma_ops = {
 	.open = genwqe_vma_open,
 	.close = genwqe_vma_close,
 };
@@ -156,6 +156,12 @@ static const struct file_operations fops_vring = {
 	.llseek = seq_lseek,
 };

+static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
+			    const char *prefix)
+{
+	seq_hex_dump(s, prefix, DUMP_PREFIX_NONE, 16, 1, p, len, false);
+}
+
 static void wil_print_ring(struct seq_file *s, const char *prefix,
 			   void __iomem *off)
 {
@@ -212,8 +218,6 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
 				   le16_to_cpu(hdr.seq), len,
 				   le16_to_cpu(hdr.type), hdr.flags);
 			if (len <= MAX_MBOXITEM_SIZE) {
-				int n = 0;
-				char printbuf[16 * 3 + 2];
 				unsigned char databuf[MAX_MBOXITEM_SIZE];
 				void __iomem *src = wmi_buffer(wil, d.addr) +
 					sizeof(struct wil6210_mbox_hdr);
@@ -223,16 +227,7 @@ static void wil_print_ring(struct seq_file *s, const char *prefix,
 				 * reading header
 				 */
 				wil_memcpy_fromio_32(databuf, src, len);
-				while (n < len) {
-					int l = min(len - n, 16);
-
-					hex_dump_to_buffer(databuf + n, l,
-							   16, 1, printbuf,
-							   sizeof(printbuf),
-							   false);
-					seq_printf(s, " : %s\n", printbuf);
-					n += l;
-				}
+				wil_seq_hexdump(s, databuf, len, " : ");
 			}
 		} else {
 			seq_puts(s, "\n");
@@ -867,22 +862,6 @@ static const struct file_operations fops_wmi = {
 	.open  = simple_open,
 };

-static void wil_seq_hexdump(struct seq_file *s, void *p, int len,
-			    const char *prefix)
-{
-	char printbuf[16 * 3 + 2];
-	int i = 0;
-
-	while (i < len) {
-		int l = min(len - i, 16);
-
-		hex_dump_to_buffer(p + i, l, 16, 1, printbuf,
-				   sizeof(printbuf), false);
-		seq_printf(s, "%s%s\n", prefix, printbuf);
-		i += l;
-	}
-}
-
 static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb)
 {
 	int i = 0;
@@ -1103,16 +1103,9 @@ static int ccio_proc_bitmap_info(struct seq_file *m, void *p)
 	struct ioc *ioc = ioc_list;

 	while (ioc != NULL) {
-		u32 *res_ptr = (u32 *)ioc->res_map;
-		int j;
-
-		for (j = 0; j < (ioc->res_size / sizeof(u32)); j++) {
-			if ((j & 7) == 0)
-				seq_puts(m, "\n   ");
-			seq_printf(m, "%08x", *res_ptr);
-			res_ptr++;
-		}
-		seq_puts(m, "\n\n");
+		seq_hex_dump(m, "   ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
+			     ioc->res_size, false);
+		seq_putc(m, '\n');
 		ioc = ioc->next;
 		break; /* XXX - remove me */
 	}
@@ -1854,14 +1854,9 @@ sba_proc_bitmap_info(struct seq_file *m, void *p)
 {
 	struct sba_device *sba_dev = sba_list;
 	struct ioc *ioc = &sba_dev->ioc[0];	/* FIXME: Multi-IOC support! */
-	unsigned int *res_ptr = (unsigned int *)ioc->res_map;
-	int i;
-
-	for (i = 0; i < (ioc->res_size/sizeof(unsigned int)); ++i, ++res_ptr) {
-		if ((i & 7) == 0)
-			seq_puts(m, "\n   ");
-		seq_printf(m, " %08x", *res_ptr);
-	}
+	seq_hex_dump(m, "   ", DUMP_PREFIX_NONE, 32, 4, ioc->res_map,
+		     ioc->res_size, false);
 	seq_putc(m, '\n');

 	return 0;
@@ -467,7 +467,7 @@ static void pci_device_shutdown(struct device *dev)
 	pci_msi_shutdown(pci_dev);
 	pci_msix_shutdown(pci_dev);

-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 	/*
 	 * If this is a kexec reboot, turn off Bus Master bit on the
 	 * device to tell it to not continue to do DMA. Don't touch
@@ -1206,16 +1206,8 @@ static void sprinthx(unsigned char *title, struct seq_file *m,
 static void sprinthx4(unsigned char *title, struct seq_file *m,
 		      unsigned int *array, unsigned int len)
 {
-	int r;
-
 	seq_printf(m, "\n%s\n", title);
-	for (r = 0; r < len; r++) {
-		if ((r % 8) == 0)
-			seq_printf(m, "    ");
-		seq_printf(m, "%08X ", array[r]);
-		if ((r % 8) == 7)
-			seq_putc(m, '\n');
-	}
+	seq_hex_dump(m, "    ", DUMP_PREFIX_NONE, 32, 4, array, len, false);
 	seq_putc(m, '\n');
 }

@@ -997,7 +997,7 @@ static void ion_vm_close(struct vm_area_struct *vma)
 	mutex_unlock(&buffer->lock);
 }

-static struct vm_operations_struct ion_vma_ops = {
+static const struct vm_operations_struct ion_vma_ops = {
 	.open = ion_vm_open,
 	.close = ion_vm_close,
 	.fault = ion_vm_fault,
@@ -2156,7 +2156,7 @@ static void comedi_vm_close(struct vm_area_struct *area)
 	comedi_buf_map_put(bm);
 }

-static struct vm_operations_struct comedi_vm_ops = {
+static const struct vm_operations_struct comedi_vm_ops = {
 	.open = comedi_vm_open,
 	.close = comedi_vm_close,
 };
@@ -1091,7 +1091,7 @@ static void mmap_user_close(struct vm_area_struct *vma)
 	omapfb_put_mem_region(rg);
 }

-static struct vm_operations_struct mmap_user_ops = {
+static const struct vm_operations_struct mmap_user_ops = {
 	.open = mmap_user_open,
 	.close = mmap_user_close,
 };
@@ -494,7 +494,7 @@ static void gntalloc_vma_close(struct vm_area_struct *vma)
 	mutex_unlock(&gref_mutex);
 }

-static struct vm_operations_struct gntalloc_vmops = {
+static const struct vm_operations_struct gntalloc_vmops = {
 	.open = gntalloc_vma_open,
 	.close = gntalloc_vma_close,
 };
@@ -433,7 +433,7 @@ static struct page *gntdev_vma_find_special_page(struct vm_area_struct *vma,
 	return map->pages[(addr - map->pages_vm_start) >> PAGE_SHIFT];
 }

-static struct vm_operations_struct gntdev_vmops = {
+static const struct vm_operations_struct gntdev_vmops = {
 	.open = gntdev_vma_open,
 	.close = gntdev_vma_close,
 	.find_special_page = gntdev_vma_find_special_page,
@@ -414,7 +414,7 @@ static int alloc_empty_pages(struct vm_area_struct *vma, int numpgs)
 	return 0;
 }

-static struct vm_operations_struct privcmd_vm_ops;
+static const struct vm_operations_struct privcmd_vm_ops;

 static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 {
@@ -605,7 +605,7 @@ static int privcmd_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	return VM_FAULT_SIGBUS;
 }

-static struct vm_operations_struct privcmd_vm_ops = {
+static const struct vm_operations_struct privcmd_vm_ops = {
 	.close = privcmd_close,
 	.fault = privcmd_fault
 };
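All of the vm_operations_struct constifications in this series reduce to the same shape: the ops table becomes read-only data and only the handlers it points to stay in writable text. A minimal sketch of the pattern, for reference (the example_* names are hypothetical, not taken from any of the drivers above):

#include <linux/mm.h>

/* Hypothetical fault handler; a real driver maps or allocates the page. */
static int example_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;	/* placeholder: nothing to map */
}

/*
 * const lets the compiler place the table in .rodata, so the function
 * pointers cannot be overwritten at run time.
 */
static const struct vm_operations_struct example_vm_ops = {
	.fault = example_vm_fault,
};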
@@ -311,9 +311,6 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	*/
 	flags &= ~(__GFP_DMA | __GFP_HIGHMEM);

-	if (dma_alloc_from_coherent(hwdev, size, dma_handle, &ret))
-		return ret;
-
 	/* On ARM this function returns an ioremap'ped virtual address for
 	 * which virt_to_phys doesn't return the corresponding physical
 	 * address. In fact on ARM virt_to_phys only works for kernel direct
@@ -356,9 +353,6 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	phys_addr_t phys;
 	u64 dma_mask = DMA_BIT_MASK(32);

-	if (dma_release_from_coherent(hwdev, order, vaddr))
-		return;
-
 	if (hwdev && hwdev->coherent_dma_mask)
 		dma_mask = hwdev->coherent_dma_mask;

@@ -18,6 +18,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/writeback.h>
+#include <linux/blkdev.h>
 #include "affs.h"

 static int affs_statfs(struct dentry *dentry, struct kstatfs *buf);
@@ -352,18 +353,19 @@ static int affs_fill_super(struct super_block *sb, void *data, int silent)
 	 * blocks, we will have to change it.
 	 */

-	size = sb->s_bdev->bd_inode->i_size >> 9;
+	size = i_size_read(sb->s_bdev->bd_inode) >> 9;
 	pr_debug("initial blocksize=%d, #blocks=%d\n", 512, size);

 	affs_set_blocksize(sb, PAGE_SIZE);
 	/* Try to find root block. Its location depends on the block size. */

-	i = 512;
-	j = 4096;
+	i = bdev_logical_block_size(sb->s_bdev);
+	j = PAGE_SIZE;
 	if (blocksize > 0) {
 		i = j = blocksize;
 		size = size / (blocksize / 512);
 	}

 	for (blocksize = i; blocksize <= j; blocksize <<= 1, size >>= 1) {
 		sbi->s_root_block = root_block;
 		if (root_block < 0)
@@ -1593,7 +1593,7 @@ out:
 	return err;
 }

-static struct vm_operations_struct ceph_vmops = {
+static const struct vm_operations_struct ceph_vmops = {
 	.fault = ceph_filemap_fault,
 	.page_mkwrite = ceph_page_mkwrite,
 };
@@ -3216,7 +3216,7 @@ cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 	return VM_FAULT_LOCKED;
 }

-static struct vm_operations_struct cifs_file_vm_ops = {
+static const struct vm_operations_struct cifs_file_vm_ops = {
 	.fault = filemap_fault,
 	.map_pages = filemap_map_pages,
 	.page_mkwrite = cifs_page_mkwrite,
@@ -353,7 +353,7 @@ int venus_readlink(struct super_block *sb, struct CodaFid *fid,
 	char *result;

 	insize = max_t(unsigned int,
-		       INSIZE(readlink), OUTSIZE(readlink)+ *length + 1);
+		       INSIZE(readlink), OUTSIZE(readlink)+ *length);
 	UPARG(CODA_READLINK);

 	inp->coda_readlink.VFid = *fid;
@@ -361,8 +361,8 @@ int venus_readlink(struct super_block *sb, struct CodaFid *fid,
 	error = coda_upcall(coda_vcp(sb), insize, &outsize, inp);
 	if (!error) {
 		retlen = outp->coda_readlink.count;
-		if ( retlen > *length )
-			retlen = *length;
+		if (retlen >= *length)
+			retlen = *length - 1;
 		*length = retlen;
 		result = (char *)outp + (long)outp->coda_readlink.data;
 		memcpy(buffer, result, retlen);
@@ -513,10 +513,10 @@ void do_coredump(const siginfo_t *siginfo)
 	const struct cred *old_cred;
 	struct cred *cred;
 	int retval = 0;
-	int flag = 0;
 	int ispipe;
 	struct files_struct *displaced;
-	bool need_nonrelative = false;
+	/* require nonrelative corefile path and be extra careful */
+	bool need_suid_safe = false;
 	bool core_dumped = false;
 	static atomic_t core_dump_count = ATOMIC_INIT(0);
 	struct coredump_params cprm = {
@@ -550,9 +550,8 @@ void do_coredump(const siginfo_t *siginfo)
 	 */
 	if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
 		/* Setuid core dump mode */
-		flag = O_EXCL;		/* Stop rewrite attacks */
 		cred->fsuid = GLOBAL_ROOT_UID;	/* Dump root private */
-		need_nonrelative = true;
+		need_suid_safe = true;
 	}

 	retval = coredump_wait(siginfo->si_signo, &core_state);
@@ -633,7 +632,7 @@ void do_coredump(const siginfo_t *siginfo)
 		if (cprm.limit < binfmt->min_coredump)
 			goto fail_unlock;

-		if (need_nonrelative && cn.corename[0] != '/') {
+		if (need_suid_safe && cn.corename[0] != '/') {
 			printk(KERN_WARNING "Pid %d(%s) can only dump core "\
 				"to fully qualified path!\n",
 				task_tgid_vnr(current), current->comm);
@@ -641,8 +640,35 @@ void do_coredump(const siginfo_t *siginfo)
 			goto fail_unlock;
 		}

+		/*
+		 * Unlink the file if it exists unless this is a SUID
+		 * binary - in that case, we're running around with root
+		 * privs and don't want to unlink another user's coredump.
+		 */
+		if (!need_suid_safe) {
+			mm_segment_t old_fs;
+
+			old_fs = get_fs();
+			set_fs(KERNEL_DS);
+			/*
+			 * If it doesn't exist, that's fine. If there's some
+			 * other problem, we'll catch it at the filp_open().
+			 */
+			(void) sys_unlink((const char __user *)cn.corename);
+			set_fs(old_fs);
+		}
+
+		/*
+		 * There is a race between unlinking and creating the
+		 * file, but if that causes an EEXIST here, that's
+		 * fine - another process raced with us while creating
+		 * the corefile, and the other process won. To userspace,
+		 * what matters is that at least one of the two processes
+		 * writes its coredump successfully, not which one.
+		 */
 		cprm.file = filp_open(cn.corename,
-				 O_CREAT | 2 | O_NOFOLLOW | O_LARGEFILE | flag,
+				 O_CREAT | 2 | O_NOFOLLOW |
+				 O_LARGEFILE | O_EXCL,
 				 0600);
 		if (IS_ERR(cprm.file))
 			goto fail_unlock;
@@ -659,11 +685,15 @@ void do_coredump(const siginfo_t *siginfo)
 		if (!S_ISREG(inode->i_mode))
 			goto close_fail;
 		/*
-		 * Dont allow local users get cute and trick others to coredump
-		 * into their pre-created files.
+		 * Don't dump core if the filesystem changed owner or mode
+		 * of the file during file creation. This is an issue when
+		 * a process dumps core while its cwd is e.g. on a vfat
+		 * filesystem.
 		 */
 		if (!uid_eq(inode->i_uid, current_fsuid()))
 			goto close_fail;
+		if ((inode->i_mode & 0677) != 0600)
+			goto close_fail;
 		if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
 			goto close_fail;
 		if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
@@ -288,7 +288,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 			page_cache_release(page);
 			goto fail;
 		}
-		page_cache_release(page);
 		node->page[i] = page;
 	}

@@ -398,11 +397,11 @@ node_error:

 void hfs_bnode_free(struct hfs_bnode *node)
 {
-	//int i;
+	int i;

-	//for (i = 0; i < node->tree->pages_per_bnode; i++)
-	//	if (node->page[i])
-	//		page_cache_release(node->page[i]);
+	for (i = 0; i < node->tree->pages_per_bnode; i++)
+		if (node->page[i])
+			page_cache_release(node->page[i]);
 	kfree(node);
 }

@@ -131,13 +131,16 @@ skip:
 	hfs_bnode_write(node, entry, data_off + key_len, entry_len);
 	hfs_bnode_dump(node);

-	if (new_node) {
-		/* update parent key if we inserted a key
-		 * at the start of the first node
-		 */
-		if (!rec && new_node != node)
-			hfs_brec_update_parent(fd);
+	/*
+	 * update parent key if we inserted a key
+	 * at the start of the node and it is not the new node
+	 */
+	if (!rec && new_node != node) {
+		hfs_bnode_read_key(node, fd->search_key, data_off + size);
+		hfs_brec_update_parent(fd);
+	}

+	if (new_node) {
 		hfs_bnode_put(fd->bnode);
 		if (!new_node->parent) {
 			hfs_btree_inc_height(tree);
@@ -166,9 +169,6 @@ skip:
 		goto again;
 	}

-	if (!rec)
-		hfs_brec_update_parent(fd);
-
 	return 0;
 }

|
|||||||
if (IS_ERR(parent))
|
if (IS_ERR(parent))
|
||||||
return PTR_ERR(parent);
|
return PTR_ERR(parent);
|
||||||
__hfs_brec_find(parent, fd);
|
__hfs_brec_find(parent, fd);
|
||||||
|
if (fd->record < 0)
|
||||||
|
return -ENOENT;
|
||||||
hfs_bnode_dump(parent);
|
hfs_bnode_dump(parent);
|
||||||
rec = fd->record;
|
rec = fd->record;
|
||||||
|
|
||||||
|
@@ -454,7 +454,6 @@ static struct hfs_bnode *__hfs_bnode_create(struct hfs_btree *tree, u32 cnid)
 			page_cache_release(page);
 			goto fail;
 		}
-		page_cache_release(page);
 		node->page[i] = page;
 	}

|
|||||||
|
|
||||||
void hfs_bnode_free(struct hfs_bnode *node)
|
void hfs_bnode_free(struct hfs_bnode *node)
|
||||||
{
|
{
|
||||||
#if 0
|
|
||||||
int i;
|
int i;
|
||||||
|
|
||||||
for (i = 0; i < node->tree->pages_per_bnode; i++)
|
for (i = 0; i < node->tree->pages_per_bnode; i++)
|
||||||
if (node->page[i])
|
if (node->page[i])
|
||||||
page_cache_release(node->page[i]);
|
page_cache_release(node->page[i]);
|
||||||
#endif
|
|
||||||
kfree(node);
|
kfree(node);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -2438,7 +2438,7 @@ done:

 /**
  * path_mountpoint - look up a path to be umounted
- * @nameidata:	lookup context
+ * @nd:		lookup context
  * @flags:	lookup flags
  * @path:	pointer to container for result
  *

fs/proc/base.c
@@ -1230,10 +1230,9 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
 			   size_t count, loff_t *ppos)
 {
 	struct inode * inode = file_inode(file);
-	char *page, *tmp;
-	ssize_t length;
 	uid_t loginuid;
 	kuid_t kloginuid;
+	int rv;

 	rcu_read_lock();
 	if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) {
@@ -1242,46 +1241,28 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
 	}
 	rcu_read_unlock();

-	if (count >= PAGE_SIZE)
-		count = PAGE_SIZE - 1;
-
 	if (*ppos != 0) {
 		/* No partial writes. */
 		return -EINVAL;
 	}
-	page = (char*)__get_free_page(GFP_TEMPORARY);
-	if (!page)
-		return -ENOMEM;
-	length = -EFAULT;
-	if (copy_from_user(page, buf, count))
-		goto out_free_page;

-	page[count] = '\0';
-	loginuid = simple_strtoul(page, &tmp, 10);
-	if (tmp == page) {
-		length = -EINVAL;
-		goto out_free_page;
-
-	}
+	rv = kstrtou32_from_user(buf, count, 10, &loginuid);
+	if (rv < 0)
+		return rv;

 	/* is userspace tring to explicitly UNSET the loginuid? */
 	if (loginuid == AUDIT_UID_UNSET) {
 		kloginuid = INVALID_UID;
 	} else {
 		kloginuid = make_kuid(file->f_cred->user_ns, loginuid);
-		if (!uid_valid(kloginuid)) {
-			length = -EINVAL;
-			goto out_free_page;
-		}
+		if (!uid_valid(kloginuid))
+			return -EINVAL;
 	}

-	length = audit_set_loginuid(kloginuid);
-	if (likely(length == 0))
-		length = count;
-
-out_free_page:
-	free_page((unsigned long) page);
-	return length;
+	rv = audit_set_loginuid(kloginuid);
+	if (rv < 0)
+		return rv;
+	return count;
 }

 static const struct file_operations proc_loginuid_operations = {
@@ -1335,8 +1316,9 @@ static ssize_t proc_fault_inject_write(struct file * file,
 			const char __user * buf, size_t count, loff_t *ppos)
 {
 	struct task_struct *task;
-	char buffer[PROC_NUMBUF], *end;
+	char buffer[PROC_NUMBUF];
 	int make_it_fail;
+	int rv;

 	if (!capable(CAP_SYS_RESOURCE))
 		return -EPERM;
@@ -1345,9 +1327,9 @@ static ssize_t proc_fault_inject_write(struct file * file,
 		count = sizeof(buffer) - 1;
 	if (copy_from_user(buffer, buf, count))
 		return -EFAULT;
-	make_it_fail = simple_strtol(strstrip(buffer), &end, 0);
-	if (*end)
-		return -EINVAL;
+	rv = kstrtoint(strstrip(buffer), 0, &make_it_fail);
+	if (rv < 0)
+		return rv;
 	if (make_it_fail < 0 || make_it_fail > 1)
 		return -EINVAL;

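The /proc write handlers above all move from open-coded simple_strtol()/simple_strtoul() parsing to the checked kstrto*() helpers, which reject trailing garbage and overflow instead of silently ignoring them. A minimal sketch of the pattern (example_write is hypothetical, not one of the handlers changed here):

#include <linux/kernel.h>
#include <linux/fs.h>

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	unsigned int val;
	int rv;

	/* copy from userspace, NUL-terminate, and parse in one call */
	rv = kstrtouint_from_user(buf, count, 0, &val);
	if (rv < 0)
		return rv;

	/* ... apply val ... */
	return count;
}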
@@ -1836,8 +1818,6 @@ end_instantiate:
 	return dir_emit(ctx, name, len, 1, DT_UNKNOWN);
}

-#ifdef CONFIG_CHECKPOINT_RESTORE
-
 /*
  * dname_to_vma_addr - maps a dentry name into two unsigned longs
  * which represent vma start and end addresses.
@@ -1864,11 +1844,6 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
 	if (flags & LOOKUP_RCU)
 		return -ECHILD;

-	if (!capable(CAP_SYS_ADMIN)) {
-		status = -EPERM;
-		goto out_notask;
-	}
-
 	inode = d_inode(dentry);
 	task = get_proc_task(inode);
 	if (!task)
@@ -1957,6 +1932,29 @@ struct map_files_info {
 	unsigned char	name[4*sizeof(long)+2];	/* max: %lx-%lx\0 */
 };

+/*
+ * Only allow CAP_SYS_ADMIN to follow the links, due to concerns about how the
+ * symlinks may be used to bypass permissions on ancestor directories in the
+ * path to the file in question.
+ */
+static const char *
+proc_map_files_follow_link(struct dentry *dentry, void **cookie)
+{
+	if (!capable(CAP_SYS_ADMIN))
+		return ERR_PTR(-EPERM);
+
+	return proc_pid_follow_link(dentry, NULL);
+}
+
+/*
+ * Identical to proc_pid_link_inode_operations except for follow_link()
+ */
+static const struct inode_operations proc_map_files_link_inode_operations = {
+	.readlink	= proc_pid_readlink,
+	.follow_link	= proc_map_files_follow_link,
+	.setattr	= proc_setattr,
+};
+
 static int
 proc_map_files_instantiate(struct inode *dir, struct dentry *dentry,
 			   struct task_struct *task, const void *ptr)
@@ -1972,7 +1970,7 @@ proc_map_files_instantiate(struct inode *dir, struct dentry *dentry,
 	ei = PROC_I(inode);
 	ei->op.proc_get_link = proc_map_files_get_link;

-	inode->i_op = &proc_pid_link_inode_operations;
+	inode->i_op = &proc_map_files_link_inode_operations;
 	inode->i_size = 64;
 	inode->i_mode = S_IFLNK;

@@ -1996,10 +1994,6 @@ static struct dentry *proc_map_files_lookup(struct inode *dir,
 	int result;
 	struct mm_struct *mm;

-	result = -EPERM;
-	if (!capable(CAP_SYS_ADMIN))
-		goto out;
-
 	result = -ENOENT;
 	task = get_proc_task(dir);
 	if (!task)
@@ -2053,10 +2047,6 @@ proc_map_files_readdir(struct file *file, struct dir_context *ctx)
 	struct map_files_info *p;
 	int ret;

-	ret = -EPERM;
-	if (!capable(CAP_SYS_ADMIN))
-		goto out;
-
 	ret = -ENOENT;
 	task = get_proc_task(file_inode(file));
 	if (!task)
@@ -2245,7 +2235,6 @@ static const struct file_operations proc_timers_operations = {
 	.llseek		= seq_lseek,
 	.release	= seq_release_private,
 };
-#endif /* CONFIG_CHECKPOINT_RESTORE */

 static int proc_pident_instantiate(struct inode *dir,
 		struct dentry *dentry, struct task_struct *task, const void *ptr)
@@ -2481,32 +2470,20 @@ static ssize_t proc_coredump_filter_write(struct file *file,
 {
 	struct task_struct *task;
 	struct mm_struct *mm;
-	char buffer[PROC_NUMBUF], *end;
 	unsigned int val;
 	int ret;
 	int i;
 	unsigned long mask;

-	ret = -EFAULT;
-	memset(buffer, 0, sizeof(buffer));
-	if (count > sizeof(buffer) - 1)
-		count = sizeof(buffer) - 1;
-	if (copy_from_user(buffer, buf, count))
-		goto out_no_task;
-
-	ret = -EINVAL;
-	val = (unsigned int)simple_strtoul(buffer, &end, 0);
-	if (*end == '\n')
-		end++;
-	if (end - buffer == 0)
-		goto out_no_task;
+	ret = kstrtouint_from_user(buf, count, 0, &val);
+	if (ret < 0)
+		return ret;

 	ret = -ESRCH;
 	task = get_proc_task(file_inode(file));
 	if (!task)
 		goto out_no_task;

-	ret = end - buffer;
 	mm = get_task_mm(task);
 	if (!mm)
 		goto out_no_mm;
@@ -2522,7 +2499,9 @@ static ssize_t proc_coredump_filter_write(struct file *file,
 out_no_mm:
 	put_task_struct(task);
 out_no_task:
-	return ret;
+	if (ret < 0)
+		return ret;
+	return count;
 }

 static const struct file_operations proc_coredump_filter_operations = {
@@ -2744,9 +2723,7 @@ static const struct inode_operations proc_task_inode_operations;
 static const struct pid_entry tgid_base_stuff[] = {
 	DIR("task",       S_IRUGO|S_IXUGO, proc_task_inode_operations, proc_task_operations),
 	DIR("fd",         S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
-#ifdef CONFIG_CHECKPOINT_RESTORE
 	DIR("map_files",  S_IRUSR|S_IXUSR, proc_map_files_inode_operations, proc_map_files_operations),
-#endif
 	DIR("fdinfo",     S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
 	DIR("ns",	  S_IRUSR|S_IXUGO, proc_ns_dir_inode_operations, proc_ns_dir_operations),
 #ifdef CONFIG_NET
@@ -26,7 +26,7 @@

 #include "internal.h"

-static DEFINE_SPINLOCK(proc_subdir_lock);
+static DEFINE_RWLOCK(proc_subdir_lock);

 static int proc_match(unsigned int len, const char *name, struct proc_dir_entry *de)
 {
@@ -172,9 +172,9 @@ static int xlate_proc_name(const char *name, struct proc_dir_entry **ret,
 {
 	int rv;

-	spin_lock(&proc_subdir_lock);
+	read_lock(&proc_subdir_lock);
 	rv = __xlate_proc_name(name, ret, residual);
-	spin_unlock(&proc_subdir_lock);
+	read_unlock(&proc_subdir_lock);
 	return rv;
 }

@@ -231,11 +231,11 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
 {
 	struct inode *inode;

-	spin_lock(&proc_subdir_lock);
+	read_lock(&proc_subdir_lock);
 	de = pde_subdir_find(de, dentry->d_name.name, dentry->d_name.len);
 	if (de) {
 		pde_get(de);
-		spin_unlock(&proc_subdir_lock);
+		read_unlock(&proc_subdir_lock);
 		inode = proc_get_inode(dir->i_sb, de);
 		if (!inode)
 			return ERR_PTR(-ENOMEM);
@@ -243,7 +243,7 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *dir,
 		d_add(dentry, inode);
 		return NULL;
 	}
-	spin_unlock(&proc_subdir_lock);
+	read_unlock(&proc_subdir_lock);
 	return ERR_PTR(-ENOENT);
 }

@@ -270,12 +270,12 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *file,
 	if (!dir_emit_dots(file, ctx))
 		return 0;

-	spin_lock(&proc_subdir_lock);
+	read_lock(&proc_subdir_lock);
 	de = pde_subdir_first(de);
 	i = ctx->pos - 2;
 	for (;;) {
 		if (!de) {
-			spin_unlock(&proc_subdir_lock);
+			read_unlock(&proc_subdir_lock);
 			return 0;
 		}
 		if (!i)
@@ -287,19 +287,19 @@ int proc_readdir_de(struct proc_dir_entry *de, struct file *file,
 	do {
 		struct proc_dir_entry *next;
 		pde_get(de);
-		spin_unlock(&proc_subdir_lock);
+		read_unlock(&proc_subdir_lock);
 		if (!dir_emit(ctx, de->name, de->namelen,
 			    de->low_ino, de->mode >> 12)) {
 			pde_put(de);
 			return 0;
 		}
-		spin_lock(&proc_subdir_lock);
+		read_lock(&proc_subdir_lock);
 		ctx->pos++;
 		next = pde_subdir_next(de);
 		pde_put(de);
 		de = next;
 	} while (de);
-	spin_unlock(&proc_subdir_lock);
+	read_unlock(&proc_subdir_lock);
 	return 1;
 }

@@ -338,16 +338,16 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
 	if (ret)
 		return ret;

-	spin_lock(&proc_subdir_lock);
+	write_lock(&proc_subdir_lock);
 	dp->parent = dir;
 	if (pde_subdir_insert(dir, dp) == false) {
 		WARN(1, "proc_dir_entry '%s/%s' already registered\n",
 		     dir->name, dp->name);
-		spin_unlock(&proc_subdir_lock);
+		write_unlock(&proc_subdir_lock);
 		proc_free_inum(dp->low_ino);
 		return -EEXIST;
 	}
-	spin_unlock(&proc_subdir_lock);
+	write_unlock(&proc_subdir_lock);

 	return 0;
 }
@@ -549,9 +549,9 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
 	const char *fn = name;
 	unsigned int len;

-	spin_lock(&proc_subdir_lock);
+	write_lock(&proc_subdir_lock);
 	if (__xlate_proc_name(name, &parent, &fn) != 0) {
-		spin_unlock(&proc_subdir_lock);
+		write_unlock(&proc_subdir_lock);
 		return;
 	}
 	len = strlen(fn);
@@ -559,7 +559,7 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
 	de = pde_subdir_find(parent, fn, len);
 	if (de)
 		rb_erase(&de->subdir_node, &parent->subdir);
-	spin_unlock(&proc_subdir_lock);
+	write_unlock(&proc_subdir_lock);
 	if (!de) {
 		WARN(1, "name '%s'\n", name);
 		return;
@@ -583,16 +583,16 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
 	const char *fn = name;
 	unsigned int len;

-	spin_lock(&proc_subdir_lock);
+	write_lock(&proc_subdir_lock);
 	if (__xlate_proc_name(name, &parent, &fn) != 0) {
-		spin_unlock(&proc_subdir_lock);
+		write_unlock(&proc_subdir_lock);
 		return -ENOENT;
 	}
 	len = strlen(fn);

 	root = pde_subdir_find(parent, fn, len);
 	if (!root) {
-		spin_unlock(&proc_subdir_lock);
+		write_unlock(&proc_subdir_lock);
 		return -ENOENT;
 	}
 	rb_erase(&root->subdir_node, &parent->subdir);
@@ -605,7 +605,7 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
 			de = next;
 			continue;
 		}
-		spin_unlock(&proc_subdir_lock);
+		write_unlock(&proc_subdir_lock);

 		proc_entry_rundown(de);
 		next = de->parent;
@@ -616,7 +616,7 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
 			break;
 		pde_put(de);

-		spin_lock(&proc_subdir_lock);
+		write_lock(&proc_subdir_lock);
 		de = next;
 	}
 	pde_put(root);
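The conversion of proc_subdir_lock from a spinlock to an rwlock lets concurrent lookups and readdirs walk the subdir tree in parallel, while registration and removal still take the lock exclusively. Roughly, the discipline looks like this (a sketch with a hypothetical example_lock mirroring proc_subdir_lock):

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);

static void example_lookup(void)
{
	read_lock(&example_lock);	/* many readers may hold this at once */
	/* ... walk the tree ... */
	read_unlock(&example_lock);
}

static void example_insert(void)
{
	write_lock(&example_lock);	/* excludes readers and other writers */
	/* ... modify the tree ... */
	write_unlock(&example_lock);
}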
@@ -9,12 +9,16 @@
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include <linux/hugetlb.h>
+#include <linux/memcontrol.h>
+#include <linux/mmu_notifier.h>
+#include <linux/page_idle.h>
 #include <linux/kernel-page-flags.h>
 #include <asm/uaccess.h>
 #include "internal.h"

 #define KPMSIZE sizeof(u64)
 #define KPMMASK (KPMSIZE - 1)
+#define KPMBITS (KPMSIZE * BITS_PER_BYTE)

 /* /proc/kpagecount - an array exposing page counts
  *
@@ -54,6 +58,8 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
 		pfn++;
 		out++;
 		count -= KPMSIZE;
+
+		cond_resched();
 	}

 	*ppos += (char __user *)out - buf;
@@ -146,6 +152,9 @@ u64 stable_page_flags(struct page *page)
 	if (PageBalloon(page))
 		u |= 1 << KPF_BALLOON;

+	if (page_is_idle(page))
+		u |= 1 << KPF_IDLE;
+
 	u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);

 	u |= kpf_copy_bit(k, KPF_SLAB, PG_slab);
@@ -212,6 +221,8 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
 		pfn++;
 		out++;
 		count -= KPMSIZE;
+
+		cond_resched();
 	}

 	*ppos += (char __user *)out - buf;
@@ -225,10 +236,64 @@ static const struct file_operations proc_kpageflags_operations = {
 	.read = kpageflags_read,
 };

+#ifdef CONFIG_MEMCG
+static ssize_t kpagecgroup_read(struct file *file, char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	u64 __user *out = (u64 __user *)buf;
+	struct page *ppage;
+	unsigned long src = *ppos;
+	unsigned long pfn;
+	ssize_t ret = 0;
+	u64 ino;
+
+	pfn = src / KPMSIZE;
+	count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
+	if (src & KPMMASK || count & KPMMASK)
+		return -EINVAL;
+
+	while (count > 0) {
+		if (pfn_valid(pfn))
+			ppage = pfn_to_page(pfn);
+		else
+			ppage = NULL;
+
+		if (ppage)
+			ino = page_cgroup_ino(ppage);
+		else
+			ino = 0;
+
+		if (put_user(ino, out)) {
+			ret = -EFAULT;
+			break;
+		}
+
+		pfn++;
+		out++;
+		count -= KPMSIZE;
+
+		cond_resched();
+	}
+
+	*ppos += (char __user *)out - buf;
+	if (!ret)
+		ret = (char __user *)out - buf;
+	return ret;
+}
+
+static const struct file_operations proc_kpagecgroup_operations = {
+	.llseek = mem_lseek,
+	.read = kpagecgroup_read,
+};
+#endif /* CONFIG_MEMCG */
+
 static int __init proc_page_init(void)
 {
 	proc_create("kpagecount", S_IRUSR, NULL, &proc_kpagecount_operations);
 	proc_create("kpageflags", S_IRUSR, NULL, &proc_kpageflags_operations);
+#ifdef CONFIG_MEMCG
+	proc_create("kpagecgroup", S_IRUSR, NULL, &proc_kpagecgroup_operations);
+#endif
 	return 0;
 }
 fs_initcall(proc_page_init);
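The new /proc/kpagecgroup file follows the kpagecount/kpageflags layout: one u64 per PFN, so entry N lives at byte offset N * 8. A sketch of how userspace could read the memcg inode for a single page (the PFN here is hypothetical, and error handling is trimmed):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned long pfn = 0x1000;	/* hypothetical page frame number */
	uint64_t ino = 0;
	int fd = open("/proc/kpagecgroup", O_RDONLY);

	if (fd < 0)
		return 1;
	/* each entry is 8 bytes, indexed by PFN */
	if (pread(fd, &ino, sizeof(ino), pfn * sizeof(ino)) == sizeof(ino))
		printf("pfn %lu -> memcg inode %llu\n", pfn,
		       (unsigned long long)ino);
	close(fd);
	return 0;
}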
@@ -13,6 +13,7 @@
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/mmu_notifier.h>
+#include <linux/page_idle.h>

 #include <asm/elf.h>
 #include <asm/uaccess.h>
@@ -459,7 +460,7 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,

 	mss->resident += size;
 	/* Accumulate the size in pages that have been accessed. */
-	if (young || PageReferenced(page))
+	if (young || page_is_young(page) || PageReferenced(page))
 		mss->referenced += size;
 	mapcount = page_mapcount(page);
 	if (mapcount >= 2) {
@@ -807,6 +808,7 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,

 		/* Clear accessed and referenced bits. */
 		pmdp_test_and_clear_young(vma, addr, pmd);
+		test_and_clear_page_young(page);
 		ClearPageReferenced(page);
 out:
 		spin_unlock(ptl);
@@ -834,6 +836,7 @@ out:

 		/* Clear accessed and referenced bits. */
 		ptep_test_and_clear_young(vma, addr, pte);
+		test_and_clear_page_young(page);
 		ClearPageReferenced(page);
 	}
 	pte_unmap_unlock(pte - 1, ptl);
@@ -12,6 +12,7 @@
 #include <linux/slab.h>
 #include <linux/cred.h>
 #include <linux/mm.h>
+#include <linux/printk.h>

 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -773,6 +774,47 @@ void seq_pad(struct seq_file *m, char c)
 }
 EXPORT_SYMBOL(seq_pad);

+/* A complete analogue of print_hex_dump() */
+void seq_hex_dump(struct seq_file *m, const char *prefix_str, int prefix_type,
+		  int rowsize, int groupsize, const void *buf, size_t len,
+		  bool ascii)
+{
+	const u8 *ptr = buf;
+	int i, linelen, remaining = len;
+	int ret;
+
+	if (rowsize != 16 && rowsize != 32)
+		rowsize = 16;
+
+	for (i = 0; i < len && !seq_has_overflowed(m); i += rowsize) {
+		linelen = min(remaining, rowsize);
+		remaining -= rowsize;
+
+		switch (prefix_type) {
+		case DUMP_PREFIX_ADDRESS:
+			seq_printf(m, "%s%p: ", prefix_str, ptr + i);
+			break;
+		case DUMP_PREFIX_OFFSET:
+			seq_printf(m, "%s%.8x: ", prefix_str, i);
+			break;
+		default:
+			seq_printf(m, "%s", prefix_str);
+			break;
+		}
+
+		ret = hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
+					 m->buf + m->count, m->size - m->count,
+					 ascii);
+		if (ret >= m->size - m->count) {
+			seq_set_overflow(m);
+		} else {
+			m->count += ret;
+			seq_putc(m, '\n');
+		}
+	}
+}
+EXPORT_SYMBOL(seq_hex_dump);
+
 struct list_head *seq_list_start(struct list_head *head, loff_t pos)
 {
 	struct list_head *lh;
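With seq_hex_dump() in lib, the per-driver dump loops removed earlier in this diff (qat, wil6210, parisc ccio/sba, s390 zcrypt) collapse to a single call from any seq_file show() callback. A sketch of a caller (example_show and example_buf are hypothetical):

#include <linux/seq_file.h>
#include <linux/printk.h>	/* DUMP_PREFIX_* */

static int example_show(struct seq_file *m, void *v)
{
	static const u8 example_buf[64];	/* hypothetical data */

	/* 32 bytes per row, grouped as 4-byte words, no ASCII column */
	seq_hex_dump(m, "  ", DUMP_PREFIX_OFFSET, 32, 4,
		     example_buf, sizeof(example_buf), false);
	return 0;
}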
|
@ -6,6 +6,7 @@
|
|||||||
#include <linux/scatterlist.h>
|
#include <linux/scatterlist.h>
|
||||||
#include <linux/dma-debug.h>
|
#include <linux/dma-debug.h>
|
||||||
#include <linux/dma-attrs.h>
|
#include <linux/dma-attrs.h>
|
||||||
|
#include <asm-generic/dma-coherent.h>
|
||||||
|
|
||||||
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
|
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
|
||||||
size_t size,
|
size_t size,
|
||||||
@ -237,4 +238,121 @@ dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
|
|||||||
|
|
||||||
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
|
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)
|
||||||
|
|
||||||
|
#ifndef arch_dma_alloc_attrs
|
||||||
|
#define arch_dma_alloc_attrs(dev, flag) (true)
|
||||||
|
#endif
|
||||||
|
|
||||||
|
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
|
||||||
|
dma_addr_t *dma_handle, gfp_t flag,
|
||||||
|
struct dma_attrs *attrs)
|
||||||
|
{
|
||||||
|
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||||
|
void *cpu_addr;
|
||||||
|
|
||||||
|
BUG_ON(!ops);
|
||||||
|
|
||||||
|
if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
|
||||||
|
return cpu_addr;
|
||||||
|
|
||||||
|
if (!arch_dma_alloc_attrs(&dev, &flag))
|
||||||
|
return NULL;
|
||||||
|
if (!ops->alloc)
|
||||||
|
return NULL;
|
||||||
|
|
||||||
|
cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
|
||||||
|
debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
|
||||||
|
return cpu_addr;
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void dma_free_attrs(struct device *dev, size_t size,
|
||||||
|
void *cpu_addr, dma_addr_t dma_handle,
|
||||||
|
struct dma_attrs *attrs)
|
||||||
|
{
|
||||||
|
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||||
|
|
||||||
|
BUG_ON(!ops);
|
||||||
|
WARN_ON(irqs_disabled());
|
||||||
|
|
||||||
|
if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
|
||||||
|
return;
|
||||||
|
|
||||||
|
if (!ops->free)
|
||||||
|
return;
|
||||||
|
|
||||||
|
debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
|
||||||
|
ops->free(dev, size, cpu_addr, dma_handle, attrs);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
|
||||||
|
dma_addr_t *dma_handle, gfp_t flag)
|
||||||
|
{
|
||||||
|
return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void dma_free_coherent(struct device *dev, size_t size,
|
||||||
|
void *cpu_addr, dma_addr_t dma_handle)
|
||||||
|
{
|
||||||
|
return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
|
||||||
|
dma_addr_t *dma_handle, gfp_t gfp)
|
||||||
|
{
|
||||||
|
DEFINE_DMA_ATTRS(attrs);
|
||||||
|
|
||||||
|
dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
|
||||||
|
return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline void dma_free_noncoherent(struct device *dev, size_t size,
|
||||||
|
void *cpu_addr, dma_addr_t dma_handle)
|
||||||
|
{
|
||||||
|
DEFINE_DMA_ATTRS(attrs);
|
||||||
|
|
||||||
|
dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
|
||||||
|
dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
|
||||||
|
{
|
||||||
|
debug_dma_mapping_error(dev, dma_addr);
|
||||||
|
|
||||||
|
if (get_dma_ops(dev)->mapping_error)
|
||||||
|
return get_dma_ops(dev)->mapping_error(dev, dma_addr);
|
||||||
|
|
||||||
|
#ifdef DMA_ERROR_CODE
|
||||||
|
return dma_addr == DMA_ERROR_CODE;
|
||||||
|
#else
|
||||||
|
return 0;
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifndef HAVE_ARCH_DMA_SUPPORTED
|
||||||
|
static inline int dma_supported(struct device *dev, u64 mask)
|
||||||
|
{
|
||||||
|
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||||
|
|
||||||
|
if (!ops)
|
||||||
|
return 0;
|
||||||
|
if (!ops->dma_supported)
|
||||||
|
return 1;
|
||||||
|
return ops->dma_supported(dev, mask);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#ifndef HAVE_ARCH_DMA_SET_MASK
|
||||||
|
static inline int dma_set_mask(struct device *dev, u64 mask)
|
||||||
|
{
|
||||||
|
struct dma_map_ops *ops = get_dma_ops(dev);
|
||||||
|
|
||||||
|
if (ops->set_dma_mask)
|
||||||
|
return ops->set_dma_mask(dev, mask);
|
||||||
|
|
||||||
|
if (!dev->dma_mask || !dma_supported(dev, mask))
|
||||||
|
return -EIO;
|
||||||
|
*dev->dma_mask = mask;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
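With dma_alloc_attrs()/dma_free_attrs() now defined once in the common header, every architecture gets the same behavior, including the dma_alloc_from_coherent() fast path that xen-swiotlb could drop above. Driver-side usage is unchanged; a sketch (example_setup_ring is hypothetical):

#include <linux/dma-mapping.h>

static int example_setup_ring(struct device *dev)
{
	dma_addr_t ring_dma;
	void *ring = dma_alloc_coherent(dev, 4096, &ring_dma, GFP_KERNEL);

	if (!ring)
		return -ENOMEM;
	/* program ring_dma into the device; the CPU uses ring directly */
	dma_free_coherent(dev, 4096, ring, ring_dma);
	return 0;
}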
@@ -16,7 +16,7 @@

 #include <uapi/linux/kexec.h>

-#ifdef CONFIG_KEXEC
+#ifdef CONFIG_KEXEC_CORE
 #include <linux/list.h>
 #include <linux/linkage.h>
 #include <linux/compat.h>
@@ -318,13 +318,24 @@ int crash_shrink_memory(unsigned long new_size);
 size_t crash_get_memory_size(void);
 void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);

-#else /* !CONFIG_KEXEC */
+int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
+					 unsigned long buf_len);
+void * __weak arch_kexec_kernel_image_load(struct kimage *image);
+int __weak arch_kimage_file_post_load_cleanup(struct kimage *image);
+int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
+					unsigned long buf_len);
+int __weak arch_kexec_apply_relocations_add(const Elf_Ehdr *ehdr,
+					    Elf_Shdr *sechdrs, unsigned int relsec);
+int __weak arch_kexec_apply_relocations(const Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+					unsigned int relsec);
+
+#else /* !CONFIG_KEXEC_CORE */
 struct pt_regs;
 struct task_struct;
 static inline void crash_kexec(struct pt_regs *regs) { }
 static inline int kexec_should_crash(struct task_struct *p) { return 0; }
 #define kexec_in_progress false
-#endif /* CONFIG_KEXEC */
+#endif /* CONFIG_KEXEC_CORE */

 #endif /* !defined(__ASSEBMLY__) */
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user