Merge branch 'akpm' (incoming from Andrew)

Merge second patch-bomb from Andrew Morton:
 - the rest of MM
 - zram updates
 - zswap updates
 - exit
 - procfs
 - exec
 - wait
 - crash dump
 - lib/idr
 - rapidio
 - adfs, affs, bfs, ufs
 - cris
 - Kconfig things
 - initramfs
 - small amount of IPC material
 - percpu enhancements
 - early ioremap support
 - various other misc things

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (156 commits)
  MAINTAINERS: update Intel C600 SAS driver maintainers
  fs/ufs: remove unused ufs_super_block_third pointer
  fs/ufs: remove unused ufs_super_block_second pointer
  fs/ufs: remove unused ufs_super_block_first pointer
  fs/ufs/super.c: add __init to init_inodecache()
  doc/kernel-parameters.txt: add early_ioremap_debug
  arm64: add early_ioremap support
  arm64: initialize pgprot info earlier in boot
  x86: use generic early_ioremap
  mm: create generic early_ioremap() support
  x86/mm: sparse warning fix for early_memremap
  lglock: map to spinlock when !CONFIG_SMP
  percpu: add preemption checks to __this_cpu ops
  vmstat: use raw_cpu_ops to avoid false positives on preemption checks
  slub: use raw_cpu_inc for incrementing statistics
  net: replace __this_cpu_inc in route.c with raw_cpu_inc
  modules: use raw_cpu_write for initialization of per cpu refcount.
  mm: use raw_cpu ops for determining current NUMA node
  percpu: add raw_cpu_ops
  slub: fix leak of 'name' in sysfs_slab_add
  ...
@@ -43,6 +43,36 @@ Description:
 The invalid_io file is read-only and specifies the number of
 non-page-size-aligned I/O requests issued to this device.
 
+What: /sys/block/zram<id>/failed_reads
+Date: February 2014
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The failed_reads file is read-only and specifies the number of
+ failed reads happened on this device.
+
+What: /sys/block/zram<id>/failed_writes
+Date: February 2014
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The failed_writes file is read-only and specifies the number of
+ failed writes happened on this device.
+
+What: /sys/block/zram<id>/max_comp_streams
+Date: February 2014
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The max_comp_streams file is read-write and specifies the
+ number of backend's zcomp_strm compression streams (number of
+ concurrent compress operations).
+
+What: /sys/block/zram<id>/comp_algorithm
+Date: February 2014
+Contact: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+Description:
+ The comp_algorithm file is read-write and lets to show
+ available and selected compression algorithms, change
+ compression algorithm selection.
+
 What: /sys/block/zram<id>/notify_free
 Date: August 2010
 Contact: Nitin Gupta <ngupta@vflare.org>
@@ -53,15 +83,6 @@ Description:
 is freed. This statistic is applicable only when this disk is
 being used as a swap disk.
 
-What: /sys/block/zram<id>/discard
-Date: August 2010
-Contact: Nitin Gupta <ngupta@vflare.org>
-Description:
- The discard file is read-only and specifies the number of
- discard requests received by this device. These requests
- provide information to block device regarding blocks which are
- no longer used by filesystem.
-
 What: /sys/block/zram<id>/zero_pages
 Date: August 2010
 Contact: Nitin Gupta <ngupta@vflare.org>
@@ -671,7 +671,7 @@ printk(KERN_INFO "my ip: %pI4\n", &ipaddress);
 
 <sect1 id="routines-local-irqs">
 <title><function>local_irq_save()</function>/<function>local_irq_restore()</function>
-<filename class="headerfile">include/asm/system.h</filename>
+<filename class="headerfile">include/linux/irqflags.h</filename>
 </title>
 
 <para>
@@ -39,7 +39,7 @@ ffffffbffa000000 ffffffbffaffffff 16MB PCI I/O space
 
 ffffffbffb000000 ffffffbffbbfffff 12MB [guard]
 
-ffffffbffbc00000 ffffffbffbdfffff 2MB earlyprintk device
+ffffffbffbc00000 ffffffbffbdfffff 2MB fixed mappings
 
 ffffffbffbe00000 ffffffbffbffffff 2MB [guard]
 
@@ -66,7 +66,7 @@ fffffdfffa000000 fffffdfffaffffff 16MB PCI I/O space
 
 fffffdfffb000000 fffffdfffbbfffff 12MB [guard]
 
-fffffdfffbc00000 fffffdfffbdfffff 2MB earlyprintk device
+fffffdfffbc00000 fffffdfffbdfffff 2MB fixed mappings
 
 fffffdfffbe00000 fffffdfffbffffff 2MB [guard]
 
@@ -21,7 +21,43 @@ Following shows a typical sequence of steps for using zram.
 This creates 4 devices: /dev/zram{0,1,2,3}
 (num_devices parameter is optional. Default: 1)
 
-2) Set Disksize
+2) Set max number of compression streams
+ Compression backend may use up to max_comp_streams compression streams,
+ thus allowing up to max_comp_streams concurrent compression operations.
+ By default, compression backend uses single compression stream.
+
+ Examples:
+ #show max compression streams number
+ cat /sys/block/zram0/max_comp_streams
+
+ #set max compression streams number to 3
+ echo 3 > /sys/block/zram0/max_comp_streams
+
+Note:
+In order to enable compression backend's multi stream support max_comp_streams
+must be initially set to desired concurrency level before ZRAM device
+initialisation. Once the device initialised as a single stream compression
+backend (max_comp_streams equals to 1), you will see error if you try to change
+the value of max_comp_streams because single stream compression backend
+implemented as a special case by lock overhead issue and does not support
+dynamic max_comp_streams. Only multi stream backend supports dynamic
+max_comp_streams adjustment.
+
+3) Select compression algorithm
+ Using comp_algorithm device attribute one can see available and
+ currently selected (shown in square brackets) compression algortithms,
+ change selected compression algorithm (once the device is initialised
+ there is no way to change compression algorithm).
+
+ Examples:
+ #show supported compression algorithms
+ cat /sys/block/zram0/comp_algorithm
+ lzo [lz4]
+
+ #select lzo compression algorithm
+ echo lzo > /sys/block/zram0/comp_algorithm
+
+4) Set Disksize
 Set disk size by writing the value to sysfs node 'disksize'.
 The value can be either in bytes or you can use mem suffixes.
 Examples:
@@ -33,32 +69,38 @@ Following shows a typical sequence of steps for using zram.
 echo 512M > /sys/block/zram0/disksize
 echo 1G > /sys/block/zram0/disksize
 
-3) Activate:
+Note:
+There is little point creating a zram of greater than twice the size of memory
+since we expect a 2:1 compression ratio. Note that zram uses about 0.1% of the
+size of the disk when not in use so a huge zram is wasteful.
+
+5) Activate:
 mkswap /dev/zram0
 swapon /dev/zram0
 
 mkfs.ext4 /dev/zram1
 mount /dev/zram1 /tmp
 
-4) Stats:
+6) Stats:
 Per-device statistics are exported as various nodes under
 /sys/block/zram<id>/
 disksize
 num_reads
 num_writes
+failed_reads
+failed_writes
 invalid_io
 notify_free
-discard
 zero_pages
 orig_data_size
 compr_data_size
 mem_used_total
 
-5) Deactivate:
+7) Deactivate:
 swapoff /dev/zram0
 umount /dev/zram1
 
-6) Reset:
+8) Reset:
 Write any positive value to 'reset' sysfs node
 echo 1 > /sys/block/zram0/reset
 echo 1 > /sys/block/zram1/reset
@@ -24,7 +24,7 @@ Please note that implementation details can be changed.
 
 a page/swp_entry may be charged (usage += PAGE_SIZE) at
 
-mem_cgroup_newpage_charge()
+mem_cgroup_charge_anon()
  Called at new page fault and Copy-On-Write.
 
 mem_cgroup_try_charge_swapin()
@@ -32,7 +32,7 @@ Please note that implementation details can be changed.
 Followed by charge-commit-cancel protocol. (With swap accounting)
 At commit, a charge recorded in swap_cgroup is removed.
 
-mem_cgroup_cache_charge()
+mem_cgroup_charge_file()
  Called at add_to_page_cache()
 
 mem_cgroup_cache_charge_swapin()
@@ -76,15 +76,7 @@ to work with it.
 limit_fail_at parameter is set to the particular res_counter element
 where the charging failed.
 
-d. int res_counter_charge_locked
- (struct res_counter *rc, unsigned long val, bool force)
-
- The same as res_counter_charge(), but it must not acquire/release the
- res_counter->lock internally (it must be called with res_counter->lock
- held). The force parameter indicates whether we can bypass the limit.
-
-e. u64 res_counter_uncharge[_locked]
- (struct res_counter *rc, unsigned long val)
+d. u64 res_counter_uncharge(struct res_counter *rc, unsigned long val)
 
 When a resource is released (freed) it should be de-accounted
 from the resource counter it was accounted to. This is called
@@ -93,7 +85,7 @@ to work with it.
 
 The _locked routines imply that the res_counter->lock is taken.
 
-f. u64 res_counter_uncharge_until
+e. u64 res_counter_uncharge_until
 (struct res_counter *rc, struct res_counter *top,
 unsigned long val)
 
@@ -529,6 +529,7 @@ locking rules:
 open: yes
 close: yes
 fault: yes can return with page locked
+map_pages: yes
 page_mkwrite: yes can return with page locked
 access: yes
 
@@ -540,6 +541,15 @@ the page, then ensure it is not already truncated (the page lock will block
 subsequent truncate), and then return with VM_FAULT_LOCKED, and the page
 locked. The VM will unlock the page.
 
+->map_pages() is called when VM asks to map easy accessible pages.
+Filesystem should find and map pages associated with offsets from "pgoff"
+till "max_pgoff". ->map_pages() is called with page table locked and must
+not block. If it's not possible to reach a page without blocking,
+filesystem should skip it. Filesystem should use do_set_pte() to setup
+page table entry. Pointer to entry associated with offset "pgoff" is
+passed in "pte" field in vm_fault structure. Pointers to entries for other
+offsets should be calculated relative to "pte".
+
 ->page_mkwrite() is called when a previously read-only pte is
 about to become writeable. The filesystem again must ensure that there are
 no truncate/invalidate races, and then return with the page locked. If
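As an illustration, not taken from this patch: a page-cache backed filesystem can normally satisfy the ->map_pages() rules quoted above by pointing the new hook at the generic helper, the same way it already uses filemap_fault() for ->fault(). A minimal sketch, with invented example_* names (filemap_fault/filemap_map_pages/filemap_page_mkwrite are the generic mm helpers), might look like:

#include <linux/fs.h>
#include <linux/mm.h>

/* Hypothetical example: reuse the generic page-cache helpers. */
static const struct vm_operations_struct example_file_vm_ops = {
	.fault		= filemap_fault,	/* may sleep, may return with page locked */
	.map_pages	= filemap_map_pages,	/* called with pte lock held, must not block */
	.page_mkwrite	= filemap_page_mkwrite,
};

static int example_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	file_accessed(file);
	vma->vm_ops = &example_file_vm_ops;	/* ->map_pages wired up here */
	return 0;
}

Open-coding ->map_pages() is only needed when pages do not live in the page cache; in that case the non-blocking rule and the do_set_pte() contract described above apply directly.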
@@ -49,6 +49,10 @@ mode=mode Sets the mode flags to the given (octal) value, regardless
 This is useful since most of the plain AmigaOS files
 will map to 600.
 
+nofilenametruncate
+ The file system will return an error when filename exceeds
+ standard maximum filename length (30 characters).
+
 reserved=num Sets the number of reserved blocks at the start of the
 partition to num. You should never need this option.
 Default is 2.
@@ -181,9 +185,8 @@ tested, though several hundred MB have been read and written using
 this fs. For a most up-to-date list of bugs please consult
 fs/affs/Changes.
 
-Filenames are truncated to 30 characters without warning (this
-can be changed by setting the compile-time option AFFS_NO_TRUNCATE
-in include/linux/amigaffs.h).
+By default, filenames are truncated to 30 characters without warning.
+'nofilenametruncate' mount option can change that behavior.
 
 Case is ignored by the affs in filename matching, but Linux shells
 do care about the case. Example (with /wb being an affs mounted fs):
@@ -1648,18 +1648,21 @@ pids, so one need to either stop or freeze processes being inspected
 if precise results are needed.
 
 
-3.7 /proc/<pid>/fdinfo/<fd> - Information about opened file
+3.8 /proc/<pid>/fdinfo/<fd> - Information about opened file
 ---------------------------------------------------------------
 This file provides information associated with an opened file. The regular
-files have at least two fields -- 'pos' and 'flags'. The 'pos' represents
-the current offset of the opened file in decimal form [see lseek(2) for
-details] and 'flags' denotes the octal O_xxx mask the file has been
-created with [see open(2) for details].
+files have at least three fields -- 'pos', 'flags' and mnt_id. The 'pos'
+represents the current offset of the opened file in decimal form [see lseek(2)
+for details], 'flags' denotes the octal O_xxx mask the file has been
+created with [see open(2) for details] and 'mnt_id' represents mount ID of
+the file system containing the opened file [see 3.5 /proc/<pid>/mountinfo
+for details].
 
 A typical output is
 
 pos: 0
 flags: 0100002
+mnt_id: 19
 
 The files such as eventfd, fsnotify, signalfd, epoll among the regular pos/flags
 pair provide additional information particular to the objects they represent.
@@ -1668,6 +1671,7 @@ pair provide additional information particular to the objects they represent.
 ~~~~~~~~~~~~~
 pos: 0
 flags: 04002
+mnt_id: 9
 eventfd-count: 5a
 
 where 'eventfd-count' is hex value of a counter.
@@ -1676,6 +1680,7 @@ pair provide additional information particular to the objects they represent.
 ~~~~~~~~~~~~~~
 pos: 0
 flags: 04002
+mnt_id: 9
 sigmask: 0000000000000200
 
 where 'sigmask' is hex value of the signal mask associated
@@ -1685,6 +1690,7 @@ pair provide additional information particular to the objects they represent.
 ~~~~~~~~~~~
 pos: 0
 flags: 02
+mnt_id: 9
 tfd: 5 events: 1d data: ffffffffffffffff
 
 where 'tfd' is a target file descriptor number in decimal form,
@@ -1718,6 +1724,7 @@ pair provide additional information particular to the objects they represent.
 
 pos: 0
 flags: 02
+mnt_id: 9
 fanotify flags:10 event-flags:0
 fanotify mnt_id:12 mflags:40 mask:38 ignored_mask:40000003
 fanotify ino:4f969 sdev:800013 mflags:0 mask:3b ignored_mask:40000000 fhandle-bytes:8 fhandle-type:1 f_handle:69f90400c275b5b4
@@ -22,13 +22,6 @@ rather straightforward and risk-free manner.
 Architectures that want to support this need to do a couple of
 code-organizational changes first:
 
-- move their irq-flags manipulation code from their asm/system.h header
-  to asm/irqflags.h
-
-- rename local_irq_disable()/etc to raw_local_irq_disable()/etc. so that
-  the linux/irqflags.h code can inject callbacks and can construct the
-  real local_irq_disable()/etc APIs.
-
 - add and enable TRACE_IRQFLAGS_SUPPORT in their arch level Kconfig file
 
 and then a couple of functional changes are needed as well to implement
@@ -157,6 +157,10 @@ applicable everywhere (see syntax).
 to the build environment (if this is desired, it can be done via
 another symbol).
 
+- "allnoconfig_y"
+  This declares the symbol as one that should have the value y when
+  using "allnoconfig". Used for symbols that hide other symbols.
+
 Menu dependencies
 -----------------
 
@@ -884,6 +884,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 Enable debug messages at boot time. See
 Documentation/dynamic-debug-howto.txt for details.
 
+early_ioremap_debug [KNL]
+ Enable debug messages in early_ioremap support. This
+ is useful for tracking down temporary early mappings
+ which are not unmapped.
+
 earlycon= [KNL] Output early console device and options.
 uart[8250],io,<addr>[,options]
 uart[8250],mmio,<addr>[,options]
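As a usage illustration, not taken from this merge: the temporary mappings that early_ioremap_debug reports come from the generic early_ioremap() API this series wires up. A boot-time caller is expected to pair every early mapping with an unmap, roughly as in the sketch below; the physical address, size and function name are made up for the example, while early_ioremap()/early_iounmap() and <asm/early_ioremap.h> are the interfaces the patches provide.

#include <linux/init.h>
#include <linux/io.h>
#include <asm/early_ioremap.h>
#include <asm/page.h>

#define EXAMPLE_REG_PHYS 0x10000000UL	/* hypothetical device register block */

static u32 __init example_peek_boot_register(void)
{
	void __iomem *base;
	u32 val;

	/* Temporary, fixmap-backed mapping; usable before ioremap() works. */
	base = early_ioremap(EXAMPLE_REG_PHYS, PAGE_SIZE);
	if (!base)
		return 0;

	val = readl(base);

	/*
	 * Every early mapping must be torn down again; mappings that are
	 * never unmapped are exactly what early_ioremap_debug helps track
	 * down.
	 */
	early_iounmap(base, PAGE_SIZE);
	return val;
}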
@ -2,8 +2,8 @@
|
|||||||
|
|
||||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||||
|
|
||||||
1. Device Subdirectories
|
1. RapidIO Device Subdirectories
|
||||||
------------------------
|
--------------------------------
|
||||||
|
|
||||||
For each RapidIO device, the RapidIO subsystem creates files in an individual
|
For each RapidIO device, the RapidIO subsystem creates files in an individual
|
||||||
subdirectory with the following name, /sys/bus/rapidio/devices/<device_name>.
|
subdirectory with the following name, /sys/bus/rapidio/devices/<device_name>.
|
||||||
@ -25,8 +25,8 @@ seen by the enumerating host (destID = 1):
|
|||||||
NOTE: An enumerating or discovering endpoint does not create a sysfs entry for
|
NOTE: An enumerating or discovering endpoint does not create a sysfs entry for
|
||||||
itself, this is why an endpoint with destID=1 is not shown in the list.
|
itself, this is why an endpoint with destID=1 is not shown in the list.
|
||||||
|
|
||||||
2. Attributes Common for All Devices
|
2. Attributes Common for All RapidIO Devices
|
||||||
------------------------------------
|
--------------------------------------------
|
||||||
|
|
||||||
Each device subdirectory contains the following informational read-only files:
|
Each device subdirectory contains the following informational read-only files:
|
||||||
|
|
||||||
@ -52,16 +52,16 @@ This attribute is similar in behavior to the "config" attribute of PCI devices
|
|||||||
and provides an access to the RapidIO device registers using standard file read
|
and provides an access to the RapidIO device registers using standard file read
|
||||||
and write operations.
|
and write operations.
|
||||||
|
|
||||||
3. Endpoint Device Attributes
|
3. RapidIO Endpoint Device Attributes
|
||||||
-----------------------------
|
-------------------------------------
|
||||||
|
|
||||||
Currently Linux RapidIO subsystem does not create any endpoint specific sysfs
|
Currently Linux RapidIO subsystem does not create any endpoint specific sysfs
|
||||||
attributes. It is possible that RapidIO master port drivers and endpoint device
|
attributes. It is possible that RapidIO master port drivers and endpoint device
|
||||||
drivers will add their device-specific sysfs attributes but such attributes are
|
drivers will add their device-specific sysfs attributes but such attributes are
|
||||||
outside the scope of this document.
|
outside the scope of this document.
|
||||||
|
|
||||||
4. Switch Device Attributes
|
4. RapidIO Switch Device Attributes
|
||||||
---------------------------
|
-----------------------------------
|
||||||
|
|
||||||
RapidIO switches have additional attributes in sysfs. RapidIO subsystem supports
|
RapidIO switches have additional attributes in sysfs. RapidIO subsystem supports
|
||||||
common and device-specific sysfs attributes for switches. Because switches are
|
common and device-specific sysfs attributes for switches. Because switches are
|
||||||
@ -106,3 +106,53 @@ attribute:
|
|||||||
for that controller always will be 0.
|
for that controller always will be 0.
|
||||||
To initiate RapidIO enumeration/discovery on all available mports
|
To initiate RapidIO enumeration/discovery on all available mports
|
||||||
a user must write '-1' (or RIO_MPORT_ANY) into this attribute file.
|
a user must write '-1' (or RIO_MPORT_ANY) into this attribute file.
|
||||||
|
|
||||||
|
|
||||||
|
6. RapidIO Bus Controllers/Ports
|
||||||
|
--------------------------------
|
||||||
|
|
||||||
|
On-chip RapidIO controllers and PCIe-to-RapidIO bridges (referenced as
|
||||||
|
"Master Port" or "mport") are presented in sysfs as the special class of
|
||||||
|
devices: "rapidio_port".
|
||||||
|
|
||||||
|
The /sys/class/rapidio_port subdirectory contains individual subdirectories
|
||||||
|
named as "rapidioN" where N = mport ID registered with RapidIO subsystem.
|
||||||
|
|
||||||
|
NOTE: An mport ID is not a RapidIO destination ID assigned to a given local
|
||||||
|
mport device.
|
||||||
|
|
||||||
|
Each mport device subdirectory in addition to standard entries contains the
|
||||||
|
following device-specific attributes:
|
||||||
|
|
||||||
|
port_destid - reports RapidIO destination ID assigned to the given RapidIO
|
||||||
|
mport device. If value 0xFFFFFFFF is returned this means that
|
||||||
|
no valid destination ID have been assigned to the mport (yet).
|
||||||
|
Normally, before enumeration/discovery have been executed only
|
||||||
|
fabric enumerating mports have a valid destination ID assigned
|
||||||
|
to them using "hdid=..." rapidio module parameter.
|
||||||
|
sys_size - reports RapidIO common transport system size:
|
||||||
|
0 = small (8-bit destination ID, max. 256 devices),
|
||||||
|
1 = large (16-bit destination ID, max. 65536 devices).
|
||||||
|
|
||||||
|
After enumeration or discovery was performed for a given mport device,
|
||||||
|
the corresponding subdirectory will also contain subdirectories for each
|
||||||
|
child RapidIO device connected to the mport. Naming conventions for RapidIO
|
||||||
|
devices are described in Section 1 above.
|
||||||
|
|
||||||
|
The example below shows mport device subdirectory with several child RapidIO
|
||||||
|
devices attached to it.
|
||||||
|
|
||||||
|
[rio@rapidio ~]$ ls /sys/class/rapidio_port/rapidio0/ -l
|
||||||
|
total 0
|
||||||
|
drwxr-xr-x 3 root root 0 Feb 11 15:10 00:e:0001
|
||||||
|
drwxr-xr-x 3 root root 0 Feb 11 15:10 00:e:0004
|
||||||
|
drwxr-xr-x 3 root root 0 Feb 11 15:10 00:e:0007
|
||||||
|
drwxr-xr-x 3 root root 0 Feb 11 15:10 00:s:0002
|
||||||
|
drwxr-xr-x 3 root root 0 Feb 11 15:10 00:s:0003
|
||||||
|
drwxr-xr-x 3 root root 0 Feb 11 15:10 00:s:0005
|
||||||
|
lrwxrwxrwx 1 root root 0 Feb 11 15:11 device -> ../../../0000:01:00.0
|
||||||
|
-r--r--r-- 1 root root 4096 Feb 11 15:11 port_destid
|
||||||
|
drwxr-xr-x 2 root root 0 Feb 11 15:11 power
|
||||||
|
lrwxrwxrwx 1 root root 0 Feb 11 15:04 subsystem -> ../../../../../../class/rapidio_port
|
||||||
|
-r--r--r-- 1 root root 4096 Feb 11 15:11 sys_size
|
||||||
|
-rw-r--r-- 1 root root 4096 Feb 11 15:04 uevent
|
||||||
|
@ -8,7 +8,7 @@ Context switch
|
|||||||
By default, the switch_to arch function is called with the runqueue
|
By default, the switch_to arch function is called with the runqueue
|
||||||
locked. This is usually not a problem unless switch_to may need to
|
locked. This is usually not a problem unless switch_to may need to
|
||||||
take the runqueue lock. This is usually due to a wake up operation in
|
take the runqueue lock. This is usually due to a wake up operation in
|
||||||
the context switch. See arch/ia64/include/asm/system.h for an example.
|
the context switch. See arch/ia64/include/asm/switch_to.h for an example.
|
||||||
|
|
||||||
To request the scheduler call switch_to with the runqueue unlocked,
|
To request the scheduler call switch_to with the runqueue unlocked,
|
||||||
you must `#define __ARCH_WANT_UNLOCKED_CTXSW` in a header file
|
you must `#define __ARCH_WANT_UNLOCKED_CTXSW` in a header file
|
||||||
|
@ -317,6 +317,7 @@ for more than this value report a warning.
|
|||||||
This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
|
This file shows up if CONFIG_DETECT_HUNG_TASK is enabled.
|
||||||
|
|
||||||
0: means infinite timeout - no checking done.
|
0: means infinite timeout - no checking done.
|
||||||
|
Possible values to set are in range {0..LONG_MAX/HZ}.
|
||||||
|
|
||||||
==============================================================
|
==============================================================
|
||||||
|
|
||||||
|
@ -4542,8 +4542,7 @@ K: \b(ABS|SYN)_MT_
|
|||||||
|
|
||||||
INTEL C600 SERIES SAS CONTROLLER DRIVER
|
INTEL C600 SERIES SAS CONTROLLER DRIVER
|
||||||
M: Intel SCU Linux support <intel-linux-scu@intel.com>
|
M: Intel SCU Linux support <intel-linux-scu@intel.com>
|
||||||
M: Lukasz Dorau <lukasz.dorau@intel.com>
|
M: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
|
||||||
M: Maciej Patelczyk <maciej.patelczyk@intel.com>
|
|
||||||
M: Dave Jiang <dave.jiang@intel.com>
|
M: Dave Jiang <dave.jiang@intel.com>
|
||||||
L: linux-scsi@vger.kernel.org
|
L: linux-scsi@vger.kernel.org
|
||||||
T: git git://git.code.sf.net/p/intel-sas/isci
|
T: git git://git.code.sf.net/p/intel-sas/isci
|
||||||
|
@ -57,7 +57,7 @@ config ARCH_FLATMEM_ENABLE
|
|||||||
config MMU
|
config MMU
|
||||||
def_bool y
|
def_bool y
|
||||||
|
|
||||||
config NO_IOPORT
|
config NO_IOPORT_MAP
|
||||||
def_bool y
|
def_bool y
|
||||||
|
|
||||||
config GENERIC_CALIBRATE_DELAY
|
config GENERIC_CALIBRATE_DELAY
|
||||||
|
@ -126,7 +126,7 @@ config HAVE_TCM
|
|||||||
config HAVE_PROC_CPU
|
config HAVE_PROC_CPU
|
||||||
bool
|
bool
|
||||||
|
|
||||||
config NO_IOPORT
|
config NO_IOPORT_MAP
|
||||||
bool
|
bool
|
||||||
|
|
||||||
config EISA
|
config EISA
|
||||||
@ -410,7 +410,7 @@ config ARCH_EBSA110
|
|||||||
select ISA
|
select ISA
|
||||||
select NEED_MACH_IO_H
|
select NEED_MACH_IO_H
|
||||||
select NEED_MACH_MEMORY_H
|
select NEED_MACH_MEMORY_H
|
||||||
select NO_IOPORT
|
select NO_IOPORT_MAP
|
||||||
help
|
help
|
||||||
This is an evaluation board for the StrongARM processor available
|
This is an evaluation board for the StrongARM processor available
|
||||||
from Digital. It has limited hardware on-board, including an
|
from Digital. It has limited hardware on-board, including an
|
||||||
@ -428,7 +428,7 @@ config ARCH_EFM32
|
|||||||
select CPU_V7M
|
select CPU_V7M
|
||||||
select GENERIC_CLOCKEVENTS
|
select GENERIC_CLOCKEVENTS
|
||||||
select NO_DMA
|
select NO_DMA
|
||||||
select NO_IOPORT
|
select NO_IOPORT_MAP
|
||||||
select SPARSE_IRQ
|
select SPARSE_IRQ
|
||||||
select USE_OF
|
select USE_OF
|
||||||
help
|
help
|
||||||
@ -677,7 +677,7 @@ config ARCH_SHMOBILE_LEGACY
|
|||||||
select HAVE_SMP
|
select HAVE_SMP
|
||||||
select MIGHT_HAVE_CACHE_L2X0
|
select MIGHT_HAVE_CACHE_L2X0
|
||||||
select MULTI_IRQ_HANDLER
|
select MULTI_IRQ_HANDLER
|
||||||
select NO_IOPORT
|
select NO_IOPORT_MAP
|
||||||
select PINCTRL
|
select PINCTRL
|
||||||
select PM_GENERIC_DOMAINS if PM
|
select PM_GENERIC_DOMAINS if PM
|
||||||
select SPARSE_IRQ
|
select SPARSE_IRQ
|
||||||
@ -699,7 +699,7 @@ config ARCH_RPC
|
|||||||
select ISA_DMA_API
|
select ISA_DMA_API
|
||||||
select NEED_MACH_IO_H
|
select NEED_MACH_IO_H
|
||||||
select NEED_MACH_MEMORY_H
|
select NEED_MACH_MEMORY_H
|
||||||
select NO_IOPORT
|
select NO_IOPORT_MAP
|
||||||
select VIRT_TO_BUS
|
select VIRT_TO_BUS
|
||||||
help
|
help
|
||||||
On the Acorn Risc-PC, Linux can support the internal IDE disk and
|
On the Acorn Risc-PC, Linux can support the internal IDE disk and
|
||||||
@ -760,7 +760,7 @@ config ARCH_S3C64XX
|
|||||||
select HAVE_S3C2410_I2C if I2C
|
select HAVE_S3C2410_I2C if I2C
|
||||||
select HAVE_S3C2410_WATCHDOG if WATCHDOG
|
select HAVE_S3C2410_WATCHDOG if WATCHDOG
|
||||||
select HAVE_TCM
|
select HAVE_TCM
|
||||||
select NO_IOPORT
|
select NO_IOPORT_MAP
|
||||||
select PLAT_SAMSUNG
|
select PLAT_SAMSUNG
|
||||||
select PM_GENERIC_DOMAINS if PM
|
select PM_GENERIC_DOMAINS if PM
|
||||||
select S3C_DEV_NAND
|
select S3C_DEV_NAND
|
||||||
|
@ -4,4 +4,4 @@ config ARCH_PICOXCELL
|
|||||||
select ARM_VIC
|
select ARM_VIC
|
||||||
select DW_APB_TIMER_OF
|
select DW_APB_TIMER_OF
|
||||||
select HAVE_TCM
|
select HAVE_TCM
|
||||||
select NO_IOPORT
|
select NO_IOPORT_MAP
|
||||||
|
@ -3,7 +3,7 @@ config ARCH_SIRF
|
|||||||
select ARCH_HAS_RESET_CONTROLLER
|
select ARCH_HAS_RESET_CONTROLLER
|
||||||
select ARCH_REQUIRE_GPIOLIB
|
select ARCH_REQUIRE_GPIOLIB
|
||||||
select GENERIC_IRQ_CHIP
|
select GENERIC_IRQ_CHIP
|
||||||
select NO_IOPORT
|
select NO_IOPORT_MAP
|
||||||
select PINCTRL
|
select PINCTRL
|
||||||
select PINCTRL_SIRF
|
select PINCTRL_SIRF
|
||||||
help
|
help
|
||||||
|
@ -12,7 +12,7 @@ if ARCH_S3C24XX
|
|||||||
config PLAT_S3C24XX
|
config PLAT_S3C24XX
|
||||||
def_bool y
|
def_bool y
|
||||||
select ARCH_REQUIRE_GPIOLIB
|
select ARCH_REQUIRE_GPIOLIB
|
||||||
select NO_IOPORT
|
select NO_IOPORT_MAP
|
||||||
select S3C_DEV_NAND
|
select S3C_DEV_NAND
|
||||||
select IRQ_DOMAIN
|
select IRQ_DOMAIN
|
||||||
help
|
help
|
||||||
|
@ -10,7 +10,7 @@ config ARCH_SHMOBILE_MULTI
|
|||||||
select ARM_GIC
|
select ARM_GIC
|
||||||
select MIGHT_HAVE_PCI
|
select MIGHT_HAVE_PCI
|
||||||
select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
|
select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
|
||||||
select NO_IOPORT
|
select NO_IOPORT_MAP
|
||||||
select PINCTRL
|
select PINCTRL
|
||||||
select ARCH_REQUIRE_GPIOLIB
|
select ARCH_REQUIRE_GPIOLIB
|
||||||
|
|
||||||
|
@ -10,7 +10,7 @@ config ARCH_VEXPRESS
|
|||||||
select HAVE_ARM_TWD if SMP
|
select HAVE_ARM_TWD if SMP
|
||||||
select HAVE_PATA_PLATFORM
|
select HAVE_PATA_PLATFORM
|
||||||
select ICST
|
select ICST
|
||||||
select NO_IOPORT
|
select NO_IOPORT_MAP
|
||||||
select PLAT_VERSATILE
|
select PLAT_VERSATILE
|
||||||
select PLAT_VERSATILE_CLCD
|
select PLAT_VERSATILE_CLCD
|
||||||
select POWER_RESET
|
select POWER_RESET
|
||||||
|
@ -9,7 +9,7 @@ config PLAT_SAMSUNG
|
|||||||
depends on PLAT_S3C24XX || ARCH_S3C64XX || PLAT_S5P || ARCH_EXYNOS
|
depends on PLAT_S3C24XX || ARCH_S3C64XX || PLAT_S5P || ARCH_EXYNOS
|
||||||
default y
|
default y
|
||||||
select GENERIC_IRQ_CHIP
|
select GENERIC_IRQ_CHIP
|
||||||
select NO_IOPORT
|
select NO_IOPORT_MAP
|
||||||
help
|
help
|
||||||
Base platform code for all Samsung SoC based systems
|
Base platform code for all Samsung SoC based systems
|
||||||
|
|
||||||
@ -19,7 +19,7 @@ config PLAT_S5P
|
|||||||
default y
|
default y
|
||||||
select ARCH_REQUIRE_GPIOLIB
|
select ARCH_REQUIRE_GPIOLIB
|
||||||
select ARM_VIC
|
select ARM_VIC
|
||||||
select NO_IOPORT
|
select NO_IOPORT_MAP
|
||||||
select PLAT_SAMSUNG
|
select PLAT_SAMSUNG
|
||||||
select S3C_GPIO_TRACK
|
select S3C_GPIO_TRACK
|
||||||
select S5P_GPIO_DRVSTR
|
select S5P_GPIO_DRVSTR
|
||||||
|
@ -17,6 +17,7 @@ config ARM64
|
|||||||
select GENERIC_CLOCKEVENTS
|
select GENERIC_CLOCKEVENTS
|
||||||
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
|
select GENERIC_CLOCKEVENTS_BROADCAST if SMP
|
||||||
select GENERIC_CPU_AUTOPROBE
|
select GENERIC_CPU_AUTOPROBE
|
||||||
|
select GENERIC_EARLY_IOREMAP
|
||||||
select GENERIC_IOMAP
|
select GENERIC_IOMAP
|
||||||
select GENERIC_IRQ_PROBE
|
select GENERIC_IRQ_PROBE
|
||||||
select GENERIC_IRQ_SHOW
|
select GENERIC_IRQ_SHOW
|
||||||
@ -66,7 +67,7 @@ config ARCH_PHYS_ADDR_T_64BIT
|
|||||||
config MMU
|
config MMU
|
||||||
def_bool y
|
def_bool y
|
||||||
|
|
||||||
config NO_IOPORT
|
config NO_IOPORT_MAP
|
||||||
def_bool y
|
def_bool y
|
||||||
|
|
||||||
config STACKTRACE_SUPPORT
|
config STACKTRACE_SUPPORT
|
||||||
|
@ -10,6 +10,7 @@ generic-y += delay.h
|
|||||||
generic-y += div64.h
|
generic-y += div64.h
|
||||||
generic-y += dma.h
|
generic-y += dma.h
|
||||||
generic-y += emergency-restart.h
|
generic-y += emergency-restart.h
|
||||||
|
generic-y += early_ioremap.h
|
||||||
generic-y += errno.h
|
generic-y += errno.h
|
||||||
generic-y += ftrace.h
|
generic-y += ftrace.h
|
||||||
generic-y += hash.h
|
generic-y += hash.h
|
||||||
|
67
arch/arm64/include/asm/fixmap.h
Normal file
67
arch/arm64/include/asm/fixmap.h
Normal file
@ -0,0 +1,67 @@
|
|||||||
|
/*
|
||||||
|
* fixmap.h: compile-time virtual memory allocation
|
||||||
|
*
|
||||||
|
* This file is subject to the terms and conditions of the GNU General Public
|
||||||
|
* License. See the file "COPYING" in the main directory of this archive
|
||||||
|
* for more details.
|
||||||
|
*
|
||||||
|
* Copyright (C) 1998 Ingo Molnar
|
||||||
|
* Copyright (C) 2013 Mark Salter <msalter@redhat.com>
|
||||||
|
*
|
||||||
|
* Adapted from arch/x86_64 version.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
#ifndef _ASM_ARM64_FIXMAP_H
|
||||||
|
#define _ASM_ARM64_FIXMAP_H
|
||||||
|
|
||||||
|
#ifndef __ASSEMBLY__
|
||||||
|
#include <linux/kernel.h>
|
||||||
|
#include <asm/page.h>
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Here we define all the compile-time 'special' virtual
|
||||||
|
* addresses. The point is to have a constant address at
|
||||||
|
* compile time, but to set the physical address only
|
||||||
|
* in the boot process.
|
||||||
|
*
|
||||||
|
* These 'compile-time allocated' memory buffers are
|
||||||
|
* page-sized. Use set_fixmap(idx,phys) to associate
|
||||||
|
* physical memory with fixmap indices.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
enum fixed_addresses {
|
||||||
|
FIX_EARLYCON_MEM_BASE,
|
||||||
|
__end_of_permanent_fixed_addresses,
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Temporary boot-time mappings, used by early_ioremap(),
|
||||||
|
* before ioremap() is functional.
|
||||||
|
*/
|
||||||
|
#ifdef CONFIG_ARM64_64K_PAGES
|
||||||
|
#define NR_FIX_BTMAPS 4
|
||||||
|
#else
|
||||||
|
#define NR_FIX_BTMAPS 64
|
||||||
|
#endif
|
||||||
|
#define FIX_BTMAPS_SLOTS 7
|
||||||
|
#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
|
||||||
|
|
||||||
|
FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
|
||||||
|
FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
|
||||||
|
__end_of_fixed_addresses
|
||||||
|
};
|
||||||
|
|
||||||
|
#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
|
||||||
|
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
|
||||||
|
|
||||||
|
#define FIXMAP_PAGE_IO __pgprot(PROT_DEVICE_nGnRE)
|
||||||
|
|
||||||
|
extern void __early_set_fixmap(enum fixed_addresses idx,
|
||||||
|
phys_addr_t phys, pgprot_t flags);
|
||||||
|
|
||||||
|
#define __set_fixmap __early_set_fixmap
|
||||||
|
|
||||||
|
#include <asm-generic/fixmap.h>
|
||||||
|
|
||||||
|
#endif /* !__ASSEMBLY__ */
|
||||||
|
#endif /* _ASM_ARM64_FIXMAP_H */
|
@ -27,6 +27,7 @@
|
|||||||
#include <asm/byteorder.h>
|
#include <asm/byteorder.h>
|
||||||
#include <asm/barrier.h>
|
#include <asm/barrier.h>
|
||||||
#include <asm/pgtable.h>
|
#include <asm/pgtable.h>
|
||||||
|
#include <asm/early_ioremap.h>
|
||||||
|
|
||||||
#include <xen/xen.h>
|
#include <xen/xen.h>
|
||||||
|
|
||||||
|
@ -49,7 +49,7 @@
|
|||||||
#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1))
|
#define PAGE_OFFSET (UL(0xffffffffffffffff) << (VA_BITS - 1))
|
||||||
#define MODULES_END (PAGE_OFFSET)
|
#define MODULES_END (PAGE_OFFSET)
|
||||||
#define MODULES_VADDR (MODULES_END - SZ_64M)
|
#define MODULES_VADDR (MODULES_END - SZ_64M)
|
||||||
#define EARLYCON_IOBASE (MODULES_VADDR - SZ_4M)
|
#define FIXADDR_TOP (MODULES_VADDR - SZ_2M - PAGE_SIZE)
|
||||||
#define TASK_SIZE_64 (UL(1) << VA_BITS)
|
#define TASK_SIZE_64 (UL(1) << VA_BITS)
|
||||||
|
|
||||||
#ifdef CONFIG_COMPAT
|
#ifdef CONFIG_COMPAT
|
||||||
|
@ -27,5 +27,6 @@ typedef struct {
|
|||||||
extern void paging_init(void);
|
extern void paging_init(void);
|
||||||
extern void setup_mm_for_reboot(void);
|
extern void setup_mm_for_reboot(void);
|
||||||
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
|
extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
|
||||||
|
extern void init_mem_pgprot(void);
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
|
@ -26,6 +26,8 @@
|
|||||||
#include <linux/amba/serial.h>
|
#include <linux/amba/serial.h>
|
||||||
#include <linux/serial_reg.h>
|
#include <linux/serial_reg.h>
|
||||||
|
|
||||||
|
#include <asm/fixmap.h>
|
||||||
|
|
||||||
static void __iomem *early_base;
|
static void __iomem *early_base;
|
||||||
static void (*printch)(char ch);
|
static void (*printch)(char ch);
|
||||||
|
|
||||||
@ -141,8 +143,10 @@ static int __init setup_early_printk(char *buf)
|
|||||||
}
|
}
|
||||||
/* no options parsing yet */
|
/* no options parsing yet */
|
||||||
|
|
||||||
if (paddr)
|
if (paddr) {
|
||||||
early_base = early_io_map(paddr, EARLYCON_IOBASE);
|
set_fixmap_io(FIX_EARLYCON_MEM_BASE, paddr);
|
||||||
|
early_base = (void __iomem *)fix_to_virt(FIX_EARLYCON_MEM_BASE);
|
||||||
|
}
|
||||||
|
|
||||||
printch = match->printch;
|
printch = match->printch;
|
||||||
early_console = &early_console_dev;
|
early_console = &early_console_dev;
|
||||||
|
@ -404,7 +404,7 @@ ENDPROC(__calc_phys_offset)
|
|||||||
* - identity mapping to enable the MMU (low address, TTBR0)
|
* - identity mapping to enable the MMU (low address, TTBR0)
|
||||||
* - first few MB of the kernel linear mapping to jump to once the MMU has
|
* - first few MB of the kernel linear mapping to jump to once the MMU has
|
||||||
* been enabled, including the FDT blob (TTBR1)
|
* been enabled, including the FDT blob (TTBR1)
|
||||||
* - UART mapping if CONFIG_EARLY_PRINTK is enabled (TTBR1)
|
* - pgd entry for fixed mappings (TTBR1)
|
||||||
*/
|
*/
|
||||||
__create_page_tables:
|
__create_page_tables:
|
||||||
pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses
|
pgtbl x25, x26, x24 // idmap_pg_dir and swapper_pg_dir addresses
|
||||||
@ -461,15 +461,12 @@ __create_page_tables:
|
|||||||
sub x6, x6, #1 // inclusive range
|
sub x6, x6, #1 // inclusive range
|
||||||
create_block_map x0, x7, x3, x5, x6
|
create_block_map x0, x7, x3, x5, x6
|
||||||
1:
|
1:
|
||||||
#ifdef CONFIG_EARLY_PRINTK
|
|
||||||
/*
|
/*
|
||||||
* Create the pgd entry for the UART mapping. The full mapping is done
|
* Create the pgd entry for the fixed mappings.
|
||||||
* later based earlyprintk kernel parameter.
|
|
||||||
*/
|
*/
|
||||||
ldr x5, =EARLYCON_IOBASE // UART virtual address
|
ldr x5, =FIXADDR_TOP // Fixed mapping virtual address
|
||||||
add x0, x26, #2 * PAGE_SIZE // section table address
|
add x0, x26, #2 * PAGE_SIZE // section table address
|
||||||
create_pgd_entry x26, x0, x5, x6, x7
|
create_pgd_entry x26, x0, x5, x6, x7
|
||||||
#endif
|
|
||||||
ret
|
ret
|
||||||
ENDPROC(__create_page_tables)
|
ENDPROC(__create_page_tables)
|
||||||
.ltorg
|
.ltorg
|
||||||
|
@ -42,6 +42,7 @@
|
|||||||
#include <linux/of_fdt.h>
|
#include <linux/of_fdt.h>
|
||||||
#include <linux/of_platform.h>
|
#include <linux/of_platform.h>
|
||||||
|
|
||||||
|
#include <asm/fixmap.h>
|
||||||
#include <asm/cputype.h>
|
#include <asm/cputype.h>
|
||||||
#include <asm/elf.h>
|
#include <asm/elf.h>
|
||||||
#include <asm/cputable.h>
|
#include <asm/cputable.h>
|
||||||
@ -360,6 +361,9 @@ void __init setup_arch(char **cmdline_p)
|
|||||||
|
|
||||||
*cmdline_p = boot_command_line;
|
*cmdline_p = boot_command_line;
|
||||||
|
|
||||||
|
init_mem_pgprot();
|
||||||
|
early_ioremap_init();
|
||||||
|
|
||||||
parse_early_param();
|
parse_early_param();
|
||||||
|
|
||||||
arm64_memblock_init();
|
arm64_memblock_init();
|
||||||
|
@ -25,6 +25,10 @@
|
|||||||
#include <linux/vmalloc.h>
|
#include <linux/vmalloc.h>
|
||||||
#include <linux/io.h>
|
#include <linux/io.h>
|
||||||
|
|
||||||
|
#include <asm/fixmap.h>
|
||||||
|
#include <asm/tlbflush.h>
|
||||||
|
#include <asm/pgalloc.h>
|
||||||
|
|
||||||
static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
|
static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
|
||||||
pgprot_t prot, void *caller)
|
pgprot_t prot, void *caller)
|
||||||
{
|
{
|
||||||
@ -98,3 +102,84 @@ void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
|
|||||||
__builtin_return_address(0));
|
__builtin_return_address(0));
|
||||||
}
|
}
|
||||||
EXPORT_SYMBOL(ioremap_cache);
|
EXPORT_SYMBOL(ioremap_cache);
|
||||||
|
|
||||||
|
#ifndef CONFIG_ARM64_64K_PAGES
|
||||||
|
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
|
||||||
|
#endif
|
||||||
|
|
||||||
|
static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
|
||||||
|
{
|
||||||
|
pgd_t *pgd;
|
||||||
|
pud_t *pud;
|
||||||
|
|
||||||
|
pgd = pgd_offset_k(addr);
|
||||||
|
BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));
|
||||||
|
|
||||||
|
pud = pud_offset(pgd, addr);
|
||||||
|
BUG_ON(pud_none(*pud) || pud_bad(*pud));
|
||||||
|
|
||||||
|
return pmd_offset(pud, addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
static inline pte_t * __init early_ioremap_pte(unsigned long addr)
|
||||||
|
{
|
||||||
|
pmd_t *pmd = early_ioremap_pmd(addr);
|
||||||
|
|
||||||
|
BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));
|
||||||
|
|
||||||
|
return pte_offset_kernel(pmd, addr);
|
||||||
|
}
|
||||||
|
|
||||||
|
void __init early_ioremap_init(void)
|
||||||
|
{
|
||||||
|
pmd_t *pmd;
|
||||||
|
|
||||||
|
pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
|
||||||
|
#ifndef CONFIG_ARM64_64K_PAGES
|
||||||
|
/* need to populate pmd for 4k pagesize only */
|
||||||
|
pmd_populate_kernel(&init_mm, pmd, bm_pte);
|
||||||
|
#endif
|
||||||
|
/*
|
||||||
|
* The boot-ioremap range spans multiple pmds, for which
|
||||||
|
* we are not prepared:
|
||||||
|
*/
|
||||||
|
BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
|
||||||
|
!= (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
|
||||||
|
|
||||||
|
if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
|
||||||
|
WARN_ON(1);
|
||||||
|
pr_warn("pmd %p != %p\n",
|
||||||
|
pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
|
||||||
|
pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
|
||||||
|
fix_to_virt(FIX_BTMAP_BEGIN));
|
||||||
|
pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
|
||||||
|
fix_to_virt(FIX_BTMAP_END));
|
||||||
|
|
||||||
|
pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
|
||||||
|
pr_warn("FIX_BTMAP_BEGIN: %d\n",
|
||||||
|
FIX_BTMAP_BEGIN);
|
||||||
|
}
|
||||||
|
|
||||||
|
early_ioremap_setup();
|
||||||
|
}
|
||||||
|
|
||||||
|
void __init __early_set_fixmap(enum fixed_addresses idx,
|
||||||
|
phys_addr_t phys, pgprot_t flags)
|
||||||
|
{
|
||||||
|
unsigned long addr = __fix_to_virt(idx);
|
||||||
|
pte_t *pte;
|
||||||
|
|
||||||
|
if (idx >= __end_of_fixed_addresses) {
|
||||||
|
BUG();
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
pte = early_ioremap_pte(addr);
|
||||||
|
|
||||||
|
if (pgprot_val(flags))
|
||||||
|
set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
|
||||||
|
else {
|
||||||
|
pte_clear(&init_mm, addr, pte);
|
||||||
|
flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
@ -125,7 +125,7 @@ early_param("cachepolicy", early_cachepolicy);
|
|||||||
/*
|
/*
|
||||||
* Adjust the PMD section entries according to the CPU in use.
|
* Adjust the PMD section entries according to the CPU in use.
|
||||||
*/
|
*/
|
||||||
static void __init init_mem_pgprot(void)
|
void __init init_mem_pgprot(void)
|
||||||
{
|
{
|
||||||
pteval_t default_pgprot;
|
pteval_t default_pgprot;
|
||||||
int i;
|
int i;
|
||||||
@ -260,47 +260,6 @@ static void __init create_mapping(phys_addr_t phys, unsigned long virt,
|
|||||||
} while (pgd++, addr = next, addr != end);
|
} while (pgd++, addr = next, addr != end);
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef CONFIG_EARLY_PRINTK
|
|
||||||
/*
|
|
||||||
* Create an early I/O mapping using the pgd/pmd entries already populated
|
|
||||||
* in head.S as this function is called too early to allocated any memory. The
|
|
||||||
* mapping size is 2MB with 4KB pages or 64KB or 64KB pages.
|
|
||||||
*/
|
|
||||||
void __iomem * __init early_io_map(phys_addr_t phys, unsigned long virt)
|
|
||||||
{
|
|
||||||
unsigned long size, mask;
|
|
||||||
bool page64k = IS_ENABLED(CONFIG_ARM64_64K_PAGES);
|
|
||||||
pgd_t *pgd;
|
|
||||||
pud_t *pud;
|
|
||||||
pmd_t *pmd;
|
|
||||||
pte_t *pte;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* No early pte entries with !ARM64_64K_PAGES configuration, so using
|
|
||||||
* sections (pmd).
|
|
||||||
*/
|
|
||||||
size = page64k ? PAGE_SIZE : SECTION_SIZE;
|
|
||||||
mask = ~(size - 1);
|
|
||||||
|
|
||||||
pgd = pgd_offset_k(virt);
|
|
||||||
pud = pud_offset(pgd, virt);
|
|
||||||
if (pud_none(*pud))
|
|
||||||
return NULL;
|
|
||||||
pmd = pmd_offset(pud, virt);
|
|
||||||
|
|
||||||
if (page64k) {
|
|
||||||
if (pmd_none(*pmd))
|
|
||||||
return NULL;
|
|
||||||
pte = pte_offset_kernel(pmd, virt);
|
|
||||||
set_pte(pte, __pte((phys & mask) | PROT_DEVICE_nGnRE));
|
|
||||||
} else {
|
|
||||||
set_pmd(pmd, __pmd((phys & mask) | PROT_SECT_DEVICE_nGnRE));
|
|
||||||
}
|
|
||||||
|
|
||||||
return (void __iomem *)((virt & mask) + (phys & ~mask));
|
|
||||||
}
|
|
||||||
#endif
|
|
||||||
|
|
||||||
static void __init map_mem(void)
|
static void __init map_mem(void)
|
||||||
{
|
{
|
||||||
struct memblock_region *reg;
|
struct memblock_region *reg;
|
||||||
@ -357,7 +316,6 @@ void __init paging_init(void)
|
|||||||
{
|
{
|
||||||
void *zero_page;
|
void *zero_page;
|
||||||
|
|
||||||
init_mem_pgprot();
|
|
||||||
map_mem();
|
map_mem();
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -29,7 +29,7 @@ config GENERIC_CALIBRATE_DELAY
|
|||||||
bool
|
bool
|
||||||
default y
|
default y
|
||||||
|
|
||||||
config NO_IOPORT
|
config NO_IOPORT_MAP
|
||||||
def_bool y
|
def_bool y
|
||||||
|
|
||||||
config FORCE_MAX_ZONEORDER
|
config FORCE_MAX_ZONEORDER
|
||||||
@ -138,6 +138,7 @@ config ETRAX_ARCH_V10
|
|||||||
bool
|
bool
|
||||||
default y if ETRAX100LX || ETRAX100LX_V2
|
default y if ETRAX100LX || ETRAX100LX_V2
|
||||||
default n if !(ETRAX100LX || ETRAX100LX_V2)
|
default n if !(ETRAX100LX || ETRAX100LX_V2)
|
||||||
|
select TTY
|
||||||
|
|
||||||
config ETRAX_ARCH_V32
|
config ETRAX_ARCH_V32
|
||||||
bool
|
bool
|
||||||
|
@ -165,6 +165,7 @@ void __init setup_arch(char **cmdline_p)
|
|||||||
strcpy(init_utsname()->machine, cris_machine_name);
|
strcpy(init_utsname()->machine, cris_machine_name);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#ifdef CONFIG_PROC_FS
|
||||||
static void *c_start(struct seq_file *m, loff_t *pos)
|
static void *c_start(struct seq_file *m, loff_t *pos)
|
||||||
{
|
{
|
||||||
return *pos < nr_cpu_ids ? (void *)(int)(*pos + 1) : NULL;
|
return *pos < nr_cpu_ids ? (void *)(int)(*pos + 1) : NULL;
|
||||||
@ -188,6 +189,7 @@ const struct seq_operations cpuinfo_op = {
|
|||||||
.stop = c_stop,
|
.stop = c_stop,
|
||||||
.show = show_cpuinfo,
|
.show = show_cpuinfo,
|
||||||
};
|
};
|
||||||
|
#endif /* CONFIG_PROC_FS */
|
||||||
|
|
||||||
static int __init topology_init(void)
|
static int __init topology_init(void)
|
||||||
{
|
{
|
||||||
|
@ -19,7 +19,7 @@ config HEXAGON
|
|||||||
select GENERIC_IRQ_SHOW
|
select GENERIC_IRQ_SHOW
|
||||||
select HAVE_ARCH_KGDB
|
select HAVE_ARCH_KGDB
|
||||||
select HAVE_ARCH_TRACEHOOK
|
select HAVE_ARCH_TRACEHOOK
|
||||||
select NO_IOPORT
|
select NO_IOPORT_MAP
|
||||||
select GENERIC_IOMAP
|
select GENERIC_IOMAP
|
||||||
select GENERIC_SMP_IDLE_THREAD
|
select GENERIC_SMP_IDLE_THREAD
|
||||||
select STACKTRACE_SUPPORT
|
select STACKTRACE_SUPPORT
|
||||||
|
@ -21,6 +21,7 @@ config IA64
|
|||||||
select HAVE_FUNCTION_TRACER
|
select HAVE_FUNCTION_TRACER
|
||||||
select HAVE_DMA_ATTRS
|
select HAVE_DMA_ATTRS
|
||||||
select HAVE_KVM
|
select HAVE_KVM
|
||||||
|
select TTY
|
||||||
select HAVE_ARCH_TRACEHOOK
|
select HAVE_ARCH_TRACEHOOK
|
||||||
select HAVE_DMA_API_DEBUG
|
select HAVE_DMA_API_DEBUG
|
||||||
select HAVE_MEMBLOCK
|
select HAVE_MEMBLOCK
|
||||||
|
@ -28,7 +28,7 @@ config ZONE_DMA
|
|||||||
bool
|
bool
|
||||||
default y
|
default y
|
||||||
|
|
||||||
config NO_IOPORT
|
config NO_IOPORT_MAP
|
||||||
def_bool y
|
def_bool y
|
||||||
|
|
||||||
config NO_DMA
|
config NO_DMA
|
||||||
|
@ -52,7 +52,7 @@ config TIME_LOW_RES
|
|||||||
bool
|
bool
|
||||||
default y
|
default y
|
||||||
|
|
||||||
config NO_IOPORT
|
config NO_IOPORT_MAP
|
||||||
def_bool y
|
def_bool y
|
||||||
|
|
||||||
config NO_DMA
|
config NO_DMA
|
||||||
|
@ -52,7 +52,7 @@ config GENERIC_HWEIGHT
|
|||||||
config GENERIC_CALIBRATE_DELAY
|
config GENERIC_CALIBRATE_DELAY
|
||||||
def_bool y
|
def_bool y
|
||||||
|
|
||||||
config NO_IOPORT
|
config NO_IOPORT_MAP
|
||||||
def_bool y
|
def_bool y
|
||||||
|
|
||||||
source "init/Kconfig"
|
source "init/Kconfig"
|
||||||
|
@ -175,7 +175,7 @@ config MACH_DECSTATION
|
|||||||
select CPU_R4000_WORKAROUNDS if 64BIT
|
select CPU_R4000_WORKAROUNDS if 64BIT
|
||||||
select CPU_R4400_WORKAROUNDS if 64BIT
|
select CPU_R4400_WORKAROUNDS if 64BIT
|
||||||
select DMA_NONCOHERENT
|
select DMA_NONCOHERENT
|
||||||
select NO_IOPORT
|
select NO_IOPORT_MAP
|
||||||
select IRQ_CPU
|
select IRQ_CPU
|
||||||
select SYS_HAS_CPU_R3000
|
select SYS_HAS_CPU_R3000
|
||||||
select SYS_HAS_CPU_R4X00
|
select SYS_HAS_CPU_R4X00
|
||||||
@ -947,7 +947,7 @@ config SYNC_R4K
|
|||||||
config MIPS_MACHINE
|
config MIPS_MACHINE
|
||||||
def_bool n
|
def_bool n
|
||||||
|
|
||||||
config NO_IOPORT
|
config NO_IOPORT_MAP
|
||||||
def_bool n
|
def_bool n
|
||||||
|
|
||||||
config GENERIC_ISA_DMA
|
config GENERIC_ISA_DMA
|
||||||
|
@ -41,7 +41,7 @@ config RWSEM_XCHGADD_ALGORITHM
|
|||||||
config GENERIC_HWEIGHT
|
config GENERIC_HWEIGHT
|
||||||
def_bool y
|
def_bool y
|
||||||
|
|
||||||
config NO_IOPORT
|
config NO_IOPORT_MAP
|
||||||
def_bool y
|
def_bool y
|
||||||
|
|
||||||
config TRACE_IRQFLAGS_SUPPORT
|
config TRACE_IRQFLAGS_SUPPORT
|
||||||
|
@ -210,7 +210,6 @@ extern int is_fadump_active(void);
|
|||||||
extern void crash_fadump(struct pt_regs *, const char *);
|
extern void crash_fadump(struct pt_regs *, const char *);
|
||||||
extern void fadump_cleanup(void);
|
extern void fadump_cleanup(void);
|
||||||
|
|
||||||
extern void vmcore_cleanup(void);
|
|
||||||
#else /* CONFIG_FA_DUMP */
|
#else /* CONFIG_FA_DUMP */
|
||||||
static inline int is_fadump_active(void) { return 0; }
|
static inline int is_fadump_active(void) { return 0; }
|
||||||
static inline void crash_fadump(struct pt_regs *regs, const char *str) { }
|
static inline void crash_fadump(struct pt_regs *regs, const char *str) { }
|
||||||
|
@ -73,6 +73,7 @@ config PPC_BOOK3S_64
|
|||||||
select SYS_SUPPORTS_HUGETLBFS
|
select SYS_SUPPORTS_HUGETLBFS
|
||||||
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if PPC_64K_PAGES
|
select HAVE_ARCH_TRANSPARENT_HUGEPAGE if PPC_64K_PAGES
|
||||||
select ARCH_SUPPORTS_NUMA_BALANCING
|
select ARCH_SUPPORTS_NUMA_BALANCING
|
||||||
|
select IRQ_WORK
|
||||||
|
|
||||||
config PPC_BOOK3E_64
|
config PPC_BOOK3E_64
|
||||||
bool "Embedded processors"
|
bool "Embedded processors"
|
||||||
|
@ -531,6 +531,7 @@ int fsl_rio_setup(struct platform_device *dev)
|
|||||||
sprintf(port->name, "RIO mport %d", i);
|
sprintf(port->name, "RIO mport %d", i);
|
||||||
|
|
||||||
priv->dev = &dev->dev;
|
priv->dev = &dev->dev;
|
||||||
|
port->dev.parent = &dev->dev;
|
||||||
port->ops = ops;
|
port->ops = ops;
|
||||||
port->priv = priv;
|
port->priv = priv;
|
||||||
port->phys_efptr = 0x100;
|
port->phys_efptr = 0x100;
|
||||||
|
@@ -52,7 +52,7 @@ config KEXEC
 config AUDIT_ARCH
 	def_bool y

-config NO_IOPORT
+config NO_IOPORT_MAP
 	def_bool y

 config PCI_QUIRKS
@@ -505,6 +505,9 @@ static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
 	if (!pmd_present(*pmd) &&
 	    __pte_alloc(mm, vma, pmd, vmaddr))
 		return -ENOMEM;
+	/* large pmds cannot yet be handled */
+	if (pmd_large(*pmd))
+		return -EFAULT;
 	/* pmd now points to a valid segment table entry. */
 	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
 	if (!rmap)
@@ -3,7 +3,7 @@ config SUPERH
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select EXPERT
 	select CLKDEV_LOOKUP
-	select HAVE_IDE if HAS_IOPORT
+	select HAVE_IDE if HAS_IOPORT_MAP
 	select HAVE_MEMBLOCK
 	select HAVE_MEMBLOCK_NODE_MAP
 	select ARCH_DISCARD_MEMBLOCK
@@ -138,7 +138,7 @@ config ARCH_HAS_ILOG2_U32
 config ARCH_HAS_ILOG2_U64
 	def_bool n

-config NO_IOPORT
+config NO_IOPORT_MAP
 	def_bool !PCI
 	depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN && \
 		   !SH_HP6XX && !SH_SOLUTION_ENGINE
@@ -158,7 +158,7 @@ config SH_SDK7786
 	bool "SDK7786"
 	depends on CPU_SUBTYPE_SH7786
 	select SYS_SUPPORTS_PCI
-	select NO_IOPORT if !PCI
+	select NO_IOPORT_MAP if !PCI
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select HAVE_SRAM_POOL
 	select REGULATOR_FIXED_VOLTAGE if REGULATOR
@@ -204,7 +204,7 @@ config SH_URQUELL
 	depends on CPU_SUBTYPE_SH7786
 	select ARCH_REQUIRE_GPIOLIB
 	select SYS_SUPPORTS_PCI
-	select NO_IOPORT if !PCI
+	select NO_IOPORT_MAP if !PCI

 config SH_MIGOR
 	bool "Migo-R"
@@ -306,7 +306,7 @@ config SH_LBOX_RE2
 config SH_X3PROTO
 	bool "SH-X3 Prototype board"
 	depends on CPU_SUBTYPE_SHX3
-	select NO_IOPORT if !PCI
+	select NO_IOPORT_MAP if !PCI
 	select IRQ_DOMAIN

 config SH_MAGIC_PANEL_R2
@@ -333,7 +333,7 @@ config SH_POLARIS

 config SH_SH2007
 	bool "SH-2007 board"
-	select NO_IOPORT
+	select NO_IOPORT_MAP
 	select REGULATOR_FIXED_VOLTAGE if REGULATOR
 	depends on CPU_SUBTYPE_SH7780
 	help
@@ -122,7 +122,7 @@ __BUILD_MEMORY_STRING(__raw_, l, u32)

 __BUILD_MEMORY_STRING(__raw_, q, u64)

-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP

 /*
  * Slowdown I/O port space accesses for antique hardware.
@@ -218,7 +218,7 @@ __BUILD_IOPORT_STRING(w, u16)
 __BUILD_IOPORT_STRING(l, u32)
 __BUILD_IOPORT_STRING(q, u64)

-#else /* !CONFIG_HAS_IOPORT */
+#else /* !CONFIG_HAS_IOPORT_MAP */

 #include <asm/io_noioport.h>

@@ -36,7 +36,7 @@ __ioremap_trapped(unsigned long offset, unsigned long size)
 #define __ioremap_trapped(offset, size) NULL
 #endif

-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
 extern struct list_head trapped_io;

 static inline void __iomem *
@@ -21,7 +21,7 @@ struct sh_machine_vector {
 	int (*mv_irq_demux)(int irq);
 	void (*mv_init_irq)(void);

-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
 	void __iomem *(*mv_ioport_map)(unsigned long port, unsigned int size);
 	void (*mv_ioport_unmap)(void __iomem *);
 #endif
@@ -22,7 +22,7 @@ obj-y	:= debugtraps.o dma-nommu.o dumpstack.o \

 ifndef CONFIG_GENERIC_IOMAP
 obj-y				+= iomap.o
-obj-$(CONFIG_HAS_IOPORT)	+= ioport.o
+obj-$(CONFIG_HAS_IOPORT_MAP)	+= ioport.o
 endif

 obj-$(CONFIG_SUPERH32)		+= sys_sh32.o
@@ -22,7 +22,7 @@

 #define TRAPPED_PAGES_MAX 16

-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
 LIST_HEAD(trapped_io);
 EXPORT_SYMBOL_GPL(trapped_io);
 #endif
@@ -90,7 +90,7 @@ int register_trapped_io(struct trapped_io *tiop)
 	tiop->magic = IO_TRAPPED_MAGIC;
 	INIT_LIST_HEAD(&tiop->list);
 	spin_lock_irq(&trapped_lock);
-#ifdef CONFIG_HAS_IOPORT
+#ifdef CONFIG_HAS_IOPORT_MAP
 	if (flags & IORESOURCE_IO)
 		list_add(&tiop->list, &trapped_io);
 #endif
@@ -411,7 +411,7 @@ config PCI_DOMAINS
 config NO_IOMEM
 	def_bool !PCI

-config NO_IOPORT
+config NO_IOPORT_MAP
 	def_bool !PCI

 config TILE_PCI_IO
@@ -359,7 +359,7 @@ int singlestepping(void * t)
 /*
  * Only x86 and x86_64 have an arch_align_stack().
  * All other arches have "#define arch_align_stack(x) (x)"
- * in their asm/system.h
+ * in their asm/exec.h
  * As this is included in UML from asm-um/system-generic.h,
  * we can use it to behave as the subarch does.
  */
@@ -27,7 +27,7 @@ config UNICORE32
 config GENERIC_CSUM
 	def_bool y

-config NO_IOPORT
+config NO_IOPORT_MAP
 	bool

 config STACKTRACE_SUPPORT
@@ -14,6 +14,8 @@

 #include <linux/compiler.h>
 #include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/vmacache.h>
 #include <linux/io.h>

 #include <asm/cacheflush.h>
@@ -73,7 +75,7 @@ do { \
 	else \
 		mm->mmap = NULL; \
 	rb_erase(&high_vma->vm_rb, &mm->mm_rb); \
-	mm->mmap_cache = NULL; \
+	vmacache_invalidate(mm); \
 	mm->map_count--; \
 	remove_vma(high_vma); \
 } \
@@ -43,6 +43,7 @@ config X86
 	select HAVE_DMA_ATTRS
 	select HAVE_DMA_CONTIGUOUS if !SWIOTLB
 	select HAVE_KRETPROBES
+	select GENERIC_EARLY_IOREMAP
 	select HAVE_OPTPROBES
 	select HAVE_KPROBES_ON_FTRACE
 	select HAVE_FTRACE_MCOUNT_RECORD
@@ -5,5 +5,6 @@ genhdr-y += unistd_64.h
 genhdr-y += unistd_x32.h

 generic-y += clkdev.h
+generic-y += early_ioremap.h
 generic-y += cputime.h
 generic-y += mcs_spinlock.h
@@ -1,7 +1,6 @@
 #ifndef _ASM_X86_BUG_H
 #define _ASM_X86_BUG_H

-#ifdef CONFIG_BUG
 #define HAVE_ARCH_BUG

 #ifdef CONFIG_DEBUG_BUGVERBOSE
@@ -33,8 +32,6 @@ do { \
 } while (0)
 #endif

-#endif /* !CONFIG_BUG */
-
 #include <asm-generic/bug.h>

 #endif /* _ASM_X86_BUG_H */
@@ -163,5 +163,11 @@ static inline void __set_fixmap(enum fixed_addresses idx,

 #include <asm-generic/fixmap.h>

+#define __late_set_fixmap(idx, phys, flags) __set_fixmap(idx, phys, flags)
+#define __late_clear_fixmap(idx) __set_fixmap(idx, 0, __pgprot(0))
+
+void __early_set_fixmap(enum fixed_addresses idx,
+			phys_addr_t phys, pgprot_t flags);
+
 #endif /* !__ASSEMBLY__ */
 #endif /* _ASM_X86_FIXMAP_H */
@@ -39,6 +39,7 @@
 #include <linux/string.h>
 #include <linux/compiler.h>
 #include <asm/page.h>
+#include <asm/early_ioremap.h>

 #define build_mmio_read(name, size, type, reg, barrier) \
 static inline type name(const volatile void __iomem *addr) \
|
|||||||
unsigned long prot_val);
|
unsigned long prot_val);
|
||||||
extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
|
extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
|
||||||
|
|
||||||
/*
|
|
||||||
* early_ioremap() and early_iounmap() are for temporary early boot-time
|
|
||||||
* mappings, before the real ioremap() is functional.
|
|
||||||
* A boot-time mapping is currently limited to at most 16 pages.
|
|
||||||
*/
|
|
||||||
extern void early_ioremap_init(void);
|
|
||||||
extern void early_ioremap_reset(void);
|
|
||||||
extern void __iomem *early_ioremap(resource_size_t phys_addr,
|
|
||||||
unsigned long size);
|
|
||||||
extern void __iomem *early_memremap(resource_size_t phys_addr,
|
|
||||||
unsigned long size);
|
|
||||||
extern void early_iounmap(void __iomem *addr, unsigned long size);
|
|
||||||
extern void fixup_early_ioremap(void);
|
|
||||||
extern bool is_early_ioremap_ptep(pte_t *ptep);
|
extern bool is_early_ioremap_ptep(pte_t *ptep);
|
||||||
|
|
||||||
#ifdef CONFIG_XEN
|
#ifdef CONFIG_XEN
|
||||||
|
@@ -52,7 +52,7 @@
  * Compared to the generic __my_cpu_offset version, the following
  * saves one instruction and avoids clobbering a temp register.
  */
-#define __this_cpu_ptr(ptr) \
+#define raw_cpu_ptr(ptr) \
 ({ \
 	unsigned long tcp_ptr__; \
 	__verify_pcpu_ptr(ptr); \
@@ -362,25 +362,25 @@ do { \
  */
 #define this_cpu_read_stable(var)	percpu_from_op("mov", var, "p" (&(var)))

-#define __this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
-#define __this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
-#define __this_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define raw_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define raw_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
+#define raw_cpu_read_4(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))

-#define __this_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
-#define __this_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
-#define __this_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
-#define __this_cpu_add_1(pcp, val)	percpu_add_op((pcp), val)
-#define __this_cpu_add_2(pcp, val)	percpu_add_op((pcp), val)
-#define __this_cpu_add_4(pcp, val)	percpu_add_op((pcp), val)
-#define __this_cpu_and_1(pcp, val)	percpu_to_op("and", (pcp), val)
-#define __this_cpu_and_2(pcp, val)	percpu_to_op("and", (pcp), val)
-#define __this_cpu_and_4(pcp, val)	percpu_to_op("and", (pcp), val)
-#define __this_cpu_or_1(pcp, val)	percpu_to_op("or", (pcp), val)
-#define __this_cpu_or_2(pcp, val)	percpu_to_op("or", (pcp), val)
-#define __this_cpu_or_4(pcp, val)	percpu_to_op("or", (pcp), val)
-#define __this_cpu_xchg_1(pcp, val)	percpu_xchg_op(pcp, val)
-#define __this_cpu_xchg_2(pcp, val)	percpu_xchg_op(pcp, val)
-#define __this_cpu_xchg_4(pcp, val)	percpu_xchg_op(pcp, val)
+#define raw_cpu_write_1(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define raw_cpu_write_2(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define raw_cpu_write_4(pcp, val)	percpu_to_op("mov", (pcp), val)
+#define raw_cpu_add_1(pcp, val)		percpu_add_op((pcp), val)
+#define raw_cpu_add_2(pcp, val)		percpu_add_op((pcp), val)
+#define raw_cpu_add_4(pcp, val)		percpu_add_op((pcp), val)
+#define raw_cpu_and_1(pcp, val)		percpu_to_op("and", (pcp), val)
+#define raw_cpu_and_2(pcp, val)		percpu_to_op("and", (pcp), val)
+#define raw_cpu_and_4(pcp, val)		percpu_to_op("and", (pcp), val)
+#define raw_cpu_or_1(pcp, val)		percpu_to_op("or", (pcp), val)
+#define raw_cpu_or_2(pcp, val)		percpu_to_op("or", (pcp), val)
+#define raw_cpu_or_4(pcp, val)		percpu_to_op("or", (pcp), val)
+#define raw_cpu_xchg_1(pcp, val)	percpu_xchg_op(pcp, val)
+#define raw_cpu_xchg_2(pcp, val)	percpu_xchg_op(pcp, val)
+#define raw_cpu_xchg_4(pcp, val)	percpu_xchg_op(pcp, val)

 #define this_cpu_read_1(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_read_2(pcp)		percpu_from_op("mov", (pcp), "m"(pcp))
@@ -401,12 +401,12 @@ do { \
 #define this_cpu_xchg_2(pcp, nval)	percpu_xchg_op(pcp, nval)
 #define this_cpu_xchg_4(pcp, nval)	percpu_xchg_op(pcp, nval)

-#define __this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
-#define __this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
-#define __this_cpu_add_return_4(pcp, val)	percpu_add_return_op(pcp, val)
-#define __this_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
-#define __this_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
-#define __this_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define raw_cpu_add_return_1(pcp, val)		percpu_add_return_op(pcp, val)
+#define raw_cpu_add_return_2(pcp, val)		percpu_add_return_op(pcp, val)
+#define raw_cpu_add_return_4(pcp, val)		percpu_add_return_op(pcp, val)
+#define raw_cpu_cmpxchg_1(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define raw_cpu_cmpxchg_2(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define raw_cpu_cmpxchg_4(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

 #define this_cpu_add_return_1(pcp, val)	percpu_add_return_op(pcp, val)
 #define this_cpu_add_return_2(pcp, val)	percpu_add_return_op(pcp, val)
@@ -427,7 +427,7 @@ do { \
 	__ret; \
 })

-#define __this_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
+#define raw_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
 #define this_cpu_cmpxchg_double_4	percpu_cmpxchg8b_double
 #endif /* CONFIG_X86_CMPXCHG64 */

@@ -436,14 +436,14 @@ do { \
 * 32 bit must fall back to generic operations.
 */
 #ifdef CONFIG_X86_64
-#define __this_cpu_read_8(pcp)			percpu_from_op("mov", (pcp), "m"(pcp))
-#define __this_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
-#define __this_cpu_add_8(pcp, val)		percpu_add_op((pcp), val)
-#define __this_cpu_and_8(pcp, val)		percpu_to_op("and", (pcp), val)
-#define __this_cpu_or_8(pcp, val)		percpu_to_op("or", (pcp), val)
-#define __this_cpu_add_return_8(pcp, val)	percpu_add_return_op(pcp, val)
-#define __this_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
-#define __this_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)
+#define raw_cpu_read_8(pcp)			percpu_from_op("mov", (pcp), "m"(pcp))
+#define raw_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
+#define raw_cpu_add_8(pcp, val)			percpu_add_op((pcp), val)
+#define raw_cpu_and_8(pcp, val)			percpu_to_op("and", (pcp), val)
+#define raw_cpu_or_8(pcp, val)			percpu_to_op("or", (pcp), val)
+#define raw_cpu_add_return_8(pcp, val)		percpu_add_return_op(pcp, val)
+#define raw_cpu_xchg_8(pcp, nval)		percpu_xchg_op(pcp, nval)
+#define raw_cpu_cmpxchg_8(pcp, oval, nval)	percpu_cmpxchg_op(pcp, oval, nval)

 #define this_cpu_read_8(pcp)			percpu_from_op("mov", (pcp), "m"(pcp))
 #define this_cpu_write_8(pcp, val)		percpu_to_op("mov", (pcp), val)
@@ -474,7 +474,7 @@ do { \
 	__ret; \
 })

-#define __this_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
+#define raw_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double
 #define this_cpu_cmpxchg_double_8	percpu_cmpxchg16b_double

 #endif
@@ -495,9 +495,9 @@ static __always_inline int x86_this_cpu_constant_test_bit(unsigned int nr,
 	unsigned long __percpu *a = (unsigned long *)addr + nr / BITS_PER_LONG;

 #ifdef CONFIG_X86_64
-	return ((1UL << (nr % BITS_PER_LONG)) & __this_cpu_read_8(*a)) != 0;
+	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_8(*a)) != 0;
 #else
-	return ((1UL << (nr % BITS_PER_LONG)) & __this_cpu_read_4(*a)) != 0;
+	return ((1UL << (nr % BITS_PER_LONG)) & raw_cpu_read_4(*a)) != 0;
 #endif
 }

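The percpu hunks above are a mechanical rename: the x86 __this_cpu_* primitives become raw_cpu_*, so the generic __this_cpu_* wrappers can gain preemption checks elsewhere in this series while a checks-free variant stays available. As a rough illustration only (the per-CPU variable and function names below are invented for this sketch and are not part of the patch), the intended split between the two families looks like this:

/*
 * Illustrative sketch only; "demo_hits" and demo_account_hit() are
 * hypothetical names, not code from this series.
 */
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(unsigned long, demo_hits);

static void demo_account_hit(void)
{
	/* this_cpu_inc() is always safe with respect to preemption. */
	this_cpu_inc(demo_hits);

	/*
	 * raw_cpu_inc() performs no preemption check, so it is meant for
	 * code that already runs with preemption disabled (or that can
	 * tolerate the update occasionally landing on another CPU).
	 */
	preempt_disable();
	raw_cpu_inc(demo_hits);
	preempt_enable();
}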
@@ -19,12 +19,12 @@ DECLARE_PER_CPU(int, __preempt_count);
 */
 static __always_inline int preempt_count(void)
 {
-	return __this_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
+	return raw_cpu_read_4(__preempt_count) & ~PREEMPT_NEED_RESCHED;
 }

 static __always_inline void preempt_count_set(int pc)
 {
-	__this_cpu_write_4(__preempt_count, pc);
+	raw_cpu_write_4(__preempt_count, pc);
 }

 /*
@@ -53,17 +53,17 @@ static __always_inline void preempt_count_set(int pc)

 static __always_inline void set_preempt_need_resched(void)
 {
-	__this_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
+	raw_cpu_and_4(__preempt_count, ~PREEMPT_NEED_RESCHED);
 }

 static __always_inline void clear_preempt_need_resched(void)
 {
-	__this_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
+	raw_cpu_or_4(__preempt_count, PREEMPT_NEED_RESCHED);
 }

 static __always_inline bool test_preempt_need_resched(void)
 {
-	return !(__this_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
+	return !(raw_cpu_read_4(__preempt_count) & PREEMPT_NEED_RESCHED);
 }

 /*
@@ -72,12 +72,12 @@ static __always_inline bool test_preempt_need_resched(void)

 static __always_inline void __preempt_count_add(int val)
 {
-	__this_cpu_add_4(__preempt_count, val);
+	raw_cpu_add_4(__preempt_count, val);
 }

 static __always_inline void __preempt_count_sub(int val)
 {
-	__this_cpu_add_4(__preempt_count, -val);
+	raw_cpu_add_4(__preempt_count, -val);
 }

 /*
@@ -95,7 +95,7 @@ static __always_inline bool __preempt_count_dec_and_test(void)
 */
 static __always_inline bool should_resched(void)
 {
-	return unlikely(!__this_cpu_read_4(__preempt_count));
+	return unlikely(!raw_cpu_read_4(__preempt_count));
 }

 #ifdef CONFIG_PREEMPT
@@ -328,17 +328,6 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
 	return;
 }

-static int __initdata early_ioremap_debug;
-
-static int __init early_ioremap_debug_setup(char *str)
-{
-	early_ioremap_debug = 1;
-
-	return 0;
-}
-early_param("early_ioremap_debug", early_ioremap_debug_setup);
-
-static __initdata int after_paging_init;
 static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
@@ -362,18 +351,11 @@ bool __init is_early_ioremap_ptep(pte_t *ptep)
 	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
 }

-static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;
-
 void __init early_ioremap_init(void)
 {
 	pmd_t *pmd;
-	int i;

-	if (early_ioremap_debug)
-		printk(KERN_INFO "early_ioremap_init()\n");
-
-	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
-		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
+	early_ioremap_setup();

 	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
 	memset(bm_pte, 0, sizeof(bm_pte));
@@ -402,12 +384,7 @@ void __init early_ioremap_init(void)
 	}
 }

-void __init early_ioremap_reset(void)
-{
-	after_paging_init = 1;
-}
-
-static void __init __early_set_fixmap(enum fixed_addresses idx,
+void __init __early_set_fixmap(enum fixed_addresses idx,
 				      phys_addr_t phys, pgprot_t flags)
 {
 	unsigned long addr = __fix_to_virt(idx);
@@ -425,198 +402,3 @@ static void __init __early_set_fixmap(enum fixed_addresses idx,
 		pte_clear(&init_mm, addr, pte);
 	__flush_tlb_one(addr);
 }
-
-static inline void __init early_set_fixmap(enum fixed_addresses idx,
-					   phys_addr_t phys, pgprot_t prot)
-{
-	if (after_paging_init)
-		__set_fixmap(idx, phys, prot);
-	else
-		__early_set_fixmap(idx, phys, prot);
-}
-
-static inline void __init early_clear_fixmap(enum fixed_addresses idx)
-{
-	if (after_paging_init)
-		clear_fixmap(idx);
-	else
-		__early_set_fixmap(idx, 0, __pgprot(0));
-}
-
-static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
-static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;
-
-void __init fixup_early_ioremap(void)
-{
-	int i;
-
-	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
-		if (prev_map[i]) {
-			WARN_ON(1);
-			break;
-		}
-	}
-
-	early_ioremap_init();
-}
-
-static int __init check_early_ioremap_leak(void)
-{
-	int count = 0;
-	int i;
-
-	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
-		if (prev_map[i])
-			count++;
-
-	if (!count)
-		return 0;
-	WARN(1, KERN_WARNING
-	     "Debug warning: early ioremap leak of %d areas detected.\n",
-	     count);
-	printk(KERN_WARNING
-	       "please boot with early_ioremap_debug and report the dmesg.\n");
-
-	return 1;
-}
-late_initcall(check_early_ioremap_leak);
-
-static void __init __iomem *
-__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
-{
-	unsigned long offset;
-	resource_size_t last_addr;
-	unsigned int nrpages;
-	enum fixed_addresses idx;
-	int i, slot;
-
-	WARN_ON(system_state != SYSTEM_BOOTING);
-
-	slot = -1;
-	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
-		if (!prev_map[i]) {
-			slot = i;
-			break;
-		}
-	}
-
-	if (slot < 0) {
-		printk(KERN_INFO "%s(%08llx, %08lx) not found slot\n",
-		       __func__, (u64)phys_addr, size);
-		WARN_ON(1);
-		return NULL;
-	}
-
-	if (early_ioremap_debug) {
-		printk(KERN_INFO "%s(%08llx, %08lx) [%d] => ",
-		       __func__, (u64)phys_addr, size, slot);
-		dump_stack();
-	}
-
-	/* Don't allow wraparound or zero size */
-	last_addr = phys_addr + size - 1;
-	if (!size || last_addr < phys_addr) {
-		WARN_ON(1);
-		return NULL;
-	}
-
-	prev_size[slot] = size;
-	/*
-	 * Mappings have to be page-aligned
-	 */
-	offset = phys_addr & ~PAGE_MASK;
-	phys_addr &= PAGE_MASK;
-	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
-
-	/*
-	 * Mappings have to fit in the FIX_BTMAP area.
-	 */
-	nrpages = size >> PAGE_SHIFT;
-	if (nrpages > NR_FIX_BTMAPS) {
-		WARN_ON(1);
-		return NULL;
-	}
-
-	/*
-	 * Ok, go for it..
-	 */
-	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
-	while (nrpages > 0) {
-		early_set_fixmap(idx, phys_addr, prot);
-		phys_addr += PAGE_SIZE;
-		--idx;
-		--nrpages;
-	}
-	if (early_ioremap_debug)
-		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);
-
-	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
-	return prev_map[slot];
-}
-
-/* Remap an IO device */
-void __init __iomem *
-early_ioremap(resource_size_t phys_addr, unsigned long size)
-{
-	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
-}
-
-/* Remap memory */
-void __init __iomem *
-early_memremap(resource_size_t phys_addr, unsigned long size)
-{
-	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
-}
-
-void __init early_iounmap(void __iomem *addr, unsigned long size)
-{
-	unsigned long virt_addr;
-	unsigned long offset;
-	unsigned int nrpages;
-	enum fixed_addresses idx;
-	int i, slot;
-
-	slot = -1;
-	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
-		if (prev_map[i] == addr) {
-			slot = i;
-			break;
-		}
-	}
-
-	if (slot < 0) {
-		printk(KERN_INFO "early_iounmap(%p, %08lx) not found slot\n",
-		       addr, size);
-		WARN_ON(1);
-		return;
-	}
-
-	if (prev_size[slot] != size) {
-		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
-		       addr, size, slot, prev_size[slot]);
-		WARN_ON(1);
-		return;
-	}
-
-	if (early_ioremap_debug) {
-		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
-		       size, slot);
-		dump_stack();
-	}
-
-	virt_addr = (unsigned long)addr;
-	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
-		WARN_ON(1);
-		return;
-	}
-	offset = virt_addr & ~PAGE_MASK;
-	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;
-
-	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
-	while (nrpages > 0) {
-		early_clear_fixmap(idx);
-		--idx;
-		--nrpages;
-	}
-	prev_map[slot] = NULL;
-}
@@ -127,7 +127,7 @@ static int __init parse_reservetop(char *arg)

 	address = memparse(arg, &arg);
 	reserve_top_address(address);
-	fixup_early_ioremap();
+	early_ioremap_init();
 	return 0;
 }
 early_param("reservetop", parse_reservetop);
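The hunks above drop the x86-private early ioremap implementation in favour of the generic code selected by GENERIC_EARLY_IOREMAP; only __early_set_fixmap() and the bm_pte bookkeeping stay architecture-specific. For orientation, a hedged sketch of the boot-time usage pattern this interface serves follows; the function and physical address are invented for the example, but early_ioremap()/early_iounmap() and their signatures come from the declarations removed above.

/*
 * Illustrative sketch only; demo_read_boot_reg() is invented and the
 * header set is an assumption, not code from this series.
 */
#include <linux/init.h>
#include <linux/io.h>
#include <linux/types.h>

static u32 __init demo_read_boot_reg(resource_size_t phys)
{
	void __iomem *base;
	u32 val;

	/* Temporary fixmap-backed mapping, usable before ioremap() works. */
	base = early_ioremap(phys, PAGE_SIZE);
	if (!base)
		return 0;

	val = readl(base);

	/* Only a handful of slots exist, so unmap as soon as possible. */
	early_iounmap(base, PAGE_SIZE);
	return val;
}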
@@ -41,7 +41,7 @@ config ARCH_HAS_ILOG2_U32
 config ARCH_HAS_ILOG2_U64
 	def_bool n

-config NO_IOPORT
+config NO_IOPORT_MAP
 	def_bool n

 config HZ
@@ -239,7 +239,7 @@ config XTENSA_PLATFORM_XT2000
 config XTENSA_PLATFORM_S6105
 	bool "S6105"
 	select SERIAL_CONSOLE
-	select NO_IOPORT
+	select NO_IOPORT_MAP

 config XTENSA_PLATFORM_XTFPGA
 	bool "XTFPGA"
@@ -11,7 +11,7 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_HWEIGHT=y
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
-CONFIG_NO_IOPORT=y
+CONFIG_NO_IOPORT_MAP=y
 CONFIG_HZ=100
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
 CONFIG_CONSTRUCTORS=y
@@ -11,7 +11,7 @@ CONFIG_GENERIC_FIND_NEXT_BIT=y
 CONFIG_GENERIC_HWEIGHT=y
 # CONFIG_ARCH_HAS_ILOG2_U32 is not set
 # CONFIG_ARCH_HAS_ILOG2_U64 is not set
-CONFIG_NO_IOPORT=y
+CONFIG_NO_IOPORT_MAP=y
 CONFIG_HZ=100
 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"

|
@ -15,6 +15,16 @@ config ZRAM
|
|||||||
|
|
||||||
See zram.txt for more information.
|
See zram.txt for more information.
|
||||||
|
|
||||||
|
config ZRAM_LZ4_COMPRESS
|
||||||
|
bool "Enable LZ4 algorithm support"
|
||||||
|
depends on ZRAM
|
||||||
|
select LZ4_COMPRESS
|
||||||
|
select LZ4_DECOMPRESS
|
||||||
|
default n
|
||||||
|
help
|
||||||
|
This option enables LZ4 compression algorithm support. Compression
|
||||||
|
algorithm can be changed using `comp_algorithm' device attribute.
|
||||||
|
|
||||||
config ZRAM_DEBUG
|
config ZRAM_DEBUG
|
||||||
bool "Compressed RAM block device debug support"
|
bool "Compressed RAM block device debug support"
|
||||||
depends on ZRAM
|
depends on ZRAM
|
||||||
|
@ -1,3 +1,5 @@
|
|||||||
zram-y := zram_drv.o
|
zram-y := zcomp_lzo.o zcomp.o zram_drv.o
|
||||||
|
|
||||||
|
zram-$(CONFIG_ZRAM_LZ4_COMPRESS) += zcomp_lz4.o
|
||||||
|
|
||||||
obj-$(CONFIG_ZRAM) += zram.o
|
obj-$(CONFIG_ZRAM) += zram.o
|
||||||
|
drivers/block/zram/zcomp.c | 353 lines (new file)

/*
 * Copyright (C) 2014 Sergey Senozhatsky.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include "zcomp.h"
#include "zcomp_lzo.h"
#ifdef CONFIG_ZRAM_LZ4_COMPRESS
#include "zcomp_lz4.h"
#endif

/*
 * single zcomp_strm backend
 */
struct zcomp_strm_single {
	struct mutex strm_lock;
	struct zcomp_strm *zstrm;
};

/*
 * multi zcomp_strm backend
 */
struct zcomp_strm_multi {
	/* protect strm list */
	spinlock_t strm_lock;
	/* max possible number of zstrm streams */
	int max_strm;
	/* number of available zstrm streams */
	int avail_strm;
	/* list of available strms */
	struct list_head idle_strm;
	wait_queue_head_t strm_wait;
};

static struct zcomp_backend *backends[] = {
	&zcomp_lzo,
#ifdef CONFIG_ZRAM_LZ4_COMPRESS
	&zcomp_lz4,
#endif
	NULL
};

static struct zcomp_backend *find_backend(const char *compress)
{
	int i = 0;
	while (backends[i]) {
		if (sysfs_streq(compress, backends[i]->name))
			break;
		i++;
	}
	return backends[i];
}

static void zcomp_strm_free(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	if (zstrm->private)
		comp->backend->destroy(zstrm->private);
	free_pages((unsigned long)zstrm->buffer, 1);
	kfree(zstrm);
}

/*
 * allocate new zcomp_strm structure with ->private initialized by
 * backend, return NULL on error
 */
static struct zcomp_strm *zcomp_strm_alloc(struct zcomp *comp)
{
	struct zcomp_strm *zstrm = kmalloc(sizeof(*zstrm), GFP_KERNEL);
	if (!zstrm)
		return NULL;

	zstrm->private = comp->backend->create();
	/*
	 * allocate 2 pages. 1 for compressed data, plus 1 extra for the
	 * case when compressed size is larger than the original one
	 */
	zstrm->buffer = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!zstrm->private || !zstrm->buffer) {
		zcomp_strm_free(comp, zstrm);
		zstrm = NULL;
	}
	return zstrm;
}

/*
 * get idle zcomp_strm or wait until other process release
 * (zcomp_strm_release()) one for us
 */
static struct zcomp_strm *zcomp_strm_multi_find(struct zcomp *comp)
{
	struct zcomp_strm_multi *zs = comp->stream;
	struct zcomp_strm *zstrm;

	while (1) {
		spin_lock(&zs->strm_lock);
		if (!list_empty(&zs->idle_strm)) {
			zstrm = list_entry(zs->idle_strm.next,
					struct zcomp_strm, list);
			list_del(&zstrm->list);
			spin_unlock(&zs->strm_lock);
			return zstrm;
		}
		/* zstrm streams limit reached, wait for idle stream */
		if (zs->avail_strm >= zs->max_strm) {
			spin_unlock(&zs->strm_lock);
			wait_event(zs->strm_wait, !list_empty(&zs->idle_strm));
			continue;
		}
		/* allocate new zstrm stream */
		zs->avail_strm++;
		spin_unlock(&zs->strm_lock);

		zstrm = zcomp_strm_alloc(comp);
		if (!zstrm) {
			spin_lock(&zs->strm_lock);
			zs->avail_strm--;
			spin_unlock(&zs->strm_lock);
			wait_event(zs->strm_wait, !list_empty(&zs->idle_strm));
			continue;
		}
		break;
	}
	return zstrm;
}

/* add stream back to idle list and wake up waiter or free the stream */
static void zcomp_strm_multi_release(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	struct zcomp_strm_multi *zs = comp->stream;

	spin_lock(&zs->strm_lock);
	if (zs->avail_strm <= zs->max_strm) {
		list_add(&zstrm->list, &zs->idle_strm);
		spin_unlock(&zs->strm_lock);
		wake_up(&zs->strm_wait);
		return;
	}

	zs->avail_strm--;
	spin_unlock(&zs->strm_lock);
	zcomp_strm_free(comp, zstrm);
}

/* change max_strm limit */
static bool zcomp_strm_multi_set_max_streams(struct zcomp *comp, int num_strm)
{
	struct zcomp_strm_multi *zs = comp->stream;
	struct zcomp_strm *zstrm;

	spin_lock(&zs->strm_lock);
	zs->max_strm = num_strm;
	/*
	 * if user has lowered the limit and there are idle streams,
	 * immediately free as much streams (and memory) as we can.
	 */
	while (zs->avail_strm > num_strm && !list_empty(&zs->idle_strm)) {
		zstrm = list_entry(zs->idle_strm.next,
				struct zcomp_strm, list);
		list_del(&zstrm->list);
		zcomp_strm_free(comp, zstrm);
		zs->avail_strm--;
	}
	spin_unlock(&zs->strm_lock);
	return true;
}

static void zcomp_strm_multi_destroy(struct zcomp *comp)
{
	struct zcomp_strm_multi *zs = comp->stream;
	struct zcomp_strm *zstrm;

	while (!list_empty(&zs->idle_strm)) {
		zstrm = list_entry(zs->idle_strm.next,
				struct zcomp_strm, list);
		list_del(&zstrm->list);
		zcomp_strm_free(comp, zstrm);
	}
	kfree(zs);
}

static int zcomp_strm_multi_create(struct zcomp *comp, int max_strm)
{
	struct zcomp_strm *zstrm;
	struct zcomp_strm_multi *zs;

	comp->destroy = zcomp_strm_multi_destroy;
	comp->strm_find = zcomp_strm_multi_find;
	comp->strm_release = zcomp_strm_multi_release;
	comp->set_max_streams = zcomp_strm_multi_set_max_streams;
	zs = kmalloc(sizeof(struct zcomp_strm_multi), GFP_KERNEL);
	if (!zs)
		return -ENOMEM;

	comp->stream = zs;
	spin_lock_init(&zs->strm_lock);
	INIT_LIST_HEAD(&zs->idle_strm);
	init_waitqueue_head(&zs->strm_wait);
	zs->max_strm = max_strm;
	zs->avail_strm = 1;

	zstrm = zcomp_strm_alloc(comp);
	if (!zstrm) {
		kfree(zs);
		return -ENOMEM;
	}
	list_add(&zstrm->list, &zs->idle_strm);
	return 0;
}

static struct zcomp_strm *zcomp_strm_single_find(struct zcomp *comp)
{
	struct zcomp_strm_single *zs = comp->stream;
	mutex_lock(&zs->strm_lock);
	return zs->zstrm;
}

static void zcomp_strm_single_release(struct zcomp *comp,
		struct zcomp_strm *zstrm)
{
	struct zcomp_strm_single *zs = comp->stream;
	mutex_unlock(&zs->strm_lock);
}

static bool zcomp_strm_single_set_max_streams(struct zcomp *comp, int num_strm)
{
	/* zcomp_strm_single support only max_comp_streams == 1 */
	return false;
}

static void zcomp_strm_single_destroy(struct zcomp *comp)
{
	struct zcomp_strm_single *zs = comp->stream;
	zcomp_strm_free(comp, zs->zstrm);
	kfree(zs);
}

static int zcomp_strm_single_create(struct zcomp *comp)
{
	struct zcomp_strm_single *zs;

	comp->destroy = zcomp_strm_single_destroy;
	comp->strm_find = zcomp_strm_single_find;
	comp->strm_release = zcomp_strm_single_release;
	comp->set_max_streams = zcomp_strm_single_set_max_streams;
	zs = kmalloc(sizeof(struct zcomp_strm_single), GFP_KERNEL);
	if (!zs)
		return -ENOMEM;

	comp->stream = zs;
	mutex_init(&zs->strm_lock);
	zs->zstrm = zcomp_strm_alloc(comp);
	if (!zs->zstrm) {
		kfree(zs);
		return -ENOMEM;
	}
	return 0;
}

/* show available compressors */
ssize_t zcomp_available_show(const char *comp, char *buf)
{
	ssize_t sz = 0;
	int i = 0;

	while (backends[i]) {
		if (sysfs_streq(comp, backends[i]->name))
			sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
					"[%s] ", backends[i]->name);
		else
			sz += scnprintf(buf + sz, PAGE_SIZE - sz - 2,
					"%s ", backends[i]->name);
		i++;
	}
	sz += scnprintf(buf + sz, PAGE_SIZE - sz, "\n");
	return sz;
}

bool zcomp_set_max_streams(struct zcomp *comp, int num_strm)
{
	return comp->set_max_streams(comp, num_strm);
}

struct zcomp_strm *zcomp_strm_find(struct zcomp *comp)
{
	return comp->strm_find(comp);
}

void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm)
{
	comp->strm_release(comp, zstrm);
}

int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
		const unsigned char *src, size_t *dst_len)
{
	return comp->backend->compress(src, zstrm->buffer, dst_len,
			zstrm->private);
}

int zcomp_decompress(struct zcomp *comp, const unsigned char *src,
		size_t src_len, unsigned char *dst)
{
	return comp->backend->decompress(src, src_len, dst);
}

void zcomp_destroy(struct zcomp *comp)
{
	comp->destroy(comp);
	kfree(comp);
}

/*
 * search available compressors for requested algorithm.
 * allocate new zcomp and initialize it. return compressing
 * backend pointer or ERR_PTR if things went bad. ERR_PTR(-EINVAL)
 * if requested algorithm is not supported, ERR_PTR(-ENOMEM) in
 * case of allocation error.
 */
struct zcomp *zcomp_create(const char *compress, int max_strm)
{
	struct zcomp *comp;
	struct zcomp_backend *backend;

	backend = find_backend(compress);
	if (!backend)
		return ERR_PTR(-EINVAL);

	comp = kzalloc(sizeof(struct zcomp), GFP_KERNEL);
	if (!comp)
		return ERR_PTR(-ENOMEM);

	comp->backend = backend;
	if (max_strm > 1)
		zcomp_strm_multi_create(comp, max_strm);
	else
		zcomp_strm_single_create(comp);
	if (!comp->stream) {
		kfree(comp);
		return ERR_PTR(-ENOMEM);
	}
	return comp;
}
drivers/block/zram/zcomp.h | 68 lines (new file)

/*
 * Copyright (C) 2014 Sergey Senozhatsky.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _ZCOMP_H_
#define _ZCOMP_H_

#include <linux/mutex.h>

struct zcomp_strm {
	/* compression/decompression buffer */
	void *buffer;
	/*
	 * The private data of the compression stream, only compression
	 * stream backend can touch this (e.g. compression algorithm
	 * working memory)
	 */
	void *private;
	/* used in multi stream backend, protected by backend strm_lock */
	struct list_head list;
};

/* static compression backend */
struct zcomp_backend {
	int (*compress)(const unsigned char *src, unsigned char *dst,
			size_t *dst_len, void *private);

	int (*decompress)(const unsigned char *src, size_t src_len,
			unsigned char *dst);

	void *(*create)(void);
	void (*destroy)(void *private);

	const char *name;
};

/* dynamic per-device compression frontend */
struct zcomp {
	void *stream;
	struct zcomp_backend *backend;

	struct zcomp_strm *(*strm_find)(struct zcomp *comp);
	void (*strm_release)(struct zcomp *comp, struct zcomp_strm *zstrm);
	bool (*set_max_streams)(struct zcomp *comp, int num_strm);
	void (*destroy)(struct zcomp *comp);
};

ssize_t zcomp_available_show(const char *comp, char *buf);

struct zcomp *zcomp_create(const char *comp, int max_strm);
void zcomp_destroy(struct zcomp *comp);

struct zcomp_strm *zcomp_strm_find(struct zcomp *comp);
void zcomp_strm_release(struct zcomp *comp, struct zcomp_strm *zstrm);

int zcomp_compress(struct zcomp *comp, struct zcomp_strm *zstrm,
		const unsigned char *src, size_t *dst_len);

int zcomp_decompress(struct zcomp *comp, const unsigned char *src,
		size_t src_len, unsigned char *dst);

bool zcomp_set_max_streams(struct zcomp *comp, int num_strm);
#endif /* _ZCOMP_H_ */
drivers/block/zram/zcomp_lz4.c
Normal file
47
drivers/block/zram/zcomp_lz4.c
Normal file
@ -0,0 +1,47 @@
|
|||||||
|
/*
|
||||||
|
* Copyright (C) 2014 Sergey Senozhatsky.
|
||||||
|
*
|
||||||
|
* This program is free software; you can redistribute it and/or
|
||||||
|
* modify it under the terms of the GNU General Public License
|
||||||
|
* as published by the Free Software Foundation; either version
|
||||||
|
* 2 of the License, or (at your option) any later version.
|
||||||
|
*/
|
||||||
|
|
||||||
|
#include <linux/kernel.h>
|
||||||
|
#include <linux/slab.h>
|
||||||
|
#include <linux/lz4.h>
|
||||||
|
|
||||||
|
#include "zcomp_lz4.h"
|
||||||
|
|
||||||
|
static void *zcomp_lz4_create(void)
|
||||||
|
{
|
||||||
|
return kzalloc(LZ4_MEM_COMPRESS, GFP_KERNEL);
|
||||||
|
}
|
||||||
|
|
||||||
|
static void zcomp_lz4_destroy(void *private)
|
||||||
|
{
|
||||||
|
kfree(private);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int zcomp_lz4_compress(const unsigned char *src, unsigned char *dst,
|
||||||
|
size_t *dst_len, void *private)
|
||||||
|
{
|
||||||
|
/* return : Success if return 0 */
|
||||||
|
return lz4_compress(src, PAGE_SIZE, dst, dst_len, private);
|
||||||
|
}
|
||||||
|
|
||||||
|
static int zcomp_lz4_decompress(const unsigned char *src, size_t src_len,
|
||||||
|
unsigned char *dst)
|
||||||
|
{
|
||||||
|
size_t dst_len = PAGE_SIZE;
|
||||||
|
/* return : Success if return 0 */
|
||||||
|
return lz4_decompress_unknownoutputsize(src, src_len, dst, &dst_len);
|
||||||
|
}
|
||||||
|
|
||||||
|
struct zcomp_backend zcomp_lz4 = {
|
||||||
|
.compress = zcomp_lz4_compress,
|
||||||
|
.decompress = zcomp_lz4_decompress,
|
||||||
|
.create = zcomp_lz4_create,
|
||||||
|
.destroy = zcomp_lz4_destroy,
|
||||||
|
.name = "lz4",
|
||||||
|
};
|
drivers/block/zram/zcomp_lz4.h | 17 lines (new file)

/*
 * Copyright (C) 2014 Sergey Senozhatsky.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _ZCOMP_LZ4_H_
#define _ZCOMP_LZ4_H_

#include "zcomp.h"

extern struct zcomp_backend zcomp_lz4;

#endif /* _ZCOMP_LZ4_H_ */
drivers/block/zram/zcomp_lzo.c | 47 lines (new file)

/*
 * Copyright (C) 2014 Sergey Senozhatsky.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/lzo.h>

#include "zcomp_lzo.h"

static void *lzo_create(void)
{
	return kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
}

static void lzo_destroy(void *private)
{
	kfree(private);
}

static int lzo_compress(const unsigned char *src, unsigned char *dst,
		size_t *dst_len, void *private)
{
	int ret = lzo1x_1_compress(src, PAGE_SIZE, dst, dst_len, private);
	return ret == LZO_E_OK ? 0 : ret;
}

static int lzo_decompress(const unsigned char *src, size_t src_len,
		unsigned char *dst)
{
	size_t dst_len = PAGE_SIZE;
	int ret = lzo1x_decompress_safe(src, src_len, dst, &dst_len);
	return ret == LZO_E_OK ? 0 : ret;
}

struct zcomp_backend zcomp_lzo = {
	.compress = lzo_compress,
	.decompress = lzo_decompress,
	.create = lzo_create,
	.destroy = lzo_destroy,
	.name = "lzo",
};
drivers/block/zram/zcomp_lzo.h | 17 lines (new file)

/*
 * Copyright (C) 2014 Sergey Senozhatsky.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#ifndef _ZCOMP_LZO_H_
#define _ZCOMP_LZO_H_

#include "zcomp.h"

extern struct zcomp_backend zcomp_lzo;

#endif /* _ZCOMP_LZO_H_ */
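zcomp_lzo.c and zcomp_lz4.c above show how small a backend is: four callbacks plus a name. As a hedged sketch (not part of this patch set), a hypothetical extra backend would look like the stub below; to be selectable it would additionally need an entry in the backends[] table in zcomp.c and, like LZ4, a Kconfig/Makefile hook.

/*
 * Hypothetical "null" backend, for illustration only.  zcomp_strm_alloc()
 * treats a NULL ->private as an allocation failure, so create() returns a
 * harmless non-NULL token even though this backend keeps no state.
 */
#include <linux/mm.h>
#include <linux/string.h>

#include "zcomp.h"

static void *null_create(void)
{
	return (void *)1;
}

static void null_destroy(void *private)
{
}

static int null_compress(const unsigned char *src, unsigned char *dst,
		size_t *dst_len, void *private)
{
	memcpy(dst, src, PAGE_SIZE);
	*dst_len = PAGE_SIZE;
	return 0;
}

static int null_decompress(const unsigned char *src, size_t src_len,
		unsigned char *dst)
{
	memcpy(dst, src, src_len);
	return 0;
}

struct zcomp_backend zcomp_null = {
	.compress	= null_compress,
	.decompress	= null_decompress,
	.create		= null_create,
	.destroy	= null_destroy,
	.name		= "null",
};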
@ -29,19 +29,36 @@
|
|||||||
#include <linux/genhd.h>
|
#include <linux/genhd.h>
|
||||||
#include <linux/highmem.h>
|
#include <linux/highmem.h>
|
||||||
#include <linux/slab.h>
|
#include <linux/slab.h>
|
||||||
#include <linux/lzo.h>
|
|
||||||
#include <linux/string.h>
|
#include <linux/string.h>
|
||||||
#include <linux/vmalloc.h>
|
#include <linux/vmalloc.h>
|
||||||
|
#include <linux/err.h>
|
||||||
|
|
||||||
#include "zram_drv.h"
|
#include "zram_drv.h"
|
||||||
|
|
||||||
/* Globals */
|
/* Globals */
|
||||||
static int zram_major;
|
static int zram_major;
|
||||||
static struct zram *zram_devices;
|
static struct zram *zram_devices;
|
||||||
|
static const char *default_compressor = "lzo";
|
||||||
|
|
||||||
/* Module params (documentation at end) */
|
/* Module params (documentation at end) */
|
||||||
static unsigned int num_devices = 1;
|
static unsigned int num_devices = 1;
|
||||||
|
|
||||||
|
#define ZRAM_ATTR_RO(name) \
|
||||||
|
static ssize_t zram_attr_##name##_show(struct device *d, \
|
||||||
|
struct device_attribute *attr, char *b) \
|
||||||
|
{ \
|
||||||
|
struct zram *zram = dev_to_zram(d); \
|
||||||
|
return scnprintf(b, PAGE_SIZE, "%llu\n", \
|
||||||
|
(u64)atomic64_read(&zram->stats.name)); \
|
||||||
|
} \
|
||||||
|
static struct device_attribute dev_attr_##name = \
|
||||||
|
__ATTR(name, S_IRUGO, zram_attr_##name##_show, NULL);
|
||||||
|
|
||||||
|
static inline int init_done(struct zram *zram)
|
||||||
|
{
|
||||||
|
return zram->meta != NULL;
|
||||||
|
}
|
||||||
|
|
||||||
static inline struct zram *dev_to_zram(struct device *dev)
|
static inline struct zram *dev_to_zram(struct device *dev)
|
||||||
{
|
{
|
||||||
return (struct zram *)dev_to_disk(dev)->private_data;
|
return (struct zram *)dev_to_disk(dev)->private_data;
|
||||||
@@ -52,59 +69,20 @@ static ssize_t disksize_show(struct device *dev,
 {
 	struct zram *zram = dev_to_zram(dev);
 
-	return sprintf(buf, "%llu\n", zram->disksize);
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", zram->disksize);
 }
 
 static ssize_t initstate_show(struct device *dev,
 		struct device_attribute *attr, char *buf)
 {
+	u32 val;
 	struct zram *zram = dev_to_zram(dev);
 
-	return sprintf(buf, "%u\n", zram->init_done);
-}
-
-static ssize_t num_reads_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct zram *zram = dev_to_zram(dev);
-
-	return sprintf(buf, "%llu\n",
-			(u64)atomic64_read(&zram->stats.num_reads));
-}
-
-static ssize_t num_writes_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct zram *zram = dev_to_zram(dev);
-
-	return sprintf(buf, "%llu\n",
-			(u64)atomic64_read(&zram->stats.num_writes));
-}
-
-static ssize_t invalid_io_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct zram *zram = dev_to_zram(dev);
-
-	return sprintf(buf, "%llu\n",
-			(u64)atomic64_read(&zram->stats.invalid_io));
-}
-
-static ssize_t notify_free_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct zram *zram = dev_to_zram(dev);
-
-	return sprintf(buf, "%llu\n",
-			(u64)atomic64_read(&zram->stats.notify_free));
-}
-
-static ssize_t zero_pages_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct zram *zram = dev_to_zram(dev);
-
-	return sprintf(buf, "%u\n", atomic_read(&zram->stats.pages_zero));
+	down_read(&zram->init_lock);
+	val = init_done(zram);
+	up_read(&zram->init_lock);
+
+	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
 }
 
 static ssize_t orig_data_size_show(struct device *dev,
@@ -112,17 +90,8 @@ static ssize_t orig_data_size_show(struct device *dev,
 {
 	struct zram *zram = dev_to_zram(dev);
 
-	return sprintf(buf, "%llu\n",
-		(u64)(atomic_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
-}
-
-static ssize_t compr_data_size_show(struct device *dev,
-		struct device_attribute *attr, char *buf)
-{
-	struct zram *zram = dev_to_zram(dev);
-
-	return sprintf(buf, "%llu\n",
-			(u64)atomic64_read(&zram->stats.compr_size));
+	return scnprintf(buf, PAGE_SIZE, "%llu\n",
+		(u64)(atomic64_read(&zram->stats.pages_stored)) << PAGE_SHIFT);
 }
 
 static ssize_t mem_used_total_show(struct device *dev,
@@ -133,11 +102,81 @@ static ssize_t mem_used_total_show(struct device *dev,
 	struct zram_meta *meta = zram->meta;
 
 	down_read(&zram->init_lock);
-	if (zram->init_done)
+	if (init_done(zram))
 		val = zs_get_total_size_bytes(meta->mem_pool);
 	up_read(&zram->init_lock);
 
-	return sprintf(buf, "%llu\n", val);
+	return scnprintf(buf, PAGE_SIZE, "%llu\n", val);
+}
+
+static ssize_t max_comp_streams_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	int val;
+	struct zram *zram = dev_to_zram(dev);
+
+	down_read(&zram->init_lock);
+	val = zram->max_comp_streams;
+	up_read(&zram->init_lock);
+
+	return scnprintf(buf, PAGE_SIZE, "%d\n", val);
+}
+
+static ssize_t max_comp_streams_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	int num;
+	struct zram *zram = dev_to_zram(dev);
+	int ret;
+
+	ret = kstrtoint(buf, 0, &num);
+	if (ret < 0)
+		return ret;
+	if (num < 1)
+		return -EINVAL;
+
+	down_write(&zram->init_lock);
+	if (init_done(zram)) {
+		if (!zcomp_set_max_streams(zram->comp, num)) {
+			pr_info("Cannot change max compression streams\n");
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	zram->max_comp_streams = num;
+	ret = len;
+out:
+	up_write(&zram->init_lock);
+	return ret;
+}
+
+static ssize_t comp_algorithm_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	size_t sz;
+	struct zram *zram = dev_to_zram(dev);
+
+	down_read(&zram->init_lock);
+	sz = zcomp_available_show(zram->compressor, buf);
+	up_read(&zram->init_lock);
+
+	return sz;
+}
+
+static ssize_t comp_algorithm_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t len)
+{
+	struct zram *zram = dev_to_zram(dev);
+	down_write(&zram->init_lock);
+	if (init_done(zram)) {
+		up_write(&zram->init_lock);
+		pr_info("Can't change algorithm for initialized device\n");
+		return -EBUSY;
+	}
+	strlcpy(zram->compressor, buf, sizeof(zram->compressor));
+	up_write(&zram->init_lock);
+	return len;
 }
 
 /* flag operations needs meta->tb_lock */
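
These two handlers are driven entirely through sysfs. A minimal userspace sketch of the intended sequence -- pick the algorithm and stream count before setting disksize -- follows; the device name zram0 and the value 4 are illustrative assumptions, not taken from the patch.

/* Userspace sketch: configure zram0 before initializing it (illustrative) */
#include <stdio.h>

int main(void)
{
	FILE *f;

	f = fopen("/sys/block/zram0/comp_algorithm", "w");
	if (!f)
		return 1;
	fputs("lzo", f);	/* must happen before disksize is set */
	fclose(f);

	f = fopen("/sys/block/zram0/max_comp_streams", "w");
	if (!f)
		return 1;
	fputs("4", f);		/* allow 4 concurrent compression streams */
	fclose(f);
	return 0;
}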
@@ -192,8 +231,6 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
 static void zram_meta_free(struct zram_meta *meta)
 {
 	zs_destroy_pool(meta->mem_pool);
-	kfree(meta->compress_workmem);
-	free_pages((unsigned long)meta->compress_buffer, 1);
 	vfree(meta->table);
 	kfree(meta);
 }
@@ -205,22 +242,11 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
 	if (!meta)
 		goto out;
 
-	meta->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
-	if (!meta->compress_workmem)
-		goto free_meta;
-
-	meta->compress_buffer =
-		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
-	if (!meta->compress_buffer) {
-		pr_err("Error allocating compressor buffer space\n");
-		goto free_workmem;
-	}
-
 	num_pages = disksize >> PAGE_SHIFT;
 	meta->table = vzalloc(num_pages * sizeof(*meta->table));
 	if (!meta->table) {
 		pr_err("Error allocating zram address table\n");
-		goto free_buffer;
+		goto free_meta;
 	}
 
 	meta->mem_pool = zs_create_pool(GFP_NOIO | __GFP_HIGHMEM);
@@ -230,15 +256,10 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
 	}
 
 	rwlock_init(&meta->tb_lock);
-	mutex_init(&meta->buffer_lock);
 	return meta;
 
 free_table:
 	vfree(meta->table);
-free_buffer:
-	free_pages((unsigned long)meta->compress_buffer, 1);
-free_workmem:
-	kfree(meta->compress_workmem);
 free_meta:
 	kfree(meta);
 	meta = NULL;
@@ -288,7 +309,6 @@ static void zram_free_page(struct zram *zram, size_t index)
 {
 	struct zram_meta *meta = zram->meta;
 	unsigned long handle = meta->table[index].handle;
-	u16 size = meta->table[index].size;
 
 	if (unlikely(!handle)) {
 		/*
@@ -297,21 +317,15 @@ static void zram_free_page(struct zram *zram, size_t index)
 		 */
 		if (zram_test_flag(meta, index, ZRAM_ZERO)) {
 			zram_clear_flag(meta, index, ZRAM_ZERO);
-			atomic_dec(&zram->stats.pages_zero);
+			atomic64_dec(&zram->stats.zero_pages);
 		}
 		return;
 	}
 
-	if (unlikely(size > max_zpage_size))
-		atomic_dec(&zram->stats.bad_compress);
-
 	zs_free(meta->mem_pool, handle);
 
-	if (size <= PAGE_SIZE / 2)
-		atomic_dec(&zram->stats.good_compress);
-
-	atomic64_sub(meta->table[index].size, &zram->stats.compr_size);
-	atomic_dec(&zram->stats.pages_stored);
+	atomic64_sub(meta->table[index].size, &zram->stats.compr_data_size);
+	atomic64_dec(&zram->stats.pages_stored);
 
 	meta->table[index].handle = 0;
 	meta->table[index].size = 0;
@@ -319,8 +333,7 @@ static void zram_free_page(struct zram *zram, size_t index)
 
 static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 {
-	int ret = LZO_E_OK;
-	size_t clen = PAGE_SIZE;
+	int ret = 0;
 	unsigned char *cmem;
 	struct zram_meta *meta = zram->meta;
 	unsigned long handle;
@@ -340,12 +353,12 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	if (size == PAGE_SIZE)
 		copy_page(mem, cmem);
 	else
-		ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
+		ret = zcomp_decompress(zram->comp, cmem, size, mem);
 	zs_unmap_object(meta->mem_pool, handle);
 	read_unlock(&meta->tb_lock);
 
 	/* Should NEVER happen. Return bio error if it does. */
-	if (unlikely(ret != LZO_E_OK)) {
+	if (unlikely(ret)) {
 		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
 		atomic64_inc(&zram->stats.failed_reads);
 		return ret;
@@ -388,7 +401,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 
 	ret = zram_decompress_page(zram, uncmem, index);
 	/* Should NEVER happen. Return bio error if it does. */
-	if (unlikely(ret != LZO_E_OK))
+	if (unlikely(ret))
 		goto out_cleanup;
 
 	if (is_partial_io(bvec))
@@ -413,11 +426,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	struct page *page;
 	unsigned char *user_mem, *cmem, *src, *uncmem = NULL;
 	struct zram_meta *meta = zram->meta;
+	struct zcomp_strm *zstrm;
 	bool locked = false;
 
 	page = bvec->bv_page;
-	src = meta->compress_buffer;
 
 	if (is_partial_io(bvec)) {
 		/*
 		 * This is a partial IO. We need to read the full page
@@ -433,7 +445,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		goto out;
 	}
 
-	mutex_lock(&meta->buffer_lock);
+	zstrm = zcomp_strm_find(zram->comp);
 	locked = true;
 	user_mem = kmap_atomic(page);
 
@@ -454,28 +466,25 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		zram_set_flag(meta, index, ZRAM_ZERO);
 		write_unlock(&zram->meta->tb_lock);
 
-		atomic_inc(&zram->stats.pages_zero);
+		atomic64_inc(&zram->stats.zero_pages);
 		ret = 0;
 		goto out;
 	}
 
-	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
-			       meta->compress_workmem);
+	ret = zcomp_compress(zram->comp, zstrm, uncmem, &clen);
 	if (!is_partial_io(bvec)) {
 		kunmap_atomic(user_mem);
 		user_mem = NULL;
 		uncmem = NULL;
 	}
 
-	if (unlikely(ret != LZO_E_OK)) {
+	if (unlikely(ret)) {
 		pr_err("Compression failed! err=%d\n", ret);
 		goto out;
 	}
+	src = zstrm->buffer;
 	if (unlikely(clen > max_zpage_size)) {
-		atomic_inc(&zram->stats.bad_compress);
 		clen = PAGE_SIZE;
-		src = NULL;
 		if (is_partial_io(bvec))
 			src = uncmem;
 	}
@@ -497,6 +506,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		memcpy(cmem, src, clen);
 	}
 
+	zcomp_strm_release(zram->comp, zstrm);
+	locked = false;
 	zs_unmap_object(meta->mem_pool, handle);
 
 	/*
@@ -511,49 +522,88 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	write_unlock(&zram->meta->tb_lock);
 
 	/* Update stats */
-	atomic64_add(clen, &zram->stats.compr_size);
-	atomic_inc(&zram->stats.pages_stored);
-	if (clen <= PAGE_SIZE / 2)
-		atomic_inc(&zram->stats.good_compress);
+	atomic64_add(clen, &zram->stats.compr_data_size);
+	atomic64_inc(&zram->stats.pages_stored);
 
 out:
 	if (locked)
-		mutex_unlock(&meta->buffer_lock);
+		zcomp_strm_release(zram->comp, zstrm);
 	if (is_partial_io(bvec))
 		kfree(uncmem);
 
 	if (ret)
 		atomic64_inc(&zram->stats.failed_writes);
 	return ret;
 }
 
 static int zram_bvec_rw(struct zram *zram, struct bio_vec *bvec, u32 index,
-			int offset, struct bio *bio, int rw)
+			int offset, struct bio *bio)
 {
 	int ret;
+	int rw = bio_data_dir(bio);
 
-	if (rw == READ)
+	if (rw == READ) {
+		atomic64_inc(&zram->stats.num_reads);
 		ret = zram_bvec_read(zram, bvec, index, offset, bio);
-	else
+	} else {
+		atomic64_inc(&zram->stats.num_writes);
 		ret = zram_bvec_write(zram, bvec, index, offset);
+	}
 
 	return ret;
 }
 
+/*
+ * zram_bio_discard - handler on discard request
+ * @index: physical block index in PAGE_SIZE units
+ * @offset: byte offset within physical block
+ */
+static void zram_bio_discard(struct zram *zram, u32 index,
+			     int offset, struct bio *bio)
+{
+	size_t n = bio->bi_iter.bi_size;
+
+	/*
+	 * zram manages data in physical block size units. Because logical block
+	 * size isn't identical with physical block size on some arch, we
+	 * could get a discard request pointing to a specific offset within a
+	 * certain physical block. Although we can handle this request by
+	 * reading that physiclal block and decompressing and partially zeroing
+	 * and re-compressing and then re-storing it, this isn't reasonable
+	 * because our intent with a discard request is to save memory. So
+	 * skipping this logical block is appropriate here.
+	 */
+	if (offset) {
+		if (n < offset)
+			return;
+
+		n -= offset;
+		index++;
+	}
+
+	while (n >= PAGE_SIZE) {
+		/*
+		 * Discard request can be large so the lock hold times could be
+		 * lengthy. So take the lock once per page.
+		 */
+		write_lock(&zram->meta->tb_lock);
+		zram_free_page(zram, index);
+		write_unlock(&zram->meta->tb_lock);
+		index++;
+		n -= PAGE_SIZE;
+	}
+}
+
 static void zram_reset_device(struct zram *zram, bool reset_capacity)
 {
 	size_t index;
 	struct zram_meta *meta;
 
 	down_write(&zram->init_lock);
-	if (!zram->init_done) {
+	if (!init_done(zram)) {
 		up_write(&zram->init_lock);
 		return;
 	}
 
 	meta = zram->meta;
-	zram->init_done = 0;
 
 	/* Free all pages that are still in this zram device */
 	for (index = 0; index < zram->disksize >> PAGE_SHIFT; index++) {
 		unsigned long handle = meta->table[index].handle;
@@ -563,6 +613,9 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
 			zs_free(meta->mem_pool, handle);
 	}
 
+	zcomp_destroy(zram->comp);
+	zram->max_comp_streams = 1;
+
 	zram_meta_free(zram->meta);
 	zram->meta = NULL;
 	/* Reset stats */
@@ -574,37 +627,14 @@
 	up_write(&zram->init_lock);
 }
 
-static void zram_init_device(struct zram *zram, struct zram_meta *meta)
-{
-	if (zram->disksize > 2 * (totalram_pages << PAGE_SHIFT)) {
-		pr_info(
-		"There is little point creating a zram of greater than "
-		"twice the size of memory since we expect a 2:1 compression "
-		"ratio. Note that zram uses about 0.1%% of the size of "
-		"the disk when not in use so a huge zram is "
-		"wasteful.\n"
-		"\tMemory Size: %lu kB\n"
-		"\tSize you selected: %llu kB\n"
-		"Continuing anyway ...\n",
-		(totalram_pages << PAGE_SHIFT) >> 10, zram->disksize >> 10
-		);
-	}
-
-	/* zram devices sort of resembles non-rotational disks */
-	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
-
-	zram->meta = meta;
-	zram->init_done = 1;
-
-	pr_debug("Initialization done!\n");
-}
-
 static ssize_t disksize_store(struct device *dev,
 		struct device_attribute *attr, const char *buf, size_t len)
 {
 	u64 disksize;
+	struct zcomp *comp;
 	struct zram_meta *meta;
 	struct zram *zram = dev_to_zram(dev);
+	int err;
 
 	disksize = memparse(buf, NULL);
 	if (!disksize)
@@ -614,20 +644,35 @@ static ssize_t disksize_store(struct device *dev,
 	meta = zram_meta_alloc(disksize);
 	if (!meta)
 		return -ENOMEM;
-	down_write(&zram->init_lock);
-	if (zram->init_done) {
-		up_write(&zram->init_lock);
-		zram_meta_free(meta);
-		pr_info("Cannot change disksize for initialized device\n");
-		return -EBUSY;
+
+	comp = zcomp_create(zram->compressor, zram->max_comp_streams);
+	if (IS_ERR(comp)) {
+		pr_info("Cannot initialise %s compressing backend\n",
+				zram->compressor);
+		err = PTR_ERR(comp);
+		goto out_free_meta;
+	}
+
+	down_write(&zram->init_lock);
+	if (init_done(zram)) {
+		pr_info("Cannot change disksize for initialized device\n");
+		err = -EBUSY;
+		goto out_destroy_comp;
 	}
 
+	zram->meta = meta;
+	zram->comp = comp;
 	zram->disksize = disksize;
 	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
-	zram_init_device(zram, meta);
 	up_write(&zram->init_lock);
 
 	return len;
+
+out_destroy_comp:
+	up_write(&zram->init_lock);
+	zcomp_destroy(comp);
+out_free_meta:
+	zram_meta_free(meta);
+	return err;
 }
 
 static ssize_t reset_store(struct device *dev,
@@ -671,26 +716,23 @@ out:
 	return ret;
 }
 
-static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
+static void __zram_make_request(struct zram *zram, struct bio *bio)
 {
 	int offset;
 	u32 index;
 	struct bio_vec bvec;
 	struct bvec_iter iter;
 
-	switch (rw) {
-	case READ:
-		atomic64_inc(&zram->stats.num_reads);
-		break;
-	case WRITE:
-		atomic64_inc(&zram->stats.num_writes);
-		break;
-	}
-
 	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
 	offset = (bio->bi_iter.bi_sector &
 		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
 
+	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
+		zram_bio_discard(zram, index, offset, bio);
+		bio_endio(bio, 0);
+		return;
+	}
+
 	bio_for_each_segment(bvec, bio, iter) {
 		int max_transfer_size = PAGE_SIZE - offset;
 
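
For readers tracing the index/offset split above, here is a standalone sketch with the usual constants (512-byte sectors and 4 KiB pages are assumed for the example, not taken from this hunk):

/* Illustration of the bi_sector -> (page index, byte offset) split */
#include <stdio.h>

#define SECTOR_SHIFT		9
#define SECTORS_PER_PAGE_SHIFT	3
#define SECTORS_PER_PAGE	(1 << SECTORS_PER_PAGE_SHIFT)

int main(void)
{
	unsigned long long bi_sector = 21;	/* example starting sector */
	unsigned int index = bi_sector >> SECTORS_PER_PAGE_SHIFT;
	unsigned int offset = (bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;

	/* prints: page index 2, byte offset 2560 */
	printf("page index %u, byte offset %u\n", index, offset);
	return 0;
}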
@@ -705,16 +747,15 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
 			bv.bv_len = max_transfer_size;
 			bv.bv_offset = bvec.bv_offset;
 
-			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
+			if (zram_bvec_rw(zram, &bv, index, offset, bio) < 0)
 				goto out;
 
 			bv.bv_len = bvec.bv_len - max_transfer_size;
 			bv.bv_offset += max_transfer_size;
-			if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
+			if (zram_bvec_rw(zram, &bv, index + 1, 0, bio) < 0)
 				goto out;
 		} else
-			if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
-			    < 0)
+			if (zram_bvec_rw(zram, &bvec, index, offset, bio) < 0)
 				goto out;
 
 		update_position(&index, &offset, &bvec);
@@ -736,7 +777,7 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
 	struct zram *zram = queue->queuedata;
 
 	down_read(&zram->init_lock);
-	if (unlikely(!zram->init_done))
+	if (unlikely(!init_done(zram)))
 		goto error;
 
 	if (!valid_io_request(zram, bio)) {
@@ -744,7 +785,7 @@ static void zram_make_request(struct request_queue *queue, struct bio *bio)
 		goto error;
 	}
 
-	__zram_make_request(zram, bio, bio_data_dir(bio));
+	__zram_make_request(zram, bio);
 	up_read(&zram->init_lock);
 
 	return;
@@ -778,14 +819,21 @@ static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
 		disksize_show, disksize_store);
 static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
 static DEVICE_ATTR(reset, S_IWUSR, NULL, reset_store);
-static DEVICE_ATTR(num_reads, S_IRUGO, num_reads_show, NULL);
-static DEVICE_ATTR(num_writes, S_IRUGO, num_writes_show, NULL);
-static DEVICE_ATTR(invalid_io, S_IRUGO, invalid_io_show, NULL);
-static DEVICE_ATTR(notify_free, S_IRUGO, notify_free_show, NULL);
-static DEVICE_ATTR(zero_pages, S_IRUGO, zero_pages_show, NULL);
 static DEVICE_ATTR(orig_data_size, S_IRUGO, orig_data_size_show, NULL);
-static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
 static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);
+static DEVICE_ATTR(max_comp_streams, S_IRUGO | S_IWUSR,
+		max_comp_streams_show, max_comp_streams_store);
+static DEVICE_ATTR(comp_algorithm, S_IRUGO | S_IWUSR,
+		comp_algorithm_show, comp_algorithm_store);
+
+ZRAM_ATTR_RO(num_reads);
+ZRAM_ATTR_RO(num_writes);
+ZRAM_ATTR_RO(failed_reads);
+ZRAM_ATTR_RO(failed_writes);
+ZRAM_ATTR_RO(invalid_io);
+ZRAM_ATTR_RO(notify_free);
+ZRAM_ATTR_RO(zero_pages);
+ZRAM_ATTR_RO(compr_data_size);
 
 static struct attribute *zram_disk_attrs[] = {
 	&dev_attr_disksize.attr,
@@ -793,12 +841,16 @@ static struct attribute *zram_disk_attrs[] = {
 	&dev_attr_reset.attr,
 	&dev_attr_num_reads.attr,
 	&dev_attr_num_writes.attr,
+	&dev_attr_failed_reads.attr,
+	&dev_attr_failed_writes.attr,
 	&dev_attr_invalid_io.attr,
 	&dev_attr_notify_free.attr,
 	&dev_attr_zero_pages.attr,
 	&dev_attr_orig_data_size.attr,
 	&dev_attr_compr_data_size.attr,
 	&dev_attr_mem_used_total.attr,
+	&dev_attr_max_comp_streams.attr,
+	&dev_attr_comp_algorithm.attr,
 	NULL,
 };
 
@@ -839,7 +891,8 @@ static int create_device(struct zram *zram, int device_id)
 
 	/* Actual capacity set using syfs (/sys/block/zram<id>/disksize */
 	set_capacity(zram->disk, 0);
+	/* zram devices sort of resembles non-rotational disks */
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
 	/*
 	 * To ensure that we always get PAGE_SIZE aligned
 	 * and n*PAGE_SIZED sized I/O requests.
@@ -849,6 +902,21 @@ static int create_device(struct zram *zram, int device_id)
 					ZRAM_LOGICAL_BLOCK_SIZE);
 	blk_queue_io_min(zram->disk->queue, PAGE_SIZE);
 	blk_queue_io_opt(zram->disk->queue, PAGE_SIZE);
+	zram->disk->queue->limits.discard_granularity = PAGE_SIZE;
+	zram->disk->queue->limits.max_discard_sectors = UINT_MAX;
+	/*
+	 * zram_bio_discard() will clear all logical blocks if logical block
+	 * size is identical with physical block size(PAGE_SIZE). But if it is
+	 * different, we will skip discarding some parts of logical blocks in
+	 * the part of the request range which isn't aligned to physical block
+	 * size. So we can't ensure that all discarded logical blocks are
+	 * zeroed.
+	 */
+	if (ZRAM_LOGICAL_BLOCK_SIZE == PAGE_SIZE)
+		zram->disk->queue->limits.discard_zeroes_data = 1;
+	else
+		zram->disk->queue->limits.discard_zeroes_data = 0;
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, zram->disk->queue);
 
 	add_disk(zram->disk);
 
@@ -858,8 +926,9 @@ static int create_device(struct zram *zram, int device_id)
 		pr_warn("Error creating sysfs group");
 		goto out_free_disk;
 	}
-	zram->init_done = 0;
+	strlcpy(zram->compressor, default_compressor, sizeof(zram->compressor));
+	zram->meta = NULL;
+	zram->max_comp_streams = 1;
 	return 0;
 
 out_free_disk:
@@ -16,9 +16,10 @@
 #define _ZRAM_DRV_H_
 
 #include <linux/spinlock.h>
-#include <linux/mutex.h>
 #include <linux/zsmalloc.h>
 
+#include "zcomp.h"
+
 /*
  * Some arbitrary value. This is just to catch
  * invalid value for num_devices module parameter.
@@ -64,38 +65,33 @@ enum zram_pageflags {
 struct table {
 	unsigned long handle;
 	u16 size;	/* object size (excluding header) */
-	u8 count;	/* object ref count (not yet used) */
 	u8 flags;
 } __aligned(4);
 
 struct zram_stats {
-	atomic64_t compr_size;		/* compressed size of pages stored */
+	atomic64_t compr_data_size;	/* compressed size of pages stored */
 	atomic64_t num_reads;	/* failed + successful */
 	atomic64_t num_writes;	/* --do-- */
 	atomic64_t failed_reads;	/* should NEVER! happen */
 	atomic64_t failed_writes;	/* can happen when memory is too low */
 	atomic64_t invalid_io;	/* non-page-aligned I/O requests */
 	atomic64_t notify_free;	/* no. of swap slot free notifications */
-	atomic_t pages_zero;		/* no. of zero filled pages */
-	atomic_t pages_stored;	/* no. of pages currently stored */
-	atomic_t good_compress;	/* % of pages with compression ratio<=50% */
-	atomic_t bad_compress;	/* % of pages with compression ratio>=75% */
+	atomic64_t zero_pages;		/* no. of zero filled pages */
+	atomic64_t pages_stored;	/* no. of pages currently stored */
 };
 
 struct zram_meta {
 	rwlock_t tb_lock;	/* protect table */
-	void *compress_workmem;
-	void *compress_buffer;
 	struct table *table;
 	struct zs_pool *mem_pool;
-	struct mutex buffer_lock; /* protect compress buffers */
 };
 
 struct zram {
 	struct zram_meta *meta;
 	struct request_queue *queue;
 	struct gendisk *disk;
-	int init_done;
+	struct zcomp *comp;
 
 	/* Prevent concurrent execution of device init, reset and R/W request */
 	struct rw_semaphore init_lock;
 	/*
|
|||||||
* we can store in a disk.
|
* we can store in a disk.
|
||||||
*/
|
*/
|
||||||
u64 disksize; /* bytes */
|
u64 disksize; /* bytes */
|
||||||
|
int max_comp_streams;
|
||||||
struct zram_stats stats;
|
struct zram_stats stats;
|
||||||
|
char compressor[10];
|
||||||
};
|
};
|
||||||
#endif
|
#endif
|
||||||
|
@@ -74,7 +74,7 @@ config TCG_NSC
 
 config TCG_ATMEL
 	tristate "Atmel TPM Interface"
-	depends on PPC64 || HAS_IOPORT
+	depends on PPC64 || HAS_IOPORT_MAP
 	---help---
 	  If you have a TPM security chip from Atmel say Yes and it
 	  will be accessible from within Linux. To compile this driver
@@ -936,7 +936,7 @@ config I2C_ACORN
 
 config I2C_ELEKTOR
 	tristate "Elektor ISA card"
-	depends on ISA && HAS_IOPORT && BROKEN_ON_SMP
+	depends on ISA && HAS_IOPORT_MAP && BROKEN_ON_SMP
 	select I2C_ALGOPCF
 	help
 	  This supports the PCF8584 ISA bus I2C adapter. Say Y if you own
@@ -887,7 +887,7 @@ void guest_new_pagetable(struct lg_cpu *cpu, unsigned long pgtable)
 * _PAGE_ACCESSED then we can put a read-only PTE entry in immediately, and if
 * they set _PAGE_DIRTY then we can put a writable PTE entry in immediately.
 */
-static void do_set_pte(struct lg_cpu *cpu, int idx,
+static void __guest_set_pte(struct lg_cpu *cpu, int idx,
 		       unsigned long vaddr, pte_t gpte)
 {
 	/* Look up the matching shadow page directory entry. */
@@ -960,13 +960,13 @@ void guest_set_pte(struct lg_cpu *cpu,
 		unsigned int i;
 		for (i = 0; i < ARRAY_SIZE(cpu->lg->pgdirs); i++)
 			if (cpu->lg->pgdirs[i].pgdir)
-				do_set_pte(cpu, i, vaddr, gpte);
+				__guest_set_pte(cpu, i, vaddr, gpte);
 	} else {
 		/* Is this page table one we have a shadow for? */
 		int pgdir = find_pgdir(cpu->lg, gpgdir);
 		if (pgdir != ARRAY_SIZE(cpu->lg->pgdirs))
 			/* If so, do the update. */
-			do_set_pte(cpu, pgdir, vaddr, gpte);
+			__guest_set_pte(cpu, pgdir, vaddr, gpte);
 	}
 }
 
@@ -178,10 +178,10 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
 	hdr.cbrcnt = cbrcnt;
 	hdr.dsrcnt = dsrcnt;
 	hdr.cch_locked = cch_locked;
-	if (!ret && copy_to_user((void __user *)uhdr, &hdr, sizeof(hdr)))
-		ret = -EFAULT;
+	if (copy_to_user(uhdr, &hdr, sizeof(hdr)))
+		return -EFAULT;
 
-	return ret ? ret : bytes;
+	return bytes;
 }
 
 int gru_dump_chiplet_request(unsigned long arg)
@@ -39,7 +39,7 @@ config CAN_EMS_PCI
 config CAN_PEAK_PCMCIA
 	tristate "PEAK PCAN-PC Card"
 	depends on PCMCIA
-	depends on HAS_IOPORT
+	depends on HAS_IOPORT_MAP
 	---help---
 	  This driver is for the PCAN-PC Card PCMCIA adapter (1 or 2 channels)
 	  from PEAK-System (http://www.peak-system.com). To compile this
@@ -66,7 +66,7 @@ config PCMCIA_3C589
 
 config VORTEX
 	tristate "3c590/3c900 series (592/595/597) \"Vortex/Boomerang\" support"
-	depends on (PCI || EISA) && HAS_IOPORT
+	depends on (PCI || EISA) && HAS_IOPORT_MAP
 	select MII
 	---help---
 	  This option enables driver support for a large number of 10Mbps and
@@ -493,6 +493,7 @@ static int rionet_setup_netdev(struct rio_mport *mport, struct net_device *ndev)
 	ndev->netdev_ops = &rionet_netdev_ops;
 	ndev->mtu = RIO_MAX_MSG_SIZE - 14;
 	ndev->features = NETIF_F_LLTX;
+	SET_NETDEV_DEV(ndev, &mport->dev);
 	SET_ETHTOOL_OPS(ndev, &rionet_ethtool_ops);
 
 	spin_lock_init(&rnet->lock);
@@ -2256,6 +2256,7 @@ static int tsi721_setup_mport(struct tsi721_device *priv)
 	mport->phy_type = RIO_PHY_SERIAL;
 	mport->priv = (void *)priv;
 	mport->phys_efptr = 0x100;
+	mport->dev.parent = &pdev->dev;
 	priv->mport = mport;
 
 	INIT_LIST_HEAD(&mport->dbells);
@@ -644,6 +644,9 @@ enum tsi721_smsg_int_flag {
 
 #ifdef CONFIG_RAPIDIO_DMA_ENGINE
 
+#define TSI721_BDMA_BD_RING_SZ	128
+#define TSI721_BDMA_MAX_BCOUNT	(TSI721_DMAD_BCOUNT1 + 1)
+
 struct tsi721_tx_desc {
 	struct dma_async_tx_descriptor	txd;
 	struct tsi721_dma_desc	*hw_desc;
@@ -652,6 +655,7 @@ struct tsi721_tx_desc {
 	u64 rio_addr;
 	/* upper 2-bits of 66-bit RIO address */
 	u8  rio_addr_u;
+	u32 bcount;
 	bool interrupt;
 	struct list_head desc_node;
 	struct list_head tx_list;
@@ -304,35 +304,17 @@ struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
 }
 
 static int
-tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
-		 struct tsi721_tx_desc *desc, struct scatterlist *sg,
+tsi721_desc_fill_init(struct tsi721_tx_desc *desc, struct scatterlist *sg,
 		 enum dma_rtype rtype, u32 sys_size)
 {
 	struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
 	u64 rio_addr;
 
-	if (sg_dma_len(sg) > TSI721_DMAD_BCOUNT1 + 1) {
-		dev_err(bdma_chan->dchan.device->dev,
-			"SG element is too large\n");
-		return -EINVAL;
-	}
-
-	dev_dbg(bdma_chan->dchan.device->dev,
-		"desc: 0x%llx, addr: 0x%llx len: 0x%x\n",
-		(u64)desc->txd.phys, (unsigned long long)sg_dma_address(sg),
-		sg_dma_len(sg));
-
-	dev_dbg(bdma_chan->dchan.device->dev,
-		"bd_ptr = %p did=%d raddr=0x%llx\n",
-		bd_ptr, desc->destid, desc->rio_addr);
-
 	/* Initialize DMA descriptor */
 	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
 					(rtype << 19) | desc->destid);
-	if (desc->interrupt)
-		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
 	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
-					(sys_size << 26) | sg_dma_len(sg));
+					(sys_size << 26));
 	rio_addr = (desc->rio_addr >> 2) |
 				((u64)(desc->rio_addr_u & 0x3) << 62);
 	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
@@ -346,6 +328,20 @@ tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
 	return 0;
 }
 
+static int
+tsi721_desc_fill_end(struct tsi721_tx_desc *desc)
+{
+	struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
+
+	/* Update DMA descriptor */
+	if (desc->interrupt)
+		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
+	bd_ptr->bcount |= cpu_to_le32(desc->bcount & TSI721_DMAD_BCOUNT1);
+
+	return 0;
+}
+
+
 static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
 				      struct tsi721_tx_desc *desc)
 {
@@ -674,6 +670,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
 	unsigned int i;
 	u32 sys_size = dma_to_mport(dchan->device)->sys_size;
 	enum dma_rtype rtype;
+	dma_addr_t next_addr = -1;
 
 	if (!sgl || !sg_len) {
 		dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
@@ -704,36 +701,84 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
 	for_each_sg(sgl, sg, sg_len, i) {
 		int err;
 
-		dev_dbg(dchan->device->dev, "%s: sg #%d\n", __func__, i);
+		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
+			dev_err(dchan->device->dev,
+				"%s: SG entry %d is too large\n", __func__, i);
+			goto err_desc_put;
+		}
+
+		/*
+		 * If this sg entry forms contiguous block with previous one,
+		 * try to merge it into existing DMA descriptor
+		 */
+		if (desc) {
+			if (next_addr == sg_dma_address(sg) &&
+			    desc->bcount + sg_dma_len(sg) <=
+						TSI721_BDMA_MAX_BCOUNT) {
+				/* Adjust byte count of the descriptor */
+				desc->bcount += sg_dma_len(sg);
+				goto entry_done;
+			}
+
+			/*
+			 * Finalize this descriptor using total
+			 * byte count value.
+			 */
+			tsi721_desc_fill_end(desc);
+			dev_dbg(dchan->device->dev, "%s: desc final len: %d\n",
+				__func__, desc->bcount);
+		}
+
+		/*
+		 * Obtain and initialize a new descriptor
+		 */
 		desc = tsi721_desc_get(bdma_chan);
 		if (!desc) {
 			dev_err(dchan->device->dev,
-				"Not enough descriptors available\n");
-			goto err_desc_get;
+				"%s: Failed to get new descriptor for SG %d\n",
+				__func__, i);
+			goto err_desc_put;
 		}
 
-		if (sg_is_last(sg))
-			desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
-		else
-			desc->interrupt = false;
-
 		desc->destid = rext->destid;
 		desc->rio_addr = rio_addr;
 		desc->rio_addr_u = 0;
+		desc->bcount = sg_dma_len(sg);
 
-		err = tsi721_fill_desc(bdma_chan, desc, sg, rtype, sys_size);
+		dev_dbg(dchan->device->dev,
+			"sg%d desc: 0x%llx, addr: 0x%llx len: %d\n",
+			i, (u64)desc->txd.phys,
+			(unsigned long long)sg_dma_address(sg),
+			sg_dma_len(sg));
+
+		dev_dbg(dchan->device->dev,
+			"bd_ptr = %p did=%d raddr=0x%llx\n",
+			desc->hw_desc, desc->destid, desc->rio_addr);
+
+		err = tsi721_desc_fill_init(desc, sg, rtype, sys_size);
 		if (err) {
 			dev_err(dchan->device->dev,
 				"Failed to build desc: %d\n", err);
-			goto err_desc_get;
+			goto err_desc_put;
 		}
 
-		rio_addr += sg_dma_len(sg);
+		next_addr = sg_dma_address(sg);
 
 		if (!first)
 			first = desc;
 		else
 			list_add_tail(&desc->desc_node, &first->tx_list);
+
+entry_done:
+		if (sg_is_last(sg)) {
+			desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
+			tsi721_desc_fill_end(desc);
+			dev_dbg(dchan->device->dev, "%s: desc final len: %d\n",
+				__func__, desc->bcount);
+		} else {
+			rio_addr += sg_dma_len(sg);
+			next_addr += sg_dma_len(sg);
+		}
 	}
 
 	first->txd.cookie = -EBUSY;
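
The heart of the rework above is the merge test: an SG entry is folded into the current descriptor only if it starts exactly where the previous one ended and the combined byte count still fits the per-descriptor limit. A standalone sketch of just that check follows; the limit value here is a placeholder, the real one comes from TSI721_DMAD_BCOUNT1 in the header hunk earlier.

/* Standalone sketch of the descriptor-merge test (illustrative values) */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_BCOUNT 0x1000000u	/* stand-in for TSI721_BDMA_MAX_BCOUNT */

static bool can_merge(uint64_t next_addr, uint32_t cur_bcount,
		      uint64_t sg_addr, uint32_t sg_len)
{
	return next_addr == sg_addr && cur_bcount + sg_len <= MAX_BCOUNT;
}

int main(void)
{
	/* contiguous follow-on entry: merged into the current descriptor */
	printf("%d\n", can_merge(0x2000, 0x1000, 0x2000, 0x1000));	/* 1 */
	/* gap between entries: a new descriptor is started */
	printf("%d\n", can_merge(0x2000, 0x1000, 0x3000, 0x1000));	/* 0 */
	return 0;
}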
@@ -741,7 +786,7 @@ struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
 
 	return &first->txd;
 
-err_desc_get:
+err_desc_put:
 	tsi721_desc_put(bdma_chan, first);
 	return NULL;
 }
@@ -792,7 +837,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
 		if (i == TSI721_DMACH_MAINT)
 			continue;
 
-		bdma_chan->bd_num = 64;
+		bdma_chan->bd_num = TSI721_BDMA_BD_RING_SZ;
 		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
 
 		bdma_chan->dchan.device = &mport->dma;
@@ -167,7 +167,6 @@ void rio_unregister_driver(struct rio_driver *rdrv)
 void rio_attach_device(struct rio_dev *rdev)
 {
 	rdev->dev.bus = &rio_bus_type;
-	rdev->dev.parent = &rio_bus;
 }
 EXPORT_SYMBOL_GPL(rio_attach_device);
 
@@ -216,9 +215,12 @@ static int rio_uevent(struct device *dev, struct kobj_uevent_env *env)
 	return 0;
 }
 
-struct device rio_bus = {
-	.init_name = "rapidio",
+struct class rio_mport_class = {
+	.name		= "rapidio_port",
+	.owner		= THIS_MODULE,
+	.dev_groups	= rio_mport_groups,
 };
+EXPORT_SYMBOL_GPL(rio_mport_class);
 
 struct bus_type rio_bus_type = {
 	.name = "rapidio",
@@ -233,14 +235,20 @@ struct bus_type rio_bus_type = {
 /**
  * rio_bus_init - Register the RapidIO bus with the device model
  *
- * Registers the RIO bus device and RIO bus type with the Linux
+ * Registers the RIO mport device class and RIO bus type with the Linux
  * device model.
  */
 static int __init rio_bus_init(void)
 {
-	if (device_register(&rio_bus) < 0)
-		printk("RIO: failed to register RIO bus device\n");
-	return bus_register(&rio_bus_type);
+	int ret;
+
+	ret = class_register(&rio_mport_class);
+	if (!ret) {
+		ret = bus_register(&rio_bus_type);
+		if (ret)
+			class_unregister(&rio_mport_class);
+	}
+	return ret;
 }
 
 postcore_initcall(rio_bus_init);
@@ -461,6 +461,7 @@ static struct rio_dev *rio_setup_device(struct rio_net *net,
 			rdev->comp_tag & RIO_CTAG_UDEVID);
 	}
 
+	rdev->dev.parent = &port->dev;
 	rio_attach_device(rdev);
 
 	device_initialize(&rdev->dev);
@@ -341,3 +341,43 @@ const struct attribute_group *rio_bus_groups[] = {
 	&rio_bus_group,
 	NULL,
 };
+
+static ssize_t
+port_destid_show(struct device *dev, struct device_attribute *attr,
+		 char *buf)
+{
+	struct rio_mport *mport = to_rio_mport(dev);
+
+	if (mport)
+		return sprintf(buf, "0x%04x\n", mport->host_deviceid);
+	else
+		return -ENODEV;
+}
+static DEVICE_ATTR_RO(port_destid);
+
+static ssize_t sys_size_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct rio_mport *mport = to_rio_mport(dev);
+
+	if (mport)
+		return sprintf(buf, "%u\n", mport->sys_size);
+	else
+		return -ENODEV;
+}
+static DEVICE_ATTR_RO(sys_size);
+
+static struct attribute *rio_mport_attrs[] = {
+	&dev_attr_port_destid.attr,
+	&dev_attr_sys_size.attr,
+	NULL,
+};
+
+static const struct attribute_group rio_mport_group = {
+	.attrs = rio_mport_attrs,
+};
+
+const struct attribute_group *rio_mport_groups[] = {
+	&rio_mport_group,
+	NULL,
+};
@@ -1884,6 +1884,7 @@ static int rio_get_hdid(int index)
 int rio_register_mport(struct rio_mport *port)
 {
 	struct rio_scan_node *scan = NULL;
+	int res = 0;
 
 	if (next_portid >= RIO_MAX_MPORTS) {
 		pr_err("RIO: reached specified max number of mports\n");
@@ -1894,6 +1895,16 @@ int rio_register_mport(struct rio_mport *port)
 	port->host_deviceid = rio_get_hdid(port->id);
 	port->nscan = NULL;
 
+	dev_set_name(&port->dev, "rapidio%d", port->id);
+	port->dev.class = &rio_mport_class;
+
+	res = device_register(&port->dev);
+	if (res)
+		dev_err(&port->dev, "RIO: mport%d registration failed ERR=%d\n",
+			port->id, res);
+	else
+		dev_dbg(&port->dev, "RIO: mport%d registered\n", port->id);
+
 	mutex_lock(&rio_mport_list_lock);
 	list_add_tail(&port->node, &rio_mports);
 
@@ -50,6 +50,7 @@ extern int rio_mport_scan(int mport_id);
 /* Structures internal to the RIO core code */
 extern const struct attribute_group *rio_dev_groups[];
 extern const struct attribute_group *rio_bus_groups[];
+extern const struct attribute_group *rio_mport_groups[];
 
 #define RIO_GET_DID(size, x)	(size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16))
 #define RIO_SET_DID(size, x)	(size ? (x & 0xffff) : ((x & 0x000000ff) << 16))
@@ -832,6 +832,7 @@ static void v9fs_mmap_vm_close(struct vm_area_struct *vma)
 
 static const struct vm_operations_struct v9fs_file_vm_ops = {
 	.fault = filemap_fault,
+	.map_pages = filemap_map_pages,
 	.page_mkwrite = v9fs_vm_page_mkwrite,
 	.remap_pages = generic_file_remap_pages,
 };
@@ -839,6 +840,7 @@ static const struct vm_operations_struct v9fs_file_vm_ops = {
 static const struct vm_operations_struct v9fs_mmap_file_vm_ops = {
 	.close = v9fs_mmap_vm_close,
 	.fault = filemap_fault,
+	.map_pages = filemap_map_pages,
 	.page_mkwrite = v9fs_vm_page_mkwrite,
 	.remap_pages = generic_file_remap_pages,
 };
@@ -266,7 +266,7 @@ static void init_once(void *foo)
         inode_init_once(&ei->vfs_inode);
 }
 
-static int init_inodecache(void)
+static int __init init_inodecache(void)
 {
         adfs_inode_cachep = kmem_cache_create("adfs_inode_cache",
                                         sizeof(struct adfs_inode_info),
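The only change here is the __init annotation on init_inodecache(): the function is called exactly once from the filesystem's init path, so its text can be placed in (and discarded with) the init section. A generic sketch of the pattern with hypothetical names, not the adfs code itself:

/* Sketch: __init for a one-shot inode-cache constructor (hypothetical names). */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>

struct demo_inode_info { int dummy; };          /* stand-in for the fs-specific inode */

static struct kmem_cache *demo_inode_cachep;

/* Marked __init: only ever called from the module/fs init path. */
static int __init demo_init_inodecache(void)
{
        demo_inode_cachep = kmem_cache_create("demo_inode_cache",
                                              sizeof(struct demo_inode_info),
                                              0, SLAB_RECLAIM_ACCOUNT, NULL);
        return demo_inode_cachep ? 0 : -ENOMEM;
}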
@@ -5,14 +5,6 @@
 #include <linux/mutex.h>
 #include <linux/workqueue.h>
 
-/* AmigaOS allows file names with up to 30 characters length.
- * Names longer than that will be silently truncated. If you
- * want to disallow this, comment out the following #define.
- * Creating filesystem objects with longer names will then
- * result in an error (ENAMETOOLONG).
- */
-/*#define AFFS_NO_TRUNCATE */
-
 /* Ugly macros make the code more pretty. */
 
 #define GET_END_PTR(st,p,sz)            ((st *)((char *)(p)+((sz)-sizeof(st))))
@@ -28,7 +20,6 @@
 
 #define AFFS_CACHE_SIZE         PAGE_SIZE
 
-#define AFFS_MAX_PREALLOC       32
 #define AFFS_LC_SIZE            (AFFS_CACHE_SIZE/sizeof(u32)/2)
 #define AFFS_AC_SIZE            (AFFS_CACHE_SIZE/sizeof(struct affs_ext_key)/2)
 #define AFFS_AC_MASK            (AFFS_AC_SIZE-1)
@@ -118,6 +109,7 @@ struct affs_sb_info {
 #define SF_OFS          0x0200          /* Old filesystem */
 #define SF_PREFIX       0x0400          /* Buffer for prefix is allocated */
 #define SF_VERBOSE      0x0800          /* Talk about fs when mounting */
+#define SF_NO_TRUNCATE  0x1000          /* Don't truncate filenames */
 
 /* short cut to get to the affs specific sb data */
 static inline struct affs_sb_info *AFFS_SB(struct super_block *sb)
@@ -137,9 +129,13 @@ extern void affs_fix_checksum(struct super_block *sb, struct buffer_head *bh);
 extern void secs_to_datestamp(time_t secs, struct affs_date *ds);
 extern umode_t prot_to_mode(u32 prot);
 extern void mode_to_prot(struct inode *inode);
-extern void affs_error(struct super_block *sb, const char *function, const char *fmt, ...);
-extern void affs_warning(struct super_block *sb, const char *function, const char *fmt, ...);
-extern int affs_check_name(const unsigned char *name, int len);
+extern void affs_error(struct super_block *sb, const char *function,
+                       const char *fmt, ...);
+extern void affs_warning(struct super_block *sb, const char *function,
+                         const char *fmt, ...);
+extern bool affs_nofilenametruncate(const struct dentry *dentry);
+extern int affs_check_name(const unsigned char *name, int len,
+                           bool notruncate);
 extern int affs_copy_name(unsigned char *bstr, struct dentry *dentry);
 
 /* bitmap. c */
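Taken together, the affs.h hunks replace the old compile-time AFFS_NO_TRUNCATE switch with a runtime choice: a new SF_NO_TRUNCATE superblock flag, an affs_nofilenametruncate() helper, and a notruncate argument to affs_check_name(). A rough sketch of how a caller might use the new interface; the caller name is hypothetical, and the real affs_check_name and mount-option handling live in the affs .c files (this assumes it sits in fs/affs/*.c with "affs.h" included):

/* Hypothetical caller of the new name-check interface. */
static int demo_affs_prepare_name(struct dentry *dentry)
{
        /* With the "nofilenametruncate" option set (SF_NO_TRUNCATE), names
         * longer than the 30-character AmigaOS limit fail with -ENAMETOOLONG
         * instead of being silently truncated. */
        bool notruncate = affs_nofilenametruncate(dentry);

        return affs_check_name(dentry->d_name.name, dentry->d_name.len,
                               notruncate);
}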
Some files were not shown because too many files have changed in this diff.