docs/mm: memblock: update kernel-doc comments

* make memblock_discard description kernel-doc compatible
* add brief description for memblock_setclr_flag and describe its
  parameters
* fixup return value descriptions

Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Signed-off-by: Jonathan Corbet <corbet@lwn.net>
This commit is contained in:
Mike Rapoport 2018-06-30 17:55:02 +03:00 committed by Jonathan Corbet
parent e1720fee27
commit 47cec4432a
2 changed files with 59 additions and 42 deletions

View File

@ -239,7 +239,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
/** /**
* for_each_resv_unavail_range - iterate through reserved and unavailable memory * for_each_resv_unavail_range - iterate through reserved and unavailable memory
* @i: u64 used as loop variable * @i: u64 used as loop variable
* @flags: pick from blocks based on memory attributes
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
* *
@ -367,8 +366,10 @@ phys_addr_t memblock_get_current_limit(void);
*/ */
/** /**
* memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
* @reg: memblock_region structure * @reg: memblock_region structure
*
* Return: the lowest pfn intersecting with the memory region
*/ */
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg) static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{ {
@ -376,8 +377,10 @@ static inline unsigned long memblock_region_memory_base_pfn(const struct membloc
} }
/** /**
* memblock_region_memory_end_pfn - Return the end_pfn this region * memblock_region_memory_end_pfn - get the end pfn of the memory region
* @reg: memblock_region structure * @reg: memblock_region structure
*
* Return: the end_pfn of the memory region
*/ */
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg) static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{ {
@ -385,8 +388,10 @@ static inline unsigned long memblock_region_memory_end_pfn(const struct memblock
} }
/** /**
* memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
* @reg: memblock_region structure * @reg: memblock_region structure
*
* Return: the lowest pfn intersecting with the reserved region
*/ */
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg) static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{ {
@ -394,8 +399,10 @@ static inline unsigned long memblock_region_reserved_base_pfn(const struct membl
} }
/** /**
* memblock_region_reserved_end_pfn - Return the end_pfn this region * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
* @reg: memblock_region structure * @reg: memblock_region structure
*
* Return: the end_pfn of the reserved region
*/ */
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg) static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{ {

View File

@ -92,10 +92,11 @@ bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
return i < type->cnt; return i < type->cnt;
} }
/* /**
* __memblock_find_range_bottom_up - find free area utility in bottom-up * __memblock_find_range_bottom_up - find free area utility in bottom-up
* @start: start of candidate range * @start: start of candidate range
* @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
* %MEMBLOCK_ALLOC_ACCESSIBLE
* @size: size of free area to find * @size: size of free area to find
* @align: alignment of free area to find * @align: alignment of free area to find
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
@ -103,7 +104,7 @@ bool __init_memblock memblock_overlaps_region(struct memblock_type *type,
* *
* Utility called from memblock_find_in_range_node(), find free area bottom-up. * Utility called from memblock_find_in_range_node(), find free area bottom-up.
* *
* RETURNS: * Return:
* Found address on success, 0 on failure. * Found address on success, 0 on failure.
*/ */
static phys_addr_t __init_memblock static phys_addr_t __init_memblock
@ -129,7 +130,8 @@ __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
/** /**
* __memblock_find_range_top_down - find free area utility, in top-down * __memblock_find_range_top_down - find free area utility, in top-down
* @start: start of candidate range * @start: start of candidate range
* @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
* %MEMBLOCK_ALLOC_ACCESSIBLE
* @size: size of free area to find * @size: size of free area to find
* @align: alignment of free area to find * @align: alignment of free area to find
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
@ -137,7 +139,7 @@ __memblock_find_range_bottom_up(phys_addr_t start, phys_addr_t end,
* *
* Utility called from memblock_find_in_range_node(), find free area top-down. * Utility called from memblock_find_in_range_node(), find free area top-down.
* *
* RETURNS: * Return:
* Found address on success, 0 on failure. * Found address on success, 0 on failure.
*/ */
static phys_addr_t __init_memblock static phys_addr_t __init_memblock
@ -169,7 +171,8 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
* @size: size of free area to find * @size: size of free area to find
* @align: alignment of free area to find * @align: alignment of free area to find
* @start: start of candidate range * @start: start of candidate range
* @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
* %MEMBLOCK_ALLOC_ACCESSIBLE
* @nid: nid of the free area to find, %NUMA_NO_NODE for any node * @nid: nid of the free area to find, %NUMA_NO_NODE for any node
* @flags: pick from blocks based on memory attributes * @flags: pick from blocks based on memory attributes
* *
@ -183,7 +186,7 @@ __memblock_find_range_top_down(phys_addr_t start, phys_addr_t end,
* *
* If bottom-up allocation failed, will try to allocate memory top-down. * If bottom-up allocation failed, will try to allocate memory top-down.
* *
* RETURNS: * Return:
* Found address on success, 0 on failure. * Found address on success, 0 on failure.
*/ */
phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size, phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
@ -238,13 +241,14 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
/** /**
* memblock_find_in_range - find free area in given range * memblock_find_in_range - find free area in given range
* @start: start of candidate range * @start: start of candidate range
* @end: end of candidate range, can be %MEMBLOCK_ALLOC_{ANYWHERE|ACCESSIBLE} * @end: end of candidate range, can be %MEMBLOCK_ALLOC_ANYWHERE or
* %MEMBLOCK_ALLOC_ACCESSIBLE
* @size: size of free area to find * @size: size of free area to find
* @align: alignment of free area to find * @align: alignment of free area to find
* *
* Find @size free area aligned to @align in the specified range. * Find @size free area aligned to @align in the specified range.
* *
* RETURNS: * Return:
* Found address on success, 0 on failure. * Found address on success, 0 on failure.
*/ */
phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start, phys_addr_t __init_memblock memblock_find_in_range(phys_addr_t start,
@ -288,7 +292,7 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK #ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
/** /**
* Discard memory and reserved arrays if they were allocated * memblock_discard - discard memory and reserved arrays if they were allocated
*/ */
void __init memblock_discard(void) void __init memblock_discard(void)
{ {
@ -318,11 +322,11 @@ void __init memblock_discard(void)
* *
* Double the size of the @type regions array. If memblock is being used to * Double the size of the @type regions array. If memblock is being used to
* allocate memory for a new reserved regions array and there is a previously * allocate memory for a new reserved regions array and there is a previously
* allocated memory range [@new_area_start,@new_area_start+@new_area_size] * allocated memory range [@new_area_start, @new_area_start + @new_area_size]
* waiting to be reserved, ensure the memory used by the new array does * waiting to be reserved, ensure the memory used by the new array does
* not overlap. * not overlap.
* *
* RETURNS: * Return:
* 0 on success, -1 on failure. * 0 on success, -1 on failure.
*/ */
static int __init_memblock memblock_double_array(struct memblock_type *type, static int __init_memblock memblock_double_array(struct memblock_type *type,
@ -467,7 +471,7 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
* @nid: node id of the new region * @nid: node id of the new region
* @flags: flags of the new region * @flags: flags of the new region
* *
* Insert new memblock region [@base,@base+@size) into @type at @idx. * Insert new memblock region [@base, @base + @size) into @type at @idx.
* @type must already have extra room to accommodate the new region. * @type must already have extra room to accommodate the new region.
*/ */
static void __init_memblock memblock_insert_region(struct memblock_type *type, static void __init_memblock memblock_insert_region(struct memblock_type *type,
@ -496,12 +500,12 @@ static void __init_memblock memblock_insert_region(struct memblock_type *type,
* @nid: nid of the new region * @nid: nid of the new region
* @flags: flags of the new region * @flags: flags of the new region
* *
* Add new memblock region [@base,@base+@size) into @type. The new region * Add new memblock region [@base, @base + @size) into @type. The new region
* is allowed to overlap with existing ones - overlaps don't affect already * is allowed to overlap with existing ones - overlaps don't affect already
* existing regions. @type is guaranteed to be minimal (all neighbouring * existing regions. @type is guaranteed to be minimal (all neighbouring
* compatible regions are merged) after the addition. * compatible regions are merged) after the addition.
* *
* RETURNS: * Return:
* 0 on success, -errno on failure. * 0 on success, -errno on failure.
*/ */
int __init_memblock memblock_add_range(struct memblock_type *type, int __init_memblock memblock_add_range(struct memblock_type *type,
@ -615,11 +619,11 @@ int __init_memblock memblock_add(phys_addr_t base, phys_addr_t size)
* @end_rgn: out parameter for the end of isolated region * @end_rgn: out parameter for the end of isolated region
* *
* Walk @type and ensure that regions don't cross the boundaries defined by * Walk @type and ensure that regions don't cross the boundaries defined by
* [@base,@base+@size). Crossing regions are split at the boundaries, * [@base, @base + @size). Crossing regions are split at the boundaries,
* which may create at most two more regions. The index of the first * which may create at most two more regions. The index of the first
* region inside the range is returned in *@start_rgn and end in *@end_rgn. * region inside the range is returned in *@start_rgn and end in *@end_rgn.
* *
* RETURNS: * Return:
* 0 on success, -errno on failure. * 0 on success, -errno on failure.
*/ */
static int __init_memblock memblock_isolate_range(struct memblock_type *type, static int __init_memblock memblock_isolate_range(struct memblock_type *type,
@ -730,10 +734,15 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
} }
/** /**
* memblock_setclr_flag - set or clear flag for a memory region
* @base: base address of the region
* @size: size of the region
* @set: set or clear the flag
* @flag: the flag to update
* *
* This function isolates region [@base, @base + @size), and sets/clears flag * This function isolates region [@base, @base + @size), and sets/clears flag
* *
* Return 0 on success, -errno on failure. * Return: 0 on success, -errno on failure.
*/ */
static int __init_memblock memblock_setclr_flag(phys_addr_t base, static int __init_memblock memblock_setclr_flag(phys_addr_t base,
phys_addr_t size, int set, int flag) phys_addr_t size, int set, int flag)
@ -760,7 +769,7 @@ static int __init_memblock memblock_setclr_flag(phys_addr_t base,
* @base: the base phys addr of the region * @base: the base phys addr of the region
* @size: the size of the region * @size: the size of the region
* *
* Return 0 on success, -errno on failure. * Return: 0 on success, -errno on failure.
*/ */
int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size) int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
{ {
@ -772,7 +781,7 @@ int __init_memblock memblock_mark_hotplug(phys_addr_t base, phys_addr_t size)
* @base: the base phys addr of the region * @base: the base phys addr of the region
* @size: the size of the region * @size: the size of the region
* *
* Return 0 on success, -errno on failure. * Return: 0 on success, -errno on failure.
*/ */
int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size) int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
{ {
@ -784,7 +793,7 @@ int __init_memblock memblock_clear_hotplug(phys_addr_t base, phys_addr_t size)
* @base: the base phys addr of the region * @base: the base phys addr of the region
* @size: the size of the region * @size: the size of the region
* *
* Return 0 on success, -errno on failure. * Return: 0 on success, -errno on failure.
*/ */
int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size) int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
{ {
@ -798,7 +807,7 @@ int __init_memblock memblock_mark_mirror(phys_addr_t base, phys_addr_t size)
* @base: the base phys addr of the region * @base: the base phys addr of the region
* @size: the size of the region * @size: the size of the region
* *
* Return 0 on success, -errno on failure. * Return: 0 on success, -errno on failure.
*/ */
int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size) int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
{ {
@ -810,7 +819,7 @@ int __init_memblock memblock_mark_nomap(phys_addr_t base, phys_addr_t size)
* @base: the base phys addr of the region * @base: the base phys addr of the region
* @size: the size of the region * @size: the size of the region
* *
* Return 0 on success, -errno on failure. * Return: 0 on success, -errno on failure.
*/ */
int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size) int __init_memblock memblock_clear_nomap(phys_addr_t base, phys_addr_t size)
{ {
@ -971,9 +980,6 @@ void __init_memblock __next_mem_range(u64 *idx, int nid,
/** /**
* __next_mem_range_rev - generic next function for for_each_*_range_rev() * __next_mem_range_rev - generic next function for for_each_*_range_rev()
* *
* Finds the next range from type_a which is not marked as unsuitable
* in type_b.
*
* @idx: pointer to u64 loop variable * @idx: pointer to u64 loop variable
* @nid: node selector, %NUMA_NO_NODE for all nodes * @nid: node selector, %NUMA_NO_NODE for all nodes
* @flags: pick from blocks based on memory attributes * @flags: pick from blocks based on memory attributes
@ -983,6 +989,9 @@ void __init_memblock __next_mem_range(u64 *idx, int nid,
* @out_end: ptr to phys_addr_t for end address of the range, can be %NULL * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
* @out_nid: ptr to int for nid of the range, can be %NULL * @out_nid: ptr to int for nid of the range, can be %NULL
* *
* Finds the next range from type_a which is not marked as unsuitable
* in type_b.
*
* Reverse of __next_mem_range(). * Reverse of __next_mem_range().
*/ */
void __init_memblock __next_mem_range_rev(u64 *idx, int nid, void __init_memblock __next_mem_range_rev(u64 *idx, int nid,
@ -1118,10 +1127,10 @@ void __init_memblock __next_mem_pfn_range(int *idx, int nid,
* @type: memblock type to set node ID for * @type: memblock type to set node ID for
* @nid: node ID to set * @nid: node ID to set
* *
* Set the nid of memblock @type regions in [@base,@base+@size) to @nid. * Set the nid of memblock @type regions in [@base, @base + @size) to @nid.
* Regions which cross the area boundaries are split as necessary. * Regions which cross the area boundaries are split as necessary.
* *
* RETURNS: * Return:
* 0 on success, -errno on failure. * 0 on success, -errno on failure.
*/ */
int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size, int __init_memblock memblock_set_node(phys_addr_t base, phys_addr_t size,
@ -1245,7 +1254,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
* The allocation is performed from memory region limited by * The allocation is performed from memory region limited by
* memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE. * memblock.current_limit if @max_addr == %BOOTMEM_ALLOC_ACCESSIBLE.
* *
* The memory block is aligned on SMP_CACHE_BYTES if @align == 0. * The memory block is aligned on %SMP_CACHE_BYTES if @align == 0.
* *
* The phys address of allocated boot memory block is converted to virtual and * The phys address of allocated boot memory block is converted to virtual and
* allocated memory is reset to 0. * allocated memory is reset to 0.
@ -1253,7 +1262,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
* In addition, function sets the min_count to 0 using kmemleak_alloc for * In addition, function sets the min_count to 0 using kmemleak_alloc for
* allocated boot memory block, so that it is never reported as leaks. * allocated boot memory block, so that it is never reported as leaks.
* *
* RETURNS: * Return:
* Virtual address of allocated memory block on success, NULL on failure. * Virtual address of allocated memory block on success, NULL on failure.
*/ */
static void * __init memblock_virt_alloc_internal( static void * __init memblock_virt_alloc_internal(
@ -1338,7 +1347,7 @@ done:
* info), if enabled. Does not zero allocated memory, does not panic if request * info), if enabled. Does not zero allocated memory, does not panic if request
* cannot be satisfied. * cannot be satisfied.
* *
* RETURNS: * Return:
* Virtual address of allocated memory block on success, NULL on failure. * Virtual address of allocated memory block on success, NULL on failure.
*/ */
void * __init memblock_virt_alloc_try_nid_raw( void * __init memblock_virt_alloc_try_nid_raw(
@ -1375,7 +1384,7 @@ void * __init memblock_virt_alloc_try_nid_raw(
* Public function, provides additional debug information (including caller * Public function, provides additional debug information (including caller
* info), if enabled. This function zeroes the allocated memory. * info), if enabled. This function zeroes the allocated memory.
* *
* RETURNS: * Return:
* Virtual address of allocated memory block on success, NULL on failure. * Virtual address of allocated memory block on success, NULL on failure.
*/ */
void * __init memblock_virt_alloc_try_nid_nopanic( void * __init memblock_virt_alloc_try_nid_nopanic(
@ -1411,7 +1420,7 @@ void * __init memblock_virt_alloc_try_nid_nopanic(
* which provides debug information (including caller info), if enabled, * which provides debug information (including caller info), if enabled,
* and panics if the request can not be satisfied. * and panics if the request can not be satisfied.
* *
* RETURNS: * Return:
* Virtual address of allocated memory block on success, NULL on failure. * Virtual address of allocated memory block on success, NULL on failure.
*/ */
void * __init memblock_virt_alloc_try_nid( void * __init memblock_virt_alloc_try_nid(
@ -1668,9 +1677,9 @@ int __init_memblock memblock_search_pfn_nid(unsigned long pfn,
* @base: base of region to check * @base: base of region to check
* @size: size of region to check * @size: size of region to check
* *
* Check if the region [@base, @base+@size) is a subset of a memory block. * Check if the region [@base, @base + @size) is a subset of a memory block.
* *
* RETURNS: * Return:
* 0 if false, non-zero if true * 0 if false, non-zero if true
*/ */
bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
@ -1689,9 +1698,10 @@ bool __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t siz
* @base: base of region to check * @base: base of region to check
* @size: size of region to check * @size: size of region to check
* *
* Check if the region [@base, @base+@size) intersects a reserved memory block. * Check if the region [@base, @base + @size) intersects a reserved
* memory block.
* *
* RETURNS: * Return:
* True if they intersect, false if not. * True if they intersect, false if not.
*/ */
bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) bool __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)