nommu: provide follow_pfn().
With the introduction of follow_pfn() as an exported symbol, modules have begun making use of it. Unfortunately this was not reflected on nommu at the time, so the in-tree users have subsequently all blown up with link errors there.

This provides a simple follow_pfn() that just returns addr >> PAGE_SHIFT, which will do the right thing on nommu. There is no need to do range checking within the vma, as the find_vma() case will already take care of this.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 1fbcf37128
commit dfc2f91ac2

 mm/nommu.c | 21 +++++++++++++++++++++
 1 file changed, 21 insertions(+)
@@ -240,6 +240,27 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 }
 EXPORT_SYMBOL(get_user_pages);
 
+/**
+ * follow_pfn - look up PFN at a user virtual address
+ * @vma: memory mapping
+ * @address: user virtual address
+ * @pfn: location to store found PFN
+ *
+ * Only IO mappings and raw PFN mappings are allowed.
+ *
+ * Returns zero and the pfn at @pfn on success, -ve otherwise.
+ */
+int follow_pfn(struct vm_area_struct *vma, unsigned long address,
+	unsigned long *pfn)
+{
+	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
+		return -EINVAL;
+
+	*pfn = address >> PAGE_SHIFT;
+	return 0;
+}
+EXPORT_SYMBOL(follow_pfn);
+
 DEFINE_RWLOCK(vmlist_lock);
 struct vm_struct *vmlist;
 
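For context on how in-tree users reach this export, here is a minimal caller-side sketch; it is not part of the patch, and the wrapper name lookup_user_pfn() plus the exact locking shown are illustrative assumptions. It resolves a user virtual address to a PFN, letting find_vma() do the range checking exactly as the commit message describes.

/*
 * Illustrative sketch only (not from this commit): resolve a user
 * virtual address to a PFN via the newly exported follow_pfn().
 */
#include <linux/mm.h>
#include <linux/sched.h>

static int lookup_user_pfn(struct mm_struct *mm, unsigned long addr,
			   unsigned long *pfn)
{
	struct vm_area_struct *vma;
	int ret = -EINVAL;

	down_read(&mm->mmap_sem);	/* serialize against VMA changes */
	vma = find_vma(mm, addr);	/* range checking happens here */
	if (vma && addr >= vma->vm_start)
		ret = follow_pfn(vma, addr, pfn); /* -EINVAL unless VM_IO/VM_PFNMAP */
	up_read(&mm->mmap_sem);

	return ret;
}

On nommu, follow_pfn() as added above simply reports addr >> PAGE_SHIFT, so the same caller code works unchanged on both MMU and nommu configurations.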