mm: rename migrate_pgmap_owner
MMU notifier ranges have a migrate_pgmap_owner field which is used by drivers to store a pointer. This is subsequently used by the driver callback to filter MMU_NOTIFY_MIGRATE events. Other notifier event types can also benefit from this filtering, so rename the 'migrate_pgmap_owner' field to 'owner' and create a new notifier initialisation function to initialise this field. Link: https://lkml.kernel.org/r/20210616105937.23201-6-apopple@nvidia.com Signed-off-by: Alistair Popple <apopple@nvidia.com> Suggested-by: Peter Xu <peterx@redhat.com> Reviewed-by: Peter Xu <peterx@redhat.com> Cc: Ben Skeggs <bskeggs@redhat.com> Cc: Christoph Hellwig <hch@lst.de> Cc: Hugh Dickins <hughd@google.com> Cc: Jason Gunthorpe <jgg@nvidia.com> Cc: John Hubbard <jhubbard@nvidia.com> Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org> Cc: Ralph Campbell <rcampbell@nvidia.com> Cc: Shakeel Butt <shakeelb@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
a98a2f0c8c
commit
6b49bf6ddb
@ -332,7 +332,7 @@ between device driver specific code and shared common code:
|
||||
walks to fill in the ``args->src`` array with PFNs to be migrated.
|
||||
The ``invalidate_range_start()`` callback is passed a
|
||||
``struct mmu_notifier_range`` with the ``event`` field set to
|
||||
``MMU_NOTIFY_MIGRATE`` and the ``migrate_pgmap_owner`` field set to
|
||||
``MMU_NOTIFY_MIGRATE`` and the ``owner`` field set to
|
||||
the ``args->pgmap_owner`` field passed to migrate_vma_setup(). This
|
||||
allows the device driver to skip the invalidation callback and only
|
||||
invalidate device private MMU mappings that are actually migrating.
|
||||
|
@ -265,7 +265,7 @@ nouveau_svmm_invalidate_range_start(struct mmu_notifier *mn,
|
||||
* the invalidation is handled as part of the migration process.
|
||||
*/
|
||||
if (update->event == MMU_NOTIFY_MIGRATE &&
|
||||
update->migrate_pgmap_owner == svmm->vmm->cli->drm->dev)
|
||||
update->owner == svmm->vmm->cli->drm->dev)
|
||||
goto out;
|
||||
|
||||
if (limit > svmm->unmanaged.start && start < svmm->unmanaged.limit) {
|
||||
|
@ -41,7 +41,7 @@ struct mmu_interval_notifier;
|
||||
*
|
||||
* @MMU_NOTIFY_MIGRATE: used during migrate_vma_collect() invalidate to signal
|
||||
* a device driver to possibly ignore the invalidation if the
|
||||
* migrate_pgmap_owner field matches the driver's device private pgmap owner.
|
||||
* owner field matches the driver's device private pgmap owner.
|
||||
*/
|
||||
enum mmu_notifier_event {
|
||||
MMU_NOTIFY_UNMAP = 0,
|
||||
@ -269,7 +269,7 @@ struct mmu_notifier_range {
|
||||
unsigned long end;
|
||||
unsigned flags;
|
||||
enum mmu_notifier_event event;
|
||||
void *migrate_pgmap_owner;
|
||||
void *owner;
|
||||
};
|
||||
|
||||
static inline int mm_has_notifiers(struct mm_struct *mm)
|
||||
@ -521,14 +521,14 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
|
||||
range->flags = flags;
|
||||
}
|
||||
|
||||
static inline void mmu_notifier_range_init_migrate(
|
||||
struct mmu_notifier_range *range, unsigned int flags,
|
||||
static inline void mmu_notifier_range_init_owner(
|
||||
struct mmu_notifier_range *range,
|
||||
enum mmu_notifier_event event, unsigned int flags,
|
||||
struct vm_area_struct *vma, struct mm_struct *mm,
|
||||
unsigned long start, unsigned long end, void *pgmap)
|
||||
unsigned long start, unsigned long end, void *owner)
|
||||
{
|
||||
mmu_notifier_range_init(range, MMU_NOTIFY_MIGRATE, flags, vma, mm,
|
||||
start, end);
|
||||
range->migrate_pgmap_owner = pgmap;
|
||||
mmu_notifier_range_init(range, event, flags, vma, mm, start, end);
|
||||
range->owner = owner;
|
||||
}
|
||||
|
||||
#define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
|
||||
@ -655,8 +655,8 @@ static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
|
||||
|
||||
#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \
|
||||
_mmu_notifier_range_init(range, start, end)
|
||||
#define mmu_notifier_range_init_migrate(range, flags, vma, mm, start, end, \
|
||||
pgmap) \
|
||||
#define mmu_notifier_range_init_owner(range, event, flags, vma, mm, start, \
|
||||
end, owner) \
|
||||
_mmu_notifier_range_init(range, start, end)
|
||||
|
||||
static inline bool
|
||||
|
@ -218,7 +218,7 @@ static bool dmirror_interval_invalidate(struct mmu_interval_notifier *mni,
|
||||
* the invalidation is handled as part of the migration process.
|
||||
*/
|
||||
if (range->event == MMU_NOTIFY_MIGRATE &&
|
||||
range->migrate_pgmap_owner == dmirror->mdevice)
|
||||
range->owner == dmirror->mdevice)
|
||||
return true;
|
||||
|
||||
if (mmu_notifier_range_blockable(range))
|
||||
|
10
mm/migrate.c
10
mm/migrate.c
@ -2416,8 +2416,8 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
|
||||
* that the registered device driver can skip invalidating device
|
||||
* private page mappings that won't be migrated.
|
||||
*/
|
||||
mmu_notifier_range_init_migrate(&range, 0, migrate->vma,
|
||||
migrate->vma->vm_mm, migrate->start, migrate->end,
|
||||
mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
|
||||
migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end,
|
||||
migrate->pgmap_owner);
|
||||
mmu_notifier_invalidate_range_start(&range);
|
||||
|
||||
@ -2927,9 +2927,9 @@ void migrate_vma_pages(struct migrate_vma *migrate)
|
||||
if (!notified) {
|
||||
notified = true;
|
||||
|
||||
mmu_notifier_range_init_migrate(&range, 0,
|
||||
migrate->vma, migrate->vma->vm_mm,
|
||||
addr, migrate->end,
|
||||
mmu_notifier_range_init_owner(&range,
|
||||
MMU_NOTIFY_MIGRATE, 0, migrate->vma,
|
||||
migrate->vma->vm_mm, addr, migrate->end,
|
||||
migrate->pgmap_owner);
|
||||
mmu_notifier_invalidate_range_start(&range);
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user