mm: add unmap_mapping_pages()
Several users of unmap_mapping_range() would prefer to express their range in pages rather than bytes. Unfortunately, on a 32-bit kernel, you have to remember to cast your page number to a 64-bit type before shifting it, and four places in the current tree didn't remember to do that. That's a sign of a bad interface.

Conveniently, unmap_mapping_range() actually converts from bytes into pages, so hoist the guts of unmap_mapping_range() into a new function unmap_mapping_pages() and convert the callers which want to use pages.

Link: http://lkml.kernel.org/r/20171206142627.GD32044@bombadil.infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Reported-by: "zhangyi (F)" <yi.zhang@huawei.com>
Reviewed-by: Ross Zwisler <ross.zwisler@linux.intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 977fbdcd59
parent a365ac09d3

6 changed files with 61 additions and 60 deletions
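The 32-bit overflow the commit message describes is easy to illustrate. The sketch below is not code from the patch; the index value is invented and the usual 4 KiB PAGE_SIZE is assumed. Several of the callers converted in this patch (mm/khugepaged.c and fs/dax.c among them) shift a pgoff_t in exactly the unsafe way shown first.

    /* On a 32-bit kernel, pgoff_t is a 32-bit unsigned long. */
    pgoff_t index = 0x180000;	/* page index for byte offset 6 GiB */

    /* Broken: the shift is evaluated in 32-bit arithmetic and wraps to
     * 0x80000000 (2 GiB) before being widened to the loff_t parameter. */
    unmap_mapping_range(mapping, index << PAGE_SHIFT, PAGE_SIZE, 0);

    /* Correct, but easy to forget: widen to 64 bits before shifting. */
    unmap_mapping_range(mapping, (loff_t)index << PAGE_SHIFT, PAGE_SIZE, 0);

    /* The new interface takes the page index directly, so there is no
     * shift and nothing to cast. */
    unmap_mapping_pages(mapping, index, 1, false);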
fs/dax.c (19 lines changed)

@@ -44,6 +44,7 @@
 
 /* The 'colour' (ie low bits) within a PMD of a page offset. */
 #define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
+#define PG_PMD_NR	(PMD_SIZE >> PAGE_SHIFT)
 
 static wait_queue_head_t wait_table[DAX_WAIT_TABLE_ENTRIES];
 
@@ -375,8 +376,8 @@ restart:
 		 * unmapped.
 		 */
 		if (pmd_downgrade && dax_is_zero_entry(entry))
-			unmap_mapping_range(mapping,
-				(index << PAGE_SHIFT) & PMD_MASK, PMD_SIZE, 0);
+			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
+							PG_PMD_NR, false);
 
 		err = radix_tree_preload(
 				mapping_gfp_mask(mapping) & ~__GFP_HIGHMEM);
@@ -538,12 +539,10 @@ static void *dax_insert_mapping_entry(struct address_space *mapping,
 	if (dax_is_zero_entry(entry) && !(flags & RADIX_DAX_ZERO_PAGE)) {
 		/* we are replacing a zero page with block mapping */
 		if (dax_is_pmd_entry(entry))
-			unmap_mapping_range(mapping,
-					(vmf->pgoff << PAGE_SHIFT) & PMD_MASK,
-					PMD_SIZE, 0);
+			unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,
+							PG_PMD_NR, false);
 		else /* pte entry */
-			unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
-					PAGE_SIZE, 0);
+			unmap_mapping_pages(mapping, vmf->pgoff, 1, false);
 	}
 
 	spin_lock_irq(&mapping->tree_lock);
@@ -1269,12 +1268,6 @@ static int dax_iomap_pte_fault(struct vm_fault *vmf, pfn_t *pfnp,
 }
 
 #ifdef CONFIG_FS_DAX_PMD
-/*
- * The 'colour' (ie low bits) within a PMD of a page offset.  This comes up
- * more often than one might expect in the below functions.
- */
-#define PG_PMD_COLOUR	((PMD_SIZE >> PAGE_SHIFT) - 1)
-
 static int dax_pmd_load_hole(struct vm_fault *vmf, struct iomap *iomap,
 		void *entry)
 {
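As a worked example of the new page-index arithmetic in fs/dax.c: with 4 KiB pages and 2 MiB PMDs (the common x86-64 configuration), PG_PMD_NR is 512 and PG_PMD_COLOUR is 511. The numbers below are made up for illustration and are not part of the patch.

    pgoff_t index = 1000;	/* some page index inside a PMD-sized extent */

    /* Old byte-based form (needs a 64-bit-safe shift on 32-bit kernels):
     *   ((loff_t)index << PAGE_SHIFT) & PMD_MASK == 0x200000 == the 2 MiB boundary
     * New page-based form: mask off the 'colour' bits to round the index
     * down to the PMD boundary and pass a page count instead of a length. */
    unmap_mapping_pages(mapping, index & ~PG_PMD_COLOUR,	/* 1000 & ~511 == 512 */
			PG_PMD_NR, false);			/* 512 pages == PMD_SIZE */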
include/linux/mm.h

@@ -1312,8 +1312,6 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
 		unsigned long end, unsigned long floor, unsigned long ceiling);
 int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
 			struct vm_area_struct *vma);
-void unmap_mapping_range(struct address_space *mapping,
-		loff_t const holebegin, loff_t const holelen, int even_cows);
 int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
 			     unsigned long *start, unsigned long *end,
 			     pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
@@ -1324,12 +1322,6 @@ int follow_phys(struct vm_area_struct *vma, unsigned long address,
 int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 			void *buf, int len, int write);
 
-static inline void unmap_shared_mapping_range(struct address_space *mapping,
-		loff_t const holebegin, loff_t const holelen)
-{
-	unmap_mapping_range(mapping, holebegin, holelen, 0);
-}
-
 extern void truncate_pagecache(struct inode *inode, loff_t new);
 extern void truncate_setsize(struct inode *inode, loff_t newsize);
 void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
@@ -1344,6 +1336,10 @@ extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 			    unsigned long address, unsigned int fault_flags,
 			    bool *unlocked);
+void unmap_mapping_pages(struct address_space *mapping,
+		pgoff_t start, pgoff_t nr, bool even_cows);
+void unmap_mapping_range(struct address_space *mapping,
+		loff_t const holebegin, loff_t const holelen, int even_cows);
 #else
 static inline int handle_mm_fault(struct vm_area_struct *vma,
 		unsigned long address, unsigned int flags)
@@ -1360,10 +1356,20 @@ static inline int fixup_user_fault(struct task_struct *tsk,
 	BUG();
 	return -EFAULT;
 }
+static inline void unmap_mapping_pages(struct address_space *mapping,
+		pgoff_t start, pgoff_t nr, bool even_cows) { }
+static inline void unmap_mapping_range(struct address_space *mapping,
+		loff_t const holebegin, loff_t const holelen, int even_cows) { }
 #endif
 
-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
-		unsigned int gup_flags);
+static inline void unmap_shared_mapping_range(struct address_space *mapping,
+		loff_t const holebegin, loff_t const holelen)
+{
+	unmap_mapping_range(mapping, holebegin, holelen, 0);
+}
+
+extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
+		void *buf, int len, unsigned int gup_flags);
 extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
 		void *buf, int len, unsigned int gup_flags);
 extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
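One non-obvious part of the header reshuffle is why unmap_shared_mapping_range() moves: it is a static inline that calls unmap_mapping_range(), so some declaration of that function must precede it. With the real prototypes now inside the CONFIG_MMU branch and empty inline stubs in the #else branch, the wrapper has to follow the #endif, where it compiles in either configuration. A condensed sketch of the resulting layout, reassembled from the hunks above rather than quoted verbatim:

    #ifdef CONFIG_MMU
    void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows);
    void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows);
    #else
    static inline void unmap_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t nr, bool even_cows) { }
    static inline void unmap_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen, int even_cows) { }
    #endif

    /* Valid under both configurations, so it now sits after the #ifdef block. */
    static inline void unmap_shared_mapping_range(struct address_space *mapping,
		loff_t const holebegin, loff_t const holelen)
    {
	unmap_mapping_range(mapping, holebegin, holelen, 0);
    }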
mm/khugepaged.c

@@ -1399,8 +1399,7 @@ static void collapse_shmem(struct mm_struct *mm,
 		}
 
 		if (page_mapped(page))
-			unmap_mapping_range(mapping, index << PAGE_SHIFT,
-					PAGE_SIZE, 0);
+			unmap_mapping_pages(mapping, index, 1, false);
 
 		spin_lock_irq(&mapping->tree_lock);
 
mm/memory.c (43 lines changed)

@@ -2798,9 +2798,38 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
 	}
 }
 
+/**
+ * unmap_mapping_pages() - Unmap pages from processes.
+ * @mapping: The address space containing pages to be unmapped.
+ * @start: Index of first page to be unmapped.
+ * @nr: Number of pages to be unmapped.  0 to unmap to end of file.
+ * @even_cows: Whether to unmap even private COWed pages.
+ *
+ * Unmap the pages in this address space from any userspace process which
+ * has them mmaped.  Generally, you want to remove COWed pages as well when
+ * a file is being truncated, but not when invalidating pages from the page
+ * cache.
+ */
+void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
+		pgoff_t nr, bool even_cows)
+{
+	struct zap_details details = { };
+
+	details.check_mapping = even_cows ? NULL : mapping;
+	details.first_index = start;
+	details.last_index = start + nr - 1;
+	if (details.last_index < details.first_index)
+		details.last_index = ULONG_MAX;
+
+	i_mmap_lock_write(mapping);
+	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
+		unmap_mapping_range_tree(&mapping->i_mmap, &details);
+	i_mmap_unlock_write(mapping);
+}
+
 /**
  * unmap_mapping_range - unmap the portion of all mmaps in the specified
- * address_space corresponding to the specified page range in the underlying
+ * address_space corresponding to the specified byte range in the underlying
  * file.
  *
  * @mapping: the address space containing mmaps to be unmapped.
@@ -2818,7 +2847,6 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
 void unmap_mapping_range(struct address_space *mapping,
 		loff_t const holebegin, loff_t const holelen, int even_cows)
 {
-	struct zap_details details = { };
 	pgoff_t hba = holebegin >> PAGE_SHIFT;
 	pgoff_t hlen = (holelen + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
@@ -2830,16 +2858,7 @@ void unmap_mapping_range(struct address_space *mapping,
 		hlen = ULONG_MAX - hba + 1;
 	}
 
-	details.check_mapping = even_cows ? NULL : mapping;
-	details.first_index = hba;
-	details.last_index = hba + hlen - 1;
-	if (details.last_index < details.first_index)
-		details.last_index = ULONG_MAX;
-
-	i_mmap_lock_write(mapping);
-	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root)))
-		unmap_mapping_range_tree(&mapping->i_mmap, &details);
-	i_mmap_unlock_write(mapping);
+	unmap_mapping_pages(mapping, hba, hlen, even_cows);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
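A detail worth noting in the new helper: the kernel-doc's "0 to unmap to end of file" behaviour comes from the wrap-around check. With nr == 0, start + nr - 1 either wraps below start (forcing last_index to ULONG_MAX) or, when start is 0, is already ULONG_MAX, so the range extends to the end of the file either way. A usage sketch mirroring the converted callers; the call sites themselves are illustrative, not from the patch:

    /* Unmap one particular page from every process that has it mapped. */
    unmap_mapping_pages(mapping, index, 1, false);

    /* Unmap everything from 'start' to the end of the file, including
     * private COWed copies -- typically what truncation wants. */
    unmap_mapping_pages(mapping, start, 0, true);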
mm/nommu.c

@@ -1788,13 +1788,6 @@ unsigned long arch_get_unmapped_area(struct file *file, unsigned long addr,
 	return -ENOMEM;
 }
 
-void unmap_mapping_range(struct address_space *mapping,
-			 loff_t const holebegin, loff_t const holelen,
-			 int even_cows)
-{
-}
-EXPORT_SYMBOL(unmap_mapping_range);
-
 int filemap_fault(struct vm_fault *vmf)
 {
 	BUG();
mm/truncate.c

@@ -179,12 +179,8 @@ static void
 truncate_cleanup_page(struct address_space *mapping, struct page *page)
 {
 	if (page_mapped(page)) {
-		loff_t holelen;
-
-		holelen = PageTransHuge(page) ? HPAGE_PMD_SIZE : PAGE_SIZE;
-		unmap_mapping_range(mapping,
-				    (loff_t)page->index << PAGE_SHIFT,
-				    holelen, 0);
+		pgoff_t nr = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
+		unmap_mapping_pages(mapping, page->index, nr, false);
 	}
 
 	if (page_has_private(page))
@@ -715,19 +711,15 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 				/*
 				 * Zap the rest of the file in one hit.
 				 */
-				unmap_mapping_range(mapping,
-				   (loff_t)index << PAGE_SHIFT,
-				   (loff_t)(1 + end - index)
-							<< PAGE_SHIFT,
-							 0);
+				unmap_mapping_pages(mapping, index,
+						(1 + end - index), false);
 				did_range_unmap = 1;
 			} else {
 				/*
 				 * Just zap this page
 				 */
-				unmap_mapping_range(mapping,
-				   (loff_t)index << PAGE_SHIFT,
-				   PAGE_SIZE, 0);
+				unmap_mapping_pages(mapping, index,
+						1, false);
 			}
 		}
 		BUG_ON(page_mapped(page));
@@ -753,8 +745,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping,
 	 * get remapped later.
 	 */
 	if (dax_mapping(mapping)) {
-		unmap_mapping_range(mapping, (loff_t)start << PAGE_SHIFT,
-				    (loff_t)(end - start + 1) << PAGE_SHIFT, 0);
+		unmap_mapping_pages(mapping, start, end - start + 1, false);
 	}
 out:
 	cleancache_invalidate_inode(mapping);