diff --git a/include/linux/mm.h b/include/linux/mm.h
index c1f6c95f349631..5dfe009adcb94d 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2141,6 +2141,8 @@ struct vm_unmapped_area_info {
 extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
 extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
 
+struct file *map_hugetlb_setup(unsigned long *len, unsigned long flags);
+
 /*
  * Search for an unmapped address range.
  *
diff --git a/mm/mmap.c b/mm/mmap.c
index f19efcf7541878..f24fc14808e13b 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1490,24 +1490,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
 		if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file)))
 			goto out_fput;
 	} else if (flags & MAP_HUGETLB) {
-		struct user_struct *user = NULL;
-		struct hstate *hs;
-
-		hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
-		if (!hs)
-			return -EINVAL;
-
-		len = ALIGN(len, huge_page_size(hs));
-		/*
-		 * VM_NORESERVE is used because the reservations will be
-		 * taken when vm_ops->mmap() is called
-		 * A dummy user value is used because we are not locking
-		 * memory so no accounting is necessary
-		 */
-		file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
-				VM_NORESERVE,
-				&user, HUGETLB_ANONHUGE_INODE,
-				(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
+		file = map_hugetlb_setup(&len, flags);
 		if (IS_ERR(file))
 			return PTR_ERR(file);
 	}
diff --git a/mm/util.c b/mm/util.c
index 9ecddf568fe30e..93c253512aaa32 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -340,6 +340,29 @@ unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
 	return ret;
 }
 
+struct file *map_hugetlb_setup(unsigned long *len, unsigned long flags)
+{
+	struct user_struct *user = NULL;
+	struct hstate *hs;
+
+	hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
+	if (!hs)
+		return ERR_PTR(-EINVAL);
+
+	*len = ALIGN(*len, huge_page_size(hs));
+
+	/*
+	 * VM_NORESERVE is used because the reservations will be
+	 * taken when vm_ops->mmap() is called
+	 * A dummy user value is used because we are not locking
+	 * memory so no accounting is necessary
+	 */
+	return hugetlb_file_setup(HUGETLB_ANON_FILE, *len,
+			VM_NORESERVE,
+			&user, HUGETLB_ANONHUGE_INODE,
+			(flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
+}
+
 unsigned long vm_mmap(struct file *file, unsigned long addr,
 	unsigned long len, unsigned long prot,
 	unsigned long flag, unsigned long offset)
@@ -349,6 +372,15 @@ unsigned long vm_mmap(struct file *file, unsigned long addr,
 	if (unlikely(offset_in_page(offset)))
 		return -EINVAL;
 
+	if (flag & MAP_HUGETLB) {
+		if (file)
+			return -EINVAL;
+
+		file = map_hugetlb_setup(&len, flag);
+		if (IS_ERR(file))
+			return PTR_ERR(file);
+	}
+
 	return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
 }
 EXPORT_SYMBOL(vm_mmap);
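
For illustration only (not part of the patch): a minimal sketch of an in-kernel caller once vm_mmap() accepts MAP_HUGETLB. The function name example_map_huge() is hypothetical; map_hugetlb_setup() rounds the length up to the selected huge page size, so the resulting mapping may be larger than the requested size.

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/err.h>
#include <linux/printk.h>

/*
 * Hypothetical caller, for illustration only: with the change above,
 * vm_mmap() builds the anonymous hugetlb file itself, so the caller
 * passes a NULL file plus MAP_HUGETLB instead of open-coding
 * hugetlb_file_setup().
 */
static unsigned long example_map_huge(unsigned long size)
{
	unsigned long addr;

	addr = vm_mmap(NULL, 0, size, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, 0);
	if (IS_ERR_VALUE(addr))
		pr_err("hugetlb mapping failed: %ld\n", (long)addr);

	return addr;	/* mapping address, or negative errno on failure */
}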