arch/arm64/kernel/vdso.c


DEFINITIONS

This source file includes the following definitions:
  1. __vdso_remap
  2. __vdso_init
  3. __setup_additional_pages
  4. aarch32_vdso_mremap
  5. aarch32_alloc_kuser_vdso_page
  6. __aarch32_alloc_vdso_pages
  7. __aarch32_alloc_vdso_pages
  8. aarch32_alloc_vdso_pages
  9. aarch32_kuser_helpers_setup
  10. aarch32_sigreturn_setup
  11. aarch32_setup_additional_pages
  12. vdso_mremap
  13. vdso_init
  14. arch_setup_additional_pages

// SPDX-License-Identifier: GPL-2.0-only
/*
 * VDSO implementations.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/cache.h>
#include <linux/clocksource.h>
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/timekeeper_internal.h>
#include <linux/vmalloc.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
#include <vdso/vsyscall.h>

#include <asm/cacheflush.h>
#include <asm/signal32.h>
#include <asm/vdso.h>

extern char vdso_start[], vdso_end[];
#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start[], vdso32_end[];
#endif /* CONFIG_COMPAT_VDSO */

/* vdso_lookup arch_index */
enum arch_vdso_type {
        ARM64_VDSO = 0,
#ifdef CONFIG_COMPAT_VDSO
        ARM64_VDSO32 = 1,
#endif /* CONFIG_COMPAT_VDSO */
};
#ifdef CONFIG_COMPAT_VDSO
#define VDSO_TYPES              (ARM64_VDSO32 + 1)
#else
#define VDSO_TYPES              (ARM64_VDSO + 1)
#endif /* CONFIG_COMPAT_VDSO */

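/*
 * Per-ABI vDSO bookkeeping: the code boundaries are known at build time,
 * while vdso_pages and the data/code special mappings are filled in during
 * initialisation.
 */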
struct __vdso_abi {
        const char *name;
        const char *vdso_code_start;
        const char *vdso_code_end;
        unsigned long vdso_pages;
        /* Data Mapping */
        struct vm_special_mapping *dm;
        /* Code Mapping */
        struct vm_special_mapping *cm;
};

static struct __vdso_abi vdso_lookup[VDSO_TYPES] __ro_after_init = {
        {
                .name = "vdso",
                .vdso_code_start = vdso_start,
                .vdso_code_end = vdso_end,
        },
#ifdef CONFIG_COMPAT_VDSO
        {
                .name = "vdso32",
                .vdso_code_start = vdso32_start,
                .vdso_code_end = vdso32_end,
        },
#endif /* CONFIG_COMPAT_VDSO */
};

/*
 * The vDSO data page.
 */
static union {
        struct vdso_data        data[CS_BASES];
        u8                      page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = vdso_data_store.data;

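/*
 * Common mremap() callback for the [vdso] mapping: reject any resize of the
 * vDSO text and record the new base address in the mm context.
 */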
static int __vdso_remap(enum arch_vdso_type arch_index,
                        const struct vm_special_mapping *sm,
                        struct vm_area_struct *new_vma)
{
        unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
        unsigned long vdso_size = vdso_lookup[arch_index].vdso_code_end -
                                  vdso_lookup[arch_index].vdso_code_start;

        if (vdso_size != new_size)
                return -EINVAL;

        current->mm->context.vdso = (void *)new_vma->vm_start;

        return 0;
}

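/*
 * Validate the vDSO image and build its pagelist: slot 0 carries the shared
 * vDSO data page, followed by the code pages of the given ABI.
 */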
static int __vdso_init(enum arch_vdso_type arch_index)
{
        int i;
        struct page **vdso_pagelist;
        unsigned long pfn;

        if (memcmp(vdso_lookup[arch_index].vdso_code_start, "\177ELF", 4)) {
                pr_err("vDSO is not a valid ELF object!\n");
                return -EINVAL;
        }

        vdso_lookup[arch_index].vdso_pages = (
                        vdso_lookup[arch_index].vdso_code_end -
                        vdso_lookup[arch_index].vdso_code_start) >>
                        PAGE_SHIFT;

        /* Allocate the vDSO pagelist, plus a page for the data. */
        vdso_pagelist = kcalloc(vdso_lookup[arch_index].vdso_pages + 1,
                                sizeof(struct page *),
                                GFP_KERNEL);
        if (vdso_pagelist == NULL)
                return -ENOMEM;

        /* Grab the vDSO data page. */
        vdso_pagelist[0] = phys_to_page(__pa_symbol(vdso_data));

        /* Grab the vDSO code pages. */
        pfn = sym_to_pfn(vdso_lookup[arch_index].vdso_code_start);

        for (i = 0; i < vdso_lookup[arch_index].vdso_pages; i++)
                vdso_pagelist[i + 1] = pfn_to_page(pfn + i);

        vdso_lookup[arch_index].dm->pages = &vdso_pagelist[0];
        vdso_lookup[arch_index].cm->pages = &vdso_pagelist[1];

        return 0;
}

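/*
 * Map the vDSO into @mm: the data page first (read-only), immediately
 * followed by the vDSO text (read/exec). On failure, clear the vdso pointer
 * in the mm context so no partial mapping is advertised to userspace.
 */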
static int __setup_additional_pages(enum arch_vdso_type arch_index,
                                    struct mm_struct *mm,
                                    struct linux_binprm *bprm,
                                    int uses_interp)
{
        unsigned long vdso_base, vdso_text_len, vdso_mapping_len;
        void *ret;

        vdso_text_len = vdso_lookup[arch_index].vdso_pages << PAGE_SHIFT;
        /* Be sure to map the data page */
        vdso_mapping_len = vdso_text_len + PAGE_SIZE;

        vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                ret = ERR_PTR(vdso_base);
                goto up_fail;
        }

        ret = _install_special_mapping(mm, vdso_base, PAGE_SIZE,
                                       VM_READ|VM_MAYREAD,
                                       vdso_lookup[arch_index].dm);
        if (IS_ERR(ret))
                goto up_fail;

        vdso_base += PAGE_SIZE;
        mm->context.vdso = (void *)vdso_base;
        ret = _install_special_mapping(mm, vdso_base, vdso_text_len,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       vdso_lookup[arch_index].cm);
        if (IS_ERR(ret))
                goto up_fail;

        return 0;

up_fail:
        mm->context.vdso = NULL;
        return PTR_ERR(ret);
}

#ifdef CONFIG_COMPAT
/*
 * Create and map the vectors page for AArch32 tasks.
 */
#ifdef CONFIG_COMPAT_VDSO
static int aarch32_vdso_mremap(const struct vm_special_mapping *sm,
                struct vm_area_struct *new_vma)
{
        return __vdso_remap(ARM64_VDSO32, sm, new_vma);
}
#endif /* CONFIG_COMPAT_VDSO */

/*
 * aarch32_vdso_pages:
 * 0 - kuser helpers
 * 1 - sigreturn code
 * or (CONFIG_COMPAT_VDSO):
 * 0 - kuser helpers
 * 1 - vdso data
 * 2 - vdso code
 */
#define C_VECTORS       0
#ifdef CONFIG_COMPAT_VDSO
#define C_VVAR          1
#define C_VDSO          2
#define C_PAGES         (C_VDSO + 1)
#else
#define C_SIGPAGE       1
#define C_PAGES         (C_SIGPAGE + 1)
#endif /* CONFIG_COMPAT_VDSO */
static struct page *aarch32_vdso_pages[C_PAGES] __ro_after_init;
static struct vm_special_mapping aarch32_vdso_spec[C_PAGES] = {
        {
                .name   = "[vectors]", /* ABI */
                .pages  = &aarch32_vdso_pages[C_VECTORS],
        },
#ifdef CONFIG_COMPAT_VDSO
        {
                .name = "[vvar]",
        },
        {
                .name = "[vdso]",
                .mremap = aarch32_vdso_mremap,
        },
#else
        {
                .name   = "[sigpage]", /* ABI */
                .pages  = &aarch32_vdso_pages[C_SIGPAGE],
        },
#endif /* CONFIG_COMPAT_VDSO */
};

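/*
 * Allocate the [vectors] page and copy the kuser helpers into its top, so
 * that they end up at their ABI-mandated offsets within the page.
 */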
static int aarch32_alloc_kuser_vdso_page(void)
{
        extern char __kuser_helper_start[], __kuser_helper_end[];
        int kuser_sz = __kuser_helper_end - __kuser_helper_start;
        unsigned long vdso_page;

        if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
                return 0;

        vdso_page = get_zeroed_page(GFP_ATOMIC);
        if (!vdso_page)
                return -ENOMEM;

        memcpy((void *)(vdso_page + 0x1000 - kuser_sz), __kuser_helper_start,
               kuser_sz);
        aarch32_vdso_pages[C_VECTORS] = virt_to_page(vdso_page);
        flush_dcache_page(aarch32_vdso_pages[C_VECTORS]);
        return 0;
}

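/*
 * With CONFIG_COMPAT_VDSO the compat vDSO image is initialised like the
 * native one; otherwise only a standalone sigreturn page is populated.
 * Both variants also allocate the kuser helpers page.
 */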
#ifdef CONFIG_COMPAT_VDSO
static int __aarch32_alloc_vdso_pages(void)
{
        int ret;

        vdso_lookup[ARM64_VDSO32].dm = &aarch32_vdso_spec[C_VVAR];
        vdso_lookup[ARM64_VDSO32].cm = &aarch32_vdso_spec[C_VDSO];

        ret = __vdso_init(ARM64_VDSO32);
        if (ret)
                return ret;

        return aarch32_alloc_kuser_vdso_page();
}
#else
static int __aarch32_alloc_vdso_pages(void)
{
        extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
        int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
        unsigned long sigpage;
        int ret;

        sigpage = get_zeroed_page(GFP_ATOMIC);
        if (!sigpage)
                return -ENOMEM;

        memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
        aarch32_vdso_pages[C_SIGPAGE] = virt_to_page(sigpage);
        flush_dcache_page(aarch32_vdso_pages[C_SIGPAGE]);

        ret = aarch32_alloc_kuser_vdso_page();
        if (ret)
                free_page(sigpage);

        return ret;
}
#endif /* CONFIG_COMPAT_VDSO */

static int __init aarch32_alloc_vdso_pages(void)
{
        return __aarch32_alloc_vdso_pages();
}
arch_initcall(aarch32_alloc_vdso_pages);

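/* Map the kuser helpers page at the fixed AArch32 vectors address. */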
static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
{
        void *ret;

        if (!IS_ENABLED(CONFIG_KUSER_HELPERS))
                return 0;

        /*
         * Avoid VM_MAYWRITE for compatibility with arch/arm/, where it's
         * not safe to CoW the page containing the CPU exception vectors.
         */
        ret = _install_special_mapping(mm, AARCH32_VECTORS_BASE, PAGE_SIZE,
                                       VM_READ | VM_EXEC |
                                       VM_MAYREAD | VM_MAYEXEC,
                                       &aarch32_vdso_spec[C_VECTORS]);

        return PTR_ERR_OR_ZERO(ret);
}

#ifndef CONFIG_COMPAT_VDSO
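/*
 * Without a compat vDSO, the AArch32 sigreturn trampolines live in a
 * separate [sigpage] mapped wherever get_unmapped_area() places it.
 */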
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
        unsigned long addr;
        void *ret;

        addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = ERR_PTR(addr);
                goto out;
        }

        /*
         * VM_MAYWRITE is required to allow gdb to Copy-on-Write and
         * set breakpoints.
         */
        ret = _install_special_mapping(mm, addr, PAGE_SIZE,
                                       VM_READ | VM_EXEC | VM_MAYREAD |
                                       VM_MAYWRITE | VM_MAYEXEC,
                                       &aarch32_vdso_spec[C_SIGPAGE]);
        if (IS_ERR(ret))
                goto out;

        mm->context.vdso = (void *)addr;

out:
        return PTR_ERR_OR_ZERO(ret);
}
#endif /* !CONFIG_COMPAT_VDSO */

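/*
 * Set up the AArch32 mappings for a new compat process: the kuser helpers
 * page plus either the compat vDSO or the standalone sigreturn page.
 */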
int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        int ret;

        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;

        ret = aarch32_kuser_helpers_setup(mm);
        if (ret)
                goto out;

#ifdef CONFIG_COMPAT_VDSO
        ret = __setup_additional_pages(ARM64_VDSO32,
                                       mm,
                                       bprm,
                                       uses_interp);
#else
        ret = aarch32_sigreturn_setup(mm);
#endif /* CONFIG_COMPAT_VDSO */

out:
        up_write(&mm->mmap_sem);
        return ret;
}
#endif /* CONFIG_COMPAT */

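/* mremap() callback for the native (AArch64) [vdso] mapping. */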
static int vdso_mremap(const struct vm_special_mapping *sm,
                struct vm_area_struct *new_vma)
{
        return __vdso_remap(ARM64_VDSO, sm, new_vma);
}

/*
 * aarch64_vdso_pages:
 * 0 - vvar
 * 1 - vdso
 */
#define A_VVAR          0
#define A_VDSO          1
#define A_PAGES         (A_VDSO + 1)
static struct vm_special_mapping vdso_spec[A_PAGES] __ro_after_init = {
        {
                .name   = "[vvar]",
        },
        {
                .name   = "[vdso]",
                .mremap = vdso_mremap,
        },
};

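/*
 * Wire up the native vDSO special mappings and build its pagelist at boot.
 */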
static int __init vdso_init(void)
{
        vdso_lookup[ARM64_VDSO].dm = &vdso_spec[A_VVAR];
        vdso_lookup[ARM64_VDSO].cm = &vdso_spec[A_VDSO];

        return __vdso_init(ARM64_VDSO);
}
arch_initcall(vdso_init);

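/* Map the native vDSO into each new AArch64 process at exec time. */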
int arch_setup_additional_pages(struct linux_binprm *bprm,
                                int uses_interp)
{
        struct mm_struct *mm = current->mm;
        int ret;

        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;

        ret = __setup_additional_pages(ARM64_VDSO,
                                       mm,
                                       bprm,
                                       uses_interp);

        up_write(&mm->mmap_sem);

        return ret;
}
