root/arch/arm64/kernel/module.c

DEFINITIONS

This source file includes the following definitions:
  1. module_alloc
  2. do_reloc
  3. reloc_data
  4. reloc_insn_movw
  5. reloc_insn_imm
  6. reloc_insn_adrp
  7. apply_relocate_add
  8. module_finalize

// SPDX-License-Identifier: GPL-2.0-only
/*
 * AArch64 loadable module support.
 *
 * Copyright (C) 2012 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#include <linux/bitops.h>
#include <linux/elf.h>
#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>

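/*
 * In short: module allocations are first attempted within the 128 MiB
 * (MODULES_VSIZE) region at module_alloc_base, which keeps direct 26-bit
 * branches to the core kernel in range; if that region is exhausted and
 * PLT veneers are available, we retry within a 2 GiB window below.
 */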
void *module_alloc(unsigned long size)
{
        u64 module_alloc_end = module_alloc_base + MODULES_VSIZE;
        gfp_t gfp_mask = GFP_KERNEL;
        void *p;

        /* Silence the initial allocation */
        if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
                gfp_mask |= __GFP_NOWARN;

        if (IS_ENABLED(CONFIG_KASAN))
                /* don't exceed the static module region - see below */
                module_alloc_end = MODULES_END;

        p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
                                module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
                                NUMA_NO_NODE, __builtin_return_address(0));

        if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
            !IS_ENABLED(CONFIG_KASAN))
                /*
                 * KASAN can only deal with module allocations being served
                 * from the reserved module region, since the remainder of
                 * the vmalloc region is already backed by zero shadow pages,
                 * and punching holes into it is non-trivial. Since the module
                 * region is not randomized when KASAN is enabled, it is even
                 * less likely that the module region gets exhausted, so we
                 * can simply omit this fallback in that case.
                 */
                p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
                                module_alloc_base + SZ_2G, GFP_KERNEL,
                                PAGE_KERNEL, 0, NUMA_NO_NODE,
                                __builtin_return_address(0));

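        /*
         * kasan_module_alloc() populates KASAN shadow for the new mapping;
         * if that fails, free the module mapping ourselves, since nothing
         * else holds a reference to it yet.
         */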
        if (p && (kasan_module_alloc(p, size) < 0)) {
                vfree(p);
                return NULL;
        }

        return p;
}

enum aarch64_reloc_op {
        RELOC_OP_NONE,
        RELOC_OP_ABS,
        RELOC_OP_PREL,
        RELOC_OP_PAGE,
};

static u64 do_reloc(enum aarch64_reloc_op reloc_op, __le32 *place, u64 val)
{
        switch (reloc_op) {
        case RELOC_OP_ABS:
                return val;
        case RELOC_OP_PREL:
                return val - (u64)place;
        case RELOC_OP_PAGE:
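                /*
                 * Distance between the 4 KiB pages of val and place, as
                 * consumed by ADRP. E.g. place = 0xffff000010001234 and
                 * val = 0xffff000010445678 give 0x444000: 0x444 pages apart.
                 */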
                return (val & ~0xfff) - ((u64)place & ~0xfff);
        case RELOC_OP_NONE:
                return 0;
        }

        pr_err("do_reloc: unknown relocation operation %d\n", reloc_op);
        return 0;
}

static int reloc_data(enum aarch64_reloc_op op, void *place, u64 val, int len)
{
        s64 sval = do_reloc(op, place, val);

        /*
         * The ELF psABI for AArch64 documents the 16-bit and 32-bit place
         * relative and absolute relocations as having a range of [-2^15, 2^16)
         * or [-2^31, 2^32), respectively. However, in order to be able to
         * detect overflows reliably, we have to choose whether we interpret
         * such quantities as signed or as unsigned, and stick with it.
         * The way we organize our address space requires a signed
         * interpretation of 32-bit relative references, so let's use that
         * for all R_AARCH64_PRELxx relocations. This means our upper
         * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
         */
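        /*
         * Concretely: an R_AARCH64_PREL32 result of 0x80000000 (2^31) is
         * rejected with -ERANGE below, even though the psABI's nominal
         * [-2^31, 2^32) range would still accommodate it.
         */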

        switch (len) {
        case 16:
                *(s16 *)place = sval;
                switch (op) {
                case RELOC_OP_ABS:
                        if (sval < 0 || sval > U16_MAX)
                                return -ERANGE;
                        break;
                case RELOC_OP_PREL:
                        if (sval < S16_MIN || sval > S16_MAX)
                                return -ERANGE;
                        break;
                default:
                        pr_err("Invalid 16-bit data relocation (%d)\n", op);
                        return 0;
                }
                break;
        case 32:
                *(s32 *)place = sval;
                switch (op) {
                case RELOC_OP_ABS:
                        if (sval < 0 || sval > U32_MAX)
                                return -ERANGE;
                        break;
                case RELOC_OP_PREL:
                        if (sval < S32_MIN || sval > S32_MAX)
                                return -ERANGE;
                        break;
                default:
                        pr_err("Invalid 32-bit data relocation (%d)\n", op);
                        return 0;
                }
                break;
        case 64:
                *(s64 *)place = sval;
                break;
        default:
                pr_err("Invalid length (%d) for data relocation\n", len);
                return 0;
        }
        return 0;
}

enum aarch64_insn_movw_imm_type {
        AARCH64_INSN_IMM_MOVNZ,
        AARCH64_INSN_IMM_MOVKZ,
};

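/*
 * Worked example: R_AARCH64_MOVW_SABS_G0 with val == -2 yields sval = -2
 * and imm = 0xfffffffffffffffe. As sval < 0, the opcode field is left as
 * MOVN (00b) and imm is inverted to 1, so the patched instruction is
 * MOVN Xd, #1, which loads ~1 == -2 as intended.
 */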
static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
                           int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
        u64 imm;
        s64 sval;
        u32 insn = le32_to_cpu(*place);

        sval = do_reloc(op, place, val);
        imm = sval >> lsb;

        if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
                /*
                 * For signed MOVW relocations, we have to manipulate the
                 * instruction encoding depending on whether or not the
                 * immediate is less than zero.
                 */
                insn &= ~(3 << 29);
                if (sval >= 0) {
                        /* >=0: Set the instruction to MOVZ (opcode 10b). */
                        insn |= 2 << 29;
                } else {
                        /*
                         * <0: Set the instruction to MOVN (opcode 00b).
                         *     Since we've masked the opcode already, we
                         *     don't need to do anything other than
                         *     inverting the new immediate field.
                         */
                        imm = ~imm;
                }
        }

        /* Update the instruction with the new encoding. */
        insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
        *place = cpu_to_le32(insn);

        if (imm > U16_MAX)
                return -ERANGE;

        return 0;
}

static int reloc_insn_imm(enum aarch64_reloc_op op, __le32 *place, u64 val,
                          int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
        u64 imm, imm_mask;
        s64 sval;
        u32 insn = le32_to_cpu(*place);

        /* Calculate the relocation value. */
        sval = do_reloc(op, place, val);
        sval >>= lsb;

        /* Extract the value bits and shift them to bit 0. */
        imm_mask = (BIT(lsb + len) - 1) >> lsb;
        imm = sval & imm_mask;

        /* Update the instruction's immediate field. */
        insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
        *place = cpu_to_le32(insn);

        /*
         * Extract the upper value bits (including the sign bit) and
         * shift them to bit 0.
         */
        sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

        /*
         * Overflow has occurred if the upper bits are not all equal to
         * the sign bit of the value.
         */
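        /*
         * E.g. for a 21-bit ADR immediate, sval == 0x100000 (2^20) leaves
         * 1 here, making (u64)(sval + 1) == 2, so we report -ERANGE;
         * only 0 and -1 (upper bits all equal to the sign bit) pass.
         */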
        if ((u64)(sval + 1) >= 2)
                return -ERANGE;

        return 0;
}

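/*
 * is_forbidden_offset_for_adrp() flags ADRP instructions at the page
 * offsets affected by Cortex-A53 erratum 843419. Where the target is in
 * range, clearing bit 31 turns ADRP (op == 1) into ADR (op == 0) with a
 * byte rather than page offset; otherwise we branch to a veneer that
 * performs the ADRP from an unaffected location.
 */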
static int reloc_insn_adrp(struct module *mod, Elf64_Shdr *sechdrs,
                           __le32 *place, u64 val)
{
        u32 insn;

        if (!is_forbidden_offset_for_adrp(place))
                return reloc_insn_imm(RELOC_OP_PAGE, place, val, 12, 21,
                                      AARCH64_INSN_IMM_ADR);

        /* patch ADRP to ADR if it is in range */
        if (!reloc_insn_imm(RELOC_OP_PREL, place, val & ~0xfff, 0, 21,
                            AARCH64_INSN_IMM_ADR)) {
                insn = le32_to_cpu(*place);
                insn &= ~BIT(31);
        } else {
                /* out of range for ADR -> emit a veneer */
                val = module_emit_veneer_for_adrp(mod, sechdrs, place, val & ~0xfff);
                if (!val)
                        return -ENOEXEC;
                insn = aarch64_insn_gen_branch_imm((u64)place, val,
                                                   AARCH64_INSN_BRANCH_NOLINK);
        }

        *place = cpu_to_le32(insn);
        return 0;
}

int apply_relocate_add(Elf64_Shdr *sechdrs,
                       const char *strtab,
                       unsigned int symindex,
                       unsigned int relsec,
                       struct module *me)
{
        unsigned int i;
        int ovf;
        bool overflow_check;
        Elf64_Sym *sym;
        void *loc;
        u64 val;
        Elf64_Rela *rel = (void *)sechdrs[relsec].sh_addr;

        for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
                /* loc corresponds to P in the AArch64 ELF document. */
                loc = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
                        + rel[i].r_offset;

                /* sym is the ELF symbol we're referring to. */
                sym = (Elf64_Sym *)sechdrs[symindex].sh_addr
                        + ELF64_R_SYM(rel[i].r_info);

                /* val corresponds to (S + A) in the AArch64 ELF document. */
                val = sym->st_value + rel[i].r_addend;

                /* Check for overflow by default. */
                overflow_check = true;

                /* Perform the static relocation. */
                switch (ELF64_R_TYPE(rel[i].r_info)) {
                /* Null relocations. */
                case R_ARM_NONE:
                case R_AARCH64_NONE:
                        ovf = 0;
                        break;

                /* Data relocations. */
                case R_AARCH64_ABS64:
                        overflow_check = false;
                        ovf = reloc_data(RELOC_OP_ABS, loc, val, 64);
                        break;
                case R_AARCH64_ABS32:
                        ovf = reloc_data(RELOC_OP_ABS, loc, val, 32);
                        break;
                case R_AARCH64_ABS16:
                        ovf = reloc_data(RELOC_OP_ABS, loc, val, 16);
                        break;
                case R_AARCH64_PREL64:
                        overflow_check = false;
                        ovf = reloc_data(RELOC_OP_PREL, loc, val, 64);
                        break;
                case R_AARCH64_PREL32:
                        ovf = reloc_data(RELOC_OP_PREL, loc, val, 32);
                        break;
                case R_AARCH64_PREL16:
                        ovf = reloc_data(RELOC_OP_PREL, loc, val, 16);
                        break;

                /* MOVW instruction relocations. */
                case R_AARCH64_MOVW_UABS_G0_NC:
                        overflow_check = false;
                        /* Fall through */
                case R_AARCH64_MOVW_UABS_G0:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G1_NC:
                        overflow_check = false;
                        /* Fall through */
                case R_AARCH64_MOVW_UABS_G1:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G2_NC:
                        overflow_check = false;
                        /* Fall through */
                case R_AARCH64_MOVW_UABS_G2:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_UABS_G3:
                        /* We're using the top bits so we can't overflow. */
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 48,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_SABS_G0:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_SABS_G1:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_SABS_G2:
                        ovf = reloc_insn_movw(RELOC_OP_ABS, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G0_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_PREL_G0:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 0,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G1_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_PREL_G1:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 16,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G2_NC:
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVKZ);
                        break;
                case R_AARCH64_MOVW_PREL_G2:
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 32,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;
                case R_AARCH64_MOVW_PREL_G3:
                        /* We're using the top bits so we can't overflow. */
                        overflow_check = false;
                        ovf = reloc_insn_movw(RELOC_OP_PREL, loc, val, 48,
                                              AARCH64_INSN_IMM_MOVNZ);
                        break;

                /* Immediate instruction relocations. */
                case R_AARCH64_LD_PREL_LO19:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
                                             AARCH64_INSN_IMM_19);
                        break;
                case R_AARCH64_ADR_PREL_LO21:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 0, 21,
                                             AARCH64_INSN_IMM_ADR);
                        break;
                case R_AARCH64_ADR_PREL_PG_HI21_NC:
                        overflow_check = false;
                        /* Fall through */
                case R_AARCH64_ADR_PREL_PG_HI21:
                        ovf = reloc_insn_adrp(me, sechdrs, loc, val);
                        if (ovf && ovf != -ERANGE)
                                return ovf;
                        break;
                case R_AARCH64_ADD_ABS_LO12_NC:
                case R_AARCH64_LDST8_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 0, 12,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST16_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 1, 11,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST32_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 2, 10,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST64_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 3, 9,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_LDST128_ABS_LO12_NC:
                        overflow_check = false;
                        ovf = reloc_insn_imm(RELOC_OP_ABS, loc, val, 4, 8,
                                             AARCH64_INSN_IMM_12);
                        break;
                case R_AARCH64_TSTBR14:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 14,
                                             AARCH64_INSN_IMM_14);
                        break;
                case R_AARCH64_CONDBR19:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 19,
                                             AARCH64_INSN_IMM_19);
                        break;
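                /*
                 * A 26-bit branch immediate spans +/-128 MiB. If the target
                 * lies further away, and PLTs are configured in, route the
                 * branch through a freshly emitted PLT entry instead.
                 */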
                case R_AARCH64_JUMP26:
                case R_AARCH64_CALL26:
                        ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2, 26,
                                             AARCH64_INSN_IMM_26);

                        if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
                            ovf == -ERANGE) {
                                val = module_emit_plt_entry(me, sechdrs, loc, &rel[i], sym);
                                if (!val)
                                        return -ENOEXEC;
                                ovf = reloc_insn_imm(RELOC_OP_PREL, loc, val, 2,
                                                     26, AARCH64_INSN_IMM_26);
                        }
                        break;

                default:
                        pr_err("module %s: unsupported RELA relocation: %llu\n",
                               me->name, ELF64_R_TYPE(rel[i].r_info));
                        return -ENOEXEC;
                }

                if (overflow_check && ovf == -ERANGE)
                        goto overflow;

        }

        return 0;

overflow:
        pr_err("module %s: overflow in relocation type %d val %Lx\n",
               me->name, (int)ELF64_R_TYPE(rel[i].r_info), val);
        return -ENOEXEC;
}

int module_finalize(const Elf_Ehdr *hdr,
                    const Elf_Shdr *sechdrs,
                    struct module *me)
{
        const Elf_Shdr *s, *se;
        const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;

        for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
                if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
                        apply_alternatives_module((void *)s->sh_addr, s->sh_size);
#ifdef CONFIG_ARM64_MODULE_PLTS
                if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
                    !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
                        me->arch.ftrace_trampoline = (void *)s->sh_addr;
#endif
        }

        return 0;
}
