/*
 * xsave/xrstor support.
 *
 * Author: Suresh Siddha <suresh.b.siddha@intel.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bootmem.h>
#include <linux/compat.h>
#include <linux/cpu.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/sigframe.h>
#include <asm/tlbflush.h>
#include <asm/xcr.h>

/*
 * Mask of the features supported by both the CPU and the kernel.
 */
u64 pcntxt_mask;

/*
 * Represents the init state for the supported extended state.
 */
struct xsave_struct *init_xstate_buf;

static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
static unsigned int *xstate_offsets, *xstate_sizes;
static unsigned int xstate_comp_offsets[sizeof(pcntxt_mask)*8];
static unsigned int xstate_features;

/*
 * If a processor implementation discerns that a processor state component
 * is in its initialized state, it may set the corresponding bit in
 * xsave_hdr.xstate_bv to '0' without modifying the corresponding memory
 * layout (in the case of xsaveopt). While presenting the xstate
 * information to the user, we always ensure that the memory layout of a
 * feature will be in the init state if the corresponding header bit is
 * zero. This is to ensure that the user doesn't see some stale state in
 * the memory layout during signal handling, debugging etc.
 */
void __sanitize_i387_state(struct task_struct *tsk)
{
	struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
	int feature_bit = 0x2;
	u64 xstate_bv;

	if (!fx)
		return;

	xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;

	/*
	 * None of the feature bits are in init state. So nothing else
	 * to do for us, as the memory layout is up to date.
	 */
	if ((xstate_bv & pcntxt_mask) == pcntxt_mask)
		return;

	/*
	 * FP is in init state
	 */
	if (!(xstate_bv & XSTATE_FP)) {
		fx->cwd = 0x37f;
		fx->swd = 0;
		fx->twd = 0;
		fx->fop = 0;
		fx->rip = 0;
		fx->rdp = 0;
		memset(&fx->st_space[0], 0, 128);
	}

	/*
	 * SSE is in init state
	 */
	if (!(xstate_bv & XSTATE_SSE))
		memset(&fx->xmm_space[0], 0, 256);

	xstate_bv = (pcntxt_mask & ~xstate_bv) >> 2;

	/*
	 * Update all the other memory layouts for which the corresponding
	 * header bit is in the init state.
	 */
	while (xstate_bv) {
		if (xstate_bv & 0x1) {
			int offset = xstate_offsets[feature_bit];
			int size = xstate_sizes[feature_bit];

			memcpy(((void *) fx) + offset,
			       ((void *) init_xstate_buf) + offset,
			       size);
		}

		xstate_bv >>= 1;
		feature_bit++;
	}
}
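
/*
 * Worked example for the sanitizing loop above (a sketch, assuming a
 * CPU with pcntxt_mask == XSTATE_FP | XSTATE_SSE | XSTATE_YMM == 0x7):
 * if xsaveopt recorded xstate_bv == XSTATE_FPSSE (YMM left in its init
 * state), then
 *
 *	(pcntxt_mask & ~xstate_bv) >> 2 == (0x7 & ~0x3) >> 2 == 0x1
 *
 * so the loop runs once with feature_bit == 2 and copies the YMM init
 * image from init_xstate_buf over the task's stale YMM region.
 */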

/*
 * Check for the presence of extended state information in the
 * user fpstate pointer in the sigcontext.
 */
static inline int check_for_xstate(struct i387_fxsave_struct __user *buf,
				   void __user *fpstate,
				   struct _fpx_sw_bytes *fx_sw)
{
	int min_xstate_size = sizeof(struct i387_fxsave_struct) +
			      sizeof(struct xsave_hdr_struct);
	unsigned int magic2;

	if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
		return -1;

	/* Check for the first magic field and other error scenarios. */
	if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
	    fx_sw->xstate_size < min_xstate_size ||
	    fx_sw->xstate_size > xstate_size ||
	    fx_sw->xstate_size > fx_sw->extended_size)
		return -1;

	/*
	 * Check for the presence of the second magic word at the end of
	 * the memory layout. This detects the case where the user just
	 * copied the legacy fpstate layout without copying the extended
	 * state information in the memory layout.
	 */
	if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
	    || magic2 != FP_XSTATE_MAGIC2)
		return -1;

	return 0;
}

/*
 * Signal frame handlers.
 */
static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
{
	if (use_fxsr()) {
		struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
		struct user_i387_ia32_struct env;
		struct _fpstate_ia32 __user *fp = buf;

		convert_from_fxsr(&env, tsk);

		if (__copy_to_user(buf, &env, sizeof(env)) ||
		    __put_user(xsave->i387.swd, &fp->status) ||
		    __put_user(X86_FXSR_MAGIC, &fp->magic))
			return -1;
	} else {
		struct i387_fsave_struct __user *fp = buf;
		u32 swd;
		if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
			return -1;
	}

	return 0;
}

static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
{
	struct xsave_struct __user *x = buf;
	struct _fpx_sw_bytes *sw_bytes;
	u32 xstate_bv;
	int err;

	/* Setup the bytes not touched by the [f]xsave and reserved for SW. */
	sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
	err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));

	if (!use_xsave())
		return err;

	err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));

	/*
	 * Read the xstate_bv which we copied (directly from the cpu or
	 * from the state in task struct) to the user buffers.
	 */
	err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);

	/*
	 * For legacy compatibility, we always set the FP/SSE bits in the
	 * bit vector while saving the state to the user context. This
	 * enables us to capture any changes (during sigreturn) to the
	 * FP/SSE bits made by legacy applications which don't touch
	 * xstate_bv in the xsave header.
	 *
	 * xsave-aware apps can change the xstate_bv in the xsave
	 * header as well as change any contents in the memory layout.
	 * xrstor as part of sigreturn will capture all the changes.
	 */
	xstate_bv |= XSTATE_FPSSE;

	err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);

	return err;
}

static inline int save_user_xstate(struct xsave_struct __user *buf)
{
	int err;

	if (use_xsave())
		err = xsave_user(buf);
	else if (use_fxsr())
		err = fxsave_user((struct i387_fxsave_struct __user *) buf);
	else
		err = fsave_user((struct i387_fsave_struct __user *) buf);

	if (unlikely(err) && __clear_user(buf, xstate_size))
		err = -EFAULT;
	return err;
}
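
/*
 * Layout sketch of the 32-bit frame with fxstate handled below (an
 * illustration, not an additional structure):
 *
 *	buf    -> fsave header (struct i387_fsave_struct)
 *	buf_fx -> 64-byte aligned [f]xsave area, xstate_size bytes,
 *	          with fx_sw_reserved_ia32 in its sw_reserved bytes
 *	buf_fx + xstate_size -> FP_XSTATE_MAGIC2 sentinel word
 *
 * For 64-bit frames and 32-bit fsave frames, buf == buf_fx and there
 * is no separate fsave header.
 */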

/*
 * Save the fpu, extended register state to the user signal frame.
 *
 * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
 *  state is copied.
 * 'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'.
 *
 *	buf == buf_fx for 64-bit frames and 32-bit fsave frame.
 *	buf != buf_fx for 32-bit frames with fxstate.
 *
 * If the fpu, extended register state is live, save the state directly
 * to the user frame pointed to by the aligned pointer 'buf_fx'. Otherwise,
 * copy the thread's fpu state to the user frame starting at 'buf_fx'.
 *
 * If this is a 32-bit frame with fxstate, put an fsave header before
 * the aligned state at 'buf_fx'.
 *
 * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
 * indicating the absence/presence of the extended state to the user.
 */
int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
{
	struct xsave_struct *xsave = &current->thread.fpu.state->xsave;
	struct task_struct *tsk = current;
	int ia32_fxstate = (buf != buf_fx);

	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	if (!access_ok(VERIFY_WRITE, buf, size))
		return -EACCES;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(current, NULL, 0,
			sizeof(struct user_i387_ia32_struct), NULL,
			(struct _fpstate_ia32 __user *) buf) ? -1 : 1;

	if (user_has_fpu()) {
		/* Save the live register state to the user directly. */
		if (save_user_xstate(buf_fx))
			return -1;
		/* Update the thread's fxstate to save the fsave header. */
		if (ia32_fxstate)
			fpu_fxsave(&tsk->thread.fpu);
	} else {
		sanitize_i387_state(tsk);
		if (__copy_to_user(buf_fx, xsave, xstate_size))
			return -1;
	}

	/* Save the fsave header for the 32-bit frames. */
	if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
		return -1;

	if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
		return -1;

	return 0;
}

static inline void
sanitize_restored_xstate(struct task_struct *tsk,
			 struct user_i387_ia32_struct *ia32_env,
			 u64 xstate_bv, int fx_only)
{
	struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
	struct xsave_hdr_struct *xsave_hdr = &xsave->xsave_hdr;

	if (use_xsave()) {
		/* These bits must be zero. */
		memset(xsave_hdr->reserved, 0, 48);

		/*
		 * Init the state that is not present in the memory
		 * layout and not enabled by the OS.
		 */
		if (fx_only)
			xsave_hdr->xstate_bv = XSTATE_FPSSE;
		else
			xsave_hdr->xstate_bv &= (pcntxt_mask & xstate_bv);
	}

	if (use_fxsr()) {
		/*
		 * mxcsr reserved bits must be masked to zero for security
		 * reasons.
		 */
		xsave->i387.mxcsr &= mxcsr_feature_mask;

		convert_to_fxsr(tsk, ia32_env);
	}
}
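
/*
 * Example for the init-bit handling below (a sketch): if the signal
 * frame carries xstate_bv == XSTATE_FPSSE while pcntxt_mask also has
 * XSTATE_YMM enabled, restore_user_xstate() first loads the YMM init
 * state from init_xstate_buf (init_bv == XSTATE_YMM) and only then
 * restores the user's FP/SSE state, so no stale YMM state survives
 * sigreturn.
 */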

/*
 * Restore the extended state if present. Otherwise, restore the FP/SSE state.
 */
static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
{
	if (use_xsave()) {
		if ((unsigned long)buf % 64 || fx_only) {
			u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
			xrstor_state(init_xstate_buf, init_bv);
			return fxrstor_user(buf);
		} else {
			u64 init_bv = pcntxt_mask & ~xbv;
			if (unlikely(init_bv))
				xrstor_state(init_xstate_buf, init_bv);
			return xrestore_user(buf, xbv);
		}
	} else if (use_fxsr()) {
		return fxrstor_user(buf);
	} else
		return frstor_user(buf);
}

int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
{
	int ia32_fxstate = (buf != buf_fx);
	struct task_struct *tsk = current;
	int state_size = xstate_size;
	u64 xstate_bv = 0;
	int fx_only = 0;

	ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
			 config_enabled(CONFIG_IA32_EMULATION));

	if (!buf) {
		fpu_reset_state(tsk);
		return 0;
	}

	if (!access_ok(VERIFY_READ, buf, size))
		return -EACCES;

	if (!used_math() && init_fpu(tsk))
		return -1;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(current, NULL,
				       0, sizeof(struct user_i387_ia32_struct),
				       NULL, buf) != 0;

	if (use_xsave()) {
		struct _fpx_sw_bytes fx_sw_user;
		if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
			/*
			 * Couldn't find the extended state information in the
			 * memory layout. Restore just the FP/SSE and init all
			 * the other extended state.
			 */
			state_size = sizeof(struct i387_fxsave_struct);
			fx_only = 1;
		} else {
			state_size = fx_sw_user.xstate_size;
			xstate_bv = fx_sw_user.xstate_bv;
		}
	}

	if (ia32_fxstate) {
		/*
		 * For 32-bit frames with fxstate, copy the user state to the
		 * thread's fpu state, reconstruct fxstate from the fsave
		 * header. Sanitize the copied state etc.
		 */
		struct fpu *fpu = &tsk->thread.fpu;
		struct user_i387_ia32_struct env;
		int err = 0;

		/*
		 * Drop the current fpu, which clears used_math(). This
		 * ensures that any context switch during the copy of the
		 * new state does not save or restore the intermediate
		 * state, which would corrupt the newly restored state.
		 * We will be ready to restore/save the state only after
		 * set_used_math() is set again.
		 */
		drop_fpu(tsk);

		if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
		    __copy_from_user(&env, buf, sizeof(env))) {
			fpu_finit(fpu);
			err = -1;
		} else {
			sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
		}

		set_used_math();
		if (use_eager_fpu()) {
			preempt_disable();
			math_state_restore();
			preempt_enable();
		}

		return err;
	} else {
		/*
		 * For 64-bit frames and 32-bit fsave frames, restore the user
		 * state to the registers directly (with exceptions handled).
		 */
		user_fpu_begin();
		if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) {
			fpu_reset_state(tsk);
			return -1;
		}
	}

	return 0;
}
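
/*
 * Size arithmetic example for prepare_fx_sw_frame() below (a sketch,
 * assuming a hypothetical xstate_size of 832 bytes, i.e. a 512-byte
 * fxsave image + 64-byte xsave header + 256 bytes of YMM state):
 *
 *	fx_sw_reserved.xstate_size   = 832
 *	fx_sw_reserved.extended_size = 832 + FP_XSTATE_MAGIC2_SIZE
 *
 * fx_sw_reserved_ia32.extended_size additionally includes
 * sizeof(struct i387_fsave_struct), for the fsave header that precedes
 * the aligned fxstate in 32-bit frames.
 */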

/*
 * Prepare the SW reserved portion of the fxsave memory layout, indicating
 * the presence of the extended state information in the memory layout
 * pointed to by the fpstate pointer in the sigcontext.
 * This will be saved whenever the FP and extended state context is
 * saved on the user stack during signal delivery to the user.
 */
static void prepare_fx_sw_frame(void)
{
	int fsave_header_size = sizeof(struct i387_fsave_struct);
	int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;

	if (config_enabled(CONFIG_X86_32))
		size += fsave_header_size;

	fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
	fx_sw_reserved.extended_size = size;
	fx_sw_reserved.xstate_bv = pcntxt_mask;
	fx_sw_reserved.xstate_size = xstate_size;

	if (config_enabled(CONFIG_IA32_EMULATION)) {
		fx_sw_reserved_ia32 = fx_sw_reserved;
		fx_sw_reserved_ia32.extended_size += fsave_header_size;
	}
}

/*
 * Enable the extended processor state save/restore feature.
 */
static inline void xstate_enable(void)
{
	cr4_set_bits(X86_CR4_OSXSAVE);
	xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
}

/*
 * Record the offsets and sizes of the different states managed by the
 * xsave memory layout.
 */
static void __init setup_xstate_features(void)
{
	int eax, ebx, ecx, edx, leaf = 0x2;

	xstate_features = fls64(pcntxt_mask);
	xstate_offsets = alloc_bootmem(xstate_features * sizeof(int));
	xstate_sizes = alloc_bootmem(xstate_features * sizeof(int));

	do {
		cpuid_count(XSTATE_CPUID, leaf, &eax, &ebx, &ecx, &edx);

		if (eax == 0)
			break;

		xstate_offsets[leaf] = ebx;
		xstate_sizes[leaf] = eax;

		leaf++;
	} while (1);
}

/*
 * This function sets up the offsets and sizes of all extended states in
 * the xsave area. It supports both the standard format and the compacted
 * format of the xsave area.
 */
void setup_xstate_comp(void)
{
	unsigned int xstate_comp_sizes[sizeof(pcntxt_mask)*8];
	int i;

	/*
	 * The FP xstates and SSE xstates are legacy states. They are always
	 * at fixed offsets in the xsave area, in either the compacted form
	 * or the standard form.
	 */
	xstate_comp_offsets[0] = 0;
	xstate_comp_offsets[1] = offsetof(struct i387_fxsave_struct, xmm_space);

	if (!cpu_has_xsaves) {
		for (i = 2; i < xstate_features; i++) {
			if (test_bit(i, (unsigned long *)&pcntxt_mask)) {
				xstate_comp_offsets[i] = xstate_offsets[i];
				xstate_comp_sizes[i] = xstate_sizes[i];
			}
		}
		return;
	}

	xstate_comp_offsets[2] = FXSAVE_SIZE + XSAVE_HDR_SIZE;

	for (i = 2; i < xstate_features; i++) {
		if (test_bit(i, (unsigned long *)&pcntxt_mask))
			xstate_comp_sizes[i] = xstate_sizes[i];
		else
			xstate_comp_sizes[i] = 0;

		if (i > 2)
			xstate_comp_offsets[i] = xstate_comp_offsets[i-1]
					+ xstate_comp_sizes[i-1];
	}
}
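
/*
 * Offset example for setup_xstate_comp() above (a sketch, for a
 * hypothetical pcntxt_mask with features 2 and 4 enabled but feature 3
 * absent): in the standard format, feature 4 stays at its fixed
 * CPUID-reported offset; in the compacted format it packs down to
 *
 *	xstate_comp_offsets[4] = FXSAVE_SIZE + XSAVE_HDR_SIZE
 *				 + xstate_sizes[2]
 *
 * because the absent feature 3 contributes a size of 0.
 */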

/*
 * Setup the xstate image representing the init state.
 */
static void __init setup_init_fpu_buf(void)
{
	/*
	 * Setup init_xstate_buf to represent the init state of
	 * all the features managed by xsave.
	 */
	init_xstate_buf = alloc_bootmem_align(xstate_size,
					      __alignof__(struct xsave_struct));
	fx_finit(&init_xstate_buf->i387);

	if (!cpu_has_xsave)
		return;

	setup_xstate_features();

	if (cpu_has_xsaves) {
		init_xstate_buf->xsave_hdr.xcomp_bv =
						(u64)1 << 63 | pcntxt_mask;
		init_xstate_buf->xsave_hdr.xstate_bv = pcntxt_mask;
	}

	/*
	 * Init all the feature states with the header's xstate_bv being 0x0.
	 */
	xrstor_state_booting(init_xstate_buf, -1);
	/*
	 * Dump the init state again. This is to identify the init state
	 * of any feature which is not represented by all zeros.
	 */
	xsave_state_booting(init_xstate_buf, -1);
}

static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
static int __init eager_fpu_setup(char *s)
{
	if (!strcmp(s, "on"))
		eagerfpu = ENABLE;
	else if (!strcmp(s, "off"))
		eagerfpu = DISABLE;
	else if (!strcmp(s, "auto"))
		eagerfpu = AUTO;
	return 1;
}
__setup("eagerfpu=", eager_fpu_setup);

/*
 * Calculate the total size of the enabled xstates in XCR0/pcntxt_mask.
 */
static void __init init_xstate_size(void)
{
	unsigned int eax, ebx, ecx, edx;
	int i;

	if (!cpu_has_xsaves) {
		cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
		xstate_size = ebx;
		return;
	}

	xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
	for (i = 2; i < 64; i++) {
		if (test_bit(i, (unsigned long *)&pcntxt_mask)) {
			cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
			xstate_size += eax;
		}
	}
}

/*
 * Enable and initialize the xsave feature.
 */
static void __init xstate_enable_boot_cpu(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
		WARN(1, KERN_ERR "XSTATE_CPUID missing\n");
		return;
	}

	cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
	pcntxt_mask = eax + ((u64)edx << 32);

	if ((pcntxt_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
		pr_err("FP/SSE not shown under xsave features 0x%llx\n",
		       pcntxt_mask);
		BUG();
	}

	/*
	 * Support only the state known to the OS.
	 */
	pcntxt_mask = pcntxt_mask & XCNTXT_MASK;

	xstate_enable();

	/*
	 * Recompute the context size for the enabled features.
	 */
	init_xstate_size();

	update_regset_xstate_info(xstate_size, pcntxt_mask);
	prepare_fx_sw_frame();
	setup_init_fpu_buf();

	/* Auto enable eagerfpu for xsaveopt */
	if (cpu_has_xsaveopt && eagerfpu != DISABLE)
		eagerfpu = ENABLE;

	if (pcntxt_mask & XSTATE_EAGER) {
		if (eagerfpu == DISABLE) {
			pr_err("eagerfpu not present, disabling some xstate features: 0x%llx\n",
			       pcntxt_mask & XSTATE_EAGER);
			pcntxt_mask &= ~XSTATE_EAGER;
		} else {
			eagerfpu = ENABLE;
		}
	}

	pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x using %s\n",
		pcntxt_mask, xstate_size,
		cpu_has_xsaves ? "compacted form" : "standard form");
}

/*
 * For the very first instance, this calls xstate_enable_boot_cpu();
 * for all subsequent instances, this calls xstate_enable().
 *
 * This is somewhat obfuscated due to the lack of powerful enough
 * overrides for the section checks.
 */
void xsave_init(void)
{
	static __refdata void (*next_func)(void) = xstate_enable_boot_cpu;
	void (*this_func)(void);

	if (!cpu_has_xsave)
		return;

	this_func = next_func;
	next_func = xstate_enable;
	this_func();
}
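
/*
 * Boot flow sketch for xsave_init() above: the first call (on the boot
 * CPU) runs xstate_enable_boot_cpu(), which computes pcntxt_mask and
 * xstate_size once; every later call (on the secondary CPUs) runs only
 * xstate_enable(), which sets CR4.OSXSAVE and programs XCR0 with the
 * same mask.
 */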

/*
 * setup_init_fpu_buf() is __init and it is OK to call it here because
 * init_xstate_buf will be unset only once during boot.
 */
void __init_refok eager_fpu_init(void)
{
	WARN_ON(used_math());
	current_thread_info()->status = 0;

	if (eagerfpu == ENABLE)
		setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);

	if (!cpu_has_eager_fpu) {
		stts();
		return;
	}

	if (!init_xstate_buf)
		setup_init_fpu_buf();
}

/*
 * Given the xsave area and a state inside, this function returns the
 * address of the state.
 *
 * This is the API that is called to get the xstate address in either
 * the standard format or the compacted format of the xsave area.
 *
 * Inputs:
 *	xsave: base address of the xsave area;
 *	xstate: state which is defined in xsave.h (e.g. XSTATE_FP,
 *	XSTATE_SSE, etc.)
 * Output:
 *	address of the state in the xsave area.
 */
void *get_xsave_addr(struct xsave_struct *xsave, int xstate)
{
	int feature = fls64(xstate) - 1;
	if (!test_bit(feature, (unsigned long *)&pcntxt_mask))
		return NULL;

	return (void *)xsave + xstate_comp_offsets[feature];
}
EXPORT_SYMBOL_GPL(get_xsave_addr);
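
/*
 * Usage sketch for get_xsave_addr() (a hypothetical caller; 'tsk' is
 * any task whose xstate has been saved to memory):
 *
 *	void *ymmh = get_xsave_addr(&tsk->thread.fpu.state->xsave,
 *				    XSTATE_YMM);
 *	if (ymmh)
 *		... the YMM upper halves live at 'ymmh' ...
 *
 * A NULL return means the feature is not enabled in pcntxt_mask. The
 * returned offset is a compacted-format offset when the CPU uses
 * xsaves, so callers must go through this helper rather than use the
 * raw CPUID-reported offsets.
 */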