root/arch/hexagon/include/asm/uaccess.h

DEFINITIONS

This source file includes the following definitions:
  1. hexagon_strncpy_from_user

/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * User memory access support for Hexagon
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_UACCESS_H
#define _ASM_UACCESS_H
/*
 * User space memory access functions
 */
#include <linux/mm.h>
#include <asm/sections.h>

/*
 * access_ok: - Checks if a user space pointer is valid
 * @addr: User space pointer to start of block to check
 * @size: Size of block to check
 *
 * Context: User context only. This function may sleep if pagefaults are
 *          enabled.
 *
 * Checks if a pointer to a block of memory in user space is valid.
 *
 * Returns true (nonzero) if the memory block *may* be valid, false (zero)
 * if it is definitely invalid.
 *
 * User address space in Hexagon, like x86, goes to 0xbfffffff, so the
 * simple MSB-based tests used by MIPS won't work.  Some further
 * optimization is probably possible here, but for now, keep it
 * reasonably simple and not *too* slow.  After all, we've got the
 * MMU for backup.
 */

#define __access_ok(addr, size) \
        ((get_fs().seg == KERNEL_DS.seg) || \
        (((unsigned long)addr < get_fs().seg) && \
          (unsigned long)size < (get_fs().seg - (unsigned long)addr)))

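/*
 * An illustrative sketch of the range check above, using assumed numbers
 * that are not taken from any particular configuration: with a user
 * segment limit of 0xc0000000, addr = 0x10000000 and size = 0x1000 pass,
 * since 0x1000 < 0xc0000000 - 0x10000000, while addr = 0xbffff000 and
 * size = 0x2000 fail, since 0x2000 >= 0xc0000000 - 0xbffff000 = 0x1000.
 * Callers normally reach this macro through access_ok() as provided by
 * asm-generic/uaccess.h rather than using __access_ok() directly.
 */
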
/*
 * When a kernel-mode page fault is taken, the faulting instruction
 * address is checked against a table of exception_table_entries.
 * Each entry is a tuple of the address of an instruction that may
 * be authorized to fault, and the address at which execution should
 * be resumed instead of the faulting instruction, so as to effect
 * a workaround.
 */

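/*
 * A rough sketch of the shape of such an entry; the generic definition
 * lives in asm-generic/extable.h, and the exact layout can vary between
 * kernel versions:
 *
 *      struct exception_table_entry {
 *              unsigned long insn;     -- address allowed to fault
 *              unsigned long fixup;    -- address to resume at
 *      };
 */
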
/*  Somewhat optimized copy routines, implemented in assembly  */
unsigned long raw_copy_from_user(void *to, const void __user *from,
                                     unsigned long n);
unsigned long raw_copy_to_user(void __user *to, const void *from,
                                   unsigned long n);
#define INLINE_COPY_FROM_USER
#define INLINE_COPY_TO_USER

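/*
 * Calling convention, sketched for illustration: both routines return
 * the number of bytes that could NOT be copied, so zero means complete
 * success.  The buffer names below are hypothetical:
 *
 *      char kbuf[64];
 *
 *      if (raw_copy_from_user(kbuf, ubuf, sizeof(kbuf)))
 *              return -EFAULT;
 *
 * Most code should go through copy_from_user()/copy_to_user(), which
 * wrap these raw routines with access_ok() and related checks.
 */
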
__kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count);
#define __clear_user(a, s) __clear_user_hexagon((a), (s))

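/*
 * __clear_user() zeroes @s bytes of user memory and, like the generic
 * clear_user(), is expected to return the number of bytes that could
 * not be cleared (0 on success).  A hypothetical caller:
 *
 *      if (__clear_user(ubuf, len))
 *              return -EFAULT;
 */
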
#define __strncpy_from_user(dst, src, n) hexagon_strncpy_from_user(dst, src, n)

/*  get around the ifndef in asm-generic/uaccess.h  */
#define __strnlen_user __strnlen_user

extern long __strnlen_user(const char __user *src, long n);

static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
                                             long n);

#include <asm-generic/uaccess.h>

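/*
 * With the definitions above in place, asm-generic/uaccess.h supplies
 * the usual strncpy_from_user()/strnlen_user() wrappers on top of
 * __strncpy_from_user()/__strnlen_user().  A sketch of typical use,
 * with a hypothetical buffer:
 *
 *      char name[32];
 *      long len = strncpy_from_user(name, uname, sizeof(name));
 *
 *      if (len < 0)
 *              return len;
 *
 * where a negative return means -EFAULT from a faulting user pointer.
 */
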
/*  Todo:  an actual accelerated version of this.  */
static inline long hexagon_strncpy_from_user(char *dst, const char __user *src,
                                             long n)
{
        long res = __strnlen_user(src, n);

        if (unlikely(!res))
                return -EFAULT;

        if (res > n) {
                long left = raw_copy_from_user(dst, src, n);
                if (unlikely(left))
                        memset(dst + (n - left), 0, left);
                return n;
        } else {
                long left = raw_copy_from_user(dst, src, res);
                if (unlikely(left))
                        memset(dst + (res - left), 0, left);
                return res - 1;
        }
}
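
/*
 * Worked example with assumed inputs: for a user string "abc" whose
 * terminating NUL is within reach and n = 8, __strnlen_user() returns 4
 * (the length including the NUL), so the second branch copies those
 * 4 bytes and the function returns 3, the strlen()-style length.  If no
 * NUL is found within n bytes, __strnlen_user() reports a value larger
 * than n, so the first branch copies exactly n bytes and returns n,
 * matching the usual strncpy_from_user() convention for truncated
 * strings.  A zero return from __strnlen_user() means the pointer
 * faulted, hence the -EFAULT above.
 */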

#endif
