include/linux/byteorder/generic.h

DEFINITIONS

This source file includes the following definitions:
  1. le16_add_cpu
  2. le32_add_cpu
  3. le64_add_cpu
  4. le32_to_cpu_array
  5. cpu_to_le32_array
  6. be16_add_cpu
  7. be32_add_cpu
  8. be64_add_cpu
  9. cpu_to_be32_array
  10. be32_to_cpu_array

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BYTEORDER_GENERIC_H
#define _LINUX_BYTEORDER_GENERIC_H

/*
 * linux/byteorder/generic.h
 * Generic Byte-reordering support
 *
 * The "... p" macros, like le64_to_cpup, can be used with pointers
 * to unaligned data, but there will be a performance penalty on
 * some architectures.  Use get_unaligned for unaligned data.
 *
 * Francois-Rene Rideau <fare@tunes.org> 19970707
 *    gathered all the good ideas from all asm-foo/byteorder.h into one file,
 *    cleaned them up.
 *    I hope it is compliant with non-GCC compilers.
 *    I decided to put __BYTEORDER_HAS_U64__ in byteorder.h,
 *    because I wasn't sure it would be ok to put it in types.h
 *    Upgraded it to 2.1.43
 * Francois-Rene Rideau <fare@tunes.org> 19971012
 *    Upgraded it to 2.1.57
 *    to please Linus T., replaced huge #ifdef's between little/big endian
 *    by nestedly #include'd files.
 * Francois-Rene Rideau <fare@tunes.org> 19971205
 *    Made it to 2.1.71; now a facelift:
 *    Put files under include/linux/byteorder/
 *    Split swab from generic support.
 *
 * TODO:
 *   = Regular kernel maintainers could also replace all these manual
 *    byteswap macros that remain, disseminated among drivers,
 *    after some grep of the sources...
 *   = Linus might want to rename all these macros and files to fit his
 *    personal naming scheme.
 *   = it seems that a few drivers would also appreciate
 *    nybble swapping support...
 *   = every architecture could add their byteswap macro in asm/byteorder.h
 *    see how some architectures already do (i386, alpha, ppc, etc)
 *   = cpu_to_beXX and beXX_to_cpu might some day need to be well
 *    distinguished throughout the kernel. This is not the case currently,
 *    since little endian, big endian, and pdp endian machines don't need it.
 *    But this might be the case for, say, a port of Linux to 20/21 bit
 *    architectures (any F21 Linux addict around?).
 */

/*
 * The following macros are to be defined by <asm/byteorder.h>:
 *
 * Conversion of long and short int between network and host format
 *      ntohl(__u32 x)
 *      ntohs(__u16 x)
 *      htonl(__u32 x)
 *      htons(__u16 x)
 * It seems that some programs (which? where? or perhaps a standard? POSIX?)
 * might like the above to be functions, not macros (why?).
 * If that's true, then detect them, and take measures.
 * Anyway, the measure is: define only ___ntohl as a macro instead,
 * and in a separate file, have
 * unsigned long inline ntohl(x){return ___ntohl(x);}
 *
 * The same for constant arguments
 *      __constant_ntohl(__u32 x)
 *      __constant_ntohs(__u16 x)
 *      __constant_htonl(__u32 x)
 *      __constant_htons(__u16 x)
 *
 * Conversion of XX-bit integers (16-, 32- or 64-bit)
 * between native CPU format and little/big endian format
 * 64-bit stuff only defined for proper architectures
 *      cpu_to_[bl]eXX(__uXX x)
 *      [bl]eXX_to_cpu(__uXX x)
 *
 * The same, but takes a pointer to the value to convert
 *      cpu_to_[bl]eXXp(const __uXX *p)
 *      [bl]eXX_to_cpup(const __uXX *p)
 *
 * The same, but changes the value in situ through the pointer
 *      cpu_to_[bl]eXXs(__uXX *p)
 *      [bl]eXX_to_cpus(__uXX *p)
 *
 * See asm-foo/byteorder.h for examples of how to provide
 * architecture-optimized versions
 *
 */

#define cpu_to_le64 __cpu_to_le64
#define le64_to_cpu __le64_to_cpu
#define cpu_to_le32 __cpu_to_le32
#define le32_to_cpu __le32_to_cpu
#define cpu_to_le16 __cpu_to_le16
#define le16_to_cpu __le16_to_cpu
#define cpu_to_be64 __cpu_to_be64
#define be64_to_cpu __be64_to_cpu
#define cpu_to_be32 __cpu_to_be32
#define be32_to_cpu __be32_to_cpu
#define cpu_to_be16 __cpu_to_be16
#define be16_to_cpu __be16_to_cpu
#define cpu_to_le64p __cpu_to_le64p
#define le64_to_cpup __le64_to_cpup
#define cpu_to_le32p __cpu_to_le32p
#define le32_to_cpup __le32_to_cpup
#define cpu_to_le16p __cpu_to_le16p
#define le16_to_cpup __le16_to_cpup
#define cpu_to_be64p __cpu_to_be64p
#define be64_to_cpup __be64_to_cpup
#define cpu_to_be32p __cpu_to_be32p
#define be32_to_cpup __be32_to_cpup
#define cpu_to_be16p __cpu_to_be16p
#define be16_to_cpup __be16_to_cpup
#define cpu_to_le64s __cpu_to_le64s
#define le64_to_cpus __le64_to_cpus
#define cpu_to_le32s __cpu_to_le32s
#define le32_to_cpus __le32_to_cpus
#define cpu_to_le16s __cpu_to_le16s
#define le16_to_cpus __le16_to_cpus
#define cpu_to_be64s __cpu_to_be64s
#define be64_to_cpus __be64_to_cpus
#define cpu_to_be32s __cpu_to_be32s
#define be32_to_cpus __be32_to_cpus
#define cpu_to_be16s __cpu_to_be16s
#define be16_to_cpus __be16_to_cpus
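
/*
 * Illustrative sketch (not part of the original header) of how the three
 * macro families above are typically used; the variable names here are
 * hypothetical:
 *
 *      __le32 disk_val = cpu_to_le32(1024);   // CPU order -> little endian
 *      u32 native = le32_to_cpu(disk_val);    // little endian -> CPU order
 *
 *      // Pointer variant: reads through the pointer, returns CPU order.
 *      u32 native2 = le32_to_cpup(&disk_val);
 *
 *      // In-situ variant: byteswaps (if needed) through the pointer.
 *      u32 buf = 1024;
 *      cpu_to_le32s(&buf);                    // buf now holds the LE form
 */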

/*
 * These have to be macros in order to do the constant folding
 * correctly - if the argument is passed into an inline function
 * it is no longer constant according to gcc.
 */

#undef ntohl
#undef ntohs
#undef htonl
#undef htons

#define ___htonl(x) __cpu_to_be32(x)
#define ___htons(x) __cpu_to_be16(x)
#define ___ntohl(x) __be32_to_cpu(x)
#define ___ntohs(x) __be16_to_cpu(x)

#define htonl(x) ___htonl(x)
#define ntohl(x) ___ntohl(x)
#define htons(x) ___htons(x)
#define ntohs(x) ___ntohs(x)
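
/*
 * Example (illustrative, hypothetical names): because ntohl/htonl expand
 * to macros, a constant argument stays a compile-time constant, so they
 * can appear in static initializers:
 *
 *      static const __be32 magic = htonl(0x12345678);
 *
 *      // And the usual network-byte-order conversions:
 *      __be16 net_port = htons(8080);
 *      u16 host_port = ntohs(net_port);
 */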

/* Add a native-endian value to a little-endian variable, in place. */
static inline void le16_add_cpu(__le16 *var, u16 val)
{
        *var = cpu_to_le16(le16_to_cpu(*var) + val);
}

static inline void le32_add_cpu(__le32 *var, u32 val)
{
        *var = cpu_to_le32(le32_to_cpu(*var) + val);
}

static inline void le64_add_cpu(__le64 *var, u64 val)
{
        *var = cpu_to_le64(le64_to_cpu(*var) + val);
}
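
/*
 * Usage sketch (hypothetical struct and function): a typical caller keeps
 * an on-disk little-endian counter and adjusts it by a native-endian
 * amount:
 *
 *      struct foo_super {
 *              __le32 free_blocks;
 *      };
 *
 *      static void foo_alloc_block(struct foo_super *sb)
 *      {
 *              le32_add_cpu(&sb->free_blocks, -1);     // wraps: same as -= 1
 *      }
 */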

/* XXX: this stuff can be optimized */
static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
{
        while (words--) {
                __le32_to_cpus(buf);
                buf++;
        }
}

static inline void cpu_to_le32_array(u32 *buf, unsigned int words)
{
        while (words--) {
                __cpu_to_le32s(buf);
                buf++;
        }
}
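
/*
 * Usage sketch: both helpers above convert @words 32-bit values in place,
 * so the buffer must be writable.  Hypothetical example for processing a
 * little-endian input block on any endianness:
 *
 *      u32 block[16];
 *      memcpy(block, le_data, sizeof(block));
 *      le32_to_cpu_array(block, ARRAY_SIZE(block));    // now native order
 *      // ... operate on block[] in CPU byte order ...
 *      cpu_to_le32_array(block, ARRAY_SIZE(block));    // back to LE
 */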

/* Add a native-endian value to a big-endian variable, in place. */
static inline void be16_add_cpu(__be16 *var, u16 val)
{
        *var = cpu_to_be16(be16_to_cpu(*var) + val);
}

static inline void be32_add_cpu(__be32 *var, u32 val)
{
        *var = cpu_to_be32(be32_to_cpu(*var) + val);
}

static inline void be64_add_cpu(__be64 *var, u64 val)
{
        *var = cpu_to_be64(be64_to_cpu(*var) + val);
}
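
/*
 * Usage sketch (hypothetical field): same pattern as the le*_add_cpu
 * helpers, but for big-endian (network/on-disk) fields:
 *
 *      be16_add_cpu(&hdr->frag_count, 1);      // increment a __be16 field
 */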

static inline void cpu_to_be32_array(__be32 *dst, const u32 *src, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++)
                dst[i] = cpu_to_be32(src[i]);
}

static inline void be32_to_cpu_array(u32 *dst, const __be32 *src, size_t len)
{
        size_t i;

        for (i = 0; i < len; i++)
                dst[i] = be32_to_cpu(src[i]);
}
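
/*
 * Usage sketch: unlike the le32 array helpers above, these copy between
 * distinct buffers rather than converting in place (names hypothetical):
 *
 *      u32 native[4];
 *      __be32 wire[4];
 *      cpu_to_be32_array(wire, native, ARRAY_SIZE(wire));      // CPU -> BE
 *      be32_to_cpu_array(native, wire, ARRAY_SIZE(wire));      // BE -> CPU
 */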

#endif /* _LINUX_BYTEORDER_GENERIC_H */
