#ifndef _X_TABLES_H
#define _X_TABLES_H


#include <linux/netdevice.h>
#include <uapi/linux/netfilter/x_tables.h>

/**
 * struct xt_action_param - parameters for matches/targets
 *
 * @match:	the match extension
 * @target:	the target extension
 * @matchinfo:	per-match data
 * @targinfo:	per-target data
 * @in:		input netdevice
 * @out:	output netdevice
 * @fragoff:	packet is a fragment, this is the data offset
 * @thoff:	position of transport header relative to skb->data
 * @hooknum:	hook number given packet came from
 * @family:	Actual NFPROTO_* through which the function is invoked
 *		(helpful when match->family == NFPROTO_UNSPEC)
 *
 * Fields written to by extensions:
 *
 * @hotdrop:	drop packet if we had inspection problems
 * Network namespace obtainable using dev_net(in/out)
 */
struct xt_action_param {
	/* A rule invokes either a match or a target, never both, so the
	 * extension pointer and its private data can share storage. */
	union {
		const struct xt_match *match;
		const struct xt_target *target;
	};
	union {
		const void *matchinfo, *targinfo;
	};
	const struct net_device *in, *out;
	int fragoff;
	unsigned int thoff;
	unsigned int hooknum;
	u_int8_t family;
	/* Set to true by an extension to force an immediate drop. */
	bool hotdrop;
};

/**
 * struct xt_mtchk_param - parameters for match extensions'
 * checkentry functions
 *
 * @net:	network namespace through which the check was invoked
 * @table:	table the rule is tried to be inserted into
 * @entryinfo:	the family-specific rule data
 *		(struct ipt_ip, ip6t_ip, arpt_arp or (note) ebt_entry)
 * @match:	struct xt_match through which this function was invoked
 * @matchinfo:	per-match data
 * @hook_mask:	via which hooks the new rule is reachable
 * Other fields as above.
 */
struct xt_mtchk_param {
	struct net *net;
	const char *table;
	const void *entryinfo;
	const struct xt_match *match;
	void *matchinfo;
	unsigned int hook_mask;
	u_int8_t family;
};

/**
 * struct xt_mtdtor_param - match destructor parameters
 * Fields as above.
 */
struct xt_mtdtor_param {
	struct net *net;
	const struct xt_match *match;
	void *matchinfo;
	u_int8_t family;
};

/**
 * struct xt_tgchk_param - parameters for target extensions'
 * checkentry functions
 *
 * @entryinfo:	the family-specific rule data
 *		(struct ipt_entry, ip6t_entry, arpt_entry, ebt_entry)
 *
 * Other fields see above.
 */
struct xt_tgchk_param {
	struct net *net;
	const char *table;
	const void *entryinfo;
	const struct xt_target *target;
	void *targinfo;
	unsigned int hook_mask;
	u_int8_t family;
};

/* Target destructor parameters */
struct xt_tgdtor_param {
	struct net *net;
	const struct xt_target *target;
	void *targinfo;
	u_int8_t family;
};

/* A match extension: classifies packets without modifying them. */
struct xt_match {
	struct list_head list;

	const char name[XT_EXTENSION_MAXNAMELEN];
	u_int8_t revision;

	/* Return true or false: return FALSE and set *hotdrop = 1 to
	   force immediate packet drop. */
	/* Arguments changed since 2.6.9, as this must now handle
	   non-linear skb, using skb_header_pointer and
	   skb_ip_make_writable. */
	bool (*match)(const struct sk_buff *skb,
		      struct xt_action_param *);

	/* Called when user tries to insert an entry of this type. */
	int (*checkentry)(const struct xt_mtchk_param *);

	/* Called when entry of this type deleted. */
	void (*destroy)(const struct xt_mtdtor_param *);
#ifdef CONFIG_COMPAT
	/* Called when userspace align differs from kernel space one */
	void (*compat_from_user)(void *dst, const void *src);
	int (*compat_to_user)(void __user *dst, const void *src);
#endif
	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	/* If non-NULL, restricts this match to the named table. */
	const char *table;
	unsigned int matchsize;
#ifdef CONFIG_COMPAT
	unsigned int compatsize;
#endif
	unsigned int hooks;
	unsigned short proto;

	unsigned short family;
};

/* Registration hooks for targets.
 */
struct xt_target {
	struct list_head list;

	const char name[XT_EXTENSION_MAXNAMELEN];
	u_int8_t revision;

	/* Returns verdict. Argument order changed since 2.6.9, as this
	   must now handle non-linear skbs, using skb_copy_bits and
	   skb_ip_make_writable. */
	unsigned int (*target)(struct sk_buff *skb,
			       const struct xt_action_param *);

	/* Called when user tries to insert an entry of this type:
	   hook_mask is a bitmask of hooks from which it can be
	   called. */
	/* Should return 0 on success or an error code otherwise (-Exxxx). */
	int (*checkentry)(const struct xt_tgchk_param *);

	/* Called when entry of this type deleted. */
	void (*destroy)(const struct xt_tgdtor_param *);
#ifdef CONFIG_COMPAT
	/* Called when userspace align differs from kernel space one */
	void (*compat_from_user)(void *dst, const void *src);
	int (*compat_to_user)(void __user *dst, const void *src);
#endif
	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	/* If non-NULL, restricts this target to the named table. */
	const char *table;
	unsigned int targetsize;
#ifdef CONFIG_COMPAT
	unsigned int compatsize;
#endif
	unsigned int hooks;
	unsigned short proto;

	unsigned short family;
};

/* Furniture shopping... */
struct xt_table {
	struct list_head list;

	/* What hooks you will enter on */
	unsigned int valid_hooks;

	/* Man behind the curtain... */
	struct xt_table_info *private;

	/* Set this to THIS_MODULE if you are a module, otherwise NULL */
	struct module *me;

	u_int8_t af;		/* address/protocol family */
	int priority;		/* hook order */

	/* A unique name... */
	const char name[XT_TABLE_MAXNAMELEN];
};

#include <linux/netfilter_ipv4.h>

/* The table itself */
struct xt_table_info {
	/* Size per table */
	unsigned int size;
	/* Number of entries: FIXME. --RR */
	unsigned int number;
	/* Initial number of entries. Needed for module usage count */
	unsigned int initial_entries;

	/* Entry points and underflows */
	unsigned int hook_entry[NF_INET_NUMHOOKS];
	unsigned int underflow[NF_INET_NUMHOOKS];

	/*
	 * Number of user chains. Since tables cannot have loops, at most
	 * @stacksize jumps (number of user chains) can possibly be made.
	 */
	unsigned int stacksize;
	unsigned int __percpu *stackptr;
	void ***jumpstack;
	/* ipt_entry tables: one per CPU */
	/* Note : this field MUST be the last one, see XT_TABLE_INFO_SZ */
	/* NOTE(review): pre-flexible-array idiom; XT_TABLE_INFO_SZ uses
	 * offsetof(), so the [1] is never part of the allocation size. */
	void *entries[1];
};

/* Allocation size for a table_info with one entries pointer per CPU. */
#define XT_TABLE_INFO_SZ (offsetof(struct xt_table_info, entries) \
			  + nr_cpu_ids * sizeof(char *))

/* Extension (de)registration; the plural forms take an array of n items
 * and unwind already-registered entries on failure. */
int xt_register_target(struct xt_target *target);
void xt_unregister_target(struct xt_target *target);
int xt_register_targets(struct xt_target *target, unsigned int n);
void xt_unregister_targets(struct xt_target *target, unsigned int n);

/* NOTE(review): parameter is a struct xt_match but named "target" in the
 * next two prototypes — likely copy-paste; harmless but confusing. */
int xt_register_match(struct xt_match *target);
void xt_unregister_match(struct xt_match *target);
int xt_register_matches(struct xt_match *match, unsigned int n);
void xt_unregister_matches(struct xt_match *match, unsigned int n);

/* Common sanity checks run before an extension's own checkentry(). */
int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto,
		   bool inv_proto);
int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto,
		    bool inv_proto);

struct xt_table *xt_register_table(struct net *net,
				   const struct xt_table *table,
				   struct xt_table_info *bootstrap,
				   struct xt_table_info *newinfo);
void *xt_unregister_table(struct xt_table *table);

/* Swap in newinfo, returning the previous table_info (or NULL + *error). */
struct xt_table_info *xt_replace_table(struct xt_table *table,
				       unsigned int num_counters,
				       struct xt_table_info *newinfo,
				       int *error);

/* Look up a registered extension by family/name/revision. */
struct xt_match *xt_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_find_target(u8 af, const char *name, u8 revision);
/* As xt_find_*, but may request_module() the extension first. */
struct xt_match *xt_request_find_match(u8 af, const char *name, u8 revision);
struct xt_target *xt_request_find_target(u8 af, const char *name, u8 revision);
int xt_find_revision(u8 af, const char *name, u8 revision, int target,
		     int *err);

struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af,
				    const char *name);
void xt_table_unlock(struct xt_table *t);

int xt_proto_init(struct net *net, u_int8_t af);
void xt_proto_fini(struct net *net, u_int8_t af);

struct xt_table_info *xt_alloc_table_info(unsigned int size);
void xt_free_table_info(struct xt_table_info *info);

/**
 * xt_recseq - recursive seqcount for netfilter use
 *
 * Packet processing changes the seqcount only if no recursion happened
 * get_counters() can use read_seqcount_begin()/read_seqcount_retry(),
 * because we use the normal seqcount convention :
 * Low order bit set to 1 if a writer is active.
 */
DECLARE_PER_CPU(seqcount_t, xt_recseq);

/**
 * xt_write_recseq_begin - start of a write section
 *
 * Begin packet processing : all readers must wait the end
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 * Returns :
 *  1 if no recursion on this cpu
 *  0 if recursion detected
 */
static inline unsigned int xt_write_recseq_begin(void)
{
	unsigned int addend;

	/*
	 * Low order bit of sequence is set if we already
	 * called xt_write_recseq_begin().
	 */
	addend = (__this_cpu_read(xt_recseq.sequence) + 1) & 1;

	/*
	 * This is kind of a write_seqcount_begin(), but addend is 0 or 1
	 * We dont check addend value to avoid a test and conditional jump,
	 * since addend is most likely 1
	 */
	__this_cpu_add(xt_recseq.sequence, addend);
	/* Publish the odd (writer-active) count before touching counters. */
	smp_wmb();

	return addend;
}

/**
 * xt_write_recseq_end - end of a write section
 * @addend: return value from previous xt_write_recseq_begin()
 *
 * End packet processing : all readers can proceed
 * 1) Must be called with preemption disabled
 * 2) softirqs must be disabled too (or we should use this_cpu_add())
 */
static inline void xt_write_recseq_end(unsigned int addend)
{
	/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
	/* Counter writes must be visible before the count goes even again. */
	smp_wmb();
	__this_cpu_add(xt_recseq.sequence, addend);
}

/*
 * This helper is performance critical and must be inlined
 *
 * Compares two IFNAMSIZ interface-name buffers word-at-a-time under a
 * mask; returns 0 iff they match. Callers must pass buffers aligned for
 * unsigned long access (the "_aligned" contract).
 */
static inline unsigned long ifname_compare_aligned(const char *_a,
						   const char *_b,
						   const char *_mask)
{
	const unsigned long *a = (const unsigned long *)_a;
	const unsigned long *b = (const unsigned long *)_b;
	const unsigned long *mask = (const unsigned long *)_mask;
	unsigned long ret;

	/* Manually unrolled: each guard is a compile-time constant, so
	 * dead word comparisons vanish on any given architecture. */
	ret = (a[0] ^ b[0]) & mask[0];
	if (IFNAMSIZ > sizeof(unsigned long))
		ret |= (a[1] ^ b[1]) & mask[1];
	if (IFNAMSIZ > 2 * sizeof(unsigned long))
		ret |= (a[2] ^ b[2]) & mask[2];
	if (IFNAMSIZ > 3 * sizeof(unsigned long))
		ret |= (a[3] ^ b[3]) & mask[3];
	BUILD_BUG_ON(IFNAMSIZ > 4 * sizeof(unsigned long));
	return ret;
}

struct nf_hook_ops *xt_hook_link(const struct xt_table *, nf_hookfn *);
void xt_hook_unlink(const struct xt_table *, struct nf_hook_ops *);

#ifdef CONFIG_COMPAT
#include <net/compat.h>

/* 32-bit userspace layout of a rule's match blob. */
struct compat_xt_entry_match {
	union {
		struct {
			u_int16_t match_size;
			char name[XT_FUNCTION_MAXNAMELEN - 1];
			u_int8_t revision;
		} user;
		struct {
			u_int16_t match_size;
			compat_uptr_t match;
		} kernel;
		u_int16_t match_size;
	} u;
	/* NOTE(review): data[0] is the pre-C99 flexible-array idiom;
	 * part of the 32-bit ABI layout, do not change casually. */
	unsigned char data[0];
};

/* 32-bit userspace layout of a rule's target blob. */
struct compat_xt_entry_target {
	union {
		struct {
			u_int16_t target_size;
			char name[XT_FUNCTION_MAXNAMELEN - 1];
			u_int8_t revision;
		} user;
		struct {
			u_int16_t target_size;
			compat_uptr_t target;
		} kernel;
		u_int16_t target_size;
	} u;
	unsigned char data[0];
};

/* FIXME: this works only on 32 bit tasks
 * need to change whole approach in order to calculate align as function of
 * current task alignment */

struct compat_xt_counters {
	compat_u64 pcnt, bcnt;			/* Packet and byte counters */
};

struct compat_xt_counters_info {
	char name[XT_TABLE_MAXNAMELEN];
	compat_uint_t num_counters;
	struct compat_xt_counters counters[0];
};

/* Dummy aggregate whose alignment is the strictest any compat member
 * can require; used only via __alignof__ below. */
struct _compat_xt_align {
	__u8 u8;
	__u16 u16;
	__u32 u32;
	compat_u64 u64;
};

#define COMPAT_XT_ALIGN(s) __ALIGN_KERNEL((s), __alignof__(struct _compat_xt_align))

void xt_compat_lock(u_int8_t af);
void xt_compat_unlock(u_int8_t af);

/* Track per-rule size deltas between compat and native layouts so jump
 * offsets can be translated. */
int xt_compat_add_offset(u_int8_t af, unsigned int offset, int delta);
void xt_compat_flush_offsets(u_int8_t af);
void xt_compat_init_offsets(u_int8_t af, unsigned int number);
int xt_compat_calc_jump(u_int8_t af, unsigned int offset);

/* Translate match/target blobs between compat and native layouts;
 * *dstptr and *size are advanced as data is consumed/produced. */
int xt_compat_match_offset(const struct xt_match *match);
int xt_compat_match_from_user(struct xt_entry_match *m, void **dstptr,
			      unsigned int *size);
int xt_compat_match_to_user(const struct xt_entry_match *m,
			    void __user **dstptr, unsigned int *size);

int xt_compat_target_offset(const struct xt_target *target);
void xt_compat_target_from_user(struct xt_entry_target *t, void **dstptr,
				unsigned int *size);
int xt_compat_target_to_user(const struct xt_entry_target *t,
			     void __user **dstptr, unsigned int *size);

#endif /* CONFIG_COMPAT */
#endif /* _X_TABLES_H */