/*
 * net/sched/act_api.c	Packet action API.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Author:	Jamal Hadi Salim
 *
 *
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/kmod.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
#include <net/act_api.h>
#include <net/netlink.h>

void tcf_hash_destroy(struct tc_action *a)
{
	struct tcf_common *p = a->priv;
	struct tcf_hashinfo *hinfo = a->ops->hinfo;

	spin_lock_bh(&hinfo->lock);
	hlist_del(&p->tcfc_head);
	spin_unlock_bh(&hinfo->lock);
	gen_kill_estimator(&p->tcfc_bstats,
			   &p->tcfc_rate_est);
	/*
	 * gen_estimator est_timer() might access p->tcfc_lock
	 * or bstats, wait a RCU grace period before freeing p
	 */
	kfree_rcu(p, tcfc_rcu);
}
EXPORT_SYMBOL(tcf_hash_destroy);

int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
{
	struct tcf_common *p = a->priv;
	int ret = 0;

	if (p) {
		if (bind)
			p->tcfc_bindcnt--;
		else if (strict && p->tcfc_bindcnt > 0)
			return -EPERM;

		p->tcfc_refcnt--;
		if (p->tcfc_bindcnt <= 0 && p->tcfc_refcnt <= 0) {
			if (a->ops->cleanup)
				a->ops->cleanup(a, bind);
			tcf_hash_destroy(a);
			ret = 1;
		}
	}

	return ret;
}
EXPORT_SYMBOL(__tcf_hash_release);

static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
			   struct tc_action *a)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;
	struct hlist_head *head;
	struct tcf_common *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct nlattr *nest;

	spin_lock_bh(&hinfo->lock);

	s_i = cb->args[0];

	for (i = 0; i < (hinfo->hmask + 1); i++) {
		head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];

		hlist_for_each_entry_rcu(p, head, tcfc_head) {
			index++;
			if (index < s_i)
				continue;
			a->priv = p;
			a->order = n_i;

			nest = nla_nest_start(skb, a->order);
			if (nest == NULL)
				goto nla_put_failure;
			err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				index--;
				nlmsg_trim(skb, nest);
				goto done;
			}
			nla_nest_end(skb, nest);
			n_i++;
			if (n_i >= TCA_ACT_MAX_PRIO)
				goto done;
		}
	}
done:
	spin_unlock_bh(&hinfo->lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	goto done;
}

static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;
	struct hlist_head *head;
	struct hlist_node *n;
	struct tcf_common *p;
	struct nlattr *nest;
	int i = 0, n_i = 0;
	int ret = -EINVAL;

	nest = nla_nest_start(skb, a->order);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	for (i = 0; i < (hinfo->hmask + 1); i++) {
		head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
		hlist_for_each_entry_safe(p, n, head, tcfc_head) {
			a->priv = p;
			ret = __tcf_hash_release(a, false, true);
			if (ret == ACT_P_DELETED) {
				module_put(a->ops->owner);
				n_i++;
			} else if (ret < 0)
				goto nla_put_failure;
		}
	}
	if (nla_put_u32(skb, TCA_FCNT, n_i))
		goto nla_put_failure;
	nla_nest_end(skb, nest);

	return n_i;
nla_put_failure:
	nla_nest_cancel(skb, nest);
	return ret;
}

static int tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb,
			      int type, struct tc_action *a)
{
	if (type == RTM_DELACTION) {
		return tcf_del_walker(skb, a);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(skb, cb, a);
	} else {
		WARN(1, "tcf_generic_walker: unknown action %d\n", type);
		return -EINVAL;
	}
}
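
/* Look up an action instance by index.  The entry is returned without
 * taking a reference; callers that need to hold on to it (e.g.
 * tcf_hash_check() below) bump tcfc_refcnt/tcfc_bindcnt themselves.
 */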
static struct tcf_common *tcf_hash_lookup(u32 index, struct tcf_hashinfo *hinfo)
{
	struct tcf_common *p = NULL;
	struct hlist_head *head;

	spin_lock_bh(&hinfo->lock);
	head = &hinfo->htab[tcf_hash(index, hinfo->hmask)];
	hlist_for_each_entry_rcu(p, head, tcfc_head)
		if (p->tcfc_index == index)
			break;
	spin_unlock_bh(&hinfo->lock);

	return p;
}

u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo)
{
	u32 val = hinfo->index;

	do {
		if (++val == 0)
			val = 1;
	} while (tcf_hash_lookup(val, hinfo));

	hinfo->index = val;
	return val;
}
EXPORT_SYMBOL(tcf_hash_new_index);

int tcf_hash_search(struct tc_action *a, u32 index)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;
	struct tcf_common *p = tcf_hash_lookup(index, hinfo);

	if (p) {
		a->priv = p;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(tcf_hash_search);

int tcf_hash_check(u32 index, struct tc_action *a, int bind)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;
	struct tcf_common *p = NULL;
	if (index && (p = tcf_hash_lookup(index, hinfo)) != NULL) {
		if (bind)
			p->tcfc_bindcnt++;
		p->tcfc_refcnt++;
		a->priv = p;
		return 1;
	}
	return 0;
}
EXPORT_SYMBOL(tcf_hash_check);

void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est)
{
	struct tcf_common *pc = a->priv;
	if (est)
		gen_kill_estimator(&pc->tcfc_bstats,
				   &pc->tcfc_rate_est);
	kfree_rcu(pc, tcfc_rcu);
}
EXPORT_SYMBOL(tcf_hash_cleanup);
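
/* Typical use of the helpers below from an action's ->init() callback
 * (an illustrative sketch, not code from this file; "struct tcf_foo",
 * "parm" and the surrounding variables are hypothetical):
 *
 *	if (!tcf_hash_check(parm->index, a, bind)) {
 *		ret = tcf_hash_create(parm->index, est, a,
 *				      sizeof(struct tcf_foo), bind);
 *		if (ret)
 *			return ret;
 *		ret = ACT_P_CREATED;
 *	} else {
 *		if (bind)
 *			return 0;
 *		__tcf_hash_release(a, bind, false);
 *		if (!ovr)
 *			return -EEXIST;
 *	}
 *	...	(initialize the action's private data)
 *	if (ret == ACT_P_CREATED)
 *		tcf_hash_insert(a);
 *	return ret;
 */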
int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
		    int size, int bind)
{
	struct tcf_hashinfo *hinfo = a->ops->hinfo;
	struct tcf_common *p = kzalloc(size, GFP_KERNEL);

	if (unlikely(!p))
		return -ENOMEM;
	p->tcfc_refcnt = 1;
	if (bind)
		p->tcfc_bindcnt = 1;

	spin_lock_init(&p->tcfc_lock);
	INIT_HLIST_NODE(&p->tcfc_head);
	p->tcfc_index = index ? index : tcf_hash_new_index(hinfo);
	p->tcfc_tm.install = jiffies;
	p->tcfc_tm.lastuse = jiffies;
	if (est) {
		int err = gen_new_estimator(&p->tcfc_bstats, NULL,
					    &p->tcfc_rate_est,
					    &p->tcfc_lock, est);
		if (err) {
			kfree(p);
			return err;
		}
	}

	a->priv = (void *) p;
	return 0;
}
EXPORT_SYMBOL(tcf_hash_create);

void tcf_hash_insert(struct tc_action *a)
{
	struct tcf_common *p = a->priv;
	struct tcf_hashinfo *hinfo = a->ops->hinfo;
	unsigned int h = tcf_hash(p->tcfc_index, hinfo->hmask);

	spin_lock_bh(&hinfo->lock);
	hlist_add_head(&p->tcfc_head, &hinfo->htab[h]);
	spin_unlock_bh(&hinfo->lock);
}
EXPORT_SYMBOL(tcf_hash_insert);

static LIST_HEAD(act_base);
static DEFINE_RWLOCK(act_mod_lock);

int tcf_register_action(struct tc_action_ops *act, unsigned int mask)
{
	struct tc_action_ops *a;
	int err;

	/* Must supply act, dump and init */
	if (!act->act || !act->dump || !act->init)
		return -EINVAL;

	/* Supply defaults */
	if (!act->lookup)
		act->lookup = tcf_hash_search;
	if (!act->walk)
		act->walk = tcf_generic_walker;

	act->hinfo = kmalloc(sizeof(struct tcf_hashinfo), GFP_KERNEL);
	if (!act->hinfo)
		return -ENOMEM;
	err = tcf_hashinfo_init(act->hinfo, mask);
	if (err) {
		kfree(act->hinfo);
		return err;
	}

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
			write_unlock(&act_mod_lock);
			tcf_hashinfo_destroy(act->hinfo);
			kfree(act->hinfo);
			return -EEXIST;
		}
	}
	list_add_tail(&act->head, &act_base);
	write_unlock(&act_mod_lock);
	return 0;
}
EXPORT_SYMBOL(tcf_register_action);

int tcf_unregister_action(struct tc_action_ops *act)
{
	struct tc_action_ops *a;
	int err = -ENOENT;

	write_lock(&act_mod_lock);
	list_for_each_entry(a, &act_base, head) {
		if (a == act) {
			list_del(&act->head);
			tcf_hashinfo_destroy(act->hinfo);
			kfree(act->hinfo);
			err = 0;
			break;
		}
	}
	write_unlock(&act_mod_lock);
	return err;
}
EXPORT_SYMBOL(tcf_unregister_action);

/* lookup by name */
static struct tc_action_ops *tc_lookup_action_n(char *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}

/* lookup by nlattr */
static struct tc_action_ops *tc_lookup_action(struct nlattr *kind)
{
	struct tc_action_ops *a, *res = NULL;

	if (kind) {
		read_lock(&act_mod_lock);
		list_for_each_entry(a, &act_base, head) {
			if (nla_strcmp(kind, a->kind) == 0) {
				if (try_module_get(a->owner))
					res = a;
				break;
			}
		}
		read_unlock(&act_mod_lock);
	}
	return res;
}
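
/* Run a list of actions on an skb.  TC_ACT_PIPE continues with the next
 * action, TC_ACT_REPEAT re-runs the same action (no TTL yet, see the
 * comment below), and any other verdict ends the walk and is returned
 * to the caller.
 */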
int tcf_action_exec(struct sk_buff *skb, const struct list_head *actions,
		    struct tcf_result *res)
{
	const struct tc_action *a;
	int ret = -1;

	if (skb->tc_verd & TC_NCLS) {
		skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
		ret = TC_ACT_OK;
		goto exec_done;
	}
	list_for_each_entry(a, actions, list) {
repeat:
		ret = a->ops->act(skb, a, res);
		if (TC_MUNGED & skb->tc_verd) {
			/* copied already, allow trampling */
			skb->tc_verd = SET_TC_OK2MUNGE(skb->tc_verd);
			skb->tc_verd = CLR_TC_MUNGED(skb->tc_verd);
		}
		if (ret == TC_ACT_REPEAT)
			goto repeat;	/* we need a ttl - JHS */
		if (ret != TC_ACT_PIPE)
			goto exec_done;
	}
exec_done:
	return ret;
}
EXPORT_SYMBOL(tcf_action_exec);

int tcf_action_destroy(struct list_head *actions, int bind)
{
	struct tc_action *a, *tmp;
	int ret = 0;

	list_for_each_entry_safe(a, tmp, actions, list) {
		ret = __tcf_hash_release(a, bind, true);
		if (ret == ACT_P_DELETED)
			module_put(a->ops->owner);
		else if (ret < 0)
			return ret;
		list_del(&a->list);
		kfree(a);
	}
	return ret;
}

int
tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	return a->ops->dump(skb, a, bind, ref);
}

int
tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
	int err = -EINVAL;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (nla_put_string(skb, TCA_KIND, a->ops->kind))
		goto nla_put_failure;
	if (tcf_action_copy_stats(skb, a, 0))
		goto nla_put_failure;
	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	err = tcf_action_dump_old(skb, a, bind, ref);
	if (err > 0) {
		nla_nest_end(skb, nest);
		return err;
	}

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
EXPORT_SYMBOL(tcf_action_dump_1);

int
tcf_action_dump(struct sk_buff *skb, struct list_head *actions, int bind, int ref)
{
	struct tc_action *a;
	int err = -EINVAL;
	struct nlattr *nest;

	list_for_each_entry(a, actions, list) {
		nest = nla_nest_start(skb, a->order);
		if (nest == NULL)
			goto nla_put_failure;
		err = tcf_action_dump_1(skb, a, bind, ref);
		if (err < 0)
			goto errout;
		nla_nest_end(skb, nest);
	}

	return 0;

nla_put_failure:
	err = -EINVAL;
errout:
	nla_nest_cancel(skb, nest);
	return err;
}
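
/* Instantiate a single action from its netlink attributes.  If the kind
 * is not yet registered, the matching "act_<kind>" module is loaded with
 * the RTNL lock temporarily dropped and -EAGAIN is returned so that the
 * caller replays the whole request.
 */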
struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
				    struct nlattr *est, char *name, int ovr,
				    int bind)
{
	struct tc_action *a;
	struct tc_action_ops *a_o;
	char act_name[IFNAMSIZ];
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	int err;

	if (name == NULL) {
		err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
		if (err < 0)
			goto err_out;
		err = -EINVAL;
		kind = tb[TCA_ACT_KIND];
		if (kind == NULL)
			goto err_out;
		if (nla_strlcpy(act_name, kind, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	} else {
		err = -EINVAL;
		if (strlcpy(act_name, name, IFNAMSIZ) >= IFNAMSIZ)
			goto err_out;
	}

	a_o = tc_lookup_action_n(act_name);
	if (a_o == NULL) {
#ifdef CONFIG_MODULES
		rtnl_unlock();
		request_module("act_%s", act_name);
		rtnl_lock();

		a_o = tc_lookup_action_n(act_name);

		/* We dropped the RTNL semaphore in order to
		 * perform the module load.  So, even if we
		 * succeeded in loading the module we have to
		 * tell the caller to replay the request.  We
		 * indicate this using -EAGAIN.
		 */
		if (a_o != NULL) {
			err = -EAGAIN;
			goto err_mod;
		}
#endif
		err = -ENOENT;
		goto err_out;
	}

	err = -ENOMEM;
	a = kzalloc(sizeof(*a), GFP_KERNEL);
	if (a == NULL)
		goto err_mod;

	a->ops = a_o;
	INIT_LIST_HEAD(&a->list);
	/* backward compatibility for policer */
	if (name == NULL)
		err = a_o->init(net, tb[TCA_ACT_OPTIONS], est, a, ovr, bind);
	else
		err = a_o->init(net, nla, est, a, ovr, bind);
	if (err < 0)
		goto err_free;

	/* The module count goes up only when a brand new policy is created;
	 * if it already exists and is only bound to in a_o->init(), then
	 * ACT_P_CREATED is not returned (zero is).
	 */
	if (err != ACT_P_CREATED)
		module_put(a_o->owner);

	return a;

err_free:
	kfree(a);
err_mod:
	module_put(a_o->owner);
err_out:
	return ERR_PTR(err);
}

int tcf_action_init(struct net *net, struct nlattr *nla,
		    struct nlattr *est, char *name, int ovr,
		    int bind, struct list_head *actions)
{
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	int err;
	int i;

	err = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (err < 0)
		return err;

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_init_1(net, tb[i], est, name, ovr, bind);
		if (IS_ERR(act)) {
			err = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		list_add_tail(&act->list, actions);
	}
	return 0;

err:
	tcf_action_destroy(actions, bind);
	return err;
}
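
/* Append the statistics TLVs for one action.  With compat_mode set, the
 * old TCA_STATS/TCA_XSTATS layout is used for TCA_OLD_COMPAT callers;
 * otherwise everything goes under TCA_ACT_STATS.
 */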
int tcf_action_copy_stats(struct sk_buff *skb, struct tc_action *a,
			  int compat_mode)
{
	int err = 0;
	struct gnet_dump d;
	struct tcf_common *p = a->priv;

	if (p == NULL)
		goto errout;

	/* compat_mode being true specifies a call that is supposed
	 * to add additional backward compatibility statistic TLVs.
	 */
	if (compat_mode) {
		if (a->type == TCA_OLD_COMPAT)
			err = gnet_stats_start_copy_compat(skb, 0,
				TCA_STATS, TCA_XSTATS, &p->tcfc_lock, &d);
		else
			return 0;
	} else
		err = gnet_stats_start_copy(skb, TCA_ACT_STATS,
					    &p->tcfc_lock, &d);

	if (err < 0)
		goto errout;

	if (gnet_stats_copy_basic(&d, NULL, &p->tcfc_bstats) < 0 ||
	    gnet_stats_copy_rate_est(&d, &p->tcfc_bstats,
				     &p->tcfc_rate_est) < 0 ||
	    gnet_stats_copy_queue(&d, NULL,
				  &p->tcfc_qstats,
				  p->tcfc_qstats.qlen) < 0)
		goto errout;

	if (gnet_stats_finish_copy(&d) < 0)
		goto errout;

	return 0;

errout:
	return -1;
}

static int
tca_get_fill(struct sk_buff *skb, struct list_head *actions, u32 portid, u32 seq,
	     u16 flags, int event, int bind, int ref)
{
	struct tcamsg *t;
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	nlh = nlmsg_put(skb, portid, seq, event, sizeof(*t), flags);
	if (!nlh)
		goto out_nlmsg_trim;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_nlmsg_trim;

	if (tcf_action_dump(skb, actions, bind, ref) < 0)
		goto out_nlmsg_trim;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	return skb->len;

out_nlmsg_trim:
	nlmsg_trim(skb, b);
	return -1;
}

static int
act_get_notify(struct net *net, u32 portid, struct nlmsghdr *n,
	       struct list_head *actions, int event)
{
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;
	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, event, 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	return rtnl_unicast(skb, net, portid);
}

static struct tc_action *create_a(int i)
{
	struct tc_action *act;

	act = kzalloc(sizeof(*act), GFP_KERNEL);
	if (act == NULL) {
		pr_debug("create_a: failed to alloc!\n");
		return NULL;
	}
	act->order = i;
	INIT_LIST_HEAD(&act->list);
	return act;
}

static struct tc_action *
tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
{
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct tc_action *a;
	int index;
	int err;

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	if (tb[TCA_ACT_INDEX] == NULL ||
	    nla_len(tb[TCA_ACT_INDEX]) < sizeof(index))
		goto err_out;
	index = nla_get_u32(tb[TCA_ACT_INDEX]);

	err = -ENOMEM;
	a = create_a(0);
	if (a == NULL)
		goto err_out;

	err = -EINVAL;
	a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
	if (a->ops == NULL) /* could happen in batch of actions */
		goto err_free;
	err = -ENOENT;
	if (a->ops->lookup(a, index) == 0)
		goto err_mod;

	module_put(a->ops->owner);
	return a;

err_mod:
	module_put(a->ops->owner);
err_free:
	kfree(a);
err_out:
	return ERR_PTR(err);
}

static void cleanup_a(struct list_head *actions)
{
	struct tc_action *a, *tmp;

	list_for_each_entry_safe(a, tmp, actions, list) {
		list_del(&a->list);
		kfree(a);
	}
}
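
/* Handle RTM_DELACTION with NLM_F_ROOT: flush every instance of one
 * action kind.  A dummy tc_action is set up so that the kind's ->walk()
 * callback can delete the entries and fill the notification skb.
 */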
static int tca_action_flush(struct net *net, struct nlattr *nla,
			    struct nlmsghdr *n, u32 portid)
{
	struct sk_buff *skb;
	unsigned char *b;
	struct nlmsghdr *nlh;
	struct tcamsg *t;
	struct netlink_callback dcb;
	struct nlattr *nest;
	struct nlattr *tb[TCA_ACT_MAX + 1];
	struct nlattr *kind;
	struct tc_action a;
	int err = -ENOMEM;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		pr_debug("tca_action_flush: failed skb alloc\n");
		return err;
	}

	b = skb_tail_pointer(skb);

	err = nla_parse_nested(tb, TCA_ACT_MAX, nla, NULL);
	if (err < 0)
		goto err_out;

	err = -EINVAL;
	kind = tb[TCA_ACT_KIND];
	memset(&a, 0, sizeof(struct tc_action));
	INIT_LIST_HEAD(&a.list);
	a.ops = tc_lookup_action(kind);
	if (a.ops == NULL) /* someone is trying to flush an unknown action */
		goto err_out;

	nlh = nlmsg_put(skb, portid, n->nlmsg_seq, RTM_DELACTION, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	err = a.ops->walk(skb, &dcb, RTM_DELACTION, &a);
	if (err < 0)
		goto out_module_put;
	if (err == 0)
		goto noflush_out;

	nla_nest_end(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	nlh->nlmsg_flags |= NLM_F_ROOT;
	module_put(a.ops->owner);
	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		return 0;

	return err;

out_module_put:
	module_put(a.ops->owner);
err_out:
noflush_out:
	kfree_skb(skb);
	return err;
}

static int
tcf_del_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
	       u32 portid)
{
	int ret;
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, 0, RTM_DELACTION,
			 0, 1) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/* now do the delete */
	ret = tcf_action_destroy(actions, 0);
	if (ret < 0) {
		kfree_skb(skb);
		return ret;
	}

	ret = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (ret > 0)
		return 0;
	return ret;
}

static int
tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	      u32 portid, int event)
{
	int i, ret;
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct tc_action *act;
	LIST_HEAD(actions);

	ret = nla_parse_nested(tb, TCA_ACT_MAX_PRIO, nla, NULL);
	if (ret < 0)
		return ret;

	if (event == RTM_DELACTION && n->nlmsg_flags & NLM_F_ROOT) {
		if (tb[1] != NULL)
			return tca_action_flush(net, tb[1], n, portid);
		else
			return -EINVAL;
	}

	for (i = 1; i <= TCA_ACT_MAX_PRIO && tb[i]; i++) {
		act = tcf_action_get_1(tb[i], n, portid);
		if (IS_ERR(act)) {
			ret = PTR_ERR(act);
			goto err;
		}
		act->order = i;
		list_add_tail(&act->list, &actions);
	}

	if (event == RTM_GETACTION)
		ret = act_get_notify(net, portid, n, &actions, event);
	else { /* delete */
		ret = tcf_del_notify(net, n, &actions, portid);
		if (ret)
			goto err;
		return ret;
	}
err:
	cleanup_a(&actions);
	return ret;
}
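
/* Send an RTM_NEWACTION notification for the freshly installed actions. */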
static int
tcf_add_notify(struct net *net, struct nlmsghdr *n, struct list_head *actions,
	       u32 portid)
{
	struct sk_buff *skb;
	int err = 0;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOBUFS;

	if (tca_get_fill(skb, actions, portid, n->nlmsg_seq, n->nlmsg_flags,
			 RTM_NEWACTION, 0, 0) <= 0) {
		kfree_skb(skb);
		return -EINVAL;
	}

	err = rtnetlink_send(skb, net, portid, RTNLGRP_TC,
			     n->nlmsg_flags & NLM_F_ECHO);
	if (err > 0)
		err = 0;
	return err;
}

static int
tcf_action_add(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
	       u32 portid, int ovr)
{
	int ret = 0;
	LIST_HEAD(actions);

	ret = tcf_action_init(net, nla, NULL, NULL, ovr, 0, &actions);
	if (ret)
		goto done;

	/* dump then free all the actions after update; inserted policy
	 * stays intact
	 */
	ret = tcf_add_notify(net, n, &actions, portid);
	cleanup_a(&actions);
done:
	return ret;
}

static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
{
	struct net *net = sock_net(skb->sk);
	struct nlattr *tca[TCA_ACT_MAX + 1];
	u32 portid = skb ? NETLINK_CB(skb).portid : 0;
	int ret = 0, ovr = 0;

	if ((n->nlmsg_type != RTM_GETACTION) && !netlink_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
	if (ret < 0)
		return ret;

	if (tca[TCA_ACT_TAB] == NULL) {
		pr_notice("tc_ctl_action: received NO action attribs\n");
		return -EINVAL;
	}

	/* n->nlmsg_flags & NLM_F_CREATE */
	switch (n->nlmsg_type) {
	case RTM_NEWACTION:
		/* we are going to assume all other flags
		 * imply create only if it doesn't exist.
		 * Note that CREATE | EXCL implies that,
		 * but since we want to avoid ambiguity (eg when flags
		 * is zero) then just set this
		 */
		if (n->nlmsg_flags & NLM_F_REPLACE)
			ovr = 1;
replay:
		ret = tcf_action_add(net, tca[TCA_ACT_TAB], n, portid, ovr);
		if (ret == -EAGAIN)
			goto replay;
		break;
	case RTM_DELACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_DELACTION);
		break;
	case RTM_GETACTION:
		ret = tca_action_gd(net, tca[TCA_ACT_TAB], n,
				    portid, RTM_GETACTION);
		break;
	default:
		BUG();
	}

	return ret;
}

static struct nlattr *
find_dump_kind(const struct nlmsghdr *n)
{
	struct nlattr *tb1, *tb2[TCA_ACT_MAX + 1];
	struct nlattr *tb[TCA_ACT_MAX_PRIO + 1];
	struct nlattr *nla[TCAA_MAX + 1];
	struct nlattr *kind;

	if (nlmsg_parse(n, sizeof(struct tcamsg), nla, TCAA_MAX, NULL) < 0)
		return NULL;
	tb1 = nla[TCA_ACT_TAB];
	if (tb1 == NULL)
		return NULL;

	if (nla_parse(tb, TCA_ACT_MAX_PRIO, nla_data(tb1),
		      NLMSG_ALIGN(nla_len(tb1)), NULL) < 0)
		return NULL;

	if (tb[1] == NULL)
		return NULL;
	if (nla_parse(tb2, TCA_ACT_MAX, nla_data(tb[1]),
		      nla_len(tb[1]), NULL) < 0)
		return NULL;
	kind = tb2[TCA_ACT_KIND];

	return kind;
}
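
/* Netlink ->dumpit callback for RTM_GETACTION: resolve the requested
 * action kind via find_dump_kind() and let that kind's ->walk() callback
 * fill the dump skb.
 */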
static int
tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlmsghdr *nlh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	struct tc_action_ops *a_o;
	struct tc_action a;
	int ret = 0;
	struct tcamsg *t = (struct tcamsg *) nlmsg_data(cb->nlh);
	struct nlattr *kind = find_dump_kind(cb->nlh);

	if (kind == NULL) {
		pr_info("tc_dump_action: action bad kind\n");
		return 0;
	}

	a_o = tc_lookup_action(kind);
	if (a_o == NULL)
		return 0;

	memset(&a, 0, sizeof(struct tc_action));
	a.ops = a_o;

	nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			cb->nlh->nlmsg_type, sizeof(*t), 0);
	if (!nlh)
		goto out_module_put;
	t = nlmsg_data(nlh);
	t->tca_family = AF_UNSPEC;
	t->tca__pad1 = 0;
	t->tca__pad2 = 0;

	nest = nla_nest_start(skb, TCA_ACT_TAB);
	if (nest == NULL)
		goto out_module_put;

	ret = a_o->walk(skb, cb, RTM_GETACTION, &a);
	if (ret < 0)
		goto out_module_put;

	if (ret > 0) {
		nla_nest_end(skb, nest);
		ret = skb->len;
	} else
		nla_nest_cancel(skb, nest);

	nlh->nlmsg_len = skb_tail_pointer(skb) - b;
	if (NETLINK_CB(cb->skb).portid && ret)
		nlh->nlmsg_flags |= NLM_F_MULTI;
	module_put(a_o->owner);
	return skb->len;

out_module_put:
	module_put(a_o->owner);
	nlmsg_trim(skb, b);
	return skb->len;
}

static int __init tc_action_init(void)
{
	rtnl_register(PF_UNSPEC, RTM_NEWACTION, tc_ctl_action, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_DELACTION, tc_ctl_action, NULL, NULL);
	rtnl_register(PF_UNSPEC, RTM_GETACTION, tc_ctl_action, tc_dump_action,
		      NULL);

	return 0;
}

subsys_initcall(tc_action_init);
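
/*
 * Usage sketch (illustrative only; the "foo" identifiers below are
 * hypothetical and not part of this file): an action module fills in a
 * struct tc_action_ops and registers it from its module_init():
 *
 *	static struct tc_action_ops act_foo_ops = {
 *		.kind	= "foo",
 *		.type	= TCA_ACT_FOO,
 *		.owner	= THIS_MODULE,
 *		.act	= tcf_foo_act,
 *		.dump	= tcf_foo_dump,
 *		.init	= tcf_foo_init,
 *	};
 *
 *	static int __init foo_init_module(void)
 *	{
 *		return tcf_register_action(&act_foo_ops, FOO_TAB_MASK);
 *	}
 *
 * tcf_register_action() rejects ops without .act, .dump and .init and
 * supplies tcf_hash_search()/tcf_generic_walker() as the default .lookup
 * and .walk implementations.
 */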