#include <linux/if.h>
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/flow_keys.h>
#include "enic_res.h"
#include "enic_clsf.h"

/* enic_addfltr_5t - Add ipv4 5tuple filter
 *	@enic: enic struct of vnic
 *	@keys: flow_keys of ipv4 5tuple
 *	@rq: rq number to steer to
 *
 * This function returns the filter_id (hardware_id) of the filter
 * added. In case of error it returns a negative number.
 */
int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
{
	int res;
	struct filter data;

	switch (keys->ip_proto) {
	case IPPROTO_TCP:
		data.u.ipv4.protocol = PROTO_TCP;
		break;
	case IPPROTO_UDP:
		data.u.ipv4.protocol = PROTO_UDP;
		break;
	default:
		return -EPROTONOSUPPORT;
	}
	data.type = FILTER_IPV4_5TUPLE;
	data.u.ipv4.src_addr = ntohl(keys->src);
	data.u.ipv4.dst_addr = ntohl(keys->dst);
	data.u.ipv4.src_port = ntohs(keys->port16[0]);
	data.u.ipv4.dst_port = ntohs(keys->port16[1]);
	data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;

	spin_lock_bh(&enic->devcmd_lock);
	res = vnic_dev_classifier(enic->vdev, CLSF_ADD, &rq, &data);
	spin_unlock_bh(&enic->devcmd_lock);
	res = (res == 0) ? rq : res;

	return res;
}

/* enic_delfltr - Delete clsf filter
 *	@enic: enic struct of vnic
 *	@filter_id: filter_id (hardware_id) of the filter to be deleted
 *
 * This function returns zero in case of success, a negative number in case
 * of error.
 */
int enic_delfltr(struct enic *enic, u16 filter_id)
{
	int ret;

	spin_lock_bh(&enic->devcmd_lock);
	ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL);
	spin_unlock_bh(&enic->devcmd_lock);

	return ret;
}

/* enic_rfs_flw_tbl_init - initialize enic->rfs_h members
 *	@enic: enic data
 */
void enic_rfs_flw_tbl_init(struct enic *enic)
{
	int i;

	spin_lock_init(&enic->rfs_h.lock);
	for (i = 0; i <= ENIC_RFS_FLW_MASK; i++)
		INIT_HLIST_HEAD(&enic->rfs_h.ht_head[i]);
	enic->rfs_h.max = enic->config.num_arfs;
	enic->rfs_h.free = enic->rfs_h.max;
	enic->rfs_h.toclean = 0;
	enic_rfs_timer_start(enic);
}

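/* enic_rfs_flw_tbl_free - free the flow table and its hardware filters
 *	@enic: enic data
 *
 * Stops the expiry timer, deletes every installed hardware filter, and
 * frees all flow table nodes. rfs_h.free is forced to zero under the lock
 * so no further filters are accounted against the table.
 */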
void enic_rfs_flw_tbl_free(struct enic *enic)
{
	int i;

	enic_rfs_timer_stop(enic);
	spin_lock_bh(&enic->rfs_h.lock);
	enic->rfs_h.free = 0;
	for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[i];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			enic_delfltr(enic, n->fltr_id);
			hlist_del(&n->node);
			kfree(n);
		}
	}
	spin_unlock_bh(&enic->rfs_h.lock);
}

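/* htbl_fltr_search - linear search of the flow table by filter_id
 *	@enic: enic data
 *	@fltr_id: filter_id (hardware_id) to look up
 *
 * Walks every hash bucket and returns the matching node, or NULL if the
 * filter_id is not present. Callers are expected to serialize against
 * table updates, for example by holding rfs_h.lock.
 */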
struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
{
	int i;

	for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[i];
		hlist_for_each_entry_safe(n, tmp, hhead, node)
			if (n->fltr_id == fltr_id)
				return n;
	}

	return NULL;
}

#ifdef CONFIG_RFS_ACCEL
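/* enic_flow_may_expire - timer callback that expires idle aRFS filters
 *	@data: enic data, cast to unsigned long
 *
 * Scans ENIC_CLSF_EXPIRE_COUNT hash buckets per run, asks the stack via
 * rps_may_expire_flow() whether each flow is still in use, and removes the
 * hardware filter and table node for flows that are no longer referenced.
 * The timer re-arms itself to run again in HZ/4 jiffies.
 */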
void enic_flow_may_expire(unsigned long data)
{
	struct enic *enic = (struct enic *)data;
	bool res;
	int j;

	spin_lock_bh(&enic->rfs_h.lock);
	for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
		struct hlist_head *hhead;
		struct hlist_node *tmp;
		struct enic_rfs_fltr_node *n;

		hhead = &enic->rfs_h.ht_head[enic->rfs_h.toclean++];
		hlist_for_each_entry_safe(n, tmp, hhead, node) {
			res = rps_may_expire_flow(enic->netdev, n->rq_id,
						  n->flow_id, n->fltr_id);
			if (res) {
				res = enic_delfltr(enic, n->fltr_id);
				if (unlikely(res))
					continue;
				hlist_del(&n->node);
				kfree(n);
				enic->rfs_h.free++;
			}
		}
	}
	spin_unlock_bh(&enic->rfs_h.lock);
	mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
}

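/* htbl_key_search - find a flow table node matching the dissected flow keys
 *	@h: hash bucket to search
 *	@k: flow_keys of the ipv4 5tuple to match
 *
 * Returns the node whose stored keys match @k exactly, or NULL if the flow
 * is not present in this bucket.
 */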
static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
						  struct flow_keys *k)
{
	struct enic_rfs_fltr_node *tpos;

	hlist_for_each_entry(tpos, h, node)
		if (tpos->keys.src == k->src &&
		    tpos->keys.dst == k->dst &&
		    tpos->keys.ports == k->ports &&
		    tpos->keys.ip_proto == k->ip_proto &&
		    tpos->keys.n_proto == k->n_proto)
			return tpos;
	return NULL;
}

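/* enic_rx_flow_steer - add an aRFS steering filter for a received flow
 *	@dev: net device receiving the flow
 *	@skb: packet that triggered the steering request
 *	@rxq_index: rq the flow should be steered to
 *	@flow_id: flow identifier passed back to rps_may_expire_flow()
 *
 * Dissects the skb, installs (or moves) an ipv4 TCP/UDP 5tuple filter so
 * that subsequent packets of the flow land on @rxq_index, and records the
 * flow in the driver's hash table. Returns the hardware filter_id on
 * success or a negative errno. This is intended to be wired up as the
 * .ndo_rx_flow_steer callback in the driver's net_device_ops.
 */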
int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id)
{
	struct flow_keys keys;
	struct enic_rfs_fltr_node *n;
	struct enic *enic;
	u16 tbl_idx;
	int res, i;

	enic = netdev_priv(dev);
	res = skb_flow_dissect(skb, &keys);
	if (!res || keys.n_proto != htons(ETH_P_IP) ||
	    (keys.ip_proto != IPPROTO_TCP && keys.ip_proto != IPPROTO_UDP))
		return -EPROTONOSUPPORT;

	tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
	spin_lock_bh(&enic->rfs_h.lock);
	n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys);

	if (n) { /* entry already present */
		if (rxq_index == n->rq_id) {
			res = -EEXIST;
			goto ret_unlock;
		}

		/* The desired rq changed for the flow, so we need to delete
		 * the old fltr and add a new one.
		 *
		 * The moment we delete the fltr, upcoming pkts are put in the
		 * default rq based on RSS. When we add the new filter,
		 * upcoming pkts are put in the desired queue. This could
		 * cause out-of-order pkts.
		 *
		 * Let's first try adding the new fltr and then delete the
		 * old one.
		 */
		i = --enic->rfs_h.free;
		/* clsf tbl is full, we have to del the old fltr first */
		if (unlikely(i < 0)) {
			enic->rfs_h.free++;
			res = enic_delfltr(enic, n->fltr_id);
			if (unlikely(res < 0))
				goto ret_unlock;
			res = enic_addfltr_5t(enic, &keys, rxq_index);
			if (res < 0) {
				hlist_del(&n->node);
				/* the node is no longer reachable, free it */
				kfree(n);
				enic->rfs_h.free++;
				goto ret_unlock;
			}
		/* add the new fltr first, then del the old fltr */
		} else {
			int ret;

			res = enic_addfltr_5t(enic, &keys, rxq_index);
			if (res < 0) {
				enic->rfs_h.free++;
				goto ret_unlock;
			}
			ret = enic_delfltr(enic, n->fltr_id);
			/* deleting the old fltr failed. Add the old fltr to
			 * the list; enic_flow_may_expire() will try to delete
			 * it later.
			 */
			if (unlikely(ret < 0)) {
				struct enic_rfs_fltr_node *d;
				struct hlist_head *head;

				head = &enic->rfs_h.ht_head[tbl_idx];
				d = kmalloc(sizeof(*d), GFP_ATOMIC);
				if (d) {
					d->fltr_id = n->fltr_id;
					INIT_HLIST_NODE(&d->node);
					hlist_add_head(&d->node, head);
				}
			} else {
				enic->rfs_h.free++;
			}
		}
		n->rq_id = rxq_index;
		n->fltr_id = res;
		n->flow_id = flow_id;
	} else { /* entry not present */
		i = --enic->rfs_h.free;
		if (i <= 0) {
			enic->rfs_h.free++;
			res = -EBUSY;
			goto ret_unlock;
		}

		n = kmalloc(sizeof(*n), GFP_ATOMIC);
		if (!n) {
			res = -ENOMEM;
			enic->rfs_h.free++;
			goto ret_unlock;
		}

		res = enic_addfltr_5t(enic, &keys, rxq_index);
		if (res < 0) {
			kfree(n);
			enic->rfs_h.free++;
			goto ret_unlock;
		}
		n->rq_id = rxq_index;
		n->fltr_id = res;
		n->flow_id = flow_id;
		n->keys = keys;
		INIT_HLIST_NODE(&n->node);
		hlist_add_head(&n->node, &enic->rfs_h.ht_head[tbl_idx]);
	}

ret_unlock:
	spin_unlock_bh(&enic->rfs_h.lock);
	return res;
}

#endif /* CONFIG_RFS_ACCEL */