/* Intel Ethernet Switch Host Interface Driver
 * Copyright(c) 2013 - 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */

#include "fm10k.h"
#include "fm10k_vf.h"
#include "fm10k_pf.h"

static s32 fm10k_iov_msg_error(struct fm10k_hw *hw, u32 **results,
			       struct fm10k_mbx_info *mbx)
{
	struct fm10k_vf_info *vf_info = (struct fm10k_vf_info *)mbx;
	struct fm10k_intfc *interface = hw->back;
	struct pci_dev *pdev = interface->pdev;

	dev_err(&pdev->dev, "Unknown message ID %u on VF %d\n",
		**results & FM10K_TLV_ID_MASK, vf_info->vf_idx);

	return fm10k_tlv_msg_error(hw, results, mbx);
}

static const struct fm10k_msg_data iov_mbx_data[] = {
	FM10K_TLV_MSG_TEST_HANDLER(fm10k_tlv_msg_test),
	FM10K_VF_MSG_MSIX_HANDLER(fm10k_iov_msg_msix_pf),
	FM10K_VF_MSG_MAC_VLAN_HANDLER(fm10k_iov_msg_mac_vlan_pf),
	FM10K_VF_MSG_LPORT_STATE_HANDLER(fm10k_iov_msg_lport_state_pf),
	FM10K_TLV_MSG_ERROR_HANDLER(fm10k_iov_msg_error),
};

s32 fm10k_iov_event(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	s64 vflre;
	int i;

	/* if there is no iov_data then there are no mailboxes to process */
	if (!ACCESS_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	if (!(fm10k_read_reg(hw, FM10K_EICR) & FM10K_EICR_VFLR))
		goto read_unlock;

	/* read VFLRE to determine if any VFs have been reset */
	do {
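		/* Pack both 32-bit VFLRE registers into one 64-bit value,
		 * PFVFLRE(1) in the upper half and PFVFLRE(0) in the lower
		 * half, so that bit n corresponds to VF n.
		 */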
		vflre = fm10k_read_reg(hw, FM10K_PFVFLRE(1));
		vflre <<= 32;
		vflre |= fm10k_read_reg(hw, FM10K_PFVFLRE(0));

		i = iov_data->num_vfs;

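		/* Shift the bit for the highest VF index into the sign bit,
		 * then walk downward: doubling vflre each pass moves the next
		 * lower VF's bit into the sign position, so a negative value
		 * means that VF has a pending VFLR event.
		 */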
		for (vflre <<= 64 - i; vflre && i--; vflre += vflre) {
			struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

			if (vflre >= 0)
				continue;

			hw->iov.ops.reset_resources(hw, vf_info);
			vf_info->mbx.ops.connect(hw, &vf_info->mbx);
		}
	} while (i != iov_data->num_vfs);

read_unlock:
	rcu_read_unlock();

	return 0;
}

s32 fm10k_iov_mbx(struct fm10k_intfc *interface)
{
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_iov_data *iov_data;
	int i;

	/* if there is no iov_data then there are no mailboxes to process */
	if (!ACCESS_ONCE(interface->iov_data))
		return 0;

	rcu_read_lock();

	iov_data = interface->iov_data;

	/* check again now that we are in the RCU block */
	if (!iov_data)
		goto read_unlock;

	/* lock the mailbox for transmit and receive */
	fm10k_mbx_lock(interface);

	/* Most VF messages sent to the PF cause the PF to respond by
	 * requesting from the SM mailbox. This means that too many VF
	 * messages processed at once could cause a mailbox timeout on the PF.
	 * To prevent this, store a pointer to the next VF mbx to process. Use
	 * that as the start of the loop so that we don't starve whichever VF
	 * got ignored on the previous run.
	 */
process_mbx:
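	/* resume where we left off, or start from the last VF */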
	for (i = iov_data->next_vf_mbx ? : iov_data->num_vfs; i--;) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];
		struct fm10k_mbx_info *mbx = &vf_info->mbx;
		u16 glort = vf_info->glort;

		/* verify port mapping is valid, if not reset port */
		if (vf_info->vf_flags && !fm10k_glort_valid_pf(hw, glort))
			hw->iov.ops.reset_lport(hw, vf_info);

		/* reset VFs whose mailbox has timed out */
		if (!mbx->timeout) {
			hw->iov.ops.reset_resources(hw, vf_info);
			mbx->ops.connect(hw, mbx);
		}

		/* guarantee we have free space in the SM mailbox */
		if (!hw->mbx.ops.tx_ready(&hw->mbx, FM10K_VFMBX_MSG_MTU))
			break;

		/* cleanup mailbox and process received messages */
		mbx->ops.process(hw, mbx);
	}

	/* if we stopped processing mailboxes early, update next_vf_mbx.
	 * Otherwise, reset next_vf_mbx, and restart loop so that we process
	 * the remaining mailboxes we skipped at the start.
	 */
	if (i >= 0) {
		iov_data->next_vf_mbx = i + 1;
	} else if (iov_data->next_vf_mbx) {
		iov_data->next_vf_mbx = 0;
		goto process_mbx;
	}

	/* free the lock */
	fm10k_mbx_unlock(interface);

read_unlock:
	rcu_read_unlock();

	return 0;
}

void fm10k_iov_suspend(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* shut down queue mapping for VFs */
	fm10k_write_reg(hw, FM10K_DGLORTMAP(fm10k_dglort_vf_rss),
			FM10K_DGLORTMAP_NONE);

	/* Stop any active VFs and reset their resources */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		hw->iov.ops.reset_resources(hw, vf_info);
		hw->iov.ops.reset_lport(hw, vf_info);
	}
}

int fm10k_iov_resume(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_dglort_cfg dglort = { 0 };
	struct fm10k_hw *hw = &interface->hw;
	int num_vfs, i;

	/* pull out num_vfs from iov_data */
	num_vfs = iov_data ? iov_data->num_vfs : 0;

	/* return error if iov_data is not already populated */
	if (!iov_data)
		return -ENOMEM;

	/* allocate hardware resources for the VFs */
	hw->iov.ops.assign_resources(hw, num_vfs, num_vfs);

	/* configure DGLORT mapping for RSS */
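	/* fls(x - 1) gives the number of bits needed to index x entries:
	 * rss_l sizes the RSS queue space within one VF pool, and vsi_l
	 * sizes the VSI (VF) index space.
	 */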
	dglort.glort = hw->mac.dglort_map & FM10K_DGLORTMAP_NONE;
	dglort.idx = fm10k_dglort_vf_rss;
	dglort.inner_rss = 1;
	dglort.rss_l = fls(fm10k_queues_per_pool(hw) - 1);
	dglort.queue_b = fm10k_vf_queue_index(hw, 0);
	dglort.vsi_l = fls(hw->iov.total_vfs - 1);
	dglort.vsi_b = 1;

	hw->mac.ops.configure_dglort_map(hw, &dglort);

	/* assign resources to the device */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* allocate all but the last GLORT to the VFs */
		if (i == ((~hw->mac.dglort_map) >> FM10K_DGLORTMAP_MASK_SHIFT))
			break;

		/* assign GLORT to VF, and restrict it to multicast */
		hw->iov.ops.set_lport(hw, vf_info, i,
				      FM10K_VF_FLAG_MULTI_CAPABLE);

		/* assign our default vid to the VF following reset */
		vf_info->sw_vid = hw->mac.default_vid;

		/* mailbox is disconnected so we don't send a message */
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

		/* now we are ready so we can connect */
		vf_info->mbx.ops.connect(hw, &vf_info->mbx);
	}

	return 0;
}

s32 fm10k_iov_update_pvid(struct fm10k_intfc *interface, u16 glort, u16 pvid)
{
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;
	u16 vf_idx = (glort - hw->mac.dglort_map) & FM10K_DGLORTMAP_NONE;

	/* no IOV support, not our message to process */
	if (!iov_data)
		return FM10K_ERR_PARAM;

	/* glort outside our range, not our message to process */
	if (vf_idx >= iov_data->num_vfs)
		return FM10K_ERR_PARAM;

	/* determine if an update has occurred and if so notify the VF */
	vf_info = &iov_data->vf_info[vf_idx];
	if (vf_info->sw_vid != pvid) {
		vf_info->sw_vid = pvid;
		hw->iov.ops.assign_default_mac_vlan(hw, vf_info);
	}

	return 0;
}

static void fm10k_iov_free_data(struct pci_dev *pdev)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);

	if (!interface->iov_data)
		return;

	/* reclaim hardware resources */
	fm10k_iov_suspend(pdev);

	/* drop iov_data from interface */
	kfree_rcu(interface->iov_data, rcu);
	interface->iov_data = NULL;
}

static s32 fm10k_iov_alloc_data(struct pci_dev *pdev, int num_vfs)
{
	struct fm10k_intfc *interface = pci_get_drvdata(pdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	size_t size;
	int i, err;

	/* return error if iov_data is already populated */
	if (iov_data)
		return -EBUSY;

	/* The PF should always be able to assign resources */
	if (!hw->iov.ops.assign_resources)
		return -ENODEV;

	/* nothing to do if no VFs are requested */
	if (!num_vfs)
		return 0;

	/* allocate memory for VF storage */
	size = offsetof(struct fm10k_iov_data, vf_info[num_vfs]);
	iov_data = kzalloc(size, GFP_KERNEL);
	if (!iov_data)
		return -ENOMEM;

	/* record number of VFs */
	iov_data->num_vfs = num_vfs;

	/* loop through vf_info structures initializing each entry */
	for (i = 0; i < num_vfs; i++) {
		struct fm10k_vf_info *vf_info = &iov_data->vf_info[i];

		/* Record VF VSI value */
		vf_info->vsi = i + 1;
		vf_info->vf_idx = i;

		/* initialize mailbox memory */
		err = fm10k_pfvf_mbx_init(hw, &vf_info->mbx, iov_mbx_data, i);
		if (err) {
			dev_err(&pdev->dev,
				"Unable to initialize SR-IOV mailbox\n");
			kfree(iov_data);
			return err;
		}
	}

	/* assign iov_data to interface */
	interface->iov_data = iov_data;

	/* allocate hardware resources for the VFs */
	fm10k_iov_resume(pdev);

	return 0;
}

void fm10k_iov_disable(struct pci_dev *pdev)
{
	if (pci_num_vf(pdev) && pci_vfs_assigned(pdev))
		dev_err(&pdev->dev,
			"Cannot disable SR-IOV while VFs are assigned\n");
	else
		pci_disable_sriov(pdev);

	fm10k_iov_free_data(pdev);
}

static void fm10k_disable_aer_comp_abort(struct pci_dev *pdev)
{
	u32 err_sev;
	int pos;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &err_sev);
	err_sev &= ~PCI_ERR_UNC_COMP_ABORT;
	pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, err_sev);
}

int fm10k_iov_configure(struct pci_dev *pdev, int num_vfs)
{
	int current_vfs = pci_num_vf(pdev);
	int err = 0;

	if (current_vfs && pci_vfs_assigned(pdev)) {
		dev_err(&pdev->dev,
			"Cannot modify SR-IOV while VFs are assigned\n");
		num_vfs = current_vfs;
	} else {
		pci_disable_sriov(pdev);
		fm10k_iov_free_data(pdev);
	}

	/* allocate resources for the VFs */
	err = fm10k_iov_alloc_data(pdev, num_vfs);
	if (err)
		return err;

	/* allocate VFs if not already allocated */
	if (num_vfs && (num_vfs != current_vfs)) {
		/* Disable completer abort error reporting as
		 * the VFs can trigger this any time they read a queue
		 * that they don't own.
		 */
		fm10k_disable_aer_comp_abort(pdev);

		err = pci_enable_sriov(pdev, num_vfs);
		if (err) {
			dev_err(&pdev->dev,
				"Enable PCI SR-IOV failed: %d\n", err);
			return err;
		}
	}

	return num_vfs;
}

int fm10k_ndo_set_vf_mac(struct net_device *netdev, int vf_idx, u8 *mac)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* verify MAC addr is valid */
	if (!is_zero_ether_addr(mac) && !is_valid_ether_addr(mac))
		return -EINVAL;

	/* record new MAC address */
	vf_info = &iov_data->vf_info[vf_idx];
	ether_addr_copy(vf_info->mac, mac);

	/* assigning the MAC will send a mailbox message so lock is needed */
	fm10k_mbx_lock(interface);

	/* assign MAC address to VF */
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

	fm10k_mbx_unlock(interface);

	return 0;
}

int fm10k_ndo_set_vf_vlan(struct net_device *netdev, int vf_idx, u16 vid,
			  u8 qos)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* QoS is unsupported and accepted VLAN IDs are in the range 0-4094 */
	if (qos || (vid > (VLAN_VID_MASK - 1)))
		return -EINVAL;

	vf_info = &iov_data->vf_info[vf_idx];

	/* exit if there is nothing to do */
	if (vf_info->pf_vid == vid)
		return 0;

	/* record default VLAN ID for VF */
	vf_info->pf_vid = vid;

	/* assigning the VLAN will send a mailbox message so lock is needed */
	fm10k_mbx_lock(interface);

	/* Clear the VLAN table for the VF */
	hw->mac.ops.update_vlan(hw, FM10K_VLAN_ALL, vf_info->vsi, false);

	/* Update VF assignment and trigger reset */
	hw->iov.ops.assign_default_mac_vlan(hw, vf_info);

	fm10k_mbx_unlock(interface);

	return 0;
}

int fm10k_ndo_set_vf_bw(struct net_device *netdev, int vf_idx,
			int __always_unused unused, int rate)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_hw *hw = &interface->hw;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	/* rate limit cannot be less than 10 Mb/s or greater than link speed */
	if (rate && ((rate < FM10K_VF_TC_MIN) || rate > FM10K_VF_TC_MAX))
		return -EINVAL;

	/* store values */
	iov_data->vf_info[vf_idx].rate = rate;

	/* update hardware configuration */
	hw->iov.ops.configure_tc(hw, vf_idx, rate);

	return 0;
}

int fm10k_ndo_get_vf_config(struct net_device *netdev,
			    int vf_idx, struct ifla_vf_info *ivi)
{
	struct fm10k_intfc *interface = netdev_priv(netdev);
	struct fm10k_iov_data *iov_data = interface->iov_data;
	struct fm10k_vf_info *vf_info;

	/* verify SR-IOV is active and that vf idx is valid */
	if (!iov_data || vf_idx >= iov_data->num_vfs)
		return -EINVAL;

	vf_info = &iov_data->vf_info[vf_idx];

	ivi->vf = vf_idx;
	ivi->max_tx_rate = vf_info->rate;
	ivi->min_tx_rate = 0;
	ether_addr_copy(ivi->mac, vf_info->mac);
	ivi->vlan = vf_info->pf_vid;
	ivi->qos = 0;

	return 0;
}