Searched refs:pf (Results 1 - 200 of 304) sorted by relevance

/linux-4.1.27/drivers/block/paride/
pf.c
2 pf.c (c) 1997-8 Grant R. Guenther <grant@torque.net>
12 The behaviour of the pf driver can be altered by setting
70 (default "pf").
91 pf.drive0
92 pf.drive1
93 pf.drive2
94 pf.drive3
95 pf.cluster
96 pf.nice
98 In addition, you can use the parameter pf.disable to disable
118 #define PF_NAME "pf"
245 static int pf_identify(struct pf_unit *pf);
246 static void pf_lock(struct pf_unit *pf, int func);
247 static void pf_eject(struct pf_unit *pf);
281 struct pf_unit *pf; pf_init_units() local
285 for (unit = 0, pf = units; unit < PF_UNITS; unit++, pf++) { pf_init_units()
289 pf->disk = disk; pf_init_units()
290 pf->pi = &pf->pia; pf_init_units()
291 pf->media_status = PF_NM; pf_init_units()
292 pf->drive = (*drives[unit])[D_SLV]; pf_init_units()
293 pf->lun = (*drives[unit])[D_LUN]; pf_init_units()
294 snprintf(pf->name, PF_NAMELEN, "%s%d", name, unit); pf_init_units()
297 strcpy(disk->disk_name, pf->name); pf_init_units()
306 struct pf_unit *pf = bdev->bd_disk->private_data; pf_open() local
310 pf_identify(pf); pf_open()
313 if (pf->media_status == PF_NM) pf_open()
317 if ((pf->media_status == PF_RO) && (mode & FMODE_WRITE)) pf_open()
321 pf->access++; pf_open()
322 if (pf->removable) pf_open()
323 pf_lock(pf, 1); pf_open()
331 struct pf_unit *pf = bdev->bd_disk->private_data; pf_getgeo() local
332 sector_t capacity = get_capacity(pf->disk); pf_getgeo()
349 struct pf_unit *pf = bdev->bd_disk->private_data; pf_ioctl() local
354 if (pf->access != 1) pf_ioctl()
357 pf_eject(pf); pf_ioctl()
365 struct pf_unit *pf = disk->private_data; pf_release() local
368 if (pf->access <= 0) { pf_release()
374 pf->access--; pf_release()
376 if (!pf->access && pf->removable) pf_release()
377 pf_lock(pf, 0); pf_release()
387 static inline int status_reg(struct pf_unit *pf) status_reg() argument
389 return pi_read_regr(pf->pi, 1, 6); status_reg()
392 static inline int read_reg(struct pf_unit *pf, int reg) read_reg() argument
394 return pi_read_regr(pf->pi, 0, reg); read_reg()
397 static inline void write_reg(struct pf_unit *pf, int reg, int val) write_reg() argument
399 pi_write_regr(pf->pi, 0, reg, val); write_reg()
402 static int pf_wait(struct pf_unit *pf, int go, int stop, char *fun, char *msg) pf_wait() argument
407 while ((((r = status_reg(pf)) & go) || (stop && (!(r & stop)))) pf_wait()
412 s = read_reg(pf, 7); pf_wait()
413 e = read_reg(pf, 1); pf_wait()
414 p = read_reg(pf, 2); pf_wait()
420 pf->name, fun, msg, r, s, e, j, p); pf_wait()
426 static int pf_command(struct pf_unit *pf, char *cmd, int dlen, char *fun) pf_command() argument
428 pi_connect(pf->pi); pf_command()
430 write_reg(pf, 6, 0xa0+0x10*pf->drive); pf_command()
432 if (pf_wait(pf, STAT_BUSY | STAT_DRQ, 0, fun, "before command")) { pf_command()
433 pi_disconnect(pf->pi); pf_command()
437 write_reg(pf, 4, dlen % 256); pf_command()
438 write_reg(pf, 5, dlen / 256); pf_command()
439 write_reg(pf, 7, 0xa0); /* ATAPI packet command */ pf_command()
441 if (pf_wait(pf, STAT_BUSY, STAT_DRQ, fun, "command DRQ")) { pf_command()
442 pi_disconnect(pf->pi); pf_command()
446 if (read_reg(pf, 2) != 1) { pf_command()
447 printk("%s: %s: command phase error\n", pf->name, fun); pf_command()
448 pi_disconnect(pf->pi); pf_command()
452 pi_write_block(pf->pi, cmd, 12); pf_command()
457 static int pf_completion(struct pf_unit *pf, char *buf, char *fun) pf_completion() argument
461 r = pf_wait(pf, STAT_BUSY, STAT_DRQ | STAT_READY | STAT_ERR, pf_completion()
464 if ((read_reg(pf, 2) & 2) && (read_reg(pf, 7) & STAT_DRQ)) { pf_completion()
465 n = (((read_reg(pf, 4) + 256 * read_reg(pf, 5)) + pf_completion()
467 pi_read_block(pf->pi, buf, n); pf_completion()
470 s = pf_wait(pf, STAT_BUSY, STAT_READY | STAT_ERR, fun, "data done"); pf_completion()
472 pi_disconnect(pf->pi); pf_completion()
477 static void pf_req_sense(struct pf_unit *pf, int quiet) pf_req_sense() argument
480 { ATAPI_REQ_SENSE, pf->lun << 5, 0, 0, 16, 0, 0, 0, 0, 0, 0, 0 }; pf_req_sense()
484 r = pf_command(pf, rs_cmd, 16, "Request sense"); pf_req_sense()
487 pf_completion(pf, buf, "Request sense"); pf_req_sense()
491 pf->name, buf[2] & 0xf, buf[12], buf[13]); pf_req_sense()
494 static int pf_atapi(struct pf_unit *pf, char *cmd, int dlen, char *buf, char *fun) pf_atapi() argument
498 r = pf_command(pf, cmd, dlen, fun); pf_atapi()
501 r = pf_completion(pf, buf, fun); pf_atapi()
503 pf_req_sense(pf, !fun); pf_atapi()
508 static void pf_lock(struct pf_unit *pf, int func) pf_lock() argument
510 char lo_cmd[12] = { ATAPI_LOCK, pf->lun << 5, 0, 0, func, 0, 0, 0, 0, 0, 0, 0 }; pf_lock()
512 pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "lock" : "unlock"); pf_lock()
515 static void pf_eject(struct pf_unit *pf) pf_eject() argument
517 char ej_cmd[12] = { ATAPI_DOOR, pf->lun << 5, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0 }; pf_eject()
519 pf_lock(pf, 0); pf_eject()
520 pf_atapi(pf, ej_cmd, 0, pf_scratch, "eject"); pf_eject()
535 static int pf_reset(struct pf_unit *pf) pf_reset() argument
540 pi_connect(pf->pi); pf_reset()
541 write_reg(pf, 6, 0xa0+0x10*pf->drive); pf_reset()
542 write_reg(pf, 7, 8); pf_reset()
547 while ((k++ < PF_RESET_TMO) && (status_reg(pf) & STAT_BUSY)) pf_reset()
552 flg &= (read_reg(pf, i + 1) == expect[i]); pf_reset()
555 printk("%s: Reset (%d) signature = ", pf->name, k); pf_reset()
557 printk("%3x", read_reg(pf, i + 1)); pf_reset()
563 pi_disconnect(pf->pi); pf_reset()
567 static void pf_mode_sense(struct pf_unit *pf) pf_mode_sense() argument
570 { ATAPI_MODE_SENSE, pf->lun << 5, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0 }; pf_mode_sense()
573 pf_atapi(pf, ms_cmd, 8, buf, "mode sense"); pf_mode_sense()
574 pf->media_status = PF_RW; pf_mode_sense()
576 pf->media_status = PF_RO; pf_mode_sense()
603 static void pf_get_capacity(struct pf_unit *pf) pf_get_capacity() argument
605 char rc_cmd[12] = { ATAPI_CAPACITY, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; pf_get_capacity()
609 if (pf_atapi(pf, rc_cmd, 8, buf, "get capacity")) { pf_get_capacity()
610 pf->media_status = PF_NM; pf_get_capacity()
613 set_capacity(pf->disk, xl(buf, 0) + 1); pf_get_capacity()
616 set_capacity(pf->disk, 0); pf_get_capacity()
620 pf->name, pf->drive, pf->lun, bs); pf_get_capacity()
624 static int pf_identify(struct pf_unit *pf) pf_identify() argument
630 { ATAPI_IDENTIFY, pf->lun << 5, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0 }; pf_identify()
633 s = pf_atapi(pf, id_cmd, 36, buf, "identify"); pf_identify()
641 pf->name, pf->drive, pf->lun, dt); pf_identify()
648 pf->removable = (buf[1] & 0x80); pf_identify()
650 pf_mode_sense(pf); pf_identify()
651 pf_mode_sense(pf); pf_identify()
652 pf_mode_sense(pf); pf_identify()
654 pf_get_capacity(pf); pf_identify()
657 pf->name, mf, id, ms[pf->drive], pf->lun, dt); pf_identify()
658 if (pf->removable) pf_identify()
660 if (pf->media_status == PF_NM) pf_identify()
663 if (pf->media_status == PF_RO) pf_identify()
666 (unsigned long long)get_capacity(pf->disk)); pf_identify()
674 static int pf_probe(struct pf_unit *pf) pf_probe() argument
676 if (pf->drive == -1) { pf_probe()
677 for (pf->drive = 0; pf->drive <= 1; pf->drive++) pf_probe()
678 if (!pf_reset(pf)) { pf_probe()
679 if (pf->lun != -1) pf_probe()
680 return pf_identify(pf); pf_probe()
682 for (pf->lun = 0; pf->lun < 8; pf->lun++) pf_probe()
683 if (!pf_identify(pf)) pf_probe()
687 if (pf_reset(pf)) pf_probe()
689 if (pf->lun != -1) pf_probe()
690 return pf_identify(pf); pf_probe()
691 for (pf->lun = 0; pf->lun < 8; pf->lun++) pf_probe()
692 if (!pf_identify(pf)) pf_probe()
700 struct pf_unit *pf = units; pf_detect() local
708 if (pi_init(pf->pi, 1, -1, -1, -1, -1, -1, pf_scratch, PI_PF, pf_detect()
709 verbose, pf->name)) { pf_detect()
710 if (!pf_probe(pf) && pf->disk) { pf_detect()
711 pf->present = 1; pf_detect()
714 pi_release(pf->pi); pf_detect()
718 for (unit = 0; unit < PF_UNITS; unit++, pf++) { pf_detect()
722 if (pi_init(pf->pi, 0, conf[D_PRT], conf[D_MOD], pf_detect()
724 pf_scratch, PI_PF, verbose, pf->name)) { pf_detect()
725 if (pf->disk && !pf_probe(pf)) { pf_detect()
726 pf->present = 1; pf_detect()
729 pi_release(pf->pi); pf_detect()
736 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) pf_detect()
737 put_disk(pf->disk); pf_detect()
743 static int pf_start(struct pf_unit *pf, int cmd, int b, int c) pf_start() argument
746 char io_cmd[12] = { cmd, pf->lun << 5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; pf_start()
756 i = pf_command(pf, io_cmd, c * 512, "start i/o"); pf_start()
951 struct pf_unit *pf; pf_init() local
964 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) pf_init()
965 put_disk(pf->disk); pf_init()
971 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) pf_init()
972 put_disk(pf->disk); pf_init()
978 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { pf_init()
979 struct gendisk *disk = pf->disk; pf_init()
981 if (!pf->present) pf_init()
983 disk->private_data = pf; pf_init()
992 struct pf_unit *pf; pf_exit() local
995 for (pf = units, unit = 0; unit < PF_UNITS; pf++, unit++) { pf_exit()
996 if (!pf->present) pf_exit()
998 del_gendisk(pf->disk); pf_exit()
999 put_disk(pf->disk); pf_exit()
1000 pi_release(pf->pi); pf_exit()
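The header comment excerpted at the top of the pf.c results above documents the module parameters (pf.drive0 through pf.drive3, pf.cluster, pf.nice, and pf.disable) that alter the driver's behaviour. As a rough, hypothetical sketch of how parameters like these are commonly declared in a kernel module — not the actual pf.c declarations, whose types and default values may differ — the wiring could look like this:

/* Hypothetical sketch only: how module parameters such as pf.cluster,
 * pf.nice and pf.disable are commonly declared in a kernel module.
 * Types and default values here are assumptions, not the real pf.c
 * definitions. */
#include <linux/module.h>
#include <linux/moduleparam.h>

static int cluster = 64;        /* assumed default cluster size */
static int nice;                /* 0: do not yield the CPU between commands */
static int disable;             /* nonzero: do not register the driver */

module_param(cluster, int, 0);
module_param(nice, int, 0);
module_param(disable, int, 0);

MODULE_PARM_DESC(cluster, "cluster size used for buffered transfers");
MODULE_PARM_DESC(nice, "yield the CPU between commands");
MODULE_PARM_DESC(disable, "set nonzero to disable the driver");
MODULE_LICENSE("GPL");

Parameters declared this way are passed as pf.cluster=... on the kernel command line when the driver is built in, or as cluster=... to modprobe when pf is built as a module.
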
paride.h
9 IDE device drivers (pd, pf, pcd, pt) and the adapter chips.
/linux-4.1.27/drivers/net/ethernet/intel/i40e/
i40e_debugfs.c
38 * @pf - the PF structure to search for the vsi
41 static struct i40e_vsi *i40e_dbg_find_vsi(struct i40e_pf *pf, int seid) i40e_dbg_find_vsi() argument
46 dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); i40e_dbg_find_vsi()
48 for (i = 0; i < pf->num_alloc_vsi; i++) i40e_dbg_find_vsi()
49 if (pf->vsi[i] && (pf->vsi[i]->seid == seid)) i40e_dbg_find_vsi()
50 return pf->vsi[i]; i40e_dbg_find_vsi()
57 * @pf - the PF structure to search for the veb
60 static struct i40e_veb *i40e_dbg_find_veb(struct i40e_pf *pf, int seid) i40e_dbg_find_veb() argument
66 dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); i40e_dbg_find_veb()
69 if (pf->veb[i] && pf->veb[i]->seid == seid) i40e_dbg_find_veb()
70 return pf->veb[i]; i40e_dbg_find_veb()
115 * @pf: the PF we're working with
120 static int i40e_dbg_prep_dump_buf(struct i40e_pf *pf, int buflen) i40e_dbg_prep_dump_buf() argument
152 struct i40e_pf *pf = filp->private_data; i40e_dbg_dump_write() local
168 dev_info(&pf->pdev->dev, "bad seid value\n"); i40e_dbg_dump_write()
176 dev_info(&pf->pdev->dev, "debug buffer freed\n"); i40e_dbg_dump_write()
178 } else if (seid == pf->pf_seid || seid == 1) { i40e_dbg_dump_write()
183 * (pf->hw.aq.num_arq_entries + pf->hw.aq.num_asq_entries)); i40e_dbg_dump_write()
185 if (i40e_dbg_prep_dump_buf(pf, buflen)) { i40e_dbg_dump_write()
189 memcpy(p, pf, len); i40e_dbg_dump_write()
193 * pf->hw.aq.num_asq_entries); i40e_dbg_dump_write()
194 memcpy(p, pf->hw.aq.asq.desc_buf.va, len); i40e_dbg_dump_write()
198 * pf->hw.aq.num_arq_entries); i40e_dbg_dump_write()
199 memcpy(p, pf->hw.aq.arq.desc_buf.va, len); i40e_dbg_dump_write()
203 dev_info(&pf->pdev->dev, i40e_dbg_dump_write()
212 mutex_lock(&pf->switch_mutex); i40e_dbg_dump_write()
213 vsi = i40e_dbg_find_vsi(pf, seid); i40e_dbg_dump_write()
215 mutex_unlock(&pf->switch_mutex); i40e_dbg_dump_write()
228 if (i40e_dbg_prep_dump_buf(pf, buflen)) { i40e_dbg_dump_write()
273 dev_info(&pf->pdev->dev, i40e_dbg_dump_write()
277 mutex_unlock(&pf->switch_mutex); i40e_dbg_dump_write()
281 mutex_lock(&pf->switch_mutex); i40e_dbg_dump_write()
282 veb = i40e_dbg_find_veb(pf, seid); i40e_dbg_dump_write()
284 mutex_unlock(&pf->switch_mutex); i40e_dbg_dump_write()
289 if (i40e_dbg_prep_dump_buf(pf, buflen)) { i40e_dbg_dump_write()
293 dev_info(&pf->pdev->dev, i40e_dbg_dump_write()
297 mutex_unlock(&pf->switch_mutex); i40e_dbg_dump_write()
302 dev_info(&pf->pdev->dev, "unknown seid %ld\n", seid); i40e_dbg_dump_write()
333 struct i40e_pf *pf = filp->private_data; i40e_dbg_command_read() local
350 pf->vsi[pf->lan_vsi]->netdev->name, i40e_dbg_command_read()
365 * @pf: the i40e_pf created in command write
368 static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) i40e_dbg_dump_vsi_seid() argument
375 vsi = i40e_dbg_find_vsi(pf, seid); i40e_dbg_dump_vsi_seid()
377 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
381 dev_info(&pf->pdev->dev, "vsi seid %d\n", seid); i40e_dbg_dump_vsi_seid()
383 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
387 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
389 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
393 if (vsi == pf->vsi[pf->lan_vsi]) i40e_dbg_dump_vsi_seid()
394 dev_info(&pf->pdev->dev, "MAC address: %pM SAN MAC: %pM Port MAC: %pM\n", i40e_dbg_dump_vsi_seid()
395 pf->hw.mac.addr, i40e_dbg_dump_vsi_seid()
396 pf->hw.mac.san_addr, i40e_dbg_dump_vsi_seid()
397 pf->hw.mac.port_addr); i40e_dbg_dump_vsi_seid()
399 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
405 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
411 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
417 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
421 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
426 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
431 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
436 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
440 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
444 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
450 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
456 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
460 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
465 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
470 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
475 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
479 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
483 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
493 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
496 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
501 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
506 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
511 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
517 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
522 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
527 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
531 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
541 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
544 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
549 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
554 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
557 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
563 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
568 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
573 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
577 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
581 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
586 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
592 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
595 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
598 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
601 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
604 dev_info(&pf->pdev->dev, " type = %i\n", vsi->type); i40e_dbg_dump_vsi_seid()
605 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
608 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
611 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
614 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
618 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
622 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
625 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
629 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
635 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
641 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
647 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
653 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
656 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
659 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
665 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
671 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
674 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
683 dev_info(&pf->pdev->dev, " PF = %p\n", vsi->back); i40e_dbg_dump_vsi_seid()
684 dev_info(&pf->pdev->dev, " idx = %d\n", vsi->idx); i40e_dbg_dump_vsi_seid()
685 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
689 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
695 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
699 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
707 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
712 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
716 dev_info(&pf->pdev->dev, i40e_dbg_dump_vsi_seid()
720 dev_info(&pf->pdev->dev, " fcoe_stats: ddp_count = %llu\n", i40e_dbg_dump_vsi_seid()
728 * @pf: the i40e_pf created in command write
730 static void i40e_dbg_dump_aq_desc(struct i40e_pf *pf) i40e_dbg_dump_aq_desc() argument
733 struct i40e_hw *hw = &pf->hw; i40e_dbg_dump_aq_desc()
738 dev_driver_string(&pf->pdev->dev), i40e_dbg_dump_aq_desc()
739 dev_name(&pf->pdev->dev)); i40e_dbg_dump_aq_desc()
742 dev_info(&pf->pdev->dev, "AdminQ Tx Ring\n"); i40e_dbg_dump_aq_desc()
746 dev_info(&pf->pdev->dev, i40e_dbg_dump_aq_desc()
754 dev_info(&pf->pdev->dev, "AdminQ Rx Ring\n"); i40e_dbg_dump_aq_desc()
758 dev_info(&pf->pdev->dev, i40e_dbg_dump_aq_desc()
773 * @pf: the i40e_pf created in command write
777 struct i40e_pf *pf, bool is_rx_ring) i40e_dbg_dump_desc()
785 vsi = i40e_dbg_find_vsi(pf, vsi_seid); i40e_dbg_dump_desc()
787 dev_info(&pf->pdev->dev, "vsi %d not found\n", vsi_seid); i40e_dbg_dump_desc()
791 dev_info(&pf->pdev->dev, "ring %d not found\n", ring_id); i40e_dbg_dump_desc()
795 dev_info(&pf->pdev->dev, i40e_dbg_dump_desc()
808 dev_info(&pf->pdev->dev, "vsi = %02i %s ring = %02i\n", i40e_dbg_dump_desc()
813 dev_info(&pf->pdev->dev, i40e_dbg_dump_desc()
820 dev_info(&pf->pdev->dev, i40e_dbg_dump_desc()
826 dev_info(&pf->pdev->dev, i40e_dbg_dump_desc()
835 dev_info(&pf->pdev->dev, i40e_dbg_dump_desc()
841 dev_info(&pf->pdev->dev, i40e_dbg_dump_desc()
848 dev_info(&pf->pdev->dev, i40e_dbg_dump_desc()
854 dev_info(&pf->pdev->dev, i40e_dbg_dump_desc()
861 dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n"); i40e_dbg_dump_desc()
870 * @pf: the i40e_pf created in command write
872 static void i40e_dbg_dump_vsi_no_seid(struct i40e_pf *pf) i40e_dbg_dump_vsi_no_seid() argument
876 for (i = 0; i < pf->num_alloc_vsi; i++) i40e_dbg_dump_vsi_no_seid()
877 if (pf->vsi[i]) i40e_dbg_dump_vsi_no_seid()
878 dev_info(&pf->pdev->dev, "dump vsi[%d]: %d\n", i40e_dbg_dump_vsi_no_seid()
879 i, pf->vsi[i]->seid); i40e_dbg_dump_vsi_no_seid()
884 * @pf: the i40e_pf created in command write
887 static void i40e_dbg_dump_eth_stats(struct i40e_pf *pf, i40e_dbg_dump_eth_stats() argument
890 dev_info(&pf->pdev->dev, " ethstats:\n"); i40e_dbg_dump_eth_stats()
891 dev_info(&pf->pdev->dev, i40e_dbg_dump_eth_stats()
894 dev_info(&pf->pdev->dev, i40e_dbg_dump_eth_stats()
897 dev_info(&pf->pdev->dev, i40e_dbg_dump_eth_stats()
900 dev_info(&pf->pdev->dev, i40e_dbg_dump_eth_stats()
903 dev_info(&pf->pdev->dev, i40e_dbg_dump_eth_stats()
910 * @pf: the i40e_pf created in command write
913 static void i40e_dbg_dump_veb_seid(struct i40e_pf *pf, int seid) i40e_dbg_dump_veb_seid() argument
919 dev_info(&pf->pdev->dev, "%d: bad seid\n", seid); i40e_dbg_dump_veb_seid()
923 veb = i40e_dbg_find_veb(pf, seid); i40e_dbg_dump_veb_seid()
925 dev_info(&pf->pdev->dev, "can't find veb %d\n", seid); i40e_dbg_dump_veb_seid()
928 dev_info(&pf->pdev->dev, i40e_dbg_dump_veb_seid()
933 i40e_dbg_dump_eth_stats(pf, &veb->stats); i40e_dbg_dump_veb_seid()
938 * @pf: the i40e_pf created in command write
940 static void i40e_dbg_dump_veb_all(struct i40e_pf *pf) i40e_dbg_dump_veb_all() argument
946 veb = pf->veb[i]; i40e_dbg_dump_veb_all()
948 i40e_dbg_dump_veb_seid(pf, veb->seid); i40e_dbg_dump_veb_all()
954 * @pf: the PF that would be altered
958 static void i40e_dbg_cmd_fd_ctrl(struct i40e_pf *pf, u64 flag, bool enable) i40e_dbg_cmd_fd_ctrl() argument
961 pf->flags |= flag; i40e_dbg_cmd_fd_ctrl()
963 pf->flags &= ~flag; i40e_dbg_cmd_fd_ctrl()
964 pf->auto_disable_flags |= flag; i40e_dbg_cmd_fd_ctrl()
966 dev_info(&pf->pdev->dev, "requesting a PF reset\n"); i40e_dbg_cmd_fd_ctrl()
967 i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); i40e_dbg_cmd_fd_ctrl()
982 struct i40e_pf *pf = filp->private_data; i40e_dbg_command_write() local
1017 vsi_seid = pf->vsi[pf->lan_vsi]->seid; i40e_dbg_command_write()
1019 dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n", i40e_dbg_command_write()
1027 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { i40e_dbg_command_write()
1028 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; i40e_dbg_command_write()
1029 i40e_do_reset_safe(pf, i40e_dbg_command_write()
1033 vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0); i40e_dbg_command_write()
1035 dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n", i40e_dbg_command_write()
1038 dev_info(&pf->pdev->dev, "'%s' failed\n", cmd_buf); i40e_dbg_command_write()
1042 vsi = i40e_dbg_find_vsi(pf, vsi_seid); i40e_dbg_command_write()
1044 dev_info(&pf->pdev->dev, "del VSI %d: seid not found\n", i40e_dbg_command_write()
1049 dev_info(&pf->pdev->dev, "deleting VSI %d\n", vsi_seid); i40e_dbg_command_write()
1058 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1063 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1069 vsi = i40e_dbg_find_vsi(pf, vsi_seid); i40e_dbg_command_write()
1071 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1077 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) i40e_dbg_command_write()
1080 uplink_seid != pf->mac_seid) { i40e_dbg_command_write()
1081 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1087 veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, i40e_dbg_command_write()
1090 dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid); i40e_dbg_command_write()
1092 dev_info(&pf->pdev->dev, "add relay failed\n"); i40e_dbg_command_write()
1098 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1103 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1110 if (pf->veb[i] && pf->veb[i]->seid == veb_seid) i40e_dbg_command_write()
1113 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1118 dev_info(&pf->pdev->dev, "deleting relay %d\n", veb_seid); i40e_dbg_command_write()
1119 i40e_veb_release(pf->veb[i]); i40e_dbg_command_write()
1135 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1141 vsi = i40e_dbg_find_vsi(pf, vsi_seid); i40e_dbg_command_write()
1143 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1151 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1155 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1172 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1178 vsi = i40e_dbg_find_vsi(pf, vsi_seid); i40e_dbg_command_write()
1180 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1188 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1192 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1203 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1208 vsi = i40e_dbg_find_vsi(pf, vsi_seid); i40e_dbg_command_write()
1210 dev_info(&pf->pdev->dev, "add pvid: VSI %d not found\n", i40e_dbg_command_write()
1218 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1222 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1230 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1236 vsi = i40e_dbg_find_vsi(pf, vsi_seid); i40e_dbg_command_write()
1238 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1244 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1249 i40e_fetch_switch_configuration(pf, true); i40e_dbg_command_write()
1253 i40e_dbg_dump_vsi_seid(pf, vsi_seid); i40e_dbg_command_write()
1255 i40e_dbg_dump_vsi_no_seid(pf); i40e_dbg_command_write()
1259 i40e_dbg_dump_veb_seid(pf, vsi_seid); i40e_dbg_command_write()
1261 i40e_dbg_dump_veb_all(pf); i40e_dbg_command_write()
1268 desc_n, pf, true); i40e_dbg_command_write()
1274 desc_n, pf, false); i40e_dbg_command_write()
1276 i40e_dbg_dump_aq_desc(pf); i40e_dbg_command_write()
1278 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1280 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1282 dev_info(&pf->pdev->dev, "dump desc aq\n"); i40e_dbg_command_write()
1285 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1286 "core reset count: %d\n", pf->corer_count); i40e_dbg_command_write()
1287 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1288 "global reset count: %d\n", pf->globr_count); i40e_dbg_command_write()
1289 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1290 "emp reset count: %d\n", pf->empr_count); i40e_dbg_command_write()
1291 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1292 "pf reset count: %d\n", pf->pfr_count); i40e_dbg_command_write()
1293 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1294 "pf tx sluggish count: %d\n", i40e_dbg_command_write()
1295 pf->tx_sluggish_count); i40e_dbg_command_write()
1299 &pf->hw.local_dcbx_config; i40e_dbg_command_write()
1301 &pf->hw.remote_dcbx_config; i40e_dbg_command_write()
1312 ret = i40e_aq_query_port_ets_config(&pf->hw, i40e_dbg_command_write()
1313 pf->mac_seid, i40e_dbg_command_write()
1316 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1318 pf->hw.aq.asq_last_status); i40e_dbg_command_write()
1323 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1330 dev_info(&pf->pdev->dev, "port bw: tc_bw_share=%d tc_bw_limit=%d\n", i40e_dbg_command_write()
1338 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1340 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1345 dev_info(&pf->pdev->dev, "port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n", i40e_dbg_command_write()
1351 dev_info(&pf->pdev->dev, "port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n", i40e_dbg_command_write()
1356 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1360 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1363 dev_info(&pf->pdev->dev, "port app_table: %d prio=%d selector=%d protocol=0x%x\n", i40e_dbg_command_write()
1369 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1374 dev_info(&pf->pdev->dev, "remote port ets_cfg: %d prio_tc=%d tcbw=%d tctsa=%d\n", i40e_dbg_command_write()
1380 dev_info(&pf->pdev->dev, "remote port ets_rec: %d prio_tc=%d tcbw=%d tctsa=%d\n", i40e_dbg_command_write()
1385 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1391 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1395 dev_info(&pf->pdev->dev, "remote port app_table: %d prio=%d selector=%d protocol=0x%x\n", i40e_dbg_command_write()
1412 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1417 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1424 ret = i40e_aq_debug_dump(&pf->hw, cluster_id, table_id, i40e_dbg_command_write()
1429 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1431 ret, pf->hw.aq.asq_last_status); i40e_dbg_command_write()
1436 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1445 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1447 dev_info(&pf->pdev->dev, "dump switch\n"); i40e_dbg_command_write()
1448 dev_info(&pf->pdev->dev, "dump vsi [seid]\n"); i40e_dbg_command_write()
1449 dev_info(&pf->pdev->dev, "dump reset stats\n"); i40e_dbg_command_write()
1450 dev_info(&pf->pdev->dev, "dump port\n"); i40e_dbg_command_write()
1451 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1460 pf->hw.debug_mask = level; i40e_dbg_command_write()
1461 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1463 pf->hw.debug_mask); i40e_dbg_command_write()
1465 pf->msg_enable = level; i40e_dbg_command_write()
1466 dev_info(&pf->pdev->dev, "set msg_enable = 0x%08x\n", i40e_dbg_command_write()
1467 pf->msg_enable); i40e_dbg_command_write()
1469 dev_info(&pf->pdev->dev, "msg_enable = 0x%08x\n", i40e_dbg_command_write()
1470 pf->msg_enable); i40e_dbg_command_write()
1473 dev_info(&pf->pdev->dev, "debugfs: forcing PFR\n"); i40e_dbg_command_write()
1474 i40e_do_reset_safe(pf, (1 << __I40E_PF_RESET_REQUESTED)); i40e_dbg_command_write()
1477 dev_info(&pf->pdev->dev, "debugfs: forcing CoreR\n"); i40e_dbg_command_write()
1478 i40e_do_reset_safe(pf, (1 << __I40E_CORE_RESET_REQUESTED)); i40e_dbg_command_write()
1481 dev_info(&pf->pdev->dev, "debugfs: forcing GlobR\n"); i40e_dbg_command_write()
1482 i40e_do_reset_safe(pf, (1 << __I40E_GLOBAL_RESET_REQUESTED)); i40e_dbg_command_write()
1485 dev_info(&pf->pdev->dev, "debugfs: forcing EMPR\n"); i40e_dbg_command_write()
1486 i40e_do_reset_safe(pf, (1 << __I40E_EMP_RESET_REQUESTED)); i40e_dbg_command_write()
1493 dev_info(&pf->pdev->dev, "read <reg>\n"); i40e_dbg_command_write()
1499 dev_info(&pf->pdev->dev, "read reg address 0x%08x too large\n", i40e_dbg_command_write()
1504 value = rd32(&pf->hw, address); i40e_dbg_command_write()
1505 dev_info(&pf->pdev->dev, "read: 0x%08x = 0x%08x\n", i40e_dbg_command_write()
1512 dev_info(&pf->pdev->dev, "write <reg> <value>\n"); i40e_dbg_command_write()
1518 dev_info(&pf->pdev->dev, "write reg address 0x%08x too large\n", i40e_dbg_command_write()
1522 wr32(&pf->hw, address, value); i40e_dbg_command_write()
1523 value = rd32(&pf->hw, address); i40e_dbg_command_write()
1524 dev_info(&pf->pdev->dev, "write: 0x%08x = 0x%08x\n", i40e_dbg_command_write()
1531 for (i = 0; i < pf->num_alloc_vsi; i++) i40e_dbg_command_write()
1532 i40e_vsi_reset_stats(pf->vsi[i]); i40e_dbg_command_write()
1533 dev_info(&pf->pdev->dev, "vsi clear stats called for all vsi's\n"); i40e_dbg_command_write()
1535 vsi = i40e_dbg_find_vsi(pf, vsi_seid); i40e_dbg_command_write()
1537 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1543 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1547 dev_info(&pf->pdev->dev, "clear_stats vsi [seid]\n"); i40e_dbg_command_write()
1550 if (pf->hw.partition_id == 1) { i40e_dbg_command_write()
1551 i40e_pf_reset_stats(pf); i40e_dbg_command_write()
1552 dev_info(&pf->pdev->dev, "port stats cleared\n"); i40e_dbg_command_write()
1554 dev_info(&pf->pdev->dev, "clear port stats not allowed on this port partition\n"); i40e_dbg_command_write()
1557 dev_info(&pf->pdev->dev, "clear_stats vsi [seid] or clear_stats port\n"); i40e_dbg_command_write()
1576 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1583 ret = i40e_asq_send_command(&pf->hw, desc, NULL, 0, NULL); i40e_dbg_command_write()
1585 dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n"); i40e_dbg_command_write()
1587 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1589 desc->opcode, pf->hw.aq.asq_last_status); i40e_dbg_command_write()
1591 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1595 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1625 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1643 ret = i40e_asq_send_command(&pf->hw, desc, buff, i40e_dbg_command_write()
1646 dev_info(&pf->pdev->dev, "AQ command sent Status : Success\n"); i40e_dbg_command_write()
1648 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1650 desc->opcode, pf->hw.aq.asq_last_status); i40e_dbg_command_write()
1652 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1656 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1680 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) i40e_dbg_command_write()
1686 if (add && (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) i40e_dbg_command_write()
1711 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1733 dev_info(&pf->pdev->dev, "FD raw packet dump\n"); i40e_dbg_command_write()
1737 ret = i40e_program_fdir_filter(&fd_data, raw_packet, pf, add); i40e_dbg_command_write()
1739 dev_info(&pf->pdev->dev, "Filter command send Status : Success\n"); i40e_dbg_command_write()
1741 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1749 i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false); i40e_dbg_command_write()
1751 i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true); i40e_dbg_command_write()
1753 dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n", i40e_dbg_command_write()
1754 i40e_get_current_fd_count(pf)); i40e_dbg_command_write()
1758 ret = i40e_aq_stop_lldp(&pf->hw, false, NULL); i40e_dbg_command_write()
1760 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1762 pf->hw.aq.asq_last_status); i40e_dbg_command_write()
1765 ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, i40e_dbg_command_write()
1766 pf->hw.mac.addr, i40e_dbg_command_write()
1768 pf->vsi[pf->lan_vsi]->seid, i40e_dbg_command_write()
1771 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1773 __func__, pf->hw.aq.asq_last_status); i40e_dbg_command_write()
1777 pf->dcbx_cap = DCB_CAP_DCBX_HOST | i40e_dbg_command_write()
1782 ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, i40e_dbg_command_write()
1783 pf->hw.mac.addr, i40e_dbg_command_write()
1785 pf->vsi[pf->lan_vsi]->seid, i40e_dbg_command_write()
1788 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1790 __func__, pf->hw.aq.asq_last_status); i40e_dbg_command_write()
1794 ret = i40e_aq_start_lldp(&pf->hw, NULL); i40e_dbg_command_write()
1796 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1798 pf->hw.aq.asq_last_status); i40e_dbg_command_write()
1802 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | i40e_dbg_command_write()
1814 ret = i40e_aq_get_lldp_mib(&pf->hw, 0, i40e_dbg_command_write()
1819 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1821 pf->hw.aq.asq_last_status); i40e_dbg_command_write()
1826 dev_info(&pf->pdev->dev, "LLDP MIB (local)\n"); i40e_dbg_command_write()
1840 ret = i40e_aq_get_lldp_mib(&pf->hw, i40e_dbg_command_write()
1846 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1848 pf->hw.aq.asq_last_status); i40e_dbg_command_write()
1853 dev_info(&pf->pdev->dev, "LLDP MIB (remote)\n"); i40e_dbg_command_write()
1861 ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw, i40e_dbg_command_write()
1864 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1866 pf->hw.aq.asq_last_status); i40e_dbg_command_write()
1871 ret = i40e_aq_cfg_lldp_mib_change_event(&pf->hw, i40e_dbg_command_write()
1874 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1876 pf->hw.aq.asq_last_status); i40e_dbg_command_write()
1899 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1915 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); i40e_dbg_command_write()
1917 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1919 ret, pf->hw.aq.asq_last_status); i40e_dbg_command_write()
1924 ret = i40e_aq_read_nvm(&pf->hw, module, (2 * offset), i40e_dbg_command_write()
1926 i40e_release_nvm(&pf->hw); i40e_dbg_command_write()
1928 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1930 ret, pf->hw.aq.asq_last_status); i40e_dbg_command_write()
1932 dev_info(&pf->pdev->dev, i40e_dbg_command_write()
1943 dev_info(&pf->pdev->dev, "unknown command '%s'\n", cmd_buf); i40e_dbg_command_write()
1944 dev_info(&pf->pdev->dev, "available commands\n"); i40e_dbg_command_write()
1945 dev_info(&pf->pdev->dev, " add vsi [relay_seid]\n"); i40e_dbg_command_write()
1946 dev_info(&pf->pdev->dev, " del vsi [vsi_seid]\n"); i40e_dbg_command_write()
1947 dev_info(&pf->pdev->dev, " add relay <uplink_seid> <vsi_seid>\n"); i40e_dbg_command_write()
1948 dev_info(&pf->pdev->dev, " del relay <relay_seid>\n"); i40e_dbg_command_write()
1949 dev_info(&pf->pdev->dev, " add macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n"); i40e_dbg_command_write()
1950 dev_info(&pf->pdev->dev, " del macaddr <vsi_seid> <aa:bb:cc:dd:ee:ff> [vlan]\n"); i40e_dbg_command_write()
1951 dev_info(&pf->pdev->dev, " add pvid <vsi_seid> <vid>\n"); i40e_dbg_command_write()
1952 dev_info(&pf->pdev->dev, " del pvid <vsi_seid>\n"); i40e_dbg_command_write()
1953 dev_info(&pf->pdev->dev, " dump switch\n"); i40e_dbg_command_write()
1954 dev_info(&pf->pdev->dev, " dump vsi [seid]\n"); i40e_dbg_command_write()
1955 dev_info(&pf->pdev->dev, " dump desc tx <vsi_seid> <ring_id> [<desc_n>]\n"); i40e_dbg_command_write()
1956 dev_info(&pf->pdev->dev, " dump desc rx <vsi_seid> <ring_id> [<desc_n>]\n"); i40e_dbg_command_write()
1957 dev_info(&pf->pdev->dev, " dump desc aq\n"); i40e_dbg_command_write()
1958 dev_info(&pf->pdev->dev, " dump reset stats\n"); i40e_dbg_command_write()
1959 dev_info(&pf->pdev->dev, " dump debug fwdata <cluster_id> <table_id> <index>\n"); i40e_dbg_command_write()
1960 dev_info(&pf->pdev->dev, " msg_enable [level]\n"); i40e_dbg_command_write()
1961 dev_info(&pf->pdev->dev, " read <reg>\n"); i40e_dbg_command_write()
1962 dev_info(&pf->pdev->dev, " write <reg> <value>\n"); i40e_dbg_command_write()
1963 dev_info(&pf->pdev->dev, " clear_stats vsi [seid]\n"); i40e_dbg_command_write()
1964 dev_info(&pf->pdev->dev, " clear_stats port\n"); i40e_dbg_command_write()
1965 dev_info(&pf->pdev->dev, " pfr\n"); i40e_dbg_command_write()
1966 dev_info(&pf->pdev->dev, " corer\n"); i40e_dbg_command_write()
1967 dev_info(&pf->pdev->dev, " globr\n"); i40e_dbg_command_write()
1968 dev_info(&pf->pdev->dev, " send aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3>\n"); i40e_dbg_command_write()
1969 dev_info(&pf->pdev->dev, " send indirect aq_cmd <flags> <opcode> <datalen> <retval> <cookie_h> <cookie_l> <param0> <param1> <param2> <param3> <buffer_len>\n"); i40e_dbg_command_write()
1970 dev_info(&pf->pdev->dev, " add fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n"); i40e_dbg_command_write()
1971 dev_info(&pf->pdev->dev, " rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n"); i40e_dbg_command_write()
1972 dev_info(&pf->pdev->dev, " fd-atr off\n"); i40e_dbg_command_write()
1973 dev_info(&pf->pdev->dev, " fd-atr on\n"); i40e_dbg_command_write()
1974 dev_info(&pf->pdev->dev, " fd current cnt"); i40e_dbg_command_write()
1975 dev_info(&pf->pdev->dev, " lldp start\n"); i40e_dbg_command_write()
1976 dev_info(&pf->pdev->dev, " lldp stop\n"); i40e_dbg_command_write()
1977 dev_info(&pf->pdev->dev, " lldp get local\n"); i40e_dbg_command_write()
1978 dev_info(&pf->pdev->dev, " lldp get remote\n"); i40e_dbg_command_write()
1979 dev_info(&pf->pdev->dev, " lldp event on\n"); i40e_dbg_command_write()
1980 dev_info(&pf->pdev->dev, " lldp event off\n"); i40e_dbg_command_write()
1981 dev_info(&pf->pdev->dev, " nvm read [module] [word_offset] [word_count]\n"); i40e_dbg_command_write()
2014 struct i40e_pf *pf = filp->private_data; i40e_dbg_netdev_ops_read() local
2031 pf->vsi[pf->lan_vsi]->netdev->name, i40e_dbg_netdev_ops_read()
2055 struct i40e_pf *pf = filp->private_data; i40e_dbg_netdev_ops_write() local
2086 dev_info(&pf->pdev->dev, "tx_timeout <vsi_seid>\n"); i40e_dbg_netdev_ops_write()
2089 vsi = i40e_dbg_find_vsi(pf, vsi_seid); i40e_dbg_netdev_ops_write()
2091 dev_info(&pf->pdev->dev, i40e_dbg_netdev_ops_write()
2094 dev_info(&pf->pdev->dev, "tx_timeout: no netdev for VSI %d\n", i40e_dbg_netdev_ops_write()
2097 dev_info(&pf->pdev->dev, "tx_timeout: VSI %d not UP\n", i40e_dbg_netdev_ops_write()
2102 dev_info(&pf->pdev->dev, "tx_timeout called\n"); i40e_dbg_netdev_ops_write()
2104 dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n"); i40e_dbg_netdev_ops_write()
2111 dev_info(&pf->pdev->dev, "change_mtu <vsi_seid> <mtu>\n"); i40e_dbg_netdev_ops_write()
2114 vsi = i40e_dbg_find_vsi(pf, vsi_seid); i40e_dbg_netdev_ops_write()
2116 dev_info(&pf->pdev->dev, i40e_dbg_netdev_ops_write()
2119 dev_info(&pf->pdev->dev, "change_mtu: no netdev for VSI %d\n", i40e_dbg_netdev_ops_write()
2125 dev_info(&pf->pdev->dev, "change_mtu called\n"); i40e_dbg_netdev_ops_write()
2127 dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n"); i40e_dbg_netdev_ops_write()
2133 dev_info(&pf->pdev->dev, "set_rx_mode <vsi_seid>\n"); i40e_dbg_netdev_ops_write()
2136 vsi = i40e_dbg_find_vsi(pf, vsi_seid); i40e_dbg_netdev_ops_write()
2138 dev_info(&pf->pdev->dev, i40e_dbg_netdev_ops_write()
2141 dev_info(&pf->pdev->dev, "set_rx_mode: no netdev for VSI %d\n", i40e_dbg_netdev_ops_write()
2146 dev_info(&pf->pdev->dev, "set_rx_mode called\n"); i40e_dbg_netdev_ops_write()
2148 dev_info(&pf->pdev->dev, "Could not acquire RTNL - please try again\n"); i40e_dbg_netdev_ops_write()
2154 dev_info(&pf->pdev->dev, "napi <vsi_seid>\n"); i40e_dbg_netdev_ops_write()
2157 vsi = i40e_dbg_find_vsi(pf, vsi_seid); i40e_dbg_netdev_ops_write()
2159 dev_info(&pf->pdev->dev, "napi: VSI %d not found\n", i40e_dbg_netdev_ops_write()
2162 dev_info(&pf->pdev->dev, "napi: no netdev for VSI %d\n", i40e_dbg_netdev_ops_write()
2167 dev_info(&pf->pdev->dev, "napi called\n"); i40e_dbg_netdev_ops_write()
2170 dev_info(&pf->pdev->dev, "unknown command '%s'\n", i40e_dbg_netdev_ops_write()
2172 dev_info(&pf->pdev->dev, "available commands\n"); i40e_dbg_netdev_ops_write()
2173 dev_info(&pf->pdev->dev, " tx_timeout <vsi_seid>\n"); i40e_dbg_netdev_ops_write()
2174 dev_info(&pf->pdev->dev, " change_mtu <vsi_seid> <mtu>\n"); i40e_dbg_netdev_ops_write()
2175 dev_info(&pf->pdev->dev, " set_rx_mode <vsi_seid>\n"); i40e_dbg_netdev_ops_write()
2176 dev_info(&pf->pdev->dev, " napi <vsi_seid>\n"); i40e_dbg_netdev_ops_write()
2191 * @pf: the PF that is starting up
2193 void i40e_dbg_pf_init(struct i40e_pf *pf) i40e_dbg_pf_init() argument
2196 const char *name = pci_name(pf->pdev); i40e_dbg_pf_init()
2197 const struct device *dev = &pf->pdev->dev; i40e_dbg_pf_init()
2199 pf->i40e_dbg_pf = debugfs_create_dir(name, i40e_dbg_root); i40e_dbg_pf_init()
2200 if (!pf->i40e_dbg_pf) i40e_dbg_pf_init()
2203 pfile = debugfs_create_file("command", 0600, pf->i40e_dbg_pf, pf, i40e_dbg_pf_init()
2208 pfile = debugfs_create_file("dump", 0600, pf->i40e_dbg_pf, pf, i40e_dbg_pf_init()
2213 pfile = debugfs_create_file("netdev_ops", 0600, pf->i40e_dbg_pf, pf, i40e_dbg_pf_init()
2222 debugfs_remove_recursive(pf->i40e_dbg_pf); i40e_dbg_pf_init()
2228 * @pf: the PF that is stopping
2230 void i40e_dbg_pf_exit(struct i40e_pf *pf) i40e_dbg_pf_exit() argument
2232 debugfs_remove_recursive(pf->i40e_dbg_pf); i40e_dbg_pf_exit()
2233 pf->i40e_dbg_pf = NULL; i40e_dbg_pf_exit()
776 i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, struct i40e_pf *pf, bool is_rx_ring) i40e_dbg_dump_desc() argument
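The i40e_debugfs.c fragments above show how the driver's debugfs interface fits together: i40e_dbg_pf_init() creates per-PF "command", "dump", and "netdev_ops" files, and i40e_dbg_command_write() parses the commands listed in the help text (dump switch, dump vsi [seid], pfr, lldp start, and so on). A minimal, hypothetical userspace sketch of driving that interface follows; the debugfs mount point and the PCI device name in the path are assumptions, not something the fragments specify.

/* Hypothetical userspace sketch: issue one of the commands listed in
 * the help text above by writing to the per-PF debugfs "command" file
 * created by i40e_dbg_pf_init(). The debugfs mount point and PCI name
 * in the path are assumptions; adjust them for the actual system. */
#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/kernel/debug/i40e/0000:01:00.0/command", "w");

        if (!f) {
                perror("open i40e debugfs command file");
                return 1;
        }
        /* "dump vsi" with no seid lists the known VSIs; see
         * i40e_dbg_dump_vsi_no_seid() in the fragments above. */
        fputs("dump vsi\n", f);
        fclose(f);
        return 0;
}

The command handlers report their results through dev_info(), so the output of a dump lands in the kernel log rather than being read back from the debugfs file.
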
i40e_main.c
51 static void i40e_handle_reset_warning(struct i40e_pf *pf);
54 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
55 static int i40e_setup_misc_vector(struct i40e_pf *pf);
56 static void i40e_determine_queue_usage(struct i40e_pf *pf);
57 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
58 static void i40e_fdir_sb_setup(struct i40e_pf *pf);
104 struct i40e_pf *pf = (struct i40e_pf *)hw->back; i40e_allocate_dma_mem_d() local
107 mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size, i40e_allocate_dma_mem_d()
122 struct i40e_pf *pf = (struct i40e_pf *)hw->back; i40e_free_dma_mem_d() local
124 dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa); i40e_free_dma_mem_d()
167 * @pf: board private structure
178 static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile, i40e_get_lump() argument
185 dev_info(&pf->pdev->dev, i40e_get_lump()
254 * @pf - the pf structure to search for the vsi
257 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id) i40e_find_vsi_from_id() argument
261 for (i = 0; i < pf->num_alloc_vsi; i++) i40e_find_vsi_from_id()
262 if (pf->vsi[i] && (pf->vsi[i]->id == id)) i40e_find_vsi_from_id()
263 return pf->vsi[i]; i40e_find_vsi_from_id()
270 * @pf: board private structure
274 static void i40e_service_event_schedule(struct i40e_pf *pf) i40e_service_event_schedule() argument
276 if (!test_bit(__I40E_DOWN, &pf->state) && i40e_service_event_schedule()
277 !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) && i40e_service_event_schedule()
278 !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state)) i40e_service_event_schedule()
279 schedule_work(&pf->service_task); i40e_service_event_schedule()
298 struct i40e_pf *pf = vsi->back; i40e_tx_timeout() local
300 pf->tx_timeout_count++; i40e_tx_timeout()
302 if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20))) i40e_tx_timeout()
303 pf->tx_timeout_recovery_level = 1; i40e_tx_timeout()
304 pf->tx_timeout_last_recovery = jiffies; i40e_tx_timeout()
306 pf->tx_timeout_recovery_level); i40e_tx_timeout()
308 switch (pf->tx_timeout_recovery_level) { i40e_tx_timeout()
312 set_bit(__I40E_REINIT_REQUESTED, &pf->state); i40e_tx_timeout()
319 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); i40e_tx_timeout()
322 set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); i40e_tx_timeout()
325 set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); i40e_tx_timeout()
329 set_bit(__I40E_DOWN_REQUESTED, &pf->state); i40e_tx_timeout()
333 i40e_service_event_schedule(pf); i40e_tx_timeout()
334 pf->tx_timeout_recovery_level++; i40e_tx_timeout()
471 * @pf: the PF to be reset
473 void i40e_pf_reset_stats(struct i40e_pf *pf) i40e_pf_reset_stats() argument
477 memset(&pf->stats, 0, sizeof(pf->stats)); i40e_pf_reset_stats()
478 memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets)); i40e_pf_reset_stats()
479 pf->stat_offsets_loaded = false; i40e_pf_reset_stats()
482 if (pf->veb[i]) { i40e_pf_reset_stats()
483 memset(&pf->veb[i]->stats, 0, i40e_pf_reset_stats()
484 sizeof(pf->veb[i]->stats)); i40e_pf_reset_stats()
485 memset(&pf->veb[i]->stats_offsets, 0, i40e_pf_reset_stats()
486 sizeof(pf->veb[i]->stats_offsets)); i40e_pf_reset_stats()
487 pf->veb[i]->stat_offsets_loaded = false; i40e_pf_reset_stats()
556 struct i40e_pf *pf = vsi->back; i40e_update_eth_stats() local
557 struct i40e_hw *hw = &pf->hw; i40e_update_eth_stats()
620 struct i40e_pf *pf = veb->pf; i40e_update_veb_stats() local
621 struct i40e_hw *hw = &pf->hw; i40e_update_veb_stats()
674 struct i40e_pf *pf = vsi->back; i40e_update_fcoe_stats() local
675 struct i40e_hw *hw = &pf->hw; i40e_update_fcoe_stats()
683 idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET; i40e_update_fcoe_stats()
718 * @pf: the corresponding PF
722 static void i40e_update_link_xoff_rx(struct i40e_pf *pf) i40e_update_link_xoff_rx() argument
724 struct i40e_hw_port_stats *osd = &pf->stats_offsets; i40e_update_link_xoff_rx()
725 struct i40e_hw_port_stats *nsd = &pf->stats; i40e_update_link_xoff_rx()
726 struct i40e_hw *hw = &pf->hw; i40e_update_link_xoff_rx()
736 pf->stat_offsets_loaded, i40e_update_link_xoff_rx()
744 for (v = 0; v < pf->num_alloc_vsi; v++) { i40e_update_link_xoff_rx()
745 struct i40e_vsi *vsi = pf->vsi[v]; i40e_update_link_xoff_rx()
759 * @pf: the corresponding PF
763 static void i40e_update_prio_xoff_rx(struct i40e_pf *pf) i40e_update_prio_xoff_rx() argument
765 struct i40e_hw_port_stats *osd = &pf->stats_offsets; i40e_update_prio_xoff_rx()
766 struct i40e_hw_port_stats *nsd = &pf->stats; i40e_update_prio_xoff_rx()
769 struct i40e_hw *hw = &pf->hw; i40e_update_prio_xoff_rx()
776 if (!(pf->flags & I40E_FLAG_DCB_ENABLED) || i40e_update_prio_xoff_rx()
778 i40e_update_link_xoff_rx(pf); i40e_update_prio_xoff_rx()
785 pf->stat_offsets_loaded, i40e_update_prio_xoff_rx()
798 for (v = 0; v < pf->num_alloc_vsi; v++) { i40e_update_prio_xoff_rx()
799 struct i40e_vsi *vsi = pf->vsi[v]; i40e_update_prio_xoff_rx()
827 struct i40e_pf *pf = vsi->back; i40e_update_vsi_stats() local
842 test_bit(__I40E_CONFIG_BUSY, &pf->state)) i40e_update_vsi_stats()
908 if (vsi == pf->vsi[pf->lan_vsi]) { i40e_update_vsi_stats()
909 ns->rx_crc_errors = pf->stats.crc_errors; i40e_update_vsi_stats()
910 ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes; i40e_update_vsi_stats()
911 ns->rx_length_errors = pf->stats.rx_length_errors; i40e_update_vsi_stats()
917 * @pf: the PF to be updated
919 static void i40e_update_pf_stats(struct i40e_pf *pf) i40e_update_pf_stats() argument
921 struct i40e_hw_port_stats *osd = &pf->stats_offsets; i40e_update_pf_stats()
922 struct i40e_hw_port_stats *nsd = &pf->stats; i40e_update_pf_stats()
923 struct i40e_hw *hw = &pf->hw; i40e_update_pf_stats()
929 pf->stat_offsets_loaded, i40e_update_pf_stats()
933 pf->stat_offsets_loaded, i40e_update_pf_stats()
936 pf->stat_offsets_loaded, i40e_update_pf_stats()
941 pf->stat_offsets_loaded, i40e_update_pf_stats()
946 pf->stat_offsets_loaded, i40e_update_pf_stats()
951 pf->stat_offsets_loaded, i40e_update_pf_stats()
956 pf->stat_offsets_loaded, i40e_update_pf_stats()
961 pf->stat_offsets_loaded, i40e_update_pf_stats()
966 pf->stat_offsets_loaded, i40e_update_pf_stats()
971 pf->stat_offsets_loaded, i40e_update_pf_stats()
976 pf->stat_offsets_loaded, i40e_update_pf_stats()
980 pf->stat_offsets_loaded, i40e_update_pf_stats()
984 pf->stat_offsets_loaded, i40e_update_pf_stats()
988 pf->stat_offsets_loaded, i40e_update_pf_stats()
993 pf->stat_offsets_loaded, i40e_update_pf_stats()
998 pf->stat_offsets_loaded, i40e_update_pf_stats()
1001 pf->stat_offsets_loaded, i40e_update_pf_stats()
1003 i40e_update_prio_xoff_rx(pf); /* handles I40E_GLPRT_LXOFFRXC */ i40e_update_pf_stats()
1005 pf->stat_offsets_loaded, i40e_update_pf_stats()
1010 pf->stat_offsets_loaded, i40e_update_pf_stats()
1014 pf->stat_offsets_loaded, i40e_update_pf_stats()
1018 pf->stat_offsets_loaded, i40e_update_pf_stats()
1023 pf->stat_offsets_loaded, i40e_update_pf_stats()
1030 pf->stat_offsets_loaded, i40e_update_pf_stats()
1034 pf->stat_offsets_loaded, i40e_update_pf_stats()
1038 pf->stat_offsets_loaded, i40e_update_pf_stats()
1042 pf->stat_offsets_loaded, i40e_update_pf_stats()
1046 pf->stat_offsets_loaded, i40e_update_pf_stats()
1050 pf->stat_offsets_loaded, i40e_update_pf_stats()
1054 pf->stat_offsets_loaded, i40e_update_pf_stats()
1059 pf->stat_offsets_loaded, i40e_update_pf_stats()
1063 pf->stat_offsets_loaded, i40e_update_pf_stats()
1067 pf->stat_offsets_loaded, i40e_update_pf_stats()
1071 pf->stat_offsets_loaded, i40e_update_pf_stats()
1075 pf->stat_offsets_loaded, i40e_update_pf_stats()
1079 pf->stat_offsets_loaded, i40e_update_pf_stats()
1083 pf->stat_offsets_loaded, i40e_update_pf_stats()
1087 pf->stat_offsets_loaded, i40e_update_pf_stats()
1090 pf->stat_offsets_loaded, i40e_update_pf_stats()
1093 pf->stat_offsets_loaded, i40e_update_pf_stats()
1096 pf->stat_offsets_loaded, i40e_update_pf_stats()
1100 i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_atr_cnt_idx), i40e_update_pf_stats()
1101 pf->stat_offsets_loaded, i40e_update_pf_stats()
1103 i40e_stat_update32(hw, I40E_GLQF_PCNT(pf->fd_sb_cnt_idx), i40e_update_pf_stats()
1104 pf->stat_offsets_loaded, i40e_update_pf_stats()
1115 pf->stat_offsets_loaded, i40e_update_pf_stats()
1118 pf->stat_offsets_loaded, i40e_update_pf_stats()
1121 pf->stat_offsets_loaded = true; i40e_update_pf_stats()
1132 struct i40e_pf *pf = vsi->back; i40e_update_stats() local
1134 if (vsi == pf->vsi[pf->lan_vsi]) i40e_update_stats()
1135 i40e_update_pf_stats(pf); i40e_update_stats()
1261 struct i40e_pf *pf = vsi->back; i40e_rm_default_mac_filter() local
1273 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); i40e_rm_default_mac_filter()
1406 struct i40e_pf *pf = vsi->back; i40e_set_mac() local
1407 struct i40e_hw *hw = &pf->hw; i40e_set_mac()
1449 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); i40e_set_mac()
1461 i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL); i40e_set_mac()
1496 struct i40e_pf *pf = vsi->back; i40e_vsi_setup_queue_map() local
1516 dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n"); i40e_vsi_setup_queue_map()
1531 qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix); i40e_vsi_setup_queue_map()
1543 qcount = min_t(int, pf->rss_size, num_tc_qps); i40e_vsi_setup_queue_map()
1595 vsi->num_queue_pairs = pf->num_lan_msix; i40e_vsi_setup_queue_map()
1715 struct i40e_pf *pf; i40e_sync_vsi_filters() local
1726 pf = vsi->back; i40e_sync_vsi_filters()
1736 filter_list_len = pf->hw.aq.asq_buf_size / i40e_sync_vsi_filters()
1769 aq_ret = i40e_aq_remove_macvlan(&pf->hw, i40e_sync_vsi_filters()
1776 pf->hw.aq.asq_last_status != i40e_sync_vsi_filters()
1778 dev_info(&pf->pdev->dev, i40e_sync_vsi_filters()
1781 pf->hw.aq.asq_last_status); i40e_sync_vsi_filters()
1785 aq_ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, i40e_sync_vsi_filters()
1790 pf->hw.aq.asq_last_status != I40E_AQ_RC_ENOENT) i40e_sync_vsi_filters()
1791 dev_info(&pf->pdev->dev, i40e_sync_vsi_filters()
1793 aq_ret, pf->hw.aq.asq_last_status); i40e_sync_vsi_filters()
1800 filter_list_len = pf->hw.aq.asq_buf_size / i40e_sync_vsi_filters()
1831 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, i40e_sync_vsi_filters()
1842 aq_ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid, i40e_sync_vsi_filters()
1850 pf->hw.aq.asq_last_status != I40E_AQ_RC_EINVAL) { i40e_sync_vsi_filters()
1851 dev_info(&pf->pdev->dev, i40e_sync_vsi_filters()
1853 aq_ret, pf->hw.aq.asq_last_status); i40e_sync_vsi_filters()
1854 if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) && i40e_sync_vsi_filters()
1860 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n"); i40e_sync_vsi_filters()
1874 dev_info(&pf->pdev->dev, i40e_sync_vsi_filters()
1876 aq_ret, pf->hw.aq.asq_last_status); i40e_sync_vsi_filters()
1887 dev_info(&pf->pdev->dev, i40e_sync_vsi_filters()
1889 aq_ret, pf->hw.aq.asq_last_status); i40e_sync_vsi_filters()
1894 dev_info(&pf->pdev->dev, i40e_sync_vsi_filters()
1896 aq_ret, pf->hw.aq.asq_last_status); i40e_sync_vsi_filters()
1905 * @pf: board private structure
1907 static void i40e_sync_filters_subtask(struct i40e_pf *pf) i40e_sync_filters_subtask() argument
1911 if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC)) i40e_sync_filters_subtask()
1913 pf->flags &= ~I40E_FLAG_FILTER_SYNC; i40e_sync_filters_subtask()
1915 for (v = 0; v < pf->num_alloc_vsi; v++) { i40e_sync_filters_subtask()
1916 if (pf->vsi[v] && i40e_sync_filters_subtask()
1917 (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) i40e_sync_filters_subtask()
1918 i40e_sync_vsi_filters(pf->vsi[v]); i40e_sync_filters_subtask()
1957 struct i40e_pf *pf = np->vsi->back; i40e_ioctl() local
1961 return i40e_ptp_get_ts_config(pf, ifr); i40e_ioctl()
1963 return i40e_ptp_set_ts_config(pf, ifr); i40e_ioctl()
2760 struct i40e_pf *pf = vsi->back; i40e_fdir_filter_restore() local
2763 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) i40e_fdir_filter_restore()
2767 &pf->fdir_filter_list, fdir_node) { i40e_fdir_filter_restore()
2796 struct i40e_pf *pf = vsi->back; i40e_vsi_configure_msix() local
2798 struct i40e_hw *hw = &pf->hw; i40e_vsi_configure_msix()
2857 static void i40e_enable_misc_int_causes(struct i40e_pf *pf) i40e_enable_misc_int_causes() argument
2859 struct i40e_hw *hw = &pf->hw; i40e_enable_misc_int_causes()
2875 if (pf->flags & I40E_FLAG_PTP) i40e_enable_misc_int_causes()
2895 struct i40e_pf *pf = vsi->back; i40e_configure_msi_and_legacy() local
2896 struct i40e_hw *hw = &pf->hw; i40e_configure_msi_and_legacy()
2907 i40e_enable_misc_int_causes(pf); i40e_configure_msi_and_legacy()
2929 * @pf: board private structure
2931 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf) i40e_irq_dynamic_disable_icr0() argument
2933 struct i40e_hw *hw = &pf->hw; i40e_irq_dynamic_disable_icr0()
2942 * @pf: board private structure
2944 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf) i40e_irq_dynamic_enable_icr0() argument
2946 struct i40e_hw *hw = &pf->hw; i40e_irq_dynamic_enable_icr0()
2964 struct i40e_pf *pf = vsi->back; i40e_irq_dynamic_enable() local
2965 struct i40e_hw *hw = &pf->hw; i40e_irq_dynamic_enable()
2982 struct i40e_pf *pf = vsi->back; i40e_irq_dynamic_disable() local
2983 struct i40e_hw *hw = &pf->hw; i40e_irq_dynamic_disable()
3018 struct i40e_pf *pf = vsi->back; i40e_vsi_request_irq_msix() local
3041 err = request_irq(pf->msix_entries[base + vector].vector, i40e_vsi_request_irq_msix()
3047 dev_info(&pf->pdev->dev, i40e_vsi_request_irq_msix()
3053 irq_set_affinity_hint(pf->msix_entries[base + vector].vector, i40e_vsi_request_irq_msix()
3063 irq_set_affinity_hint(pf->msix_entries[base + vector].vector, i40e_vsi_request_irq_msix()
3065 free_irq(pf->msix_entries[base + vector].vector, i40e_vsi_request_irq_msix()
3077 struct i40e_pf *pf = vsi->back; i40e_vsi_disable_irq() local
3078 struct i40e_hw *hw = &pf->hw; i40e_vsi_disable_irq()
3087 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { i40e_vsi_disable_irq()
3094 synchronize_irq(pf->msix_entries[i + base].vector); i40e_vsi_disable_irq()
3100 synchronize_irq(pf->pdev->irq); i40e_vsi_disable_irq()
3110 struct i40e_pf *pf = vsi->back; i40e_vsi_enable_irq() local
3113 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { i40e_vsi_enable_irq()
3118 i40e_irq_dynamic_enable_icr0(pf); i40e_vsi_enable_irq()
3121 i40e_flush(&pf->hw); i40e_vsi_enable_irq()
3127 * @pf: board private structure
3129 static void i40e_stop_misc_vector(struct i40e_pf *pf) i40e_stop_misc_vector() argument
3132 wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0); i40e_stop_misc_vector()
3133 i40e_flush(&pf->hw); i40e_stop_misc_vector()
3147 struct i40e_pf *pf = (struct i40e_pf *)data; i40e_intr() local
3148 struct i40e_hw *hw = &pf->hw; i40e_intr()
3163 pf->sw_int_count++; i40e_intr()
3177 if (!test_bit(__I40E_DOWN, &pf->state)) i40e_intr()
3178 napi_schedule(&pf->vsi[pf->lan_vsi]->q_vectors[0]->napi); i40e_intr()
3183 set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); i40e_intr()
3188 set_bit(__I40E_MDD_EVENT_PENDING, &pf->state); i40e_intr()
3193 set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); i40e_intr()
3197 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) i40e_intr()
3198 set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); i40e_intr()
3204 pf->corer_count++; i40e_intr()
3206 pf->globr_count++; i40e_intr()
3208 pf->empr_count++; i40e_intr()
3209 set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state); i40e_intr()
3215 dev_info(&pf->pdev->dev, "HMC error interrupt\n"); i40e_intr()
3216 dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n", i40e_intr()
3226 i40e_ptp_tx_hwtstamp(pf); i40e_intr()
3236 dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n", i40e_intr()
3241 dev_info(&pf->pdev->dev, "device will be reset\n"); i40e_intr()
3242 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); i40e_intr()
3243 i40e_service_event_schedule(pf); i40e_intr()
3252 if (!test_bit(__I40E_DOWN, &pf->state)) { i40e_intr()
3253 i40e_service_event_schedule(pf); i40e_intr()
3254 i40e_irq_dynamic_enable_icr0(pf); i40e_intr()
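Editor's note: the i40e_intr() hits above show the driver's legacy/MSI interrupt handler shape: read the interrupt cause register, hand queue work to NAPI, latch slow-path events (admin queue, MDD, VFLR, reset requests) into state bits, and defer them to the service task before re-enabling the vector. A minimal sketch of that shape follows; struct my_adapter, my_intr() and the MY_* cause bits are hypothetical stand-ins, not the i40e register layout.

    #include <linux/bitops.h>
    #include <linux/interrupt.h>
    #include <linux/io.h>
    #include <linux/netdevice.h>
    #include <linux/workqueue.h>

    /* Hypothetical adapter with only the fields this sketch needs. */
    struct my_adapter {
        struct napi_struct napi;        /* LAN queue-pair 0 NAPI context   */
        struct work_struct service_task;
        unsigned long state;            /* pending slow-path event bits    */
        void __iomem *icr;              /* read-to-clear cause register    */
    };

    #define MY_CAUSE_QUEUE   BIT(0)     /* Rx/Tx queue interrupt           */
    #define MY_CAUSE_ADMINQ  BIT(1)     /* admin-queue / mailbox event     */
    #define MY_EVT_ADMINQ    0          /* bit index inside adapter->state */

    static irqreturn_t my_intr(int irq, void *data)
    {
        struct my_adapter *ad = data;
        u32 icr = readl(ad->icr);       /* reading clears the causes (assumed) */

        if (!icr)
            return IRQ_NONE;            /* not ours: the line may be shared */

        /* Fast path: let NAPI drain the rings in softirq context. */
        if (icr & MY_CAUSE_QUEUE)
            napi_schedule(&ad->napi);

        /* Slow path: remember the event and let the service task handle it. */
        if (icr & MY_CAUSE_ADMINQ) {
            set_bit(MY_EVT_ADMINQ, &ad->state);
            schedule_work(&ad->service_task);
        }

        return IRQ_HANDLED;
    }

The real handler also counts software interrupts, distinguishes the different reset causes, and re-enables ICR0 at the end; those details are omitted here.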
3439 struct i40e_pf *pf = vsi->back; i40e_vsi_request_irq() local
3442 if (pf->flags & I40E_FLAG_MSIX_ENABLED) i40e_vsi_request_irq()
3444 else if (pf->flags & I40E_FLAG_MSI_ENABLED) i40e_vsi_request_irq()
3445 err = request_irq(pf->pdev->irq, i40e_intr, 0, i40e_vsi_request_irq()
3446 pf->int_name, pf); i40e_vsi_request_irq()
3448 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED, i40e_vsi_request_irq()
3449 pf->int_name, pf); i40e_vsi_request_irq()
3452 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err); i40e_vsi_request_irq()
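Editor's note: i40e_vsi_request_irq() picks the delivery mode in order of preference: per-queue MSI-X vectors, then a single MSI vector, then a shared legacy line registered with IRQF_SHARED. A reduced sketch of the single-vector fallbacks, reusing the hypothetical my_adapter/my_intr from the previous sketch:

    #include <linux/interrupt.h>
    #include <linux/pci.h>

    static int my_request_single_irq(struct my_adapter *ad, struct pci_dev *pdev,
                                     bool msi_enabled)
    {
        int err;

        /*
         * With MSI-X the driver instead registers one handler per queue
         * vector (see the i40e_vsi_request_irq_msix() hits earlier); this
         * sketch covers only the MSI and legacy fallbacks.
         */
        if (msi_enabled)
            err = request_irq(pdev->irq, my_intr, 0, "my-adapter", ad);
        else
            err = request_irq(pdev->irq, my_intr, IRQF_SHARED, "my-adapter", ad);

        if (err)
            dev_info(&pdev->dev, "request_irq failed: %d\n", err);
        return err;
    }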
3473 struct i40e_pf *pf = vsi->back; i40e_netpoll() local
3480 pf->flags |= I40E_FLAG_IN_NETPOLL; i40e_netpoll()
3481 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { i40e_netpoll()
3485 i40e_intr(pf->pdev->irq, netdev); i40e_netpoll()
3487 pf->flags &= ~I40E_FLAG_IN_NETPOLL; i40e_netpoll()
3493 * @pf: the PF being configured
3502 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable) i40e_pf_txq_wait() argument
3508 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q)); i40e_pf_txq_wait()
3527 struct i40e_pf *pf = vsi->back; i40e_vsi_control_tx() local
3528 struct i40e_hw *hw = &pf->hw; i40e_vsi_control_tx()
3536 i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable); i40e_vsi_control_tx()
3561 if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state)) i40e_vsi_control_tx()
3565 ret = i40e_pf_txq_wait(pf, pf_q, enable); i40e_vsi_control_tx()
3567 dev_info(&pf->pdev->dev, i40e_vsi_control_tx()
3582 * @pf: the PF being configured
3591 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable) i40e_pf_rxq_wait() argument
3597 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q)); i40e_pf_rxq_wait()
3616 struct i40e_pf *pf = vsi->back; i40e_vsi_control_rx() local
3617 struct i40e_hw *hw = &pf->hw; i40e_vsi_control_rx()
3643 ret = i40e_pf_rxq_wait(pf, pf_q, enable); i40e_vsi_control_rx()
3645 dev_info(&pf->pdev->dev, i40e_vsi_control_rx()
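Editor's note: i40e_pf_txq_wait() and i40e_pf_rxq_wait() are small poll loops: after a queue-enable bit is flipped, the status register is re-read until the hardware reports the requested state or a retry budget runs out. A generic sketch of the pattern; my_read_qena(), the status bit and the register stride are hypothetical:

    #include <linux/bitops.h>
    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/io.h>

    #define MY_QENA_STAT     BIT(2)     /* assumed "queue enabled" status bit */
    #define MY_QENA_RETRIES  10

    /* Hypothetical accessor for the per-queue enable/status register. */
    static u32 my_read_qena(void __iomem *regs, int queue)
    {
        return readl(regs + queue * 4); /* assumed 4-byte register stride */
    }

    static int my_queue_wait(void __iomem *regs, int queue, bool enable)
    {
        int i;

        for (i = 0; i < MY_QENA_RETRIES; i++) {
            u32 reg = my_read_qena(regs, queue);

            if (enable == !!(reg & MY_QENA_STAT))
                return 0;               /* hardware reached the requested state */
            usleep_range(10, 20);       /* give the queue time to settle */
        }
        return -ETIMEDOUT;              /* caller logs and typically requests a reset */
    }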
3686 struct i40e_pf *pf = vsi->back; i40e_vsi_free_irq() local
3687 struct i40e_hw *hw = &pf->hw; i40e_vsi_free_irq()
3692 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { i40e_vsi_free_irq()
3709 irq_set_affinity_hint(pf->msix_entries[vector].vector, i40e_vsi_free_irq()
3711 free_irq(pf->msix_entries[vector].vector, i40e_vsi_free_irq()
3761 free_irq(pf->pdev->irq, pf); i40e_vsi_free_irq()
3845 * @pf: board private structure
3847 static void i40e_reset_interrupt_capability(struct i40e_pf *pf) i40e_reset_interrupt_capability() argument
3850 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { i40e_reset_interrupt_capability()
3851 pci_disable_msix(pf->pdev); i40e_reset_interrupt_capability()
3852 kfree(pf->msix_entries); i40e_reset_interrupt_capability()
3853 pf->msix_entries = NULL; i40e_reset_interrupt_capability()
3854 kfree(pf->irq_pile); i40e_reset_interrupt_capability()
3855 pf->irq_pile = NULL; i40e_reset_interrupt_capability()
3856 } else if (pf->flags & I40E_FLAG_MSI_ENABLED) { i40e_reset_interrupt_capability()
3857 pci_disable_msi(pf->pdev); i40e_reset_interrupt_capability()
3859 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED); i40e_reset_interrupt_capability()
3864 * @pf: board private structure
3869 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf) i40e_clear_interrupt_scheme() argument
3873 i40e_stop_misc_vector(pf); i40e_clear_interrupt_scheme()
3874 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { i40e_clear_interrupt_scheme()
3875 synchronize_irq(pf->msix_entries[0].vector); i40e_clear_interrupt_scheme()
3876 free_irq(pf->msix_entries[0].vector, pf); i40e_clear_interrupt_scheme()
3879 i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1); i40e_clear_interrupt_scheme()
3880 for (i = 0; i < pf->num_alloc_vsi; i++) i40e_clear_interrupt_scheme()
3881 if (pf->vsi[i]) i40e_clear_interrupt_scheme()
3882 i40e_vsi_free_q_vectors(pf->vsi[i]); i40e_clear_interrupt_scheme()
3883 i40e_reset_interrupt_capability(pf); i40e_clear_interrupt_scheme()
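Editor's note: i40e_reset_interrupt_capability() and i40e_clear_interrupt_scheme() show the teardown order: quiesce and free the misc ("other causes") vector first, release the per-VSI vectors, then disable MSI-X or MSI and free the msix_entries array. A minimal sketch using a hypothetical my_irq_ctx in place of the PF structure:

    #include <linux/interrupt.h>
    #include <linux/pci.h>
    #include <linux/slab.h>

    struct my_irq_ctx {
        struct pci_dev *pdev;
        struct msix_entry *msix_entries;
        bool msix_enabled;
        bool msi_enabled;
        void *misc_cookie;              /* dev_id passed to request_irq() */
    };

    static void my_clear_interrupt_scheme(struct my_irq_ctx *ctx)
    {
        if (ctx->msix_enabled) {
            /* Vector 0 carried the misc causes: quiesce it, then free it. */
            synchronize_irq(ctx->msix_entries[0].vector);
            free_irq(ctx->msix_entries[0].vector, ctx->misc_cookie);

            /* Per-VSI queue vectors would be freed here as well (not shown). */

            pci_disable_msix(ctx->pdev);
            kfree(ctx->msix_entries);
            ctx->msix_entries = NULL;
        } else if (ctx->msi_enabled) {
            pci_disable_msi(ctx->pdev);
        }
        ctx->msix_enabled = false;
        ctx->msi_enabled = false;
    }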
3973 * @pf: the PF
3975 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf) i40e_pf_quiesce_all_vsi() argument
3979 for (v = 0; v < pf->num_alloc_vsi; v++) { i40e_pf_quiesce_all_vsi()
3980 if (pf->vsi[v]) i40e_pf_quiesce_all_vsi()
3981 i40e_quiesce_vsi(pf->vsi[v]); i40e_pf_quiesce_all_vsi()
3987 * @pf: the PF
3989 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) i40e_pf_unquiesce_all_vsi() argument
3993 for (v = 0; v < pf->num_alloc_vsi; v++) { i40e_pf_unquiesce_all_vsi()
3994 if (pf->vsi[v]) i40e_pf_unquiesce_all_vsi()
3995 i40e_unquiesce_vsi(pf->vsi[v]); i40e_pf_unquiesce_all_vsi()
4008 struct i40e_pf *pf = vsi->back; i40e_vsi_wait_txq_disabled() local
4014 ret = i40e_pf_txq_wait(pf, pf_q, false); i40e_vsi_wait_txq_disabled()
4016 dev_info(&pf->pdev->dev, i40e_vsi_wait_txq_disabled()
4028 * @pf: the PF
4033 static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf) i40e_pf_wait_txq_disabled() argument
4037 for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { i40e_pf_wait_txq_disabled()
4039 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) { i40e_pf_wait_txq_disabled()
4040 ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]); i40e_pf_wait_txq_disabled()
4052 * @pf: pointer to PF
4057 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf) i40e_get_iscsi_tc_map() argument
4060 struct i40e_hw *hw = &pf->hw; i40e_get_iscsi_tc_map()
4127 * @pf: PF being queried
4131 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) i40e_pf_get_num_tc() argument
4133 struct i40e_hw *hw = &pf->hw; i40e_pf_get_num_tc()
4139 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) i40e_pf_get_num_tc()
4143 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) i40e_pf_get_num_tc()
4147 if (pf->hw.func_caps.iscsi) i40e_pf_get_num_tc()
4148 enabled_tc = i40e_get_iscsi_tc_map(pf); i40e_pf_get_num_tc()
4163 * @pf: PF being queried
4167 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf) i40e_pf_get_default_tc() argument
4169 u8 enabled_tc = pf->hw.func_caps.enabled_tcmap; i40e_pf_get_default_tc()
4186 * @pf: PF being queried
4190 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf) i40e_pf_get_tc_map() argument
4193 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) i40e_pf_get_tc_map()
4194 return i40e_pf_get_default_tc(pf); i40e_pf_get_tc_map()
4197 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) i40e_pf_get_tc_map()
4198 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config); i40e_pf_get_tc_map()
4201 if (pf->hw.func_caps.iscsi) i40e_pf_get_tc_map()
4202 return i40e_get_iscsi_tc_map(pf); i40e_pf_get_tc_map()
4204 return i40e_pf_get_default_tc(pf); i40e_pf_get_tc_map()
4217 struct i40e_pf *pf = vsi->back; i40e_vsi_get_bw_info() local
4218 struct i40e_hw *hw = &pf->hw; i40e_vsi_get_bw_info()
4226 dev_info(&pf->pdev->dev, i40e_vsi_get_bw_info()
4228 aq_ret, pf->hw.aq.asq_last_status); i40e_vsi_get_bw_info()
4236 dev_info(&pf->pdev->dev, i40e_vsi_get_bw_info()
4238 aq_ret, pf->hw.aq.asq_last_status); i40e_vsi_get_bw_info()
4243 dev_info(&pf->pdev->dev, i40e_vsi_get_bw_info()
4308 struct i40e_pf *pf = vsi->back; i40e_vsi_config_netdev_tc() local
4309 struct i40e_hw *hw = &pf->hw; i40e_vsi_config_netdev_tc()
4454 struct i40e_pf *pf = veb->pf; i40e_veb_config_tc() local
4471 ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid, i40e_veb_config_tc()
4474 dev_info(&pf->pdev->dev, i40e_veb_config_tc()
4476 pf->hw.aq.asq_last_status); i40e_veb_config_tc()
4483 dev_info(&pf->pdev->dev, i40e_veb_config_tc()
4485 pf->hw.aq.asq_last_status); i40e_veb_config_tc()
4495 * @pf: PF struct
4501 static void i40e_dcb_reconfigure(struct i40e_pf *pf) i40e_dcb_reconfigure() argument
4508 tc_map = i40e_pf_get_tc_map(pf); i40e_dcb_reconfigure()
4510 if (!pf->veb[v]) i40e_dcb_reconfigure()
4512 ret = i40e_veb_config_tc(pf->veb[v], tc_map); i40e_dcb_reconfigure()
4514 dev_info(&pf->pdev->dev, i40e_dcb_reconfigure()
4516 pf->veb[v]->seid); i40e_dcb_reconfigure()
4522 for (v = 0; v < pf->num_alloc_vsi; v++) { i40e_dcb_reconfigure()
4523 if (!pf->vsi[v]) i40e_dcb_reconfigure()
4533 if (v == pf->lan_vsi) i40e_dcb_reconfigure()
4534 tc_map = i40e_pf_get_tc_map(pf); i40e_dcb_reconfigure()
4536 tc_map = i40e_pf_get_default_tc(pf); i40e_dcb_reconfigure()
4538 if (pf->vsi[v]->type == I40E_VSI_FCOE) i40e_dcb_reconfigure()
4539 tc_map = i40e_get_fcoe_tc_map(pf); i40e_dcb_reconfigure()
4542 ret = i40e_vsi_config_tc(pf->vsi[v], tc_map); i40e_dcb_reconfigure()
4544 dev_info(&pf->pdev->dev, i40e_dcb_reconfigure()
4546 pf->vsi[v]->seid); i40e_dcb_reconfigure()
4550 i40e_vsi_map_rings_to_vectors(pf->vsi[v]); i40e_dcb_reconfigure()
4551 if (pf->vsi[v]->netdev) i40e_dcb_reconfigure()
4552 i40e_dcbnl_set_all(pf->vsi[v]); i40e_dcb_reconfigure()
4559 * @pf: PF struct
4564 static int i40e_resume_port_tx(struct i40e_pf *pf) i40e_resume_port_tx() argument
4566 struct i40e_hw *hw = &pf->hw; i40e_resume_port_tx()
4571 dev_info(&pf->pdev->dev, i40e_resume_port_tx()
4573 pf->hw.aq.asq_last_status); i40e_resume_port_tx()
4575 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); i40e_resume_port_tx()
4576 i40e_service_event_schedule(pf); i40e_resume_port_tx()
4584 * @pf: PF being configured
4589 static int i40e_init_pf_dcb(struct i40e_pf *pf) i40e_init_pf_dcb() argument
4591 struct i40e_hw *hw = &pf->hw; i40e_init_pf_dcb()
4595 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || i40e_init_pf_dcb()
4596 (pf->hw.aq.fw_maj_ver < 4)) i40e_init_pf_dcb()
4605 dev_info(&pf->pdev->dev, i40e_init_pf_dcb()
4608 if (pf->flags & I40E_FLAG_MFP_ENABLED) i40e_init_pf_dcb()
4613 pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | i40e_init_pf_dcb()
4616 pf->flags |= I40E_FLAG_DCB_CAPABLE; i40e_init_pf_dcb()
4619 pf->flags |= I40E_FLAG_DCB_ENABLED; i40e_init_pf_dcb()
4620 dev_dbg(&pf->pdev->dev, i40e_init_pf_dcb()
4624 dev_info(&pf->pdev->dev, i40e_init_pf_dcb()
4626 pf->hw.aq.asq_last_status); i40e_init_pf_dcb()
4703 struct i40e_pf *pf = vsi->back; i40e_up_complete() local
4706 if (pf->flags & I40E_FLAG_MSIX_ENABLED) i40e_up_complete()
4720 if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) && i40e_up_complete()
4728 if ((pf->hw.phy.link_info.link_info & i40e_up_complete()
4730 (!(pf->hw.phy.link_info.an_info & i40e_up_complete()
4739 pf->fd_add_err = pf->fd_atr_cnt = 0; i40e_up_complete()
4740 if (pf->fd_tcp_rule > 0) { i40e_up_complete()
4741 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; i40e_up_complete()
4742 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n"); i40e_up_complete()
4743 pf->fd_tcp_rule = 0; i40e_up_complete()
4747 i40e_service_event_schedule(pf); i40e_up_complete()
4761 struct i40e_pf *pf = vsi->back; i40e_vsi_reinit_locked() local
4764 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) i40e_vsi_reinit_locked()
4775 clear_bit(__I40E_CONFIG_BUSY, &pf->state); i40e_vsi_reinit_locked()
4831 struct i40e_pf *pf = vsi->back; i40e_setup_tc() local
4837 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) { i40e_setup_tc()
4843 if (pf->flags & I40E_FLAG_MFP_ENABLED) { i40e_setup_tc()
4849 if (tc > i40e_pf_get_num_tc(pf)) { i40e_setup_tc()
4896 struct i40e_pf *pf = vsi->back; i40e_open() local
4900 if (test_bit(__I40E_TESTING, &pf->state) || i40e_open()
4901 test_bit(__I40E_BAD_EEPROM, &pf->state)) i40e_open()
4911 wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH | i40e_open()
4913 wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH | i40e_open()
4916 wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16); i40e_open()
4935 struct i40e_pf *pf = vsi->back; i40e_vsi_open() local
4953 dev_driver_string(&pf->pdev->dev), vsi->netdev->name); i40e_vsi_open()
4971 dev_driver_string(&pf->pdev->dev), i40e_vsi_open()
4972 dev_name(&pf->pdev->dev)); i40e_vsi_open()
4994 if (vsi == pf->vsi[pf->lan_vsi]) i40e_vsi_open()
4995 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); i40e_vsi_open()
5002 * @pf: Pointer to PF
5007 static void i40e_fdir_filter_exit(struct i40e_pf *pf) i40e_fdir_filter_exit() argument
5013 &pf->fdir_filter_list, fdir_node) { i40e_fdir_filter_exit()
5017 pf->fdir_pf_active_filters = 0; i40e_fdir_filter_exit()
5046 * @pf: board private structure
5053 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags) i40e_do_reset() argument
5059 if (i40e_check_asq_alive(&pf->hw)) i40e_do_reset()
5060 i40e_vc_notify_reset(pf); i40e_do_reset()
5073 dev_dbg(&pf->pdev->dev, "GlobalR requested\n"); i40e_do_reset()
5074 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); i40e_do_reset()
5076 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); i40e_do_reset()
5084 dev_dbg(&pf->pdev->dev, "CoreR requested\n"); i40e_do_reset()
5085 val = rd32(&pf->hw, I40E_GLGEN_RTRIG); i40e_do_reset()
5087 wr32(&pf->hw, I40E_GLGEN_RTRIG, val); i40e_do_reset()
5088 i40e_flush(&pf->hw); i40e_do_reset()
5100 dev_dbg(&pf->pdev->dev, "PFR requested\n"); i40e_do_reset()
5101 i40e_handle_reset_warning(pf); i40e_do_reset()
5107 dev_info(&pf->pdev->dev, i40e_do_reset()
5109 for (v = 0; v < pf->num_alloc_vsi; v++) { i40e_do_reset()
5110 struct i40e_vsi *vsi = pf->vsi[v]; i40e_do_reset()
5113 i40e_vsi_reinit_locked(pf->vsi[v]); i40e_do_reset()
5124 dev_info(&pf->pdev->dev, "VSI down requested\n"); i40e_do_reset()
5125 for (v = 0; v < pf->num_alloc_vsi; v++) { i40e_do_reset()
5126 struct i40e_vsi *vsi = pf->vsi[v]; i40e_do_reset()
5138 dev_info(&pf->pdev->dev, i40e_do_reset()
5147 * @pf: board private structure
5151 bool i40e_dcb_need_reconfig(struct i40e_pf *pf, i40e_dcb_need_reconfig() argument
5166 dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n"); i40e_dcb_need_reconfig()
5172 dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n"); i40e_dcb_need_reconfig()
5177 dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n"); i40e_dcb_need_reconfig()
5185 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n"); i40e_dcb_need_reconfig()
5193 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n"); i40e_dcb_need_reconfig()
5196 dev_dbg(&pf->pdev->dev, "%s: need_reconfig=%d\n", __func__, i40e_dcb_need_reconfig()
5203 * @pf: board private structure
5206 static int i40e_handle_lldp_event(struct i40e_pf *pf, i40e_handle_lldp_event() argument
5211 struct i40e_hw *hw = &pf->hw; i40e_handle_lldp_event()
5218 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) i40e_handle_lldp_event()
5224 dev_dbg(&pf->pdev->dev, i40e_handle_lldp_event()
5231 dev_dbg(&pf->pdev->dev, i40e_handle_lldp_event()
5248 ret = i40e_get_dcb_config(&pf->hw); i40e_handle_lldp_event()
5250 dev_info(&pf->pdev->dev, "Failed querying DCB configuration data from firmware.\n"); i40e_handle_lldp_event()
5257 dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n"); i40e_handle_lldp_event()
5261 need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg, i40e_handle_lldp_event()
5264 i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config); i40e_handle_lldp_event()
5271 pf->flags |= I40E_FLAG_DCB_ENABLED; i40e_handle_lldp_event()
5273 pf->flags &= ~I40E_FLAG_DCB_ENABLED; i40e_handle_lldp_event()
5275 set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); i40e_handle_lldp_event()
5277 i40e_pf_quiesce_all_vsi(pf); i40e_handle_lldp_event()
5280 i40e_dcb_reconfigure(pf); i40e_handle_lldp_event()
5282 ret = i40e_resume_port_tx(pf); i40e_handle_lldp_event()
5284 clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state); i40e_handle_lldp_event()
5290 ret = i40e_pf_wait_txq_disabled(pf); i40e_handle_lldp_event()
5293 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); i40e_handle_lldp_event()
5294 i40e_service_event_schedule(pf); i40e_handle_lldp_event()
5296 i40e_pf_unquiesce_all_vsi(pf); i40e_handle_lldp_event()
5306 * @pf: board private structure
5310 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags) i40e_do_reset_safe() argument
5313 i40e_do_reset(pf, reset_flags); i40e_do_reset_safe()
5319 * @pf: board private structure
5325 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf, i40e_handle_lan_overflow_event() argument
5332 struct i40e_hw *hw = &pf->hw; i40e_handle_lan_overflow_event()
5336 dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n", i40e_handle_lan_overflow_event()
5345 vf = &pf->vf[vf_id]; i40e_handle_lan_overflow_event()
5355 * @pf: board private structure
5357 static void i40e_service_event_complete(struct i40e_pf *pf) i40e_service_event_complete() argument
5359 BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state)); i40e_service_event_complete()
5363 clear_bit(__I40E_SERVICE_SCHED, &pf->state); i40e_service_event_complete()
5368 * @pf: board private structure
5370 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf) i40e_get_cur_guaranteed_fd_count() argument
5374 val = rd32(&pf->hw, I40E_PFQF_FDSTAT); i40e_get_cur_guaranteed_fd_count()
5381 * @pf: board private structure
5383 u32 i40e_get_current_fd_count(struct i40e_pf *pf) i40e_get_current_fd_count() argument
5387 val = rd32(&pf->hw, I40E_PFQF_FDSTAT); i40e_get_current_fd_count()
5396 * @pf: board private structure
5398 u32 i40e_get_global_fd_count(struct i40e_pf *pf) i40e_get_global_fd_count() argument
5402 val = rd32(&pf->hw, I40E_GLQF_FDCNT_0); i40e_get_global_fd_count()
5411 * @pf: board private structure
5413 void i40e_fdir_check_and_reenable(struct i40e_pf *pf) i40e_fdir_check_and_reenable() argument
5417 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) i40e_fdir_check_and_reenable()
5423 fcnt_prog = i40e_get_global_fd_count(pf); i40e_fdir_check_and_reenable()
5424 fcnt_avail = pf->fdir_pf_filter_count; i40e_fdir_check_and_reenable()
5426 (pf->fd_add_err == 0) || i40e_fdir_check_and_reenable()
5427 (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) { i40e_fdir_check_and_reenable()
5428 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && i40e_fdir_check_and_reenable()
5429 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { i40e_fdir_check_and_reenable()
5430 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; i40e_fdir_check_and_reenable()
5431 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n"); i40e_fdir_check_and_reenable()
5436 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && i40e_fdir_check_and_reenable()
5437 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) { i40e_fdir_check_and_reenable()
5438 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; i40e_fdir_check_and_reenable()
5439 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n"); i40e_fdir_check_and_reenable()
5448 * @pf: board private structure
5450 static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) i40e_fdir_flush_and_replay() argument
5458 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) i40e_fdir_flush_and_replay()
5461 if (time_after(jiffies, pf->fd_flush_timestamp + i40e_fdir_flush_and_replay()
5466 min_flush_time = pf->fd_flush_timestamp i40e_fdir_flush_and_replay()
5468 fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters; i40e_fdir_flush_and_replay()
5472 dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n"); i40e_fdir_flush_and_replay()
5476 pf->fd_flush_timestamp = jiffies; i40e_fdir_flush_and_replay()
5477 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; i40e_fdir_flush_and_replay()
5479 wr32(&pf->hw, I40E_PFQF_CTL_1, i40e_fdir_flush_and_replay()
5481 i40e_flush(&pf->hw); i40e_fdir_flush_and_replay()
5482 pf->fd_flush_cnt++; i40e_fdir_flush_and_replay()
5483 pf->fd_add_err = 0; i40e_fdir_flush_and_replay()
5487 reg = rd32(&pf->hw, I40E_PFQF_CTL_1); i40e_fdir_flush_and_replay()
5492 dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); i40e_fdir_flush_and_replay()
5495 i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); i40e_fdir_flush_and_replay()
5497 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; i40e_fdir_flush_and_replay()
5498 clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); i40e_fdir_flush_and_replay()
5499 dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n"); i40e_fdir_flush_and_replay()
5506 * @pf: board private structure
5508 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf) i40e_get_current_atr_cnt() argument
5510 return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters; i40e_get_current_atr_cnt()
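Editor's note: the Flow Director hits (i40e_get_*_fd_count(), i40e_fdir_check_and_reenable(), i40e_fdir_flush_and_replay()) turn on one idea: read how many filters the hardware currently holds, compare against the table capacity, and re-enable the sideband/ATR features that were auto-disabled once enough headroom is back. A hedged sketch of just the re-enable check; the counters, flag bits and headroom margin are hypothetical, not the I40E_PFQF_FDSTAT field layout:

    #include <linux/bitops.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    #define MY_FLAG_FD_SB  BIT(0)       /* sideband (ntuple) filters */

    struct my_fd_state {
        u32 filter_capacity;            /* guaranteed filter slots              */
        u32 filters_programmed;         /* read back from a hardware counter    */
        u32 flags;                      /* features currently configured on     */
        u32 auto_disabled;              /* features turned off for lack of room */
    };

    static void my_fd_check_and_reenable(struct my_fd_state *fd)
    {
        /* Only re-enable once a small headroom margin is available again. */
        if (fd->filters_programmed + 2 > fd->filter_capacity)
            return;

        if ((fd->flags & MY_FLAG_FD_SB) && (fd->auto_disabled & MY_FLAG_FD_SB)) {
            fd->auto_disabled &= ~MY_FLAG_FD_SB;
            pr_info("FD sideband re-enabled, filter table has room again\n");
        }
    }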
5522 * @pf: board private structure
5524 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf) i40e_fdir_reinit_subtask() argument
5528 if (test_bit(__I40E_DOWN, &pf->state)) i40e_fdir_reinit_subtask()
5531 if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))) i40e_fdir_reinit_subtask()
5534 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) i40e_fdir_reinit_subtask()
5535 i40e_fdir_flush_and_replay(pf); i40e_fdir_reinit_subtask()
5537 i40e_fdir_check_and_reenable(pf); i40e_fdir_reinit_subtask()
5585 struct i40e_pf *pf; i40e_veb_link_event() local
5588 if (!veb || !veb->pf) i40e_veb_link_event()
5590 pf = veb->pf; i40e_veb_link_event()
5594 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid)) i40e_veb_link_event()
5595 i40e_veb_link_event(pf->veb[i], link_up); i40e_veb_link_event()
5598 for (i = 0; i < pf->num_alloc_vsi; i++) i40e_veb_link_event()
5599 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid)) i40e_veb_link_event()
5600 i40e_vsi_link_event(pf->vsi[i], link_up); i40e_veb_link_event()
5605 * @pf: board private structure
5607 static void i40e_link_event(struct i40e_pf *pf) i40e_link_event() argument
5610 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; i40e_link_event()
5614 pf->hw.phy.get_link_info = true; i40e_link_event()
5616 old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP); i40e_link_event()
5617 new_link = i40e_get_link_status(&pf->hw); i40e_link_event()
5618 old_link_speed = pf->hw.phy.link_info_old.link_speed; i40e_link_event()
5619 new_link_speed = pf->hw.phy.link_info.link_speed; i40e_link_event()
5633 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) i40e_link_event()
5634 i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); i40e_link_event()
5638 if (pf->vf) i40e_link_event()
5639 i40e_vc_notify_link_state(pf); i40e_link_event()
5641 if (pf->flags & I40E_FLAG_PTP) i40e_link_event()
5642 i40e_ptp_set_increment(pf); i40e_link_event()
5647 * @pf: board private structure
5652 static void i40e_check_hang_subtask(struct i40e_pf *pf) i40e_check_hang_subtask() argument
5657 if (test_bit(__I40E_DOWN, &pf->state) || i40e_check_hang_subtask()
5658 test_bit(__I40E_CONFIG_BUSY, &pf->state)) i40e_check_hang_subtask()
5667 for (v = 0; v < pf->num_alloc_vsi; v++) { i40e_check_hang_subtask()
5668 struct i40e_vsi *vsi = pf->vsi[v]; i40e_check_hang_subtask()
5671 if (!pf->vsi[v] || i40e_check_hang_subtask()
5684 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) { i40e_check_hang_subtask()
5709 * @pf: board private structure
5711 static void i40e_watchdog_subtask(struct i40e_pf *pf) i40e_watchdog_subtask() argument
5716 if (test_bit(__I40E_DOWN, &pf->state) || i40e_watchdog_subtask()
5717 test_bit(__I40E_CONFIG_BUSY, &pf->state)) i40e_watchdog_subtask()
5721 if (time_before(jiffies, (pf->service_timer_previous + i40e_watchdog_subtask()
5722 pf->service_timer_period))) i40e_watchdog_subtask()
5724 pf->service_timer_previous = jiffies; i40e_watchdog_subtask()
5726 i40e_check_hang_subtask(pf); i40e_watchdog_subtask()
5727 i40e_link_event(pf); i40e_watchdog_subtask()
5732 for (i = 0; i < pf->num_alloc_vsi; i++) i40e_watchdog_subtask()
5733 if (pf->vsi[i] && pf->vsi[i]->netdev) i40e_watchdog_subtask()
5734 i40e_update_stats(pf->vsi[i]); i40e_watchdog_subtask()
5738 if (pf->veb[i]) i40e_watchdog_subtask()
5739 i40e_update_veb_stats(pf->veb[i]); i40e_watchdog_subtask()
5741 i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]); i40e_watchdog_subtask()
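Editor's note: i40e_watchdog_subtask() rate-limits itself with jiffies arithmetic: if less than one service period has passed since the previous run it returns early, otherwise it records the timestamp and walks the VSIs/VEBs to refresh statistics. A tiny sketch of that rate limit, with a hypothetical my_wd_state holding the bookkeeping:

    #include <linux/jiffies.h>
    #include <linux/types.h>

    struct my_wd_state {
        unsigned long last_run;         /* jiffies of the previous full pass */
        unsigned long period;           /* minimum spacing, in jiffies       */
    };

    /* Returns true when a full period has elapsed and the pass should run. */
    static bool my_watchdog_due(struct my_wd_state *wd)
    {
        if (time_before(jiffies, wd->last_run + wd->period))
            return false;
        wd->last_run = jiffies;
        return true;
    }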
5746 * @pf: board private structure
5748 static void i40e_reset_subtask(struct i40e_pf *pf) i40e_reset_subtask() argument
5753 if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) { i40e_reset_subtask()
5755 clear_bit(__I40E_REINIT_REQUESTED, &pf->state); i40e_reset_subtask()
5757 if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) { i40e_reset_subtask()
5759 clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state); i40e_reset_subtask()
5761 if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) { i40e_reset_subtask()
5763 clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state); i40e_reset_subtask()
5765 if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) { i40e_reset_subtask()
5767 clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state); i40e_reset_subtask()
5769 if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) { i40e_reset_subtask()
5771 clear_bit(__I40E_DOWN_REQUESTED, &pf->state); i40e_reset_subtask()
5777 if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) { i40e_reset_subtask()
5778 i40e_handle_reset_warning(pf); i40e_reset_subtask()
5784 !test_bit(__I40E_DOWN, &pf->state) && i40e_reset_subtask()
5785 !test_bit(__I40E_CONFIG_BUSY, &pf->state)) i40e_reset_subtask()
5786 i40e_do_reset(pf, reset_flags); i40e_reset_subtask()
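Editor's note: i40e_reset_subtask() folds the various *_RESET_REQUESTED state bits into a single reset_flags word, clearing each bit as it is consumed, and only calls i40e_do_reset() if the device is neither down nor busy. A minimal sketch of the test-and-clear collection, with hypothetical bit numbers:

    #include <linux/bitops.h>
    #include <linux/types.h>

    enum { MY_PF_RESET_REQ, MY_CORE_RESET_REQ, MY_GLOBAL_RESET_REQ };

    static u32 my_collect_reset_flags(unsigned long *state)
    {
        u32 flags = 0;

        if (test_bit(MY_PF_RESET_REQ, state)) {
            flags |= BIT(MY_PF_RESET_REQ);
            clear_bit(MY_PF_RESET_REQ, state);
        }
        if (test_bit(MY_CORE_RESET_REQ, state)) {
            flags |= BIT(MY_CORE_RESET_REQ);
            clear_bit(MY_CORE_RESET_REQ, state);
        }
        if (test_bit(MY_GLOBAL_RESET_REQ, state)) {
            flags |= BIT(MY_GLOBAL_RESET_REQ);
            clear_bit(MY_GLOBAL_RESET_REQ, state);
        }
        /* The caller issues the reset only if the device is not down or busy. */
        return flags;
    }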
5794 * @pf: board private structure
5797 static void i40e_handle_link_event(struct i40e_pf *pf, i40e_handle_link_event() argument
5800 struct i40e_hw *hw = &pf->hw; i40e_handle_link_event()
5813 i40e_link_event(pf); i40e_handle_link_event()
5819 dev_err(&pf->pdev->dev, i40e_handle_link_event()
5825 * @pf: board private structure
5827 static void i40e_clean_adminq_subtask(struct i40e_pf *pf) i40e_clean_adminq_subtask() argument
5830 struct i40e_hw *hw = &pf->hw; i40e_clean_adminq_subtask()
5838 if (test_bit(__I40E_RESET_FAILED, &pf->state)) i40e_clean_adminq_subtask()
5842 val = rd32(&pf->hw, pf->hw.aq.arq.len); i40e_clean_adminq_subtask()
5845 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n"); i40e_clean_adminq_subtask()
5849 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n"); i40e_clean_adminq_subtask()
5853 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n"); i40e_clean_adminq_subtask()
5857 wr32(&pf->hw, pf->hw.aq.arq.len, val); i40e_clean_adminq_subtask()
5859 val = rd32(&pf->hw, pf->hw.aq.asq.len); i40e_clean_adminq_subtask()
5862 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n"); i40e_clean_adminq_subtask()
5866 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n"); i40e_clean_adminq_subtask()
5870 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n"); i40e_clean_adminq_subtask()
5874 wr32(&pf->hw, pf->hw.aq.asq.len, val); i40e_clean_adminq_subtask()
5886 dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret); i40e_clean_adminq_subtask()
5894 i40e_handle_link_event(pf, &event); i40e_clean_adminq_subtask()
5897 ret = i40e_vc_process_vf_msg(pf, i40e_clean_adminq_subtask()
5905 dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n"); i40e_clean_adminq_subtask()
5908 ret = i40e_handle_lldp_event(pf, &event); i40e_clean_adminq_subtask()
5913 dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n"); i40e_clean_adminq_subtask()
5914 i40e_handle_lan_overflow_event(pf, &event); i40e_clean_adminq_subtask()
5917 dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n"); i40e_clean_adminq_subtask()
5921 i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n"); i40e_clean_adminq_subtask()
5924 dev_info(&pf->pdev->dev, i40e_clean_adminq_subtask()
5929 } while (pending && (i++ < pf->adminq_work_limit)); i40e_clean_adminq_subtask()
5931 clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state); i40e_clean_adminq_subtask()
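Editor's note: i40e_clean_adminq_subtask() drains the admin receive queue in a bounded do/while loop, dispatching each event by opcode (link change, VF message, LLDP MIB update, LAN queue overflow, ...) and stopping after adminq_work_limit events so a single pass cannot monopolize the service task. A loose sketch of the drain loop; the event structure, fetch callback, opcode value and pending-event bit are hypothetical:

    #include <linux/bitops.h>
    #include <linux/printk.h>
    #include <linux/types.h>

    struct my_arq_event {
        u16 opcode;
    };

    #define MY_ARQ_OPC_LINK      1      /* hypothetical link-change opcode */
    #define MY_EVT_ARQ_PENDING   0      /* hypothetical pending-event bit  */

    static void my_clean_arq(int (*fetch)(struct my_arq_event *ev, u16 *pending),
                             int work_limit, unsigned long *state)
    {
        struct my_arq_event ev;
        u16 pending = 0;
        int i = 0;

        do {
            if (fetch(&ev, &pending))
                break;                  /* queue empty or fetch error */

            switch (ev.opcode) {
            case MY_ARQ_OPC_LINK:
                /* refresh link state, notify VFs, etc. */
                break;
            default:
                pr_debug("unhandled ARQ opcode 0x%04x\n", ev.opcode);
                break;
            }
        } while (pending && (i++ < work_limit));

        clear_bit(MY_EVT_ARQ_PENDING, state);
    }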
5943 * @pf: board private structure
5945 static void i40e_verify_eeprom(struct i40e_pf *pf) i40e_verify_eeprom() argument
5949 err = i40e_diag_eeprom_test(&pf->hw); i40e_verify_eeprom()
5952 err = i40e_diag_eeprom_test(&pf->hw); i40e_verify_eeprom()
5954 dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n", i40e_verify_eeprom()
5956 set_bit(__I40E_BAD_EEPROM, &pf->state); i40e_verify_eeprom()
5960 if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) { i40e_verify_eeprom()
5961 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n"); i40e_verify_eeprom()
5962 clear_bit(__I40E_BAD_EEPROM, &pf->state); i40e_verify_eeprom()
5968 * @pf: pointer to the PF structure
5972 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) i40e_enable_pf_switch_lb() argument
5974 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; i40e_enable_pf_switch_lb()
5978 ctxt.seid = pf->main_vsi_seid; i40e_enable_pf_switch_lb()
5979 ctxt.pf_num = pf->hw.pf_id; i40e_enable_pf_switch_lb()
5981 aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); i40e_enable_pf_switch_lb()
5983 dev_info(&pf->pdev->dev, i40e_enable_pf_switch_lb()
5985 __func__, aq_ret, pf->hw.aq.asq_last_status); i40e_enable_pf_switch_lb()
5994 dev_info(&pf->pdev->dev, i40e_enable_pf_switch_lb()
6002 * @pf: pointer to the PF structure
6006 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) i40e_disable_pf_switch_lb() argument
6008 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; i40e_disable_pf_switch_lb()
6012 ctxt.seid = pf->main_vsi_seid; i40e_disable_pf_switch_lb()
6013 ctxt.pf_num = pf->hw.pf_id; i40e_disable_pf_switch_lb()
6015 aq_ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); i40e_disable_pf_switch_lb()
6017 dev_info(&pf->pdev->dev, i40e_disable_pf_switch_lb()
6019 __func__, aq_ret, pf->hw.aq.asq_last_status); i40e_disable_pf_switch_lb()
6028 dev_info(&pf->pdev->dev, i40e_disable_pf_switch_lb()
6044 struct i40e_pf *pf = veb->pf; i40e_config_bridge_mode() local
6046 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n", i40e_config_bridge_mode()
6049 i40e_disable_pf_switch_lb(pf); i40e_config_bridge_mode()
6051 i40e_enable_pf_switch_lb(pf); i40e_config_bridge_mode()
6066 struct i40e_pf *pf = veb->pf; i40e_reconstitute_veb() local
6071 for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) { i40e_reconstitute_veb()
6072 if (pf->vsi[v] && i40e_reconstitute_veb()
6073 pf->vsi[v]->veb_idx == veb->idx && i40e_reconstitute_veb()
6074 pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) { i40e_reconstitute_veb()
6075 ctl_vsi = pf->vsi[v]; i40e_reconstitute_veb()
6080 dev_info(&pf->pdev->dev, i40e_reconstitute_veb()
6085 if (ctl_vsi != pf->vsi[pf->lan_vsi]) i40e_reconstitute_veb()
6086 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; i40e_reconstitute_veb()
6089 dev_info(&pf->pdev->dev, i40e_reconstitute_veb()
6100 if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED) i40e_reconstitute_veb()
6107 for (v = 0; v < pf->num_alloc_vsi; v++) { i40e_reconstitute_veb()
6108 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi) i40e_reconstitute_veb()
6111 if (pf->vsi[v]->veb_idx == veb->idx) { i40e_reconstitute_veb()
6112 struct i40e_vsi *vsi = pf->vsi[v]; i40e_reconstitute_veb()
6116 dev_info(&pf->pdev->dev, i40e_reconstitute_veb()
6127 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) { i40e_reconstitute_veb()
6128 pf->veb[veb_idx]->uplink_seid = veb->seid; i40e_reconstitute_veb()
6129 ret = i40e_reconstitute_veb(pf->veb[veb_idx]); i40e_reconstitute_veb()
6141 * @pf: the PF struct
6143 static int i40e_get_capabilities(struct i40e_pf *pf) i40e_get_capabilities() argument
6157 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len, i40e_get_capabilities()
6164 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) { i40e_get_capabilities()
6167 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) { i40e_get_capabilities()
6168 dev_info(&pf->pdev->dev, i40e_get_capabilities()
6170 pf->hw.aq.asq_last_status); i40e_get_capabilities()
6175 if (((pf->hw.aq.fw_maj_ver == 2) && (pf->hw.aq.fw_min_ver < 22)) || i40e_get_capabilities()
6176 (pf->hw.aq.fw_maj_ver < 2)) { i40e_get_capabilities()
6177 pf->hw.func_caps.num_msix_vectors++; i40e_get_capabilities()
6178 pf->hw.func_caps.num_msix_vectors_vf++; i40e_get_capabilities()
6181 if (pf->hw.debug_mask & I40E_DEBUG_USER) i40e_get_capabilities()
6182 dev_info(&pf->pdev->dev, i40e_get_capabilities()
6183 "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n", i40e_get_capabilities()
6184 pf->hw.pf_id, pf->hw.func_caps.num_vfs, i40e_get_capabilities()
6185 pf->hw.func_caps.num_msix_vectors, i40e_get_capabilities()
6186 pf->hw.func_caps.num_msix_vectors_vf, i40e_get_capabilities()
6187 pf->hw.func_caps.fd_filters_guaranteed, i40e_get_capabilities()
6188 pf->hw.func_caps.fd_filters_best_effort, i40e_get_capabilities()
6189 pf->hw.func_caps.num_tx_qp, i40e_get_capabilities()
6190 pf->hw.func_caps.num_vsis); i40e_get_capabilities()
6192 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \ i40e_get_capabilities()
6193 + pf->hw.func_caps.num_vfs) i40e_get_capabilities()
6194 if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) { i40e_get_capabilities()
6195 dev_info(&pf->pdev->dev, i40e_get_capabilities()
6197 pf->hw.func_caps.num_vsis, DEF_NUM_VSI); i40e_get_capabilities()
6198 pf->hw.func_caps.num_vsis = DEF_NUM_VSI; i40e_get_capabilities()
6208 * @pf: board private structure
6210 static void i40e_fdir_sb_setup(struct i40e_pf *pf) i40e_fdir_sb_setup() argument
6218 if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) { i40e_fdir_sb_setup()
6226 wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]); i40e_fdir_sb_setup()
6229 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) i40e_fdir_sb_setup()
6234 for (i = 0; i < pf->num_alloc_vsi; i++) { i40e_fdir_sb_setup()
6235 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { i40e_fdir_sb_setup()
6236 vsi = pf->vsi[i]; i40e_fdir_sb_setup()
6243 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, i40e_fdir_sb_setup()
6244 pf->vsi[pf->lan_vsi]->seid, 0); i40e_fdir_sb_setup()
6246 dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); i40e_fdir_sb_setup()
6247 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; i40e_fdir_sb_setup()
6257 * @pf: board private structure
6259 static void i40e_fdir_teardown(struct i40e_pf *pf) i40e_fdir_teardown() argument
6263 i40e_fdir_filter_exit(pf); i40e_fdir_teardown()
6264 for (i = 0; i < pf->num_alloc_vsi; i++) { i40e_fdir_teardown()
6265 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { i40e_fdir_teardown()
6266 i40e_vsi_release(pf->vsi[i]); i40e_fdir_teardown()
6274 * @pf: board private structure
6278 static void i40e_prep_for_reset(struct i40e_pf *pf) i40e_prep_for_reset() argument
6280 struct i40e_hw *hw = &pf->hw; i40e_prep_for_reset()
6284 clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state); i40e_prep_for_reset()
6285 if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) i40e_prep_for_reset()
6288 dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n"); i40e_prep_for_reset()
6291 i40e_pf_quiesce_all_vsi(pf); i40e_prep_for_reset()
6293 for (v = 0; v < pf->num_alloc_vsi; v++) { i40e_prep_for_reset()
6294 if (pf->vsi[v]) i40e_prep_for_reset()
6295 pf->vsi[v]->seid = 0; i40e_prep_for_reset()
6298 i40e_shutdown_adminq(&pf->hw); i40e_prep_for_reset()
6304 dev_warn(&pf->pdev->dev, i40e_prep_for_reset()
6311 * @pf: PF struct
6313 static void i40e_send_version(struct i40e_pf *pf) i40e_send_version() argument
6322 i40e_aq_send_driver_version(&pf->hw, &dv, NULL); i40e_send_version()
6327 * @pf: board private structure
6330 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit) i40e_reset_and_rebuild() argument
6332 struct i40e_hw *hw = &pf->hw; i40e_reset_and_rebuild()
6343 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret); i40e_reset_and_rebuild()
6344 set_bit(__I40E_RESET_FAILED, &pf->state); i40e_reset_and_rebuild()
6347 pf->pfr_count++; i40e_reset_and_rebuild()
6349 if (test_bit(__I40E_DOWN, &pf->state)) i40e_reset_and_rebuild()
6351 dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n"); i40e_reset_and_rebuild()
6354 ret = i40e_init_adminq(&pf->hw); i40e_reset_and_rebuild()
6356 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, %d\n", ret); i40e_reset_and_rebuild()
6361 if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state)) i40e_reset_and_rebuild()
6362 i40e_verify_eeprom(pf); i40e_reset_and_rebuild()
6365 ret = i40e_get_capabilities(pf); i40e_reset_and_rebuild()
6367 dev_info(&pf->pdev->dev, "i40e_get_capabilities failed, %d\n", i40e_reset_and_rebuild()
6374 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); i40e_reset_and_rebuild()
6376 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret); i40e_reset_and_rebuild()
6381 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret); i40e_reset_and_rebuild()
6386 ret = i40e_init_pf_dcb(pf); i40e_reset_and_rebuild()
6388 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret); i40e_reset_and_rebuild()
6389 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; i40e_reset_and_rebuild()
6394 ret = i40e_init_pf_fcoe(pf); i40e_reset_and_rebuild()
6396 dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret); i40e_reset_and_rebuild()
6400 ret = i40e_setup_pf_switch(pf, reinit); i40e_reset_and_rebuild()
6407 ret = i40e_aq_set_phy_int_mask(&pf->hw, i40e_reset_and_rebuild()
6411 dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", ret); i40e_reset_and_rebuild()
6414 ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true); i40e_reset_and_rebuild()
6416 dev_info(&pf->pdev->dev, "set fc fail, aq_err %d\n", ret); i40e_reset_and_rebuild()
6425 if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) { i40e_reset_and_rebuild()
6426 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n"); i40e_reset_and_rebuild()
6429 if (!pf->veb[v]) i40e_reset_and_rebuild()
6432 if (pf->veb[v]->uplink_seid == pf->mac_seid || i40e_reset_and_rebuild()
6433 pf->veb[v]->uplink_seid == 0) { i40e_reset_and_rebuild()
6434 ret = i40e_reconstitute_veb(pf->veb[v]); i40e_reset_and_rebuild()
6445 if (pf->veb[v]->uplink_seid == pf->mac_seid) { i40e_reset_and_rebuild()
6446 dev_info(&pf->pdev->dev, i40e_reset_and_rebuild()
6449 pf->vsi[pf->lan_vsi]->uplink_seid i40e_reset_and_rebuild()
6450 = pf->mac_seid; i40e_reset_and_rebuild()
6452 } else if (pf->veb[v]->uplink_seid == 0) { i40e_reset_and_rebuild()
6453 dev_info(&pf->pdev->dev, i40e_reset_and_rebuild()
6461 if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) { i40e_reset_and_rebuild()
6462 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n"); i40e_reset_and_rebuild()
6464 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]); i40e_reset_and_rebuild()
6466 dev_info(&pf->pdev->dev, i40e_reset_and_rebuild()
6472 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || i40e_reset_and_rebuild()
6473 (pf->hw.aq.fw_maj_ver < 4)) { i40e_reset_and_rebuild()
6475 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); i40e_reset_and_rebuild()
6477 dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", i40e_reset_and_rebuild()
6478 pf->hw.aq.asq_last_status); i40e_reset_and_rebuild()
6481 if (pf->flags & I40E_FLAG_MSIX_ENABLED) i40e_reset_and_rebuild()
6482 ret = i40e_setup_misc_vector(pf); i40e_reset_and_rebuild()
6485 i40e_pf_unquiesce_all_vsi(pf); i40e_reset_and_rebuild()
6487 if (pf->num_alloc_vfs) { i40e_reset_and_rebuild()
6488 for (v = 0; v < pf->num_alloc_vfs; v++) i40e_reset_and_rebuild()
6489 i40e_reset_vf(&pf->vf[v], true); i40e_reset_and_rebuild()
6493 i40e_send_version(pf); i40e_reset_and_rebuild()
6496 clear_bit(__I40E_RESET_FAILED, &pf->state); i40e_reset_and_rebuild()
6498 clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state); i40e_reset_and_rebuild()
6503 * @pf: board private structure
6508 static void i40e_handle_reset_warning(struct i40e_pf *pf) i40e_handle_reset_warning() argument
6510 i40e_prep_for_reset(pf); i40e_handle_reset_warning()
6511 i40e_reset_and_rebuild(pf, false); i40e_handle_reset_warning()
6516 * @pf: pointer to the PF structure
6520 static void i40e_handle_mdd_event(struct i40e_pf *pf) i40e_handle_mdd_event() argument
6522 struct i40e_hw *hw = &pf->hw; i40e_handle_mdd_event()
6529 if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)) i40e_handle_mdd_event()
6543 pf->hw.func_caps.base_queue; i40e_handle_mdd_event()
6544 if (netif_msg_tx_err(pf)) i40e_handle_mdd_event()
6545 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n", i40e_handle_mdd_event()
6558 pf->hw.func_caps.base_queue; i40e_handle_mdd_event()
6559 if (netif_msg_rx_err(pf)) i40e_handle_mdd_event()
6560 dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n", i40e_handle_mdd_event()
6570 dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n"); i40e_handle_mdd_event()
6576 dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n"); i40e_handle_mdd_event()
6581 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); i40e_handle_mdd_event()
6582 i40e_service_event_schedule(pf); i40e_handle_mdd_event()
6587 for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) { i40e_handle_mdd_event()
6588 vf = &(pf->vf[i]); i40e_handle_mdd_event()
6593 dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n", i40e_handle_mdd_event()
6601 dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n", i40e_handle_mdd_event()
6606 dev_info(&pf->pdev->dev, i40e_handle_mdd_event()
6608 dev_info(&pf->pdev->dev, i40e_handle_mdd_event()
6615 clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state); i40e_handle_mdd_event()
6625 * @pf: board private structure
6627 static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf) i40e_sync_vxlan_filters_subtask() argument
6629 struct i40e_hw *hw = &pf->hw; i40e_sync_vxlan_filters_subtask()
6634 if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC)) i40e_sync_vxlan_filters_subtask()
6637 pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC; i40e_sync_vxlan_filters_subtask()
6640 if (pf->pending_vxlan_bitmap & (1 << i)) { i40e_sync_vxlan_filters_subtask()
6641 pf->pending_vxlan_bitmap &= ~(1 << i); i40e_sync_vxlan_filters_subtask()
6642 port = pf->vxlan_ports[i]; i40e_sync_vxlan_filters_subtask()
6651 dev_info(&pf->pdev->dev, i40e_sync_vxlan_filters_subtask()
6655 pf->hw.aq.asq_last_status); i40e_sync_vxlan_filters_subtask()
6656 pf->vxlan_ports[i] = 0; i40e_sync_vxlan_filters_subtask()
6669 struct i40e_pf *pf = container_of(work, i40e_service_task() local
6675 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { i40e_service_task()
6676 i40e_service_event_complete(pf); i40e_service_task()
6680 i40e_reset_subtask(pf); i40e_service_task()
6681 i40e_handle_mdd_event(pf); i40e_service_task()
6682 i40e_vc_process_vflr_event(pf); i40e_service_task()
6683 i40e_watchdog_subtask(pf); i40e_service_task()
6684 i40e_fdir_reinit_subtask(pf); i40e_service_task()
6685 i40e_sync_filters_subtask(pf); i40e_service_task()
6687 i40e_sync_vxlan_filters_subtask(pf); i40e_service_task()
6689 i40e_clean_adminq_subtask(pf); i40e_service_task()
6691 i40e_service_event_complete(pf); i40e_service_task()
6697 if (time_after(jiffies, (start_time + pf->service_timer_period)) || i40e_service_task()
6698 test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state) || i40e_service_task()
6699 test_bit(__I40E_MDD_EVENT_PENDING, &pf->state) || i40e_service_task()
6700 test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) i40e_service_task()
6701 i40e_service_event_schedule(pf); i40e_service_task()
6710 struct i40e_pf *pf = (struct i40e_pf *)data; i40e_service_timer() local
6712 mod_timer(&pf->service_timer, i40e_service_timer()
6713 round_jiffies(jiffies + pf->service_timer_period)); i40e_service_timer()
6714 i40e_service_event_schedule(pf); i40e_service_timer()
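Editor's note: i40e_service_timer() and i40e_service_task() form the usual periodic maintenance pair: the timer re-arms itself with mod_timer(round_jiffies(...)) and schedules the work item, and the work function runs the subtasks, then reschedules itself immediately if the pass overran the period or events are still pending. A sketch using the pre-4.15 unsigned-long timer callback convention this tree uses; struct my_nic and the subtask placeholder are hypothetical:

    #include <linux/jiffies.h>
    #include <linux/kernel.h>
    #include <linux/timer.h>
    #include <linux/workqueue.h>

    struct my_nic {
        struct timer_list service_timer;
        unsigned long service_period;   /* in jiffies */
        struct work_struct service_task;
    };

    /* Wired up elsewhere with setup_timer(&nic->service_timer, my_service_timer,
     * (unsigned long)nic) and INIT_WORK(&nic->service_task, my_service_task). */
    static void my_service_timer(unsigned long data)
    {
        struct my_nic *nic = (struct my_nic *)data;

        mod_timer(&nic->service_timer,
                  round_jiffies(jiffies + nic->service_period));
        schedule_work(&nic->service_task);
    }

    static void my_service_task(struct work_struct *work)
    {
        struct my_nic *nic = container_of(work, struct my_nic, service_task);
        unsigned long start = jiffies;

        /* ... reset / watchdog / admin-queue subtasks would run here ... */

        /* If this pass took longer than a period, ask for another run now. */
        if (time_after(jiffies, start + nic->service_period))
            schedule_work(&nic->service_task);
    }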
6723 struct i40e_pf *pf = vsi->back; i40e_set_num_rings_in_vsi() local
6727 vsi->alloc_queue_pairs = pf->num_lan_qps; i40e_set_num_rings_in_vsi()
6730 if (pf->flags & I40E_FLAG_MSIX_ENABLED) i40e_set_num_rings_in_vsi()
6731 vsi->num_q_vectors = pf->num_lan_msix; i40e_set_num_rings_in_vsi()
6745 vsi->alloc_queue_pairs = pf->num_vmdq_qps; i40e_set_num_rings_in_vsi()
6748 vsi->num_q_vectors = pf->num_vmdq_msix; i40e_set_num_rings_in_vsi()
6752 vsi->alloc_queue_pairs = pf->num_vf_qps; i40e_set_num_rings_in_vsi()
6759 vsi->alloc_queue_pairs = pf->num_fcoe_qps; i40e_set_num_rings_in_vsi()
6762 vsi->num_q_vectors = pf->num_fcoe_msix; i40e_set_num_rings_in_vsi()
6812 * @pf: board private structure
6818 static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type) i40e_vsi_mem_alloc() argument
6826 mutex_lock(&pf->switch_mutex); i40e_vsi_mem_alloc()
6834 i = pf->next_vsi; i40e_vsi_mem_alloc()
6835 while (i < pf->num_alloc_vsi && pf->vsi[i]) i40e_vsi_mem_alloc()
6837 if (i >= pf->num_alloc_vsi) { i40e_vsi_mem_alloc()
6839 while (i < pf->next_vsi && pf->vsi[i]) i40e_vsi_mem_alloc()
6843 if (i < pf->num_alloc_vsi && !pf->vsi[i]) { i40e_vsi_mem_alloc()
6849 pf->next_vsi = ++i; i40e_vsi_mem_alloc()
6857 vsi->back = pf; i40e_vsi_mem_alloc()
6861 vsi->rx_itr_setting = pf->rx_itr_default; i40e_vsi_mem_alloc()
6862 vsi->tx_itr_setting = pf->tx_itr_default; i40e_vsi_mem_alloc()
6864 pf->rss_table_size : 64; i40e_vsi_mem_alloc()
6881 pf->vsi[vsi_idx] = vsi; i40e_vsi_mem_alloc()
6886 pf->next_vsi = i - 1; i40e_vsi_mem_alloc()
6889 mutex_unlock(&pf->switch_mutex); i40e_vsi_mem_alloc()
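Editor's note: i40e_vsi_mem_alloc() looks for a free entry in pf->vsi[] starting at the next_vsi hint, wraps around to the front once if it runs off the end, and updates the hint on success, all under pf->switch_mutex. A generic sketch of the hinted free-slot search (locking omitted):

    #include <linux/errno.h>

    static int my_find_free_slot(void **slots, int nslots, int *hint)
    {
        int i = (*hint < nslots) ? *hint : 0;

        while (i < nslots && slots[i])
            i++;
        if (i >= nslots) {              /* wrap around and try the front once */
            i = 0;
            while (i < *hint && i < nslots && slots[i])
                i++;
        }
        if (i < nslots && !slots[i]) {
            *hint = i + 1;              /* start the next search after this slot */
            return i;
        }
        return -ENOMEM;                 /* table is full */
    }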
6919 struct i40e_pf *pf; i40e_vsi_clear() local
6926 pf = vsi->back; i40e_vsi_clear()
6928 mutex_lock(&pf->switch_mutex); i40e_vsi_clear()
6929 if (!pf->vsi[vsi->idx]) { i40e_vsi_clear()
6930 dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n", i40e_vsi_clear()
6935 if (pf->vsi[vsi->idx] != vsi) { i40e_vsi_clear()
6936 dev_err(&pf->pdev->dev, i40e_vsi_clear()
6937 "pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n", i40e_vsi_clear()
6938 pf->vsi[vsi->idx]->idx, i40e_vsi_clear()
6939 pf->vsi[vsi->idx], i40e_vsi_clear()
6940 pf->vsi[vsi->idx]->type, i40e_vsi_clear()
6946 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_vsi_clear()
6947 i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx); i40e_vsi_clear()
6951 pf->vsi[vsi->idx] = NULL; i40e_vsi_clear()
6952 if (vsi->idx < pf->next_vsi) i40e_vsi_clear()
6953 pf->next_vsi = vsi->idx; i40e_vsi_clear()
6956 mutex_unlock(&pf->switch_mutex); i40e_vsi_clear()
6987 struct i40e_pf *pf = vsi->back; i40e_alloc_rings() local
7002 tx_ring->dev = &pf->pdev->dev; i40e_alloc_rings()
7014 rx_ring->dev = &pf->pdev->dev; i40e_alloc_rings()
7018 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) i40e_alloc_rings()
7034 * @pf: board private structure
7039 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors) i40e_reserve_msix_vectors() argument
7041 vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries, i40e_reserve_msix_vectors()
7044 dev_info(&pf->pdev->dev, i40e_reserve_msix_vectors()
7054 * @pf: board private structure
7060 static int i40e_init_msix(struct i40e_pf *pf) i40e_init_msix() argument
7062 struct i40e_hw *hw = &pf->hw; i40e_init_msix()
7067 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) i40e_init_msix()
7097 pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left); i40e_init_msix()
7098 vectors_left -= pf->num_lan_msix; i40e_init_msix()
7099 v_budget += pf->num_lan_msix; i40e_init_msix()
7102 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { i40e_init_msix()
7107 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; i40e_init_msix()
7113 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { i40e_init_msix()
7115 pf->num_fcoe_msix = 0; i40e_init_msix()
7116 else if (vectors_left >= pf->num_fcoe_qps) i40e_init_msix()
7117 pf->num_fcoe_msix = pf->num_fcoe_qps; i40e_init_msix()
7119 pf->num_fcoe_msix = 1; i40e_init_msix()
7120 v_budget += pf->num_fcoe_msix; i40e_init_msix()
7121 vectors_left -= pf->num_fcoe_msix; i40e_init_msix()
7126 if (pf->flags & I40E_FLAG_VMDQ_ENABLED) { i40e_init_msix()
7127 int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps; i40e_init_msix()
7137 pf->num_vmdq_qps = 1; i40e_init_msix()
7138 pf->num_vmdq_msix = pf->num_vmdq_qps; i40e_init_msix()
7144 pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry), i40e_init_msix()
7146 if (!pf->msix_entries) i40e_init_msix()
7150 pf->msix_entries[i].entry = i; i40e_init_msix()
7151 v_actual = i40e_reserve_msix_vectors(pf, v_budget); i40e_init_msix()
7160 pf->num_fcoe_qps = 0; i40e_init_msix()
7161 pf->num_fcoe_msix = 0; i40e_init_msix()
7163 pf->num_vmdq_msix = 0; i40e_init_msix()
7167 pf->flags &= ~I40E_FLAG_MSIX_ENABLED; i40e_init_msix()
7168 kfree(pf->msix_entries); i40e_init_msix()
7169 pf->msix_entries = NULL; i40e_init_msix()
7174 pf->num_vmdq_vsis = 0; i40e_init_msix()
7175 pf->num_vmdq_qps = 0; i40e_init_msix()
7176 pf->num_lan_qps = 1; i40e_init_msix()
7177 pf->num_lan_msix = 1; i40e_init_msix()
7186 pf->num_vmdq_msix = 1; /* force VMDqs to only one vector */ i40e_init_msix()
7187 pf->num_vmdq_vsis = 1; i40e_init_msix()
7188 pf->num_vmdq_qps = 1; i40e_init_msix()
7189 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; i40e_init_msix()
7194 pf->num_lan_msix = 1; i40e_init_msix()
7199 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { i40e_init_msix()
7200 pf->num_lan_msix = 1; i40e_init_msix()
7201 pf->num_fcoe_msix = 1; i40e_init_msix()
7204 pf->num_lan_msix = 2; i40e_init_msix()
7210 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { i40e_init_msix()
7211 pf->num_fcoe_msix = 1; i40e_init_msix()
7216 pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps); i40e_init_msix()
7221 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && i40e_init_msix()
7222 (pf->num_vmdq_msix == 0)) { i40e_init_msix()
7223 dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n"); i40e_init_msix()
7224 pf->flags &= ~I40E_FLAG_VMDQ_ENABLED; i40e_init_msix()
7228 if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) { i40e_init_msix()
7229 dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n"); i40e_init_msix()
7230 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; i40e_init_msix()
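Editor's note: i40e_init_msix() budgets MSI-X vectors per feature (one misc vector, LAN queues capped at the online CPU count, then FDIR/FCoE/VMDq from whatever is left), allocates the msix_entries array via i40e_reserve_msix_vectors(), and asks the PCI core for a range so a partial grant can still be redistributed. A reduced sketch covering only the misc + LAN budget, with an assumed floor of two vectors (misc plus at least one queue):

    #include <linux/cpumask.h>
    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/pci.h>
    #include <linux/slab.h>

    static int my_init_msix(struct pci_dev *pdev, struct msix_entry **out,
                            int num_lan_queues)
    {
        struct msix_entry *entries;
        int i, want, got;

        /* One misc/admin vector plus at most one vector per online CPU. */
        want = 1 + min_t(int, num_online_cpus(), num_lan_queues);

        entries = kcalloc(want, sizeof(*entries), GFP_KERNEL);
        if (!entries)
            return -ENOMEM;
        for (i = 0; i < want; i++)
            entries[i].entry = i;

        /* Ask for the full budget, accept as few as two (misc + one queue). */
        got = pci_enable_msix_range(pdev, entries, 2, want);
        if (got < 0) {
            kfree(entries);
            *out = NULL;
            return got;                 /* caller falls back to MSI / legacy */
        }

        /* got may be less than want: the caller shrinks its queue counts,
         * dropping optional features' vectors first, to fit what it got. */
        *out = entries;
        return got;
    }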
7277 struct i40e_pf *pf = vsi->back; i40e_vsi_alloc_q_vectors() local
7282 if (pf->flags & I40E_FLAG_MSIX_ENABLED) i40e_vsi_alloc_q_vectors()
7284 else if (vsi == pf->vsi[pf->lan_vsi]) i40e_vsi_alloc_q_vectors()
7306 * @pf: board private structure to initialize
7308 static int i40e_init_interrupt_scheme(struct i40e_pf *pf) i40e_init_interrupt_scheme() argument
7313 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { i40e_init_interrupt_scheme()
7314 vectors = i40e_init_msix(pf); i40e_init_interrupt_scheme()
7316 pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | i40e_init_interrupt_scheme()
7328 i40e_determine_queue_usage(pf); i40e_init_interrupt_scheme()
7332 if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) && i40e_init_interrupt_scheme()
7333 (pf->flags & I40E_FLAG_MSI_ENABLED)) { i40e_init_interrupt_scheme()
7334 dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n"); i40e_init_interrupt_scheme()
7335 vectors = pci_enable_msi(pf->pdev); i40e_init_interrupt_scheme()
7337 dev_info(&pf->pdev->dev, "MSI init failed - %d\n", i40e_init_interrupt_scheme()
7339 pf->flags &= ~I40E_FLAG_MSI_ENABLED; i40e_init_interrupt_scheme()
7344 if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED))) i40e_init_interrupt_scheme()
7345 dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n"); i40e_init_interrupt_scheme()
7349 pf->irq_pile = kzalloc(size, GFP_KERNEL); i40e_init_interrupt_scheme()
7350 if (!pf->irq_pile) { i40e_init_interrupt_scheme()
7351 dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n"); i40e_init_interrupt_scheme()
7354 pf->irq_pile->num_entries = vectors; i40e_init_interrupt_scheme()
7355 pf->irq_pile->search_hint = 0; i40e_init_interrupt_scheme()
7358 (void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1); i40e_init_interrupt_scheme()
7365 * @pf: board private structure
7371 static int i40e_setup_misc_vector(struct i40e_pf *pf) i40e_setup_misc_vector() argument
7373 struct i40e_hw *hw = &pf->hw; i40e_setup_misc_vector()
7379 if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) { i40e_setup_misc_vector()
7380 err = request_irq(pf->msix_entries[0].vector, i40e_setup_misc_vector()
7381 i40e_intr, 0, pf->int_name, pf); i40e_setup_misc_vector()
7383 dev_info(&pf->pdev->dev, i40e_setup_misc_vector()
7385 pf->int_name, err); i40e_setup_misc_vector()
7390 i40e_enable_misc_int_causes(pf); i40e_setup_misc_vector()
7398 i40e_irq_dynamic_enable_icr0(pf); i40e_setup_misc_vector()
7405 * @pf: board private structure
7407 static int i40e_config_rss(struct i40e_pf *pf) i40e_config_rss() argument
7410 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; i40e_config_rss()
7411 struct i40e_hw *hw = &pf->hw; i40e_config_rss()
7428 vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs); i40e_config_rss()
7432 if (pf->rss_table_size == 512) i40e_config_rss()
7439 for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) { i40e_config_rss()
7452 ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1)); i40e_config_rss()
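Editor's note: i40e_config_rss() seeds the hash key and then fills the RSS lookup table by assigning queues round-robin, wrapping the queue index back to zero whenever it reaches the active queue count (rss_size capped at the VSI's queue pairs). A minimal sketch of the LUT fill with a plain array standing in for the device registers:

    #include <linux/types.h>

    /* Fill an RSS lookup table round-robin over the active queues. */
    static void my_fill_rss_lut(u8 *lut, int lut_size, int num_queues)
    {
        int i, j;

        for (i = 0, j = 0; i < lut_size; i++, j++) {
            if (j == num_queues)
                j = 0;                  /* wrap back to the first queue */
            lut[i] = (u8)j;
        }
    }

For example, my_fill_rss_lut(lut, 128, 8) spreads 128 table entries evenly over queues 0-7.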
7464 * @pf: board private structure
7470 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) i40e_reconfig_rss_queues() argument
7472 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; i40e_reconfig_rss_queues()
7475 if (!(pf->flags & I40E_FLAG_RSS_ENABLED)) i40e_reconfig_rss_queues()
7478 new_rss_size = min_t(int, queue_count, pf->rss_size_max); i40e_reconfig_rss_queues()
7482 i40e_prep_for_reset(pf); i40e_reconfig_rss_queues()
7484 pf->rss_size = new_rss_size; i40e_reconfig_rss_queues()
7486 i40e_reset_and_rebuild(pf, true); i40e_reconfig_rss_queues()
7487 i40e_config_rss(pf); i40e_reconfig_rss_queues()
7489 dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size); i40e_reconfig_rss_queues()
7490 return pf->rss_size; i40e_reconfig_rss_queues()
7495 * @pf: board private structure
7497 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf) i40e_get_npar_bw_setting() argument
7503 status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw, i40e_get_npar_bw_setting()
7508 pf->npar_min_bw = min_bw; i40e_get_npar_bw_setting()
7510 pf->npar_max_bw = max_bw; i40e_get_npar_bw_setting()
7518 * @pf: board private structure
7520 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf) i40e_set_npar_bw_setting() argument
7526 bw_data.pf_valid_bits = cpu_to_le16(1 << pf->hw.pf_id); i40e_set_npar_bw_setting()
7527 bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK; i40e_set_npar_bw_setting()
7528 bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK; i40e_set_npar_bw_setting()
7531 status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL); i40e_set_npar_bw_setting()
7538 * @pf: board private structure
7540 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf) i40e_commit_npar_bw_setting() argument
7547 if (pf->hw.partition_id != 1) { i40e_commit_npar_bw_setting()
7548 dev_info(&pf->pdev->dev, i40e_commit_npar_bw_setting()
7550 pf->hw.partition_id); i40e_commit_npar_bw_setting()
7556 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ); i40e_commit_npar_bw_setting()
7557 last_aq_status = pf->hw.aq.asq_last_status; i40e_commit_npar_bw_setting()
7559 dev_info(&pf->pdev->dev, i40e_commit_npar_bw_setting()
7566 ret = i40e_aq_read_nvm(&pf->hw, i40e_commit_npar_bw_setting()
7573 last_aq_status = pf->hw.aq.asq_last_status; i40e_commit_npar_bw_setting()
7574 i40e_release_nvm(&pf->hw); i40e_commit_npar_bw_setting()
7576 dev_info(&pf->pdev->dev, "NVM read error, err %d aq_err %d\n", i40e_commit_npar_bw_setting()
7585 ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE); i40e_commit_npar_bw_setting()
7586 last_aq_status = pf->hw.aq.asq_last_status; i40e_commit_npar_bw_setting()
7588 dev_info(&pf->pdev->dev, i40e_commit_npar_bw_setting()
7597 ret = i40e_aq_update_nvm(&pf->hw, i40e_commit_npar_bw_setting()
7604 last_aq_status = pf->hw.aq.asq_last_status; i40e_commit_npar_bw_setting()
7605 i40e_release_nvm(&pf->hw); i40e_commit_npar_bw_setting()
7607 dev_info(&pf->pdev->dev, i40e_commit_npar_bw_setting()
7617 * @pf: board private structure to initialize
7623 static int i40e_sw_init(struct i40e_pf *pf) i40e_sw_init() argument
7628 pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE, i40e_sw_init()
7630 pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG; i40e_sw_init()
7633 pf->hw.debug_mask = debug; i40e_sw_init()
7634 pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER), i40e_sw_init()
7639 pf->flags = I40E_FLAG_RX_CSUM_ENABLED | i40e_sw_init()
7644 pf->flags |= I40E_FLAG_RX_PS_ENABLED; i40e_sw_init()
7646 pf->flags |= I40E_FLAG_RX_1BUF_ENABLED; i40e_sw_init()
7649 pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF; i40e_sw_init()
7650 pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF; i40e_sw_init()
7655 pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width; i40e_sw_init()
7656 pf->rss_size = 1; i40e_sw_init()
7657 pf->rss_table_size = pf->hw.func_caps.rss_table_size; i40e_sw_init()
7658 pf->rss_size_max = min_t(int, pf->rss_size_max, i40e_sw_init()
7659 pf->hw.func_caps.num_tx_qp); i40e_sw_init()
7660 if (pf->hw.func_caps.rss) { i40e_sw_init()
7661 pf->flags |= I40E_FLAG_RSS_ENABLED; i40e_sw_init()
7662 pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus()); i40e_sw_init()
7666 if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.mfp_mode_1) { i40e_sw_init()
7667 pf->flags |= I40E_FLAG_MFP_ENABLED; i40e_sw_init()
7668 dev_info(&pf->pdev->dev, "MFP mode Enabled\n"); i40e_sw_init()
7669 if (i40e_get_npar_bw_setting(pf)) i40e_sw_init()
7670 dev_warn(&pf->pdev->dev, i40e_sw_init()
7673 dev_info(&pf->pdev->dev, i40e_sw_init()
7675 pf->npar_min_bw, pf->npar_max_bw); i40e_sw_init()
7679 if ((pf->hw.func_caps.fd_filters_guaranteed > 0) || i40e_sw_init()
7680 (pf->hw.func_caps.fd_filters_best_effort > 0)) { i40e_sw_init()
7681 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; i40e_sw_init()
7682 pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE; i40e_sw_init()
7684 pf->fd_atr_cnt_idx = I40E_FD_ATR_STAT_IDX(pf->hw.pf_id); i40e_sw_init()
7685 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) { i40e_sw_init()
7686 pf->flags |= I40E_FLAG_FD_SB_ENABLED; i40e_sw_init()
7688 pf->fd_sb_cnt_idx = I40E_FD_SB_STAT_IDX(pf->hw.pf_id); i40e_sw_init()
7690 dev_info(&pf->pdev->dev, i40e_sw_init()
7693 pf->fdir_pf_filter_count = i40e_sw_init()
7694 pf->hw.func_caps.fd_filters_guaranteed; i40e_sw_init()
7695 pf->hw.fdir_shared_filter_count = i40e_sw_init()
7696 pf->hw.func_caps.fd_filters_best_effort; i40e_sw_init()
7699 if (pf->hw.func_caps.vmdq) { i40e_sw_init()
7700 pf->flags |= I40E_FLAG_VMDQ_ENABLED; i40e_sw_init()
7701 pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI; i40e_sw_init()
7702 pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ; i40e_sw_init()
7706 err = i40e_init_pf_fcoe(pf); i40e_sw_init()
7708 dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err); i40e_sw_init()
7712 if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { i40e_sw_init()
7713 pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF; i40e_sw_init()
7714 pf->flags |= I40E_FLAG_SRIOV_ENABLED; i40e_sw_init()
7715 pf->num_req_vfs = min_t(int, i40e_sw_init()
7716 pf->hw.func_caps.num_vfs, i40e_sw_init()
7720 pf->eeprom_version = 0xDEAD; i40e_sw_init()
7721 pf->lan_veb = I40E_NO_VEB; i40e_sw_init()
7722 pf->lan_vsi = I40E_NO_VSI; i40e_sw_init()
7726 + (sizeof(u16) * pf->hw.func_caps.num_tx_qp); i40e_sw_init()
7727 pf->qp_pile = kzalloc(size, GFP_KERNEL); i40e_sw_init()
7728 if (!pf->qp_pile) { i40e_sw_init()
7732 pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp; i40e_sw_init()
7733 pf->qp_pile->search_hint = 0; i40e_sw_init()
7735 pf->tx_timeout_recovery_level = 1; i40e_sw_init()
7737 mutex_init(&pf->switch_mutex); i40e_sw_init()
7740 if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf))) i40e_sw_init()
7741 i40e_set_npar_bw_setting(pf); i40e_sw_init()
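The RSS sizing lines in i40e_sw_init() above reduce to three caps: the RSS table width reported by the hardware, the Tx queue budget, and the number of online CPUs. A self-contained worked example with assumed capability values (not read from real hardware):

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
    int rss_table_entry_width = 7;      /* assumed: 2^7 = 128 table entries */
    int num_tx_qp = 128;                /* assumed Tx queue capability */
    int online_cpus = 12;               /* assumed CPU count */

    int rss_size_max = 1 << rss_table_entry_width;
    rss_size_max = min_int(rss_size_max, num_tx_qp);

    int rss_size = min_int(rss_size_max, online_cpus);

    printf("rss_size = %d\n", rss_size);        /* 12 with these numbers */
    return 0;
}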
7749 * @pf: board private structure to initialize
7754 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features) i40e_set_ntuple() argument
7763 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) i40e_set_ntuple()
7765 pf->flags |= I40E_FLAG_FD_SB_ENABLED; i40e_set_ntuple()
7768 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { i40e_set_ntuple()
7770 i40e_fdir_filter_exit(pf); i40e_set_ntuple()
7772 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; i40e_set_ntuple()
7773 pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED; i40e_set_ntuple()
7775 pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0; i40e_set_ntuple()
7776 pf->fdir_pf_active_filters = 0; i40e_set_ntuple()
7777 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; i40e_set_ntuple()
7778 dev_info(&pf->pdev->dev, "ATR re-enabled.\n"); i40e_set_ntuple()
7780 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) && i40e_set_ntuple()
7781 (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) i40e_set_ntuple()
7782 pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED; i40e_set_ntuple()
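i40e_set_ntuple() above derives the sideband Flow Director flag from the netdev feature bits; on the disable path it also flushes the existing filters, clears the counters and re-enables ATR, and the caller resets the PF when the flag actually changed. A toy model of that flag dance (the struct, flag names and return convention are invented for illustration, not driver code):

#include <stdio.h>

#define FLAG_FD_SB  (1u << 0)   /* sideband (ntuple) Flow Director */
#define FLAG_FD_ATR (1u << 1)   /* automatic Tx-driven rules */

struct state {
    unsigned flags;
    unsigned fd_add_err, fd_atr_cnt, active_filters;
};

/* Returns nonzero when the flags changed and a reset would be needed. */
static int set_ntuple(struct state *s, int enable)
{
    unsigned old = s->flags;

    if (enable) {
        s->flags |= FLAG_FD_SB;
    } else {
        s->flags &= ~FLAG_FD_SB;
        s->fd_add_err = s->fd_atr_cnt = s->active_filters = 0;
        s->flags |= FLAG_FD_ATR;        /* fall back to ATR */
    }
    return s->flags != old;
}

int main(void)
{
    struct state s = { .flags = FLAG_FD_ATR };

    printf("enable:  need_reset=%d\n", set_ntuple(&s, 1));
    printf("disable: need_reset=%d\n", set_ntuple(&s, 0));
    return 0;
}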
7797 struct i40e_pf *pf = vsi->back; i40e_set_features() local
7805 need_reset = i40e_set_ntuple(pf, features); i40e_set_features()
7808 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); i40e_set_features()
7816 * @pf: board private structure
7821 static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port) i40e_get_vxlan_port_idx() argument
7826 if (pf->vxlan_ports[i] == port) i40e_get_vxlan_port_idx()
7844 struct i40e_pf *pf = vsi->back; i40e_add_vxlan_port() local
7851 idx = i40e_get_vxlan_port_idx(pf, port); i40e_add_vxlan_port()
7861 next_idx = i40e_get_vxlan_port_idx(pf, 0); i40e_add_vxlan_port()
7870 pf->vxlan_ports[next_idx] = port; i40e_add_vxlan_port()
7871 pf->pending_vxlan_bitmap |= (1 << next_idx); i40e_add_vxlan_port()
7872 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; i40e_add_vxlan_port()
7874 dev_info(&pf->pdev->dev, "adding vxlan port %d\n", ntohs(port)); i40e_add_vxlan_port()
7888 struct i40e_pf *pf = vsi->back; i40e_del_vxlan_port() local
7894 idx = i40e_get_vxlan_port_idx(pf, port); i40e_del_vxlan_port()
7901 pf->vxlan_ports[idx] = 0; i40e_del_vxlan_port()
7902 pf->pending_vxlan_bitmap |= (1 << idx); i40e_del_vxlan_port()
7903 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC; i40e_del_vxlan_port()
7905 dev_info(&pf->pdev->dev, "deleting vxlan port %d\n", i40e_del_vxlan_port()
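Both the add and delete paths above lean on i40e_get_vxlan_port_idx(), a linear scan of a small fixed-size UDP port table that returns the matching index, or the table size when the port is absent; looking up port 0 finds a free slot. A self-contained model with an assumed table size:

#include <stdio.h>

#define MAX_UDP_PORTS 16            /* assumed table size, for illustration */

static unsigned get_port_idx(const unsigned short *table, unsigned short port)
{
    unsigned i;

    for (i = 0; i < MAX_UDP_PORTS; i++)
        if (table[i] == port)
            return i;
    return MAX_UDP_PORTS;           /* "not found" sentinel */
}

int main(void)
{
    unsigned short ports[MAX_UDP_PORTS] = { 0 };
    unsigned idx;

    idx = get_port_idx(ports, 0);           /* add: find a free slot */
    if (idx < MAX_UDP_PORTS)
        ports[idx] = 4789;

    printf("port 4789 at index %u\n", get_port_idx(ports, 4789));

    idx = get_port_idx(ports, 4789);        /* delete: clear the slot */
    if (idx < MAX_UDP_PORTS)
        ports[idx] = 0;
    return 0;
}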
7918 struct i40e_pf *pf = np->vsi->back; i40e_get_phys_port_id() local
7919 struct i40e_hw *hw = &pf->hw; i40e_get_phys_port_id()
7921 if (!(pf->flags & I40E_FLAG_PORT_ID_VALID)) i40e_get_phys_port_id()
7944 struct i40e_pf *pf = np->vsi->back; i40e_ndo_fdb_add() local
7947 if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED)) i40e_ndo_fdb_add()
7995 struct i40e_pf *pf = vsi->back; i40e_ndo_bridge_setlink() local
8001 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) i40e_ndo_bridge_setlink()
8006 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) i40e_ndo_bridge_setlink()
8007 veb = pf->veb[i]; i40e_ndo_bridge_setlink()
8025 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, nla_for_each_nested()
8040 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; nla_for_each_nested()
8042 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; nla_for_each_nested()
8043 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); nla_for_each_nested()
8073 struct i40e_pf *pf = vsi->back; i40e_ndo_bridge_getlink() local
8078 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) i40e_ndo_bridge_getlink()
8083 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) i40e_ndo_bridge_getlink()
8084 veb = pf->veb[i]; i40e_ndo_bridge_getlink()
8144 struct i40e_pf *pf = vsi->back; i40e_config_netdev() local
8145 struct i40e_hw *hw = &pf->hw; i40e_config_netdev()
8180 if (!(pf->flags & I40E_FLAG_MFP_ENABLED)) i40e_config_netdev()
8187 SET_NETDEV_DEV(netdev, &pf->pdev->dev); i40e_config_netdev()
8200 pf->vsi[pf->lan_vsi]->netdev->name); i40e_config_netdev()
8253 struct i40e_pf *pf = vsi->back; i40e_is_vsi_uplink_mode_veb() local
8259 veb = pf->veb[vsi->veb_idx]; i40e_is_vsi_uplink_mode_veb()
8279 struct i40e_pf *pf = vsi->back; i40e_add_vsi() local
8280 struct i40e_hw *hw = &pf->hw; i40e_add_vsi()
8293 ctxt.seid = pf->main_vsi_seid; i40e_add_vsi()
8294 ctxt.pf_num = pf->hw.pf_id; i40e_add_vsi()
8296 ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL); i40e_add_vsi()
8299 dev_info(&pf->pdev->dev, i40e_add_vsi()
8301 ret, pf->hw.aq.asq_last_status); i40e_add_vsi()
8310 enabled_tc = i40e_pf_get_tc_map(pf); i40e_add_vsi()
8313 if ((pf->flags & I40E_FLAG_MFP_ENABLED) && i40e_add_vsi()
8314 !(pf->hw.func_caps.iscsi)) { /* NIC type PF */ i40e_add_vsi()
8316 ctxt.seid = pf->main_vsi_seid; i40e_add_vsi()
8317 ctxt.pf_num = pf->hw.pf_id; i40e_add_vsi()
8322 dev_info(&pf->pdev->dev, i40e_add_vsi()
8324 pf->hw.aq.asq_last_status); i40e_add_vsi()
8340 dev_info(&pf->pdev->dev, i40e_add_vsi()
8343 pf->hw.aq.asq_last_status); i40e_add_vsi()
8355 if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) && i40e_add_vsi()
8405 if (pf->vf[vsi->vf_id].spoofchk) { i40e_add_vsi()
8420 dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n"); i40e_add_vsi()
8473 pf->flags |= I40E_FLAG_FILTER_SYNC; i40e_add_vsi()
8479 dev_info(&pf->pdev->dev, i40e_add_vsi()
8481 ret, pf->hw.aq.asq_last_status); i40e_add_vsi()
8500 struct i40e_pf *pf; i40e_vsi_release() local
8504 pf = vsi->back; i40e_vsi_release()
8508 dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n", i40e_vsi_release()
8512 if (vsi == pf->vsi[pf->lan_vsi] && i40e_vsi_release()
8513 !test_bit(__I40E_DOWN, &pf->state)) { i40e_vsi_release()
8514 dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); i40e_vsi_release()
8554 for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) { i40e_vsi_release()
8555 if (pf->vsi[i] && i40e_vsi_release()
8556 pf->vsi[i]->uplink_seid == uplink_seid && i40e_vsi_release()
8557 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { i40e_vsi_release()
8562 if (!pf->veb[i]) i40e_vsi_release()
8564 if (pf->veb[i]->uplink_seid == uplink_seid) i40e_vsi_release()
8566 if (pf->veb[i]->seid == uplink_seid) i40e_vsi_release()
8567 veb = pf->veb[i]; i40e_vsi_release()
8588 struct i40e_pf *pf = vsi->back; i40e_vsi_setup_vectors() local
8591 dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n", i40e_vsi_setup_vectors()
8597 dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n", i40e_vsi_setup_vectors()
8604 dev_info(&pf->pdev->dev, i40e_vsi_setup_vectors()
8612 vsi->base_vector = i40e_get_lump(pf, pf->irq_pile, i40e_vsi_setup_vectors()
8615 dev_info(&pf->pdev->dev, i40e_vsi_setup_vectors()
8638 struct i40e_pf *pf = vsi->back; i40e_vsi_reinit_setup() local
8642 i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_vsi_reinit_setup()
8651 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx); i40e_vsi_reinit_setup()
8653 dev_info(&pf->pdev->dev, i40e_vsi_reinit_setup()
8663 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; i40e_vsi_reinit_setup()
8664 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; i40e_vsi_reinit_setup()
8665 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; i40e_vsi_reinit_setup()
8666 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); i40e_vsi_reinit_setup()
8685 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); i40e_vsi_reinit_setup()
8693 * @pf: board private structure
8704 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, i40e_vsi_setup() argument
8726 if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) { i40e_vsi_setup()
8727 veb = pf->veb[i]; i40e_vsi_setup()
8732 if (!veb && uplink_seid != pf->mac_seid) { i40e_vsi_setup()
8734 for (i = 0; i < pf->num_alloc_vsi; i++) { i40e_vsi_setup()
8735 if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) { i40e_vsi_setup()
8736 vsi = pf->vsi[i]; i40e_vsi_setup()
8741 dev_info(&pf->pdev->dev, "no such uplink_seid %d\n", i40e_vsi_setup()
8746 if (vsi->uplink_seid == pf->mac_seid) i40e_vsi_setup()
8747 veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, i40e_vsi_setup()
8750 veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, i40e_vsi_setup()
8753 if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { i40e_vsi_setup()
8763 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { i40e_vsi_setup()
8765 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; i40e_vsi_setup()
8770 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid) i40e_vsi_setup()
8771 veb = pf->veb[i]; i40e_vsi_setup()
8774 dev_info(&pf->pdev->dev, "couldn't add VEB\n"); i40e_vsi_setup()
8783 v_idx = i40e_vsi_mem_alloc(pf, type); i40e_vsi_setup()
8786 vsi = pf->vsi[v_idx]; i40e_vsi_setup()
8793 pf->lan_vsi = v_idx; i40e_vsi_setup()
8797 ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, i40e_vsi_setup()
8800 dev_info(&pf->pdev->dev, i40e_vsi_setup()
8865 i40e_aq_delete_element(&pf->hw, vsi->seid, NULL); i40e_vsi_setup()
8882 struct i40e_pf *pf = veb->pf; i40e_veb_get_bw_info() local
8883 struct i40e_hw *hw = &pf->hw; i40e_veb_get_bw_info()
8891 dev_info(&pf->pdev->dev, i40e_veb_get_bw_info()
8900 dev_info(&pf->pdev->dev, i40e_veb_get_bw_info()
8925 * @pf: board private structure
8930 static int i40e_veb_mem_alloc(struct i40e_pf *pf) i40e_veb_mem_alloc() argument
8937 mutex_lock(&pf->switch_mutex); i40e_veb_mem_alloc()
8946 while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL)) i40e_veb_mem_alloc()
8958 veb->pf = pf; i40e_veb_mem_alloc()
8962 pf->veb[i] = veb; i40e_veb_mem_alloc()
8965 mutex_unlock(&pf->switch_mutex); i40e_veb_mem_alloc()
8978 struct i40e_pf *pf = branch->pf; i40e_switch_branch_release() local
8985 if (!pf->veb[i]) i40e_switch_branch_release()
8987 if (pf->veb[i]->uplink_seid == branch->seid) i40e_switch_branch_release()
8988 i40e_switch_branch_release(pf->veb[i]); i40e_switch_branch_release()
8996 for (i = 0; i < pf->num_alloc_vsi; i++) { i40e_switch_branch_release()
8997 if (!pf->vsi[i]) i40e_switch_branch_release()
8999 if (pf->vsi[i]->uplink_seid == branch_seid && i40e_switch_branch_release()
9000 (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) { i40e_switch_branch_release()
9001 i40e_vsi_release(pf->vsi[i]); i40e_switch_branch_release()
9010 if (pf->veb[veb_idx]) i40e_switch_branch_release()
9011 i40e_veb_release(pf->veb[veb_idx]); i40e_switch_branch_release()
9023 if (veb->pf) { i40e_veb_clear()
9024 struct i40e_pf *pf = veb->pf; i40e_veb_clear() local
9026 mutex_lock(&pf->switch_mutex); i40e_veb_clear()
9027 if (pf->veb[veb->idx] == veb) i40e_veb_clear()
9028 pf->veb[veb->idx] = NULL; i40e_veb_clear()
9029 mutex_unlock(&pf->switch_mutex); i40e_veb_clear()
9042 struct i40e_pf *pf; i40e_veb_release() local
9045 pf = veb->pf; i40e_veb_release()
9048 for (i = 0; i < pf->num_alloc_vsi; i++) { i40e_veb_release()
9049 if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) { i40e_veb_release()
9051 vsi = pf->vsi[i]; i40e_veb_release()
9055 dev_info(&pf->pdev->dev, i40e_veb_release()
9065 if (veb->uplink_seid == pf->mac_seid) i40e_veb_release()
9071 vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid; i40e_veb_release()
9072 vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx; i40e_veb_release()
9075 i40e_aq_delete_element(&pf->hw, veb->seid, NULL); i40e_veb_release()
9091 ret = i40e_aq_add_veb(&veb->pf->hw, veb->uplink_seid, vsi->seid, i40e_add_veb()
9095 dev_info(&veb->pf->pdev->dev, i40e_add_veb()
9097 ret, veb->pf->hw.aq.asq_last_status); i40e_add_veb()
9102 ret = i40e_aq_get_veb_parameters(&veb->pf->hw, veb->seid, NULL, NULL, i40e_add_veb()
9105 dev_info(&veb->pf->pdev->dev, i40e_add_veb()
9107 ret, veb->pf->hw.aq.asq_last_status); i40e_add_veb()
9112 dev_info(&veb->pf->pdev->dev, i40e_add_veb()
9114 ret, veb->pf->hw.aq.asq_last_status); i40e_add_veb()
9115 i40e_aq_delete_element(&veb->pf->hw, veb->seid, NULL); i40e_add_veb()
9128 * @pf: board private structure
9142 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, i40e_veb_setup() argument
9153 dev_info(&pf->pdev->dev, i40e_veb_setup()
9160 for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++) i40e_veb_setup()
9161 if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid) i40e_veb_setup()
9163 if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) { i40e_veb_setup()
9164 dev_info(&pf->pdev->dev, "vsi seid %d not found\n", i40e_veb_setup()
9169 if (uplink_seid && uplink_seid != pf->mac_seid) { i40e_veb_setup()
9171 if (pf->veb[veb_idx] && i40e_veb_setup()
9172 pf->veb[veb_idx]->seid == uplink_seid) { i40e_veb_setup()
9173 uplink_veb = pf->veb[veb_idx]; i40e_veb_setup()
9178 dev_info(&pf->pdev->dev, i40e_veb_setup()
9185 veb_idx = i40e_veb_mem_alloc(pf); i40e_veb_setup()
9188 veb = pf->veb[veb_idx]; i40e_veb_setup()
9195 ret = i40e_add_veb(veb, pf->vsi[vsi_idx]); i40e_veb_setup()
9198 if (vsi_idx == pf->lan_vsi) i40e_veb_setup()
9199 pf->lan_veb = veb->idx; i40e_veb_setup()
9211 * @pf: board private structure
9218 static void i40e_setup_pf_switch_element(struct i40e_pf *pf, i40e_setup_pf_switch_element() argument
9228 dev_info(&pf->pdev->dev, i40e_setup_pf_switch_element()
9234 pf->mac_seid = seid; i40e_setup_pf_switch_element()
9238 if (uplink_seid != pf->mac_seid) i40e_setup_pf_switch_element()
9240 if (pf->lan_veb == I40E_NO_VEB) { i40e_setup_pf_switch_element()
9245 if (pf->veb[v] && (pf->veb[v]->seid == seid)) { i40e_setup_pf_switch_element()
9246 pf->lan_veb = v; i40e_setup_pf_switch_element()
9250 if (pf->lan_veb == I40E_NO_VEB) { i40e_setup_pf_switch_element()
9251 v = i40e_veb_mem_alloc(pf); i40e_setup_pf_switch_element()
9254 pf->lan_veb = v; i40e_setup_pf_switch_element()
9258 pf->veb[pf->lan_veb]->seid = seid; i40e_setup_pf_switch_element()
9259 pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; i40e_setup_pf_switch_element()
9260 pf->veb[pf->lan_veb]->pf = pf; i40e_setup_pf_switch_element()
9261 pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB; i40e_setup_pf_switch_element()
9269 pf->mac_seid = uplink_seid; i40e_setup_pf_switch_element()
9270 pf->pf_seid = downlink_seid; i40e_setup_pf_switch_element()
9271 pf->main_vsi_seid = seid; i40e_setup_pf_switch_element()
9273 dev_info(&pf->pdev->dev, i40e_setup_pf_switch_element()
9275 pf->pf_seid, pf->main_vsi_seid); i40e_setup_pf_switch_element()
9286 dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n", i40e_setup_pf_switch_element()
9294 * @pf: board private structure
9300 int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) i40e_fetch_switch_configuration() argument
9316 ret = i40e_aq_get_switch_config(&pf->hw, sw_config, i40e_fetch_switch_configuration()
9320 dev_info(&pf->pdev->dev, i40e_fetch_switch_configuration()
9322 ret, pf->hw.aq.asq_last_status); i40e_fetch_switch_configuration()
9331 dev_info(&pf->pdev->dev, i40e_fetch_switch_configuration()
9339 i40e_setup_pf_switch_element(pf, ele, num_reported, i40e_fetch_switch_configuration()
9350 * @pf: board private structure
9355 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit) i40e_setup_pf_switch() argument
9360 ret = i40e_fetch_switch_configuration(pf, false); i40e_setup_pf_switch()
9362 dev_info(&pf->pdev->dev, i40e_setup_pf_switch()
9364 ret, pf->hw.aq.asq_last_status); i40e_setup_pf_switch()
9367 i40e_pf_reset_stats(pf); i40e_setup_pf_switch()
9370 if (pf->lan_vsi == I40E_NO_VSI || reinit) { i40e_setup_pf_switch()
9377 if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb]) i40e_setup_pf_switch()
9378 uplink_seid = pf->veb[pf->lan_veb]->seid; i40e_setup_pf_switch()
9380 uplink_seid = pf->mac_seid; i40e_setup_pf_switch()
9381 if (pf->lan_vsi == I40E_NO_VSI) i40e_setup_pf_switch()
9382 vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); i40e_setup_pf_switch()
9384 vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); i40e_setup_pf_switch()
9386 dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); i40e_setup_pf_switch()
9387 i40e_fdir_teardown(pf); i40e_setup_pf_switch()
9392 u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; i40e_setup_pf_switch()
9393 pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; i40e_setup_pf_switch()
9394 pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; i40e_setup_pf_switch()
9395 i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); i40e_setup_pf_switch()
9397 i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); i40e_setup_pf_switch()
9399 i40e_fdir_sb_setup(pf); i40e_setup_pf_switch()
9402 ret = i40e_setup_pf_filter_control(pf); i40e_setup_pf_switch()
9404 dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n", i40e_setup_pf_switch()
9412 if ((pf->flags & I40E_FLAG_RSS_ENABLED)) i40e_setup_pf_switch()
9413 i40e_config_rss(pf); i40e_setup_pf_switch()
9416 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); i40e_setup_pf_switch()
9417 i40e_link_event(pf); i40e_setup_pf_switch()
9420 pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info & i40e_setup_pf_switch()
9423 i40e_ptp_init(pf); i40e_setup_pf_switch()
9430 * @pf: board private structure
9432 static void i40e_determine_queue_usage(struct i40e_pf *pf) i40e_determine_queue_usage() argument
9436 pf->num_lan_qps = 0; i40e_determine_queue_usage()
9438 pf->num_fcoe_qps = 0; i40e_determine_queue_usage()
9445 queues_left = pf->hw.func_caps.num_tx_qp; i40e_determine_queue_usage()
9448 !(pf->flags & I40E_FLAG_MSIX_ENABLED)) { i40e_determine_queue_usage()
9451 pf->rss_size = pf->num_lan_qps = 1; i40e_determine_queue_usage()
9454 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | i40e_determine_queue_usage()
9463 } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED | i40e_determine_queue_usage()
9468 pf->rss_size = pf->num_lan_qps = 1; i40e_determine_queue_usage()
9469 queues_left -= pf->num_lan_qps; i40e_determine_queue_usage()
9471 pf->flags &= ~(I40E_FLAG_RSS_ENABLED | i40e_determine_queue_usage()
9481 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) && i40e_determine_queue_usage()
9483 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; i40e_determine_queue_usage()
9484 dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n"); i40e_determine_queue_usage()
9486 pf->num_lan_qps = max_t(int, pf->rss_size_max, i40e_determine_queue_usage()
9488 pf->num_lan_qps = min_t(int, pf->num_lan_qps, i40e_determine_queue_usage()
9489 pf->hw.func_caps.num_tx_qp); i40e_determine_queue_usage()
9491 queues_left -= pf->num_lan_qps; i40e_determine_queue_usage()
9495 if (pf->flags & I40E_FLAG_FCOE_ENABLED) { i40e_determine_queue_usage()
9497 pf->num_fcoe_qps = I40E_DEFAULT_FCOE; i40e_determine_queue_usage()
9499 pf->num_fcoe_qps = I40E_MINIMUM_FCOE; i40e_determine_queue_usage()
9501 pf->num_fcoe_qps = 0; i40e_determine_queue_usage()
9502 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; i40e_determine_queue_usage()
9503 dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n"); i40e_determine_queue_usage()
9506 queues_left -= pf->num_fcoe_qps; i40e_determine_queue_usage()
9510 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { i40e_determine_queue_usage()
9514 pf->flags &= ~I40E_FLAG_FD_SB_ENABLED; i40e_determine_queue_usage()
9515 dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n"); i40e_determine_queue_usage()
9519 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && i40e_determine_queue_usage()
9520 pf->num_vf_qps && pf->num_req_vfs && queues_left) { i40e_determine_queue_usage()
9521 pf->num_req_vfs = min_t(int, pf->num_req_vfs, i40e_determine_queue_usage()
9522 (queues_left / pf->num_vf_qps)); i40e_determine_queue_usage()
9523 queues_left -= (pf->num_req_vfs * pf->num_vf_qps); i40e_determine_queue_usage()
9526 if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) && i40e_determine_queue_usage()
9527 pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) { i40e_determine_queue_usage()
9528 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis, i40e_determine_queue_usage()
9529 (queues_left / pf->num_vmdq_qps)); i40e_determine_queue_usage()
9530 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps); i40e_determine_queue_usage()
9533 pf->queues_left = queues_left; i40e_determine_queue_usage()
9535 dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps); i40e_determine_queue_usage()
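i40e_determine_queue_usage() above is plain budgeting: start from func_caps.num_tx_qp and let each enabled feature (LAN, FCoE, the single Flow Director sideband queue, VFs, VMDq) claim its slice, trimming the requested counts whenever the remainder is too small. A back-of-the-envelope model with made-up numbers:

#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
    int queues_left = 128;              /* assumed func_caps.num_tx_qp */
    int lan_qps = 16, fcoe_qps = 8;     /* assumed feature needs */
    int num_vfs = 4, vf_qps = 4;
    int vmdq_vsis = 8, vmdq_qps = 2;

    queues_left -= lan_qps;
    queues_left -= fcoe_qps;
    queues_left -= 1;                   /* Flow Director sideband queue */

    num_vfs = min_int(num_vfs, queues_left / vf_qps);
    queues_left -= num_vfs * vf_qps;

    vmdq_vsis = min_int(vmdq_vsis, queues_left / vmdq_qps);
    queues_left -= vmdq_vsis * vmdq_qps;

    printf("queues left: %d\n", queues_left);   /* 71 with these numbers */
    return 0;
}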
9541 * @pf: PF to be setup
9546 * ethertype and macvlan type filter settings for the pf.
9550 static int i40e_setup_pf_filter_control(struct i40e_pf *pf) i40e_setup_pf_filter_control() argument
9552 struct i40e_filter_control_settings *settings = &pf->filter_settings; i40e_setup_pf_filter_control()
9557 if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)) i40e_setup_pf_filter_control()
9564 if (i40e_set_filter_control(&pf->hw, settings)) i40e_setup_pf_filter_control()
9571 static void i40e_print_features(struct i40e_pf *pf) i40e_print_features() argument
9573 struct i40e_hw *hw = &pf->hw; i40e_print_features()
9578 dev_err(&pf->pdev->dev, "Features string allocation failed\n"); i40e_print_features()
9586 buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs); i40e_print_features()
9589 pf->hw.func_caps.num_vsis, i40e_print_features()
9590 pf->vsi[pf->lan_vsi]->num_queue_pairs, i40e_print_features()
9591 pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF"); i40e_print_features()
9593 if (pf->flags & I40E_FLAG_RSS_ENABLED) i40e_print_features()
9595 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) i40e_print_features()
9597 if (pf->flags & I40E_FLAG_FD_SB_ENABLED) { i40e_print_features()
9601 if (pf->flags & I40E_FLAG_DCB_CAPABLE) i40e_print_features()
9603 if (pf->flags & I40E_FLAG_PTP) i40e_print_features()
9606 if (pf->flags & I40E_FLAG_FCOE_ENABLED) i40e_print_features()
9611 dev_info(&pf->pdev->dev, "%s\n", string); i40e_print_features()
9630 struct i40e_pf *pf; i40e_probe() local
9670 pf = kzalloc(sizeof(*pf), GFP_KERNEL); i40e_probe()
9671 if (!pf) { i40e_probe()
9675 pf->next_vsi = 0; i40e_probe()
9676 pf->pdev = pdev; i40e_probe()
9677 set_bit(__I40E_DOWN, &pf->state); i40e_probe()
9679 hw = &pf->hw; i40e_probe()
9680 hw->back = pf; i40e_probe()
9700 pf->instance = pfs_found; i40e_probe()
9703 pf->msg_enable = pf->hw.debug_mask; i40e_probe()
9704 pf->msg_enable = debug; i40e_probe()
9713 pf->corer_count++; i40e_probe()
9725 pf->pfr_count++; i40e_probe()
9731 pf->adminq_work_limit = I40E_AQ_WORK_LIMIT; i40e_probe()
9733 snprintf(pf->int_name, sizeof(pf->int_name) - 1, i40e_probe()
9735 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev)); i40e_probe()
9744 pf->hw.fc.requested_mode = I40E_FC_NONE; i40e_probe()
9763 i40e_verify_eeprom(pf); i40e_probe()
9770 err = i40e_get_capabilities(pf); i40e_probe()
9774 err = i40e_sw_init(pf); i40e_probe()
9782 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num); i40e_probe()
9799 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) || i40e_probe()
9800 (pf->hw.aq.fw_maj_ver < 4)) { i40e_probe()
9815 pf->flags |= I40E_FLAG_PORT_ID_VALID; i40e_probe()
9826 dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr); i40e_probe()
9829 pci_set_drvdata(pdev, pf); i40e_probe()
9832 err = i40e_init_pf_dcb(pf); i40e_probe()
9835 pf->flags &= ~I40E_FLAG_DCB_CAPABLE; i40e_probe()
9841 setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf); i40e_probe()
9842 pf->service_timer_period = HZ; i40e_probe()
9844 INIT_WORK(&pf->service_task, i40e_service_task); i40e_probe()
9845 clear_bit(__I40E_SERVICE_SCHED, &pf->state); i40e_probe()
9846 pf->flags |= I40E_FLAG_NEED_LINK_UPDATE; i40e_probe()
9847 pf->link_check_timeout = jiffies; i40e_probe()
9850 pf->wol_en = false; i40e_probe()
9851 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); i40e_probe()
9854 i40e_determine_queue_usage(pf); i40e_probe()
9855 err = i40e_init_interrupt_scheme(pf); i40e_probe()
9864 if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC) i40e_probe()
9865 pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC; i40e_probe()
9867 pf->num_alloc_vsi = pf->hw.func_caps.num_vsis; i40e_probe()
9870 len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi; i40e_probe()
9871 pf->vsi = kzalloc(len, GFP_KERNEL); i40e_probe()
9872 if (!pf->vsi) { i40e_probe()
9879 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && i40e_probe()
9880 (pf->flags & I40E_FLAG_MSIX_ENABLED) && i40e_probe()
9881 !test_bit(__I40E_BAD_EEPROM, &pf->state)) { i40e_probe()
9883 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; i40e_probe()
9886 err = i40e_setup_pf_switch(pf, false); i40e_probe()
9892 for (i = 0; i < pf->num_alloc_vsi; i++) { i40e_probe()
9893 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) { i40e_probe()
9894 i40e_vsi_open(pf->vsi[i]); i40e_probe()
9902 err = i40e_aq_set_phy_int_mask(&pf->hw, i40e_probe()
9906 dev_info(&pf->pdev->dev, "set phy mask fail, aq_err %d\n", err); i40e_probe()
9908 if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) || i40e_probe()
9909 (pf->hw.aq.fw_maj_ver < 4)) { i40e_probe()
9911 err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL); i40e_probe()
9913 dev_info(&pf->pdev->dev, "link restart failed, aq_err=%d\n", i40e_probe()
9914 pf->hw.aq.asq_last_status); i40e_probe()
9920 clear_bit(__I40E_DOWN, &pf->state); i40e_probe()
9927 if (pf->flags & I40E_FLAG_MSIX_ENABLED) { i40e_probe()
9928 err = i40e_setup_misc_vector(pf); i40e_probe()
9938 if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) && i40e_probe()
9939 (pf->flags & I40E_FLAG_MSIX_ENABLED) && i40e_probe()
9940 !test_bit(__I40E_BAD_EEPROM, &pf->state)) { i40e_probe()
9952 err = i40e_alloc_vfs(pf, pci_num_vf(pdev)); i40e_probe()
9963 i40e_dbg_pf_init(pf); i40e_probe()
9966 i40e_send_version(pf); i40e_probe()
9969 mod_timer(&pf->service_timer, i40e_probe()
9970 round_jiffies(jiffies + pf->service_timer_period)); i40e_probe()
9974 i40e_fcoe_vsi_setup(pf); i40e_probe()
9978 pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status); i40e_probe()
10002 dev_info(&pf->pdev->dev, "get phy abilities failed, aq_err %d, advertised speed settings may not be correct\n", i40e_probe()
10004 pf->hw.phy.link_info.requested_speeds = abilities.link_speed; i40e_probe()
10007 i40e_print_features(pf); i40e_probe()
10013 set_bit(__I40E_DOWN, &pf->state); i40e_probe()
10014 i40e_clear_interrupt_scheme(pf); i40e_probe()
10015 kfree(pf->vsi); i40e_probe()
10017 i40e_reset_interrupt_capability(pf); i40e_probe()
10018 del_timer_sync(&pf->service_timer); i40e_probe()
10023 kfree(pf->qp_pile); i40e_probe()
10030 kfree(pf); i40e_probe()
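The tail of i40e_probe() above (clear the interrupt scheme, free pf->vsi, reset interrupt capability, delete the service timer, free qp_pile, free pf) is the error-unwind side of the function, presumably reached through the usual goto-label ladder so each failure releases exactly what was set up before it, in reverse order. A minimal, generic sketch of that idiom (not the driver's actual labels):

#include <stdlib.h>

static int probe(void)
{
    void *a, *b;
    int err;

    a = malloc(64);                 /* step 1 */
    if (!a) {
        err = -1;
        goto err_a;
    }
    b = malloc(64);                 /* step 2 */
    if (!b) {
        err = -1;
        goto err_b;
    }
    return 0;                       /* success: resources stay allocated */

err_b:
    free(a);                        /* undo step 1 only */
err_a:
    return err;
}

int main(void)
{
    return probe() ? EXIT_FAILURE : EXIT_SUCCESS;
}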
10052 struct i40e_pf *pf = pci_get_drvdata(pdev); i40e_remove() local
10056 i40e_dbg_pf_exit(pf); i40e_remove()
10058 i40e_ptp_stop(pf); i40e_remove()
10061 set_bit(__I40E_DOWN, &pf->state); i40e_remove()
10062 del_timer_sync(&pf->service_timer); i40e_remove()
10063 cancel_work_sync(&pf->service_task); i40e_remove()
10064 i40e_fdir_teardown(pf); i40e_remove()
10066 if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { i40e_remove()
10067 i40e_free_vfs(pf); i40e_remove()
10068 pf->flags &= ~I40E_FLAG_SRIOV_ENABLED; i40e_remove()
10071 i40e_fdir_teardown(pf); i40e_remove()
10077 if (!pf->veb[i]) i40e_remove()
10080 if (pf->veb[i]->uplink_seid == pf->mac_seid || i40e_remove()
10081 pf->veb[i]->uplink_seid == 0) i40e_remove()
10082 i40e_switch_branch_release(pf->veb[i]); i40e_remove()
10088 if (pf->vsi[pf->lan_vsi]) i40e_remove()
10089 i40e_vsi_release(pf->vsi[pf->lan_vsi]); i40e_remove()
10092 if (pf->hw.hmc.hmc_obj) { i40e_remove()
10093 ret_code = i40e_shutdown_lan_hmc(&pf->hw); i40e_remove()
10101 ret_code = i40e_shutdown_adminq(&pf->hw); i40e_remove()
10108 i40e_clear_interrupt_scheme(pf); i40e_remove()
10109 for (i = 0; i < pf->num_alloc_vsi; i++) { i40e_remove()
10110 if (pf->vsi[i]) { i40e_remove()
10111 i40e_vsi_clear_rings(pf->vsi[i]); i40e_remove()
10112 i40e_vsi_clear(pf->vsi[i]); i40e_remove()
10113 pf->vsi[i] = NULL; i40e_remove()
10118 kfree(pf->veb[i]); i40e_remove()
10119 pf->veb[i] = NULL; i40e_remove()
10122 kfree(pf->qp_pile); i40e_remove()
10123 kfree(pf->vsi); i40e_remove()
10125 iounmap(pf->hw.hw_addr); i40e_remove()
10126 kfree(pf); i40e_remove()
10145 struct i40e_pf *pf = pci_get_drvdata(pdev); i40e_pci_error_detected() local
10150 if (!test_bit(__I40E_SUSPENDED, &pf->state)) { i40e_pci_error_detected()
10152 i40e_prep_for_reset(pf); i40e_pci_error_detected()
10171 struct i40e_pf *pf = pci_get_drvdata(pdev); i40e_pci_error_slot_reset() local
10187 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG); i40e_pci_error_slot_reset()
10214 struct i40e_pf *pf = pci_get_drvdata(pdev); i40e_pci_error_resume() local
10217 if (test_bit(__I40E_SUSPENDED, &pf->state)) i40e_pci_error_resume()
10221 i40e_handle_reset_warning(pf); i40e_pci_error_resume()
10231 struct i40e_pf *pf = pci_get_drvdata(pdev); i40e_shutdown() local
10232 struct i40e_hw *hw = &pf->hw; i40e_shutdown()
10234 set_bit(__I40E_SUSPENDED, &pf->state); i40e_shutdown()
10235 set_bit(__I40E_DOWN, &pf->state); i40e_shutdown()
10237 i40e_prep_for_reset(pf); i40e_shutdown()
10240 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); i40e_shutdown()
10241 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); i40e_shutdown()
10243 i40e_clear_interrupt_scheme(pf); i40e_shutdown()
10246 pci_wake_from_d3(pdev, pf->wol_en); i40e_shutdown()
10258 struct i40e_pf *pf = pci_get_drvdata(pdev); i40e_suspend() local
10259 struct i40e_hw *hw = &pf->hw; i40e_suspend()
10261 set_bit(__I40E_SUSPENDED, &pf->state); i40e_suspend()
10262 set_bit(__I40E_DOWN, &pf->state); i40e_suspend()
10263 del_timer_sync(&pf->service_timer); i40e_suspend()
10264 cancel_work_sync(&pf->service_task); i40e_suspend()
10265 i40e_fdir_teardown(pf); i40e_suspend()
10268 i40e_prep_for_reset(pf); i40e_suspend()
10271 wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); i40e_suspend()
10272 wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); i40e_suspend()
10274 pci_wake_from_d3(pdev, pf->wol_en); i40e_suspend()
10286 struct i40e_pf *pf = pci_get_drvdata(pdev); i40e_resume() local
10309 if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) { i40e_resume()
10310 clear_bit(__I40E_DOWN, &pf->state); i40e_resume()
10312 i40e_reset_and_rebuild(pf, false); i40e_resume()
i40e_virtchnl_pf.c
33 * @pf: pointer to the PF structure
41 static void i40e_vc_vf_broadcast(struct i40e_pf *pf, i40e_vc_vf_broadcast() argument
46 struct i40e_hw *hw = &pf->hw; i40e_vc_vf_broadcast()
47 struct i40e_vf *vf = pf->vf; i40e_vc_vf_broadcast()
50 for (i = 0; i < pf->num_alloc_vfs; i++, vf++) { i40e_vc_vf_broadcast()
74 struct i40e_pf *pf = vf->pf; i40e_vc_notify_vf_link_state() local
75 struct i40e_hw *hw = &pf->hw; i40e_vc_notify_vf_link_state()
76 struct i40e_link_status *ls = &pf->hw.phy.link_info; i40e_vc_notify_vf_link_state()
96 * @pf: pointer to the PF structure
100 void i40e_vc_notify_link_state(struct i40e_pf *pf) i40e_vc_notify_link_state() argument
104 for (i = 0; i < pf->num_alloc_vfs; i++) i40e_vc_notify_link_state()
105 i40e_vc_notify_vf_link_state(&pf->vf[i]); i40e_vc_notify_link_state()
110 * @pf: pointer to the PF structure
114 void i40e_vc_notify_reset(struct i40e_pf *pf) i40e_vc_notify_reset() argument
120 i40e_vc_vf_broadcast(pf, I40E_VIRTCHNL_OP_EVENT, 0, i40e_vc_notify_reset()
136 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) i40e_vc_notify_vf_reset()
144 abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id; i40e_vc_notify_vf_reset()
148 i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT, i40e_vc_notify_vf_reset()
156 * @pf: pointer to the PF info
161 static inline void i40e_vc_disable_vf(struct i40e_pf *pf, struct i40e_vf *vf) i40e_vc_disable_vf() argument
163 struct i40e_hw *hw = &pf->hw; i40e_vc_disable_vf()
181 struct i40e_pf *pf = vf->pf; i40e_vc_isvalid_vsi_id() local
182 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); i40e_vc_isvalid_vsi_id()
198 struct i40e_pf *pf = vf->pf; i40e_vc_isvalid_queue_id() local
199 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); i40e_vc_isvalid_queue_id()
213 struct i40e_pf *pf = vf->pf; i40e_vc_isvalid_vector_id() local
215 return vector_id < pf->hw.func_caps.num_msix_vectors_vf; i40e_vc_isvalid_vector_id()
231 struct i40e_pf *pf = vf->pf; i40e_vc_get_pf_queue_id() local
232 struct i40e_vsi *vsi = i40e_find_vsi_from_id(pf, vsi_id); i40e_vc_get_pf_queue_id()
261 struct i40e_pf *pf = vf->pf; i40e_config_irq_link_list() local
262 struct i40e_hw *hw = &pf->hw; i40e_config_irq_link_list()
275 ((pf->hw.func_caps.num_msix_vectors_vf - 1) * vf->vf_id) + i40e_config_irq_link_list()
362 struct i40e_pf *pf = vf->pf; i40e_config_vsi_tx_queue() local
363 struct i40e_hw *hw = &pf->hw; i40e_config_vsi_tx_queue()
371 vsi = i40e_find_vsi_from_id(pf, vsi_id); i40e_config_vsi_tx_queue()
387 dev_err(&pf->pdev->dev, i40e_config_vsi_tx_queue()
397 dev_err(&pf->pdev->dev, i40e_config_vsi_tx_queue()
431 struct i40e_pf *pf = vf->pf; i40e_config_vsi_rx_queue() local
432 struct i40e_hw *hw = &pf->hw; i40e_config_vsi_rx_queue()
488 dev_err(&pf->pdev->dev, i40e_config_vsi_rx_queue()
498 dev_err(&pf->pdev->dev, i40e_config_vsi_rx_queue()
519 struct i40e_pf *pf = vf->pf; i40e_alloc_vsi_res() local
523 vsi = i40e_vsi_setup(pf, type, pf->vsi[pf->lan_vsi]->seid, vf->vf_id); i40e_alloc_vsi_res()
526 dev_err(&pf->pdev->dev, i40e_alloc_vsi_res()
528 vf->vf_id, pf->hw.aq.asq_last_status); i40e_alloc_vsi_res()
547 dev_info(&pf->pdev->dev, i40e_alloc_vsi_res()
552 dev_info(&pf->pdev->dev, i40e_alloc_vsi_res()
559 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); i40e_alloc_vsi_res()
563 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, i40e_alloc_vsi_res()
566 dev_err(&pf->pdev->dev, "Unable to set tx rate, VF %d, error code %d.\n", i40e_alloc_vsi_res()
582 struct i40e_pf *pf = vf->pf; i40e_enable_vf_mappings() local
583 struct i40e_hw *hw = &pf->hw; i40e_enable_vf_mappings()
599 for (j = 0; j < pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; j++) { i40e_enable_vf_mappings()
608 if (j * 2 >= pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs) { i40e_enable_vf_mappings()
632 struct i40e_pf *pf = vf->pf; i40e_disable_vf_mappings() local
633 struct i40e_hw *hw = &pf->hw; i40e_disable_vf_mappings()
652 struct i40e_pf *pf = vf->pf; i40e_free_vf_res() local
653 struct i40e_hw *hw = &pf->hw; i40e_free_vf_res()
659 i40e_vsi_release(pf->vsi[vf->lan_vsi_idx]); i40e_free_vf_res()
663 msix_vf = pf->hw.func_caps.num_msix_vectors_vf; i40e_free_vf_res()
707 struct i40e_pf *pf = vf->pf; i40e_alloc_vf_res() local
715 total_queue_pairs += pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; i40e_alloc_vf_res()
744 struct i40e_pf *pf = vf->pf; i40e_quiesce_vf_pci() local
745 struct i40e_hw *hw = &pf->hw; i40e_quiesce_vf_pci()
771 struct i40e_pf *pf = vf->pf; i40e_reset_vf() local
772 struct i40e_hw *hw = &pf->hw; i40e_reset_vf()
777 if (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) i40e_reset_vf()
795 dev_err(&pf->pdev->dev, "VF %d PCI transactions stuck\n", i40e_reset_vf()
819 dev_err(&pf->pdev->dev, "VF reset check timeout on VF %d\n", i40e_reset_vf()
831 i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false); i40e_reset_vf()
842 clear_bit(__I40E_VF_DISABLE, &pf->state); i40e_reset_vf()
847 * @pf: pointer to the PF structure
851 void i40e_free_vfs(struct i40e_pf *pf) i40e_free_vfs() argument
853 struct i40e_hw *hw = &pf->hw; i40e_free_vfs()
857 if (!pf->vf) i40e_free_vfs()
859 while (test_and_set_bit(__I40E_VF_DISABLE, &pf->state)) i40e_free_vfs()
862 for (i = 0; i < pf->num_alloc_vfs; i++) i40e_free_vfs()
863 if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) i40e_free_vfs()
864 i40e_vsi_control_rings(pf->vsi[pf->vf[i].lan_vsi_idx], i40e_free_vfs()
871 if (!pci_vfs_assigned(pf->pdev)) i40e_free_vfs()
872 pci_disable_sriov(pf->pdev); i40e_free_vfs()
874 dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); i40e_free_vfs()
879 tmp = pf->num_alloc_vfs; i40e_free_vfs()
880 pf->num_alloc_vfs = 0; i40e_free_vfs()
882 if (test_bit(I40E_VF_STAT_INIT, &pf->vf[i].vf_states)) i40e_free_vfs()
883 i40e_free_vf_res(&pf->vf[i]); i40e_free_vfs()
885 i40e_disable_vf_mappings(&pf->vf[i]); i40e_free_vfs()
888 kfree(pf->vf); i40e_free_vfs()
889 pf->vf = NULL; i40e_free_vfs()
895 if (!pci_vfs_assigned(pf->pdev)) { i40e_free_vfs()
905 clear_bit(__I40E_VF_DISABLE, &pf->state); i40e_free_vfs()
911 * @pf: pointer to the PF structure
916 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs) i40e_alloc_vfs() argument
922 i40e_irq_dynamic_disable_icr0(pf); i40e_alloc_vfs()
925 if (pci_num_vf(pf->pdev) != num_alloc_vfs) { i40e_alloc_vfs()
926 ret = pci_enable_sriov(pf->pdev, num_alloc_vfs); i40e_alloc_vfs()
928 dev_err(&pf->pdev->dev, i40e_alloc_vfs()
930 pf->num_alloc_vfs = 0; i40e_alloc_vfs()
940 pf->vf = vfs; i40e_alloc_vfs()
944 vfs[i].pf = pf; i40e_alloc_vfs()
957 pf->num_alloc_vfs = num_alloc_vfs; i40e_alloc_vfs()
961 i40e_free_vfs(pf); i40e_alloc_vfs()
964 i40e_irq_dynamic_enable_icr0(pf); i40e_alloc_vfs()
979 struct i40e_pf *pf = pci_get_drvdata(pdev); i40e_pci_sriov_enable() local
985 i40e_free_vfs(pf); i40e_pci_sriov_enable()
989 if (num_vfs > pf->num_req_vfs) { i40e_pci_sriov_enable()
994 err = i40e_alloc_vfs(pf, num_vfs); i40e_pci_sriov_enable()
1019 struct i40e_pf *pf = pci_get_drvdata(pdev); i40e_pci_sriov_configure() local
1022 if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { i40e_pci_sriov_configure()
1023 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; i40e_pci_sriov_configure()
1024 i40e_do_reset_safe(pf, i40e_pci_sriov_configure()
1030 if (!pci_vfs_assigned(pf->pdev)) { i40e_pci_sriov_configure()
1031 i40e_free_vfs(pf); i40e_pci_sriov_configure()
1032 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED; i40e_pci_sriov_configure()
1033 i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED)); i40e_pci_sriov_configure()
1056 struct i40e_pf *pf; i40e_vc_send_msg_to_vf() local
1062 if (!vf || vf->vf_id >= vf->pf->num_alloc_vfs) i40e_vc_send_msg_to_vf()
1065 pf = vf->pf; i40e_vc_send_msg_to_vf()
1066 hw = &pf->hw; i40e_vc_send_msg_to_vf()
1072 dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n", i40e_vc_send_msg_to_vf()
1076 dev_err(&pf->pdev->dev, i40e_vc_send_msg_to_vf()
1079 dev_err(&pf->pdev->dev, "Use PF Control I/F to enable the VF\n"); i40e_vc_send_msg_to_vf()
1089 dev_err(&pf->pdev->dev, i40e_vc_send_msg_to_vf()
1091 vf->vf_id, pf->hw.aq.asq_last_status); i40e_vc_send_msg_to_vf()
1142 struct i40e_pf *pf = vf->pf; i40e_vc_get_vf_resources_msg() local
1165 vsi = pf->vsi[vf->lan_vsi_idx]; i40e_vc_get_vf_resources_msg()
1171 vfres->max_vectors = pf->hw.func_caps.num_msix_vectors_vf; i40e_vc_get_vf_resources_msg()
1176 pf->vsi[vf->lan_vsi_idx]->alloc_queue_pairs; i40e_vc_get_vf_resources_msg()
1222 struct i40e_pf *pf = vf->pf; i40e_vc_config_promiscuous_mode_msg() local
1223 struct i40e_hw *hw = &pf->hw; i40e_vc_config_promiscuous_mode_msg()
1228 vsi = i40e_find_vsi_from_id(pf, info->vsi_id); i40e_vc_config_promiscuous_mode_msg()
1262 struct i40e_pf *pf = vf->pf; i40e_vc_config_queues_msg() local
1297 pf->vsi[vf->lan_vsi_idx]->num_queue_pairs = qci->num_queue_pairs; i40e_vc_config_queues_msg()
1380 struct i40e_pf *pf = vf->pf; i40e_vc_enable_queues_msg() local
1399 if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], true)) i40e_vc_enable_queues_msg()
1420 struct i40e_pf *pf = vf->pf; i40e_vc_disable_queues_msg() local
1438 if (i40e_vsi_control_rings(pf->vsi[vf->lan_vsi_idx], false)) i40e_vc_disable_queues_msg()
1459 struct i40e_pf *pf = vf->pf; i40e_vc_get_stats_msg() local
1476 vsi = pf->vsi[vf->lan_vsi_idx]; i40e_vc_get_stats_msg()
1502 struct i40e_pf *pf = vf->pf; i40e_check_vf_permission() local
1507 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", macaddr); i40e_check_vf_permission()
1517 dev_err(&pf->pdev->dev, i40e_check_vf_permission()
1536 struct i40e_pf *pf = vf->pf; i40e_vc_add_mac_addr_msg() local
1554 vsi = pf->vsi[vf->lan_vsi_idx]; i40e_vc_add_mac_addr_msg()
1571 dev_err(&pf->pdev->dev, i40e_vc_add_mac_addr_msg()
1580 dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); i40e_vc_add_mac_addr_msg()
1600 struct i40e_pf *pf = vf->pf; i40e_vc_del_mac_addr_msg() local
1616 dev_err(&pf->pdev->dev, "invalid VF MAC addr %pM\n", i40e_vc_del_mac_addr_msg()
1622 vsi = pf->vsi[vf->lan_vsi_idx]; i40e_vc_del_mac_addr_msg()
1631 dev_err(&pf->pdev->dev, "Unable to program VF MAC filters\n"); i40e_vc_del_mac_addr_msg()
1651 struct i40e_pf *pf = vf->pf; i40e_vc_add_vlan_msg() local
1667 dev_err(&pf->pdev->dev, i40e_vc_add_vlan_msg()
1672 vsi = pf->vsi[vf->lan_vsi_idx]; i40e_vc_add_vlan_msg()
1683 dev_err(&pf->pdev->dev, i40e_vc_add_vlan_msg()
1705 struct i40e_pf *pf = vf->pf; i40e_vc_remove_vlan_msg() local
1725 vsi = pf->vsi[vf->lan_vsi_idx]; i40e_vc_remove_vlan_msg()
1734 dev_err(&pf->pdev->dev, i40e_vc_remove_vlan_msg()
1852 * @pf: pointer to the PF structure
1861 int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode, i40e_vc_process_vf_msg() argument
1864 struct i40e_hw *hw = &pf->hw; i40e_vc_process_vf_msg()
1869 pf->vf_aq_requests++; i40e_vc_process_vf_msg()
1870 if (local_vf_id >= pf->num_alloc_vfs) i40e_vc_process_vf_msg()
1872 vf = &(pf->vf[local_vf_id]); i40e_vc_process_vf_msg()
1877 dev_err(&pf->pdev->dev, "Invalid message from VF %d, opcode %d, len %d\n", i40e_vc_process_vf_msg()
1926 dev_err(&pf->pdev->dev, "Unsupported opcode %d from VF %d\n", i40e_vc_process_vf_msg()
1938 * @pf: pointer to the PF structure
1943 int i40e_vc_process_vflr_event(struct i40e_pf *pf) i40e_vc_process_vflr_event() argument
1946 struct i40e_hw *hw = &pf->hw; i40e_vc_process_vflr_event()
1949 if (!test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state)) i40e_vc_process_vflr_event()
1958 clear_bit(__I40E_VFLR_EVENT_PENDING, &pf->state); i40e_vc_process_vflr_event()
1959 for (vf_id = 0; vf_id < pf->num_alloc_vfs; vf_id++) { i40e_vc_process_vflr_event()
1963 vf = &pf->vf[vf_id]; i40e_vc_process_vflr_event()
1969 if (!test_bit(__I40E_DOWN, &pf->state)) i40e_vc_process_vflr_event()
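The ndo_* entry points that follow all repeat the same guard before touching pf->vf: reject vf_id values at or beyond num_alloc_vfs, then check that the VF's VSI is initialized. A stripped-down model of that bounds check (struct and names invented for illustration):

#include <stdio.h>

struct vf { int lan_vsi_idx; };

/* Returns NULL for an out-of-range id; the caller reports -EINVAL. */
static struct vf *get_vf(struct vf *vfs, int num_alloc_vfs, int vf_id)
{
    if (vf_id < 0 || vf_id >= num_alloc_vfs)
        return NULL;
    return &vfs[vf_id];
}

int main(void)
{
    struct vf vfs[4] = { { 0 } };

    printf("vf 2: %p\n", (void *)get_vf(vfs, 4, 2));
    printf("vf 7: %p\n", (void *)get_vf(vfs, 4, 7));    /* out of range */
    return 0;
}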
1989 struct i40e_pf *pf = vsi->back; i40e_ndo_set_vf_mac() local
1995 if (vf_id >= pf->num_alloc_vfs) { i40e_ndo_set_vf_mac()
1996 dev_err(&pf->pdev->dev, i40e_ndo_set_vf_mac()
2002 vf = &(pf->vf[vf_id]); i40e_ndo_set_vf_mac()
2003 vsi = pf->vsi[vf->lan_vsi_idx]; i40e_ndo_set_vf_mac()
2005 dev_err(&pf->pdev->dev, i40e_ndo_set_vf_mac()
2012 dev_err(&pf->pdev->dev, i40e_ndo_set_vf_mac()
2028 dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id); i40e_ndo_set_vf_mac()
2031 dev_err(&pf->pdev->dev, "Unable to program ucast filters\n"); i40e_ndo_set_vf_mac()
2038 i40e_vc_disable_vf(pf, vf); i40e_ndo_set_vf_mac()
2039 dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n"); i40e_ndo_set_vf_mac()
2058 struct i40e_pf *pf = np->vsi->back; i40e_ndo_set_vf_port_vlan() local
2064 if (vf_id >= pf->num_alloc_vfs) { i40e_ndo_set_vf_port_vlan()
2065 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); i40e_ndo_set_vf_port_vlan()
2071 dev_err(&pf->pdev->dev, "Invalid VF Parameters\n"); i40e_ndo_set_vf_port_vlan()
2076 vf = &(pf->vf[vf_id]); i40e_ndo_set_vf_port_vlan()
2077 vsi = pf->vsi[vf->lan_vsi_idx]; i40e_ndo_set_vf_port_vlan()
2079 dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); i40e_ndo_set_vf_port_vlan()
2085 dev_err(&pf->pdev->dev, i40e_ndo_set_vf_port_vlan()
2092 i40e_vc_disable_vf(pf, vf); i40e_ndo_set_vf_port_vlan()
2115 ret, pf->hw.aq.asq_last_status); i40e_ndo_set_vf_port_vlan()
2125 dev_info(&pf->pdev->dev, "Setting VLAN %d, QOS 0x%x on VF %d\n", i40e_ndo_set_vf_port_vlan()
2143 dev_err(&pf->pdev->dev, "Unable to update VF vsi context\n"); i40e_ndo_set_vf_port_vlan()
2170 struct i40e_pf *pf = np->vsi->back; i40e_ndo_set_vf_bw() local
2177 if (vf_id >= pf->num_alloc_vfs) { i40e_ndo_set_vf_bw()
2178 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d.\n", vf_id); i40e_ndo_set_vf_bw()
2184 dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n", i40e_ndo_set_vf_bw()
2189 vf = &(pf->vf[vf_id]); i40e_ndo_set_vf_bw()
2190 vsi = pf->vsi[vf->lan_vsi_idx]; i40e_ndo_set_vf_bw()
2192 dev_err(&pf->pdev->dev, "Uninitialized VF %d.\n", vf_id); i40e_ndo_set_vf_bw()
2197 switch (pf->hw.phy.link_info.link_speed) { i40e_ndo_set_vf_bw()
2212 dev_err(&pf->pdev->dev, "Invalid max tx rate %d specified for VF %d.", i40e_ndo_set_vf_bw()
2219 dev_warn(&pf->pdev->dev, "Setting max Tx rate to minimum usable value of 50Mbps.\n"); i40e_ndo_set_vf_bw()
2224 ret = i40e_aq_config_vsi_bw_limit(&pf->hw, vsi->seid, i40e_ndo_set_vf_bw()
2228 dev_err(&pf->pdev->dev, "Unable to set max tx rate, error code %d.\n", i40e_ndo_set_vf_bw()
2251 struct i40e_pf *pf = vsi->back; i40e_ndo_get_vf_config() local
2256 if (vf_id >= pf->num_alloc_vfs) { i40e_ndo_get_vf_config()
2257 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); i40e_ndo_get_vf_config()
2262 vf = &(pf->vf[vf_id]); i40e_ndo_get_vf_config()
2264 vsi = pf->vsi[vf->lan_vsi_idx]; i40e_ndo_get_vf_config()
2266 dev_err(&pf->pdev->dev, "Uninitialized VF %d\n", vf_id); i40e_ndo_get_vf_config()
2304 struct i40e_pf *pf = np->vsi->back; i40e_ndo_set_vf_link_state() local
2306 struct i40e_hw *hw = &pf->hw; i40e_ndo_set_vf_link_state()
2312 if (vf_id >= pf->num_alloc_vfs) { i40e_ndo_set_vf_link_state()
2313 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); i40e_ndo_set_vf_link_state()
2318 vf = &pf->vf[vf_id]; i40e_ndo_set_vf_link_state()
2328 pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP; i40e_ndo_set_vf_link_state()
2330 pf->hw.phy.link_info.link_speed; i40e_ndo_set_vf_link_state()
2368 struct i40e_pf *pf = vsi->back; i40e_ndo_set_vf_spoofchk() local
2370 struct i40e_hw *hw = &pf->hw; i40e_ndo_set_vf_spoofchk()
2375 if (vf_id >= pf->num_alloc_vfs) { i40e_ndo_set_vf_spoofchk()
2376 dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); i40e_ndo_set_vf_spoofchk()
2381 vf = &(pf->vf[vf_id]); i40e_ndo_set_vf_spoofchk()
2388 ctxt.seid = pf->vsi[vf->lan_vsi_idx]->seid; i40e_ndo_set_vf_spoofchk()
2389 ctxt.pf_num = pf->hw.pf_id; i40e_ndo_set_vf_spoofchk()
2396 dev_err(&pf->pdev->dev, "Error %d updating VSI parameters\n", i40e_ndo_set_vf_spoofchk()
i40e_dcb_nl.c
57 struct i40e_pf *pf = i40e_netdev_to_pf(dev); i40e_dcbnl_ieee_getets() local
59 struct i40e_hw *hw = &pf->hw; i40e_dcbnl_ieee_getets()
61 if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) i40e_dcbnl_ieee_getets()
96 struct i40e_pf *pf = i40e_netdev_to_pf(dev); i40e_dcbnl_ieee_getpfc() local
98 struct i40e_hw *hw = &pf->hw; i40e_dcbnl_ieee_getpfc()
101 if (!(pf->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) i40e_dcbnl_ieee_getpfc()
112 pfc->requests[i] = pf->stats.priority_xoff_tx[i]; i40e_dcbnl_ieee_getpfc()
113 pfc->indications[i] = pf->stats.priority_xoff_rx[i]; i40e_dcbnl_ieee_getpfc()
127 struct i40e_pf *pf = i40e_netdev_to_pf(dev); i40e_dcbnl_getdcbx() local
129 return pf->dcbx_cap; i40e_dcbnl_getdcbx()
141 struct i40e_pf *pf = i40e_netdev_to_pf(dev); i40e_dcbnl_get_perm_hw_addr() local
147 perm_addr[i] = pf->hw.mac.perm_addr[i]; i40e_dcbnl_get_perm_hw_addr()
150 perm_addr[i] = pf->hw.mac.san_addr[j]; i40e_dcbnl_get_perm_hw_addr()
170 struct i40e_pf *pf = i40e_netdev_to_pf(dev); i40e_dcbnl_set_all() local
172 struct i40e_hw *hw = &pf->hw; i40e_dcbnl_set_all()
178 if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) i40e_dcbnl_set_all()
182 if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi)) i40e_dcbnl_set_all()
230 * @pf: the corresponding PF
235 static void i40e_dcbnl_del_app(struct i40e_pf *pf, i40e_dcbnl_del_app() argument
239 for (v = 0; v < pf->num_alloc_vsi; v++) { i40e_dcbnl_del_app()
240 if (pf->vsi[v] && pf->vsi[v]->netdev) { i40e_dcbnl_del_app()
241 err = i40e_dcbnl_vsi_del_app(pf->vsi[v], app); i40e_dcbnl_del_app()
243 dev_info(&pf->pdev->dev, "%s: Failed deleting app for VSI seid=%d err=%d sel=%d proto=0x%x prio=%d\n", i40e_dcbnl_del_app()
244 __func__, pf->vsi[v]->seid, i40e_dcbnl_del_app()
275 * @pf: the corresponding PF
282 void i40e_dcbnl_flush_apps(struct i40e_pf *pf, i40e_dcbnl_flush_apps() argument
290 if ((pf->flags & I40E_FLAG_MFP_ENABLED) && !(pf->hw.func_caps.iscsi)) i40e_dcbnl_flush_apps()
297 i40e_dcbnl_del_app(pf, &app); i40e_dcbnl_flush_apps()
310 struct i40e_pf *pf = i40e_netdev_to_pf(dev); i40e_dcbnl_setup() local
313 if (!(pf->flags & I40E_FLAG_DCB_CAPABLE)) i40e_dcbnl_setup()
i40e_ptp.c
53 * @pf: Board private structure
60 static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts) i40e_ptp_read() argument
62 struct i40e_hw *hw = &pf->hw; i40e_ptp_read()
77 * @pf: Board private structure
84 static void i40e_ptp_write(struct i40e_pf *pf, const struct timespec64 *ts) i40e_ptp_write() argument
86 struct i40e_hw *hw = &pf->hw; i40e_ptp_write()
123 struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); i40e_ptp_adjfreq() local
124 struct i40e_hw *hw = &pf->hw; i40e_ptp_adjfreq()
134 adj = ACCESS_ONCE(pf->ptp_base_adj); i40e_ptp_adjfreq()
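The adjfreq callback above starts from the cached base increment (pf->ptp_base_adj) and scales it by the requested signed parts-per-billion offset before reprogramming the hardware. A self-contained sketch of just that scaling arithmetic; the base value is made up and the register writes are omitted:

#include <stdint.h>
#include <stdio.h>

static uint64_t scale_incval(uint64_t base, int32_t ppb)
{
    int neg = ppb < 0;
    uint64_t diff;

    if (neg)
        ppb = -ppb;
    diff = (base * (uint64_t)ppb) / 1000000000ULL;
    return neg ? base - diff : base + diff;
}

int main(void)
{
    uint64_t base = 0x100000000ULL;     /* assumed nominal increment */

    /* Slow the clock by 100 ppb, then speed it up by 100 ppb. */
    printf("%llu\n", (unsigned long long)scale_incval(base, -100));
    printf("%llu\n", (unsigned long long)scale_incval(base, 100));
    return 0;
}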
161 struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); i40e_ptp_adjtime() local
165 spin_lock_irqsave(&pf->tmreg_lock, flags); i40e_ptp_adjtime()
167 i40e_ptp_read(pf, &now); i40e_ptp_adjtime()
169 i40e_ptp_write(pf, (const struct timespec64 *)&now); i40e_ptp_adjtime()
171 spin_unlock_irqrestore(&pf->tmreg_lock, flags); i40e_ptp_adjtime()
186 struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); i40e_ptp_gettime() local
189 spin_lock_irqsave(&pf->tmreg_lock, flags); i40e_ptp_gettime()
190 i40e_ptp_read(pf, ts); i40e_ptp_gettime()
191 spin_unlock_irqrestore(&pf->tmreg_lock, flags); i40e_ptp_gettime()
207 struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); i40e_ptp_settime() local
210 spin_lock_irqsave(&pf->tmreg_lock, flags); i40e_ptp_settime()
211 i40e_ptp_write(pf, ts); i40e_ptp_settime()
212 spin_unlock_irqrestore(&pf->tmreg_lock, flags); i40e_ptp_settime()
243 struct i40e_pf *pf = vsi->back; i40e_ptp_rx_hang() local
244 struct i40e_hw *hw = &pf->hw; i40e_ptp_rx_hang()
255 if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx) i40e_ptp_rx_hang()
272 pf->last_rx_ptp_check = jiffies; i40e_ptp_rx_hang()
277 rx_event = pf->last_rx_ptp_check; i40e_ptp_rx_hang()
290 pf->last_rx_ptp_check = jiffies; i40e_ptp_rx_hang()
291 pf->rx_hwtstamp_cleared++; i40e_ptp_rx_hang()
300 * @pf: Board private structure
306 void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf) i40e_ptp_tx_hwtstamp() argument
309 struct i40e_hw *hw = &pf->hw; i40e_ptp_tx_hwtstamp()
313 if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_tx) i40e_ptp_tx_hwtstamp()
317 if (!pf->ptp_tx_skb) i40e_ptp_tx_hwtstamp()
326 skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps); i40e_ptp_tx_hwtstamp()
327 dev_kfree_skb_any(pf->ptp_tx_skb); i40e_ptp_tx_hwtstamp()
328 pf->ptp_tx_skb = NULL; i40e_ptp_tx_hwtstamp()
329 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state); i40e_ptp_tx_hwtstamp()
334 * @pf: Board private structure
344 void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index) i40e_ptp_rx_hwtstamp() argument
353 if (!(pf->flags & I40E_FLAG_PTP) || !pf->ptp_rx) i40e_ptp_rx_hwtstamp()
356 hw = &pf->hw; i40e_ptp_rx_hwtstamp()
373 * @pf: Board private structure
379 void i40e_ptp_set_increment(struct i40e_pf *pf) i40e_ptp_set_increment() argument
382 struct i40e_hw *hw = &pf->hw; i40e_ptp_set_increment()
387 i40e_aq_get_link_info(&pf->hw, true, NULL, NULL); i40e_ptp_set_increment()
401 dev_warn(&pf->pdev->dev, i40e_ptp_set_increment()
422 ACCESS_ONCE(pf->ptp_base_adj) = incval; i40e_ptp_set_increment()
428 * @pf: Board private structure
435 int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr) i40e_ptp_get_ts_config() argument
437 struct hwtstamp_config *config = &pf->tstamp_config; i40e_ptp_get_ts_config()
439 if (!(pf->flags & I40E_FLAG_PTP)) i40e_ptp_get_ts_config()
448 * @pf: Board private structure
458 static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, i40e_ptp_set_timestamp_mode() argument
461 struct i40e_hw *hw = &pf->hw; i40e_ptp_set_timestamp_mode()
470 pf->ptp_tx = false; i40e_ptp_set_timestamp_mode()
473 pf->ptp_tx = true; i40e_ptp_set_timestamp_mode()
481 pf->ptp_rx = false; i40e_ptp_set_timestamp_mode()
492 pf->ptp_rx = true; i40e_ptp_set_timestamp_mode()
507 pf->ptp_rx = true; i40e_ptp_set_timestamp_mode()
528 if (pf->ptp_tx) i40e_ptp_set_timestamp_mode()
535 if (pf->ptp_tx) i40e_ptp_set_timestamp_mode()
545 * ignore Rx timestamps via the pf->ptp_rx flag. i40e_ptp_set_timestamp_mode()
559 * @pf: Board private structure
571 int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr) i40e_ptp_set_ts_config() argument
576 if (!(pf->flags & I40E_FLAG_PTP)) i40e_ptp_set_ts_config()
582 err = i40e_ptp_set_timestamp_mode(pf, &config); i40e_ptp_set_ts_config()
587 pf->tstamp_config = config; i40e_ptp_set_ts_config()
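i40e_ptp_set_ts_config() above is the handler behind the SIOCSHWTSTAMP ioctl. A small runnable user-space request is shown below (it needs CAP_NET_ADMIN); the interface name "eth0" is an assumption, and the driver may grant a broader rx_filter than requested, which is why the structure is read back after the call:

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
    struct hwtstamp_config cfg;
    struct ifreq ifr;
    int fd = socket(AF_INET, SOCK_DGRAM, 0);

    if (fd < 0) {
        perror("socket");
        return 1;
    }

    memset(&cfg, 0, sizeof(cfg));
    cfg.tx_type = HWTSTAMP_TX_ON;
    cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;

    memset(&ifr, 0, sizeof(ifr));
    strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* assumed interface */
    ifr.ifr_data = (void *)&cfg;

    if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
        perror("SIOCSHWTSTAMP");
    else
        printf("granted tx_type=%d rx_filter=%d\n",
               cfg.tx_type, cfg.rx_filter);
    return 0;
}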
595 * @pf: Board private structure
603 static long i40e_ptp_create_clock(struct i40e_pf *pf) i40e_ptp_create_clock() argument
606 if (!IS_ERR_OR_NULL(pf->ptp_clock)) i40e_ptp_create_clock()
609 strncpy(pf->ptp_caps.name, i40e_driver_name, sizeof(pf->ptp_caps.name)); i40e_ptp_create_clock()
610 pf->ptp_caps.owner = THIS_MODULE; i40e_ptp_create_clock()
611 pf->ptp_caps.max_adj = 999999999; i40e_ptp_create_clock()
612 pf->ptp_caps.n_ext_ts = 0; i40e_ptp_create_clock()
613 pf->ptp_caps.pps = 0; i40e_ptp_create_clock()
614 pf->ptp_caps.adjfreq = i40e_ptp_adjfreq; i40e_ptp_create_clock()
615 pf->ptp_caps.adjtime = i40e_ptp_adjtime; i40e_ptp_create_clock()
616 pf->ptp_caps.gettime64 = i40e_ptp_gettime; i40e_ptp_create_clock()
617 pf->ptp_caps.settime64 = i40e_ptp_settime; i40e_ptp_create_clock()
618 pf->ptp_caps.enable = i40e_ptp_feature_enable; i40e_ptp_create_clock()
621 pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev); i40e_ptp_create_clock()
622 if (IS_ERR(pf->ptp_clock)) { i40e_ptp_create_clock()
623 return PTR_ERR(pf->ptp_clock); i40e_ptp_create_clock()
630 pf->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; i40e_ptp_create_clock()
631 pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF; i40e_ptp_create_clock()
638 * @pf: Board private structure
644 void i40e_ptp_init(struct i40e_pf *pf) i40e_ptp_init() argument
646 struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev; i40e_ptp_init()
647 struct i40e_hw *hw = &pf->hw; i40e_ptp_init()
657 pf->flags &= ~I40E_FLAG_PTP; i40e_ptp_init()
658 dev_info(&pf->pdev->dev, "%s: PTP not supported on %s\n", i40e_ptp_init()
667 spin_lock_init(&pf->tmreg_lock); i40e_ptp_init()
670 err = i40e_ptp_create_clock(pf); i40e_ptp_init()
672 pf->ptp_clock = NULL; i40e_ptp_init()
673 dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n", i40e_ptp_init()
679 dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__, i40e_ptp_init()
681 pf->flags |= I40E_FLAG_PTP; i40e_ptp_init()
692 i40e_ptp_set_increment(pf); i40e_ptp_init()
695 i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config); i40e_ptp_init()
699 i40e_ptp_settime(&pf->ptp_caps, &ts); i40e_ptp_init()
705 * @pf: Board private structure
710 void i40e_ptp_stop(struct i40e_pf *pf) i40e_ptp_stop() argument
712 pf->flags &= ~I40E_FLAG_PTP; i40e_ptp_stop()
713 pf->ptp_tx = false; i40e_ptp_stop()
714 pf->ptp_rx = false; i40e_ptp_stop()
716 if (pf->ptp_tx_skb) { i40e_ptp_stop()
717 dev_kfree_skb_any(pf->ptp_tx_skb); i40e_ptp_stop()
718 pf->ptp_tx_skb = NULL; i40e_ptp_stop()
719 clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state); i40e_ptp_stop()
722 if (pf->ptp_clock) { i40e_ptp_stop()
723 ptp_clock_unregister(pf->ptp_clock); i40e_ptp_stop()
724 pf->ptp_clock = NULL; i40e_ptp_stop()
725 dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__, i40e_ptp_stop()
726 pf->vsi[pf->lan_vsi]->netdev->name); i40e_ptp_stop()
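The ts_config handlers indexed above back the standard SIOCGHWTSTAMP/SIOCSHWTSTAMP ioctls. A minimal user-space sketch of driving that path is shown below; the interface name and filter choice are illustrative, not taken from the driver.

/* Request hardware timestamping via SIOCSHWTSTAMP, the ioctl that
 * i40e_ptp_set_ts_config() ultimately services.  "eth0" and the chosen
 * rx_filter are illustrative only.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/net_tstamp.h>
#include <linux/sockios.h>

int main(void)
{
	struct hwtstamp_config cfg = {
		.tx_type   = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&cfg;

	if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
		perror("SIOCSHWTSTAMP");
	else	/* kernel writes back the mode it actually applied */
		printf("tx_type=%d rx_filter=%d\n", cfg.tx_type, cfg.rx_filter);

	close(fd);
	return 0;
}

The driver may upgrade the requested rx_filter to a broader one it supports, which is why the struct is read back after the ioctl.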
H A Di40e_ethtool.c229 * @pf: the PF struct
231 static void i40e_partition_setting_complaint(struct i40e_pf *pf) i40e_partition_setting_complaint() argument
233 dev_info(&pf->pdev->dev, i40e_partition_setting_complaint()
462 struct i40e_pf *pf = np->vsi->back; i40e_get_settings() local
463 struct i40e_hw *hw = &pf->hw; i40e_get_settings()
546 struct i40e_pf *pf = np->vsi->back; i40e_set_settings() local
548 struct i40e_hw *hw = &pf->hw; i40e_set_settings()
560 i40e_partition_setting_complaint(pf); i40e_set_settings()
564 if (vsi != pf->vsi[pf->lan_vsi]) i40e_set_settings()
702 struct i40e_pf *pf = np->vsi->back; i40e_nway_reset() local
703 struct i40e_hw *hw = &pf->hw; i40e_nway_reset()
710 pf->hw.aq.asq_last_status); i40e_nway_reset()
725 struct i40e_pf *pf = np->vsi->back; i40e_get_pauseparam() local
726 struct i40e_hw *hw = &pf->hw; i40e_get_pauseparam()
760 struct i40e_pf *pf = np->vsi->back; i40e_set_pauseparam() local
762 struct i40e_hw *hw = &pf->hw; i40e_set_pauseparam()
774 i40e_partition_setting_complaint(pf); i40e_set_pauseparam()
778 if (vsi != pf->vsi[pf->lan_vsi]) i40e_set_pauseparam()
788 if (!test_bit(__I40E_DOWN, &pf->state) && i40e_set_pauseparam()
837 if (!test_bit(__I40E_DOWN, &pf->state)) { i40e_set_pauseparam()
840 if (!test_bit(__I40E_DOWN, &pf->state)) i40e_set_pauseparam()
850 struct i40e_pf *pf = np->vsi->back; i40e_get_msglevel() local
852 return pf->msg_enable; i40e_get_msglevel()
858 struct i40e_pf *pf = np->vsi->back; i40e_set_msglevel() local
861 pf->hw.debug_mask = data; i40e_set_msglevel()
862 pf->msg_enable = data; i40e_set_msglevel()
880 struct i40e_pf *pf = np->vsi->back; i40e_get_regs() local
881 struct i40e_hw *hw = &pf->hw; i40e_get_regs()
912 struct i40e_pf *pf = np->vsi->back; i40e_get_eeprom() local
938 dev_info(&pf->pdev->dev, i40e_get_eeprom()
956 dev_info(&pf->pdev->dev, i40e_get_eeprom()
976 dev_info(&pf->pdev->dev, i40e_get_eeprom()
982 dev_info(&pf->pdev->dev, i40e_get_eeprom()
987 dev_info(&pf->pdev->dev, i40e_get_eeprom()
1020 struct i40e_pf *pf = np->vsi->back; i40e_set_eeprom() local
1035 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || i40e_set_eeprom()
1036 test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) i40e_set_eeprom()
1045 dev_info(&pf->pdev->dev, i40e_set_eeprom()
1059 struct i40e_pf *pf = vsi->back; i40e_get_drvinfo() local
1064 strlcpy(drvinfo->fw_version, i40e_fw_version_str(&pf->hw), i40e_get_drvinfo()
1066 strlcpy(drvinfo->bus_info, pci_name(pf->pdev), i40e_get_drvinfo()
1075 struct i40e_pf *pf = np->vsi->back; i40e_get_ringparam() local
1076 struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; i40e_get_ringparam()
1094 struct i40e_pf *pf = vsi->back; i40e_set_ringparam() local
1120 while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state)) i40e_set_ringparam()
1232 clear_bit(__I40E_CONFIG_BUSY, &pf->state); i40e_set_ringparam()
1241 struct i40e_pf *pf = vsi->back; i40e_get_sset_count() local
1247 if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) { i40e_get_sset_count()
1250 if (pf->lan_veb != I40E_NO_VEB) i40e_get_sset_count()
1269 struct i40e_pf *pf = vsi->back; i40e_get_ethtool_stats() local
1320 if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) i40e_get_ethtool_stats()
1323 if (pf->lan_veb != I40E_NO_VEB) { i40e_get_ethtool_stats()
1324 struct i40e_veb *veb = pf->veb[pf->lan_veb]; i40e_get_ethtool_stats()
1333 p = (char *)pf + i40e_gstrings_stats[j].stat_offset; i40e_get_ethtool_stats()
1338 data[i++] = pf->stats.priority_xon_tx[j]; i40e_get_ethtool_stats()
1339 data[i++] = pf->stats.priority_xoff_tx[j]; i40e_get_ethtool_stats()
1342 data[i++] = pf->stats.priority_xon_rx[j]; i40e_get_ethtool_stats()
1343 data[i++] = pf->stats.priority_xoff_rx[j]; i40e_get_ethtool_stats()
1346 data[i++] = pf->stats.priority_xon_2_xoff[j]; i40e_get_ethtool_stats()
1354 struct i40e_pf *pf = vsi->back; i40e_get_strings() local
1393 if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) i40e_get_strings()
1396 if (pf->lan_veb != I40E_NO_VEB) { i40e_get_strings()
1446 struct i40e_pf *pf = i40e_netdev_to_pf(dev); i40e_get_ts_info() local
1449 if (!(pf->flags & I40E_FLAG_PTP)) i40e_get_ts_info()
1459 if (pf->ptp_clock) i40e_get_ts_info()
1460 info->phc_index = ptp_clock_index(pf->ptp_clock); i40e_get_ts_info()
1485 struct i40e_pf *pf = np->vsi->back; i40e_link_test() local
1487 netif_info(pf, hw, netdev, "link test\n"); i40e_link_test()
1488 if (i40e_get_link_status(&pf->hw)) i40e_link_test()
1499 struct i40e_pf *pf = np->vsi->back; i40e_reg_test() local
1501 netif_info(pf, hw, netdev, "register test\n"); i40e_reg_test()
1502 *data = i40e_diag_reg_test(&pf->hw); i40e_reg_test()
1510 struct i40e_pf *pf = np->vsi->back; i40e_eeprom_test() local
1512 netif_info(pf, hw, netdev, "eeprom test\n"); i40e_eeprom_test()
1513 *data = i40e_diag_eeprom_test(&pf->hw); i40e_eeprom_test()
1516 pf->hw.nvmupd_state = I40E_NVMUPD_STATE_INIT; i40e_eeprom_test()
1524 struct i40e_pf *pf = np->vsi->back; i40e_intr_test() local
1525 u16 swc_old = pf->sw_int_count; i40e_intr_test()
1527 netif_info(pf, hw, netdev, "interrupt test\n"); i40e_intr_test()
1528 wr32(&pf->hw, I40E_PFINT_DYN_CTL0, i40e_intr_test()
1535 *data = (swc_old == pf->sw_int_count); i40e_intr_test()
1543 struct i40e_pf *pf = np->vsi->back; i40e_loopback_test() local
1545 netif_info(pf, hw, netdev, "loopback test not implemented\n"); i40e_loopback_test()
1556 struct i40e_pf *pf = np->vsi->back; i40e_diag_test() local
1560 netif_info(pf, drv, netdev, "offline testing starting\n"); i40e_diag_test()
1562 set_bit(__I40E_TESTING, &pf->state); i40e_diag_test()
1568 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); i40e_diag_test()
1589 clear_bit(__I40E_TESTING, &pf->state); i40e_diag_test()
1590 i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED)); i40e_diag_test()
1596 netif_info(pf, drv, netdev, "online testing starting\n"); i40e_diag_test()
1608 netif_info(pf, drv, netdev, "testing finished\n"); i40e_diag_test()
1615 struct i40e_pf *pf = np->vsi->back; i40e_get_wol() local
1616 struct i40e_hw *hw = &pf->hw; i40e_get_wol()
1626 wol->wolopts = (pf->wol_en ? WAKE_MAGIC : 0); i40e_get_wol()
1638 struct i40e_pf *pf = np->vsi->back; i40e_set_wol() local
1640 struct i40e_hw *hw = &pf->hw; i40e_set_wol()
1645 i40e_partition_setting_complaint(pf); i40e_set_wol()
1649 if (vsi != pf->vsi[pf->lan_vsi]) i40e_set_wol()
1662 if (pf->wol_en != !!wol->wolopts) { i40e_set_wol()
1663 pf->wol_en = !!wol->wolopts; i40e_set_wol()
1664 device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en); i40e_set_wol()
1674 struct i40e_pf *pf = np->vsi->back; i40e_set_phys_id() local
1675 struct i40e_hw *hw = &pf->hw; i40e_set_phys_id()
1680 pf->led_status = i40e_led_get(hw); i40e_set_phys_id()
1689 i40e_led_set(hw, pf->led_status, false); i40e_set_phys_id()
1730 struct i40e_pf *pf = vsi->back; i40e_set_coalesce() local
1731 struct i40e_hw *hw = &pf->hw; i40e_set_coalesce()
1745 netif_info(pf, drv, netdev, "rx-usecs=0, need to disable adaptive-rx for a complete disable\n"); i40e_set_coalesce()
1747 netif_info(pf, drv, netdev, "Invalid value, rx-usecs range is 0-8160\n"); i40e_set_coalesce()
1757 netif_info(pf, drv, netdev, "tx-usecs=0, need to disable adaptive-tx for a complete disable\n"); i40e_set_coalesce()
1759 netif_info(pf, drv, netdev, i40e_set_coalesce()
1788 * @pf: pointer to the physical function struct
1793 static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) i40e_get_rss_hash_opts() argument
1797 if (pf->vsi[pf->lan_vsi]->rxnfc.data != 0) { i40e_get_rss_hash_opts()
1798 cmd->data = pf->vsi[pf->lan_vsi]->rxnfc.data; i40e_get_rss_hash_opts()
1799 cmd->flow_type = pf->vsi[pf->lan_vsi]->rxnfc.flow_type; i40e_get_rss_hash_opts()
1835 * @pf: Pointer to the physical function struct
1844 static int i40e_get_ethtool_fdir_all(struct i40e_pf *pf, i40e_get_ethtool_fdir_all() argument
1853 cmd->data = i40e_get_fd_cnt_all(pf); i40e_get_ethtool_fdir_all()
1856 &pf->fdir_filter_list, fdir_node) { i40e_get_ethtool_fdir_all()
1871 * @pf: Pointer to the physical function struct
1879 static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf, i40e_get_ethtool_fdir_entry() argument
1888 &pf->fdir_filter_list, fdir_node) { i40e_get_ethtool_fdir_entry()
1916 if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) { i40e_get_ethtool_fdir_entry()
1919 vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi); i40e_get_ethtool_fdir_entry()
1941 struct i40e_pf *pf = vsi->back; i40e_get_rxnfc() local
1950 ret = i40e_get_rss_hash_opts(pf, cmd); i40e_get_rxnfc()
1953 cmd->rule_cnt = pf->fdir_pf_active_filters; i40e_get_rxnfc()
1955 cmd->data = i40e_get_fd_cnt_all(pf); i40e_get_rxnfc()
1959 ret = i40e_get_ethtool_fdir_entry(pf, cmd); i40e_get_rxnfc()
1962 ret = i40e_get_ethtool_fdir_all(pf, cmd, rule_locs); i40e_get_rxnfc()
1973 * @pf: pointer to the physical function struct
1978 static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) i40e_set_rss_hash_opt() argument
1980 struct i40e_hw *hw = &pf->hw; i40e_set_rss_hash_opt()
2084 pf->vsi[pf->lan_vsi]->rxnfc = *nfc; i40e_set_rss_hash_opt()
2125 struct i40e_pf *pf = vsi->back; i40e_update_ethtool_fdir_entry() local
2133 &pf->fdir_filter_list, fdir_node) { i40e_update_ethtool_fdir_entry()
2148 pf->fdir_pf_active_filters--; i40e_update_ethtool_fdir_entry()
2165 &pf->fdir_filter_list); i40e_update_ethtool_fdir_entry()
2168 pf->fdir_pf_active_filters++; i40e_update_ethtool_fdir_entry()
2188 struct i40e_pf *pf = vsi->back; i40e_del_fdir_entry() local
2191 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || i40e_del_fdir_entry()
2192 test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) i40e_del_fdir_entry()
2195 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) i40e_del_fdir_entry()
2200 i40e_fdir_check_and_reenable(pf); i40e_del_fdir_entry()
2217 struct i40e_pf *pf; i40e_add_fdir_ethtool() local
2224 pf = vsi->back; i40e_add_fdir_ethtool()
2226 if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED)) i40e_add_fdir_ethtool()
2229 if (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED) i40e_add_fdir_ethtool()
2232 if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) || i40e_add_fdir_ethtool()
2233 test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) i40e_add_fdir_ethtool()
2236 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) i40e_add_fdir_ethtool()
2241 if (fsp->location >= (pf->hw.func_caps.fd_filters_best_effort + i40e_add_fdir_ethtool()
2242 pf->hw.func_caps.fd_filters_guaranteed)) { i40e_add_fdir_ethtool()
2268 input->cnt_index = pf->fd_sb_cnt_idx; i40e_add_fdir_ethtool()
2281 if (ntohl(fsp->h_ext.data[1]) >= pf->num_alloc_vfs) { i40e_add_fdir_ethtool()
2282 netif_info(pf, drv, vsi->netdev, "Invalid VF id\n"); i40e_add_fdir_ethtool()
2287 input->dest_vsi = pf->vf[vf_id].lan_vsi_id; i40e_add_fdir_ethtool()
2288 if (input->q_index >= pf->vf[vf_id].num_queue_pairs) { i40e_add_fdir_ethtool()
2289 netif_info(pf, drv, vsi->netdev, "Invalid queue id\n"); i40e_add_fdir_ethtool()
2315 struct i40e_pf *pf = vsi->back; i40e_set_rxnfc() local
2320 ret = i40e_set_rss_hash_opt(pf, cmd); i40e_set_rxnfc()
2360 struct i40e_pf *pf = vsi->back; i40e_get_channels() local
2366 ch->other_count = (pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0; i40e_get_channels()
2387 struct i40e_pf *pf = vsi->back; i40e_set_channels() local
2399 if (ch->other_count != ((pf->flags & I40E_FLAG_FD_SB_ENABLED) ? 1 : 0)) i40e_set_channels()
2412 new_count = i40e_reconfig_rss_queues(pf, count); i40e_set_channels()
2447 struct i40e_pf *pf = vsi->back; i40e_get_rxfh() local
2448 struct i40e_hw *hw = &pf->hw; i40e_get_rxfh()
2492 struct i40e_pf *pf = vsi->back; i40e_set_rxfh() local
2493 struct i40e_hw *hw = &pf->hw; i40e_set_rxfh()
2537 struct i40e_pf *pf = vsi->back; i40e_get_priv_flags() local
2540 ret_flags |= pf->hw.func_caps.npar_enable ? i40e_get_priv_flags()
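Among the ethtool callbacks indexed above, i40e_get_ts_info() exports the PHC index through the generic ETHTOOL_GET_TS_INFO request. A small user-space sketch of querying it follows; the interface name is illustrative.

/* Query the PHC index reported by the driver's get_ts_info callback. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ts_info info = { .cmd = ETHTOOL_GET_TS_INFO };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&info;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("phc_index=%d\n", info.phc_index);	/* -1 if no PHC */
	else
		perror("ETHTOOL_GET_TS_INFO");

	close(fd);
	return 0;
}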
H A Di40e_fcoe.c152 * @pf: pointer to PF
160 static inline void i40e_fcoe_ddp_unmap(struct i40e_pf *pf, i40e_fcoe_ddp_unmap() argument
167 dma_unmap_sg(&pf->pdev->dev, ddp->sgl, ddp->sgc, i40e_fcoe_ddp_unmap()
254 struct i40e_pf *pf = np->vsi->back; i40e_fcoe_ddp_put() local
255 struct i40e_fcoe *fcoe = &pf->fcoe; i40e_fcoe_ddp_put()
264 i40e_fcoe_ddp_unmap(pf, ddp); i40e_fcoe_ddp_put()
271 * @pf: pointer to PF
275 int i40e_init_pf_fcoe(struct i40e_pf *pf) i40e_init_pf_fcoe() argument
277 struct i40e_hw *hw = &pf->hw; i40e_init_pf_fcoe()
280 pf->flags &= ~I40E_FLAG_FCOE_ENABLED; i40e_init_pf_fcoe()
281 pf->num_fcoe_qps = 0; i40e_init_pf_fcoe()
282 pf->fcoe_hmc_cntx_num = 0; i40e_init_pf_fcoe()
283 pf->fcoe_hmc_filt_num = 0; i40e_init_pf_fcoe()
285 if (!pf->hw.func_caps.fcoe) { i40e_init_pf_fcoe()
286 dev_info(&pf->pdev->dev, "FCoE capability is disabled\n"); i40e_init_pf_fcoe()
290 if (!pf->hw.func_caps.dcb) { i40e_init_pf_fcoe()
291 dev_warn(&pf->pdev->dev, i40e_init_pf_fcoe()
304 pf->flags |= I40E_FLAG_FCOE_ENABLED; i40e_init_pf_fcoe()
305 pf->num_fcoe_qps = I40E_DEFAULT_FCOE; i40e_init_pf_fcoe()
308 pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) * i40e_init_pf_fcoe()
310 pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num + i40e_init_pf_fcoe()
315 pf->filter_settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_16K; i40e_init_pf_fcoe()
316 pf->filter_settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_4K; i40e_init_pf_fcoe()
325 dev_info(&pf->pdev->dev, "FCoE is supported.\n"); i40e_init_pf_fcoe()
331 * @pf: pointer to PF
334 u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf) i40e_get_fcoe_tc_map() argument
337 struct i40e_hw *hw = &pf->hw; i40e_get_fcoe_tc_map()
369 struct i40e_pf *pf = vsi->back; i40e_fcoe_vsi_init() local
370 struct i40e_hw *hw = &pf->hw; i40e_fcoe_vsi_init()
373 if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) { i40e_fcoe_vsi_init()
374 dev_err(&pf->pdev->dev, i40e_fcoe_vsi_init()
402 enabled_tc = i40e_get_fcoe_tc_map(pf); i40e_fcoe_vsi_init()
427 struct i40e_pf *pf = vsi->back; i40e_fcoe_enable() local
428 struct i40e_fcoe *fcoe = &pf->fcoe; i40e_fcoe_enable()
430 if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) { i40e_fcoe_enable()
456 struct i40e_pf *pf = vsi->back; i40e_fcoe_disable() local
457 struct i40e_fcoe *fcoe = &pf->fcoe; i40e_fcoe_disable()
459 if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) { i40e_fcoe_disable()
536 struct i40e_pf *pf = vsi->back; i40e_fcoe_free_ddp_resources() local
537 struct i40e_fcoe *fcoe = &pf->fcoe; i40e_fcoe_free_ddp_resources()
552 i40e_fcoe_dma_pool_free(fcoe, &pf->pdev->dev, cpu); i40e_fcoe_free_ddp_resources()
570 struct i40e_pf *pf = vsi->back; i40e_fcoe_setup_ddp_resources() local
571 struct device *dev = &pf->pdev->dev; i40e_fcoe_setup_ddp_resources()
572 struct i40e_fcoe *fcoe = &pf->fcoe; i40e_fcoe_setup_ddp_resources()
586 dev_err(&pf->pdev->dev, "failed to allocate percpu DDP\n"); i40e_fcoe_setup_ddp_resources()
622 struct i40e_pf *pf = rx_ring->vsi->back; i40e_fcoe_handle_status() local
623 struct i40e_fcoe *fcoe = &pf->fcoe; i40e_fcoe_handle_status()
649 dev_err(&pf->pdev->dev, "xid %x ddp->xid %x TABLE FULL\n", i40e_fcoe_handle_status()
654 dev_err(&pf->pdev->dev, "xid %x ddp->xid %x CONFLICT\n", i40e_fcoe_handle_status()
663 dev_err(&pf->pdev->dev, "xid %x ddp->xid %x INVALIDATION FAILURE\n", i40e_fcoe_handle_status()
672 i40e_fcoe_ddp_unmap(pf, ddp); i40e_fcoe_handle_status()
693 struct i40e_pf *pf = rx_ring->vsi->back; i40e_fcoe_handle_offload() local
694 struct i40e_fcoe *fcoe = &pf->fcoe; i40e_fcoe_handle_offload()
717 dev_err(&pf->pdev->dev, "Protocol Error\n"); i40e_fcoe_handle_offload()
746 dev_err(&pf->pdev->dev, "xid 0x%x does not match ctx_xid 0x%x\n", i40e_fcoe_handle_offload()
753 dev_err(&pf->pdev->dev, "xid 0x%x fcerr 0x%x reported fcer 0x%x\n", i40e_fcoe_handle_offload()
786 i40e_fcoe_ddp_unmap(pf, ddp); i40e_fcoe_handle_offload()
822 struct i40e_pf *pf = np->vsi->back; i40e_fcoe_ddp_setup() local
823 struct i40e_fcoe *fcoe = &pf->fcoe; i40e_fcoe_ddp_setup()
834 dev_warn(&pf->pdev->dev, "xid=0x%x out-of-range\n", xid); i40e_fcoe_ddp_setup()
839 if (test_bit(__I40E_DOWN, &pf->state) || i40e_fcoe_ddp_setup()
840 test_bit(__I40E_NEEDS_RESTART, &pf->state)) { i40e_fcoe_ddp_setup()
841 dev_info(&pf->pdev->dev, "xid=0x%x device in reset/down\n", i40e_fcoe_ddp_setup()
848 dev_info(&pf->pdev->dev, "xid 0x%x w/ non-null sgl=%p nents=%d\n", i40e_fcoe_ddp_setup()
855 dev_info(&pf->pdev->dev, "No DDP pool, xid 0x%x\n", xid); i40e_fcoe_ddp_setup()
861 dev_info(&pf->pdev->dev, "No percpu ddp pool, xid 0x%x\n", xid); i40e_fcoe_ddp_setup()
866 dmacount = dma_map_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); i40e_fcoe_ddp_setup()
868 dev_info(&pf->pdev->dev, "dma_map_sg for sgl %p, sgc %d failed\n", i40e_fcoe_ddp_setup()
876 dev_info(&pf->pdev->dev, i40e_fcoe_ddp_setup()
890 dev_info(&pf->pdev->dev, for_each_sg()
942 dma_unmap_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
1012 struct i40e_pf *pf = tx_ring->vsi->back; i40e_fcoe_program_ddp() local
1020 dev_warn(&pf->pdev->dev, i40e_fcoe_program_ddp()
1028 dev_warn(&pf->pdev->dev, i40e_fcoe_program_ddp()
1146 struct i40e_pf *pf = tx_ring->vsi->back; i40e_fcoe_handle_ddp() local
1147 struct i40e_fcoe *fcoe = &pf->fcoe; i40e_fcoe_handle_ddp()
1489 struct i40e_pf *pf = vsi->back; i40e_fcoe_config_netdev() local
1511 SET_NETDEV_DEV(netdev, &pf->pdev->dev); i40e_fcoe_config_netdev()
1533 * @pf: the PF that VSI is associated with
1536 void i40e_fcoe_vsi_setup(struct i40e_pf *pf) i40e_fcoe_vsi_setup() argument
1542 if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) i40e_fcoe_vsi_setup()
1545 BUG_ON(!pf->vsi[pf->lan_vsi]); i40e_fcoe_vsi_setup()
1547 for (i = 0; i < pf->num_alloc_vsi; i++) { i40e_fcoe_vsi_setup()
1548 vsi = pf->vsi[i]; i40e_fcoe_vsi_setup()
1550 dev_warn(&pf->pdev->dev, i40e_fcoe_vsi_setup()
1556 seid = pf->vsi[pf->lan_vsi]->seid; i40e_fcoe_vsi_setup()
1557 vsi = i40e_vsi_setup(pf, I40E_VSI_FCOE, seid, 0); i40e_fcoe_vsi_setup()
1559 dev_dbg(&pf->pdev->dev, i40e_fcoe_vsi_setup()
1563 dev_info(&pf->pdev->dev, "Failed to create FCoE VSI\n"); i40e_fcoe_vsi_setup()
H A Di40e.h415 struct i40e_pf *pf; member in struct:i40e_veb
520 u16 idx; /* index in pf->vsi[] */
557 struct i40e_pf *pf; member in struct:i40e_device
618 * @pf: pointer to the PF struct
620 static inline int i40e_get_fd_cnt_all(struct i40e_pf *pf) i40e_get_fd_cnt_all() argument
622 return pf->hw.fdir_shared_filter_count + pf->fdir_pf_filter_count; i40e_get_fd_cnt_all()
630 void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags);
631 void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags);
632 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id);
636 int i40e_fetch_switch_configuration(struct i40e_pf *pf,
640 struct i40e_pf *pf, bool add);
643 void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
644 u32 i40e_get_current_fd_count(struct i40e_pf *pf);
645 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
646 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf);
647 u32 i40e_get_global_fd_count(struct i40e_pf *pf);
648 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
656 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
659 struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
667 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
668 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
676 void i40e_pf_reset_stats(struct i40e_pf *pf);
678 void i40e_dbg_pf_init(struct i40e_pf *pf);
679 void i40e_dbg_pf_exit(struct i40e_pf *pf);
683 static inline void i40e_dbg_pf_init(struct i40e_pf *pf) {} i40e_dbg_pf_exit() argument
684 static inline void i40e_dbg_pf_exit(struct i40e_pf *pf) {} i40e_dbg_init() argument
690 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
691 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
724 u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf);
726 void i40e_fcoe_vsi_setup(struct i40e_pf *pf);
727 int i40e_init_pf_fcoe(struct i40e_pf *pf);
738 void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
743 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
748 void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf);
749 void i40e_ptp_rx_hwtstamp(struct i40e_pf *pf, struct sk_buff *skb, u8 index);
750 void i40e_ptp_set_increment(struct i40e_pf *pf);
751 int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
752 int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr);
753 void i40e_ptp_init(struct i40e_pf *pf);
754 void i40e_ptp_stop(struct i40e_pf *pf);
756 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf);
757 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf);
758 i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf);
H A Di40e_virtchnl_pf.h72 struct i40e_pf *pf; member in struct:i40e_vf
107 void i40e_free_vfs(struct i40e_pf *pf);
109 int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs);
110 int i40e_vc_process_vf_msg(struct i40e_pf *pf, u16 vf_id, u32 v_opcode,
112 int i40e_vc_process_vflr_event(struct i40e_pf *pf);
127 void i40e_vc_notify_link_state(struct i40e_pf *pf);
128 void i40e_vc_notify_reset(struct i40e_pf *pf);
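The VF entry points declared above (i40e_alloc_vfs(), i40e_free_vfs()) are normally reached through the driver's SR-IOV configure callback, which the generic PCI sysfs interface invokes. A hedged user-space sketch is shown below; the PCI address and VF count are illustrative.

/* Enable VFs by writing the generic sriov_numvfs attribute.  The
 * device address 0000:01:00.0 is a placeholder.
 */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/pci/devices/0000:01:00.0/sriov_numvfs";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "4\n");	/* request four VFs from this PF */
	fclose(f);
	return 0;
}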
H A Di40e_txrx.c48 * @pf: The PF pointer
52 struct i40e_pf *pf, bool add) i40e_program_fdir_filter()
68 for (i = 0; i < pf->num_alloc_vsi; i++) i40e_program_fdir_filter()
69 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) i40e_program_fdir_filter()
70 vsi = pf->vsi[i]; i40e_program_fdir_filter()
112 fpt |= (pf->vsi[pf->lan_vsi]->id) << i40e_program_fdir_filter()
200 struct i40e_pf *pf = vsi->back; i40e_add_del_fdir_udpv4() local
225 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); i40e_add_del_fdir_udpv4()
227 dev_info(&pf->pdev->dev, i40e_add_del_fdir_udpv4()
231 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { i40e_add_del_fdir_udpv4()
233 dev_info(&pf->pdev->dev, i40e_add_del_fdir_udpv4()
237 dev_info(&pf->pdev->dev, i40e_add_del_fdir_udpv4()
257 struct i40e_pf *pf = vsi->back; i40e_add_del_fdir_tcpv4() local
284 pf->fd_tcp_rule++; i40e_add_del_fdir_tcpv4()
285 if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) { i40e_add_del_fdir_tcpv4()
286 dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 flow being applied\n"); i40e_add_del_fdir_tcpv4()
287 pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED; i40e_add_del_fdir_tcpv4()
290 pf->fd_tcp_rule = (pf->fd_tcp_rule > 0) ? i40e_add_del_fdir_tcpv4()
291 (pf->fd_tcp_rule - 1) : 0; i40e_add_del_fdir_tcpv4()
292 if (pf->fd_tcp_rule == 0) { i40e_add_del_fdir_tcpv4()
293 pf->flags |= I40E_FLAG_FD_ATR_ENABLED; i40e_add_del_fdir_tcpv4()
294 dev_info(&pf->pdev->dev, "ATR re-enabled due to no sideband TCP/IPv4 rules\n"); i40e_add_del_fdir_tcpv4()
299 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); i40e_add_del_fdir_tcpv4()
302 dev_info(&pf->pdev->dev, i40e_add_del_fdir_tcpv4()
306 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { i40e_add_del_fdir_tcpv4()
308 dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d loc = %d)\n", i40e_add_del_fdir_tcpv4()
311 dev_info(&pf->pdev->dev, i40e_add_del_fdir_tcpv4()
349 struct i40e_pf *pf = vsi->back; i40e_add_del_fdir_ipv4() local
372 ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add); i40e_add_del_fdir_ipv4()
375 dev_info(&pf->pdev->dev, i40e_add_del_fdir_ipv4()
379 } else if (I40E_DEBUG_FD & pf->hw.debug_mask) { i40e_add_del_fdir_ipv4()
381 dev_info(&pf->pdev->dev, i40e_add_del_fdir_ipv4()
385 dev_info(&pf->pdev->dev, i40e_add_del_fdir_ipv4()
404 struct i40e_pf *pf = vsi->back; i40e_add_del_fdir() local
437 dev_info(&pf->pdev->dev, "Could not specify spec type %d\n", i40e_add_del_fdir()
458 struct i40e_pf *pf = rx_ring->vsi->back; i40e_fd_handle_status() local
459 struct pci_dev *pdev = pf->pdev; i40e_fd_handle_status()
470 (I40E_DEBUG_FD & pf->hw.debug_mask)) i40e_fd_handle_status()
480 if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state)) i40e_fd_handle_status()
483 pf->fd_add_err++; i40e_fd_handle_status()
485 pf->fd_atr_cnt = i40e_get_current_atr_cnt(pf); i40e_fd_handle_status()
488 (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) { i40e_fd_handle_status()
489 pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED; i40e_fd_handle_status()
490 set_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state); i40e_fd_handle_status()
494 fcnt_prog = i40e_get_global_fd_count(pf); i40e_fd_handle_status()
495 fcnt_avail = pf->fdir_pf_filter_count; i40e_fd_handle_status()
501 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) && i40e_fd_handle_status()
502 !(pf->auto_disable_flags & i40e_fd_handle_status()
505 pf->auto_disable_flags |= i40e_fd_handle_status()
514 if (I40E_DEBUG_FD & pf->hw.debug_mask) i40e_fd_handle_status()
648 struct i40e_pf *pf = tx_ring->vsi->back; i40e_check_tx_hang() local
670 if (I40E_DEBUG_FLOW & pf->hw.debug_mask) i40e_check_tx_hang()
673 pf->tx_sluggish_count++; i40e_check_tx_hang()
1935 struct i40e_pf *pf = tx_ring->vsi->back; i40e_atr() local
1947 if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED)) i40e_atr()
1950 if ((pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) i40e_atr()
1979 if (th->syn && (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) i40e_atr()
2026 ((u32)pf->fd_atr_cnt_idx << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & i40e_atr()
2190 struct i40e_pf *pf; i40e_tsyn() local
2202 pf = i40e_netdev_to_pf(tx_ring->netdev); i40e_tsyn()
2203 if (!(pf->flags & I40E_FLAG_PTP)) i40e_tsyn()
2206 if (pf->ptp_tx && i40e_tsyn()
2207 !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) { i40e_tsyn()
2209 pf->ptp_tx_skb = skb_get(skb); i40e_tsyn()
51 i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet, struct i40e_pf *pf, bool add) i40e_program_fdir_filter() argument
/linux-4.1.27/net/lapb/
H A Dlapb_in.c49 lapb_dbg(1, "(%p) S0 RX SABM(%d)\n", lapb->dev, frame->pf); lapb_state0_machine()
52 lapb->dev, frame->pf); lapb_state0_machine()
53 lapb_send_control(lapb, LAPB_DM, frame->pf, lapb_state0_machine()
57 lapb->dev, frame->pf); lapb_state0_machine()
59 lapb_send_control(lapb, LAPB_UA, frame->pf, lapb_state0_machine()
74 lapb_dbg(1, "(%p) S0 RX SABME(%d)\n", lapb->dev, frame->pf); lapb_state0_machine()
77 lapb->dev, frame->pf); lapb_state0_machine()
79 lapb_send_control(lapb, LAPB_UA, frame->pf, lapb_state0_machine()
92 lapb->dev, frame->pf); lapb_state0_machine()
93 lapb_send_control(lapb, LAPB_DM, frame->pf, lapb_state0_machine()
99 lapb_dbg(1, "(%p) S0 RX DISC(%d)\n", lapb->dev, frame->pf); lapb_state0_machine()
100 lapb_dbg(1, "(%p) S0 TX UA(%d)\n", lapb->dev, frame->pf); lapb_state0_machine()
101 lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_state0_machine()
120 lapb_dbg(1, "(%p) S1 RX SABM(%d)\n", lapb->dev, frame->pf); lapb_state1_machine()
123 lapb->dev, frame->pf); lapb_state1_machine()
124 lapb_send_control(lapb, LAPB_DM, frame->pf, lapb_state1_machine()
128 lapb->dev, frame->pf); lapb_state1_machine()
129 lapb_send_control(lapb, LAPB_UA, frame->pf, lapb_state1_machine()
135 lapb_dbg(1, "(%p) S1 RX SABME(%d)\n", lapb->dev, frame->pf); lapb_state1_machine()
138 lapb->dev, frame->pf); lapb_state1_machine()
139 lapb_send_control(lapb, LAPB_UA, frame->pf, lapb_state1_machine()
143 lapb->dev, frame->pf); lapb_state1_machine()
144 lapb_send_control(lapb, LAPB_DM, frame->pf, lapb_state1_machine()
150 lapb_dbg(1, "(%p) S1 RX DISC(%d)\n", lapb->dev, frame->pf); lapb_state1_machine()
151 lapb_dbg(1, "(%p) S1 TX DM(%d)\n", lapb->dev, frame->pf); lapb_state1_machine()
152 lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); lapb_state1_machine()
156 lapb_dbg(1, "(%p) S1 RX UA(%d)\n", lapb->dev, frame->pf); lapb_state1_machine()
157 if (frame->pf) { lapb_state1_machine()
172 lapb_dbg(1, "(%p) S1 RX DM(%d)\n", lapb->dev, frame->pf); lapb_state1_machine()
173 if (frame->pf) { lapb_state1_machine()
198 lapb->dev, frame->pf); lapb_state2_machine()
199 lapb_dbg(1, "(%p) S2 TX DM(%d)\n", lapb->dev, frame->pf); lapb_state2_machine()
200 lapb_send_control(lapb, LAPB_DM, frame->pf, LAPB_RESPONSE); lapb_state2_machine()
204 lapb_dbg(1, "(%p) S2 RX DISC(%d)\n", lapb->dev, frame->pf); lapb_state2_machine()
205 lapb_dbg(1, "(%p) S2 TX UA(%d)\n", lapb->dev, frame->pf); lapb_state2_machine()
206 lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_state2_machine()
210 lapb_dbg(1, "(%p) S2 RX UA(%d)\n", lapb->dev, frame->pf); lapb_state2_machine()
211 if (frame->pf) { lapb_state2_machine()
221 lapb_dbg(1, "(%p) S2 RX DM(%d)\n", lapb->dev, frame->pf); lapb_state2_machine()
222 if (frame->pf) { lapb_state2_machine()
236 lapb->dev, frame->pf); lapb_state2_machine()
237 lapb_dbg(1, "(%p) S2 RX DM(%d)\n", lapb->dev, frame->pf); lapb_state2_machine()
238 if (frame->pf) lapb_state2_machine()
239 lapb_send_control(lapb, LAPB_DM, frame->pf, lapb_state2_machine()
260 lapb_dbg(1, "(%p) S3 RX SABM(%d)\n", lapb->dev, frame->pf); lapb_state3_machine()
263 lapb->dev, frame->pf); lapb_state3_machine()
264 lapb_send_control(lapb, LAPB_DM, frame->pf, lapb_state3_machine()
268 lapb->dev, frame->pf); lapb_state3_machine()
269 lapb_send_control(lapb, LAPB_UA, frame->pf, lapb_state3_machine()
283 lapb_dbg(1, "(%p) S3 RX SABME(%d)\n", lapb->dev, frame->pf); lapb_state3_machine()
286 lapb->dev, frame->pf); lapb_state3_machine()
287 lapb_send_control(lapb, LAPB_UA, frame->pf, lapb_state3_machine()
299 lapb->dev, frame->pf); lapb_state3_machine()
300 lapb_send_control(lapb, LAPB_DM, frame->pf, lapb_state3_machine()
306 lapb_dbg(1, "(%p) S3 RX DISC(%d)\n", lapb->dev, frame->pf); lapb_state3_machine()
309 lapb_send_control(lapb, LAPB_UA, frame->pf, LAPB_RESPONSE); lapb_state3_machine()
317 lapb_dbg(1, "(%p) S3 RX DM(%d)\n", lapb->dev, frame->pf); lapb_state3_machine()
328 lapb->dev, frame->pf, frame->nr); lapb_state3_machine()
330 lapb_check_need_response(lapb, frame->cr, frame->pf); lapb_state3_machine()
347 lapb->dev, frame->pf, frame->nr); lapb_state3_machine()
349 lapb_check_need_response(lapb, frame->cr, frame->pf); lapb_state3_machine()
366 lapb->dev, frame->pf, frame->nr); lapb_state3_machine()
368 lapb_check_need_response(lapb, frame->cr, frame->pf); lapb_state3_machine()
388 lapb->dev, frame->pf, frame->ns, frame->nr); lapb_state3_machine()
422 if (frame->pf) lapb_state3_machine()
433 if (frame->pf) lapb_state3_machine()
437 lapb->dev, frame->pf, lapb->vr); lapb_state3_machine()
439 lapb_send_control(lapb, LAPB_REJ, frame->pf, lapb_state3_machine()
448 lapb->dev, frame->pf, lapb_state3_machine()
458 lapb_dbg(1, "(%p) S3 RX ILLEGAL(%d)\n", lapb->dev, frame->pf); lapb_state3_machine()
483 lapb_dbg(1, "(%p) S4 RX SABM(%d)\n", lapb->dev, frame->pf); lapb_state4_machine()
486 lapb->dev, frame->pf); lapb_state4_machine()
487 lapb_send_control(lapb, LAPB_DM, frame->pf, lapb_state4_machine()
491 lapb->dev, frame->pf); lapb_state4_machine()
493 lapb_send_control(lapb, LAPB_UA, frame->pf, lapb_state4_machine()
508 lapb_dbg(1, "(%p) S4 RX SABME(%d)\n", lapb->dev, frame->pf); lapb_state4_machine()
511 lapb->dev, frame->pf); lapb_state4_machine()
513 lapb_send_control(lapb, LAPB_UA, frame->pf, lapb_state4_machine()
526 lapb->dev, frame->pf); lapb_state4_machine()
527 lapb_send_control(lapb, LAPB_DM, frame->pf, lapb_state4_machine()
H A Dlapb_subr.c164 frame->pf = skb->data[1] & LAPB_EPF; lapb_decode()
176 frame->pf = skb->data[1] & LAPB_EPF; lapb_decode()
185 frame->pf = skb->data[0] & LAPB_SPF; lapb_decode()
198 frame->pf = skb->data[0] & LAPB_SPF; lapb_decode()
205 frame->pf = skb->data[0] & LAPB_SPF; lapb_decode()
211 frame->pf = skb->data[0] & LAPB_SPF; lapb_decode()
H A Dlapb_out.c207 void lapb_check_need_response(struct lapb_cb *lapb, int type, int pf) lapb_check_need_response() argument
209 if (type == LAPB_COMMAND && pf) lapb_check_need_response()
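The lapb_decode() hits above pull the poll/final bit out of the control field. A stripped-down sketch of that extraction follows; the 0x10/0x01 masks mirror the usual LAPB control-field layout for standard and extended operation and are assumptions here, not copied from the header.

/* Illustrative P/F-bit extraction in the style of lapb_decode(). */
#include <stdio.h>

#define SPF 0x10	/* P/F bit, modulo-8 control byte (assumed)    */
#define EPF 0x01	/* P/F bit, modulo-128 second octet (assumed)  */

static int decode_pf(const unsigned char *ctrl, int extended)
{
	return extended ? (ctrl[1] & EPF) != 0 : (ctrl[0] & SPF) != 0;
}

int main(void)
{
	unsigned char sabm_p[] = { 0x3f };	 /* SABM with P set          */
	unsigned char i_ext[]  = { 0x00, 0x01 }; /* extended I frame, P set  */

	printf("pf=%d pf=%d\n", decode_pf(sabm_p, 0), decode_pf(i_ext, 1));
	return 0;
}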
/linux-4.1.27/tools/perf/util/
H A Dprobe-finder.c510 static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf) convert_variable() argument
518 ret = convert_variable_location(vr_die, pf->addr, pf->fb_ops, convert_variable()
519 &pf->sp_die, pf->tvar); convert_variable()
522 " Perhaps, it has been optimized out.\n", pf->pvar->var); convert_variable()
525 else if (ret == 0 && pf->pvar->field) { convert_variable()
526 ret = convert_variable_fields(vr_die, pf->pvar->var, convert_variable()
527 pf->pvar->field, &pf->tvar->ref, convert_variable()
532 ret = convert_variable_type(vr_die, pf->tvar, pf->pvar->type); convert_variable()
538 static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf) find_variable() argument
544 if (!is_c_varname(pf->pvar->var)) { find_variable()
546 pf->tvar->value = strdup(pf->pvar->var); find_variable()
547 if (pf->tvar->value == NULL) find_variable()
549 if (pf->pvar->type) { find_variable()
550 pf->tvar->type = strdup(pf->pvar->type); find_variable()
551 if (pf->tvar->type == NULL) find_variable()
554 if (pf->pvar->name) { find_variable()
555 pf->tvar->name = strdup(pf->pvar->name); find_variable()
556 if (pf->tvar->name == NULL) find_variable()
559 pf->tvar->name = NULL; find_variable()
563 if (pf->pvar->name) find_variable()
564 pf->tvar->name = strdup(pf->pvar->name); find_variable()
566 ret = synthesize_perf_probe_arg(pf->pvar, buf, 32); find_variable()
572 pf->tvar->name = strdup(buf); find_variable()
574 if (pf->tvar->name == NULL) find_variable()
577 pr_debug("Searching '%s' variable in context.\n", pf->pvar->var); find_variable()
579 if (!die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) { find_variable()
581 if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, find_variable()
584 pf->pvar->var); find_variable()
589 ret = convert_variable(&vr_die, pf); find_variable()
651 static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf) call_probe_finder() argument
664 if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) { call_probe_finder()
670 memcpy(&pf->sp_die, sc_die, sizeof(Dwarf_Die)); call_probe_finder()
673 dwarf_attr(&pf->sp_die, DW_AT_frame_base, &fb_attr); call_probe_finder()
674 ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1); call_probe_finder()
676 pf->fb_ops = NULL; call_probe_finder()
678 } else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa && call_probe_finder()
679 pf->cfi != NULL) { call_probe_finder()
681 if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 || call_probe_finder()
682 dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) { call_probe_finder()
684 (uintmax_t)pf->addr); call_probe_finder()
691 ret = pf->callback(sc_die, pf); call_probe_finder()
693 /* *pf->fb_ops will be cached in libdw. Don't free it. */ call_probe_finder()
694 pf->fb_ops = NULL; call_probe_finder()
741 static Dwarf_Die *find_best_scope(struct probe_finder *pf, Dwarf_Die *die_mem) find_best_scope() argument
744 .function = pf->pev->point.function, find_best_scope()
745 .file = pf->fname, find_best_scope()
746 .line = pf->lno, find_best_scope()
752 cu_walk_functions_at(&pf->cu_die, pf->addr, find_best_scope_cb, &fsp); find_best_scope()
760 struct probe_finder *pf = data; probe_point_line_walker() local
764 if (lineno != pf->lno || strtailcmp(fname, pf->fname) != 0) probe_point_line_walker()
767 pf->addr = addr; probe_point_line_walker()
768 sc_die = find_best_scope(pf, &die_mem); probe_point_line_walker()
774 ret = call_probe_finder(sc_die, pf); probe_point_line_walker()
781 static int find_probe_point_by_line(struct probe_finder *pf) find_probe_point_by_line() argument
783 return die_walk_lines(&pf->cu_die, probe_point_line_walker, pf); find_probe_point_by_line()
829 struct probe_finder *pf = data; probe_point_lazy_walker() local
833 if (!intlist__has_entry(pf->lcache, lineno) || probe_point_lazy_walker()
834 strtailcmp(fname, pf->fname) != 0) probe_point_lazy_walker()
839 pf->addr = addr; probe_point_lazy_walker()
840 pf->lno = lineno; probe_point_lazy_walker()
841 sc_die = find_best_scope(pf, &die_mem); probe_point_lazy_walker()
847 ret = call_probe_finder(sc_die, pf); probe_point_lazy_walker()
857 static int find_probe_point_lazy(Dwarf_Die *sp_die, struct probe_finder *pf) find_probe_point_lazy() argument
862 if (intlist__empty(pf->lcache)) { find_probe_point_lazy()
865 comp_dir = cu_get_comp_dir(&pf->cu_die); find_probe_point_lazy()
866 ret = get_real_path(pf->fname, comp_dir, &fpath); find_probe_point_lazy()
873 ret = find_lazy_match_lines(pf->lcache, fpath, find_probe_point_lazy()
874 pf->pev->point.lazy_line); find_probe_point_lazy()
880 return die_walk_lines(sp_die, probe_point_lazy_walker, pf); find_probe_point_lazy()
885 struct probe_finder *pf = data; probe_point_inline_cb() local
886 struct perf_probe_point *pp = &pf->pev->point; probe_point_inline_cb()
891 ret = find_probe_point_lazy(in_die, pf); probe_point_inline_cb()
899 pf->addr = addr; probe_point_inline_cb()
900 pf->addr += pp->offset; probe_point_inline_cb()
902 (uintmax_t)pf->addr); probe_point_inline_cb()
904 ret = call_probe_finder(in_die, pf); probe_point_inline_cb()
920 struct probe_finder *pf = param->data; probe_point_search_cb() local
921 struct perf_probe_point *pp = &pf->pev->point; probe_point_search_cb()
932 pf->fname = dwarf_decl_file(sp_die); probe_point_search_cb()
934 dwarf_decl_line(sp_die, &pf->lno); probe_point_search_cb()
935 pf->lno += pp->line; probe_point_search_cb()
936 param->retval = find_probe_point_by_line(pf); probe_point_search_cb()
939 dwarf_entrypc(sp_die, &pf->addr); probe_point_search_cb()
942 param->retval = find_probe_point_lazy(sp_die, pf); probe_point_search_cb()
944 pf->addr += pp->offset; probe_point_search_cb()
946 param->retval = call_probe_finder(sp_die, pf); probe_point_search_cb()
951 probe_point_inline_cb, (void *)pf); probe_point_search_cb()
956 static int find_probe_point_by_func(struct probe_finder *pf) find_probe_point_by_func() argument
958 struct dwarf_callback_param _param = {.data = (void *)pf, find_probe_point_by_func()
960 dwarf_getfuncs(&pf->cu_die, probe_point_search_cb, &_param, 0); find_probe_point_by_func()
998 struct probe_finder *pf) debuginfo__find_probes()
1000 struct perf_probe_point *pp = &pf->pev->point; debuginfo__find_probes()
1021 pf->cfi = dwarf_getcfi_elf(elf); debuginfo__find_probes()
1023 pf->cfi = dwarf_getcfi(dbg->dbg); debuginfo__find_probes()
1028 pf->lcache = intlist__new(NULL); debuginfo__find_probes()
1029 if (!pf->lcache) debuginfo__find_probes()
1037 .cu_die = &pf->cu_die, debuginfo__find_probes()
1038 .sp_die = &pf->sp_die, debuginfo__find_probes()
1042 .data = pf, debuginfo__find_probes()
1048 ret = probe_point_search_cb(&pf->sp_die, &probe_param); debuginfo__find_probes()
1057 diep = dwarf_offdie(dbg->dbg, off + cuhl, &pf->cu_die); debuginfo__find_probes()
1063 pf->fname = cu_find_realpath(&pf->cu_die, pp->file); debuginfo__find_probes()
1065 pf->fname = NULL; debuginfo__find_probes()
1067 if (!pp->file || pf->fname) { debuginfo__find_probes()
1069 ret = find_probe_point_by_func(pf); debuginfo__find_probes()
1071 ret = find_probe_point_lazy(&pf->cu_die, pf); debuginfo__find_probes()
1073 pf->lno = pp->line; debuginfo__find_probes()
1074 ret = find_probe_point_by_line(pf); debuginfo__find_probes()
1083 intlist__delete(pf->lcache); debuginfo__find_probes()
1084 pf->lcache = NULL; debuginfo__find_probes()
1090 struct probe_finder *pf; member in struct:local_vars_finder
1101 struct probe_finder *pf = vf->pf; copy_variables_cb() local
1107 if (convert_variable_location(die_mem, vf->pf->addr, copy_variables_cb()
1108 vf->pf->fb_ops, &pf->sp_die, copy_variables_cb()
1120 if (dwarf_haspc(die_mem, vf->pf->addr)) copy_variables_cb()
1126 static int expand_probe_args(Dwarf_Die *sc_die, struct probe_finder *pf, expand_probe_args() argument
1132 struct local_vars_finder vf = {.pf = pf, .args = args, expand_probe_args()
1135 for (i = 0; i < pf->pev->nargs; i++) { expand_probe_args()
1137 if (strcmp(pf->pev->args[i].var, "$vars") == 0) { expand_probe_args()
1149 args[n] = pf->pev->args[i]; expand_probe_args()
1157 static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf) add_probe_trace_event() argument
1160 container_of(pf, struct trace_event_finder, pf); add_probe_trace_event()
1174 ret = convert_to_trace_point(&pf->sp_die, tf->mod, pf->addr, add_probe_trace_event()
1175 pf->pev->point.retprobe, &tev->point); add_probe_trace_event()
1187 ret = expand_probe_args(sc_die, pf, args); add_probe_trace_event()
1200 pf->pvar = &args[i]; add_probe_trace_event()
1201 pf->tvar = &tev->args[i]; add_probe_trace_event()
1203 ret = find_variable(sc_die, pf); add_probe_trace_event()
1219 .pf = {.pev = pev, .callback = add_probe_trace_event}, debuginfo__find_trace_events()
1231 ret = debuginfo__find_probes(dbg, &tf.pf); debuginfo__find_trace_events()
1255 ret = convert_variable_location(die_mem, af->pf.addr, collect_variables_cb()
1256 af->pf.fb_ops, &af->pf.sp_die, collect_variables_cb()
1266 if (af->child && dwarf_haspc(die_mem, af->pf.addr)) collect_variables_cb()
1273 static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf) add_available_vars() argument
1276 container_of(pf, struct available_var_finder, pf); add_available_vars()
1289 ret = convert_to_trace_point(&pf->sp_die, af->mod, pf->addr, add_available_vars()
1290 pf->pev->point.retprobe, &vl->point); add_available_vars()
1309 die_find_child(&pf->cu_die, collect_variables_cb, (void *)af, &die_mem); add_available_vars()
1331 .pf = {.pev = pev, .callback = add_available_vars}, debuginfo__find_available_vars_at()
1344 ret = debuginfo__find_probes(dbg, &af.pf); debuginfo__find_available_vars_at()
997 debuginfo__find_probes(struct debuginfo *dbg, struct probe_finder *pf) debuginfo__find_probes() argument
H A Dprobe-finder.h66 int (*callback)(Dwarf_Die *sc_die, struct probe_finder *pf);
86 struct probe_finder pf; member in struct:trace_event_finder
94 struct probe_finder pf; member in struct:available_var_finder
/linux-4.1.27/net/ax25/
H A Dax25_std_in.c42 static int ax25_std_state1_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type) ax25_std_state1_machine() argument
48 ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_std_state1_machine()
54 ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_std_state1_machine()
58 ax25_send_control(ax25, AX25_DM, pf, AX25_RESPONSE); ax25_std_state1_machine()
62 if (pf) { ax25_std_state1_machine()
84 if (pf) { ax25_std_state1_machine()
106 static int ax25_std_state2_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type) ax25_std_state2_machine() argument
111 ax25_send_control(ax25, AX25_DM, pf, AX25_RESPONSE); ax25_std_state2_machine()
115 ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_std_state2_machine()
121 if (pf) ax25_std_state2_machine()
129 if (pf) ax25_send_control(ax25, AX25_DM, AX25_POLLON, AX25_RESPONSE); ax25_std_state2_machine()
144 static int ax25_std_state3_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type) ax25_std_state3_machine() argument
158 ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_std_state3_machine()
171 ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_std_state3_machine()
185 if (type == AX25_COMMAND && pf) ax25_std_state3_machine()
197 if (type == AX25_COMMAND && pf) ax25_std_state3_machine()
223 if (pf) ax25_std_enquiry_response(ax25); ax25_std_state3_machine()
232 if (pf) { ax25_std_state3_machine()
242 if (pf) ax25_std_enquiry_response(ax25); ax25_std_state3_machine()
245 ax25_send_control(ax25, AX25_REJ, pf, AX25_RESPONSE); ax25_std_state3_machine()
269 static int ax25_std_state4_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type) ax25_std_state4_machine() argument
283 ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_std_state4_machine()
298 ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_std_state4_machine()
312 if (type == AX25_RESPONSE && pf) { ax25_std_state4_machine()
329 if (type == AX25_COMMAND && pf) ax25_std_state4_machine()
341 if (pf && type == AX25_RESPONSE) { ax25_std_state4_machine()
358 if (type == AX25_COMMAND && pf) ax25_std_state4_machine()
377 if (pf) ax25_std_state4_machine()
387 if (pf) { ax25_std_state4_machine()
397 if (pf) ax25_std_enquiry_response(ax25); ax25_std_state4_machine()
400 ax25_send_control(ax25, AX25_REJ, pf, AX25_RESPONSE); ax25_std_state4_machine()
424 int queued = 0, frametype, ns, nr, pf; ax25_std_frame_in() local
426 frametype = ax25_decode(ax25, skb, &ns, &nr, &pf); ax25_std_frame_in()
430 queued = ax25_std_state1_machine(ax25, skb, frametype, pf, type); ax25_std_frame_in()
433 queued = ax25_std_state2_machine(ax25, skb, frametype, pf, type); ax25_std_frame_in()
436 queued = ax25_std_state3_machine(ax25, skb, frametype, ns, nr, pf, type); ax25_std_frame_in()
439 queued = ax25_std_state4_machine(ax25, skb, frametype, ns, nr, pf, type); ax25_std_frame_in()
H A Dax25_ds_in.c35 static int ax25_ds_state1_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type) ax25_ds_state1_machine() argument
41 ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_ds_state1_machine()
47 ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_ds_state1_machine()
51 ax25_send_control(ax25, AX25_DM, pf, AX25_RESPONSE); ax25_ds_state1_machine()
85 if (pf) ax25_ds_state1_machine()
90 if (pf) ax25_ds_state1_machine()
103 static int ax25_ds_state2_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type) ax25_ds_state2_machine() argument
113 ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_ds_state2_machine()
120 if (pf) { ax25_ds_state2_machine()
130 if (pf) { ax25_ds_state2_machine()
148 static int ax25_ds_state3_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type) ax25_ds_state3_machine() argument
162 ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_ds_state3_machine()
175 ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_ds_state3_machine()
195 if (type == AX25_COMMAND && pf) ax25_ds_state3_machine()
216 if (type == AX25_COMMAND && pf) ax25_ds_state3_machine()
238 if (pf) ax25_ds_enquiry_response(ax25); ax25_ds_state3_machine()
247 if (pf) { ax25_ds_state3_machine()
257 if (pf) ax25_ds_enquiry_response(ax25); ax25_ds_state3_machine()
284 int queued = 0, frametype, ns, nr, pf; ax25_ds_frame_in() local
286 frametype = ax25_decode(ax25, skb, &ns, &nr, &pf); ax25_ds_frame_in()
290 queued = ax25_ds_state1_machine(ax25, skb, frametype, pf, type); ax25_ds_frame_in()
293 queued = ax25_ds_state2_machine(ax25, skb, frametype, pf, type); ax25_ds_frame_in()
296 queued = ax25_ds_state3_machine(ax25, skb, frametype, ns, nr, pf, type); ax25_ds_frame_in()
H A Dax25_subr.c100 int ax25_decode(ax25_cb *ax25, struct sk_buff *skb, int *ns, int *nr, int *pf) ax25_decode() argument
106 *ns = *nr = *pf = 0; ax25_decode()
113 *pf = frame[0] & AX25_PF; ax25_decode()
117 *pf = frame[0] & AX25_PF; ax25_decode()
120 *pf = frame[0] & AX25_PF; ax25_decode()
128 *pf = frame[1] & AX25_EPF; ax25_decode()
133 *pf = frame[1] & AX25_EPF; ax25_decode()
137 *pf = frame[0] & AX25_PF; ax25_decode()
/linux-4.1.27/net/netfilter/
H A Dnf_log.c25 static struct nf_logger *__find_logger(int pf, const char *str_logger) __find_logger() argument
31 if (loggers[pf][i] == NULL) __find_logger()
34 log = nft_log_dereference(loggers[pf][i]); __find_logger()
42 void nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger) nf_log_set() argument
46 if (pf == NFPROTO_UNSPEC) nf_log_set()
50 log = nft_log_dereference(net->nf.nf_loggers[pf]); nf_log_set()
52 rcu_assign_pointer(net->nf.nf_loggers[pf], logger); nf_log_set()
75 int nf_log_register(u_int8_t pf, struct nf_logger *logger) nf_log_register() argument
80 if (pf >= ARRAY_SIZE(init_net.nf.nf_loggers)) nf_log_register()
85 if (pf == NFPROTO_UNSPEC) { nf_log_register()
95 if (rcu_access_pointer(loggers[pf][logger->type])) { nf_log_register()
99 rcu_assign_pointer(loggers[pf][logger->type], logger); nf_log_register()
124 int nf_log_bind_pf(struct net *net, u_int8_t pf, nf_log_bind_pf() argument
127 if (pf >= ARRAY_SIZE(net->nf.nf_loggers)) nf_log_bind_pf()
130 if (__find_logger(pf, logger->name) == NULL) { nf_log_bind_pf()
134 rcu_assign_pointer(net->nf.nf_loggers[pf], logger); nf_log_bind_pf()
140 void nf_log_unbind_pf(struct net *net, u_int8_t pf) nf_log_unbind_pf() argument
142 if (pf >= ARRAY_SIZE(net->nf.nf_loggers)) nf_log_unbind_pf()
145 RCU_INIT_POINTER(net->nf.nf_loggers[pf], NULL); nf_log_unbind_pf()
150 void nf_logger_request_module(int pf, enum nf_log_type type) nf_logger_request_module() argument
152 if (loggers[pf][type] == NULL) nf_logger_request_module()
153 request_module("nf-logger-%u-%u", pf, type); nf_logger_request_module()
157 int nf_logger_find_get(int pf, enum nf_log_type type) nf_logger_find_get() argument
162 if (rcu_access_pointer(loggers[pf][type]) == NULL) nf_logger_find_get()
163 request_module("nf-logger-%u-%u", pf, type); nf_logger_find_get()
166 logger = rcu_dereference(loggers[pf][type]); nf_logger_find_get()
178 void nf_logger_put(int pf, enum nf_log_type type) nf_logger_put() argument
182 BUG_ON(loggers[pf][type] == NULL); nf_logger_put()
185 logger = rcu_dereference(loggers[pf][type]); nf_logger_put()
192 u_int8_t pf, nf_log_packet()
206 logger = rcu_dereference(loggers[pf][loginfo->type]); nf_log_packet()
208 logger = rcu_dereference(net->nf.nf_loggers[pf]); nf_log_packet()
214 logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix); nf_log_packet()
221 u_int8_t pf, nf_log_trace()
233 logger = rcu_dereference(net->nf.nf_loggers[pf]); nf_log_trace()
238 logger->logfn(net, pf, hooknum, skb, in, out, loginfo, prefix); nf_log_trace()
191 nf_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *fmt, ...) nf_log_packet() argument
220 nf_log_trace(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *loginfo, const char *fmt, ...) nf_log_trace() argument
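A minimal sketch of plugging a logger into the machinery above, assuming the nf_logger/nf_logfn declarations this tree's nf_log.h exposes (see the header hits further down); the "demo" naming is hypothetical.

/* Register a per-protocol-family logger; nf_log_packet() will route to
 * it by pf and NF_LOG_TYPE_LOG once bound.
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_log.h>

static void demo_logfn(struct net *net, u_int8_t pf, unsigned int hooknum,
		       const struct sk_buff *skb,
		       const struct net_device *in,
		       const struct net_device *out,
		       const struct nf_loginfo *loginfo, const char *prefix)
{
	pr_info("%spf=%u hook=%u len=%u\n", prefix ? prefix : "",
		pf, hooknum, skb->len);
}

static struct nf_logger demo_logger = {
	.name	= "demo-log",
	.type	= NF_LOG_TYPE_LOG,
	.logfn	= demo_logfn,
	.me	= THIS_MODULE,
};

static int __init demo_log_init(void)
{
	return nf_log_register(NFPROTO_IPV4, &demo_logger);
}

static void __exit demo_log_exit(void)
{
	nf_log_unregister(&demo_logger);
}

module_init(demo_log_init);
module_exit(demo_log_exit);
MODULE_LICENSE("GPL");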
H A Dnf_sockopt.c31 if (ops->pf == reg->pf nf_register_sockopt()
61 static struct nf_sockopt_ops *nf_sockopt_find(struct sock *sk, u_int8_t pf, nf_sockopt_find() argument
68 if (ops->pf == pf) { nf_sockopt_find()
92 static int nf_sockopt(struct sock *sk, u_int8_t pf, int val, nf_sockopt() argument
98 ops = nf_sockopt_find(sk, pf, val, get); nf_sockopt()
111 int nf_setsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, nf_setsockopt() argument
114 return nf_sockopt(sk, pf, val, opt, &len, 0); nf_setsockopt()
118 int nf_getsockopt(struct sock *sk, u_int8_t pf, int val, char __user *opt, nf_getsockopt() argument
121 return nf_sockopt(sk, pf, val, opt, len, 1); nf_getsockopt()
126 static int compat_nf_sockopt(struct sock *sk, u_int8_t pf, int val, compat_nf_sockopt() argument
132 ops = nf_sockopt_find(sk, pf, val, get); compat_nf_sockopt()
152 int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, compat_nf_setsockopt() argument
155 return compat_nf_sockopt(sk, pf, val, opt, &len, 0); compat_nf_setsockopt()
159 int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, compat_nf_getsockopt() argument
162 return compat_nf_sockopt(sk, pf, val, opt, len, 1); compat_nf_getsockopt()
H A Dnf_conntrack_proto_udplite.c92 u_int8_t pf, udplite_packet()
122 u_int8_t pf, udplite_error()
134 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, udplite_error()
144 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, udplite_error()
152 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, udplite_error()
160 pf)) { udplite_error()
162 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, udplite_error()
88 udplite_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, u_int8_t pf, unsigned int hooknum, unsigned int *timeouts) udplite_packet() argument
118 udplite_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) udplite_error() argument
H A Dnf_conntrack_proto_udp.c84 u_int8_t pf, udp_packet()
112 u_int8_t pf, udp_error()
123 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, udp_error()
131 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, udp_error()
145 nf_checksum(skb, hooknum, dataoff, IPPROTO_UDP, pf)) { udp_error()
147 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, udp_error()
80 udp_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, u_int8_t pf, unsigned int hooknum, unsigned int *timeouts) udp_packet() argument
110 udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) udp_error() argument
H A Dcore.c70 list_for_each_entry(elem, &nf_hooks[reg->pf][reg->hooknum], list) { nf_register_hook()
77 static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]); nf_register_hook()
89 static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]); nf_unregister_hook()
170 elem = list_entry_rcu(&nf_hooks[state->pf][state->hook], nf_hook_slow()
173 verdict = nf_iterate(&nf_hooks[state->pf][state->hook], skb, state, nf_hook_slow()
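For reference, a hedged sketch of what an nf_register_hook() consumer looks like against this tree, using the (ops, skb, state) hook prototype implied by the nf_hook_state lines indexed below; all names here are illustrative.

/* A pre-routing IPv4 hook keyed by ops->pf / ops->hooknum. */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>

static unsigned int demo_hook(const struct nf_hook_ops *ops,
			      struct sk_buff *skb,
			      const struct nf_hook_state *state)
{
	pr_debug("pf=%u hook=%u in=%s\n", state->pf, state->hook,
		 state->in ? state->in->name : "?");
	return NF_ACCEPT;	/* pass the packet through unchanged */
}

static struct nf_hook_ops demo_ops = {
	.hook		= demo_hook,
	.owner		= THIS_MODULE,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_PRE_ROUTING,
	.priority	= NF_IP_PRI_FIRST,
};

static int __init demo_hook_init(void)
{
	return nf_register_hook(&demo_ops);
}

static void __exit demo_hook_exit(void)
{
	nf_unregister_hook(&demo_ops);
}

module_init(demo_hook_init);
module_exit(demo_hook_exit);
MODULE_LICENSE("GPL");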
H A Dnf_conntrack_sane.c181 pr_debug("nf_ct_sane: unregistering helper for pf: %d " nf_conntrack_sane_fini()
219 pr_debug("nf_ct_sane: registering helper for pf: %d " nf_conntrack_sane_init()
225 "register helper for pf: %d port: %d\n", nf_conntrack_sane_init()
H A Dnf_queue.c148 afinfo = nf_get_afinfo(state->pf); nf_queue()
207 afinfo = nf_get_afinfo(entry->state.pf); nf_reinject()
216 verdict = nf_iterate(&nf_hooks[entry->state.pf][entry->state.hook], nf_reinject()
H A Dnf_tables_inet.c28 ops->pf = afi->family; nft_inet_hook_ops_init()
H A Dnf_conntrack_tftp.c142 " helper for pf: %u port: %u\n", nf_conntrack_tftp_init()
H A Dnfnetlink_log.c400 u_int8_t pf, __build_packet_message()
419 nfmsg->nfgen_family = pf; __build_packet_message()
440 if (pf == PF_BRIDGE) { __build_packet_message()
475 if (pf == PF_BRIDGE) { __build_packet_message()
617 u_int8_t pf, nfulnl_log_packet()
719 __build_packet_message(log, inst, skb, data_len, pf, nfulnl_log_packet()
812 u_int8_t pf = nfmsg->nfgen_family; nfulnl_recv_config() local
818 return nf_log_bind_pf(net, pf, &nfulnl_logger); nfulnl_recv_config()
820 nf_log_unbind_pf(net, pf); nfulnl_recv_config()
396 __build_packet_message(struct nfnl_log_net *log, struct nfulnl_instance *inst, const struct sk_buff *skb, unsigned int data_len, u_int8_t pf, unsigned int hooknum, const struct net_device *indev, const struct net_device *outdev, const char *prefix, unsigned int plen) __build_packet_message() argument
616 nfulnl_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *li_user, const char *prefix) nfulnl_log_packet() argument
H A Dnft_queue.c45 priv->queues_total, pkt->ops->pf, nft_queue_eval()
H A Dnft_reject_inet.c27 switch (pkt->ops->pf) { nft_reject_inet_eval()
H A Dnft_meta.c45 *dest = pkt->ops->pf; nft_meta_get_eval()
138 switch (pkt->ops->pf) { nft_meta_get_eval()
H A Dnf_conntrack_proto_tcp.c507 u_int8_t pf) tcp_in_window()
716 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, tcp_in_window()
756 u_int8_t pf, tcp_error()
768 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, tcp_error()
776 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, tcp_error()
787 nf_checksum(skb, hooknum, dataoff, IPPROTO_TCP, pf)) { tcp_error()
789 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, tcp_error()
798 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, tcp_error()
816 u_int8_t pf, tcp_packet()
955 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, tcp_packet()
980 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, tcp_packet()
997 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, tcp_packet()
1009 nf_log_packet(net, pf, 0, skb, NULL, NULL, tcp_packet()
1037 skb, dataoff, th, pf)) { tcp_packet()
500 tcp_in_window(const struct nf_conn *ct, struct ip_ct_tcp *state, enum ip_conntrack_dir dir, unsigned int index, const struct sk_buff *skb, unsigned int dataoff, const struct tcphdr *tcph, u_int8_t pf) tcp_in_window() argument
752 tcp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) tcp_error() argument
812 tcp_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, u_int8_t pf, unsigned int hooknum, unsigned int *timeouts) tcp_packet() argument
H A Dnf_conntrack_proto_generic.c81 u_int8_t pf, generic_packet()
77 generic_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, u_int8_t pf, unsigned int hooknum, unsigned int *timeout) generic_packet() argument
H A Dnf_log_common.c152 nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf, nf_log_dump_packet_common() argument
H A Dnft_log.c36 nf_log_packet(net, pkt->ops->pf, pkt->ops->hooknum, pkt->skb, pkt->in, nft_log_eval()
H A Dnf_conntrack_proto_dccp.c480 u_int8_t pf, unsigned int hooknum, dccp_packet()
546 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, dccp_packet()
552 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, dccp_packet()
573 u_int8_t pf, unsigned int hooknum) dccp_error()
603 pf)) { dccp_error()
617 nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg); dccp_error()
478 dccp_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, u_int8_t pf, unsigned int hooknum, unsigned int *timeouts) dccp_packet() argument
570 dccp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) dccp_error() argument
H A Dnf_conntrack_ftp.c589 pr_debug("nf_ct_ftp: unregistering helper for pf: %d " nf_conntrack_ftp_fini()
628 pr_debug("nf_ct_ftp: registering helper for pf: %d " nf_conntrack_ftp_init()
634 " helper for pf: %d port: %d\n", nf_conntrack_ftp_init()
H A Dnf_conntrack_core.c1059 nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum, nf_conntrack_in() argument
1083 l3proto = __nf_ct_l3proto_find(pf); nf_conntrack_in()
1094 l4proto = __nf_ct_l4proto_find(pf, protonum); nf_conntrack_in()
1101 pf, hooknum); nf_conntrack_in()
1113 ct = resolve_normal_ct(net, tmpl, skb, dataoff, pf, protonum, nf_conntrack_in()
1134 ret = l4proto->packet(ct, skb, dataoff, ctinfo, pf, hooknum, timeouts); nf_conntrack_in()
H A Dnf_conntrack_irc.c271 "for pf: %u port: %u\n", nf_conntrack_irc_init()
H A Dnfnetlink_queue_core.c371 nfmsg->nfgen_family = entry->state.pf; nfqnl_build_packet_message()
387 if (entry->state.pf == PF_BRIDGE) { nfqnl_build_packet_message()
421 if (entry->state.pf == PF_BRIDGE) { nfqnl_build_packet_message()
658 switch (entry->state.pf) { nfqnl_enqueue_packet()
H A Dnf_conntrack_proto_gre.c255 u_int8_t pf, gre_packet()
251 gre_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, u_int8_t pf, unsigned int hooknum, unsigned int *timeouts) gre_packet() argument
/linux-4.1.27/include/net/netfilter/
H A Dnfnetlink_log.h6 u_int8_t pf,
H A Dnf_log.h37 u_int8_t pf,
53 int nf_log_register(u_int8_t pf, struct nf_logger *logger);
56 void nf_log_set(struct net *net, u_int8_t pf,
60 int nf_log_bind_pf(struct net *net, u_int8_t pf,
62 void nf_log_unbind_pf(struct net *net, u_int8_t pf);
64 int nf_logger_find_get(int pf, enum nf_log_type type);
65 void nf_logger_put(int pf, enum nf_log_type type);
66 void nf_logger_request_module(int pf, enum nf_log_type type);
74 u_int8_t pf,
84 u_int8_t pf,
105 void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
H A Dnf_conntrack_l4proto.h42 u_int8_t pf,
56 u_int8_t pf, unsigned int hooknum);
H A Dnf_conntrack_core.h23 unsigned int nf_conntrack_in(struct net *net, u_int8_t pf, unsigned int hooknum,
/linux-4.1.27/include/linux/
H A Dnetfilter.h53 u_int8_t pf; member in struct:nf_hook_state
62 int thresh, u_int8_t pf, nf_hook_state_init()
70 p->pf = pf; nf_hook_state_init()
88 u_int8_t pf; member in struct:nf_hook_ops
97 u_int8_t pf; member in struct:nf_sockopt_ops
134 static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) nf_hooks_active() argument
136 if (__builtin_constant_p(pf) && nf_hooks_active()
138 return static_key_false(&nf_hooks_needed[pf][hook]); nf_hooks_active()
140 return !list_empty(&nf_hooks[pf][hook]); nf_hooks_active()
143 static inline bool nf_hooks_active(u_int8_t pf, unsigned int hook) nf_hooks_active() argument
145 return !list_empty(&nf_hooks[pf][hook]); nf_hooks_active()
158 static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, nf_hook_thresh() argument
166 if (nf_hooks_active(pf, hook)) { nf_hook_thresh()
169 nf_hook_state_init(&state, hook, thresh, pf, nf_hook_thresh()
176 static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk, nf_hook() argument
181 return nf_hook_thresh(pf, hook, sk, skb, indev, outdev, okfn, INT_MIN); nf_hook()
202 NF_HOOK_THRESH(uint8_t pf, unsigned int hook, struct sock *sk, NF_HOOK_THRESH() argument
207 int ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, thresh); NF_HOOK_THRESH()
214 NF_HOOK_COND(uint8_t pf, unsigned int hook, struct sock *sk, NF_HOOK_COND() argument
221 ((ret = nf_hook_thresh(pf, hook, sk, skb, in, out, okfn, INT_MIN)) == 1)) NF_HOOK_COND()
227 NF_HOOK(uint8_t pf, unsigned int hook, struct sock *sk, struct sk_buff *skb, NF_HOOK() argument
231 return NF_HOOK_THRESH(pf, hook, sk, skb, in, out, okfn, INT_MIN); NF_HOOK()
235 int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
237 int nf_getsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
240 int compat_nf_setsockopt(struct sock *sk, u_int8_t pf, int optval,
242 int compat_nf_getsockopt(struct sock *sk, u_int8_t pf, int optval,
331 #define NF_HOOK(pf, hook, sk, skb, indev, outdev, okfn) (okfn)(sk, skb)
332 #define NF_HOOK_COND(pf, hook, sk, skb, indev, outdev, okfn, cond) (okfn)(sk, skb) nf_hook_thresh()
333 static inline int nf_hook_thresh(u_int8_t pf, unsigned int hook, nf_hook_thresh() argument
342 static inline int nf_hook(u_int8_t pf, unsigned int hook, struct sock *sk, nf_hook() argument
60 nf_hook_state_init(struct nf_hook_state *p, unsigned int hook, int thresh, u_int8_t pf, struct net_device *indev, struct net_device *outdev, struct sock *sk, int (*okfn)(struct sock *, struct sk_buff *)) nf_hook_state_init() argument
H A Dnet.h287 MODULE_ALIAS("net-pf-" __stringify(proto))
289 #define MODULE_ALIAS_NET_PF_PROTO(pf, proto) \
290 MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto))
292 #define MODULE_ALIAS_NET_PF_PROTO_TYPE(pf, proto, type) \
293 MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
296 #define MODULE_ALIAS_NET_PF_PROTO_NAME(pf, proto, name) \
297 MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
H A Dparport.h294 - pf is the preemption function (may be NULL for no callback)
300 int (*pf)(void *), void (*kf)(void *),
/linux-4.1.27/fs/affs/
H A Dsymlink.c37 char *pf; affs_symlink_readpage() local
39 pf = sbi->s_prefix ? sbi->s_prefix : "/"; affs_symlink_readpage()
40 while (i < 1023 && (c = pf[i])) affs_symlink_readpage()
/linux-4.1.27/drivers/iio/
H A Dindustrialio-trigger.c198 struct iio_poll_func *pf) iio_trigger_attach_poll_func()
205 __module_get(pf->indio_dev->info->driver_module); iio_trigger_attach_poll_func()
206 pf->irq = iio_trigger_get_irq(trig); iio_trigger_attach_poll_func()
207 ret = request_threaded_irq(pf->irq, pf->h, pf->thread, iio_trigger_attach_poll_func()
208 pf->type, pf->name, iio_trigger_attach_poll_func()
209 pf); iio_trigger_attach_poll_func()
211 module_put(pf->indio_dev->info->driver_module); iio_trigger_attach_poll_func()
218 module_put(pf->indio_dev->info->driver_module); iio_trigger_attach_poll_func()
225 struct iio_poll_func *pf) iio_trigger_detach_poll_func()
237 iio_trigger_put_irq(trig, pf->irq); iio_trigger_detach_poll_func()
238 free_irq(pf->irq, pf); iio_trigger_detach_poll_func()
239 module_put(pf->indio_dev->info->driver_module); iio_trigger_detach_poll_func()
246 struct iio_poll_func *pf = p; iio_pollfunc_store_time() local
247 pf->timestamp = iio_get_time_ns(); iio_pollfunc_store_time()
261 struct iio_poll_func *pf; iio_alloc_pollfunc() local
263 pf = kmalloc(sizeof *pf, GFP_KERNEL); iio_alloc_pollfunc()
264 if (pf == NULL) iio_alloc_pollfunc()
267 pf->name = kvasprintf(GFP_KERNEL, fmt, vargs); iio_alloc_pollfunc()
269 if (pf->name == NULL) { iio_alloc_pollfunc()
270 kfree(pf); iio_alloc_pollfunc()
273 pf->h = h; iio_alloc_pollfunc()
274 pf->thread = thread; iio_alloc_pollfunc()
275 pf->type = type; iio_alloc_pollfunc()
276 pf->indio_dev = indio_dev; iio_alloc_pollfunc()
278 return pf; iio_alloc_pollfunc()
282 void iio_dealloc_pollfunc(struct iio_poll_func *pf) iio_dealloc_pollfunc() argument
284 kfree(pf->name); iio_dealloc_pollfunc()
285 kfree(pf); iio_dealloc_pollfunc()
197 iio_trigger_attach_poll_func(struct iio_trigger *trig, struct iio_poll_func *pf) iio_trigger_attach_poll_func() argument
224 iio_trigger_detach_poll_func(struct iio_trigger *trig, struct iio_poll_func *pf) iio_trigger_detach_poll_func() argument
/linux-4.1.27/arch/mips/include/asm/mach-ip28/
H A Dspaces.h9 * 2004 pf
H A Dcpu-feature-overrides.h7 * 6/2004 pf
/linux-4.1.27/arch/x86/include/asm/trace/
H A Dexceptions.h31 TP_printk("address=%pf ip=%pf error_code=0x%lx",
/linux-4.1.27/include/media/
H A Dsh_mobile_ceu.h26 unsigned int *asd_sizes; /* 0-terminated array pf asd group sizes */
/linux-4.1.27/drivers/infiniband/hw/usnic/
H A Dusnic_ib_main.c64 return scnprintf(buf, buf_sz, "PF: %s ", vf->pf->ib_dev.name); usnic_ib_dump_vf_hdr()
502 struct usnic_ib_dev *pf; usnic_ib_pci_probe() local
535 pf = usnic_ib_discover_pf(vf->vnic); usnic_ib_pci_probe()
536 if (IS_ERR_OR_NULL(pf)) { usnic_ib_pci_probe()
537 usnic_err("Failed to discover pf of vnic %s with err%ld\n", usnic_ib_pci_probe()
538 pci_name(pdev), PTR_ERR(pf)); usnic_ib_pci_probe()
539 err = pf ? PTR_ERR(pf) : -EFAULT; usnic_ib_pci_probe()
543 vf->pf = pf; usnic_ib_pci_probe()
545 mutex_lock(&pf->usdev_lock); usnic_ib_pci_probe()
546 list_add_tail(&vf->link, &pf->vf_dev_list); usnic_ib_pci_probe()
554 pf->vf_res_cnt[res_type] = usnic_vnic_res_cnt(vf->vnic, usnic_ib_pci_probe()
558 mutex_unlock(&pf->usdev_lock); usnic_ib_pci_probe()
561 pf->ib_dev.name); usnic_ib_pci_probe()
581 struct usnic_ib_dev *pf = vf->pf; usnic_ib_pci_remove() local
583 mutex_lock(&pf->usdev_lock); usnic_ib_pci_remove()
585 mutex_unlock(&pf->usdev_lock); usnic_ib_pci_remove()
587 kref_put(&pf->vf_cnt, usnic_ib_undiscover_pf); usnic_ib_pci_remove()
H A Dusnic_ib_verbs.c55 us_ibdev = qp_grp->vf->pf; usnic_ib_fill_create_qp_resp()
366 mutex_lock(&vf->pf->usdev_lock); usnic_ib_query_qp()
381 mutex_unlock(&vf->pf->usdev_lock); usnic_ib_query_qp()
385 mutex_unlock(&vf->pf->usdev_lock); usnic_ib_query_qp()
533 mutex_lock(&vf->pf->usdev_lock); usnic_ib_destroy_qp()
541 mutex_unlock(&vf->pf->usdev_lock); usnic_ib_destroy_qp()
556 mutex_lock(&qp_grp->vf->pf->usdev_lock); usnic_ib_modify_qp()
569 mutex_unlock(&qp_grp->vf->pf->usdev_lock); usnic_ib_modify_qp()
H A Dusnic_ib.h73 struct usnic_ib_dev *pf; member in struct:usnic_ib_vf
H A Dusnic_ib_sysfs.c322 us_ibdev = qp_grp->vf->pf; usnic_ib_sysfs_qpn_add()
337 us_ibdev = qp_grp->vf->pf; usnic_ib_sysfs_qpn_remove()
H A Dusnic_fwd.c273 usnic_dbg("Filter %u already deleted for VF Idx %u pf: %s status: %d", usnic_fwd_dealloc_flow()
/linux-4.1.27/arch/x86/kernel/cpu/microcode/
H A Dintel_lib.c36 unsigned int sig, unsigned int pf) update_match_cpu()
38 return (!sigmatch(sig, csig, pf, cpf)) ? 0 : 1; update_match_cpu()
112 - (mc_header->sig + mc_header->pf + mc_header->cksum) microcode_sanity_check()
113 + (ext_sig->sig + ext_sig->pf + ext_sig->cksum); microcode_sanity_check()
135 if (update_match_cpu(csig, cpf, mc_header->sig, mc_header->pf)) get_matching_sig()
147 if (update_match_cpu(csig, cpf, ext_sig->sig, ext_sig->pf)) get_matching_sig()
35 update_match_cpu(unsigned int csig, unsigned int cpf, unsigned int sig, unsigned int pf) update_match_cpu() argument
H A Dintel_early.c63 uci->cpu_sig.pf, load_microcode_early()
249 unsigned int sig, pf, new_rev; _save_mc() local
257 pf = mc_saved_hdr->pf; _save_mc()
260 if (!get_matching_sig(sig, pf, new_rev, ucode_ptr)) _save_mc()
359 csig.pf = 0; collect_cpu_info_early()
375 csig.pf = 1 << ((val[1] >> 18) & 7); collect_cpu_info_early()
397 unsigned int sig, pf, rev, total_size, data_size, date; show_saved_mc() local
409 pf = uci.cpu_sig.pf; show_saved_mc()
411 pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev); show_saved_mc()
422 pf = mc_saved_header->pf; show_saved_mc()
428 pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, toal size=0x%x, date = %04x-%02x-%02x\n", show_saved_mc()
429 i, sig, pf, rev, total_size, show_saved_mc()
444 pf = ext_sig->pf; show_saved_mc()
446 pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n", show_saved_mc()
447 j, sig, pf); show_saved_mc()
H A Dintel.c70 * Fix sigmatch() macro to handle old CPUs with pf == 0.
102 csig->pf = 1 << ((val[1] >> 18) & 7); collect_cpu_info()
106 pr_info("CPU%d sig=0x%x, pf=0x%x, revision=0x%x\n", collect_cpu_info()
107 cpu_num, csig->sig, csig->pf, csig->rev); collect_cpu_info()
124 cpf = cpu_sig.pf; get_matching_mc()
228 cpf = uci->cpu_sig.pf; generic_load_microcode()
H A Dcore.c70 * Fix sigmatch() macro to handle old CPUs with pf == 0.
350 return sprintf(buf, "0x%x\n", uci->cpu_sig.pf); pf_show()
/linux-4.1.27/drivers/iio/common/st_sensors/
H A Dst_sensors_buffer.c108 struct iio_poll_func *pf = p; st_sensors_trigger_handler() local
109 struct iio_dev *indio_dev = pf->indio_dev; st_sensors_trigger_handler()
117 pf->timestamp); st_sensors_trigger_handler()
/linux-4.1.27/drivers/iio/imu/
H A Dadis16400_buffer.c63 struct iio_poll_func *pf = p; adis16400_trigger_handler() local
64 struct iio_dev *indio_dev = pf->indio_dev; adis16400_trigger_handler()
95 pf->timestamp); adis16400_trigger_handler()
H A Dadis_buffer.c78 struct iio_poll_func *pf = p; adis_trigger_handler() local
79 struct iio_dev *indio_dev = pf->indio_dev; adis_trigger_handler()
106 pf->timestamp); adis_trigger_handler()
/linux-4.1.27/net/bridge/netfilter/
H A Debtable_filter.c79 .pf = NFPROTO_BRIDGE,
86 .pf = NFPROTO_BRIDGE,
93 .pf = NFPROTO_BRIDGE,
H A Debtable_nat.c79 .pf = NFPROTO_BRIDGE,
86 .pf = NFPROTO_BRIDGE,
93 .pf = NFPROTO_BRIDGE,
H A Dnf_log_bridge.c19 static void nf_log_bridge_packet(struct net *net, u_int8_t pf, nf_log_bridge_packet() argument
H A Debt_log.c75 ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum, ebt_log_packet() argument
/linux-4.1.27/include/linux/netfilter/ipset/
H A Dip_set_getport.h18 extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src,
/linux-4.1.27/net/ipv4/netfilter/
H A Diptable_nat.c74 .pf = NFPROTO_IPV4,
82 .pf = NFPROTO_IPV4,
90 .pf = NFPROTO_IPV4,
98 .pf = NFPROTO_IPV4,
H A Dnf_defrag_ipv4.c98 .pf = NFPROTO_IPV4,
105 .pf = NFPROTO_IPV4,
H A Dnf_log_arp.c80 static void nf_log_arp_packet(struct net *net, u_int8_t pf, nf_log_arp_packet() argument
98 nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo, nf_log_arp_packet()
H A Dnf_conntrack_l3proto_ipv4.c170 .pf = NFPROTO_IPV4,
177 .pf = NFPROTO_IPV4,
184 .pf = NFPROTO_IPV4,
191 .pf = NFPROTO_IPV4,
198 .pf = NFPROTO_IPV4,
205 .pf = NFPROTO_IPV4,
350 .pf = PF_INET,
H A Dnf_log_ipv4.c312 static void nf_log_ip_packet(struct net *net, u_int8_t pf, nf_log_ip_packet() argument
330 nf_log_dump_packet_common(m, pf, hooknum, skb, in, nf_log_ip_packet()
H A Dipt_SYNPROXY.c436 .pf = NFPROTO_IPV4,
443 .pf = NFPROTO_IPV4,
H A Dnf_conntrack_proto_icmp.c94 u_int8_t pf, icmp_packet()
182 enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) icmp_error()
90 icmp_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, u_int8_t pf, unsigned int hooknum, unsigned int *timeout) icmp_packet() argument
180 icmp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) icmp_error() argument
/linux-4.1.27/net/ipv6/netfilter/
H A Dip6table_nat.c76 .pf = NFPROTO_IPV6,
84 .pf = NFPROTO_IPV6,
92 .pf = NFPROTO_IPV6,
100 .pf = NFPROTO_IPV6,
H A Dnf_defrag_ipv6_hooks.c89 .pf = NFPROTO_IPV6,
96 .pf = NFPROTO_IPV6,
H A Dnf_conntrack_l3proto_ipv6.c191 .pf = NFPROTO_IPV6,
198 .pf = NFPROTO_IPV6,
205 .pf = NFPROTO_IPV6,
212 .pf = NFPROTO_IPV6,
219 .pf = NFPROTO_IPV6,
226 .pf = NFPROTO_IPV6,
338 .pf = NFPROTO_IPV6,
H A Dnf_log_ipv6.c344 static void nf_log_ip6_packet(struct net *net, u_int8_t pf, nf_log_ip6_packet() argument
362 nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, nf_log_ip6_packet()
H A Dnf_conntrack_proto_icmpv6.c106 u_int8_t pf, icmpv6_packet()
198 enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) icmpv6_error()
102 icmpv6_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, u_int8_t pf, unsigned int hooknum, unsigned int *timeout) icmpv6_packet() argument
196 icmpv6_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) icmpv6_error() argument
H A Dip6t_SYNPROXY.c459 .pf = NFPROTO_IPV6,
466 .pf = NFPROTO_IPV6,
/linux-4.1.27/net/sunrpc/auth_gss/
H A Dgss_mech_switch.c59 struct pf_desc *pf; gss_mech_free() local
63 pf = &gm->gm_pfs[i]; gss_mech_free()
64 kfree(pf->auth_domain_name); gss_mech_free()
65 pf->auth_domain_name = NULL; gss_mech_free()
86 struct pf_desc *pf; gss_mech_svc_setup() local
90 pf = &gm->gm_pfs[i]; gss_mech_svc_setup()
91 pf->auth_domain_name = make_auth_domain_name(pf->name); gss_mech_svc_setup()
93 if (pf->auth_domain_name == NULL) gss_mech_svc_setup()
95 status = svcauth_gss_register_pseudoflavor(pf->pseudoflavor, gss_mech_svc_setup()
96 pf->auth_domain_name); gss_mech_svc_setup()
/linux-4.1.27/security/smack/
H A Dsmack_netfilter.c61 .pf = NFPROTO_IPV4,
69 .pf = NFPROTO_IPV6,
/linux-4.1.27/drivers/iio/gyro/
H A Ditg3200_buffer.c49 struct iio_poll_func *pf = p; itg3200_trigger_handler() local
50 struct iio_dev *indio_dev = pf->indio_dev; itg3200_trigger_handler()
58 iio_push_to_buffers_with_timestamp(indio_dev, buf, pf->timestamp); itg3200_trigger_handler()
/linux-4.1.27/drivers/staging/iio/meter/
H A Dade7758_ring.c62 struct iio_poll_func *pf = p; ade7758_trigger_handler() local
63 struct iio_dev *indio_dev = pf->indio_dev; ade7758_trigger_handler()
72 iio_push_to_buffers_with_timestamp(indio_dev, dat64, pf->timestamp); ade7758_trigger_handler()
/linux-4.1.27/drivers/iio/imu/inv_mpu6050/
H A Dinv_mpu_ring.c105 struct iio_poll_func *pf = p; inv_mpu6050_irq_handler() local
106 struct iio_dev *indio_dev = pf->indio_dev; inv_mpu6050_irq_handler()
122 struct iio_poll_func *pf = p; inv_mpu6050_read_fifo() local
123 struct iio_dev *indio_dev = pf->indio_dev; inv_mpu6050_read_fifo()
/linux-4.1.27/arch/arm/include/asm/
H A Dglue-pf.h2 * arch/arm/include/asm/glue-pf.h
/linux-4.1.27/drivers/staging/iio/adc/
H A Dad7606_ring.c27 struct iio_poll_func *pf = p; ad7606_trigger_handler_th_bh() local
28 struct ad7606_state *st = iio_priv(pf->indio_dev); ad7606_trigger_handler_th_bh()
/linux-4.1.27/arch/x86/include/asm/
H A Dmicrocode_intel.h13 unsigned int pf; member in struct:microcode_header_intel
27 unsigned int pf; member in struct:extended_signature
H A Dmicrocode.h21 unsigned int pf; member in struct:cpu_signature
H A Dxor.h63 #define BLK64(pf, op, i) \
64 pf(i) \
/linux-4.1.27/include/trace/events/
H A Dworkqueue.h60 TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u",
102 TP_printk("work struct %p: function %pf", __entry->work, __entry->function)
H A Dtimer.h64 TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]",
93 TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function,__entry->now)
180 TP_printk("hrtimer=%p function=%pf expires=%llu softexpires=%llu",
214 TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function,
/linux-4.1.27/drivers/net/ppp/
H A Dppp_generic.c91 #define PF_TO_X(pf, X) container_of(pf, X, file)
93 #define PF_TO_PPP(pf) PF_TO_X(pf, struct ppp)
94 #define PF_TO_CHANNEL(pf) PF_TO_X(pf, struct channel)
248 static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf,
273 static void init_ppp_file(struct ppp_file *pf, int kind);
388 struct ppp_file *pf = file->private_data; ppp_release() local
391 if (pf) { ppp_release()
393 if (pf->kind == INTERFACE) { ppp_release()
394 ppp = PF_TO_PPP(pf); ppp_release()
398 if (atomic_dec_and_test(&pf->refcnt)) { ppp_release()
399 switch (pf->kind) { ppp_release()
401 ppp_destroy_interface(PF_TO_PPP(pf)); ppp_release()
404 ppp_destroy_channel(PF_TO_CHANNEL(pf)); ppp_release()
415 struct ppp_file *pf = file->private_data; ppp_read() local
424 if (!pf) ppp_read()
426 add_wait_queue(&pf->rwait, &wait); ppp_read()
429 skb = skb_dequeue(&pf->rq); ppp_read()
433 if (pf->dead) ppp_read()
435 if (pf->kind == INTERFACE) { ppp_read()
441 struct ppp *ppp = PF_TO_PPP(pf); ppp_read()
455 remove_wait_queue(&pf->rwait, &wait); ppp_read()
480 struct ppp_file *pf = file->private_data; ppp_write() local
484 if (!pf) ppp_write()
487 skb = alloc_skb(count + pf->hdrlen, GFP_KERNEL); ppp_write()
490 skb_reserve(skb, pf->hdrlen); ppp_write()
497 skb_queue_tail(&pf->xq, skb); ppp_write()
499 switch (pf->kind) { ppp_write()
501 ppp_xmit_process(PF_TO_PPP(pf)); ppp_write()
504 ppp_channel_push(PF_TO_CHANNEL(pf)); ppp_write()
517 struct ppp_file *pf = file->private_data; ppp_poll() local
520 if (!pf) ppp_poll()
522 poll_wait(file, &pf->rwait, wait); ppp_poll()
524 if (skb_peek(&pf->rq)) ppp_poll()
526 if (pf->dead) ppp_poll()
528 else if (pf->kind == INTERFACE) { ppp_poll()
530 struct ppp *ppp = PF_TO_PPP(pf); ppp_poll()
566 struct ppp_file *pf = file->private_data; ppp_ioctl() local
576 if (!pf) ppp_ioctl()
578 pf, file, cmd, arg); ppp_ioctl()
594 if (pf->kind == INTERFACE) { ppp_ioctl()
595 ppp = PF_TO_PPP(pf); ppp_ioctl()
609 if (pf->kind == CHANNEL) { ppp_ioctl()
614 pch = PF_TO_CHANNEL(pf); ppp_ioctl()
639 if (pf->kind != INTERFACE) { ppp_ioctl()
646 ppp = PF_TO_PPP(pf); ppp_ioctl()
824 static int ppp_unattached_ioctl(struct net *net, struct ppp_file *pf, ppp_unattached_ioctl() argument
2767 init_ppp_file(struct ppp_file *pf, int kind) init_ppp_file() argument
2769 pf->kind = kind; init_ppp_file()
2770 skb_queue_head_init(&pf->xq); init_ppp_file()
2771 skb_queue_head_init(&pf->rq); init_ppp_file()
2772 atomic_set(&pf->refcnt, 1); init_ppp_file()
2773 init_waitqueue_head(&pf->rwait); init_ppp_file()
/linux-4.1.27/tools/perf/bench/
H A Dmem-memcpy.c118 #define pf (no_prefault ? 0 : 1) macro
163 result_cycle[pf] = info->do_cycle(r, len, only_prefault); __bench_mem_routine()
165 result_bps[pf] = info->do_gettimeofday(r, len, only_prefault); __bench_mem_routine()
187 (double)result_cycle[pf] __bench_mem_routine()
190 print_bps(result_bps[pf]); __bench_mem_routine()
207 printf("%lf\n", (double)result_cycle[pf] __bench_mem_routine()
210 printf("%lf\n", result_bps[pf]); __bench_mem_routine()
/linux-4.1.27/drivers/iio/proximity/
H A Das3935.c201 struct iio_poll_func *pf = private; as3935_trigger_handler() local
202 struct iio_dev *indio_dev = pf->indio_dev; as3935_trigger_handler()
212 iio_push_to_buffers_with_timestamp(indio_dev, &val, pf->timestamp); as3935_trigger_handler()
349 "ams,tuning-capacitor-pf", &st->tune_cap); as3935_probe()
353 "no tuning-capacitor-pf set, defaulting to %d", as3935_probe()
359 "wrong tuning-capacitor-pf setting of %d\n", as3935_probe()
H A Dsx9500.c490 struct iio_poll_func *pf = private; sx9500_trigger_handler() local
491 struct iio_dev *indio_dev = pf->indio_dev; sx9500_trigger_handler()
/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/
H A Dbnx2x_vfpf.h80 /* vf pf channel tlvs */
81 /* general tlv header (used for both vf->pf request and pf->vf response) */
87 /* header of first vf->pf tlv carries the offset used to calculate response
95 /* header of pf->vf tlvs, carries the status of handling the request */
181 /* in case of status NO_RESOURCE in message hdr, pf will fill
H A Dbnx2x_vfpf.c43 DP(BNX2X_MSG_IOV, "preparing to send %d tlv over vf pf channel\n", bnx2x_vfpf_prep()
60 DP(BNX2X_MSG_IOV, "done sending [%d] tlv over vf pf channel\n", bnx2x_vfpf_finalize()
146 BNX2X_ERR("done was non zero before message to pf was sent\n"); bnx2x_send_msg2pf()
205 BNX2X_ERR("Invalid ME register value: 0x%08x\n. Is pf driver up?", bnx2x_get_vf_id()
249 /* pf 2 vf bulletin board address */ bnx2x_vfpf_acquire()
520 BNX2X_ERR("Sending CLOSE failed: pf response was %d\n", bnx2x_vfpf_close_vf()
583 /* ask the pf to open a queue for the vf */ bnx2x_vfpf_setup_q()
708 /* request pf to add a mac for the vf */ bnx2x_vfpf_config_mac()
741 /* send message to pf */ bnx2x_vfpf_config_mac()
744 BNX2X_ERR("failed to send message to pf. rc was %d\n", rc); bnx2x_vfpf_config_mac()
762 /* send message to pf */ bnx2x_vfpf_config_mac()
781 /* request pf to config rss table for vf queues*/ bnx2x_vfpf_config_rss()
828 /* send message to pf */ bnx2x_vfpf_config_rss()
831 BNX2X_ERR("failed to send message to pf. rc was %d\n", rc); bnx2x_vfpf_config_rss()
980 /* enable vf_pf mailbox (aka vf-pf-channel) */ bnx2x_vf_enable_mbx()
2054 "vf pf event received: vfid %d, address_hi %x, address lo %x", bnx2x_vf_mbx_schedule()
2078 /* handle new vf-pf messages */ bnx2x_vf_mbx()
2103 "Handling vf pf event vfid %d, address: [%x:%x], resp_offset 0x%x\n", for_each_vf()
H A Dbnx2x_ethtool.c1165 /* Per pf misc lock must be acquired before the per port mcp lock. Otherwise,
1169 * pf A takes the port lock.
1170 * pf B succeeds in taking the same lock since they are from the same port.
1171 * pf A takes the per pf misc lock. Performs eeprom access.
1172 * pf A finishes. Unlocks the per pf misc lock.
1174 * pf A unlocks the per port lock, while pf B is still working (!).
1175 * mcp takes the per port lock and corrupts pf B's access (and/or has it's own
1176 * access corrupted by pf B)
H A Dbnx2x_sriov.h240 /* VF mail box (aka vf-pf channel) */
242 /* a container for the bi-directional vf<-->pf messages.
/linux-4.1.27/net/bridge/
H A Dbr_netfilter.c730 u_int8_t pf; br_nf_forward_ip() local
749 pf = NFPROTO_IPV4; br_nf_forward_ip()
751 pf = NFPROTO_IPV6; br_nf_forward_ip()
762 if (pf == NFPROTO_IPV4) { br_nf_forward_ip()
772 if (pf == NFPROTO_IPV4) br_nf_forward_ip()
777 NF_HOOK(pf, NF_INET_FORWARD, NULL, skb, br_nf_forward_ip()
890 u_int8_t pf; br_nf_post_routing() local
904 pf = NFPROTO_IPV4; br_nf_post_routing()
906 pf = NFPROTO_IPV6; br_nf_post_routing()
918 if (pf == NFPROTO_IPV4) br_nf_post_routing()
923 NF_HOOK(pf, NF_INET_POST_ROUTING, state->sk, skb, br_nf_post_routing()
994 .pf = NFPROTO_BRIDGE,
1001 .pf = NFPROTO_BRIDGE,
1008 .pf = NFPROTO_BRIDGE,
1015 .pf = NFPROTO_BRIDGE,
1022 .pf = NFPROTO_BRIDGE,
1029 .pf = NFPROTO_IPV4,
1036 .pf = NFPROTO_IPV6,
/linux-4.1.27/drivers/mmc/core/
H A Dquirks.c94 dev_dbg(&card->dev, "calling %pf\n", f->vendor_fixup); mmc_fixup_device()
/linux-4.1.27/include/uapi/linux/netfilter/
H A Dnfnetlink_queue.h74 __be16 pf; /* AF_xxx for PF_[UN]BIND */ member in struct:nfqnl_msg_config_cmd
/linux-4.1.27/net/sched/
H A Dcls_u32.c59 struct tc_u32_pcnt __percpu *pf; member in struct:tc_u_knode
131 __this_cpu_inc(n->pf->rcnt); u32_classify()
159 __this_cpu_inc(n->pf->kcnts[j]); u32_classify()
177 __this_cpu_inc(n->pf->rhit); u32_classify()
365 free_percpu(n->pf); u32_destroy_key()
706 * a special destroy call must be made to not free the pf memory. u32_init_knode()
708 new->pf = n->pf; u32_init_knode()
837 n->pf = __alloc_percpu(size, __alignof__(struct tc_u32_pcnt)); u32_change()
838 if (!n->pf) { u32_change()
891 free_percpu(n->pf); u32_change()
1023 struct tc_u32_pcnt *pf = per_cpu_ptr(n->pf, cpu); for_each_possible_cpu() local
1025 gpf->rcnt += pf->rcnt; for_each_possible_cpu()
1026 gpf->rhit += pf->rhit; for_each_possible_cpu()
1028 gpf->kcnts[i] += pf->kcnts[i]; for_each_possible_cpu()
/linux-4.1.27/include/linux/iio/
H A Dtrigger_consumer.h51 void iio_dealloc_pollfunc(struct iio_poll_func *pf);
/linux-4.1.27/drivers/md/
H A Ddm-thin.c227 struct pool_features pf; member in struct:pool
1060 if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) { schedule_copy()
1098 if (!pool->pf.zero_new_blocks) schedule_zero()
1257 return pool->pf.error_if_no_space ? -ENOSPC : 0; should_error_unserviceable_bio()
1335 m->pass_discard = pool->pf.discard_passdown; process_discard_cell()
1356 if ((!lookup_result.shared) && pool->pf.discard_passdown) process_discard_cell()
2027 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) do_no_space_timeout()
2096 return pool->pf.mode; get_pool_mode()
2180 if (!pool->pf.error_if_no_space && no_space_timeout) set_pool_mode()
2197 pool->pf.mode = new_mode; set_pool_mode()
2472 pool->pf = pt->adjusted_pf; bind_control_target()
2490 static void pool_features_init(struct pool_features *pf) pool_features_init() argument
2492 pf->mode = PM_WRITE; pool_features_init()
2493 pf->zero_new_blocks = true; pool_features_init()
2494 pf->discard_enabled = true; pool_features_init()
2495 pf->discard_passdown = true; pool_features_init()
2496 pf->error_if_no_space = false; pool_features_init()
2554 pool_features_init(&pool->pf); pool_create()
2716 static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf, parse_pool_features() argument
2742 pf->zero_new_blocks = false; parse_pool_features()
2745 pf->discard_enabled = false; parse_pool_features()
2748 pf->discard_passdown = false; parse_pool_features()
2751 pf->mode = PM_READ_ONLY; parse_pool_features()
2754 pf->error_if_no_space = true; parse_pool_features()
2845 struct pool_features pf; pool_ctr() local
2870 pool_features_init(&pf); pool_ctr()
2873 r = parse_pool_features(&as, &pf, ti); pool_ctr()
2877 metadata_mode = FMODE_READ | ((pf.mode == PM_READ_ONLY) ? 0 : FMODE_WRITE); pool_ctr()
2913 block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created); pool_ctr()
2925 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) { pool_ctr()
2936 pt->adjusted_pf = pt->requested_pf = pf; pool_ctr()
2945 if (pf.discard_enabled && pf.discard_passdown) { pool_ctr()
3422 static void emit_flags(struct pool_features *pf, char *result, emit_flags() argument
3425 unsigned count = !pf->zero_new_blocks + !pf->discard_enabled + emit_flags()
3426 !pf->discard_passdown + (pf->mode == PM_READ_ONLY) + emit_flags()
3427 pf->error_if_no_space; emit_flags()
3430 if (!pf->zero_new_blocks) emit_flags()
3433 if (!pf->discard_enabled) emit_flags()
3436 if (!pf->discard_passdown) emit_flags()
3439 if (pf->mode == PM_READ_ONLY) emit_flags()
3442 if (pf->error_if_no_space) emit_flags()
3532 if (pool->pf.mode == PM_OUT_OF_DATA_SPACE) pool_status()
3534 else if (pool->pf.mode == PM_READ_ONLY) pool_status()
3539 if (!pool->pf.discard_enabled) pool_status()
3541 else if (pool->pf.discard_passdown) pool_status()
3546 if (pool->pf.error_if_no_space) pool_status()
3825 if (tc->pool->pf.discard_enabled) { thin_ctr()
/linux-4.1.27/drivers/infiniband/hw/ehca/
H A Dehca_eq.c78 &eq->pf, ehca_create_eq()
107 &eq->pf, ehca_create_eq()
H A Dehca_classes.h83 struct ehca_pfeq pf; member in struct:ehca_eq
205 struct ehca_pfqp pf; member in struct:ehca_qp
246 struct ehca_pfcq pf; member in struct:ehca_cq
H A Dehca_qp.c1012 &my_qp->pf, ehca_create_srq()
1026 &my_qp->pf, ehca_create_srq()
1040 &my_qp->pf, ehca_create_srq()
1081 my_qp->ipz_qp_handle, &my_qp->pf, prepare_sqe_rts()
1171 my_qp->ipz_qp_handle, &my_qp->pf, check_for_left_cqes()
1265 &my_qp->pf, internal_modify_qp()
1694 &my_qp->pf, internal_modify_qp()
1724 &my_qp->pf, internal_modify_qp()
1915 &my_qp->pf, ehca_query_qp()
H A Dehca_cq.c214 &my_cq->pf, ehca_create_cq()
H A Dipz_pt_fn.h190 /* struct page table for a queue, only to be used in pf */
/linux-4.1.27/drivers/staging/iio/
H A Diio_simple_dummy_buffer.c47 struct iio_poll_func *pf = p; iio_simple_dummy_trigger_h() local
48 struct iio_dev *indio_dev = pf->indio_dev; iio_simple_dummy_trigger_h()
/linux-4.1.27/net/core/
H A Dsock_diag.c168 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, __sock_diag_rcv_msg()
190 request_module("net-pf-%d-proto-%d-type-%d", PF_NETLINK, sock_diag_rcv_msg()
H A Dnet-procfs.c286 seq_printf(seq, " %-8s %pf\n", ptype_seq_show()
/linux-4.1.27/net/netfilter/ipset/
H A Dip_set_getport.c149 ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port) ip_set_get_ip_port() argument
154 switch (pf) { ip_set_get_ip_port()
/linux-4.1.27/include/uapi/linux/
H A Dllc.h38 LLC_OPT_P_TMR_EXP, /* pf cycle expire time (secs). */
/linux-4.1.27/drivers/staging/iio/accel/
H A Dlis3l02dq_ring.c139 struct iio_poll_func *pf = p; lis3l02dq_trigger_handler() local
140 struct iio_dev *indio_dev = pf->indio_dev; lis3l02dq_trigger_handler()
151 iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp); lis3l02dq_trigger_handler()
/linux-4.1.27/drivers/video/fbdev/
H A Dps3fb.c193 /* 480pf */
194 "480pf", 60, 720, 480, 37037, 58, 17, 30, 9, 63, 6,
197 /* 720pf */
198 "720pf", 60, 1280, 720, 13481, 220, 70, 19, 6, 80, 5,
205 /* 1080pf */
206 "1080pf", 60, 1920, 1080, 6741, 148, 44, 36, 4, 88, 5,
216 /* 576pf */
217 "576pf", 50, 720, 576, 37037, 70, 11, 39, 5, 63, 5,
220 /* 720pf */
221 "720pf", 50, 1280, 720, 13468, 220, 400, 19, 6, 80, 5,
228 /* 1080pf */
229 "1080pf", 50, 1920, 1080, 6734, 148, 484, 36, 4, 88, 5,
/linux-4.1.27/net/irda/
H A Dirlap_event.c177 * Calculate and set time before we will have to send back the pf bit
255 * We just received the pf bit and are at the beginning irlap_do_event()
1011 * with the pf bit, to avoid falling back on the irlap_state_xmit_p()
1243 if (!info->pf) { irlap_state_nrm_p()
1249 /* No longer waiting for pf */ irlap_state_nrm_p()
1281 if (!info->pf) { irlap_state_nrm_p()
1313 if (info->pf) { irlap_state_nrm_p()
1364 if (info->pf) { irlap_state_nrm_p()
1387 if (info->pf) { irlap_state_nrm_p()
1409 if (!info->pf) { irlap_state_nrm_p()
1523 /* Retry sending the pf bit to the secondary */ irlap_state_nrm_p()
1873 pr_debug("%s(), event=%s nr=%d, vs=%d, ns=%d, vr=%d, pf=%d\n", irlap_state_nrm_s()
1875 self->vs, info->ns, self->vr, info->pf); irlap_state_nrm_s()
1895 if (!info->pf) { irlap_state_nrm_s()
1953 if (!info->pf) { irlap_state_nrm_s()
1974 if (info->pf) { irlap_state_nrm_s()
1996 if (!info->pf) { irlap_state_nrm_s()
2022 if (!info->pf) { irlap_state_nrm_s()
2088 /* Just send back pf bit */ irlap_state_nrm_s()
2263 * with pf=1 shall restart the wd-timer and resend the rd:rsp irlap_state_sclose()
2265 if (info != NULL && info->pf) { irlap_state_sclose()
H A Dirlap_frame.c1019 * If send window > 1 then send frame with pf irlap_resend_rejected_frames()
1115 info->pf = skb->data[1] & PF_BIT; /* Final bit */ irlap_recv_i_frame()
1134 info->pf = skb->data[1] & PF_BIT; /* Final bit */ irlap_recv_ui_frame()
1164 info->pf = frame[2] & PF_BIT; /* Final bit */ irlap_recv_frmr_frame()
1321 info.pf = skb->data[1] & PF_BIT; irlap_driver_rcv()
/linux-4.1.27/drivers/regulator/
H A Dqcom_rpm-regulator.c43 struct request_member pf; /* pin function */ member in struct:rpm_reg_parts
79 .pf = { 0, 0xC0000000, 30 },
90 .pf = { 0, 0xC0000000, 30 },
102 .pf = { 0, 0x000000C0, 6 },
119 .pf = { 0, 0xF0000000, 28 },
130 .pf = { 0, 0xF0000000, 28 },
144 .pf = { 0, 0x000003C0, 6 },
/linux-4.1.27/arch/alpha/kernel/
H A Dpci_iommu.c240 DBGA("pci_dac_dma_supported %s from %pf\n", pci_dac_dma_supported()
272 DBGA2("pci_map_single: [%p,%zx] -> direct %llx from %pf\n", pci_map_single_1()
283 DBGA2("pci_map_single: [%p,%zx] -> DAC %llx from %pf\n", pci_map_single_1()
320 DBGA2("pci_map_single: [%p,%zx] np %ld -> sg %llx from %pf\n", pci_map_single_1()
387 DBGA2("pci_unmap_single: direct [%llx,%zx] from %pf\n", alpha_pci_unmap_page()
394 DBGA2("pci64_unmap_single: DAC [%llx,%zx] from %pf\n", alpha_pci_unmap_page()
426 DBGA2("pci_unmap_single: sg [%llx,%zx] np %ld from %pf\n", alpha_pci_unmap_page()
449 "get_free_pages failed from %pf\n", alpha_pci_alloc_coherent()
468 DBGA2("pci_alloc_consistent: %zx -> [%p,%llx] from %pf\n", alpha_pci_alloc_coherent()
488 DBGA2("pci_free_consistent: [%llx,%zx] from %pf\n", alpha_pci_free_coherent()
/linux-4.1.27/net/ipv6/
H A Daf_inet6.c142 * Be more specific, e.g. net-pf-10-proto-132-type-1 inet6_create()
143 * (net-pf-PF_INET6-proto-IPPROTO_SCTP-type-SOCK_STREAM) inet6_create()
146 request_module("net-pf-%d-proto-%d-type-%d", inet6_create()
149 * Fall back to generic, e.g. net-pf-10-proto-132 inet6_create()
150 * (net-pf-PF_INET6-proto-IPPROTO_SCTP) inet6_create()
153 request_module("net-pf-%d-proto-%d", inet6_create()
/linux-4.1.27/drivers/iio/adc/
H A Dad7266.c85 struct iio_poll_func *pf = p; ad7266_trigger_handler() local
86 struct iio_dev *indio_dev = pf->indio_dev; ad7266_trigger_handler()
93 pf->timestamp); ad7266_trigger_handler()
H A Dad_sigma_delta.c362 struct iio_poll_func *pf = p; ad_sd_trigger_handler() local
363 struct iio_dev *indio_dev = pf->indio_dev; ad_sd_trigger_handler()
390 iio_push_to_buffers_with_timestamp(indio_dev, data, pf->timestamp); ad_sd_trigger_handler()
H A Dad7298.c156 struct iio_poll_func *pf = p; ad7298_trigger_handler() local
157 struct iio_dev *indio_dev = pf->indio_dev; ad7298_trigger_handler()
H A Dad7476.c63 struct iio_poll_func *pf = p; ad7476_trigger_handler() local
64 struct iio_dev *indio_dev = pf->indio_dev; ad7476_trigger_handler()
H A Dad7887.c115 struct iio_poll_func *pf = p; ad7887_trigger_handler() local
116 struct iio_dev *indio_dev = pf->indio_dev; ad7887_trigger_handler()
H A Dad7923.c174 struct iio_poll_func *pf = p; ad7923_trigger_handler() local
175 struct iio_dev *indio_dev = pf->indio_dev; ad7923_trigger_handler()
H A Dcc10001_adc.c141 struct iio_poll_func *pf = p; cc10001_adc_trigger_h() local
150 indio_dev = pf->indio_dev; cc10001_adc_trigger_h()
H A Dmax1027.c376 struct iio_poll_func *pf = (struct iio_poll_func *)private; max1027_trigger_handler() local
377 struct iio_dev *indio_dev = pf->indio_dev; max1027_trigger_handler()
H A Dad799x.c182 struct iio_poll_func *pf = p; ad799x_trigger_handler() local
183 struct iio_dev *indio_dev = pf->indio_dev; ad799x_trigger_handler()
H A Dat91_adc.c245 struct iio_poll_func *pf = p; at91_adc_trigger_handler() local
246 struct iio_dev *idev = pf->indio_dev; at91_adc_trigger_handler()
257 iio_push_to_buffers_with_timestamp(idev, st->buffer, pf->timestamp); at91_adc_trigger_handler()
/linux-4.1.27/drivers/net/ethernet/intel/igb/
H A De1000_mbx.c352 /* lock the mailbox to prevent pf/vf race condition */ igb_write_mbx_pf()
393 /* lock the mailbox to prevent pf/vf race condition */ igb_read_mbx_pf()
413 * e1000_init_mbx_params_pf - set initial values for pf mailbox
416 * Initializes the hw->mbx struct to correct values for pf mailbox
/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/
H A Dixgbe_mbx.c362 /* lock the mailbox to prevent pf/vf race condition */ ixgbe_write_mbx_pf()
401 /* lock the mailbox to prevent pf/vf race condition */ ixgbe_read_mbx_pf()
421 * ixgbe_init_mbx_params_pf - set initial values for pf mailbox
424 * Initializes the hw->mbx struct to correct values for pf mailbox
H A Dixgbe_debugfs.c252 * @pf: the pf that is stopping
H A Dixgbe_common.h108 void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
/linux-4.1.27/net/decnet/netfilter/
H A Ddn_rtmsg.c118 .pf = NFPROTO_DECNET,
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/
H A Dcxgb4.h773 uint32_t pf:PF_BITWIDTH; /* PCI-E PF ID */ member in struct:ch_filter_tuple
1198 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
1259 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
1262 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
1266 unsigned int pf, unsigned int vf,
1269 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
1275 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
1297 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
1300 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
1302 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
1304 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
H A Dt4_hw.c2874 u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A)); t4_intr_enable() local
2886 t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf); t4_intr_enable()
2899 u32 pf = SOURCEPF_G(t4_read_reg(adapter, PL_WHOAMI_A)); t4_intr_disable() local
2902 t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0); t4_intr_disable()
4327 * @pf: the PF
4336 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf, t4_query_params() argument
4349 FW_CMD_READ_F | FW_PARAMS_CMD_PFN_V(pf) | t4_query_params()
4366 * @pf: the PF
4377 unsigned int pf, unsigned int vf, t4_set_params_nosleep()
4390 FW_PARAMS_CMD_PFN_V(pf) | t4_set_params_nosleep()
4406 * @pf: the PF
4415 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf, t4_set_params() argument
4427 FW_CMD_WRITE_F | FW_PARAMS_CMD_PFN_V(pf) | t4_set_params()
4442 * @pf: the PF being configured
4459 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, t4_cfg_pfvf() argument
4469 FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) | t4_cfg_pfvf()
4490 * @pf: the PF owning the VI
4503 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, t4_alloc_vi()
4512 FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf)); t4_alloc_vi()
4795 * @pf: the PF owning the queues
4804 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, t4_iq_free() argument
4812 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) | t4_iq_free()
4826 * @pf: the PF owning the queue
4832 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, t4_eth_eq_free() argument
4839 FW_CMD_EXEC_F | FW_EQ_ETH_CMD_PFN_V(pf) | t4_eth_eq_free()
4850 * @pf: the PF owning the queue
4856 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, t4_ctrl_eq_free() argument
4863 FW_CMD_EXEC_F | FW_EQ_CTRL_CMD_PFN_V(pf) | t4_ctrl_eq_free()
4874 * @pf: the PF owning the queue
4880 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf, t4_ofld_eq_free() argument
4887 FW_CMD_EXEC_F | FW_EQ_OFLD_CMD_PFN_V(pf) | t4_ofld_eq_free()
5376 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf) t4_port_init() argument
5403 ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size); for_each_port()
4376 t4_set_params_nosleep(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int nparams, const u32 *params, const u32 *val) t4_set_params_nosleep() argument
4502 t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, unsigned int *rss_size) t4_alloc_vi() argument
H A Dcxgb4_debugfs.c1571 int pf; rss_pf_config_open() local
1580 for (pf = 0; pf < 8; pf++) { rss_pf_config_open()
1581 pfconf[pf].rss_pf_map = rss_pf_map; rss_pf_config_open()
1582 pfconf[pf].rss_pf_mask = rss_pf_mask; rss_pf_config_open()
1583 t4_read_rss_pf_config(adapter, pf, &pfconf[pf].rss_pf_config); rss_pf_config_open()
H A Dl2t.c440 u32 pf = FW_VIID_PFN_G(viid); cxgb4_select_ntuple() local
444 FT_VNID_ID_PF_V(pf) | cxgb4_select_ntuple()
H A Dcxgb4_uld.h261 unsigned int pf; /* Physical Function we're using */ member in struct:cxgb4_lld_info
/linux-4.1.27/drivers/iio/accel/
H A Dmma8452.c276 struct iio_poll_func *pf = p; mma8452_trigger_handler() local
277 struct iio_dev *indio_dev = pf->indio_dev; mma8452_trigger_handler()
H A Dbma180.c654 struct iio_poll_func *pf = p; bma180_trigger_handler() local
655 struct iio_dev *indio_dev = pf->indio_dev; bma180_trigger_handler()
/linux-4.1.27/drivers/iio/light/
H A Dadjd_s311.c118 struct iio_poll_func *pf = p; adjd_s311_trigger_handler() local
119 struct iio_dev *indio_dev = pf->indio_dev; adjd_s311_trigger_handler()
H A Disl29125.c176 struct iio_poll_func *pf = p; isl29125_trigger_handler() local
177 struct iio_dev *indio_dev = pf->indio_dev; isl29125_trigger_handler()
H A Dltr501.c269 struct iio_poll_func *pf = p; ltr501_trigger_handler() local
270 struct iio_dev *indio_dev = pf->indio_dev; ltr501_trigger_handler()
H A Dtcs3414.c204 struct iio_poll_func *pf = p; tcs3414_trigger_handler() local
205 struct iio_dev *indio_dev = pf->indio_dev; tcs3414_trigger_handler()
H A Dtcs3472.c187 struct iio_poll_func *pf = p; tcs3472_trigger_handler() local
188 struct iio_dev *indio_dev = pf->indio_dev; tcs3472_trigger_handler()
H A Dgp2ap020a00f.c966 struct iio_poll_func *pf = data; gp2ap020a00f_trigger_handler() local
967 struct iio_dev *indio_dev = pf->indio_dev; gp2ap020a00f_trigger_handler()
993 pf->timestamp); gp2ap020a00f_trigger_handler()
/linux-4.1.27/drivers/iio/magnetometer/
H A Dmag3110.c245 struct iio_poll_func *pf = p; mag3110_trigger_handler() local
246 struct iio_dev *indio_dev = pf->indio_dev; mag3110_trigger_handler()
/linux-4.1.27/drivers/iio/pressure/
H A Dmpl3115.c139 struct iio_poll_func *pf = p; mpl3115_trigger_handler() local
140 struct iio_dev *indio_dev = pf->indio_dev; mpl3115_trigger_handler()
/linux-4.1.27/arch/x86/platform/efi/
H A Defi_64.c207 unsigned long pf = 0; __map_region() local
210 pf |= _PAGE_PCD; __map_region()
212 if (kernel_map_pages_in_pgd(pgd, md->phys_addr, va, md->num_pages, pf)) __map_region()
/linux-4.1.27/net/netfilter/ipvs/
H A Dip_vs_core.c1882 .pf = NFPROTO_IPV4,
1892 .pf = NFPROTO_IPV4,
1900 .pf = NFPROTO_IPV4,
1908 .pf = NFPROTO_IPV4,
1917 .pf = NFPROTO_IPV4,
1925 .pf = NFPROTO_IPV4,
1934 .pf = NFPROTO_IPV6,
1944 .pf = NFPROTO_IPV6,
1952 .pf = NFPROTO_IPV6,
1960 .pf = NFPROTO_IPV6,
1969 .pf = NFPROTO_IPV6,
1977 .pf = NFPROTO_IPV6,
H A Dip_vs_xmit.c543 static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb, ip_vs_nat_send_or_cont() argument
565 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb, ip_vs_nat_send_or_cont()
574 static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb, ip_vs_send_or_cont() argument
587 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb, ip_vs_send_or_cont()
/linux-4.1.27/net/sctp/
H A Dbind_addr.c325 if (opt->pf->cmp_addr(&laddr->a, addr, opt)) { sctp_bind_addr_match()
363 conflict = sp->pf->cmp_addr(&laddr->a, addr, sp); sctp_bind_addr_conflict()
424 if (opt->pf->cmp_addr(&laddr->a, addr, opt)) sctp_find_unmatch_addr()
H A Dprotocol.c1087 int sctp_register_pf(struct sctp_pf *pf, sa_family_t family) sctp_register_pf() argument
1093 sctp_pf_inet_specific = pf; sctp_register_pf()
1098 sctp_pf_inet6_specific = pf; sctp_register_pf()
1591 MODULE_ALIAS("net-pf-" __stringify(PF_INET) "-proto-132");
1592 MODULE_ALIAS("net-pf-" __stringify(PF_INET6) "-proto-132");
H A Dchunk.c199 sctp_sk(asoc->base.sk)->pf->af->net_header_len - sctp_datamsg_from_user()
/linux-4.1.27/drivers/md/bcache/
H A Dclosure.c180 seq_printf(f, "%p: %pF -> %pf p %p r %i ", debug_seq_show()
/linux-4.1.27/drivers/hwmon/
H A Dlm70.c10 * http://www.national.com/pf/LM/LM70.html
H A Dlm83.c11 * http://www.national.com/pf/LM/LM83.html
17 * http://www.national.com/pf/LM/LM82.html
H A Dlm77.c11 * http://www.national.com/pf/LM/LM77.html
H A Dlm92.c12 * http://www.national.com/pf/LM/LM92.html
/linux-4.1.27/drivers/gpu/drm/armada/
H A Darmada_fb.c110 DRM_DEBUG_DRIVER("w%u h%u pf%08x f%u p%u,%u,%u\n", armada_fb_create()
/linux-4.1.27/include/net/
H A Dlapb.h74 unsigned char pf; /* Poll/Final */ member in struct:lapb_frame
/linux-4.1.27/arch/arm/nwfpe/
H A Dfpmodule.c150 "NWFPE: %s[%d] takes exception %08x at %pf from %08lx\n", float_raise()
/linux-4.1.27/drivers/net/ethernet/intel/igbvf/
H A Dmbx.c261 /* lock the mailbox to prevent pf/vf race condition */ e1000_write_mbx_vf()
297 /* lock the mailbox to prevent pf/vf race condition */ e1000_read_mbx_vf()
/linux-4.1.27/include/net/irda/
H A Dirlap.h66 * IrTTP, and another to know that we should not send the pf bit.
105 int pf; /* Poll/final bit set */ member in struct:irlap_info
/linux-4.1.27/arch/ia64/kernel/
H A Dsal.c158 struct ia64_sal_desc_platform_feature *pf = p; sal_desc_platform_feature() local
159 sal_platform_features = pf->feature_mask; sal_desc_platform_feature()
/linux-4.1.27/drivers/parport/
H A Dshare.c458 * @pf: preemption callback
472 * The preemption callback function, @pf, is called when this
485 * does not support preemption, @pf can be %NULL.
525 int (*pf)(void *), void (*kf)(void *), parport_register_device()
539 if (!pf || !kf) { parport_register_device()
571 tmp->preempt = pf; parport_register_device()
/linux-4.1.27/arch/mips/include/asm/octeon/
H A Dcvmx-pci-defs.h315 uint32_t pf:1; member in struct:cvmx_pci_cfg04::cvmx_pci_cfg04_s
321 uint32_t pf:1;
359 uint32_t pf:1; member in struct:cvmx_pci_cfg06::cvmx_pci_cfg06_s
365 uint32_t pf:1;
402 uint32_t pf:1; member in struct:cvmx_pci_cfg08::cvmx_pci_cfg08_s
408 uint32_t pf:1;
/linux-4.1.27/net/bluetooth/rfcomm/
H A Dcore.c87 #define __ctrl(type, pf) (((type & 0xef) | (pf << 4)))
1671 static int rfcomm_recv_data(struct rfcomm_session *s, u8 dlci, int pf, struct sk_buff *skb) rfcomm_recv_data() argument
1675 BT_DBG("session %p state %ld dlci %d pf %d", s, s->state, dlci, pf); rfcomm_recv_data()
1683 if (pf && d->cfc) { rfcomm_recv_data()
/linux-4.1.27/net/ipv4/
H A Daf_inet.c294 * Be more specific, e.g. net-pf-2-proto-132-type-1 inet_create()
295 * (net-pf-PF_INET-proto-IPPROTO_SCTP-type-SOCK_STREAM) inet_create()
298 request_module("net-pf-%d-proto-%d-type-%d", inet_create()
301 * Fall back to generic, e.g. net-pf-2-proto-132 inet_create()
302 * (net-pf-PF_INET-proto-IPPROTO_SCTP) inet_create()
305 request_module("net-pf-%d-proto-%d", inet_create()
/linux-4.1.27/drivers/staging/iio/magnetometer/
H A Dhmc5843_core.c419 struct iio_poll_func *pf = p; hmc5843_trigger_handler() local
420 struct iio_dev *indio_dev = pf->indio_dev; hmc5843_trigger_handler()
/linux-4.1.27/drivers/staging/media/lirc/
H A Dlirc_parallel.c611 static int pf(void *handle) pf() function
661 pf, kf, lirc_lirc_irq_handler, 0, lirc_parallel_init()
/linux-4.1.27/kernel/irq/
H A Dspurious.c217 printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler); __report_bad_irq()
219 printk(KERN_CONT " threaded [<%p>] %pf", __report_bad_irq()
/linux-4.1.27/lib/
H A Dpercpu-refcount.c150 "percpu ref (%pf) <= 0 (%ld) after switching to atomic", percpu_ref_switch_to_atomic_rcu()
303 "%s called more than once on %pf!", __func__, ref->release); percpu_ref_kill_and_confirm()
/linux-4.1.27/arch/microblaze/mm/
H A Dpgtable.c77 pr_warn("__ioremap(): phys addr "PTE_FMT" is RAM lr %pf\n", __ioremap()
/linux-4.1.27/arch/mips/sgi-ip22/
H A Dip22-mc.c7 * Copyright (C) 2004 Peter Fuerst (pf@net.alphadv.de) - IP28
/linux-4.1.27/arch/arm/kernel/
H A Dasm-offsets.c22 #include <asm/glue-pf.h>
/linux-4.1.27/net/llc/
H A Dllc_proc.c182 "dsap state retr txw rxw pf ff sf df rs cs " llc_seq_core_show()
/linux-4.1.27/drivers/net/ethernet/intel/ixgbevf/
H A Dvf.c553 /* if link status is down no point in checking to see if pf is up */ ixgbevf_check_mac_link_vf()
598 /* the pf is talking, if we timed out in the past we reinit */ ixgbevf_check_mac_link_vf()
/linux-4.1.27/drivers/net/ethernet/broadcom/
H A Dbcmsysport.h510 u32 pf; /* RO # of Received pause frame pkt */ member in struct:bcm_sysport_rx_counters
531 u32 pf; /* RO # of xmited pause frame count */ member in struct:bcm_sysport_tx_counters
/linux-4.1.27/drivers/net/ethernet/broadcom/genet/
H A Dbcmgenet.h97 u32 pf; /* RO # of Received pause frame pkt */ member in struct:bcmgenet_rx_counters
118 u32 pf; /* RO # of xmited pause frame count */ member in struct:bcmgenet_tx_counters
/linux-4.1.27/net/
H A Dsocket.c1102 const struct net_proto_family *pf; __sock_create() local
1153 request_module("net-pf-%d", family); __sock_create()
1157 pf = rcu_dereference(net_families[family]); __sock_create()
1159 if (!pf) __sock_create()
1166 if (!try_module_get(pf->owner)) __sock_create()
1172 err = pf->create(net, sock, protocol, kern); __sock_create()
1187 module_put(pf->owner); __sock_create()
1199 module_put(pf->owner); __sock_create()
/linux-4.1.27/drivers/leds/
H A Dleds-lp3944.c14 * http://www.national.com/pf/LP/LP3944.html
/linux-4.1.27/drivers/spi/
H A Dspi-lm70llp.c41 * datasheet is available at http://www.national.com/pf/LM/LM70.html
/linux-4.1.27/drivers/staging/fbtft/
H A Dfb_ra8875.c9 - Pf@nne (pf@nne-mail.de) * * ***** *
/linux-4.1.27/drivers/staging/iio/cdc/
H A Dad7152.c245 /* Values are nano relative to pf base. */
/linux-4.1.27/crypto/
H A Dcrypto_user.c560 MODULE_ALIAS("net-pf-16-proto-21");
/linux-4.1.27/fs/pstore/
H A Dinode.c110 seq_printf(s, "%d %08lx %08lx %pf <- %pF\n", pstore_ftrace_seq_show()
/linux-4.1.27/drivers/media/i2c/
H A Ds5k4ecgx.c589 const struct s5k4ecgx_pixfmt *pf; s5k4ecgx_set_fmt() local
593 pf = s5k4ecgx_try_fmt(sd, &fmt->format); s5k4ecgx_set_fmt()
609 priv->curr_pixfmt = pf; s5k4ecgx_set_fmt()
/linux-4.1.27/drivers/platform/x86/
H A Dmsi-laptop.c28 * This driver exports a few files in /sys/devices/platform/msi-laptop-pf/:
575 .name = "msi-laptop-pf",
1092 msipf_device = platform_device_alloc("msi-laptop-pf", -1); msi_init()

Completed in 4741 milliseconds

12