peer              141 drivers/acpi/acpica/aclocal.h 	struct acpi_namespace_node *peer;	/* First peer */
peer              277 drivers/acpi/acpica/exdump.c 	{ACPI_EXD_NODE, ACPI_EXD_NSOFFSET(peer), "Peer"}
peer              115 drivers/acpi/acpica/nsaccess.c 			prev_node->peer = new_node;
peer              149 drivers/acpi/acpica/nsalloc.c 		next_node = next_node->peer;
peer              156 drivers/acpi/acpica/nsalloc.c 		prev_node->peer = node->peer;
peer              162 drivers/acpi/acpica/nsalloc.c 		parent_node->child = node->peer;
peer              222 drivers/acpi/acpica/nsalloc.c 	node->peer = NULL;
peer              231 drivers/acpi/acpica/nsalloc.c 		while (child_node->peer) {
peer              232 drivers/acpi/acpica/nsalloc.c 			child_node = child_node->peer;
peer              235 drivers/acpi/acpica/nsalloc.c 		child_node->peer = node;
peer              295 drivers/acpi/acpica/nsalloc.c 		next_node = next_node->peer;
peer              121 drivers/acpi/acpica/nssearch.c 		node = node->peer;
peer               50 drivers/acpi/acpica/nswalk.c 	return (child_node->peer);
peer              108 drivers/acpi/acpica/nswalk.c 		next_node = next_node->peer;
peer              189 drivers/acpi/acpica/utinit.c 	acpi_gbl_root_node_struct.peer = NULL;
peer               50 drivers/ata/pata_amd.c 	struct ata_device *peer = ata_dev_pair(adev);
peer               67 drivers/ata/pata_amd.c 	if (peer) {
peer               69 drivers/ata/pata_amd.c 		if (peer->dma_mode) {
peer               70 drivers/ata/pata_amd.c 			ata_timing_compute(peer, peer->dma_mode, &apeer, T, UT);
peer               73 drivers/ata/pata_amd.c 		ata_timing_compute(peer, peer->pio_mode, &apeer, T, UT);
peer              204 drivers/ata/pata_atp867x.c 	struct ata_device *peer = ata_dev_pair(adev);
peer              215 drivers/ata/pata_atp867x.c 	if (peer && peer->pio_mode) {
peer              216 drivers/ata/pata_atp867x.c 		ata_timing_compute(peer, peer->pio_mode, &p, T, UT);
peer              249 drivers/ata/pata_via.c 	struct ata_device *peer = ata_dev_pair(adev);
peer              270 drivers/ata/pata_via.c 	if (peer) {
peer              271 drivers/ata/pata_via.c 		if (peer->pio_mode) {
peer              272 drivers/ata/pata_via.c 			ata_timing_compute(peer, peer->pio_mode, &p, T, UT);
peer             1965 drivers/block/drbd/drbd_main.c 		  .peer = R_UNKNOWN,
peer             3396 drivers/block/drbd/drbd_main.c 		unsigned int peer;
peer             3397 drivers/block/drbd/drbd_main.c 		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
peer             3398 drivers/block/drbd/drbd_main.c 		peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
peer             3399 drivers/block/drbd/drbd_main.c 		device->peer_max_bio_size = peer;
peer             1382 drivers/block/drbd/drbd_nl.c 	unsigned int now, new, local, peer;
peer             1386 drivers/block/drbd/drbd_nl.c 	peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */
peer             1399 drivers/block/drbd/drbd_nl.c 			peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
peer             1402 drivers/block/drbd/drbd_nl.c 			peer = DRBD_MAX_SIZE_H80_PACKET;
peer             1404 drivers/block/drbd/drbd_nl.c 			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
peer             1406 drivers/block/drbd/drbd_nl.c 			peer = DRBD_MAX_BIO_SIZE;
peer             1412 drivers/block/drbd/drbd_nl.c 		if (peer > device->peer_max_bio_size)
peer             1413 drivers/block/drbd/drbd_nl.c 			device->peer_max_bio_size = peer;
peer             1415 drivers/block/drbd/drbd_nl.c 	new = min(local, peer);
peer             1990 drivers/block/drbd/drbd_nl.c 	    (device->state.role == R_PRIMARY || device->state.peer == R_PRIMARY) &&
peer             2826 drivers/block/drbd/drbd_nl.c 	if (device->state.role != device->state.peer)
peer             2881 drivers/block/drbd/drbd_nl.c 	    device->state.peer == R_SECONDARY) {
peer              278 drivers/block/drbd/drbd_proc.c 			   drbd_role_str(state.peer),
peer             3017 drivers/block/drbd/drbd_receiver.c 	if (device->state.peer != R_PRIMARY
peer             3054 drivers/block/drbd/drbd_receiver.c 	int self, peer, rv = -100;
peer             3059 drivers/block/drbd/drbd_receiver.c 	peer = device->p_uuid[UI_BITMAP] & 1;
peer             3077 drivers/block/drbd/drbd_receiver.c 		if (self == 0 && peer == 1) {
peer             3081 drivers/block/drbd/drbd_receiver.c 		if (self == 1 && peer == 0) {
peer             3087 drivers/block/drbd/drbd_receiver.c 		if (self == 0 && peer == 1) {
peer             3091 drivers/block/drbd/drbd_receiver.c 		if (self == 1 && peer == 0) {
peer             3272 drivers/block/drbd/drbd_receiver.c 	u64 self, peer;
peer             3276 drivers/block/drbd/drbd_receiver.c 	peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
peer             3279 drivers/block/drbd/drbd_receiver.c 	if (self == UUID_JUST_CREATED && peer == UUID_JUST_CREATED)
peer             3284 drivers/block/drbd/drbd_receiver.c 	     peer != UUID_JUST_CREATED)
peer             3289 drivers/block/drbd/drbd_receiver.c 	    (peer == UUID_JUST_CREATED || peer == (u64)0))
peer             3292 drivers/block/drbd/drbd_receiver.c 	if (self == peer) {
peer             3390 drivers/block/drbd/drbd_receiver.c 	peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
peer             3391 drivers/block/drbd/drbd_receiver.c 	if (self == peer)
peer             3395 drivers/block/drbd/drbd_receiver.c 	peer = device->p_uuid[UI_HISTORY_START] & ~((u64)1);
peer             3396 drivers/block/drbd/drbd_receiver.c 	if (self == peer) {
peer             3400 drivers/block/drbd/drbd_receiver.c 		    peer + UUID_NEW_BM_OFFSET == (device->p_uuid[UI_BITMAP] & ~((u64)1))) {
peer             3420 drivers/block/drbd/drbd_receiver.c 		peer = device->p_uuid[i] & ~((u64)1);
peer             3421 drivers/block/drbd/drbd_receiver.c 		if (self == peer)
peer             3427 drivers/block/drbd/drbd_receiver.c 	peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
peer             3428 drivers/block/drbd/drbd_receiver.c 	if (self == peer)
peer             3433 drivers/block/drbd/drbd_receiver.c 	if (self == peer) {
peer             3457 drivers/block/drbd/drbd_receiver.c 	peer = device->p_uuid[UI_CURRENT] & ~((u64)1);
peer             3460 drivers/block/drbd/drbd_receiver.c 		if (self == peer)
peer             3466 drivers/block/drbd/drbd_receiver.c 	peer = device->p_uuid[UI_BITMAP] & ~((u64)1);
peer             3467 drivers/block/drbd/drbd_receiver.c 	if (self == peer && self != ((u64)0))
peer             3474 drivers/block/drbd/drbd_receiver.c 			peer = device->p_uuid[j] & ~((u64)1);
peer             3475 drivers/block/drbd/drbd_receiver.c 			if (self == peer)
peer             3652 drivers/block/drbd/drbd_receiver.c static enum drbd_after_sb_p convert_after_sb(enum drbd_after_sb_p peer)
peer             3655 drivers/block/drbd/drbd_receiver.c 	if (peer == ASB_DISCARD_REMOTE)
peer             3659 drivers/block/drbd/drbd_receiver.c 	if (peer == ASB_DISCARD_LOCAL)
peer             3663 drivers/block/drbd/drbd_receiver.c 	return peer;
peer             4374 drivers/block/drbd/drbd_receiver.c 	ms.peer = ps.role;
peer             4375 drivers/block/drbd/drbd_receiver.c 	ms.role = ps.peer;
peer             4592 drivers/block/drbd/drbd_receiver.c 	ns.peer = peer_state.role;
peer              353 drivers/block/drbd/drbd_state.c 	enum drbd_role peer = R_UNKNOWN;
peer              360 drivers/block/drbd/drbd_state.c 		peer = max_role(peer, device->state.peer);
peer              364 drivers/block/drbd/drbd_state.c 	return peer;
peer              721 drivers/block/drbd/drbd_state.c 	    drbd_role_str(ns.peer),
peer              754 drivers/block/drbd/drbd_state.c 	if (ns.peer != os.peer && flags & CS_DC_PEER)
peer              756 drivers/block/drbd/drbd_state.c 			       drbd_role_str(os.peer),
peer              757 drivers/block/drbd/drbd_state.c 			       drbd_role_str(ns.peer));
peer              841 drivers/block/drbd/drbd_state.c 			if (ns.peer == R_PRIMARY)
peer             1072 drivers/block/drbd/drbd_state.c 		ns.peer = R_UNKNOWN;
peer             1419 drivers/block/drbd/drbd_state.c 		    (device->state.pdsk < D_INCONSISTENT && device->state.peer == R_PRIMARY))
peer             1440 drivers/block/drbd/drbd_state.c 	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
peer             1816 drivers/block/drbd/drbd_state.c 			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
peer             1830 drivers/block/drbd/drbd_state.c 		if (os.peer != R_PRIMARY && ns.peer == R_PRIMARY &&
peer             1836 drivers/block/drbd/drbd_state.c 		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
peer             2111 drivers/block/drbd/drbd_state.c 		  .peer = R_UNKNOWN,
peer             2131 drivers/block/drbd/drbd_state.c 		if (cs.peer != os.peer)
peer             2200 drivers/block/drbd/drbd_state.c 		  .peer = R_MASK,
peer             2234 drivers/block/drbd/drbd_state.c 		ns_max.peer = max_role(ns.peer, ns_max.peer);
peer             2240 drivers/block/drbd/drbd_state.c 		ns_min.peer = min_role(ns.peer, ns_min.peer);
peer             2250 drivers/block/drbd/drbd_state.c 				.peer = R_UNKNOWN,
peer               88 drivers/block/drbd/drbd_state.h 		unsigned peer:2 ;   /* 3/4	 primary/secondary/unknown */
peer              106 drivers/block/drbd/drbd_state.h 		unsigned peer:2 ;   /* 3/4	 primary/secondary/unknown */
peer               30 drivers/clk/qcom/clk-rpm.c 		.peer = &_platform##_##_active,				      \
peer               41 drivers/clk/qcom/clk-rpm.c 		.peer = &_platform##_##_name,				      \
peer               81 drivers/clk/qcom/clk-rpm.c 		.peer = &_platform##_##_active,				      \
peer               93 drivers/clk/qcom/clk-rpm.c 		.peer = &_platform##_##_name,				      \
peer              108 drivers/clk/qcom/clk-rpm.c 		.peer = &_platform##_##_active,				      \
peer              121 drivers/clk/qcom/clk-rpm.c 		.peer = &_platform##_##_name,				      \
peer              143 drivers/clk/qcom/clk-rpm.c 	struct clk_rpm *peer;
peer              223 drivers/clk/qcom/clk-rpm.c 	struct clk_rpm *peer = r->peer;
peer              238 drivers/clk/qcom/clk-rpm.c 	if (peer->enabled)
peer              239 drivers/clk/qcom/clk-rpm.c 		to_active_sleep(peer, peer->rate,
peer              272 drivers/clk/qcom/clk-rpm.c 	struct clk_rpm *peer = r->peer;
peer              283 drivers/clk/qcom/clk-rpm.c 	if (peer->enabled)
peer              284 drivers/clk/qcom/clk-rpm.c 		to_active_sleep(peer, peer->rate, &peer_rate,
peer              373 drivers/clk/qcom/clk-rpm.c 	struct clk_rpm *peer = r->peer;
peer              387 drivers/clk/qcom/clk-rpm.c 	if (peer->enabled)
peer              388 drivers/clk/qcom/clk-rpm.c 		to_active_sleep(peer, peer->rate,
peer               63 drivers/clk/qcom/clk-rpmh.c 	struct clk_rpmh *peer;
peer               81 drivers/clk/qcom/clk-rpmh.c 		.peer = &_platform##_##_name_active,			\
peer              100 drivers/clk/qcom/clk-rpmh.c 		.peer = &_platform##_##_name,				\
peer              174 drivers/clk/qcom/clk-rpmh.c 	c->peer->last_sent_aggr_state =  c->last_sent_aggr_state;
peer              192 drivers/clk/qcom/clk-rpmh.c 	c->aggr_state = c->state | c->peer->state;
peer              193 drivers/clk/qcom/clk-rpmh.c 	c->peer->aggr_state = c->aggr_state;
peer               37 drivers/clk/qcom/clk-smd-rpm.c 		.peer = &_platform##_##_active,				      \
peer               52 drivers/clk/qcom/clk-smd-rpm.c 		.peer = &_platform##_##_name,				      \
peer               71 drivers/clk/qcom/clk-smd-rpm.c 		.peer = &_platform##_##_active,				      \
peer               87 drivers/clk/qcom/clk-smd-rpm.c 		.peer = &_platform##_##_name,				      \
peer              129 drivers/clk/qcom/clk-smd-rpm.c 	struct clk_smd_rpm *peer;
peer              223 drivers/clk/qcom/clk-smd-rpm.c 	struct clk_smd_rpm *peer = r->peer;
peer              238 drivers/clk/qcom/clk-smd-rpm.c 	if (peer->enabled)
peer              239 drivers/clk/qcom/clk-smd-rpm.c 		to_active_sleep(peer, peer->rate,
peer              272 drivers/clk/qcom/clk-smd-rpm.c 	struct clk_smd_rpm *peer = r->peer;
peer              283 drivers/clk/qcom/clk-smd-rpm.c 	if (peer->enabled)
peer              284 drivers/clk/qcom/clk-smd-rpm.c 		to_active_sleep(peer, peer->rate, &peer_rate,
peer              307 drivers/clk/qcom/clk-smd-rpm.c 	struct clk_smd_rpm *peer = r->peer;
peer              321 drivers/clk/qcom/clk-smd-rpm.c 	if (peer->enabled)
peer              322 drivers/clk/qcom/clk-smd-rpm.c 		to_active_sleep(peer, peer->rate,
peer              362 drivers/firewire/net.c 		struct fwnet_peer *peer, u16 datagram_label, unsigned dg_size,
peer              386 drivers/firewire/net.c 	list_add_tail(&new->pd_link, &peer->pd_list);
peer              398 drivers/firewire/net.c static struct fwnet_partial_datagram *fwnet_pd_find(struct fwnet_peer *peer,
peer              403 drivers/firewire/net.c 	list_for_each_entry(pd, &peer->pd_list, pd_link)
peer              423 drivers/firewire/net.c static bool fwnet_pd_update(struct fwnet_peer *peer,
peer              436 drivers/firewire/net.c 	list_move_tail(&pd->pd_link, &peer->pd_list);
peer              454 drivers/firewire/net.c 	struct fwnet_peer *peer;
peer              456 drivers/firewire/net.c 	list_for_each_entry(peer, &dev->peer_list, peer_link)
peer              457 drivers/firewire/net.c 		if (peer->guid == guid)
peer              458 drivers/firewire/net.c 			return peer;
peer              467 drivers/firewire/net.c 	struct fwnet_peer *peer;
peer              469 drivers/firewire/net.c 	list_for_each_entry(peer, &dev->peer_list, peer_link)
peer              470 drivers/firewire/net.c 		if (peer->node_id    == node_id &&
peer              471 drivers/firewire/net.c 		    peer->generation == generation)
peer              472 drivers/firewire/net.c 			return peer;
peer              578 drivers/firewire/net.c 	struct fwnet_peer *peer;
peer              637 drivers/firewire/net.c 	peer = fwnet_peer_find_by_node_id(dev, source_node_id, generation);
peer              638 drivers/firewire/net.c 	if (!peer) {
peer              643 drivers/firewire/net.c 	pd = fwnet_pd_find(peer, datagram_label);
peer              645 drivers/firewire/net.c 		while (peer->pdg_size >= FWNET_MAX_FRAGMENTS) {
peer              647 drivers/firewire/net.c 			fwnet_pd_delete(list_first_entry(&peer->pd_list,
peer              649 drivers/firewire/net.c 			peer->pdg_size--;
peer              651 drivers/firewire/net.c 		pd = fwnet_pd_new(net, peer, datagram_label,
peer              657 drivers/firewire/net.c 		peer->pdg_size++;
peer              666 drivers/firewire/net.c 			pd = fwnet_pd_new(net, peer, datagram_label,
peer              669 drivers/firewire/net.c 				peer->pdg_size--;
peer              674 drivers/firewire/net.c 			if (!fwnet_pd_update(peer, pd, buf, fg_off, len)) {
peer              681 drivers/firewire/net.c 				peer->pdg_size--;
peer              693 drivers/firewire/net.c 		peer->pdg_size--;
peer             1246 drivers/firewire/net.c 	struct fwnet_peer *peer;
peer             1304 drivers/firewire/net.c 		peer = fwnet_peer_find_by_guid(dev, be64_to_cpu(guid));
peer             1305 drivers/firewire/net.c 		if (!peer)
peer             1308 drivers/firewire/net.c 		generation         = peer->generation;
peer             1309 drivers/firewire/net.c 		dest_node          = peer->node_id;
peer             1310 drivers/firewire/net.c 		max_payload        = peer->max_payload;
peer             1311 drivers/firewire/net.c 		datagram_label_ptr = &peer->datagram_label;
peer             1316 drivers/firewire/net.c 		ptask->speed       = peer->speed;
peer             1413 drivers/firewire/net.c 	struct fwnet_peer *peer;
peer             1415 drivers/firewire/net.c 	peer = kmalloc(sizeof(*peer), GFP_KERNEL);
peer             1416 drivers/firewire/net.c 	if (!peer)
peer             1419 drivers/firewire/net.c 	dev_set_drvdata(&unit->device, peer);
peer             1421 drivers/firewire/net.c 	peer->dev = dev;
peer             1422 drivers/firewire/net.c 	peer->guid = (u64)device->config_rom[3] << 32 | device->config_rom[4];
peer             1423 drivers/firewire/net.c 	INIT_LIST_HEAD(&peer->pd_list);
peer             1424 drivers/firewire/net.c 	peer->pdg_size = 0;
peer             1425 drivers/firewire/net.c 	peer->datagram_label = 0;
peer             1426 drivers/firewire/net.c 	peer->speed = device->max_speed;
peer             1427 drivers/firewire/net.c 	peer->max_payload = fwnet_max_payload(device->max_rec, peer->speed);
peer             1429 drivers/firewire/net.c 	peer->generation = device->generation;
peer             1431 drivers/firewire/net.c 	peer->node_id = device->node_id;
peer             1434 drivers/firewire/net.c 	list_add_tail(&peer->peer_link, &dev->peer_list);
peer             1536 drivers/firewire/net.c 	struct fwnet_peer *peer = dev_get_drvdata(&unit->device);
peer             1541 drivers/firewire/net.c 	spin_lock_irq(&peer->dev->lock);
peer             1542 drivers/firewire/net.c 	peer->node_id    = device->node_id;
peer             1543 drivers/firewire/net.c 	peer->generation = generation;
peer             1544 drivers/firewire/net.c 	spin_unlock_irq(&peer->dev->lock);
peer             1547 drivers/firewire/net.c static void fwnet_remove_peer(struct fwnet_peer *peer, struct fwnet_device *dev)
peer             1552 drivers/firewire/net.c 	list_del(&peer->peer_link);
peer             1557 drivers/firewire/net.c 	list_for_each_entry_safe(pd, pd_next, &peer->pd_list, pd_link)
peer             1560 drivers/firewire/net.c 	kfree(peer);
peer             1565 drivers/firewire/net.c 	struct fwnet_peer *peer = dev_get_drvdata(&unit->device);
peer             1566 drivers/firewire/net.c 	struct fwnet_device *dev = peer->dev;
peer             1574 drivers/firewire/net.c 	fwnet_remove_peer(peer, dev);
peer               73 drivers/firmware/tegra/ivc.c 	if (!ivc->peer)
peer               76 drivers/firmware/tegra/ivc.c 	dma_sync_single_for_cpu(ivc->peer, phys, TEGRA_IVC_ALIGN,
peer               82 drivers/firmware/tegra/ivc.c 	if (!ivc->peer)
peer               85 drivers/firmware/tegra/ivc.c 	dma_sync_single_for_device(ivc->peer, phys, TEGRA_IVC_ALIGN,
peer              244 drivers/firmware/tegra/ivc.c 	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
peer              249 drivers/firmware/tegra/ivc.c 	dma_sync_single_for_cpu(ivc->peer, phys, size, DMA_FROM_DEVICE);
peer              258 drivers/firmware/tegra/ivc.c 	if (!ivc->peer || WARN_ON(frame >= ivc->num_frames))
peer              263 drivers/firmware/tegra/ivc.c 	dma_sync_single_for_device(ivc->peer, phys, size, DMA_TO_DEVICE);
peer              612 drivers/firmware/tegra/ivc.c int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
peer              638 drivers/firmware/tegra/ivc.c 	if (peer) {
peer              639 drivers/firmware/tegra/ivc.c 		ivc->rx.phys = dma_map_single(peer, rx, queue_size,
peer              641 drivers/firmware/tegra/ivc.c 		if (dma_mapping_error(peer, ivc->rx.phys))
peer              644 drivers/firmware/tegra/ivc.c 		ivc->tx.phys = dma_map_single(peer, tx, queue_size,
peer              646 drivers/firmware/tegra/ivc.c 		if (dma_mapping_error(peer, ivc->tx.phys)) {
peer              647 drivers/firmware/tegra/ivc.c 			dma_unmap_single(peer, ivc->rx.phys, queue_size,
peer              658 drivers/firmware/tegra/ivc.c 	ivc->peer = peer;
peer              677 drivers/firmware/tegra/ivc.c 	if (ivc->peer) {
peer              681 drivers/firmware/tegra/ivc.c 		dma_unmap_single(ivc->peer, ivc->rx.phys, size,
peer              683 drivers/firmware/tegra/ivc.c 		dma_unmap_single(ivc->peer, ivc->tx.phys, size,
peer             1379 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	struct kfd_dev *dev, *peer;
peer             1426 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		peer = kfd_device_by_id(devices_arr[i]);
peer             1427 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		if (!peer) {
peer             1434 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		peer_pdd = kfd_bind_process_to_device(peer, p);
peer             1440 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 			peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
peer             1459 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		peer = kfd_device_by_id(devices_arr[i]);
peer             1460 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		if (WARN_ON_ONCE(!peer))
peer             1462 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		peer_pdd = kfd_get_process_device_data(peer, p);
peer             1489 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 	struct kfd_dev *dev, *peer;
peer             1535 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		peer = kfd_device_by_id(devices_arr[i]);
peer             1536 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		if (!peer) {
peer             1541 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 		peer_pdd = kfd_get_process_device_data(peer, p);
peer             1547 drivers/gpu/drm/amd/amdkfd/kfd_chardev.c 			peer->kgd, (struct kgd_mem *)mem, peer_pdd->vm);
peer               81 drivers/ide/amd74xx.c 	ide_drive_t *peer = ide_get_pair_dev(drive);
peer               92 drivers/ide/amd74xx.c 	if (peer) {
peer               93 drivers/ide/amd74xx.c 		ide_timing_compute(peer, peer->pio_mode, &p, T, UT);
peer              473 drivers/ide/cmd640.c 		ide_drive_t *peer = ide_get_pair_dev(drive);
peer              476 drivers/ide/cmd640.c 		if (peer) {
peer              177 drivers/ide/via82cxxx.c 	ide_drive_t *peer = ide_get_pair_dev(drive);
peer              197 drivers/ide/via82cxxx.c 	if (peer) {
peer              198 drivers/ide/via82cxxx.c 		ide_timing_compute(peer, peer->pio_mode, &p, T, UT);
peer             14710 drivers/infiniband/hw/hfi1/chip.c 	struct hfi1_devdata *peer;
peer             14721 drivers/infiniband/hw/hfi1/chip.c 	xa_for_each(&hfi1_dev_table, index, peer) {
peer             14722 drivers/infiniband/hw/hfi1/chip.c 		if ((HFI_BASE_GUID(dd) == HFI_BASE_GUID(peer)) &&
peer             14723 drivers/infiniband/hw/hfi1/chip.c 		    dd->unit != peer->unit)
peer             14727 drivers/infiniband/hw/hfi1/chip.c 	if (peer) {
peer             14729 drivers/infiniband/hw/hfi1/chip.c 		dd->asic_data = peer->asic_data;
peer             14739 drivers/infiniband/hw/hfi1/chip.c 	if (!peer)
peer              853 drivers/isdn/hardware/mISDN/avmfritz.c 		ch->peer = NULL;
peer             3696 drivers/isdn/hardware/mISDN/hfcmulti.c 		ch->peer = NULL;
peer             1555 drivers/isdn/hardware/mISDN/hfcpci.c 		ch->peer = NULL;
peer             1820 drivers/isdn/hardware/mISDN/hfcsusb.c 		ch->peer = NULL;
peer             1403 drivers/isdn/hardware/mISDN/mISDNipac.c 		ch->peer = NULL;
peer             1587 drivers/isdn/hardware/mISDN/mISDNisar.c 		ch->peer = NULL;
peer              807 drivers/isdn/hardware/mISDN/netjet.c 		ch->peer = NULL;
peer             1042 drivers/isdn/hardware/mISDN/w6692.c 		ch->peer = NULL;
peer              370 drivers/isdn/mISDN/dsp_cmx.c 	if (dsp->ch.peer)
peer              371 drivers/isdn/mISDN/dsp_cmx.c 		dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq);
peer              216 drivers/isdn/mISDN/dsp_core.c 	if (!dsp->ch.peer) {
peer              224 drivers/isdn/mISDN/dsp_core.c 	if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) {
peer              261 drivers/isdn/mISDN/dsp_core.c 	if (!dsp->ch.peer) {
peer              270 drivers/isdn/mISDN/dsp_core.c 	if (dsp->ch.peer->ctrl(dsp->ch.peer, CONTROL_CHANNEL, &cq)) {
peer              631 drivers/isdn/mISDN/dsp_core.c 	if (!ch->peer) {
peer              639 drivers/isdn/mISDN/dsp_core.c 	if (ch->peer->ctrl(ch->peer, CONTROL_CHANNEL, &cq) < 0) {
peer              653 drivers/isdn/mISDN/dsp_core.c 		if (ch->peer->ctrl(ch->peer, CONTROL_CHANNEL, &cq)) {
peer              917 drivers/isdn/mISDN/dsp_core.c 		if (ch->peer)
peer              918 drivers/isdn/mISDN/dsp_core.c 			return ch->recv(ch->peer, skb);
peer              937 drivers/isdn/mISDN/dsp_core.c 		if (ch->peer)
peer              938 drivers/isdn/mISDN/dsp_core.c 			return ch->recv(ch->peer, skb);
peer              965 drivers/isdn/mISDN/dsp_core.c 		if (dsp->ch.peer)
peer              966 drivers/isdn/mISDN/dsp_core.c 			dsp->ch.peer->ctrl(dsp->ch.peer, CLOSE_CHANNEL, NULL);
peer             1035 drivers/isdn/mISDN/dsp_core.c 			if (dsp->ch.peer) {
peer             1037 drivers/isdn/mISDN/dsp_core.c 				if (dsp->ch.recv(dsp->ch.peer, skb)) {
peer               85 drivers/isdn/mISDN/dsp_hwec.c 	if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) {
peer              105 drivers/isdn/mISDN/dsp_hwec.c 	if (!dsp->ch.peer->ctrl(&dsp->ch, CONTROL_CHANNEL, &cq)) {
peer              447 drivers/isdn/mISDN/dsp_tones.c 		if (dsp->ch.peer) {
peer              448 drivers/isdn/mISDN/dsp_tones.c 			if (dsp->ch.recv(dsp->ch.peer, nskb))
peer               22 drivers/isdn/mISDN/hwchannel.c 			if (likely(dch->dev.D.peer)) {
peer               23 drivers/isdn/mISDN/hwchannel.c 				err = dch->dev.D.recv(dch->dev.D.peer, skb);
peer               46 drivers/isdn/mISDN/hwchannel.c 			if (likely(bch->ch.peer)) {
peer               47 drivers/isdn/mISDN/hwchannel.c 				err = bch->ch.recv(bch->ch.peer, skb);
peer              391 drivers/isdn/mISDN/hwchannel.c 		if (ch->peer) {
peer              395 drivers/isdn/mISDN/hwchannel.c 			if (!ch->recv(ch->peer, skb))
peer             1212 drivers/isdn/mISDN/l1oip_core.c 		ch->peer = NULL;
peer              183 drivers/isdn/mISDN/layer2.c 	ret = l2->ch.recv(l2->ch.peer, skb);
peer             2099 drivers/isdn/mISDN/layer2.c 		if (l2->ch.peer)
peer             2100 drivers/isdn/mISDN/layer2.c 			l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL);
peer              219 drivers/isdn/mISDN/socket.c 	if (!_pms(sk)->ch.peer)
peer              221 drivers/isdn/mISDN/socket.c 	err = _pms(sk)->ch.recv(_pms(sk)->ch.peer, skb);
peer              553 drivers/isdn/mISDN/socket.c 		  int peer)
peer              388 drivers/isdn/mISDN/stack.c 	dev->teimgr->peer = &newst->own;
peer              393 drivers/isdn/mISDN/stack.c 	dev->D.peer = &newst->own;
peer              435 drivers/isdn/mISDN/stack.c 		ch->peer = &dev->D.st->own;
peer              476 drivers/isdn/mISDN/stack.c 		ch->peer = rq.ch;
peer              478 drivers/isdn/mISDN/stack.c 		rq.ch->peer = ch;
peer              491 drivers/isdn/mISDN/stack.c 		ch->peer = rq2.ch;
peer              501 drivers/isdn/mISDN/stack.c 		rq2.ch->peer = rq.ch;
peer              503 drivers/isdn/mISDN/stack.c 		rq.ch->peer = rq2.ch;
peer              534 drivers/isdn/mISDN/stack.c 		ch->peer = &dev->D.st->own;
peer              551 drivers/isdn/mISDN/stack.c 			rq.ch->peer = &dev->D.st->own;
peer              575 drivers/isdn/mISDN/stack.c 		if (ch->peer) {
peer              576 drivers/isdn/mISDN/stack.c 			ch->peer->ctrl(ch->peer, CLOSE_CHANNEL, NULL);
peer              577 drivers/isdn/mISDN/stack.c 			ch->peer = NULL;
peer              343 drivers/isdn/mISDN/tei.c 		if (mgr->ch.recv(mgr->ch.peer, skb)) {
peer              362 drivers/isdn/mISDN/tei.c 					if (!mgr->ch.recv(mgr->ch.peer, skb))
peer              833 drivers/isdn/mISDN/tei.c 		l2->ch.peer = mgr->ch.peer;
peer             2650 drivers/message/fusion/mptbase.c 	struct pci_dev *peer=NULL;
peer             2660 drivers/message/fusion/mptbase.c 	peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func-1));
peer             2661 drivers/message/fusion/mptbase.c 	if (!peer) {
peer             2662 drivers/message/fusion/mptbase.c 		peer = pci_get_slot(pdev->bus, PCI_DEVFN(slot,func+1));
peer             2663 drivers/message/fusion/mptbase.c 		if (!peer)
peer             2669 drivers/message/fusion/mptbase.c 		if (_pcidev == peer) {
peer             2690 drivers/message/fusion/mptbase.c 	pci_dev_put(peer);
peer              139 drivers/misc/mic/scif/scif_api.c 	msg.dst = ep->peer;
peer              737 drivers/misc/mic/scif/scif_api.c int scif_accept(scif_epd_t epd, struct scif_port_id *peer,
peer              753 drivers/misc/mic/scif/scif_api.c 	if (!peer || !newepd)
peer              794 drivers/misc/mic/scif/scif_api.c 	peer->node = conreq->msg.src.node;
peer              795 drivers/misc/mic/scif/scif_api.c 	peer->port = conreq->msg.src.port;
peer              806 drivers/misc/mic/scif/scif_api.c 	cep->remote_dev = &scif_dev[peer->node];
peer              847 drivers/misc/mic/scif/scif_api.c 	cep->peer.node = peer->node;
peer              848 drivers/misc/mic/scif/scif_api.c 	cep->peer.port = peer->port;
peer              171 drivers/misc/mic/scif/scif_epd.c 		ep->peer.node = msg->src.node;
peer              172 drivers/misc/mic/scif/scif_epd.c 		ep->peer.port = msg->src.port;
peer              100 drivers/misc/mic/scif/scif_epd.h 	struct scif_port_id peer;
peer              109 drivers/misc/mic/scif/scif_fd.c 		err = __scif_connect(priv, &req.peer, non_block);
peer              136 drivers/misc/mic/scif/scif_fd.c 		err = scif_accept(priv, &request.peer, ep, request.flags);
peer               62 drivers/misc/vmw_vmci/vmci_host.c 	u32 peer;
peer              459 drivers/misc/vmw_vmci/vmci_host.c 						alloc_info.peer,
peer              484 drivers/misc/vmw_vmci/vmci_host.c 						alloc_info.peer,
peer              151 drivers/misc/vmw_vmci/vmci_queue_pair.c 	u32 peer;
peer              187 drivers/misc/vmw_vmci/vmci_queue_pair.c 	u32 peer;
peer              871 drivers/misc/vmw_vmci/vmci_queue_pair.c 			 u32 peer,
peer              892 drivers/misc/vmw_vmci/vmci_queue_pair.c 		entry->qp.peer = peer;
peer              959 drivers/misc/vmw_vmci/vmci_queue_pair.c 	alloc_msg->peer = entry->qp.peer;
peer             1090 drivers/misc/vmw_vmci/vmci_queue_pair.c 			       u32 peer,
peer             1160 drivers/misc/vmw_vmci/vmci_queue_pair.c 	queue_pair_entry = qp_guest_endpoint_create(*handle, peer, flags,
peer             1195 drivers/misc/vmw_vmci/vmci_queue_pair.c 		    (queue_pair_entry->qp.peer != VMCI_INVALID_ID &&
peer             1196 drivers/misc/vmw_vmci/vmci_queue_pair.c 		     queue_pair_entry->qp.peer != context_id)) {
peer             1275 drivers/misc/vmw_vmci/vmci_queue_pair.c 			    u32 peer,
peer             1300 drivers/misc/vmw_vmci/vmci_queue_pair.c 	if (handle.context != context_id && handle.context != peer)
peer             1303 drivers/misc/vmw_vmci/vmci_queue_pair.c 	if (VMCI_CONTEXT_IS_VM(context_id) && VMCI_CONTEXT_IS_VM(peer))
peer             1310 drivers/misc/vmw_vmci/vmci_queue_pair.c 	if (is_local && peer != VMCI_INVALID_ID && context_id != peer)
peer             1334 drivers/misc/vmw_vmci/vmci_queue_pair.c 	entry->qp.peer = peer;
peer             1503 drivers/misc/vmw_vmci/vmci_queue_pair.c 			    u32 peer,
peer             1556 drivers/misc/vmw_vmci/vmci_queue_pair.c 	if (entry->qp.peer != VMCI_INVALID_ID && entry->qp.peer != context_id)
peer             1688 drivers/misc/vmw_vmci/vmci_queue_pair.c 			   u32 peer,
peer             1737 drivers/misc/vmw_vmci/vmci_queue_pair.c 		    qp_broker_create(handle, peer, flags, priv_flags,
peer             1743 drivers/misc/vmw_vmci/vmci_queue_pair.c 		    qp_broker_attach(entry, peer, flags, priv_flags,
peer             1766 drivers/misc/vmw_vmci/vmci_queue_pair.c 			      u32 peer,
peer             1787 drivers/misc/vmw_vmci/vmci_queue_pair.c 	    qp_broker_alloc(new_handle, peer, flags, priv_flags,
peer             1825 drivers/misc/vmw_vmci/vmci_queue_pair.c 		  u32 peer,
peer             1839 drivers/misc/vmw_vmci/vmci_queue_pair.c 					   consume_size, peer,
peer             1844 drivers/misc/vmw_vmci/vmci_queue_pair.c 					  consume_size, peer, flags,
peer             1922 drivers/misc/vmw_vmci/vmci_queue_pair.c 			 u32 peer,
peer             1930 drivers/misc/vmw_vmci/vmci_queue_pair.c 	return qp_broker_alloc(handle, peer, flags, priv_flags,
peer             2657 drivers/misc/vmw_vmci/vmci_queue_pair.c 		     u32 peer,
peer             2664 drivers/misc/vmw_vmci/vmci_queue_pair.c 	struct vmci_handle dst = vmci_make_handle(peer, VMCI_INVALID_ID);
peer             2706 drivers/misc/vmw_vmci/vmci_queue_pair.c 	my_qpair->peer = peer;
peer             2731 drivers/misc/vmw_vmci/vmci_queue_pair.c 			       my_qpair->peer,
peer             2780 drivers/misc/vmw_vmci/vmci_queue_pair.c 	old_qpair->peer = VMCI_INVALID_ID;
peer               31 drivers/misc/vmw_vmci/vmci_queue_pair.h 	u32 peer;
peer              142 drivers/misc/vmw_vmci/vmci_queue_pair.h int vmci_qp_broker_alloc(struct vmci_handle handle, u32 peer,
peer              157 drivers/misc/vmw_vmci/vmci_queue_pair.h 		  u32 peer, u32 flags, u32 priv_flags,
peer              311 drivers/net/can/janz-ican3.c 	u8 locl, peer, xord;
peer              315 drivers/net/can/janz-ican3.c 	peer = ioread8(mod->dpm + MSYNC_PEER);
peer              317 drivers/net/can/janz-ican3.c 	xord = locl ^ peer;
peer              356 drivers/net/can/janz-ican3.c 	u8 locl, peer, xord;
peer              360 drivers/net/can/janz-ican3.c 	peer = ioread8(mod->dpm + MSYNC_PEER);
peer              362 drivers/net/can/janz-ican3.c 	xord = locl ^ peer;
peer               33 drivers/net/can/vxcan.c 	struct net_device __rcu	*peer;
peer               39 drivers/net/can/vxcan.c 	struct net_device *peer;
peer               47 drivers/net/can/vxcan.c 	peer = rcu_dereference(priv->peer);
peer               48 drivers/net/can/vxcan.c 	if (unlikely(!peer)) {
peer               61 drivers/net/can/vxcan.c 	skb->dev        = peer;
peer               67 drivers/net/can/vxcan.c 		peerstats = &peer->stats;
peer               81 drivers/net/can/vxcan.c 	struct net_device *peer = rtnl_dereference(priv->peer);
peer               83 drivers/net/can/vxcan.c 	if (!peer)
peer               86 drivers/net/can/vxcan.c 	if (peer->flags & IFF_UP) {
peer               88 drivers/net/can/vxcan.c 		netif_carrier_on(peer);
peer               96 drivers/net/can/vxcan.c 	struct net_device *peer = rtnl_dereference(priv->peer);
peer               99 drivers/net/can/vxcan.c 	if (peer)
peer              100 drivers/net/can/vxcan.c 		netif_carrier_off(peer);
peer              108 drivers/net/can/vxcan.c 	struct net_device *peer;
peer              112 drivers/net/can/vxcan.c 	peer = rcu_dereference(priv->peer);
peer              113 drivers/net/can/vxcan.c 	iflink = peer ? peer->ifindex : 0;
peer              161 drivers/net/can/vxcan.c 	struct net_device *peer;
peer              200 drivers/net/can/vxcan.c 	peer = rtnl_create_link(peer_net, ifname, name_assign_type,
peer              202 drivers/net/can/vxcan.c 	if (IS_ERR(peer)) {
peer              204 drivers/net/can/vxcan.c 		return PTR_ERR(peer);
peer              208 drivers/net/can/vxcan.c 		peer->ifindex = ifmp->ifi_index;
peer              210 drivers/net/can/vxcan.c 	err = register_netdevice(peer);
peer              214 drivers/net/can/vxcan.c 		free_netdev(peer);
peer              218 drivers/net/can/vxcan.c 	netif_carrier_off(peer);
peer              220 drivers/net/can/vxcan.c 	err = rtnl_configure_link(peer, ifmp);
peer              238 drivers/net/can/vxcan.c 	rcu_assign_pointer(priv->peer, peer);
peer              240 drivers/net/can/vxcan.c 	priv = netdev_priv(peer);
peer              241 drivers/net/can/vxcan.c 	rcu_assign_pointer(priv->peer, dev);
peer              246 drivers/net/can/vxcan.c 	unregister_netdevice(peer);
peer              253 drivers/net/can/vxcan.c 	struct net_device *peer;
peer              256 drivers/net/can/vxcan.c 	peer = rtnl_dereference(priv->peer);
peer              262 drivers/net/can/vxcan.c 	RCU_INIT_POINTER(priv->peer, NULL);
peer              265 drivers/net/can/vxcan.c 	if (peer) {
peer              266 drivers/net/can/vxcan.c 		priv = netdev_priv(peer);
peer              267 drivers/net/can/vxcan.c 		RCU_INIT_POINTER(priv->peer, NULL);
peer              268 drivers/net/can/vxcan.c 		unregister_netdevice_queue(peer, head);
peer              279 drivers/net/can/vxcan.c 	struct net_device *peer = rtnl_dereference(priv->peer);
peer              281 drivers/net/can/vxcan.c 	return peer ? dev_net(peer) : dev_net(dev);
peer             16066 drivers/net/ethernet/broadcom/tg3.c 	struct pci_dev *peer;
peer             16070 drivers/net/ethernet/broadcom/tg3.c 		peer = pci_get_slot(tp->pdev->bus, devnr | func);
peer             16071 drivers/net/ethernet/broadcom/tg3.c 		if (peer && peer != tp->pdev)
peer             16073 drivers/net/ethernet/broadcom/tg3.c 		pci_dev_put(peer);
peer             16078 drivers/net/ethernet/broadcom/tg3.c 	if (!peer) {
peer             16079 drivers/net/ethernet/broadcom/tg3.c 		peer = tp->pdev;
peer             16080 drivers/net/ethernet/broadcom/tg3.c 		return peer;
peer             16087 drivers/net/ethernet/broadcom/tg3.c 	pci_dev_put(peer);
peer             16089 drivers/net/ethernet/broadcom/tg3.c 	return peer;
peer              784 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c 			  int peer)
peer              797 drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c 		if (peer)
peer              128 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	struct list_head	peer;    /* flows with peer flow */
peer             1613 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	list_del(&flow->peer);
peer             3680 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	list_add_tail(&flow->peer, &esw->offloads.peer_flows);
peer             4179 drivers/net/ethernet/mellanox/mlx5/core/en_tc.c 	list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer)
peer             1020 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 	struct qlcnic_dcb_cee *peer;
peer             1029 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 	peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
peer             1032 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 		if (peer->app[i].valid)
peer             1043 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 	struct qlcnic_dcb_cee *peer;
peer             1050 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 	peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
peer             1053 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 		app = &peer->app[i];
peer             1069 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 	struct qlcnic_dcb_cee *peer;
peer             1075 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 	peer = &adapter->dcb->cfg->type[QLC_DCB_PEER_IDX];
peer             1078 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 		if (!peer->pg_cfg[i].valid)
peer             1081 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 		pg->pg_bw[j] = peer->pg_cfg[i].total_bw_percent;
peer             1084 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 			if (peer->tc_cfg[i].valid &&
peer             1085 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 			    (peer->tc_cfg[i].pgid == i)) {
peer             1086 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 				map = peer->tc_cfg[i].up_tc_map;
peer             1102 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 	struct qlcnic_dcb_cee *peer;
peer             1110 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 	peer = &cfg->type[QLC_DCB_PEER_IDX];
peer             1113 drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c 		tc = &peer->tc_cfg[i];
peer             1084 drivers/net/ethernet/sfc/siena_sriov.c 	struct vfdi_endpoint *peer;
peer             1100 drivers/net/ethernet/sfc/siena_sriov.c 	peer = vfdi_status->peers + 1;
peer             1108 drivers/net/ethernet/sfc/siena_sriov.c 			*peer++ = vf->addr;
peer             1118 drivers/net/ethernet/sfc/siena_sriov.c 		ether_addr_copy(peer->mac_addr, local_addr->addr);
peer             1119 drivers/net/ethernet/sfc/siena_sriov.c 		peer->tci = 0;
peer             1120 drivers/net/ethernet/sfc/siena_sriov.c 		++peer;
peer             1141 drivers/net/ethernet/sfc/siena_sriov.c 			peer = (struct vfdi_endpoint *)epp->ptr;
peer              723 drivers/net/ppp/pppoe.c 		  int peer)
peer              475 drivers/net/ppp/pptp.c 	int peer)
peer              231 drivers/net/rionet.c 	struct rionet_peer *peer;
peer              240 drivers/net/rionet.c 			list_for_each_entry(peer, &nets[netid].peers, node) {
peer              241 drivers/net/rionet.c 				if (peer->rdev->destid == sid) {
peer              242 drivers/net/rionet.c 					nets[netid].active[sid] = peer->rdev;
peer              311 drivers/net/rionet.c 	struct rionet_peer *peer;
peer              354 drivers/net/rionet.c 	list_for_each_entry(peer, &nets[netid].peers, node) {
peer              356 drivers/net/rionet.c 		rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
peer              368 drivers/net/rionet.c 	struct rionet_peer *peer;
peer              384 drivers/net/rionet.c 	list_for_each_entry(peer, &nets[netid].peers, node) {
peer              385 drivers/net/rionet.c 		if (nets[netid].active[peer->rdev->destid]) {
peer              386 drivers/net/rionet.c 			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_LEAVE);
peer              387 drivers/net/rionet.c 			nets[netid].active[peer->rdev->destid] = NULL;
peer              389 drivers/net/rionet.c 		if (peer->res)
peer              390 drivers/net/rionet.c 			rio_release_outb_dbell(peer->rdev, peer->res);
peer              406 drivers/net/rionet.c 	struct rionet_peer *peer;
peer              414 drivers/net/rionet.c 	list_for_each_entry(peer, &nets[netid].peers, node) {
peer              415 drivers/net/rionet.c 		if (peer->rdev == rdev) {
peer              416 drivers/net/rionet.c 			list_del(&peer->node);
peer              434 drivers/net/rionet.c 		if (peer->res)
peer              435 drivers/net/rionet.c 			rio_release_outb_dbell(rdev, peer->res);
peer              436 drivers/net/rionet.c 		kfree(peer);
peer              547 drivers/net/rionet.c 	struct rionet_peer *peer;
peer              603 drivers/net/rionet.c 		peer = kzalloc(sizeof(*peer), GFP_KERNEL);
peer              604 drivers/net/rionet.c 		if (!peer) {
peer              608 drivers/net/rionet.c 		peer->rdev = rdev;
peer              609 drivers/net/rionet.c 		peer->res = rio_request_outb_dbell(peer->rdev,
peer              612 drivers/net/rionet.c 		if (!peer->res) {
peer              614 drivers/net/rionet.c 			kfree(peer);
peer              620 drivers/net/rionet.c 		list_add_tail(&peer->node, &nets[netid].peers);
peer              627 drivers/net/rionet.c 			rio_send_doorbell(peer->rdev, RIONET_DOORBELL_JOIN);
peer              638 drivers/net/rionet.c 	struct rionet_peer *peer;
peer              649 drivers/net/rionet.c 		list_for_each_entry(peer, &nets[i].peers, node) {
peer              650 drivers/net/rionet.c 			if (nets[i].active[peer->rdev->destid]) {
peer              651 drivers/net/rionet.c 				rio_send_doorbell(peer->rdev,
peer              653 drivers/net/rionet.c 				nets[i].active[peer->rdev->destid] = NULL;
peer               62 drivers/net/veth.c 	struct net_device __rcu	*peer;
peer              151 drivers/net/veth.c 	struct net_device *peer = rtnl_dereference(priv->peer);
peer              154 drivers/net/veth.c 	data[0] = peer ? peer->ifindex : 0;
peer              246 drivers/net/veth.c 	rcv = rcu_dereference(priv->peer);
peer              336 drivers/net/veth.c 	struct net_device *peer;
peer              350 drivers/net/veth.c 	peer = rcu_dereference(priv->peer);
peer              351 drivers/net/veth.c 	if (peer) {
peer              352 drivers/net/veth.c 		tot->rx_dropped += veth_stats_tx(&tx, peer);
peer              356 drivers/net/veth.c 		veth_stats_rx(&rx, peer);
peer              406 drivers/net/veth.c 	rcv = rcu_dereference(priv->peer);
peer              476 drivers/net/veth.c 	rcv = rcu_dereference(priv->peer);
peer              909 drivers/net/veth.c 	struct net_device *peer = rtnl_dereference(priv->peer);
peer              912 drivers/net/veth.c 	if (!peer)
peer              921 drivers/net/veth.c 	if (peer->flags & IFF_UP) {
peer              923 drivers/net/veth.c 		netif_carrier_on(peer);
peer              932 drivers/net/veth.c 	struct net_device *peer = rtnl_dereference(priv->peer);
peer              935 drivers/net/veth.c 	if (peer)
peer              936 drivers/net/veth.c 		netif_carrier_off(peer);
peer             1014 drivers/net/veth.c 	struct net_device *peer;
peer             1018 drivers/net/veth.c 	peer = rcu_dereference(priv->peer);
peer             1019 drivers/net/veth.c 	iflink = peer ? peer->ifindex : 0;
peer             1029 drivers/net/veth.c 	struct net_device *peer;
peer             1031 drivers/net/veth.c 	peer = rtnl_dereference(priv->peer);
peer             1032 drivers/net/veth.c 	if (peer) {
peer             1033 drivers/net/veth.c 		struct veth_priv *peer_priv = netdev_priv(peer);
peer             1045 drivers/net/veth.c 	struct net_device *peer;
peer             1051 drivers/net/veth.c 	peer = rcu_dereference(priv->peer);
peer             1052 drivers/net/veth.c 	if (unlikely(!peer))
peer             1055 drivers/net/veth.c 	peer_priv = netdev_priv(peer);
peer             1059 drivers/net/veth.c 	peer->needed_headroom = new_hr;
peer             1070 drivers/net/veth.c 	struct net_device *peer;
peer             1076 drivers/net/veth.c 	peer = rtnl_dereference(priv->peer);
peer             1079 drivers/net/veth.c 		if (!peer) {
peer             1086 drivers/net/veth.c 			  peer->hard_header_len -
peer             1088 drivers/net/veth.c 		if (peer->mtu > max_mtu) {
peer             1094 drivers/net/veth.c 		if (dev->real_num_rx_queues < peer->real_num_tx_queues) {
peer             1109 drivers/net/veth.c 			peer->hw_features &= ~NETIF_F_GSO_SOFTWARE;
peer             1110 drivers/net/veth.c 			peer->max_mtu = max_mtu;
peer             1119 drivers/net/veth.c 			if (peer) {
peer             1120 drivers/net/veth.c 				peer->hw_features |= NETIF_F_GSO_SOFTWARE;
peer             1121 drivers/net/veth.c 				peer->max_mtu = ETH_MAX_MTU;
peer             1127 drivers/net/veth.c 	if ((!!old_prog ^ !!prog) && peer)
peer             1128 drivers/net/veth.c 		netdev_update_features(peer);
peer             1241 drivers/net/veth.c 	struct net_device *peer;
peer             1286 drivers/net/veth.c 	peer = rtnl_create_link(net, ifname, name_assign_type,
peer             1288 drivers/net/veth.c 	if (IS_ERR(peer)) {
peer             1290 drivers/net/veth.c 		return PTR_ERR(peer);
peer             1294 drivers/net/veth.c 		eth_hw_addr_random(peer);
peer             1297 drivers/net/veth.c 		peer->ifindex = ifmp->ifi_index;
peer             1299 drivers/net/veth.c 	peer->gso_max_size = dev->gso_max_size;
peer             1300 drivers/net/veth.c 	peer->gso_max_segs = dev->gso_max_segs;
peer             1302 drivers/net/veth.c 	err = register_netdevice(peer);
peer             1308 drivers/net/veth.c 	netif_carrier_off(peer);
peer             1310 drivers/net/veth.c 	err = rtnl_configure_link(peer, ifmp);
peer             1340 drivers/net/veth.c 	rcu_assign_pointer(priv->peer, peer);
peer             1342 drivers/net/veth.c 	priv = netdev_priv(peer);
peer             1343 drivers/net/veth.c 	rcu_assign_pointer(priv->peer, dev);
peer             1350 drivers/net/veth.c 	unregister_netdevice(peer);
peer             1354 drivers/net/veth.c 	free_netdev(peer);
peer             1361 drivers/net/veth.c 	struct net_device *peer;
peer             1364 drivers/net/veth.c 	peer = rtnl_dereference(priv->peer);
peer             1370 drivers/net/veth.c 	RCU_INIT_POINTER(priv->peer, NULL);
peer             1373 drivers/net/veth.c 	if (peer) {
peer             1374 drivers/net/veth.c 		priv = netdev_priv(peer);
peer             1375 drivers/net/veth.c 		RCU_INIT_POINTER(priv->peer, NULL);
peer             1376 drivers/net/veth.c 		unregister_netdevice_queue(peer, head);
peer             1387 drivers/net/veth.c 	struct net_device *peer = rtnl_dereference(priv->peer);
peer             1389 drivers/net/veth.c 	return peer ? dev_net(peer) : dev_net(dev);
peer               53 drivers/net/wireless/ath/ath10k/debugfs_sta.c 	struct ath10k_peer *peer;
peer               62 drivers/net/wireless/ath/ath10k/debugfs_sta.c 	peer = ath10k_peer_find_by_id(ar, peer_id);
peer               63 drivers/net/wireless/ath/ath10k/debugfs_sta.c 	if (!peer || !peer->sta)
peer               66 drivers/net/wireless/ath/ath10k/debugfs_sta.c 	arsta = (struct ath10k_sta *)peer->sta->drv_priv;
peer              131 drivers/net/wireless/ath/ath10k/debugfs_sta.c 	struct ath10k_fw_extd_stats_peer *peer;
peer              136 drivers/net/wireless/ath/ath10k/debugfs_sta.c 	list_for_each_entry(peer, &stats->peers_extd, list) {
peer              137 drivers/net/wireless/ath/ath10k/debugfs_sta.c 		sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr,
peer              142 drivers/net/wireless/ath/ath10k/debugfs_sta.c 		arsta->rx_duration += (u64)peer->rx_duration;
peer              150 drivers/net/wireless/ath/ath10k/debugfs_sta.c 	struct ath10k_fw_stats_peer *peer;
peer              155 drivers/net/wireless/ath/ath10k/debugfs_sta.c 	list_for_each_entry(peer, &stats->peers, list) {
peer              156 drivers/net/wireless/ath/ath10k/debugfs_sta.c 		sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr,
peer              161 drivers/net/wireless/ath/ath10k/debugfs_sta.c 		arsta->rx_duration += (u64)peer->rx_duration;
peer             1048 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct ath10k_peer *peer;
peer             1069 drivers/net/wireless/ath/ath10k/htt_rx.c 	peer = ath10k_peer_find_by_id(ar, peer_id);
peer             1070 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (!peer)
peer             1073 drivers/net/wireless/ath/ath10k/htt_rx.c 	arvif = ath10k_get_arvif(ar, peer->vdev_id);
peer             2087 drivers/net/wireless/ath/ath10k/htt_rx.c 					     struct ath10k_peer *peer,
peer             2101 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (!peer)
peer             2117 drivers/net/wireless/ath/ath10k/htt_rx.c 	last_pn_valid = peer->tids_last_pn_valid[tid];
peer             2118 drivers/net/wireless/ath/ath10k/htt_rx.c 	last_pn = &peer->tids_last_pn[tid];
peer             2125 drivers/net/wireless/ath/ath10k/htt_rx.c 	sec_type = peer->rx_pn[sec_index].sec_type;
peer             2126 drivers/net/wireless/ath/ath10k/htt_rx.c 	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
peer             2136 drivers/net/wireless/ath/ath10k/htt_rx.c 		peer->tids_last_pn_valid[tid] = 1;
peer             2151 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct ath10k_peer *peer;
peer             2172 drivers/net/wireless/ath/ath10k/htt_rx.c 	peer = ath10k_peer_find_by_id(ar, peer_id);
peer             2174 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (!peer && peer_id != HTT_INVALID_PEERID)
peer             2177 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (!peer)
peer             2212 drivers/net/wireless/ath/ath10k/htt_rx.c 	sec_type = peer->rx_pn[sec_index].sec_type;
peer             2215 drivers/net/wireless/ath/ath10k/htt_rx.c 	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
peer             2219 drivers/net/wireless/ath/ath10k/htt_rx.c 		pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx);
peer             2305 drivers/net/wireless/ath/ath10k/htt_rx.c 			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
peer             2306 drivers/net/wireless/ath/ath10k/htt_rx.c 				if (peer->keys[i] &&
peer             2307 drivers/net/wireless/ath/ath10k/htt_rx.c 				    peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE)
peer             2308 drivers/net/wireless/ath/ath10k/htt_rx.c 					keyidx = peer->keys[i]->keyidx;
peer             2454 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct ath10k_peer *peer;
peer             2466 drivers/net/wireless/ath/ath10k/htt_rx.c 	peer = ath10k_peer_find_by_id(ar, peer_id);
peer             2467 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (!peer) {
peer             2505 drivers/net/wireless/ath/ath10k/htt_rx.c 	sec_type = peer->rx_pn[sec_index].sec_type;
peer             2506 drivers/net/wireless/ath/ath10k/htt_rx.c 	ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
peer             2557 drivers/net/wireless/ath/ath10k/htt_rx.c 	last_pn = &peer->frag_tids_last_pn[tid];
peer             2560 drivers/net/wireless/ath/ath10k/htt_rx.c 		if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl))
peer             2564 drivers/net/wireless/ath/ath10k/htt_rx.c 		peer->frag_tids_seq[tid] = seq;
peer             2566 drivers/net/wireless/ath/ath10k/htt_rx.c 		if (seq != peer->frag_tids_seq[tid])
peer             2573 drivers/net/wireless/ath/ath10k/htt_rx.c 		last_pn = &peer->tids_last_pn[tid];
peer             2632 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct ath10k_peer *peer;
peer             2728 drivers/net/wireless/ath/ath10k/htt_rx.c 		peer = ath10k_peer_find_by_id(ar, peer_id);
peer             2729 drivers/net/wireless/ath/ath10k/htt_rx.c 		if (!peer || !peer->sta) {
peer             2738 drivers/net/wireless/ath/ath10k/htt_rx.c 		ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0);
peer             2748 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct ath10k_peer *peer;
peer             2761 drivers/net/wireless/ath/ath10k/htt_rx.c 	peer = ath10k_peer_find_by_id(ar, peer_id);
peer             2762 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (!peer) {
peer             2769 drivers/net/wireless/ath/ath10k/htt_rx.c 	arvif = ath10k_get_arvif(ar, peer->vdev_id);
peer             2772 drivers/net/wireless/ath/ath10k/htt_rx.c 			    peer->vdev_id);
peer             2779 drivers/net/wireless/ath/ath10k/htt_rx.c 		   peer->addr, tid, ev->window_size);
peer             2781 drivers/net/wireless/ath/ath10k/htt_rx.c 	ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
peer             2788 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct ath10k_peer *peer;
peer             2801 drivers/net/wireless/ath/ath10k/htt_rx.c 	peer = ath10k_peer_find_by_id(ar, peer_id);
peer             2802 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (!peer) {
peer             2809 drivers/net/wireless/ath/ath10k/htt_rx.c 	arvif = ath10k_get_arvif(ar, peer->vdev_id);
peer             2812 drivers/net/wireless/ath/ath10k/htt_rx.c 			    peer->vdev_id);
peer             2819 drivers/net/wireless/ath/ath10k/htt_rx.c 		   peer->addr, tid);
peer             2821 drivers/net/wireless/ath/ath10k/htt_rx.c 	ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
peer             3576 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct ath10k_peer *peer;
peer             3594 drivers/net/wireless/ath/ath10k/htt_rx.c 	peer = ath10k_peer_find_by_id(ar, peer_id);
peer             3595 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (!peer || !peer->sta) {
peer             3601 drivers/net/wireless/ath/ath10k/htt_rx.c 	sta = peer->sta;
peer             3631 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct ath10k_peer *peer;
peer             3648 drivers/net/wireless/ath/ath10k/htt_rx.c 	peer = ath10k_peer_find_by_id(ar, peer_id);
peer             3649 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (!peer || !peer->sta) {
peer             3655 drivers/net/wireless/ath/ath10k/htt_rx.c 	sta = peer->sta;
peer             3698 drivers/net/wireless/ath/ath10k/htt_rx.c 	struct ath10k_peer *peer;
peer             3702 drivers/net/wireless/ath/ath10k/htt_rx.c 	peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
peer             3703 drivers/net/wireless/ath/ath10k/htt_rx.c 	if (!peer) {
peer             3716 drivers/net/wireless/ath/ath10k/htt_rx.c 	peer->rx_pn[sec_index].sec_type = sec_type;
peer             3717 drivers/net/wireless/ath/ath10k/htt_rx.c 	peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);
peer             3719 drivers/net/wireless/ath/ath10k/htt_rx.c 	memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
peer             3720 drivers/net/wireless/ath/ath10k/htt_rx.c 	memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));
peer              314 drivers/net/wireless/ath/ath10k/mac.c 	struct ath10k_peer *peer;
peer              327 drivers/net/wireless/ath/ath10k/mac.c 	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
peer              330 drivers/net/wireless/ath/ath10k/mac.c 	if (!peer)
peer              367 drivers/net/wireless/ath/ath10k/mac.c 		peer->keys[i] = arvif->wep_keys[i];
peer              402 drivers/net/wireless/ath/ath10k/mac.c 	struct ath10k_peer *peer;
peer              411 drivers/net/wireless/ath/ath10k/mac.c 	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
peer              414 drivers/net/wireless/ath/ath10k/mac.c 	if (!peer)
peer              417 drivers/net/wireless/ath/ath10k/mac.c 	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
peer              418 drivers/net/wireless/ath/ath10k/mac.c 		if (peer->keys[i] == NULL)
peer              422 drivers/net/wireless/ath/ath10k/mac.c 		ret = ath10k_install_key(arvif, peer->keys[i],
peer              432 drivers/net/wireless/ath/ath10k/mac.c 		peer->keys[i] = NULL;
peer              442 drivers/net/wireless/ath/ath10k/mac.c 	struct ath10k_peer *peer;
peer              452 drivers/net/wireless/ath/ath10k/mac.c 	peer = ath10k_peer_find(ar, 0, addr);
peer              453 drivers/net/wireless/ath/ath10k/mac.c 	if (!peer)
peer              456 drivers/net/wireless/ath/ath10k/mac.c 	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
peer              457 drivers/net/wireless/ath/ath10k/mac.c 		if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
peer              468 drivers/net/wireless/ath/ath10k/mac.c 	struct ath10k_peer *peer;
peer              483 drivers/net/wireless/ath/ath10k/mac.c 		list_for_each_entry(peer, &ar->peers, list) {
peer              484 drivers/net/wireless/ath/ath10k/mac.c 			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
peer              485 drivers/net/wireless/ath/ath10k/mac.c 				if (peer->keys[i] == key) {
peer              486 drivers/net/wireless/ath/ath10k/mac.c 					ether_addr_copy(addr, peer->addr);
peer              487 drivers/net/wireless/ath/ath10k/mac.c 					peer->keys[i] = NULL;
peer              492 drivers/net/wireless/ath/ath10k/mac.c 			if (i < ARRAY_SIZE(peer->keys))
peer              497 drivers/net/wireless/ath/ath10k/mac.c 		if (i == ARRAY_SIZE(peer->keys))
peer              516 drivers/net/wireless/ath/ath10k/mac.c 	struct ath10k_peer *peer;
peer              521 drivers/net/wireless/ath/ath10k/mac.c 	list_for_each_entry(peer, &ar->peers, list) {
peer              522 drivers/net/wireless/ath/ath10k/mac.c 		if (ether_addr_equal(peer->addr, arvif->vif->addr))
peer              525 drivers/net/wireless/ath/ath10k/mac.c 		if (ether_addr_equal(peer->addr, arvif->bssid))
peer              528 drivers/net/wireless/ath/ath10k/mac.c 		if (peer->keys[key->keyidx] == key)
peer              534 drivers/net/wireless/ath/ath10k/mac.c 		ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
peer              537 drivers/net/wireless/ath/ath10k/mac.c 				    arvif->vdev_id, peer->addr, ret);
peer              724 drivers/net/wireless/ath/ath10k/mac.c 	struct ath10k_peer *peer;
peer              755 drivers/net/wireless/ath/ath10k/mac.c 	peer = ath10k_peer_find(ar, vdev_id, addr);
peer              756 drivers/net/wireless/ath/ath10k/mac.c 	if (!peer) {
peer              764 drivers/net/wireless/ath/ath10k/mac.c 	peer->vif = vif;
peer              765 drivers/net/wireless/ath/ath10k/mac.c 	peer->sta = sta;
peer              861 drivers/net/wireless/ath/ath10k/mac.c 	struct ath10k_peer *peer, *tmp;
peer              868 drivers/net/wireless/ath/ath10k/mac.c 	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
peer              869 drivers/net/wireless/ath/ath10k/mac.c 		if (peer->vdev_id != vdev_id)
peer              873 drivers/net/wireless/ath/ath10k/mac.c 			    peer->addr, vdev_id);
peer              875 drivers/net/wireless/ath/ath10k/mac.c 		for_each_set_bit(peer_id, peer->peer_ids,
peer              884 drivers/net/wireless/ath/ath10k/mac.c 			if (ar->peer_map[i] == peer) {
peer              886 drivers/net/wireless/ath/ath10k/mac.c 					    peer->addr, peer, i);
peer              891 drivers/net/wireless/ath/ath10k/mac.c 		list_del(&peer->list);
peer              892 drivers/net/wireless/ath/ath10k/mac.c 		kfree(peer);
peer              900 drivers/net/wireless/ath/ath10k/mac.c 	struct ath10k_peer *peer, *tmp;
peer              906 drivers/net/wireless/ath/ath10k/mac.c 	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
peer              907 drivers/net/wireless/ath/ath10k/mac.c 		list_del(&peer->list);
peer              908 drivers/net/wireless/ath/ath10k/mac.c 		kfree(peer);
peer             3777 drivers/net/wireless/ath/ath10k/mac.c 	struct ath10k_peer *peer;
peer             3814 drivers/net/wireless/ath/ath10k/mac.c 		peer = ath10k_peer_find(ar, vdev_id, peer_addr);
peer             3817 drivers/net/wireless/ath/ath10k/mac.c 		if (peer)
peer             3822 drivers/net/wireless/ath/ath10k/mac.c 		if (!peer) {
peer             3866 drivers/net/wireless/ath/ath10k/mac.c 		if (!peer && tmp_peer_created) {
peer             3962 drivers/net/wireless/ath/ath10k/mac.c 	struct ath10k_peer *peer;
peer             3966 drivers/net/wireless/ath/ath10k/mac.c 	peer = ar->peer_map[peer_id];
peer             3967 drivers/net/wireless/ath/ath10k/mac.c 	if (!peer)
peer             3970 drivers/net/wireless/ath/ath10k/mac.c 	if (peer->removed)
peer             3973 drivers/net/wireless/ath/ath10k/mac.c 	if (peer->sta)
peer             3974 drivers/net/wireless/ath/ath10k/mac.c 		return peer->sta->txq[tid];
peer             3975 drivers/net/wireless/ath/ath10k/mac.c 	else if (peer->vif)
peer             3976 drivers/net/wireless/ath/ath10k/mac.c 		return peer->vif->txq;
peer             5137 drivers/net/wireless/ath/ath10k/mac.c 	struct ath10k_peer *peer;
peer             5357 drivers/net/wireless/ath/ath10k/mac.c 		peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
peer             5358 drivers/net/wireless/ath/ath10k/mac.c 		if (!peer) {
peer             5366 drivers/net/wireless/ath/ath10k/mac.c 		arvif->peer_id = find_first_bit(peer->peer_ids,
peer             5498 drivers/net/wireless/ath/ath10k/mac.c 	struct ath10k_peer *peer;
peer             5566 drivers/net/wireless/ath/ath10k/mac.c 		peer = ar->peer_map[i];
peer             5567 drivers/net/wireless/ath/ath10k/mac.c 		if (!peer)
peer             5570 drivers/net/wireless/ath/ath10k/mac.c 		if (peer->vif == vif) {
peer             5573 drivers/net/wireless/ath/ath10k/mac.c 			peer->vif = NULL;
peer             6111 drivers/net/wireless/ath/ath10k/mac.c 	struct ath10k_peer *peer;
peer             6155 drivers/net/wireless/ath/ath10k/mac.c 	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
peer             6158 drivers/net/wireless/ath/ath10k/mac.c 	if (!peer) {
peer             6232 drivers/net/wireless/ath/ath10k/mac.c 	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
peer             6233 drivers/net/wireless/ath/ath10k/mac.c 	if (peer && cmd == SET_KEY)
peer             6234 drivers/net/wireless/ath/ath10k/mac.c 		peer->keys[key->keyidx] = key;
peer             6235 drivers/net/wireless/ath/ath10k/mac.c 	else if (peer && cmd == DISABLE_KEY)
peer             6236 drivers/net/wireless/ath/ath10k/mac.c 		peer->keys[key->keyidx] = NULL;
peer             6237 drivers/net/wireless/ath/ath10k/mac.c 	else if (peer == NULL)
peer             6460 drivers/net/wireless/ath/ath10k/mac.c 	struct ath10k_peer *peer;
peer             6537 drivers/net/wireless/ath/ath10k/mac.c 		peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
peer             6538 drivers/net/wireless/ath/ath10k/mac.c 		if (!peer) {
peer             6549 drivers/net/wireless/ath/ath10k/mac.c 		arsta->peer_id = find_first_bit(peer->peer_ids,
peer             6612 drivers/net/wireless/ath/ath10k/mac.c 			peer = ar->peer_map[i];
peer             6613 drivers/net/wireless/ath/ath10k/mac.c 			if (!peer)
peer             6616 drivers/net/wireless/ath/ath10k/mac.c 			if (peer->sta == sta) {
peer             6618 drivers/net/wireless/ath/ath10k/mac.c 					    sta->addr, peer, i, arvif->vdev_id);
peer             6619 drivers/net/wireless/ath/ath10k/mac.c 				peer->sta = NULL;
peer             6624 drivers/net/wireless/ath/ath10k/mac.c 				list_del(&peer->list);
peer             6626 drivers/net/wireless/ath/ath10k/mac.c 				kfree(peer);
peer             7588 drivers/net/wireless/ath/ath10k/mac.c 	struct ath10k_peer *peer;
peer             7593 drivers/net/wireless/ath/ath10k/mac.c 	peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
peer             7594 drivers/net/wireless/ath/ath10k/mac.c 	if (!peer) {
peer             8146 drivers/net/wireless/ath/ath10k/mac.c 	struct ath10k_peer *peer;
peer             8150 drivers/net/wireless/ath/ath10k/mac.c 	list_for_each_entry(peer, &ar->peers, list)
peer             8151 drivers/net/wireless/ath/ath10k/mac.c 		if (peer->sta == sta)
peer             8152 drivers/net/wireless/ath/ath10k/mac.c 			peer->removed = true;
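The mac.c lines above repeatedly resolve a peer by (vdev_id, MAC address) and then manage a small per-peer key-slot array: keys are installed into peer->keys[keyidx] on SET_KEY, cleared on DISABLE_KEY, and all slots are dropped when the peer or interface goes away. A minimal sketch of that slot bookkeeping, assuming a hypothetical fixed slot count and demo_* names:

#include <stddef.h>
#include <stdbool.h>

#define DEMO_KEY_SLOTS 4   /* hypothetical; the driver sizes this itself */

/* Hypothetical, simplified peer with per-peer key slots. */
struct demo_key { int keyidx; };
struct demo_peer {
	unsigned char addr[6];
	int vdev_id;
	struct demo_key *keys[DEMO_KEY_SLOTS];
};

/* Install a key into its slot; returns false for an out-of-range index. */
static bool demo_peer_set_key(struct demo_peer *p, struct demo_key *k)
{
	if (k->keyidx < 0 || k->keyidx >= DEMO_KEY_SLOTS)
		return false;
	p->keys[k->keyidx] = k;
	return true;
}

/* Clear every installed slot, the analogue of the "peer->keys[i] = NULL"
 * loops in the listing above. */
static void demo_peer_clear_keys(struct demo_peer *p)
{
	for (size_t i = 0; i < DEMO_KEY_SLOTS; i++)
		p->keys[i] = NULL;
}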
peer              135 drivers/net/wireless/ath/ath10k/txrx.c 	struct ath10k_peer *peer;
peer              139 drivers/net/wireless/ath/ath10k/txrx.c 	list_for_each_entry(peer, &ar->peers, list) {
peer              140 drivers/net/wireless/ath/ath10k/txrx.c 		if (peer->vdev_id != vdev_id)
peer              142 drivers/net/wireless/ath/ath10k/txrx.c 		if (!ether_addr_equal(peer->addr, addr))
peer              145 drivers/net/wireless/ath/ath10k/txrx.c 		return peer;
peer              153 drivers/net/wireless/ath/ath10k/txrx.c 	struct ath10k_peer *peer;
peer              155 drivers/net/wireless/ath/ath10k/txrx.c 	if (peer_id >= BITS_PER_TYPE(peer->peer_ids))
peer              160 drivers/net/wireless/ath/ath10k/txrx.c 	list_for_each_entry(peer, &ar->peers, list)
peer              161 drivers/net/wireless/ath/ath10k/txrx.c 		if (test_bit(peer_id, peer->peer_ids))
peer              162 drivers/net/wireless/ath/ath10k/txrx.c 			return peer;
peer              203 drivers/net/wireless/ath/ath10k/txrx.c 	struct ath10k_peer *peer;
peer              213 drivers/net/wireless/ath/ath10k/txrx.c 	peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
peer              214 drivers/net/wireless/ath/ath10k/txrx.c 	if (!peer) {
peer              215 drivers/net/wireless/ath/ath10k/txrx.c 		peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
peer              216 drivers/net/wireless/ath/ath10k/txrx.c 		if (!peer)
peer              219 drivers/net/wireless/ath/ath10k/txrx.c 		peer->vdev_id = ev->vdev_id;
peer              220 drivers/net/wireless/ath/ath10k/txrx.c 		ether_addr_copy(peer->addr, ev->addr);
peer              221 drivers/net/wireless/ath/ath10k/txrx.c 		list_add(&peer->list, &ar->peers);
peer              228 drivers/net/wireless/ath/ath10k/txrx.c 	WARN_ON(ar->peer_map[ev->peer_id] && (ar->peer_map[ev->peer_id] != peer));
peer              229 drivers/net/wireless/ath/ath10k/txrx.c 	ar->peer_map[ev->peer_id] = peer;
peer              230 drivers/net/wireless/ath/ath10k/txrx.c 	set_bit(ev->peer_id, peer->peer_ids);
peer              239 drivers/net/wireless/ath/ath10k/txrx.c 	struct ath10k_peer *peer;
peer              249 drivers/net/wireless/ath/ath10k/txrx.c 	peer = ath10k_peer_find_by_id(ar, ev->peer_id);
peer              250 drivers/net/wireless/ath/ath10k/txrx.c 	if (!peer) {
peer              257 drivers/net/wireless/ath/ath10k/txrx.c 		   peer->vdev_id, peer->addr, ev->peer_id);
peer              260 drivers/net/wireless/ath/ath10k/txrx.c 	clear_bit(ev->peer_id, peer->peer_ids);
peer              262 drivers/net/wireless/ath/ath10k/txrx.c 	if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
peer              263 drivers/net/wireless/ath/ath10k/txrx.c 		list_del(&peer->list);
peer              264 drivers/net/wireless/ath/ath10k/txrx.c 		kfree(peer);
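The txrx.c lines above show the peer lifecycle: lookup by (vdev_id, MAC) walks a list, lookup by firmware peer ID goes through a bitmap, a map event allocates the peer on first sight and sets the new ID bit, and an unmap event clears the bit and frees the peer once no IDs remain. A self-contained sketch of that registry in plain C (demo_* names are hypothetical, and the fixed-size map is for illustration only):

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

#define MAX_PEER_IDS 64            /* small, for illustration only */

/* Hypothetical, simplified peer object: MAC + vdev + bitmap of fw IDs. */
struct demo_peer {
	unsigned char addr[6];
	int vdev_id;
	uint64_t id_bitmap;        /* one bit per firmware peer ID */
	struct demo_peer *next;    /* singly linked peer list      */
};

static struct demo_peer *peer_list;
static struct demo_peer *peer_map[MAX_PEER_IDS];

static struct demo_peer *demo_peer_find(int vdev_id, const unsigned char *addr)
{
	for (struct demo_peer *p = peer_list; p; p = p->next)
		if (p->vdev_id == vdev_id && !memcmp(p->addr, addr, 6))
			return p;
	return NULL;
}

/* Map event: allocate on first sight, then record the new firmware ID. */
static bool demo_peer_map(int vdev_id, const unsigned char *addr, unsigned id)
{
	struct demo_peer *p;

	if (id >= MAX_PEER_IDS)
		return false;
	p = demo_peer_find(vdev_id, addr);
	if (!p) {
		p = calloc(1, sizeof(*p));
		if (!p)
			return false;
		p->vdev_id = vdev_id;
		memcpy(p->addr, addr, 6);
		p->next = peer_list;
		peer_list = p;
	}
	peer_map[id] = p;
	p->id_bitmap |= 1ULL << id;
	return true;
}

/* Unmap event: drop the ID; free the peer once no IDs remain,
 * the analogue of the bitmap_empty()/list_del()/kfree() sequence above. */
static void demo_peer_unmap(unsigned id)
{
	struct demo_peer *p;

	if (id >= MAX_PEER_IDS || !(p = peer_map[id]))
		return;
	peer_map[id] = NULL;
	p->id_bitmap &= ~(1ULL << id);
	if (!p->id_bitmap) {
		struct demo_peer **pp = &peer_list;

		while (*pp && *pp != p)
			pp = &(*pp)->next;
		if (*pp)
			*pp = p->next;
		free(p);
	}
}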
peer             5105 drivers/net/wireless/ath/ath10k/wmi.c 	struct ath10k_peer *peer;
peer             5124 drivers/net/wireless/ath/ath10k/wmi.c 	peer = ath10k_peer_find(ar, vdev_id, ev->peer_macaddr.addr);
peer             5127 drivers/net/wireless/ath/ath10k/wmi.c 	if (!peer) {
peer             8311 drivers/net/wireless/ath/ath10k/wmi.c ath10k_wmi_fw_peer_stats_fill(const struct ath10k_fw_stats_peer *peer,
peer             8318 drivers/net/wireless/ath/ath10k/wmi.c 			"Peer MAC address", peer->peer_macaddr);
peer             8320 drivers/net/wireless/ath/ath10k/wmi.c 			"Peer RSSI", peer->peer_rssi);
peer             8322 drivers/net/wireless/ath/ath10k/wmi.c 			"Peer TX rate", peer->peer_tx_rate);
peer             8324 drivers/net/wireless/ath/ath10k/wmi.c 			"Peer RX rate", peer->peer_rx_rate);
peer             8327 drivers/net/wireless/ath/ath10k/wmi.c 				"Peer RX duration", peer->rx_duration);
peer             8334 drivers/net/wireless/ath/ath10k/wmi.c ath10k_wmi_fw_extd_peer_stats_fill(const struct ath10k_fw_extd_stats_peer *peer,
peer             8341 drivers/net/wireless/ath/ath10k/wmi.c 			"Peer MAC address", peer->peer_macaddr);
peer             8343 drivers/net/wireless/ath/ath10k/wmi.c 			"Peer RX duration", peer->rx_duration);
peer             8354 drivers/net/wireless/ath/ath10k/wmi.c 	const struct ath10k_fw_stats_peer *peer;
peer             8390 drivers/net/wireless/ath/ath10k/wmi.c 	list_for_each_entry(peer, &fw_stats->peers, list) {
peer             8391 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
peer             8412 drivers/net/wireless/ath/ath10k/wmi.c 	const struct ath10k_fw_stats_peer *peer;
peer             8449 drivers/net/wireless/ath/ath10k/wmi.c 	list_for_each_entry(peer, &fw_stats->peers, list) {
peer             8450 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
peer             8559 drivers/net/wireless/ath/ath10k/wmi.c 	const struct ath10k_fw_stats_peer *peer;
peer             8622 drivers/net/wireless/ath/ath10k/wmi.c 	list_for_each_entry(peer, &fw_stats->peers, list) {
peer             8623 drivers/net/wireless/ath/ath10k/wmi.c 		ath10k_wmi_fw_peer_stats_fill(peer, buf, &len,
peer             4641 drivers/net/wireless/ath/ath10k/wmi.h 	struct wmi_pdev_stats_peer peer;
peer             4657 drivers/net/wireless/ath/ath10k/wmi.h 	struct wmi_pdev_stats_peer peer;
peer             4673 drivers/net/wireless/ath/ath10k/wmi.h 	struct wmi_pdev_stats_peer peer;
peer             2371 drivers/net/wireless/ath/wil6210/cfg80211.c 				     const u8 *peer, u64 *cookie)
peer             2376 drivers/net/wireless/ath/wil6210/cfg80211.c 	int cid = wil_find_cid(wil, vif->mid, peer);
peer             2379 drivers/net/wireless/ath/wil6210/cfg80211.c 		     peer, cid, vif->mid);
peer              389 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c 			     u8 peer[ETH_ALEN])
peer              395 drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcdc.c 			       u8 peer[ETH_ALEN])
peer             5128 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 				    struct net_device *ndev, const u8 *peer,
peer             5144 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	if (peer)
peer             5145 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 		memcpy(info.ea, peer, ETH_ALEN);
peer              432 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 				u8 peer[ETH_ALEN])
peer              448 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 		if (memcmp(search->mac, peer, ETH_ALEN) == 0) {
peer              458 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 		if ((sta || (memcmp(hash[i].mac, peer, ETH_ALEN) == 0)) &&
peer              481 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 				  u8 peer[ETH_ALEN])
peer              490 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 	memcpy(tdls_entry->mac, peer, ETH_ALEN);
peer              496 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 		if (memcmp(search->mac, peer, ETH_ALEN) == 0)
peer              500 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c 			if (memcmp(search->mac, peer, ETH_ALEN) == 0)
peer               69 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h 				u8 peer[ETH_ALEN]);
peer               71 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h 				  u8 peer[ETH_ALEN]);
peer              830 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c brcmf_msgbuf_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
peer              834 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	brcmf_flowring_delete_peer(msgbuf->flow, ifidx, peer);
peer              839 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c brcmf_msgbuf_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
peer              843 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c 	brcmf_flowring_add_tdls_peer(msgbuf->flow, ifidx, peer);
peer               32 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h 			    u8 peer[ETH_ALEN]);
peer               34 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h 			      u8 peer[ETH_ALEN]);
peer               94 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h brcmf_proto_delete_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
peer               96 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h 	drvr->proto->delete_peer(drvr, ifidx, peer);
peer               99 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h brcmf_proto_add_tdls_peer(struct brcmf_pub *drvr, int ifidx, u8 peer[ETH_ALEN])
peer              101 drivers/net/wireless/broadcom/brcm80211/brcmfmac/proto.h 	drvr->proto->add_tdls_peer(drvr, ifidx, peer);
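The brcmfmac proto.h lines above are thin static-inline wrappers that forward delete_peer/add_tdls_peer through a per-protocol ops table (drvr->proto->...), so bcdc and msgbuf can plug in different backends. A minimal sketch of that indirection, with hypothetical demo_* names standing in for the driver's types:

#include <stdio.h>

#define ETH_ALEN 6

/* Hypothetical ops table mirroring the delete_peer/add_tdls_peer
 * indirection the proto.h wrappers above forward to. */
struct demo_proto_ops {
	void (*delete_peer)(int ifidx, const unsigned char peer[ETH_ALEN]);
	void (*add_tdls_peer)(int ifidx, const unsigned char peer[ETH_ALEN]);
};

struct demo_pub {
	const struct demo_proto_ops *proto;
};

/* Thin wrapper so callers never touch the ops table directly; the bus
 * protocol (bcdc- or msgbuf-style) supplies the backend. */
static void demo_delete_peer(struct demo_pub *drvr, int ifidx,
			     const unsigned char peer[ETH_ALEN])
{
	drvr->proto->delete_peer(ifidx, peer);
}

/* One possible backend implementation, for illustration only. */
static void demo_msgbuf_delete_peer(int ifidx, const unsigned char peer[ETH_ALEN])
{
	printf("if%d: delete flow state for peer %02x:%02x:...\n",
	       ifidx, peer[0], peer[1]);
}

static const struct demo_proto_ops demo_msgbuf_ops = {
	.delete_peer = demo_msgbuf_delete_peer,
};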
peer              927 drivers/net/wireless/intel/iwlwifi/fw/file.h 	u8 peer[ETH_ALEN];
peer              214 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 				      struct cfg80211_pmsr_request_peer *peer,
peer              218 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	u32 freq = peer->chandef.chan->center_freq;
peer              222 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	switch (peer->chandef.width) {
peer              237 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 			peer->chandef.width);
peer              241 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	*ctrl_ch_position = (peer->chandef.width > NL80211_CHAN_WIDTH_20) ?
peer              242 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		iwl_mvm_get_ctrl_pos(&peer->chandef) : 0;
peer              249 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 			  struct cfg80211_pmsr_request_peer *peer,
peer              254 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	ret = iwl_mvm_ftm_target_chandef(mvm, peer, &target->channel_num,
peer              260 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	memcpy(target->bssid, peer->addr, ETH_ALEN);
peer              262 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		cpu_to_le16(peer->ftm.burst_period);
peer              263 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	target->samples_per_burst = peer->ftm.ftms_per_burst;
peer              264 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	target->num_of_bursts = peer->ftm.num_bursts_exp;
peer              266 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	target->retries_per_sample = peer->ftm.ftmr_retries;
peer              267 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	target->asap_mode = peer->ftm.asap;
peer              270 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	if (peer->ftm.request_lci)
peer              272 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	if (peer->ftm.request_civicloc)
peer              284 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 				  struct cfg80211_pmsr_request_peer *peer,
peer              289 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	ret = iwl_mvm_ftm_target_chandef(mvm, peer, &target->channel_num,
peer              295 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	memcpy(target->bssid, peer->addr, ETH_ALEN);
peer              297 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		cpu_to_le16(peer->ftm.burst_period);
peer              298 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	target->samples_per_burst = peer->ftm.ftms_per_burst;
peer              299 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	target->num_of_bursts = peer->ftm.num_bursts_exp;
peer              300 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	target->ftmr_max_retries = peer->ftm.ftmr_retries;
peer              303 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	if (peer->ftm.asap)
peer              306 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	if (peer->ftm.request_lci)
peer              309 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 	if (peer->ftm.request_civicloc)
peer              356 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
peer              359 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 			err = iwl_mvm_ftm_put_target(mvm, peer, &cmd.ap[i]);
peer              361 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 			err = iwl_mvm_ftm_put_target_v2(mvm, peer,
peer              408 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		struct cfg80211_pmsr_request_peer *peer = &req->peers[i];
peer              410 drivers/net/wireless/intel/iwlwifi/mvm/ftm-initiator.c 		if (ether_addr_equal_unaligned(peer->addr, addr))
peer             1392 drivers/net/wireless/intel/iwlwifi/mvm/fw.c 	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
peer             2957 drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c 	    memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0)
peer             1110 drivers/net/wireless/intel/iwlwifi/mvm/mvm.h 		} peer;
peer             1908 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
peer             1909 drivers/net/wireless/intel/iwlwifi/mvm/sta.c 		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
peer              246 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 		mvm->tdls_cs.peer.sent_timestamp = iwl_mvm_get_systime(mvm);
peer              296 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 			  const u8 *peer, bool peer_initiator, u32 timestamp)
peer              308 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 			same_peer = ether_addr_equal(peer, sta->addr);
peer              335 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 		else if (timestamp <= mvm->tdls_cs.peer.sent_timestamp)
peer              370 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 			       type, mvm->tdls_cs.state, peer, same_peer,
peer              380 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 				   const u8 *peer, bool peer_initiator,
peer              399 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	ret = iwl_mvm_tdls_check_action(mvm, type, peer, peer_initiator,
peer              415 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	sta = ieee80211_find_sta(vif, peer);
peer              426 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 		    mvm->tdls_cs.peer.chandef.chan) {
peer              428 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 			chandef = &mvm->tdls_cs.peer.chandef;
peer              518 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA)
peer              522 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
peer              533 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 						 mvm->tdls_cs.peer.initiator,
peer              534 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 						 mvm->tdls_cs.peer.op_class,
peer              535 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 						 &mvm->tdls_cs.peer.chandef,
peer              537 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 						 mvm->tdls_cs.peer.skb,
peer              538 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 						 mvm->tdls_cs.peer.ch_sw_tm_ie);
peer              567 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	if (mvm->tdls_cs.peer.sta_id != IWL_MVM_INVALID_STA) {
peer              587 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	mvm->tdls_cs.peer.skb = skb_copy(tmpl_skb, GFP_KERNEL);
peer              588 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	if (!mvm->tdls_cs.peer.skb) {
peer              594 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	mvm->tdls_cs.peer.sta_id = mvmsta->sta_id;
peer              595 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	mvm->tdls_cs.peer.chandef = *chandef;
peer              596 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	mvm->tdls_cs.peer.initiator = sta->tdls_initiator;
peer              597 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	mvm->tdls_cs.peer.op_class = oper_class;
peer              598 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	mvm->tdls_cs.peer.ch_sw_tm_ie = ch_sw_tm_ie;
peer              627 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	if (mvm->tdls_cs.peer.sta_id == IWL_MVM_INVALID_STA) {
peer              633 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 				mvm->fw_id_to_mac_id[mvm->tdls_cs.peer.sta_id],
peer              644 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	if (mvm->tdls_cs.cur_sta_id == mvm->tdls_cs.peer.sta_id &&
peer              648 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
peer              649 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	dev_kfree_skb(mvm->tdls_cs.peer.skb);
peer              650 drivers/net/wireless/intel/iwlwifi/mvm/tdls.c 	mvm->tdls_cs.peer.skb = NULL;
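The iwlwifi tdls.c lines above keep a single outstanding channel-switch peer: its station ID, chandef, operating class, and a copied template skb, all torn down by resetting the station ID to an invalid value and freeing the skb copy. A minimal single-slot sketch of that state, assuming hypothetical demo_* names and a plain malloc'd buffer in place of the skb:

#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

#define DEMO_INVALID_STA 0xff

/* Hypothetical single-slot channel-switch state: only one TDLS peer can
 * have a switch outstanding at a time, as in the listing above. */
struct demo_tdls_cs {
	unsigned char sta_id;
	void *skb_tmpl;          /* copied template frame */
	size_t skb_len;
};

static bool demo_tdls_cs_start(struct demo_tdls_cs *cs, unsigned char sta_id,
			       const void *tmpl, size_t len)
{
	if (cs->sta_id != DEMO_INVALID_STA)
		return false;    /* a switch is already pending */
	cs->skb_tmpl = malloc(len);
	if (!cs->skb_tmpl)
		return false;
	memcpy(cs->skb_tmpl, tmpl, len);
	cs->skb_len = len;
	cs->sta_id = sta_id;
	return true;
}

/* Cancel path: invalidate the slot and release the template copy,
 * the shape of the sta_id reset and dev_kfree_skb() pair above. */
static void demo_tdls_cs_cancel(struct demo_tdls_cs *cs)
{
	cs->sta_id = DEMO_INVALID_STA;
	free(cs->skb_tmpl);
	cs->skb_tmpl = NULL;
	cs->skb_len = 0;
}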
peer             1701 drivers/net/wireless/marvell/mwifiex/cfg80211.c 				const u8 *peer,
peer             3727 drivers/net/wireless/marvell/mwifiex/cfg80211.c 			   const u8 *peer, u8 action_code, u8 dialog_token,
peer             3746 drivers/net/wireless/marvell/mwifiex/cfg80211.c 			    peer, status_code);
peer             3747 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		mwifiex_add_auto_tdls_peer(priv, peer);
peer             3748 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
peer             3753 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		mwifiex_add_auto_tdls_peer(priv, peer);
peer             3756 drivers/net/wireless/marvell/mwifiex/cfg80211.c 			    peer, status_code);
peer             3757 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
peer             3763 drivers/net/wireless/marvell/mwifiex/cfg80211.c 			    "Send TDLS Confirm to %pM status_code=%d\n", peer,
peer             3765 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
peer             3771 drivers/net/wireless/marvell/mwifiex/cfg80211.c 			    "Send TDLS Tear down to %pM\n", peer);
peer             3772 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
peer             3778 drivers/net/wireless/marvell/mwifiex/cfg80211.c 			    "Send TDLS Discovery Request to %pM\n", peer);
peer             3779 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		ret = mwifiex_send_tdls_data_frame(priv, peer, action_code,
peer             3785 drivers/net/wireless/marvell/mwifiex/cfg80211.c 			    "Send TDLS Discovery Response to %pM\n", peer);
peer             3786 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		ret = mwifiex_send_tdls_action_frame(priv, peer, action_code,
peer             3792 drivers/net/wireless/marvell/mwifiex/cfg80211.c 			    "Unknown TDLS mgmt/action frame %pM\n", peer);
peer             3802 drivers/net/wireless/marvell/mwifiex/cfg80211.c 			   const u8 *peer, enum nl80211_tdls_operation action)
peer             3815 drivers/net/wireless/marvell/mwifiex/cfg80211.c 		    "TDLS peer=%pM, oper=%d\n", peer, action);
peer             3845 drivers/net/wireless/marvell/mwifiex/cfg80211.c 	return mwifiex_tdls_oper(priv, peer, action);
peer             1615 drivers/net/wireless/marvell/mwifiex/main.h int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
peer             1619 drivers/net/wireless/marvell/mwifiex/main.h int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
peer             1625 drivers/net/wireless/marvell/mwifiex/main.h int mwifiex_tdls_oper(struct mwifiex_private *priv, const u8 *peer, u8 action);
peer              464 drivers/net/wireless/marvell/mwifiex/tdls.c 					const u8 *peer, u8 action_code,
peer              477 drivers/net/wireless/marvell/mwifiex/tdls.c 	memcpy(tf->da, peer, ETH_ALEN);
peer              571 drivers/net/wireless/marvell/mwifiex/tdls.c 			ret = mwifiex_tdls_add_vht_oper(priv, peer, skb);
peer              576 drivers/net/wireless/marvell/mwifiex/tdls.c 			ret = mwifiex_tdls_add_ht_oper(priv, peer, 1, skb);
peer              582 drivers/net/wireless/marvell/mwifiex/tdls.c 			ret = mwifiex_tdls_add_ht_oper(priv, peer, 0, skb);
peer              613 drivers/net/wireless/marvell/mwifiex/tdls.c 			 const u8 *peer, const u8 *bssid)
peer              624 drivers/net/wireless/marvell/mwifiex/tdls.c 	memcpy(lnkid->resp_sta, peer, ETH_ALEN);
peer              627 drivers/net/wireless/marvell/mwifiex/tdls.c int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
peer              672 drivers/net/wireless/marvell/mwifiex/tdls.c 		ret = mwifiex_prep_tdls_encap_data(priv, peer, action_code,
peer              681 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_tdls_add_link_ie(skb, priv->curr_addr, peer,
peer              685 drivers/net/wireless/marvell/mwifiex/tdls.c 		ret = mwifiex_prep_tdls_encap_data(priv, peer, action_code,
peer              694 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
peer              729 drivers/net/wireless/marvell/mwifiex/tdls.c 				    const u8 *peer,
peer              744 drivers/net/wireless/marvell/mwifiex/tdls.c 	memcpy(mgmt->da, peer, ETH_ALEN);
peer              809 drivers/net/wireless/marvell/mwifiex/tdls.c int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
peer              855 drivers/net/wireless/marvell/mwifiex/tdls.c 	if (mwifiex_construct_tdls_action_frame(priv, peer, action_code,
peer              867 drivers/net/wireless/marvell/mwifiex/tdls.c 	mwifiex_tdls_add_link_ie(skb, peer, priv->curr_addr,
peer              894 drivers/net/wireless/marvell/mwifiex/tdls.c 	u8 *peer, *pos, *end;
peer              906 drivers/net/wireless/marvell/mwifiex/tdls.c 	peer = buf + ETH_ALEN;
peer              909 drivers/net/wireless/marvell/mwifiex/tdls.c 		    "rx:tdls action: peer=%pM, action=%d\n", peer, action);
peer              944 drivers/net/wireless/marvell/mwifiex/tdls.c 	sta_ptr = mwifiex_add_sta_entry(priv, peer);
peer             1057 drivers/net/wireless/marvell/mwifiex/tdls.c mwifiex_tdls_process_config_link(struct mwifiex_private *priv, const u8 *peer)
peer             1063 drivers/net/wireless/marvell/mwifiex/tdls.c 	sta_ptr = mwifiex_get_sta_entry(priv, peer);
peer             1067 drivers/net/wireless/marvell/mwifiex/tdls.c 			    "link absent for peer %pM; cannot config\n", peer);
peer             1071 drivers/net/wireless/marvell/mwifiex/tdls.c 	memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
peer             1078 drivers/net/wireless/marvell/mwifiex/tdls.c mwifiex_tdls_process_create_link(struct mwifiex_private *priv, const u8 *peer)
peer             1084 drivers/net/wireless/marvell/mwifiex/tdls.c 	sta_ptr = mwifiex_get_sta_entry(priv, peer);
peer             1088 drivers/net/wireless/marvell/mwifiex/tdls.c 			    "Setup already in progress for peer %pM\n", peer);
peer             1092 drivers/net/wireless/marvell/mwifiex/tdls.c 	sta_ptr = mwifiex_add_sta_entry(priv, peer);
peer             1097 drivers/net/wireless/marvell/mwifiex/tdls.c 	mwifiex_hold_tdls_packets(priv, peer);
peer             1098 drivers/net/wireless/marvell/mwifiex/tdls.c 	memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
peer             1105 drivers/net/wireless/marvell/mwifiex/tdls.c mwifiex_tdls_process_disable_link(struct mwifiex_private *priv, const u8 *peer)
peer             1111 drivers/net/wireless/marvell/mwifiex/tdls.c 	sta_ptr = mwifiex_get_sta_entry(priv, peer);
peer             1120 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_del_sta_entry(priv, peer);
peer             1123 drivers/net/wireless/marvell/mwifiex/tdls.c 	mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
peer             1124 drivers/net/wireless/marvell/mwifiex/tdls.c 	mwifiex_auto_tdls_update_peer_status(priv, peer, TDLS_NOT_SETUP);
peer             1125 drivers/net/wireless/marvell/mwifiex/tdls.c 	memcpy(&tdls_oper.peer_mac, peer, ETH_ALEN);
peer             1132 drivers/net/wireless/marvell/mwifiex/tdls.c mwifiex_tdls_process_enable_link(struct mwifiex_private *priv, const u8 *peer)
peer             1138 drivers/net/wireless/marvell/mwifiex/tdls.c 	sta_ptr = mwifiex_get_sta_entry(priv, peer);
peer             1142 drivers/net/wireless/marvell/mwifiex/tdls.c 			    "tdls: enable link %pM success\n", peer);
peer             1172 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_restore_tdls_packets(priv, peer, TDLS_SETUP_COMPLETE);
peer             1173 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_auto_tdls_update_peer_status(priv, peer,
peer             1177 drivers/net/wireless/marvell/mwifiex/tdls.c 			    "tdls: enable link %pM failed\n", peer);
peer             1183 drivers/net/wireless/marvell/mwifiex/tdls.c 			mwifiex_del_sta_entry(priv, peer);
peer             1185 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_restore_tdls_packets(priv, peer, TDLS_LINK_TEARDOWN);
peer             1186 drivers/net/wireless/marvell/mwifiex/tdls.c 		mwifiex_auto_tdls_update_peer_status(priv, peer,
peer             1195 drivers/net/wireless/marvell/mwifiex/tdls.c int mwifiex_tdls_oper(struct mwifiex_private *priv, const u8 *peer, u8 action)
peer             1199 drivers/net/wireless/marvell/mwifiex/tdls.c 		return mwifiex_tdls_process_enable_link(priv, peer);
peer             1201 drivers/net/wireless/marvell/mwifiex/tdls.c 		return mwifiex_tdls_process_disable_link(priv, peer);
peer             1203 drivers/net/wireless/marvell/mwifiex/tdls.c 		return mwifiex_tdls_process_create_link(priv, peer);
peer             1205 drivers/net/wireless/marvell/mwifiex/tdls.c 		return mwifiex_tdls_process_config_link(priv, peer);
peer             1225 drivers/net/wireless/marvell/mwifiex/tdls.c 	struct tdls_peer_info *peer = buf;
peer             1238 drivers/net/wireless/marvell/mwifiex/tdls.c 			ether_addr_copy(peer->peer_addr, sta_ptr->mac_addr);
peer             1239 drivers/net/wireless/marvell/mwifiex/tdls.c 			peer++;
peer             1284 drivers/net/wireless/marvell/mwifiex/tdls.c 	struct mwifiex_auto_tdls_peer *peer;
peer             1290 drivers/net/wireless/marvell/mwifiex/tdls.c 	list_for_each_entry(peer, &priv->auto_tdls_list, list) {
peer             1291 drivers/net/wireless/marvell/mwifiex/tdls.c 		if (!memcmp(mac, peer->mac_addr, ETH_ALEN)) {
peer             1292 drivers/net/wireless/marvell/mwifiex/tdls.c 			if (peer->rssi <= MWIFIEX_TDLS_RSSI_HIGH &&
peer             1293 drivers/net/wireless/marvell/mwifiex/tdls.c 			    peer->tdls_status == TDLS_NOT_SETUP &&
peer             1294 drivers/net/wireless/marvell/mwifiex/tdls.c 			    (peer->failure_count <
peer             1296 drivers/net/wireless/marvell/mwifiex/tdls.c 				peer->tdls_status = TDLS_SETUP_INPROGRESS;
peer             1299 drivers/net/wireless/marvell/mwifiex/tdls.c 					    peer->mac_addr, peer->rssi);
peer             1302 drivers/net/wireless/marvell/mwifiex/tdls.c 							   peer->mac_addr,
peer             1305 drivers/net/wireless/marvell/mwifiex/tdls.c 				peer->do_setup = false;
peer             1307 drivers/net/wireless/marvell/mwifiex/tdls.c 			} else if (peer->failure_count <
peer             1309 drivers/net/wireless/marvell/mwifiex/tdls.c 				   peer->do_discover) {
peer             1311 drivers/net/wireless/marvell/mwifiex/tdls.c 							     peer->mac_addr,
peer             1314 drivers/net/wireless/marvell/mwifiex/tdls.c 				peer->do_discover = false;
peer             1325 drivers/net/wireless/marvell/mwifiex/tdls.c 	struct mwifiex_auto_tdls_peer *peer, *tmp_node;
peer             1328 drivers/net/wireless/marvell/mwifiex/tdls.c 	list_for_each_entry_safe(peer, tmp_node, &priv->auto_tdls_list, list) {
peer             1329 drivers/net/wireless/marvell/mwifiex/tdls.c 		list_del(&peer->list);
peer             1330 drivers/net/wireless/marvell/mwifiex/tdls.c 		kfree(peer);
peer             1373 drivers/net/wireless/marvell/mwifiex/tdls.c 	struct mwifiex_auto_tdls_peer *peer;
peer             1379 drivers/net/wireless/marvell/mwifiex/tdls.c 	list_for_each_entry(peer, &priv->auto_tdls_list, list) {
peer             1380 drivers/net/wireless/marvell/mwifiex/tdls.c 		if (!memcmp(peer->mac_addr, mac, ETH_ALEN)) {
peer             1382 drivers/net/wireless/marvell/mwifiex/tdls.c 			    (peer->tdls_status == TDLS_SETUP_INPROGRESS))
peer             1383 drivers/net/wireless/marvell/mwifiex/tdls.c 				peer->failure_count++;
peer             1385 drivers/net/wireless/marvell/mwifiex/tdls.c 				peer->failure_count = 0;
peer             1387 drivers/net/wireless/marvell/mwifiex/tdls.c 			peer->tdls_status = link_status;
peer             1397 drivers/net/wireless/marvell/mwifiex/tdls.c 	struct mwifiex_auto_tdls_peer *peer;
peer             1403 drivers/net/wireless/marvell/mwifiex/tdls.c 	list_for_each_entry(peer, &priv->auto_tdls_list, list) {
peer             1404 drivers/net/wireless/marvell/mwifiex/tdls.c 		if (!memcmp(peer->mac_addr, mac, ETH_ALEN)) {
peer             1405 drivers/net/wireless/marvell/mwifiex/tdls.c 			peer->rssi = nflr - snr;
peer             1406 drivers/net/wireless/marvell/mwifiex/tdls.c 			peer->rssi_jiffies = jiffies;
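The mwifiex tdls.c lines above track auto-TDLS candidates per peer: an RSSI estimate, a failure count, and a link status, with setup attempted only while the RSSI is within the threshold, no link is established yet, and the failure count stays below a limit. A minimal sketch of that gating decision; the thresholds and demo_* names are hypothetical, not the driver's constants:

#include <stdbool.h>

#define DEMO_RSSI_HIGH      50   /* hypothetical path-loss style threshold */
#define DEMO_MAX_FAILURES    4   /* hypothetical give-up limit             */

enum demo_tdls_status { DEMO_NOT_SETUP, DEMO_SETUP_INPROGRESS, DEMO_SETUP_DONE };

struct demo_auto_tdls_peer {
	int rssi;
	int failure_count;
	enum demo_tdls_status status;
};

/* Decide whether to kick off a TDLS setup for this peer, mirroring the
 * rssi / status / failure_count gate in the listing above. */
static bool demo_should_setup_tdls(const struct demo_auto_tdls_peer *p)
{
	return p->rssi <= DEMO_RSSI_HIGH &&
	       p->status == DEMO_NOT_SETUP &&
	       p->failure_count < DEMO_MAX_FAILURES;
}

/* Record the outcome of an attempt: failures count against the peer,
 * successes clear the counter and mark the link as set up. */
static void demo_record_setup_result(struct demo_auto_tdls_peer *p, bool ok)
{
	if (ok) {
		p->failure_count = 0;
		p->status = DEMO_SETUP_DONE;
	} else {
		p->failure_count++;
		p->status = DEMO_NOT_SETUP;
	}
}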
peer              719 drivers/net/wireless/st/cw1200/sta.c 				memcpy(wsm_key->wep_pairwise.peer,
peer              736 drivers/net/wireless/st/cw1200/sta.c 				memcpy(wsm_key->tkip_pairwise.peer,
peer              770 drivers/net/wireless/st/cw1200/sta.c 				memcpy(wsm_key->aes_pairwise.peer,
peer              793 drivers/net/wireless/st/cw1200/sta.c 				memcpy(wsm_key->wapi_pairwise.peer,
peer             1015 drivers/net/wireless/st/cw1200/wsm.h 			u8 peer[6];	/* MAC address of the peer station */
peer             1027 drivers/net/wireless/st/cw1200/wsm.h 			u8 peer[6];	/* MAC address of the peer station */
peer             1041 drivers/net/wireless/st/cw1200/wsm.h 			u8 peer[6];	/* MAC address of the peer station */
peer             1052 drivers/net/wireless/st/cw1200/wsm.h 			u8 peer[6];	/* MAC address of the peer station */
peer             1129 drivers/ntb/hw/idt/ntb_hw_idt.c 	struct idt_ntb_peer *peer;
peer             1142 drivers/ntb/hw/idt/ntb_hw_idt.c 		peer = &ndev->peers[pidx];
peer             1143 drivers/ntb/hw/idt/ntb_hw_idt.c 		peer->mws = idt_scan_mws(ndev, peer->port, &peer->mw_cnt);
peer             1144 drivers/ntb/hw/idt/ntb_hw_idt.c 		if (IS_ERR(peer->mws)) {
peer             1146 drivers/ntb/hw/idt/ntb_hw_idt.c 				"Failed to scan mws of port %hhu", peer->port);
peer             1147 drivers/ntb/hw/idt/ntb_hw_idt.c 			return PTR_ERR(peer->mws);
peer             1200 drivers/ntb/hw/idt/ntb_hw_idt.c 	struct idt_ntb_peer *peer;
peer             1205 drivers/ntb/hw/idt/ntb_hw_idt.c 	peer = &ndev->peers[pidx];
peer             1207 drivers/ntb/hw/idt/ntb_hw_idt.c 	if (widx < 0 || peer->mw_cnt <= widx)
peer             1211 drivers/ntb/hw/idt/ntb_hw_idt.c 		*addr_align = peer->mws[widx].addr_align;
peer             1214 drivers/ntb/hw/idt/ntb_hw_idt.c 		*size_align = peer->mws[widx].size_align;
peer             1217 drivers/ntb/hw/idt/ntb_hw_idt.c 		*size_max = peer->mws[widx].size_max;
peer              496 drivers/ntb/hw/mscc/ntb_hw_switchtec.c 		u64 peer = ioread64(&sndev->peer_shared->magic);
peer              498 drivers/ntb/hw/mscc/ntb_hw_switchtec.c 		if ((peer & 0xFFFFFFFF) == SWITCHTEC_NTB_MAGIC)
peer              499 drivers/ntb/hw/mscc/ntb_hw_switchtec.c 			link_sta = peer >> 32;
peer              101 drivers/ntb/msi.c 	int peer, peer_widx;
peer              114 drivers/ntb/msi.c 	for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
peer              115 drivers/ntb/msi.c 		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
peer              119 drivers/ntb/msi.c 		ret = ntb_mw_get_align(ntb, peer, peer_widx, &addr_align,
peer              127 drivers/ntb/msi.c 	for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
peer              128 drivers/ntb/msi.c 		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
peer              134 drivers/ntb/msi.c 		ret = ntb_mw_get_align(ntb, peer, peer_widx, NULL,
peer              144 drivers/ntb/msi.c 		ret = ntb_mw_set_trans(ntb, peer, peer_widx,
peer              156 drivers/ntb/msi.c 	for (i = 0; i < peer; i++) {
peer              157 drivers/ntb/msi.c 		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
peer              176 drivers/ntb/msi.c 	int peer;
peer              179 drivers/ntb/msi.c 	for (peer = 0; peer < ntb_peer_port_count(ntb); peer++) {
peer              180 drivers/ntb/msi.c 		peer_widx = ntb_peer_highest_mw_idx(ntb, peer);
peer              184 drivers/ntb/msi.c 		ntb_mw_clear_trans(ntb, peer, peer_widx);
peer              363 drivers/ntb/msi.c int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer,
peer              371 drivers/ntb/msi.c 	idx = desc->addr_offset / sizeof(*ntb->msi->peer_mws[peer]);
peer              373 drivers/ntb/msi.c 	iowrite32(desc->data, &ntb->msi->peer_mws[peer][idx]);
peer              393 drivers/ntb/msi.c int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer,
peer              397 drivers/ntb/msi.c 	int peer_widx = ntb_peer_mw_count(ntb) - 1 - peer;
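The ntb/msi.c lines above set up one memory window per peer and trigger a peer's MSI by turning the descriptor's address offset into an index and writing the descriptor data into the peer's mapped window. A minimal sketch of that trigger step, with a volatile pointer standing in for the iomapped window and hypothetical demo_* names:

#include <stdint.h>
#include <stddef.h>

/* Hypothetical, simplified view of an NTB MSI descriptor and a peer
 * memory window mapped as an array of 32-bit cells. */
struct demo_msi_desc {
	uint64_t addr_offset;   /* offset of the MSI address inside the window */
	uint32_t data;          /* value the peer's MSI controller expects      */
};

/* Trigger the peer's interrupt: the offset selects a cell in the mapped
 * window and the descriptor data is written there, the same shape as the
 * iowrite32(desc->data, &peer_mws[peer][idx]) call in the listing. */
static void demo_msi_peer_trigger(volatile uint32_t *peer_mw,
				  const struct demo_msi_desc *desc)
{
	size_t idx = desc->addr_offset / sizeof(*peer_mw);

	peer_mw[idx] = desc->data;
}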
peer              131 drivers/ntb/test/ntb_msi_test.c static void ntb_msit_copy_peer_desc(struct ntb_msit_ctx *nm, int peer)
peer              134 drivers/ntb/test/ntb_msi_test.c 	struct ntb_msi_desc *desc = nm->peers[peer].msi_desc;
peer              135 drivers/ntb/test/ntb_msi_test.c 	int irq_count = nm->peers[peer].num_irqs;
peer              138 drivers/ntb/test/ntb_msi_test.c 		desc[i].addr_offset = ntb_peer_spad_read(nm->ntb, peer,
peer              140 drivers/ntb/test/ntb_msi_test.c 		desc[i].data = ntb_peer_spad_read(nm->ntb, peer, 2 * i + 2);
peer              144 drivers/ntb/test/ntb_msi_test.c 		 irq_count, peer);
peer              146 drivers/ntb/test/ntb_msi_test.c 	complete_all(&nm->peers[peer].init_comp);
peer              155 drivers/ntb/test/ntb_msi_test.c 	int peer;
peer              159 drivers/ntb/test/ntb_msi_test.c 	for (peer = 0; peer < sizeof(peer_mask) * 8; peer++) {
peer              160 drivers/ntb/test/ntb_msi_test.c 		if (!(peer_mask & BIT(peer)))
peer              163 drivers/ntb/test/ntb_msi_test.c 		irq_count = ntb_peer_spad_read(nm->ntb, peer, 0);
peer              171 drivers/ntb/test/ntb_msi_test.c 		kfree(nm->peers[peer].msi_desc);
peer              172 drivers/ntb/test/ntb_msi_test.c 		nm->peers[peer].msi_desc = desc;
peer              173 drivers/ntb/test/ntb_msi_test.c 		nm->peers[peer].num_irqs = irq_count;
peer              175 drivers/ntb/test/ntb_msi_test.c 		ntb_msit_copy_peer_desc(nm, peer);
peer              186 drivers/ntb/test/ntb_msi_test.c 	struct ntb_msit_peer *peer = data;
peer              188 drivers/ntb/test/ntb_msi_test.c 	if (idx >= peer->num_irqs)
peer              191 drivers/ntb/test/ntb_msi_test.c 	dev_dbg(&peer->nm->ntb->dev, "trigger irq %llu on peer %u\n",
peer              192 drivers/ntb/test/ntb_msi_test.c 		idx, peer->pidx);
peer              194 drivers/ntb/test/ntb_msi_test.c 	return ntb_msi_peer_trigger(peer->nm->ntb, peer->pidx,
peer              195 drivers/ntb/test/ntb_msi_test.c 				    &peer->msi_desc[idx]);
peer              203 drivers/ntb/test/ntb_msi_test.c 	struct ntb_msit_peer *peer = data;
peer              205 drivers/ntb/test/ntb_msi_test.c 	*port = ntb_peer_port_number(peer->nm->ntb, peer->pidx);
peer              215 drivers/ntb/test/ntb_msi_test.c 	struct ntb_msit_peer *peer = data;
peer              217 drivers/ntb/test/ntb_msi_test.c 	*count = peer->num_irqs;
peer              227 drivers/ntb/test/ntb_msi_test.c 	struct ntb_msit_peer *peer = data;
peer              229 drivers/ntb/test/ntb_msi_test.c 	*ready = try_wait_for_completion(&peer->init_comp);
peer              236 drivers/ntb/test/ntb_msi_test.c 	struct ntb_msit_peer *peer = data;
peer              238 drivers/ntb/test/ntb_msi_test.c 	return wait_for_completion_interruptible(&peer->init_comp);
peer              201 drivers/ntb/test/ntb_perf.c 	int (*cmd_send)(struct perf_peer *peer, enum perf_cmd cmd, u64 data);
peer              246 drivers/ntb/test/ntb_perf.c static inline bool perf_link_is_up(struct perf_peer *peer)
peer              250 drivers/ntb/test/ntb_perf.c 	link = ntb_link_is_up(peer->perf->ntb, NULL, NULL);
peer              251 drivers/ntb/test/ntb_perf.c 	return !!(link & BIT_ULL_MASK(peer->pidx));
peer              254 drivers/ntb/test/ntb_perf.c static int perf_spad_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
peer              257 drivers/ntb/test/ntb_perf.c 	struct perf_ctx *perf = peer->perf;
peer              271 drivers/ntb/test/ntb_perf.c 		if (!perf_link_is_up(peer))
peer              274 drivers/ntb/test/ntb_perf.c 		sts = ntb_peer_spad_read(perf->ntb, peer->pidx,
peer              281 drivers/ntb/test/ntb_perf.c 		ntb_peer_spad_write(perf->ntb, peer->pidx,
peer              284 drivers/ntb/test/ntb_perf.c 		ntb_peer_spad_write(perf->ntb, peer->pidx,
peer              287 drivers/ntb/test/ntb_perf.c 		ntb_peer_spad_write(perf->ntb, peer->pidx,
peer              290 drivers/ntb/test/ntb_perf.c 		ntb_peer_db_set(perf->ntb, PERF_SPAD_NOTIFY(peer->gidx));
peer              293 drivers/ntb/test/ntb_perf.c 			PERF_SPAD_NOTIFY(peer->gidx));
peer              304 drivers/ntb/test/ntb_perf.c 	struct perf_peer *peer;
peer              316 drivers/ntb/test/ntb_perf.c 		peer = &perf->peers[*pidx];
peer              318 drivers/ntb/test/ntb_perf.c 		if (!perf_link_is_up(peer))
peer              321 drivers/ntb/test/ntb_perf.c 		val = ntb_spad_read(perf->ntb, PERF_SPAD_CMD(peer->gidx));
peer              327 drivers/ntb/test/ntb_perf.c 		val = ntb_spad_read(perf->ntb, PERF_SPAD_LDATA(peer->gidx));
peer              330 drivers/ntb/test/ntb_perf.c 		val = ntb_spad_read(perf->ntb, PERF_SPAD_HDATA(peer->gidx));
peer              334 drivers/ntb/test/ntb_perf.c 		ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx),
peer              345 drivers/ntb/test/ntb_perf.c static int perf_msg_cmd_send(struct perf_peer *peer, enum perf_cmd cmd,
peer              348 drivers/ntb/test/ntb_perf.c 	struct perf_ctx *perf = peer->perf;
peer              363 drivers/ntb/test/ntb_perf.c 		if (!perf_link_is_up(peer))
peer              370 drivers/ntb/test/ntb_perf.c 		ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_LDATA,
peer              378 drivers/ntb/test/ntb_perf.c 		ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_HDATA,
peer              382 drivers/ntb/test/ntb_perf.c 		ntb_peer_msg_write(perf->ntb, peer->pidx, PERF_MSG_CMD, cmd);
peer              418 drivers/ntb/test/ntb_perf.c static int perf_cmd_send(struct perf_peer *peer, enum perf_cmd cmd, u64 data)
peer              420 drivers/ntb/test/ntb_perf.c 	struct perf_ctx *perf = peer->perf;
peer              423 drivers/ntb/test/ntb_perf.c 		return perf->cmd_send(peer, cmd, data);
peer              429 drivers/ntb/test/ntb_perf.c static int perf_cmd_exec(struct perf_peer *peer, enum perf_cmd cmd)
peer              439 drivers/ntb/test/ntb_perf.c 		dev_err(&peer->perf->ntb->dev, "Exec invalid command\n");
peer              444 drivers/ntb/test/ntb_perf.c 	set_bit(cmd, &peer->sts);
peer              446 drivers/ntb/test/ntb_perf.c 	dev_dbg(&peer->perf->ntb->dev, "CMD exec: %d\n", cmd);
peer              448 drivers/ntb/test/ntb_perf.c 	(void)queue_work(system_highpri_wq, &peer->service);
peer              455 drivers/ntb/test/ntb_perf.c 	struct perf_peer *peer;
peer              460 drivers/ntb/test/ntb_perf.c 		peer = &perf->peers[pidx];
peer              464 drivers/ntb/test/ntb_perf.c 			peer->inbuf_size = data;
peer              465 drivers/ntb/test/ntb_perf.c 			return perf_cmd_exec(peer, PERF_CMD_RSIZE);
peer              467 drivers/ntb/test/ntb_perf.c 			peer->outbuf_xlat = data;
peer              468 drivers/ntb/test/ntb_perf.c 			return perf_cmd_exec(peer, PERF_CMD_RXLAT);
peer              482 drivers/ntb/test/ntb_perf.c 	struct perf_peer *peer;
peer              487 drivers/ntb/test/ntb_perf.c 		peer = &perf->peers[pidx];
peer              489 drivers/ntb/test/ntb_perf.c 		lnk_up = perf_link_is_up(peer);
peer              492 drivers/ntb/test/ntb_perf.c 		    !test_and_set_bit(PERF_STS_LNKUP, &peer->sts)) {
peer              493 drivers/ntb/test/ntb_perf.c 			perf_cmd_exec(peer, PERF_CMD_SSIZE);
peer              495 drivers/ntb/test/ntb_perf.c 			   test_and_clear_bit(PERF_STS_LNKUP, &peer->sts)) {
peer              496 drivers/ntb/test/ntb_perf.c 			perf_cmd_exec(peer, PERF_CMD_CLEAR);
peer              529 drivers/ntb/test/ntb_perf.c static void perf_free_outbuf(struct perf_peer *peer)
peer              531 drivers/ntb/test/ntb_perf.c 	(void)ntb_peer_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
peer              534 drivers/ntb/test/ntb_perf.c static int perf_setup_outbuf(struct perf_peer *peer)
peer              536 drivers/ntb/test/ntb_perf.c 	struct perf_ctx *perf = peer->perf;
peer              540 drivers/ntb/test/ntb_perf.c 	ret = ntb_peer_mw_set_trans(perf->ntb, peer->pidx, peer->gidx,
peer              541 drivers/ntb/test/ntb_perf.c 				    peer->outbuf_xlat, peer->outbuf_size);
peer              548 drivers/ntb/test/ntb_perf.c 	set_bit(PERF_STS_DONE, &peer->sts);
peer              553 drivers/ntb/test/ntb_perf.c static void perf_free_inbuf(struct perf_peer *peer)
peer              555 drivers/ntb/test/ntb_perf.c 	if (!peer->inbuf)
peer              558 drivers/ntb/test/ntb_perf.c 	(void)ntb_mw_clear_trans(peer->perf->ntb, peer->pidx, peer->gidx);
peer              559 drivers/ntb/test/ntb_perf.c 	dma_free_coherent(&peer->perf->ntb->dev, peer->inbuf_size,
peer              560 drivers/ntb/test/ntb_perf.c 			  peer->inbuf, peer->inbuf_xlat);
peer              561 drivers/ntb/test/ntb_perf.c 	peer->inbuf = NULL;
peer              564 drivers/ntb/test/ntb_perf.c static int perf_setup_inbuf(struct perf_peer *peer)
peer              567 drivers/ntb/test/ntb_perf.c 	struct perf_ctx *perf = peer->perf;
peer              571 drivers/ntb/test/ntb_perf.c 	ret = ntb_mw_get_align(perf->ntb, peer->pidx, perf->gidx,
peer              578 drivers/ntb/test/ntb_perf.c 	if (peer->inbuf_size > size_max) {
peer              580 drivers/ntb/test/ntb_perf.c 			&peer->inbuf_size, &size_max);
peer              584 drivers/ntb/test/ntb_perf.c 	peer->inbuf_size = round_up(peer->inbuf_size, size_align);
peer              586 drivers/ntb/test/ntb_perf.c 	perf_free_inbuf(peer);
peer              588 drivers/ntb/test/ntb_perf.c 	peer->inbuf = dma_alloc_coherent(&perf->ntb->dev, peer->inbuf_size,
peer              589 drivers/ntb/test/ntb_perf.c 					 &peer->inbuf_xlat, GFP_KERNEL);
peer              590 drivers/ntb/test/ntb_perf.c 	if (!peer->inbuf) {
peer              592 drivers/ntb/test/ntb_perf.c 			&peer->inbuf_size);
peer              595 drivers/ntb/test/ntb_perf.c 	if (!IS_ALIGNED(peer->inbuf_xlat, xlat_align)) {
peer              600 drivers/ntb/test/ntb_perf.c 	ret = ntb_mw_set_trans(perf->ntb, peer->pidx, peer->gidx,
peer              601 drivers/ntb/test/ntb_perf.c 			       peer->inbuf_xlat, peer->inbuf_size);
peer              612 drivers/ntb/test/ntb_perf.c 	(void)perf_cmd_exec(peer, PERF_CMD_SXLAT);
peer              617 drivers/ntb/test/ntb_perf.c 	perf_free_inbuf(peer);
peer              624 drivers/ntb/test/ntb_perf.c 	struct perf_peer *peer = to_peer_service(work);
peer              626 drivers/ntb/test/ntb_perf.c 	if (test_and_clear_bit(PERF_CMD_SSIZE, &peer->sts))
peer              627 drivers/ntb/test/ntb_perf.c 		perf_cmd_send(peer, PERF_CMD_SSIZE, peer->outbuf_size);
peer              629 drivers/ntb/test/ntb_perf.c 	if (test_and_clear_bit(PERF_CMD_RSIZE, &peer->sts))
peer              630 drivers/ntb/test/ntb_perf.c 		perf_setup_inbuf(peer);
peer              632 drivers/ntb/test/ntb_perf.c 	if (test_and_clear_bit(PERF_CMD_SXLAT, &peer->sts))
peer              633 drivers/ntb/test/ntb_perf.c 		perf_cmd_send(peer, PERF_CMD_SXLAT, peer->inbuf_xlat);
peer              635 drivers/ntb/test/ntb_perf.c 	if (test_and_clear_bit(PERF_CMD_RXLAT, &peer->sts))
peer              636 drivers/ntb/test/ntb_perf.c 		perf_setup_outbuf(peer);
peer              638 drivers/ntb/test/ntb_perf.c 	if (test_and_clear_bit(PERF_CMD_CLEAR, &peer->sts)) {
peer              639 drivers/ntb/test/ntb_perf.c 		clear_bit(PERF_STS_DONE, &peer->sts);
peer              640 drivers/ntb/test/ntb_perf.c 		if (test_bit(0, &peer->perf->busy_flag) &&
peer              641 drivers/ntb/test/ntb_perf.c 		    peer == peer->perf->test_peer) {
peer              642 drivers/ntb/test/ntb_perf.c 			dev_warn(&peer->perf->ntb->dev,
peer              644 drivers/ntb/test/ntb_perf.c 			perf_terminate_test(peer->perf);
peer              646 drivers/ntb/test/ntb_perf.c 		perf_free_outbuf(peer);
peer              647 drivers/ntb/test/ntb_perf.c 		perf_free_inbuf(peer);
peer              755 drivers/ntb/test/ntb_perf.c 		struct perf_peer *peer = &perf->peers[pidx];
peer              757 drivers/ntb/test/ntb_perf.c 		ntb_spad_write(perf->ntb, PERF_SPAD_CMD(peer->gidx), 0);
peer              898 drivers/ntb/test/ntb_perf.c 	struct perf_peer *peer = pthr->perf->test_peer;
peer              907 drivers/ntb/test/ntb_perf.c 	chunk_size = min_t(u64, peer->outbuf_size, chunk_size);
peer              910 drivers/ntb/test/ntb_perf.c 	bnd_dst = peer->outbuf + peer->outbuf_size;
peer              911 drivers/ntb/test/ntb_perf.c 	flt_dst = peer->outbuf;
peer              928 drivers/ntb/test/ntb_perf.c 		if (flt_dst >= bnd_dst || flt_dst < peer->outbuf) {
peer              929 drivers/ntb/test/ntb_perf.c 			flt_dst = peer->outbuf;
peer             1048 drivers/ntb/test/ntb_perf.c static int perf_submit_test(struct perf_peer *peer)
peer             1050 drivers/ntb/test/ntb_perf.c 	struct perf_ctx *perf = peer->perf;
peer             1054 drivers/ntb/test/ntb_perf.c 	if (!test_bit(PERF_STS_DONE, &peer->sts))
peer             1060 drivers/ntb/test/ntb_perf.c 	perf->test_peer = peer;
peer             1154 drivers/ntb/test/ntb_perf.c 	struct perf_peer *peer;
peer             1183 drivers/ntb/test/ntb_perf.c 		peer = &perf->peers[pidx];
peer             1187 drivers/ntb/test/ntb_perf.c 			ntb_peer_port_number(perf->ntb, peer->pidx), peer->pidx,
peer             1188 drivers/ntb/test/ntb_perf.c 			peer->gidx);
peer             1192 drivers/ntb/test/ntb_perf.c 			test_bit(PERF_STS_LNKUP, &peer->sts) ? "up" : "down");
peer             1195 drivers/ntb/test/ntb_perf.c 			"\tOut buffer addr 0x%pK\n", peer->outbuf);
peer             1198 drivers/ntb/test/ntb_perf.c 			"\tOut buffer size %pa\n", &peer->outbuf_size);
peer             1201 drivers/ntb/test/ntb_perf.c 			"\tOut buffer xlat 0x%016llx[p]\n", peer->outbuf_xlat);
peer             1203 drivers/ntb/test/ntb_perf.c 		if (!peer->inbuf) {
peer             1210 drivers/ntb/test/ntb_perf.c 			"\tIn buffer addr 0x%pK\n", peer->inbuf);
peer             1213 drivers/ntb/test/ntb_perf.c 			"\tIn buffer size %pa\n", &peer->inbuf_size);
peer             1216 drivers/ntb/test/ntb_perf.c 			"\tIn buffer xlat %pad[p]\n", &peer->inbuf_xlat);
peer             1256 drivers/ntb/test/ntb_perf.c 	struct perf_peer *peer;
peer             1266 drivers/ntb/test/ntb_perf.c 	peer = &perf->peers[pidx];
peer             1268 drivers/ntb/test/ntb_perf.c 	ret = perf_submit_test(peer);
peer             1374 drivers/ntb/test/ntb_perf.c static int perf_setup_peer_mw(struct perf_peer *peer)
peer             1376 drivers/ntb/test/ntb_perf.c 	struct perf_ctx *perf = peer->perf;
peer             1382 drivers/ntb/test/ntb_perf.c 				   &peer->outbuf_size);
peer             1386 drivers/ntb/test/ntb_perf.c 	peer->outbuf = devm_ioremap_wc(&perf->ntb->dev, phys_addr,
peer             1387 drivers/ntb/test/ntb_perf.c 					peer->outbuf_size);
peer             1388 drivers/ntb/test/ntb_perf.c 	if (!peer->outbuf)
peer             1391 drivers/ntb/test/ntb_perf.c 	if (max_mw_size && peer->outbuf_size > max_mw_size) {
peer             1392 drivers/ntb/test/ntb_perf.c 		peer->outbuf_size = max_mw_size;
peer             1393 drivers/ntb/test/ntb_perf.c 		dev_warn(&peer->perf->ntb->dev,
peer             1394 drivers/ntb/test/ntb_perf.c 			"Peer %d outbuf reduced to %pa\n", peer->pidx,
peer             1395 drivers/ntb/test/ntb_perf.c 			&peer->outbuf_size);
peer             1403 drivers/ntb/test/ntb_perf.c 	struct perf_peer *peer;
peer             1409 drivers/ntb/test/ntb_perf.c 		peer = &perf->peers[pidx];
peer             1411 drivers/ntb/test/ntb_perf.c 		peer->perf = perf;
peer             1412 drivers/ntb/test/ntb_perf.c 		peer->pidx = pidx;
peer             1416 drivers/ntb/test/ntb_perf.c 			peer->gidx = pidx + 1;
peer             1418 drivers/ntb/test/ntb_perf.c 			peer->gidx = pidx;
peer             1420 drivers/ntb/test/ntb_perf.c 		INIT_WORK(&peer->service, perf_service_work);
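The ntb_perf.c lines above implement a per-peer command handshake over scratchpads: the sender checks that the peer's command slot is free, writes the payload split into low/high halves plus the command code, then rings a doorbell; the receiver reassembles the 64-bit payload and releases the slot. A minimal sketch of the pack/unpack step, assuming a hypothetical three-slot mailbox where 0 marks a free command slot (the real driver uses its own invalid-command marker and a retry/timeout loop):

#include <stdint.h>

/* Hypothetical scratchpad mailbox: three 32-bit slots per peer, matching
 * the CMD / LDATA / HDATA split used by the scratchpad path above. */
struct demo_spad_mbox {
	volatile uint32_t cmd;     /* 0 == slot free / acknowledged */
	volatile uint32_t ldata;
	volatile uint32_t hdata;
};

/* Sender side: returns 0 on success, -1 if the previous command has not
 * been consumed yet. */
static int demo_cmd_send(struct demo_spad_mbox *mbox, uint32_t cmd, uint64_t data)
{
	if (mbox->cmd != 0)
		return -1;
	mbox->ldata = (uint32_t)data;
	mbox->hdata = (uint32_t)(data >> 32);
	mbox->cmd = cmd;           /* written last: publishes the request */
	return 0;
}

/* Receiver side: reassemble the 64-bit payload and release the slot. */
static int demo_cmd_recv(struct demo_spad_mbox *mbox, uint32_t *cmd, uint64_t *data)
{
	if (mbox->cmd == 0)
		return -1;
	*cmd = mbox->cmd;
	*data = ((uint64_t)mbox->hdata << 32) | mbox->ldata;
	mbox->cmd = 0;
	return 0;
}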
peer              429 drivers/ntb/test/ntb_tool.c 	struct tool_peer *peer = filep->private_data;
peer              430 drivers/ntb/test/ntb_tool.c 	struct tool_ctx *tc = peer->tc;
peer              435 drivers/ntb/test/ntb_tool.c 		ntb_peer_port_number(tc->ntb, peer->pidx));
peer              496 drivers/ntb/test/ntb_tool.c 	struct tool_peer *peer = filep->private_data;
peer              497 drivers/ntb/test/ntb_tool.c 	struct tool_ctx *tc = peer->tc;
peer              500 drivers/ntb/test/ntb_tool.c 	if (ntb_link_is_up(tc->ntb, NULL, NULL) & BIT(peer->pidx))
peer              518 drivers/ntb/test/ntb_tool.c 	struct tool_peer *peer = filep->private_data;
peer              519 drivers/ntb/test/ntb_tool.c 	struct tool_ctx *tc = peer->tc;
peer              528 drivers/ntb/test/ntb_tool.c 	link_msk = BIT_ULL_MASK(peer->pidx);
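
The ntb_perf.c and ntb_tool.c entries above resolve peer ports through the ntb.h port API (ntb_peer_port_number at ntb_perf.c:1187, ntb_link_is_up masked by the peer index at ntb_tool.c:500). Below is a minimal sketch of enumerating every peer the same way; the helper name show_peer_ports is illustrative, and ntb_peer_port_count() is assumed from the same include/linux/ntb.h API rather than taken from the listing.

    #include <linux/ntb.h>

    /* Hedged sketch: walk every peer port of an NTB device and report its
     * logical index and resolved port number, mirroring the
     * ntb_peer_port_number() use seen in ntb_perf.c above. */
    static void show_peer_ports(struct ntb_dev *ntb)
    {
        int pidx;

        for (pidx = 0; pidx < ntb_peer_port_count(ntb); pidx++)
            dev_info(&ntb->dev, "peer %d -> port %d\n",
                     pidx, ntb_peer_port_number(ntb, pidx));
    }
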
peer              943 drivers/rapidio/rio_cm.c 			    struct cm_peer *peer, u16 rem_ch)
peer              960 drivers/rapidio/rio_cm.c 	ch->rdev = peer->rdev;
peer              976 drivers/rapidio/rio_cm.c 	hdr->bhdr.dst_id = htonl(peer->rdev->destid);
peer              989 drivers/rapidio/rio_cm.c 	ret = riocm_post_send(cm, peer->rdev, hdr, sizeof(*hdr));
peer              994 drivers/rapidio/rio_cm.c 		ret = riocm_queue_req(cm, peer->rdev, hdr, sizeof(*hdr));
peer             1077 drivers/rapidio/rio_cm.c 	struct cm_peer *peer;
peer             1154 drivers/rapidio/rio_cm.c 	list_for_each_entry(peer, &new_ch->cmdev->peers, node) {
peer             1155 drivers/rapidio/rio_cm.c 		if (peer->rdev->destid == new_ch->rem_destid) {
peer             1157 drivers/rapidio/rio_cm.c 				    rio_name(peer->rdev));
peer             1170 drivers/rapidio/rio_cm.c 	new_ch->rdev = peer->rdev;
peer             1563 drivers/rapidio/rio_cm.c 	struct cm_peer *peer;
peer             1596 drivers/rapidio/rio_cm.c 	list_for_each_entry(peer, &cm->peers, node) {
peer             1597 drivers/rapidio/rio_cm.c 		*entry_ptr = (u32)peer->rdev->destid;
peer             1782 drivers/rapidio/rio_cm.c 	struct cm_peer *peer;
peer             1811 drivers/rapidio/rio_cm.c 	list_for_each_entry(peer, &cm->peers, node) {
peer             1812 drivers/rapidio/rio_cm.c 		if (peer->rdev->destid == chan.remote_destid) {
peer             1823 drivers/rapidio/rio_cm.c 	return riocm_ch_connect(chan.id, cm, peer, chan.remote_channel);
peer             1946 drivers/rapidio/rio_cm.c 	struct cm_peer *peer;
peer             1956 drivers/rapidio/rio_cm.c 	peer = kmalloc(sizeof(*peer), GFP_KERNEL);
peer             1957 drivers/rapidio/rio_cm.c 	if (!peer)
peer             1968 drivers/rapidio/rio_cm.c 	kfree(peer);
peer             1972 drivers/rapidio/rio_cm.c 	peer->rdev = rdev;
peer             1973 drivers/rapidio/rio_cm.c 	list_add_tail(&peer->node, &cm->peers);
peer             1992 drivers/rapidio/rio_cm.c 	struct cm_peer *peer;
peer             2020 drivers/rapidio/rio_cm.c 	list_for_each_entry(peer, &cm->peers, node) {
peer             2021 drivers/rapidio/rio_cm.c 		if (peer->rdev == rdev) {
peer             2024 drivers/rapidio/rio_cm.c 			list_del(&peer->node);
peer             2026 drivers/rapidio/rio_cm.c 			kfree(peer);
peer             2181 drivers/rapidio/rio_cm.c 	struct cm_peer *peer, *temp;
peer             2230 drivers/rapidio/rio_cm.c 	list_for_each_entry_safe(peer, temp, &cm->peers, node) {
peer             2231 drivers/rapidio/rio_cm.c 		riocm_debug(RDEV, "removing peer %s", rio_name(peer->rdev));
peer             2232 drivers/rapidio/rio_cm.c 		list_del(&peer->node);
peer             2233 drivers/rapidio/rio_cm.c 		kfree(peer);
peer               24 drivers/scsi/ufs/ufshcd-dwc.c 			ATTR_SET_NOR, v[attr_node].mib_val, v[attr_node].peer);
peer               16 drivers/scsi/ufs/ufshcd-dwc.h 	u8 peer;
peer             3631 drivers/scsi/ufs/ufshcd.c 			u8 attr_set, u32 mib_val, u8 peer)
peer             3638 drivers/scsi/ufs/ufshcd.c 	const char *set = action[!!peer];
peer             3642 drivers/scsi/ufs/ufshcd.c 	uic_cmd.command = peer ?
peer             3654 drivers/scsi/ufs/ufshcd.c 	} while (ret && peer && --retries);
peer             3675 drivers/scsi/ufs/ufshcd.c 			u32 *mib_val, u8 peer)
peer             3682 drivers/scsi/ufs/ufshcd.c 	const char *get = action[!!peer];
peer             3689 drivers/scsi/ufs/ufshcd.c 	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)) {
peer             3711 drivers/scsi/ufs/ufshcd.c 	uic_cmd.command = peer ?
peer             3721 drivers/scsi/ufs/ufshcd.c 	} while (ret && peer && --retries);
peer             3731 drivers/scsi/ufs/ufshcd.c 	if (peer && (hba->quirks & UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE)
peer             4339 drivers/scsi/ufs/ufshcd.c static int ufshcd_disable_tx_lcc(struct ufs_hba *hba, bool peer)
peer             4343 drivers/scsi/ufs/ufshcd.c 	if (!peer)
peer             4350 drivers/scsi/ufs/ufshcd.c 		if (!peer)
peer             4362 drivers/scsi/ufs/ufshcd.c 				__func__, peer, i, err);
peer              850 drivers/scsi/ufs/ufshcd.h 			       u8 attr_set, u32 mib_val, u8 peer);
peer              852 drivers/scsi/ufs/ufshcd.h 			       u32 *mib_val, u8 peer);
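
The ufshcd entries above route the same DME accessors to either the local host or the remote UniPro peer via the trailing peer flag (ufshcd.c:3642 and 3711 pick the UIC_CMD_DME_*PEER* commands when it is set). Below is a minimal sketch of a peer-side read built on the get/set prototypes at ufshcd.h:850/852; read_peer_granularity is an illustrative name, and UIC_ARG_MIB, PA_GRANULARITY and DME_PEER are assumed to be the usual ufshcd.h/unipro.h macros rather than confirmed by the listing.

    /* Hedged sketch (assumes the ufshcd.h/unipro.h context of the driver):
     * read a UniPro attribute from the remote device side.  DME_PEER is
     * assumed to be the selector that makes ufshcd_dme_get_attr() issue the
     * peer variant of the UIC command, as at ufshcd.c:3711 above;
     * PA_GRANULARITY is only an example attribute. */
    static int read_peer_granularity(struct ufs_hba *hba, u32 *val)
    {
        return ufshcd_dme_get_attr(hba, UIC_ARG_MIB(PA_GRANULARITY),
                                   val, DME_PEER);
    }
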
peer               42 drivers/soc/qcom/rpmhpd.c 	struct rpmhpd	*peer;
peer               90 drivers/soc/qcom/rpmhpd.c 	.peer = &sdm845_mx_ao,
peer               97 drivers/soc/qcom/rpmhpd.c 	.peer = &sdm845_mx,
peer              104 drivers/soc/qcom/rpmhpd.c 	.peer = &sdm845_cx_ao,
peer              112 drivers/soc/qcom/rpmhpd.c 	.peer = &sdm845_cx,
peer              180 drivers/soc/qcom/rpmhpd.c 	struct rpmhpd *peer = pd->peer;
peer              187 drivers/soc/qcom/rpmhpd.c 	if (peer && peer->enabled)
peer              188 drivers/soc/qcom/rpmhpd.c 		to_active_sleep(peer, peer->corner, &peer_active_corner,
peer              200 drivers/soc/qcom/rpmhpd.c 	if (peer) {
peer              201 drivers/soc/qcom/rpmhpd.c 		peer->active_corner = active_corner;
peer               44 drivers/soc/qcom/rpmpd.c 		.peer = &_platform##_##_active,				\
peer               51 drivers/soc/qcom/rpmpd.c 		.peer = &_platform##_##_name,				\
peer               98 drivers/soc/qcom/rpmpd.c 	struct rpmpd *peer;
peer              245 drivers/soc/qcom/rpmpd.c 	struct rpmpd *peer = pd->peer;
peer              252 drivers/soc/qcom/rpmpd.c 	if (peer && peer->enabled)
peer              253 drivers/soc/qcom/rpmpd.c 		to_active_sleep(peer, peer->corner, &peer_active_corner,
peer              227 drivers/staging/fwserial/fwserial.c static int fwtty_send_data_async(struct fwtty_peer *peer, int tcode,
peer              242 drivers/staging/fwserial/fwserial.c 	generation = peer->generation;
peer              244 drivers/staging/fwserial/fwserial.c 	fw_send_request(peer->serial->card, &txn->fw_txn, tcode,
peer              245 drivers/staging/fwserial/fwserial.c 			peer->node_id, generation, peer->speed, addr, payload,
peer              250 drivers/staging/fwserial/fwserial.c static void fwtty_send_txn_async(struct fwtty_peer *peer,
peer              261 drivers/staging/fwserial/fwserial.c 	generation = peer->generation;
peer              263 drivers/staging/fwserial/fwserial.c 	fw_send_request(peer->serial->card, &txn->fw_txn, tcode,
peer              264 drivers/staging/fwserial/fwserial.c 			peer->node_id, generation, peer->speed, addr, payload,
peer              413 drivers/staging/fwserial/fwserial.c 	struct fwtty_peer *peer;
peer              418 drivers/staging/fwserial/fwserial.c 	peer = rcu_dereference(port->peer);
peer              419 drivers/staging/fwserial/fwserial.c 	if (peer) {
peer              420 drivers/staging/fwserial/fwserial.c 		err = fwtty_send_data_async(peer, TCODE_WRITE_QUADLET_REQUEST,
peer              421 drivers/staging/fwserial/fwserial.c 					    peer->status_addr, &status,
peer              607 drivers/staging/fwserial/fwserial.c 	struct fwtty_peer *peer;
peer              613 drivers/staging/fwserial/fwserial.c 	peer = __fwserial_peer_by_node_id(card, generation, source);
peer              615 drivers/staging/fwserial/fwserial.c 	if (!peer || peer != rcu_access_pointer(port->peer)) {
peer              702 drivers/staging/fwserial/fwserial.c 	struct fwtty_peer *peer;
peer              712 drivers/staging/fwserial/fwserial.c 	peer = rcu_dereference(port->peer);
peer              713 drivers/staging/fwserial/fwserial.c 	if (!peer) {
peer              755 drivers/staging/fwserial/fwserial.c 		fwtty_send_txn_async(peer, txn, TCODE_WRITE_BLOCK_REQUEST,
peer              756 drivers/staging/fwserial/fwserial.c 				     peer->fifo_addr, txn->dma_pended.data,
peer              803 drivers/staging/fwserial/fwserial.c 	struct fwtty_peer *peer;
peer              810 drivers/staging/fwserial/fwserial.c 	peer = rcu_dereference(port->peer);
peer              811 drivers/staging/fwserial/fwserial.c 	if (peer) {
peer              812 drivers/staging/fwserial/fwserial.c 		fwtty_send_data_async(peer, TCODE_WRITE_BLOCK_REQUEST,
peer              813 drivers/staging/fwserial/fwserial.c 				      peer->fifo_addr, &ch, sizeof(ch),
peer             1435 drivers/staging/fwserial/fwserial.c static void fwtty_debugfs_show_peer(struct seq_file *m, struct fwtty_peer *peer)
peer             1437 drivers/staging/fwserial/fwserial.c 	int generation = peer->generation;
peer             1440 drivers/staging/fwserial/fwserial.c 	seq_printf(m, " %s:", dev_name(&peer->unit->device));
peer             1441 drivers/staging/fwserial/fwserial.c 	seq_printf(m, " node:%04x gen:%d", peer->node_id, generation);
peer             1442 drivers/staging/fwserial/fwserial.c 	seq_printf(m, " sp:%d max:%d guid:%016llx", peer->speed,
peer             1443 drivers/staging/fwserial/fwserial.c 		   peer->max_payload, (unsigned long long)peer->guid);
peer             1444 drivers/staging/fwserial/fwserial.c 	seq_printf(m, " mgmt:%012llx", (unsigned long long)peer->mgmt_addr);
peer             1445 drivers/staging/fwserial/fwserial.c 	seq_printf(m, " addr:%012llx", (unsigned long long)peer->status_addr);
peer             1488 drivers/staging/fwserial/fwserial.c 	struct fwtty_peer *peer;
peer             1494 drivers/staging/fwserial/fwserial.c 	list_for_each_entry_rcu(peer, &serial->peer_list, list)
peer             1495 drivers/staging/fwserial/fwserial.c 		fwtty_debugfs_show_peer(m, peer);
peer             1623 drivers/staging/fwserial/fwserial.c static void fwserial_virt_plug_complete(struct fwtty_peer *peer,
peer             1626 drivers/staging/fwserial/fwserial.c 	struct fwtty_port *port = peer->port;
peer             1628 drivers/staging/fwserial/fwserial.c 	peer->status_addr = be32_to_u64(params->status_hi, params->status_lo);
peer             1629 drivers/staging/fwserial/fwserial.c 	peer->fifo_addr = be32_to_u64(params->fifo_hi, params->fifo_lo);
peer             1630 drivers/staging/fwserial/fwserial.c 	peer->fifo_len = be32_to_cpu(params->fifo_len);
peer             1631 drivers/staging/fwserial/fwserial.c 	peer_set_state(peer, FWPS_ATTACHED);
peer             1635 drivers/staging/fwserial/fwserial.c 	port->max_payload = min(peer->max_payload, peer->fifo_len);
peer             1637 drivers/staging/fwserial/fwserial.c 	spin_unlock_bh(&peer->port->lock);
peer             1642 drivers/staging/fwserial/fwserial.c 	fwtty_info(&peer->unit, "peer (guid:%016llx) connected on %s\n",
peer             1643 drivers/staging/fwserial/fwserial.c 		   (unsigned long long)peer->guid, dev_name(port->device));
peer             1646 drivers/staging/fwserial/fwserial.c static inline int fwserial_send_mgmt_sync(struct fwtty_peer *peer,
peer             1653 drivers/staging/fwserial/fwserial.c 		generation = peer->generation;
peer             1656 drivers/staging/fwserial/fwserial.c 		rcode = fw_run_transaction(peer->serial->card,
peer             1658 drivers/staging/fwserial/fwserial.c 					   peer->node_id,
peer             1659 drivers/staging/fwserial/fwserial.c 					   generation, peer->speed,
peer             1660 drivers/staging/fwserial/fwserial.c 					   peer->mgmt_addr,
peer             1664 drivers/staging/fwserial/fwserial.c 			fwtty_dbg(&peer->unit, "mgmt write error: %d\n", rcode);
peer             1679 drivers/staging/fwserial/fwserial.c static struct fwtty_port *fwserial_claim_port(struct fwtty_peer *peer,
peer             1690 drivers/staging/fwserial/fwserial.c 	port = peer->serial->ports[index];
peer             1692 drivers/staging/fwserial/fwserial.c 	if (!rcu_access_pointer(port->peer))
peer             1693 drivers/staging/fwserial/fwserial.c 		rcu_assign_pointer(port->peer, peer);
peer             1707 drivers/staging/fwserial/fwserial.c static struct fwtty_port *fwserial_find_port(struct fwtty_peer *peer)
peer             1709 drivers/staging/fwserial/fwserial.c 	struct fwtty_port **ports = peer->serial->ports;
peer             1720 drivers/staging/fwserial/fwserial.c 		if (!ports[i]->peer) {
peer             1722 drivers/staging/fwserial/fwserial.c 			rcu_assign_pointer(ports[i]->peer, peer);
peer             1743 drivers/staging/fwserial/fwserial.c 	RCU_INIT_POINTER(port->peer, NULL);
peer             1752 drivers/staging/fwserial/fwserial.c 	struct fwtty_peer *peer = from_timer(peer, t, timer);
peer             1755 drivers/staging/fwserial/fwserial.c 	spin_lock_bh(&peer->lock);
peer             1756 drivers/staging/fwserial/fwserial.c 	if (peer->state != FWPS_PLUG_PENDING) {
peer             1757 drivers/staging/fwserial/fwserial.c 		spin_unlock_bh(&peer->lock);
peer             1761 drivers/staging/fwserial/fwserial.c 	port = peer_revert_state(peer);
peer             1762 drivers/staging/fwserial/fwserial.c 	spin_unlock_bh(&peer->lock);
peer             1774 drivers/staging/fwserial/fwserial.c static int fwserial_connect_peer(struct fwtty_peer *peer)
peer             1784 drivers/staging/fwserial/fwserial.c 	port = fwserial_find_port(peer);
peer             1786 drivers/staging/fwserial/fwserial.c 		fwtty_err(&peer->unit, "avail ports in use\n");
peer             1791 drivers/staging/fwserial/fwserial.c 	spin_lock_bh(&peer->lock);
peer             1794 drivers/staging/fwserial/fwserial.c 	if (peer->state != FWPS_NOT_ATTACHED) {
peer             1799 drivers/staging/fwserial/fwserial.c 	peer->port = port;
peer             1800 drivers/staging/fwserial/fwserial.c 	peer_set_state(peer, FWPS_PLUG_PENDING);
peer             1802 drivers/staging/fwserial/fwserial.c 	fill_plug_req(pkt, peer->port);
peer             1804 drivers/staging/fwserial/fwserial.c 	mod_timer(&peer->timer, jiffies + VIRT_CABLE_PLUG_TIMEOUT);
peer             1805 drivers/staging/fwserial/fwserial.c 	spin_unlock_bh(&peer->lock);
peer             1807 drivers/staging/fwserial/fwserial.c 	rcode = fwserial_send_mgmt_sync(peer, pkt);
peer             1809 drivers/staging/fwserial/fwserial.c 	spin_lock_bh(&peer->lock);
peer             1810 drivers/staging/fwserial/fwserial.c 	if (peer->state == FWPS_PLUG_PENDING && rcode != RCODE_COMPLETE) {
peer             1817 drivers/staging/fwserial/fwserial.c 	spin_unlock_bh(&peer->lock);
peer             1823 drivers/staging/fwserial/fwserial.c 	del_timer(&peer->timer);
peer             1824 drivers/staging/fwserial/fwserial.c 	peer_revert_state(peer);
peer             1826 drivers/staging/fwserial/fwserial.c 	spin_unlock_bh(&peer->lock);
peer             1913 drivers/staging/fwserial/fwserial.c 	struct fwtty_peer *peer;
peer             1928 drivers/staging/fwserial/fwserial.c 	list_for_each_entry_rcu(peer, &serial->peer_list, list) {
peer             1929 drivers/staging/fwserial/fwserial.c 		int g = peer->generation;
peer             1932 drivers/staging/fwserial/fwserial.c 		if (generation == g && id == peer->node_id)
peer             1933 drivers/staging/fwserial/fwserial.c 			return peer;
peer             1943 drivers/staging/fwserial/fwserial.c 	struct fwtty_peer *peer;
peer             1949 drivers/staging/fwserial/fwserial.c 	list_for_each_entry_rcu(peer, &serial->peer_list, list) {
peer             1950 drivers/staging/fwserial/fwserial.c 		int g = peer->generation;
peer             1954 drivers/staging/fwserial/fwserial.c 			  g, peer->node_id, (unsigned long long)peer->guid);
peer             1963 drivers/staging/fwserial/fwserial.c 	struct fwtty_peer *peer = to_peer(to_delayed_work(work), connect);
peer             1966 drivers/staging/fwserial/fwserial.c 	err = fwserial_connect_peer(peer);
peer             1967 drivers/staging/fwserial/fwserial.c 	if (err == -EAGAIN && ++peer->connect_retries < MAX_CONNECT_RETRIES)
peer             1968 drivers/staging/fwserial/fwserial.c 		schedule_delayed_work(&peer->connect, CONNECT_RETRY_DELAY);
peer             1973 drivers/staging/fwserial/fwserial.c 	struct fwtty_peer *peer = to_peer(work, work);
peer             1975 drivers/staging/fwserial/fwserial.c 	peer->workfn(work);
peer             1998 drivers/staging/fwserial/fwserial.c 	struct fwtty_peer *peer;
peer             2003 drivers/staging/fwserial/fwserial.c 	peer = kzalloc(sizeof(*peer), GFP_KERNEL);
peer             2004 drivers/staging/fwserial/fwserial.c 	if (!peer)
peer             2007 drivers/staging/fwserial/fwserial.c 	peer_set_state(peer, FWPS_NOT_ATTACHED);
peer             2009 drivers/staging/fwserial/fwserial.c 	dev_set_drvdata(dev, peer);
peer             2010 drivers/staging/fwserial/fwserial.c 	peer->unit = unit;
peer             2011 drivers/staging/fwserial/fwserial.c 	peer->guid = (u64)parent->config_rom[3] << 32 | parent->config_rom[4];
peer             2012 drivers/staging/fwserial/fwserial.c 	peer->speed = parent->max_speed;
peer             2013 drivers/staging/fwserial/fwserial.c 	peer->max_payload = min(device_max_receive(parent),
peer             2014 drivers/staging/fwserial/fwserial.c 				link_speed_to_max_payload(peer->speed));
peer             2018 drivers/staging/fwserial/fwserial.c 	peer->node_id = parent->node_id;
peer             2020 drivers/staging/fwserial/fwserial.c 	peer->generation = generation;
peer             2026 drivers/staging/fwserial/fwserial.c 			peer->mgmt_addr = CSR_REGISTER_BASE + 4 * val;
peer             2030 drivers/staging/fwserial/fwserial.c 	if (peer->mgmt_addr == 0ULL) {
peer             2035 drivers/staging/fwserial/fwserial.c 		peer_set_state(peer, FWPS_NO_MGMT_ADDR);
peer             2038 drivers/staging/fwserial/fwserial.c 	spin_lock_init(&peer->lock);
peer             2039 drivers/staging/fwserial/fwserial.c 	peer->port = NULL;
peer             2041 drivers/staging/fwserial/fwserial.c 	timer_setup(&peer->timer, fwserial_plug_timeout, 0);
peer             2042 drivers/staging/fwserial/fwserial.c 	INIT_WORK(&peer->work, fwserial_peer_workfn);
peer             2043 drivers/staging/fwserial/fwserial.c 	INIT_DELAYED_WORK(&peer->connect, fwserial_auto_connect);
peer             2046 drivers/staging/fwserial/fwserial.c 	peer->serial = serial;
peer             2047 drivers/staging/fwserial/fwserial.c 	list_add_rcu(&peer->list, &serial->peer_list);
peer             2049 drivers/staging/fwserial/fwserial.c 	fwtty_info(&peer->unit, "peer added (guid:%016llx)\n",
peer             2050 drivers/staging/fwserial/fwserial.c 		   (unsigned long long)peer->guid);
peer             2054 drivers/staging/fwserial/fwserial.c 		serial->self = peer;
peer             2058 drivers/staging/fwserial/fwserial.c 			port = fwserial_claim_port(peer, num_ttys);
peer             2062 drivers/staging/fwserial/fwserial.c 				spin_lock_bh(&peer->lock);
peer             2063 drivers/staging/fwserial/fwserial.c 				peer->port = port;
peer             2065 drivers/staging/fwserial/fwserial.c 				fwserial_virt_plug_complete(peer, &params);
peer             2066 drivers/staging/fwserial/fwserial.c 				spin_unlock_bh(&peer->lock);
peer             2074 drivers/staging/fwserial/fwserial.c 		schedule_delayed_work(&peer->connect, 1);
peer             2089 drivers/staging/fwserial/fwserial.c static void fwserial_remove_peer(struct fwtty_peer *peer)
peer             2093 drivers/staging/fwserial/fwserial.c 	spin_lock_bh(&peer->lock);
peer             2094 drivers/staging/fwserial/fwserial.c 	peer_set_state(peer, FWPS_GONE);
peer             2095 drivers/staging/fwserial/fwserial.c 	spin_unlock_bh(&peer->lock);
peer             2097 drivers/staging/fwserial/fwserial.c 	cancel_delayed_work_sync(&peer->connect);
peer             2098 drivers/staging/fwserial/fwserial.c 	cancel_work_sync(&peer->work);
peer             2100 drivers/staging/fwserial/fwserial.c 	spin_lock_bh(&peer->lock);
peer             2102 drivers/staging/fwserial/fwserial.c 	if (peer == peer->serial->self)
peer             2103 drivers/staging/fwserial/fwserial.c 		peer->serial->self = NULL;
peer             2106 drivers/staging/fwserial/fwserial.c 	del_timer(&peer->timer);
peer             2108 drivers/staging/fwserial/fwserial.c 	port = peer->port;
peer             2109 drivers/staging/fwserial/fwserial.c 	peer->port = NULL;
peer             2111 drivers/staging/fwserial/fwserial.c 	list_del_rcu(&peer->list);
peer             2113 drivers/staging/fwserial/fwserial.c 	fwtty_info(&peer->unit, "peer removed (guid:%016llx)\n",
peer             2114 drivers/staging/fwserial/fwserial.c 		   (unsigned long long)peer->guid);
peer             2116 drivers/staging/fwserial/fwserial.c 	spin_unlock_bh(&peer->lock);
peer             2122 drivers/staging/fwserial/fwserial.c 	kfree(peer);
peer             2178 drivers/staging/fwserial/fwserial.c 		RCU_INIT_POINTER(port->peer, NULL);
peer             2343 drivers/staging/fwserial/fwserial.c 	struct fwtty_peer *peer = dev_get_drvdata(&unit->device);
peer             2344 drivers/staging/fwserial/fwserial.c 	struct fw_serial *serial = peer->serial;
peer             2348 drivers/staging/fwserial/fwserial.c 	fwserial_remove_peer(peer);
peer             2382 drivers/staging/fwserial/fwserial.c 	struct fwtty_peer *peer = dev_get_drvdata(&unit->device);
peer             2387 drivers/staging/fwserial/fwserial.c 	peer->node_id = parent->node_id;
peer             2389 drivers/staging/fwserial/fwserial.c 	peer->generation = generation;
peer             2481 drivers/staging/fwserial/fwserial.c 	struct fwtty_peer *peer = to_peer(work, work);
peer             2482 drivers/staging/fwserial/fwserial.c 	struct virt_plug_params *plug_req = &peer->work_params.plug_req;
peer             2491 drivers/staging/fwserial/fwserial.c 	port = fwserial_find_port(peer);
peer             2493 drivers/staging/fwserial/fwserial.c 	spin_lock_bh(&peer->lock);
peer             2495 drivers/staging/fwserial/fwserial.c 	switch (peer->state) {
peer             2498 drivers/staging/fwserial/fwserial.c 			fwtty_err(&peer->unit, "no more ports avail\n");
peer             2501 drivers/staging/fwserial/fwserial.c 			peer->port = port;
peer             2502 drivers/staging/fwserial/fwserial.c 			fill_plug_rsp_ok(pkt, peer->port);
peer             2503 drivers/staging/fwserial/fwserial.c 			peer_set_state(peer, FWPS_PLUG_RESPONDING);
peer             2510 drivers/staging/fwserial/fwserial.c 		if (peer->serial->card->guid > peer->guid)
peer             2514 drivers/staging/fwserial/fwserial.c 		del_timer(&peer->timer);
peer             2515 drivers/staging/fwserial/fwserial.c 		fill_plug_rsp_ok(pkt, peer->port);
peer             2516 drivers/staging/fwserial/fwserial.c 		peer_set_state(peer, FWPS_PLUG_RESPONDING);
peer             2523 drivers/staging/fwserial/fwserial.c 	spin_unlock_bh(&peer->lock);
peer             2527 drivers/staging/fwserial/fwserial.c 	rcode = fwserial_send_mgmt_sync(peer, pkt);
peer             2529 drivers/staging/fwserial/fwserial.c 	spin_lock_bh(&peer->lock);
peer             2530 drivers/staging/fwserial/fwserial.c 	if (peer->state == FWPS_PLUG_RESPONDING) {
peer             2532 drivers/staging/fwserial/fwserial.c 			struct fwtty_port *tmp = peer->port;
peer             2534 drivers/staging/fwserial/fwserial.c 			fwserial_virt_plug_complete(peer, plug_req);
peer             2535 drivers/staging/fwserial/fwserial.c 			spin_unlock_bh(&peer->lock);
peer             2538 drivers/staging/fwserial/fwserial.c 			spin_lock_bh(&peer->lock);
peer             2540 drivers/staging/fwserial/fwserial.c 			fwtty_err(&peer->unit, "PLUG_RSP error (%d)\n", rcode);
peer             2541 drivers/staging/fwserial/fwserial.c 			port = peer_revert_state(peer);
peer             2545 drivers/staging/fwserial/fwserial.c 	spin_unlock_bh(&peer->lock);
peer             2553 drivers/staging/fwserial/fwserial.c 	struct fwtty_peer *peer = to_peer(work, work);
peer             2562 drivers/staging/fwserial/fwserial.c 	spin_lock_bh(&peer->lock);
peer             2564 drivers/staging/fwserial/fwserial.c 	switch (peer->state) {
peer             2567 drivers/staging/fwserial/fwserial.c 		peer_set_state(peer, FWPS_UNPLUG_RESPONDING);
peer             2571 drivers/staging/fwserial/fwserial.c 		if (peer->serial->card->guid > peer->guid)
peer             2575 drivers/staging/fwserial/fwserial.c 		del_timer(&peer->timer);
peer             2577 drivers/staging/fwserial/fwserial.c 		peer_set_state(peer, FWPS_UNPLUG_RESPONDING);
peer             2584 drivers/staging/fwserial/fwserial.c 	spin_unlock_bh(&peer->lock);
peer             2586 drivers/staging/fwserial/fwserial.c 	rcode = fwserial_send_mgmt_sync(peer, pkt);
peer             2588 drivers/staging/fwserial/fwserial.c 	spin_lock_bh(&peer->lock);
peer             2589 drivers/staging/fwserial/fwserial.c 	if (peer->state == FWPS_UNPLUG_RESPONDING) {
peer             2591 drivers/staging/fwserial/fwserial.c 			fwtty_err(&peer->unit, "UNPLUG_RSP error (%d)\n",
peer             2593 drivers/staging/fwserial/fwserial.c 		port = peer_revert_state(peer);
peer             2596 drivers/staging/fwserial/fwserial.c 	spin_unlock_bh(&peer->lock);
peer             2602 drivers/staging/fwserial/fwserial.c static int fwserial_parse_mgmt_write(struct fwtty_peer *peer,
peer             2618 drivers/staging/fwserial/fwserial.c 	spin_lock_bh(&peer->lock);
peer             2619 drivers/staging/fwserial/fwserial.c 	if (peer->state == FWPS_GONE) {
peer             2626 drivers/staging/fwserial/fwserial.c 		fwtty_err(&peer->unit, "peer already removed\n");
peer             2627 drivers/staging/fwserial/fwserial.c 		spin_unlock_bh(&peer->lock);
peer             2633 drivers/staging/fwserial/fwserial.c 	fwtty_dbg(&peer->unit, "mgmt: hdr.code: %04hx\n", pkt->hdr.code);
peer             2637 drivers/staging/fwserial/fwserial.c 		if (work_pending(&peer->work)) {
peer             2638 drivers/staging/fwserial/fwserial.c 			fwtty_err(&peer->unit, "plug req: busy\n");
peer             2642 drivers/staging/fwserial/fwserial.c 			peer->work_params.plug_req = pkt->plug_req;
peer             2643 drivers/staging/fwserial/fwserial.c 			peer->workfn = fwserial_handle_plug_req;
peer             2644 drivers/staging/fwserial/fwserial.c 			queue_work(system_unbound_wq, &peer->work);
peer             2649 drivers/staging/fwserial/fwserial.c 		if (peer->state != FWPS_PLUG_PENDING) {
peer             2653 drivers/staging/fwserial/fwserial.c 			fwtty_notice(&peer->unit, "NACK plug rsp\n");
peer             2654 drivers/staging/fwserial/fwserial.c 			port = peer_revert_state(peer);
peer             2657 drivers/staging/fwserial/fwserial.c 			struct fwtty_port *tmp = peer->port;
peer             2659 drivers/staging/fwserial/fwserial.c 			fwserial_virt_plug_complete(peer, &pkt->plug_rsp);
peer             2660 drivers/staging/fwserial/fwserial.c 			spin_unlock_bh(&peer->lock);
peer             2663 drivers/staging/fwserial/fwserial.c 			spin_lock_bh(&peer->lock);
peer             2668 drivers/staging/fwserial/fwserial.c 		if (work_pending(&peer->work)) {
peer             2669 drivers/staging/fwserial/fwserial.c 			fwtty_err(&peer->unit, "unplug req: busy\n");
peer             2672 drivers/staging/fwserial/fwserial.c 			peer->workfn = fwserial_handle_unplug_req;
peer             2673 drivers/staging/fwserial/fwserial.c 			queue_work(system_unbound_wq, &peer->work);
peer             2678 drivers/staging/fwserial/fwserial.c 		if (peer->state != FWPS_UNPLUG_PENDING) {
peer             2682 drivers/staging/fwserial/fwserial.c 				fwtty_notice(&peer->unit, "NACK unplug?\n");
peer             2683 drivers/staging/fwserial/fwserial.c 			port = peer_revert_state(peer);
peer             2689 drivers/staging/fwserial/fwserial.c 		fwtty_err(&peer->unit, "unknown mgmt code %d\n",
peer             2693 drivers/staging/fwserial/fwserial.c 	spin_unlock_bh(&peer->lock);
peer             2717 drivers/staging/fwserial/fwserial.c 	struct fwtty_peer *peer;
peer             2721 drivers/staging/fwserial/fwserial.c 	peer = __fwserial_peer_by_node_id(card, generation, source);
peer             2722 drivers/staging/fwserial/fwserial.c 	if (!peer) {
peer             2730 drivers/staging/fwserial/fwserial.c 			rcode = fwserial_parse_mgmt_write(peer, pkt, addr, len);
peer              125 drivers/staging/fwserial/fwserial.h static inline void peer_set_state(struct fwtty_peer *peer, int new)
peer              127 drivers/staging/fwserial/fwserial.h 	peer->state = new;
peer              130 drivers/staging/fwserial/fwserial.h static inline struct fwtty_port *peer_revert_state(struct fwtty_peer *peer)
peer              132 drivers/staging/fwserial/fwserial.h 	struct fwtty_port *port = peer->port;
peer              134 drivers/staging/fwserial/fwserial.h 	peer->port = NULL;
peer              135 drivers/staging/fwserial/fwserial.h 	peer_set_state(peer, FWPS_NOT_ATTACHED);
peer              270 drivers/staging/fwserial/fwserial.h 	struct fwtty_peer __rcu	   *peer;
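
fwserial publishes each port's peer through the __rcu pointer at fwserial.h:270, setting it with rcu_assign_pointer()/RCU_INIT_POINTER() (fwserial.c:1693, 1722, 1743) and reading it with rcu_dereference() (fwserial.c:418, 712, 810). Below is a minimal sketch of the reader side only; peek_peer_status_addr is an illustrative name and the u64 return type is inferred from the %012llx formatting at fwserial.c:1445.

    #include <linux/rcupdate.h>

    /* Hedged sketch of the reader pattern around port->peer: the pointer is
     * published with rcu_assign_pointer() elsewhere, so a reader may only
     * touch it inside an RCU read-side critical section. */
    static u64 peek_peer_status_addr(struct fwtty_port *port)
    {
        struct fwtty_peer *peer;
        u64 addr = 0;

        rcu_read_lock();
        peer = rcu_dereference(port->peer);    /* NULL if no peer attached */
        if (peer)
            addr = peer->status_addr;
        rcu_read_unlock();

        return addr;
    }
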
peer              848 drivers/staging/most/usb/usb.c 		int peer = 1 - channel;
peer              850 drivers/staging/most/usb/usb.c 					       mdev->ep_address[peer]);
peer              315 drivers/staging/rtl8723bs/include/rtw_mlme.h 	u8 peer[ETH_ALEN];
peer             1071 drivers/tty/hvc/hvc_iucv.c static DEVICE_ATTR(peer, 0640, hvc_iucv_dev_peer_show, NULL);
peer             2533 drivers/usb/core/hcd.c 		struct usb_hcd *peer = hcd->shared_hcd;
peer             2535 drivers/usb/core/hcd.c 		peer->shared_hcd = NULL;
peer             2536 drivers/usb/core/hcd.c 		peer->primary_hcd = NULL;
peer               97 drivers/usb/core/hub.h 	struct usb_port *peer;
peer              198 drivers/usb/core/port.c 	struct usb_port *peer = port_dev->peer;
peer              213 drivers/usb/core/port.c 	if (!port_dev->is_superspeed && peer)
peer              214 drivers/usb/core/port.c 		pm_runtime_get_sync(&peer->dev);
peer              256 drivers/usb/core/port.c 	struct usb_port *peer = port_dev->peer;
peer              287 drivers/usb/core/port.c 	if (!port_dev->is_superspeed && peer)
peer              288 drivers/usb/core/port.c 		pm_runtime_put(&peer->dev);
peer              326 drivers/usb/core/port.c 	if (left->peer == right && right->peer == left)
peer              329 drivers/usb/core/port.c 	if (left->peer || right->peer) {
peer              330 drivers/usb/core/port.c 		struct usb_port *lpeer = left->peer;
peer              331 drivers/usb/core/port.c 		struct usb_port *rpeer = right->peer;
peer              373 drivers/usb/core/port.c 	left->peer = right;
peer              374 drivers/usb/core/port.c 	right->peer = left;
peer              409 drivers/usb/core/port.c 	WARN(right->peer != left || left->peer != right,
peer              429 drivers/usb/core/port.c 	right->peer = NULL;
peer              431 drivers/usb/core/port.c 	left->peer = NULL;
peer              449 drivers/usb/core/port.c 	struct usb_port *port_dev = p, *peer;
peer              463 drivers/usb/core/port.c 		peer = peer_hub->ports[port1 - 1];
peer              464 drivers/usb/core/port.c 		if (peer && peer->location == port_dev->location) {
peer              465 drivers/usb/core/port.c 			link_peers_report(port_dev, peer);
peer              480 drivers/usb/core/port.c 	struct usb_port *port_dev = hub->ports[port1 - 1], *peer;
peer              512 drivers/usb/core/port.c 		if (!upstream || !upstream->peer)
peer              515 drivers/usb/core/port.c 		peer_hdev = upstream->peer->child;
peer              526 drivers/usb/core/port.c 	peer = peer_hub->ports[port1 - 1];
peer              527 drivers/usb/core/port.c 	if (peer && peer->location == 0)
peer              528 drivers/usb/core/port.c 		link_peers_report(port_dev, peer);
peer              617 drivers/usb/core/port.c 	struct usb_port *peer;
peer              619 drivers/usb/core/port.c 	peer = port_dev->peer;
peer              620 drivers/usb/core/port.c 	if (peer)
peer              621 drivers/usb/core/port.c 		unlink_peers(port_dev, peer);
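
The port.c entries keep USB2/USB3 companion ports in a strictly symmetric relationship: link_peers_report() ends with the mutual assignment at port.c:373-374, and unlink_peers() checks and clears it at port.c:409-431. Below is a minimal standalone sketch of that invariant; struct demo_port and the demo_* helpers are stand-ins, not the driver's types.

    #include <linux/bug.h>
    #include <linux/stddef.h>

    /* Hedged sketch of the symmetric peering invariant maintained above;
     * struct demo_port stands in for struct usb_port. */
    struct demo_port {
        struct demo_port *peer;
    };

    static void demo_link_peers(struct demo_port *left, struct demo_port *right)
    {
        left->peer = right;
        right->peer = left;
    }

    static void demo_unlink_peers(struct demo_port *left, struct demo_port *right)
    {
        /* Both sides must already reference each other, cf. port.c:409. */
        WARN_ON(left->peer != right || right->peer != left);
        right->peer = NULL;
        left->peer = NULL;
    }
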
peer              583 fs/btrfs/reada.c 		struct reada_zone *peer;
peer              584 fs/btrfs/reada.c 		peer = radix_tree_lookup(&zone->devs[i]->reada_zones, index);
peer              585 fs/btrfs/reada.c 		if (peer && peer->device != zone->device)
peer              586 fs/btrfs/reada.c 			peer->locked = lock;
peer             3734 fs/ceph/caps.c 	int peer;
peer             3738 fs/ceph/caps.c 		peer = le32_to_cpu(ph->mds);
peer             3741 fs/ceph/caps.c 		peer = -1;
peer             3745 fs/ceph/caps.c 	     inode, ci, mds, mseq, peer);
peer             3770 fs/ceph/caps.c 	ocap = peer >= 0 ? __get_cap_for_mds(ci, peer) : NULL;
peer             3773 fs/ceph/caps.c 		     ocap, peer, ph->flags);
peer             3781 fs/ceph/caps.c 					ceph_vinop(inode), peer, ocap->seq,
peer             3809 fs/ceph/caps.c 	struct ceph_mds_cap_peer *peer = NULL;
peer             3847 fs/ceph/caps.c 			if (p + sizeof(*peer) > end)
peer             3849 fs/ceph/caps.c 			peer = p;
peer             3850 fs/ceph/caps.c 			p += sizeof(*peer);
peer             3853 fs/ceph/caps.c 			peer = (void *)&h->size;
peer             3952 fs/ceph/caps.c 		handle_cap_export(inode, h, peer, session);
peer             3966 fs/ceph/caps.c 		handle_cap_import(mdsc, inode, h, peer, session,
peer              257 include/linux/drbd.h 		unsigned peer:2 ;   /* 3/4	 primary/secondary/unknown */
peer              279 include/linux/drbd.h 		unsigned peer:2 ;   /* 3/4	 primary/secondary/unknown */
peer             1596 include/linux/ide.h 	ide_drive_t *peer = drive->hwif->devices[(drive->dn ^ 1) & 1];
peer             1598 include/linux/ide.h 	return (peer->dev_flags & IDE_DFLAG_PRESENT) ? peer : NULL;
peer              474 include/linux/mISDNif.h 	struct mISDNchannel	*peer;
peer              570 include/linux/mISDNif.h 	if (!ch->peer)
peer              575 include/linux/mISDNif.h 	if (ch->recv(ch->peer, skb))
peer              151 include/linux/net.h 				      int peer);
peer             1651 include/linux/ntb.h int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer,
peer             1653 include/linux/ntb.h int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer,
peer             1679 include/linux/ntb.h static inline int ntb_msi_peer_trigger(struct ntb_dev *ntb, int peer,
peer             1684 include/linux/ntb.h static inline int ntb_msi_peer_addr(struct ntb_dev *ntb, int peer,
peer              331 include/linux/scif.h int scif_accept(scif_epd_t epd, struct scif_port_id *peer, scif_epd_t
peer               50 include/linux/vmw_vmci_api.h 		     u32 peer, u32 flags, u32 priv_flags);
peer              684 include/linux/vmw_vmci_defs.h 	u32 peer;
peer               59 include/net/af_unix.h 	struct sock		*peer;
peer             3278 include/net/cfg80211.h 	u8 peer[ETH_ALEN] __aligned(2);
peer             3780 include/net/cfg80211.h 				    const u8 *peer,
peer             3840 include/net/cfg80211.h 			     const u8 *peer, u8 action_code,  u8 dialog_token,
peer             3844 include/net/cfg80211.h 			     const u8 *peer, enum nl80211_tdls_operation oper);
peer             3847 include/net/cfg80211.h 				const u8 *peer, u64 *cookie);
peer             3894 include/net/cfg80211.h 			     u8 tsid, const u8 *peer, u8 user_prio,
peer             3897 include/net/cfg80211.h 			     u8 tsid, const u8 *peer);
peer             6831 include/net/cfg80211.h 				 const u8 *peer, u32 num_packets, gfp_t gfp);
peer             6845 include/net/cfg80211.h void cfg80211_cqm_txe_notify(struct net_device *dev, const u8 *peer,
peer             7072 include/net/cfg80211.h void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
peer               70 include/net/dn.h 	struct sockaddr_dn peer; /* Remote address */
peer               97 include/net/dn_dev.h 	struct neighbour *peer;   /* Peer on pointopoint links */
peer               41 include/net/inet_common.h 		 int peer);
peer              148 include/net/inetpeer.h bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout);
peer             1106 include/net/ipv6.h 		  int peer);
peer             6194 include/net/mac80211.h void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer,
peer              345 include/net/net_namespace.h int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
peer              346 include/net/net_namespace.h int peernet2id(struct net *net, struct net *peer);
peer              347 include/net/net_namespace.h bool peernet_has_id(struct net *net, struct net *peer);
peer             1379 include/net/sctp/structs.h 		      const union sctp_addr *peer,
peer             1732 include/net/sctp/structs.h 	} peer;
peer             2123 include/net/sctp/structs.h 			 struct sctp_transport *peer);
peer               15 include/soc/tegra/ivc.h 	struct device *peer;
peer               94 include/soc/tegra/ivc.h int tegra_ivc_init(struct tegra_ivc *ivc, struct device *peer, void *rx,
peer              528 include/trace/events/rxrpc.h 		    __field(unsigned int,	peer		)
peer              535 include/trace/events/rxrpc.h 		    __entry->peer = peer_debug_id;
peer              542 include/trace/events/rxrpc.h 		      __entry->peer,
peer             1437 include/trace/events/rxrpc.h 	    TP_PROTO(struct rxrpc_peer *peer, struct sock_extended_err *ee,
peer             1440 include/trace/events/rxrpc.h 	    TP_ARGS(peer, ee, srx),
peer             1443 include/trace/events/rxrpc.h 		    __field(unsigned int,			peer	)
peer             1449 include/trace/events/rxrpc.h 		    __entry->peer = peer->debug_id;
peer             1455 include/trace/events/rxrpc.h 		      __entry->peer,
peer               32 include/trace/events/sctp.h 		__entry->primary = (sp == asoc->peer.primary_path);
peer               74 include/trace/events/sctp.h 		__entry->peer_port = asoc->peer.port;
peer               76 include/trace/events/sctp.h 		__entry->rwnd = asoc->peer.rwnd;
peer               82 include/trace/events/sctp.h 			list_for_each_entry(sp, &asoc->peer.transport_addr_list,
peer               81 include/uapi/linux/scif_ioctl.h 	struct scif_port_id	peer;
peer               92 include/uapi/linux/scif_ioctl.h 	struct scif_port_id	peer;
peer              225 include/uapi/linux/tipc.h 	__u32 peer;
peer              231 include/uapi/linux/tipc.h 	__u32 peer;
peer             1241 net/appletalk/ddp.c 			 int peer)
peer             1256 net/appletalk/ddp.c 	if (peer) {
peer               90 net/atm/pvc.c  		       int peer)
peer              422 net/atm/svc.c  		       int peer)
peer              427 net/atm/svc.c  	memcpy(addr, peer ? &ATM_SD(sock)->remote : &ATM_SD(sock)->local,
peer             1399 net/ax25/af_ax25.c 	int peer)
peer             1411 net/ax25/af_ax25.c 	if (peer != 0) {
peer               83 net/bluetooth/6lowpan.c 			    struct lowpan_peer *peer)
peer               85 net/bluetooth/6lowpan.c 	list_add_rcu(&peer->list, &dev->peers);
peer               90 net/bluetooth/6lowpan.c 			    struct lowpan_peer *peer)
peer               92 net/bluetooth/6lowpan.c 	list_del_rcu(&peer->list);
peer               93 net/bluetooth/6lowpan.c 	kfree_rcu(peer, rcu);
peer              108 net/bluetooth/6lowpan.c 	struct lowpan_peer *peer;
peer              115 net/bluetooth/6lowpan.c 	list_for_each_entry_rcu(peer, &dev->peers, list) {
peer              117 net/bluetooth/6lowpan.c 		       &peer->chan->dst, peer->chan->dst_type);
peer              119 net/bluetooth/6lowpan.c 		if (bacmp(&peer->chan->dst, ba))
peer              122 net/bluetooth/6lowpan.c 		if (type == peer->chan->dst_type) {
peer              124 net/bluetooth/6lowpan.c 			return peer;
peer              136 net/bluetooth/6lowpan.c 	struct lowpan_peer *peer;
peer              138 net/bluetooth/6lowpan.c 	list_for_each_entry_rcu(peer, &dev->peers, list) {
peer              139 net/bluetooth/6lowpan.c 		if (peer->chan == chan)
peer              140 net/bluetooth/6lowpan.c 			return peer;
peer              149 net/bluetooth/6lowpan.c 	struct lowpan_peer *peer;
peer              151 net/bluetooth/6lowpan.c 	list_for_each_entry_rcu(peer, &dev->peers, list) {
peer              152 net/bluetooth/6lowpan.c 		if (peer->chan->conn == conn)
peer              153 net/bluetooth/6lowpan.c 			return peer;
peer              166 net/bluetooth/6lowpan.c 	struct lowpan_peer *peer;
peer              196 net/bluetooth/6lowpan.c 	list_for_each_entry_rcu(peer, &dev->peers, list) {
peer              198 net/bluetooth/6lowpan.c 		       &peer->chan->dst, peer->chan->dst_type,
peer              199 net/bluetooth/6lowpan.c 		       &peer->peer_addr);
peer              201 net/bluetooth/6lowpan.c 		if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) {
peer              203 net/bluetooth/6lowpan.c 			return peer;
peer              211 net/bluetooth/6lowpan.c 		list_for_each_entry_rcu(peer, &dev->peers, list) {
peer              212 net/bluetooth/6lowpan.c 			if (!memcmp(neigh->ha, peer->lladdr, ETH_ALEN)) {
peer              215 net/bluetooth/6lowpan.c 				return peer;
peer              229 net/bluetooth/6lowpan.c 	struct lowpan_peer *peer = NULL;
peer              234 net/bluetooth/6lowpan.c 		peer = __peer_lookup_conn(entry, conn);
peer              235 net/bluetooth/6lowpan.c 		if (peer)
peer              241 net/bluetooth/6lowpan.c 	return peer;
peer              275 net/bluetooth/6lowpan.c 			   struct lowpan_peer *peer)
peer              279 net/bluetooth/6lowpan.c 	saddr = peer->lladdr;
peer              285 net/bluetooth/6lowpan.c 		    struct lowpan_peer *peer)
peer              338 net/bluetooth/6lowpan.c 		ret = iphc_decompress(local_skb, dev, peer);
peer              375 net/bluetooth/6lowpan.c 	struct lowpan_peer *peer;
peer              378 net/bluetooth/6lowpan.c 	peer = lookup_peer(chan->conn);
peer              379 net/bluetooth/6lowpan.c 	if (!peer)
peer              386 net/bluetooth/6lowpan.c 	err = recv_pkt(skb, dev->netdev, peer);
peer              401 net/bluetooth/6lowpan.c 	struct lowpan_peer *peer;
peer              422 net/bluetooth/6lowpan.c 		peer = peer_lookup_dst(dev, &ipv6_daddr, skb);
peer              423 net/bluetooth/6lowpan.c 		if (!peer) {
peer              428 net/bluetooth/6lowpan.c 		daddr = peer->lladdr;
peer              429 net/bluetooth/6lowpan.c 		*peer_addr = peer->chan->dst;
peer              430 net/bluetooth/6lowpan.c 		*peer_addr_type = peer->chan->dst_type;
peer              431 net/bluetooth/6lowpan.c 		lowpan_cb(skb)->chan = peer->chan;
peer              657 net/bluetooth/6lowpan.c 	struct lowpan_peer *peer;
peer              659 net/bluetooth/6lowpan.c 	peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
peer              660 net/bluetooth/6lowpan.c 	if (!peer)
peer              663 net/bluetooth/6lowpan.c 	peer->chan = chan;
peer              664 net/bluetooth/6lowpan.c 	memset(&peer->peer_addr, 0, sizeof(struct in6_addr));
peer              666 net/bluetooth/6lowpan.c 	baswap((void *)peer->lladdr, &chan->dst);
peer              668 net/bluetooth/6lowpan.c 	lowpan_iphc_uncompress_eui48_lladdr(&peer->peer_addr, peer->lladdr);
peer              671 net/bluetooth/6lowpan.c 	INIT_LIST_HEAD(&peer->list);
peer              672 net/bluetooth/6lowpan.c 	peer_add(dev, peer);
peer              680 net/bluetooth/6lowpan.c 	return peer->chan;
peer              786 net/bluetooth/6lowpan.c 	struct lowpan_peer *peer;
peer              806 net/bluetooth/6lowpan.c 		peer = __peer_lookup_chan(dev, chan);
peer              807 net/bluetooth/6lowpan.c 		if (peer) {
peer              808 net/bluetooth/6lowpan.c 			last = peer_del(dev, peer);
peer              812 net/bluetooth/6lowpan.c 			       last ? "last " : "1 ", peer);
peer              935 net/bluetooth/6lowpan.c 	struct lowpan_peer *peer;
peer              939 net/bluetooth/6lowpan.c 	peer = lookup_peer(conn);
peer              940 net/bluetooth/6lowpan.c 	if (!peer)
peer              943 net/bluetooth/6lowpan.c 	BT_DBG("peer %p chan %p", peer, peer->chan);
peer              945 net/bluetooth/6lowpan.c 	l2cap_chan_close(peer->chan, ENOENT);
peer             1018 net/bluetooth/6lowpan.c 	struct lowpan_peer *peer, *tmp_peer, *new_peer;
peer             1031 net/bluetooth/6lowpan.c 		list_for_each_entry_rcu(peer, &entry->peers, list) {
peer             1036 net/bluetooth/6lowpan.c 			new_peer->chan = peer->chan;
peer             1046 net/bluetooth/6lowpan.c 	list_for_each_entry_safe(peer, tmp_peer, &peers, list) {
peer             1047 net/bluetooth/6lowpan.c 		l2cap_chan_close(peer->chan, ENOENT);
peer             1049 net/bluetooth/6lowpan.c 		list_del_rcu(&peer->list);
peer             1050 net/bluetooth/6lowpan.c 		kfree_rcu(peer, rcu);
peer             1137 net/bluetooth/6lowpan.c 			struct lowpan_peer *peer;
peer             1142 net/bluetooth/6lowpan.c 			peer = lookup_peer(conn);
peer             1143 net/bluetooth/6lowpan.c 			if (peer) {
peer             1178 net/bluetooth/6lowpan.c 	struct lowpan_peer *peer;
peer             1183 net/bluetooth/6lowpan.c 		list_for_each_entry(peer, &entry->peers, list)
peer             1185 net/bluetooth/6lowpan.c 				   &peer->chan->dst, peer->chan->dst_type);
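
The 6lowpan entries manage the peer list entirely under RCU: peer_add() uses list_add_rcu() (6lowpan.c:85), peer_del() pairs list_del_rcu() with kfree_rcu() (6lowpan.c:92-93), and lookups walk the list with list_for_each_entry_rcu() (6lowpan.c:115, 138, 151). Below is a minimal self-contained sketch of the same pattern; struct demo_peer and the demo_peer_* helpers are illustrative, and writers are assumed to serialize against each other as the driver does with its own locking.

    #include <linux/rculist.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct demo_peer {
        int id;
        struct list_head list;
        struct rcu_head rcu;
    };

    static LIST_HEAD(demo_peers);        /* writers serialize externally */

    static void demo_peer_add(struct demo_peer *peer)
    {
        list_add_rcu(&peer->list, &demo_peers);
    }

    static struct demo_peer *demo_peer_lookup(int id)
    {
        struct demo_peer *peer;

        list_for_each_entry_rcu(peer, &demo_peers, list)
            if (peer->id == id)
                return peer;        /* caller holds rcu_read_lock() */
        return NULL;
    }

    static void demo_peer_del(struct demo_peer *peer)
    {
        list_del_rcu(&peer->list);
        kfree_rcu(peer, rcu);
    }

Readers must hold rcu_read_lock() across demo_peer_lookup() and any use of the returned entry, which is what the rcu list walks above require of their callers.
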
peer             1345 net/bluetooth/hci_sock.c 			    int peer)
peer             1354 net/bluetooth/hci_sock.c 	if (peer)
peer              361 net/bluetooth/l2cap_sock.c 			      int peer)
peer              369 net/bluetooth/l2cap_sock.c 	if (peer && sk->sk_state != BT_CONNECTED &&
peer              379 net/bluetooth/l2cap_sock.c 	if (peer) {
peer              537 net/bluetooth/rfcomm/sock.c static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int peer)
peer              544 net/bluetooth/rfcomm/sock.c 	if (peer && sk->sk_state != BT_CONNECTED &&
peer              551 net/bluetooth/rfcomm/sock.c 	if (peer)
peer              684 net/bluetooth/sco.c 			    int peer)
peer              693 net/bluetooth/sco.c 	if (peer)
peer              554 net/can/j1939/socket.c 				       const struct j1939_sock *jsk, int peer)
peer              559 net/can/j1939/socket.c 	if (peer) {
peer              569 net/can/j1939/socket.c 			    int peer)
peer              578 net/can/j1939/socket.c 	if (peer && !(jsk->state & J1939_SOCK_CONNECTED)) {
peer              583 net/can/j1939/socket.c 	j1939_sk_sock2sockaddr_can(addr, jsk, peer);
peer              471 net/can/raw.c  		       int peer)
peer              477 net/can/raw.c  	if (peer)
peer              189 net/core/net_namespace.c static int alloc_netid(struct net *net, struct net *peer, int reqid)
peer              198 net/core/net_namespace.c 	return idr_alloc(&net->netns_ids, peer, min, max, GFP_ATOMIC);
peer              207 net/core/net_namespace.c static int net_eq_idr(int id, void *net, void *peer)
peer              209 net/core/net_namespace.c 	if (net_eq(net, peer))
peer              218 net/core/net_namespace.c static int __peernet2id_alloc(struct net *net, struct net *peer, bool *alloc)
peer              220 net/core/net_namespace.c 	int id = idr_for_each(&net->netns_ids, net_eq_idr, peer);
peer              232 net/core/net_namespace.c 		id = alloc_netid(net, peer, -1);
peer              241 net/core/net_namespace.c static int __peernet2id(struct net *net, struct net *peer)
peer              245 net/core/net_namespace.c 	return __peernet2id_alloc(net, peer, &no);
peer              253 net/core/net_namespace.c int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp)
peer              267 net/core/net_namespace.c 	if (maybe_get_net(peer))
peer              269 net/core/net_namespace.c 	id = __peernet2id_alloc(net, peer, &alloc);
peer              274 net/core/net_namespace.c 		put_net(peer);
peer              280 net/core/net_namespace.c int peernet2id(struct net *net, struct net *peer)
peer              285 net/core/net_namespace.c 	id = __peernet2id(net, peer);
peer              294 net/core/net_namespace.c bool peernet_has_id(struct net *net, struct net *peer)
peer              296 net/core/net_namespace.c 	return peernet2id(net, peer) >= 0;
peer              301 net/core/net_namespace.c 	struct net *peer;
peer              307 net/core/net_namespace.c 	peer = idr_find(&net->netns_ids, id);
peer              308 net/core/net_namespace.c 	if (peer)
peer              309 net/core/net_namespace.c 		peer = maybe_get_net(peer);
peer              312 net/core/net_namespace.c 	return peer;
peer              728 net/core/net_namespace.c 	struct net *peer;
peer              742 net/core/net_namespace.c 		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
peer              745 net/core/net_namespace.c 		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
peer              751 net/core/net_namespace.c 	if (IS_ERR(peer)) {
peer              754 net/core/net_namespace.c 		return PTR_ERR(peer);
peer              758 net/core/net_namespace.c 	if (__peernet2id(net, peer) >= 0) {
peer              767 net/core/net_namespace.c 	err = alloc_netid(net, peer, nsid);
peer              779 net/core/net_namespace.c 	put_net(peer);
peer              876 net/core/net_namespace.c 	struct net *peer, *target = net;
peer              885 net/core/net_namespace.c 		peer = get_net_ns_by_pid(nla_get_u32(tb[NETNSA_PID]));
peer              888 net/core/net_namespace.c 		peer = get_net_ns_by_fd(nla_get_u32(tb[NETNSA_FD]));
peer              891 net/core/net_namespace.c 		peer = get_net_ns_by_id(net, nla_get_s32(tb[NETNSA_NSID]));
peer              892 net/core/net_namespace.c 		if (!peer)
peer              893 net/core/net_namespace.c 			peer = ERR_PTR(-ENOENT);
peer              900 net/core/net_namespace.c 	if (IS_ERR(peer)) {
peer              903 net/core/net_namespace.c 		return PTR_ERR(peer);
peer              918 net/core/net_namespace.c 		fillargs.ref_nsid = peernet2id(net, peer);
peer              927 net/core/net_namespace.c 	fillargs.nsid = peernet2id(target, peer);
peer              940 net/core/net_namespace.c 	put_net(peer);
peer              953 net/core/net_namespace.c static int rtnl_net_dumpid_one(int id, void *peer, void *data)
peer              963 net/core/net_namespace.c 		net_cb->fillargs.ref_nsid = __peernet2id(net_cb->ref_net, peer);
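
The net_namespace.c entries implement the peer-netns id map used by rtnetlink: peernet2id_alloc() assigns an id for a peer namespace, peernet2id() only looks one up, and peernet_has_id() tests for existence (prototypes at net_namespace.h:345-347 above). Below is a minimal sketch of a caller that guarantees a peer namespace is addressable by nsid; demo_peer_nsid is an illustrative name and the GFP_KERNEL choice simply assumes process context.

    #include <net/net_namespace.h>

    /* Hedged sketch: make sure "peer" can be referred to by an nsid from
     * "net", mirroring how the rtnetlink callers above use this API. */
    static int demo_peer_nsid(struct net *net, struct net *peer)
    {
        if (peernet_has_id(net, peer))
            return peernet2id(net, peer);

        return peernet2id_alloc(net, peer, GFP_KERNEL);
    }
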
peer             2673 net/core/sock.c 		    int peer)
peer              414 net/decnet/af_decnet.c 		if (cb->src != dn_saddr2dn(&scp->peer))
peer              509 net/decnet/af_decnet.c 	scp->peer.sdn_family    = AF_DECnet;
peer              941 net/decnet/af_decnet.c 	memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn));
peer              946 net/decnet/af_decnet.c 	fld.daddr = dn_saddr2dn(&scp->peer);
peer             1130 net/decnet/af_decnet.c 	skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type));
peer             1131 net/decnet/af_decnet.c 	*(__le16 *)(DN_SK(newsk)->peer.sdn_add.a_addr) = cb->src;
peer             1144 net/decnet/af_decnet.c 		DN_SK(newsk)->peer.sdn_flags |= SDF_PROXY;
peer             1147 net/decnet/af_decnet.c 		DN_SK(newsk)->peer.sdn_flags |= SDF_UICPROXY;
peer             1175 net/decnet/af_decnet.c static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int peer)
peer             1183 net/decnet/af_decnet.c 	if (peer) {
peer             1191 net/decnet/af_decnet.c 		memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn));
peer             1811 net/decnet/af_decnet.c 		memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn));
peer             2268 net/decnet/af_decnet.c 	dn_printable_object(&scp->peer, remote_object);
peer             2281 net/decnet/af_decnet.c 		   dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->peer)), buf2),
peer             1218 net/decnet/dn_dev.c 	if (dn_db->peer)
peer             1219 net/decnet/dn_dev.c 		neigh_release(dn_db->peer);
peer             1374 net/decnet/dn_dev.c 				dn_db->peer ? dn_addr2asc(le16_to_cpu(*(__le16 *)dn_db->peer->primary_key), peer_buf) : "");
peer               87 net/decnet/dn_nsp_out.c 	fld.daddr = dn_saddr2dn(&scp->peer);
peer              653 net/decnet/dn_nsp_out.c 	if (scp->peer.sdn_objnum)
peer              656 net/decnet/dn_nsp_out.c 	skb_put(skb, dn_sockaddr2username(&scp->peer,
peer              662 net/decnet/dn_nsp_out.c 	if (scp->peer.sdn_flags & SDF_PROXY)
peer              664 net/decnet/dn_nsp_out.c 	if (scp->peer.sdn_flags & SDF_UICPROXY)
peer              761 net/ipv4/af_inet.c 			int peer)
peer              768 net/ipv4/af_inet.c 	if (peer) {
peer              771 net/ipv4/af_inet.c 		     peer == 1))
peer              314 net/ipv4/icmp.c 	struct inet_peer *peer;
peer              326 net/ipv4/icmp.c 	peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
peer              327 net/ipv4/icmp.c 	rc = inet_peer_xrlim_allow(peer, net->ipv4.sysctl_icmp_ratelimit);
peer              328 net/ipv4/icmp.c 	if (peer)
peer              329 net/ipv4/icmp.c 		inet_putpeer(peer);
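
icmp.c:326-329 above shows the canonical per-destination rate-limit sequence: look up the inetpeer, let inet_peer_xrlim_allow() consult its token bucket, then drop the reference; the helper itself tolerates a NULL peer (inetpeer.c:278). Below is a minimal sketch of the same sequence; demo_allow_send is an illustrative name and the HZ timeout is only an example value, not the sysctl the ICMP code uses.

    #include <net/inetpeer.h>
    #include <net/net_namespace.h>

    /* Hedged sketch: per-destination rate limiting via the inetpeer cache,
     * following the icmp.c pattern listed above. */
    static bool demo_allow_send(struct net *net, __be32 daddr, int vif)
    {
        struct inet_peer *peer;
        bool ok;

        peer = inet_getpeer_v4(net->ipv4.peers, daddr, vif, 1);
        ok = inet_peer_xrlim_allow(peer, HZ);    /* ~1s refill, illustrative */
        if (peer)
            inet_putpeer(peer);
        return ok;
    }
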
peer              273 net/ipv4/inetpeer.c bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
peer              278 net/ipv4/inetpeer.c 	if (!peer)
peer              281 net/ipv4/inetpeer.c 	token = peer->rate_tokens;
peer              283 net/ipv4/inetpeer.c 	token += now - peer->rate_last;
peer              284 net/ipv4/inetpeer.c 	peer->rate_last = now;
peer              291 net/ipv4/inetpeer.c 	peer->rate_tokens = token;
peer              301 net/ipv4/inetpeer.c 		struct inet_peer *peer = rb_entry(p, struct inet_peer, rb_node);
peer              304 net/ipv4/inetpeer.c 		rb_erase(&peer->rb_node, &base->rb_root);
peer              305 net/ipv4/inetpeer.c 		inet_putpeer(peer);
peer               68 net/ipv4/ip_fragment.c 	struct inet_peer *peer;
peer               91 net/ipv4/ip_fragment.c 	qp->peer = q->fqdir->max_dist ?
peer              101 net/ipv4/ip_fragment.c 	if (qp->peer)
peer              102 net/ipv4/ip_fragment.c 		inet_putpeer(qp->peer);
peer              226 net/ipv4/ip_fragment.c 	struct inet_peer *peer = qp->peer;
peer              232 net/ipv4/ip_fragment.c 	if (!peer || !max)
peer              236 net/ipv4/ip_fragment.c 	end = atomic_inc_return(&peer->rid);
peer              873 net/ipv4/route.c 	struct inet_peer *peer;
peer              889 net/ipv4/route.c 	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
peer              890 net/ipv4/route.c 	if (!peer) {
peer              899 net/ipv4/route.c 	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
peer              900 net/ipv4/route.c 		peer->rate_tokens = 0;
peer              901 net/ipv4/route.c 		peer->n_redirects = 0;
peer              907 net/ipv4/route.c 	if (peer->n_redirects >= ip_rt_redirect_number) {
peer              908 net/ipv4/route.c 		peer->rate_last = jiffies;
peer              915 net/ipv4/route.c 	if (peer->n_redirects == 0 ||
peer              917 net/ipv4/route.c 		       (peer->rate_last +
peer              918 net/ipv4/route.c 			(ip_rt_redirect_load << peer->n_redirects)))) {
peer              922 net/ipv4/route.c 		peer->rate_last = jiffies;
peer              923 net/ipv4/route.c 		++peer->n_redirects;
peer              926 net/ipv4/route.c 		    peer->n_redirects == ip_rt_redirect_number)
peer              933 net/ipv4/route.c 	inet_putpeer(peer);
peer              941 net/ipv4/route.c 	struct inet_peer *peer;
peer              989 net/ipv4/route.c 	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
peer              993 net/ipv4/route.c 	if (peer) {
peer              995 net/ipv4/route.c 		peer->rate_tokens += now - peer->rate_last;
peer              996 net/ipv4/route.c 		if (peer->rate_tokens > ip_rt_error_burst)
peer              997 net/ipv4/route.c 			peer->rate_tokens = ip_rt_error_burst;
peer              998 net/ipv4/route.c 		peer->rate_last = now;
peer              999 net/ipv4/route.c 		if (peer->rate_tokens >= ip_rt_error_cost)
peer             1000 net/ipv4/route.c 			peer->rate_tokens -= ip_rt_error_cost;
peer             1003 net/ipv4/route.c 		inet_putpeer(peer);
peer             5314 net/ipv6/addrconf.c 	struct in6_addr *addr = NULL, *peer;
peer             5333 net/ipv6/addrconf.c 	addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
peer              506 net/ipv6/af_inet6.c 		 int peer)
peer              516 net/ipv6/af_inet6.c 	if (peer) {
peer              520 net/ipv6/af_inet6.c 		    peer == 1)
peer              217 net/ipv6/icmp.c 		struct inet_peer *peer;
peer              223 net/ipv6/icmp.c 		peer = inet_getpeer_v6(net->ipv6.peers, &fl6->daddr, 1);
peer              224 net/ipv6/icmp.c 		res = inet_peer_xrlim_allow(peer, tmo);
peer              225 net/ipv6/icmp.c 		if (peer)
peer              226 net/ipv6/icmp.c 			inet_putpeer(peer);
peer              504 net/ipv6/ip6_output.c 		struct inet_peer *peer;
peer              518 net/ipv6/ip6_output.c 		peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);
peer              523 net/ipv6/ip6_output.c 		if (inet_peer_xrlim_allow(peer, 1*HZ))
peer              525 net/ipv6/ip6_output.c 		if (peer)
peer              526 net/ipv6/ip6_output.c 			inet_putpeer(peer);
peer             1582 net/ipv6/ndisc.c 	struct inet_peer *peer;
peer             1632 net/ipv6/ndisc.c 	peer = inet_getpeer_v6(net->ipv6.peers, &ipv6_hdr(skb)->saddr, 1);
peer             1633 net/ipv6/ndisc.c 	ret = inet_peer_xrlim_allow(peer, 1*HZ);
peer             1634 net/ipv6/ndisc.c 	if (peer)
peer             1635 net/ipv6/ndisc.c 		inet_putpeer(peer);
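The ICMPv6, ip6_output and ndisc call sites above share one shape: look the peer up, ask inet_peer_xrlim_allow() whether one message per interval is allowed, then drop the reference; a missing peer is treated as "allow". A generic analogue of that shape only, with invented burst cap, units and field names rather than the kernel's:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-peer limiter state. */
struct peer {
	uint64_t rate_tokens;
	uint64_t rate_last;
};

/* Allow at most one event per 'interval' per peer, accumulating credit
 * for idle time. A missing peer (lookup failed, e.g. under memory
 * pressure) fails open: the message is allowed rather than dropped. */
static bool xrlim_allow(struct peer *p, uint64_t interval, uint64_t now)
{
	if (!p)
		return true;

	p->rate_tokens += now - p->rate_last;
	p->rate_last = now;
	if (p->rate_tokens > 4 * interval)    /* burst cap, arbitrary here */
		p->rate_tokens = 4 * interval;
	if (p->rate_tokens >= interval) {
		p->rate_tokens -= interval;
		return true;
	}
	return false;
}

int main(void)
{
	struct peer p = { .rate_tokens = 0, .rate_last = 0 };

	printf("%d\n", xrlim_allow(&p, 1000, 1000));  /* allowed: one interval of credit */
	printf("%d\n", xrlim_allow(&p, 1000, 1200));  /* denied: only 200 ms of credit */
	printf("%d\n", xrlim_allow(NULL, 1000, 0));   /* no peer -> allowed */
	return 0;
}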
peer             1014 net/iucv/af_iucv.c 			     int peer)
peer             1022 net/iucv/af_iucv.c 	if (peer) {
peer              362 net/l2tp/l2tp_ip.c 			   int peer)
peer              371 net/l2tp/l2tp_ip.c 	if (peer) {
peer              434 net/l2tp/l2tp_ip6.c 			    int peer)
peer              445 net/l2tp/l2tp_ip6.c 	if (peer) {
peer              904 net/l2tp/l2tp_ppp.c 			    int peer)
peer              989 net/llc/af_llc.c 			  int peer)
peer             1000 net/llc/af_llc.c 	if (peer) {
peer             3539 net/mac80211/cfg.c 				  const u8 *peer, u64 *cookie)
peer             3564 net/mac80211/cfg.c 	sta = sta_info_get_bss(sdata, peer);
peer             3706 net/mac80211/cfg.c 			       u8 tsid, const u8 *peer, u8 up,
peer             3732 net/mac80211/cfg.c 			       u8 tsid, const u8 *peer)
peer              585 net/mac80211/debugfs_netdev.c IEEE80211_IF_FILE(peer, u.wds.remote_addr, MAC);
peer              710 net/mac80211/debugfs_netdev.c 	DEBUGFS_ADD(peer);
peer             2231 net/mac80211/ieee80211_i.h 			const u8 *peer, u8 action_code, u8 dialog_token,
peer             2236 net/mac80211/ieee80211_i.h 			const u8 *peer, enum nl80211_tdls_operation oper);
peer             2247 net/mac80211/ieee80211_i.h 				      const u8 *peer, u16 reason);
peer              202 net/mac80211/tdls.c 				       struct sk_buff *skb, const u8 *peer,
peer              210 net/mac80211/tdls.c 		rsp_addr = peer;
peer              212 net/mac80211/tdls.c 		init_addr = peer;
peer              363 net/mac80211/tdls.c 				   struct sk_buff *skb, const u8 *peer,
peer              433 net/mac80211/tdls.c 		sta = sta_info_get(sdata, peer);
peer              475 net/mac80211/tdls.c 	ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
peer              544 net/mac80211/tdls.c 				 struct sk_buff *skb, const u8 *peer,
peer              561 net/mac80211/tdls.c 	sta = sta_info_get(sdata, peer);
peer              618 net/mac80211/tdls.c 	ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
peer              646 net/mac80211/tdls.c 				       struct sk_buff *skb, const u8 *peer,
peer              674 net/mac80211/tdls.c 	ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
peer              685 net/mac80211/tdls.c 					struct sk_buff *skb, const u8 *peer,
peer              691 net/mac80211/tdls.c 		ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
peer              698 net/mac80211/tdls.c 				   struct sk_buff *skb, const u8 *peer,
peer              709 net/mac80211/tdls.c 			ieee80211_tdls_add_setup_start_ies(sdata, skb, peer,
peer              717 net/mac80211/tdls.c 			ieee80211_tdls_add_setup_cfm_ies(sdata, skb, peer,
peer              726 net/mac80211/tdls.c 			ieee80211_tdls_add_link_ie(sdata, skb, peer, initiator);
peer              729 net/mac80211/tdls.c 		ieee80211_tdls_add_chan_switch_req_ies(sdata, skb, peer,
peer              735 net/mac80211/tdls.c 		ieee80211_tdls_add_chan_switch_resp_ies(sdata, skb, peer,
peer              746 net/mac80211/tdls.c 			       const u8 *peer, u8 action_code, u8 dialog_token,
peer              754 net/mac80211/tdls.c 	memcpy(tf->da, peer, ETH_ALEN);
peer              828 net/mac80211/tdls.c 			   const u8 *peer, u8 action_code, u8 dialog_token,
peer              835 net/mac80211/tdls.c 	memcpy(mgmt->da, peer, ETH_ALEN);
peer              863 net/mac80211/tdls.c 				      const u8 *peer, u8 action_code,
peer              904 net/mac80211/tdls.c 						     sdata->dev, peer,
peer              910 net/mac80211/tdls.c 						 peer, action_code,
peer              922 net/mac80211/tdls.c 	ieee80211_tdls_add_ies(sdata, skb, peer, action_code, status_code,
peer              934 net/mac80211/tdls.c 				const u8 *peer, u8 action_code, u8 dialog_token,
peer              947 net/mac80211/tdls.c 	sta = sta_info_get(sdata, peer);
peer              992 net/mac80211/tdls.c 	skb = ieee80211_tdls_build_mgmt_packet_data(sdata, peer, action_code,
peer             1033 net/mac80211/tdls.c 		sta = sta_info_get(sdata, peer);
peer             1070 net/mac80211/tdls.c 			  const u8 *peer, u8 action_code, u8 dialog_token,
peer             1091 net/mac80211/tdls.c 	    !ether_addr_equal(sdata->u.mgd.tdls_peer, peer)) {
peer             1105 net/mac80211/tdls.c 		if (!sta_info_get(sdata, peer)) {
peer             1114 net/mac80211/tdls.c 	memcpy(sdata->u.mgd.tdls_peer, peer, ETH_ALEN);
peer             1118 net/mac80211/tdls.c 	ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code,
peer             1142 net/mac80211/tdls.c 			     const u8 *peer, u8 action_code, u8 dialog_token,
peer             1162 net/mac80211/tdls.c 	ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer, action_code,
peer             1176 net/mac80211/tdls.c 	sta = sta_info_get(sdata, peer);
peer             1188 net/mac80211/tdls.c 			const u8 *peer, u8 action_code, u8 dialog_token,
peer             1207 net/mac80211/tdls.c 		ret = ieee80211_tdls_mgmt_setup(wiphy, dev, peer, action_code,
peer             1213 net/mac80211/tdls.c 		ret = ieee80211_tdls_mgmt_teardown(wiphy, dev, peer,
peer             1230 net/mac80211/tdls.c 		ret = ieee80211_tdls_prep_mgmt_packet(wiphy, dev, peer,
peer             1244 net/mac80211/tdls.c 		 action_code, peer, ret);
peer             1343 net/mac80211/tdls.c 			const u8 *peer, enum nl80211_tdls_operation oper)
peer             1372 net/mac80211/tdls.c 	tdls_dbg(sdata, "TDLS oper %d peer %pM\n", oper, peer);
peer             1383 net/mac80211/tdls.c 		sta = sta_info_get(sdata, peer);
peer             1397 net/mac80211/tdls.c 			     !ether_addr_equal(sdata->u.mgd.tdls_peer, peer));
peer             1415 net/mac80211/tdls.c 		ret = sta_info_destroy_addr(sdata, peer);
peer             1428 net/mac80211/tdls.c 	if (ret == 0 && ether_addr_equal(sdata->u.mgd.tdls_peer, peer)) {
peer             1442 net/mac80211/tdls.c void ieee80211_tdls_oper_request(struct ieee80211_vif *vif, const u8 *peer,
peer             1454 net/mac80211/tdls.c 	cfg80211_tdls_oper_request(sdata->dev, peer, oper, reason_code, gfp);
peer             1998 net/mac80211/tdls.c 				      const u8 *peer, u16 reason)
peer             2003 net/mac80211/tdls.c 	sta = ieee80211_find_sta(&sdata->vif, peer);
peer             2011 net/mac80211/tdls.c 		 peer, reason,
peer             2014 net/mac80211/tdls.c 	ieee80211_tdls_oper_request(&sdata->vif, peer,
peer             1104 net/netlink/af_netlink.c 			   int peer)
peer             1113 net/netlink/af_netlink.c 	if (peer) {
peer              807 net/netrom/af_netrom.c 	int peer)
peer              817 net/netrom/af_netrom.c 	if (peer != 0) {
peer              493 net/nfc/llcp_sock.c 			     int peer)
peer             3457 net/packet/af_packet.c 			       int peer)
peer             3462 net/packet/af_packet.c 	if (peer)
peer             3477 net/packet/af_packet.c 			  int peer)
peer             3484 net/packet/af_packet.c 	if (peer)
peer               91 net/phonet/pep.c 	struct sockaddr_pn peer;
peer              103 net/phonet/pep.c 	pn_skb_get_src_sockaddr(oskb, &peer);
peer              104 net/phonet/pep.c 	return pn_skb_send(sk, skb, &peer);
peer              316 net/phonet/socket.c 				int peer)
peer              323 net/phonet/socket.c 	if (!peer) /* Race with bind() here is userland's problem. */
peer               88 net/qrtr/qrtr.c 	struct sockaddr_qrtr peer;
peer              758 net/qrtr/qrtr.c 		addr = &ipc->peer;
peer              891 net/qrtr/qrtr.c 	ipc->peer = *addr;
peer              901 net/qrtr/qrtr.c 			int peer)
peer              908 net/qrtr/qrtr.c 	if (peer) {
peer              914 net/qrtr/qrtr.c 		qaddr = ipc->peer;
peer              115 net/rds/af_rds.c 		       int peer)
peer              123 net/rds/af_rds.c 	if (peer) {
peer              918 net/rose/af_rose.c 	int peer)
peer              926 net/rose/af_rose.c 	if (peer != 0) {
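The getname() implementations above (netlink, netrom, nfc, packet, phonet, qrtr, rds, rose, and the earlier iucv/l2tp/llc ones) all use the peer argument the same way: non-zero means "return the remote address", and asking for the peer of an unconnected socket fails. A toy illustration of that convention, with invented types in place of the per-protocol socket structures:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Toy address and socket types; stand-ins for sockaddr_* and the
 * per-protocol socket structs seen above. */
struct toy_addr { unsigned int id; };

struct toy_sock {
	struct toy_addr local;
	struct toy_addr remote;
	bool connected;
};

/* getname-style helper: 'peer' selects remote vs. local address, and
 * asking for the peer of an unconnected socket is an error. */
static int toy_getname(const struct toy_sock *sk, struct toy_addr *out, int peer)
{
	if (peer) {
		if (!sk->connected)
			return -ENOTCONN;
		*out = sk->remote;
	} else {
		*out = sk->local;
	}
	return 0;
}

int main(void)
{
	struct toy_sock sk = { .local = { 1 }, .remote = { 2 }, .connected = false };
	struct toy_addr a;

	printf("peer of unconnected socket: %d\n", toy_getname(&sk, &a, 1));
	sk.connected = true;
	if (toy_getname(&sk, &a, 1) == 0)
		printf("peer id: %u\n", a.id);
	return 0;
}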
peer              329 net/rxrpc/af_rxrpc.c 	rxrpc_put_peer(cp.peer);
peer              342 net/rxrpc/ar-internal.h 	struct rxrpc_peer	*peer;		/* Remote endpoint */
peer              561 net/rxrpc/ar-internal.h 	struct rxrpc_peer	*peer;		/* Peer record for remote address */
peer               67 net/rxrpc/call_accept.c 		struct rxrpc_peer *peer = rxrpc_alloc_peer(rx->local, gfp);
peer               68 net/rxrpc/call_accept.c 		if (!peer)
peer               70 net/rxrpc/call_accept.c 		b->peer_backlog[head] = peer;
peer              204 net/rxrpc/call_accept.c 		struct rxrpc_peer *peer = b->peer_backlog[tail];
peer              205 net/rxrpc/call_accept.c 		kfree(peer);
peer              251 net/rxrpc/call_accept.c 	if (call->peer->rtt_count < 3 ||
peer              252 net/rxrpc/call_accept.c 	    ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000), now))
peer              264 net/rxrpc/call_accept.c 						    struct rxrpc_peer *peer,
peer              293 net/rxrpc/call_accept.c 		if (peer && !rxrpc_get_peer_maybe(peer))
peer              294 net/rxrpc/call_accept.c 			peer = NULL;
peer              295 net/rxrpc/call_accept.c 		if (!peer) {
peer              296 net/rxrpc/call_accept.c 			peer = b->peer_backlog[peer_tail];
peer              297 net/rxrpc/call_accept.c 			if (rxrpc_extract_addr_from_skb(&peer->srx, skb) < 0)
peer              304 net/rxrpc/call_accept.c 			rxrpc_new_incoming_peer(rx, local, peer);
peer              313 net/rxrpc/call_accept.c 		conn->params.peer = peer;
peer              329 net/rxrpc/call_accept.c 	call->peer = rxrpc_get_peer(conn->params.peer);
peer              330 net/rxrpc/call_accept.c 	call->cong_cwnd = call->peer->cong_cwnd;
peer              356 net/rxrpc/call_accept.c 	struct rxrpc_peer *peer = NULL;
peer              377 net/rxrpc/call_accept.c 	conn = rxrpc_find_connection_rcu(local, skb, &peer);
peer              382 net/rxrpc/call_accept.c 	call = rxrpc_alloc_incoming_call(rx, local, peer, conn, sec, key, skb);
peer              114 net/rxrpc/call_event.c 		if (call->peer->srtt_us != 0)
peer              115 net/rxrpc/call_event.c 			ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3);
peer              168 net/rxrpc/call_event.c 	rto_j = call->peer->rto_j;
peer              231 net/rxrpc/call_event.c 		if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3))
peer              359 net/rxrpc/call_object.c 	spin_lock(&conn->params.peer->lock);
peer              360 net/rxrpc/call_object.c 	hlist_add_head_rcu(&call->error_link, &conn->params.peer->error_targets);
peer              361 net/rxrpc/call_object.c 	spin_unlock(&conn->params.peer->lock);
peer              572 net/rxrpc/call_object.c 	rxrpc_put_peer(call->peer);
peer              211 net/rxrpc/conn_client.c 	cp->peer = NULL;
peer              289 net/rxrpc/conn_client.c 	cp->peer = rxrpc_lookup_peer(rx, cp->local, srx, gfp);
peer              290 net/rxrpc/conn_client.c 	if (!cp->peer)
peer              293 net/rxrpc/conn_client.c 	call->cong_cwnd = cp->peer->cong_cwnd;
peer              310 net/rxrpc/conn_client.c 			diff = (cmp(peer) ?:
peer              377 net/rxrpc/conn_client.c 		diff = (cmp(peer) ?:
peer              440 net/rxrpc/conn_client.c 	rxrpc_put_peer(cp->peer);
peer              441 net/rxrpc/conn_client.c 	cp->peer = NULL;
peer              570 net/rxrpc/conn_client.c 	call->peer	= rxrpc_get_peer(conn->params.peer);
peer              722 net/rxrpc/conn_client.c 	spin_lock_bh(&call->conn->params.peer->lock);
peer              724 net/rxrpc/conn_client.c 			   &call->conn->params.peer->error_targets);
peer              725 net/rxrpc/conn_client.c 	spin_unlock_bh(&call->conn->params.peer->lock);
peer               55 net/rxrpc/conn_event.c 	msg.msg_name	= &conn->params.peer->srx.transport;
peer               56 net/rxrpc/conn_event.c 	msg.msg_namelen	= conn->params.peer->srx.transport_len;
peer               89 net/rxrpc/conn_event.c 		mtu = conn->params.peer->if_mtu;
peer               90 net/rxrpc/conn_event.c 		mtu -= conn->params.peer->hdrsize;
peer              137 net/rxrpc/conn_event.c 	conn->params.peer->last_tx_at = ktime_get_seconds();
peer              216 net/rxrpc/conn_event.c 	msg.msg_name	= &conn->params.peer->srx.transport;
peer              217 net/rxrpc/conn_event.c 	msg.msg_namelen	= conn->params.peer->srx.transport_len;
peer              257 net/rxrpc/conn_event.c 	conn->params.peer->last_tx_at = ktime_get_seconds();
peer               81 net/rxrpc/conn_object.c 	struct rxrpc_peer *peer;
peer              105 net/rxrpc/conn_object.c 		peer = rxrpc_lookup_peer_rcu(local, &srx);
peer              106 net/rxrpc/conn_object.c 		if (!peer)
peer              108 net/rxrpc/conn_object.c 		*_peer = peer;
peer              109 net/rxrpc/conn_object.c 		conn = rxrpc_find_service_conn_rcu(peer, skb);
peer              129 net/rxrpc/conn_object.c 		peer = conn->params.peer;
peer              132 net/rxrpc/conn_object.c 			if (peer->srx.transport.sin.sin_port !=
peer              134 net/rxrpc/conn_object.c 			    peer->srx.transport.sin.sin_addr.s_addr !=
peer              140 net/rxrpc/conn_object.c 			if (peer->srx.transport.sin6.sin6_port !=
peer              142 net/rxrpc/conn_object.c 			    memcmp(&peer->srx.transport.sin6.sin6_addr,
peer              213 net/rxrpc/conn_object.c 	call->peer->cong_cwnd = call->cong_cwnd;
peer              215 net/rxrpc/conn_object.c 	spin_lock_bh(&conn->params.peer->lock);
peer              217 net/rxrpc/conn_object.c 	spin_unlock_bh(&conn->params.peer->lock);
peer              366 net/rxrpc/conn_object.c 	rxrpc_put_peer(conn->params.peer);
peer               21 net/rxrpc/conn_service.c struct rxrpc_connection *rxrpc_find_service_conn_rcu(struct rxrpc_peer *peer,
peer               38 net/rxrpc/conn_service.c 		read_seqbegin_or_lock(&peer->service_conn_lock, &seq);
peer               40 net/rxrpc/conn_service.c 		p = rcu_dereference_raw(peer->service_conns.rb_node);
peer               52 net/rxrpc/conn_service.c 	} while (need_seqretry(&peer->service_conn_lock, seq));
peer               54 net/rxrpc/conn_service.c 	done_seqretry(&peer->service_conn_lock, seq);
peer               63 net/rxrpc/conn_service.c static void rxrpc_publish_service_conn(struct rxrpc_peer *peer,
peer               70 net/rxrpc/conn_service.c 	write_seqlock_bh(&peer->service_conn_lock);
peer               72 net/rxrpc/conn_service.c 	pp = &peer->service_conns.rb_node;
peer               88 net/rxrpc/conn_service.c 	rb_insert_color(&conn->service_node, &peer->service_conns);
peer               91 net/rxrpc/conn_service.c 	write_sequnlock_bh(&peer->service_conn_lock);
peer               98 net/rxrpc/conn_service.c 	write_sequnlock_bh(&peer->service_conn_lock);
peer              110 net/rxrpc/conn_service.c 			    &peer->service_conns);
peer              181 net/rxrpc/conn_service.c 	rxrpc_publish_service_conn(conn->params.peer, conn);
peer              192 net/rxrpc/conn_service.c 	struct rxrpc_peer *peer = conn->params.peer;
peer              194 net/rxrpc/conn_service.c 	write_seqlock_bh(&peer->service_conn_lock);
peer              196 net/rxrpc/conn_service.c 		rb_erase(&conn->service_node, &peer->service_conns);
peer              197 net/rxrpc/conn_service.c 	write_sequnlock_bh(&peer->service_conn_lock);
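conn_service.c above lets readers walk the peer's service-connection tree under a seqlock: read_seqbegin_or_lock()/need_seqretry() retry the walk if a writer ran concurrently, while writers publish under write_seqlock_bh(). A minimal seqcount-style sketch of just the retry protocol; writer-vs-writer locking (a spinlock in the kernel) and the portable-memory-model treatment of the plain data accesses are deliberately omitted:

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint seq;
static int data_a, data_b;          /* reader invariant: data_a == data_b */

/* Writer: bump the sequence to an odd value around the update. */
static void write_pair(int v)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);  /* odd: update in progress */
	data_a = v;
	data_b = v;
	atomic_fetch_add_explicit(&seq, 1, memory_order_acq_rel);  /* even: update done */
}

/* Reader: retry if the sequence was odd or changed during the read. */
static void read_pair(int *a, int *b)
{
	unsigned int s;

	do {
		s = atomic_load_explicit(&seq, memory_order_acquire);
		if (s & 1)
			continue;                 /* writer active, try again */
		*a = data_a;
		*b = data_b;
	} while (atomic_load_explicit(&seq, memory_order_acquire) != s);
}

int main(void)
{
	int a, b;

	write_pair(7);
	read_pair(&a, &b);
	printf("a=%d b=%d\n", a, b);
	return 0;
}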
peer               94 net/rxrpc/input.c 		if (call->peer->rtt_count == 0)
peer               98 net/rxrpc/input.c 					      call->peer->srtt_us >> 3)))
peer              716 net/rxrpc/input.c 	struct rxrpc_peer *peer;
peer              741 net/rxrpc/input.c 	peer = call->peer;
peer              742 net/rxrpc/input.c 	if (mtu < peer->maxdata) {
peer              743 net/rxrpc/input.c 		spin_lock_bh(&peer->lock);
peer              744 net/rxrpc/input.c 		peer->maxdata = mtu;
peer              745 net/rxrpc/input.c 		peer->mtu = mtu + peer->hdrsize;
peer              746 net/rxrpc/input.c 		spin_unlock_bh(&peer->lock);
peer              747 net/rxrpc/input.c 		_net("Net MTU %u (maxdata %u)", peer->mtu, peer->maxdata);
peer             1201 net/rxrpc/input.c 	struct rxrpc_peer *peer = NULL;
peer             1323 net/rxrpc/input.c 	conn = rxrpc_find_connection_rcu(local, skb, &peer);
peer              112 net/rxrpc/output.c 	mtu = conn->params.peer->if_mtu;
peer              113 net/rxrpc/output.c 	mtu -= conn->params.peer->hdrsize;
peer              151 net/rxrpc/output.c 	msg.msg_name	= &call->peer->srx.transport;
peer              152 net/rxrpc/output.c 	msg.msg_namelen	= call->peer->srx.transport_len;
peer              213 net/rxrpc/output.c 	conn->params.peer->last_tx_at = ktime_get_seconds();
peer              274 net/rxrpc/output.c 	msg.msg_name	= &call->peer->srx.transport;
peer              275 net/rxrpc/output.c 	msg.msg_namelen	= call->peer->srx.transport_len;
peer              300 net/rxrpc/output.c 	conn->params.peer->last_tx_at = ktime_get_seconds();
peer              353 net/rxrpc/output.c 	msg.msg_name = &call->peer->srx.transport;
peer              354 net/rxrpc/output.c 	msg.msg_namelen = call->peer->srx.transport_len;
peer              372 net/rxrpc/output.c 	     (call->peer->rtt_count < 3 && sp->hdr.seq & 1) ||
peer              373 net/rxrpc/output.c 	     ktime_before(ktime_add_ms(call->peer->rtt_last_req, 1000),
peer              392 net/rxrpc/output.c 	if (iov[1].iov_len >= call->peer->maxdata)
peer              408 net/rxrpc/output.c 	conn->params.peer->last_tx_at = ktime_get_seconds();
peer              424 net/rxrpc/output.c 			call->peer->rtt_last_req = skb->tstamp;
peer              426 net/rxrpc/output.c 			if (call->peer->rtt_count > 1) {
peer              429 net/rxrpc/output.c 				ack_lost_at = rxrpc_get_rto_backoff(call->peer, retrans);
peer              460 net/rxrpc/output.c 	_leave(" = %d [%u]", ret, call->peer->maxdata);
peer              482 net/rxrpc/output.c 		conn->params.peer->last_tx_at = ktime_get_seconds();
peer              586 net/rxrpc/output.c void rxrpc_send_keepalive(struct rxrpc_peer *peer)
peer              596 net/rxrpc/output.c 	msg.msg_name	= &peer->srx.transport;
peer              597 net/rxrpc/output.c 	msg.msg_namelen	= peer->srx.transport_len;
peer              602 net/rxrpc/output.c 	whdr.epoch	= htonl(peer->local->rxnet->epoch);
peer              623 net/rxrpc/output.c 	ret = kernel_sendmsg(peer->local->socket, &msg, iov, 2, len);
peer              625 net/rxrpc/output.c 		trace_rxrpc_tx_fail(peer->debug_id, 0, ret,
peer              628 net/rxrpc/output.c 		trace_rxrpc_tx_packet(peer->debug_id, &whdr,
peer              631 net/rxrpc/output.c 	peer->last_tx_at = ktime_get_seconds();
peer              107 net/rxrpc/peer_event.c static void rxrpc_adjust_mtu(struct rxrpc_peer *peer, struct sock_exterr_skb *serr)
peer              114 net/rxrpc/peer_event.c 	if (mtu > 0 && peer->if_mtu == 65535 && mtu < peer->if_mtu) {
peer              115 net/rxrpc/peer_event.c 		peer->if_mtu = mtu;
peer              121 net/rxrpc/peer_event.c 		mtu = peer->if_mtu;
peer              128 net/rxrpc/peer_event.c 			if (mtu < peer->hdrsize)
peer              129 net/rxrpc/peer_event.c 				mtu = peer->hdrsize + 4;
peer              133 net/rxrpc/peer_event.c 	if (mtu < peer->mtu) {
peer              134 net/rxrpc/peer_event.c 		spin_lock_bh(&peer->lock);
peer              135 net/rxrpc/peer_event.c 		peer->mtu = mtu;
peer              136 net/rxrpc/peer_event.c 		peer->maxdata = peer->mtu - peer->hdrsize;
peer              137 net/rxrpc/peer_event.c 		spin_unlock_bh(&peer->lock);
peer              139 net/rxrpc/peer_event.c 		     peer->mtu, peer->maxdata);
peer              151 net/rxrpc/peer_event.c 	struct rxrpc_peer *peer;
peer              182 net/rxrpc/peer_event.c 	peer = rxrpc_lookup_peer_icmp_rcu(local, skb, &srx);
peer              183 net/rxrpc/peer_event.c 	if (peer && !rxrpc_get_peer_maybe(peer))
peer              184 net/rxrpc/peer_event.c 		peer = NULL;
peer              185 net/rxrpc/peer_event.c 	if (!peer) {
peer              192 net/rxrpc/peer_event.c 	trace_rxrpc_rx_icmp(peer, &serr->ee, &srx);
peer              197 net/rxrpc/peer_event.c 		rxrpc_adjust_mtu(peer, serr);
peer              200 net/rxrpc/peer_event.c 		rxrpc_put_peer(peer);
peer              205 net/rxrpc/peer_event.c 	rxrpc_store_error(peer, serr);
peer              208 net/rxrpc/peer_event.c 	rxrpc_put_peer(peer);
peer              216 net/rxrpc/peer_event.c static void rxrpc_store_error(struct rxrpc_peer *peer,
peer              279 net/rxrpc/peer_event.c 	rxrpc_distribute_error(peer, err, compl);
peer              285 net/rxrpc/peer_event.c static void rxrpc_distribute_error(struct rxrpc_peer *peer, int error,
peer              290 net/rxrpc/peer_event.c 	hlist_for_each_entry_rcu(call, &peer->error_targets, error_link) {
peer              306 net/rxrpc/peer_event.c 	struct rxrpc_peer *peer;
peer              314 net/rxrpc/peer_event.c 		peer = list_entry(collector->next,
peer              317 net/rxrpc/peer_event.c 		list_del_init(&peer->keepalive_link);
peer              318 net/rxrpc/peer_event.c 		if (!rxrpc_get_peer_maybe(peer))
peer              321 net/rxrpc/peer_event.c 		if (__rxrpc_use_local(peer->local)) {
peer              324 net/rxrpc/peer_event.c 			keepalive_at = peer->last_tx_at + RXRPC_KEEPALIVE_TIME;
peer              327 net/rxrpc/peer_event.c 			       cursor, peer->debug_id, slot, &peer->srx.transport);
peer              331 net/rxrpc/peer_event.c 				rxrpc_send_keepalive(peer);
peer              342 net/rxrpc/peer_event.c 			list_add_tail(&peer->keepalive_link,
peer              344 net/rxrpc/peer_event.c 			rxrpc_unuse_local(peer->local);
peer              346 net/rxrpc/peer_event.c 		rxrpc_put_peer_locked(peer);
peer               76 net/rxrpc/peer_object.c static long rxrpc_peer_cmp_key(const struct rxrpc_peer *peer,
peer               83 net/rxrpc/peer_object.c 	diff = ((peer->hash_key - hash_key) ?:
peer               84 net/rxrpc/peer_object.c 		((unsigned long)peer->local - (unsigned long)local) ?:
peer               85 net/rxrpc/peer_object.c 		(peer->srx.transport_type - srx->transport_type) ?:
peer               86 net/rxrpc/peer_object.c 		(peer->srx.transport_len - srx->transport_len) ?:
peer               87 net/rxrpc/peer_object.c 		(peer->srx.transport.family - srx->transport.family));
peer               93 net/rxrpc/peer_object.c 		return ((u16 __force)peer->srx.transport.sin.sin_port -
peer               95 net/rxrpc/peer_object.c 			memcmp(&peer->srx.transport.sin.sin_addr,
peer              100 net/rxrpc/peer_object.c 		return ((u16 __force)peer->srx.transport.sin6.sin6_port -
peer              102 net/rxrpc/peer_object.c 			memcmp(&peer->srx.transport.sin6.sin6_addr,
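rxrpc_peer_cmp_key() above builds a single signed difference out of a chain of field comparisons, stopping at the first field that differs. A compact standalone version of that style over a simplified key; the field names are illustrative, and the "a ?: b" form is the GCC/Clang extension the kernel relies on:

#include <stdio.h>
#include <string.h>

/* Simplified key; the real one also compares the hash key and the
 * local endpoint pointer first. */
struct peer_key {
	unsigned long hash;
	int transport_type;
	int transport_len;
	int family;
	unsigned char addr[16];
	unsigned short port;
};

/* Compare field by field, most-discriminating first, and stop at the
 * first difference. Plain if/return chains work identically. */
static long peer_key_cmp(const struct peer_key *a, const struct peer_key *b)
{
	long diff;

	diff = ((long)(a->hash - b->hash) ?:
		(a->transport_type - b->transport_type) ?:
		(a->transport_len - b->transport_len) ?:
		(a->family - b->family));
	if (diff)
		return diff;

	return (a->port - b->port) ?:
		memcmp(a->addr, b->addr, sizeof(a->addr));
}

int main(void)
{
	struct peer_key x = { .hash = 1, .transport_type = 1, .transport_len = 4,
			      .family = 2, .addr = { 192, 0, 2, 1 }, .port = 7000 };
	struct peer_key y = x;

	printf("equal: %ld\n", peer_key_cmp(&x, &y));
	y.port = 7001;
	printf("differs: %ld\n", peer_key_cmp(&x, &y));
	return 0;
}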
peer              119 net/rxrpc/peer_object.c 	struct rxrpc_peer *peer;
peer              122 net/rxrpc/peer_object.c 	hash_for_each_possible_rcu(rxnet->peer_hash, peer, hash_link, hash_key) {
peer              123 net/rxrpc/peer_object.c 		if (rxrpc_peer_cmp_key(peer, local, srx, hash_key) == 0 &&
peer              124 net/rxrpc/peer_object.c 		    atomic_read(&peer->usage) > 0)
peer              125 net/rxrpc/peer_object.c 			return peer;
peer              137 net/rxrpc/peer_object.c 	struct rxrpc_peer *peer;
peer              140 net/rxrpc/peer_object.c 	peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
peer              141 net/rxrpc/peer_object.c 	if (peer) {
peer              142 net/rxrpc/peer_object.c 		_net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport);
peer              143 net/rxrpc/peer_object.c 		_leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
peer              145 net/rxrpc/peer_object.c 	return peer;
peer              153 net/rxrpc/peer_object.c 				  struct rxrpc_peer *peer)
peer              164 net/rxrpc/peer_object.c 	peer->if_mtu = 1500;
peer              167 net/rxrpc/peer_object.c 	switch (peer->srx.transport.family) {
peer              171 net/rxrpc/peer_object.c 			peer->srx.transport.sin.sin_addr.s_addr, 0,
peer              185 net/rxrpc/peer_object.c 		memcpy(&fl6->daddr, &peer->srx.transport.sin6.sin6_addr,
peer              201 net/rxrpc/peer_object.c 	peer->if_mtu = dst_mtu(dst);
peer              204 net/rxrpc/peer_object.c 	_leave(" [if_mtu %u]", peer->if_mtu);
peer              212 net/rxrpc/peer_object.c 	struct rxrpc_peer *peer;
peer              216 net/rxrpc/peer_object.c 	peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
peer              217 net/rxrpc/peer_object.c 	if (peer) {
peer              218 net/rxrpc/peer_object.c 		atomic_set(&peer->usage, 1);
peer              219 net/rxrpc/peer_object.c 		peer->local = rxrpc_get_local(local);
peer              220 net/rxrpc/peer_object.c 		INIT_HLIST_HEAD(&peer->error_targets);
peer              221 net/rxrpc/peer_object.c 		peer->service_conns = RB_ROOT;
peer              222 net/rxrpc/peer_object.c 		seqlock_init(&peer->service_conn_lock);
peer              223 net/rxrpc/peer_object.c 		spin_lock_init(&peer->lock);
peer              224 net/rxrpc/peer_object.c 		spin_lock_init(&peer->rtt_input_lock);
peer              225 net/rxrpc/peer_object.c 		peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
peer              227 net/rxrpc/peer_object.c 		rxrpc_peer_init_rtt(peer);
peer              230 net/rxrpc/peer_object.c 			peer->cong_cwnd = 2;
peer              232 net/rxrpc/peer_object.c 			peer->cong_cwnd = 3;
peer              234 net/rxrpc/peer_object.c 			peer->cong_cwnd = 4;
peer              237 net/rxrpc/peer_object.c 	_leave(" = %p", peer);
peer              238 net/rxrpc/peer_object.c 	return peer;
peer              244 net/rxrpc/peer_object.c static void rxrpc_init_peer(struct rxrpc_sock *rx, struct rxrpc_peer *peer,
peer              247 net/rxrpc/peer_object.c 	peer->hash_key = hash_key;
peer              248 net/rxrpc/peer_object.c 	rxrpc_assess_MTU_size(rx, peer);
peer              249 net/rxrpc/peer_object.c 	peer->mtu = peer->if_mtu;
peer              250 net/rxrpc/peer_object.c 	peer->rtt_last_req = ktime_get_real();
peer              252 net/rxrpc/peer_object.c 	switch (peer->srx.transport.family) {
peer              254 net/rxrpc/peer_object.c 		peer->hdrsize = sizeof(struct iphdr);
peer              258 net/rxrpc/peer_object.c 		peer->hdrsize = sizeof(struct ipv6hdr);
peer              265 net/rxrpc/peer_object.c 	switch (peer->srx.transport_type) {
peer              267 net/rxrpc/peer_object.c 		peer->hdrsize += sizeof(struct udphdr);
peer              273 net/rxrpc/peer_object.c 	peer->hdrsize += sizeof(struct rxrpc_wire_header);
peer              274 net/rxrpc/peer_object.c 	peer->maxdata = peer->mtu - peer->hdrsize;
peer              286 net/rxrpc/peer_object.c 	struct rxrpc_peer *peer;
peer              290 net/rxrpc/peer_object.c 	peer = rxrpc_alloc_peer(local, gfp);
peer              291 net/rxrpc/peer_object.c 	if (peer) {
peer              292 net/rxrpc/peer_object.c 		memcpy(&peer->srx, srx, sizeof(*srx));
peer              293 net/rxrpc/peer_object.c 		rxrpc_init_peer(rx, peer, hash_key);
peer              296 net/rxrpc/peer_object.c 	_leave(" = %p", peer);
peer              297 net/rxrpc/peer_object.c 	return peer;
peer              306 net/rxrpc/peer_object.c 			     struct rxrpc_peer *peer)
peer              311 net/rxrpc/peer_object.c 	hash_key = rxrpc_peer_hash_key(local, &peer->srx);
peer              312 net/rxrpc/peer_object.c 	rxrpc_init_peer(rx, peer, hash_key);
peer              315 net/rxrpc/peer_object.c 	hash_add_rcu(rxnet->peer_hash, &peer->hash_link, hash_key);
peer              316 net/rxrpc/peer_object.c 	list_add_tail(&peer->keepalive_link, &rxnet->peer_keepalive_new);
peer              327 net/rxrpc/peer_object.c 	struct rxrpc_peer *peer, *candidate;
peer              335 net/rxrpc/peer_object.c 	peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
peer              336 net/rxrpc/peer_object.c 	if (peer && !rxrpc_get_peer_maybe(peer))
peer              337 net/rxrpc/peer_object.c 		peer = NULL;
peer              340 net/rxrpc/peer_object.c 	if (!peer) {
peer              353 net/rxrpc/peer_object.c 		peer = __rxrpc_lookup_peer_rcu(local, srx, hash_key);
peer              354 net/rxrpc/peer_object.c 		if (peer && !rxrpc_get_peer_maybe(peer))
peer              355 net/rxrpc/peer_object.c 			peer = NULL;
peer              356 net/rxrpc/peer_object.c 		if (!peer) {
peer              365 net/rxrpc/peer_object.c 		if (peer)
peer              368 net/rxrpc/peer_object.c 			peer = candidate;
peer              371 net/rxrpc/peer_object.c 	_net("PEER %d {%pISp}", peer->debug_id, &peer->srx.transport);
peer              373 net/rxrpc/peer_object.c 	_leave(" = %p {u=%d}", peer, atomic_read(&peer->usage));
peer              374 net/rxrpc/peer_object.c 	return peer;
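rxrpc_lookup_peer() above follows the usual lookup-or-create discipline: an optimistic lookup, allocation of a candidate outside the lock, then a re-check under the lock so a racing inserter wins and the loser's candidate is discarded. A toy sketch of that discipline with a mutex and a single-bucket list in place of the RCU hash table; the unlocked fast path is only safe here because the demo is single-threaded, whereas the kernel performs it under RCU:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct peer {
	struct peer *next;
	unsigned int key;
};

static struct peer *peers;                 /* single-bucket "hash table" */
static pthread_mutex_t peers_lock = PTHREAD_MUTEX_INITIALIZER;

static struct peer *peer_lookup(unsigned int key)
{
	for (struct peer *p = peers; p; p = p->next)
		if (p->key == key)
			return p;
	return NULL;
}

/* Lookup-or-create: allocate the candidate outside the lock, then
 * re-check under the lock and discard the candidate if someone else
 * inserted the same key in the meantime. */
static struct peer *peer_lookup_or_create(unsigned int key)
{
	struct peer *p, *candidate;

	p = peer_lookup(key);              /* optimistic fast path */
	if (p)
		return p;

	candidate = calloc(1, sizeof(*candidate));
	if (!candidate)
		return NULL;
	candidate->key = key;

	pthread_mutex_lock(&peers_lock);
	p = peer_lookup(key);              /* somebody may have beaten us to it */
	if (!p) {
		candidate->next = peers;
		peers = candidate;
		p = candidate;
		candidate = NULL;
	}
	pthread_mutex_unlock(&peers_lock);

	free(candidate);                   /* no-op if we won the race */
	return p;
}

int main(void)
{
	struct peer *a = peer_lookup_or_create(42);
	struct peer *b = peer_lookup_or_create(42);

	printf("same object: %d\n", a == b);
	return 0;
}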
peer              380 net/rxrpc/peer_object.c struct rxrpc_peer *rxrpc_get_peer(struct rxrpc_peer *peer)
peer              385 net/rxrpc/peer_object.c 	n = atomic_inc_return(&peer->usage);
peer              386 net/rxrpc/peer_object.c 	trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n, here);
peer              387 net/rxrpc/peer_object.c 	return peer;
peer              393 net/rxrpc/peer_object.c struct rxrpc_peer *rxrpc_get_peer_maybe(struct rxrpc_peer *peer)
peer              397 net/rxrpc/peer_object.c 	if (peer) {
peer              398 net/rxrpc/peer_object.c 		int n = atomic_fetch_add_unless(&peer->usage, 1, 0);
peer              400 net/rxrpc/peer_object.c 			trace_rxrpc_peer(peer->debug_id, rxrpc_peer_got, n + 1, here);
peer              402 net/rxrpc/peer_object.c 			peer = NULL;
peer              404 net/rxrpc/peer_object.c 	return peer;
peer              410 net/rxrpc/peer_object.c static void __rxrpc_put_peer(struct rxrpc_peer *peer)
peer              412 net/rxrpc/peer_object.c 	struct rxrpc_net *rxnet = peer->local->rxnet;
peer              414 net/rxrpc/peer_object.c 	ASSERT(hlist_empty(&peer->error_targets));
peer              417 net/rxrpc/peer_object.c 	hash_del_rcu(&peer->hash_link);
peer              418 net/rxrpc/peer_object.c 	list_del_init(&peer->keepalive_link);
peer              421 net/rxrpc/peer_object.c 	rxrpc_put_local(peer->local);
peer              422 net/rxrpc/peer_object.c 	kfree_rcu(peer, rcu);
peer              428 net/rxrpc/peer_object.c void rxrpc_put_peer(struct rxrpc_peer *peer)
peer              434 net/rxrpc/peer_object.c 	if (peer) {
peer              435 net/rxrpc/peer_object.c 		debug_id = peer->debug_id;
peer              436 net/rxrpc/peer_object.c 		n = atomic_dec_return(&peer->usage);
peer              439 net/rxrpc/peer_object.c 			__rxrpc_put_peer(peer);
peer              447 net/rxrpc/peer_object.c void rxrpc_put_peer_locked(struct rxrpc_peer *peer)
peer              450 net/rxrpc/peer_object.c 	unsigned int debug_id = peer->debug_id;
peer              453 net/rxrpc/peer_object.c 	n = atomic_dec_return(&peer->usage);
peer              456 net/rxrpc/peer_object.c 		hash_del_rcu(&peer->hash_link);
peer              457 net/rxrpc/peer_object.c 		list_del_init(&peer->keepalive_link);
peer              458 net/rxrpc/peer_object.c 		rxrpc_put_local(peer->local);
peer              459 net/rxrpc/peer_object.c 		kfree_rcu(peer, rcu);
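rxrpc_get_peer(), rxrpc_get_peer_maybe() and rxrpc_put_peer() above form the usual three-operation refcount: an unconditional get for callers that already hold a reference, a "get unless zero" for lookups that may race with teardown, and a put that frees on the last drop. A userspace sketch with C11 atomics; the kernel versions additionally emit tracepoints and defer the free through RCU:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_uint usage;
};

/* Unconditional reference: the caller already holds one, so the count
 * cannot be observed at zero here. */
static struct obj *obj_get(struct obj *o)
{
	atomic_fetch_add(&o->usage, 1);
	return o;
}

/* Conditional reference for lookups: only succeed if the count is not
 * already zero, i.e. the object is not on its way to being freed.
 * This mirrors atomic_fetch_add_unless(&peer->usage, 1, 0). */
static struct obj *obj_get_maybe(struct obj *o)
{
	unsigned int n = atomic_load(&o->usage);

	do {
		if (n == 0)
			return NULL;
	} while (!atomic_compare_exchange_weak(&o->usage, &n, n + 1));

	return o;
}

/* Drop a reference; the last one frees the object. */
static void obj_put(struct obj *o)
{
	if (o && atomic_fetch_sub(&o->usage, 1) == 1)
		free(o);    /* last reference: unhash then free (kfree_rcu in the kernel) */
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	atomic_store(&o->usage, 1);        /* creation reference */
	obj_get(o);
	printf("got maybe: %d\n", obj_get_maybe(o) != NULL);
	obj_put(o);                        /* drop the maybe ref */
	obj_put(o);                        /* drop the explicit get */
	obj_put(o);                        /* drop the creation ref; frees */
	return 0;
}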
peer              468 net/rxrpc/peer_object.c 	struct rxrpc_peer *peer;
peer              475 net/rxrpc/peer_object.c 		hlist_for_each_entry(peer, &rxnet->peer_hash[i], hash_link) {
peer              477 net/rxrpc/peer_object.c 			       peer->debug_id,
peer              478 net/rxrpc/peer_object.c 			       atomic_read(&peer->usage),
peer              479 net/rxrpc/peer_object.c 			       &peer->srx.transport);
peer              495 net/rxrpc/peer_object.c 	*_srx = call->peer->srx;
peer              508 net/rxrpc/peer_object.c 	return call->peer->srtt_us >> 3;
peer               59 net/rxrpc/proc.c 	struct rxrpc_peer *peer;
peer               88 net/rxrpc/proc.c 	peer = call->peer;
peer               89 net/rxrpc/proc.c 	if (peer)
peer               90 net/rxrpc/proc.c 		sprintf(rbuff, "%pISpc", &peer->srx.transport);
peer              182 net/rxrpc/proc.c 	sprintf(rbuff, "%pISpc", &conn->params.peer->srx.transport);
peer              217 net/rxrpc/proc.c 	struct rxrpc_peer *peer;
peer              230 net/rxrpc/proc.c 	peer = list_entry(v, struct rxrpc_peer, hash_link);
peer              232 net/rxrpc/proc.c 	sprintf(lbuff, "%pISpc", &peer->local->srx.transport);
peer              234 net/rxrpc/proc.c 	sprintf(rbuff, "%pISpc", &peer->srx.transport);
peer              242 net/rxrpc/proc.c 		   atomic_read(&peer->usage),
peer              243 net/rxrpc/proc.c 		   peer->cong_cwnd,
peer              244 net/rxrpc/proc.c 		   peer->mtu,
peer              245 net/rxrpc/proc.c 		   now - peer->last_tx_at,
peer              246 net/rxrpc/proc.c 		   peer->srtt_us >> 3,
peer              247 net/rxrpc/proc.c 		   jiffies_to_usecs(peer->rto_j));
peer              546 net/rxrpc/recvmsg.c 		size_t len = sizeof(call->peer->srx);
peer              548 net/rxrpc/recvmsg.c 		memcpy(msg->msg_name, &call->peer->srx, len);
peer               19 net/rxrpc/rtt.c static u32 rxrpc_rto_min_us(struct rxrpc_peer *peer)
peer               24 net/rxrpc/rtt.c static u32 __rxrpc_set_rto(const struct rxrpc_peer *peer)
peer               26 net/rxrpc/rtt.c 	return _usecs_to_jiffies((peer->srtt_us >> 3) + peer->rttvar_us);
peer               44 net/rxrpc/rtt.c static void rxrpc_rtt_estimator(struct rxrpc_peer *peer, long sample_rtt_us)
peer               47 net/rxrpc/rtt.c 	u32 srtt = peer->srtt_us;
peer               70 net/rxrpc/rtt.c 			m -= (peer->mdev_us >> 2);   /* similar update on mdev */
peer               82 net/rxrpc/rtt.c 			m -= (peer->mdev_us >> 2);   /* similar update on mdev */
peer               85 net/rxrpc/rtt.c 		peer->mdev_us += m;		/* mdev = 3/4 mdev + 1/4 new */
peer               86 net/rxrpc/rtt.c 		if (peer->mdev_us > peer->mdev_max_us) {
peer               87 net/rxrpc/rtt.c 			peer->mdev_max_us = peer->mdev_us;
peer               88 net/rxrpc/rtt.c 			if (peer->mdev_max_us > peer->rttvar_us)
peer               89 net/rxrpc/rtt.c 				peer->rttvar_us = peer->mdev_max_us;
peer               94 net/rxrpc/rtt.c 		peer->mdev_us = m << 1;	/* make sure rto = 3*rtt */
peer               95 net/rxrpc/rtt.c 		peer->rttvar_us = max(peer->mdev_us, rxrpc_rto_min_us(peer));
peer               96 net/rxrpc/rtt.c 		peer->mdev_max_us = peer->rttvar_us;
peer               99 net/rxrpc/rtt.c 	peer->srtt_us = max(1U, srtt);
peer              106 net/rxrpc/rtt.c static void rxrpc_set_rto(struct rxrpc_peer *peer)
peer              117 net/rxrpc/rtt.c 	rto = __rxrpc_set_rto(peer);
peer              128 net/rxrpc/rtt.c 	peer->rto_j = rxrpc_bound_rto(rto);
peer              131 net/rxrpc/rtt.c static void rxrpc_ack_update_rtt(struct rxrpc_peer *peer, long rtt_us)
peer              137 net/rxrpc/rtt.c 	rxrpc_rtt_estimator(peer, rtt_us);
peer              138 net/rxrpc/rtt.c 	rxrpc_set_rto(peer);
peer              141 net/rxrpc/rtt.c 	peer->backoff = 0;
peer              152 net/rxrpc/rtt.c 	struct rxrpc_peer *peer = call->peer;
peer              159 net/rxrpc/rtt.c 	spin_lock(&peer->rtt_input_lock);
peer              160 net/rxrpc/rtt.c 	rxrpc_ack_update_rtt(peer, rtt_us);
peer              161 net/rxrpc/rtt.c 	if (peer->rtt_count < 3)
peer              162 net/rxrpc/rtt.c 		peer->rtt_count++;
peer              163 net/rxrpc/rtt.c 	spin_unlock(&peer->rtt_input_lock);
peer              166 net/rxrpc/rtt.c 			   peer->srtt_us >> 3, peer->rto_j);
peer              173 net/rxrpc/rtt.c unsigned long rxrpc_get_rto_backoff(struct rxrpc_peer *peer, bool retrans)
peer              176 net/rxrpc/rtt.c 	u8 backoff = READ_ONCE(peer->backoff);
peer              178 net/rxrpc/rtt.c 	timo_j = peer->rto_j;
peer              181 net/rxrpc/rtt.c 		WRITE_ONCE(peer->backoff, backoff + 1);
peer              189 net/rxrpc/rtt.c void rxrpc_peer_init_rtt(struct rxrpc_peer *peer)
peer              191 net/rxrpc/rtt.c 	peer->rto_j	= RXRPC_TIMEOUT_INIT;
peer              192 net/rxrpc/rtt.c 	peer->mdev_us	= jiffies_to_usecs(RXRPC_TIMEOUT_INIT);
peer              193 net/rxrpc/rtt.c 	peer->backoff	= 0;
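rtt.c above keeps a Van Jacobson style estimator per peer: srtt_us stores eight times the smoothed RTT, mdev tracks the mean deviation, the RTO is roughly srtt plus four deviations, and each unanswered retransmission doubles it through the backoff shift. A simplified, self-contained version of that arithmetic; the field names and sample values are mine, and the rttvar/mdev_max tracking is folded into a single mdev term:

#include <stdint.h>
#include <stdio.h>

/* srtt is kept scaled by 8 and mdev by 4, so integer shifts implement
 * the 7/8 and 3/4 smoothing factors. Units are microseconds. */
struct rtt_state {
	uint32_t srtt_us8;   /* smoothed RTT, scaled by 8 */
	uint32_t mdev_us4;   /* mean deviation, scaled by 4 */
	unsigned int backoff;
};

static void rtt_sample(struct rtt_state *s, int64_t m_us)
{
	if (m_us <= 0)
		m_us = 1;

	if (s->srtt_us8 == 0) {
		/* First sample: srtt = m, mdev = m/2, so rto = 3*rtt. */
		s->srtt_us8 = (uint32_t)m_us << 3;
		s->mdev_us4 = (uint32_t)m_us << 1;
	} else {
		int64_t err = m_us - (s->srtt_us8 >> 3);

		s->srtt_us8 += err;                       /* srtt += err/8 (scaled) */
		if (err < 0)
			err = -err;
		s->mdev_us4 += err - (s->mdev_us4 >> 2);  /* mdev += (|err|-mdev)/4 */
	}
	s->backoff = 0;   /* a fresh measurement clears the backoff */
}

/* Retransmission timeout: srtt + 4*mdev, doubled per backoff step. */
static uint64_t rtt_rto_us(const struct rtt_state *s)
{
	uint64_t rto = (s->srtt_us8 >> 3) + s->mdev_us4;

	return rto << s->backoff;
}

int main(void)
{
	struct rtt_state s = { 0, 0, 0 };
	const int samples[] = { 10000, 12000, 9000, 30000, 11000 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		rtt_sample(&s, samples[i]);
		printf("sample %d us -> srtt %u us, rto %llu us\n",
		       samples[i], (unsigned)(s.srtt_us8 >> 3),
		       (unsigned long long)rtt_rto_us(&s));
	}
	s.backoff = 2;   /* e.g. after two retransmissions without a reply */
	printf("backed-off rto: %llu us\n", (unsigned long long)rtt_rto_us(&s));
	return 0;
}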
peer              664 net/rxrpc/rxkad.c 	msg.msg_name	= &conn->params.peer->srx.transport;
peer              665 net/rxrpc/rxkad.c 	msg.msg_namelen	= conn->params.peer->srx.transport_len;
peer              699 net/rxrpc/rxkad.c 	conn->params.peer->last_tx_at = ktime_get_seconds();
peer              723 net/rxrpc/rxkad.c 	msg.msg_name	= &conn->params.peer->srx.transport;
peer              724 net/rxrpc/rxkad.c 	msg.msg_namelen	= conn->params.peer->srx.transport_len;
peer              757 net/rxrpc/rxkad.c 	conn->params.peer->last_tx_at = ktime_get_seconds();
peer               71 net/rxrpc/sendmsg.c 	rtt = READ_ONCE(call->peer->srtt_us) >> 3;
peer              274 net/rxrpc/sendmsg.c 		unsigned long resend_at = now + call->peer->rto_j;
peer              628 net/rxrpc/sendmsg.c 	rxrpc_put_peer(cp.peer);
peer              165 net/sctp/associola.c 	asoc->peer.rwnd = SCTP_DEFAULT_MAXWINDOW;
peer              201 net/sctp/associola.c 	INIT_LIST_HEAD(&asoc->peer.transport_addr_list);
peer              214 net/sctp/associola.c 	asoc->peer.sack_needed = 1;
peer              215 net/sctp/associola.c 	asoc->peer.sack_generation = 1;
peer              238 net/sctp/associola.c 	asoc->peer.ipv4_address = 1;
peer              240 net/sctp/associola.c 		asoc->peer.ipv6_address = 1;
peer              345 net/sctp/associola.c 	sctp_tsnmap_free(&asoc->peer.tsn_map);
peer              367 net/sctp/associola.c 	kfree(asoc->peer.cookie);
peer              368 net/sctp/associola.c 	kfree(asoc->peer.peer_random);
peer              369 net/sctp/associola.c 	kfree(asoc->peer.peer_chunks);
peer              370 net/sctp/associola.c 	kfree(asoc->peer.peer_hmacs);
peer              373 net/sctp/associola.c 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
peer              380 net/sctp/associola.c 	asoc->peer.transport_count = 0;
peer              428 net/sctp/associola.c 	if (asoc->peer.primary_path != NULL &&
peer              429 net/sctp/associola.c 	    asoc->peer.primary_path != transport)
peer              432 net/sctp/associola.c 	asoc->peer.primary_path = transport;
peer              435 net/sctp/associola.c 	memcpy(&asoc->peer.primary_addr, &transport->ipaddr,
peer              443 net/sctp/associola.c 		asoc->peer.active_path = transport;
peer              478 net/sctp/associola.c 			struct sctp_transport *peer)
peer              485 net/sctp/associola.c 		 __func__, asoc, &peer->ipaddr.sa);
peer              490 net/sctp/associola.c 	if (asoc->peer.retran_path == peer)
peer              494 net/sctp/associola.c 	list_del_rcu(&peer->transports);
peer              496 net/sctp/associola.c 	sctp_unhash_transport(peer);
peer              499 net/sctp/associola.c 	pos = asoc->peer.transport_addr_list.next;
peer              503 net/sctp/associola.c 	if (asoc->peer.primary_path == peer)
peer              505 net/sctp/associola.c 	if (asoc->peer.active_path == peer)
peer              506 net/sctp/associola.c 		asoc->peer.active_path = transport;
peer              507 net/sctp/associola.c 	if (asoc->peer.retran_path == peer)
peer              508 net/sctp/associola.c 		asoc->peer.retran_path = transport;
peer              509 net/sctp/associola.c 	if (asoc->peer.last_data_from == peer)
peer              510 net/sctp/associola.c 		asoc->peer.last_data_from = transport;
peer              513 net/sctp/associola.c 	    asoc->strreset_chunk->transport == peer) {
peer              523 net/sctp/associola.c 	if (asoc->init_last_sent_to == peer)
peer              531 net/sctp/associola.c 	if (asoc->shutdown_last_sent_to == peer)
peer              538 net/sctp/associola.c 	    asoc->addip_last_asconf->transport == peer)
peer              544 net/sctp/associola.c 	if (!list_empty(&peer->transmitted)) {
peer              545 net/sctp/associola.c 		struct sctp_transport *active = asoc->peer.active_path;
peer              548 net/sctp/associola.c 		list_for_each_entry(ch, &peer->transmitted,
peer              554 net/sctp/associola.c 		list_splice_tail_init(&peer->transmitted,
peer              568 net/sctp/associola.c 		if (ch->transport == peer)
peer              571 net/sctp/associola.c 	asoc->peer.transport_count--;
peer              573 net/sctp/associola.c 	sctp_transport_free(peer);
peer              583 net/sctp/associola.c 	struct sctp_transport *peer;
peer              596 net/sctp/associola.c 	if (0 == asoc->peer.port)
peer              597 net/sctp/associola.c 		asoc->peer.port = port;
peer              600 net/sctp/associola.c 	peer = sctp_assoc_lookup_paddr(asoc, addr);
peer              601 net/sctp/associola.c 	if (peer) {
peer              606 net/sctp/associola.c 		if (peer->state == SCTP_UNKNOWN) {
peer              607 net/sctp/associola.c 			peer->state = SCTP_ACTIVE;
peer              609 net/sctp/associola.c 		return peer;
peer              612 net/sctp/associola.c 	peer = sctp_transport_new(net, addr, gfp);
peer              613 net/sctp/associola.c 	if (!peer)
peer              616 net/sctp/associola.c 	sctp_transport_set_owner(peer, asoc);
peer              621 net/sctp/associola.c 	peer->hbinterval = asoc->hbinterval;
peer              624 net/sctp/associola.c 	peer->pathmaxrxt = asoc->pathmaxrxt;
peer              627 net/sctp/associola.c 	peer->pf_retrans = asoc->pf_retrans;
peer              632 net/sctp/associola.c 	peer->sackdelay = asoc->sackdelay;
peer              633 net/sctp/associola.c 	peer->sackfreq = asoc->sackfreq;
peer              639 net/sctp/associola.c 			peer->flowlabel = ntohl(info & IPV6_FLOWLABEL_MASK);
peer              640 net/sctp/associola.c 			peer->flowlabel |= SCTP_FLOWLABEL_SET_MASK;
peer              642 net/sctp/associola.c 			peer->flowlabel = asoc->flowlabel;
peer              645 net/sctp/associola.c 	peer->dscp = asoc->dscp;
peer              650 net/sctp/associola.c 	peer->param_flags = asoc->param_flags;
peer              653 net/sctp/associola.c 	sctp_transport_route(peer, NULL, sp);
peer              661 net/sctp/associola.c 				  min_t(int, peer->pathmtu, asoc->pathmtu) :
peer              662 net/sctp/associola.c 				  peer->pathmtu);
peer              664 net/sctp/associola.c 	peer->pmtu_pending = 0;
peer              669 net/sctp/associola.c 	sctp_packet_init(&peer->packet, peer, asoc->base.bind_addr.port,
peer              670 net/sctp/associola.c 			 asoc->peer.port);
peer              682 net/sctp/associola.c 	peer->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
peer              688 net/sctp/associola.c 	peer->ssthresh = SCTP_DEFAULT_MAXWINDOW;
peer              690 net/sctp/associola.c 	peer->partial_bytes_acked = 0;
peer              691 net/sctp/associola.c 	peer->flight_size = 0;
peer              692 net/sctp/associola.c 	peer->burst_limited = 0;
peer              695 net/sctp/associola.c 	peer->rto = asoc->rto_initial;
peer              696 net/sctp/associola.c 	sctp_max_rto(asoc, peer);
peer              699 net/sctp/associola.c 	peer->state = peer_state;
peer              702 net/sctp/associola.c 	if (sctp_hash_transport(peer)) {
peer              703 net/sctp/associola.c 		sctp_transport_free(peer);
peer              708 net/sctp/associola.c 	list_add_tail_rcu(&peer->transports, &asoc->peer.transport_addr_list);
peer              709 net/sctp/associola.c 	asoc->peer.transport_count++;
peer              712 net/sctp/associola.c 	if (!asoc->peer.primary_path) {
peer              713 net/sctp/associola.c 		sctp_assoc_set_primary(asoc, peer);
peer              714 net/sctp/associola.c 		asoc->peer.retran_path = peer;
peer              717 net/sctp/associola.c 	if (asoc->peer.active_path == asoc->peer.retran_path &&
peer              718 net/sctp/associola.c 	    peer->state != SCTP_UNCONFIRMED) {
peer              719 net/sctp/associola.c 		asoc->peer.retran_path = peer;
peer              722 net/sctp/associola.c 	return peer;
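sctp_assoc_add_peer() above seeds the new transport's congestion state: cwnd = min(4*MTU, max(2*MTU, 4380)) per RFC 4960, ssthresh at the maximum advertised window, and the RTO at the association's initial value. The cwnd rule as a tiny standalone function:

#include <stdint.h>
#include <stdio.h>

/* RFC 4960-style initial congestion window:
 * min(4*MTU, max(2*MTU, 4380 bytes)). */
static uint32_t sctp_initial_cwnd(uint32_t pathmtu)
{
	uint32_t two = 2 * pathmtu, four = 4 * pathmtu;
	uint32_t floor = two > 4380 ? two : 4380;

	return four < floor ? four : floor;
}

int main(void)
{
	const uint32_t mtus[] = { 512, 1500, 9000 };

	for (unsigned int i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++)
		printf("pmtu %u -> initial cwnd %u\n",
		       mtus[i], sctp_initial_cwnd(mtus[i]));
	return 0;
}

For a 1500-byte path MTU this gives 4380 bytes; small MTUs are governed by the 4*MTU term and large ones by the 2*MTU term.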
peer              733 net/sctp/associola.c 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
peer              752 net/sctp/associola.c 	list_for_each_entry(t, &asoc->peer.transport_addr_list,
peer              768 net/sctp/associola.c 	list_for_each_entry_safe(t, temp, &asoc->peer.transport_addr_list,
peer              946 net/sctp/associola.c 	active = asoc->peer.active_path;
peer              958 net/sctp/associola.c 	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
peer             1037 net/sctp/associola.c 			asoc->peer.last_data_from = chunk->transport;
peer             1108 net/sctp/associola.c 	asoc->peer.rwnd = new->peer.rwnd;
peer             1109 net/sctp/associola.c 	asoc->peer.sack_needed = new->peer.sack_needed;
peer             1110 net/sctp/associola.c 	asoc->peer.auth_capable = new->peer.auth_capable;
peer             1111 net/sctp/associola.c 	asoc->peer.i = new->peer.i;
peer             1113 net/sctp/associola.c 	if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
peer             1114 net/sctp/associola.c 			      asoc->peer.i.initial_tsn, GFP_ATOMIC))
peer             1118 net/sctp/associola.c 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
peer             1158 net/sctp/associola.c 		list_for_each_entry(trans, &new->peer.transport_addr_list,
peer             1179 net/sctp/associola.c 	kfree(asoc->peer.peer_random);
peer             1180 net/sctp/associola.c 	asoc->peer.peer_random = new->peer.peer_random;
peer             1181 net/sctp/associola.c 	new->peer.peer_random = NULL;
peer             1183 net/sctp/associola.c 	kfree(asoc->peer.peer_chunks);
peer             1184 net/sctp/associola.c 	asoc->peer.peer_chunks = new->peer.peer_chunks;
peer             1185 net/sctp/associola.c 	new->peer.peer_chunks = NULL;
peer             1187 net/sctp/associola.c 	kfree(asoc->peer.peer_hmacs);
peer             1188 net/sctp/associola.c 	asoc->peer.peer_hmacs = new->peer.peer_hmacs;
peer             1189 net/sctp/associola.c 	new->peer.peer_hmacs = NULL;
peer             1275 net/sctp/associola.c 	struct sctp_transport *trans = asoc->peer.retran_path;
peer             1279 net/sctp/associola.c 	if (asoc->peer.transport_count == 1)
peer             1284 net/sctp/associola.c 	if (asoc->peer.active_path == asoc->peer.retran_path &&
peer             1285 net/sctp/associola.c 	    asoc->peer.active_path->state == SCTP_ACTIVE)
peer             1292 net/sctp/associola.c 		if (&trans->transports == &asoc->peer.transport_addr_list)
peer             1301 net/sctp/associola.c 		if (trans == asoc->peer.retran_path)
peer             1305 net/sctp/associola.c 	asoc->peer.retran_path = trans_next;
peer             1308 net/sctp/associola.c 		 __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
peer             1317 net/sctp/associola.c 	list_for_each_entry(trans, &asoc->peer.transport_addr_list,
peer             1351 net/sctp/associola.c 	if ((asoc->peer.primary_path->state == SCTP_ACTIVE ||
peer             1352 net/sctp/associola.c 	     asoc->peer.primary_path->state == SCTP_UNKNOWN) &&
peer             1353 net/sctp/associola.c 	     asoc->peer.primary_path != trans_pri) {
peer             1355 net/sctp/associola.c 		trans_pri = asoc->peer.primary_path;
peer             1369 net/sctp/associola.c 		trans_pri = sctp_trans_elect_best(asoc->peer.active_path, trans_pf);
peer             1374 net/sctp/associola.c 	asoc->peer.active_path = trans_pri;
peer             1375 net/sctp/associola.c 	asoc->peer.retran_path = trans_sec;
peer             1387 net/sctp/associola.c 		return asoc->peer.active_path;
peer             1389 net/sctp/associola.c 		if (last_sent_to == asoc->peer.retran_path)
peer             1392 net/sctp/associola.c 		return asoc->peer.retran_path;
peer             1433 net/sctp/associola.c 	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports) {
peer             1515 net/sctp/associola.c 		asoc->peer.sack_needed = 0;
peer             1578 net/sctp/associola.c 	if (asoc->peer.ipv4_address)
peer             1580 net/sctp/associola.c 	if (asoc->peer.ipv6_address)
peer             1681 net/sctp/associola.c 				htonl(asoc->peer.addip_serial))
peer              236 net/sctp/auth.c 	return sctp_auth_make_key_vector(asoc->peer.peer_random,
peer              237 net/sctp/auth.c 					 asoc->peer.peer_chunks,
peer              238 net/sctp/auth.c 					 asoc->peer.peer_hmacs,
peer              392 net/sctp/auth.c 	if (!asoc->peer.auth_capable)
peer              537 net/sctp/auth.c 	hmacs = asoc->peer.peer_hmacs;
peer              678 net/sctp/auth.c 	if (!asoc->peer.auth_capable)
peer              681 net/sctp/auth.c 	return __sctp_auth_cid(chunk, asoc->peer.peer_chunks);
peer              690 net/sctp/auth.c 	if (!asoc->peer.auth_capable)
peer              835 net/sctp/auth.c 		if (!asoc->peer.auth_capable)
peer              884 net/sctp/auth.c 		if (!asoc->peer.auth_capable)
peer              924 net/sctp/auth.c 		if (!asoc->peer.auth_capable)
peer              967 net/sctp/auth.c 		if (!asoc->peer.auth_capable)
peer              167 net/sctp/chunk.c 	if (asoc->peer.prsctp_capable && sinfo->sinfo_timetolive &&
peer              304 net/sctp/chunk.c 	if (!chunk->asoc->peer.prsctp_capable)
peer               32 net/sctp/diag.c 	struct timer_list *t3_rtx = &asoc->peer.primary_path->T3_rtx_timer;
peer               36 net/sctp/diag.c 	paddr = asoc->peer.primary_path->ipaddr;
peer               37 net/sctp/diag.c 	dst = asoc->peer.primary_path->dst;
peer               41 net/sctp/diag.c 	r->id.idiag_dport = htons(asoc->peer.port);
peer              106 net/sctp/diag.c 			   addrlen * asoc->peer.transport_count);
peer              111 net/sctp/diag.c 	list_for_each_entry(from, &asoc->peer.transport_addr_list,
peer              240 net/sctp/diag.c 		+ nla_total_size(addrlen * asoc->peer.transport_count)
peer              314 net/sctp/diag.c 		if (r->id.idiag_dport != htons(assoc->peer.port) &&
peer              385 net/sctp/endpointola.c 			asoc->peer.last_data_from = chunk->transport;
peer               61 net/sctp/input.c 					const union sctp_addr *peer,
peer             1030 net/sctp/input.c 					const union sctp_addr *peer,
peer             1036 net/sctp/input.c 	t = sctp_addrs_lookup_transport(net, local, peer);
peer              745 net/sctp/ipv6.c 	sctp_v6_to_sk_daddr(&asoc->peer.primary_addr, newsk);
peer              817 net/sctp/ipv6.c 	paddr = &asoc->peer.primary_addr;
peer              821 net/sctp/ipv6.c 		addr->v4.sin_port = htons(asoc->peer.port);
peer              830 net/sctp/ipv6.c 		addr->v6.sin6_port = htons(asoc->peer.port);
peer             1000 net/sctp/ipv6.c 			int peer)
peer             1004 net/sctp/ipv6.c 	rc = inet6_getname(sock, uaddr, peer);
peer              274 net/sctp/output.c 			    pkt->transport->asoc->peer.sack_generation)
peer              288 net/sctp/output.c 				asoc->peer.sack_needed = 0;
peer              620 net/sctp/output.c 		if (asoc->peer.last_sent_to != tp)
peer              621 net/sctp/output.c 			asoc->peer.last_sent_to = tp;
peer              669 net/sctp/output.c 	rwnd = asoc->peer.rwnd;
peer              738 net/sctp/output.c 	u32 rwnd = asoc->peer.rwnd;
peer              752 net/sctp/output.c 	asoc->peer.rwnd = rwnd;
peer              212 net/sctp/outqueue.c 	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
peer              299 net/sctp/outqueue.c 		if (chunk->asoc->peer.prsctp_capable &&
peer              424 net/sctp/outqueue.c 	if (!asoc->peer.prsctp_capable || !asoc->sent_cnt_removable)
peer              433 net/sctp/outqueue.c 	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
peer              473 net/sctp/outqueue.c 				q->asoc->peer.rwnd += sctp_data_size(chunk);
peer              493 net/sctp/outqueue.c 			q->asoc->peer.rwnd += sctp_data_size(chunk);
peer              545 net/sctp/outqueue.c 		if (transport == transport->asoc->peer.retran_path)
peer              765 net/sctp/outqueue.c 	const __u16 dport = asoc->peer.port;
peer              766 net/sctp/outqueue.c 	const __u32 vtag = asoc->peer.i.init_tag;
peer              817 net/sctp/outqueue.c 			new_transport = ctx->asoc->peer.active_path;
peer              842 net/sctp/outqueue.c 				new_transport = ctx->asoc->peer.active_path;
peer              859 net/sctp/outqueue.c 				   ctx->asoc->peer.i.init_tag,
peer              860 net/sctp/outqueue.c 				   ctx->asoc->peer.ecn_capable);
peer              976 net/sctp/outqueue.c 	if (ctx->asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
peer              979 net/sctp/outqueue.c 	if (ctx->transport != ctx->asoc->peer.retran_path) {
peer              981 net/sctp/outqueue.c 		ctx->transport = ctx->asoc->peer.retran_path;
peer              988 net/sctp/outqueue.c 		sctp_packet_config(ctx->packet, ctx->asoc->peer.i.init_tag,
peer              989 net/sctp/outqueue.c 				   ctx->asoc->peer.ecn_capable);
peer             1233 net/sctp/outqueue.c 	struct sctp_transport *primary = asoc->peer.primary_path;
peer             1239 net/sctp/outqueue.c 	transport_list = &asoc->peer.transport_addr_list;
peer             1342 net/sctp/outqueue.c 			if (asoc->peer.prsctp_capable &&
peer             1355 net/sctp/outqueue.c 	asoc->peer.zero_window_announced = !sack_a_rwnd;
peer             1363 net/sctp/outqueue.c 	asoc->peer.rwnd = sack_a_rwnd;
peer             1485 net/sctp/outqueue.c 					    q->asoc->peer.primary_path->cacc.
peer             1636 net/sctp/outqueue.c 			if (!q->asoc->peer.rwnd &&
peer             1683 net/sctp/outqueue.c 	struct sctp_transport *primary = asoc->peer.primary_path;
peer             1797 net/sctp/outqueue.c 	if (!asoc->peer.prsctp_capable)
peer               81 net/sctp/proc.c 	struct sctp_transport *peer;
peer               88 net/sctp/proc.c 		peer = asoc->peer.primary_path;
peer               89 net/sctp/proc.c 		if (unlikely(peer == NULL)) {
peer               94 net/sctp/proc.c 		primary = &peer->saddr;
peer              119 net/sctp/proc.c 	primary = &assoc->peer.primary_addr;
peer              120 net/sctp/proc.c 	list_for_each_entry_rcu(transport, &assoc->peer.transport_addr_list,
peer              263 net/sctp/proc.c 		   assoc->peer.port);
peer              306 net/sctp/proc.c 	list_for_each_entry_rcu(tsp, &assoc->peer.transport_addr_list,
peer              587 net/sctp/protocol.c 	newinet->inet_daddr = asoc->peer.primary_addr.v4.sin_addr.s_addr;
peer              903 net/sctp/protocol.c 		sinfrom = &asoc->peer.primary_addr.v4;
peer              904 net/sctp/protocol.c 		sin->sin_port = htons(asoc->peer.port);
peer              423 net/sctp/sm_make_chunk.c 	if (asoc->peer.ecn_capable)
peer              426 net/sctp/sm_make_chunk.c 	if (asoc->peer.prsctp_capable)
peer              429 net/sctp/sm_make_chunk.c 	if (asoc->peer.asconf_capable) {
peer              435 net/sctp/sm_make_chunk.c 	if (asoc->peer.reconf_capable) {
peer              443 net/sctp/sm_make_chunk.c 	if (asoc->peer.intl_capable) {
peer              448 net/sctp/sm_make_chunk.c 	if (asoc->peer.auth_capable) {
peer              494 net/sctp/sm_make_chunk.c 	if (asoc->peer.ecn_capable)
peer              502 net/sctp/sm_make_chunk.c 	if (asoc->peer.prsctp_capable)
peer              512 net/sctp/sm_make_chunk.c 	if (asoc->peer.auth_capable) {
peer              574 net/sctp/sm_make_chunk.c 	cookie = asoc->peer.cookie;
peer              575 net/sctp/sm_make_chunk.c 	cookie_len = asoc->peer.cookie_len;
peer              756 net/sctp/sm_make_chunk.c 	struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
peer              819 net/sctp/sm_make_chunk.c 	retval->transport = asoc->peer.last_data_from;
peer              843 net/sctp/sm_make_chunk.c 	if (++asoc->peer.sack_generation == 0) {
peer              844 net/sctp/sm_make_chunk.c 		list_for_each_entry(trans, &asoc->peer.transport_addr_list,
peer              847 net/sctp/sm_make_chunk.c 		asoc->peer.sack_generation = 1;
peer              862 net/sctp/sm_make_chunk.c 		ctsn = sctp_tsnmap_get_ctsn(&chunk->asoc->peer.tsn_map);
peer              864 net/sctp/sm_make_chunk.c 		ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
peer             1655 net/sctp/sm_make_chunk.c 	cookie->c.prsctp_capable = asoc->peer.prsctp_capable;
peer             1658 net/sctp/sm_make_chunk.c 	cookie->c.adaptation_ind = asoc->peer.adaptation_ind;
peer             1830 net/sctp/sm_make_chunk.c 	retval->peer.port = ntohs(chunk->sctp_hdr->source);
peer             1853 net/sctp/sm_make_chunk.c 	retval->peer.prsctp_capable = retval->c.prsctp_capable;
peer             1854 net/sctp/sm_make_chunk.c 	retval->peer.adaptation_ind = retval->c.adaptation_ind;
peer             2018 net/sctp/sm_make_chunk.c 				asoc->peer.reconf_capable = 1;
peer             2022 net/sctp/sm_make_chunk.c 				asoc->peer.prsctp_capable = 1;
peer             2029 net/sctp/sm_make_chunk.c 				asoc->peer.auth_capable = 1;
peer             2034 net/sctp/sm_make_chunk.c 				asoc->peer.asconf_capable = 1;
peer             2038 net/sctp/sm_make_chunk.c 				asoc->peer.intl_capable = 1;
peer             2360 net/sctp/sm_make_chunk.c 	if (asoc->peer.auth_capable && (!asoc->peer.peer_random ||
peer             2361 net/sctp/sm_make_chunk.c 					!asoc->peer.peer_hmacs))
peer             2362 net/sctp/sm_make_chunk.c 		asoc->peer.auth_capable = 0;
peer             2371 net/sctp/sm_make_chunk.c 	     (asoc->peer.asconf_capable && !asoc->peer.auth_capable)) {
peer             2372 net/sctp/sm_make_chunk.c 		asoc->peer.addip_disabled_mask |= (SCTP_PARAM_ADD_IP |
peer             2375 net/sctp/sm_make_chunk.c 		asoc->peer.asconf_capable = 0;
peer             2380 net/sctp/sm_make_chunk.c 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
peer             2390 net/sctp/sm_make_chunk.c 	asoc->peer.i.init_tag =
peer             2392 net/sctp/sm_make_chunk.c 	asoc->peer.i.a_rwnd =
peer             2394 net/sctp/sm_make_chunk.c 	asoc->peer.i.num_outbound_streams =
peer             2396 net/sctp/sm_make_chunk.c 	asoc->peer.i.num_inbound_streams =
peer             2398 net/sctp/sm_make_chunk.c 	asoc->peer.i.initial_tsn =
peer             2401 net/sctp/sm_make_chunk.c 	asoc->strreset_inseq = asoc->peer.i.initial_tsn;
peer             2419 net/sctp/sm_make_chunk.c 	asoc->c.peer_vtag = asoc->peer.i.init_tag;
peer             2422 net/sctp/sm_make_chunk.c 	asoc->peer.rwnd = asoc->peer.i.a_rwnd;
peer             2428 net/sctp/sm_make_chunk.c 	list_for_each_entry(transport, &asoc->peer.transport_addr_list,
peer             2430 net/sctp/sm_make_chunk.c 		transport->ssthresh = asoc->peer.i.a_rwnd;
peer             2434 net/sctp/sm_make_chunk.c 	if (!sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
peer             2435 net/sctp/sm_make_chunk.c 				asoc->peer.i.initial_tsn, gfp))
peer             2466 net/sctp/sm_make_chunk.c 	asoc->peer.addip_serial = asoc->peer.i.initial_tsn - 1;
peer             2471 net/sctp/sm_make_chunk.c 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
peer             2525 net/sctp/sm_make_chunk.c 		af->from_addr_param(&addr, param.addr, htons(asoc->peer.port), 0);
peer             2552 net/sctp/sm_make_chunk.c 		asoc->peer.ipv4_address = 0;
peer             2553 net/sctp/sm_make_chunk.c 		asoc->peer.ipv6_address = 0;
peer             2559 net/sctp/sm_make_chunk.c 			asoc->peer.ipv6_address = 1;
peer             2561 net/sctp/sm_make_chunk.c 			asoc->peer.ipv4_address = 1;
peer             2571 net/sctp/sm_make_chunk.c 				asoc->peer.ipv4_address = 1;
peer             2576 net/sctp/sm_make_chunk.c 					asoc->peer.ipv6_address = 1;
peer             2580 net/sctp/sm_make_chunk.c 				asoc->peer.hostname_address = 1;
peer             2590 net/sctp/sm_make_chunk.c 		asoc->peer.cookie_len =
peer             2592 net/sctp/sm_make_chunk.c 		kfree(asoc->peer.cookie);
peer             2593 net/sctp/sm_make_chunk.c 		asoc->peer.cookie = kmemdup(param.cookie->body, asoc->peer.cookie_len, gfp);
peer             2594 net/sctp/sm_make_chunk.c 		if (!asoc->peer.cookie)
peer             2608 net/sctp/sm_make_chunk.c 			asoc->peer.ecn_capable = 1;
peer             2616 net/sctp/sm_make_chunk.c 		asoc->peer.adaptation_ind = ntohl(param.aind->adaptation_ind);
peer             2630 net/sctp/sm_make_chunk.c 				    htons(asoc->peer.port), 0);
peer             2651 net/sctp/sm_make_chunk.c 			asoc->peer.prsctp_capable = 1;
peer             2662 net/sctp/sm_make_chunk.c 		kfree(asoc->peer.peer_random);
peer             2663 net/sctp/sm_make_chunk.c 		asoc->peer.peer_random = kmemdup(param.p,
peer             2665 net/sctp/sm_make_chunk.c 		if (!asoc->peer.peer_random) {
peer             2676 net/sctp/sm_make_chunk.c 		kfree(asoc->peer.peer_hmacs);
peer             2677 net/sctp/sm_make_chunk.c 		asoc->peer.peer_hmacs = kmemdup(param.p,
peer             2679 net/sctp/sm_make_chunk.c 		if (!asoc->peer.peer_hmacs) {
peer             2692 net/sctp/sm_make_chunk.c 		kfree(asoc->peer.peer_chunks);
peer             2693 net/sctp/sm_make_chunk.c 		asoc->peer.peer_chunks = kmemdup(param.p,
peer             2695 net/sctp/sm_make_chunk.c 		if (!asoc->peer.peer_chunks)
peer             3020 net/sctp/sm_make_chunk.c 	struct sctp_transport *peer;
peer             3033 net/sctp/sm_make_chunk.c 		if (!asoc->peer.ipv6_address)
peer             3037 net/sctp/sm_make_chunk.c 		if (!asoc->peer.ipv4_address)
peer             3048 net/sctp/sm_make_chunk.c 	af->from_addr_param(&addr, addr_param, htons(asoc->peer.port), 0);
peer             3080 net/sctp/sm_make_chunk.c 		peer = sctp_assoc_add_peer(asoc, &addr, GFP_ATOMIC, SCTP_UNCONFIRMED);
peer             3081 net/sctp/sm_make_chunk.c 		if (!peer)
peer             3085 net/sctp/sm_make_chunk.c 		sctp_transport_reset_hb_timer(peer);
peer             3086 net/sctp/sm_make_chunk.c 		asoc->new_transport = peer;
peer             3094 net/sctp/sm_make_chunk.c 		if (asoc->peer.transport_count == 1)
peer             3124 net/sctp/sm_make_chunk.c 		peer = sctp_assoc_lookup_paddr(asoc, &addr);
peer             3125 net/sctp/sm_make_chunk.c 		if (!peer)
peer             3128 net/sctp/sm_make_chunk.c 		sctp_assoc_rm_peer(asoc, peer);
peer             3145 net/sctp/sm_make_chunk.c 		peer = sctp_assoc_lookup_paddr(asoc, &addr);
peer             3146 net/sctp/sm_make_chunk.c 		if (!peer)
peer             3149 net/sctp/sm_make_chunk.c 		sctp_assoc_set_primary(asoc, peer);
peer             3296 net/sctp/sm_make_chunk.c 	asoc->peer.addip_serial++;
peer             3338 net/sctp/sm_make_chunk.c 		list_for_each_entry(transport, &asoc->peer.transport_addr_list,
peer             3352 net/sctp/sm_make_chunk.c 		list_for_each_entry(transport, &asoc->peer.transport_addr_list,
peer             3485 net/sctp/sm_make_chunk.c 			asoc->peer.addip_disabled_mask |=
peer             3506 net/sctp/sm_make_chunk.c 		sctp_transport_immediate_rtx(asoc->peer.primary_path);
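
Several of the net/sctp/sm_make_chunk.c hits above repeat one ownership pattern whenever a peer parameter (cookie, random, HMAC list, chunk list) is stored: free the previously saved copy, duplicate the incoming buffer with kmemdup(), and bail out if the allocation fails. The following is only a minimal user-space sketch of that replace-owned-copy pattern; memdup() and struct peer_state are illustrative stand-ins, not the real kernel helpers or association fields.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* User-space stand-in for kmemdup(): allocate and copy, or return NULL. */
static void *memdup(const void *src, size_t len)
{
	void *p = malloc(len);

	if (p)
		memcpy(p, src, len);
	return p;
}

struct peer_state {
	void *cookie;		/* owned copy of the peer's cookie */
	size_t cookie_len;
};

/* Replace the stored cookie with a copy of the new one, mirroring the
 * kfree() + kmemdup() + NULL-check sequence visible in the hits above. */
static int set_peer_cookie(struct peer_state *ps, const void *cookie, size_t len)
{
	free(ps->cookie);
	ps->cookie = memdup(cookie, len);
	if (!ps->cookie) {
		ps->cookie_len = 0;
		return -ENOMEM;
	}
	ps->cookie_len = len;
	return 0;
}

int main(void)
{
	struct peer_state ps = { 0 };

	set_peer_cookie(&ps, "first", 5);
	set_peer_cookie(&ps, "second-cookie", 13);
	printf("stored %zu cookie bytes\n", ps.cookie_len);
	free(ps.cookie);
	return 0;
}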
peer              141 net/sctp/sm_sideeffect.c 	struct sctp_transport *trans = asoc->peer.last_data_from;
peer              149 net/sctp/sm_sideeffect.c 		asoc->peer.sack_needed = 1;
peer              151 net/sctp/sm_sideeffect.c 	ctsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
peer              152 net/sctp/sm_sideeffect.c 	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
peer              165 net/sctp/sm_sideeffect.c 		asoc->peer.sack_needed = 1;
peer              175 net/sctp/sm_sideeffect.c 	if (!asoc->peer.sack_needed) {
peer              176 net/sctp/sm_sideeffect.c 		asoc->peer.sack_cnt++;
peer              185 net/sctp/sm_sideeffect.c 			if (asoc->peer.sack_cnt >= trans->sackfreq - 1)
peer              186 net/sctp/sm_sideeffect.c 				asoc->peer.sack_needed = 1;
peer              192 net/sctp/sm_sideeffect.c 			if (asoc->peer.sack_cnt >= asoc->sackfreq - 1)
peer              193 net/sctp/sm_sideeffect.c 				asoc->peer.sack_needed = 1;
peer              212 net/sctp/sm_sideeffect.c 		asoc->peer.sack_needed = 0;
peer              213 net/sctp/sm_sideeffect.c 		asoc->peer.sack_cnt = 0;
peer              685 net/sctp/sm_sideeffect.c 	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
peer              696 net/sctp/sm_sideeffect.c 	list_for_each_entry(t, &asoc->peer.transport_addr_list,
peer              709 net/sctp/sm_sideeffect.c 	list_for_each_entry(t, &asoc->peer.transport_addr_list,
peer              783 net/sctp/sm_sideeffect.c 	if (was_unconfirmed && asoc->peer.transport_count == 1)
peer              887 net/sctp/sm_sideeffect.c 		kfree(asoc->peer.cookie);
peer              888 net/sctp/sm_sideeffect.c 		asoc->peer.cookie = NULL;
peer              982 net/sctp/sm_sideeffect.c 				if (asoc->peer.asconf_capable == 0)
peer              985 net/sctp/sm_sideeffect.c 				asoc->peer.asconf_capable = 0;
peer             1009 net/sctp/sm_sideeffect.c 	list_for_each_safe(pos, temp, &asoc->peer.transport_addr_list) {
peer             1012 net/sctp/sm_sideeffect.c 					 &asoc->peer.primary_addr)) {
peer             1299 net/sctp/sm_sideeffect.c 			BUG_ON(asoc->peer.primary_path == NULL);
peer             1328 net/sctp/sm_sideeffect.c 			error = sctp_tsnmap_mark(&asoc->peer.tsn_map,
peer             1416 net/sctp/sm_sideeffect.c 			if ((asoc->peer.retran_path !=
peer             1417 net/sctp/sm_sideeffect.c 			     asoc->peer.primary_path) &&
peer             1591 net/sctp/sm_sideeffect.c 			list_for_each_entry(t, &asoc->peer.transport_addr_list,
peer             1618 net/sctp/sm_sideeffect.c 			list_for_each_entry(t, &asoc->peer.transport_addr_list,
peer             1625 net/sctp/sm_sideeffect.c 			sctp_tsnmap_mark_dup(&asoc->peer.tsn_map,
peer             1676 net/sctp/sm_sideeffect.c 			sackh.a_rwnd = htonl(asoc->peer.rwnd +
peer             1720 net/sctp/sm_sideeffect.c 			asoc->peer.i.init_tag = 0;
peer             1729 net/sctp/sm_sideeffect.c 			t = asoc->peer.retran_path;
peer             1730 net/sctp/sm_sideeffect.c 			asoc->peer.retran_path = asoc->peer.primary_path;
peer             1733 net/sctp/sm_sideeffect.c 			asoc->peer.retran_path = t;
peer             1754 net/sctp/sm_sideeffect.c 			asoc->peer.i.init_tag = cmd->obj.u32;
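
The net/sctp/sm_sideeffect.c hits around lines 141-213 show the delayed-SACK bookkeeping: packets bump peer.sack_cnt, a SACK is forced once the count reaches sackfreq - 1 (per-transport or per-association), and both fields reset when a SACK goes out. Below is a simplified single-association model of that counting logic, with illustrative names rather than the real struct sctp_association fields.

#include <stdbool.h>
#include <stdio.h>

struct assoc_model {
	unsigned int sack_cnt;
	unsigned int sackfreq;	/* e.g. 2 means "acknowledge every 2nd packet" */
	bool sack_needed;
};

/* Returns true when this packet must be SACKed immediately; otherwise the
 * packet is only counted and the (not modelled) delayed-SACK timer covers it. */
static bool packet_received(struct assoc_model *a)
{
	if (a->sack_needed) {
		/* SACK now, then start counting again. */
		a->sack_needed = false;
		a->sack_cnt = 0;
		return true;
	}

	/* Count the packet; after sackfreq - 1 packets, mark that the
	 * next one must be SACKed, as in the hits above. */
	a->sack_cnt++;
	if (a->sack_cnt >= a->sackfreq - 1)
		a->sack_needed = true;
	return false;
}

int main(void)
{
	struct assoc_model a = { .sackfreq = 2 };

	for (int i = 1; i <= 4; i++)
		printf("packet %d: %s\n", i,
		       packet_received(&a) ? "SACK now" : "delay");
	return 0;	/* packets 2 and 4 get an immediate SACK */
}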
peer              637 net/sctp/sm_statefuns.c 	if (!net->sctp.auth_enable || !asoc->peer.auth_capable)
peer              817 net/sctp/sm_statefuns.c 	if (new_asoc->peer.adaptation_ind) {
peer              824 net/sctp/sm_statefuns.c 	if (!new_asoc->peer.auth_capable) {
peer              965 net/sctp/sm_statefuns.c 	if (asoc->peer.adaptation_ind) {
peer              974 net/sctp/sm_statefuns.c 	if (!asoc->peer.auth_capable) {
peer             1348 net/sctp/sm_statefuns.c 	list_for_each_entry(new_addr, &new_asoc->peer.transport_addr_list,
peer             1350 net/sctp/sm_statefuns.c 		if (!list_has_sctp_addr(&asoc->peer.transport_addr_list,
peer             1952 net/sctp/sm_statefuns.c 	if (asoc->peer.adaptation_ind)
peer             1955 net/sctp/sm_statefuns.c 	if (!asoc->peer.auth_capable)
peer             2047 net/sctp/sm_statefuns.c 		if (asoc->peer.adaptation_ind) {
peer             2055 net/sctp/sm_statefuns.c 		if (!asoc->peer.auth_capable) {
peer             2497 net/sctp/sm_statefuns.c 			SCTP_TRANSPORT(asoc->peer.primary_path));
peer             3738 net/sctp/sm_statefuns.c 	if (!asoc->peer.asconf_capable ||
peer             3760 net/sctp/sm_statefuns.c 	if (serial == asoc->peer.addip_serial + 1) {
peer             3779 net/sctp/sm_statefuns.c 	} else if (serial < asoc->peer.addip_serial + 1) {
peer             3881 net/sctp/sm_statefuns.c 	if (!asoc->peer.asconf_capable ||
peer             4066 net/sctp/sm_statefuns.c 	if (!asoc->peer.prsctp_capable)
peer             4086 net/sctp/sm_statefuns.c 	if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
peer             4133 net/sctp/sm_statefuns.c 	if (!asoc->peer.prsctp_capable)
peer             4153 net/sctp/sm_statefuns.c 	if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
peer             4289 net/sctp/sm_statefuns.c 	if (!asoc->peer.auth_capable)
peer             4575 net/sctp/sm_statefuns.c 		    !asoc->peer.i.init_tag) {
peer             5643 net/sctp/sm_statefuns.c 		if (asoc->peer.zero_window_announced &&
peer             6244 net/sctp/sm_statefuns.c 			vtag = asoc->peer.i.init_tag;
peer             6329 net/sctp/sm_statefuns.c 	struct sctp_tsnmap *map = (struct sctp_tsnmap *)&asoc->peer.tsn_map;
peer             6358 net/sctp/sm_statefuns.c 	if (asoc->peer.ecn_capable && !chunk->ecn_ce_done) {
peer             6369 net/sctp/sm_statefuns.c 	tmp = sctp_tsnmap_check(&asoc->peer.tsn_map, tsn);
peer              173 net/sctp/socket.c 	list_for_each_entry(t, &asoc->peer.transport_addr_list, transports)
peer              567 net/sctp/socket.c 		if (!asoc->peer.asconf_capable)
peer              570 net/sctp/socket.c 		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_ADD_IP)
peer              628 net/sctp/socket.c 			    &asoc->peer.transport_addr_list, transports) {
peer              631 net/sctp/socket.c 				trans->ssthresh = asoc->peer.i.a_rwnd;
peer              771 net/sctp/socket.c 		if (!asoc->peer.asconf_capable)
peer              774 net/sctp/socket.c 		if (asoc->peer.addip_disabled_mask & SCTP_PARAM_DEL_IP)
peer              877 net/sctp/socket.c 		list_for_each_entry(transport, &asoc->peer.transport_addr_list,
peer             1208 net/sctp/socket.c 		if (asoc->peer.port != ntohs(daddr->v4.sin_port))
peer             1233 net/sctp/socket.c 	inet_sk(sk)->inet_dport = htons(asoc->peer.port);
peer             1727 net/sctp/socket.c 			daddr->v4.sin_port = htons(asoc->peer.port);
peer             1738 net/sctp/socket.c 			daddr->v6.sin6_port = htons(asoc->peer.port);
peer             2583 net/sctp/socket.c 			list_for_each_entry(t, &asoc->peer.transport_addr_list,
peer             2608 net/sctp/socket.c 			list_for_each_entry(t, &asoc->peer.transport_addr_list,
peer             2699 net/sctp/socket.c 		list_for_each_entry(trans, &asoc->peer.transport_addr_list,
peer             2739 net/sctp/socket.c 	list_for_each_entry(trans, &asoc->peer.transport_addr_list,
peer             3196 net/sctp/socket.c 			list_for_each_entry(peer_addr, &asoc->peer.transport_addr_list,
peer             3369 net/sctp/socket.c 	if (!asoc->peer.asconf_capable)
peer             3372 net/sctp/socket.c 	if (asoc->peer.addip_disabled_mask & SCTP_PARAM_SET_PRIMARY)
peer             3992 net/sctp/socket.c 		list_for_each_entry(trans, &asoc->peer.transport_addr_list,
peer             5237 net/sctp/socket.c 	info->sctpi_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
peer             5247 net/sctp/socket.c 	info->sctpi_peer_rwnd = asoc->peer.rwnd;
peer             5250 net/sctp/socket.c 	mask = asoc->peer.ecn_capable << 1;
peer             5251 net/sctp/socket.c 	mask = (mask | asoc->peer.ipv4_address) << 1;
peer             5252 net/sctp/socket.c 	mask = (mask | asoc->peer.ipv6_address) << 1;
peer             5253 net/sctp/socket.c 	mask = (mask | asoc->peer.hostname_address) << 1;
peer             5254 net/sctp/socket.c 	mask = (mask | asoc->peer.asconf_capable) << 1;
peer             5255 net/sctp/socket.c 	mask = (mask | asoc->peer.prsctp_capable) << 1;
peer             5256 net/sctp/socket.c 	mask = (mask | asoc->peer.auth_capable);
peer             5258 net/sctp/socket.c 	mask = asoc->peer.sack_needed << 1;
peer             5259 net/sctp/socket.c 	mask = (mask | asoc->peer.sack_generation) << 1;
peer             5260 net/sctp/socket.c 	mask = (mask | asoc->peer.zero_window_announced);
peer             5278 net/sctp/socket.c 	prim = asoc->peer.primary_path;
peer             5327 net/sctp/socket.c 		    t->asoc->peer.primary_path == t)
peer             5466 net/sctp/socket.c 	transport = asoc->peer.primary_path;
peer             5470 net/sctp/socket.c 	status.sstat_rwnd =  asoc->peer.rwnd;
peer             5473 net/sctp/socket.c 	status.sstat_penddata = sctp_tsnmap_pending(&asoc->peer.tsn_map);
peer             5677 net/sctp/socket.c 	sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
peer             6190 net/sctp/socket.c 	list_for_each_entry(from, &asoc->peer.transport_addr_list,
peer             6385 net/sctp/socket.c 	if (!asoc->peer.primary_path)
peer             6388 net/sctp/socket.c 	memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
peer             6389 net/sctp/socket.c 		asoc->peer.primary_path->af_specific->sockaddr_len);
peer             6654 net/sctp/socket.c 		assocparams.sasoc_peer_rwnd = asoc->peer.rwnd;
peer             6658 net/sctp/socket.c 		list_for_each(pos, &asoc->peer.transport_addr_list) {
peer             6965 net/sctp/socket.c 		if (!asoc->peer.auth_capable)
peer             7003 net/sctp/socket.c 	if (!asoc->peer.auth_capable)
peer             7006 net/sctp/socket.c 	ch = asoc->peer.peer_chunks;
peer             7050 net/sctp/socket.c 		if (!asoc->peer.auth_capable)
peer             7364 net/sctp/socket.c 	params.assoc_value = asoc ? asoc->peer.prsctp_capable
peer             7572 net/sctp/socket.c 	params.assoc_value = asoc ? asoc->peer.reconf_capable
peer             7731 net/sctp/socket.c 	params.assoc_value = asoc ? asoc->peer.intl_capable
peer             7825 net/sctp/socket.c 	params.assoc_value = asoc ? asoc->peer.asconf_capable
peer             7864 net/sctp/socket.c 	params.assoc_value = asoc ? asoc->peer.auth_capable
peer             7903 net/sctp/socket.c 	params.assoc_value = asoc ? asoc->peer.ecn_capable
peer             9322 net/sctp/socket.c 	newinet->inet_dport = htons(asoc->peer.port);
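
The net/sctp/socket.c hits around lines 5250-5260 pack the peer capability and SACK state bits into small masks by shifting and OR-ing one flag at a time. A minimal sketch of that shift-then-OR packing follows; the flag names are hypothetical, not the actual sctp_info fields.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-peer capability flags, each 0 or 1. */
struct peer_caps {
	uint8_t ecn_capable;
	uint8_t ipv4_address;
	uint8_t ipv6_address;
	uint8_t asconf_capable;
};

/* Pack the flags into one byte, most-significant flag first, mirroring
 * the shift-then-OR chain seen in the socket.c hits above. */
static uint8_t pack_caps(const struct peer_caps *c)
{
	uint8_t mask;

	mask = c->ecn_capable << 1;
	mask = (mask | c->ipv4_address) << 1;
	mask = (mask | c->ipv6_address) << 1;
	mask = (mask | c->asconf_capable);
	return mask;
}

int main(void)
{
	struct peer_caps c = { .ecn_capable = 1, .ipv6_address = 1 };

	printf("mask = 0x%x\n", pack_caps(&c));	/* 0xa: bits 3 and 1 set */
	return 0;
}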
peer               53 net/sctp/stream.c 		if (asoc->peer.prsctp_capable &&
peer              269 net/sctp/stream.c 	if (!asoc->peer.reconf_capable ||
peer              381 net/sctp/stream.c 	if (!asoc->peer.reconf_capable ||
peer              427 net/sctp/stream.c 	if (!asoc->peer.reconf_capable ||
peer              525 net/sctp/stream.c 	    sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)) {
peer              699 net/sctp/stream.c 				sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + 1;
peer              724 net/sctp/stream.c 	max_tsn_seen = sctp_tsnmap_get_max_tsn_seen(&asoc->peer.tsn_map);
peer              732 net/sctp/stream.c 	init_tsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map) + (1 << 31);
peer              733 net/sctp/stream.c 	sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
peer              999 net/sctp/stream.c 						&asoc->peer.tsn_map);
peer             1004 net/sctp/stream.c 			sctp_tsnmap_init(&asoc->peer.tsn_map,
peer             1119 net/sctp/stream_interleave.c 	if (!asoc->peer.prsctp_capable)
peer             1209 net/sctp/stream_interleave.c 	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
peer             1244 net/sctp/stream_interleave.c 	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
peer             1248 net/sctp/stream_interleave.c 	if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
peer             1361 net/sctp/stream_interleave.c 	stream->si = asoc->peer.intl_capable ? &sctp_stream_interleave_1
peer              231 net/sctp/stream_sched.c 	    !q->asoc->peer.intl_capable) {
peer               41 net/sctp/transport.c 						  struct sctp_transport *peer,
peer               46 net/sctp/transport.c 	peer->af_specific = sctp_get_af_specific(addr->sa.sa_family);
peer               47 net/sctp/transport.c 	memcpy(&peer->ipaddr, addr, peer->af_specific->sockaddr_len);
peer               48 net/sctp/transport.c 	memset(&peer->saddr, 0, sizeof(union sctp_addr));
peer               50 net/sctp/transport.c 	peer->sack_generation = 0;
peer               58 net/sctp/transport.c 	peer->rto = msecs_to_jiffies(net->sctp.rto_initial);
peer               60 net/sctp/transport.c 	peer->last_time_heard = 0;
peer               61 net/sctp/transport.c 	peer->last_time_ecne_reduced = jiffies;
peer               63 net/sctp/transport.c 	peer->param_flags = SPP_HB_DISABLE |
peer               68 net/sctp/transport.c 	peer->pathmaxrxt  = net->sctp.max_retrans_path;
peer               69 net/sctp/transport.c 	peer->pf_retrans  = net->sctp.pf_retrans;
peer               71 net/sctp/transport.c 	INIT_LIST_HEAD(&peer->transmitted);
peer               72 net/sctp/transport.c 	INIT_LIST_HEAD(&peer->send_ready);
peer               73 net/sctp/transport.c 	INIT_LIST_HEAD(&peer->transports);
peer               75 net/sctp/transport.c 	timer_setup(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event, 0);
peer               76 net/sctp/transport.c 	timer_setup(&peer->hb_timer, sctp_generate_heartbeat_event, 0);
peer               77 net/sctp/transport.c 	timer_setup(&peer->reconf_timer, sctp_generate_reconf_event, 0);
peer               78 net/sctp/transport.c 	timer_setup(&peer->proto_unreach_timer,
peer               82 net/sctp/transport.c 	get_random_bytes(&peer->hb_nonce, sizeof(peer->hb_nonce));
peer               84 net/sctp/transport.c 	refcount_set(&peer->refcnt, 1);
peer               86 net/sctp/transport.c 	return peer;
peer              310 net/sctp/transport.c 	    (!asoc->peer.primary_path || transport == asoc->peer.active_path))
peer              649 net/sctp/transport.c 	t->ssthresh = asoc->peer.i.a_rwnd;
peer              117 net/sctp/tsnmap.c 				trans->asoc->peer.sack_generation;
peer              600 net/sctp/ulpevent.c 	sai->sai_adaptation_ind = asoc->peer.adaptation_ind;
peer              650 net/sctp/ulpevent.c 	if (sctp_tsnmap_mark(&asoc->peer.tsn_map,
peer              692 net/sctp/ulpevent.c 		event->cumtsn = sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map);
peer              599 net/sctp/ulpqueue.c 		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
peer              974 net/sctp/ulpqueue.c 	tsnmap = &ulpq->asoc->peer.tsn_map;
peer             1050 net/sctp/ulpqueue.c 		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
peer             1507 net/smc/af_smc.c 		       int peer)
peer             1511 net/smc/af_smc.c 	if (peer && (sock->sk->sk_state != SMC_ACTIVE) &&
peer             1517 net/smc/af_smc.c 	return smc->clcsock->ops->getname(smc->clcsock, addr, peer);
peer              212 net/smc/smc_cdc.h static inline void smc_host_cursor_to_cdc(union smc_cdc_cursor *peer,
peer              218 net/smc/smc_cdc.h 	peer->count = htonl(save->count);
peer              219 net/smc/smc_cdc.h 	peer->wrap = htons(save->wrap);
peer              223 net/smc/smc_cdc.h static inline void smc_host_msg_to_cdc(struct smc_cdc_msg *peer,
peer              229 net/smc/smc_cdc.h 	peer->common.type = local->common.type;
peer              230 net/smc/smc_cdc.h 	peer->len = local->len;
peer              231 net/smc/smc_cdc.h 	peer->seqno = htons(local->seqno);
peer              232 net/smc/smc_cdc.h 	peer->token = htonl(local->token);
peer              233 net/smc/smc_cdc.h 	smc_host_cursor_to_cdc(&peer->prod, &local->prod, save, conn);
peer              234 net/smc/smc_cdc.h 	smc_host_cursor_to_cdc(&peer->cons, &local->cons, save, conn);
peer              235 net/smc/smc_cdc.h 	peer->prod_flags = local->prod_flags;
peer              236 net/smc/smc_cdc.h 	peer->conn_state_flags = local->conn_state_flags;
peer              240 net/smc/smc_cdc.h 					  union smc_cdc_cursor *peer,
peer              247 net/smc/smc_cdc.h 	smc_curs_copy_net(&net, peer, conn);
peer              259 net/smc/smc_cdc.h 					struct smc_cdc_msg *peer,
peer              262 net/smc/smc_cdc.h 	local->common.type = peer->common.type;
peer              263 net/smc/smc_cdc.h 	local->len = peer->len;
peer              264 net/smc/smc_cdc.h 	local->seqno = ntohs(peer->seqno);
peer              265 net/smc/smc_cdc.h 	local->token = ntohl(peer->token);
peer              266 net/smc/smc_cdc.h 	smc_cdc_cursor_to_host(&local->prod, &peer->prod, conn);
peer              267 net/smc/smc_cdc.h 	smc_cdc_cursor_to_host(&local->cons, &peer->cons, conn);
peer              268 net/smc/smc_cdc.h 	local->prod_flags = peer->prod_flags;
peer              269 net/smc/smc_cdc.h 	local->conn_state_flags = peer->conn_state_flags;
peer              273 net/smc/smc_cdc.h 					struct smcd_cdc_msg *peer,
peer              278 net/smc/smc_cdc.h 	temp.wrap = peer->prod.wrap;
peer              279 net/smc/smc_cdc.h 	temp.count = peer->prod.count;
peer              282 net/smc/smc_cdc.h 	temp.wrap = peer->cons.wrap;
peer              283 net/smc/smc_cdc.h 	temp.count = peer->cons.count;
peer              285 net/smc/smc_cdc.h 	local->prod_flags = peer->cons.prod_flags;
peer              286 net/smc/smc_cdc.h 	local->conn_state_flags = peer->cons.conn_state_flags;
peer              290 net/smc/smc_cdc.h 				       struct smc_cdc_msg *peer,
peer              294 net/smc/smc_cdc.h 		smcd_cdc_msg_to_host(local, (struct smcd_cdc_msg *)peer, conn);
peer              296 net/smc/smc_cdc.h 		smcr_cdc_msg_to_host(local, peer, conn);
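
The net/smc/smc_cdc.h hits show inline helpers that copy a host-order connection data control message into its wire form with htonl()/htons() and back with ntohl()/ntohs(). Below is a self-contained user-space sketch of that host-to-wire conversion pattern; the two-field message structs are made up for illustration and are not the real struct smc_cdc_msg layout.

#include <arpa/inet.h>	/* htonl, htons, ntohl, ntohs */
#include <assert.h>
#include <stdint.h>

/* Hypothetical host-order and wire-order message layouts; the real CDC
 * message also carries cursors and flag bytes. */
struct msg_host {
	uint32_t token;
	uint16_t seqno;
};

struct msg_wire {
	uint32_t token;		/* big-endian on the wire */
	uint16_t seqno;		/* big-endian on the wire */
};

/* Same direction as smc_host_msg_to_cdc(): host -> wire. */
static void msg_host_to_wire(struct msg_wire *peer, const struct msg_host *local)
{
	peer->token = htonl(local->token);
	peer->seqno = htons(local->seqno);
}

/* Same direction as smcr_cdc_msg_to_host(): wire -> host. */
static void msg_wire_to_host(struct msg_host *local, const struct msg_wire *peer)
{
	local->token = ntohl(peer->token);
	local->seqno = ntohs(peer->seqno);
}

int main(void)
{
	struct msg_host in = { .token = 0x12345678, .seqno = 42 }, out;
	struct msg_wire wire;

	msg_host_to_wire(&wire, &in);
	msg_wire_to_host(&out, &wire);
	assert(out.token == in.token && out.seqno == in.seqno);
	return 0;
}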
peer              445 net/tipc/link.c 		      u32 peer, u8 *peer_id, u16 peer_caps,
peer              469 net/tipc/link.c 			sprintf(peer_str, "%x", peer);
peer              476 net/tipc/link.c 	l->addr = peer;
peer              515 net/tipc/link.c bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
peer              525 net/tipc/link.c 			      0, ownnode, peer, NULL, peer_caps, bc_sndlink,
peer               77 net/tipc/link.h 		      u32 peer, u8 *peer_id, u16 peer_caps,
peer               83 net/tipc/link.h bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
peer              136 net/tipc/monitor.c static struct tipc_peer *peer_prev(struct tipc_peer *peer)
peer              138 net/tipc/monitor.c 	return list_last_entry(&peer->list, struct tipc_peer, list);
peer              141 net/tipc/monitor.c static struct tipc_peer *peer_nxt(struct tipc_peer *peer)
peer              143 net/tipc/monitor.c 	return list_first_entry(&peer->list, struct tipc_peer, list);
peer              146 net/tipc/monitor.c static struct tipc_peer *peer_head(struct tipc_peer *peer)
peer              148 net/tipc/monitor.c 	while (!peer->is_head)
peer              149 net/tipc/monitor.c 		peer = peer_prev(peer);
peer              150 net/tipc/monitor.c 	return peer;
peer              155 net/tipc/monitor.c 	struct tipc_peer *peer;
peer              158 net/tipc/monitor.c 	hlist_for_each_entry(peer, &mon->peers[thash], hash) {
peer              159 net/tipc/monitor.c 		if (peer->addr == addr)
peer              160 net/tipc/monitor.c 			return peer;
peer              181 net/tipc/monitor.c static void mon_identify_lost_members(struct tipc_peer *peer,
peer              185 net/tipc/monitor.c 	struct tipc_peer *member = peer;
peer              186 net/tipc/monitor.c 	struct tipc_mon_domain *dom_aft = peer->domain;
peer              187 net/tipc/monitor.c 	int applied_aft = peer->applied;
peer              216 net/tipc/monitor.c 			     struct tipc_peer *peer)
peer              218 net/tipc/monitor.c 	struct tipc_mon_domain *dom = peer->domain;
peer              223 net/tipc/monitor.c 	if (!dom || !peer->is_up)
peer              227 net/tipc/monitor.c 	peer->applied = 0;
peer              228 net/tipc/monitor.c 	member = peer_nxt(peer);
peer              233 net/tipc/monitor.c 		peer->applied++;
peer              245 net/tipc/monitor.c 	struct tipc_peer *peer = self;
peer              259 net/tipc/monitor.c 		peer = peer_nxt(peer);
peer              260 net/tipc/monitor.c 		diff |= dom->members[i] != peer->addr;
peer              261 net/tipc/monitor.c 		dom->members[i] = peer->addr;
peer              262 net/tipc/monitor.c 		map_set(&dom->up_map, i, peer->is_up);
peer              263 net/tipc/monitor.c 		cache->members[i] = htonl(peer->addr);
peer              279 net/tipc/monitor.c 				 struct tipc_peer *peer)
peer              285 net/tipc/monitor.c 		mon_apply_domain(mon, peer);
peer              286 net/tipc/monitor.c 		peer = peer_prev(peer);
peer              296 net/tipc/monitor.c 	struct tipc_peer *peer = peer_nxt(head);
peer              300 net/tipc/monitor.c 	for (; peer != self; peer = peer_nxt(peer)) {
peer              301 net/tipc/monitor.c 		peer->is_local = false;
peer              305 net/tipc/monitor.c 			peer->is_head = false;
peer              307 net/tipc/monitor.c 				peer->is_local = true;
peer              311 net/tipc/monitor.c 		if (!peer->is_up)
peer              313 net/tipc/monitor.c 		if (peer->is_head)
peer              315 net/tipc/monitor.c 		head = peer;
peer              326 net/tipc/monitor.c 	struct tipc_peer *peer, *prev, *head;
peer              329 net/tipc/monitor.c 	peer = get_peer(mon, addr);
peer              330 net/tipc/monitor.c 	if (!peer)
peer              332 net/tipc/monitor.c 	prev = peer_prev(peer);
peer              333 net/tipc/monitor.c 	list_del(&peer->list);
peer              334 net/tipc/monitor.c 	hlist_del(&peer->hash);
peer              335 net/tipc/monitor.c 	kfree(peer->domain);
peer              336 net/tipc/monitor.c 	kfree(peer);
peer              345 net/tipc/monitor.c 		list_for_each_entry(peer, &self->list, list) {
peer              346 net/tipc/monitor.c 			kfree(peer->domain);
peer              347 net/tipc/monitor.c 			peer->domain = NULL;
peer              348 net/tipc/monitor.c 			peer->applied = 0;
peer              357 net/tipc/monitor.c 			      struct tipc_peer **peer)
peer              363 net/tipc/monitor.c 	*peer = p;
peer              392 net/tipc/monitor.c 	struct tipc_peer *peer, *head;
peer              395 net/tipc/monitor.c 	peer = get_peer(mon, addr);
peer              396 net/tipc/monitor.c 	if (!peer && !tipc_mon_add_peer(mon, addr, &peer))
peer              398 net/tipc/monitor.c 	peer->is_up = true;
peer              399 net/tipc/monitor.c 	head = peer_head(peer);
peer              411 net/tipc/monitor.c 	struct tipc_peer *peer, *head;
peer              416 net/tipc/monitor.c 	peer = get_peer(mon, addr);
peer              417 net/tipc/monitor.c 	if (!peer) {
peer              421 net/tipc/monitor.c 	applied = peer->applied;
peer              422 net/tipc/monitor.c 	peer->applied = 0;
peer              423 net/tipc/monitor.c 	dom = peer->domain;
peer              424 net/tipc/monitor.c 	peer->domain = NULL;
peer              425 net/tipc/monitor.c 	if (peer->is_head)
peer              426 net/tipc/monitor.c 		mon_identify_lost_members(peer, dom, applied);
peer              428 net/tipc/monitor.c 	peer->is_up = false;
peer              429 net/tipc/monitor.c 	peer->is_head = false;
peer              430 net/tipc/monitor.c 	peer->is_local = false;
peer              431 net/tipc/monitor.c 	peer->down_cnt = 0;
peer              432 net/tipc/monitor.c 	head = peer_head(peer);
peer              449 net/tipc/monitor.c 	struct tipc_peer *peer;
peer              482 net/tipc/monitor.c 	peer = get_peer(mon, addr);
peer              483 net/tipc/monitor.c 	if (!peer || !peer->is_up)
peer              487 net/tipc/monitor.c 	peer->down_cnt = 0;
peer              497 net/tipc/monitor.c 	dom = peer->domain;
peer              505 net/tipc/monitor.c 		peer->domain = dom;
peer              517 net/tipc/monitor.c 	applied_bef = peer->applied;
peer              518 net/tipc/monitor.c 	mon_apply_domain(mon, peer);
peer              519 net/tipc/monitor.c 	mon_identify_lost_members(peer, &dom_bef, applied_bef);
peer              520 net/tipc/monitor.c 	mon_assign_roles(mon, peer_head(peer));
peer              563 net/tipc/monitor.c 	struct tipc_peer *peer;
peer              578 net/tipc/monitor.c 	peer = get_peer(mon, addr);
peer              579 net/tipc/monitor.c 	if (peer) {
peer              581 net/tipc/monitor.c 		state->probing |= peer->down_cnt;
peer              582 net/tipc/monitor.c 		state->reset |= peer->down_cnt >= MAX_PEER_DOWN_EVENTS;
peer              583 net/tipc/monitor.c 		state->monitoring = peer->is_local;
peer              584 net/tipc/monitor.c 		state->monitoring |= peer->is_head;
peer              646 net/tipc/monitor.c 	struct tipc_peer *peer, *tmp;
peer              654 net/tipc/monitor.c 	list_for_each_entry_safe(peer, tmp, &self->list, list) {
peer              655 net/tipc/monitor.c 		list_del(&peer->list);
peer              656 net/tipc/monitor.c 		hlist_del(&peer->hash);
peer              657 net/tipc/monitor.c 		kfree(peer->domain);
peer              658 net/tipc/monitor.c 		kfree(peer);
peer              702 net/tipc/monitor.c static int __tipc_nl_add_monitor_peer(struct tipc_peer *peer,
peer              705 net/tipc/monitor.c 	struct tipc_mon_domain *dom = peer->domain;
peer              718 net/tipc/monitor.c 	if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_ADDR, peer->addr))
peer              720 net/tipc/monitor.c 	if (nla_put_u32(msg->skb, TIPC_NLA_MON_PEER_APPLIED, peer->applied))
peer              723 net/tipc/monitor.c 	if (peer->is_up)
peer              726 net/tipc/monitor.c 	if (peer->is_local)
peer              729 net/tipc/monitor.c 	if (peer->is_head)
peer              760 net/tipc/monitor.c 	struct tipc_peer *peer;
peer              766 net/tipc/monitor.c 	peer = mon->self;
peer              769 net/tipc/monitor.c 			if (peer->addr == *prev_node)
peer              774 net/tipc/monitor.c 		if (__tipc_nl_add_monitor_peer(peer, msg)) {
peer              775 net/tipc/monitor.c 			*prev_node = peer->addr;
peer              779 net/tipc/monitor.c 	} while ((peer = peer_nxt(peer)) != mon->self);
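
The net/tipc/monitor.c hits keep peers on a circular doubly linked list: peer_prev()/peer_nxt() step through it via list_last_entry()/list_first_entry(), peer_head() walks backwards until it reaches the entry flagged is_head, and the do/while loop near the end visits every peer exactly once. The sketch below models that navigation with plain next/prev pointers instead of the kernel list.h macros; it assumes, as the kernel helper does, that at least one node on the ring is flagged as head.

#include <stdbool.h>
#include <stdio.h>

struct peer {
	const char *name;
	bool is_head;
	struct peer *next;
	struct peer *prev;
};

static struct peer *peer_prev(struct peer *p) { return p->prev; }
static struct peer *peer_nxt(struct peer *p)  { return p->next; }

/* Walk backwards until the head flag is found, like peer_head() above. */
static struct peer *peer_head(struct peer *p)
{
	while (!p->is_head)
		p = peer_prev(p);
	return p;
}

int main(void)
{
	struct peer a = { .name = "a", .is_head = true };
	struct peer b = { .name = "b" };
	struct peer c = { .name = "c" };

	/* Build the ring a -> b -> c -> a. */
	a.next = &b; b.next = &c; c.next = &a;
	a.prev = &c; b.prev = &a; c.prev = &b;

	printf("head of c's ring: %s\n", peer_head(&c)->name);	/* "a" */

	/* Full ring traversal, like the do/while loop in the hits above. */
	struct peer *p = &a;
	do {
		printf("visit %s\n", p->name);
	} while ((p = peer_nxt(p)) != &a);
	return 0;
}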
peer              601 net/tipc/node.c static bool tipc_node_cleanup(struct tipc_node *peer)
peer              604 net/tipc/node.c 	struct tipc_net *tn = tipc_net(peer->net);
peer              611 net/tipc/node.c 	tipc_node_write_lock(peer);
peer              613 net/tipc/node.c 	if (!node_is_up(peer) && time_after(jiffies, peer->delete_at)) {
peer              614 net/tipc/node.c 		tipc_node_clear_links(peer);
peer              615 net/tipc/node.c 		tipc_node_delete_from_list(peer);
peer              618 net/tipc/node.c 	tipc_node_write_unlock(peer);
peer             1929 net/tipc/node.c 	struct tipc_node *peer;
peer             1952 net/tipc/node.c 	peer = tipc_node_find(net, addr);
peer             1953 net/tipc/node.c 	if (!peer) {
peer             1958 net/tipc/node.c 	tipc_node_write_lock(peer);
peer             1959 net/tipc/node.c 	if (peer->state != SELF_DOWN_PEER_DOWN &&
peer             1960 net/tipc/node.c 	    peer->state != SELF_DOWN_PEER_LEAVING) {
peer             1961 net/tipc/node.c 		tipc_node_write_unlock(peer);
peer             1966 net/tipc/node.c 	tipc_node_clear_links(peer);
peer             1967 net/tipc/node.c 	tipc_node_write_unlock(peer);
peer             1968 net/tipc/node.c 	tipc_node_delete(peer);
peer             1972 net/tipc/node.c 	tipc_node_put(peer);
peer              114 net/tipc/socket.c 	struct sockaddr_tipc peer;
peer              677 net/tipc/socket.c 			int peer)
peer              684 net/tipc/socket.c 	if (peer) {
peer              686 net/tipc/socket.c 		    ((peer != 2) || (sk->sk_state != TIPC_DISCONNECTING)))
peer             1336 net/tipc/socket.c 		dest = &tsk->peer;
peer             2404 net/tipc/socket.c 		memset(&tsk->peer, 0, sizeof(struct sockaddr_tipc));
peer             2415 net/tipc/socket.c 		memcpy(&tsk->peer, dest, destlen);
peer             3118 net/tipc/socket.c 					    lnr.bearer_id & 0xffff, lnr.peer,
peer             3128 net/tipc/socket.c 		if (!tipc_node_get_id(net, nr.peer, nr.node_id))
peer             3144 net/tipc/socket.c 	tsk1->peer.family = AF_TIPC;
peer             3145 net/tipc/socket.c 	tsk1->peer.addrtype = TIPC_ADDR_ID;
peer             3146 net/tipc/socket.c 	tsk1->peer.scope = TIPC_NODE_SCOPE;
peer             3147 net/tipc/socket.c 	tsk1->peer.addr.id.ref = tsk2->portid;
peer             3148 net/tipc/socket.c 	tsk1->peer.addr.id.node = onode;
peer             3149 net/tipc/socket.c 	tsk2->peer.family = AF_TIPC;
peer             3150 net/tipc/socket.c 	tsk2->peer.addrtype = TIPC_ADDR_ID;
peer             3151 net/tipc/socket.c 	tsk2->peer.scope = TIPC_NODE_SCOPE;
peer             3152 net/tipc/socket.c 	tsk2->peer.addr.id.ref = tsk1->portid;
peer             3153 net/tipc/socket.c 	tsk2->peer.addr.id.node = onode;
peer              180 net/unix/af_unix.c #define unix_peer(sk) (unix_sk(sk)->peer)
peer              199 net/unix/af_unix.c 	struct sock *peer;
peer              202 net/unix/af_unix.c 	peer = unix_peer(s);
peer              203 net/unix/af_unix.c 	if (peer)
peer              204 net/unix/af_unix.c 		sock_hold(peer);
peer              206 net/unix/af_unix.c 	return peer;
peer             1476 net/unix/af_unix.c static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
peer             1483 net/unix/af_unix.c 	if (peer) {
peer               44 net/unix/diag.c 	struct sock *peer;
peer               47 net/unix/diag.c 	peer = unix_peer_get(sk);
peer               48 net/unix/diag.c 	if (peer) {
peer               49 net/unix/diag.c 		unix_state_lock(peer);
peer               50 net/unix/diag.c 		ino = sock_i_ino(peer);
peer               51 net/unix/diag.c 		unix_state_unlock(peer);
peer               52 net/unix/diag.c 		sock_put(peer);
peer               78 net/unix/diag.c 			struct sock *req, *peer;
peer               87 net/unix/diag.c 			peer = unix_sk(req)->peer;
peer               88 net/unix/diag.c 			buf[i++] = (peer ? sock_i_ino(peer) : 0);
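
The net/unix hits show unix_peer_get() returning the peer socket only after taking an extra reference with sock_hold(), so callers such as the diag code can inspect it and then drop the reference with sock_put(). A rough single-threaded sketch of that return-with-reference pattern follows; the refcounting is simplified with C11 atomics, and the kernel's per-socket state lock around the lookup is deliberately omitted here.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-in for a socket with a reference count and a peer. */
struct sock {
	atomic_int refcnt;
	struct sock *peer;
};

static void sock_hold(struct sock *sk) { atomic_fetch_add(&sk->refcnt, 1); }

static void sock_put(struct sock *sk)
{
	if (atomic_fetch_sub(&sk->refcnt, 1) == 1)
		free(sk);
}

/* Return the peer with an extra reference held, or NULL; the caller owns
 * that reference and must release it with sock_put(). */
static struct sock *peer_get(struct sock *sk)
{
	struct sock *peer = sk->peer;

	if (peer)
		sock_hold(peer);
	return peer;
}

int main(void)
{
	struct sock *a = calloc(1, sizeof(*a));
	struct sock *b = calloc(1, sizeof(*b));

	atomic_init(&a->refcnt, 1);
	atomic_init(&b->refcnt, 1);
	a->peer = b;
	b->peer = a;

	struct sock *p = peer_get(a);	/* b, refcount now 2 */
	printf("peer refcnt = %d\n", atomic_load(&p->refcnt));
	sock_put(p);			/* drop the borrowed reference */

	a->peer = b->peer = NULL;
	sock_put(a);
	sock_put(b);
	return 0;
}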
peer              746 net/vmw_vsock/af_vsock.c 			 struct sockaddr *addr, int peer)
peer              759 net/vmw_vsock/af_vsock.c 	if (peer) {
peer              557 net/vmw_vsock/vmci_transport.c 				u32 peer, u32 flags, bool trusted)
peer              568 net/vmw_vsock/vmci_transport.c 				       peer, flags,
peer              576 net/vmw_vsock/vmci_transport.c 			       peer, flags, VMCI_NO_PRIVILEGE_FLAGS);
peer             10338 net/wireless/nl80211.c 	u8 *peer;
peer             10352 net/wireless/nl80211.c 	peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
peer             10361 net/wireless/nl80211.c 	return rdev_tdls_mgmt(rdev, dev, peer, action_code,
peer             10373 net/wireless/nl80211.c 	u8 *peer;
peer             10384 net/wireless/nl80211.c 	peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
peer             10386 net/wireless/nl80211.c 	return rdev_tdls_oper(rdev, dev, peer, operation);
peer             13282 net/wireless/nl80211.c 	const u8 *peer;
peer             13306 net/wireless/nl80211.c 	peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
peer             13328 net/wireless/nl80211.c 	err = rdev_add_tx_ts(rdev, dev, tsid, peer, up, admitted_time);
peer             13340 net/wireless/nl80211.c 	const u8 *peer;
peer             13348 net/wireless/nl80211.c 	peer = nla_data(info->attrs[NL80211_ATTR_MAC]);
peer             13351 net/wireless/nl80211.c 	err = rdev_del_tx_ts(rdev, dev, tsid, peer);
peer             13747 net/wireless/nl80211.c 	nla_memcpy(owe_info.peer, info->attrs[NL80211_ATTR_MAC], ETH_ALEN);
peer             16065 net/wireless/nl80211.c 			     const u8 *peer, u32 num_packets,
peer             16070 net/wireless/nl80211.c 	msg = cfg80211_prepare_cqm(dev, peer, gfp);
peer             16092 net/wireless/nl80211.c 				 const u8 *peer, u32 num_packets, gfp_t gfp)
peer             16096 net/wireless/nl80211.c 	trace_cfg80211_cqm_pktloss_notify(dev, peer, num_packets);
peer             16098 net/wireless/nl80211.c 	msg = cfg80211_prepare_cqm(dev, peer, gfp);
peer             16689 net/wireless/nl80211.c void cfg80211_tdls_oper_request(struct net_device *dev, const u8 *peer,
peer             16698 net/wireless/nl80211.c 	trace_cfg80211_tdls_oper_request(wdev->wiphy, dev, peer, oper,
peer             16714 net/wireless/nl80211.c 	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, peer) ||
peer             16969 net/wireless/nl80211.c 	    nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, owe_info->peer))
peer              133 net/wireless/pmsr.c 			   struct nlattr *peer,
peer              143 net/wireless/pmsr.c 	nla_parse_nested_deprecated(tb, NL80211_PMSR_PEER_ATTR_MAX, peer,
peer              149 net/wireless/pmsr.c 		NL_SET_ERR_MSG_ATTR(info->extack, peer,
peer              215 net/wireless/pmsr.c 	struct nlattr *peers, *peer;
peer              230 net/wireless/pmsr.c 	nla_for_each_nested(peer, peers, rem) {
peer              234 net/wireless/pmsr.c 			NL_SET_ERR_MSG_ATTR(info->extack, peer,
peer              266 net/wireless/pmsr.c 	nla_for_each_nested(peer, peers, rem) {
peer              268 net/wireless/pmsr.c 		err = pmsr_parse_peer(rdev, peer, &req->peers[idx], info);
peer              423 net/wireless/pmsr.c 	struct nlattr *pmsr, *peers, *peer, *resp, *data, *typedata;
peer              433 net/wireless/pmsr.c 	peer = nla_nest_start_noflag(msg, 1);
peer              434 net/wireless/pmsr.c 	if (!peer)
peer              477 net/wireless/pmsr.c 	nla_nest_end(msg, peer);
peer              641 net/wireless/rdev-ops.h 		      struct net_device *dev, const u8 *peer,
peer              645 net/wireless/rdev-ops.h 	trace_rdev_set_bitrate_mask(&rdev->wiphy, dev, peer, mask);
peer              646 net/wireless/rdev-ops.h 	ret = rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, peer, mask);
peer              877 net/wireless/rdev-ops.h 				 struct net_device *dev, u8 *peer,
peer              883 net/wireless/rdev-ops.h 	trace_rdev_tdls_mgmt(&rdev->wiphy, dev, peer, action_code,
peer              886 net/wireless/rdev-ops.h 	ret = rdev->ops->tdls_mgmt(&rdev->wiphy, dev, peer, action_code,
peer              894 net/wireless/rdev-ops.h 				 struct net_device *dev, u8 *peer,
peer              898 net/wireless/rdev-ops.h 	trace_rdev_tdls_oper(&rdev->wiphy, dev, peer, oper);
peer              899 net/wireless/rdev-ops.h 	ret = rdev->ops->tdls_oper(&rdev->wiphy, dev, peer, oper);
peer              905 net/wireless/rdev-ops.h 				    struct net_device *dev, const u8 *peer,
peer              909 net/wireless/rdev-ops.h 	trace_rdev_probe_client(&rdev->wiphy, dev, peer);
peer              910 net/wireless/rdev-ops.h 	ret = rdev->ops->probe_client(&rdev->wiphy, dev, peer, cookie);
peer             1104 net/wireless/rdev-ops.h 	       struct net_device *dev, u8 tsid, const u8 *peer,
peer             1109 net/wireless/rdev-ops.h 	trace_rdev_add_tx_ts(&rdev->wiphy, dev, tsid, peer,
peer             1112 net/wireless/rdev-ops.h 		ret = rdev->ops->add_tx_ts(&rdev->wiphy, dev, tsid, peer,
peer             1121 net/wireless/rdev-ops.h 	       struct net_device *dev, u8 tsid, const u8 *peer)
peer             1125 net/wireless/rdev-ops.h 	trace_rdev_del_tx_ts(&rdev->wiphy, dev, tsid, peer);
peer             1127 net/wireless/rdev-ops.h 		ret = rdev->ops->del_tx_ts(&rdev->wiphy, dev, tsid, peer);
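
The net/wireless/rdev-ops.h hits wrap each driver callback the same way: emit a tracepoint, invoke the op only if the driver actually provides it, and otherwise fall back to an error (cfg80211 uses -EOPNOTSUPP for that case). The sketch below reproduces that optional-callback dispatch shape with an illustrative ops table and printf() in place of tracepoints; it is not the cfg80211 API.

#include <errno.h>
#include <stdio.h>

struct device;

/* Illustrative ops table: a driver may leave callbacks NULL. */
struct dev_ops {
	int (*del_tx_ts)(struct device *dev, unsigned char tsid,
			 const unsigned char *peer);
};

struct device {
	const char *name;
	const struct dev_ops *ops;
};

/* Wrapper in the style of rdev_del_tx_ts(): trace, dispatch if present,
 * otherwise report that the operation is unsupported. */
static int dev_del_tx_ts(struct device *dev, unsigned char tsid,
			 const unsigned char *peer)
{
	int ret;

	printf("trace: del_tx_ts dev=%s tsid=%d\n", dev->name, tsid);
	if (dev->ops->del_tx_ts)
		ret = dev->ops->del_tx_ts(dev, tsid, peer);
	else
		ret = -EOPNOTSUPP;
	printf("trace: del_tx_ts returned %d\n", ret);
	return ret;
}

static int demo_del_tx_ts(struct device *dev, unsigned char tsid,
			  const unsigned char *peer)
{
	(void)peer;
	printf("%s: deleting TS %d\n", dev->name, tsid);
	return 0;
}

int main(void)
{
	const struct dev_ops full = { .del_tx_ts = demo_del_tx_ts };
	const struct dev_ops bare = { 0 };
	struct device a = { .name = "a", .ops = &full };
	struct device b = { .name = "b", .ops = &bare };
	unsigned char peer[6] = { 0 };

	dev_del_tx_ts(&a, 1, peer);	/* dispatched to the driver */
	dev_del_tx_ts(&b, 1, peer);	/* -EOPNOTSUPP: op not provided */
	return 0;
}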
peer             1552 net/wireless/trace.h 		 const u8 *peer, const struct cfg80211_bitrate_mask *mask),
peer             1553 net/wireless/trace.h 	TP_ARGS(wiphy, netdev, peer, mask),
peer             1557 net/wireless/trace.h 		MAC_ENTRY(peer)
peer             1562 net/wireless/trace.h 		MAC_ASSIGN(peer, peer);
peer             1565 net/wireless/trace.h 		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
peer             1682 net/wireless/trace.h 		 u8 *peer, u8 action_code, u8 dialog_token,
peer             1685 net/wireless/trace.h 	TP_ARGS(wiphy, netdev, peer, action_code, dialog_token, status_code,
peer             1690 net/wireless/trace.h 		MAC_ENTRY(peer)
peer             1701 net/wireless/trace.h 		MAC_ASSIGN(peer, peer);
peer             1712 net/wireless/trace.h 		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer),
peer             1778 net/wireless/trace.h 		 u8 *peer, enum nl80211_tdls_operation oper),
peer             1779 net/wireless/trace.h 	TP_ARGS(wiphy, netdev, peer, oper),
peer             1783 net/wireless/trace.h 		MAC_ENTRY(peer)
peer             1789 net/wireless/trace.h 		MAC_ASSIGN(peer, peer);
peer             1793 net/wireless/trace.h 		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->oper)
peer             1816 net/wireless/trace.h 		 const u8 *peer),
peer             1817 net/wireless/trace.h 	TP_ARGS(wiphy, netdev, peer),
peer             1821 net/wireless/trace.h 		MAC_ENTRY(peer)
peer             1826 net/wireless/trace.h 		MAC_ASSIGN(peer, peer);
peer             1829 net/wireless/trace.h 		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
peer             2247 net/wireless/trace.h 		 u8 tsid, const u8 *peer, u8 user_prio, u16 admitted_time),
peer             2248 net/wireless/trace.h 	TP_ARGS(wiphy, netdev, tsid, peer, user_prio, admitted_time),
peer             2252 net/wireless/trace.h 		MAC_ENTRY(peer)
peer             2260 net/wireless/trace.h 		MAC_ASSIGN(peer, peer);
peer             2266 net/wireless/trace.h 		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer),
peer             2272 net/wireless/trace.h 		 u8 tsid, const u8 *peer),
peer             2273 net/wireless/trace.h 	TP_ARGS(wiphy, netdev, tsid, peer),
peer             2277 net/wireless/trace.h 		MAC_ENTRY(peer)
peer             2283 net/wireless/trace.h 		MAC_ASSIGN(peer, peer);
peer             2287 net/wireless/trace.h 		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->tsid)
peer             3040 net/wireless/trace.h 	TP_PROTO(struct net_device *netdev, const u8 *peer, u32 num_packets),
peer             3041 net/wireless/trace.h 	TP_ARGS(netdev, peer, num_packets),
peer             3044 net/wireless/trace.h 		MAC_ENTRY(peer)
peer             3049 net/wireless/trace.h 		MAC_ASSIGN(peer, peer);
peer             3053 net/wireless/trace.h 		  NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->num_packets)
peer             3101 net/wireless/trace.h 	TP_PROTO(struct wiphy *wiphy, struct net_device *netdev, const u8 *peer,
peer             3103 net/wireless/trace.h 	TP_ARGS(wiphy, netdev, peer, oper, reason_code),
peer             3107 net/wireless/trace.h 		MAC_ENTRY(peer)
peer             3114 net/wireless/trace.h 		MAC_ASSIGN(peer, peer);
peer             3119 net/wireless/trace.h 		  WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer), __entry->oper,
peer             3417 net/wireless/trace.h 			     MAC_ENTRY(peer)
peer             3422 net/wireless/trace.h 			   MAC_ASSIGN(peer, owe_info->peer);
peer             3427 net/wireless/trace.h 		  " status %d", WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer),
peer             3437 net/wireless/trace.h 			     MAC_ENTRY(peer)
peer             3441 net/wireless/trace.h 			   MAC_ASSIGN(peer, owe_info->peer);
peer             3445 net/wireless/trace.h 		      WIPHY_PR_ARG, NETDEV_PR_ARG, MAC_PR_ARG(peer))
peer              908 net/x25/af_x25.c 		       int peer)
peer              915 net/x25/af_x25.c 	if (peer) {
peer               96 net/x25/x25_forward.c 	struct net_device *peer = NULL;
peer              107 net/x25/x25_forward.c 				peer = frwd->dev2;
peer              109 net/x25/x25_forward.c 				peer = frwd->dev1;
peer              116 net/x25/x25_forward.c 	if ( (nb = x25_get_neigh(peer)) == NULL)
peer               76 security/apparmor/file.c 	if (aad(sa)->peer) {
peer               78 security/apparmor/file.c 		aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
peer              113 security/apparmor/file.c 	aad(&sa)->peer = tlabel;
peer              119 security/apparmor/include/audit.h 			struct aa_label *peer;
peer               51 security/apparmor/include/net.h 	struct aa_label *peer;
peer               59 security/apparmor/ipc.c 	aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
peer               66 security/apparmor/ipc.c 			     struct aa_label *peer, u32 request,
peer               71 security/apparmor/ipc.c 	aad(sa)->peer = peer;
peer               72 security/apparmor/ipc.c 	aa_profile_match_label(profile, peer, AA_CLASS_PTRACE, request,
peer              104 security/apparmor/ipc.c 	aad(sa)->peer = tracee;
peer              183 security/apparmor/ipc.c 	aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
peer              188 security/apparmor/ipc.c 			       struct aa_label *peer, u32 request,
peer              198 security/apparmor/ipc.c 	aad(sa)->peer = peer;
peer              203 security/apparmor/ipc.c 	aa_label_match(profile, peer, state, false, request, &perms);
peer              279 security/apparmor/lib.c 	aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
peer              402 security/apparmor/lib.c 	aad(sa)->peer = &target->label;
peer              781 security/apparmor/lsm.c 	aa_put_label(ctx->peer);
peer              795 security/apparmor/lsm.c 	new->peer = aa_get_label(ctx->peer);
peer             1043 security/apparmor/lsm.c 	if (ctx->peer)
peer             1044 security/apparmor/lsm.c 		return ctx->peer;
peer             1062 security/apparmor/lsm.c 	struct aa_label *peer;
peer             1065 security/apparmor/lsm.c 	peer = sk_peer_label(sock->sk);
peer             1066 security/apparmor/lsm.c 	if (IS_ERR(peer)) {
peer             1067 security/apparmor/lsm.c 		error = PTR_ERR(peer);
peer             1070 security/apparmor/lsm.c 	slen = aa_label_asxprint(&name, labels_ns(label), peer,
peer               98 security/apparmor/net.c 	if (aad(sa)->peer) {
peer              100 security/apparmor/net.c 		aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
peer               36 security/apparmor/resource.c 	if (aad(sa)->peer) {
peer               38 security/apparmor/resource.c 		aa_label_xaudit(ab, labels_ns(aad(sa)->label), aad(sa)->peer,
peer               53 security/apparmor/resource.c 			  unsigned long value, struct aa_label *peer,
peer               60 security/apparmor/resource.c 	aad(&sa)->peer = peer;
peer              108 security/apparmor/resource.c 	struct aa_label *peer;
peer              112 security/apparmor/resource.c 	peer = aa_get_newest_cred_label(__task_cred(task));
peer              122 security/apparmor/resource.c 	if (label != peer &&
peer              126 security/apparmor/resource.c 					       new_rlim->rlim_max, peer,
peer              131 security/apparmor/resource.c 	aa_put_label(peer);
peer             1860 sound/soc/soc-dapm.c static void dapm_widget_set_peer_power(struct snd_soc_dapm_widget *peer,
peer             1871 sound/soc/soc-dapm.c 	if (power != peer->power)
peer             1872 sound/soc/soc-dapm.c 		dapm_mark_dirty(peer, "peer state change");
peer              289 sound/usb/6fire/pcm.c 	struct pcm_urb *out_urb = in_urb->peer;
peer              646 sound/usb/6fire/pcm.c 		rt->in_urbs[i].peer = &rt->out_urbs[i];
peer              647 sound/usb/6fire/pcm.c 		rt->out_urbs[i].peer = &rt->in_urbs[i];
peer               33 sound/usb/6fire/pcm.h 	struct pcm_urb *peer;
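
The sound/usb/6fire hits pair each capture URB with the matching playback URB through mutual peer pointers (in_urbs[i].peer = &out_urbs[i] and the reverse), so the input-completion handler can reach its output counterpart directly. A small sketch of that mutual-pointer pairing is below, using generic structs rather than the driver's struct pcm_urb.

#include <stdio.h>

#define N_STREAMS 4

/* Simplified stand-in for the driver's per-URB bookkeeping. */
struct xfer {
	int id;
	struct xfer *peer;	/* matching transfer in the other direction */
};

int main(void)
{
	struct xfer in[N_STREAMS], out[N_STREAMS];

	/* Pair input and output transfers index by index, as the 6fire PCM
	 * code does for its in_urbs[]/out_urbs[] arrays. */
	for (int i = 0; i < N_STREAMS; i++) {
		in[i].id = i;
		out[i].id = i;
		in[i].peer = &out[i];
		out[i].peer = &in[i];
	}

	/* An "input completed" handler can reach its output side without any
	 * lookup, mirroring the out_urb = in_urb->peer line above. */
	struct xfer *in_done = &in[2];
	struct xfer *out_side = in_done->peer;
	printf("input %d pairs with output %d\n", in_done->id, out_side->id);
	return 0;
}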