nd                190 arch/arm/mach-exynos/firmware.c 	struct device_node *nd;
nd                193 arch/arm/mach-exynos/firmware.c 	nd = of_find_compatible_node(NULL, NULL,
nd                195 arch/arm/mach-exynos/firmware.c 	if (!nd)
nd                198 arch/arm/mach-exynos/firmware.c 	addr = of_get_address(nd, 0, NULL, NULL);
nd                199 arch/arm/mach-exynos/firmware.c 	of_node_put(nd);
nd                222 arch/arm64/mm/numa.c 	void *nd;
nd                233 arch/arm64/mm/numa.c 	nd = __va(nd_pa);
nd                242 arch/arm64/mm/numa.c 	node_data[nid] = nd;
nd                158 arch/ia64/kernel/crash.c 	struct ia64_mca_notify_die *nd;
nd                191 arch/ia64/kernel/crash.c 	nd = (struct ia64_mca_notify_die *)args->err;
nd                196 arch/ia64/kernel/crash.c 		if (kdump_on_init && (nd->sos->rv_rc != 1)) {
nd                203 arch/ia64/kernel/crash.c 		if (kdump_on_init && (nd->sos->rv_rc != 1))
nd                208 arch/ia64/kernel/crash.c 		if (kdump_on_fatal_mca && !(*(nd->data))) {
nd                780 arch/ia64/kernel/mca.c 	struct ia64_mca_notify_die nd =
nd                786 arch/ia64/kernel/mca.c 	NOTIFY_MCA(DIE_MCA_RENDZVOUS_ENTER, get_irq_regs(), (long)&nd, 1);
nd                794 arch/ia64/kernel/mca.c 	NOTIFY_MCA(DIE_MCA_RENDZVOUS_PROCESS, get_irq_regs(), (long)&nd, 1);
nd                800 arch/ia64/kernel/mca.c 	NOTIFY_MCA(DIE_MCA_RENDZVOUS_LEAVE, get_irq_regs(), (long)&nd, 1);
nd               1287 arch/ia64/kernel/mca.c 	struct ia64_mca_notify_die nd =
nd               1304 arch/ia64/kernel/mca.c 	NOTIFY_MCA(DIE_MCA_MONARCH_ENTER, regs, (long)&nd, 1);
nd               1323 arch/ia64/kernel/mca.c 	NOTIFY_MCA(DIE_MCA_MONARCH_PROCESS, regs, (long)&nd, 1);
nd               1349 arch/ia64/kernel/mca.c 	NOTIFY_MCA(DIE_MCA_MONARCH_LEAVE, regs, (long)&nd, 1);
nd               1667 arch/ia64/kernel/mca.c 	struct ia64_mca_notify_die nd =
nd               1670 arch/ia64/kernel/mca.c 	NOTIFY_INIT(DIE_INIT_ENTER, regs, (long)&nd, 0);
nd               1714 arch/ia64/kernel/mca.c 		NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1);
nd               1715 arch/ia64/kernel/mca.c 		NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1);
nd               1725 arch/ia64/kernel/mca.c 		NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1);
nd               1735 arch/ia64/kernel/mca.c 	NOTIFY_INIT(DIE_INIT_MONARCH_ENTER, regs, (long)&nd, 1);
nd               1750 arch/ia64/kernel/mca.c 	NOTIFY_INIT(DIE_INIT_MONARCH_PROCESS, regs, (long)&nd, 1);
nd               1751 arch/ia64/kernel/mca.c 	NOTIFY_INIT(DIE_INIT_MONARCH_LEAVE, regs, (long)&nd, 1);
nd                997 arch/mips/include/uapi/asm/inst.h 	__BITFIELD_FIELD(unsigned int nd : 1,
nd                381 arch/mips/kernel/branch.c 				if (inst.rr.nd)
nd                806 arch/powerpc/mm/numa.c 	void *nd;
nd                814 arch/powerpc/mm/numa.c 	nd = __va(nd_pa);
nd                823 arch/powerpc/mm/numa.c 	node_data[nid] = nd;
nd                235 arch/powerpc/platforms/cell/spufs/spufs.h long spufs_create(struct path *nd, struct dentry *dentry, unsigned int flags,
nd               1031 arch/powerpc/platforms/powermac/pci.c 	struct device_node* nd;
nd               1033 arch/powerpc/platforms/powermac/pci.c 	for_each_node_by_name(nd, "firewire") {
nd               1034 arch/powerpc/platforms/powermac/pci.c 		if (nd->parent && (of_device_is_compatible(nd, "pci106b,18") ||
nd               1035 arch/powerpc/platforms/powermac/pci.c 				   of_device_is_compatible(nd, "pci106b,30") ||
nd               1036 arch/powerpc/platforms/powermac/pci.c 				   of_device_is_compatible(nd, "pci11c1,5811"))
nd               1037 arch/powerpc/platforms/powermac/pci.c 		    && of_device_is_compatible(nd->parent, "uni-north")) {
nd               1038 arch/powerpc/platforms/powermac/pci.c 			pmac_call_feature(PMAC_FTR_1394_ENABLE, nd, 0, 0);
nd               1039 arch/powerpc/platforms/powermac/pci.c 			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, nd, 0, 0);
nd               1042 arch/powerpc/platforms/powermac/pci.c 	for_each_node_by_name(nd, "ethernet") {
nd               1043 arch/powerpc/platforms/powermac/pci.c 		if (nd->parent && of_device_is_compatible(nd, "gmac")
nd               1044 arch/powerpc/platforms/powermac/pci.c 		    && of_device_is_compatible(nd->parent, "uni-north"))
nd               1045 arch/powerpc/platforms/powermac/pci.c 			pmac_call_feature(PMAC_FTR_GMAC_ENABLE, nd, 0, 0);
nd                 34 arch/sparc/kernel/devices.c static int check_cpu_node(phandle nd, int *cur_inst,
nd                 38 arch/sparc/kernel/devices.c 	if (!compare(nd, *cur_inst, compare_arg)) {
nd                 40 arch/sparc/kernel/devices.c 			*prom_node = nd;
nd                 42 arch/sparc/kernel/devices.c 			*mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
nd                 74 arch/sparc/kernel/devices.c static int cpu_instance_compare(phandle nd, int instance, void *_arg)
nd                 89 arch/sparc/kernel/devices.c static int cpu_mid_compare(phandle nd, int instance, void *_arg)
nd                 94 arch/sparc/kernel/devices.c 	this_mid = prom_getintdefault(nd, cpu_mid_prop(), 0);
nd               1046 arch/sparc/mm/srmmu.c 	phandle nd;
nd               1055 arch/sparc/mm/srmmu.c 	nd = prom_getchild(prom_root_node);
nd               1056 arch/sparc/mm/srmmu.c 	while ((nd = prom_getsibling(nd)) != 0) {
nd               1057 arch/sparc/mm/srmmu.c 		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
nd               1059 arch/sparc/mm/srmmu.c 			vac_line_size = prom_getint(nd, "cache-line-size");
nd               1064 arch/sparc/mm/srmmu.c 			cache_lines = prom_getint(nd, "cache-nlines");
nd               1085 arch/sparc/mm/srmmu.c 	if (nd == 0) {
nd                192 arch/x86/mm/numa.c 	void *nd;
nd                205 arch/x86/mm/numa.c 	nd = __va(nd_pa);
nd                214 arch/x86/mm/numa.c 	node_data[nid] = nd;
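Note: the three numa.c groups above (arm64, powerpc, x86) share the same shape: allocate the per-node descriptor from memblock, convert the physical address with __va(), and publish it as node_data[nid] (the kernel also zeroes and further initializes the descriptor, which is not visible in these entries). A minimal user-space sketch of that allocate-then-publish pattern follows; malloc() stands in for the memblock allocator, and fake_pglist_data, fake_node_data and MAX_NODES are illustrative names, not kernel API.

/* Simplified user-space sketch of the "allocate, map, publish" pattern seen
 * in the arch numa.c entries above; malloc() stands in for the memblock
 * allocator and fake_node_data[] for the kernel's node_data[] array. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_NODES 4

struct fake_pglist_data {               /* stand-in for pg_data_t */
        int node_id;
        unsigned long node_start_pfn;
};

static struct fake_pglist_data *fake_node_data[MAX_NODES];

static int alloc_node_data(int nid)
{
        struct fake_pglist_data *nd;

        nd = malloc(sizeof(*nd));       /* kernel: memblock alloc + __va() */
        if (!nd)
                return -1;

        memset(nd, 0, sizeof(*nd));     /* descriptor starts out zeroed */
        nd->node_id = nid;
        fake_node_data[nid] = nd;       /* publish: node_data[nid] = nd; */
        return 0;
}

int main(void)
{
        for (int nid = 0; nid < MAX_NODES; nid++)
                if (alloc_node_data(nid) == 0)
                        printf("node %d descriptor at %p\n",
                               nid, (void *)fake_node_data[nid]);
        return 0;
}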
nd                133 drivers/block/aoe/aoe.h 	struct net_device *nd;
nd                 63 drivers/block/aoe/aoeblk.c 	struct net_device *nds[8], **nd, **nnd, **ne;
nd                 69 drivers/block/aoe/aoeblk.c 	nd = nds;
nd                 70 drivers/block/aoe/aoeblk.c 	ne = nd + ARRAY_SIZE(nds);
nd                 76 drivers/block/aoe/aoeblk.c 		for (; ifp < e && ifp->nd; ifp++) {
nd                 77 drivers/block/aoe/aoeblk.c 			for (nnd = nds; nnd < nd; nnd++)
nd                 78 drivers/block/aoe/aoeblk.c 				if (*nnd == ifp->nd)
nd                 80 drivers/block/aoe/aoeblk.c 			if (nnd == nd && nd != ne)
nd                 81 drivers/block/aoe/aoeblk.c 				*nd++ = ifp->nd;
nd                 85 drivers/block/aoe/aoeblk.c 	ne = nd;
nd                 86 drivers/block/aoe/aoeblk.c 	nd = nds;
nd                 87 drivers/block/aoe/aoeblk.c 	if (*nd == NULL)
nd                 89 drivers/block/aoe/aoeblk.c 	for (p = page; nd < ne; nd++)
nd                 91 drivers/block/aoe/aoeblk.c 			p == page ? "" : ",", (*nd)->name);
nd                146 drivers/block/aoe/aoeblk.c 		for (; ifp->nd && ifp < ife; ifp++) {
nd                147 drivers/block/aoe/aoeblk.c 			seq_printf(s, "%c%s", c, ifp->nd->name);
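Note: the aoeblk.c entries above collect each interface's ifp->nd pointer into a small fixed array while skipping duplicates and silently stopping when the array is full. A standalone sketch of that duplicate-skipping loop, with plain ints standing in for the net_device pointers:

/* Sketch of the collection loop visible in the aoeblk.c entries above:
 * keep each candidate only once, stop when the fixed array is full. */
#include <stdio.h>

#define NSLOTS 8

int main(void)
{
        int candidates[] = { 3, 7, 3, 9, 7, 7, 1 };
        int uniq[NSLOTS];
        int *nd = uniq, *ne = uniq + NSLOTS, *nnd;

        for (unsigned i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++) {
                for (nnd = uniq; nnd < nd; nnd++)       /* already collected? */
                        if (*nnd == candidates[i])
                                break;
                if (nnd == nd && nd != ne)              /* new and room left */
                        *nd++ = candidates[i];
        }

        for (nnd = uniq; nnd < nd; nnd++)
                printf("%d\n", *nnd);                   /* prints 3 7 9 1 */
        return 0;
}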
nd                133 drivers/block/aoe/aoecmd.c 	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
nd                163 drivers/block/aoe/aoecmd.c 	if (ifp >= &t->ifs[NAOEIFS] || ifp->nd == NULL)
nd                165 drivers/block/aoe/aoecmd.c 	if (ifp->nd == NULL)
nd                274 drivers/block/aoe/aoecmd.c 		&& t->ifp->nd) {
nd                365 drivers/block/aoe/aoecmd.c 	skb->dev = t->ifp->nd;
nd                485 drivers/block/aoe/aoecmd.c 	memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src);
nd                487 drivers/block/aoe/aoecmd.c 	skb->dev = t->ifp->nd;
nd                526 drivers/block/aoe/aoecmd.c getif(struct aoetgt *t, struct net_device *nd)
nd                533 drivers/block/aoe/aoecmd.c 		if (p->nd == nd)
nd                542 drivers/block/aoe/aoecmd.c 	struct net_device *nd;
nd                545 drivers/block/aoe/aoecmd.c 	nd = ifp->nd;
nd                549 drivers/block/aoe/aoecmd.c 	e->nd = NULL;
nd                550 drivers/block/aoe/aoecmd.c 	dev_put(nd);
nd                803 drivers/block/aoe/aoecmd.c 			&& (ifp != t->ifs || t->ifs[1].nd)) {
nd               1408 drivers/block/aoe/aoecmd.c 	skb->dev = t->ifp->nd;
nd               1492 drivers/block/aoe/aoecmd.c setifbcnt(struct aoetgt *t, struct net_device *nd, int bcnt)
nd               1503 drivers/block/aoe/aoecmd.c 		if (p->nd == NULL)
nd               1505 drivers/block/aoe/aoecmd.c 		if (p->nd == nd) {
nd               1507 drivers/block/aoe/aoecmd.c 			nd = NULL;
nd               1511 drivers/block/aoe/aoecmd.c 	if (nd) {
nd               1516 drivers/block/aoe/aoecmd.c 		dev_hold(nd);
nd               1517 drivers/block/aoe/aoecmd.c 		p->nd = nd;
nd                506 drivers/block/aoe/aoedev.c 		if (!ifp->nd)
nd                508 drivers/block/aoe/aoedev.c 		dev_put(ifp->nd);
nd                 62 drivers/clk/at91/clk-audio-pll.c 	u8 nd;
nd                102 drivers/clk/at91/clk-audio-pll.c 			   AT91_PMC_AUDIO_PLL_ND(frac->nd));
nd                160 drivers/clk/at91/clk-audio-pll.c 					unsigned long nd, unsigned long fracr)
nd                170 drivers/clk/at91/clk-audio-pll.c 	return parent_rate * (nd + 1) + fr;
nd                179 drivers/clk/at91/clk-audio-pll.c 	fout = clk_audio_pll_fout(parent_rate, frac->nd, frac->fracr);
nd                182 drivers/clk/at91/clk-audio-pll.c 		 fout, frac->nd, (unsigned long)frac->fracr);
nd                218 drivers/clk/at91/clk-audio-pll.c 					   unsigned long *nd,
nd                231 drivers/clk/at91/clk-audio-pll.c 	*nd = tmp - 1;
nd                247 drivers/clk/at91/clk-audio-pll.c 	unsigned long fracr, nd;
nd                259 drivers/clk/at91/clk-audio-pll.c 					      &nd, &fracr);
nd                263 drivers/clk/at91/clk-audio-pll.c 	req->rate = clk_audio_pll_fout(req->best_parent_rate, nd, fracr);
nd                268 drivers/clk/at91/clk-audio-pll.c 		 __func__, req->rate, nd, fracr);
nd                368 drivers/clk/at91/clk-audio-pll.c 	unsigned long fracr, nd;
nd                377 drivers/clk/at91/clk-audio-pll.c 	ret = clk_audio_pll_frac_compute_frac(rate, parent_rate, &nd, &fracr);
nd                381 drivers/clk/at91/clk-audio-pll.c 	frac->nd = nd;
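Note: the clk-audio-pll.c entries above expose the output-rate relation fout = parent_rate * (nd + 1) + fr, with nd recovered as tmp - 1 in the clk_audio_pll_frac_compute_frac() path. A worked example of the integer part of that relation; how fr is derived from fracr is not visible in these entries, so it is passed in directly here.

/* Worked example of fout = parent_rate * (nd + 1) + fr from the entries
 * above; fr is supplied pre-scaled because its derivation is not shown. */
#include <stdio.h>

static unsigned long audio_pll_fout(unsigned long parent_rate,
                                    unsigned long nd, unsigned long fr)
{
        return parent_rate * (nd + 1) + fr;
}

int main(void)
{
        /* e.g. a 12 MHz parent with nd = 56 gives 684 MHz before the
         * fractional contribution is added */
        printf("%lu\n", audio_pll_fout(12000000UL, 56, 0));
        return 0;
}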
nd                263 drivers/clk/st/clkgen-fsyn.c 	unsigned long nd = fs->ndiv + 16; /* ndiv value */
nd                265 drivers/clk/st/clkgen-fsyn.c 	*rate = input * nd;
nd                285 drivers/fpga/of-fpga-region.c 					   struct of_overlay_notify_data *nd)
nd                291 drivers/fpga/of-fpga-region.c 	info = of_fpga_region_parse_ov(region, nd->overlay);
nd                325 drivers/fpga/of-fpga-region.c 					      struct of_overlay_notify_data *nd)
nd                347 drivers/fpga/of-fpga-region.c 	struct of_overlay_notify_data *nd = arg;
nd                368 drivers/fpga/of-fpga-region.c 	region = of_fpga_region_find(nd->target);
nd                375 drivers/fpga/of-fpga-region.c 		ret = of_fpga_region_notify_pre_apply(region, nd);
nd                379 drivers/fpga/of-fpga-region.c 		of_fpga_region_notify_post_remove(region, nd);
nd                173 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd = hid_get_drvdata(hdev);
nd                175 drivers/hid/hid-ntrig.c 	return sprintf(buf, "%d\n", nd->sensor_physical_width);
nd                185 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd = hid_get_drvdata(hdev);
nd                187 drivers/hid/hid-ntrig.c 	return sprintf(buf, "%d\n", nd->sensor_physical_height);
nd                197 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd = hid_get_drvdata(hdev);
nd                199 drivers/hid/hid-ntrig.c 	return sprintf(buf, "%d\n", nd->sensor_logical_width);
nd                209 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd = hid_get_drvdata(hdev);
nd                211 drivers/hid/hid-ntrig.c 	return sprintf(buf, "%d\n", nd->sensor_logical_height);
nd                221 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd = hid_get_drvdata(hdev);
nd                223 drivers/hid/hid-ntrig.c 	return sprintf(buf, "%d\n", nd->min_width *
nd                224 drivers/hid/hid-ntrig.c 				    nd->sensor_physical_width /
nd                225 drivers/hid/hid-ntrig.c 				    nd->sensor_logical_width);
nd                233 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd = hid_get_drvdata(hdev);
nd                240 drivers/hid/hid-ntrig.c 	if (val > nd->sensor_physical_width)
nd                243 drivers/hid/hid-ntrig.c 	nd->min_width = val * nd->sensor_logical_width /
nd                244 drivers/hid/hid-ntrig.c 			      nd->sensor_physical_width;
nd                256 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd = hid_get_drvdata(hdev);
nd                258 drivers/hid/hid-ntrig.c 	return sprintf(buf, "%d\n", nd->min_height *
nd                259 drivers/hid/hid-ntrig.c 				    nd->sensor_physical_height /
nd                260 drivers/hid/hid-ntrig.c 				    nd->sensor_logical_height);
nd                268 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd = hid_get_drvdata(hdev);
nd                275 drivers/hid/hid-ntrig.c 	if (val > nd->sensor_physical_height)
nd                278 drivers/hid/hid-ntrig.c 	nd->min_height = val * nd->sensor_logical_height /
nd                279 drivers/hid/hid-ntrig.c 			       nd->sensor_physical_height;
nd                292 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd = hid_get_drvdata(hdev);
nd                294 drivers/hid/hid-ntrig.c 	return sprintf(buf, "%d\n", nd->activate_slack);
nd                302 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd = hid_get_drvdata(hdev);
nd                312 drivers/hid/hid-ntrig.c 	nd->activate_slack = val;
nd                325 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd = hid_get_drvdata(hdev);
nd                327 drivers/hid/hid-ntrig.c 	return sprintf(buf, "%d\n", nd->activation_width *
nd                328 drivers/hid/hid-ntrig.c 				    nd->sensor_physical_width /
nd                329 drivers/hid/hid-ntrig.c 				    nd->sensor_logical_width);
nd                337 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd = hid_get_drvdata(hdev);
nd                344 drivers/hid/hid-ntrig.c 	if (val > nd->sensor_physical_width)
nd                347 drivers/hid/hid-ntrig.c 	nd->activation_width = val * nd->sensor_logical_width /
nd                348 drivers/hid/hid-ntrig.c 				     nd->sensor_physical_width;
nd                361 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd = hid_get_drvdata(hdev);
nd                363 drivers/hid/hid-ntrig.c 	return sprintf(buf, "%d\n", nd->activation_height *
nd                364 drivers/hid/hid-ntrig.c 				    nd->sensor_physical_height /
nd                365 drivers/hid/hid-ntrig.c 				    nd->sensor_logical_height);
nd                373 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd = hid_get_drvdata(hdev);
nd                380 drivers/hid/hid-ntrig.c 	if (val > nd->sensor_physical_height)
nd                383 drivers/hid/hid-ntrig.c 	nd->activation_height = val * nd->sensor_logical_height /
nd                384 drivers/hid/hid-ntrig.c 				      nd->sensor_physical_height;
nd                397 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd = hid_get_drvdata(hdev);
nd                399 drivers/hid/hid-ntrig.c 	return sprintf(buf, "%d\n", -nd->deactivate_slack);
nd                407 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd = hid_get_drvdata(hdev);
nd                422 drivers/hid/hid-ntrig.c 	nd->deactivate_slack = -val;
nd                458 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd = hid_get_drvdata(hdev);
nd                474 drivers/hid/hid-ntrig.c 			if (!nd->sensor_logical_width) {
nd                475 drivers/hid/hid-ntrig.c 				nd->sensor_logical_width =
nd                478 drivers/hid/hid-ntrig.c 				nd->sensor_physical_width =
nd                481 drivers/hid/hid-ntrig.c 				nd->activation_width = activation_width *
nd                482 drivers/hid/hid-ntrig.c 					nd->sensor_logical_width /
nd                483 drivers/hid/hid-ntrig.c 					nd->sensor_physical_width;
nd                484 drivers/hid/hid-ntrig.c 				nd->min_width = min_width *
nd                485 drivers/hid/hid-ntrig.c 					nd->sensor_logical_width /
nd                486 drivers/hid/hid-ntrig.c 					nd->sensor_physical_width;
nd                496 drivers/hid/hid-ntrig.c 			if (!nd->sensor_logical_height) {
nd                497 drivers/hid/hid-ntrig.c 				nd->sensor_logical_height =
nd                500 drivers/hid/hid-ntrig.c 				nd->sensor_physical_height =
nd                503 drivers/hid/hid-ntrig.c 				nd->activation_height = activation_height *
nd                504 drivers/hid/hid-ntrig.c 					nd->sensor_logical_height /
nd                505 drivers/hid/hid-ntrig.c 					nd->sensor_physical_height;
nd                506 drivers/hid/hid-ntrig.c 				nd->min_height = min_height *
nd                507 drivers/hid/hid-ntrig.c 					nd->sensor_logical_height /
nd                508 drivers/hid/hid-ntrig.c 					nd->sensor_physical_height;
nd                569 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd = hid_get_drvdata(hid);
nd                590 drivers/hid/hid-ntrig.c 		nd->reading_mt = true;
nd                591 drivers/hid/hid-ntrig.c 		nd->first_contact_touch = false;
nd                594 drivers/hid/hid-ntrig.c 		nd->tipswitch = value;
nd                598 drivers/hid/hid-ntrig.c 		nd->confidence = value;
nd                601 drivers/hid/hid-ntrig.c 		nd->x = value;
nd                603 drivers/hid/hid-ntrig.c 		nd->mt_foot_count = 0;
nd                606 drivers/hid/hid-ntrig.c 		nd->y = value;
nd                609 drivers/hid/hid-ntrig.c 		nd->id = value;
nd                612 drivers/hid/hid-ntrig.c 		nd->w = value;
nd                615 drivers/hid/hid-ntrig.c 		nd->h = value;
nd                621 drivers/hid/hid-ntrig.c 		if (!nd->reading_mt) {
nd                627 drivers/hid/hid-ntrig.c 					 nd->tipswitch);
nd                629 drivers/hid/hid-ntrig.c 					 nd->tipswitch);
nd                630 drivers/hid/hid-ntrig.c 			input_event(input, EV_ABS, ABS_X, nd->x);
nd                631 drivers/hid/hid-ntrig.c 			input_event(input, EV_ABS, ABS_Y, nd->y);
nd                643 drivers/hid/hid-ntrig.c 		if (nd->mt_foot_count >= 4)
nd                646 drivers/hid/hid-ntrig.c 		nd->mt_footer[nd->mt_foot_count++] = value;
nd                649 drivers/hid/hid-ntrig.c 		if (nd->mt_foot_count != 4)
nd                653 drivers/hid/hid-ntrig.c 		if (nd->mt_footer[2]) {
nd                661 drivers/hid/hid-ntrig.c 			nd->act_state = deactivate_slack - 1;
nd                662 drivers/hid/hid-ntrig.c 			nd->confidence = false;
nd                670 drivers/hid/hid-ntrig.c 		if (nd->mt_footer[0]) {
nd                676 drivers/hid/hid-ntrig.c 			if (nd->w < nd->min_width ||
nd                677 drivers/hid/hid-ntrig.c 			    nd->h < nd->min_height)
nd                678 drivers/hid/hid-ntrig.c 				nd->confidence = false;
nd                682 drivers/hid/hid-ntrig.c 		if (nd->act_state > 0) {
nd                686 drivers/hid/hid-ntrig.c 			if (nd->w >= nd->activation_width &&
nd                687 drivers/hid/hid-ntrig.c 			    nd->h >= nd->activation_height) {
nd                688 drivers/hid/hid-ntrig.c 				if (nd->id)
nd                692 drivers/hid/hid-ntrig.c 					nd->act_state = 0;
nd                699 drivers/hid/hid-ntrig.c 					nd->act_state = 1;
nd                711 drivers/hid/hid-ntrig.c 		if (!nd->confidence)
nd                715 drivers/hid/hid-ntrig.c 		if (nd->id == 0) {
nd                722 drivers/hid/hid-ntrig.c 			nd->first_contact_touch = nd->confidence;
nd                723 drivers/hid/hid-ntrig.c 			input_event(input, EV_ABS, ABS_X, nd->x);
nd                724 drivers/hid/hid-ntrig.c 			input_event(input, EV_ABS, ABS_Y, nd->y);
nd                728 drivers/hid/hid-ntrig.c 		input_event(input, EV_ABS, ABS_MT_POSITION_X, nd->x);
nd                729 drivers/hid/hid-ntrig.c 		input_event(input, EV_ABS, ABS_MT_POSITION_Y, nd->y);
nd                735 drivers/hid/hid-ntrig.c 		if (nd->w > nd->h) {
nd                739 drivers/hid/hid-ntrig.c 					ABS_MT_TOUCH_MAJOR, nd->w);
nd                741 drivers/hid/hid-ntrig.c 					ABS_MT_TOUCH_MINOR, nd->h);
nd                746 drivers/hid/hid-ntrig.c 					ABS_MT_TOUCH_MAJOR, nd->h);
nd                748 drivers/hid/hid-ntrig.c 					ABS_MT_TOUCH_MINOR, nd->w);
nd                754 drivers/hid/hid-ntrig.c 		if (!nd->reading_mt) /* Just to be sure */
nd                757 drivers/hid/hid-ntrig.c 		nd->reading_mt = false;
nd                781 drivers/hid/hid-ntrig.c 		if (nd->act_state > 0) { /* Currently inactive */
nd                787 drivers/hid/hid-ntrig.c 				nd->act_state = (nd->act_state > value)
nd                788 drivers/hid/hid-ntrig.c 						? nd->act_state - value
nd                795 drivers/hid/hid-ntrig.c 				nd->act_state = nd->activate_slack;
nd                804 drivers/hid/hid-ntrig.c 			if (value && nd->act_state >=
nd                805 drivers/hid/hid-ntrig.c 				     nd->deactivate_slack)
nd                810 drivers/hid/hid-ntrig.c 				nd->act_state = 0;
nd                811 drivers/hid/hid-ntrig.c 			else if (nd->act_state <= nd->deactivate_slack)
nd                816 drivers/hid/hid-ntrig.c 				nd->act_state =
nd                817 drivers/hid/hid-ntrig.c 					nd->activate_slack;
nd                819 drivers/hid/hid-ntrig.c 				nd->act_state--;
nd                824 drivers/hid/hid-ntrig.c 		if (nd->first_contact_touch && nd->act_state <= 0) {
nd                896 drivers/hid/hid-ntrig.c 	struct ntrig_data *nd;
nd                903 drivers/hid/hid-ntrig.c 	nd = kmalloc(sizeof(struct ntrig_data), GFP_KERNEL);
nd                904 drivers/hid/hid-ntrig.c 	if (!nd) {
nd                909 drivers/hid/hid-ntrig.c 	nd->reading_mt = false;
nd                910 drivers/hid/hid-ntrig.c 	nd->min_width = 0;
nd                911 drivers/hid/hid-ntrig.c 	nd->min_height = 0;
nd                912 drivers/hid/hid-ntrig.c 	nd->activate_slack = activate_slack;
nd                913 drivers/hid/hid-ntrig.c 	nd->act_state = activate_slack;
nd                914 drivers/hid/hid-ntrig.c 	nd->deactivate_slack = -deactivate_slack;
nd                915 drivers/hid/hid-ntrig.c 	nd->sensor_logical_width = 1;
nd                916 drivers/hid/hid-ntrig.c 	nd->sensor_logical_height = 1;
nd                917 drivers/hid/hid-ntrig.c 	nd->sensor_physical_width = 1;
nd                918 drivers/hid/hid-ntrig.c 	nd->sensor_physical_height = 1;
nd                920 drivers/hid/hid-ntrig.c 	hid_set_drvdata(hdev, nd);
nd                959 drivers/hid/hid-ntrig.c 	kfree(nd);
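Note: the hid-ntrig.c entries above repeatedly convert between physical and logical sensor units: the sysfs store paths scale the user value by sensor_logical_* / sensor_physical_*, and the show paths apply the inverse ratio. A small sketch of the store-side conversion with invented sensor dimensions:

/* Sketch of the physical-to-logical scaling used throughout the hid-ntrig.c
 * entries above; the sensor dimensions below are made up for illustration. */
#include <stdio.h>

int main(void)
{
        unsigned int sensor_logical_width  = 9600;      /* illustrative */
        unsigned int sensor_physical_width = 300;       /* illustrative */
        unsigned int val = 15;                          /* user-supplied width */

        unsigned int min_width = val * sensor_logical_width /
                                       sensor_physical_width;

        printf("stored min_width = %u logical units\n", min_width);  /* 480 */
        return 0;
}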
nd                361 drivers/hwtracing/coresight/coresight.c 					struct coresight_node *nd)
nd                366 drivers/hwtracing/coresight/coresight.c 	if (!nd)
nd                367 drivers/hwtracing/coresight/coresight.c 		nd = list_first_entry(path, struct coresight_node, link);
nd                369 drivers/hwtracing/coresight/coresight.c 	list_for_each_entry_continue(nd, path, link) {
nd                370 drivers/hwtracing/coresight/coresight.c 		csdev = nd->csdev;
nd                397 drivers/hwtracing/coresight/coresight.c 			parent = list_prev_entry(nd, link)->csdev;
nd                398 drivers/hwtracing/coresight/coresight.c 			child = list_next_entry(nd, link)->csdev;
nd                417 drivers/hwtracing/coresight/coresight.c 	struct coresight_node *nd;
nd                420 drivers/hwtracing/coresight/coresight.c 	list_for_each_entry_reverse(nd, path, link) {
nd                421 drivers/hwtracing/coresight/coresight.c 		csdev = nd->csdev;
nd                451 drivers/hwtracing/coresight/coresight.c 			parent = list_prev_entry(nd, link)->csdev;
nd                452 drivers/hwtracing/coresight/coresight.c 			child = list_next_entry(nd, link)->csdev;
nd                465 drivers/hwtracing/coresight/coresight.c 	coresight_disable_path_from(path, nd);
nd                699 drivers/hwtracing/coresight/coresight.c 	struct coresight_node *nd, *next;
nd                701 drivers/hwtracing/coresight/coresight.c 	list_for_each_entry_safe(nd, next, path, link) {
nd                702 drivers/hwtracing/coresight/coresight.c 		csdev = nd->csdev;
nd                705 drivers/hwtracing/coresight/coresight.c 		list_del(&nd->link);
nd                706 drivers/hwtracing/coresight/coresight.c 		kfree(nd);
nd                586 drivers/infiniband/hw/hfi1/mad.c 	struct opa_node_description *nd;
nd                588 drivers/infiniband/hw/hfi1/mad.c 	if (am || smp_length_check(sizeof(*nd), max_len)) {
nd                593 drivers/infiniband/hw/hfi1/mad.c 	nd = (struct opa_node_description *)data;
nd                595 drivers/infiniband/hw/hfi1/mad.c 	memcpy(nd->data, ibdev->node_desc, sizeof(nd->data));
nd                598 drivers/infiniband/hw/hfi1/mad.c 		*resp_len += sizeof(*nd);
nd                387 drivers/infiniband/hw/mlx4/cm.c 	struct rb_node *nd;
nd                419 drivers/infiniband/hw/mlx4/cm.c 		nd = rb_first(sl_id_map);
nd                420 drivers/infiniband/hw/mlx4/cm.c 		while (nd) {
nd                422 drivers/infiniband/hw/mlx4/cm.c 				rb_entry(nd, struct id_map_entry, node);
nd                423 drivers/infiniband/hw/mlx4/cm.c 			nd = rb_next(nd);
nd                856 drivers/mmc/host/mmci.c 	struct mmci_dmae_next *nd = &dmae->next_data;
nd                862 drivers/mmc/host/mmci.c 		return _mmci_dmae_prep_data(host, data, &nd->chan, &nd->desc);
nd               1412 drivers/net/ethernet/amd/sunlance.c 			struct device_node *nd;
nd               1417 drivers/net/ethernet/amd/sunlance.c 			nd = of_find_node_by_path("/options");
nd               1418 drivers/net/ethernet/amd/sunlance.c 			if (!nd)
nd               1421 drivers/net/ethernet/amd/sunlance.c 			prop = of_get_property(nd, "tpe-link-test?", NULL);
nd               1433 drivers/net/ethernet/amd/sunlance.c 			of_node_put(nd);
nd               1416 drivers/net/ethernet/chelsio/cxgb/sge.c 		struct net_device *nd = adap->port[i].dev;
nd               1418 drivers/net/ethernet/chelsio/cxgb/sge.c 		if (test_and_clear_bit(nd->if_port, &sge->stopped_tx_queues) &&
nd               1419 drivers/net/ethernet/chelsio/cxgb/sge.c 		    netif_running(nd)) {
nd               1421 drivers/net/ethernet/chelsio/cxgb/sge.c 			netif_wake_queue(nd);
nd               1711 drivers/net/ethernet/faraday/ftgmac100.c static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
nd               1713 drivers/net/ethernet/faraday/ftgmac100.c 	if (unlikely(nd->state != ncsi_dev_state_functional))
nd               1716 drivers/net/ethernet/faraday/ftgmac100.c 	netdev_dbg(nd->dev, "NCSI interface %s\n",
nd               1717 drivers/net/ethernet/faraday/ftgmac100.c 		   nd->link_up ? "up" : "down");
nd                124 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 		struct net_device *nd = vsi->netdev;
nd                127 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 nd->name, nd->state, nd->flags);
nd                129 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 (unsigned long int)nd->features);
nd                131 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 (unsigned long int)nd->hw_features);
nd                133 drivers/net/ethernet/intel/i40e/i40e_debugfs.c 			 (unsigned long int)nd->vlan_features);
nd                855 drivers/net/vxlan.c 	struct vxlan_rdst *rd, *nd;
nd                857 drivers/net/vxlan.c 	list_for_each_entry_safe(rd, nd, &f->remotes, list) {
nd               3545 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	memcpy(cfg->wowl.nd->ssid.ssid, netinfo->SSID, netinfo->SSID_len);
nd               3546 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	cfg->wowl.nd->ssid.ssid_len = netinfo->SSID_len;
nd               3547 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	cfg->wowl.nd->n_channels = 1;
nd               3548 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	cfg->wowl.nd->channels[0] =
nd               3553 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	cfg->wowl.nd_info->matches[0] = cfg->wowl.nd;
nd               5845 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	kfree(cfg->wowl.nd);
nd               5846 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	cfg->wowl.nd = NULL;
nd               5861 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	cfg->wowl.nd = kzalloc(sizeof(*cfg->wowl.nd) + sizeof(u32), GFP_KERNEL);
nd               5862 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c 	if (!cfg->wowl.nd)
nd                251 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h 	struct cfg80211_wowlan_nd_match *nd;
nd                371 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h static inline struct brcmf_cfg80211_profile *ndev_to_prof(struct net_device *nd)
nd                373 drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h 	struct brcmf_if *ifp = netdev_priv(nd);
nd                123 drivers/nfc/fdp/fdp.c 	u32 nd, num, delta;
nd                126 drivers/nfc/fdp/fdp.c 	nd = (24 * fc) / clock_freq;
nd                127 drivers/nfc/fdp/fdp.c 	delta = 24 * fc - nd * clock_freq;
nd                138 drivers/nfc/fdp/fdp.c 	data[7] = nd;
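Note: the fdp.c entries above split 24 * fc into a quotient nd and remainder delta with respect to clock_freq; nd is then written to the configuration data, and the later scaling of delta into num is not shown in these entries. A tiny worked example with made-up input values:

/* Quotient/remainder split from the fdp.c entries above, invented inputs. */
#include <stdio.h>

int main(void)
{
        unsigned int fc = 13560, clock_freq = 26000;    /* illustrative values */
        unsigned int nd    = (24 * fc) / clock_freq;
        unsigned int delta = 24 * fc - nd * clock_freq;

        printf("nd=%u delta=%u\n", nd, delta);          /* nd=12 delta=13440 */
        return 0;
}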
nd                149 drivers/nubus/nubus.c unsigned char *nubus_dirptr(const struct nubus_dirent *nd)
nd                151 drivers/nubus/nubus.c 	unsigned char *p = nd->base;
nd                155 drivers/nubus/nubus.c 	nubus_move(&p, nubus_expand32(nd->data), nd->mask);
nd                268 drivers/nubus/nubus.c int nubus_readdir(struct nubus_dir *nd, struct nubus_dirent *ent)
nd                272 drivers/nubus/nubus.c 	if (nd->done)
nd                276 drivers/nubus/nubus.c 	ent->base = nd->ptr;
nd                279 drivers/nubus/nubus.c 	resid = nubus_get_rom(&nd->ptr, 4, nd->mask);
nd                284 drivers/nubus/nubus.c 		nd->done = 1;
nd                292 drivers/nubus/nubus.c 	ent->mask = nd->mask;
nd                162 drivers/of/overlay.c 	struct of_overlay_notify_data nd;
nd                168 drivers/of/overlay.c 		nd.target = fragment->target;
nd                169 drivers/of/overlay.c 		nd.overlay = fragment->overlay;
nd                172 drivers/of/overlay.c 						   action, &nd);
nd                178 drivers/of/overlay.c 			       of_overlay_action_name[action], ret, nd.target);
nd                570 drivers/phy/rockchip/phy-rockchip-inno-hdmi.c 	u8 nd, no_a, no_b, no_d;
nd                574 drivers/phy/rockchip/phy-rockchip-inno-hdmi.c 	nd = inno_read(inno, 0xe2) & RK3228_PRE_PLL_PRE_DIV_MASK;
nd                580 drivers/phy/rockchip/phy-rockchip-inno-hdmi.c 		do_div(vco, nd * 5);
nd                590 drivers/phy/rockchip/phy-rockchip-inno-hdmi.c 		do_div(vco, (nd * (no_a == 1 ? no_b : no_a) * no_d * 2));
nd                717 drivers/phy/rockchip/phy-rockchip-inno-hdmi.c 	u8 nd, no_a, no_b, no_c, no_d;
nd                721 drivers/phy/rockchip/phy-rockchip-inno-hdmi.c 	nd = inno_read(inno, 0xa1) & RK3328_PRE_PLL_PRE_DIV_MASK;
nd                734 drivers/phy/rockchip/phy-rockchip-inno-hdmi.c 		do_div(vco, nd * 5);
nd                745 drivers/phy/rockchip/phy-rockchip-inno-hdmi.c 		do_div(vco, (nd * (no_a == 1 ? no_b : no_a) * no_d * 2));
nd                364 drivers/s390/cio/chsc.c static void format_node_data(char *params, char *id, struct node_descriptor *nd)
nd                369 drivers/s390/cio/chsc.c 	if (nd->validity != ND_VALIDITY_VALID) {
nd                376 drivers/s390/cio/chsc.c 	snprintf(params, PARAMS_LEN, "%02x,%06x", nd->byte0, nd->params);
nd                378 drivers/s390/cio/chsc.c 	id = store_ebcdic(id, nd->type, sizeof(nd->type), '/');
nd                379 drivers/s390/cio/chsc.c 	id = store_ebcdic(id, nd->model, sizeof(nd->model), ',');
nd                380 drivers/s390/cio/chsc.c 	id = store_ebcdic(id, nd->manufacturer, sizeof(nd->manufacturer), '.');
nd                381 drivers/s390/cio/chsc.c 	id = store_ebcdic(id, nd->plant, sizeof(nd->plant), 0);
nd                382 drivers/s390/cio/chsc.c 	id = store_ebcdic(id, nd->seq, sizeof(nd->seq), ',');
nd                383 drivers/s390/cio/chsc.c 	sprintf(id, "%04X", nd->tag);
nd                300 drivers/s390/crypto/ap_bus.c 	int nd;
nd                312 drivers/s390/crypto/ap_bus.c 		nd = (info >> 16) & 0xff;
nd                314 drivers/s390/crypto/ap_bus.c 		if ((info & (1UL << 57)) && nd > 0)
nd                315 drivers/s390/crypto/ap_bus.c 			ap_max_domain_id = nd;
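Note: the ap_bus.c entries above extract nd as the byte at bits 16..23 of a 64-bit info word and only use it as the maximum domain id when the flag at bit 57 is set and nd is non-zero. A self-contained sketch with an invented info value; unsigned long long is used here purely so the 57-bit shift is portable.

/* Field extraction mirroring the ap_bus.c entries above; info is made up. */
#include <stdio.h>

int main(void)
{
        unsigned long long info = (1ULL << 57) | (0x2dULL << 16);
        unsigned int nd = (info >> 16) & 0xff;

        if ((info & (1ULL << 57)) && nd > 0)
                printf("max domain id: %u\n", nd);      /* prints 45 */
        return 0;
}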
nd               1826 drivers/s390/net/qeth_core_main.c 	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
nd               1832 drivers/s390/net/qeth_core_main.c 	if (data_length < sizeof(*nd)) {
nd               1837 drivers/s390/net/qeth_core_main.c 	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
nd               1838 drivers/s390/net/qeth_core_main.c 			       nd->nd1.plant[1] == _ascebc['M'];
nd               1839 drivers/s390/net/qeth_core_main.c 	tag = (u8 *)&nd->nd1.tag;
nd               1843 drivers/s390/net/qeth_core_main.c 	tag = (u8 *)&nd->nd2.tag;
nd               1846 drivers/s390/net/qeth_core_main.c 	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
nd               1847 drivers/s390/net/qeth_core_main.c 				 nd->nd3.model[1] == 0xF0 &&
nd               1848 drivers/s390/net/qeth_core_main.c 				 nd->nd3.model[2] >= 0xF1 &&
nd               1849 drivers/s390/net/qeth_core_main.c 				 nd->nd3.model[2] <= 0xF4;
nd                498 drivers/sbus/char/openprom.c 	phandle nd;
nd                502 drivers/sbus/char/openprom.c 	if (copy_from_user(&nd, argp, sizeof(phandle)))
nd                505 drivers/sbus/char/openprom.c 	if (nd == 0) {
nd                510 drivers/sbus/char/openprom.c 		dp = of_find_node_by_phandle(nd);
nd                511 drivers/sbus/char/openprom.c 		nd = 0;
nd                520 drivers/sbus/char/openprom.c 		nd = dp->phandle;
nd                521 drivers/sbus/char/openprom.c 	if (copy_to_user(argp, &nd, sizeof(phandle)))
nd                118 drivers/scsi/aha1740.h 	 nd:1,			/* No Disconnect */
nd                164 drivers/staging/most/net/net.c 	struct net_dev_context *nd = netdev_priv(dev);
nd                170 drivers/staging/most/net/net.c 	nd->is_mamac =
nd                178 drivers/staging/most/net/net.c 	dev->mtu = nd->is_mamac ? MAMAC_DATA_LEN : ETH_DATA_LEN;
nd                188 drivers/staging/most/net/net.c 	struct net_dev_context *nd = netdev_priv(dev);
nd                193 drivers/staging/most/net/net.c 	if (most_start_channel(nd->iface, nd->rx.ch_id, &comp)) {
nd                199 drivers/staging/most/net/net.c 	if (most_start_channel(nd->iface, nd->tx.ch_id, &comp)) {
nd                201 drivers/staging/most/net/net.c 		most_stop_channel(nd->iface, nd->rx.ch_id, &comp);
nd                212 drivers/staging/most/net/net.c 	if (nd->iface->request_netinfo)
nd                213 drivers/staging/most/net/net.c 		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id, on_netinfo);
nd                222 drivers/staging/most/net/net.c 	struct net_dev_context *nd = netdev_priv(dev);
nd                225 drivers/staging/most/net/net.c 	if (nd->iface->request_netinfo)
nd                226 drivers/staging/most/net/net.c 		nd->iface->request_netinfo(nd->iface, nd->tx.ch_id, NULL);
nd                227 drivers/staging/most/net/net.c 	most_stop_channel(nd->iface, nd->rx.ch_id, &comp);
nd                228 drivers/staging/most/net/net.c 	most_stop_channel(nd->iface, nd->tx.ch_id, &comp);
nd                236 drivers/staging/most/net/net.c 	struct net_dev_context *nd = netdev_priv(dev);
nd                240 drivers/staging/most/net/net.c 	mbo = most_get_mbo(nd->iface, nd->tx.ch_id, &comp);
nd                248 drivers/staging/most/net/net.c 	if (nd->is_mamac)
nd                282 drivers/staging/most/net/net.c 	struct net_dev_context *nd;
nd                284 drivers/staging/most/net/net.c 	list_for_each_entry(nd, &net_devices, list)
nd                285 drivers/staging/most/net/net.c 		if (nd->iface == iface)
nd                286 drivers/staging/most/net/net.c 			return nd;
nd                292 drivers/staging/most/net/net.c 	struct net_dev_context *nd;
nd                296 drivers/staging/most/net/net.c 	nd = get_net_dev(iface);
nd                297 drivers/staging/most/net/net.c 	if (nd && nd->rx.linked && nd->tx.linked)
nd                298 drivers/staging/most/net/net.c 		dev_hold(nd->dev);
nd                300 drivers/staging/most/net/net.c 		nd = NULL;
nd                302 drivers/staging/most/net/net.c 	return nd;
nd                309 drivers/staging/most/net/net.c 	struct net_dev_context *nd;
nd                322 drivers/staging/most/net/net.c 	nd = get_net_dev(iface);
nd                323 drivers/staging/most/net/net.c 	if (!nd) {
nd                331 drivers/staging/most/net/net.c 		nd = netdev_priv(dev);
nd                332 drivers/staging/most/net/net.c 		nd->iface = iface;
nd                333 drivers/staging/most/net/net.c 		nd->dev = dev;
nd                336 drivers/staging/most/net/net.c 		list_add(&nd->list, &net_devices);
nd                339 drivers/staging/most/net/net.c 		ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
nd                341 drivers/staging/most/net/net.c 		ch = ccfg->direction == MOST_CH_TX ? &nd->tx : &nd->rx;
nd                348 drivers/staging/most/net/net.c 		if (register_netdev(nd->dev)) {
nd                365 drivers/staging/most/net/net.c 	struct net_dev_context *nd;
nd                371 drivers/staging/most/net/net.c 	nd = get_net_dev(iface);
nd                372 drivers/staging/most/net/net.c 	if (!nd) {
nd                377 drivers/staging/most/net/net.c 	if (nd->rx.linked && channel_idx == nd->rx.ch_id) {
nd                378 drivers/staging/most/net/net.c 		ch = &nd->rx;
nd                379 drivers/staging/most/net/net.c 	} else if (nd->tx.linked && channel_idx == nd->tx.ch_id) {
nd                380 drivers/staging/most/net/net.c 		ch = &nd->tx;
nd                386 drivers/staging/most/net/net.c 	if (nd->rx.linked && nd->tx.linked) {
nd                395 drivers/staging/most/net/net.c 		unregister_netdev(nd->dev);
nd                398 drivers/staging/most/net/net.c 		list_del(&nd->list);
nd                401 drivers/staging/most/net/net.c 		free_netdev(nd->dev);
nd                412 drivers/staging/most/net/net.c 	struct net_dev_context *nd;
nd                414 drivers/staging/most/net/net.c 	nd = get_net_dev_hold(iface);
nd                415 drivers/staging/most/net/net.c 	if (!nd)
nd                418 drivers/staging/most/net/net.c 	if (nd->tx.ch_id != channel_idx)
nd                421 drivers/staging/most/net/net.c 	netif_wake_queue(nd->dev);
nd                424 drivers/staging/most/net/net.c 	dev_put(nd->dev);
nd                431 drivers/staging/most/net/net.c 	struct net_dev_context *nd;
nd                439 drivers/staging/most/net/net.c 	nd = get_net_dev_hold(mbo->ifp);
nd                440 drivers/staging/most/net/net.c 	if (!nd)
nd                443 drivers/staging/most/net/net.c 	if (nd->rx.ch_id != mbo->hdm_channel_id) {
nd                448 drivers/staging/most/net/net.c 	dev = nd->dev;
nd                450 drivers/staging/most/net/net.c 	if (nd->is_mamac) {
nd                474 drivers/staging/most/net/net.c 	if (nd->is_mamac) {
nd                506 drivers/staging/most/net/net.c 	dev_put(nd->dev);
nd                549 drivers/staging/most/net/net.c 	struct net_dev_context *nd;
nd                553 drivers/staging/most/net/net.c 	nd = get_net_dev_hold(iface);
nd                554 drivers/staging/most/net/net.c 	if (!nd)
nd                557 drivers/staging/most/net/net.c 	dev = nd->dev;
nd                576 drivers/staging/most/net/net.c 	dev_put(nd->dev);
nd                 66 drivers/staging/netlogic/xlr_net.c 	stnid = priv->nd->rfr_station;
nd                284 drivers/staging/netlogic/xlr_net.c 	if (priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII ||
nd                285 drivers/staging/netlogic/xlr_net.c 	    priv->nd->phy_interface == PHY_INTERFACE_MODE_SGMII)
nd                453 drivers/staging/netlogic/xlr_net.c 	for (i = 0; i < hweight32(priv->nd->cpu_mask); i++)
nd                479 drivers/staging/netlogic/xlr_net.c 	struct xlr_fmn_info *gmac = priv->nd->gmac_fmn_info;
nd                482 drivers/staging/netlogic/xlr_net.c 	int *bucket_size = priv->nd->bucket_size;
nd                527 drivers/staging/netlogic/xlr_net.c 	cpu_mask = priv->nd->cpu_mask;
nd                794 drivers/staging/netlogic/xlr_net.c 			     xlr_gmac_link_adjust, priv->nd->phy_interface);
nd                860 drivers/staging/netlogic/xlr_net.c 	    priv->nd->phy_interface == PHY_INTERFACE_MODE_RGMII)
nd                978 drivers/staging/netlogic/xlr_net.c 		priv->nd = (struct xlr_net_data *)pdev->dev.platform_data;
nd                998 drivers/staging/netlogic/xlr_net.c 		priv->phy_addr = priv->nd->phy_addr[port];
nd                999 drivers/staging/netlogic/xlr_net.c 		priv->tx_stnid = priv->nd->tx_stnid[port];
nd               1000 drivers/staging/netlogic/xlr_net.c 		priv->mii_addr = priv->nd->mii_addr;
nd               1001 drivers/staging/netlogic/xlr_net.c 		priv->serdes_addr = priv->nd->serdes_addr;
nd               1002 drivers/staging/netlogic/xlr_net.c 		priv->pcs_addr = priv->nd->pcs_addr;
nd               1003 drivers/staging/netlogic/xlr_net.c 		priv->gpio_addr = priv->nd->gpio_addr;
nd               1069 drivers/staging/netlogic/xlr_net.h 	struct xlr_net_data *nd;
nd                491 fs/dlm/config.c 	struct dlm_node *nd;
nd                493 fs/dlm/config.c 	nd = kzalloc(sizeof(struct dlm_node), GFP_NOFS);
nd                494 fs/dlm/config.c 	if (!nd)
nd                497 fs/dlm/config.c 	config_item_init_type_name(&nd->item, name, &node_type);
nd                498 fs/dlm/config.c 	nd->nodeid = -1;
nd                499 fs/dlm/config.c 	nd->weight = 1;  /* default weight of 1 if none is set */
nd                500 fs/dlm/config.c 	nd->new = 1;     /* set to 0 once it's been read by dlm_nodeid_list() */
nd                503 fs/dlm/config.c 	list_add(&nd->list, &sp->members);
nd                507 fs/dlm/config.c 	return &nd->item;
nd                513 fs/dlm/config.c 	struct dlm_node *nd = config_item_to_node(i);
nd                516 fs/dlm/config.c 	list_del(&nd->list);
nd                525 fs/dlm/config.c 	struct dlm_node *nd = config_item_to_node(i);
nd                526 fs/dlm/config.c 	kfree(nd);
nd                684 fs/dlm/config.c 	struct dlm_node *nd = config_item_to_node(item);
nd                686 fs/dlm/config.c 	int rc = kstrtoint(buf, 0, &nd->nodeid);
nd                690 fs/dlm/config.c 	dlm_comm_seq(nd->nodeid, &seq);
nd                691 fs/dlm/config.c 	nd->comm_seq = seq;
nd                779 fs/dlm/config.c 	struct dlm_node *nd;
nd                803 fs/dlm/config.c 	list_for_each_entry(nd, &sp->members, list) {
nd                804 fs/dlm/config.c 		node->nodeid = nd->nodeid;
nd                805 fs/dlm/config.c 		node->weight = nd->weight;
nd                806 fs/dlm/config.c 		node->new = nd->new;
nd                807 fs/dlm/config.c 		node->comm_seq = nd->comm_seq;
nd                810 fs/dlm/config.c 		nd->new = 0;
nd                242 fs/hpfs/dnode.c 	struct dnode *d, *ad, *rd, *nd = NULL;
nd                259 fs/hpfs/dnode.c 		kfree(nd);
nd                264 fs/hpfs/dnode.c 		kfree(nd);
nd                272 fs/hpfs/dnode.c 			kfree(nd);
nd                285 fs/hpfs/dnode.c 		kfree(nd);
nd                289 fs/hpfs/dnode.c 	if (!nd) if (!(nd = kmalloc(0x924, GFP_NOFS))) {
nd                300 fs/hpfs/dnode.c 	memcpy(nd, d, le32_to_cpu(d->first_free));
nd                301 fs/hpfs/dnode.c 	copy_de(de = hpfs_add_de(i->i_sb, nd, name, namelen, down_ptr), new_de);
nd                302 fs/hpfs/dnode.c 	for_all_poss(i, hpfs_pos_ins, get_pos(nd, de), 1);
nd                303 fs/hpfs/dnode.c 	h = ((char *)dnode_last_de(nd) - (char *)nd) / 2 + 10;
nd                307 fs/hpfs/dnode.c 		kfree(nd);
nd                314 fs/hpfs/dnode.c 	for (de = dnode_first_de(nd); (char *)de_next_de(de) - (char *)nd < h; de = de_next_de(de)) {
nd                327 fs/hpfs/dnode.c 	memmove((char *)nd + 20, de, le32_to_cpu(nd->first_free) + (char *)nd - (char *)de);
nd                328 fs/hpfs/dnode.c 	le32_add_cpu(&nd->first_free, -((char *)de - (char *)nd - 20));
nd                329 fs/hpfs/dnode.c 	memcpy(d, nd, le32_to_cpu(nd->first_free));
nd                345 fs/hpfs/dnode.c 		kfree(nd);
nd                358 fs/hpfs/dnode.c 		kfree(nd);
nd               4065 fs/jfs/jfs_dtree.c 	int xssi, ns, nd;
nd               4086 fs/jfs/jfs_dtree.c 	ns = nd = 0;
nd               4155 fs/jfs/jfs_dtree.c 		nd++;
nd               4193 fs/jfs/jfs_dtree.c 			nd++;
nd               4225 fs/jfs/jfs_dtree.c 	dlv->length = nd;
nd               4231 fs/jfs/jfs_dtree.c 	sp->header.freecnt += nd;
nd               4237 fs/jfs/jfs_dtree.c 	dp->header.freecnt -= nd;
nd                533 fs/namei.c     static int __nd_alloc_stack(struct nameidata *nd)
nd                537 fs/namei.c     	if (nd->flags & LOOKUP_RCU) {
nd                548 fs/namei.c     	memcpy(p, nd->internal, sizeof(nd->internal));
nd                549 fs/namei.c     	nd->stack = p;
nd                572 fs/namei.c     static inline int nd_alloc_stack(struct nameidata *nd)
nd                574 fs/namei.c     	if (likely(nd->depth != EMBEDDED_LEVELS))
nd                576 fs/namei.c     	if (likely(nd->stack != nd->internal))
nd                578 fs/namei.c     	return __nd_alloc_stack(nd);
nd                581 fs/namei.c     static void drop_links(struct nameidata *nd)
nd                583 fs/namei.c     	int i = nd->depth;
nd                585 fs/namei.c     		struct saved *last = nd->stack + i;
nd                591 fs/namei.c     static void terminate_walk(struct nameidata *nd)
nd                593 fs/namei.c     	drop_links(nd);
nd                594 fs/namei.c     	if (!(nd->flags & LOOKUP_RCU)) {
nd                596 fs/namei.c     		path_put(&nd->path);
nd                597 fs/namei.c     		for (i = 0; i < nd->depth; i++)
nd                598 fs/namei.c     			path_put(&nd->stack[i].link);
nd                599 fs/namei.c     		if (nd->flags & LOOKUP_ROOT_GRABBED) {
nd                600 fs/namei.c     			path_put(&nd->root);
nd                601 fs/namei.c     			nd->flags &= ~LOOKUP_ROOT_GRABBED;
nd                604 fs/namei.c     		nd->flags &= ~LOOKUP_RCU;
nd                607 fs/namei.c     	nd->depth = 0;
nd                611 fs/namei.c     static bool legitimize_path(struct nameidata *nd,
nd                614 fs/namei.c     	int res = __legitimize_mnt(path->mnt, nd->m_seq);
nd                628 fs/namei.c     static bool legitimize_links(struct nameidata *nd)
nd                631 fs/namei.c     	for (i = 0; i < nd->depth; i++) {
nd                632 fs/namei.c     		struct saved *last = nd->stack + i;
nd                633 fs/namei.c     		if (unlikely(!legitimize_path(nd, &last->link, last->seq))) {
nd                634 fs/namei.c     			drop_links(nd);
nd                635 fs/namei.c     			nd->depth = i + 1;
nd                642 fs/namei.c     static bool legitimize_root(struct nameidata *nd)
nd                644 fs/namei.c     	if (!nd->root.mnt || (nd->flags & LOOKUP_ROOT))
nd                646 fs/namei.c     	nd->flags |= LOOKUP_ROOT_GRABBED;
nd                647 fs/namei.c     	return legitimize_path(nd, &nd->root, nd->root_seq);
nd                672 fs/namei.c     static int unlazy_walk(struct nameidata *nd)
nd                674 fs/namei.c     	struct dentry *parent = nd->path.dentry;
nd                676 fs/namei.c     	BUG_ON(!(nd->flags & LOOKUP_RCU));
nd                678 fs/namei.c     	nd->flags &= ~LOOKUP_RCU;
nd                679 fs/namei.c     	if (unlikely(!legitimize_links(nd)))
nd                681 fs/namei.c     	if (unlikely(!legitimize_path(nd, &nd->path, nd->seq)))
nd                683 fs/namei.c     	if (unlikely(!legitimize_root(nd)))
nd                686 fs/namei.c     	BUG_ON(nd->inode != parent->d_inode);
nd                690 fs/namei.c     	nd->path.mnt = NULL;
nd                691 fs/namei.c     	nd->path.dentry = NULL;
nd                710 fs/namei.c     static int unlazy_child(struct nameidata *nd, struct dentry *dentry, unsigned seq)
nd                712 fs/namei.c     	BUG_ON(!(nd->flags & LOOKUP_RCU));
nd                714 fs/namei.c     	nd->flags &= ~LOOKUP_RCU;
nd                715 fs/namei.c     	if (unlikely(!legitimize_links(nd)))
nd                717 fs/namei.c     	if (unlikely(!legitimize_mnt(nd->path.mnt, nd->m_seq)))
nd                719 fs/namei.c     	if (unlikely(!lockref_get_not_dead(&nd->path.dentry->d_lockref)))
nd                737 fs/namei.c     	if (unlikely(!legitimize_root(nd)))
nd                743 fs/namei.c     	nd->path.mnt = NULL;
nd                745 fs/namei.c     	nd->path.dentry = NULL;
nd                773 fs/namei.c     static int complete_walk(struct nameidata *nd)
nd                775 fs/namei.c     	struct dentry *dentry = nd->path.dentry;
nd                778 fs/namei.c     	if (nd->flags & LOOKUP_RCU) {
nd                779 fs/namei.c     		if (!(nd->flags & LOOKUP_ROOT))
nd                780 fs/namei.c     			nd->root.mnt = NULL;
nd                781 fs/namei.c     		if (unlikely(unlazy_walk(nd)))
nd                785 fs/namei.c     	if (likely(!(nd->flags & LOOKUP_JUMPED)))
nd                791 fs/namei.c     	status = dentry->d_op->d_weak_revalidate(dentry, nd->flags);
nd                801 fs/namei.c     static void set_root(struct nameidata *nd)
nd                805 fs/namei.c     	if (nd->flags & LOOKUP_RCU) {
nd                810 fs/namei.c     			nd->root = fs->root;
nd                811 fs/namei.c     			nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
nd                814 fs/namei.c     		get_fs_root(fs, &nd->root);
nd                815 fs/namei.c     		nd->flags |= LOOKUP_ROOT_GRABBED;
nd                819 fs/namei.c     static void path_put_conditional(struct path *path, struct nameidata *nd)
nd                822 fs/namei.c     	if (path->mnt != nd->path.mnt)
nd                827 fs/namei.c     					struct nameidata *nd)
nd                829 fs/namei.c     	if (!(nd->flags & LOOKUP_RCU)) {
nd                830 fs/namei.c     		dput(nd->path.dentry);
nd                831 fs/namei.c     		if (nd->path.mnt != path->mnt)
nd                832 fs/namei.c     			mntput(nd->path.mnt);
nd                834 fs/namei.c     	nd->path.mnt = path->mnt;
nd                835 fs/namei.c     	nd->path.dentry = path->dentry;
nd                838 fs/namei.c     static int nd_jump_root(struct nameidata *nd)
nd                840 fs/namei.c     	if (nd->flags & LOOKUP_RCU) {
nd                842 fs/namei.c     		nd->path = nd->root;
nd                843 fs/namei.c     		d = nd->path.dentry;
nd                844 fs/namei.c     		nd->inode = d->d_inode;
nd                845 fs/namei.c     		nd->seq = nd->root_seq;
nd                846 fs/namei.c     		if (unlikely(read_seqcount_retry(&d->d_seq, nd->seq)))
nd                849 fs/namei.c     		path_put(&nd->path);
nd                850 fs/namei.c     		nd->path = nd->root;
nd                851 fs/namei.c     		path_get(&nd->path);
nd                852 fs/namei.c     		nd->inode = nd->path.dentry->d_inode;
nd                854 fs/namei.c     	nd->flags |= LOOKUP_JUMPED;
nd                864 fs/namei.c     	struct nameidata *nd = current->nameidata;
nd                865 fs/namei.c     	path_put(&nd->path);
nd                867 fs/namei.c     	nd->path = *path;
nd                868 fs/namei.c     	nd->inode = nd->path.dentry->d_inode;
nd                869 fs/namei.c     	nd->flags |= LOOKUP_JUMPED;
nd                872 fs/namei.c     static inline void put_link(struct nameidata *nd)
nd                874 fs/namei.c     	struct saved *last = nd->stack + --nd->depth;
nd                876 fs/namei.c     	if (!(nd->flags & LOOKUP_RCU))
nd                900 fs/namei.c     static inline int may_follow_link(struct nameidata *nd)
nd                910 fs/namei.c     	inode = nd->link_inode;
nd                915 fs/namei.c     	parent = nd->inode;
nd                924 fs/namei.c     	if (nd->flags & LOOKUP_RCU)
nd                927 fs/namei.c     	audit_inode(nd->name, nd->stack[0].link.dentry, 0);
nd               1041 fs/namei.c     const char *get_link(struct nameidata *nd)
nd               1043 fs/namei.c     	struct saved *last = nd->stack + nd->depth - 1;
nd               1045 fs/namei.c     	struct inode *inode = nd->link_inode;
nd               1049 fs/namei.c     	if (!(nd->flags & LOOKUP_RCU)) {
nd               1053 fs/namei.c     		if (unlikely(unlazy_walk(nd)))
nd               1059 fs/namei.c     					   nd->flags & LOOKUP_RCU);
nd               1063 fs/namei.c     	nd->last_type = LAST_BIND;
nd               1069 fs/namei.c     		if (nd->flags & LOOKUP_RCU) {
nd               1072 fs/namei.c     				if (unlikely(unlazy_walk(nd)))
nd               1083 fs/namei.c     		if (!nd->root.mnt)
nd               1084 fs/namei.c     			set_root(nd);
nd               1085 fs/namei.c     		if (unlikely(nd_jump_root(nd)))
nd               1133 fs/namei.c     static int follow_automount(struct path *path, struct nameidata *nd,
nd               1153 fs/namei.c     	if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
nd               1158 fs/namei.c     	nd->total_link_count++;
nd               1159 fs/namei.c     	if (nd->total_link_count >= 40)
nd               1173 fs/namei.c     		if (PTR_ERR(mnt) == -EISDIR && (nd->flags & LOOKUP_PARENT))
nd               1213 fs/namei.c     static int follow_managed(struct path *path, struct nameidata *nd)
nd               1257 fs/namei.c     			ret = follow_automount(path, nd, &need_mntput);
nd               1272 fs/namei.c     		nd->flags |= LOOKUP_JUMPED;
nd               1274 fs/namei.c     		path_put_conditional(path, nd);
nd               1304 fs/namei.c     static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
nd               1331 fs/namei.c     		nd->flags |= LOOKUP_JUMPED;
nd               1340 fs/namei.c     	return !read_seqretry(&mount_lock, nd->m_seq) &&
nd               1344 fs/namei.c     static int follow_dotdot_rcu(struct nameidata *nd)
nd               1346 fs/namei.c     	struct inode *inode = nd->inode;
nd               1349 fs/namei.c     		if (path_equal(&nd->path, &nd->root))
nd               1351 fs/namei.c     		if (nd->path.dentry != nd->path.mnt->mnt_root) {
nd               1352 fs/namei.c     			struct dentry *old = nd->path.dentry;
nd               1358 fs/namei.c     			if (unlikely(read_seqcount_retry(&old->d_seq, nd->seq)))
nd               1360 fs/namei.c     			nd->path.dentry = parent;
nd               1361 fs/namei.c     			nd->seq = seq;
nd               1362 fs/namei.c     			if (unlikely(!path_connected(&nd->path)))
nd               1366 fs/namei.c     			struct mount *mnt = real_mount(nd->path.mnt);
nd               1371 fs/namei.c     			if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
nd               1373 fs/namei.c     			if (&mparent->mnt == nd->path.mnt)
nd               1376 fs/namei.c     			nd->path.dentry = mountpoint;
nd               1377 fs/namei.c     			nd->path.mnt = &mparent->mnt;
nd               1379 fs/namei.c     			nd->seq = seq;
nd               1382 fs/namei.c     	while (unlikely(d_mountpoint(nd->path.dentry))) {
nd               1384 fs/namei.c     		mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
nd               1385 fs/namei.c     		if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
nd               1389 fs/namei.c     		nd->path.mnt = &mounted->mnt;
nd               1390 fs/namei.c     		nd->path.dentry = mounted->mnt.mnt_root;
nd               1391 fs/namei.c     		inode = nd->path.dentry->d_inode;
nd               1392 fs/namei.c     		nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
nd               1394 fs/namei.c     	nd->inode = inode;
nd               1474 fs/namei.c     static int follow_dotdot(struct nameidata *nd)
nd               1477 fs/namei.c     		if (path_equal(&nd->path, &nd->root))
nd               1479 fs/namei.c     		if (nd->path.dentry != nd->path.mnt->mnt_root) {
nd               1480 fs/namei.c     			int ret = path_parent_directory(&nd->path);
nd               1485 fs/namei.c     		if (!follow_up(&nd->path))
nd               1488 fs/namei.c     	follow_mount(&nd->path);
nd               1489 fs/namei.c     	nd->inode = nd->path.dentry->d_inode;
nd               1547 fs/namei.c     static int lookup_fast(struct nameidata *nd,
nd               1551 fs/namei.c     	struct vfsmount *mnt = nd->path.mnt;
nd               1552 fs/namei.c     	struct dentry *dentry, *parent = nd->path.dentry;
nd               1561 fs/namei.c     	if (nd->flags & LOOKUP_RCU) {
nd               1564 fs/namei.c     		dentry = __d_lookup_rcu(parent, &nd->last, &seq);
nd               1566 fs/namei.c     			if (unlazy_walk(nd))
nd               1587 fs/namei.c     		if (unlikely(__read_seqcount_retry(&parent->d_seq, nd->seq)))
nd               1591 fs/namei.c     		status = d_revalidate(dentry, nd->flags);
nd               1601 fs/namei.c     			if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
nd               1604 fs/namei.c     		if (unlazy_child(nd, dentry, seq))
nd               1608 fs/namei.c     			status = d_revalidate(dentry, nd->flags);
nd               1610 fs/namei.c     		dentry = __d_lookup(parent, &nd->last);
nd               1613 fs/namei.c     		status = d_revalidate(dentry, nd->flags);
nd               1628 fs/namei.c     	err = follow_managed(path, nd);
nd               1686 fs/namei.c     static inline int may_lookup(struct nameidata *nd)
nd               1688 fs/namei.c     	if (nd->flags & LOOKUP_RCU) {
nd               1689 fs/namei.c     		int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
nd               1692 fs/namei.c     		if (unlazy_walk(nd))
nd               1695 fs/namei.c     	return inode_permission(nd->inode, MAY_EXEC);
nd               1698 fs/namei.c     static inline int handle_dots(struct nameidata *nd, int type)
nd               1701 fs/namei.c     		if (!nd->root.mnt)
nd               1702 fs/namei.c     			set_root(nd);
nd               1703 fs/namei.c     		if (nd->flags & LOOKUP_RCU) {
nd               1704 fs/namei.c     			return follow_dotdot_rcu(nd);
nd               1706 fs/namei.c     			return follow_dotdot(nd);
nd               1711 fs/namei.c     static int pick_link(struct nameidata *nd, struct path *link,
nd               1716 fs/namei.c     	if (unlikely(nd->total_link_count++ >= MAXSYMLINKS)) {
nd               1717 fs/namei.c     		path_to_nameidata(link, nd);
nd               1720 fs/namei.c     	if (!(nd->flags & LOOKUP_RCU)) {
nd               1721 fs/namei.c     		if (link->mnt == nd->path.mnt)
nd               1724 fs/namei.c     	error = nd_alloc_stack(nd);
nd               1727 fs/namei.c     			if (unlikely(!legitimize_path(nd, link, seq))) {
nd               1728 fs/namei.c     				drop_links(nd);
nd               1729 fs/namei.c     				nd->depth = 0;
nd               1730 fs/namei.c     				nd->flags &= ~LOOKUP_RCU;
nd               1731 fs/namei.c     				nd->path.mnt = NULL;
nd               1732 fs/namei.c     				nd->path.dentry = NULL;
nd               1734 fs/namei.c     			} else if (likely(unlazy_walk(nd)) == 0)
nd               1735 fs/namei.c     				error = nd_alloc_stack(nd);
nd               1743 fs/namei.c     	last = nd->stack + nd->depth++;
nd               1746 fs/namei.c     	nd->link_inode = inode;
nd               1759 fs/namei.c     static inline int step_into(struct nameidata *nd, struct path *path,
nd               1762 fs/namei.c     	if (!(flags & WALK_MORE) && nd->depth)
nd               1763 fs/namei.c     		put_link(nd);
nd               1765 fs/namei.c     	   !(flags & WALK_FOLLOW || nd->flags & LOOKUP_FOLLOW)) {
nd               1767 fs/namei.c     		path_to_nameidata(path, nd);
nd               1768 fs/namei.c     		nd->inode = inode;
nd               1769 fs/namei.c     		nd->seq = seq;
nd               1773 fs/namei.c     	if (nd->flags & LOOKUP_RCU) {
nd               1777 fs/namei.c     	return pick_link(nd, path, inode, seq);
nd               1780 fs/namei.c     static int walk_component(struct nameidata *nd, int flags)
nd               1791 fs/namei.c     	if (unlikely(nd->last_type != LAST_NORM)) {
nd               1792 fs/namei.c     		err = handle_dots(nd, nd->last_type);
nd               1793 fs/namei.c     		if (!(flags & WALK_MORE) && nd->depth)
nd               1794 fs/namei.c     			put_link(nd);
nd               1797 fs/namei.c     	err = lookup_fast(nd, &path, &inode, &seq);
nd               1801 fs/namei.c     		path.dentry = lookup_slow(&nd->last, nd->path.dentry,
nd               1802 fs/namei.c     					  nd->flags);
nd               1806 fs/namei.c     		path.mnt = nd->path.mnt;
nd               1807 fs/namei.c     		err = follow_managed(&path, nd);
nd               1812 fs/namei.c     			path_to_nameidata(&path, nd);
nd               1820 fs/namei.c     	return step_into(nd, &path, flags, inode, seq);
nd               2059 fs/namei.c     static int link_path_walk(const char *name, struct nameidata *nd)
nd               2075 fs/namei.c     		err = may_lookup(nd);
nd               2079 fs/namei.c     		hash_len = hash_name(nd->path.dentry, name);
nd               2086 fs/namei.c     					nd->flags |= LOOKUP_JUMPED;
nd               2093 fs/namei.c     			struct dentry *parent = nd->path.dentry;
nd               2094 fs/namei.c     			nd->flags &= ~LOOKUP_JUMPED;
nd               2105 fs/namei.c     		nd->last.hash_len = hash_len;
nd               2106 fs/namei.c     		nd->last.name = name;
nd               2107 fs/namei.c     		nd->last_type = type;
nd               2122 fs/namei.c     			if (!nd->depth)
nd               2124 fs/namei.c     			name = nd->stack[nd->depth - 1].name;
nd               2129 fs/namei.c     			err = walk_component(nd, WALK_FOLLOW);
nd               2132 fs/namei.c     			err = walk_component(nd, WALK_FOLLOW | WALK_MORE);
nd               2138 fs/namei.c     			const char *s = get_link(nd);
nd               2145 fs/namei.c     				put_link(nd);
nd               2147 fs/namei.c     				nd->stack[nd->depth - 1].name = name;
nd               2152 fs/namei.c     		if (unlikely(!d_can_lookup(nd->path.dentry))) {
nd               2153 fs/namei.c     			if (nd->flags & LOOKUP_RCU) {
nd               2154 fs/namei.c     				if (unlazy_walk(nd))
nd               2163 fs/namei.c     static const char *path_init(struct nameidata *nd, unsigned flags)
nd               2165 fs/namei.c     	const char *s = nd->name->name;
nd               2172 fs/namei.c     	nd->last_type = LAST_ROOT; /* if there are only slashes... */
nd               2173 fs/namei.c     	nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
nd               2174 fs/namei.c     	nd->depth = 0;
nd               2176 fs/namei.c     		struct dentry *root = nd->root.dentry;
nd               2180 fs/namei.c     		nd->path = nd->root;
nd               2181 fs/namei.c     		nd->inode = inode;
nd               2183 fs/namei.c     			nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
nd               2184 fs/namei.c     			nd->root_seq = nd->seq;
nd               2185 fs/namei.c     			nd->m_seq = read_seqbegin(&mount_lock);
nd               2187 fs/namei.c     			path_get(&nd->path);
nd               2192 fs/namei.c     	nd->root.mnt = NULL;
nd               2193 fs/namei.c     	nd->path.mnt = NULL;
nd               2194 fs/namei.c     	nd->path.dentry = NULL;
nd               2196 fs/namei.c     	nd->m_seq = read_seqbegin(&mount_lock);
nd               2198 fs/namei.c     		set_root(nd);
nd               2199 fs/namei.c     		if (likely(!nd_jump_root(nd)))
nd               2202 fs/namei.c     	} else if (nd->dfd == AT_FDCWD) {
nd               2209 fs/namei.c     				nd->path = fs->pwd;
nd               2210 fs/namei.c     				nd->inode = nd->path.dentry->d_inode;
nd               2211 fs/namei.c     				nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
nd               2214 fs/namei.c     			get_fs_pwd(current->fs, &nd->path);
nd               2215 fs/namei.c     			nd->inode = nd->path.dentry->d_inode;
nd               2220 fs/namei.c     		struct fd f = fdget_raw(nd->dfd);
nd               2233 fs/namei.c     		nd->path = f.file->f_path;
nd               2235 fs/namei.c     			nd->inode = nd->path.dentry->d_inode;
nd               2236 fs/namei.c     			nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
nd               2238 fs/namei.c     			path_get(&nd->path);
nd               2239 fs/namei.c     			nd->inode = nd->path.dentry->d_inode;
nd               2246 fs/namei.c     static const char *trailing_symlink(struct nameidata *nd)
nd               2249 fs/namei.c     	int error = may_follow_link(nd);
nd               2252 fs/namei.c     	nd->flags |= LOOKUP_PARENT;
nd               2253 fs/namei.c     	nd->stack[0].name = NULL;
nd               2254 fs/namei.c     	s = get_link(nd);
nd               2258 fs/namei.c     static inline int lookup_last(struct nameidata *nd)
nd               2260 fs/namei.c     	if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
nd               2261 fs/namei.c     		nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
nd               2263 fs/namei.c     	nd->flags &= ~LOOKUP_PARENT;
nd               2264 fs/namei.c     	return walk_component(nd, 0);
nd               2267 fs/namei.c     static int handle_lookup_down(struct nameidata *nd)
nd               2269 fs/namei.c     	struct path path = nd->path;
nd               2270 fs/namei.c     	struct inode *inode = nd->inode;
nd               2271 fs/namei.c     	unsigned seq = nd->seq;
nd               2274 fs/namei.c     	if (nd->flags & LOOKUP_RCU) {
nd               2280 fs/namei.c     		if (unlikely(!__follow_mount_rcu(nd, &path, &inode, &seq)))
nd               2284 fs/namei.c     		err = follow_managed(&path, nd);
nd               2290 fs/namei.c     	path_to_nameidata(&path, nd);
nd               2291 fs/namei.c     	nd->inode = inode;
nd               2292 fs/namei.c     	nd->seq = seq;
nd               2297 fs/namei.c     static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path)
nd               2299 fs/namei.c     	const char *s = path_init(nd, flags);
nd               2303 fs/namei.c     		err = handle_lookup_down(nd);
nd               2308 fs/namei.c     	while (!(err = link_path_walk(s, nd))
nd               2309 fs/namei.c     		&& ((err = lookup_last(nd)) > 0)) {
nd               2310 fs/namei.c     		s = trailing_symlink(nd);
nd               2313 fs/namei.c     		err = complete_walk(nd);
nd               2315 fs/namei.c     	if (!err && nd->flags & LOOKUP_DIRECTORY)
nd               2316 fs/namei.c     		if (!d_can_lookup(nd->path.dentry))
nd               2319 fs/namei.c     		*path = nd->path;
nd               2320 fs/namei.c     		nd->path.mnt = NULL;
nd               2321 fs/namei.c     		nd->path.dentry = NULL;
nd               2323 fs/namei.c     	terminate_walk(nd);
nd               2331 fs/namei.c     	struct nameidata nd;
nd               2335 fs/namei.c     		nd.root = *root;
nd               2338 fs/namei.c     	set_nameidata(&nd, dfd, name);
nd               2339 fs/namei.c     	retval = path_lookupat(&nd, flags | LOOKUP_RCU, path);
nd               2341 fs/namei.c     		retval = path_lookupat(&nd, flags, path);
nd               2343 fs/namei.c     		retval = path_lookupat(&nd, flags | LOOKUP_REVAL, path);
nd               2353 fs/namei.c     static int path_parentat(struct nameidata *nd, unsigned flags,
nd               2356 fs/namei.c     	const char *s = path_init(nd, flags);
nd               2357 fs/namei.c     	int err = link_path_walk(s, nd);
nd               2359 fs/namei.c     		err = complete_walk(nd);
nd               2361 fs/namei.c     		*parent = nd->path;
nd               2362 fs/namei.c     		nd->path.mnt = NULL;
nd               2363 fs/namei.c     		nd->path.dentry = NULL;
nd               2365 fs/namei.c     	terminate_walk(nd);
nd               2374 fs/namei.c     	struct nameidata nd;
nd               2378 fs/namei.c     	set_nameidata(&nd, dfd, name);
nd               2379 fs/namei.c     	retval = path_parentat(&nd, flags | LOOKUP_RCU, parent);
nd               2381 fs/namei.c     		retval = path_parentat(&nd, flags, parent);
nd               2383 fs/namei.c     		retval = path_parentat(&nd, flags | LOOKUP_REVAL, parent);
nd               2385 fs/namei.c     		*last = nd.last;
nd               2386 fs/namei.c     		*type = nd.last_type;
nd               2628 fs/namei.c     mountpoint_last(struct nameidata *nd)
nd               2631 fs/namei.c     	struct dentry *dir = nd->path.dentry;
nd               2635 fs/namei.c     	if (nd->flags & LOOKUP_RCU) {
nd               2636 fs/namei.c     		if (unlazy_walk(nd))
nd               2640 fs/namei.c     	nd->flags &= ~LOOKUP_PARENT;
nd               2642 fs/namei.c     	if (unlikely(nd->last_type != LAST_NORM)) {
nd               2643 fs/namei.c     		error = handle_dots(nd, nd->last_type);
nd               2646 fs/namei.c     		path.dentry = dget(nd->path.dentry);
nd               2648 fs/namei.c     		path.dentry = d_lookup(dir, &nd->last);
nd               2656 fs/namei.c     			path.dentry = lookup_slow(&nd->last, dir,
nd               2657 fs/namei.c     					     nd->flags | LOOKUP_NO_REVAL);
nd               2666 fs/namei.c     	path.mnt = nd->path.mnt;
nd               2667 fs/namei.c     	return step_into(nd, &path, 0, d_backing_inode(path.dentry), 0);
nd               2680 fs/namei.c     path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path)
nd               2682 fs/namei.c     	const char *s = path_init(nd, flags);
nd               2685 fs/namei.c     	while (!(err = link_path_walk(s, nd)) &&
nd               2686 fs/namei.c     		(err = mountpoint_last(nd)) > 0) {
nd               2687 fs/namei.c     		s = trailing_symlink(nd);
nd               2690 fs/namei.c     		*path = nd->path;
nd               2691 fs/namei.c     		nd->path.mnt = NULL;
nd               2692 fs/namei.c     		nd->path.dentry = NULL;
nd               2695 fs/namei.c     	terminate_walk(nd);
nd               2703 fs/namei.c     	struct nameidata nd;
nd               2707 fs/namei.c     	set_nameidata(&nd, dfd, name);
nd               2708 fs/namei.c     	error = path_mountpoint(&nd, flags | LOOKUP_RCU, path);
nd               2710 fs/namei.c     		error = path_mountpoint(&nd, flags, path);
nd               2712 fs/namei.c     		error = path_mountpoint(&nd, flags | LOOKUP_REVAL, path);
nd               3046 fs/namei.c     static int atomic_open(struct nameidata *nd, struct dentry *dentry,
nd               3052 fs/namei.c     	struct inode *dir =  nd->path.dentry->d_inode;
nd               3058 fs/namei.c     	if (nd->flags & LOOKUP_DIRECTORY)
nd               3062 fs/namei.c     	file->f_path.mnt = nd->path.mnt;
nd               3094 fs/namei.c     				path->mnt = nd->path.mnt;
nd               3118 fs/namei.c     static int lookup_open(struct nameidata *nd, struct path *path,
nd               3123 fs/namei.c     	struct dentry *dir = nd->path.dentry;
nd               3135 fs/namei.c     	dentry = d_lookup(dir, &nd->last);
nd               3138 fs/namei.c     			dentry = d_alloc_parallel(dir, &nd->last, &wq);
nd               3145 fs/namei.c     		error = d_revalidate(dentry, nd->flags);
nd               3178 fs/namei.c     			create_error = may_o_create(&nd->path, dentry, mode);
nd               3195 fs/namei.c     		error = atomic_open(nd, dentry, path, file, op, open_flag,
nd               3205 fs/namei.c     							     nd->flags);
nd               3237 fs/namei.c     	path->mnt = nd->path.mnt;
nd               3248 fs/namei.c     static int do_last(struct nameidata *nd,
nd               3251 fs/namei.c     	struct dentry *dir = nd->path.dentry;
nd               3252 fs/namei.c     	kuid_t dir_uid = nd->inode->i_uid;
nd               3253 fs/namei.c     	umode_t dir_mode = nd->inode->i_mode;
nd               3263 fs/namei.c     	nd->flags &= ~LOOKUP_PARENT;
nd               3264 fs/namei.c     	nd->flags |= op->intent;
nd               3266 fs/namei.c     	if (nd->last_type != LAST_NORM) {
nd               3267 fs/namei.c     		error = handle_dots(nd, nd->last_type);
nd               3274 fs/namei.c     		if (nd->last.name[nd->last.len])
nd               3275 fs/namei.c     			nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
nd               3277 fs/namei.c     		error = lookup_fast(nd, &path, &inode, &seq);
nd               3284 fs/namei.c     		BUG_ON(nd->inode != dir->d_inode);
nd               3285 fs/namei.c     		BUG_ON(nd->flags & LOOKUP_RCU);
nd               3293 fs/namei.c     		error = complete_walk(nd);
nd               3297 fs/namei.c     		audit_inode(nd->name, dir, AUDIT_INODE_PARENT);
nd               3299 fs/namei.c     		if (unlikely(nd->last.name[nd->last.len]))
nd               3304 fs/namei.c     		error = mnt_want_write(nd->path.mnt);
nd               3317 fs/namei.c     	error = lookup_open(nd, &path, file, op, got_write);
nd               3331 fs/namei.c     		audit_inode(nd->name, file->f_path.dentry, 0);
nd               3340 fs/namei.c     		path_to_nameidata(&path, nd);
nd               3350 fs/namei.c     		mnt_drop_write(nd->path.mnt);
nd               3354 fs/namei.c     	error = follow_managed(&path, nd);
nd               3359 fs/namei.c     		path_to_nameidata(&path, nd);
nd               3366 fs/namei.c     	audit_inode(nd->name, path.dentry, 0);
nd               3369 fs/namei.c     		path_to_nameidata(&path, nd);
nd               3376 fs/namei.c     	error = step_into(nd, &path, 0, inode, seq);
nd               3381 fs/namei.c     	error = complete_walk(nd);
nd               3384 fs/namei.c     	audit_inode(nd->name, nd->path.dentry, 0);
nd               3387 fs/namei.c     		if (d_is_dir(nd->path.dentry))
nd               3390 fs/namei.c     					     d_backing_inode(nd->path.dentry));
nd               3395 fs/namei.c     	if ((nd->flags & LOOKUP_DIRECTORY) && !d_can_lookup(nd->path.dentry))
nd               3397 fs/namei.c     	if (!d_is_reg(nd->path.dentry))
nd               3401 fs/namei.c     		error = mnt_want_write(nd->path.mnt);
nd               3407 fs/namei.c     	error = may_open(&nd->path, acc_mode, open_flag);
nd               3411 fs/namei.c     	error = vfs_open(&nd->path, file);
nd               3424 fs/namei.c     		mnt_drop_write(nd->path.mnt);
nd               3467 fs/namei.c     static int do_tmpfile(struct nameidata *nd, unsigned flags,
nd               3473 fs/namei.c     	int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path);
nd               3485 fs/namei.c     	audit_inode(nd->name, child, 0);
nd               3499 fs/namei.c     static int do_o_path(struct nameidata *nd, unsigned flags, struct file *file)
nd               3502 fs/namei.c     	int error = path_lookupat(nd, flags, &path);
nd               3504 fs/namei.c     		audit_inode(nd->name, path.dentry, 0);
nd               3511 fs/namei.c     static struct file *path_openat(struct nameidata *nd,
nd               3522 fs/namei.c     		error = do_tmpfile(nd, flags, op, file);
nd               3524 fs/namei.c     		error = do_o_path(nd, flags, file);
nd               3526 fs/namei.c     		const char *s = path_init(nd, flags);
nd               3527 fs/namei.c     		while (!(error = link_path_walk(s, nd)) &&
nd               3528 fs/namei.c     			(error = do_last(nd, file, op)) > 0) {
nd               3529 fs/namei.c     			nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
nd               3530 fs/namei.c     			s = trailing_symlink(nd);
nd               3532 fs/namei.c     		terminate_walk(nd);
nd               3553 fs/namei.c     	struct nameidata nd;
nd               3557 fs/namei.c     	set_nameidata(&nd, dfd, pathname);
nd               3558 fs/namei.c     	filp = path_openat(&nd, op, flags | LOOKUP_RCU);
nd               3560 fs/namei.c     		filp = path_openat(&nd, op, flags);
nd               3562 fs/namei.c     		filp = path_openat(&nd, op, flags | LOOKUP_REVAL);
nd               3570 fs/namei.c     	struct nameidata nd;
nd               3575 fs/namei.c     	nd.root.mnt = mnt;
nd               3576 fs/namei.c     	nd.root.dentry = dentry;
nd               3585 fs/namei.c     	set_nameidata(&nd, -1, filename);
nd               3586 fs/namei.c     	file = path_openat(&nd, op, flags | LOOKUP_RCU);
nd               3588 fs/namei.c     		file = path_openat(&nd, op, flags);
nd               3590 fs/namei.c     		file = path_openat(&nd, op, flags | LOOKUP_REVAL);
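
The fs/namei.c entries above keep showing one caller shape: path_lookupat(), path_parentat(), path_mountpoint() and path_openat() are each tried first with LOOKUP_RCU, retried without it when the lockless walk gives up, and retried once more with LOOKUP_REVAL when stale dentries are hit. A minimal standalone sketch of that retry shape, assuming nothing beyond libc; every name below is a mock stand-in, not a kernel API:

	/* hypothetical illustration of the RCU-walk -> ref-walk -> REVAL retry */
	#include <errno.h>
	#include <stdio.h>

	#define LOOKUP_RCU   0x01
	#define LOOKUP_REVAL 0x02

	static int mock_path_openat(unsigned flags)
	{
		/* pretend the lockless (RCU) attempt cannot complete */
		if (flags & LOOKUP_RCU)
			return -ECHILD;
		return 0;
	}

	static int mock_do_filp_open(unsigned flags)
	{
		int err = mock_path_openat(flags | LOOKUP_RCU);

		if (err == -ECHILD)		/* RCU walk gave up: retry with refs */
			err = mock_path_openat(flags);
		if (err == -ESTALE)		/* stale dentries: force revalidation */
			err = mock_path_openat(flags | LOOKUP_REVAL);
		return err;
	}

	int main(void)
	{
		printf("open result: %d\n", mock_do_filp_open(0));
		return 0;
	}
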
nd                120 include/linux/mempolicy.h 	struct rb_node nd;
nd                163 include/linux/nubus.h unsigned char *nubus_dirptr(const struct nubus_dirent *nd);
nd                 35 include/net/ncsi.h 				   void (*notifier)(struct ncsi_dev *nd));
nd                 36 include/net/ncsi.h int ncsi_start_dev(struct ncsi_dev *nd);
nd                 37 include/net/ncsi.h void ncsi_stop_dev(struct ncsi_dev *nd);
nd                 38 include/net/ncsi.h void ncsi_unregister_dev(struct ncsi_dev *nd);
nd                 51 include/net/ncsi.h 					void (*notifier)(struct ncsi_dev *nd))
nd                 56 include/net/ncsi.h static inline int ncsi_start_dev(struct ncsi_dev *nd)
nd                 61 include/net/ncsi.h static void ncsi_stop_dev(struct ncsi_dev *nd)
nd                 65 include/net/ncsi.h static inline void ncsi_unregister_dev(struct ncsi_dev *nd)
nd                843 lib/inflate.c    unsigned nd;          /* number of distance codes */
nd                870 lib/inflate.c    nd = 1 + ((unsigned)b & 0x1f);        /* number of distance codes */
nd                876 lib/inflate.c    if (nl > 288 || nd > 32)
nd                878 lib/inflate.c    if (nl > 286 || nd > 30)
nd                912 lib/inflate.c    n = nl + nd;
nd                990 lib/inflate.c    if ((i = huft_build(ll + nl, nd, 0, cpdist, cpdext, &td, &bd)) != 0)
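
In the lib/inflate.c lines, nl and nd are the literal/length and distance code counts read from a dynamic DEFLATE block header; per RFC 1951 they are 5-bit fields offset by 257 and 1, which is why the code above range-checks them against 288/32 (or 286/30 in strict mode). A small standalone sketch of that arithmetic; this is not the kernel decoder and the bit values are made up:

	#include <stdio.h>

	int main(void)
	{
		unsigned header = 0x2a3;			/* arbitrary example bit buffer */
		unsigned nl = 257 + (header & 0x1f);		/* HLIT: literal/length codes */
		unsigned nd = 1 + ((header >> 5) & 0x1f);	/* HDIST: distance codes */

		if (nl > 288 || nd > 32)
			puts("invalid dynamic block header");
		else
			printf("nl=%u nd=%u\n", nl, nd);
		return 0;
	}
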
nd                198 mm/memory-failure.c 	struct list_head nd;
nd                350 mm/memory-failure.c 	list_add_tail(&tk->nd, to_kill);
nd                366 mm/memory-failure.c 	list_for_each_entry_safe (tk, next, to_kill, nd) {
nd               1207 mm/memory-failure.c 	list_for_each_entry(tk, &tokill, nd)
nd               1809 mm/mempolicy.c 								int nd)
nd               1812 mm/mempolicy.c 		nd = policy->v.preferred_node;
nd               1822 mm/mempolicy.c 	return nd;
nd               2293 mm/mempolicy.c 		struct sp_node *p = rb_entry(n, struct sp_node, nd);
nd               2309 mm/mempolicy.c 		w = rb_entry(prev, struct sp_node, nd);
nd               2314 mm/mempolicy.c 	return rb_entry(n, struct sp_node, nd);
nd               2325 mm/mempolicy.c 	struct sp_node *nd;
nd               2329 mm/mempolicy.c 		nd = rb_entry(parent, struct sp_node, nd);
nd               2330 mm/mempolicy.c 		if (new->start < nd->start)
nd               2332 mm/mempolicy.c 		else if (new->end > nd->end)
nd               2337 mm/mempolicy.c 	rb_link_node(&new->nd, parent, p);
nd               2338 mm/mempolicy.c 	rb_insert_color(&new->nd, &sp->root);
nd               2471 mm/mempolicy.c 	rb_erase(&n->nd, &sp->root);
nd               2518 mm/mempolicy.c 		struct rb_node *next = rb_next(&n->nd);
nd               2543 mm/mempolicy.c 		n = rb_entry(next, struct sp_node, nd);
nd               2654 mm/mempolicy.c 		n = rb_entry(next, struct sp_node, nd);
nd               2655 mm/mempolicy.c 		next = rb_next(&n->nd);
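
The mm/mempolicy.c lines show the standard kernel rbtree insertion idiom for struct sp_node: descend from the root to find the parent link, then rb_link_node() and rb_insert_color(). A hedged kernel-context sketch of the same idiom; struct range_node and sp_insert_range() are hypothetical, only the rbtree helpers are real kernel API:

	#include <linux/rbtree.h>

	struct range_node {
		struct rb_node nd;
		unsigned long start, end;
	};

	/* Illustrative insert: walk down comparing ranges, remember the parent,
	 * then splice the new node in and rebalance. */
	static void sp_insert_range(struct rb_root *root, struct range_node *new)
	{
		struct rb_node **p = &root->rb_node, *parent = NULL;

		while (*p) {
			struct range_node *cur = rb_entry(*p, struct range_node, nd);

			parent = *p;
			if (new->start < cur->start)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
		rb_link_node(&new->nd, parent, p);
		rb_insert_color(&new->nd, root);
	}
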
nd                330 mm/mmap.c      	struct rb_node *nd, *pn = NULL;
nd                333 mm/mmap.c      	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
nd                335 mm/mmap.c      		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
nd                360 mm/mmap.c      		pn = nd;
nd                365 mm/mmap.c      	for (nd = pn; nd; nd = rb_prev(nd))
nd                376 mm/mmap.c      	struct rb_node *nd;
nd                378 mm/mmap.c      	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
nd                380 mm/mmap.c      		vma = rb_entry(nd, struct vm_area_struct, vm_rb);
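
The mm/mmap.c entries iterate the VMA rbtree for validation with the usual read-only walk: rb_first(), rb_next(), rb_entry(). The same traversal shape, sketched against the hypothetical struct range_node above (kernel-context, illustrative only):

	/* Count nodes whose range is non-empty; mirrors the rb_first()/rb_next()
	 * validation walk in the mm/mmap.c lines above. */
	static int count_nonempty(struct rb_root *root)
	{
		struct rb_node *nd;
		int n = 0;

		for (nd = rb_first(root); nd; nd = rb_next(nd)) {
			struct range_node *r = rb_entry(nd, struct range_node, nd);

			if (r->end > r->start)
				n++;
		}
		return n;
	}
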
nd                158 net/dsa/dsa.c  		struct net_device *nd;
nd                160 net/dsa/dsa.c  		nd = to_net_dev(d);
nd                161 net/dsa/dsa.c  		dev_hold(nd);
nd                164 net/dsa/dsa.c  		return nd;
nd                337 net/ncsi/internal.h #define TO_NCSI_DEV_PRIV(nd) \
nd                338 net/ncsi/internal.h 	container_of(nd, struct ncsi_dev_priv, ndev)
nd                347 net/ncsi/internal.h int ncsi_reset_dev(struct ncsi_dev *nd);
nd                271 net/ncsi/ncsi-cmd.c 	struct ncsi_dev *nd = &ndp->ndev;
nd                272 net/ncsi/ncsi-cmd.c 	struct net_device *dev = nd->dev;
nd                 51 net/ncsi/ncsi-manage.c 	struct ncsi_dev *nd = &ndp->ndev;
nd                 56 net/ncsi/ncsi-manage.c 	nd->state = ncsi_dev_state_functional;
nd                 58 net/ncsi/ncsi-manage.c 		nd->link_up = 0;
nd                 62 net/ncsi/ncsi-manage.c 	nd->link_up = 0;
nd                 75 net/ncsi/ncsi-manage.c 				nd->link_up = 1;
nd                 84 net/ncsi/ncsi-manage.c 	nd->handler(nd);
nd                461 net/ncsi/ncsi-manage.c 	struct ncsi_dev *nd = &ndp->ndev;
nd                472 net/ncsi/ncsi-manage.c 	switch (nd->state) {
nd                474 net/ncsi/ncsi-manage.c 		nd->state = ncsi_dev_state_suspend_select;
nd                496 net/ncsi/ncsi-manage.c 			nd->state = ncsi_dev_state_suspend_gls;
nd                498 net/ncsi/ncsi-manage.c 			nd->state = ncsi_dev_state_suspend_dcnt;
nd                510 net/ncsi/ncsi-manage.c 		nd->state = ncsi_dev_state_suspend_dcnt;
nd                526 net/ncsi/ncsi-manage.c 		nd->state = ncsi_dev_state_suspend_dc;
nd                540 net/ncsi/ncsi-manage.c 		nd->state = ncsi_dev_state_suspend_deselect;
nd                550 net/ncsi/ncsi-manage.c 				nd->state = ncsi_dev_state_suspend_done;
nd                562 net/ncsi/ncsi-manage.c 		nd->state = ncsi_dev_state_suspend_done;
nd                573 net/ncsi/ncsi-manage.c 			ncsi_reset_dev(nd);
nd                578 net/ncsi/ncsi-manage.c 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
nd                579 net/ncsi/ncsi-manage.c 			    nd->state);
nd                584 net/ncsi/ncsi-manage.c 	nd->state = ncsi_dev_state_functional;
nd                912 net/ncsi/ncsi-manage.c 	struct ncsi_dev *nd = &ndp->ndev;
nd                913 net/ncsi/ncsi-manage.c 	struct net_device *dev = nd->dev;
nd                921 net/ncsi/ncsi-manage.c 	switch (nd->state) {
nd                941 net/ncsi/ncsi-manage.c 		nd->state = ncsi_dev_state_config_cis;
nd                957 net/ncsi/ncsi-manage.c 		nd->state = ncsi_dev_state_config_oem_gma;
nd                960 net/ncsi/ncsi-manage.c 		nd->state = ncsi_dev_state_config_clear_vids;
nd                991 net/ncsi/ncsi-manage.c 		if (nd->state == ncsi_dev_state_config_clear_vids) {
nd                994 net/ncsi/ncsi-manage.c 				nd->state = ncsi_dev_state_config_svf;
nd                999 net/ncsi/ncsi-manage.c 			nd->state = ncsi_dev_state_config_clear_vids;
nd               1001 net/ncsi/ncsi-manage.c 		} else if (nd->state == ncsi_dev_state_config_svf) {
nd               1004 net/ncsi/ncsi-manage.c 				nd->state = ncsi_dev_state_config_ev;
nd               1009 net/ncsi/ncsi-manage.c 			nd->state = ncsi_dev_state_config_svf;
nd               1011 net/ncsi/ncsi-manage.c 		} else if (nd->state == ncsi_dev_state_config_ev) {
nd               1018 net/ncsi/ncsi-manage.c 			nd->state = ncsi_dev_state_config_sma;
nd               1019 net/ncsi/ncsi-manage.c 		} else if (nd->state == ncsi_dev_state_config_sma) {
nd               1029 net/ncsi/ncsi-manage.c 			nd->state = ncsi_dev_state_config_ebf;
nd               1030 net/ncsi/ncsi-manage.c 		} else if (nd->state == ncsi_dev_state_config_ebf) {
nd               1039 net/ncsi/ncsi-manage.c 				nd->state = ncsi_dev_state_config_dgmf;
nd               1041 net/ncsi/ncsi-manage.c 				nd->state = ncsi_dev_state_config_ecnt;
nd               1043 net/ncsi/ncsi-manage.c 				nd->state = ncsi_dev_state_config_ec;
nd               1044 net/ncsi/ncsi-manage.c 		} else if (nd->state == ncsi_dev_state_config_dgmf) {
nd               1047 net/ncsi/ncsi-manage.c 				nd->state = ncsi_dev_state_config_ecnt;
nd               1049 net/ncsi/ncsi-manage.c 				nd->state = ncsi_dev_state_config_ec;
nd               1050 net/ncsi/ncsi-manage.c 		} else if (nd->state == ncsi_dev_state_config_ecnt) {
nd               1057 net/ncsi/ncsi-manage.c 			nd->state = ncsi_dev_state_config_ec;
nd               1058 net/ncsi/ncsi-manage.c 		} else if (nd->state == ncsi_dev_state_config_ec) {
nd               1061 net/ncsi/ncsi-manage.c 			nd->state = ncsi_dev_state_config_ae;
nd               1063 net/ncsi/ncsi-manage.c 				nd->state = ncsi_dev_state_config_gls;
nd               1064 net/ncsi/ncsi-manage.c 		} else if (nd->state == ncsi_dev_state_config_ae) {
nd               1068 net/ncsi/ncsi-manage.c 			nd->state = ncsi_dev_state_config_gls;
nd               1069 net/ncsi/ncsi-manage.c 		} else if (nd->state == ncsi_dev_state_config_gls) {
nd               1071 net/ncsi/ncsi-manage.c 			nd->state = ncsi_dev_state_config_done;
nd               1092 net/ncsi/ncsi-manage.c 			ncsi_reset_dev(nd);
nd               1134 net/ncsi/ncsi-manage.c 			     nd->state);
nd               1266 net/ncsi/ncsi-manage.c 	struct ncsi_dev *nd = &ndp->ndev;
nd               1275 net/ncsi/ncsi-manage.c 	switch (nd->state) {
nd               1277 net/ncsi/ncsi-manage.c 		nd->state = ncsi_dev_state_probe_deselect;
nd               1292 net/ncsi/ncsi-manage.c 		nd->state = ncsi_dev_state_probe_package;
nd               1304 net/ncsi/ncsi-manage.c 		nd->state = ncsi_dev_state_probe_channel;
nd               1311 net/ncsi/ncsi-manage.c 			nd->state = ncsi_dev_state_probe_dp;
nd               1315 net/ncsi/ncsi-manage.c 		nd->state = ncsi_dev_state_probe_cis;
nd               1331 net/ncsi/ncsi-manage.c 		nd->state = ncsi_dev_state_probe_gvi;
nd               1340 net/ncsi/ncsi-manage.c 		if (nd->state == ncsi_dev_state_probe_gvi)
nd               1342 net/ncsi/ncsi-manage.c 		else if (nd->state == ncsi_dev_state_probe_gc)
nd               1355 net/ncsi/ncsi-manage.c 		if (nd->state == ncsi_dev_state_probe_gvi)
nd               1356 net/ncsi/ncsi-manage.c 			nd->state = ncsi_dev_state_probe_gc;
nd               1357 net/ncsi/ncsi-manage.c 		else if (nd->state == ncsi_dev_state_probe_gc)
nd               1358 net/ncsi/ncsi-manage.c 			nd->state = ncsi_dev_state_probe_gls;
nd               1360 net/ncsi/ncsi-manage.c 			nd->state = ncsi_dev_state_probe_dp;
nd               1380 net/ncsi/ncsi-manage.c 		nd->state = ncsi_dev_state_probe_package;
nd               1384 net/ncsi/ncsi-manage.c 		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
nd               1385 net/ncsi/ncsi-manage.c 			    nd->state);
nd               1406 net/ncsi/ncsi-manage.c 	struct ncsi_dev *nd = &ndp->ndev;
nd               1408 net/ncsi/ncsi-manage.c 	switch (nd->state & ncsi_dev_state_major) {
nd               1419 net/ncsi/ncsi-manage.c 		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
nd               1420 net/ncsi/ncsi-manage.c 			    nd->state);
nd               1485 net/ncsi/ncsi-manage.c 	struct ncsi_dev *nd = &ndp->ndev;
nd               1505 net/ncsi/ncsi-manage.c 					netdev_dbg(nd->dev,
nd               1525 net/ncsi/ncsi-manage.c 			netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
nd               1538 net/ncsi/ncsi-manage.c 	struct ncsi_dev *nd;
nd               1544 net/ncsi/ncsi-manage.c 	nd = ncsi_find_dev(dev);
nd               1545 net/ncsi/ncsi-manage.c 	if (!nd) {
nd               1550 net/ncsi/ncsi-manage.c 	ndp = TO_NCSI_DEV_PRIV(nd);
nd               1588 net/ncsi/ncsi-manage.c 	struct ncsi_dev *nd;
nd               1594 net/ncsi/ncsi-manage.c 	nd = ncsi_find_dev(dev);
nd               1595 net/ncsi/ncsi-manage.c 	if (!nd) {
nd               1600 net/ncsi/ncsi-manage.c 	ndp = TO_NCSI_DEV_PRIV(nd);
nd               1626 net/ncsi/ncsi-manage.c 	struct ncsi_dev *nd;
nd               1631 net/ncsi/ncsi-manage.c 	nd = ncsi_find_dev(dev);
nd               1632 net/ncsi/ncsi-manage.c 	if (nd)
nd               1633 net/ncsi/ncsi-manage.c 		return nd;
nd               1640 net/ncsi/ncsi-manage.c 	nd = &ndp->ndev;
nd               1641 net/ncsi/ncsi-manage.c 	nd->state = ncsi_dev_state_registered;
nd               1642 net/ncsi/ncsi-manage.c 	nd->dev = dev;
nd               1643 net/ncsi/ncsi-manage.c 	nd->handler = handler;
nd               1673 net/ncsi/ncsi-manage.c 	return nd;
nd               1677 net/ncsi/ncsi-manage.c int ncsi_start_dev(struct ncsi_dev *nd)
nd               1679 net/ncsi/ncsi-manage.c 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
nd               1681 net/ncsi/ncsi-manage.c 	if (nd->state != ncsi_dev_state_registered &&
nd               1682 net/ncsi/ncsi-manage.c 	    nd->state != ncsi_dev_state_functional)
nd               1687 net/ncsi/ncsi-manage.c 		nd->state = ncsi_dev_state_probe;
nd               1692 net/ncsi/ncsi-manage.c 	return ncsi_reset_dev(nd);
nd               1696 net/ncsi/ncsi-manage.c void ncsi_stop_dev(struct ncsi_dev *nd)
nd               1698 net/ncsi/ncsi-manage.c 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
nd               1728 net/ncsi/ncsi-manage.c int ncsi_reset_dev(struct ncsi_dev *nd)
nd               1730 net/ncsi/ncsi-manage.c 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
nd               1739 net/ncsi/ncsi-manage.c 		switch (nd->state & ncsi_dev_state_major) {
nd               1756 net/ncsi/ncsi-manage.c 		switch (nd->state) {
nd               1809 net/ncsi/ncsi-manage.c 	nd->state = ncsi_dev_state_suspend;
nd               1814 net/ncsi/ncsi-manage.c void ncsi_unregister_dev(struct ncsi_dev *nd)
nd               1816 net/ncsi/ncsi-manage.c 	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
nd               1829 net/ncsi/ncsi-manage.c 	ncsi_unregister_netlink(nd->dev);
nd                 38 net/ncsi/ncsi-netlink.c 	struct ncsi_dev *nd;
nd                 50 net/ncsi/ncsi-netlink.c 	nd = ncsi_find_dev(dev);
nd                 51 net/ncsi/ncsi-netlink.c 	ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
nd               1109 net/ncsi/ncsi-rsp.c 	struct ncsi_dev *nd;
nd               1117 net/ncsi/ncsi-rsp.c 	nd = ncsi_find_dev(dev);
nd               1118 net/ncsi/ncsi-rsp.c 	ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
nd               1140 net/ncsi/ncsi-rsp.c 		netdev_err(nd->dev, "Received unrecognized packet (0x%x)\n",
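
The net/ncsi lines are dominated by one pattern: ncsi_dev keeps its progress in nd->state, and the suspend/config/probe work functions are large switch statements that perform one step, advance the state, and reschedule until a terminal state is reached. A minimal standalone state-machine sketch of that shape; every name and state below is an invented stand-in, not NCSI API:

	#include <stdio.h>

	enum step_state { STATE_START, STATE_SELECT, STATE_CONFIGURE, STATE_DONE };

	struct mock_dev { enum step_state state; };

	/* One "work function" pass: do the current step, pick the next state.
	 * The real NCSI code requeues itself until it reaches a terminal state. */
	static int mock_step(struct mock_dev *d)
	{
		switch (d->state) {
		case STATE_START:
			d->state = STATE_SELECT;
			break;
		case STATE_SELECT:
			d->state = STATE_CONFIGURE;
			break;
		case STATE_CONFIGURE:
			d->state = STATE_DONE;
			break;
		case STATE_DONE:
			return 0;	/* nothing left to do */
		default:
			fprintf(stderr, "wrong state %d\n", d->state);
			return -1;
		}
		return 1;		/* more work pending */
	}

	int main(void)
	{
		struct mock_dev d = { .state = STATE_START };

		while (mock_step(&d) > 0)
			;
		printf("final state: %d\n", d.state);
		return 0;
	}
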
nd                415 net/openvswitch/flow.c 	memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd));
nd                421 net/openvswitch/flow.c 		struct nd_msg *nd;
nd                427 net/openvswitch/flow.c 		if (unlikely(icmp_len < sizeof(*nd)))
nd                433 net/openvswitch/flow.c 		nd = (struct nd_msg *)skb_transport_header(skb);
nd                434 net/openvswitch/flow.c 		key->ipv6.nd.target = nd->target;
nd                436 net/openvswitch/flow.c 		icmp_len -= sizeof(*nd);
nd                440 net/openvswitch/flow.c 				 (struct nd_opt_hdr *)(nd->opt + offset);
nd                452 net/openvswitch/flow.c 				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll)))
nd                454 net/openvswitch/flow.c 				ether_addr_copy(key->ipv6.nd.sll,
nd                455 net/openvswitch/flow.c 						&nd->opt[offset+sizeof(*nd_opt)]);
nd                458 net/openvswitch/flow.c 				if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll)))
nd                460 net/openvswitch/flow.c 				ether_addr_copy(key->ipv6.nd.tll,
nd                461 net/openvswitch/flow.c 						&nd->opt[offset+sizeof(*nd_opt)]);
nd                472 net/openvswitch/flow.c 	memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target));
nd                473 net/openvswitch/flow.c 	memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll));
nd                474 net/openvswitch/flow.c 	memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll));
nd                135 net/openvswitch/flow.h 				} nd;
nd               1699 net/openvswitch/flow_netlink.c 		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.target,
nd               1701 net/openvswitch/flow_netlink.c 			sizeof(match->key->ipv6.nd.target),
nd               1703 net/openvswitch/flow_netlink.c 		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.sll,
nd               1705 net/openvswitch/flow_netlink.c 		SW_FLOW_KEY_MEMCPY(match, ipv6.nd.tll,
nd               2190 net/openvswitch/flow_netlink.c 				memcpy(nd_key->nd_target, &output->ipv6.nd.target,
nd               2192 net/openvswitch/flow_netlink.c 				ether_addr_copy(nd_key->nd_sll, output->ipv6.nd.sll);
nd               2193 net/openvswitch/flow_netlink.c 				ether_addr_copy(nd_key->nd_tll, output->ipv6.nd.tll);
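
In the net/openvswitch entries, key->ipv6.nd is filled by walking the option list of an IPv6 neighbour-discovery message: each nd_opt_hdr carries a type and a length in 8-octet units, and the source/target link-layer address options are copied out. A standalone sketch of that option walk; parse_nd_options() and the sample buffer are hypothetical, the header layout follows RFC 4861:

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define ND_OPT_SOURCE_LL_ADDR 1
	#define ND_OPT_TARGET_LL_ADDR 2

	struct nd_opt_hdr {
		uint8_t nd_opt_type;
		uint8_t nd_opt_len;	/* in units of 8 octets, including header */
	};

	static int parse_nd_options(const uint8_t *opt, size_t icmp_len,
				    uint8_t sll[6], uint8_t tll[6])
	{
		size_t offset = 0;

		while (icmp_len - offset >= sizeof(struct nd_opt_hdr)) {
			const struct nd_opt_hdr *h = (const void *)(opt + offset);
			size_t opt_len = h->nd_opt_len * 8;

			if (!opt_len || opt_len > icmp_len - offset)
				return -1;	/* malformed option */
			if (h->nd_opt_type == ND_OPT_SOURCE_LL_ADDR && opt_len == 8)
				memcpy(sll, opt + offset + sizeof(*h), 6);
			else if (h->nd_opt_type == ND_OPT_TARGET_LL_ADDR && opt_len == 8)
				memcpy(tll, opt + offset + sizeof(*h), 6);
			offset += opt_len;
		}
		return 0;
	}

	int main(void)
	{
		/* one source link-layer address option: type 1, len 1 (8 bytes) */
		uint8_t opts[8] = { 1, 1, 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };
		uint8_t sll[6] = {0}, tll[6] = {0};

		if (!parse_nd_options(opts, sizeof(opts), sll, tll))
			printf("sll starts %02x:%02x\n", sll[0], sll[1]);
		return 0;
	}
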
nd               11216 net/wireless/nl80211.c 	struct nlattr *nd, *freqs, *matches, *match, *scan_plans, *scan_plan;
nd               11222 net/wireless/nl80211.c 	nd = nla_nest_start_noflag(msg, NL80211_WOWLAN_TRIG_NET_DETECT);
nd               11223 net/wireless/nl80211.c 	if (!nd)
nd               11298 net/wireless/nl80211.c 	nla_nest_end(msg, nd);
nd               16518 net/wireless/nl80211.c 	struct cfg80211_wowlan_nd_info *nd = wakeup->net_detect;
nd               16527 net/wireless/nl80211.c 	for (i = 0; i < nd->n_matches; i++) {
nd               16528 net/wireless/nl80211.c 		struct cfg80211_wowlan_nd_match *match = nd->matches[i];
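
The nl80211 lines use the standard netlink nesting idiom: nla_nest_start_noflag() opens a container attribute, the inner attributes are emitted, and nla_nest_end() patches up the container length. A hedged kernel-context sketch of that idiom; the attribute numbers and put_scan_info() are hypothetical, the nla_* calls are the usual netlink helpers:

	#include <net/netlink.h>

	/* Illustrative only: emit a nested container with two u32 members. */
	static int put_scan_info(struct sk_buff *msg, int attr_container,
				 int attr_interval, int attr_delay,
				 u32 interval, u32 delay)
	{
		struct nlattr *nest = nla_nest_start_noflag(msg, attr_container);

		if (!nest)
			return -ENOBUFS;
		if (nla_put_u32(msg, attr_interval, interval) ||
		    nla_put_u32(msg, attr_delay, delay)) {
			nla_nest_cancel(msg, nest);
			return -ENOBUFS;
		}
		nla_nest_end(msg, nest);
		return 0;
	}
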
nd                 26 scripts/dtc/data.c 	struct data nd;
nd                 32 scripts/dtc/data.c 	nd = d;
nd                 39 scripts/dtc/data.c 	nd.val = xrealloc(d.val, newsize);
nd                 41 scripts/dtc/data.c 	return nd;
nd                313 tools/perf/builtin-annotate.c 	struct rb_node *nd = rb_first_cached(&hists->entries), *next;
nd                316 tools/perf/builtin-annotate.c 	while (nd) {
nd                317 tools/perf/builtin-annotate.c 		struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
nd                331 tools/perf/builtin-annotate.c 				nd = rb_prev(nd);
nd                333 tools/perf/builtin-annotate.c 				nd = rb_next(nd);
nd                355 tools/perf/builtin-annotate.c 			nd = rb_next(nd);
nd                365 tools/perf/builtin-annotate.c 				next = rb_next(nd);
nd                368 tools/perf/builtin-annotate.c 				next = rb_prev(nd);
nd                375 tools/perf/builtin-annotate.c 				nd = next;
nd                378 tools/perf/builtin-annotate.c 			nd = rb_next(nd);
nd                335 tools/perf/builtin-buildid-cache.c 	struct str_node *nd;
nd                343 tools/perf/builtin-buildid-cache.c 	strlist__for_each_entry(nd, bidlist) {
nd                344 tools/perf/builtin-buildid-cache.c 		buf = build_id_cache__origname(nd->s);
nd                345 tools/perf/builtin-buildid-cache.c 		fprintf(stdout, "%s %s\n", nd->s, buf);
nd               2217 tools/perf/builtin-c2c.c 	struct rb_node *nd;
nd               2233 tools/perf/builtin-c2c.c 	nd = rb_first_cached(&c2c.hists.hists.entries);
nd               2235 tools/perf/builtin-c2c.c 	for (; nd; nd = rb_next(nd)) {
nd               2236 tools/perf/builtin-c2c.c 		struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
nd               2301 tools/perf/builtin-c2c.c 	struct rb_node *nd = rb_first_cached(&hb->hists->entries);
nd               2303 tools/perf/builtin-c2c.c 	while (nd) {
nd               2304 tools/perf/builtin-c2c.c 		struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
nd               2309 tools/perf/builtin-c2c.c 		nd = rb_next(nd);
nd                395 tools/perf/builtin-probe.c 	struct str_node *nd;
nd                405 tools/perf/builtin-probe.c 	strlist__for_each_entry(nd, bidlist) {
nd                406 tools/perf/builtin-probe.c 		cache = probe_cache__new(nd->s, NULL);
nd                411 tools/perf/builtin-probe.c 			pr_warning("Failed to remove entries for %s\n", nd->s);
nd                729 tools/perf/builtin-report.c 	struct rb_node *nd;
nd                731 tools/perf/builtin-report.c 	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
nd                732 tools/perf/builtin-report.c 		struct map *map = rb_entry(nd, struct map, rb_node);
nd                776 tools/perf/builtin-report.c 	struct rb_node *nd;
nd                795 tools/perf/builtin-report.c 		for (nd = rb_first_cached(&threads->entries); nd;
nd                796 tools/perf/builtin-report.c 		     nd = rb_next(nd)) {
nd                799 tools/perf/builtin-report.c 			task->thread = rb_entry(nd, struct thread, rb_node);
nd               3673 tools/perf/builtin-trace.c 	struct int_node *source = rb_entry(nd, struct int_node, rb_node);
nd               3686 tools/perf/builtin-trace.c 	struct rb_node *nd;
nd               3698 tools/perf/builtin-trace.c 	resort_rb__for_each_entry(nd, syscall_stats) {
nd               3761 tools/perf/builtin-trace.c 	entry->thread = rb_entry(nd, struct thread, rb_node);
nd               3767 tools/perf/builtin-trace.c 	struct rb_node *nd;
nd               3778 tools/perf/builtin-trace.c 		resort_rb__for_each_entry(nd, threads)
nd                 20 tools/perf/tests/vmlinux-kallsyms.c 	struct rb_node *nd;
nd                112 tools/perf/tests/vmlinux-kallsyms.c 	map__for_each_symbol(vmlinux_map, sym, nd) {
nd                115 tools/perf/tests/vmlinux-kallsyms.c 		sym  = rb_entry(nd, struct symbol, rb_node);
nd                141 tools/perf/ui/browser.c 	struct rb_node *nd;
nd                145 tools/perf/ui/browser.c 		nd = rb_first(root);
nd                148 tools/perf/ui/browser.c 		nd = browser->top;
nd                151 tools/perf/ui/browser.c 		nd = rb_last(root);
nd                159 tools/perf/ui/browser.c 			nd = rb_next(nd);
nd                162 tools/perf/ui/browser.c 			nd = rb_prev(nd);
nd                165 tools/perf/ui/browser.c 	browser->top = nd;
nd                170 tools/perf/ui/browser.c 	struct rb_node *nd;
nd                176 tools/perf/ui/browser.c 	nd = browser->top;
nd                178 tools/perf/ui/browser.c 	while (nd != NULL) {
nd                180 tools/perf/ui/browser.c 		browser->write(browser, nd, row);
nd                183 tools/perf/ui/browser.c 		nd = rb_next(nd);
nd                290 tools/perf/ui/browsers/annotate.c 					 struct rb_node *nd)
nd                293 tools/perf/ui/browsers/annotate.c 	struct annotation_line * pos = rb_entry(nd, struct annotation_line, rb_node);
nd                299 tools/perf/ui/browsers/annotate.c 	browser->curr_hot = nd;
nd                663 tools/perf/ui/browsers/annotate.c 	struct rb_node *nd = NULL;
nd                684 tools/perf/ui/browsers/annotate.c 	nd = browser->curr_hot;
nd                696 tools/perf/ui/browsers/annotate.c 			if (nd != NULL && RB_EMPTY_NODE(nd))
nd                697 tools/perf/ui/browsers/annotate.c 				nd = NULL;
nd                712 tools/perf/ui/browsers/annotate.c 			if (nd != NULL) {
nd                713 tools/perf/ui/browsers/annotate.c 				nd = rb_prev(nd);
nd                714 tools/perf/ui/browsers/annotate.c 				if (nd == NULL)
nd                715 tools/perf/ui/browsers/annotate.c 					nd = rb_last(&browser->entries);
nd                717 tools/perf/ui/browsers/annotate.c 				nd = browser->curr_hot;
nd                720 tools/perf/ui/browsers/annotate.c 			if (nd != NULL) {
nd                721 tools/perf/ui/browsers/annotate.c 				nd = rb_next(nd);
nd                722 tools/perf/ui/browsers/annotate.c 				if (nd == NULL)
nd                723 tools/perf/ui/browsers/annotate.c 					nd = rb_first(&browser->entries);
nd                725 tools/perf/ui/browsers/annotate.c 				nd = browser->curr_hot;
nd                762 tools/perf/ui/browsers/annotate.c 			nd = browser->curr_hot;
nd                865 tools/perf/ui/browsers/annotate.c 		if (nd != NULL)
nd                866 tools/perf/ui/browsers/annotate.c 			annotate_browser__set_rb_top(browser, nd);
nd                 50 tools/perf/ui/browsers/hists.c static struct rb_node *hists__filter_entries(struct rb_node *nd,
nd                 60 tools/perf/ui/browsers/hists.c 	struct rb_node *nd;
nd                 64 tools/perf/ui/browsers/hists.c 	for (nd = rb_first_cached(&hists->entries);
nd                 65 tools/perf/ui/browsers/hists.c 	     (nd = hists__filter_entries(nd, browser->min_pcnt)) != NULL;
nd                 66 tools/perf/ui/browsers/hists.c 	     nd = rb_hierarchy_next(nd)) {
nd                 68 tools/perf/ui/browsers/hists.c 			rb_entry(nd, struct hist_entry, rb_node);
nd                176 tools/perf/ui/browsers/hists.c 	struct rb_node *nd;
nd                178 tools/perf/ui/browsers/hists.c 	for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
nd                179 tools/perf/ui/browsers/hists.c 		struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
nd                258 tools/perf/ui/browsers/hists.c 	struct rb_node *nd;
nd                261 tools/perf/ui/browsers/hists.c 	for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
nd                262 tools/perf/ui/browsers/hists.c 		struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
nd                327 tools/perf/ui/browsers/hists.c 	struct rb_node *nd = rb_first(&node->rb_root);
nd                329 tools/perf/ui/browsers/hists.c 	for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
nd                330 tools/perf/ui/browsers/hists.c 		struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
nd                366 tools/perf/ui/browsers/hists.c 	struct rb_node *nd = rb_first(root);
nd                367 tools/perf/ui/browsers/hists.c 	bool has_sibling = nd && rb_next(nd);
nd                369 tools/perf/ui/browsers/hists.c 	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
nd                370 tools/perf/ui/browsers/hists.c 		struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
nd                464 tools/perf/ui/browsers/hists.c 	struct rb_node *nd;
nd                466 tools/perf/ui/browsers/hists.c 	for (nd = rb_first(&node->rb_root); nd; nd = rb_next(nd)) {
nd                467 tools/perf/ui/browsers/hists.c 		struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
nd                504 tools/perf/ui/browsers/hists.c 	struct rb_node *nd;
nd                507 tools/perf/ui/browsers/hists.c 	for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
nd                508 tools/perf/ui/browsers/hists.c 		struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
nd                519 tools/perf/ui/browsers/hists.c 	struct rb_node *nd;
nd                523 tools/perf/ui/browsers/hists.c 	for (nd = rb_first_cached(&he->hroot_out); nd; nd = rb_next(nd)) {
nd                524 tools/perf/ui/browsers/hists.c 		child = rb_entry(nd, struct hist_entry, rb_node);
nd                578 tools/perf/ui/browsers/hists.c 	struct rb_node *nd;
nd                581 tools/perf/ui/browsers/hists.c 	nd = rb_first_cached(&browser->hists->entries);
nd                582 tools/perf/ui/browsers/hists.c 	while (nd) {
nd                583 tools/perf/ui/browsers/hists.c 		he = rb_entry(nd, struct hist_entry, rb_node);
nd                586 tools/perf/ui/browsers/hists.c 		nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD);
nd               1766 tools/perf/ui/browsers/hists.c 	struct rb_node *nd;
nd               1776 tools/perf/ui/browsers/hists.c 	for (nd = browser->top; nd; nd = rb_hierarchy_next(nd)) {
nd               1777 tools/perf/ui/browsers/hists.c 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd               1811 tools/perf/ui/browsers/hists.c static struct rb_node *hists__filter_entries(struct rb_node *nd,
nd               1814 tools/perf/ui/browsers/hists.c 	while (nd != NULL) {
nd               1815 tools/perf/ui/browsers/hists.c 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd               1819 tools/perf/ui/browsers/hists.c 			return nd;
nd               1825 tools/perf/ui/browsers/hists.c 		if (rb_next(nd))
nd               1826 tools/perf/ui/browsers/hists.c 			nd = rb_next(nd);
nd               1828 tools/perf/ui/browsers/hists.c 			nd = rb_hierarchy_next(nd);
nd               1834 tools/perf/ui/browsers/hists.c static struct rb_node *hists__filter_prev_entries(struct rb_node *nd,
nd               1837 tools/perf/ui/browsers/hists.c 	while (nd != NULL) {
nd               1838 tools/perf/ui/browsers/hists.c 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd               1842 tools/perf/ui/browsers/hists.c 			return nd;
nd               1844 tools/perf/ui/browsers/hists.c 		nd = rb_hierarchy_prev(nd);
nd               1854 tools/perf/ui/browsers/hists.c 	struct rb_node *nd;
nd               1867 tools/perf/ui/browsers/hists.c 		nd = hists__filter_entries(rb_first(browser->entries),
nd               1871 tools/perf/ui/browsers/hists.c 		nd = browser->top;
nd               1874 tools/perf/ui/browsers/hists.c 		nd = rb_hierarchy_last(rb_last(browser->entries));
nd               1875 tools/perf/ui/browsers/hists.c 		nd = hists__filter_prev_entries(nd, hb->min_pcnt);
nd               1903 tools/perf/ui/browsers/hists.c 	if (!nd)
nd               1908 tools/perf/ui/browsers/hists.c 			h = rb_entry(nd, struct hist_entry, rb_node);
nd               1917 tools/perf/ui/browsers/hists.c 					browser->top = nd;
nd               1921 tools/perf/ui/browsers/hists.c 			nd = hists__filter_entries(rb_hierarchy_next(nd),
nd               1923 tools/perf/ui/browsers/hists.c 			if (nd == NULL)
nd               1926 tools/perf/ui/browsers/hists.c 			browser->top = nd;
nd               1930 tools/perf/ui/browsers/hists.c 			h = rb_entry(nd, struct hist_entry, rb_node);
nd               1939 tools/perf/ui/browsers/hists.c 						browser->top = nd;
nd               1949 tools/perf/ui/browsers/hists.c 						browser->top = nd;
nd               1955 tools/perf/ui/browsers/hists.c 			nd = hists__filter_prev_entries(rb_hierarchy_prev(nd),
nd               1957 tools/perf/ui/browsers/hists.c 			if (nd == NULL)
nd               1960 tools/perf/ui/browsers/hists.c 			browser->top = nd;
nd               1967 tools/perf/ui/browsers/hists.c 				h = rb_entry(nd, struct hist_entry, rb_node);
nd               1975 tools/perf/ui/browsers/hists.c 		browser->top = nd;
nd               1976 tools/perf/ui/browsers/hists.c 		h = rb_entry(nd, struct hist_entry, rb_node);
nd               2097 tools/perf/ui/browsers/hists.c 	struct rb_node *nd = hists__filter_entries(rb_first(browser->b.entries),
nd               2101 tools/perf/ui/browsers/hists.c 	while (nd) {
nd               2102 tools/perf/ui/browsers/hists.c 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd               2112 tools/perf/ui/browsers/hists.c 		nd = hists__filter_entries(rb_hierarchy_next(nd),
nd               2765 tools/perf/ui/browsers/hists.c 	struct rb_node *nd = rb_first_cached(&hb->hists->entries);
nd               2772 tools/perf/ui/browsers/hists.c 	while ((nd = hists__filter_entries(nd, hb->min_pcnt)) != NULL) {
nd               2774 tools/perf/ui/browsers/hists.c 		nd = rb_hierarchy_next(nd);
nd               2785 tools/perf/ui/browsers/hists.c 	struct rb_node *nd = rb_first_cached(&hb->hists->entries);
nd               2791 tools/perf/ui/browsers/hists.c 	while ((nd = hists__filter_entries(nd, hb->min_pcnt)) != NULL) {
nd               2792 tools/perf/ui/browsers/hists.c 		he = rb_entry(nd, struct hist_entry, rb_node);
nd               2815 tools/perf/ui/browsers/hists.c 		nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD);
nd                 25 tools/perf/ui/browsers/map.c static void map_browser__write(struct ui_browser *browser, void *nd, int row)
nd                 27 tools/perf/ui/browsers/map.c 	struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
nd                116 tools/perf/ui/browsers/map.c 	struct rb_node *nd;
nd                120 tools/perf/ui/browsers/map.c 	for (nd = rb_first(mb.b.entries); nd; nd = rb_next(nd)) {
nd                121 tools/perf/ui/browsers/map.c 		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
nd                100 tools/perf/ui/gtk/hists.c 	struct rb_node *nd;
nd                103 tools/perf/ui/gtk/hists.c 	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
nd                109 tools/perf/ui/gtk/hists.c 		node = rb_entry(nd, struct callchain_node, rb_node);
nd                163 tools/perf/ui/gtk/hists.c 	struct rb_node *nd;
nd                165 tools/perf/ui/gtk/hists.c 	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
nd                173 tools/perf/ui/gtk/hists.c 		node = rb_entry(nd, struct callchain_node, rb_node);
nd                223 tools/perf/ui/gtk/hists.c 	struct rb_node *nd;
nd                226 tools/perf/ui/gtk/hists.c 	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
nd                233 tools/perf/ui/gtk/hists.c 		node = rb_entry(nd, struct callchain_node, rb_node);
nd                300 tools/perf/ui/gtk/hists.c 	struct rb_node *nd;
nd                358 tools/perf/ui/gtk/hists.c 	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
nd                359 tools/perf/ui/gtk/hists.c 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd                798 tools/perf/ui/stdio/hist.c 	struct rb_node *nd;
nd                829 tools/perf/ui/stdio/hist.c 	for (nd = rb_first_cached(&hists->entries); nd;
nd                830 tools/perf/ui/stdio/hist.c 	     nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
nd                831 tools/perf/ui/stdio/hist.c 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd                366 tools/perf/util/build-id.c 	struct rb_node *nd;
nd                372 tools/perf/util/build-id.c 	for (nd = rb_first_cached(&session->machines.guests); nd;
nd                373 tools/perf/util/build-id.c 	     nd = rb_next(nd)) {
nd                374 tools/perf/util/build-id.c 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
nd                399 tools/perf/util/build-id.c 	struct rb_node *nd;
nd                406 tools/perf/util/build-id.c 	for (nd = rb_first_cached(&session->machines.guests); nd;
nd                407 tools/perf/util/build-id.c 	     nd = rb_next(nd)) {
nd                408 tools/perf/util/build-id.c 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
nd                442 tools/perf/util/build-id.c 	struct str_node *nd, *nd2;
nd                467 tools/perf/util/build-id.c 	strlist__for_each_entry(nd, toplist) {
nd                468 tools/perf/util/build-id.c 		if (asprintf(&linkdir, "%s/%s", topdir, nd->s) < 0)
nd                478 tools/perf/util/build-id.c 				     nd->s, nd2->s) != SBUILD_ID_SIZE - 1)
nd                519 tools/perf/util/build-id.c 	struct str_node *nd, *cand = NULL;
nd                531 tools/perf/util/build-id.c 	strlist__for_each_entry(nd, bidlist) {
nd                532 tools/perf/util/build-id.c 		if (strncmp(nd->s, incomplete_sbuild_id, len) != 0)
nd                538 tools/perf/util/build-id.c 		cand = nd;
nd                849 tools/perf/util/build-id.c 	struct rb_node *nd;
nd                860 tools/perf/util/build-id.c 	for (nd = rb_first_cached(&session->machines.guests); nd;
nd                861 tools/perf/util/build-id.c 	     nd = rb_next(nd)) {
nd                862 tools/perf/util/build-id.c 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
nd                875 tools/perf/util/build-id.c 	struct rb_node *nd;
nd                878 tools/perf/util/build-id.c 	for (nd = rb_first_cached(&session->machines.guests); nd;
nd                879 tools/perf/util/build-id.c 	     nd = rb_next(nd)) {
nd                880 tools/perf/util/build-id.c 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
nd               1300 tools/perf/util/dso.c 	struct rb_node *nd;
nd               1308 tools/perf/util/dso.c 	for (nd = rb_first_cached(&dso->symbols); nd; nd = rb_next(nd)) {
nd               1309 tools/perf/util/dso.c 		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
nd               2093 tools/perf/util/hist.c 	struct rb_node *nd;
nd               2100 tools/perf/util/hist.c 	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
nd               2101 tools/perf/util/hist.c 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd               2117 tools/perf/util/hist.c 	struct rb_node *nd;
nd               2138 tools/perf/util/hist.c 	nd = rb_first_cached(&he->hroot_out);
nd               2139 tools/perf/util/hist.c 	while (nd) {
nd               2140 tools/perf/util/hist.c 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd               2142 tools/perf/util/hist.c 		nd = rb_next(nd);
nd               2153 tools/perf/util/hist.c 	struct rb_node *nd;
nd               2161 tools/perf/util/hist.c 	nd = rb_first_cached(&hists->entries);
nd               2162 tools/perf/util/hist.c 	while (nd) {
nd               2163 tools/perf/util/hist.c 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd               2176 tools/perf/util/hist.c 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
nd               2185 tools/perf/util/hist.c 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
nd               2195 tools/perf/util/hist.c 			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
nd               2205 tools/perf/util/hist.c 	nd = rb_first_cached(&hists->entries);
nd               2206 tools/perf/util/hist.c 	while (nd) {
nd               2207 tools/perf/util/hist.c 		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
nd               2209 tools/perf/util/hist.c 		nd = rb_next(nd);
nd               2428 tools/perf/util/hist.c 	struct rb_node *nd;
nd               2431 tools/perf/util/hist.c 	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
nd               2432 tools/perf/util/hist.c 		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
nd               2448 tools/perf/util/hist.c 	struct rb_node *nd;
nd               2462 tools/perf/util/hist.c 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
nd               2463 tools/perf/util/hist.c 		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
nd               2476 tools/perf/util/hist.c 	struct rb_node *nd;
nd               2479 tools/perf/util/hist.c 	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
nd               2480 tools/perf/util/hist.c 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
nd               2523 tools/perf/util/hist.c 	struct rb_node *nd;
nd               2538 tools/perf/util/hist.c 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
nd               2539 tools/perf/util/hist.c 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
nd               2555 tools/perf/util/hist.c 	struct rb_node *nd;
nd               2563 tools/perf/util/hist.c 	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
nd               2564 tools/perf/util/hist.c 		pos = rb_entry(nd, struct hist_entry, rb_node_in);
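Editor's note: several of the hist.c and machine.c hits above (the delete/resort paths in util/hist.c and the thread-teardown loop in util/machine.c) use a variant of the walk in which the cursor is advanced with rb_next() before the current entry is erased and freed, so removing the node never invalidates the iterator. A self-contained sketch of that advance-then-release ordering, using a heap-allocated singly linked list instead of perf's rbtrees:

#include <stdio.h>
#include <stdlib.h>

struct entry { int val; struct entry *next; };

static void delete_all(struct entry **head)
{
	struct entry *nd = *head;

	while (nd) {
		struct entry *cur = nd;

		nd = nd->next;		/* grab the successor first ...   */
		free(cur);		/* ... then releasing cur is safe */
	}
	*head = NULL;
}

int main(void)
{
	struct entry *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct entry *e = malloc(sizeof(*e));

		if (!e)
			return 1;
		e->val = i;
		e->next = head;
		head = e;
	}
	delete_all(&head);
	printf("list emptied\n");
	return 0;
}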
nd                190 tools/perf/util/machine.c 	struct rb_node *nd;
nd                196 tools/perf/util/machine.c 		nd = rb_first_cached(&threads->entries);
nd                197 tools/perf/util/machine.c 		while (nd) {
nd                198 tools/perf/util/machine.c 			struct thread *t = rb_entry(nd, struct thread, rb_node);
nd                200 tools/perf/util/machine.c 			nd = rb_next(nd);
nd                295 tools/perf/util/machine.c 	struct rb_node *nd;
nd                299 tools/perf/util/machine.c 	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
nd                300 tools/perf/util/machine.c 		struct machine *machine = rb_entry(nd, struct machine, rb_node);
nd                369 tools/perf/util/machine.c 	struct rb_node *nd;
nd                371 tools/perf/util/machine.c 	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
nd                372 tools/perf/util/machine.c 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
nd                805 tools/perf/util/machine.c 	struct rb_node *nd;
nd                808 tools/perf/util/machine.c 	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
nd                809 tools/perf/util/machine.c 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
nd                825 tools/perf/util/machine.c 	struct rb_node *nd;
nd                828 tools/perf/util/machine.c 	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
nd                829 tools/perf/util/machine.c 		struct machine *pos = rb_entry(nd, struct machine, rb_node);
nd                857 tools/perf/util/machine.c 	struct rb_node *nd;
nd                868 tools/perf/util/machine.c 		for (nd = rb_first_cached(&threads->entries); nd;
nd                869 tools/perf/util/machine.c 		     nd = rb_next(nd)) {
nd                870 tools/perf/util/machine.c 			struct thread *pos = rb_entry(nd, struct thread, rb_node);
nd               2543 tools/perf/util/machine.c 	struct rb_node *nd;
nd               2550 tools/perf/util/machine.c 		for (nd = rb_first_cached(&threads->entries); nd;
nd               2551 tools/perf/util/machine.c 		     nd = rb_next(nd)) {
nd               2552 tools/perf/util/machine.c 			thread = rb_entry(nd, struct thread, rb_node);
nd               2571 tools/perf/util/machine.c 	struct rb_node *nd;
nd               2578 tools/perf/util/machine.c 	for (nd = rb_first_cached(&machines->guests); nd; nd = rb_next(nd)) {
nd               2579 tools/perf/util/machine.c 		struct machine *machine = rb_entry(nd, struct machine, rb_node);
nd                310 tools/perf/util/map.c 	struct rb_node *nd = rb_first_cached(symbols);
nd                311 tools/perf/util/map.c 	if (nd != NULL) {
nd                312 tools/perf/util/map.c 		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
nd                320 tools/perf/util/map.c 	struct rb_node *nd = rb_last(&symbols->rb_root);
nd                321 tools/perf/util/map.c 	if (nd != NULL) {
nd                322 tools/perf/util/map.c 		struct symbol *sym = rb_entry(nd, struct symbol, rb_node);
nd                690 tools/perf/util/map.c 	struct rb_node *nd;
nd                694 tools/perf/util/map.c 	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
nd                695 tools/perf/util/map.c 		struct map *pos = rb_entry(nd, struct map, rb_node);
nd                742 tools/perf/util/map.c 	struct rb_node *nd;
nd                746 tools/perf/util/map.c 	for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
nd                747 tools/perf/util/map.c 		struct map *pos = rb_entry(nd, struct map, rb_node);
nd                 30 tools/perf/util/metricgroup.c 	struct rb_node *nd;
nd                 38 tools/perf/util/metricgroup.c 	nd = rblist__find(metric_events, &me);
nd                 39 tools/perf/util/metricgroup.c 	if (nd)
nd                 40 tools/perf/util/metricgroup.c 		return container_of(nd, struct metric_event, nd);
nd                 43 tools/perf/util/metricgroup.c 		nd = rblist__find(metric_events, &me);
nd                 44 tools/perf/util/metricgroup.c 		if (nd)
nd                 45 tools/perf/util/metricgroup.c 			return container_of(nd, struct metric_event, nd);
nd                 54 tools/perf/util/metricgroup.c 					      nd);
nd                 74 tools/perf/util/metricgroup.c 	return &me->nd;
nd                 85 tools/perf/util/metricgroup.c 	struct list_head nd;
nd                164 tools/perf/util/metricgroup.c 	list_for_each_entry (eg, groups, nd) {
nd                195 tools/perf/util/metricgroup.c 		list_add(&expr->nd, &me->head);
nd                222 tools/perf/util/metricgroup.c 	struct rb_node nd;
nd                229 tools/perf/util/metricgroup.c 	struct mep *a = container_of(rb_node, struct mep, nd);
nd                249 tools/perf/util/metricgroup.c 	return &me->nd;
nd                259 tools/perf/util/metricgroup.c 	struct rb_node *nd;
nd                263 tools/perf/util/metricgroup.c 	nd = rblist__find(groups, &me);
nd                264 tools/perf/util/metricgroup.c 	if (nd)
nd                265 tools/perf/util/metricgroup.c 		return container_of(nd, struct mep, nd);
nd                267 tools/perf/util/metricgroup.c 	nd = rblist__find(groups, &me);
nd                268 tools/perf/util/metricgroup.c 	if (nd)
nd                269 tools/perf/util/metricgroup.c 		return container_of(nd, struct mep, nd);
nd                274 tools/perf/util/metricgroup.c 		       struct rb_node *nd)
nd                276 tools/perf/util/metricgroup.c 	struct mep *me = container_of(nd, struct mep, nd);
nd                388 tools/perf/util/metricgroup.c 		struct mep *me = container_of(node, struct mep, nd);
nd                466 tools/perf/util/metricgroup.c 			list_add_tail(&eg->nd, group_list);
nd                504 tools/perf/util/metricgroup.c 	list_for_each_entry_safe (eg, egtmp, group_list, nd) {
nd                508 tools/perf/util/metricgroup.c 		list_del_init(&eg->nd);
nd                 14 tools/perf/util/metricgroup.h 	struct rb_node nd;
nd                 20 tools/perf/util/metricgroup.h 	struct list_head nd;
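Editor's note: in metricgroup.c the pointer returned by rblist__find() and the embedded member of struct metric_event are both named nd, so container_of(nd, struct metric_event, nd) reads oddly but is the usual offset arithmetic from the embedded link back to its containing object. A self-contained illustration of that round trip; the *_stub types below are illustrative stand-ins for perf's rb_node/metric_event, not real perf types.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct rb_node_stub { int dummy; };	/* stand-in for struct rb_node */

struct metric_event_stub {		/* mirrors the shape of metric_event */
	struct rb_node_stub nd;		/* embedded link, also named "nd" */
	int id;
};

int main(void)
{
	struct metric_event_stub me = { .id = 42 };
	struct rb_node_stub *nd = &me.nd;	/* what a find() would hand back */

	/* Recover the enclosing object from the embedded member. */
	struct metric_event_stub *found =
		container_of(nd, struct metric_event_stub, nd);

	printf("id=%d (round trip %s)\n", found->id,
	       found == &me ? "ok" : "broken");
	return 0;
}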
nd               2373 tools/perf/util/parse-events.c 	struct str_node *nd, *nd2;
nd               2388 tools/perf/util/parse-events.c 	strlist__for_each_entry(nd, bidlist) {
nd               2389 tools/perf/util/parse-events.c 		pcache = probe_cache__new(nd->s, NULL);
nd               2402 tools/perf/util/parse-events.c 					ent->pev.event, nd->s);
nd               2410 tools/perf/util/parse-events.c 	strlist__for_each_entry(nd, sdtlist) {
nd               2411 tools/perf/util/parse-events.c 		buf = strchr(nd->s, '@');
nd               2415 tools/perf/util/parse-events.c 			printf("%s ", nd->s);
nd               2418 tools/perf/util/parse-events.c 		nd2 = strlist__next(nd);
nd               2423 tools/perf/util/parse-events.c 			if (strcmp(nd->s, nd2->s) == 0)
nd               2428 tools/perf/util/parse-events.c 			ret = asprintf(&buf, "%s@%s(%.12s)", nd->s, path, buf);
nd               2435 tools/perf/util/parse-events.c 			printf("  %-50s [%s]\n", nd->s, "SDT event");
nd               2437 tools/perf/util/parse-events.c 			if (strcmp(nd->s, nd2->s) != 0)
nd               3220 tools/perf/util/probe-event.c 	struct str_node *nd;
nd               3234 tools/perf/util/probe-event.c 	strlist__for_each_entry(nd, bidlist) {
nd               3235 tools/perf/util/probe-event.c 		pathname = build_id_cache__origname(nd->s);
nd               3503 tools/perf/util/probe-event.c         struct rb_node *nd;
nd               3536 tools/perf/util/probe-event.c 	for (nd = rb_first_cached(&map->dso->symbol_names); nd;
nd               3537 tools/perf/util/probe-event.c 	     nd = rb_next(nd)) {
nd               3538 tools/perf/util/probe-event.c 		struct symbol_name_rb_node *pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);
nd                980 tools/perf/util/probe-file.c 	struct str_node *nd;
nd                991 tools/perf/util/probe-file.c 	strlist__for_each_entry(nd, bidlist) {
nd                992 tools/perf/util/probe-file.c 		pcache = probe_cache__new(nd->s, NULL);
nd                996 tools/perf/util/probe-file.c 			buf = build_id_cache__origname(nd->s);
nd                997 tools/perf/util/probe-file.c 			printf("%s (%s):\n", buf, nd->s);
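Editor's note: the strlist__for_each_entry() hits (parse-events.c, probe-event.c, probe-file.c) all walk a string list whose nodes expose the string as ->s. The sketch below only mimics the shape of that macro over a hand-built two-node list; it is not perf's strlist API, and the build-id strings are dummies.

#include <stdio.h>

struct str_node { const char *s; struct str_node *next; };

#define strlist_stub__for_each_entry(nd, head) \
	for ((nd) = (head); (nd); (nd) = (nd)->next)

int main(void)
{
	/* Dummy build-id strings, purely for illustration. */
	struct str_node second = { "00112233445566778899aabbccddeeff00112233", NULL };
	struct str_node first  = { "ffeeddccbbaa99887766554433221100ffeeddcc", &second };
	struct str_node *nd;

	strlist_stub__for_each_entry(nd, &first)
		printf("build-id cache entry: %s\n", nd->s);
	return 0;
}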
nd                 60 tools/perf/util/rb_resort.h static void __name##_sorted__init_entry(struct rb_node *nd,			\
nd                 73 tools/perf/util/rb_resort.h        struct __name##_sorted_entry nd[0];					\
nd                 94 tools/perf/util/rb_resort.h 	struct rb_node *nd;							\
nd                 96 tools/perf/util/rb_resort.h 	for (nd = rb_first(entries); nd; nd = rb_next(nd)) {			\
nd                 97 tools/perf/util/rb_resort.h 		struct __name##_sorted_entry *snd = &sorted->nd[i++];		\
nd                 98 tools/perf/util/rb_resort.h 		__name##_sorted__init_entry(nd, snd);				\
nd                107 tools/perf/util/rb_resort.h 	sorted = malloc(sizeof(*sorted) + sizeof(sorted->nd[0]) * nr_entries);	\
nd                120 tools/perf/util/rb_resort.h static void __name##_sorted__init_entry(struct rb_node *nd,			\
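Editor's note: rb_resort.h generates, per tree type, a helper that snapshots the original rbtree's entries into a flat malloc'd array (the zero-length nd[0] member above, sized as sizeof(*sorted) + sizeof(sorted->nd[0]) * nr_entries) and then re-sorts those copies by a different key. A self-contained sketch of the same snapshot-then-sort idea follows; the item type is illustrative, a plain C array stands in for the rbtree walk, and qsort(3) stands in for the second rbtree the real macro inserts into.

#include <stdio.h>
#include <stdlib.h>

struct item { const char *name; unsigned long hits; };

struct items_sorted {
	unsigned int nr_entries;
	struct item nd[];		/* like `struct ..._sorted_entry nd[0]` */
};

static int cmp_hits(const void *a, const void *b)
{
	const struct item *ia = a, *ib = b;

	return (ia->hits < ib->hits) - (ia->hits > ib->hits);	/* descending */
}

int main(void)
{
	struct item src[] = { { "foo", 3 }, { "bar", 9 }, { "baz", 1 } };
	unsigned int i, nr = sizeof(src) / sizeof(src[0]);
	struct items_sorted *sorted;

	sorted = malloc(sizeof(*sorted) + sizeof(sorted->nd[0]) * nr);
	if (!sorted)
		return 1;
	sorted->nr_entries = nr;

	for (i = 0; i < nr; i++)	/* the rbtree walk in the real macro */
		sorted->nd[i] = src[i];

	qsort(sorted->nd, nr, sizeof(sorted->nd[0]), cmp_hits);

	for (i = 0; i < nr; i++)
		printf("%s %lu\n", sorted->nd[i].name, sorted->nd[i].hits);
	free(sorted);
	return 0;
}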
nd                 26 tools/perf/util/srccode.c 	struct list_head nd;
nd                 86 tools/perf/util/srccode.c 	list_del_init(&sf->nd);
nd                107 tools/perf/util/srccode.c 			list_del(&h->nd);
nd                108 tools/perf/util/srccode.c 			list_add(&h->nd, &srcfile_list);
nd                117 tools/perf/util/srccode.c 		h = list_entry(srcfile_list.prev, struct srcfile, nd);
nd                148 tools/perf/util/srccode.c 	list_add(&h->nd, &srcfile_list);
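Editor's note: srccode.c keeps open source files on an LRU-style list through the embedded list_head named nd: a cache hit unlinks the entry and re-adds it at the head, leaving the tail (srcfile_list.prev) as the eviction candidate. The tiny list implementation below mirrors just enough of <linux/list.h> to show that move-to-front step; it is not the kernel's list API.

#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add(struct list_head *e, struct list_head *h)
{
	e->next = h->next;
	e->prev = h;
	h->next->prev = e;
	h->next = e;
}

struct srcfile { const char *fn; struct list_head nd; };

int main(void)
{
	struct list_head lru;
	struct srcfile a = { "a.c", { NULL, NULL } };
	struct srcfile b = { "b.c", { NULL, NULL } };

	list_init(&lru);
	list_add(&a.nd, &lru);
	list_add(&b.nd, &lru);			/* head: b, tail: a */

	/* Cache hit on "a.c": move its node to the front of the list. */
	list_del(&a.nd);
	list_add(&a.nd, &lru);			/* head: a, tail: b  */

	printf("eviction candidate is b.c: %s\n",
	       lru.prev == &b.nd ? "yes" : "no");
	return 0;
}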
nd                 80 tools/perf/util/stat-shadow.c 	struct saved_value *nd = malloc(sizeof(struct saved_value));
nd                 82 tools/perf/util/stat-shadow.c 	if (!nd)
nd                 84 tools/perf/util/stat-shadow.c 	memcpy(nd, entry, sizeof(struct saved_value));
nd                 85 tools/perf/util/stat-shadow.c 	return &nd->rb_node;
nd                106 tools/perf/util/stat-shadow.c 	struct rb_node *nd;
nd                117 tools/perf/util/stat-shadow.c 	nd = rblist__find(rblist, &dm);
nd                118 tools/perf/util/stat-shadow.c 	if (nd)
nd                119 tools/perf/util/stat-shadow.c 		return container_of(nd, struct saved_value, rb_node);
nd                122 tools/perf/util/stat-shadow.c 		nd = rblist__find(rblist, &dm);
nd                123 tools/perf/util/stat-shadow.c 		if (nd)
nd                124 tools/perf/util/stat-shadow.c 			return container_of(nd, struct saved_value, rb_node);
nd               1048 tools/perf/util/stat-shadow.c 		list_for_each_entry (mexp, &me->head, nd) {
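Editor's note: the malloc/memcpy lines from stat-shadow.c above clone a template saved_value into a fresh allocation and return the address of the embedded rb_node; rblist uses a callback of that shape to materialise entries on first lookup. A self-contained sketch of the clone-and-return-the-link pattern; saved_value_stub and node_stub are illustrative stand-ins, not perf's types.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct node_stub { int dummy; };	/* stand-in for struct rb_node */

struct saved_value_stub {		/* mirrors the shape of saved_value */
	struct node_stub rb_node;	/* embedded link */
	double val;
};

/* Clone the template entry and hand back its embedded link. */
static struct node_stub *saved_value_new(const struct saved_value_stub *entry)
{
	struct saved_value_stub *nd = malloc(sizeof(*nd));

	if (!nd)
		return NULL;
	memcpy(nd, entry, sizeof(*nd));
	return &nd->rb_node;
}

int main(void)
{
	struct saved_value_stub key = { .val = 1.5 };
	struct node_stub *link = saved_value_new(&key);

	if (!link)
		return 1;
	printf("cloned val = %g\n",
	       container_of(link, struct saved_value_stub, rb_node)->val);
	free(container_of(link, struct saved_value_stub, rb_node));
	return 0;
}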
nd                187 tools/perf/util/symbol.c 	struct rb_node *nd;
nd                193 tools/perf/util/symbol.c 	nd = rb_first_cached(symbols);
nd                195 tools/perf/util/symbol.c 	while (nd) {
nd                196 tools/perf/util/symbol.c 		curr = rb_entry(nd, struct symbol, rb_node);
nd                198 tools/perf/util/symbol.c 		nd = rb_next(&curr->rb_node);
nd                199 tools/perf/util/symbol.c 		next = rb_entry(nd, struct symbol, rb_node);
nd                201 tools/perf/util/symbol.c 		if (!nd)
nd                212 tools/perf/util/symbol.c 			nd = rb_next(&curr->rb_node);
nd                221 tools/perf/util/symbol.c 	struct rb_node *nd, *prevnd = rb_first_cached(symbols);
nd                229 tools/perf/util/symbol.c 	for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
nd                231 tools/perf/util/symbol.c 		curr = rb_entry(nd, struct symbol, rb_node);
nd                435 tools/perf/util/symbol.c 	struct rb_node *nd;
nd                437 tools/perf/util/symbol.c 	for (nd = rb_first_cached(source); nd; nd = rb_next(nd)) {
nd                438 tools/perf/util/symbol.c 		struct symbol *pos = rb_entry(nd, struct symbol, rb_node);
nd               1892 tools/perf/util/symbol.c 	struct str_node *nd;
nd               1898 tools/perf/util/symbol.c 	strlist__for_each_entry(nd, dirs) {
nd               1900 tools/perf/util/symbol.c 			  "%s/%s/kallsyms", dir, nd->s);
nd                 69 tools/perf/util/symbol.h #define symbols__for_each_entry(symbols, pos, nd)			\
nd                 70 tools/perf/util/symbol.h 	for (nd = rb_first_cached(symbols);					\
nd                 71 tools/perf/util/symbol.h 	     nd && (pos = rb_entry(nd, struct symbol, rb_node));	\
nd                 72 tools/perf/util/symbol.h 	     nd = rb_next(nd))
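Editor's note: symbols__for_each_entry() in symbol.h packs the whole walk into the for header: the condition `nd && (pos = rb_entry(...))` both stops at the end of the tree and keeps the typed pos pointer in step with the raw cursor. A self-contained imitation of that macro shape; the link type and the stub__ names are illustrative, not perf's.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct link { struct link *next; };

struct symbol_stub {
	const char *name;
	struct link rb_node;
};

#define stub_entry(nd) container_of(nd, struct symbol_stub, rb_node)

/* Cursor nd and typed pointer pos advance together, as in symbol.h. */
#define stub__for_each_entry(head, pos, nd)			\
	for (nd = (head);					\
	     nd && (pos = stub_entry(nd));			\
	     nd = nd->next)

int main(void)
{
	struct symbol_stub b = { "exit", { NULL } };
	struct symbol_stub a = { "main", { &b.rb_node } };
	struct symbol_stub *pos;
	struct link *nd;

	stub__for_each_entry(&a.rb_node, pos, nd)
		printf("%s\n", pos->name);
	return 0;
}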
nd                 66 tools/perf/util/symbol_fprintf.c 	struct rb_node *nd;
nd                 69 tools/perf/util/symbol_fprintf.c 	for (nd = rb_first_cached(&dso->symbol_names); nd; nd = rb_next(nd)) {
nd                 70 tools/perf/util/symbol_fprintf.c 		pos = rb_entry(nd, struct symbol_name_rb_node, rb_node);