Lines Matching refs:pgpath

(Cross-reference listing for the pgpath identifier in the device-mapper multipath target, drivers/md/dm-mpath.c. Each entry gives the source line number, the matching line, and either the enclosing function or the kind of reference: struct, argument, member, or local.)

32 struct pgpath { struct
43 #define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path) argument
82 struct pgpath *current_pgpath;
112 struct pgpath *pgpath; member
116 typedef int (*action_fn) (struct pgpath *pgpath);
123 static int __pgpath_busy(struct pgpath *pgpath);
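Taken together, these declaration sites sketch the data model: a pgpath wraps one underlying device path, belongs to a priority group, and carries deferred activation work. Below is a hedged reconstruction of the structure assembled from the member accesses visible later in this listing; the field order and types are assumptions, not copied from the file.

struct pgpath {
	struct list_head list;			/* linked on priority_group::pgpaths (lines 163, 700) */
	struct priority_group *pg;		/* owning group, set at line 699 */
	unsigned is_active;			/* cleared by fail_path(), set by reinstate_path() */
	unsigned fail_count;			/* bumped at line 958 */
	struct dm_path path;			/* path.dev is the underlying dm device */
	struct delayed_work activate_path;	/* queued on kmpath_handlerd (lines 273, 1007) */
};

#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)

typedef int (*action_fn) (struct pgpath *pgpath);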
130 static struct pgpath *alloc_pgpath(void) in alloc_pgpath()
132 struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL); in alloc_pgpath() local
134 if (pgpath) { in alloc_pgpath()
135 pgpath->is_active = 1; in alloc_pgpath()
136 INIT_DELAYED_WORK(&pgpath->activate_path, activate_path); in alloc_pgpath()
139 return pgpath; in alloc_pgpath()
142 static void free_pgpath(struct pgpath *pgpath) in free_pgpath() argument
144 kfree(pgpath); in free_pgpath()
161 struct pgpath *pgpath, *tmp; in free_pgpaths() local
163 list_for_each_entry_safe(pgpath, tmp, pgpaths, list) { in free_pgpaths()
164 list_del(&pgpath->list); in free_pgpaths()
165 dm_put_device(ti, pgpath->path.dev); in free_pgpaths()
166 free_pgpath(pgpath); in free_pgpaths()
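The allocation and teardown helpers are shown almost in full above. A reconstruction, with the unshown declarations, braces, and the free_pgpaths() parameter list filled in as assumptions:

static struct pgpath *alloc_pgpath(void)
{
	struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);

	if (pgpath) {
		pgpath->is_active = 1;	/* new paths start out usable */
		INIT_DELAYED_WORK(&pgpath->activate_path, activate_path);
	}

	return pgpath;
}

static void free_pgpath(struct pgpath *pgpath)
{
	kfree(pgpath);
}

static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
{
	struct pgpath *pgpath, *tmp;

	list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
		list_del(&pgpath->list);
		dm_put_device(ti, pgpath->path.dev);	/* drop the reference taken during table parsing */
		free_pgpath(pgpath);
	}
}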
253 struct pgpath *pgpath; in __pg_init_all_paths() local
269 list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) { in __pg_init_all_paths()
271 if (!pgpath->is_active) in __pg_init_all_paths()
273 if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path, in __pg_init_all_paths()
280 static void __switch_pg(struct multipath *m, struct pgpath *pgpath) in __switch_pg() argument
282 m->current_pg = pgpath->pg; in __switch_pg()
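__pg_init_all_paths() walks the current group and queues activation work for every live path, while __switch_pg() retargets the current group. A sketch of the loop, with the delay argument (cut off mid-call at line 273) and the in-progress counter assumed:

static void __pg_init_all_paths(struct multipath *m)
{
	struct pgpath *pgpath;

	list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
		/* Skip failed paths */
		if (!pgpath->is_active)
			continue;
		/* Defer hardware-handler activation to the kmpath_handlerd
		 * workqueue; a zero delay here is an assumption. */
		if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path, 0))
			m->pg_init_in_progress++;	/* assumed counter */
	}
}

static void __switch_pg(struct multipath *m, struct pgpath *pgpath)
{
	m->current_pg = pgpath->pg;
	/* pg_init bookkeeping (line 283 onward) is not shown in the listing */
}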
386 struct pgpath *pgpath; in __multipath_map() local
397 pgpath = m->current_pgpath; in __multipath_map()
399 if (!pgpath) { in __multipath_map()
413 mpio->pgpath = pgpath; in __multipath_map()
416 bdev = pgpath->path.dev->bdev; in __multipath_map()
439 if (pgpath->pg->ps.type->start_io) in __multipath_map()
440 pgpath->pg->ps.type->start_io(&pgpath->pg->ps, in __multipath_map()
441 &pgpath->path, in __multipath_map()
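The map path ties these pieces together: pick the cached current_pgpath, record it in the per-I/O context, redirect the clone to the path's block device, then notify the path selector. Condensed from the matches above; everything between them (queueing when no path exists, the nr_bytes bookkeeping) is assumed:

	/* inside __multipath_map() */
	pgpath = m->current_pgpath;
	if (!pgpath) {
		/* no usable path: requeue or fail (lines 400-412 not shown) */
	}

	mpio->pgpath = pgpath;			/* remembered for end_io accounting */
	bdev = pgpath->path.dev->bdev;		/* the clone is redirected to this device */

	if (pgpath->pg->ps.type->start_io)
		pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
					      &pgpath->path,
					      nr_bytes);	/* nr_bytes assumed from end_io */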
550 static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps, in parse_path()
554 struct pgpath *p; in parse_path()
681 struct pgpath *pgpath; in parse_priority_group() local
693 pgpath = parse_path(&path_args, &pg->ps, ti); in parse_priority_group()
694 if (IS_ERR(pgpath)) { in parse_priority_group()
695 r = PTR_ERR(pgpath); in parse_priority_group()
699 pgpath->pg = pg; in parse_priority_group()
700 list_add_tail(&pgpath->list, &pg->pgpaths); in parse_priority_group()
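parse_priority_group() builds each group by parsing one path at a time and linking it back to its group, as the matches at lines 693-700 show. Only the error label is an assumption:

	/* inside parse_priority_group() */
	pgpath = parse_path(&path_args, &pg->ps, ti);
	if (IS_ERR(pgpath)) {
		r = PTR_ERR(pgpath);
		goto bad;	/* assumed unwind label */
	}

	pgpath->pg = pg;
	list_add_tail(&pgpath->list, &pg->pgpaths);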
944 static int fail_path(struct pgpath *pgpath) in fail_path() argument
947 struct multipath *m = pgpath->pg->m; in fail_path()
951 if (!pgpath->is_active) in fail_path()
954 DMWARN("Failing path %s.", pgpath->path.dev->name); in fail_path()
956 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path); in fail_path()
957 pgpath->is_active = 0; in fail_path()
958 pgpath->fail_count++; in fail_path()
962 if (pgpath == m->current_pgpath) in fail_path()
966 pgpath->path.dev->name, m->nr_valid_paths); in fail_path()
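fail_path() is nearly complete in the listing: tell the path selector, mark the path down, bump the failure counter, and drop the cached current_pgpath if it was the one that failed. Reconstructed below; the locking and the uevent call's leading arguments (line 966 shows only the continuation) are assumptions:

static int fail_path(struct pgpath *pgpath)
{
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);	/* assumed lock */

	if (!pgpath->is_active)
		goto out;			/* already failed */

	DMWARN("Failing path %s.", pgpath->path.dev->name);

	pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
	pgpath->is_active = 0;
	pgpath->fail_count++;

	m->nr_valid_paths--;			/* assumed from line 966 */

	if (pgpath == m->current_pgpath)
		m->current_pgpath = NULL;	/* force a fresh path choice */

	dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);

out:
	spin_unlock_irqrestore(&m->lock, flags);
	return 0;
}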
979 static int reinstate_path(struct pgpath *pgpath) in reinstate_path() argument
983 struct multipath *m = pgpath->pg->m; in reinstate_path()
987 if (pgpath->is_active) in reinstate_path()
990 if (!pgpath->pg->ps.type->reinstate_path) { in reinstate_path()
992 pgpath->pg->ps.type->name); in reinstate_path()
997 r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path); in reinstate_path()
1001 pgpath->is_active = 1; in reinstate_path()
1006 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { in reinstate_path()
1007 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) in reinstate_path()
1012 pgpath->path.dev->name, m->nr_valid_paths); in reinstate_path()
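reinstate_path() is the mirror image: refuse if the selector cannot reinstate, ask it to, mark the path live again, and if a hardware handler is configured and the revived path's group is current, kick activation. Sketch with the unshown branches and counters hedged:

static int reinstate_path(struct pgpath *pgpath)
{
	int r = 0;
	unsigned long flags;
	struct multipath *m = pgpath->pg->m;

	spin_lock_irqsave(&m->lock, flags);	/* assumed lock */

	if (pgpath->is_active)
		goto out;			/* nothing to do */

	if (!pgpath->pg->ps.type->reinstate_path) {
		DMWARN("Reinstate path not supported by path selector %s",
		       pgpath->pg->ps.type->name);	/* message text assumed */
		r = -EINVAL;
		goto out;
	}

	r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
	if (r)
		goto out;

	pgpath->is_active = 1;

	/* The branch preceding this else-if (line 1006) is not shown and is omitted */
	if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
		/* re-run pg_init for the freshly revived path */
		if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
			m->pg_init_in_progress++;	/* assumed counter */
	}

	dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
		       pgpath->path.dev->name, m->nr_valid_paths);	/* leading args assumed */

out:
	spin_unlock_irqrestore(&m->lock, flags);
	return r;
}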
1031 struct pgpath *pgpath; in action_dev() local
1035 list_for_each_entry(pgpath, &pg->pgpaths, list) { in action_dev()
1036 if (pgpath->path.dev == dev) in action_dev()
1037 r = action(pgpath); in action_dev()
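action_dev() applies fail_path or reinstate_path (via the action_fn typedef from line 116) to whichever pgpath wraps a given device. The listing shows only the inner loop; the outer iteration over priority groups and the default return value are assumptions:

static int action_dev(struct multipath *m, struct dm_dev *dev, action_fn action)
{
	int r = -EINVAL;	/* assumed default when no path matches */
	struct priority_group *pg;
	struct pgpath *pgpath;

	list_for_each_entry(pg, &m->priority_groups, list) {	/* assumed outer loop */
		list_for_each_entry(pgpath, &pg->pgpaths, list) {
			if (pgpath->path.dev == dev)
				r = action(pgpath);
		}
	}

	return r;
}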
1123 static int pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath) in pg_init_limit_reached() argument
1142 struct pgpath *pgpath = data; in pg_init_done() local
1143 struct priority_group *pg = pgpath->pg; in pg_init_done()
1162 fail_path(pgpath); in pg_init_done()
1176 if (pg_init_limit_reached(m, pgpath)) in pg_init_done()
1177 fail_path(pgpath); in pg_init_done()
1186 fail_path(pgpath); in pg_init_done()
1191 if (pgpath == m->current_pgpath) { in pg_init_done()
1221 struct pgpath *pgpath = in activate_path() local
1222 container_of(work, struct pgpath, activate_path.work); in activate_path()
1224 if (pgpath->is_active) in activate_path()
1225 scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev), in activate_path()
1226 pg_init_done, pgpath); in activate_path()
1228 pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED); in activate_path()
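activate_path() runs from the kmpath_handlerd workqueue: it recovers its pgpath with container_of(), asks the SCSI device handler to activate it, and routes the completion to pg_init_done(), which fails the path on hard errors or once pg_init_limit_reached() says retries are exhausted. The worker is shown nearly verbatim; the pg_init_done() skeleton below compresses the dispatch around lines 1162-1191 and hedges the unshown status codes:

static void activate_path(struct work_struct *work)
{
	struct pgpath *pgpath =
		container_of(work, struct pgpath, activate_path.work);

	if (pgpath->is_active)
		scsi_dh_activate(bdev_get_queue(pgpath->path.dev->bdev),
				 pg_init_done, pgpath);
	else
		pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
}

static void pg_init_done(void *data, int errors)
{
	struct pgpath *pgpath = data;
	struct priority_group *pg = pgpath->pg;
	struct multipath *m = pg->m;

	switch (errors) {
	case SCSI_DH_OK:
		break;
	case SCSI_DH_RETRY:		/* exact retryable codes assumed */
		if (pg_init_limit_reached(m, pgpath))
			fail_path(pgpath);	/* line 1177: give up after too many attempts */
		errors = 0;
		break;
	default:
		fail_path(pgpath);	/* line 1186: unrecoverable handler error */
	}

	if (errors && pgpath == m->current_pgpath) {
		/* line 1191: the failed path was current; force re-selection */
		m->current_pgpath = NULL;
	}
	/* completion bookkeeping and locking omitted */
}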
1272 if (mpio->pgpath) in do_end_io()
1273 fail_path(mpio->pgpath); in do_end_io()
1295 struct pgpath *pgpath; in multipath_end_io() local
1302 pgpath = mpio->pgpath; in multipath_end_io()
1303 if (pgpath) { in multipath_end_io()
1304 ps = &pgpath->pg->ps; in multipath_end_io()
1306 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes); in multipath_end_io()
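On completion, do_end_io() fails the path that carried the request (if any), and multipath_end_io() hands the path selector its end_io callback so it can update per-path statistics. Condensed from the matches; the NULL check on end_io is an assumption:

	/* in do_end_io(): an I/O error counts against the path that carried it */
	if (mpio->pgpath)
		fail_path(mpio->pgpath);

	/* in multipath_end_io(): let the path selector account the bytes */
	pgpath = mpio->pgpath;
	if (pgpath) {
		ps = &pgpath->pg->ps;
		if (ps->type->end_io)	/* assumed guard */
			ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes);
	}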
1371 struct pgpath *p; in multipath_status()
1592 struct pgpath *p; in multipath_iterate_devices()
1607 static int __pgpath_busy(struct pgpath *pgpath) in __pgpath_busy() argument
1609 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in __pgpath_busy()
1627 struct pgpath *pgpath; in multipath_busy() local
1658 list_for_each_entry(pgpath, &pg->pgpaths, list) in multipath_busy()
1659 if (pgpath->is_active) { in multipath_busy()
1662 if (!__pgpath_busy(pgpath)) { in multipath_busy()
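Finally, the busy check: __pgpath_busy() asks the underlying request queue, and multipath_busy() reports busy only if every active path in the group is busy, so a single idle path keeps I/O flowing. Sketch of the loop, with the helper called on the queue and the has_active/busy flags assumed:

static int __pgpath_busy(struct pgpath *pgpath)
{
	struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);

	return dm_underlying_device_busy(q);	/* helper name assumed */
}

	/* in multipath_busy(): busy only if no active path is idle */
	list_for_each_entry(pgpath, &pg->pgpaths, list)
		if (pgpath->is_active) {
			has_active = 1;			/* assumed flag */

			if (!__pgpath_busy(pgpath)) {
				busy = 0;		/* one idle path is enough */
				break;
			}
		}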