Lines matching refs: pd (identifier references in the pktcdvd packet-writing driver, drivers/block/pktcdvd.c)

74 #define pkt_err(pd, fmt, ...) \ argument
75 pr_err("%s: " fmt, pd->name, ##__VA_ARGS__)
76 #define pkt_notice(pd, fmt, ...) \ argument
77 pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__)
78 #define pkt_info(pd, fmt, ...) \ argument
79 pr_info("%s: " fmt, pd->name, ##__VA_ARGS__)
81 #define pkt_dbg(level, pd, fmt, ...) \ argument
85 pd->name, __func__, ##__VA_ARGS__); \
87 pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__); \
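Only the lines that mention pd are listed above, so the body of pkt_dbg() appears fragmentary. For context, the whole macro in the driver reads roughly as below (reconstructed around the visible fragments at source lines 81, 85 and 87; the PACKET_DEBUG level checks should be verified against the source):

    #define pkt_dbg(level, pd, fmt, ...)				\
    do {								\
    	if (level == 2 && PACKET_DEBUG >= 2)				\
    		pr_notice("%s: %s():" fmt,				\
    			  pd->name, __func__, ##__VA_ARGS__);		\
    	else if (level == 1 && PACKET_DEBUG >= 1)			\
    		pr_notice("%s: " fmt, pd->name, ##__VA_ARGS__);		\
    } while (0)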
109 static sector_t get_zone(sector_t sector, struct pktcdvd_device *pd) in get_zone() argument
111 return (sector + pd->offset) & ~(sector_t)(pd->settings.size - 1); in get_zone()
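get_zone() rounds a sector down to the first sector of its packet zone: pd->settings.size is the packet length in sectors, and the mask trick is only valid when that size is a power of two. A minimal standalone illustration of the same arithmetic, with hypothetical values:

    /* Illustration only: the zone rounding used by get_zone(). */
    #include <stdio.h>

    typedef unsigned long long sector_t;

    static sector_t zone_of(sector_t sector, sector_t offset, sector_t pkt_size)
    {
    	/* valid only when pkt_size is a power of two */
    	return (sector + offset) & ~(sector_t)(pkt_size - 1);
    }

    int main(void)
    {
    	/* 64-sector (32kB) packets, zero track offset -- hypothetical values */
    	printf("%llu\n", zone_of(130, 0, 64));	/* prints 128 */
    	return 0;
    }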
117 static struct pktcdvd_kobj* pkt_kobj_create(struct pktcdvd_device *pd, in pkt_kobj_create() argument
128 p->pd = pd; in pkt_kobj_create()
208 struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd; in kobj_pkt_show() local
212 n = sprintf(data, "%lu\n", pd->stats.pkt_started); in kobj_pkt_show()
215 n = sprintf(data, "%lu\n", pd->stats.pkt_ended); in kobj_pkt_show()
218 n = sprintf(data, "%lu\n", pd->stats.secs_w >> 1); in kobj_pkt_show()
221 n = sprintf(data, "%lu\n", pd->stats.secs_r >> 1); in kobj_pkt_show()
224 n = sprintf(data, "%lu\n", pd->stats.secs_rg >> 1); in kobj_pkt_show()
227 spin_lock(&pd->lock); in kobj_pkt_show()
228 v = pd->bio_queue_size; in kobj_pkt_show()
229 spin_unlock(&pd->lock); in kobj_pkt_show()
233 spin_lock(&pd->lock); in kobj_pkt_show()
234 v = pd->write_congestion_off; in kobj_pkt_show()
235 spin_unlock(&pd->lock); in kobj_pkt_show()
239 spin_lock(&pd->lock); in kobj_pkt_show()
240 v = pd->write_congestion_on; in kobj_pkt_show()
241 spin_unlock(&pd->lock); in kobj_pkt_show()
268 struct pktcdvd_device *pd = to_pktcdvdkobj(kobj)->pd; in kobj_pkt_store() local
272 pd->stats.pkt_started = 0; in kobj_pkt_store()
273 pd->stats.pkt_ended = 0; in kobj_pkt_store()
274 pd->stats.secs_w = 0; in kobj_pkt_store()
275 pd->stats.secs_rg = 0; in kobj_pkt_store()
276 pd->stats.secs_r = 0; in kobj_pkt_store()
280 spin_lock(&pd->lock); in kobj_pkt_store()
281 pd->write_congestion_off = val; in kobj_pkt_store()
282 init_write_congestion_marks(&pd->write_congestion_off, in kobj_pkt_store()
283 &pd->write_congestion_on); in kobj_pkt_store()
284 spin_unlock(&pd->lock); in kobj_pkt_store()
288 spin_lock(&pd->lock); in kobj_pkt_store()
289 pd->write_congestion_on = val; in kobj_pkt_store()
290 init_write_congestion_marks(&pd->write_congestion_off, in kobj_pkt_store()
291 &pd->write_congestion_on); in kobj_pkt_store()
292 spin_unlock(&pd->lock); in kobj_pkt_store()
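init_write_congestion_marks() itself never dereferences pd (it takes the two marks by pointer), so it is absent from this listing. Its role is to keep the off mark strictly below the on mark and to disable throttling when the on mark is non-positive; a sketch of that sanitising, with clamp bounds that only approximate the driver's:

    /* Sketch: sanitise the congestion marks after a sysfs store. */
    static void init_write_congestion_marks(int *lo, int *hi)
    {
    	if (*hi > 0) {
    		*hi = max(*hi, 500);		/* approximate bounds */
    		*hi = min(*hi, 1000000);
    		if (*lo <= 0)
    			*lo = *hi - 100;
    		else {
    			*lo = min(*lo, *hi - 100);
    			*lo = max(*lo, 100);
    		}
    	} else {
    		*hi = -1;			/* throttling disabled */
    		*lo = -1;
    	}
    }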
312 static void pkt_sysfs_dev_new(struct pktcdvd_device *pd) in pkt_sysfs_dev_new() argument
315 pd->dev = device_create(class_pktcdvd, NULL, MKDEV(0, 0), NULL, in pkt_sysfs_dev_new()
316 "%s", pd->name); in pkt_sysfs_dev_new()
317 if (IS_ERR(pd->dev)) in pkt_sysfs_dev_new()
318 pd->dev = NULL; in pkt_sysfs_dev_new()
320 if (pd->dev) { in pkt_sysfs_dev_new()
321 pd->kobj_stat = pkt_kobj_create(pd, "stat", in pkt_sysfs_dev_new()
322 &pd->dev->kobj, in pkt_sysfs_dev_new()
324 pd->kobj_wqueue = pkt_kobj_create(pd, "write_queue", in pkt_sysfs_dev_new()
325 &pd->dev->kobj, in pkt_sysfs_dev_new()
330 static void pkt_sysfs_dev_remove(struct pktcdvd_device *pd) in pkt_sysfs_dev_remove() argument
332 pkt_kobj_remove(pd->kobj_stat); in pkt_sysfs_dev_remove()
333 pkt_kobj_remove(pd->kobj_wqueue); in pkt_sysfs_dev_remove()
335 device_unregister(pd->dev); in pkt_sysfs_dev_remove()
358 struct pktcdvd_device *pd = pkt_devs[idx]; in class_pktcdvd_show_map() local
359 if (!pd) in class_pktcdvd_show_map()
362 pd->name, in class_pktcdvd_show_map()
363 MAJOR(pd->pkt_dev), MINOR(pd->pkt_dev), in class_pktcdvd_show_map()
364 MAJOR(pd->bdev->bd_dev), in class_pktcdvd_show_map()
365 MINOR(pd->bdev->bd_dev)); in class_pktcdvd_show_map()
472 static void pkt_debugfs_dev_new(struct pktcdvd_device *pd) in pkt_debugfs_dev_new() argument
476 pd->dfs_d_root = debugfs_create_dir(pd->name, pkt_debugfs_root); in pkt_debugfs_dev_new()
477 if (!pd->dfs_d_root) in pkt_debugfs_dev_new()
480 pd->dfs_f_info = debugfs_create_file("info", S_IRUGO, in pkt_debugfs_dev_new()
481 pd->dfs_d_root, pd, &debug_fops); in pkt_debugfs_dev_new()
484 static void pkt_debugfs_dev_remove(struct pktcdvd_device *pd) in pkt_debugfs_dev_remove() argument
488 debugfs_remove(pd->dfs_f_info); in pkt_debugfs_dev_remove()
489 debugfs_remove(pd->dfs_d_root); in pkt_debugfs_dev_remove()
490 pd->dfs_f_info = NULL; in pkt_debugfs_dev_remove()
491 pd->dfs_d_root = NULL; in pkt_debugfs_dev_remove()
508 static void pkt_bio_finished(struct pktcdvd_device *pd) in pkt_bio_finished() argument
510 BUG_ON(atomic_read(&pd->cdrw.pending_bios) <= 0); in pkt_bio_finished()
511 if (atomic_dec_and_test(&pd->cdrw.pending_bios)) { in pkt_bio_finished()
512 pkt_dbg(2, pd, "queue empty\n"); in pkt_bio_finished()
513 atomic_set(&pd->iosched.attention, 1); in pkt_bio_finished()
514 wake_up(&pd->wqueue); in pkt_bio_finished()
590 static void pkt_shrink_pktlist(struct pktcdvd_device *pd) in pkt_shrink_pktlist() argument
594 BUG_ON(!list_empty(&pd->cdrw.pkt_active_list)); in pkt_shrink_pktlist()
596 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_free_list, list) { in pkt_shrink_pktlist()
599 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list); in pkt_shrink_pktlist()
602 static int pkt_grow_pktlist(struct pktcdvd_device *pd, int nr_packets) in pkt_grow_pktlist() argument
606 BUG_ON(!list_empty(&pd->cdrw.pkt_free_list)); in pkt_grow_pktlist()
609 pkt = pkt_alloc_packet_data(pd->settings.size >> 2); in pkt_grow_pktlist()
611 pkt_shrink_pktlist(pd); in pkt_grow_pktlist()
615 pkt->pd = pd; in pkt_grow_pktlist()
616 list_add(&pkt->list, &pd->cdrw.pkt_free_list); in pkt_grow_pktlist()
630 static void pkt_rbtree_erase(struct pktcdvd_device *pd, struct pkt_rb_node *node) in pkt_rbtree_erase() argument
632 rb_erase(&node->rb_node, &pd->bio_queue); in pkt_rbtree_erase()
633 mempool_free(node, pd->rb_pool); in pkt_rbtree_erase()
634 pd->bio_queue_size--; in pkt_rbtree_erase()
635 BUG_ON(pd->bio_queue_size < 0); in pkt_rbtree_erase()
641 static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s) in pkt_rbtree_find() argument
643 struct rb_node *n = pd->bio_queue.rb_node; in pkt_rbtree_find()
648 BUG_ON(pd->bio_queue_size > 0); in pkt_rbtree_find()
675 static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *node) in pkt_rbtree_insert() argument
677 struct rb_node **p = &pd->bio_queue.rb_node; in pkt_rbtree_insert()
691 rb_insert_color(&node->rb_node, &pd->bio_queue); in pkt_rbtree_insert()
692 pd->bio_queue_size++; in pkt_rbtree_insert()
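The descent loop of pkt_rbtree_insert() operates on locals, so it is invisible to this search. The whole function follows the kernel's standard rbtree link-then-recolor idiom, roughly:

    static void pkt_rbtree_insert(struct pktcdvd_device *pd,
    			      struct pkt_rb_node *node)
    {
    	struct rb_node **p = &pd->bio_queue.rb_node;
    	struct rb_node *parent = NULL;
    	sector_t s = node->bio->bi_iter.bi_sector;
    	struct pkt_rb_node *tmp;

    	/* find the leaf position, ordered by starting sector */
    	while (*p) {
    		parent = *p;
    		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
    		if (s < tmp->bio->bi_iter.bi_sector)
    			p = &(*p)->rb_left;
    		else
    			p = &(*p)->rb_right;
    	}
    	rb_link_node(&node->rb_node, parent, p);
    	rb_insert_color(&node->rb_node, &pd->bio_queue);
    	pd->bio_queue_size++;
    }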
699 static int pkt_generic_packet(struct pktcdvd_device *pd, struct packet_command *cgc) in pkt_generic_packet() argument
701 struct request_queue *q = bdev_get_queue(pd->bdev); in pkt_generic_packet()
725 blk_execute_rq(rq->q, pd->bdev->bd_disk, rq, 0); in pkt_generic_packet()
748 static void pkt_dump_sense(struct pktcdvd_device *pd, in pkt_dump_sense() argument
754 pkt_err(pd, "%*ph - sense %02x.%02x.%02x (%s)\n", in pkt_dump_sense()
759 pkt_err(pd, "%*ph - no sense\n", CDROM_PACKET_SIZE, cgc->cmd); in pkt_dump_sense()
765 static int pkt_flush_cache(struct pktcdvd_device *pd) in pkt_flush_cache() argument
780 return pkt_generic_packet(pd, &cgc); in pkt_flush_cache()
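pkt_flush_cache() is a thin wrapper that sends SYNCHRONIZE CACHE down the generic packet path; the elided lines are command setup only, roughly:

    static int pkt_flush_cache(struct pktcdvd_device *pd)
    {
    	struct packet_command cgc;

    	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
    	cgc.cmd[0] = GPCMD_FLUSH_CACHE;
    	cgc.quiet = 1;
    	/* IMMED bit left clear: slower close, but safer */
    	return pkt_generic_packet(pd, &cgc);
    }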
786 static noinline_for_stack int pkt_set_speed(struct pktcdvd_device *pd, in pkt_set_speed() argument
801 if ((ret = pkt_generic_packet(pd, &cgc))) in pkt_set_speed()
802 pkt_dump_sense(pd, &cgc); in pkt_set_speed()
811 static void pkt_queue_bio(struct pktcdvd_device *pd, struct bio *bio) in pkt_queue_bio() argument
813 spin_lock(&pd->iosched.lock); in pkt_queue_bio()
815 bio_list_add(&pd->iosched.read_queue, bio); in pkt_queue_bio()
817 bio_list_add(&pd->iosched.write_queue, bio); in pkt_queue_bio()
818 spin_unlock(&pd->iosched.lock); in pkt_queue_bio()
820 atomic_set(&pd->iosched.attention, 1); in pkt_queue_bio()
821 wake_up(&pd->wqueue); in pkt_queue_bio()
840 static void pkt_iosched_process_queue(struct pktcdvd_device *pd) in pkt_iosched_process_queue() argument
843 if (atomic_read(&pd->iosched.attention) == 0) in pkt_iosched_process_queue()
845 atomic_set(&pd->iosched.attention, 0); in pkt_iosched_process_queue()
851 spin_lock(&pd->iosched.lock); in pkt_iosched_process_queue()
852 reads_queued = !bio_list_empty(&pd->iosched.read_queue); in pkt_iosched_process_queue()
853 writes_queued = !bio_list_empty(&pd->iosched.write_queue); in pkt_iosched_process_queue()
854 spin_unlock(&pd->iosched.lock); in pkt_iosched_process_queue()
859 if (pd->iosched.writing) { in pkt_iosched_process_queue()
861 spin_lock(&pd->iosched.lock); in pkt_iosched_process_queue()
862 bio = bio_list_peek(&pd->iosched.write_queue); in pkt_iosched_process_queue()
863 spin_unlock(&pd->iosched.lock); in pkt_iosched_process_queue()
865 pd->iosched.last_write)) in pkt_iosched_process_queue()
868 if (atomic_read(&pd->cdrw.pending_bios) > 0) { in pkt_iosched_process_queue()
869 pkt_dbg(2, pd, "write, waiting\n"); in pkt_iosched_process_queue()
872 pkt_flush_cache(pd); in pkt_iosched_process_queue()
873 pd->iosched.writing = 0; in pkt_iosched_process_queue()
877 if (atomic_read(&pd->cdrw.pending_bios) > 0) { in pkt_iosched_process_queue()
878 pkt_dbg(2, pd, "read, waiting\n"); in pkt_iosched_process_queue()
881 pd->iosched.writing = 1; in pkt_iosched_process_queue()
885 spin_lock(&pd->iosched.lock); in pkt_iosched_process_queue()
886 if (pd->iosched.writing) in pkt_iosched_process_queue()
887 bio = bio_list_pop(&pd->iosched.write_queue); in pkt_iosched_process_queue()
889 bio = bio_list_pop(&pd->iosched.read_queue); in pkt_iosched_process_queue()
890 spin_unlock(&pd->iosched.lock); in pkt_iosched_process_queue()
896 pd->iosched.successive_reads += in pkt_iosched_process_queue()
899 pd->iosched.successive_reads = 0; in pkt_iosched_process_queue()
900 pd->iosched.last_write = bio_end_sector(bio); in pkt_iosched_process_queue()
902 if (pd->iosched.successive_reads >= HI_SPEED_SWITCH) { in pkt_iosched_process_queue()
903 if (pd->read_speed == pd->write_speed) { in pkt_iosched_process_queue()
904 pd->read_speed = MAX_SPEED; in pkt_iosched_process_queue()
905 pkt_set_speed(pd, pd->write_speed, pd->read_speed); in pkt_iosched_process_queue()
908 if (pd->read_speed != pd->write_speed) { in pkt_iosched_process_queue()
909 pd->read_speed = pd->write_speed; in pkt_iosched_process_queue()
910 pkt_set_speed(pd, pd->write_speed, pd->read_speed); in pkt_iosched_process_queue()
914 atomic_inc(&pd->cdrw.pending_bios); in pkt_iosched_process_queue()
923 static int pkt_set_segment_merging(struct pktcdvd_device *pd, struct request_queue *q) in pkt_set_segment_merging() argument
925 if ((pd->settings.size << 9) / CD_FRAMESIZE in pkt_set_segment_merging()
930 clear_bit(PACKET_MERGE_SEGS, &pd->flags); in pkt_set_segment_merging()
932 } else if ((pd->settings.size << 9) / PAGE_SIZE in pkt_set_segment_merging()
938 set_bit(PACKET_MERGE_SEGS, &pd->flags); in pkt_set_segment_merging()
941 pkt_err(pd, "cdrom max_phys_segments too small\n"); in pkt_set_segment_merging()
982 struct pktcdvd_device *pd = pkt->pd; in pkt_end_io_read() local
983 BUG_ON(!pd); in pkt_end_io_read()
985 pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n", in pkt_end_io_read()
993 wake_up(&pd->wqueue); in pkt_end_io_read()
995 pkt_bio_finished(pd); in pkt_end_io_read()
1001 struct pktcdvd_device *pd = pkt->pd; in pkt_end_io_packet_write() local
1002 BUG_ON(!pd); in pkt_end_io_packet_write()
1004 pkt_dbg(2, pd, "id=%d, err=%d\n", pkt->id, err); in pkt_end_io_packet_write()
1006 pd->stats.pkt_ended++; in pkt_end_io_packet_write()
1008 pkt_bio_finished(pd); in pkt_end_io_packet_write()
1011 wake_up(&pd->wqueue); in pkt_end_io_packet_write()
1017 static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt) in pkt_gather_data() argument
1038 pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9); in pkt_gather_data()
1047 pkt_dbg(2, pd, "zone %llx cached\n", in pkt_gather_data()
1064 bio->bi_bdev = pd->bdev; in pkt_gather_data()
1070 pkt_dbg(2, pd, "Adding frame %d, page:%p offs:%d\n", in pkt_gather_data()
1077 pkt_queue_bio(pd, bio); in pkt_gather_data()
1082 pkt_dbg(2, pd, "need %d frames for zone %llx\n", in pkt_gather_data()
1084 pd->stats.pkt_started++; in pkt_gather_data()
1085 pd->stats.secs_rg += frames_read * (CD_FRAMESIZE >> 9); in pkt_gather_data()
1092 static struct packet_data *pkt_get_packet_data(struct pktcdvd_device *pd, int zone) in pkt_get_packet_data() argument
1096 list_for_each_entry(pkt, &pd->cdrw.pkt_free_list, list) { in pkt_get_packet_data()
1097 if (pkt->sector == zone || pkt->list.next == &pd->cdrw.pkt_free_list) { in pkt_get_packet_data()
1108 static void pkt_put_packet_data(struct pktcdvd_device *pd, struct packet_data *pkt) in pkt_put_packet_data() argument
1111 list_add(&pkt->list, &pd->cdrw.pkt_free_list); in pkt_put_packet_data()
1113 list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list); in pkt_put_packet_data()
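The head/tail split above is a small caching policy: a packet whose sector is still valid is returned to the head of the free list, where pkt_get_packet_data() (line 1097) will reconsider it first for an exact zone match; an invalidated packet goes to the tail. Reconstructed around the two visible list_add calls:

    static void pkt_put_packet_data(struct pktcdvd_device *pd,
    				struct packet_data *pkt)
    {
    	if (pkt->sector != (sector_t)-1)
    		/* cached zone data may still be reusable */
    		list_add(&pkt->list, &pd->cdrw.pkt_free_list);
    	else
    		list_add_tail(&pkt->list, &pd->cdrw.pkt_free_list);
    }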
1132 struct pktcdvd_device *pd = rq->rq_disk->private_data; in pkt_start_recovery()
1138 pkt_bdev = bdget(kdev_t_to_nr(pd->pkt_dev)); in pkt_start_recovery()
1158 pkt->bio->bi_bdev = pd->bdev; in pkt_start_recovery()
1183 pkt_dbg(2, pd, "pkt %2d : s=%6llx %s -> %s\n", in pkt_set_state()
1194 static int pkt_handle_queue(struct pktcdvd_device *pd) in pkt_handle_queue() argument
1203 atomic_set(&pd->scan_queue, 0); in pkt_handle_queue()
1205 if (list_empty(&pd->cdrw.pkt_free_list)) { in pkt_handle_queue()
1206 pkt_dbg(2, pd, "no pkt\n"); in pkt_handle_queue()
1213 spin_lock(&pd->lock); in pkt_handle_queue()
1214 first_node = pkt_rbtree_find(pd, pd->current_sector); in pkt_handle_queue()
1216 n = rb_first(&pd->bio_queue); in pkt_handle_queue()
1223 zone = get_zone(bio->bi_iter.bi_sector, pd); in pkt_handle_queue()
1224 list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) { in pkt_handle_queue()
1234 n = rb_first(&pd->bio_queue); in pkt_handle_queue()
1241 spin_unlock(&pd->lock); in pkt_handle_queue()
1243 pkt_dbg(2, pd, "no bio\n"); in pkt_handle_queue()
1247 pkt = pkt_get_packet_data(pd, zone); in pkt_handle_queue()
1249 pd->current_sector = zone + pd->settings.size; in pkt_handle_queue()
1251 BUG_ON(pkt->frames != pd->settings.size >> 2); in pkt_handle_queue()
1258 spin_lock(&pd->lock); in pkt_handle_queue()
1259 pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone); in pkt_handle_queue()
1260 while ((node = pkt_rbtree_find(pd, zone)) != NULL) { in pkt_handle_queue()
1262 pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long) in pkt_handle_queue()
1263 get_zone(bio->bi_iter.bi_sector, pd)); in pkt_handle_queue()
1264 if (get_zone(bio->bi_iter.bi_sector, pd) != zone) in pkt_handle_queue()
1266 pkt_rbtree_erase(pd, node); in pkt_handle_queue()
1274 wakeup = (pd->write_congestion_on > 0 in pkt_handle_queue()
1275 && pd->bio_queue_size <= pd->write_congestion_off); in pkt_handle_queue()
1276 spin_unlock(&pd->lock); in pkt_handle_queue()
1278 clear_bdi_congested(&pd->disk->queue->backing_dev_info, in pkt_handle_queue()
1286 spin_lock(&pd->cdrw.active_list_lock); in pkt_handle_queue()
1287 list_add(&pkt->list, &pd->cdrw.pkt_active_list); in pkt_handle_queue()
1288 spin_unlock(&pd->cdrw.active_list_lock); in pkt_handle_queue()
1297 static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt) in pkt_start_write() argument
1304 pkt->w_bio->bi_bdev = pd->bdev; in pkt_start_write()
1315 pkt_dbg(2, pd, "vcnt=%d\n", pkt->w_bio->bi_vcnt); in pkt_start_write()
1326 pkt_dbg(2, pd, "Writing %d frames for zone %llx\n", in pkt_start_write()
1329 if (test_bit(PACKET_MERGE_SEGS, &pd->flags) || (pkt->write_size < pkt->frames)) { in pkt_start_write()
1339 pkt_queue_bio(pd, pkt->w_bio); in pkt_start_write()
1354 static void pkt_run_state_machine(struct pktcdvd_device *pd, struct packet_data *pkt) in pkt_run_state_machine() argument
1358 pkt_dbg(2, pd, "pkt %d\n", pkt->id); in pkt_run_state_machine()
1367 pkt_gather_data(pd, pkt); in pkt_run_state_machine()
1378 pkt_start_write(pd, pkt); in pkt_run_state_machine()
1395 pkt_start_write(pd, pkt); in pkt_run_state_machine()
1397 pkt_dbg(2, pd, "No recovery possible\n"); in pkt_run_state_machine()
1414 static void pkt_handle_packets(struct pktcdvd_device *pd) in pkt_handle_packets() argument
1421 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { in pkt_handle_packets()
1424 pkt_run_state_machine(pd, pkt); in pkt_handle_packets()
1431 spin_lock(&pd->cdrw.active_list_lock); in pkt_handle_packets()
1432 list_for_each_entry_safe(pkt, next, &pd->cdrw.pkt_active_list, list) { in pkt_handle_packets()
1435 pkt_put_packet_data(pd, pkt); in pkt_handle_packets()
1437 atomic_set(&pd->scan_queue, 1); in pkt_handle_packets()
1440 spin_unlock(&pd->cdrw.active_list_lock); in pkt_handle_packets()
1443 static void pkt_count_states(struct pktcdvd_device *pd, int *states) in pkt_count_states() argument
1451 spin_lock(&pd->cdrw.active_list_lock); in pkt_count_states()
1452 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { in pkt_count_states()
1455 spin_unlock(&pd->cdrw.active_list_lock); in pkt_count_states()
1464 struct pktcdvd_device *pd = foobar; in kcdrwd() local
1477 add_wait_queue(&pd->wqueue, &wait); in kcdrwd()
1482 if (atomic_read(&pd->scan_queue) > 0) in kcdrwd()
1486 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { in kcdrwd()
1492 if (atomic_read(&pd->iosched.attention) != 0) in kcdrwd()
1498 pkt_count_states(pd, states); in kcdrwd()
1499 pkt_dbg(2, pd, "i:%d ow:%d rw:%d ww:%d rec:%d fin:%d\n", in kcdrwd()
1505 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { in kcdrwd()
1510 pkt_dbg(2, pd, "sleeping\n"); in kcdrwd()
1512 pkt_dbg(2, pd, "wake up\n"); in kcdrwd()
1517 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { in kcdrwd()
1532 remove_wait_queue(&pd->wqueue, &wait); in kcdrwd()
1541 while (pkt_handle_queue(pd)) in kcdrwd()
1547 pkt_handle_packets(pd); in kcdrwd()
1552 pkt_iosched_process_queue(pd); in kcdrwd()
1558 static void pkt_print_settings(struct pktcdvd_device *pd) in pkt_print_settings() argument
1560 pkt_info(pd, "%s packets, %u blocks, Mode-%c disc\n", in pkt_print_settings()
1561 pd->settings.fp ? "Fixed" : "Variable", in pkt_print_settings()
1562 pd->settings.size >> 2, in pkt_print_settings()
1563 pd->settings.block_mode == 8 ? '1' : '2'); in pkt_print_settings()
1566 static int pkt_mode_sense(struct pktcdvd_device *pd, struct packet_command *cgc, int page_code, int… in pkt_mode_sense() argument
1575 return pkt_generic_packet(pd, cgc); in pkt_mode_sense()
1578 static int pkt_mode_select(struct pktcdvd_device *pd, struct packet_command *cgc) in pkt_mode_select() argument
1587 return pkt_generic_packet(pd, cgc); in pkt_mode_select()
1590 static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di) in pkt_get_disc_info() argument
1601 if ((ret = pkt_generic_packet(pd, &cgc))) in pkt_get_disc_info()
1614 return pkt_generic_packet(pd, &cgc); in pkt_get_disc_info()
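The two pkt_generic_packet() calls in pkt_get_disc_info() (source lines 1601 and 1614) implement a probe-then-requeue pattern: first ask only for the 2-byte length header, then reissue the READ DISC INFO command sized to what the drive says it can supply. A rough reconstruction:

    static int pkt_get_disc_info(struct pktcdvd_device *pd, disc_information *di)
    {
    	struct packet_command cgc;
    	int ret;

    	init_cdrom_command(&cgc, di, sizeof(disc_information), CGC_DATA_READ);
    	cgc.cmd[0] = GPCMD_READ_DISC_INFO;
    	cgc.cmd[8] = cgc.buflen = 2;	/* length header only */
    	cgc.quiet = 1;

    	if ((ret = pkt_generic_packet(pd, &cgc)))
    		return ret;

    	/* requeue with the length the drive reports, capped to our buffer */
    	cgc.buflen = be16_to_cpu(di->disc_information_length) +
    		     sizeof(di->disc_information_length);
    	if (cgc.buflen > sizeof(disc_information))
    		cgc.buflen = sizeof(disc_information);

    	cgc.cmd[8] = cgc.buflen;
    	return pkt_generic_packet(pd, &cgc);
    }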
1617 static int pkt_get_track_info(struct pktcdvd_device *pd, __u16 track, __u8 type, track_information … in pkt_get_track_info() argument
1630 if ((ret = pkt_generic_packet(pd, &cgc))) in pkt_get_track_info()
1640 return pkt_generic_packet(pd, &cgc); in pkt_get_track_info()
1643 static noinline_for_stack int pkt_get_last_written(struct pktcdvd_device *pd, in pkt_get_last_written() argument
1651 if ((ret = pkt_get_disc_info(pd, &di))) in pkt_get_last_written()
1655 if ((ret = pkt_get_track_info(pd, last_track, 1, &ti))) in pkt_get_last_written()
1661 if ((ret = pkt_get_track_info(pd, last_track, 1, &ti))) in pkt_get_last_written()
1681 static noinline_for_stack int pkt_set_write_settings(struct pktcdvd_device *pd) in pkt_set_write_settings() argument
1690 if ((pd->mmc3_profile == 0x1a) || (pd->mmc3_profile == 0x12)) in pkt_set_write_settings()
1696 if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) { in pkt_set_write_settings()
1697 pkt_dump_sense(pd, &cgc); in pkt_set_write_settings()
1702 pd->mode_offset = (buffer[6] << 8) | (buffer[7] & 0xff); in pkt_set_write_settings()
1711 if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WRITE_PARMS_PAGE, 0))) { in pkt_set_write_settings()
1712 pkt_dump_sense(pd, &cgc); in pkt_set_write_settings()
1719 wp = (write_param_page *) &buffer[sizeof(struct mode_page_header) + pd->mode_offset]; in pkt_set_write_settings()
1721 wp->fp = pd->settings.fp; in pkt_set_write_settings()
1722 wp->track_mode = pd->settings.track_mode; in pkt_set_write_settings()
1723 wp->write_type = pd->settings.write_type; in pkt_set_write_settings()
1724 wp->data_block_type = pd->settings.block_mode; in pkt_set_write_settings()
1747 pkt_err(pd, "write mode wrong %d\n", wp->data_block_type); in pkt_set_write_settings()
1750 wp->packet_size = cpu_to_be32(pd->settings.size >> 2); in pkt_set_write_settings()
1753 if ((ret = pkt_mode_select(pd, &cgc))) { in pkt_set_write_settings()
1754 pkt_dump_sense(pd, &cgc); in pkt_set_write_settings()
1758 pkt_print_settings(pd); in pkt_set_write_settings()
1765 static int pkt_writable_track(struct pktcdvd_device *pd, track_information *ti) in pkt_writable_track() argument
1767 switch (pd->mmc3_profile) { in pkt_writable_track()
1791 pkt_err(pd, "bad state %d-%d-%d\n", ti->rt, ti->blank, ti->packet); in pkt_writable_track()
1798 static int pkt_writable_disc(struct pktcdvd_device *pd, disc_information *di) in pkt_writable_disc() argument
1800 switch (pd->mmc3_profile) { in pkt_writable_disc()
1809 pkt_dbg(2, pd, "Wrong disc profile (%x)\n", in pkt_writable_disc()
1810 pd->mmc3_profile); in pkt_writable_disc()
1819 pkt_notice(pd, "unknown disc - no track?\n"); in pkt_writable_disc()
1824 pkt_err(pd, "wrong disc type (%x)\n", di->disc_type); in pkt_writable_disc()
1829 pkt_notice(pd, "disc not erasable\n"); in pkt_writable_disc()
1834 pkt_err(pd, "can't write to last track (reserved)\n"); in pkt_writable_disc()
1841 static noinline_for_stack int pkt_probe_settings(struct pktcdvd_device *pd) in pkt_probe_settings() argument
1852 ret = pkt_generic_packet(pd, &cgc); in pkt_probe_settings()
1853 pd->mmc3_profile = ret ? 0xffff : buf[6] << 8 | buf[7]; in pkt_probe_settings()
1858 if ((ret = pkt_get_disc_info(pd, &di))) { in pkt_probe_settings()
1859 pkt_err(pd, "failed get_disc\n"); in pkt_probe_settings()
1863 if (!pkt_writable_disc(pd, &di)) in pkt_probe_settings()
1866 pd->type = di.erasable ? PACKET_CDRW : PACKET_CDR; in pkt_probe_settings()
1869 if ((ret = pkt_get_track_info(pd, track, 1, &ti))) { in pkt_probe_settings()
1870 pkt_err(pd, "failed get_track\n"); in pkt_probe_settings()
1874 if (!pkt_writable_track(pd, &ti)) { in pkt_probe_settings()
1875 pkt_err(pd, "can't write to this track\n"); in pkt_probe_settings()
1883 pd->settings.size = be32_to_cpu(ti.fixed_packet_size) << 2; in pkt_probe_settings()
1884 if (pd->settings.size == 0) { in pkt_probe_settings()
1885 pkt_notice(pd, "detected zero packet size!\n"); in pkt_probe_settings()
1888 if (pd->settings.size > PACKET_MAX_SECTORS) { in pkt_probe_settings()
1889 pkt_err(pd, "packet size is too big\n"); in pkt_probe_settings()
1892 pd->settings.fp = ti.fp; in pkt_probe_settings()
1893 pd->offset = (be32_to_cpu(ti.track_start) << 2) & (pd->settings.size - 1); in pkt_probe_settings()
1896 pd->nwa = be32_to_cpu(ti.next_writable); in pkt_probe_settings()
1897 set_bit(PACKET_NWA_VALID, &pd->flags); in pkt_probe_settings()
1906 pd->lra = be32_to_cpu(ti.last_rec_address); in pkt_probe_settings()
1907 set_bit(PACKET_LRA_VALID, &pd->flags); in pkt_probe_settings()
1909 pd->lra = 0xffffffff; in pkt_probe_settings()
1910 set_bit(PACKET_LRA_VALID, &pd->flags); in pkt_probe_settings()
1916 pd->settings.link_loss = 7; in pkt_probe_settings()
1917 pd->settings.write_type = 0; /* packet */ in pkt_probe_settings()
1918 pd->settings.track_mode = ti.track_mode; in pkt_probe_settings()
1925 pd->settings.block_mode = PACKET_BLOCK_MODE1; in pkt_probe_settings()
1928 pd->settings.block_mode = PACKET_BLOCK_MODE2; in pkt_probe_settings()
1931 pkt_err(pd, "unknown data mode\n"); in pkt_probe_settings()
1940 static noinline_for_stack int pkt_write_caching(struct pktcdvd_device *pd, in pkt_write_caching() argument
1950 cgc.buflen = pd->mode_offset + 12; in pkt_write_caching()
1957 if ((ret = pkt_mode_sense(pd, &cgc, GPMODE_WCACHING_PAGE, 0))) in pkt_write_caching()
1960 buf[pd->mode_offset + 10] |= (!!set << 2); in pkt_write_caching()
1963 ret = pkt_mode_select(pd, &cgc); in pkt_write_caching()
1965 pkt_err(pd, "write caching control failed\n"); in pkt_write_caching()
1966 pkt_dump_sense(pd, &cgc); in pkt_write_caching()
1968 pkt_notice(pd, "enabled write caching\n"); in pkt_write_caching()
1972 static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag) in pkt_lock_door() argument
1979 return pkt_generic_packet(pd, &cgc); in pkt_lock_door()
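pkt_lock_door() wraps the MMC PREVENT/ALLOW MEDIUM REMOVAL command; the elided lines only fill in the command block, roughly:

    static int pkt_lock_door(struct pktcdvd_device *pd, int lockflag)
    {
    	struct packet_command cgc;

    	init_cdrom_command(&cgc, NULL, 0, CGC_DATA_NONE);
    	cgc.cmd[0] = GPCMD_PREVENT_ALLOW_MEDIUM_REMOVAL;
    	cgc.cmd[4] = lockflag ? 1 : 0;
    	return pkt_generic_packet(pd, &cgc);
    }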
1985 static noinline_for_stack int pkt_get_max_speed(struct pktcdvd_device *pd, in pkt_get_max_speed() argument
1994 cap_buf = &buf[sizeof(struct mode_page_header) + pd->mode_offset]; in pkt_get_max_speed()
1998 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0); in pkt_get_max_speed()
2000 cgc.buflen = pd->mode_offset + cap_buf[1] + 2 + in pkt_get_max_speed()
2002 ret = pkt_mode_sense(pd, &cgc, GPMODE_CAPABILITIES_PAGE, 0); in pkt_get_max_speed()
2004 pkt_dump_sense(pd, &cgc); in pkt_get_max_speed()
2046 static noinline_for_stack int pkt_media_speed(struct pktcdvd_device *pd, in pkt_media_speed() argument
2061 ret = pkt_generic_packet(pd, &cgc); in pkt_media_speed()
2063 pkt_dump_sense(pd, &cgc); in pkt_media_speed()
2076 ret = pkt_generic_packet(pd, &cgc); in pkt_media_speed()
2078 pkt_dump_sense(pd, &cgc); in pkt_media_speed()
2083 pkt_notice(pd, "disc type is not CD-RW\n"); in pkt_media_speed()
2087 pkt_notice(pd, "A1 values on media are not valid, maybe not CDRW?\n"); in pkt_media_speed()
2107 pkt_notice(pd, "unknown disc sub-type %d\n", st); in pkt_media_speed()
2111 pkt_info(pd, "maximum media speed: %d\n", *speed); in pkt_media_speed()
2114 pkt_notice(pd, "unknown speed %d for sub-type %d\n", sp, st); in pkt_media_speed()
2119 static noinline_for_stack int pkt_perform_opc(struct pktcdvd_device *pd) in pkt_perform_opc() argument
2125 pkt_dbg(2, pd, "Performing OPC\n"); in pkt_perform_opc()
2132 if ((ret = pkt_generic_packet(pd, &cgc))) in pkt_perform_opc()
2133 pkt_dump_sense(pd, &cgc); in pkt_perform_opc()
2137 static int pkt_open_write(struct pktcdvd_device *pd) in pkt_open_write() argument
2142 if ((ret = pkt_probe_settings(pd))) { in pkt_open_write()
2143 pkt_dbg(2, pd, "failed probe\n"); in pkt_open_write()
2147 if ((ret = pkt_set_write_settings(pd))) { in pkt_open_write()
2148 pkt_dbg(1, pd, "failed saving write settings\n"); in pkt_open_write()
2152 pkt_write_caching(pd, USE_WCACHING); in pkt_open_write()
2154 if ((ret = pkt_get_max_speed(pd, &write_speed))) in pkt_open_write()
2156 switch (pd->mmc3_profile) { in pkt_open_write()
2160 pkt_dbg(1, pd, "write speed %ukB/s\n", write_speed); in pkt_open_write()
2163 if ((ret = pkt_media_speed(pd, &media_write_speed))) in pkt_open_write()
2166 pkt_dbg(1, pd, "write speed %ux\n", write_speed / 176); in pkt_open_write()
2171 if ((ret = pkt_set_speed(pd, write_speed, read_speed))) { in pkt_open_write()
2172 pkt_dbg(1, pd, "couldn't set write speed\n"); in pkt_open_write()
2175 pd->write_speed = write_speed; in pkt_open_write()
2176 pd->read_speed = read_speed; in pkt_open_write()
2178 if ((ret = pkt_perform_opc(pd))) { in pkt_open_write()
2179 pkt_dbg(1, pd, "Optimum Power Calibration failed\n"); in pkt_open_write()
2188 static int pkt_open_dev(struct pktcdvd_device *pd, fmode_t write) in pkt_open_dev() argument
2199 bdget(pd->bdev->bd_dev); in pkt_open_dev()
2200 if ((ret = blkdev_get(pd->bdev, FMODE_READ | FMODE_EXCL, pd))) in pkt_open_dev()
2203 if ((ret = pkt_get_last_written(pd, &lba))) { in pkt_open_dev()
2204 pkt_err(pd, "pkt_get_last_written failed\n"); in pkt_open_dev()
2208 set_capacity(pd->disk, lba << 2); in pkt_open_dev()
2209 set_capacity(pd->bdev->bd_disk, lba << 2); in pkt_open_dev()
2210 bd_set_size(pd->bdev, (loff_t)lba << 11); in pkt_open_dev()
2212 q = bdev_get_queue(pd->bdev); in pkt_open_dev()
2214 if ((ret = pkt_open_write(pd))) in pkt_open_dev()
2221 blk_queue_max_hw_sectors(q, pd->settings.size); in pkt_open_dev()
2223 set_bit(PACKET_WRITABLE, &pd->flags); in pkt_open_dev()
2225 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); in pkt_open_dev()
2226 clear_bit(PACKET_WRITABLE, &pd->flags); in pkt_open_dev()
2229 if ((ret = pkt_set_segment_merging(pd, q))) in pkt_open_dev()
2233 if (!pkt_grow_pktlist(pd, CONFIG_CDROM_PKTCDVD_BUFFERS)) { in pkt_open_dev()
2234 pkt_err(pd, "not enough memory for buffers\n"); in pkt_open_dev()
2238 pkt_info(pd, "%lukB available on disc\n", lba << 1); in pkt_open_dev()
2244 blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL); in pkt_open_dev()
2253 static void pkt_release_dev(struct pktcdvd_device *pd, int flush) in pkt_release_dev() argument
2255 if (flush && pkt_flush_cache(pd)) in pkt_release_dev()
2256 pkt_dbg(1, pd, "not flushing cache\n"); in pkt_release_dev()
2258 pkt_lock_door(pd, 0); in pkt_release_dev()
2260 pkt_set_speed(pd, MAX_SPEED, MAX_SPEED); in pkt_release_dev()
2261 blkdev_put(pd->bdev, FMODE_READ | FMODE_EXCL); in pkt_release_dev()
2263 pkt_shrink_pktlist(pd); in pkt_release_dev()
2275 struct pktcdvd_device *pd = NULL; in pkt_open() local
2280 pd = pkt_find_dev_from_minor(MINOR(bdev->bd_dev)); in pkt_open()
2281 if (!pd) { in pkt_open()
2285 BUG_ON(pd->refcnt < 0); in pkt_open()
2287 pd->refcnt++; in pkt_open()
2288 if (pd->refcnt > 1) { in pkt_open()
2290 !test_bit(PACKET_WRITABLE, &pd->flags)) { in pkt_open()
2295 ret = pkt_open_dev(pd, mode & FMODE_WRITE); in pkt_open()
2310 pd->refcnt--; in pkt_open()
2319 struct pktcdvd_device *pd = disk->private_data; in pkt_close() local
2323 pd->refcnt--; in pkt_close()
2324 BUG_ON(pd->refcnt < 0); in pkt_close()
2325 if (pd->refcnt == 0) { in pkt_close()
2326 int flush = test_bit(PACKET_WRITABLE, &pd->flags); in pkt_close()
2327 pkt_release_dev(pd, flush); in pkt_close()
2337 struct pktcdvd_device *pd = psd->pd; in pkt_end_io_read_cloned() local
2342 pkt_bio_finished(pd); in pkt_end_io_read_cloned()
2345 static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio) in pkt_make_request_read() argument
2350 psd->pd = pd; in pkt_make_request_read()
2352 cloned_bio->bi_bdev = pd->bdev; in pkt_make_request_read()
2355 pd->stats.secs_r += bio_sectors(bio); in pkt_make_request_read()
2356 pkt_queue_bio(pd, cloned_bio); in pkt_make_request_read()
2361 struct pktcdvd_device *pd = q->queuedata; in pkt_make_request_write() local
2367 zone = get_zone(bio->bi_iter.bi_sector, pd); in pkt_make_request_write()
2373 spin_lock(&pd->cdrw.active_list_lock); in pkt_make_request_write()
2375 list_for_each_entry(pkt, &pd->cdrw.pkt_active_list, list) { in pkt_make_request_write()
2386 wake_up(&pd->wqueue); in pkt_make_request_write()
2389 spin_unlock(&pd->cdrw.active_list_lock); in pkt_make_request_write()
2397 spin_unlock(&pd->cdrw.active_list_lock); in pkt_make_request_write()
2404 spin_lock(&pd->lock); in pkt_make_request_write()
2405 if (pd->write_congestion_on > 0 in pkt_make_request_write()
2406 && pd->bio_queue_size >= pd->write_congestion_on) { in pkt_make_request_write()
2409 spin_unlock(&pd->lock); in pkt_make_request_write()
2411 spin_lock(&pd->lock); in pkt_make_request_write()
2412 } while(pd->bio_queue_size > pd->write_congestion_off); in pkt_make_request_write()
2414 spin_unlock(&pd->lock); in pkt_make_request_write()
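Between the lock operations at source lines 2405-2414 sits the actual write throttle: once bio_queue_size reaches the on mark, the backing device is flagged congested and the writer sleeps in congestion_wait() until the queue drains below the off mark. A reconstruction, assuming the same pre-4.x congestion API as clear_bdi_congested() at line 1278:

    	spin_lock(&pd->lock);
    	if (pd->write_congestion_on > 0
    	    && pd->bio_queue_size >= pd->write_congestion_on) {
    		set_bdi_congested(&q->backing_dev_info, BLK_RW_ASYNC);
    		do {
    			spin_unlock(&pd->lock);
    			congestion_wait(BLK_RW_ASYNC, HZ);
    			spin_lock(&pd->lock);
    		} while (pd->bio_queue_size > pd->write_congestion_off);
    	}
    	spin_unlock(&pd->lock);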
2419 node = mempool_alloc(pd->rb_pool, GFP_NOIO); in pkt_make_request_write()
2421 spin_lock(&pd->lock); in pkt_make_request_write()
2422 BUG_ON(pd->bio_queue_size < 0); in pkt_make_request_write()
2423 was_empty = (pd->bio_queue_size == 0); in pkt_make_request_write()
2424 pkt_rbtree_insert(pd, node); in pkt_make_request_write()
2425 spin_unlock(&pd->lock); in pkt_make_request_write()
2430 atomic_set(&pd->scan_queue, 1); in pkt_make_request_write()
2433 wake_up(&pd->wqueue); in pkt_make_request_write()
2434 } else if (!list_empty(&pd->cdrw.pkt_free_list) && !blocked_bio) { in pkt_make_request_write()
2439 wake_up(&pd->wqueue); in pkt_make_request_write()
2445 struct pktcdvd_device *pd; in pkt_make_request() local
2449 pd = q->queuedata; in pkt_make_request()
2450 if (!pd) { in pkt_make_request()
2456 pkt_dbg(2, pd, "start = %6llx stop = %6llx\n", in pkt_make_request()
2464 pkt_make_request_read(pd, bio); in pkt_make_request()
2468 if (!test_bit(PACKET_WRITABLE, &pd->flags)) { in pkt_make_request()
2469 pkt_notice(pd, "WRITE for ro device (%llu)\n", in pkt_make_request()
2475 pkt_err(pd, "wrong bio size\n"); in pkt_make_request()
2482 sector_t zone = get_zone(bio->bi_iter.bi_sector, pd); in pkt_make_request()
2483 sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd); in pkt_make_request()
2486 BUG_ON(last_zone != zone + pd->settings.size); in pkt_make_request()
2509 struct pktcdvd_device *pd = q->queuedata; in pkt_merge_bvec() local
2510 sector_t zone = get_zone(bmd->bi_sector, pd); in pkt_merge_bvec()
2512 int remaining = (pd->settings.size << 9) - used; in pkt_merge_bvec()
2526 static void pkt_init_queue(struct pktcdvd_device *pd) in pkt_init_queue() argument
2528 struct request_queue *q = pd->disk->queue; in pkt_init_queue()
2534 q->queuedata = pd; in pkt_init_queue()
2539 struct pktcdvd_device *pd = m->private; in pkt_seq_show() local
2544 seq_printf(m, "Writer %s mapped to %s:\n", pd->name, in pkt_seq_show()
2545 bdevname(pd->bdev, bdev_buf)); in pkt_seq_show()
2548 seq_printf(m, "\tpacket size:\t\t%dkB\n", pd->settings.size / 2); in pkt_seq_show()
2550 if (pd->settings.write_type == 0) in pkt_seq_show()
2556 seq_printf(m, "\tpacket type:\t\t%s\n", pd->settings.fp ? "Fixed" : "Variable"); in pkt_seq_show()
2557 seq_printf(m, "\tlink loss:\t\t%d\n", pd->settings.link_loss); in pkt_seq_show()
2559 seq_printf(m, "\ttrack mode:\t\t%d\n", pd->settings.track_mode); in pkt_seq_show()
2561 if (pd->settings.block_mode == PACKET_BLOCK_MODE1) in pkt_seq_show()
2563 else if (pd->settings.block_mode == PACKET_BLOCK_MODE2) in pkt_seq_show()
2570 seq_printf(m, "\tpackets started:\t%lu\n", pd->stats.pkt_started); in pkt_seq_show()
2571 seq_printf(m, "\tpackets ended:\t\t%lu\n", pd->stats.pkt_ended); in pkt_seq_show()
2572 seq_printf(m, "\twritten:\t\t%lukB\n", pd->stats.secs_w >> 1); in pkt_seq_show()
2573 seq_printf(m, "\tread gather:\t\t%lukB\n", pd->stats.secs_rg >> 1); in pkt_seq_show()
2574 seq_printf(m, "\tread:\t\t\t%lukB\n", pd->stats.secs_r >> 1); in pkt_seq_show()
2577 seq_printf(m, "\treference count:\t%d\n", pd->refcnt); in pkt_seq_show()
2578 seq_printf(m, "\tflags:\t\t\t0x%lx\n", pd->flags); in pkt_seq_show()
2579 seq_printf(m, "\tread speed:\t\t%ukB/s\n", pd->read_speed); in pkt_seq_show()
2580 seq_printf(m, "\twrite speed:\t\t%ukB/s\n", pd->write_speed); in pkt_seq_show()
2581 seq_printf(m, "\tstart offset:\t\t%lu\n", pd->offset); in pkt_seq_show()
2582 seq_printf(m, "\tmode page offset:\t%u\n", pd->mode_offset); in pkt_seq_show()
2585 seq_printf(m, "\tbios queued:\t\t%d\n", pd->bio_queue_size); in pkt_seq_show()
2586 seq_printf(m, "\tbios pending:\t\t%d\n", atomic_read(&pd->cdrw.pending_bios)); in pkt_seq_show()
2587 seq_printf(m, "\tcurrent sector:\t\t0x%llx\n", (unsigned long long)pd->current_sector); in pkt_seq_show()
2589 pkt_count_states(pd, states); in pkt_seq_show()
2594 pd->write_congestion_off, in pkt_seq_show()
2595 pd->write_congestion_on); in pkt_seq_show()
2611 static int pkt_new_dev(struct pktcdvd_device *pd, dev_t dev) in pkt_new_dev() argument
2618 if (pd->pkt_dev == dev) { in pkt_new_dev()
2619 pkt_err(pd, "recursive setup not allowed\n"); in pkt_new_dev()
2627 pkt_err(pd, "%s already setup\n", in pkt_new_dev()
2632 pkt_err(pd, "can't chain pktcdvd devices\n"); in pkt_new_dev()
2647 pd->bdev = bdev; in pkt_new_dev()
2650 pkt_init_queue(pd); in pkt_new_dev()
2652 atomic_set(&pd->cdrw.pending_bios, 0); in pkt_new_dev()
2653 pd->cdrw.thread = kthread_run(kcdrwd, pd, "%s", pd->name); in pkt_new_dev()
2654 if (IS_ERR(pd->cdrw.thread)) { in pkt_new_dev()
2655 pkt_err(pd, "can't start kernel thread\n"); in pkt_new_dev()
2660 proc_create_data(pd->name, 0, pkt_proc, &pkt_proc_fops, pd); in pkt_new_dev()
2661 pkt_dbg(1, pd, "writer mapped to %s\n", bdevname(bdev, b)); in pkt_new_dev()
2673 struct pktcdvd_device *pd = bdev->bd_disk->private_data; in pkt_ioctl() local
2676 pkt_dbg(2, pd, "cmd %x, dev %d:%d\n", in pkt_ioctl()
2686 if (pd->refcnt == 1) in pkt_ioctl()
2687 pkt_lock_door(pd, 0); in pkt_ioctl()
2697 ret = __blkdev_driver_ioctl(pd->bdev, mode, cmd, arg); in pkt_ioctl()
2701 pkt_dbg(2, pd, "Unknown ioctl (%x)\n", cmd); in pkt_ioctl()
2712 struct pktcdvd_device *pd = disk->private_data; in pkt_check_events() local
2715 if (!pd) in pkt_check_events()
2717 if (!pd->bdev) in pkt_check_events()
2719 attached_disk = pd->bdev->bd_disk; in pkt_check_events()
2745 struct pktcdvd_device *pd; in pkt_setup_dev() local
2759 pd = kzalloc(sizeof(struct pktcdvd_device), GFP_KERNEL); in pkt_setup_dev()
2760 if (!pd) in pkt_setup_dev()
2763 pd->rb_pool = mempool_create_kmalloc_pool(PKT_RB_POOL_SIZE, in pkt_setup_dev()
2765 if (!pd->rb_pool) in pkt_setup_dev()
2768 INIT_LIST_HEAD(&pd->cdrw.pkt_free_list); in pkt_setup_dev()
2769 INIT_LIST_HEAD(&pd->cdrw.pkt_active_list); in pkt_setup_dev()
2770 spin_lock_init(&pd->cdrw.active_list_lock); in pkt_setup_dev()
2772 spin_lock_init(&pd->lock); in pkt_setup_dev()
2773 spin_lock_init(&pd->iosched.lock); in pkt_setup_dev()
2774 bio_list_init(&pd->iosched.read_queue); in pkt_setup_dev()
2775 bio_list_init(&pd->iosched.write_queue); in pkt_setup_dev()
2776 sprintf(pd->name, DRIVER_NAME"%d", idx); in pkt_setup_dev()
2777 init_waitqueue_head(&pd->wqueue); in pkt_setup_dev()
2778 pd->bio_queue = RB_ROOT; in pkt_setup_dev()
2780 pd->write_congestion_on = write_congestion_on; in pkt_setup_dev()
2781 pd->write_congestion_off = write_congestion_off; in pkt_setup_dev()
2786 pd->disk = disk; in pkt_setup_dev()
2791 strcpy(disk->disk_name, pd->name); in pkt_setup_dev()
2793 disk->private_data = pd; in pkt_setup_dev()
2798 pd->pkt_dev = MKDEV(pktdev_major, idx); in pkt_setup_dev()
2799 ret = pkt_new_dev(pd, dev); in pkt_setup_dev()
2804 disk->events = pd->bdev->bd_disk->events; in pkt_setup_dev()
2805 disk->async_events = pd->bdev->bd_disk->async_events; in pkt_setup_dev()
2809 pkt_sysfs_dev_new(pd); in pkt_setup_dev()
2810 pkt_debugfs_dev_new(pd); in pkt_setup_dev()
2812 pkt_devs[idx] = pd; in pkt_setup_dev()
2814 *pkt_dev = pd->pkt_dev; in pkt_setup_dev()
2824 if (pd->rb_pool) in pkt_setup_dev()
2825 mempool_destroy(pd->rb_pool); in pkt_setup_dev()
2826 kfree(pd); in pkt_setup_dev()
2838 struct pktcdvd_device *pd; in pkt_remove_dev() local
2845 pd = pkt_devs[idx]; in pkt_remove_dev()
2846 if (pd && (pd->pkt_dev == pkt_dev)) in pkt_remove_dev()
2855 if (pd->refcnt > 0) { in pkt_remove_dev()
2859 if (!IS_ERR(pd->cdrw.thread)) in pkt_remove_dev()
2860 kthread_stop(pd->cdrw.thread); in pkt_remove_dev()
2864 pkt_debugfs_dev_remove(pd); in pkt_remove_dev()
2865 pkt_sysfs_dev_remove(pd); in pkt_remove_dev()
2867 blkdev_put(pd->bdev, FMODE_READ | FMODE_NDELAY); in pkt_remove_dev()
2869 remove_proc_entry(pd->name, pkt_proc); in pkt_remove_dev()
2870 pkt_dbg(1, pd, "writer unmapped\n"); in pkt_remove_dev()
2872 del_gendisk(pd->disk); in pkt_remove_dev()
2873 blk_cleanup_queue(pd->disk->queue); in pkt_remove_dev()
2874 put_disk(pd->disk); in pkt_remove_dev()
2876 mempool_destroy(pd->rb_pool); in pkt_remove_dev()
2877 kfree(pd); in pkt_remove_dev()
2889 struct pktcdvd_device *pd; in pkt_get_status() local
2893 pd = pkt_find_dev_from_minor(ctrl_cmd->dev_index); in pkt_get_status()
2894 if (pd) { in pkt_get_status()
2895 ctrl_cmd->dev = new_encode_dev(pd->bdev->bd_dev); in pkt_get_status()
2896 ctrl_cmd->pkt_dev = new_encode_dev(pd->pkt_dev); in pkt_get_status()