| 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | #ifndef __BLK_NULL_BLK_H |
| 3 | #define __BLK_NULL_BLK_H |
| 4 | |
| 5 | #undef pr_fmt |
| 6 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 7 | |
| 8 | #include <linux/blkdev.h> |
| 9 | #include <linux/slab.h> |
| 10 | #include <linux/blk-mq.h> |
| 11 | #include <linux/hrtimer.h> |
| 12 | #include <linux/configfs.h> |
| 13 | #include <linux/badblocks.h> |
| 14 | #include <linux/fault-inject.h> |
| 15 | #include <linux/spinlock.h> |
| 16 | #include <linux/mutex.h> |
| 17 | |
/*
 * Per-command state carried alongside each null_blk request.
 */
struct nullb_cmd {
	blk_status_t error;		/* completion status reported for this command */
	bool fake_timeout;		/* NOTE(review): presumably set by timeout fault
					 * injection to drop the completion — confirm
					 * against the command completion path.
					 */
	struct nullb_queue *nq;		/* queue this command was issued on */
	struct hrtimer timer;		/* timer used for delayed (timer irqmode) completion */
};
| 24 | |
/*
 * Per-hardware-queue state for a null_blk device.
 */
struct nullb_queue {
	struct nullb_device *dev;	/* owning device */
	unsigned int requeue_selection;	/* NOTE(review): looks like a rolling counter used
					 * to alternate requeue decisions under fault
					 * injection — confirm in the requeue path.
					 */

	struct list_head poll_list;	/* commands awaiting completion via blk-mq polling */
	spinlock_t poll_lock;		/* protects poll_list */
};
| 32 | |
/*
 * Emulated zone state for a zoned null_blk device.
 */
struct nullb_zone {
	/*
	 * Zone lock to prevent concurrent modification of a zone write
	 * pointer position and condition: with memory backing, a write
	 * command execution may sleep on memory allocation. For this case,
	 * use mutex as the zone lock. Otherwise, use the spinlock for
	 * locking the zone.
	 */
	union {
		spinlock_t spinlock;
		struct mutex mutex;
	};
	enum blk_zone_type type;	/* conventional or sequential write zone */
	enum blk_zone_cond cond;	/* current zone condition (empty, open, full, ...) */
	sector_t start;			/* first sector of the zone */
	sector_t wp;			/* current write pointer position */
	unsigned int len;		/* zone length (presumably in sectors — matches
					 * start/wp being sector_t; confirm at init site) */
	unsigned int capacity;		/* usable capacity, <= len */
};
| 52 | |
/*
 * Configuration and runtime state of a null_blk device instance.
 * Most fields mirror configfs attributes; the top group of fields is
 * runtime state created when the device is powered on.
 */
struct nullb_device {
	struct nullb *nullb;	/* runtime instance; NOTE(review): presumably NULL
				 * until the device is powered on — confirm */
	struct config_group group;	/* configfs group backing this device */
#ifdef CONFIG_BLK_DEV_NULL_BLK_FAULT_INJECTION
	struct fault_config timeout_config;		/* fault injection: command timeouts */
	struct fault_config requeue_config;		/* fault injection: command requeues */
	struct fault_config init_hctx_fault_config;	/* fault injection: hctx init failures */
#endif
	struct radix_tree_root data; /* data stored in the disk */
	struct radix_tree_root cache; /* disk cache data */
	unsigned long flags; /* device flags */
	unsigned int curr_cache;	/* current cache usage (units not visible here —
					 * presumably bytes, cf. cache_size in MB; confirm) */
	struct badblocks badblocks;	/* emulated bad block ranges */
	bool badblocks_once;		/* report a bad block only on first access */
	bool badblocks_partial_io;	/* complete the good part of an IO hitting badblocks */

	unsigned int nr_zones;		/* total number of zones */
	unsigned int nr_zones_imp_open;	/* zones currently implicitly open */
	unsigned int nr_zones_exp_open;	/* zones currently explicitly open */
	unsigned int nr_zones_closed;	/* zones currently closed */
	unsigned int imp_close_zone_no;	/* NOTE(review): presumably next candidate zone for
					 * implicit close when open limit is hit — confirm */
	struct nullb_zone *zones;	/* per-zone state array, nr_zones entries */
	sector_t zone_size_sects;	/* zone size in sectors (derived from zone_size) */
	bool need_zone_res_mgmt;	/* enforce open/active zone limits */
	spinlock_t zone_res_lock;	/* protects the zone resource counters above */

	unsigned long size; /* device size in MB */
	unsigned long completion_nsec; /* time in ns to complete a request */
	unsigned long cache_size; /* disk cache size in MB */
	unsigned long zone_size; /* zone size in MB if device is zoned */
	unsigned long zone_capacity; /* zone capacity in MB if device is zoned */
	unsigned int zone_nr_conv; /* number of conventional zones */
	unsigned int zone_max_open; /* max number of open zones */
	unsigned int zone_max_active; /* max number of active zones */
	unsigned int zone_append_max_sectors; /* Max sectors per zone append command */
	unsigned int submit_queues; /* number of submission queues */
	unsigned int prev_submit_queues; /* number of submission queues before change */
	unsigned int poll_queues; /* number of IOPOLL submission queues */
	unsigned int prev_poll_queues; /* number of IOPOLL submission queues before change */
	unsigned int home_node; /* home node for the device */
	unsigned int queue_mode; /* block interface */
	unsigned int blocksize; /* block size */
	unsigned int max_sectors; /* Max sectors per command */
	unsigned int irqmode; /* IRQ completion handler */
	unsigned int hw_queue_depth; /* queue depth */
	unsigned int index; /* index of the disk, only valid with a disk */
	unsigned int mbps; /* Bandwidth throttle cap (in MB/s) */
	bool blocking; /* blocking blk-mq device */
	bool use_per_node_hctx; /* use per-node allocation for hardware context */
	bool power; /* power on/off the device */
	bool memory_backed; /* if data is stored in memory */
	bool discard; /* if support discard */
	bool zoned; /* if device is zoned */
	bool zone_full; /* Initialize zones to be full */
	bool virt_boundary; /* virtual boundary on/off for the device */
	bool no_sched; /* no IO scheduler for the device */
	bool shared_tags; /* share tag set between devices for blk-mq */
	bool shared_tag_bitmap; /* use hostwide shared tags */
	bool fua; /* Support FUA */
	bool rotational; /* Fake rotational device */
};
| 114 | |
/*
 * Runtime instance of a powered-on null_blk device.
 */
struct nullb {
	struct nullb_device *dev;	/* configuration this instance was created from */
	struct list_head list;		/* entry in the global device list */
	unsigned int index;		/* device index (used in the disk name) */
	struct request_queue *q;	/* request queue of the disk */
	struct gendisk *disk;		/* the exposed block device */
	struct blk_mq_tag_set *tag_set;	/* tag set in use (shared or &__tag_set) */
	struct blk_mq_tag_set __tag_set;	/* per-device tag set storage */
	atomic_long_t cur_bytes;	/* remaining byte budget for bandwidth throttling */
	struct hrtimer bw_timer;	/* timer refilling cur_bytes when mbps is set */
	unsigned long cache_flush_pos;	/* NOTE(review): presumably resume position for
					 * incremental cache flushing — confirm */
	spinlock_t lock;		/* protects device state (exact coverage not
					 * visible in this header) */

	struct nullb_queue *queues;	/* per-hw-queue state array */
	char disk_name[DISK_NAME_LEN];	/* disk name, e.g. "nullb<index>" */
};
| 131 | |
| 132 | blk_status_t null_handle_discard(struct nullb_device *dev, sector_t sector, |
| 133 | sector_t nr_sectors); |
| 134 | blk_status_t null_process_cmd(struct nullb_cmd *cmd, enum req_op op, |
| 135 | sector_t sector, unsigned int nr_sectors); |
| 136 | blk_status_t null_handle_badblocks(struct nullb_cmd *cmd, sector_t sector, |
| 137 | unsigned int *nr_sectors); |
| 138 | blk_status_t null_handle_memory_backed(struct nullb_cmd *cmd, enum req_op op, |
| 139 | sector_t sector, sector_t nr_sectors); |
| 140 | |
| 141 | #ifdef CONFIG_BLK_DEV_ZONED |
| 142 | int null_init_zoned_dev(struct nullb_device *dev, struct queue_limits *lim); |
| 143 | int null_register_zoned_dev(struct nullb *nullb); |
| 144 | void null_free_zoned_dev(struct nullb_device *dev); |
| 145 | int null_report_zones(struct gendisk *disk, sector_t sector, |
| 146 | unsigned int nr_zones, |
| 147 | struct blk_report_zones_args *args); |
| 148 | blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op, |
| 149 | sector_t sector, sector_t nr_sectors); |
| 150 | size_t null_zone_valid_read_len(struct nullb *nullb, |
| 151 | sector_t sector, unsigned int len); |
| 152 | ssize_t zone_cond_store(struct nullb_device *dev, const char *page, |
| 153 | size_t count, enum blk_zone_cond cond); |
| 154 | #else |
| 155 | static inline int null_init_zoned_dev(struct nullb_device *dev, |
| 156 | struct queue_limits *lim) |
| 157 | { |
| 158 | pr_err("CONFIG_BLK_DEV_ZONED not enabled\n" ); |
| 159 | return -EINVAL; |
| 160 | } |
/* Stub: zoned support compiled out, registration always fails. */
static inline int null_register_zoned_dev(struct nullb *nullb)
{
	return -ENODEV;
}
/* Stub: nothing to free when zoned support is compiled out. */
static inline void null_free_zoned_dev(struct nullb_device *dev) {}
/* Stub: zoned commands are unsupported when zoned support is compiled out. */
static inline blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd,
		enum req_op op, sector_t sector, sector_t nr_sectors)
{
	return BLK_STS_NOTSUPP;
}
/*
 * Stub: without zones there is no write pointer to clamp reads against,
 * so the full requested length is always readable.
 */
static inline size_t null_zone_valid_read_len(struct nullb *nullb,
					      sector_t sector,
					      unsigned int len)
{
	return len;
}
/* Stub: the configfs zone condition attribute is unsupported without zones. */
static inline ssize_t zone_cond_store(struct nullb_device *dev,
				      const char *page, size_t count,
				      enum blk_zone_cond cond)
{
	return -EOPNOTSUPP;
}
| 183 | #define null_report_zones NULL |
| 184 | #endif /* CONFIG_BLK_DEV_ZONED */ |
#endif /* __BLK_NULL_BLK_H */
| 186 | |