1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * History: |
4 | * Started: Aug 9 by Lawrence Foard (entropy@world.std.com), |
5 | * to allow user process control of SCSI devices. |
6 | * Development Sponsored by Killy Corp. NY NY |
7 | * |
8 | * Original driver (sg.c): |
9 | * Copyright (C) 1992 Lawrence Foard |
10 | * Version 2 and 3 extensions to driver: |
11 | * Copyright (C) 1998 - 2014 Douglas Gilbert |
12 | */ |
13 | |
14 | static int sg_version_num = 30536; /* 2 digits for each component */ |
15 | #define SG_VERSION_STR "3.5.36" |
16 | |
17 | /* |
18 | * D. P. Gilbert (dgilbert@interlog.com), notes: |
19 | * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First |
20 | * the kernel/module needs to be built with CONFIG_SCSI_LOGGING |
21 | * (otherwise the macros compile to empty statements). |
22 | * |
23 | */ |
24 | #include <linux/module.h> |
25 | |
26 | #include <linux/fs.h> |
27 | #include <linux/kernel.h> |
28 | #include <linux/sched.h> |
29 | #include <linux/string.h> |
30 | #include <linux/mm.h> |
31 | #include <linux/errno.h> |
32 | #include <linux/mtio.h> |
33 | #include <linux/ioctl.h> |
34 | #include <linux/major.h> |
35 | #include <linux/slab.h> |
36 | #include <linux/fcntl.h> |
37 | #include <linux/init.h> |
38 | #include <linux/poll.h> |
39 | #include <linux/moduleparam.h> |
40 | #include <linux/cdev.h> |
41 | #include <linux/idr.h> |
42 | #include <linux/seq_file.h> |
43 | #include <linux/blkdev.h> |
44 | #include <linux/delay.h> |
45 | #include <linux/blktrace_api.h> |
46 | #include <linux/mutex.h> |
47 | #include <linux/atomic.h> |
48 | #include <linux/ratelimit.h> |
49 | #include <linux/uio.h> |
50 | #include <linux/cred.h> /* for sg_check_file_access() */ |
51 | |
52 | #include <scsi/scsi.h> |
53 | #include <scsi/scsi_cmnd.h> |
54 | #include <scsi/scsi_dbg.h> |
55 | #include <scsi/scsi_device.h> |
56 | #include <scsi/scsi_driver.h> |
57 | #include <scsi/scsi_eh.h> |
58 | #include <scsi/scsi_host.h> |
59 | #include <scsi/scsi_ioctl.h> |
60 | #include <scsi/scsi_tcq.h> |
61 | #include <scsi/sg.h> |
62 | |
63 | #include "scsi_logging.h" |
64 | |
65 | #ifdef CONFIG_SCSI_PROC_FS |
66 | #include <linux/proc_fs.h> |
static char *sg_version_date = "20140603";
68 | |
69 | static int sg_proc_init(void); |
70 | #endif |
71 | |
72 | #define SG_ALLOW_DIO_DEF 0 |
73 | |
74 | #define SG_MAX_DEVS (1 << MINORBITS) |
75 | |
/* SG_MAX_CDB_SIZE should be 260 (spc4r37 section 3.1.30); however, the type
 * of sg_io_hdr::cmd_len can only represent 255. All SCSI commands greater
 * than 16 bytes are "variable length", whose length is a multiple of 4.
 */
80 | #define SG_MAX_CDB_SIZE 252 |
81 | |
82 | #define SG_DEFAULT_TIMEOUT mult_frac(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ) |
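
/*
 * Worked example of the conversion above (assuming SG_DEFAULT_TIMEOUT_USER
 * is 60*USER_HZ, as defined in <scsi/sg.h>): the user-visible timeout is
 * kept in USER_HZ ticks while the kernel needs jiffies, so with
 * USER_HZ = 100 and HZ = 250 the default of 60*100 = 6000 user ticks
 * becomes mult_frac(6000, 250, 100) = 15000 jiffies, i.e. 60 seconds
 * either way.
 */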
83 | |
84 | static int sg_big_buff = SG_DEF_RESERVED_SIZE; |
85 | /* N.B. This variable is readable and writeable via |
86 | /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer |
87 | of this size (or less if there is not enough memory) will be reserved |
88 | for use by this file descriptor. [Deprecated usage: this variable is also |
89 | readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into |
90 | the kernel (i.e. it is not a module).] */ |
91 | static int def_reserved_size = -1; /* picks up init parameter */ |
92 | static int sg_allow_dio = SG_ALLOW_DIO_DEF; |
93 | |
94 | static int scatter_elem_sz = SG_SCATTER_SZ; |
95 | static int scatter_elem_sz_prev = SG_SCATTER_SZ; |
96 | |
97 | #define SG_SECTOR_SZ 512 |
98 | |
99 | static int sg_add_device(struct device *); |
100 | static void sg_remove_device(struct device *); |
101 | |
102 | static DEFINE_IDR(sg_index_idr); |
103 | static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock |
104 | file descriptor list for device */ |
105 | |
106 | static struct class_interface sg_interface = { |
107 | .add_dev = sg_add_device, |
108 | .remove_dev = sg_remove_device, |
109 | }; |
110 | |
111 | typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */ |
112 | unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */ |
113 | unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */ |
114 | unsigned bufflen; /* Size of (aggregate) data buffer */ |
115 | struct page **pages; |
116 | int page_order; |
117 | char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */ |
118 | unsigned char cmd_opcode; /* first byte of command */ |
119 | } Sg_scatter_hold; |
120 | |
121 | struct sg_device; /* forward declarations */ |
122 | struct sg_fd; |
123 | |
124 | typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */ |
125 | struct list_head entry; /* list entry */ |
126 | struct sg_fd *parentfp; /* NULL -> not in use */ |
127 | Sg_scatter_hold data; /* hold buffer, perhaps scatter list */ |
	sg_io_hdr_t header;	/* scsi command+info, see <scsi/sg.h> */
129 | unsigned char sense_b[SCSI_SENSE_BUFFERSIZE]; |
130 | char res_used; /* 1 -> using reserve buffer, 0 -> not ... */ |
131 | char orphan; /* 1 -> drop on sight, 0 -> normal */ |
132 | char sg_io_owned; /* 1 -> packet belongs to SG_IO */ |
133 | /* done protected by rq_list_lock */ |
134 | char done; /* 0->before bh, 1->before read, 2->read */ |
135 | struct request *rq; |
136 | struct bio *bio; |
137 | struct execute_work ew; |
138 | } Sg_request; |
139 | |
140 | typedef struct sg_fd { /* holds the state of a file descriptor */ |
141 | struct list_head sfd_siblings; /* protected by device's sfd_lock */ |
142 | struct sg_device *parentdp; /* owning device */ |
143 | wait_queue_head_t read_wait; /* queue read until command done */ |
144 | rwlock_t rq_list_lock; /* protect access to list in req_arr */ |
145 | struct mutex f_mutex; /* protect against changes in this fd */ |
146 | int timeout; /* defaults to SG_DEFAULT_TIMEOUT */ |
147 | int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */ |
148 | Sg_scatter_hold reserve; /* buffer held for this file descriptor */ |
149 | struct list_head rq_list; /* head of request list */ |
150 | struct fasync_struct *async_qp; /* used by asynchronous notification */ |
151 | Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */ |
152 | char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */ |
153 | char cmd_q; /* 1 -> allow command queuing, 0 -> don't */ |
154 | unsigned char next_cmd_len; /* 0: automatic, >0: use on next write() */ |
155 | char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ |
156 | char mmap_called; /* 0 -> mmap() never called on this fd */ |
157 | char res_in_use; /* 1 -> 'reserve' array in use */ |
158 | struct kref f_ref; |
159 | struct execute_work ew; |
160 | } Sg_fd; |
161 | |
162 | typedef struct sg_device { /* holds the state of each scsi generic device */ |
163 | struct scsi_device *device; |
164 | wait_queue_head_t open_wait; /* queue open() when O_EXCL present */ |
165 | struct mutex open_rel_lock; /* held when in open() or release() */ |
166 | int sg_tablesize; /* adapter's max scatter-gather table size */ |
167 | u32 index; /* device index number */ |
168 | struct list_head sfds; |
169 | rwlock_t sfd_lock; /* protect access to sfd list */ |
170 | atomic_t detaching; /* 0->device usable, 1->device detaching */ |
171 | bool exclude; /* 1->open(O_EXCL) succeeded and is active */ |
172 | int open_cnt; /* count of opens (perhaps < num(sfds) ) */ |
173 | char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ |
174 | char name[DISK_NAME_LEN]; |
175 | struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */ |
176 | struct kref d_ref; |
177 | } Sg_device; |
178 | |
179 | /* tasklet or soft irq callback */ |
180 | static enum rq_end_io_ret sg_rq_end_io(struct request *rq, blk_status_t status); |
181 | static int sg_start_req(Sg_request *srp, unsigned char *cmd); |
182 | static int sg_finish_rem_req(Sg_request * srp); |
183 | static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); |
184 | static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, |
185 | Sg_request * srp); |
186 | static ssize_t sg_new_write(Sg_fd *sfp, struct file *file, |
187 | const char __user *buf, size_t count, int blocking, |
188 | int read_only, int sg_io_owned, Sg_request **o_srp); |
189 | static int sg_common_write(Sg_fd * sfp, Sg_request * srp, |
190 | unsigned char *cmnd, int timeout, int blocking); |
191 | static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); |
192 | static void sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp); |
193 | static void sg_build_reserve(Sg_fd * sfp, int req_size); |
194 | static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); |
195 | static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); |
196 | static Sg_fd *sg_add_sfp(Sg_device * sdp); |
197 | static void sg_remove_sfp(struct kref *); |
198 | static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy); |
199 | static Sg_request *sg_add_request(Sg_fd * sfp); |
200 | static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); |
201 | static Sg_device *sg_get_dev(int dev); |
202 | static void sg_device_destroy(struct kref *kref); |
203 | |
#define SZ_SG_HEADER sizeof(struct sg_header)
205 | #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t) |
206 | #define SZ_SG_IOVEC sizeof(sg_iovec_t) |
207 | #define SZ_SG_REQ_INFO sizeof(sg_req_info_t) |
208 | |
209 | #define sg_printk(prefix, sdp, fmt, a...) \ |
210 | sdev_prefix_printk(prefix, (sdp)->device, (sdp)->name, fmt, ##a) |
211 | |
212 | /* |
213 | * The SCSI interfaces that use read() and write() as an asynchronous variant of |
214 | * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways |
215 | * to trigger read() and write() calls from various contexts with elevated |
216 | * privileges. This can lead to kernel memory corruption (e.g. if these |
217 | * interfaces are called through splice()) and privilege escalation inside |
218 | * userspace (e.g. if a process with access to such a device passes a file |
219 | * descriptor to a SUID binary as stdin/stdout/stderr). |
220 | * |
221 | * This function provides protection for the legacy API by restricting the |
222 | * calling context. |
223 | */ |
224 | static int sg_check_file_access(struct file *filp, const char *caller) |
225 | { |
226 | if (filp->f_cred != current_real_cred()) { |
227 | pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n" , |
228 | caller, task_tgid_vnr(current), current->comm); |
229 | return -EPERM; |
230 | } |
231 | return 0; |
232 | } |
233 | |
234 | static int sg_allow_access(struct file *filp, unsigned char *cmd) |
235 | { |
236 | struct sg_fd *sfp = filp->private_data; |
237 | |
238 | if (sfp->parentdp->device->type == TYPE_SCANNER) |
239 | return 0; |
	if (!scsi_cmd_allowed(cmd, filp->f_mode & FMODE_WRITE))
241 | return -EPERM; |
242 | return 0; |
243 | } |
244 | |
245 | static int |
246 | open_wait(Sg_device *sdp, int flags) |
247 | { |
248 | int retval = 0; |
249 | |
250 | if (flags & O_EXCL) { |
251 | while (sdp->open_cnt > 0) { |
252 | mutex_unlock(lock: &sdp->open_rel_lock); |
253 | retval = wait_event_interruptible(sdp->open_wait, |
254 | (atomic_read(&sdp->detaching) || |
255 | !sdp->open_cnt)); |
256 | mutex_lock(&sdp->open_rel_lock); |
257 | |
258 | if (retval) /* -ERESTARTSYS */ |
259 | return retval; |
260 | if (atomic_read(v: &sdp->detaching)) |
261 | return -ENODEV; |
262 | } |
263 | } else { |
264 | while (sdp->exclude) { |
265 | mutex_unlock(lock: &sdp->open_rel_lock); |
266 | retval = wait_event_interruptible(sdp->open_wait, |
267 | (atomic_read(&sdp->detaching) || |
268 | !sdp->exclude)); |
269 | mutex_lock(&sdp->open_rel_lock); |
270 | |
271 | if (retval) /* -ERESTARTSYS */ |
272 | return retval; |
273 | if (atomic_read(v: &sdp->detaching)) |
274 | return -ENODEV; |
275 | } |
276 | } |
277 | |
278 | return retval; |
279 | } |
280 | |
281 | /* Returns 0 on success, else a negated errno value */ |
282 | static int |
283 | sg_open(struct inode *inode, struct file *filp) |
284 | { |
285 | int dev = iminor(inode); |
286 | int flags = filp->f_flags; |
287 | struct request_queue *q; |
288 | struct scsi_device *device; |
289 | Sg_device *sdp; |
290 | Sg_fd *sfp; |
291 | int retval; |
292 | |
293 | nonseekable_open(inode, filp); |
294 | if ((flags & O_EXCL) && (O_RDONLY == (flags & O_ACCMODE))) |
295 | return -EPERM; /* Can't lock it with read only access */ |
296 | sdp = sg_get_dev(dev); |
	if (IS_ERR(sdp))
		return PTR_ERR(sdp);

	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
				      "sg_open: flags=0x%x\n", flags));
302 | |
303 | /* This driver's module count bumped by fops_get in <linux/fs.h> */ |
304 | /* Prevent the device driver from vanishing while we sleep */ |
305 | device = sdp->device; |
306 | retval = scsi_device_get(device); |
307 | if (retval) |
308 | goto sg_put; |
309 | |
310 | retval = scsi_autopm_get_device(device); |
311 | if (retval) |
312 | goto sdp_put; |
313 | |
314 | /* scsi_block_when_processing_errors() may block so bypass |
315 | * check if O_NONBLOCK. Permits SCSI commands to be issued |
316 | * during error recovery. Tread carefully. */ |
317 | if (!((flags & O_NONBLOCK) || |
318 | scsi_block_when_processing_errors(device))) { |
319 | retval = -ENXIO; |
320 | /* we are in error recovery for this device */ |
321 | goto error_out; |
322 | } |
323 | |
324 | mutex_lock(&sdp->open_rel_lock); |
325 | if (flags & O_NONBLOCK) { |
326 | if (flags & O_EXCL) { |
327 | if (sdp->open_cnt > 0) { |
328 | retval = -EBUSY; |
329 | goto error_mutex_locked; |
330 | } |
331 | } else { |
332 | if (sdp->exclude) { |
333 | retval = -EBUSY; |
334 | goto error_mutex_locked; |
335 | } |
336 | } |
337 | } else { |
338 | retval = open_wait(sdp, flags); |
339 | if (retval) /* -ERESTARTSYS or -ENODEV */ |
340 | goto error_mutex_locked; |
341 | } |
342 | |
343 | /* N.B. at this point we are holding the open_rel_lock */ |
344 | if (flags & O_EXCL) |
345 | sdp->exclude = true; |
346 | |
347 | if (sdp->open_cnt < 1) { /* no existing opens */ |
348 | sdp->sgdebug = 0; |
349 | q = device->request_queue; |
350 | sdp->sg_tablesize = queue_max_segments(q); |
351 | } |
352 | sfp = sg_add_sfp(sdp); |
	if (IS_ERR(sfp)) {
		retval = PTR_ERR(sfp);
		goto out_undo;
	}

	filp->private_data = sfp;
	sdp->open_cnt++;
	mutex_unlock(&sdp->open_rel_lock);

	retval = 0;
sg_put:
	kref_put(&sdp->d_ref, sg_device_destroy);
365 | return retval; |
366 | |
367 | out_undo: |
368 | if (flags & O_EXCL) { |
369 | sdp->exclude = false; /* undo if error */ |
370 | wake_up_interruptible(&sdp->open_wait); |
371 | } |
372 | error_mutex_locked: |
373 | mutex_unlock(lock: &sdp->open_rel_lock); |
374 | error_out: |
375 | scsi_autopm_put_device(device); |
376 | sdp_put: |
377 | kref_put(kref: &sdp->d_ref, release: sg_device_destroy); |
378 | scsi_device_put(device); |
379 | return retval; |
380 | } |
381 | |
382 | /* Release resources associated with a successful sg_open() |
383 | * Returns 0 on success, else a negated errno value */ |
384 | static int |
385 | sg_release(struct inode *inode, struct file *filp) |
386 | { |
387 | Sg_device *sdp; |
388 | Sg_fd *sfp; |
389 | |
390 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) |
391 | return -ENXIO; |
392 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, "sg_release\n" )); |
393 | |
394 | mutex_lock(&sdp->open_rel_lock); |
395 | scsi_autopm_put_device(sdp->device); |
396 | kref_put(kref: &sfp->f_ref, release: sg_remove_sfp); |
397 | sdp->open_cnt--; |
398 | |
399 | /* possibly many open()s waiting on exlude clearing, start many; |
400 | * only open(O_EXCL)s wait on 0==open_cnt so only start one */ |
401 | if (sdp->exclude) { |
402 | sdp->exclude = false; |
403 | wake_up_interruptible_all(&sdp->open_wait); |
404 | } else if (0 == sdp->open_cnt) { |
405 | wake_up_interruptible(&sdp->open_wait); |
406 | } |
	mutex_unlock(&sdp->open_rel_lock);
408 | return 0; |
409 | } |
410 | |
411 | static int get_sg_io_pack_id(int *pack_id, void __user *buf, size_t count) |
412 | { |
413 | struct sg_header __user *old_hdr = buf; |
414 | int reply_len; |
415 | |
416 | if (count >= SZ_SG_HEADER) { |
417 | /* negative reply_len means v3 format, otherwise v1/v2 */ |
418 | if (get_user(reply_len, &old_hdr->reply_len)) |
419 | return -EFAULT; |
420 | |
421 | if (reply_len >= 0) |
422 | return get_user(*pack_id, &old_hdr->pack_id); |
423 | |
424 | if (in_compat_syscall() && |
425 | count >= sizeof(struct compat_sg_io_hdr)) { |
426 | struct compat_sg_io_hdr __user *hp = buf; |
427 | |
428 | return get_user(*pack_id, &hp->pack_id); |
429 | } |
430 | |
431 | if (count >= sizeof(struct sg_io_hdr)) { |
432 | struct sg_io_hdr __user *hp = buf; |
433 | |
434 | return get_user(*pack_id, &hp->pack_id); |
435 | } |
436 | } |
437 | |
438 | /* no valid header was passed, so ignore the pack_id */ |
439 | *pack_id = -1; |
440 | return 0; |
441 | } |
442 | |
443 | static ssize_t |
444 | sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) |
445 | { |
446 | Sg_device *sdp; |
447 | Sg_fd *sfp; |
448 | Sg_request *srp; |
449 | int req_pack_id = -1; |
450 | bool busy; |
451 | sg_io_hdr_t *hp; |
452 | struct sg_header *old_hdr; |
453 | int retval; |
454 | |
455 | /* |
456 | * This could cause a response to be stranded. Close the associated |
457 | * file descriptor to free up any resources being held. |
458 | */ |
	retval = sg_check_file_access(filp, __func__);
	if (retval)
		return retval;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
				      "sg_read: count=%d\n", (int) count));

	if (sfp->force_packid)
		retval = get_sg_io_pack_id(&req_pack_id, buf, count);
	if (retval)
		return retval;

	srp = sg_get_rq_mark(sfp, req_pack_id, &busy);
474 | if (!srp) { /* now wait on packet to arrive */ |
475 | if (filp->f_flags & O_NONBLOCK) |
476 | return -EAGAIN; |
477 | retval = wait_event_interruptible(sfp->read_wait, |
478 | ((srp = sg_get_rq_mark(sfp, req_pack_id, &busy)) || |
479 | (!busy && atomic_read(&sdp->detaching)))); |
480 | if (!srp) |
481 | /* signal or detaching */ |
482 | return retval ? retval : -ENODEV; |
483 | } |
484 | if (srp->header.interface_id != '\0') |
485 | return sg_new_read(sfp, buf, count, srp); |
486 | |
487 | hp = &srp->header; |
488 | old_hdr = kzalloc(SZ_SG_HEADER, GFP_KERNEL); |
489 | if (!old_hdr) |
490 | return -ENOMEM; |
491 | |
492 | old_hdr->reply_len = (int) hp->timeout; |
493 | old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */ |
494 | old_hdr->pack_id = hp->pack_id; |
495 | old_hdr->twelve_byte = |
496 | ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0; |
497 | old_hdr->target_status = hp->masked_status; |
498 | old_hdr->host_status = hp->host_status; |
499 | old_hdr->driver_status = hp->driver_status; |
500 | if ((CHECK_CONDITION & hp->masked_status) || |
501 | (srp->sense_b[0] & 0x70) == 0x70) { |
502 | old_hdr->driver_status = DRIVER_SENSE; |
503 | memcpy(old_hdr->sense_buffer, srp->sense_b, |
504 | sizeof (old_hdr->sense_buffer)); |
505 | } |
506 | switch (hp->host_status) { |
507 | /* This setup of 'result' is for backward compatibility and is best |
508 | ignored by the user who should use target, host + driver status */ |
509 | case DID_OK: |
510 | case DID_PASSTHROUGH: |
511 | case DID_SOFT_ERROR: |
512 | old_hdr->result = 0; |
513 | break; |
514 | case DID_NO_CONNECT: |
515 | case DID_BUS_BUSY: |
516 | case DID_TIME_OUT: |
517 | old_hdr->result = EBUSY; |
518 | break; |
519 | case DID_BAD_TARGET: |
520 | case DID_ABORT: |
521 | case DID_PARITY: |
522 | case DID_RESET: |
523 | case DID_BAD_INTR: |
524 | old_hdr->result = EIO; |
525 | break; |
526 | case DID_ERROR: |
527 | old_hdr->result = (srp->sense_b[0] == 0 && |
528 | hp->masked_status == GOOD) ? 0 : EIO; |
529 | break; |
530 | default: |
531 | old_hdr->result = EIO; |
532 | break; |
533 | } |
534 | |
535 | /* Now copy the result back to the user buffer. */ |
536 | if (count >= SZ_SG_HEADER) { |
		if (copy_to_user(buf, old_hdr, SZ_SG_HEADER)) {
			retval = -EFAULT;
			goto free_old_hdr;
		}
		buf += SZ_SG_HEADER;
		if (count > old_hdr->reply_len)
			count = old_hdr->reply_len;
		if (count > SZ_SG_HEADER) {
			if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) {
				retval = -EFAULT;
				goto free_old_hdr;
			}
		}
	} else
		count = (old_hdr->result == 0) ? 0 : -EIO;
	sg_finish_rem_req(srp);
	sg_remove_request(sfp, srp);
	retval = count;
free_old_hdr:
	kfree(old_hdr);
557 | return retval; |
558 | } |
559 | |
560 | static ssize_t |
561 | sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) |
562 | { |
563 | sg_io_hdr_t *hp = &srp->header; |
564 | int err = 0, err2; |
565 | int len; |
566 | |
567 | if (in_compat_syscall()) { |
568 | if (count < sizeof(struct compat_sg_io_hdr)) { |
569 | err = -EINVAL; |
570 | goto err_out; |
571 | } |
572 | } else if (count < SZ_SG_IO_HDR) { |
573 | err = -EINVAL; |
574 | goto err_out; |
575 | } |
576 | hp->sb_len_wr = 0; |
577 | if ((hp->mx_sb_len > 0) && hp->sbp) { |
578 | if ((CHECK_CONDITION & hp->masked_status) || |
579 | (srp->sense_b[0] & 0x70) == 0x70) { |
580 | int sb_len = SCSI_SENSE_BUFFERSIZE; |
581 | sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len; |
582 | len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */ |
583 | len = (len > sb_len) ? sb_len : len; |
584 | if (copy_to_user(to: hp->sbp, from: srp->sense_b, n: len)) { |
585 | err = -EFAULT; |
586 | goto err_out; |
587 | } |
588 | hp->driver_status = DRIVER_SENSE; |
589 | hp->sb_len_wr = len; |
590 | } |
591 | } |
592 | if (hp->masked_status || hp->host_status || hp->driver_status) |
593 | hp->info |= SG_INFO_CHECK; |
594 | err = put_sg_io_hdr(hdr: hp, argp: buf); |
595 | err_out: |
596 | err2 = sg_finish_rem_req(srp); |
597 | sg_remove_request(sfp, srp); |
598 | return err ? : err2 ? : count; |
599 | } |
600 | |
601 | static ssize_t |
602 | sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) |
603 | { |
604 | int mxsize, cmd_size, k; |
605 | int input_size, blocking; |
606 | unsigned char opcode; |
607 | Sg_device *sdp; |
608 | Sg_fd *sfp; |
609 | Sg_request *srp; |
610 | struct sg_header old_hdr; |
611 | sg_io_hdr_t *hp; |
612 | unsigned char cmnd[SG_MAX_CDB_SIZE]; |
613 | int retval; |
614 | |
	retval = sg_check_file_access(filp, __func__);
	if (retval)
		return retval;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
				      "sg_write: count=%d\n", (int) count));
	if (atomic_read(&sdp->detaching))
624 | return -ENODEV; |
625 | if (!((filp->f_flags & O_NONBLOCK) || |
626 | scsi_block_when_processing_errors(sdp->device))) |
627 | return -ENXIO; |
628 | |
629 | if (count < SZ_SG_HEADER) |
630 | return -EIO; |
631 | if (copy_from_user(to: &old_hdr, from: buf, SZ_SG_HEADER)) |
632 | return -EFAULT; |
633 | blocking = !(filp->f_flags & O_NONBLOCK); |
634 | if (old_hdr.reply_len < 0) |
635 | return sg_new_write(sfp, file: filp, buf, count, |
636 | blocking, read_only: 0, sg_io_owned: 0, NULL); |
637 | if (count < (SZ_SG_HEADER + 6)) |
638 | return -EIO; /* The minimum scsi command length is 6 bytes. */ |
639 | |
640 | buf += SZ_SG_HEADER; |
641 | if (get_user(opcode, buf)) |
642 | return -EFAULT; |
643 | |
644 | if (!(srp = sg_add_request(sfp))) { |
645 | SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sdp, |
646 | "sg_write: queue full\n" )); |
647 | return -EDOM; |
648 | } |
649 | mutex_lock(&sfp->f_mutex); |
650 | if (sfp->next_cmd_len > 0) { |
651 | cmd_size = sfp->next_cmd_len; |
652 | sfp->next_cmd_len = 0; /* reset so only this write() effected */ |
653 | } else { |
654 | cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */ |
655 | if ((opcode >= 0xc0) && old_hdr.twelve_byte) |
656 | cmd_size = 12; |
657 | } |
658 | mutex_unlock(lock: &sfp->f_mutex); |
659 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, |
660 | "sg_write: scsi opcode=0x%02x, cmd_size=%d\n" , (int) opcode, cmd_size)); |
661 | /* Determine buffer size. */ |
662 | input_size = count - cmd_size; |
663 | mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len; |
664 | mxsize -= SZ_SG_HEADER; |
665 | input_size -= SZ_SG_HEADER; |
666 | if (input_size < 0) { |
667 | sg_remove_request(sfp, srp); |
668 | return -EIO; /* User did not pass enough bytes for this command. */ |
669 | } |
670 | hp = &srp->header; |
671 | hp->interface_id = '\0'; /* indicator of old interface tunnelled */ |
672 | hp->cmd_len = (unsigned char) cmd_size; |
673 | hp->iovec_count = 0; |
674 | hp->mx_sb_len = 0; |
675 | if (input_size > 0) |
676 | hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ? |
677 | SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV; |
678 | else |
679 | hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE; |
680 | hp->dxfer_len = mxsize; |
681 | if ((hp->dxfer_direction == SG_DXFER_TO_DEV) || |
682 | (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV)) |
683 | hp->dxferp = (char __user *)buf + cmd_size; |
684 | else |
685 | hp->dxferp = NULL; |
686 | hp->sbp = NULL; |
687 | hp->timeout = old_hdr.reply_len; /* structure abuse ... */ |
688 | hp->flags = input_size; /* structure abuse ... */ |
689 | hp->pack_id = old_hdr.pack_id; |
690 | hp->usr_ptr = NULL; |
	if (copy_from_user(cmnd, buf, cmd_size)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;
	}
	/*
	 * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV,
	 * but it is possible that the app intended SG_DXFER_TO_DEV, because there
	 * is a non-zero input_size, so emit a warning.
	 */
	if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) {
		printk_ratelimited(KERN_WARNING
				   "sg_write: data in/out %d/%d bytes "
				   "for SCSI command 0x%x-- guessing "
				   "data in;\n   program %s not setting "
				   "count and/or reply_len properly\n",
				   old_hdr.reply_len - (int)SZ_SG_HEADER,
				   input_size, (unsigned int) cmnd[0],
				   current->comm);
	}
	k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking);
711 | return (k < 0) ? k : count; |
712 | } |
713 | |
714 | static ssize_t |
715 | sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, |
716 | size_t count, int blocking, int read_only, int sg_io_owned, |
717 | Sg_request **o_srp) |
718 | { |
719 | int k; |
720 | Sg_request *srp; |
721 | sg_io_hdr_t *hp; |
722 | unsigned char cmnd[SG_MAX_CDB_SIZE]; |
723 | int timeout; |
724 | unsigned long ul_timeout; |
725 | |
726 | if (count < SZ_SG_IO_HDR) |
727 | return -EINVAL; |
728 | |
729 | sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */ |
730 | if (!(srp = sg_add_request(sfp))) { |
731 | SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, |
732 | "sg_new_write: queue full\n" )); |
733 | return -EDOM; |
734 | } |
735 | srp->sg_io_owned = sg_io_owned; |
736 | hp = &srp->header; |
737 | if (get_sg_io_hdr(hdr: hp, argp: buf)) { |
738 | sg_remove_request(sfp, srp); |
739 | return -EFAULT; |
740 | } |
741 | if (hp->interface_id != 'S') { |
742 | sg_remove_request(sfp, srp); |
743 | return -ENOSYS; |
744 | } |
745 | if (hp->flags & SG_FLAG_MMAP_IO) { |
746 | if (hp->dxfer_len > sfp->reserve.bufflen) { |
747 | sg_remove_request(sfp, srp); |
748 | return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */ |
749 | } |
750 | if (hp->flags & SG_FLAG_DIRECT_IO) { |
751 | sg_remove_request(sfp, srp); |
752 | return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */ |
753 | } |
754 | if (sfp->res_in_use) { |
755 | sg_remove_request(sfp, srp); |
756 | return -EBUSY; /* reserve buffer already being used */ |
757 | } |
758 | } |
	ul_timeout = msecs_to_jiffies(srp->header.timeout);
	timeout = (ul_timeout < INT_MAX) ? ul_timeout : INT_MAX;
	if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) {
		sg_remove_request(sfp, srp);
		return -EMSGSIZE;
	}
	if (copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) {
		sg_remove_request(sfp, srp);
		return -EFAULT;
	}
	if (read_only && sg_allow_access(file, cmnd)) {
770 | sg_remove_request(sfp, srp); |
771 | return -EPERM; |
772 | } |
773 | k = sg_common_write(sfp, srp, cmnd, timeout, blocking); |
774 | if (k < 0) |
775 | return k; |
776 | if (o_srp) |
777 | *o_srp = srp; |
778 | return count; |
779 | } |
780 | |
781 | static int |
782 | sg_common_write(Sg_fd * sfp, Sg_request * srp, |
783 | unsigned char *cmnd, int timeout, int blocking) |
784 | { |
785 | int k, at_head; |
786 | Sg_device *sdp = sfp->parentdp; |
787 | sg_io_hdr_t *hp = &srp->header; |
788 | |
789 | srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */ |
790 | hp->status = 0; |
791 | hp->masked_status = 0; |
792 | hp->msg_status = 0; |
793 | hp->info = 0; |
794 | hp->host_status = 0; |
795 | hp->driver_status = 0; |
796 | hp->resid = 0; |
797 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, |
798 | "sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n" , |
799 | (int) cmnd[0], (int) hp->cmd_len)); |
800 | |
801 | if (hp->dxfer_len >= SZ_256M) { |
802 | sg_remove_request(sfp, srp); |
803 | return -EINVAL; |
804 | } |
805 | |
	k = sg_start_req(srp, cmnd);
	if (k) {
		SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp,
					      "sg_common_write: start_req err=%d\n", k));
		sg_finish_rem_req(srp);
		sg_remove_request(sfp, srp);
		return k;	/* probably out of space --> ENOMEM */
	}
	if (atomic_read(&sdp->detaching)) {
		if (srp->bio) {
			blk_mq_free_request(srp->rq);
817 | srp->rq = NULL; |
818 | } |
819 | |
820 | sg_finish_rem_req(srp); |
821 | sg_remove_request(sfp, srp); |
822 | return -ENODEV; |
823 | } |
824 | |
	hp->duration = jiffies_to_msecs(jiffies);
826 | if (hp->interface_id != '\0' && /* v3 (or later) interface */ |
827 | (SG_FLAG_Q_AT_TAIL & hp->flags)) |
828 | at_head = 0; |
829 | else |
830 | at_head = 1; |
831 | |
832 | srp->rq->timeout = timeout; |
	kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). */
	srp->rq->end_io = sg_rq_end_io;
	blk_execute_rq_nowait(srp->rq, at_head);
836 | return 0; |
837 | } |
838 | |
839 | static int srp_done(Sg_fd *sfp, Sg_request *srp) |
840 | { |
841 | unsigned long flags; |
842 | int ret; |
843 | |
844 | read_lock_irqsave(&sfp->rq_list_lock, flags); |
845 | ret = srp->done; |
846 | read_unlock_irqrestore(&sfp->rq_list_lock, flags); |
847 | return ret; |
848 | } |
849 | |
850 | static int max_sectors_bytes(struct request_queue *q) |
851 | { |
852 | unsigned int max_sectors = queue_max_sectors(q); |
853 | |
854 | max_sectors = min_t(unsigned int, max_sectors, INT_MAX >> 9); |
855 | |
856 | return max_sectors << 9; |
857 | } |
858 | |
859 | static void |
860 | sg_fill_request_table(Sg_fd *sfp, sg_req_info_t *rinfo) |
861 | { |
862 | Sg_request *srp; |
863 | int val; |
864 | unsigned int ms; |
865 | |
866 | val = 0; |
867 | list_for_each_entry(srp, &sfp->rq_list, entry) { |
868 | if (val >= SG_MAX_QUEUE) |
869 | break; |
870 | rinfo[val].req_state = srp->done + 1; |
871 | rinfo[val].problem = |
872 | srp->header.masked_status & |
873 | srp->header.host_status & |
874 | srp->header.driver_status; |
875 | if (srp->done) |
876 | rinfo[val].duration = |
877 | srp->header.duration; |
878 | else { |
			ms = jiffies_to_msecs(jiffies);
880 | rinfo[val].duration = |
881 | (ms > srp->header.duration) ? |
882 | (ms - srp->header.duration) : 0; |
883 | } |
884 | rinfo[val].orphan = srp->orphan; |
885 | rinfo[val].sg_io_owned = srp->sg_io_owned; |
886 | rinfo[val].pack_id = srp->header.pack_id; |
887 | rinfo[val].usr_ptr = srp->header.usr_ptr; |
888 | val++; |
889 | } |
890 | } |
891 | |
892 | #ifdef CONFIG_COMPAT |
893 | struct compat_sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */ |
894 | char req_state; |
895 | char orphan; |
896 | char sg_io_owned; |
897 | char problem; |
898 | int pack_id; |
899 | compat_uptr_t usr_ptr; |
900 | unsigned int duration; |
901 | int unused; |
902 | }; |
903 | |
904 | static int put_compat_request_table(struct compat_sg_req_info __user *o, |
905 | struct sg_req_info *rinfo) |
906 | { |
907 | int i; |
908 | for (i = 0; i < SG_MAX_QUEUE; i++) { |
		if (copy_to_user(o + i, rinfo + i, offsetof(sg_req_info_t, usr_ptr)) ||
910 | put_user((uintptr_t)rinfo[i].usr_ptr, &o[i].usr_ptr) || |
911 | put_user(rinfo[i].duration, &o[i].duration) || |
912 | put_user(rinfo[i].unused, &o[i].unused)) |
913 | return -EFAULT; |
914 | } |
915 | return 0; |
916 | } |
917 | #endif |
918 | |
919 | static long |
920 | sg_ioctl_common(struct file *filp, Sg_device *sdp, Sg_fd *sfp, |
921 | unsigned int cmd_in, void __user *p) |
922 | { |
923 | int __user *ip = p; |
924 | int result, val, read_only; |
925 | Sg_request *srp; |
926 | unsigned long iflags; |
927 | |
928 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, |
929 | "sg_ioctl: cmd=0x%x\n" , (int) cmd_in)); |
930 | read_only = (O_RDWR != (filp->f_flags & O_ACCMODE)); |
931 | |
932 | switch (cmd_in) { |
933 | case SG_IO: |
934 | if (atomic_read(v: &sdp->detaching)) |
935 | return -ENODEV; |
936 | if (!scsi_block_when_processing_errors(sdp->device)) |
937 | return -ENXIO; |
938 | result = sg_new_write(sfp, file: filp, buf: p, SZ_SG_IO_HDR, |
939 | blocking: 1, read_only, sg_io_owned: 1, o_srp: &srp); |
940 | if (result < 0) |
941 | return result; |
942 | result = wait_event_interruptible(sfp->read_wait, |
943 | srp_done(sfp, srp)); |
944 | write_lock_irq(&sfp->rq_list_lock); |
945 | if (srp->done) { |
946 | srp->done = 2; |
947 | write_unlock_irq(&sfp->rq_list_lock); |
948 | result = sg_new_read(sfp, buf: p, SZ_SG_IO_HDR, srp); |
949 | return (result < 0) ? result : 0; |
950 | } |
951 | srp->orphan = 1; |
952 | write_unlock_irq(&sfp->rq_list_lock); |
953 | return result; /* -ERESTARTSYS because signal hit process */ |
954 | case SG_SET_TIMEOUT: |
955 | result = get_user(val, ip); |
956 | if (result) |
957 | return result; |
958 | if (val < 0) |
959 | return -EIO; |
960 | if (val >= mult_frac((s64)INT_MAX, USER_HZ, HZ)) |
961 | val = min_t(s64, mult_frac((s64)INT_MAX, USER_HZ, HZ), |
962 | INT_MAX); |
963 | sfp->timeout_user = val; |
964 | sfp->timeout = mult_frac(val, HZ, USER_HZ); |
965 | |
966 | return 0; |
967 | case SG_GET_TIMEOUT: /* N.B. User receives timeout as return value */ |
968 | /* strange ..., for backward compatibility */ |
969 | return sfp->timeout_user; |
970 | case SG_SET_FORCE_LOW_DMA: |
971 | /* |
972 | * N.B. This ioctl never worked properly, but failed to |
		 * return an error value. So returning '0' to keep compatibility
974 | * with legacy applications. |
975 | */ |
976 | return 0; |
977 | case SG_GET_LOW_DMA: |
978 | return put_user(0, ip); |
979 | case SG_GET_SCSI_ID: |
980 | { |
981 | sg_scsi_id_t v; |
982 | |
			if (atomic_read(&sdp->detaching))
				return -ENODEV;
			memset(&v, 0, sizeof(v));
			v.host_no = sdp->device->host->host_no;
			v.channel = sdp->device->channel;
			v.scsi_id = sdp->device->id;
			v.lun = sdp->device->lun;
			v.scsi_type = sdp->device->type;
			v.h_cmd_per_lun = sdp->device->host->cmd_per_lun;
			v.d_queue_depth = sdp->device->queue_depth;
			if (copy_to_user(p, &v, sizeof(sg_scsi_id_t)))
994 | return -EFAULT; |
995 | return 0; |
996 | } |
997 | case SG_SET_FORCE_PACK_ID: |
998 | result = get_user(val, ip); |
999 | if (result) |
1000 | return result; |
1001 | sfp->force_packid = val ? 1 : 0; |
1002 | return 0; |
1003 | case SG_GET_PACK_ID: |
1004 | read_lock_irqsave(&sfp->rq_list_lock, iflags); |
1005 | list_for_each_entry(srp, &sfp->rq_list, entry) { |
1006 | if ((1 == srp->done) && (!srp->sg_io_owned)) { |
1007 | read_unlock_irqrestore(&sfp->rq_list_lock, |
1008 | iflags); |
1009 | return put_user(srp->header.pack_id, ip); |
1010 | } |
1011 | } |
1012 | read_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
1013 | return put_user(-1, ip); |
1014 | case SG_GET_NUM_WAITING: |
1015 | read_lock_irqsave(&sfp->rq_list_lock, iflags); |
1016 | val = 0; |
1017 | list_for_each_entry(srp, &sfp->rq_list, entry) { |
1018 | if ((1 == srp->done) && (!srp->sg_io_owned)) |
1019 | ++val; |
1020 | } |
1021 | read_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
1022 | return put_user(val, ip); |
1023 | case SG_GET_SG_TABLESIZE: |
1024 | return put_user(sdp->sg_tablesize, ip); |
1025 | case SG_SET_RESERVED_SIZE: |
1026 | result = get_user(val, ip); |
1027 | if (result) |
1028 | return result; |
1029 | if (val < 0) |
1030 | return -EINVAL; |
1031 | val = min_t(int, val, |
1032 | max_sectors_bytes(sdp->device->request_queue)); |
1033 | mutex_lock(&sfp->f_mutex); |
1034 | if (val != sfp->reserve.bufflen) { |
1035 | if (sfp->mmap_called || |
1036 | sfp->res_in_use) { |
				mutex_unlock(&sfp->f_mutex);
				return -EBUSY;
			}

			sg_remove_scat(sfp, &sfp->reserve);
			sg_build_reserve(sfp, val);
		}
		mutex_unlock(&sfp->f_mutex);
1045 | return 0; |
1046 | case SG_GET_RESERVED_SIZE: |
1047 | val = min_t(int, sfp->reserve.bufflen, |
1048 | max_sectors_bytes(sdp->device->request_queue)); |
1049 | return put_user(val, ip); |
1050 | case SG_SET_COMMAND_Q: |
1051 | result = get_user(val, ip); |
1052 | if (result) |
1053 | return result; |
1054 | sfp->cmd_q = val ? 1 : 0; |
1055 | return 0; |
1056 | case SG_GET_COMMAND_Q: |
1057 | return put_user((int) sfp->cmd_q, ip); |
1058 | case SG_SET_KEEP_ORPHAN: |
1059 | result = get_user(val, ip); |
1060 | if (result) |
1061 | return result; |
1062 | sfp->keep_orphan = val; |
1063 | return 0; |
1064 | case SG_GET_KEEP_ORPHAN: |
1065 | return put_user((int) sfp->keep_orphan, ip); |
1066 | case SG_NEXT_CMD_LEN: |
1067 | result = get_user(val, ip); |
1068 | if (result) |
1069 | return result; |
1070 | if (val > SG_MAX_CDB_SIZE) |
1071 | return -ENOMEM; |
1072 | sfp->next_cmd_len = (val > 0) ? val : 0; |
1073 | return 0; |
1074 | case SG_GET_VERSION_NUM: |
1075 | return put_user(sg_version_num, ip); |
1076 | case SG_GET_ACCESS_COUNT: |
1077 | /* faked - we don't have a real access count anymore */ |
1078 | val = (sdp->device ? 1 : 0); |
1079 | return put_user(val, ip); |
1080 | case SG_GET_REQUEST_TABLE: |
1081 | { |
1082 | sg_req_info_t *rinfo; |
1083 | |
1084 | rinfo = kcalloc(SG_MAX_QUEUE, SZ_SG_REQ_INFO, |
1085 | GFP_KERNEL); |
1086 | if (!rinfo) |
1087 | return -ENOMEM; |
1088 | read_lock_irqsave(&sfp->rq_list_lock, iflags); |
1089 | sg_fill_request_table(sfp, rinfo); |
1090 | read_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
1091 | #ifdef CONFIG_COMPAT |
1092 | if (in_compat_syscall()) |
			result = put_compat_request_table(p, rinfo);
		else
#endif
			result = copy_to_user(p, rinfo,
					      SZ_SG_REQ_INFO * SG_MAX_QUEUE);
		result = result ? -EFAULT : 0;
		kfree(rinfo);
1100 | return result; |
1101 | } |
1102 | case SG_EMULATED_HOST: |
		if (atomic_read(&sdp->detaching))
			return -ENODEV;
		return put_user(sdp->device->host->hostt->emulated, ip);
	case SCSI_IOCTL_SEND_COMMAND:
		if (atomic_read(&sdp->detaching))
			return -ENODEV;
		return scsi_ioctl(sdp->device, filp->f_mode & FMODE_WRITE,
				  cmd_in, p);
1111 | case SG_SET_DEBUG: |
1112 | result = get_user(val, ip); |
1113 | if (result) |
1114 | return result; |
1115 | sdp->sgdebug = (char) val; |
1116 | return 0; |
1117 | case BLKSECTGET: |
1118 | return put_user(max_sectors_bytes(sdp->device->request_queue), |
1119 | ip); |
1120 | case BLKTRACESETUP: |
		return blk_trace_setup(sdp->device->request_queue, sdp->name,
				       MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
				       NULL, p);
	case BLKTRACESTART:
		return blk_trace_startstop(sdp->device->request_queue, 1);
	case BLKTRACESTOP:
		return blk_trace_startstop(sdp->device->request_queue, 0);
	case BLKTRACETEARDOWN:
		return blk_trace_remove(sdp->device->request_queue);
1130 | case SCSI_IOCTL_GET_IDLUN: |
1131 | case SCSI_IOCTL_GET_BUS_NUMBER: |
1132 | case SCSI_IOCTL_PROBE_HOST: |
1133 | case SG_GET_TRANSFORM: |
1134 | case SG_SCSI_RESET: |
		if (atomic_read(&sdp->detaching))
1136 | return -ENODEV; |
1137 | break; |
1138 | default: |
1139 | if (read_only) |
1140 | return -EPERM; /* don't know so take safe approach */ |
1141 | break; |
1142 | } |
1143 | |
	result = scsi_ioctl_block_when_processing_errors(sdp->device, cmd_in,
			filp->f_flags & O_NDELAY);
1146 | if (result) |
1147 | return result; |
1148 | |
1149 | return -ENOIOCTLCMD; |
1150 | } |
1151 | |
1152 | static long |
1153 | sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) |
1154 | { |
1155 | void __user *p = (void __user *)arg; |
1156 | Sg_device *sdp; |
1157 | Sg_fd *sfp; |
1158 | int ret; |
1159 | |
1160 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) |
1161 | return -ENXIO; |
1162 | |
1163 | ret = sg_ioctl_common(filp, sdp, sfp, cmd_in, p); |
1164 | if (ret != -ENOIOCTLCMD) |
1165 | return ret; |
	return scsi_ioctl(sdp->device, filp->f_mode & FMODE_WRITE, cmd_in, p);
1167 | } |
1168 | |
1169 | static __poll_t |
1170 | sg_poll(struct file *filp, poll_table * wait) |
1171 | { |
1172 | __poll_t res = 0; |
1173 | Sg_device *sdp; |
1174 | Sg_fd *sfp; |
1175 | Sg_request *srp; |
1176 | int count = 0; |
1177 | unsigned long iflags; |
1178 | |
1179 | sfp = filp->private_data; |
1180 | if (!sfp) |
1181 | return EPOLLERR; |
1182 | sdp = sfp->parentdp; |
1183 | if (!sdp) |
1184 | return EPOLLERR; |
	poll_wait(filp, &sfp->read_wait, wait);
1186 | read_lock_irqsave(&sfp->rq_list_lock, iflags); |
1187 | list_for_each_entry(srp, &sfp->rq_list, entry) { |
1188 | /* if any read waiting, flag it */ |
1189 | if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned)) |
1190 | res = EPOLLIN | EPOLLRDNORM; |
1191 | ++count; |
1192 | } |
1193 | read_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
1194 | |
	if (atomic_read(&sdp->detaching))
		res |= EPOLLHUP;
	else if (!sfp->cmd_q) {
		if (0 == count)
			res |= EPOLLOUT | EPOLLWRNORM;
	} else if (count < SG_MAX_QUEUE)
		res |= EPOLLOUT | EPOLLWRNORM;
	SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
				      "sg_poll: res=0x%x\n", (__force u32) res));
1204 | return res; |
1205 | } |
1206 | |
1207 | static int |
1208 | sg_fasync(int fd, struct file *filp, int mode) |
1209 | { |
1210 | Sg_device *sdp; |
1211 | Sg_fd *sfp; |
1212 | |
1213 | if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) |
1214 | return -ENXIO; |
1215 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, |
1216 | "sg_fasync: mode=%d\n" , mode)); |
1217 | |
1218 | return fasync_helper(fd, filp, mode, &sfp->async_qp); |
1219 | } |
1220 | |
1221 | static vm_fault_t |
1222 | sg_vma_fault(struct vm_fault *vmf) |
1223 | { |
1224 | struct vm_area_struct *vma = vmf->vma; |
1225 | Sg_fd *sfp; |
1226 | unsigned long offset, len, sa; |
1227 | Sg_scatter_hold *rsv_schp; |
1228 | int k, length; |
1229 | |
1230 | if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data))) |
1231 | return VM_FAULT_SIGBUS; |
1232 | rsv_schp = &sfp->reserve; |
1233 | offset = vmf->pgoff << PAGE_SHIFT; |
1234 | if (offset >= rsv_schp->bufflen) |
1235 | return VM_FAULT_SIGBUS; |
1236 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp, |
1237 | "sg_vma_fault: offset=%lu, scatg=%d\n" , |
1238 | offset, rsv_schp->k_use_sg)); |
1239 | sa = vma->vm_start; |
1240 | length = 1 << (PAGE_SHIFT + rsv_schp->page_order); |
1241 | for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) { |
1242 | len = vma->vm_end - sa; |
1243 | len = (len < length) ? len : length; |
1244 | if (offset < len) { |
1245 | struct page *page = nth_page(rsv_schp->pages[k], |
1246 | offset >> PAGE_SHIFT); |
1247 | get_page(page); /* increment page count */ |
1248 | vmf->page = page; |
1249 | return 0; /* success */ |
1250 | } |
1251 | sa += len; |
1252 | offset -= len; |
1253 | } |
1254 | |
1255 | return VM_FAULT_SIGBUS; |
1256 | } |
1257 | |
1258 | static const struct vm_operations_struct sg_mmap_vm_ops = { |
1259 | .fault = sg_vma_fault, |
1260 | }; |
1261 | |
1262 | static int |
1263 | sg_mmap(struct file *filp, struct vm_area_struct *vma) |
1264 | { |
1265 | Sg_fd *sfp; |
1266 | unsigned long req_sz, len, sa; |
1267 | Sg_scatter_hold *rsv_schp; |
1268 | int k, length; |
1269 | int ret = 0; |
1270 | |
1271 | if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data))) |
1272 | return -ENXIO; |
1273 | req_sz = vma->vm_end - vma->vm_start; |
1274 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sfp->parentdp, |
1275 | "sg_mmap starting, vm_start=%p, len=%d\n" , |
1276 | (void *) vma->vm_start, (int) req_sz)); |
1277 | if (vma->vm_pgoff) |
1278 | return -EINVAL; /* want no offset */ |
1279 | rsv_schp = &sfp->reserve; |
1280 | mutex_lock(&sfp->f_mutex); |
1281 | if (req_sz > rsv_schp->bufflen) { |
1282 | ret = -ENOMEM; /* cannot map more than reserved buffer */ |
1283 | goto out; |
1284 | } |
1285 | |
1286 | sa = vma->vm_start; |
1287 | length = 1 << (PAGE_SHIFT + rsv_schp->page_order); |
1288 | for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) { |
1289 | len = vma->vm_end - sa; |
1290 | len = (len < length) ? len : length; |
1291 | sa += len; |
1292 | } |
1293 | |
1294 | sfp->mmap_called = 1; |
1295 | vm_flags_set(vma, VM_IO | VM_DONTEXPAND | VM_DONTDUMP); |
1296 | vma->vm_private_data = sfp; |
1297 | vma->vm_ops = &sg_mmap_vm_ops; |
1298 | out: |
	mutex_unlock(&sfp->f_mutex);
1300 | return ret; |
1301 | } |
1302 | |
1303 | static void |
1304 | sg_rq_end_io_usercontext(struct work_struct *work) |
1305 | { |
1306 | struct sg_request *srp = container_of(work, struct sg_request, ew.work); |
1307 | struct sg_fd *sfp = srp->parentfp; |
1308 | |
1309 | sg_finish_rem_req(srp); |
1310 | sg_remove_request(sfp, srp); |
1311 | kref_put(kref: &sfp->f_ref, release: sg_remove_sfp); |
1312 | } |
1313 | |
1314 | /* |
1315 | * This function is a "bottom half" handler that is called by the mid |
1316 | * level when a command is completed (or has failed). |
1317 | */ |
1318 | static enum rq_end_io_ret |
1319 | sg_rq_end_io(struct request *rq, blk_status_t status) |
1320 | { |
1321 | struct scsi_cmnd *scmd = blk_mq_rq_to_pdu(rq); |
1322 | struct sg_request *srp = rq->end_io_data; |
1323 | Sg_device *sdp; |
1324 | Sg_fd *sfp; |
1325 | unsigned long iflags; |
1326 | unsigned int ms; |
1327 | char *sense; |
1328 | int result, resid, done = 1; |
1329 | |
1330 | if (WARN_ON(srp->done != 0)) |
1331 | return RQ_END_IO_NONE; |
1332 | |
1333 | sfp = srp->parentfp; |
1334 | if (WARN_ON(sfp == NULL)) |
1335 | return RQ_END_IO_NONE; |
1336 | |
1337 | sdp = sfp->parentdp; |
1338 | if (unlikely(atomic_read(&sdp->detaching))) |
1339 | pr_info("%s: device detaching\n" , __func__); |
1340 | |
1341 | sense = scmd->sense_buffer; |
1342 | result = scmd->result; |
1343 | resid = scmd->resid_len; |
1344 | |
1345 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sdp, |
1346 | "sg_cmd_done: pack_id=%d, res=0x%x\n" , |
1347 | srp->header.pack_id, result)); |
1348 | srp->header.resid = resid; |
1349 | ms = jiffies_to_msecs(j: jiffies); |
1350 | srp->header.duration = (ms > srp->header.duration) ? |
1351 | (ms - srp->header.duration) : 0; |
1352 | if (0 != result) { |
1353 | struct scsi_sense_hdr sshdr; |
1354 | |
1355 | srp->header.status = 0xff & result; |
1356 | srp->header.masked_status = sg_status_byte(result); |
1357 | srp->header.msg_status = COMMAND_COMPLETE; |
1358 | srp->header.host_status = host_byte(result); |
1359 | srp->header.driver_status = driver_byte(result); |
1360 | if ((sdp->sgdebug > 0) && |
1361 | ((CHECK_CONDITION == srp->header.masked_status) || |
1362 | (COMMAND_TERMINATED == srp->header.masked_status))) |
			__scsi_print_sense(sdp->device, __func__, sense,
					   SCSI_SENSE_BUFFERSIZE);

		/* Following if statement is a patch supplied by Eric Youngdale */
		if (driver_byte(result) != 0
		    && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr)
		    && !scsi_sense_is_deferred(&sshdr)
1370 | && sshdr.sense_key == UNIT_ATTENTION |
1371 | && sdp->device->removable) { |
1372 | /* Detected possible disc change. Set the bit - this */ |
1373 | /* may be used if there are filesystems using this device */ |
1374 | sdp->device->changed = 1; |
1375 | } |
1376 | } |
1377 | |
1378 | if (scmd->sense_len) |
1379 | memcpy(srp->sense_b, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); |
1380 | |
1381 | /* Rely on write phase to clean out srp status values, so no "else" */ |
1382 | |
1383 | /* |
1384 | * Free the request as soon as it is complete so that its resources |
1385 | * can be reused without waiting for userspace to read() the |
1386 | * result. But keep the associated bio (if any) around until |
1387 | * blk_rq_unmap_user() can be called from user context. |
1388 | */ |
1389 | srp->rq = NULL; |
1390 | blk_mq_free_request(rq); |
1391 | |
1392 | write_lock_irqsave(&sfp->rq_list_lock, iflags); |
1393 | if (unlikely(srp->orphan)) { |
1394 | if (sfp->keep_orphan) |
1395 | srp->sg_io_owned = 0; |
1396 | else |
1397 | done = 0; |
1398 | } |
1399 | srp->done = done; |
1400 | write_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
1401 | |
1402 | if (likely(done)) { |
1403 | /* Now wake up any sg_read() that is waiting for this |
1404 | * packet. |
1405 | */ |
1406 | wake_up_interruptible(&sfp->read_wait); |
1407 | kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN); |
1408 | kref_put(kref: &sfp->f_ref, release: sg_remove_sfp); |
1409 | } else { |
1410 | INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext); |
1411 | schedule_work(work: &srp->ew.work); |
1412 | } |
1413 | return RQ_END_IO_NONE; |
1414 | } |
1415 | |
1416 | static const struct file_operations sg_fops = { |
1417 | .owner = THIS_MODULE, |
1418 | .read = sg_read, |
1419 | .write = sg_write, |
1420 | .poll = sg_poll, |
1421 | .unlocked_ioctl = sg_ioctl, |
1422 | .compat_ioctl = compat_ptr_ioctl, |
1423 | .open = sg_open, |
1424 | .mmap = sg_mmap, |
1425 | .release = sg_release, |
1426 | .fasync = sg_fasync, |
1427 | .llseek = no_llseek, |
1428 | }; |
1429 | |
1430 | static const struct class sg_sysfs_class = { |
1431 | .name = "scsi_generic" |
1432 | }; |
1433 | |
1434 | static int sg_sysfs_valid = 0; |
1435 | |
1436 | static Sg_device * |
1437 | sg_alloc(struct scsi_device *scsidp) |
1438 | { |
1439 | struct request_queue *q = scsidp->request_queue; |
1440 | Sg_device *sdp; |
1441 | unsigned long iflags; |
1442 | int error; |
1443 | u32 k; |
1444 | |
	sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL);
	if (!sdp) {
		sdev_printk(KERN_WARNING, scsidp, "%s: kmalloc Sg_device "
			    "failure\n", __func__);
		return ERR_PTR(-ENOMEM);
1450 | } |
1451 | |
1452 | idr_preload(GFP_KERNEL); |
1453 | write_lock_irqsave(&sg_index_lock, iflags); |
1454 | |
1455 | error = idr_alloc(&sg_index_idr, ptr: sdp, start: 0, SG_MAX_DEVS, GFP_NOWAIT); |
1456 | if (error < 0) { |
1457 | if (error == -ENOSPC) { |
1458 | sdev_printk(KERN_WARNING, scsidp, |
1459 | "Unable to attach sg device type=%d, minor number exceeds %d\n" , |
1460 | scsidp->type, SG_MAX_DEVS - 1); |
1461 | error = -ENODEV; |
1462 | } else { |
1463 | sdev_printk(KERN_WARNING, scsidp, "%s: idr " |
1464 | "allocation Sg_device failure: %d\n" , |
1465 | __func__, error); |
1466 | } |
1467 | goto out_unlock; |
1468 | } |
1469 | k = error; |
1470 | |
	SCSI_LOG_TIMEOUT(3, sdev_printk(KERN_INFO, scsidp,
					"sg_alloc: dev=%d\n", k));
	sprintf(sdp->name, "sg%d", k);
	sdp->device = scsidp;
	mutex_init(&sdp->open_rel_lock);
	INIT_LIST_HEAD(&sdp->sfds);
	init_waitqueue_head(&sdp->open_wait);
	atomic_set(&sdp->detaching, 0);
	rwlock_init(&sdp->sfd_lock);
	sdp->sg_tablesize = queue_max_segments(q);
	sdp->index = k;
	kref_init(&sdp->d_ref);
1483 | error = 0; |
1484 | |
1485 | out_unlock: |
1486 | write_unlock_irqrestore(&sg_index_lock, iflags); |
1487 | idr_preload_end(); |
1488 | |
1489 | if (error) { |
		kfree(sdp);
1491 | return ERR_PTR(error); |
1492 | } |
1493 | return sdp; |
1494 | } |
1495 | |
1496 | static int |
1497 | sg_add_device(struct device *cl_dev) |
1498 | { |
1499 | struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); |
1500 | Sg_device *sdp = NULL; |
1501 | struct cdev * cdev = NULL; |
1502 | int error; |
1503 | unsigned long iflags; |
1504 | |
1505 | if (!blk_get_queue(scsidp->request_queue)) { |
1506 | pr_warn("%s: get scsi_device queue failed\n" , __func__); |
1507 | return -ENODEV; |
1508 | } |
1509 | |
1510 | error = -ENOMEM; |
1511 | cdev = cdev_alloc(); |
1512 | if (!cdev) { |
1513 | pr_warn("%s: cdev_alloc failed\n" , __func__); |
1514 | goto out; |
1515 | } |
1516 | cdev->owner = THIS_MODULE; |
1517 | cdev->ops = &sg_fops; |
1518 | |
1519 | sdp = sg_alloc(scsidp); |
	if (IS_ERR(sdp)) {
		pr_warn("%s: sg_alloc failed\n", __func__);
		error = PTR_ERR(sdp);
1523 | goto out; |
1524 | } |
1525 | |
1526 | error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1); |
1527 | if (error) |
1528 | goto cdev_add_err; |
1529 | |
1530 | sdp->cdev = cdev; |
1531 | if (sg_sysfs_valid) { |
1532 | struct device *sg_class_member; |
1533 | |
		sg_class_member = device_create(&sg_sysfs_class, cl_dev->parent,
						MKDEV(SCSI_GENERIC_MAJOR,
						      sdp->index),
						sdp, "%s", sdp->name);
		if (IS_ERR(sg_class_member)) {
			pr_err("%s: device_create failed\n", __func__);
			error = PTR_ERR(sg_class_member);
			goto cdev_add_err;
		}
		error = sysfs_create_link(&scsidp->sdev_gendev.kobj,
					  &sg_class_member->kobj, "generic");
		if (error)
			pr_err("%s: unable to make symlink 'generic' back "
			       "to sg%d\n", __func__, sdp->index);
	} else
		pr_warn("%s: sg_sys Invalid\n", __func__);
1550 | |
	sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d "
		    "type %d\n", sdp->index, scsidp->type);

	dev_set_drvdata(cl_dev, sdp);
1555 | |
1556 | return 0; |
1557 | |
1558 | cdev_add_err: |
1559 | write_lock_irqsave(&sg_index_lock, iflags); |
1560 | idr_remove(&sg_index_idr, id: sdp->index); |
1561 | write_unlock_irqrestore(&sg_index_lock, iflags); |
1562 | kfree(objp: sdp); |
1563 | |
1564 | out: |
1565 | if (cdev) |
1566 | cdev_del(cdev); |
1567 | blk_put_queue(scsidp->request_queue); |
1568 | return error; |
1569 | } |
1570 | |
1571 | static void |
1572 | sg_device_destroy(struct kref *kref) |
1573 | { |
1574 | struct sg_device *sdp = container_of(kref, struct sg_device, d_ref); |
1575 | struct request_queue *q = sdp->device->request_queue; |
1576 | unsigned long flags; |
1577 | |
1578 | /* CAUTION! Note that the device can still be found via idr_find() |
1579 | * even though the refcount is 0. Therefore, do idr_remove() BEFORE |
1580 | * any other cleanup. |
1581 | */ |
1582 | |
1583 | blk_trace_remove(q); |
1584 | blk_put_queue(q); |
1585 | |
1586 | write_lock_irqsave(&sg_index_lock, flags); |
1587 | idr_remove(&sg_index_idr, id: sdp->index); |
1588 | write_unlock_irqrestore(&sg_index_lock, flags); |
1589 | |
1590 | SCSI_LOG_TIMEOUT(3, |
1591 | sg_printk(KERN_INFO, sdp, "sg_device_destroy\n" )); |
1592 | |
1593 | kfree(objp: sdp); |
1594 | } |
1595 | |
1596 | static void |
1597 | sg_remove_device(struct device *cl_dev) |
1598 | { |
1599 | struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); |
	Sg_device *sdp = dev_get_drvdata(cl_dev);
1601 | unsigned long iflags; |
1602 | Sg_fd *sfp; |
1603 | int val; |
1604 | |
1605 | if (!sdp) |
1606 | return; |
1607 | /* want sdp->detaching non-zero as soon as possible */ |
1608 | val = atomic_inc_return(&sdp->detaching);
1609 | if (val > 1) |
1610 | return; /* only want to do following once per device */ |
1611 | |
1612 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, |
1613 | "%s\n" , __func__)); |
1614 | |
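     | /*
     |  * Wake everyone waiting on this device so blocked readers and
     |  * blocked opens notice sdp->detaching and fail with -ENODEV.
     |  */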
1615 | read_lock_irqsave(&sdp->sfd_lock, iflags); |
1616 | list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { |
1617 | wake_up_interruptible_all(&sfp->read_wait); |
1618 | kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); |
1619 | } |
1620 | wake_up_interruptible_all(&sdp->open_wait); |
1621 | read_unlock_irqrestore(&sdp->sfd_lock, iflags); |
1622 | |
1623 | sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic");
1624 | device_destroy(&sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index));
1625 | cdev_del(sdp->cdev); |
1626 | sdp->cdev = NULL; |
1627 | |
1628 | kref_put(&sdp->d_ref, sg_device_destroy);
1629 | } |
1630 | |
1631 | module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR); |
1632 | module_param_named(def_reserved_size, def_reserved_size, int, |
1633 | S_IRUGO | S_IWUSR); |
1634 | module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR); |
1635 | |
1636 | MODULE_AUTHOR("Douglas Gilbert" ); |
1637 | MODULE_DESCRIPTION("SCSI generic (sg) driver" ); |
1638 | MODULE_LICENSE("GPL" ); |
1639 | MODULE_VERSION(SG_VERSION_STR); |
1640 | MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR); |
1641 | |
1642 | MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element " |
1643 | "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))" ); |
1644 | MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd" ); |
1645 | MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))" ); |
1646 | |
1647 | #ifdef CONFIG_SYSCTL |
1648 | #include <linux/sysctl.h> |
1649 | |
1650 | static struct ctl_table sg_sysctls[] = { |
1651 | { |
1652 | .procname = "sg-big-buff" , |
1653 | .data = &sg_big_buff, |
1654 | .maxlen = sizeof(int), |
1655 | .mode = 0444, |
1656 | .proc_handler = proc_dointvec, |
1657 | }, |
1658 | }; |
1659 | |
1660 | static struct ctl_table_header *hdr; |
1661 | static void register_sg_sysctls(void) |
1662 | { |
1663 | if (!hdr) |
1664 | hdr = register_sysctl("kernel" , sg_sysctls); |
1665 | } |
1666 | |
1667 | static void unregister_sg_sysctls(void) |
1668 | { |
1669 | if (hdr) |
1670 | unregister_sysctl_table(hdr);
1671 | } |
1672 | #else |
1673 | #define register_sg_sysctls() do { } while (0) |
1674 | #define unregister_sg_sysctls() do { } while (0) |
1675 | #endif /* CONFIG_SYSCTL */ |
1676 | |
1677 | static int __init |
1678 | init_sg(void) |
1679 | { |
1680 | int rc; |
1681 | |
1682 | if (scatter_elem_sz < PAGE_SIZE) { |
1683 | scatter_elem_sz = PAGE_SIZE; |
1684 | scatter_elem_sz_prev = scatter_elem_sz; |
1685 | } |
1686 | if (def_reserved_size >= 0) |
1687 | sg_big_buff = def_reserved_size; |
1688 | else |
1689 | def_reserved_size = sg_big_buff; |
1690 | |
1691 | rc = register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), |
1692 | SG_MAX_DEVS, "sg" ); |
1693 | if (rc) |
1694 | return rc; |
1695 | rc = class_register(&sg_sysfs_class);
1696 | if (rc) |
1697 | goto err_out; |
1698 | sg_sysfs_valid = 1; |
1699 | rc = scsi_register_interface(&sg_interface); |
1700 | if (0 == rc) { |
1701 | #ifdef CONFIG_SCSI_PROC_FS |
1702 | sg_proc_init(); |
1703 | #endif /* CONFIG_SCSI_PROC_FS */ |
1704 | return 0; |
1705 | } |
1706 | class_unregister(class: &sg_sysfs_class); |
1707 | register_sg_sysctls(); |
1708 | err_out: |
1709 | unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS); |
1710 | return rc; |
1711 | } |
1712 | |
1713 | static void __exit |
1714 | exit_sg(void) |
1715 | { |
1716 | unregister_sg_sysctls(); |
1717 | #ifdef CONFIG_SCSI_PROC_FS |
1718 | remove_proc_subtree("scsi/sg" , NULL); |
1719 | #endif /* CONFIG_SCSI_PROC_FS */ |
1720 | scsi_unregister_interface(&sg_interface); |
1721 | class_unregister(&sg_sysfs_class);
1722 | sg_sysfs_valid = 0; |
1723 | unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), |
1724 | SG_MAX_DEVS); |
1725 | idr_destroy(&sg_index_idr); |
1726 | } |
1727 | |
1728 | static int |
1729 | sg_start_req(Sg_request *srp, unsigned char *cmd) |
1730 | { |
1731 | int res; |
1732 | struct request *rq; |
1733 | Sg_fd *sfp = srp->parentfp; |
1734 | sg_io_hdr_t *hp = &srp->header; |
1735 | int dxfer_len = (int) hp->dxfer_len; |
1736 | int dxfer_dir = hp->dxfer_direction; |
1737 | unsigned int iov_count = hp->iovec_count; |
1738 | Sg_scatter_hold *req_schp = &srp->data; |
1739 | Sg_scatter_hold *rsv_schp = &sfp->reserve; |
1740 | struct request_queue *q = sfp->parentdp->device->request_queue; |
1741 | struct rq_map_data *md, map_data; |
1742 | int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? ITER_SOURCE : ITER_DEST; |
1743 | struct scsi_cmnd *scmd; |
1744 | |
1745 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, |
1746 | "sg_start_req: dxfer_len=%d\n" , |
1747 | dxfer_len)); |
1748 | |
1749 | /* |
1750 | * NOTE |
1751 | * |
1752 | * With scsi-mq enabled, there are a fixed number of preallocated |
1753 | * requests equal in number to shost->can_queue. If all of the |
1754 | * preallocated requests are already in use, then scsi_alloc_request() |
1755 | * will sleep until an active command completes, freeing up a request. |
1756 | * Although waiting in an asynchronous interface is less than ideal, we |
1757 | * do not want to use BLK_MQ_REQ_NOWAIT here because userspace might |
1758 | * not expect an EWOULDBLOCK from this condition. |
1759 | */ |
1760 | rq = scsi_alloc_request(q, hp->dxfer_direction == SG_DXFER_TO_DEV ?
1761 | REQ_OP_DRV_OUT : REQ_OP_DRV_IN, 0);
1762 | if (IS_ERR(rq))
1763 | return PTR_ERR(rq);
1764 | scmd = blk_mq_rq_to_pdu(rq); |
1765 | |
1766 | if (hp->cmd_len > sizeof(scmd->cmnd)) { |
1767 | blk_mq_free_request(rq); |
1768 | return -EINVAL; |
1769 | } |
1770 | |
1771 | memcpy(scmd->cmnd, cmd, hp->cmd_len); |
1772 | scmd->cmd_len = hp->cmd_len; |
1773 | |
1774 | srp->rq = rq; |
1775 | rq->end_io_data = srp; |
1776 | scmd->allowed = SG_DEFAULT_RETRIES; |
1777 | |
1778 | if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) |
1779 | return 0; |
1780 | |
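     | /*
     |  * Choose between direct I/O (map the user buffer straight into the
     |  * request, no intermediate copy) and indirect I/O through driver
     |  * pages described by map_data. Direct I/O requires module-level
     |  * permission (allow_dio), an explicit SG_FLAG_DIRECT_IO request, a
     |  * known transfer direction, no iovec, and an address/length the
     |  * queue considers aligned.
     |  */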
1781 | if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO && |
1782 | dxfer_dir != SG_DXFER_UNKNOWN && !iov_count && |
1783 | blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len))
1784 | md = NULL; |
1785 | else |
1786 | md = &map_data; |
1787 | |
1788 | if (md) { |
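     | /*
     |  * Indirect I/O: prefer this fd's preallocated reserve buffer when
     |  * it is free and large enough. SG_FLAG_MMAP_IO transfers must use
     |  * the reserve buffer (mmap(2) maps it), so they fail instead of
     |  * falling back; anything else gets a scatter list built for this
     |  * request alone.
     |  */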
1789 | mutex_lock(&sfp->f_mutex); |
1790 | if (dxfer_len <= rsv_schp->bufflen && |
1791 | !sfp->res_in_use) { |
1792 | sfp->res_in_use = 1; |
1793 | sg_link_reserve(sfp, srp, dxfer_len);
1794 | } else if (hp->flags & SG_FLAG_MMAP_IO) { |
1795 | res = -EBUSY; /* sfp->res_in_use == 1 */ |
1796 | if (dxfer_len > rsv_schp->bufflen) |
1797 | res = -ENOMEM; |
1798 | mutex_unlock(&sfp->f_mutex);
1799 | return res; |
1800 | } else { |
1801 | res = sg_build_indirect(req_schp, sfp, dxfer_len);
1802 | if (res) {
1803 | mutex_unlock(&sfp->f_mutex);
1804 | return res; |
1805 | } |
1806 | } |
1807 | mutex_unlock(&sfp->f_mutex);
1808 | |
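     | /*
     |  * Describe the driver-owned pages to the block layer. null_mapped
     |  * covers the case where userspace supplied no buffer; from_user
     |  * requests a copy-in as well as a copy-out for bidirectional
     |  * transfers.
     |  */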
1809 | md->pages = req_schp->pages; |
1810 | md->page_order = req_schp->page_order; |
1811 | md->nr_entries = req_schp->k_use_sg; |
1812 | md->offset = 0; |
1813 | md->null_mapped = hp->dxferp ? 0 : 1; |
1814 | if (dxfer_dir == SG_DXFER_TO_FROM_DEV) |
1815 | md->from_user = 1; |
1816 | else |
1817 | md->from_user = 0; |
1818 | } |
1819 | |
1820 | res = blk_rq_map_user_io(rq, md, hp->dxferp, hp->dxfer_len, |
1821 | GFP_ATOMIC, iov_count, iov_count, 1, rw); |
1822 | if (!res) { |
1823 | srp->bio = rq->bio; |
1824 | |
1825 | if (!md) { |
1826 | req_schp->dio_in_use = 1; |
1827 | hp->info |= SG_INFO_DIRECT_IO; |
1828 | } |
1829 | } |
1830 | return res; |
1831 | } |
1832 | |
1833 | static int |
1834 | sg_finish_rem_req(Sg_request *srp) |
1835 | { |
1836 | int ret = 0; |
1837 | |
1838 | Sg_fd *sfp = srp->parentfp; |
1839 | Sg_scatter_hold *req_schp = &srp->data; |
1840 | |
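     | /*
     |  * Undo sg_start_req() in reverse: unmap the user pages, free the
     |  * block-layer request, then release either the shared reserve
     |  * buffer or this request's private scatter list.
     |  */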
1841 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, |
1842 | "sg_finish_rem_req: res_used=%d\n" , |
1843 | (int) srp->res_used)); |
1844 | if (srp->bio) |
1845 | ret = blk_rq_unmap_user(srp->bio); |
1846 | |
1847 | if (srp->rq) |
1848 | blk_mq_free_request(srp->rq);
1849 | |
1850 | if (srp->res_used) |
1851 | sg_unlink_reserve(sfp, srp); |
1852 | else |
1853 | sg_remove_scat(sfp, req_schp);
1854 | |
1855 | return ret; |
1856 | } |
1857 | |
1858 | static int |
1859 | sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize) |
1860 | { |
1861 | int sg_bufflen = tablesize * sizeof(struct page *); |
1862 | gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN; |
1863 | |
1864 | schp->pages = kzalloc(size: sg_bufflen, flags: gfp_flags); |
1865 | if (!schp->pages) |
1866 | return -ENOMEM; |
1867 | schp->sglist_len = sg_bufflen; |
1868 | return tablesize; /* number of scat_gath elements allocated */ |
1869 | } |
1870 | |
1871 | static int |
1872 | sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size) |
1873 | { |
1874 | int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems; |
1875 | int sg_tablesize = sfp->parentdp->sg_tablesize; |
1876 | int blk_size = buff_size, order; |
1877 | gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN | __GFP_ZERO; |
1878 | |
1879 | if (blk_size < 0) |
1880 | return -EFAULT; |
1881 | if (0 == blk_size) |
1882 | ++blk_size; /* don't know why */ |
1883 | /* round request up to next highest SG_SECTOR_SZ byte boundary */ |
1884 | blk_size = ALIGN(blk_size, SG_SECTOR_SZ); |
1885 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, |
1886 | "sg_build_indirect: buff_size=%d, blk_size=%d\n" , |
1887 | buff_size, blk_size)); |
1888 | |
1889 | /* N.B. ret_sz carried into this block ... */ |
1890 | mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
1891 | if (mx_sc_elems < 0) |
1892 | return mx_sc_elems; /* most likely -ENOMEM */ |
1893 | |
1894 | num = scatter_elem_sz; |
1895 | if (unlikely(num != scatter_elem_sz_prev)) { |
1896 | if (num < PAGE_SIZE) { |
1897 | scatter_elem_sz = PAGE_SIZE; |
1898 | scatter_elem_sz_prev = PAGE_SIZE; |
1899 | } else |
1900 | scatter_elem_sz_prev = num; |
1901 | } |
1902 | |
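     | /*
     |  * Allocate the buffer in power-of-two page chunks. If a chunk
     |  * allocation fails, the out: path below frees what was obtained,
     |  * halves the order and retries, so large requests degrade to more
     |  * numerous, smaller elements under memory pressure.
     |  */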
1903 | order = get_order(num);
1904 | retry: |
1905 | ret_sz = 1 << (PAGE_SHIFT + order); |
1906 | |
1907 | for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems; |
1908 | k++, rem_sz -= ret_sz) { |
1909 | |
1910 | num = (rem_sz > scatter_elem_sz_prev) ? |
1911 | scatter_elem_sz_prev : rem_sz; |
1912 | |
1913 | schp->pages[k] = alloc_pages(gfp: gfp_mask, order); |
1914 | if (!schp->pages[k]) |
1915 | goto out; |
1916 | |
1917 | if (num == scatter_elem_sz_prev) { |
1918 | if (unlikely(ret_sz > scatter_elem_sz_prev)) { |
1919 | scatter_elem_sz = ret_sz; |
1920 | scatter_elem_sz_prev = ret_sz; |
1921 | } |
1922 | } |
1923 | |
1924 | SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, |
1925 | "sg_build_indirect: k=%d, num=%d, ret_sz=%d\n" , |
1926 | k, num, ret_sz)); |
1927 | } /* end of for loop */ |
1928 | |
1929 | schp->page_order = order; |
1930 | schp->k_use_sg = k; |
1931 | SCSI_LOG_TIMEOUT(5, sg_printk(KERN_INFO, sfp->parentdp, |
1932 | "sg_build_indirect: k_use_sg=%d, rem_sz=%d\n" , |
1933 | k, rem_sz)); |
1934 | |
1935 | schp->bufflen = blk_size; |
1936 | if (rem_sz > 0) /* must have failed */ |
1937 | return -ENOMEM; |
1938 | return 0; |
1939 | out: |
1940 | for (i = 0; i < k; i++) |
1941 | __free_pages(page: schp->pages[i], order); |
1942 | |
1943 | if (--order >= 0) |
1944 | goto retry; |
1945 | |
1946 | return -ENOMEM; |
1947 | } |
1948 | |
1949 | static void |
1950 | sg_remove_scat(Sg_fd * sfp, Sg_scatter_hold * schp) |
1951 | { |
1952 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, |
1953 | "sg_remove_scat: k_use_sg=%d\n" , schp->k_use_sg)); |
1954 | if (schp->pages && schp->sglist_len > 0) { |
1955 | if (!schp->dio_in_use) { |
1956 | int k; |
1957 | |
1958 | for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { |
1959 | SCSI_LOG_TIMEOUT(5, |
1960 | sg_printk(KERN_INFO, sfp->parentdp, |
1961 | "sg_remove_scat: k=%d, pg=0x%p\n" , |
1962 | k, schp->pages[k])); |
1963 | __free_pages(page: schp->pages[k], order: schp->page_order); |
1964 | } |
1965 | |
1966 | kfree(objp: schp->pages); |
1967 | } |
1968 | } |
1969 | memset(schp, 0, sizeof (*schp)); |
1970 | } |
1971 | |
1972 | static int |
1973 | sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) |
1974 | { |
1975 | Sg_scatter_hold *schp = &srp->data; |
1976 | int k, num; |
1977 | |
1978 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp, |
1979 | "sg_read_oxfer: num_read_xfer=%d\n" , |
1980 | num_read_xfer)); |
1981 | if ((!outp) || (num_read_xfer <= 0)) |
1982 | return 0; |
1983 | |
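     | /*
     |  * Copy the gathered data back to user space one scatter element at
     |  * a time; only the final element may be copied partially, once
     |  * fewer than "num" bytes remain.
     |  */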
1984 | num = 1 << (PAGE_SHIFT + schp->page_order); |
1985 | for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { |
1986 | if (num > num_read_xfer) { |
1987 | if (copy_to_user(outp, page_address(schp->pages[k]),
1988 | num_read_xfer))
1989 | return -EFAULT; |
1990 | break; |
1991 | } else { |
1992 | if (copy_to_user(outp, page_address(schp->pages[k]),
1993 | num))
1994 | return -EFAULT; |
1995 | num_read_xfer -= num; |
1996 | if (num_read_xfer <= 0) |
1997 | break; |
1998 | outp += num; |
1999 | } |
2000 | } |
2001 | |
2002 | return 0; |
2003 | } |
2004 | |
2005 | static void |
2006 | sg_build_reserve(Sg_fd * sfp, int req_size) |
2007 | { |
2008 | Sg_scatter_hold *schp = &sfp->reserve; |
2009 | |
2010 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, |
2011 | "sg_build_reserve: req_size=%d\n" , req_size)); |
2012 | do { |
2013 | if (req_size < PAGE_SIZE) |
2014 | req_size = PAGE_SIZE; |
2015 | if (0 == sg_build_indirect(schp, sfp, req_size))
2016 | return; |
2017 | else |
2018 | sg_remove_scat(sfp, schp); |
2019 | req_size >>= 1; /* divide by 2 */ |
2020 | } while (req_size > (PAGE_SIZE / 2)); |
2021 | } |
2022 | |
2023 | static void |
2024 | sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size) |
2025 | { |
2026 | Sg_scatter_hold *req_schp = &srp->data; |
2027 | Sg_scatter_hold *rsv_schp = &sfp->reserve; |
2028 | int k, num, rem; |
2029 | |
2030 | srp->res_used = 1; |
2031 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, sfp->parentdp, |
2032 | "sg_link_reserve: size=%d\n" , size)); |
2033 | rem = size; |
2034 | |
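     | /*
     |  * Count how many reserve-buffer elements are needed to cover "size"
     |  * bytes, then alias this request's scatter list onto the reserve
     |  * pages; the data is shared, not copied.
     |  */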
2035 | num = 1 << (PAGE_SHIFT + rsv_schp->page_order); |
2036 | for (k = 0; k < rsv_schp->k_use_sg; k++) { |
2037 | if (rem <= num) { |
2038 | req_schp->k_use_sg = k + 1; |
2039 | req_schp->sglist_len = rsv_schp->sglist_len; |
2040 | req_schp->pages = rsv_schp->pages; |
2041 | |
2042 | req_schp->bufflen = size; |
2043 | req_schp->page_order = rsv_schp->page_order; |
2044 | break; |
2045 | } else |
2046 | rem -= num; |
2047 | } |
2048 | |
2049 | if (k >= rsv_schp->k_use_sg) |
2050 | SCSI_LOG_TIMEOUT(1, sg_printk(KERN_INFO, sfp->parentdp, |
2051 | "sg_link_reserve: BAD size\n" )); |
2052 | } |
2053 | |
2054 | static void |
2055 | sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp) |
2056 | { |
2057 | Sg_scatter_hold *req_schp = &srp->data; |
2058 | |
2059 | SCSI_LOG_TIMEOUT(4, sg_printk(KERN_INFO, srp->parentfp->parentdp, |
2060 | "sg_unlink_reserve: req->k_use_sg=%d\n" , |
2061 | (int) req_schp->k_use_sg)); |
2062 | req_schp->k_use_sg = 0; |
2063 | req_schp->bufflen = 0; |
2064 | req_schp->pages = NULL; |
2065 | req_schp->page_order = 0; |
2066 | req_schp->sglist_len = 0; |
2067 | srp->res_used = 0; |
2068 | /* Called without mutex lock to avoid deadlock */ |
2069 | sfp->res_in_use = 0; |
2070 | } |
2071 | |
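     | /*
     |  * Find a completed request matching pack_id (-1 matches any) and
     |  * mark it as being returned. On a NULL return, *busy tells the
     |  * caller whether a matching request is still in flight, which
     |  * distinguishes "not done yet" from "no such request".
     |  */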
2072 | static Sg_request * |
2073 | sg_get_rq_mark(Sg_fd * sfp, int pack_id, bool *busy) |
2074 | { |
2075 | Sg_request *resp; |
2076 | unsigned long iflags; |
2077 | |
2078 | *busy = false; |
2079 | write_lock_irqsave(&sfp->rq_list_lock, iflags); |
2080 | list_for_each_entry(resp, &sfp->rq_list, entry) { |
2081 | /* look for requests that are not SG_IO owned */ |
2082 | if ((!resp->sg_io_owned) && |
2083 | ((-1 == pack_id) || (resp->header.pack_id == pack_id))) { |
2084 | switch (resp->done) { |
2085 | case 0: /* request active */ |
2086 | *busy = true; |
2087 | break; |
2088 | case 1: /* request done; response ready to return */ |
2089 | resp->done = 2; /* guard against other readers */ |
2090 | write_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
2091 | return resp; |
2092 | case 2: /* response already being returned */ |
2093 | break; |
2094 | } |
2095 | } |
2096 | } |
2097 | write_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
2098 | return NULL; |
2099 | } |
2100 | |
2101 | /* always adds to end of list */ |
2102 | static Sg_request * |
2103 | sg_add_request(Sg_fd * sfp) |
2104 | { |
2105 | int k; |
2106 | unsigned long iflags; |
2107 | Sg_request *rp = sfp->req_arr; |
2108 | |
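     | /*
     |  * Requests come from the fixed per-fd array req_arr[SG_MAX_QUEUE];
     |  * a slot is free while its parentfp is NULL. The first request on
     |  * an idle fd always takes slot 0; queueing further requests
     |  * requires command queuing (cmd_q) to be enabled.
     |  */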
2109 | write_lock_irqsave(&sfp->rq_list_lock, iflags); |
2110 | if (!list_empty(head: &sfp->rq_list)) { |
2111 | if (!sfp->cmd_q) |
2112 | goto out_unlock; |
2113 | |
2114 | for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) { |
2115 | if (!rp->parentfp) |
2116 | break; |
2117 | } |
2118 | if (k >= SG_MAX_QUEUE) |
2119 | goto out_unlock; |
2120 | } |
2121 | memset(rp, 0, sizeof (Sg_request)); |
2122 | rp->parentfp = sfp; |
2123 | rp->header.duration = jiffies_to_msecs(j: jiffies); |
2124 | list_add_tail(new: &rp->entry, head: &sfp->rq_list); |
2125 | write_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
2126 | return rp; |
2127 | out_unlock: |
2128 | write_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
2129 | return NULL; |
2130 | } |
2131 | |
2132 | /* Return of 1 for found; 0 for not found */ |
2133 | static int |
2134 | sg_remove_request(Sg_fd * sfp, Sg_request * srp) |
2135 | { |
2136 | unsigned long iflags; |
2137 | int res = 0; |
2138 | |
2139 | if (!sfp || !srp || list_empty(&sfp->rq_list))
2140 | return res; |
2141 | write_lock_irqsave(&sfp->rq_list_lock, iflags); |
2142 | if (!list_empty(head: &srp->entry)) { |
2143 | list_del(entry: &srp->entry); |
2144 | srp->parentfp = NULL; |
2145 | res = 1; |
2146 | } |
2147 | write_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
2148 | |
2149 | /* |
2150 | * If the device is detaching, wakeup any readers in case we just |
2151 | * removed the last response, which would leave nothing for them to |
2152 | * return other than -ENODEV. |
2153 | */ |
2154 | if (unlikely(atomic_read(&sfp->parentdp->detaching))) |
2155 | wake_up_interruptible_all(&sfp->read_wait); |
2156 | |
2157 | return res; |
2158 | } |
2159 | |
2160 | static Sg_fd * |
2161 | sg_add_sfp(Sg_device * sdp) |
2162 | { |
2163 | Sg_fd *sfp; |
2164 | unsigned long iflags; |
2165 | int bufflen; |
2166 | |
2167 | sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN);
2168 | if (!sfp) |
2169 | return ERR_PTR(-ENOMEM);
2170 | |
2171 | init_waitqueue_head(&sfp->read_wait); |
2172 | rwlock_init(&sfp->rq_list_lock); |
2173 | INIT_LIST_HEAD(&sfp->rq_list);
2174 | kref_init(&sfp->f_ref);
2175 | mutex_init(&sfp->f_mutex); |
2176 | sfp->timeout = SG_DEFAULT_TIMEOUT; |
2177 | sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; |
2178 | sfp->force_packid = SG_DEF_FORCE_PACK_ID; |
2179 | sfp->cmd_q = SG_DEF_COMMAND_Q; |
2180 | sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; |
2181 | sfp->parentdp = sdp; |
2182 | write_lock_irqsave(&sdp->sfd_lock, iflags); |
2183 | if (atomic_read(v: &sdp->detaching)) { |
2184 | write_unlock_irqrestore(&sdp->sfd_lock, iflags); |
2185 | kfree(sfp);
2186 | return ERR_PTR(-ENODEV);
2187 | } |
2188 | list_add_tail(new: &sfp->sfd_siblings, head: &sdp->sfds); |
2189 | write_unlock_irqrestore(&sdp->sfd_lock, iflags); |
2190 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp, |
2191 | "sg_add_sfp: sfp=0x%p\n" , sfp)); |
2192 | if (unlikely(sg_big_buff != def_reserved_size)) |
2193 | sg_big_buff = def_reserved_size; |
2194 | |
2195 | bufflen = min_t(int, sg_big_buff, |
2196 | max_sectors_bytes(sdp->device->request_queue)); |
2197 | sg_build_reserve(sfp, bufflen);
2198 | SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
2199 | "sg_add_sfp: bufflen=%d, k_use_sg=%d\n",
2200 | sfp->reserve.bufflen, |
2201 | sfp->reserve.k_use_sg)); |
2202 | |
2203 | kref_get(kref: &sdp->d_ref); |
2204 | __module_get(THIS_MODULE); |
2205 | return sfp; |
2206 | } |
2207 | |
2208 | static void |
2209 | sg_remove_sfp_usercontext(struct work_struct *work) |
2210 | { |
2211 | struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); |
2212 | struct sg_device *sdp = sfp->parentdp; |
2213 | struct scsi_device *device = sdp->device; |
2214 | Sg_request *srp; |
2215 | unsigned long iflags; |
2216 | |
2217 | /* Cleanup any responses which were never read(). */ |
2218 | write_lock_irqsave(&sfp->rq_list_lock, iflags); |
2219 | while (!list_empty(&sfp->rq_list)) {
2220 | srp = list_first_entry(&sfp->rq_list, Sg_request, entry);
2221 | sg_finish_rem_req(srp);
2222 | list_del(&srp->entry);
2223 | srp->parentfp = NULL; |
2224 | } |
2225 | write_unlock_irqrestore(&sfp->rq_list_lock, iflags); |
2226 | |
2227 | if (sfp->reserve.bufflen > 0) { |
2228 | SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, |
2229 | "sg_remove_sfp: bufflen=%d, k_use_sg=%d\n" , |
2230 | (int) sfp->reserve.bufflen, |
2231 | (int) sfp->reserve.k_use_sg)); |
2232 | sg_remove_scat(sfp, schp: &sfp->reserve); |
2233 | } |
2234 | |
2235 | SCSI_LOG_TIMEOUT(6, sg_printk(KERN_INFO, sdp, |
2236 | "sg_remove_sfp: sfp=0x%p\n" , sfp)); |
2237 | kfree(objp: sfp); |
2238 | |
2239 | kref_put(kref: &sdp->d_ref, release: sg_device_destroy); |
2240 | scsi_device_put(device); |
2241 | module_put(THIS_MODULE); |
2242 | } |
2243 | |
2244 | static void |
2245 | sg_remove_sfp(struct kref *kref) |
2246 | { |
2247 | struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref); |
2248 | struct sg_device *sdp = sfp->parentdp; |
2249 | unsigned long iflags; |
2250 | |
2251 | write_lock_irqsave(&sdp->sfd_lock, iflags); |
2252 | list_del(entry: &sfp->sfd_siblings); |
2253 | write_unlock_irqrestore(&sdp->sfd_lock, iflags); |
2254 | |
2255 | INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); |
2256 | schedule_work(work: &sfp->ew.work); |
2257 | } |
2258 | |
2259 | #ifdef CONFIG_SCSI_PROC_FS |
2260 | static int |
2261 | sg_idr_max_id(int id, void *p, void *data) |
2262 | { |
2263 | int *k = data; |
2264 | |
2265 | if (*k < id) |
2266 | *k = id; |
2267 | |
2268 | return 0; |
2269 | } |
2270 | |
2271 | static int |
2272 | sg_last_dev(void) |
2273 | { |
2274 | int k = -1; |
2275 | unsigned long iflags; |
2276 | |
2277 | read_lock_irqsave(&sg_index_lock, iflags); |
2278 | idr_for_each(&sg_index_idr, fn: sg_idr_max_id, data: &k); |
2279 | read_unlock_irqrestore(&sg_index_lock, iflags); |
2280 | return k + 1; /* origin 1 */ |
2281 | } |
2282 | #endif |
2283 | |
2284 | /* must be called with sg_index_lock held */ |
2285 | static Sg_device *sg_lookup_dev(int dev) |
2286 | { |
2287 | return idr_find(&sg_index_idr, id: dev); |
2288 | } |
2289 | |
2290 | static Sg_device * |
2291 | sg_get_dev(int dev) |
2292 | { |
2293 | struct sg_device *sdp; |
2294 | unsigned long flags; |
2295 | |
2296 | read_lock_irqsave(&sg_index_lock, flags); |
2297 | sdp = sg_lookup_dev(dev); |
2298 | if (!sdp) |
2299 | sdp = ERR_PTR(-ENXIO);
2300 | else if (atomic_read(&sdp->detaching)) {
2301 | /* If sdp->detaching, then the refcount may already be 0, in
2302 | * which case it would be a bug to do kref_get().
2303 | */
2304 | sdp = ERR_PTR(-ENODEV);
2305 | } else
2306 | kref_get(&sdp->d_ref);
2307 | read_unlock_irqrestore(&sg_index_lock, flags); |
2308 | |
2309 | return sdp; |
2310 | } |
2311 | |
2312 | #ifdef CONFIG_SCSI_PROC_FS |
2313 | static int sg_proc_seq_show_int(struct seq_file *s, void *v); |
2314 | |
2315 | static int sg_proc_single_open_adio(struct inode *inode, struct file *file); |
2316 | static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, |
2317 | size_t count, loff_t *off); |
2318 | static const struct proc_ops adio_proc_ops = { |
2319 | .proc_open = sg_proc_single_open_adio, |
2320 | .proc_read = seq_read, |
2321 | .proc_lseek = seq_lseek, |
2322 | .proc_write = sg_proc_write_adio, |
2323 | .proc_release = single_release, |
2324 | }; |
2325 | |
2326 | static int sg_proc_single_open_dressz(struct inode *inode, struct file *file); |
2327 | static ssize_t sg_proc_write_dressz(struct file *filp, |
2328 | const char __user *buffer, size_t count, loff_t *off); |
2329 | static const struct proc_ops dressz_proc_ops = { |
2330 | .proc_open = sg_proc_single_open_dressz, |
2331 | .proc_read = seq_read, |
2332 | .proc_lseek = seq_lseek, |
2333 | .proc_write = sg_proc_write_dressz, |
2334 | .proc_release = single_release, |
2335 | }; |
2336 | |
2337 | static int sg_proc_seq_show_version(struct seq_file *s, void *v); |
2338 | static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v); |
2339 | static int sg_proc_seq_show_dev(struct seq_file *s, void *v); |
2340 | static void * dev_seq_start(struct seq_file *s, loff_t *pos); |
2341 | static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos); |
2342 | static void dev_seq_stop(struct seq_file *s, void *v); |
2343 | static const struct seq_operations dev_seq_ops = { |
2344 | .start = dev_seq_start, |
2345 | .next = dev_seq_next, |
2346 | .stop = dev_seq_stop, |
2347 | .show = sg_proc_seq_show_dev, |
2348 | }; |
2349 | |
2350 | static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v); |
2351 | static const struct seq_operations devstrs_seq_ops = { |
2352 | .start = dev_seq_start, |
2353 | .next = dev_seq_next, |
2354 | .stop = dev_seq_stop, |
2355 | .show = sg_proc_seq_show_devstrs, |
2356 | }; |
2357 | |
2358 | static int sg_proc_seq_show_debug(struct seq_file *s, void *v); |
2359 | static const struct seq_operations debug_seq_ops = { |
2360 | .start = dev_seq_start, |
2361 | .next = dev_seq_next, |
2362 | .stop = dev_seq_stop, |
2363 | .show = sg_proc_seq_show_debug, |
2364 | }; |
2365 | |
2366 | static int |
2367 | sg_proc_init(void) |
2368 | { |
2369 | struct proc_dir_entry *p; |
2370 | |
2371 | p = proc_mkdir("scsi/sg" , NULL); |
2372 | if (!p) |
2373 | return 1; |
2374 | |
2375 | proc_create(name: "allow_dio" , S_IRUGO | S_IWUSR, parent: p, proc_ops: &adio_proc_ops); |
2376 | proc_create_seq("debug" , S_IRUGO, p, &debug_seq_ops); |
2377 | proc_create(name: "def_reserved_size" , S_IRUGO | S_IWUSR, parent: p, proc_ops: &dressz_proc_ops); |
2378 | proc_create_single("device_hdr" , S_IRUGO, p, sg_proc_seq_show_devhdr); |
2379 | proc_create_seq("devices" , S_IRUGO, p, &dev_seq_ops); |
2380 | proc_create_seq("device_strs" , S_IRUGO, p, &devstrs_seq_ops); |
2381 | proc_create_single("version" , S_IRUGO, p, sg_proc_seq_show_version); |
2382 | return 0; |
2383 | } |
2384 | |
2385 | |
2386 | static int sg_proc_seq_show_int(struct seq_file *s, void *v) |
2387 | { |
2388 | seq_printf(m: s, fmt: "%d\n" , *((int *)s->private)); |
2389 | return 0; |
2390 | } |
2391 | |
2392 | static int sg_proc_single_open_adio(struct inode *inode, struct file *file) |
2393 | { |
2394 | return single_open(file, sg_proc_seq_show_int, &sg_allow_dio); |
2395 | } |
2396 | |
2397 | static ssize_t |
2398 | sg_proc_write_adio(struct file *filp, const char __user *buffer, |
2399 | size_t count, loff_t *off) |
2400 | { |
2401 | int err; |
2402 | unsigned long num; |
2403 | |
2404 | if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) |
2405 | return -EACCES; |
2406 | err = kstrtoul_from_user(buffer, count, 0, &num);
2407 | if (err) |
2408 | return err; |
2409 | sg_allow_dio = num ? 1 : 0; |
2410 | return count; |
2411 | } |
2412 | |
2413 | static int sg_proc_single_open_dressz(struct inode *inode, struct file *file) |
2414 | { |
2415 | return single_open(file, sg_proc_seq_show_int, &sg_big_buff); |
2416 | } |
2417 | |
2418 | static ssize_t |
2419 | sg_proc_write_dressz(struct file *filp, const char __user *buffer, |
2420 | size_t count, loff_t *off) |
2421 | { |
2422 | int err; |
2423 | unsigned long k = ULONG_MAX; |
2424 | |
2425 | if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) |
2426 | return -EACCES; |
2427 | |
2428 | err = kstrtoul_from_user(buffer, count, 0, &k);
2429 | if (err) |
2430 | return err; |
2431 | if (k <= 1048576) { /* limit "big buff" to 1 MB */ |
2432 | sg_big_buff = k; |
2433 | return count; |
2434 | } |
2435 | return -ERANGE; |
2436 | } |
2437 | |
2438 | static int sg_proc_seq_show_version(struct seq_file *s, void *v) |
2439 | { |
2440 | seq_printf(m: s, fmt: "%d\t%s [%s]\n" , sg_version_num, SG_VERSION_STR, |
2441 | sg_version_date); |
2442 | return 0; |
2443 | } |
2444 | |
2445 | static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v) |
2446 | { |
2447 | seq_puts(m: s, s: "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\tonline\n" ); |
2448 | return 0; |
2449 | } |
2450 | |
2451 | struct sg_proc_deviter { |
2452 | loff_t index; |
2453 | size_t max; |
2454 | }; |
2455 | |
2456 | static void * dev_seq_start(struct seq_file *s, loff_t *pos) |
2457 | { |
2458 | struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);
2459 | |
2460 | s->private = it; |
2461 | if (! it) |
2462 | return NULL; |
2463 | |
2464 | it->index = *pos; |
2465 | it->max = sg_last_dev(); |
2466 | if (it->index >= it->max) |
2467 | return NULL; |
2468 | return it; |
2469 | } |
2470 | |
2471 | static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos) |
2472 | { |
2473 | struct sg_proc_deviter * it = s->private; |
2474 | |
2475 | *pos = ++it->index; |
2476 | return (it->index < it->max) ? it : NULL; |
2477 | } |
2478 | |
2479 | static void dev_seq_stop(struct seq_file *s, void *v) |
2480 | { |
2481 | kfree(objp: s->private); |
2482 | } |
2483 | |
2484 | static int sg_proc_seq_show_dev(struct seq_file *s, void *v) |
2485 | { |
2486 | struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; |
2487 | Sg_device *sdp; |
2488 | struct scsi_device *scsidp; |
2489 | unsigned long iflags; |
2490 | |
2491 | read_lock_irqsave(&sg_index_lock, iflags); |
2492 | sdp = it ? sg_lookup_dev(dev: it->index) : NULL; |
2493 | if ((NULL == sdp) || (NULL == sdp->device) || |
2494 | (atomic_read(v: &sdp->detaching))) |
2495 | seq_puts(m: s, s: "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n" ); |
2496 | else { |
2497 | scsidp = sdp->device; |
2498 | seq_printf(m: s, fmt: "%d\t%d\t%d\t%llu\t%d\t%d\t%d\t%d\t%d\n" , |
2499 | scsidp->host->host_no, scsidp->channel, |
2500 | scsidp->id, scsidp->lun, (int) scsidp->type, |
2501 | 1, |
2502 | (int) scsidp->queue_depth, |
2503 | (int) scsi_device_busy(sdev: scsidp), |
2504 | (int) scsi_device_online(sdev: scsidp)); |
2505 | } |
2506 | read_unlock_irqrestore(&sg_index_lock, iflags); |
2507 | return 0; |
2508 | } |
2509 | |
2510 | static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v) |
2511 | { |
2512 | struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; |
2513 | Sg_device *sdp; |
2514 | struct scsi_device *scsidp; |
2515 | unsigned long iflags; |
2516 | |
2517 | read_lock_irqsave(&sg_index_lock, iflags); |
2518 | sdp = it ? sg_lookup_dev(dev: it->index) : NULL; |
2519 | scsidp = sdp ? sdp->device : NULL; |
2520 | if (sdp && scsidp && (!atomic_read(v: &sdp->detaching))) |
2521 | seq_printf(m: s, fmt: "%8.8s\t%16.16s\t%4.4s\n" , |
2522 | scsidp->vendor, scsidp->model, scsidp->rev); |
2523 | else |
2524 | seq_puts(m: s, s: "<no active device>\n" ); |
2525 | read_unlock_irqrestore(&sg_index_lock, iflags); |
2526 | return 0; |
2527 | } |
2528 | |
2529 | /* must be called while holding sg_index_lock */ |
2530 | static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) |
2531 | { |
2532 | int k, new_interface, blen, usg; |
2533 | Sg_request *srp; |
2534 | Sg_fd *fp; |
2535 | const sg_io_hdr_t *hp; |
2536 | const char * cp; |
2537 | unsigned int ms; |
2538 | |
2539 | k = 0; |
2540 | list_for_each_entry(fp, &sdp->sfds, sfd_siblings) { |
2541 | k++; |
2542 | read_lock(&fp->rq_list_lock); /* irqs already disabled */ |
2543 | seq_printf(m: s, fmt: " FD(%d): timeout=%dms bufflen=%d " |
2544 | "(res)sgat=%d low_dma=%d\n" , k, |
2545 | jiffies_to_msecs(j: fp->timeout), |
2546 | fp->reserve.bufflen, |
2547 | (int) fp->reserve.k_use_sg, 0); |
2548 | seq_printf(m: s, fmt: " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n" , |
2549 | (int) fp->cmd_q, (int) fp->force_packid, |
2550 | (int) fp->keep_orphan); |
2551 | list_for_each_entry(srp, &fp->rq_list, entry) { |
2552 | hp = &srp->header; |
2553 | new_interface = (hp->interface_id == '\0') ? 0 : 1; |
2554 | if (srp->res_used) { |
2555 | if (new_interface && |
2556 | (SG_FLAG_MMAP_IO & hp->flags)) |
2557 | cp = " mmap>> " ; |
2558 | else |
2559 | cp = " rb>> " ; |
2560 | } else { |
2561 | if (SG_INFO_DIRECT_IO_MASK & hp->info) |
2562 | cp = " dio>> " ; |
2563 | else |
2564 | cp = " " ; |
2565 | } |
2566 | seq_puts(m: s, s: cp); |
2567 | blen = srp->data.bufflen; |
2568 | usg = srp->data.k_use_sg; |
2569 | seq_puts(m: s, s: srp->done ? |
2570 | ((1 == srp->done) ? "rcv:" : "fin:" ) |
2571 | : "act:" ); |
2572 | seq_printf(m: s, fmt: " id=%d blen=%d" , |
2573 | srp->header.pack_id, blen); |
2574 | if (srp->done) |
2575 | seq_printf(m: s, fmt: " dur=%d" , hp->duration); |
2576 | else { |
2577 | ms = jiffies_to_msecs(j: jiffies); |
2578 | seq_printf(m: s, fmt: " t_o/elap=%d/%d" , |
2579 | (new_interface ? hp->timeout : |
2580 | jiffies_to_msecs(j: fp->timeout)), |
2581 | (ms > hp->duration ? ms - hp->duration : 0)); |
2582 | } |
2583 | seq_printf(m: s, fmt: "ms sgat=%d op=0x%02x\n" , usg, |
2584 | (int) srp->data.cmd_opcode); |
2585 | } |
2586 | if (list_empty(head: &fp->rq_list)) |
2587 | seq_puts(m: s, s: " No requests active\n" ); |
2588 | read_unlock(&fp->rq_list_lock); |
2589 | } |
2590 | } |
2591 | |
2592 | static int sg_proc_seq_show_debug(struct seq_file *s, void *v) |
2593 | { |
2594 | struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; |
2595 | Sg_device *sdp; |
2596 | unsigned long iflags; |
2597 | |
2598 | if (it && (0 == it->index)) |
2599 | seq_printf(m: s, fmt: "max_active_device=%d def_reserved_size=%d\n" , |
2600 | (int)it->max, sg_big_buff); |
2601 | |
2602 | read_lock_irqsave(&sg_index_lock, iflags); |
2603 | sdp = it ? sg_lookup_dev(dev: it->index) : NULL; |
2604 | if (NULL == sdp) |
2605 | goto skip; |
2606 | read_lock(&sdp->sfd_lock); |
2607 | if (!list_empty(head: &sdp->sfds)) { |
2608 | seq_printf(m: s, fmt: " >>> device=%s " , sdp->name); |
2609 | if (atomic_read(v: &sdp->detaching)) |
2610 | seq_puts(m: s, s: "detaching pending close " ); |
2611 | else if (sdp->device) { |
2612 | struct scsi_device *scsidp = sdp->device; |
2613 | |
2614 | seq_printf(m: s, fmt: "%d:%d:%d:%llu em=%d" , |
2615 | scsidp->host->host_no, |
2616 | scsidp->channel, scsidp->id, |
2617 | scsidp->lun, |
2618 | scsidp->host->hostt->emulated); |
2619 | } |
2620 | seq_printf(m: s, fmt: " sg_tablesize=%d excl=%d open_cnt=%d\n" , |
2621 | sdp->sg_tablesize, sdp->exclude, sdp->open_cnt); |
2622 | sg_proc_debug_helper(s, sdp); |
2623 | } |
2624 | read_unlock(&sdp->sfd_lock); |
2625 | skip: |
2626 | read_unlock_irqrestore(&sg_index_lock, iflags); |
2627 | return 0; |
2628 | } |
2629 | |
2630 | #endif /* CONFIG_SCSI_PROC_FS */ |
2631 | |
2632 | module_init(init_sg); |
2633 | module_exit(exit_sg); |
2634 | |