// SPDX-License-Identifier: GPL-2.0-only
/*
 * HighPoint RR3xxx/4xxx controller driver for Linux
 * Copyright (C) 2006-2015 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Please report bugs/comments/suggestions to linux@highpoint-tech.com
 *
 * For more information, visit http://www.highpoint-tech.com
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/div64.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_host.h>

#include "hptiop.h"

MODULE_AUTHOR("HighPoint Technologies, Inc.");
MODULE_DESCRIPTION("HighPoint RocketRAID 3xxx/4xxx Controller Driver");

static char driver_name[] = "hptiop";
static const char driver_name_long[] = "RocketRAID 3xxx/4xxx Controller driver";
static const char driver_ver[] = "v1.10.0";

static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec);
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req);
static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag);
static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg);

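/*
 * Poll the ITL inbound queue for up to @millisec ms until the IOP hands
 * back a valid request frame offset (i.e. the firmware is up), then
 * return the frame through the outbound queue; the trailing readl
 * flushes the posted write.  Returns 0 when ready, -1 on timeout.
 */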
static int iop_wait_ready_itl(struct hptiop_hba *hba, u32 millisec)
{
	u32 req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = readl(&hba->u.itl.iop->inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		msleep(1);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		writel(req, &hba->u.itl.iop->outbound_queue);
		readl(&hba->u.itl.iop->outbound_intstatus);
		return 0;
	}

	return -1;
}

static int iop_wait_ready_mv(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static int iop_wait_ready_mvfrey(struct hptiop_hba *hba, u32 millisec)
{
	return iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec);
}

static void hptiop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	if (tag & IOPMU_QUEUE_ADDR_HOST_BIT)
		hptiop_host_request_callback_itl(hba,
				tag & ~IOPMU_QUEUE_ADDR_HOST_BIT);
	else
		hptiop_iop_request_callback_itl(hba, tag);
}

static void hptiop_drain_outbound_queue_itl(struct hptiop_hba *hba)
{
	u32 req;

	while ((req = readl(&hba->u.itl.iop->outbound_queue)) !=
			IOPMU_QUEUE_EMPTY) {

		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			struct hpt_iop_request_header __iomem *p;

			p = (struct hpt_iop_request_header __iomem *)
				((char __iomem *)hba->u.itl.iop + req);

			if (readl(&p->flags) & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				if (readl(&p->context))
					hptiop_request_callback_itl(hba, req);
				else
					writel(1, &p->context);
			}
			else
				hptiop_request_callback_itl(hba, req);
		}
	}
}

static int iop_intr_itl(struct hptiop_hba *hba)
{
	struct hpt_iopmu_itl __iomem *iop = hba->u.itl.iop;
	void __iomem *plx = hba->u.itl.plx;
	u32 status;
	int ret = 0;

	if (plx && readl(plx + 0x11C5C) & 0xf)
		writel(1, plx + 0x11C60);

	status = readl(&iop->outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u32 msg = readl(&iop->outbound_msgaddr0);

		dprintk("received outbound msg %x\n", msg);
		writel(IOPMU_OUTBOUND_INT_MSG0, &iop->outbound_intstatus);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}

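/*
 * The MV message unit keeps 8-byte queue entries with head/tail indices
 * in MMIO.  Pop one entry from the outbound ring, advancing and wrapping
 * the tail; returns 0 when the ring is empty.
 */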
static u64 mv_outbound_read(struct hpt_iopmu_mv __iomem *mu)
{
	u32 outbound_tail = readl(&mu->outbound_tail);
	u32 outbound_head = readl(&mu->outbound_head);

	if (outbound_tail != outbound_head) {
		u64 p;

		memcpy_fromio(&p, &mu->outbound_q[mu->outbound_tail], 8);
		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;
		writel(outbound_tail, &mu->outbound_tail);
		return p;
	} else
		return 0;
}

static void mv_inbound_write(u64 p, struct hptiop_hba *hba)
{
	u32 inbound_head = readl(&hba->u.mv.mu->inbound_head);
	u32 head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	memcpy_toio(&hba->u.mv.mu->inbound_q[inbound_head], &p, 8);
	writel(head, &hba->u.mv.mu->inbound_head);
	writel(MVIOP_MU_INBOUND_INT_POSTQUEUE,
		&hba->u.mv.regs->inbound_doorbell);
}

static void hptiop_request_callback_mv(struct hptiop_hba *hba, u64 tag)
{
	u32 req_type = (tag >> 5) & 0x7;
	struct hpt_iop_request_scsi_command *req;

	dprintk("hptiop_request_callback_mv: tag=%llx\n", tag);

	BUG_ON((tag & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) == 0);

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[tag >> 8].req_virt;
		if (likely(tag & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);

		hptiop_finish_scsi_req(hba, tag>>8, req);
		break;

	default:
		break;
	}
}

static int iop_intr_mv(struct hptiop_hba *hba)
{
	u32 status;
	int ret = 0;

	status = readl(&hba->u.mv.regs->outbound_doorbell);
	writel(~status, &hba->u.mv.regs->outbound_doorbell);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u32 msg;
		msg = readl(&hba->u.mv.mu->outbound_msg);
		dprintk("received outbound msg %x\n", msg);
		hptiop_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		u64 tag;

		while ((tag = mv_outbound_read(hba->u.mv.mu)))
			hptiop_request_callback_mv(hba, tag);
		ret = 1;
	}

	return ret;
}

static void hptiop_request_callback_mvfrey(struct hptiop_hba *hba, u32 _tag)
{
	u32 req_type = _tag & 0xf;
	struct hpt_iop_request_scsi_command *req;

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->msg_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		req = hba->reqs[(_tag >> 4) & 0xff].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = IOP_RESULT_SUCCESS;
		hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req);
		break;

	default:
		break;
	}
}

static int iop_intr_mvfrey(struct hptiop_hba *hba)
{
	u32 _tag, status, cptr, cur_rptr;
	int ret = 0;

	if (hba->initialized)
		writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));

	status = readl(&(hba->u.mvfrey.mu->f0_doorbell));
	if (status) {
		writel(status, &(hba->u.mvfrey.mu->f0_doorbell));
		if (status & CPU_TO_F0_DRBL_MSG_BIT) {
			u32 msg = readl(&(hba->u.mvfrey.mu->cpu_to_f0_msg_a));
			dprintk("received outbound msg %x\n", msg);
			hptiop_message_callback(hba, msg);
		}
		ret = 1;
	}

	status = readl(&(hba->u.mvfrey.mu->isr_cause));
	if (status) {
		writel(status, &(hba->u.mvfrey.mu->isr_cause));
		do {
			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
			cur_rptr = hba->u.mvfrey.outlist_rptr;
			while (cur_rptr != cptr) {
				cur_rptr++;
				if (cur_rptr == hba->u.mvfrey.list_count)
					cur_rptr = 0;

				_tag = hba->u.mvfrey.outlist[cur_rptr].val;
				BUG_ON(!(_tag & IOPMU_QUEUE_MASK_HOST_BITS));
				hptiop_request_callback_mvfrey(hba, _tag);
				ret = 1;
			}
			hba->u.mvfrey.outlist_rptr = cur_rptr;
		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
	}

	if (hba->initialized)
		writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));

	return ret;
}

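/*
 * Synchronous request submission, one flavour per IOP family: the
 * request is flagged IOP_REQUEST_FLAG_SYNC_REQUEST and posted, then the
 * interrupt handler is polled by hand until the firmware signals
 * completion (via ->context for ITL, via hba->msg_done for MV/MVFrey)
 * or @millisec ms elapse.
 */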
static int iop_send_sync_request_itl(struct hptiop_hba *hba,
					void __iomem *_req, u32 millisec)
{
	struct hpt_iop_request_header __iomem *req = _req;
	u32 i;

	writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags);
	writel(0, &req->context);
	writel((unsigned long)req - (unsigned long)hba->u.itl.iop,
			&hba->u.itl.iop->inbound_queue);
	readl(&hba->u.itl.iop->outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		iop_intr_itl(hba);
		if (readl(&req->context))
			return 0;
		msleep(1);
	}

	return -1;
}

static int iop_send_sync_request_mv(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr = hba->u.mv.internal_req;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	mv_inbound_write(hba->u.mv.internal_req_phy |
			MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bits, hba);

	for (i = 0; i < millisec; i++) {
		iop_intr_mv(hba);
		if (hba->msg_done)
			return 0;
		msleep(1);
	}
	return -1;
}

static int iop_send_sync_request_mvfrey(struct hptiop_hba *hba,
					u32 size_bits, u32 millisec)
{
	struct hpt_iop_request_header *reqhdr =
		hba->u.mvfrey.internal_req.req_virt;
	u32 i;

	hba->msg_done = 0;
	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_SYNC_REQUEST);
	hba->ops->post_req(hba, &(hba->u.mvfrey.internal_req));

	for (i = 0; i < millisec; i++) {
		iop_intr_mvfrey(hba);
		if (hba->msg_done)
			break;
		msleep(1);
	}
	return hba->msg_done ? 0 : -1;
}

static void hptiop_post_msg_itl(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.itl.iop->inbound_msgaddr0);
	readl(&hba->u.itl.iop->outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &hba->u.mv.mu->inbound_msg);
	writel(MVIOP_MU_INBOUND_INT_MSG, &hba->u.mv.regs->inbound_doorbell);
	readl(&hba->u.mv.regs->inbound_doorbell);
}

static void hptiop_post_msg_mvfrey(struct hptiop_hba *hba, u32 msg)
{
	writel(msg, &(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
	readl(&(hba->u.mvfrey.mu->f0_to_cpu_msg_a));
}

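/*
 * Post a message register command and busy-wait for the firmware to
 * acknowledge it via hptiop_message_callback().  Interrupts are masked
 * for the duration; the interrupt handler is invoked by hand under
 * host_lock until msg_done is set or the timeout expires.
 */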
static int iop_send_sync_msg(struct hptiop_hba *hba, u32 msg, u32 millisec)
{
	u32 i;

	hba->msg_done = 0;
	hba->ops->disable_intr(hba);
	hba->ops->post_msg(hba, msg);

	for (i = 0; i < millisec; i++) {
		spin_lock_irq(hba->host->host_lock);
		hba->ops->iop_intr(hba);
		spin_unlock_irq(hba->host->host_lock);
		if (hba->msg_done)
			break;
		msleep(1);
	}

	hba->ops->enable_intr(hba);
	return hba->msg_done ? 0 : -1;
}

static int iop_get_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u32 req32;
	struct hpt_iop_request_get_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_get_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_get_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy_fromio(config, req, sizeof(*config));
	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_get_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *req = hba->u.mv.internal_req;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_get_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Get config send cmd failed\n");
		return -1;
	}

	memcpy(config, req, sizeof(struct hpt_iop_request_get_config));
	return 0;
}

static int iop_get_config_mvfrey(struct hptiop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
			info->header.type != IOP_REQUEST_TYPE_GET_CONFIG)
		return -1;

	config->interface_version = info->interface_version;
	config->firmware_version = info->firmware_version;
	config->max_requests = info->max_requests;
	config->request_size = info->request_size;
	config->max_sg_count = info->max_sg_count;
	config->data_transfer_length = info->data_transfer_length;
	config->alignment_mask = info->alignment_mask;
	config->max_devices = info->max_devices;
	config->sdram_size = info->sdram_size;

	return 0;
}

static int iop_set_config_itl(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u32 req32;
	struct hpt_iop_request_set_config __iomem *req;

	req32 = readl(&hba->u.itl.iop->inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	req = (struct hpt_iop_request_set_config __iomem *)
			((unsigned long)hba->u.itl.iop + req32);

	memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header),
		(u8 *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	writel(0, &req->header.flags);
	writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type);
	writel(sizeof(struct hpt_iop_request_set_config), &req->header.size);
	writel(IOP_RESULT_PENDING, &req->header.result);

	if (iop_send_sync_request_itl(hba, req, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	writel(req32, &hba->u.itl.iop->outbound_queue);
	return 0;
}

static int iop_set_config_mv(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req = hba->u.mv.internal_req;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mv(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

static int iop_set_config_mvfrey(struct hptiop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req =
		hba->u.mvfrey.internal_req.req_virt;

	memcpy(req, config, sizeof(struct hpt_iop_request_set_config));
	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG);
	req->header.size =
		cpu_to_le32(sizeof(struct hpt_iop_request_set_config));
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5);
	req->header.context_hi32 = 0;

	if (iop_send_sync_request_mvfrey(hba, 0, 20000)) {
		dprintk("Set config send cmd failed\n");
		return -1;
	}

	return 0;
}

static void hptiop_enable_intr_itl(struct hptiop_hba *hba)
{
	writel(~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0),
		&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_enable_intr_mv(struct hptiop_hba *hba)
{
	writel(MVIOP_MU_OUTBOUND_INT_POSTQUEUE | MVIOP_MU_OUTBOUND_INT_MSG,
		&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_enable_intr_mvfrey(struct hptiop_hba *hba)
{
	writel(CPU_TO_F0_DRBL_MSG_BIT, &(hba->u.mvfrey.mu->f0_doorbell_enable));
	writel(0x1, &(hba->u.mvfrey.mu->isr_enable));
	writel(0x1010, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
}

static int hptiop_initialize_iop(struct hptiop_hba *hba)
{
	/* enable interrupts */
	hba->ops->enable_intr(hba);

	hba->initialized = 1;

	/* start background tasks */
	if (iop_send_sync_msg(hba,
			IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		printk(KERN_ERR "scsi%d: fail to start background task\n",
			hba->host->host_no);
		return -1;
	}
	return 0;
}

static void __iomem *hptiop_map_pci_bar(struct hptiop_hba *hba, int index)
{
	u32 mem_base_phy, length;
	void __iomem *mem_base_virt;

	struct pci_dev *pcidev = hba->pcidev;

	if (!(pci_resource_flags(pcidev, index) & IORESOURCE_MEM)) {
		printk(KERN_ERR "scsi%d: pci resource invalid\n",
				hba->host->host_no);
		return NULL;
	}

	mem_base_phy = pci_resource_start(pcidev, index);
	length = pci_resource_len(pcidev, index);
	mem_base_virt = ioremap(mem_base_phy, length);

	if (!mem_base_virt) {
		printk(KERN_ERR "scsi%d: Fail to ioremap memory space\n",
				hba->host->host_no);
		return NULL;
	}
	return mem_base_virt;
}

static int hptiop_map_pci_bar_itl(struct hptiop_hba *hba)
{
	struct pci_dev *pcidev = hba->pcidev;
	hba->u.itl.iop = hptiop_map_pci_bar(hba, 0);
	if (hba->u.itl.iop == NULL)
		return -1;
	if ((pcidev->device & 0xff00) == 0x4400) {
		hba->u.itl.plx = hba->u.itl.iop;
		hba->u.itl.iop = hptiop_map_pci_bar(hba, 2);
		if (hba->u.itl.iop == NULL) {
			iounmap(hba->u.itl.plx);
			return -1;
		}
	}
	return 0;
}

static void hptiop_unmap_pci_bar_itl(struct hptiop_hba *hba)
{
	if (hba->u.itl.plx)
		iounmap(hba->u.itl.plx);
	iounmap(hba->u.itl.iop);
}

static int hptiop_map_pci_bar_mv(struct hptiop_hba *hba)
{
	hba->u.mv.regs = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mv.regs == NULL)
		return -1;

	hba->u.mv.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mv.mu == NULL) {
		iounmap(hba->u.mv.regs);
		return -1;
	}

	return 0;
}

static int hptiop_map_pci_bar_mvfrey(struct hptiop_hba *hba)
{
	hba->u.mvfrey.config = hptiop_map_pci_bar(hba, 0);
	if (hba->u.mvfrey.config == NULL)
		return -1;

	hba->u.mvfrey.mu = hptiop_map_pci_bar(hba, 2);
	if (hba->u.mvfrey.mu == NULL) {
		iounmap(hba->u.mvfrey.config);
		return -1;
	}

	return 0;
}

static void hptiop_unmap_pci_bar_mv(struct hptiop_hba *hba)
{
	iounmap(hba->u.mv.regs);
	iounmap(hba->u.mv.mu);
}

static void hptiop_unmap_pci_bar_mvfrey(struct hptiop_hba *hba)
{
	iounmap(hba->u.mvfrey.config);
	iounmap(hba->u.mvfrey.mu);
}

static void hptiop_message_callback(struct hptiop_hba *hba, u32 msg)
{
	dprintk("iop message 0x%x\n", msg);

	if (msg == IOPMU_INBOUND_MSG0_NOP ||
		msg == IOPMU_INBOUND_MSG0_RESET_COMM)
		hba->msg_done = 1;

	if (!hba->initialized)
		return;

	if (msg == IOPMU_INBOUND_MSG0_RESET) {
		atomic_set(&hba->resetting, 0);
		wake_up(&hba->reset_wq);
	}
	else if (msg <= IOPMU_INBOUND_MSG0_MAX)
		hba->msg_done = 1;
}

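/*
 * Request slots live on a singly-linked free list.  The list relies on
 * the caller holding host_lock: queuecommand runs under DEF_SCSI_QCMD
 * and the completion paths run under the interrupt handler's lock.
 */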
static struct hptiop_request *get_req(struct hptiop_hba *hba)
{
	struct hptiop_request *ret;

	dprintk("get_req : req=%p\n", hba->req_list);

	ret = hba->req_list;
	if (ret)
		hba->req_list = ret->next;

	return ret;
}

static void free_req(struct hptiop_hba *hba, struct hptiop_request *req)
{
	dprintk("free_req(%d, %p)\n", req->index, req);
	req->next = hba->req_list;
	hba->req_list = req;
}

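/*
 * Complete a SCSI command: unmap its DMA buffers, translate the IOP
 * result code into a SCSI result (copying sense data for check
 * conditions), update the residual, then hand the command back to the
 * midlayer and recycle the request slot.
 */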
static void hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag,
				struct hpt_iop_request_scsi_command *req)
{
	struct scsi_cmnd *scp;

	dprintk("hptiop_finish_scsi_req: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, req->header.type, req->header.result,
			req->header.context, tag);

	BUG_ON(!req->header.result);
	BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND));

	scp = hba->reqs[tag].scp;

	if (HPT_SCP(scp)->mapped)
		scsi_dma_unmap(scp);

	switch (le32_to_cpu(req->header.result)) {
	case IOP_RESULT_SUCCESS:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = (DID_OK<<16);
		break;
	case IOP_RESULT_BAD_TARGET:
		scp->result = (DID_BAD_TARGET<<16);
		break;
	case IOP_RESULT_BUSY:
		scp->result = (DID_BUS_BUSY<<16);
		break;
	case IOP_RESULT_RESET:
		scp->result = (DID_RESET<<16);
		break;
	case IOP_RESULT_FAIL:
		scp->result = (DID_ERROR<<16);
		break;
	case IOP_RESULT_INVALID_REQUEST:
		scp->result = (DID_ABORT<<16);
		break;
	case IOP_RESULT_CHECK_CONDITION:
		scsi_set_resid(scp,
			scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));
		scp->result = SAM_STAT_CHECK_CONDITION;
		memcpy(scp->sense_buffer, &req->sg_list, SCSI_SENSE_BUFFERSIZE);
		goto skip_resid;

	default:
		scp->result = DID_ABORT << 16;
		break;
	}

	scsi_set_resid(scp,
		scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length));

skip_resid:
	dprintk("scsi_done(%p)\n", scp);
	scsi_done(scp);
	free_req(hba, &hba->reqs[tag]);
}

static void hptiop_host_request_callback_itl(struct hptiop_hba *hba, u32 _tag)
{
	struct hpt_iop_request_scsi_command *req;
	u32 tag;

	if (hba->iopintf_v2) {
		tag = _tag & ~IOPMU_QUEUE_REQUEST_RESULT_BIT;
		req = hba->reqs[tag].req_virt;
		if (likely(_tag & IOPMU_QUEUE_REQUEST_RESULT_BIT))
			req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS);
	} else {
		tag = _tag;
		req = hba->reqs[tag].req_virt;
	}

	hptiop_finish_scsi_req(hba, tag, req);
}

static void hptiop_iop_request_callback_itl(struct hptiop_hba *hba, u32 tag)
{
	struct hpt_iop_request_header __iomem *req;
	struct hpt_iop_request_ioctl_command __iomem *p;
	struct hpt_ioctl_k *arg;

	req = (struct hpt_iop_request_header __iomem *)
			((unsigned long)hba->u.itl.iop + tag);
	dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, "
			"result=%d, context=0x%x tag=%d\n",
			req, readl(&req->type), readl(&req->result),
			readl(&req->context), tag);

	BUG_ON(!readl(&req->result));
	BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND);

	p = (struct hpt_iop_request_ioctl_command __iomem *)req;
	arg = (struct hpt_ioctl_k *)(unsigned long)
		(readl(&req->context) |
			((u64)readl(&req->context_hi32)<<32));

	if (readl(&req->result) == IOP_RESULT_SUCCESS) {
		arg->result = HPT_IOCTL_RESULT_OK;

		if (arg->outbuf_size)
			memcpy_fromio(arg->outbuf,
				&p->buf[(readl(&p->inbuf_size) + 3) & ~3],
				arg->outbuf_size);

		if (arg->bytes_returned)
			*arg->bytes_returned = arg->outbuf_size;
	}
	else
		arg->result = HPT_IOCTL_RESULT_FAILED;

	arg->done(arg);
	writel(tag, &hba->u.itl.iop->outbound_queue);
}

static irqreturn_t hptiop_intr(int irq, void *dev_id)
{
	struct hptiop_hba *hba = dev_id;
	int handled;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);
	handled = hba->ops->iop_intr(hba);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return handled;
}

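/*
 * Map the command's scatterlist and translate it into the controller's
 * SG descriptor format; the last element is flagged end-of-table.
 * Returns the number of SG entries, or 0 for commands with no data.
 */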
static int hptiop_buildsgl(struct scsi_cmnd *scp, struct hpt_iopsg *psg)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct scatterlist *sg;
	int idx, nseg;

	nseg = scsi_dma_map(scp);
	BUG_ON(nseg < 0);
	if (!nseg)
		return 0;

	HPT_SCP(scp)->sgcnt = nseg;
	HPT_SCP(scp)->mapped = 1;

	BUG_ON(HPT_SCP(scp)->sgcnt > hba->max_sg_descriptors);

	scsi_for_each_sg(scp, sg, HPT_SCP(scp)->sgcnt, idx) {
		psg[idx].pci_address = cpu_to_le64(sg_dma_address(sg)) |
			hba->ops->host_phy_flag;
		psg[idx].size = cpu_to_le32(sg_dma_len(sg));
		psg[idx].eot = (idx == HPT_SCP(scp)->sgcnt - 1) ?
			cpu_to_le32(1) : 0;
	}
	return HPT_SCP(scp)->sgcnt;
}

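/*
 * ITL/MV submission: the request tag travels in ->context and the
 * request frame's physical address, shifted right by 5, is posted to
 * the inbound queue.  Size-class bits are OR'ed into the queue word
 * (on the ITL v2 interface and on MV), apparently so the firmware
 * knows how much of the frame to fetch.
 */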
static void hptiop_post_req_itl(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;

	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
					(u32)_req->index);
	reqhdr->context_hi32 = 0;

	if (hba->iopintf_v2) {
		u32 size, size_bits;

		size = le32_to_cpu(reqhdr->size);
		if (size < 256)
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
		else if (size < 512)
			size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
		else
			size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT |
						IOPMU_QUEUE_ADDR_HOST_BIT;
		writel(_req->req_shifted_phy | size_bits,
			&hba->u.itl.iop->inbound_queue);
	} else
		writel(_req->req_shifted_phy | IOPMU_QUEUE_ADDR_HOST_BIT,
			&hba->u.itl.iop->inbound_queue);
}

static void hptiop_post_req_mv(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 size, size_bit;

	reqhdr->context = cpu_to_le32(_req->index<<8 |
					IOP_REQUEST_TYPE_SCSI_COMMAND<<5);
	reqhdr->context_hi32 = 0;
	size = le32_to_cpu(reqhdr->size);

	if (size <= 256)
		size_bit = 0;
	else if (size <= 256*2)
		size_bit = 1;
	else if (size <= 256*3)
		size_bit = 2;
	else
		size_bit = 3;

	mv_inbound_write((_req->req_shifted_phy << 5) |
		MVIOP_MU_QUEUE_ADDR_HOST_BIT | size_bit, hba);
}

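/*
 * MVFrey submission goes through an in-memory inbound list instead of a
 * doorbell queue: fill the next list slot, wrap the write pointer with
 * the toggle bit when it passes the end of the list, then kick the
 * hardware via inbound_write_ptr (the readl flushes the posted write).
 */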
static void hptiop_post_req_mvfrey(struct hptiop_hba *hba,
					struct hptiop_request *_req)
{
	struct hpt_iop_request_header *reqhdr = _req->req_virt;
	u32 index;

	reqhdr->flags |= cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT |
			IOP_REQUEST_FLAG_ADDR_BITS |
			((_req->req_shifted_phy >> 11) & 0xffff0000));
	reqhdr->context = cpu_to_le32(IOPMU_QUEUE_ADDR_HOST_BIT |
			(_req->index << 4) | reqhdr->type);
	reqhdr->context_hi32 = cpu_to_le32((_req->req_shifted_phy << 5) &
			0xffffffff);

	hba->u.mvfrey.inlist_wptr++;
	index = hba->u.mvfrey.inlist_wptr & 0x3fff;

	if (index == hba->u.mvfrey.list_count) {
		index = 0;
		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
	}

	hba->u.mvfrey.inlist[index].addr =
			(dma_addr_t)_req->req_shifted_phy << 5;
	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
	writel(hba->u.mvfrey.inlist_wptr,
		&(hba->u.mvfrey.mu->inbound_write_ptr));
	readl(&(hba->u.mvfrey.mu->inbound_write_ptr));
}

static int hptiop_reset_comm_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_reset_comm_mv(struct hptiop_hba *hba)
{
	return 0;
}

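/*
 * Re-arm MVFrey list DMA after a RESET_COMM message: reprogram the
 * inbound/outbound list base registers and the outbound shadow pointer,
 * then reset the cached read/write pointers to their pre-wrap start
 * positions (list_count - 1, with the toggle bit set).
 */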
static int hptiop_reset_comm_mvfrey(struct hptiop_hba *hba)
{
	u32 list_count = hba->u.mvfrey.list_count;

	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
		return -1;

	/* wait 100ms for MCU ready */
	msleep(100);

	writel(cpu_to_le32(hba->u.mvfrey.inlist_phy & 0xffffffff),
			&(hba->u.mvfrey.mu->inbound_base));
	writel(cpu_to_le32((hba->u.mvfrey.inlist_phy >> 16) >> 16),
			&(hba->u.mvfrey.mu->inbound_base_high));

	writel(cpu_to_le32(hba->u.mvfrey.outlist_phy & 0xffffffff),
			&(hba->u.mvfrey.mu->outbound_base));
	writel(cpu_to_le32((hba->u.mvfrey.outlist_phy >> 16) >> 16),
			&(hba->u.mvfrey.mu->outbound_base_high));

	writel(cpu_to_le32(hba->u.mvfrey.outlist_cptr_phy & 0xffffffff),
			&(hba->u.mvfrey.mu->outbound_shadow_base));
	writel(cpu_to_le32((hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16),
			&(hba->u.mvfrey.mu->outbound_shadow_base_high));

	hba->u.mvfrey.inlist_wptr = (list_count - 1) | CL_POINTER_TOGGLE;
	*hba->u.mvfrey.outlist_cptr = (list_count - 1) | CL_POINTER_TOGGLE;
	hba->u.mvfrey.outlist_rptr = list_count - 1;
	return 0;
}

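/*
 * Queuecommand proper (invoked under host_lock by DEF_SCSI_QCMD below):
 * grab a free request slot, validate the target, build the SG table and
 * request header, and post the request to the IOP.  Returns
 * SCSI_MLQUEUE_HOST_BUSY when no slot is free.
 */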
static int hptiop_queuecommand_lck(struct scsi_cmnd *scp)
{
	struct Scsi_Host *host = scp->device->host;
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	struct hpt_iop_request_scsi_command *req;
	int sg_count = 0;
	struct hptiop_request *_req;

	_req = get_req(hba);
	if (_req == NULL) {
		dprintk("hptiop_queuecmd : no free req\n");
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	_req->scp = scp;

	dprintk("hptiop_queuecmd(scp=%p) %d/%d/%d/%llu cdb=(%08x-%08x-%08x-%08x) "
			"req_index=%d, req=%p\n",
			scp,
			host->host_no, scp->device->channel,
			scp->device->id, scp->device->lun,
			cpu_to_be32(((u32 *)scp->cmnd)[0]),
			cpu_to_be32(((u32 *)scp->cmnd)[1]),
			cpu_to_be32(((u32 *)scp->cmnd)[2]),
			cpu_to_be32(((u32 *)scp->cmnd)[3]),
			_req->index, _req->req_virt);

	scp->result = 0;

	if (scp->device->channel ||
			(scp->device->id > hba->max_devices) ||
			((scp->device->id == (hba->max_devices-1)) && scp->device->lun)) {
		scp->result = DID_BAD_TARGET << 16;
		free_req(hba, _req);
		goto cmd_done;
	}

	req = _req->req_virt;

	/* build S/G table */
	sg_count = hptiop_buildsgl(scp, req->sg_list);
	if (!sg_count)
		HPT_SCP(scp)->mapped = 0;

	req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT);
	req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND);
	req->header.result = cpu_to_le32(IOP_RESULT_PENDING);
	req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp));
	req->channel = scp->device->channel;
	req->target = scp->device->id;
	req->lun = scp->device->lun;
	req->header.size = cpu_to_le32(struct_size(req, sg_list, sg_count));

	memcpy(req->cdb, scp->cmnd, sizeof(req->cdb));
	hba->ops->post_req(hba, _req);
	return 0;

cmd_done:
	dprintk("scsi_done(scp=%p)\n", scp);
	scsi_done(scp);
	return 0;
}

static DEF_SCSI_QCMD(hptiop_queuecommand)

static const char *hptiop_info(struct Scsi_Host *host)
{
	return driver_name_long;
}

static int hptiop_reset_hba(struct hptiop_hba *hba)
{
	if (atomic_xchg(&hba->resetting, 1) == 0) {
		atomic_inc(&hba->reset_count);
		hba->ops->post_msg(hba, IOPMU_INBOUND_MSG0_RESET);
	}

	wait_event_timeout(hba->reset_wq,
			atomic_read(&hba->resetting) == 0, 60 * HZ);

	if (atomic_read(&hba->resetting)) {
		/* IOP is in unknown state, abort reset */
		printk(KERN_ERR "scsi%d: reset failed\n", hba->host->host_no);
		return -1;
	}

	if (iop_send_sync_msg(hba,
		IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
		dprintk("scsi%d: fail to start background task\n",
				hba->host->host_no);
	}

	return 0;
}

static int hptiop_reset(struct scsi_cmnd *scp)
{
	struct hptiop_hba *hba = (struct hptiop_hba *)scp->device->host->hostdata;

	printk(KERN_WARNING "hptiop_reset(%d/%d/%d)\n",
			scp->device->host->host_no, -1, -1);

	return hptiop_reset_hba(hba) ? FAILED : SUCCESS;
}

static int hptiop_adjust_disk_queue_depth(struct scsi_device *sdev,
						int queue_depth)
{
	struct hptiop_hba *hba = (struct hptiop_hba *)sdev->host->hostdata;

	if (queue_depth > hba->max_requests)
		queue_depth = hba->max_requests;
	return scsi_change_queue_depth(sdev, queue_depth);
}

static ssize_t hptiop_show_version(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", driver_ver);
}

static ssize_t hptiop_show_fw_version(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n",
			hba->firmware_version >> 24,
			(hba->firmware_version >> 16) & 0xff,
			(hba->firmware_version >> 8) & 0xff,
			hba->firmware_version & 0xff);
}

static struct device_attribute hptiop_attr_version = {
	.attr = {
		.name = "driver-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_version,
};

static struct device_attribute hptiop_attr_fw_version = {
	.attr = {
		.name = "firmware-version",
		.mode = S_IRUGO,
	},
	.show = hptiop_show_fw_version,
};

static struct attribute *hptiop_host_attrs[] = {
	&hptiop_attr_version.attr,
	&hptiop_attr_fw_version.attr,
	NULL
};

ATTRIBUTE_GROUPS(hptiop_host);

static int hptiop_slave_config(struct scsi_device *sdev)
{
	if (sdev->type == TYPE_TAPE)
		blk_queue_max_hw_sectors(sdev->request_queue, 8192);

	return 0;
}

static const struct scsi_host_template driver_template = {
	.module = THIS_MODULE,
	.name = driver_name,
	.queuecommand = hptiop_queuecommand,
	.eh_host_reset_handler = hptiop_reset,
	.info = hptiop_info,
	.emulated = 0,
	.proc_name = driver_name,
	.shost_groups = hptiop_host_groups,
	.slave_configure = hptiop_slave_config,
	.this_id = -1,
	.change_queue_depth = hptiop_adjust_disk_queue_depth,
	.cmd_size = sizeof(struct hpt_cmd_priv),
};

static int hptiop_internal_memalloc_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_internal_memalloc_mv(struct hptiop_hba *hba)
{
	hba->u.mv.internal_req = dma_alloc_coherent(&hba->pcidev->dev,
			0x800, &hba->u.mv.internal_req_phy, GFP_KERNEL);
	if (hba->u.mv.internal_req)
		return 0;
	else
		return -1;
}

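/*
 * One coherent allocation backs all MVFrey internal structures: a 0x800
 * byte internal request frame, then the inbound and outbound lists
 * (list_count entries each, with the count read from the upper half of
 * inbound_conf_ctl), then the outbound shadow copy pointer.
 */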
static int hptiop_internal_memalloc_mvfrey(struct hptiop_hba *hba)
{
	u32 list_count = readl(&hba->u.mvfrey.mu->inbound_conf_ctl);
	char *p;
	dma_addr_t phy;

	BUG_ON(hba->max_request_size == 0);

	if (list_count == 0) {
		BUG_ON(1);
		return -1;
	}

	list_count >>= 16;

	hba->u.mvfrey.list_count = list_count;
	hba->u.mvfrey.internal_mem_size = 0x800 +
			list_count * sizeof(struct mvfrey_inlist_entry) +
			list_count * sizeof(struct mvfrey_outlist_entry) +
			sizeof(int);

	p = dma_alloc_coherent(&hba->pcidev->dev,
			hba->u.mvfrey.internal_mem_size, &phy, GFP_KERNEL);
	if (!p)
		return -1;

	hba->u.mvfrey.internal_req.req_virt = p;
	hba->u.mvfrey.internal_req.req_shifted_phy = phy >> 5;
	hba->u.mvfrey.internal_req.scp = NULL;
	hba->u.mvfrey.internal_req.next = NULL;

	p += 0x800;
	phy += 0x800;

	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
	hba->u.mvfrey.inlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_inlist_entry);
	phy += list_count * sizeof(struct mvfrey_inlist_entry);

	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
	hba->u.mvfrey.outlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_outlist_entry);
	phy += list_count * sizeof(struct mvfrey_outlist_entry);

	hba->u.mvfrey.outlist_cptr = (__le32 *)p;
	hba->u.mvfrey.outlist_cptr_phy = phy;

	return 0;
}

static int hptiop_internal_memfree_itl(struct hptiop_hba *hba)
{
	return 0;
}

static int hptiop_internal_memfree_mv(struct hptiop_hba *hba)
{
	if (hba->u.mv.internal_req) {
		dma_free_coherent(&hba->pcidev->dev, 0x800,
			hba->u.mv.internal_req, hba->u.mv.internal_req_phy);
		return 0;
	} else
		return -1;
}

static int hptiop_internal_memfree_mvfrey(struct hptiop_hba *hba)
{
	if (hba->u.mvfrey.internal_req.req_virt) {
		dma_free_coherent(&hba->pcidev->dev,
			hba->u.mvfrey.internal_mem_size,
			hba->u.mvfrey.internal_req.req_virt,
			(dma_addr_t)
			hba->u.mvfrey.internal_req.req_shifted_phy << 5);
		return 0;
	} else
		return -1;
}

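/*
 * Probe: enable the PCI device, set the DMA mask advertised by the
 * adapter ops, map the BARs, wait for firmware, exchange get/set config
 * with the IOP, carve out the per-slot request frames (32-byte aligned),
 * then register with the SCSI midlayer.  Error paths unwind in reverse.
 */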
static int hptiop_probe(struct pci_dev *pcidev, const struct pci_device_id *id)
{
	struct Scsi_Host *host = NULL;
	struct hptiop_hba *hba;
	struct hptiop_adapter_ops *iop_ops;
	struct hpt_iop_request_get_config iop_config;
	struct hpt_iop_request_set_config set_config;
	dma_addr_t start_phy;
	void *start_virt;
	u32 offset, i, req_size;
	int rc;

	dprintk("hptiop_probe(%p)\n", pcidev);

	if (pci_enable_device(pcidev)) {
		printk(KERN_ERR "hptiop: fail to enable pci device\n");
		return -ENODEV;
	}

	printk(KERN_INFO "adapter at PCI %d:%d:%d, IRQ %d\n",
		pcidev->bus->number, pcidev->devfn >> 3, pcidev->devfn & 7,
		pcidev->irq);

	pci_set_master(pcidev);

	/* Enable 64bit DMA if possible */
	iop_ops = (struct hptiop_adapter_ops *)id->driver_data;
	rc = dma_set_mask(&pcidev->dev,
			DMA_BIT_MASK(iop_ops->hw_dma_bit_mask));
	if (rc)
		rc = dma_set_mask(&pcidev->dev, DMA_BIT_MASK(32));

	if (rc) {
		printk(KERN_ERR "hptiop: fail to set dma_mask\n");
		goto disable_pci_device;
	}

	if (pci_request_regions(pcidev, driver_name)) {
		printk(KERN_ERR "hptiop: pci_request_regions failed\n");
		goto disable_pci_device;
	}

	host = scsi_host_alloc(&driver_template, sizeof(struct hptiop_hba));
	if (!host) {
		printk(KERN_ERR "hptiop: fail to alloc scsi host\n");
		goto free_pci_regions;
	}

	hba = (struct hptiop_hba *)host->hostdata;
	memset(hba, 0, sizeof(struct hptiop_hba));

	hba->ops = iop_ops;
	hba->pcidev = pcidev;
	hba->host = host;
	hba->initialized = 0;
	hba->iopintf_v2 = 0;

	atomic_set(&hba->resetting, 0);
	atomic_set(&hba->reset_count, 0);

	init_waitqueue_head(&hba->reset_wq);
	init_waitqueue_head(&hba->ioctl_wq);

	host->max_lun = 128;
	host->max_channel = 0;
	host->io_port = 0;
	host->n_io_port = 0;
	host->irq = pcidev->irq;

	if (hba->ops->map_pci_bar(hba))
		goto free_scsi_host;

	if (hba->ops->iop_wait_ready(hba, 20000)) {
		printk(KERN_ERR "scsi%d: firmware not ready\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	if (hba->ops->family == MV_BASED_IOP) {
		if (hba->ops->internal_memalloc(hba)) {
			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
				hba->host->host_no);
			goto unmap_pci_bar;
		}
	}

	if (hba->ops->get_config(hba, &iop_config)) {
		printk(KERN_ERR "scsi%d: get config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	hba->max_requests = min(le32_to_cpu(iop_config.max_requests),
				HPTIOP_MAX_REQUESTS);
	hba->max_devices = le32_to_cpu(iop_config.max_devices);
	hba->max_request_size = le32_to_cpu(iop_config.request_size);
	hba->max_sg_descriptors = le32_to_cpu(iop_config.max_sg_count);
	hba->firmware_version = le32_to_cpu(iop_config.firmware_version);
	hba->interface_version = le32_to_cpu(iop_config.interface_version);
	hba->sdram_size = le32_to_cpu(iop_config.sdram_size);

	if (hba->ops->family == MVFREY_BASED_IOP) {
		if (hba->ops->internal_memalloc(hba)) {
			printk(KERN_ERR "scsi%d: internal_memalloc failed\n",
				hba->host->host_no);
			goto unmap_pci_bar;
		}
		if (hba->ops->reset_comm(hba)) {
			printk(KERN_ERR "scsi%d: reset comm failed\n",
					hba->host->host_no);
			goto unmap_pci_bar;
		}
	}

	if (hba->firmware_version > 0x01020000 ||
			hba->interface_version > 0x01020000)
		hba->iopintf_v2 = 1;

	host->max_sectors = le32_to_cpu(iop_config.data_transfer_length) >> 9;
	host->max_id = le32_to_cpu(iop_config.max_devices);
	host->sg_tablesize = le32_to_cpu(iop_config.max_sg_count);
	host->can_queue = le32_to_cpu(iop_config.max_requests);
	host->cmd_per_lun = le32_to_cpu(iop_config.max_requests);
	host->max_cmd_len = 16;

	req_size = struct_size_t(struct hpt_iop_request_scsi_command,
				sg_list, hba->max_sg_descriptors);
	if ((req_size & 0x1f) != 0)
		req_size = (req_size + 0x1f) & ~0x1f;

	memset(&set_config, 0, sizeof(struct hpt_iop_request_set_config));
	set_config.iop_id = cpu_to_le32(host->host_no);
	set_config.vbus_id = cpu_to_le16(host->host_no);
	set_config.max_host_request_size = cpu_to_le16(req_size);

	if (hba->ops->set_config(hba, &set_config)) {
		printk(KERN_ERR "scsi%d: set config failed\n",
				hba->host->host_no);
		goto unmap_pci_bar;
	}

	pci_set_drvdata(pcidev, host);

	if (request_irq(pcidev->irq, hptiop_intr, IRQF_SHARED,
					driver_name, hba)) {
		printk(KERN_ERR "scsi%d: request irq %d failed\n",
					hba->host->host_no, pcidev->irq);
		goto unmap_pci_bar;
	}

	/* Allocate request mem */

	dprintk("req_size=%d, max_requests=%d\n", req_size, hba->max_requests);

	hba->req_size = req_size;
	hba->req_list = NULL;

	for (i = 0; i < hba->max_requests; i++) {
		start_virt = dma_alloc_coherent(&pcidev->dev,
					hba->req_size + 0x20,
					&start_phy, GFP_KERNEL);

		if (!start_virt) {
			printk(KERN_ERR "scsi%d: fail to alloc request mem\n",
					hba->host->host_no);
			goto free_request_mem;
		}

		hba->dma_coherent[i] = start_virt;
		hba->dma_coherent_handle[i] = start_phy;

		if ((start_phy & 0x1f) != 0) {
			offset = ((start_phy + 0x1f) & ~0x1f) - start_phy;
			start_phy += offset;
			start_virt += offset;
		}

		hba->reqs[i].next = NULL;
		hba->reqs[i].req_virt = start_virt;
		hba->reqs[i].req_shifted_phy = start_phy >> 5;
		hba->reqs[i].index = i;
		free_req(hba, &hba->reqs[i]);
	}

	/* Enable Interrupt and start background task */
	if (hptiop_initialize_iop(hba))
		goto free_request_mem;

	if (scsi_add_host(host, &pcidev->dev)) {
		printk(KERN_ERR "scsi%d: scsi_add_host failed\n",
					hba->host->host_no);
		goto free_request_mem;
	}

	scsi_scan_host(host);

	dprintk("scsi%d: hptiop_probe successfully\n", hba->host->host_no);
	return 0;

free_request_mem:
	for (i = 0; i < hba->max_requests; i++) {
		if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
			dma_free_coherent(&hba->pcidev->dev,
					hba->req_size + 0x20,
					hba->dma_coherent[i],
					hba->dma_coherent_handle[i]);
		else
			break;
	}

	free_irq(hba->pcidev->irq, hba);

unmap_pci_bar:
	hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

free_scsi_host:
	scsi_host_put(host);

free_pci_regions:
	pci_release_regions(pcidev);

disable_pci_device:
	pci_disable_device(pcidev);

	dprintk("scsi%d: hptiop_probe fail\n", host ? host->host_no : 0);
	return -ENODEV;
}

static void hptiop_shutdown(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;

	dprintk("hptiop_shutdown(%p)\n", hba);

	/* stop the iop */
	if (iop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
		printk(KERN_ERR "scsi%d: shutdown the iop timeout\n",
					hba->host->host_no);

	/* disable all outbound interrupts */
	hba->ops->disable_intr(hba);
}

static void hptiop_disable_intr_itl(struct hptiop_hba *hba)
{
	u32 int_mask;

	int_mask = readl(&hba->u.itl.iop->outbound_intmask);
	writel(int_mask |
		IOPMU_OUTBOUND_INT_MSG0 | IOPMU_OUTBOUND_INT_POSTQUEUE,
		&hba->u.itl.iop->outbound_intmask);
	readl(&hba->u.itl.iop->outbound_intmask);
}

static void hptiop_disable_intr_mv(struct hptiop_hba *hba)
{
	writel(0, &hba->u.mv.regs->outbound_intmask);
	readl(&hba->u.mv.regs->outbound_intmask);
}

static void hptiop_disable_intr_mvfrey(struct hptiop_hba *hba)
{
	writel(0, &(hba->u.mvfrey.mu->f0_doorbell_enable));
	readl(&(hba->u.mvfrey.mu->f0_doorbell_enable));
	writel(0, &(hba->u.mvfrey.mu->isr_enable));
	readl(&(hba->u.mvfrey.mu->isr_enable));
	writel(0, &(hba->u.mvfrey.mu->pcie_f0_int_enable));
	readl(&(hba->u.mvfrey.mu->pcie_f0_int_enable));
}

static void hptiop_remove(struct pci_dev *pcidev)
{
	struct Scsi_Host *host = pci_get_drvdata(pcidev);
	struct hptiop_hba *hba = (struct hptiop_hba *)host->hostdata;
	u32 i;

	dprintk("scsi%d: hptiop_remove\n", hba->host->host_no);

	scsi_remove_host(host);

	hptiop_shutdown(pcidev);

	free_irq(hba->pcidev->irq, hba);

	for (i = 0; i < hba->max_requests; i++) {
		if (hba->dma_coherent[i] && hba->dma_coherent_handle[i])
			dma_free_coherent(&hba->pcidev->dev,
					hba->req_size + 0x20,
					hba->dma_coherent[i],
					hba->dma_coherent_handle[i]);
		else
			break;
	}

	hba->ops->internal_memfree(hba);

	hba->ops->unmap_pci_bar(hba);

	pci_release_regions(hba->pcidev);
	pci_set_drvdata(hba->pcidev, NULL);
	pci_disable_device(hba->pcidev);

	scsi_host_put(host);
}

static struct hptiop_adapter_ops hptiop_itl_ops = {
	.family = INTEL_BASED_IOP,
	.iop_wait_ready = iop_wait_ready_itl,
	.internal_memalloc = hptiop_internal_memalloc_itl,
	.internal_memfree = hptiop_internal_memfree_itl,
	.map_pci_bar = hptiop_map_pci_bar_itl,
	.unmap_pci_bar = hptiop_unmap_pci_bar_itl,
	.enable_intr = hptiop_enable_intr_itl,
	.disable_intr = hptiop_disable_intr_itl,
	.get_config = iop_get_config_itl,
	.set_config = iop_set_config_itl,
	.iop_intr = iop_intr_itl,
	.post_msg = hptiop_post_msg_itl,
	.post_req = hptiop_post_req_itl,
	.hw_dma_bit_mask = 64,
	.reset_comm = hptiop_reset_comm_itl,
	.host_phy_flag = cpu_to_le64(0),
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
	.family = MV_BASED_IOP,
	.iop_wait_ready = iop_wait_ready_mv,
	.internal_memalloc = hptiop_internal_memalloc_mv,
	.internal_memfree = hptiop_internal_memfree_mv,
	.map_pci_bar = hptiop_map_pci_bar_mv,
	.unmap_pci_bar = hptiop_unmap_pci_bar_mv,
	.enable_intr = hptiop_enable_intr_mv,
	.disable_intr = hptiop_disable_intr_mv,
	.get_config = iop_get_config_mv,
	.set_config = iop_set_config_mv,
	.iop_intr = iop_intr_mv,
	.post_msg = hptiop_post_msg_mv,
	.post_req = hptiop_post_req_mv,
	.hw_dma_bit_mask = 33,
	.reset_comm = hptiop_reset_comm_mv,
	.host_phy_flag = cpu_to_le64(0),
};

static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
	.family = MVFREY_BASED_IOP,
	.iop_wait_ready = iop_wait_ready_mvfrey,
	.internal_memalloc = hptiop_internal_memalloc_mvfrey,
	.internal_memfree = hptiop_internal_memfree_mvfrey,
	.map_pci_bar = hptiop_map_pci_bar_mvfrey,
	.unmap_pci_bar = hptiop_unmap_pci_bar_mvfrey,
	.enable_intr = hptiop_enable_intr_mvfrey,
	.disable_intr = hptiop_disable_intr_mvfrey,
	.get_config = iop_get_config_mvfrey,
	.set_config = iop_set_config_mvfrey,
	.iop_intr = iop_intr_mvfrey,
	.post_msg = hptiop_post_msg_mvfrey,
	.post_req = hptiop_post_req_mvfrey,
	.hw_dma_bit_mask = 64,
	.reset_comm = hptiop_reset_comm_mvfrey,
	.host_phy_flag = cpu_to_le64(1),
};

static struct pci_device_id hptiop_id_table[] = {
	{ PCI_VDEVICE(TTI, 0x3220), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3410), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3510), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3511), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3520), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3521), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3522), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3530), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3540), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3560), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4210), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4211), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4310), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4311), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4320), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4321), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4322), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x4400), (kernel_ulong_t)&hptiop_itl_ops },
	{ PCI_VDEVICE(TTI, 0x3120), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3122), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x3020), (kernel_ulong_t)&hptiop_mv_ops },
	{ PCI_VDEVICE(TTI, 0x4520), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x4522), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3610), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3611), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3620), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3622), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3640), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3660), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3680), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{ PCI_VDEVICE(TTI, 0x3690), (kernel_ulong_t)&hptiop_mvfrey_ops },
	{},
};

MODULE_DEVICE_TABLE(pci, hptiop_id_table);

static struct pci_driver hptiop_pci_driver = {
	.name = driver_name,
	.id_table = hptiop_id_table,
	.probe = hptiop_probe,
	.remove = hptiop_remove,
	.shutdown = hptiop_shutdown,
};

static int __init hptiop_module_init(void)
{
	printk(KERN_INFO "%s %s\n", driver_name_long, driver_ver);
	return pci_register_driver(&hptiop_pci_driver);
}

static void __exit hptiop_module_exit(void)
{
	pci_unregister_driver(&hptiop_pci_driver);
}

module_init(hptiop_module_init);
module_exit(hptiop_module_exit);

MODULE_LICENSE("GPL");