// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
 * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>

#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "efa.h"

#define PCI_DEV_ID_EFA0_VF 0xefa0
#define PCI_DEV_ID_EFA1_VF 0xefa1
#define PCI_DEV_ID_EFA2_VF 0xefa2

static const struct pci_device_id efa_pci_tbl[] = {
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA0_VF) },
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA1_VF) },
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA2_VF) },
	{ }
};

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_DEVICE_TABLE(pci, efa_pci_tbl);

#define EFA_REG_BAR 0
#define EFA_MEM_BAR 2
#define EFA_BASE_BAR_MASK (BIT(EFA_REG_BAR) | BIT(EFA_MEM_BAR))

#define EFA_AENQ_ENABLED_GROUPS \
	(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
	 BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))

extern const struct uapi_definition efa_uapi_defs[];

/* This handler will be called for unknown event groups or unimplemented handlers */
static void unimplemented_aenq_handler(void *data,
				       struct efa_admin_aenq_entry *aenq_e)
{
	struct efa_dev *dev = (struct efa_dev *)data;

	ibdev_err(&dev->ibdev,
		  "Unknown event was received or event with unimplemented handler\n");
}

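/* AENQ handler for keep-alive events; bumps the keep_alive_rcvd statistic. */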
static void efa_keep_alive(void *data, struct efa_admin_aenq_entry *aenq_e)
{
	struct efa_dev *dev = (struct efa_dev *)data;

	atomic64_inc(&dev->stats.keep_alive_rcvd);
}

static struct efa_aenq_handlers aenq_handlers = {
	.handlers = {
		[EFA_ADMIN_KEEP_ALIVE] = efa_keep_alive,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

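/* Release the subset of this device's memory BARs selected by @bars_mask. */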
static void efa_release_bars(struct efa_dev *dev, int bars_mask)
{
	struct pci_dev *pdev = dev->pdev;
	int release_bars;

	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & bars_mask;
	pci_release_selected_regions(pdev, release_bars);
}

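/* Dispatch a completion event to the CQ's registered completion handler. */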
static void efa_process_comp_eqe(struct efa_dev *dev, struct efa_admin_eqe *eqe)
{
	u16 cqn = eqe->u.comp_event.cqn;
	struct efa_cq *cq;

	/* Safe to load as we're in irq and removal calls synchronize_irq() */
	cq = xa_load(&dev->cqs_xa, cqn);
	if (unlikely(!cq)) {
		ibdev_err_ratelimited(&dev->ibdev,
				      "Completion event on non-existent CQ[%u]",
				      cqn);
		return;
	}

	cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
}

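/* Demultiplex an event queue entry by its event type. */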
static void efa_process_eqe(struct efa_com_eq *eeq, struct efa_admin_eqe *eqe)
{
	struct efa_dev *dev = container_of(eeq->edev, struct efa_dev, edev);

	if (likely(EFA_GET(&eqe->common, EFA_ADMIN_EQE_EVENT_TYPE) ==
		   EFA_ADMIN_EQE_EVENT_TYPE_COMPLETION))
		efa_process_comp_eqe(dev, eqe);
	else
		ibdev_err_ratelimited(&dev->ibdev,
				      "Unknown event type received %lu",
				      EFA_GET(&eqe->common,
					      EFA_ADMIN_EQE_EVENT_TYPE));
}

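/* MSI-X handler for completion event queue interrupts. */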
static irqreturn_t efa_intr_msix_comp(int irq, void *data)
{
	struct efa_eq *eq = data;
	struct efa_com_dev *edev = eq->eeq.edev;

	efa_com_eq_comp_intr_handler(edev, &eq->eeq);

	return IRQ_HANDLED;
}

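/* MSI-X handler for the management vector: admin queue completions and AENQ. */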
static irqreturn_t efa_intr_msix_mgmnt(int irq, void *data)
{
	struct efa_dev *dev = data;

	efa_com_admin_q_comp_intr_handler(&dev->edev);
	efa_com_aenq_intr_handler(&dev->edev, data);

	return IRQ_HANDLED;
}

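/* Request an IRQ and apply its CPU affinity hint. */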
static int efa_request_irq(struct efa_dev *dev, struct efa_irq *irq)
{
	int err;

	err = request_irq(irq->irqn, irq->handler, 0, irq->name, irq->data);
	if (err) {
		dev_err(&dev->pdev->dev, "Failed to request irq %s (%d)\n",
			irq->name, err);
		return err;
	}

	irq_set_affinity_hint(irq->irqn, &irq->affinity_hint_mask);

	return 0;
}

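/*
 * Prepare a completion EQ interrupt: name it after its vector and hint
 * its affinity to the matching CPU.
 */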
static void efa_setup_comp_irq(struct efa_dev *dev, struct efa_eq *eq,
			       int vector)
{
	u32 cpu;

	cpu = vector - EFA_COMP_EQS_VEC_BASE;
	snprintf(eq->irq.name, EFA_IRQNAME_SIZE, "efa-comp%d@pci:%s", cpu,
		 pci_name(dev->pdev));
	eq->irq.handler = efa_intr_msix_comp;
	eq->irq.data = eq;
	eq->irq.vector = vector;
	eq->irq.irqn = pci_irq_vector(dev->pdev, vector);
	cpumask_set_cpu(cpu, &eq->irq.affinity_hint_mask);
}

static void efa_free_irq(struct efa_dev *dev, struct efa_irq *irq)
{
	irq_set_affinity_hint(irq->irqn, NULL);
	free_irq(irq->irqn, irq->data);
}

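/* Prepare the management interrupt; affinity is hinted to the first online CPU. */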
static void efa_setup_mgmnt_irq(struct efa_dev *dev)
{
	u32 cpu;

	snprintf(dev->admin_irq.name, EFA_IRQNAME_SIZE,
		 "efa-mgmnt@pci:%s", pci_name(dev->pdev));
	dev->admin_irq.handler = efa_intr_msix_mgmnt;
	dev->admin_irq.data = dev;
	dev->admin_irq.vector = dev->admin_msix_vector_idx;
	dev->admin_irq.irqn = pci_irq_vector(dev->pdev,
					     dev->admin_msix_vector_idx);
	cpu = cpumask_first(cpu_online_mask);
	cpumask_set_cpu(cpu,
			&dev->admin_irq.affinity_hint_mask);
	dev_info(&dev->pdev->dev, "Setup irq:%d name:%s\n",
		 dev->admin_irq.irqn,
		 dev->admin_irq.name);
}

static int efa_set_mgmnt_irq(struct efa_dev *dev)
{
	efa_setup_mgmnt_irq(dev);

	return efa_request_irq(dev, &dev->admin_irq);
}

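/*
 * The doorbell BAR may coincide with one of the base BARs already requested
 * at probe time; only request it separately when it does not.
 */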
static int efa_request_doorbell_bar(struct efa_dev *dev)
{
	u8 db_bar_idx = dev->dev_attr.db_bar;
	struct pci_dev *pdev = dev->pdev;
	int bars;
	int err;

	if (!(BIT(db_bar_idx) & EFA_BASE_BAR_MASK)) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(db_bar_idx);

		err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
		if (err) {
			dev_err(&dev->pdev->dev,
				"pci_request_selected_regions for bar %d failed %d\n",
				db_bar_idx, err);
			return err;
		}
	}

	dev->db_bar_addr = pci_resource_start(dev->pdev, db_bar_idx);
	dev->db_bar_len = pci_resource_len(dev->pdev, db_bar_idx);

	return 0;
}

static void efa_release_doorbell_bar(struct efa_dev *dev)
{
	if (!(BIT(dev->dev_attr.db_bar) & EFA_BASE_BAR_MASK))
		efa_release_bars(dev, BIT(dev->dev_attr.db_bar));
}

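/*
 * Apply device-provided timing hints, scaling the MMIO read timeout to the
 * finer-grained units used internally.
 */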
static void efa_update_hw_hints(struct efa_dev *dev,
				struct efa_com_get_hw_hints_result *hw_hints)
{
	struct efa_com_dev *edev = &dev->edev;

	if (hw_hints->mmio_read_timeout)
		edev->mmio_read.mmio_read_timeout =
			hw_hints->mmio_read_timeout * 1000;

	if (hw_hints->poll_interval)
		edev->aq.poll_interval = hw_hints->poll_interval;

	if (hw_hints->admin_completion_timeout)
		edev->aq.completion_timeout =
			hw_hints->admin_completion_timeout;
}

static void efa_stats_init(struct efa_dev *dev)
{
	atomic64_t *s = (atomic64_t *)&dev->stats;
	int i;

	for (i = 0; i < sizeof(dev->stats) / sizeof(*s); i++, s++)
		atomic64_set(s, 0);
}

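/*
 * Report host/kernel/driver identification to the device. This is
 * best-effort: the feature may be unsupported and failures are ignored.
 */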
static void efa_set_host_info(struct efa_dev *dev)
{
	struct efa_admin_set_feature_resp resp = {};
	struct efa_admin_set_feature_cmd cmd = {};
	struct efa_admin_host_info *hinf;
	u32 bufsz = sizeof(*hinf);
	dma_addr_t hinf_dma;

	if (!efa_com_check_supported_feature_id(&dev->edev,
						EFA_ADMIN_HOST_INFO))
		return;

	/* Failures in host info set shall not disturb probe */
	hinf = dma_alloc_coherent(&dev->pdev->dev, bufsz, &hinf_dma,
				  GFP_KERNEL);
	if (!hinf)
		return;

	strscpy(hinf->os_dist_str, utsname()->release,
		sizeof(hinf->os_dist_str));
	hinf->os_type = EFA_ADMIN_OS_LINUX;
	strscpy(hinf->kernel_ver_str, utsname()->version,
		sizeof(hinf->kernel_ver_str));
	hinf->kernel_ver = LINUX_VERSION_CODE;
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MAJOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MINOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE, 0);
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_BUS, dev->pdev->bus->number);
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_DEVICE,
		PCI_SLOT(dev->pdev->devfn));
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_FUNCTION,
		PCI_FUNC(dev->pdev->devfn));
	EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MAJOR,
		EFA_COMMON_SPEC_VERSION_MAJOR);
	EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MINOR,
		EFA_COMMON_SPEC_VERSION_MINOR);
	EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_INTREE, 1);
	EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_GDR, 0);

	efa_com_set_feature_ex(&dev->edev, &resp, &cmd, EFA_ADMIN_HOST_INFO,
			       hinf_dma, bufsz);

	dma_free_coherent(&dev->pdev->dev, bufsz, hinf, hinf_dma);
}

static void efa_destroy_eq(struct efa_dev *dev, struct efa_eq *eq)
{
	efa_com_eq_destroy(&dev->edev, &eq->eeq);
	efa_free_irq(dev, &eq->irq);
}

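/* Set up one completion EQ: request its IRQ, then initialize the queue. */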
static int efa_create_eq(struct efa_dev *dev, struct efa_eq *eq, u8 msix_vec)
{
	int err;

	efa_setup_comp_irq(dev, eq, msix_vec);
	err = efa_request_irq(dev, &eq->irq);
	if (err)
		return err;

	err = efa_com_eq_init(&dev->edev, &eq->eeq, efa_process_eqe,
			      dev->dev_attr.max_eq_depth, msix_vec);
	if (err)
		goto err_free_comp_irq;

	return 0;

err_free_comp_irq:
	efa_free_irq(dev, &eq->irq);
	return err;
}

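/*
 * Create one completion EQ per usable MSI-X vector, capped by the device's
 * max_eq attribute; tear down any already-created EQs on failure.
 */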
static int efa_create_eqs(struct efa_dev *dev)
{
	unsigned int neqs = dev->dev_attr.max_eq;
	int err;
	int i;

	neqs = min_t(unsigned int, neqs,
		     dev->num_irq_vectors - EFA_COMP_EQS_VEC_BASE);

	dev->neqs = neqs;
	dev->eqs = kcalloc(neqs, sizeof(*dev->eqs), GFP_KERNEL);
	if (!dev->eqs)
		return -ENOMEM;

	for (i = 0; i < neqs; i++) {
		err = efa_create_eq(dev, &dev->eqs[i],
				    i + EFA_COMP_EQS_VEC_BASE);
		if (err)
			goto err_destroy_eqs;
	}

	return 0;

err_destroy_eqs:
	for (i--; i >= 0; i--)
		efa_destroy_eq(dev, &dev->eqs[i]);
	kfree(dev->eqs);

	return err;
}

static void efa_destroy_eqs(struct efa_dev *dev)
{
	int i;

	for (i = 0; i < dev->neqs; i++)
		efa_destroy_eq(dev, &dev->eqs[i]);

	kfree(dev->eqs);
}

static const struct ib_device_ops efa_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_EFA,
	.uverbs_abi_ver = EFA_UVERBS_ABI_VERSION,

	.alloc_hw_port_stats = efa_alloc_hw_port_stats,
	.alloc_hw_device_stats = efa_alloc_hw_device_stats,
	.alloc_pd = efa_alloc_pd,
	.alloc_ucontext = efa_alloc_ucontext,
	.create_cq = efa_create_cq,
	.create_qp = efa_create_qp,
	.create_user_ah = efa_create_ah,
	.dealloc_pd = efa_dealloc_pd,
	.dealloc_ucontext = efa_dealloc_ucontext,
	.dereg_mr = efa_dereg_mr,
	.destroy_ah = efa_destroy_ah,
	.destroy_cq = efa_destroy_cq,
	.destroy_qp = efa_destroy_qp,
	.get_hw_stats = efa_get_hw_stats,
	.get_link_layer = efa_port_link_layer,
	.get_port_immutable = efa_get_port_immutable,
	.mmap = efa_mmap,
	.mmap_free = efa_mmap_free,
	.modify_qp = efa_modify_qp,
	.query_device = efa_query_device,
	.query_gid = efa_query_gid,
	.query_pkey = efa_query_pkey,
	.query_port = efa_query_port,
	.query_qp = efa_query_qp,
	.reg_user_mr = efa_reg_mr,
	.reg_user_mr_dmabuf = efa_reg_user_mr_dmabuf,

	INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, efa_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_qp, efa_qp, ibqp),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, efa_ucontext, ibucontext),
};

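/*
 * Query device attributes and hints, create the EQs, and register the
 * IB device with the RDMA core.
 */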
static int efa_ib_device_add(struct efa_dev *dev)
{
	struct efa_com_get_hw_hints_result hw_hints;
	struct pci_dev *pdev = dev->pdev;
	int err;

	efa_stats_init(dev);

	err = efa_com_get_device_attr(&dev->edev, &dev->dev_attr);
	if (err)
		return err;

	dev_dbg(&dev->pdev->dev, "Doorbells bar (%d)\n", dev->dev_attr.db_bar);
	err = efa_request_doorbell_bar(dev);
	if (err)
		return err;

	err = efa_com_get_hw_hints(&dev->edev, &hw_hints);
	if (err)
		goto err_release_doorbell_bar;

	efa_update_hw_hints(dev, &hw_hints);

	/* Try to enable all the available aenq groups */
	err = efa_com_set_aenq_config(&dev->edev, EFA_AENQ_ENABLED_GROUPS);
	if (err)
		goto err_release_doorbell_bar;

	err = efa_create_eqs(dev);
	if (err)
		goto err_release_doorbell_bar;

	efa_set_host_info(dev);

	dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED;
	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->neqs ?: 1;
	dev->ibdev.dev.parent = &pdev->dev;

	ib_set_device_ops(&dev->ibdev, &efa_dev_ops);

	dev->ibdev.driver_def = efa_uapi_defs;

	err = ib_register_device(&dev->ibdev, "efa_%d", &pdev->dev);
	if (err)
		goto err_destroy_eqs;

	ibdev_info(&dev->ibdev, "IB device registered\n");

	return 0;

err_destroy_eqs:
	efa_destroy_eqs(dev);
err_release_doorbell_bar:
	efa_release_doorbell_bar(dev);
	return err;
}

static void efa_ib_device_remove(struct efa_dev *dev)
{
	ibdev_info(&dev->ibdev, "Unregister ib device\n");
	ib_unregister_device(&dev->ibdev);
	efa_destroy_eqs(dev);
	efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
	efa_release_doorbell_bar(dev);
}

static void efa_disable_msix(struct efa_dev *dev)
{
	pci_free_irq_vectors(dev->pdev);
}

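/*
 * Allocate MSI-X vectors: up to one per online CPU for completions, plus
 * one reserved for admin/management.
 */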
static int efa_enable_msix(struct efa_dev *dev)
{
	int max_vecs, num_vecs;

	/*
	 * Reserve the max msix vectors we might need, one vector is reserved
	 * for admin.
	 */
	max_vecs = min_t(int, pci_msix_vec_count(dev->pdev),
			 num_online_cpus() + 1);
	dev_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n",
		max_vecs);

	dev->admin_msix_vector_idx = EFA_MGMNT_MSIX_VEC_IDX;
	num_vecs = pci_alloc_irq_vectors(dev->pdev, 1,
					 max_vecs, PCI_IRQ_MSIX);

	if (num_vecs < 0) {
		dev_err(&dev->pdev->dev, "Failed to enable MSI-X. error %d\n",
			num_vecs);
		return -ENOSPC;
	}

	dev_dbg(&dev->pdev->dev, "Allocated %d MSI-X vectors\n", num_vecs);

	dev->num_irq_vectors = num_vecs;

	return 0;
}

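/* Reset the device, validate the interface version, and configure DMA masks. */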
static int efa_device_init(struct efa_com_dev *edev, struct pci_dev *pdev)
{
	int dma_width;
	int err;

	err = efa_com_dev_reset(edev, EFA_REGS_RESET_NORMAL);
	if (err)
		return err;

	err = efa_com_validate_version(edev);
	if (err)
		return err;

	dma_width = efa_com_get_dma_width(edev);
	if (dma_width < 0) {
		err = dma_width;
		return err;
	}

	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(dma_width));
	if (err) {
		dev_err(&pdev->dev, "dma_set_mask_and_coherent failed %d\n", err);
		return err;
	}

	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
	return 0;
}

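/*
 * Bring up the PCI function: enable it, map the register BAR, initialize
 * the device, and set up interrupts and the admin queue.
 */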
static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
{
	struct efa_com_dev *edev;
	struct efa_dev *dev;
	int bars;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
		return ERR_PTR(err);
	}

	pci_set_master(pdev);

	dev = ib_alloc_device(efa_dev, ibdev);
	if (!dev) {
		dev_err(&pdev->dev, "Device alloc failed\n");
		err = -ENOMEM;
		goto err_disable_device;
	}

	pci_set_drvdata(pdev, dev);
	edev = &dev->edev;
	edev->efa_dev = dev;
	edev->dmadev = &pdev->dev;
	dev->pdev = pdev;
	xa_init(&dev->cqs_xa);

	bars = pci_select_bars(pdev, IORESOURCE_MEM) & EFA_BASE_BAR_MASK;
	err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
			err);
		goto err_ibdev_destroy;
	}

	dev->reg_bar_addr = pci_resource_start(pdev, EFA_REG_BAR);
	dev->reg_bar_len = pci_resource_len(pdev, EFA_REG_BAR);
	dev->mem_bar_addr = pci_resource_start(pdev, EFA_MEM_BAR);
	dev->mem_bar_len = pci_resource_len(pdev, EFA_MEM_BAR);

	edev->reg_bar = devm_ioremap(&pdev->dev,
				     dev->reg_bar_addr,
				     dev->reg_bar_len);
	if (!edev->reg_bar) {
		dev_err(&pdev->dev, "Failed to remap register bar\n");
		err = -EFAULT;
		goto err_release_bars;
	}

	err = efa_com_mmio_reg_read_init(edev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init readless MMIO\n");
		goto err_iounmap;
	}

	err = efa_device_init(edev, pdev);
	if (err) {
		dev_err(&pdev->dev, "EFA device init failed\n");
		if (err == -ETIME)
			err = -EPROBE_DEFER;
		goto err_reg_read_destroy;
	}

	err = efa_enable_msix(dev);
	if (err)
		goto err_reg_read_destroy;

	edev->aq.msix_vector_idx = dev->admin_msix_vector_idx;
	edev->aenq.msix_vector_idx = dev->admin_msix_vector_idx;

	err = efa_set_mgmnt_irq(dev);
	if (err)
		goto err_disable_msix;

	err = efa_com_admin_init(edev, &aenq_handlers);
	if (err)
		goto err_free_mgmnt_irq;

	return dev;

err_free_mgmnt_irq:
	efa_free_irq(dev, &dev->admin_irq);
err_disable_msix:
	efa_disable_msix(dev);
err_reg_read_destroy:
	efa_com_mmio_reg_read_destroy(edev);
err_iounmap:
	devm_iounmap(&pdev->dev, edev->reg_bar);
err_release_bars:
	efa_release_bars(dev, EFA_BASE_BAR_MASK);
err_ibdev_destroy:
	ib_dealloc_device(&dev->ibdev);
err_disable_device:
	pci_disable_device(pdev);
	return ERR_PTR(err);
}

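/* Tear down everything set up by efa_probe_device(), in reverse order. */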
static void efa_remove_device(struct pci_dev *pdev)
{
	struct efa_dev *dev = pci_get_drvdata(pdev);
	struct efa_com_dev *edev;

	edev = &dev->edev;
	efa_com_admin_destroy(edev);
	efa_free_irq(dev, &dev->admin_irq);
	efa_disable_msix(dev);
	efa_com_mmio_reg_read_destroy(edev);
	devm_iounmap(&pdev->dev, edev->reg_bar);
	efa_release_bars(dev, EFA_BASE_BAR_MASK);
	xa_destroy(&dev->cqs_xa);
	ib_dealloc_device(&dev->ibdev);
	pci_disable_device(pdev);
}

static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct efa_dev *dev;
	int err;

	dev = efa_probe_device(pdev);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	err = efa_ib_device_add(dev);
	if (err)
		goto err_remove_device;

	return 0;

err_remove_device:
	efa_remove_device(pdev);
	return err;
}

static void efa_remove(struct pci_dev *pdev)
{
	struct efa_dev *dev = pci_get_drvdata(pdev);

	efa_ib_device_remove(dev);
	efa_remove_device(pdev);
}

static struct pci_driver efa_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = efa_pci_tbl,
	.probe = efa_probe,
	.remove = efa_remove,
};

module_pci_driver(efa_pci_driver);