| 1 | // SPDX-License-Identifier: GPL-2.0-only |
| 2 | /* Copyright(c) 2023 Intel Corporation */ |
| 3 | #include <linux/device.h> |
| 4 | #include <linux/module.h> |
| 5 | #include <linux/pci.h> |
| 6 | |
| 7 | #include <adf_accel_devices.h> |
| 8 | #include <adf_gen4_hw_data.h> |
| 9 | #include <adf_gen4_config.h> |
| 10 | #include <adf_cfg.h> |
| 11 | #include <adf_common_drv.h> |
| 12 | #include <adf_dbgfs.h> |
| 13 | |
| 14 | #include "adf_420xx_hw_data.h" |
| 15 | |
/* PCI IDs this driver binds to: only the QAT 420xx endpoint device. */
static const struct pci_device_id adf_pci_tbl[] = {
	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_QAT_420XX) },
	{ }
};
| 20 | MODULE_DEVICE_TABLE(pci, adf_pci_tbl); |
| 21 | |
| 22 | static void adf_cleanup_accel(struct adf_accel_dev *accel_dev) |
| 23 | { |
| 24 | if (accel_dev->hw_device) { |
| 25 | adf_clean_hw_data_420xx(hw_data: accel_dev->hw_device); |
| 26 | accel_dev->hw_device = NULL; |
| 27 | } |
| 28 | adf_dbgfs_exit(accel_dev); |
| 29 | adf_cfg_dev_remove(accel_dev); |
| 30 | adf_devmgr_rm_dev(accel_dev, NULL); |
| 31 | } |
| 32 | |
| 33 | static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) |
| 34 | { |
| 35 | struct adf_accel_dev *accel_dev; |
| 36 | struct adf_accel_pci *accel_pci_dev; |
| 37 | struct adf_hw_device_data *hw_data; |
| 38 | unsigned int i, bar_nr; |
| 39 | unsigned long bar_mask; |
| 40 | struct adf_bar *bar; |
| 41 | int ret; |
| 42 | |
| 43 | if (num_possible_nodes() > 1 && dev_to_node(dev: &pdev->dev) < 0) { |
| 44 | /* |
| 45 | * If the accelerator is connected to a node with no memory |
| 46 | * there is no point in using the accelerator since the remote |
| 47 | * memory transaction will be very slow. |
| 48 | */ |
| 49 | dev_err(&pdev->dev, "Invalid NUMA configuration.\n" ); |
| 50 | return -EINVAL; |
| 51 | } |
| 52 | |
| 53 | accel_dev = devm_kzalloc(dev: &pdev->dev, size: sizeof(*accel_dev), GFP_KERNEL); |
| 54 | if (!accel_dev) |
| 55 | return -ENOMEM; |
| 56 | |
| 57 | INIT_LIST_HEAD(list: &accel_dev->crypto_list); |
| 58 | accel_pci_dev = &accel_dev->accel_pci_dev; |
| 59 | accel_pci_dev->pci_dev = pdev; |
| 60 | |
| 61 | /* |
| 62 | * Add accel device to accel table |
| 63 | * This should be called before adf_cleanup_accel is called |
| 64 | */ |
| 65 | if (adf_devmgr_add_dev(accel_dev, NULL)) { |
| 66 | dev_err(&pdev->dev, "Failed to add new accelerator device.\n" ); |
| 67 | return -EFAULT; |
| 68 | } |
| 69 | |
| 70 | accel_dev->owner = THIS_MODULE; |
| 71 | /* Allocate and initialise device hardware meta-data structure */ |
| 72 | hw_data = devm_kzalloc(dev: &pdev->dev, size: sizeof(*hw_data), GFP_KERNEL); |
| 73 | if (!hw_data) { |
| 74 | ret = -ENOMEM; |
| 75 | goto out_err; |
| 76 | } |
| 77 | |
| 78 | accel_dev->hw_device = hw_data; |
| 79 | adf_init_hw_data_420xx(hw_data: accel_dev->hw_device, dev_id: ent->device); |
| 80 | |
| 81 | pci_read_config_byte(dev: pdev, PCI_REVISION_ID, val: &accel_pci_dev->revid); |
| 82 | pci_read_config_dword(dev: pdev, ADF_GEN4_FUSECTL4_OFFSET, val: &hw_data->fuses[ADF_FUSECTL4]); |
| 83 | |
| 84 | /* Get Accelerators and Accelerators Engines masks */ |
| 85 | hw_data->accel_mask = hw_data->get_accel_mask(hw_data); |
| 86 | hw_data->ae_mask = hw_data->get_ae_mask(hw_data); |
| 87 | accel_pci_dev->sku = hw_data->get_sku(hw_data); |
| 88 | /* If the device has no acceleration engines then ignore it */ |
| 89 | if (!hw_data->accel_mask || !hw_data->ae_mask || |
| 90 | (~hw_data->ae_mask & 0x01)) { |
| 91 | dev_err(&pdev->dev, "No acceleration units found.\n" ); |
| 92 | ret = -EFAULT; |
| 93 | goto out_err; |
| 94 | } |
| 95 | |
| 96 | /* Create device configuration table */ |
| 97 | ret = adf_cfg_dev_add(accel_dev); |
| 98 | if (ret) |
| 99 | goto out_err; |
| 100 | |
| 101 | /* Enable PCI device */ |
| 102 | ret = pcim_enable_device(pdev); |
| 103 | if (ret) { |
| 104 | dev_err(&pdev->dev, "Can't enable PCI device.\n" ); |
| 105 | goto out_err; |
| 106 | } |
| 107 | |
| 108 | /* Set DMA identifier */ |
| 109 | ret = dma_set_mask_and_coherent(dev: &pdev->dev, DMA_BIT_MASK(64)); |
| 110 | if (ret) { |
| 111 | dev_err(&pdev->dev, "No usable DMA configuration.\n" ); |
| 112 | goto out_err; |
| 113 | } |
| 114 | |
| 115 | ret = adf_gen4_cfg_dev_init(accel_dev); |
| 116 | if (ret) { |
| 117 | dev_err(&pdev->dev, "Failed to initialize configuration.\n" ); |
| 118 | goto out_err; |
| 119 | } |
| 120 | |
| 121 | /* Get accelerator capabilities mask */ |
| 122 | hw_data->accel_capabilities_mask = hw_data->get_accel_cap(accel_dev); |
| 123 | if (!hw_data->accel_capabilities_mask) { |
| 124 | dev_err(&pdev->dev, "Failed to get capabilities mask.\n" ); |
| 125 | ret = -EINVAL; |
| 126 | goto out_err; |
| 127 | } |
| 128 | |
| 129 | /* Find and map all the device's BARS */ |
| 130 | bar_mask = pci_select_bars(dev: pdev, IORESOURCE_MEM) & ADF_GEN4_BAR_MASK; |
| 131 | |
| 132 | ret = pcim_request_all_regions(pdev, name: pci_name(pdev)); |
| 133 | if (ret) { |
| 134 | dev_err(&pdev->dev, "Failed to request PCI regions.\n" ); |
| 135 | goto out_err; |
| 136 | } |
| 137 | |
| 138 | i = 0; |
| 139 | for_each_set_bit(bar_nr, &bar_mask, PCI_STD_NUM_BARS) { |
| 140 | bar = &accel_pci_dev->pci_bars[i++]; |
| 141 | bar->virt_addr = pcim_iomap(pdev, bar: bar_nr, maxlen: 0); |
| 142 | if (!bar->virt_addr) { |
| 143 | dev_err(&pdev->dev, "Failed to ioremap PCI region.\n" ); |
| 144 | ret = -ENOMEM; |
| 145 | goto out_err; |
| 146 | } |
| 147 | } |
| 148 | |
| 149 | pci_set_master(dev: pdev); |
| 150 | |
| 151 | if (pci_save_state(dev: pdev)) { |
| 152 | dev_err(&pdev->dev, "Failed to save pci state.\n" ); |
| 153 | ret = -ENOMEM; |
| 154 | goto out_err; |
| 155 | } |
| 156 | |
| 157 | accel_dev->ras_errors.enabled = true; |
| 158 | adf_dbgfs_init(accel_dev); |
| 159 | |
| 160 | ret = adf_dev_up(accel_dev, init_config: true); |
| 161 | if (ret) |
| 162 | goto out_err_dev_stop; |
| 163 | |
| 164 | ret = adf_sysfs_init(accel_dev); |
| 165 | if (ret) |
| 166 | goto out_err_dev_stop; |
| 167 | |
| 168 | return ret; |
| 169 | |
| 170 | out_err_dev_stop: |
| 171 | adf_dev_down(accel_dev); |
| 172 | out_err: |
| 173 | adf_cleanup_accel(accel_dev); |
| 174 | return ret; |
| 175 | } |
| 176 | |
/*
 * adf_remove() - PCI remove callback.
 * @pdev: PCI device being removed
 *
 * Brings the device down and releases all state set up by adf_probe().
 */
static void adf_remove(struct pci_dev *pdev)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

	if (!accel_dev) {
		pr_err("QAT: Driver removal failed\n");
		return;
	}
	adf_dev_down(accel_dev);
	adf_cleanup_accel(accel_dev);
}
| 188 | |
/*
 * adf_shutdown() - PCI shutdown callback.
 * @pdev: PCI device being shut down
 *
 * Quiesces the device on system shutdown/reboot.
 *
 * NOTE(review): unlike adf_remove(), no NULL check on accel_dev here —
 * presumably adf_dev_down() tolerates NULL; confirm against its
 * implementation.
 */
static void adf_shutdown(struct pci_dev *pdev)
{
	struct adf_accel_dev *accel_dev = adf_devmgr_pci_to_accel_dev(pdev);

	adf_dev_down(accel_dev);
}
| 195 | |
/* PCI driver registration: probe/remove/shutdown plus SR-IOV and AER hooks. */
static struct pci_driver adf_driver = {
	.id_table = adf_pci_tbl,
	.name = ADF_420XX_DEVICE_NAME,
	.probe = adf_probe,
	.remove = adf_remove,
	.shutdown = adf_shutdown,
	.sriov_configure = adf_sriov_configure,
	.err_handler = &adf_err_handler,
};
| 205 | |
/* Standard module boilerplate: register the PCI driver and module metadata. */
module_pci_driver(adf_driver);

MODULE_LICENSE("GPL" );
MODULE_AUTHOR("Intel" );
/* Firmware images loaded at device init */
MODULE_FIRMWARE(ADF_420XX_FW);
MODULE_FIRMWARE(ADF_420XX_MMP);
MODULE_DESCRIPTION("Intel(R) QuickAssist Technology" );
MODULE_VERSION(ADF_DRV_VERSION);
MODULE_SOFTDEP("pre: crypto-intel_qat" );
MODULE_IMPORT_NS("CRYPTO_QAT" );
| 216 | |