/*
 * edac_mc kernel module
 * (C) 2005, 2006 Linux Networx (http://lnxi.com)
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Thayne Harbaugh
 * Based on work by Dan Hollis <goemon at anime dot net> and others.
 * http://www.anime.net/~goemon/linux-ecc/
 *
 * Modified by Dave Peterson and Doug Thompson
 *
 */

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/ctype.h>
#include <linux/edac.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include "edac_mc.h"
#include "edac_module.h"
#include <ras/ras_event.h>

#ifdef CONFIG_EDAC_ATOMIC_SCRUB
#include <asm/edac.h>
#else
#define edac_atomic_scrub(va, size) do { } while (0)
#endif

int edac_op_state = EDAC_OPSTATE_INVAL;
EXPORT_SYMBOL_GPL(edac_op_state);

/* lock to memory controller's control array */
static DEFINE_MUTEX(mem_ctls_mutex);
static LIST_HEAD(mc_devices);
/*
 * Used to lock EDAC MC to just one module, preventing two drivers,
 * e.g. apei/ghes and i7core_edac, from being used at the same time.
 */
static const char *edac_mc_owner;

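/* Get the mem_ctl_info that embeds the given error descriptor. */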
static struct mem_ctl_info *error_desc_to_mci(struct edac_raw_error_desc *e)
{
	return container_of(e, struct mem_ctl_info, error_desc);
}

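/*
 * edac_dimm_info_location - format the location of @dimm into @buf as
 * "<layer name> <index> " pairs, one pair per configured layer.
 * Returns the number of characters written.
 */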
unsigned int edac_dimm_info_location(struct dimm_info *dimm, char *buf,
				     unsigned int len)
{
	struct mem_ctl_info *mci = dimm->mci;
	int i, n, count = 0;
	char *p = buf;

	for (i = 0; i < mci->n_layers; i++) {
		n = scnprintf(p, len, "%s %d ",
			      edac_layer_name[mci->layers[i].type],
			      dimm->location[i]);
		p += n;
		len -= n;
		count += n;
	}

	return count;
}

#ifdef CONFIG_EDAC_DEBUG

static void edac_mc_dump_channel(struct rank_info *chan)
{
	edac_dbg(4, " channel->chan_idx = %d\n", chan->chan_idx);
	edac_dbg(4, " channel = %p\n", chan);
	edac_dbg(4, " channel->csrow = %p\n", chan->csrow);
	edac_dbg(4, " channel->dimm = %p\n", chan->dimm);
}

static void edac_mc_dump_dimm(struct dimm_info *dimm)
{
	char location[80];

	if (!dimm->nr_pages)
		return;

	edac_dimm_info_location(dimm, location, sizeof(location));

	edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n",
		 dimm->mci->csbased ? "rank" : "dimm",
		 dimm->idx, location, dimm->csrow, dimm->cschannel);
	edac_dbg(4, " dimm = %p\n", dimm);
	edac_dbg(4, " dimm->label = '%s'\n", dimm->label);
	edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages);
	edac_dbg(4, " dimm->grain = %d\n", dimm->grain);
}

static void edac_mc_dump_csrow(struct csrow_info *csrow)
{
	edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx);
	edac_dbg(4, " csrow = %p\n", csrow);
	edac_dbg(4, " csrow->first_page = 0x%lx\n", csrow->first_page);
	edac_dbg(4, " csrow->last_page = 0x%lx\n", csrow->last_page);
	edac_dbg(4, " csrow->page_mask = 0x%lx\n", csrow->page_mask);
	edac_dbg(4, " csrow->nr_channels = %d\n", csrow->nr_channels);
	edac_dbg(4, " csrow->channels = %p\n", csrow->channels);
	edac_dbg(4, " csrow->mci = %p\n", csrow->mci);
}

static void edac_mc_dump_mci(struct mem_ctl_info *mci)
{
	edac_dbg(3, "\tmci = %p\n", mci);
	edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap);
	edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap);
	edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap);
	edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check);
	edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n",
		 mci->nr_csrows, mci->csrows);
	edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n",
		 mci->tot_dimms, mci->dimms);
	edac_dbg(3, "\tdev = %p\n", mci->pdev);
	edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n",
		 mci->mod_name, mci->ctl_name);
	edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info);
}

#endif /* CONFIG_EDAC_DEBUG */

const char * const edac_mem_types[] = {
	[MEM_EMPTY]	= "Empty",
	[MEM_RESERVED]	= "Reserved",
	[MEM_UNKNOWN]	= "Unknown",
	[MEM_FPM]	= "FPM",
	[MEM_EDO]	= "EDO",
	[MEM_BEDO]	= "BEDO",
	[MEM_SDR]	= "Unbuffered-SDR",
	[MEM_RDR]	= "Registered-SDR",
	[MEM_DDR]	= "Unbuffered-DDR",
	[MEM_RDDR]	= "Registered-DDR",
	[MEM_RMBS]	= "RMBS",
	[MEM_DDR2]	= "Unbuffered-DDR2",
	[MEM_FB_DDR2]	= "FullyBuffered-DDR2",
	[MEM_RDDR2]	= "Registered-DDR2",
	[MEM_XDR]	= "XDR",
	[MEM_DDR3]	= "Unbuffered-DDR3",
	[MEM_RDDR3]	= "Registered-DDR3",
	[MEM_LRDDR3]	= "Load-Reduced-DDR3-RAM",
	[MEM_LPDDR3]	= "Low-Power-DDR3-RAM",
	[MEM_DDR4]	= "Unbuffered-DDR4",
	[MEM_RDDR4]	= "Registered-DDR4",
	[MEM_LPDDR4]	= "Low-Power-DDR4-RAM",
	[MEM_LRDDR4]	= "Load-Reduced-DDR4-RAM",
	[MEM_DDR5]	= "Unbuffered-DDR5",
	[MEM_RDDR5]	= "Registered-DDR5",
	[MEM_LRDDR5]	= "Load-Reduced-DDR5-RAM",
	[MEM_NVDIMM]	= "Non-volatile-RAM",
	[MEM_WIO2]	= "Wide-IO-2",
	[MEM_HBM2]	= "High-bandwidth-memory-Gen2",
	[MEM_HBM3]	= "High-bandwidth-memory-Gen3",
};
EXPORT_SYMBOL_GPL(edac_mem_types);

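/*
 * Drop a reference on the embedded struct device; when it is the last
 * one, mci_release() below frees the whole mem_ctl_info.
 */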
static void _edac_mc_free(struct mem_ctl_info *mci)
{
	put_device(&mci->dev);
}

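/* Device release callback: frees the mci and everything hanging off it. */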
static void mci_release(struct device *dev)
{
	struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
	struct csrow_info *csr;
	int i, chn, row;

	if (mci->dimms) {
		for (i = 0; i < mci->tot_dimms; i++)
			kfree(mci->dimms[i]);
		kfree(mci->dimms);
	}

	if (mci->csrows) {
		for (row = 0; row < mci->nr_csrows; row++) {
			csr = mci->csrows[row];
			if (!csr)
				continue;

			if (csr->channels) {
				for (chn = 0; chn < mci->num_cschannel; chn++)
					kfree(csr->channels[chn]);
				kfree(csr->channels);
			}
			kfree(csr);
		}
		kfree(mci->csrows);
	}
	kfree(mci->pvt_info);
	kfree(mci->layers);
	kfree(mci);
}

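/*
 * Allocate the csrow and per-csrow channel arrays for @mci.  On failure,
 * whatever was allocated so far is left for mci_release() to clean up.
 */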
static int edac_mc_alloc_csrows(struct mem_ctl_info *mci)
{
	unsigned int tot_channels = mci->num_cschannel;
	unsigned int tot_csrows = mci->nr_csrows;
	unsigned int row, chn;

	/*
	 * Allocate and fill the csrow/channels structs
	 */
	mci->csrows = kcalloc(tot_csrows, sizeof(*mci->csrows), GFP_KERNEL);
	if (!mci->csrows)
		return -ENOMEM;

	for (row = 0; row < tot_csrows; row++) {
		struct csrow_info *csr;

		csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL);
		if (!csr)
			return -ENOMEM;

		mci->csrows[row] = csr;
		csr->csrow_idx = row;
		csr->mci = mci;
		csr->nr_channels = tot_channels;
		csr->channels = kcalloc(tot_channels, sizeof(*csr->channels),
					GFP_KERNEL);
		if (!csr->channels)
			return -ENOMEM;

		for (chn = 0; chn < tot_channels; chn++) {
			struct rank_info *chan;

			chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL);
			if (!chan)
				return -ENOMEM;

			csr->channels[chn] = chan;
			chan->chan_idx = chn;
			chan->csrow = csr;
		}
	}

	return 0;
}

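/*
 * Allocate the dimm structs, fill in their labels and per-layer
 * locations, and link them to the legacy csrow/channel entries.
 */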
static int edac_mc_alloc_dimms(struct mem_ctl_info *mci)
{
	unsigned int pos[EDAC_MAX_LAYERS];
	unsigned int row, chn, idx;
	int layer;
	void *p;

	/*
	 * Allocate and fill the dimm structs
	 */
	mci->dimms = kcalloc(mci->tot_dimms, sizeof(*mci->dimms), GFP_KERNEL);
	if (!mci->dimms)
		return -ENOMEM;

	memset(&pos, 0, sizeof(pos));
	row = 0;
	chn = 0;
	for (idx = 0; idx < mci->tot_dimms; idx++) {
		struct dimm_info *dimm;
		struct rank_info *chan;
		int n, len;

		chan = mci->csrows[row]->channels[chn];

		dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL);
		if (!dimm)
			return -ENOMEM;
		mci->dimms[idx] = dimm;
		dimm->mci = mci;
		dimm->idx = idx;

		/*
		 * Copy DIMM location and initialize it.
		 */
		len = sizeof(dimm->label);
		p = dimm->label;
		n = scnprintf(p, len, "mc#%u", mci->mc_idx);
		p += n;
		len -= n;
		for (layer = 0; layer < mci->n_layers; layer++) {
			n = scnprintf(p, len, "%s#%u",
				      edac_layer_name[mci->layers[layer].type],
				      pos[layer]);
			p += n;
			len -= n;
			dimm->location[layer] = pos[layer];
		}

		/* Link it to the csrows old API data */
		chan->dimm = dimm;
		dimm->csrow = row;
		dimm->cschannel = chn;

		/* Increment csrow location */
		if (mci->layers[0].is_virt_csrow) {
			chn++;
			if (chn == mci->num_cschannel) {
				chn = 0;
				row++;
			}
		} else {
			row++;
			if (row == mci->nr_csrows) {
				row = 0;
				chn++;
			}
		}

		/* Increment dimm location */
		for (layer = mci->n_layers - 1; layer >= 0; layer--) {
			pos[layer]++;
			if (pos[layer] < mci->layers[layer].size)
				break;
			pos[layer] = 0;
		}
	}

	return 0;
}

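/*
 * edac_mc_alloc - allocate and initialize a struct mem_ctl_info
 * @mc_num:	index of the MC being allocated
 * @n_layers:	number of entries in @layers (at most EDAC_MAX_LAYERS)
 * @layers:	description of each memory hierarchy layer
 * @sz_pvt:	size of the driver-private data to allocate with the mci
 *
 * Returns the new mci on success, or NULL on allocation failure or an
 * invalid @n_layers.  Release it with edac_mc_free().
 */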
struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num,
				   unsigned int n_layers,
				   struct edac_mc_layer *layers,
				   unsigned int sz_pvt)
{
	struct mem_ctl_info *mci;
	struct edac_mc_layer *layer;
	unsigned int idx, tot_dimms = 1;
	unsigned int tot_csrows = 1, tot_channels = 1;
	bool per_rank = false;

	if (WARN_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0))
		return NULL;

	/*
	 * Calculate the total amount of dimms and csrows/cschannels while
	 * in the old API emulation mode
	 */
	for (idx = 0; idx < n_layers; idx++) {
		tot_dimms *= layers[idx].size;

		if (layers[idx].is_virt_csrow)
			tot_csrows *= layers[idx].size;
		else
			tot_channels *= layers[idx].size;

		if (layers[idx].type == EDAC_MC_LAYER_CHIP_SELECT)
			per_rank = true;
	}

	mci = kzalloc(sizeof(struct mem_ctl_info), GFP_KERNEL);
	if (!mci)
		return NULL;

	mci->layers = kcalloc(n_layers, sizeof(struct edac_mc_layer), GFP_KERNEL);
	if (!mci->layers)
		goto error;

	mci->pvt_info = kzalloc(sz_pvt, GFP_KERNEL);
	if (!mci->pvt_info)
		goto error;

	mci->dev.release = mci_release;
	device_initialize(&mci->dev);

	/* setup index and various internal pointers */
	mci->mc_idx = mc_num;
	mci->tot_dimms = tot_dimms;
	mci->n_layers = n_layers;
	memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
	mci->nr_csrows = tot_csrows;
	mci->num_cschannel = tot_channels;
	mci->csbased = per_rank;

	if (edac_mc_alloc_csrows(mci))
		goto error;

	if (edac_mc_alloc_dimms(mci))
		goto error;

	mci->op_state = OP_ALLOC;

	return mci;

error:
	_edac_mc_free(mci);

	return NULL;
}
EXPORT_SYMBOL_GPL(edac_mc_alloc);

void edac_mc_free(struct mem_ctl_info *mci)
{
	edac_dbg(1, "\n");

	_edac_mc_free(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);

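/* Return true if at least one memory controller is registered. */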
bool edac_has_mcs(void)
{
	bool ret;

	mutex_lock(&mem_ctls_mutex);

	ret = list_empty(&mc_devices);

	mutex_unlock(&mem_ctls_mutex);

	return !ret;
}
EXPORT_SYMBOL_GPL(edac_has_mcs);

/* Caller must hold mem_ctls_mutex */
static struct mem_ctl_info *__find_mci_by_dev(struct device *dev)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	edac_dbg(3, "\n");

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->pdev == dev)
			return mci;
	}

	return NULL;
}

/**
 * find_mci_by_dev - scan the list of controllers looking for the one
 *	that manages the 'dev' device
 * @dev: pointer to a struct device related with the MCI
 */
struct mem_ctl_info *find_mci_by_dev(struct device *dev)
{
	struct mem_ctl_info *ret;

	mutex_lock(&mem_ctls_mutex);
	ret = __find_mci_by_dev(dev);
	mutex_unlock(&mem_ctls_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(find_mci_by_dev);

/*
 * edac_mc_workq_function
 *	performs the operation scheduled by a workq request
 */
static void edac_mc_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work);

	mutex_lock(&mem_ctls_mutex);

	if (mci->op_state != OP_RUNNING_POLL) {
		mutex_unlock(&mem_ctls_mutex);
		return;
	}

	if (edac_op_state == EDAC_OPSTATE_POLL)
		mci->edac_check(mci);

	mutex_unlock(&mem_ctls_mutex);

	/* Queue ourselves again. */
	edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));
}

/*
 * edac_mc_reset_delay_period(unsigned long value)
 *
 *	user space has updated our poll period value, need to
 *	reset our workq delays
 */
void edac_mc_reset_delay_period(unsigned long value)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	mutex_lock(&mem_ctls_mutex);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);

		if (mci->op_state == OP_RUNNING_POLL)
			edac_mod_work(&mci->work, value);
	}
	mutex_unlock(&mem_ctls_mutex);
}

/* Return 0 on success, 1 on failure.
 * Before calling this function, caller must
 * assign a unique value to mci->mc_idx.
 *
 *	locking model:
 *
 *		called with the mem_ctls_mutex lock held
 */
static int add_mc_to_global_list(struct mem_ctl_info *mci)
{
	struct list_head *item, *insert_before;
	struct mem_ctl_info *p;

	insert_before = &mc_devices;

	p = __find_mci_by_dev(mci->pdev);
	if (unlikely(p != NULL))
		goto fail0;

	list_for_each(item, &mc_devices) {
		p = list_entry(item, struct mem_ctl_info, link);

		if (p->mc_idx >= mci->mc_idx) {
			if (unlikely(p->mc_idx == mci->mc_idx))
				goto fail1;

			insert_before = item;
			break;
		}
	}

	list_add_tail_rcu(&mci->link, insert_before);
	return 0;

fail0:
	edac_printk(KERN_WARNING, EDAC_MC,
		"%s (%s) %s %s already assigned %d\n", dev_name(p->pdev),
		edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx);
	return 1;

fail1:
	edac_printk(KERN_WARNING, EDAC_MC,
		"bug in low-level driver: attempt to assign\n"
		"    duplicate mc_idx %d in %s()\n", p->mc_idx, __func__);
	return 1;
}

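/*
 * Remove @mci from the global list.  Returns true when the list is
 * empty afterwards, i.e. @mci was the last registered controller.
 */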
static int del_mc_from_global_list(struct mem_ctl_info *mci)
{
	list_del_rcu(&mci->link);

	/* these are for safe removal of devices from global list while
	 * NMI handlers may be traversing list
	 */
	synchronize_rcu();
	INIT_LIST_HEAD(&mci->link);

	return list_empty(&mc_devices);
}

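/* Look up a registered controller by its mc_idx; NULL if none matches. */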
struct mem_ctl_info *edac_mc_find(int idx)
{
	struct mem_ctl_info *mci;
	struct list_head *item;

	mutex_lock(&mem_ctls_mutex);

	list_for_each(item, &mc_devices) {
		mci = list_entry(item, struct mem_ctl_info, link);
		if (mci->mc_idx == idx)
			goto unlock;
	}

	mci = NULL;
unlock:
	mutex_unlock(&mem_ctls_mutex);
	return mci;
}
EXPORT_SYMBOL(edac_mc_find);

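/* Return the name of the module currently owning the EDAC MC layer. */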
const char *edac_get_owner(void)
{
	return edac_mc_owner;
}
EXPORT_SYMBOL_GPL(edac_get_owner);

/* FIXME - should a warning be printed if no error detection? correction? */
int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci,
			       const struct attribute_group **groups)
{
	int ret = -EINVAL;

	edac_dbg(0, "\n");

#ifdef CONFIG_EDAC_DEBUG
	if (edac_debug_level >= 3)
		edac_mc_dump_mci(mci);

	if (edac_debug_level >= 4) {
		struct dimm_info *dimm;
		int i;

		for (i = 0; i < mci->nr_csrows; i++) {
			struct csrow_info *csrow = mci->csrows[i];
			u32 nr_pages = 0;
			int j;

			for (j = 0; j < csrow->nr_channels; j++)
				nr_pages += csrow->channels[j]->dimm->nr_pages;
			if (!nr_pages)
				continue;
			edac_mc_dump_csrow(csrow);
			for (j = 0; j < csrow->nr_channels; j++)
				if (csrow->channels[j]->dimm->nr_pages)
					edac_mc_dump_channel(csrow->channels[j]);
		}

		mci_for_each_dimm(mci, dimm)
			edac_mc_dump_dimm(dimm);
	}
#endif
	mutex_lock(&mem_ctls_mutex);

	if (edac_mc_owner && edac_mc_owner != mci->mod_name) {
		ret = -EPERM;
		goto fail0;
	}

	if (add_mc_to_global_list(mci))
		goto fail0;

	/* set load time so that error rate can be tracked */
	mci->start_time = jiffies;

	mci->bus = edac_get_sysfs_subsys();

	if (edac_create_sysfs_mci_device(mci, groups)) {
		edac_mc_printk(mci, KERN_WARNING,
			"failed to create sysfs device\n");
		goto fail1;
	}

	if (mci->edac_check) {
		mci->op_state = OP_RUNNING_POLL;

		INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
		edac_queue_work(&mci->work, msecs_to_jiffies(edac_mc_get_poll_msec()));

	} else {
		mci->op_state = OP_RUNNING_INTERRUPT;
	}

	/* Report action taken */
	edac_mc_printk(mci, KERN_INFO,
		"Giving out device to module %s controller %s: DEV %s (%s)\n",
		mci->mod_name, mci->ctl_name, mci->dev_name,
		edac_op_state_to_string(mci->op_state));

	edac_mc_owner = mci->mod_name;

	mutex_unlock(&mem_ctls_mutex);
	return 0;

fail1:
	del_mc_from_global_list(mci);

fail0:
	mutex_unlock(&mem_ctls_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(edac_mc_add_mc_with_groups);

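/*
 * edac_mc_del_mc - unregister the controller that manages @dev.
 * Marks it offline, drops it from the global list, stops any polling
 * work and removes its sysfs entries.  Returns the mci, or NULL if
 * @dev has no registered controller.
 */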
struct mem_ctl_info *edac_mc_del_mc(struct device *dev)
{
	struct mem_ctl_info *mci;

	edac_dbg(0, "\n");

	mutex_lock(&mem_ctls_mutex);

	/* find the requested mci struct in the global list */
	mci = __find_mci_by_dev(dev);
	if (mci == NULL) {
		mutex_unlock(&mem_ctls_mutex);
		return NULL;
	}

	/* mark MCI offline: */
	mci->op_state = OP_OFFLINE;

	if (del_mc_from_global_list(mci))
		edac_mc_owner = NULL;

	mutex_unlock(&mem_ctls_mutex);

	if (mci->edac_check)
		edac_stop_work(&mci->work);

	/* remove from sysfs */
	edac_remove_sysfs_mci_device(mci);

	edac_printk(KERN_INFO, EDAC_MC,
		"Removed device %d for %s %s: DEV %s\n", mci->mc_idx,
		mci->mod_name, mci->ctl_name, edac_dev_name(mci));

	return mci;
}
EXPORT_SYMBOL_GPL(edac_mc_del_mc);

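/*
 * Map the page holding a corrected error and run the architecture's
 * atomic scrub operation over the affected bytes.
 */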
static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
				u32 size)
{
	struct page *pg;
	void *virt_addr;
	unsigned long flags = 0;

	edac_dbg(3, "\n");

	/* ECC error page was not in our memory. Ignore it. */
	if (!pfn_valid(page))
		return;

	/* Find the actual page structure then map it and fix */
	pg = pfn_to_page(page);

	if (PageHighMem(pg))
		local_irq_save(flags);

	virt_addr = kmap_atomic(pg);

	/* Perform architecture specific atomic scrub operation */
	edac_atomic_scrub(virt_addr + offset, size);

	/* Unmap and complete */
	kunmap_atomic(virt_addr);

	if (PageHighMem(pg))
		local_irq_restore(flags);
}

/* FIXME - should return -1 */
int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
{
	struct csrow_info **csrows = mci->csrows;
	int row, i, j, n;

	edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page);
	row = -1;

	for (i = 0; i < mci->nr_csrows; i++) {
		struct csrow_info *csrow = csrows[i];
		n = 0;
		for (j = 0; j < csrow->nr_channels; j++) {
			struct dimm_info *dimm = csrow->channels[j]->dimm;
			n += dimm->nr_pages;
		}
		if (n == 0)
			continue;

		edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n",
			 mci->mc_idx,
			 csrow->first_page, page, csrow->last_page,
			 csrow->page_mask);

		if ((page >= csrow->first_page) &&
		    (page <= csrow->last_page) &&
		    ((page & csrow->page_mask) ==
		     (csrow->first_page & csrow->page_mask))) {
			row = i;
			break;
		}
	}

	if (row == -1)
		edac_mc_printk(mci, KERN_ERR,
			"could not look up page error address %lx\n",
			(unsigned long)page);

	return row;
}
EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);

const char *edac_layer_name[] = {
	[EDAC_MC_LAYER_BRANCH] = "branch",
	[EDAC_MC_LAYER_CHANNEL] = "channel",
	[EDAC_MC_LAYER_SLOT] = "slot",
	[EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
	[EDAC_MC_LAYER_ALL_MEM] = "memory",
};
EXPORT_SYMBOL_GPL(edac_layer_name);

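/* Bump the corrected-error counters for the MC and, if known, the DIMM. */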
static void edac_inc_ce_error(struct edac_raw_error_desc *e)
{
	int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
	struct mem_ctl_info *mci = error_desc_to_mci(e);
	struct dimm_info *dimm = edac_get_dimm(mci, pos[0], pos[1], pos[2]);

	mci->ce_mc += e->error_count;

	if (dimm)
		dimm->ce_count += e->error_count;
	else
		mci->ce_noinfo_count += e->error_count;
}

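/* Bump the uncorrected-error counters for the MC and, if known, the DIMM. */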
static void edac_inc_ue_error(struct edac_raw_error_desc *e)
{
	int pos[EDAC_MAX_LAYERS] = { e->top_layer, e->mid_layer, e->low_layer };
	struct mem_ctl_info *mci = error_desc_to_mci(e);
	struct dimm_info *dimm = edac_get_dimm(mci, pos[0], pos[1], pos[2]);

	mci->ue_mc += e->error_count;

	if (dimm)
		dimm->ue_count += e->error_count;
	else
		mci->ue_noinfo_count += e->error_count;
}

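/*
 * Log a corrected error, update the CE counters and, when running in
 * SCRUB_SW_SRC mode, software-scrub the affected memory.
 */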
static void edac_ce_error(struct edac_raw_error_desc *e)
{
	struct mem_ctl_info *mci = error_desc_to_mci(e);
	unsigned long remapped_page;

	if (edac_mc_get_log_ce()) {
		edac_mc_printk(mci, KERN_WARNING,
			"%d CE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx%s%s)\n",
			e->error_count, e->msg,
			*e->msg ? " " : "",
			e->label, e->location, e->page_frame_number, e->offset_in_page,
			e->grain, e->syndrome,
			*e->other_detail ? " - " : "",
			e->other_detail);
	}

	edac_inc_ce_error(e);

	if (mci->scrub_mode == SCRUB_SW_SRC) {
		/*
		 * Some memory controllers (called MCs below) can remap
		 * memory so that it is still available at a different
		 * address when PCI devices map into memory.
		 * MCs that can't do this lose the memory where PCI
		 * devices are mapped. This mapping is MC-dependent
		 * and so we call back into the MC driver for it to
		 * map the MC page to a physical (CPU) page which can
		 * then be mapped to a virtual page - which can then
		 * be scrubbed.
		 */
		remapped_page = mci->ctl_page_to_phys ?
			mci->ctl_page_to_phys(mci, e->page_frame_number) :
			e->page_frame_number;

		edac_mc_scrub_block(remapped_page, e->offset_in_page, e->grain);
	}
}

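/* Log an uncorrected error, update the UE counters and panic if panic_on_ue is set. */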
static void edac_ue_error(struct edac_raw_error_desc *e)
{
	struct mem_ctl_info *mci = error_desc_to_mci(e);

	if (edac_mc_get_log_ue()) {
		edac_mc_printk(mci, KERN_WARNING,
			"%d UE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld%s%s)\n",
			e->error_count, e->msg,
			*e->msg ? " " : "",
			e->label, e->location, e->page_frame_number, e->offset_in_page,
			e->grain,
			*e->other_detail ? " - " : "",
			e->other_detail);
	}

	edac_inc_ue_error(e);

	if (edac_mc_get_panic_on_ue()) {
		panic("UE %s%son %s (%s page:0x%lx offset:0x%lx grain:%ld%s%s)\n",
			e->msg,
			*e->msg ? " " : "",
			e->label, e->location, e->page_frame_number, e->offset_in_page,
			e->grain,
			*e->other_detail ? " - " : "",
			e->other_detail);
	}
}

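/* Update the legacy csrow/channel counters for the compat API. */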
static void edac_inc_csrow(struct edac_raw_error_desc *e, int row, int chan)
{
	struct mem_ctl_info *mci = error_desc_to_mci(e);
	enum hw_event_mc_err_type type = e->type;
	u16 count = e->error_count;

	if (row < 0)
		return;

	edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan);

	if (type == HW_EVENT_ERR_CORRECTED) {
		mci->csrows[row]->ce_count += count;
		if (chan >= 0)
			mci->csrows[row]->channels[chan]->ce_count += count;
	} else {
		mci->csrows[row]->ue_count += count;
	}
}

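/*
 * edac_raw_mc_handle_error - report an error described by a filled-in
 * struct edac_raw_error_desc: emit the trace event and update the
 * CE/UE accounting.
 */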
void edac_raw_mc_handle_error(struct edac_raw_error_desc *e)
{
	struct mem_ctl_info *mci = error_desc_to_mci(e);
	u8 grain_bits;

	/* Sanity-check driver-supplied grain value. */
	if (WARN_ON_ONCE(!e->grain))
		e->grain = 1;

	grain_bits = fls_long(e->grain - 1);

	/* Report the error via the trace interface */
	if (IS_ENABLED(CONFIG_RAS))
		trace_mc_event(e->type, e->msg, e->label, e->error_count,
			       mci->mc_idx, e->top_layer, e->mid_layer,
			       e->low_layer,
			       (e->page_frame_number << PAGE_SHIFT) | e->offset_in_page,
			       grain_bits, e->syndrome, e->other_detail);

	if (e->type == HW_EVENT_ERR_CORRECTED)
		edac_ce_error(e);
	else
		edac_ue_error(e);
}
EXPORT_SYMBOL_GPL(edac_raw_mc_handle_error);

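/*
 * edac_mc_handle_error - main error-reporting entry point for MC drivers.
 * Fills the error descriptor embedded in @mci from the arguments,
 * resolves the affected DIMM label(s) and location, updates the
 * counters and hands the result to edac_raw_mc_handle_error().
 * A negative layer index means "unknown" for that layer.
 */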
void edac_mc_handle_error(const enum hw_event_mc_err_type type,
			  struct mem_ctl_info *mci,
			  const u16 error_count,
			  const unsigned long page_frame_number,
			  const unsigned long offset_in_page,
			  const unsigned long syndrome,
			  const int top_layer,
			  const int mid_layer,
			  const int low_layer,
			  const char *msg,
			  const char *other_detail)
{
	struct dimm_info *dimm;
	char *p, *end;
	int row = -1, chan = -1;
	int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer };
	int i, n_labels = 0;
	struct edac_raw_error_desc *e = &mci->error_desc;
	bool any_memory = true;
	const char *prefix;

	edac_dbg(3, "MC%d\n", mci->mc_idx);

	/* Fills the error report buffer */
	memset(e, 0, sizeof(*e));
	e->error_count = error_count;
	e->type = type;
	e->top_layer = top_layer;
	e->mid_layer = mid_layer;
	e->low_layer = low_layer;
	e->page_frame_number = page_frame_number;
	e->offset_in_page = offset_in_page;
	e->syndrome = syndrome;
	/* need valid strings here for both: */
	e->msg = msg ?: "";
	e->other_detail = other_detail ?: "";

	/*
	 * Check if the event report is consistent and if the memory location is
	 * known. If it is, the DIMM(s) label info will be filled and the DIMM's
	 * error counters will be incremented.
	 */
	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] >= (int)mci->layers[i].size) {

			edac_mc_printk(mci, KERN_ERR,
				       "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
				       edac_layer_name[mci->layers[i].type],
				       pos[i], mci->layers[i].size);
			/*
			 * Instead of just returning it, let's use what's
			 * known about the error. The increment routines and
			 * the DIMM filter logic will do the right thing by
			 * pointing the likely damaged DIMMs.
			 */
			pos[i] = -1;
		}
		if (pos[i] >= 0)
			any_memory = false;
	}

	/*
	 * Get the dimm label/grain that applies to the match criteria.
	 * As the error algorithm may not be able to point to just one memory
	 * stick, the logic here will get all possible labels that could
	 * potentially be affected by the error.
	 * On FB-DIMM memory controllers, for uncorrected errors, it is common
	 * to have only the MC channel and the MC dimm (also called "branch")
	 * but the channel is not known, as the memory is arranged in pairs,
	 * where each memory belongs to a separate channel within the same
	 * branch.
	 */
	p = e->label;
	*p = '\0';
	end = p + sizeof(e->label);
	prefix = "";

	mci_for_each_dimm(mci, dimm) {
		if (top_layer >= 0 && top_layer != dimm->location[0])
			continue;
		if (mid_layer >= 0 && mid_layer != dimm->location[1])
			continue;
		if (low_layer >= 0 && low_layer != dimm->location[2])
			continue;

		/* get the max grain, over the error match range */
		if (dimm->grain > e->grain)
			e->grain = dimm->grain;

		/*
		 * If the error is memory-controller wide, there's no need to
		 * seek for the affected DIMMs because the whole channel/memory
		 * controller/... may be affected. Also, don't show errors for
		 * empty DIMM slots.
		 */
		if (!dimm->nr_pages)
			continue;

		n_labels++;
		if (n_labels > EDAC_MAX_LABELS) {
			p = e->label;
			*p = '\0';
		} else {
			p += scnprintf(p, end - p, "%s%s", prefix, dimm->label);
			prefix = OTHER_LABEL;
		}

		/*
		 * get csrow/channel of the DIMM, in order to allow
		 * incrementing the compat API counters
		 */
		edac_dbg(4, "%s csrows map: (%d,%d)\n",
			 mci->csbased ? "rank" : "dimm",
			 dimm->csrow, dimm->cschannel);
		if (row == -1)
			row = dimm->csrow;
		else if (row >= 0 && row != dimm->csrow)
			row = -2;

		if (chan == -1)
			chan = dimm->cschannel;
		else if (chan >= 0 && chan != dimm->cschannel)
			chan = -2;
	}

	if (any_memory)
		strscpy(e->label, "any memory", sizeof(e->label));
	else if (!*e->label)
		strscpy(e->label, "unknown memory", sizeof(e->label));

	edac_inc_csrow(e, row, chan);

	/* Fill the RAM location data */
	p = e->location;
	end = p + sizeof(e->location);
	prefix = "";

	for (i = 0; i < mci->n_layers; i++) {
		if (pos[i] < 0)
			continue;

		p += scnprintf(p, end - p, "%s%s:%d", prefix,
			       edac_layer_name[mci->layers[i].type], pos[i]);
		prefix = " ";
	}

	edac_raw_mc_handle_error(e);
}
EXPORT_SYMBOL_GPL(edac_mc_handle_error);