/*
 * Created: Fri Jan 19 10:48:35 2001 by faith@acm.org
 *
 * Copyright 2001 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Author Rickard E. (Rik) Faith <faith@valinux.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>
#include <linux/slab.h>
#include <linux/srcu.h>

#include <drm/drm_accel.h>
#include <drm/drm_cache.h>
#include <drm/drm_client.h>
#include <drm/drm_color_mgmt.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_managed.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_print.h>
#include <drm/drm_privacy_screen_machine.h>

#include "drm_crtc_internal.h"
#include "drm_internal.h"
#include "drm_legacy.h"

MODULE_AUTHOR("Gareth Hughes, Leif Delgass, José Fonseca, Jon Smirl");
MODULE_DESCRIPTION("DRM shared core routines");
MODULE_LICENSE("GPL and additional rights");

static DEFINE_SPINLOCK(drm_minor_lock);
static struct idr drm_minors_idr;

/*
 * If the drm core fails to init for whatever reason,
 * we should prevent any drivers from registering with it.
 * It's best to check this at drm_dev_init(), as some drivers
 * prefer to embed struct drm_device into their own device
 * structure and call drm_dev_init() themselves.
 */
static bool drm_core_init_complete;

static struct dentry *drm_debugfs_root;

DEFINE_STATIC_SRCU(drm_unplug_srcu);

/*
 * DRM Minors
 * A DRM device can provide several char-dev interfaces on the DRM-Major. Each
 * of them is represented by a drm_minor object. Depending on the capabilities
 * of the device-driver, different interfaces are registered.
 *
 * Minors can be accessed via dev->$minor_name. This pointer is either
 * NULL or a valid drm_minor pointer and stays valid as long as the device is
 * valid. This means, DRM minors have the same life-time as the underlying
 * device. However, this doesn't mean that the minor is active. Minors are
 * registered and unregistered dynamically according to device-state.
 */
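
/*
 * A minimal usage sketch of the acquire/release pairing (this is the pattern
 * drm_stub_open() below follows): resolve a minor from a char-dev minor
 * number, use it, then drop the device reference again:
 *
 *	minor = drm_minor_acquire(iminor(inode));
 *	if (IS_ERR(minor))
 *		return PTR_ERR(minor);
 *	// ... use minor->dev ...
 *	drm_minor_release(minor);
 */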

static struct drm_minor **drm_minor_get_slot(struct drm_device *dev,
					     enum drm_minor_type type)
{
	switch (type) {
	case DRM_MINOR_PRIMARY:
		return &dev->primary;
	case DRM_MINOR_RENDER:
		return &dev->render;
	case DRM_MINOR_ACCEL:
		return &dev->accel;
	default:
		BUG();
	}
}

static void drm_minor_alloc_release(struct drm_device *dev, void *data)
{
	struct drm_minor *minor = data;
	unsigned long flags;

	WARN_ON(dev != minor->dev);

	put_device(minor->kdev);

	if (minor->type == DRM_MINOR_ACCEL) {
		accel_minor_remove(minor->index);
	} else {
		spin_lock_irqsave(&drm_minor_lock, flags);
		idr_remove(&drm_minors_idr, minor->index);
		spin_unlock_irqrestore(&drm_minor_lock, flags);
	}
}

static int drm_minor_alloc(struct drm_device *dev, enum drm_minor_type type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int r;

	minor = drmm_kzalloc(dev, sizeof(*minor), GFP_KERNEL);
	if (!minor)
		return -ENOMEM;

	minor->type = type;
	minor->dev = dev;

	idr_preload(GFP_KERNEL);
	if (type == DRM_MINOR_ACCEL) {
		r = accel_minor_alloc();
	} else {
		spin_lock_irqsave(&drm_minor_lock, flags);
		r = idr_alloc(&drm_minors_idr,
			      NULL,
			      64 * type,
			      64 * (type + 1),
			      GFP_NOWAIT);
		spin_unlock_irqrestore(&drm_minor_lock, flags);
	}
	idr_preload_end();

	if (r < 0)
		return r;

	minor->index = r;

	r = drmm_add_action_or_reset(dev, drm_minor_alloc_release, minor);
	if (r)
		return r;

	minor->kdev = drm_sysfs_minor_alloc(minor);
	if (IS_ERR(minor->kdev))
		return PTR_ERR(minor->kdev);

	*drm_minor_get_slot(dev, type) = minor;
	return 0;
}

static int drm_minor_register(struct drm_device *dev, enum drm_minor_type type)
{
	struct drm_minor *minor;
	unsigned long flags;
	int ret;

	DRM_DEBUG("\n");

	minor = *drm_minor_get_slot(dev, type);
	if (!minor)
		return 0;

	if (minor->type != DRM_MINOR_ACCEL) {
		ret = drm_debugfs_register(minor, minor->index,
					   drm_debugfs_root);
		if (ret) {
			DRM_ERROR("DRM: Failed to initialize /sys/kernel/debug/dri.\n");
			goto err_debugfs;
		}
	}

	ret = device_add(minor->kdev);
	if (ret)
		goto err_debugfs;

	/* replace NULL with @minor so lookups will succeed from now on */
	if (minor->type == DRM_MINOR_ACCEL) {
		accel_minor_replace(minor, minor->index);
	} else {
		spin_lock_irqsave(&drm_minor_lock, flags);
		idr_replace(&drm_minors_idr, minor, minor->index);
		spin_unlock_irqrestore(&drm_minor_lock, flags);
	}

	DRM_DEBUG("new minor registered %d\n", minor->index);
	return 0;

err_debugfs:
	drm_debugfs_unregister(minor);
	return ret;
}

static void drm_minor_unregister(struct drm_device *dev, enum drm_minor_type type)
{
	struct drm_minor *minor;
	unsigned long flags;

	minor = *drm_minor_get_slot(dev, type);
	if (!minor || !device_is_registered(minor->kdev))
		return;

	/* replace @minor with NULL so lookups will fail from now on */
	if (minor->type == DRM_MINOR_ACCEL) {
		accel_minor_replace(NULL, minor->index);
	} else {
		spin_lock_irqsave(&drm_minor_lock, flags);
		idr_replace(&drm_minors_idr, NULL, minor->index);
		spin_unlock_irqrestore(&drm_minor_lock, flags);
	}

	device_del(minor->kdev);
	dev_set_drvdata(minor->kdev, NULL); /* safety belt */
	drm_debugfs_unregister(minor);
}
/*
 * Looks up the given minor-ID and returns the respective DRM-minor object. The
 * reference-count of the underlying device is increased so you must release this
 * object with drm_minor_release().
 *
 * As long as you hold this minor, it is guaranteed that the object and the
 * minor->dev pointer will stay valid! However, the device may get unplugged and
 * unregistered while you hold the minor.
 */
struct drm_minor *drm_minor_acquire(unsigned int minor_id)
{
	struct drm_minor *minor;
	unsigned long flags;

	spin_lock_irqsave(&drm_minor_lock, flags);
	minor = idr_find(&drm_minors_idr, minor_id);
	if (minor)
		drm_dev_get(minor->dev);
	spin_unlock_irqrestore(&drm_minor_lock, flags);

	if (!minor) {
		return ERR_PTR(-ENODEV);
	} else if (drm_dev_is_unplugged(minor->dev)) {
		drm_dev_put(minor->dev);
		return ERR_PTR(-ENODEV);
	}

	return minor;
}

void drm_minor_release(struct drm_minor *minor)
{
	drm_dev_put(minor->dev);
}

/**
 * DOC: driver instance overview
 *
 * A device instance for a drm driver is represented by &struct drm_device. This
 * is allocated and initialized with devm_drm_dev_alloc(), usually from
 * bus-specific ->probe() callbacks implemented by the driver. The driver then
 * needs to initialize all the various subsystems for the drm device like memory
 * management, vblank handling, modesetting support and initial output
 * configuration plus obviously initialize all the corresponding hardware bits.
 * Finally when everything is up and running and ready for userspace the device
 * instance can be published using drm_dev_register().
 *
 * There is also deprecated support for initializing device instances using
 * bus-specific helpers and the &drm_driver.load callback. But due to
 * backwards-compatibility needs the device instance has to be published too
 * early, which requires unpretty global locking to make safe and is therefore
 * only supported for existing drivers not yet converted to the new scheme.
 *
 * When cleaning up a device instance everything needs to be done in reverse:
 * First unpublish the device instance with drm_dev_unregister(). Then clean up
 * any other resources allocated at device initialization and drop the driver's
 * reference to &drm_device using drm_dev_put().
 *
 * Note that any allocation or resource which is visible to userspace must be
 * released only when the final drm_dev_put() is called, and not when the
 * driver is unbound from the underlying physical struct &device. Best to use
 * &drm_device managed resources with drmm_add_action(), drmm_kmalloc() and
 * related functions.
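 *
 * As a minimal sketch (my_state and my_fini are hypothetical driver-side
 * names, not part of the DRM API), a cleanup action tied to the lifetime of
 * the &drm_device can be registered like this:
 *
 * .. code-block:: c
 *
 *	static void my_fini(struct drm_device *drm, void *arg)
 *	{
 *		struct my_state *state = arg;
 *
 *		// Runs from the final drm_dev_put(), after userspace is gone.
 *		[...]
 *	}
 *
 *	ret = drmm_add_action_or_reset(drm, my_fini, state);
 *	if (ret)
 *		return ret;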
 *
 * devres managed resources like devm_kmalloc() can only be used for resources
 * directly related to the underlying hardware device, and only used in code
 * paths fully protected by drm_dev_enter() and drm_dev_exit().
 *
 * Display driver example
 * ~~~~~~~~~~~~~~~~~~~~~~
 *
 * The following example shows the typical structure of a DRM display driver.
 * It focuses on the probe() function and the other functions that are almost
 * always present, and serves as a demonstration of devm_drm_dev_alloc().
 *
 * .. code-block:: c
 *
 *	struct driver_device {
 *		struct drm_device drm;
 *		void *userspace_facing;
 *		struct clk *pclk;
 *	};
 *
 *	static const struct drm_driver driver_drm_driver = {
 *		[...]
 *	};
 *
 *	static int driver_probe(struct platform_device *pdev)
 *	{
 *		struct driver_device *priv;
 *		struct drm_device *drm;
 *		int ret;
 *
 *		priv = devm_drm_dev_alloc(&pdev->dev, &driver_drm_driver,
 *					  struct driver_device, drm);
 *		if (IS_ERR(priv))
 *			return PTR_ERR(priv);
 *		drm = &priv->drm;
 *
 *		ret = drmm_mode_config_init(drm);
 *		if (ret)
 *			return ret;
 *
 *		priv->userspace_facing = drmm_kzalloc(..., GFP_KERNEL);
 *		if (!priv->userspace_facing)
 *			return -ENOMEM;
 *
 *		priv->pclk = devm_clk_get(dev, "PCLK");
 *		if (IS_ERR(priv->pclk))
 *			return PTR_ERR(priv->pclk);
 *
 *		// Further setup, display pipeline etc
 *
 *		platform_set_drvdata(pdev, drm);
 *
 *		drm_mode_config_reset(drm);
 *
 *		ret = drm_dev_register(drm);
 *		if (ret)
 *			return ret;
 *
 *		drm_fbdev_generic_setup(drm, 32);
 *
 *		return 0;
 *	}
 *
 *	// This function is called before the devm_ resources are released
 *	static int driver_remove(struct platform_device *pdev)
 *	{
 *		struct drm_device *drm = platform_get_drvdata(pdev);
 *
 *		drm_dev_unregister(drm);
 *		drm_atomic_helper_shutdown(drm);
 *
 *		return 0;
 *	}
 *
 *	// This function is called on kernel restart and shutdown
 *	static void driver_shutdown(struct platform_device *pdev)
 *	{
 *		drm_atomic_helper_shutdown(platform_get_drvdata(pdev));
 *	}
 *
 *	static int __maybe_unused driver_pm_suspend(struct device *dev)
 *	{
 *		return drm_mode_config_helper_suspend(dev_get_drvdata(dev));
 *	}
 *
 *	static int __maybe_unused driver_pm_resume(struct device *dev)
 *	{
 *		drm_mode_config_helper_resume(dev_get_drvdata(dev));
 *
 *		return 0;
 *	}
 *
 *	static const struct dev_pm_ops driver_pm_ops = {
 *		SET_SYSTEM_SLEEP_PM_OPS(driver_pm_suspend, driver_pm_resume)
 *	};
 *
 *	static struct platform_driver driver_driver = {
 *		.driver = {
 *			[...]
 *			.pm = &driver_pm_ops,
 *		},
 *		.probe = driver_probe,
 *		.remove = driver_remove,
 *		.shutdown = driver_shutdown,
 *	};
 *	module_platform_driver(driver_driver);
 *
 * Drivers that want to support device unplugging (USB, DT overlay unload) should
 * use drm_dev_unplug() instead of drm_dev_unregister(). The driver must protect
 * regions that access device resources to prevent use after they're released.
 * This is done using drm_dev_enter() and drm_dev_exit(), as in the sketch below.
 * There is one shortcoming however: drm_dev_unplug() marks the drm_device as
 * unplugged before drm_atomic_helper_shutdown() is called. This means that if
 * the disable code paths are protected, they will not run on regular driver
 * module unload, possibly leaving the hardware enabled.
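 *
 * A minimal sketch of such a protected region:
 *
 * .. code-block:: c
 *
 *	int idx;
 *
 *	if (!drm_dev_enter(drm, &idx))
 *		return -ENODEV;
 *
 *	// Safe to touch hardware/device resources until drm_dev_exit().
 *	[...]
 *
 *	drm_dev_exit(idx);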
 */

/**
 * drm_put_dev - Unregister and release a DRM device
 * @dev: DRM device
 *
 * Called at module unload time or when a PCI device is unplugged.
 *
 * Cleans up the DRM device, calling drm_lastclose().
 *
 * Note: Use of this function is deprecated. It will eventually go away
 * completely. Please use drm_dev_unregister() and drm_dev_put() explicitly
 * instead to make sure that the device isn't userspace accessible any more
 * while teardown is in progress, ensuring that userspace can't access an
 * inconsistent state.
 */
void drm_put_dev(struct drm_device *dev)
{
	DRM_DEBUG("\n");

	if (!dev) {
		DRM_ERROR("cleanup called no dev\n");
		return;
	}

	drm_dev_unregister(dev);
	drm_dev_put(dev);
}
EXPORT_SYMBOL(drm_put_dev);

/**
 * drm_dev_enter - Enter device critical section
 * @dev: DRM device
 * @idx: Pointer to index that will be passed to the matching drm_dev_exit()
 *
 * This function marks and protects the beginning of a section that should not
 * be entered after the device has been unplugged. The section end is marked
 * with drm_dev_exit(). Calls to this function can be nested.
 *
 * Returns:
 * True if it is OK to enter the section, false otherwise.
 */
bool drm_dev_enter(struct drm_device *dev, int *idx)
{
	*idx = srcu_read_lock(&drm_unplug_srcu);

	if (dev->unplugged) {
		srcu_read_unlock(&drm_unplug_srcu, *idx);
		return false;
	}

	return true;
}
EXPORT_SYMBOL(drm_dev_enter);

/**
 * drm_dev_exit - Exit device critical section
 * @idx: index returned from drm_dev_enter()
 *
 * This function marks the end of a section that should not be entered after
 * the device has been unplugged.
 */
void drm_dev_exit(int idx)
{
	srcu_read_unlock(&drm_unplug_srcu, idx);
}
EXPORT_SYMBOL(drm_dev_exit);

/**
 * drm_dev_unplug - unplug a DRM device
 * @dev: DRM device
 *
 * This unplugs a hotpluggable DRM device, which makes it inaccessible to
 * userspace operations. Entry-points can use drm_dev_enter() and
 * drm_dev_exit() to protect device resources in a race free manner. This
 * essentially unregisters the device like drm_dev_unregister(), but can be
 * called while there are still open users of @dev.
 */
void drm_dev_unplug(struct drm_device *dev)
{
	/*
	 * After synchronizing any critical read section is guaranteed to see
	 * the new value of ->unplugged, and any critical section which might
	 * still have seen the old value of ->unplugged is guaranteed to have
	 * finished.
	 */
	dev->unplugged = true;
	synchronize_srcu(&drm_unplug_srcu);

	drm_dev_unregister(dev);

	/* Clear all CPU mappings pointing to this device */
	unmap_mapping_range(dev->anon_inode->i_mapping, 0, 0, 1);
}
EXPORT_SYMBOL(drm_dev_unplug);

/*
 * DRM internal mount
 * We want to be able to allocate our own "struct address_space" to control
 * memory-mappings in VRAM (or stolen RAM, ...). However, core MM does not allow
 * stand-alone address_space objects, so we need an underlying inode. As there
 * is no way to allocate an independent inode easily, we need a fake internal
 * VFS mount-point.
 *
 * The drm_fs_inode_new() function allocates a new inode, drm_fs_inode_free()
 * frees it again. You are allowed to use iget() and iput() to get references to
 * the inode. But each drm_fs_inode_new() call must be paired with exactly one
 * drm_fs_inode_free() call (which does not have to be the last iput()).
 * We use drm_fs_inode_*() to manage our internal VFS mount-point and share it
 * between multiple inode-users. You could, technically, call
 * iget() + drm_fs_inode_free() directly after alloc and sometime later do an
 * iput(), but this way you'd end up with a new vfsmount for each inode.
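 *
 * A minimal sketch of the intended pairing (illustrative only):
 *
 *	inode = drm_fs_inode_new();	// pins the internal mount
 *	// ... use inode->i_mapping as the backing address_space ...
 *	drm_fs_inode_free(inode);	// drops the inode and the mount pin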
 */

static int drm_fs_cnt;
static struct vfsmount *drm_fs_mnt;

static int drm_fs_init_fs_context(struct fs_context *fc)
{
	return init_pseudo(fc, 0x010203ff) ? 0 : -ENOMEM;
}

static struct file_system_type drm_fs_type = {
	.name = "drm",
	.owner = THIS_MODULE,
	.init_fs_context = drm_fs_init_fs_context,
	.kill_sb = kill_anon_super,
};

static struct inode *drm_fs_inode_new(void)
{
	struct inode *inode;
	int r;

	r = simple_pin_fs(&drm_fs_type, &drm_fs_mnt, &drm_fs_cnt);
	if (r < 0) {
		DRM_ERROR("Cannot mount pseudo fs: %d\n", r);
		return ERR_PTR(r);
	}

	inode = alloc_anon_inode(drm_fs_mnt->mnt_sb);
	if (IS_ERR(inode))
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);

	return inode;
}

static void drm_fs_inode_free(struct inode *inode)
{
	if (inode) {
		iput(inode);
		simple_release_fs(&drm_fs_mnt, &drm_fs_cnt);
	}
}

/**
 * DOC: component helper usage recommendations
 *
 * DRM drivers that drive hardware where a logical device consists of a pile of
 * independent hardware blocks are recommended to use the :ref:`component helper
 * library<component>`. For consistency and better options for code reuse the
 * following guidelines apply:
 *
 * - The entire device initialization procedure should be run from the
 *   &component_master_ops.master_bind callback, starting with
 *   devm_drm_dev_alloc(), then binding all components with
 *   component_bind_all() and finishing with drm_dev_register(), as shown in
 *   the sketch after this list.
 *
 * - The opaque pointer passed to all components through component_bind_all()
 *   should point at &struct drm_device of the device instance, not some driver
 *   specific private structure.
 *
 * - The component helper fills the niche where further standardization of
 *   interfaces is not practical. When there already is, or will be, a
 *   standardized interface like &drm_bridge or &drm_panel, providing its own
 *   functions to find such components at driver load time, like
 *   drm_of_find_panel_or_bridge(), then the component helper should not be
 *   used.
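 *
 * A minimal sketch of such a bind callback (hypothetical driver names;
 * unwinding of the bound components on failure is omitted for brevity):
 *
 * .. code-block:: c
 *
 *	static int my_master_bind(struct device *dev)
 *	{
 *		struct my_device *priv;
 *		int ret;
 *
 *		priv = devm_drm_dev_alloc(dev, &my_drm_driver,
 *					  struct my_device, drm);
 *		if (IS_ERR(priv))
 *			return PTR_ERR(priv);
 *
 *		ret = component_bind_all(dev, &priv->drm);
 *		if (ret)
 *			return ret;
 *
 *		return drm_dev_register(&priv->drm, 0);
 *	}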
 */

static void drm_dev_init_release(struct drm_device *dev, void *res)
{
	drm_legacy_ctxbitmap_cleanup(dev);
	drm_legacy_remove_map_hash(dev);
	drm_fs_inode_free(dev->anon_inode);

	put_device(dev->dev);
	/* Prevent use-after-free in drm_managed_release when debugging is
	 * enabled. Slightly awkward, but can't really be helped. */
	dev->dev = NULL;
	mutex_destroy(&dev->master_mutex);
	mutex_destroy(&dev->clientlist_mutex);
	mutex_destroy(&dev->filelist_mutex);
	mutex_destroy(&dev->struct_mutex);
	drm_legacy_destroy_members(dev);
}

static int drm_dev_init(struct drm_device *dev,
			const struct drm_driver *driver,
			struct device *parent)
{
	struct inode *inode;
	int ret;

	if (!drm_core_init_complete) {
		DRM_ERROR("DRM core is not initialized\n");
		return -ENODEV;
	}

	if (WARN_ON(!parent))
		return -EINVAL;

	kref_init(&dev->ref);
	dev->dev = get_device(parent);
	dev->driver = driver;

	INIT_LIST_HEAD(&dev->managed.resources);
	spin_lock_init(&dev->managed.lock);

	/* no per-device feature limits by default */
	dev->driver_features = ~0u;

	if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL) &&
	    (drm_core_check_feature(dev, DRIVER_RENDER) ||
	     drm_core_check_feature(dev, DRIVER_MODESET))) {
		DRM_ERROR("DRM driver can't be both a compute acceleration and graphics driver\n");
		return -EINVAL;
	}

	drm_legacy_init_members(dev);
	INIT_LIST_HEAD(&dev->filelist);
	INIT_LIST_HEAD(&dev->filelist_internal);
	INIT_LIST_HEAD(&dev->clientlist);
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->event_lock);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->filelist_mutex);
	mutex_init(&dev->clientlist_mutex);
	mutex_init(&dev->master_mutex);

	ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL);
	if (ret)
		return ret;

	inode = drm_fs_inode_new();
	if (IS_ERR(inode)) {
		ret = PTR_ERR(inode);
		DRM_ERROR("Cannot allocate anonymous inode: %d\n", ret);
		goto err;
	}

	dev->anon_inode = inode;

	if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL)) {
		ret = drm_minor_alloc(dev, DRM_MINOR_ACCEL);
		if (ret)
			goto err;
	} else {
		if (drm_core_check_feature(dev, DRIVER_RENDER)) {
			ret = drm_minor_alloc(dev, DRM_MINOR_RENDER);
			if (ret)
				goto err;
		}

		ret = drm_minor_alloc(dev, DRM_MINOR_PRIMARY);
		if (ret)
			goto err;
	}

	ret = drm_legacy_create_map_hash(dev);
	if (ret)
		goto err;

	drm_legacy_ctxbitmap_init(dev);

	if (drm_core_check_feature(dev, DRIVER_GEM)) {
		ret = drm_gem_init(dev);
		if (ret) {
			DRM_ERROR("Cannot initialize graphics execution manager (GEM)\n");
			goto err;
		}
	}

	dev->unique = drmm_kstrdup(dev, dev_name(parent), GFP_KERNEL);
	if (!dev->unique) {
		ret = -ENOMEM;
		goto err;
	}

	if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
		accel_debugfs_init(dev);
	else
		drm_debugfs_dev_init(dev, drm_debugfs_root);

	return 0;

err:
	drm_managed_release(dev);

	return ret;
}

static void devm_drm_dev_init_release(void *data)
{
	drm_dev_put(data);
}

static int devm_drm_dev_init(struct device *parent,
			     struct drm_device *dev,
			     const struct drm_driver *driver)
{
	int ret;

	ret = drm_dev_init(dev, driver, parent);
	if (ret)
		return ret;

	return devm_add_action_or_reset(parent,
					devm_drm_dev_init_release, dev);
}

void *__devm_drm_dev_alloc(struct device *parent,
			   const struct drm_driver *driver,
			   size_t size, size_t offset)
{
	void *container;
	struct drm_device *drm;
	int ret;

	container = kzalloc(size, GFP_KERNEL);
	if (!container)
		return ERR_PTR(-ENOMEM);

	drm = container + offset;
	ret = devm_drm_dev_init(parent, drm, driver);
	if (ret) {
		kfree(container);
		return ERR_PTR(ret);
	}
	drmm_add_final_kfree(drm, container);

	return container;
}
EXPORT_SYMBOL(__devm_drm_dev_alloc);

/**
 * drm_dev_alloc - Allocate new DRM device
 * @driver: DRM driver to allocate device for
 * @parent: Parent device object
 *
 * This is the deprecated version of devm_drm_dev_alloc(), which does not support
 * subclassing through embedding the struct &drm_device in a driver private
 * structure, and which does not support automatic cleanup through devres.
 *
 * RETURNS:
 * Pointer to new DRM device, or ERR_PTR on failure.
 */
struct drm_device *drm_dev_alloc(const struct drm_driver *driver,
				 struct device *parent)
{
	struct drm_device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	ret = drm_dev_init(dev, driver, parent);
	if (ret) {
		kfree(dev);
		return ERR_PTR(ret);
	}

	drmm_add_final_kfree(dev, dev);

	return dev;
}
EXPORT_SYMBOL(drm_dev_alloc);

static void drm_dev_release(struct kref *ref)
{
	struct drm_device *dev = container_of(ref, struct drm_device, ref);

	/* Just in case register/unregister was never called */
	drm_debugfs_dev_fini(dev);

	if (dev->driver->release)
		dev->driver->release(dev);

	drm_managed_release(dev);

	kfree(dev->managed.final_kfree);
}

/**
 * drm_dev_get - Take reference of a DRM device
 * @dev: device to take reference of or NULL
 *
 * This increases the ref-count of @dev by one. You *must* already own a
 * reference when calling this. Use drm_dev_put() to drop this reference
 * again.
 *
 * This function never fails. However, this function does not provide *any*
 * guarantee whether the device is alive or running. It only provides a
 * reference to the object and the memory associated with it.
 */
void drm_dev_get(struct drm_device *dev)
{
	if (dev)
		kref_get(&dev->ref);
}
EXPORT_SYMBOL(drm_dev_get);

/**
 * drm_dev_put - Drop reference of a DRM device
 * @dev: device to drop reference of or NULL
 *
 * This decreases the ref-count of @dev by one. The device is destroyed if the
 * ref-count drops to zero.
 */
void drm_dev_put(struct drm_device *dev)
{
	if (dev)
		kref_put(&dev->ref, drm_dev_release);
}
EXPORT_SYMBOL(drm_dev_put);

static int create_compat_control_link(struct drm_device *dev)
{
	struct drm_minor *minor;
	char *name;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return 0;

	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
	if (!minor)
		return 0;

	/*
	 * Some existing userspace out there uses the existence of the controlD*
	 * sysfs files to figure out whether it's a modeset driver. It only does
	 * readdir, hence a symlink is sufficient (and the least confusing
	 * option). Otherwise controlD* is entirely unused.
	 *
	 * Old controlD chardevs have been allocated in the range
	 * 64-127.
	 */
	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
	if (!name)
		return -ENOMEM;

	ret = sysfs_create_link(minor->kdev->kobj.parent,
				&minor->kdev->kobj,
				name);

	kfree(name);

	return ret;
}

static void remove_compat_control_link(struct drm_device *dev)
{
	struct drm_minor *minor;
	char *name;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		return;

	minor = *drm_minor_get_slot(dev, DRM_MINOR_PRIMARY);
	if (!minor)
		return;

	name = kasprintf(GFP_KERNEL, "controlD%d", minor->index + 64);
	if (!name)
		return;

	sysfs_remove_link(minor->kdev->kobj.parent, name);

	kfree(name);
}

/**
 * drm_dev_register - Register DRM device
 * @dev: Device to register
 * @flags: Flags passed to the driver's .load() function
 *
 * Register the DRM device @dev with the system, advertise device to user-space
 * and start normal device operation. @dev must be initialized via drm_dev_init()
 * previously.
 *
 * Never call this twice on any device!
 *
 * NOTE: To ensure backward compatibility with existing drivers, this function
 * calls the &drm_driver.load method after registering the device nodes,
 * creating race conditions. Usage of the &drm_driver.load method is therefore
 * deprecated; drivers must perform all initialization before calling
 * drm_dev_register().
 *
 * RETURNS:
 * 0 on success, negative error code on failure.
 */
int drm_dev_register(struct drm_device *dev, unsigned long flags)
{
	const struct drm_driver *driver = dev->driver;
	int ret;

	if (!driver->load)
		drm_mode_config_validate(dev);

	WARN_ON(!dev->managed.final_kfree);

	if (drm_dev_needs_global_mutex(dev))
		mutex_lock(&drm_global_mutex);

	if (drm_core_check_feature(dev, DRIVER_COMPUTE_ACCEL))
		accel_debugfs_register(dev);
	else
		drm_debugfs_dev_register(dev);

	ret = drm_minor_register(dev, DRM_MINOR_RENDER);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_PRIMARY);
	if (ret)
		goto err_minors;

	ret = drm_minor_register(dev, DRM_MINOR_ACCEL);
	if (ret)
		goto err_minors;

	ret = create_compat_control_link(dev);
	if (ret)
		goto err_minors;

	dev->registered = true;

	if (driver->load) {
		ret = driver->load(dev, flags);
		if (ret)
			goto err_minors;
	}

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_register_all(dev);

	DRM_INFO("Initialized %s %d.%d.%d %s for %s on minor %d\n",
		 driver->name, driver->major, driver->minor,
		 driver->patchlevel, driver->date,
		 dev->dev ? dev_name(dev->dev) : "virtual device",
		 dev->primary ? dev->primary->index : dev->accel->index);

	goto out_unlock;

err_minors:
	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_ACCEL);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
	if (drm_dev_needs_global_mutex(dev))
		mutex_unlock(&drm_global_mutex);
	return ret;
}
EXPORT_SYMBOL(drm_dev_register);

/**
 * drm_dev_unregister - Unregister DRM device
 * @dev: Device to unregister
 *
 * Unregister the DRM device from the system. This does the reverse of
 * drm_dev_register() but does not deallocate the device. The caller must call
 * drm_dev_put() to drop their final reference, unless it is managed with devres
 * (as devices allocated with devm_drm_dev_alloc() are), in which case there is
 * already an unwind action registered.
 *
 * A special form of unregistering for hotpluggable devices is drm_dev_unplug(),
 * which can be called while there are still open users of @dev.
 *
 * This should be called first in the device teardown code to make sure
 * userspace can't access the device instance any more.
 */
void drm_dev_unregister(struct drm_device *dev)
{
	if (drm_core_check_feature(dev, DRIVER_LEGACY))
		drm_lastclose(dev);

	dev->registered = false;

	drm_client_dev_unregister(dev);

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		drm_modeset_unregister_all(dev);

	if (dev->driver->unload)
		dev->driver->unload(dev);

	drm_legacy_pci_agp_destroy(dev);
	drm_legacy_rmmaps(dev);

	remove_compat_control_link(dev);
	drm_minor_unregister(dev, DRM_MINOR_ACCEL);
	drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
	drm_minor_unregister(dev, DRM_MINOR_RENDER);
	drm_debugfs_dev_fini(dev);
}
EXPORT_SYMBOL(drm_dev_unregister);

/*
 * DRM Core
 * The DRM core module initializes all global DRM objects and makes them
 * available to drivers. Once set up, drivers can probe their respective
 * devices.
 * Currently, core management includes:
 * - The "DRM-Global" key/value database
 * - Global ID management for connectors
 * - DRM major number allocation
 * - DRM minor management
 * - DRM sysfs class
 * - DRM debugfs root
 *
 * Furthermore, the DRM core provides dynamic char-dev lookups. For each
 * interface registered on a DRM device, you can request minor numbers from DRM
 * core. DRM core takes care of major-number management and char-dev
 * registration. A stub ->open() callback forwards any open() requests to the
 * registered minor.
 */

static int drm_stub_open(struct inode *inode, struct file *filp)
{
	const struct file_operations *new_fops;
	struct drm_minor *minor;
	int err;

	DRM_DEBUG("\n");

	minor = drm_minor_acquire(iminor(inode));
	if (IS_ERR(minor))
		return PTR_ERR(minor);

	new_fops = fops_get(minor->dev->driver->fops);
	if (!new_fops) {
		err = -ENODEV;
		goto out;
	}

	replace_fops(filp, new_fops);
	if (filp->f_op->open)
		err = filp->f_op->open(inode, filp);
	else
		err = 0;

out:
	drm_minor_release(minor);

	return err;
}

static const struct file_operations drm_stub_fops = {
	.owner = THIS_MODULE,
	.open = drm_stub_open,
	.llseek = noop_llseek,
};

static void drm_core_exit(void)
{
	drm_privacy_screen_lookup_exit();
	accel_core_exit();
	unregister_chrdev(DRM_MAJOR, "drm");
	debugfs_remove(drm_debugfs_root);
	drm_sysfs_destroy();
	idr_destroy(&drm_minors_idr);
	drm_connector_ida_destroy();
}

static int __init drm_core_init(void)
{
	int ret;

	drm_connector_ida_init();
	idr_init(&drm_minors_idr);
	drm_memcpy_init_early();

	ret = drm_sysfs_init();
	if (ret < 0) {
		DRM_ERROR("Cannot create DRM class: %d\n", ret);
		goto error;
	}

	drm_debugfs_root = debugfs_create_dir("dri", NULL);

	ret = register_chrdev(DRM_MAJOR, "drm", &drm_stub_fops);
	if (ret < 0)
		goto error;

	ret = accel_core_init();
	if (ret < 0)
		goto error;

	drm_privacy_screen_lookup_init();

	drm_core_init_complete = true;

	DRM_DEBUG("Initialized\n");
	return 0;

error:
	drm_core_exit();
	return ret;
}

module_init(drm_core_init);
module_exit(drm_core_exit);