1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2013 Red Hat |
4 | * Author: Rob Clark <robdclark@gmail.com> |
5 | */ |
6 | |
7 | #include "msm_ringbuffer.h" |
8 | #include "msm_gpu.h" |
9 | |
10 | static uint num_hw_submissions = 8; |
11 | MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)" ); |
12 | module_param(num_hw_submissions, uint, 0600); |
13 | |
14 | static struct dma_fence *msm_job_run(struct drm_sched_job *job) |
15 | { |
16 | struct msm_gem_submit *submit = to_msm_submit(job); |
17 | struct msm_fence_context *fctx = submit->ring->fctx; |
18 | struct msm_gpu *gpu = submit->gpu; |
19 | struct msm_drm_private *priv = gpu->dev->dev_private; |
20 | int i; |
21 | |
22 | msm_fence_init(fence: submit->hw_fence, fctx); |
23 | |
24 | mutex_lock(&priv->lru.lock); |
25 | |
26 | for (i = 0; i < submit->nr_bos; i++) { |
27 | struct drm_gem_object *obj = submit->bos[i].obj; |
28 | |
29 | msm_gem_unpin_active(obj); |
30 | } |
31 | |
32 | submit->bos_pinned = false; |
33 | |
34 | mutex_unlock(lock: &priv->lru.lock); |
35 | |
36 | /* TODO move submit path over to using a per-ring lock.. */ |
37 | mutex_lock(&gpu->lock); |
38 | |
39 | msm_gpu_submit(gpu, submit); |
40 | |
41 | mutex_unlock(lock: &gpu->lock); |
42 | |
43 | return dma_fence_get(fence: submit->hw_fence); |
44 | } |
45 | |
/*
 * drm_sched free_job callback: detach the job from the scheduler and
 * drop the scheduler's reference to the submit.
 */
static void msm_job_free(struct drm_sched_job *job)
{
	drm_sched_job_cleanup(job);
	msm_gem_submit_put(to_msm_submit(job));
}
53 | |
/* Callbacks the DRM GPU scheduler invokes for jobs on a msm ring */
static const struct drm_sched_backend_ops msm_sched_ops = {
	.run_job = msm_job_run,
	.free_job = msm_job_free
};
58 | |
59 | struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id, |
60 | void *memptrs, uint64_t memptrs_iova) |
61 | { |
62 | struct msm_ringbuffer *ring; |
63 | long sched_timeout; |
64 | char name[32]; |
65 | int ret; |
66 | |
67 | /* We assume everwhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */ |
68 | BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ)); |
69 | |
70 | ring = kzalloc(size: sizeof(*ring), GFP_KERNEL); |
71 | if (!ring) { |
72 | ret = -ENOMEM; |
73 | goto fail; |
74 | } |
75 | |
76 | ring->gpu = gpu; |
77 | ring->id = id; |
78 | |
79 | ring->start = msm_gem_kernel_new(dev: gpu->dev, MSM_GPU_RINGBUFFER_SZ, |
80 | check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY), |
81 | aspace: gpu->aspace, bo: &ring->bo, iova: &ring->iova); |
82 | |
83 | if (IS_ERR(ptr: ring->start)) { |
84 | ret = PTR_ERR(ptr: ring->start); |
85 | ring->start = NULL; |
86 | goto fail; |
87 | } |
88 | |
89 | msm_gem_object_set_name(bo: ring->bo, fmt: "ring%d" , id); |
90 | |
91 | ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2); |
92 | ring->next = ring->start; |
93 | ring->cur = ring->start; |
94 | |
95 | ring->memptrs = memptrs; |
96 | ring->memptrs_iova = memptrs_iova; |
97 | |
98 | /* currently managing hangcheck ourselves: */ |
99 | sched_timeout = MAX_SCHEDULE_TIMEOUT; |
100 | |
101 | ret = drm_sched_init(sched: &ring->sched, ops: &msm_sched_ops, NULL, |
102 | num_rqs: DRM_SCHED_PRIORITY_COUNT, |
103 | credit_limit: num_hw_submissions, hang_limit: 0, timeout: sched_timeout, |
104 | NULL, NULL, to_msm_bo(ring->bo)->name, dev: gpu->dev->dev); |
105 | if (ret) { |
106 | goto fail; |
107 | } |
108 | |
109 | INIT_LIST_HEAD(list: &ring->submits); |
110 | spin_lock_init(&ring->submit_lock); |
111 | spin_lock_init(&ring->preempt_lock); |
112 | |
113 | snprintf(buf: name, size: sizeof(name), fmt: "gpu-ring-%d" , ring->id); |
114 | |
115 | ring->fctx = msm_fence_context_alloc(dev: gpu->dev, fenceptr: &ring->memptrs->fence, name); |
116 | |
117 | return ring; |
118 | |
119 | fail: |
120 | msm_ringbuffer_destroy(ring); |
121 | return ERR_PTR(error: ret); |
122 | } |
123 | |
124 | void msm_ringbuffer_destroy(struct msm_ringbuffer *ring) |
125 | { |
126 | if (IS_ERR_OR_NULL(ptr: ring)) |
127 | return; |
128 | |
129 | drm_sched_fini(sched: &ring->sched); |
130 | |
131 | msm_fence_context_free(fctx: ring->fctx); |
132 | |
133 | msm_gem_kernel_put(bo: ring->bo, aspace: ring->gpu->aspace); |
134 | |
135 | kfree(objp: ring); |
136 | } |
137 | |