/*
 * Copyright 2016-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "kfd_kernel_queue.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_ai.h"
#include "kfd_pm4_opcodes.h"

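/*
 * Kernel queue and packet manager hooks for GFXv9-class ASICs: kernel
 * queue setup/teardown, doorbell-based packet submission, and the PM4
 * packet builders (MAP_PROCESS, RUN_LIST, MAP_QUEUES, UNMAP_QUEUES,
 * QUERY_STATUS, RELEASE_MEM) used with the hardware scheduler.
 */
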
static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
			enum kfd_queue_type type, unsigned int queue_size);
static void uninitialize_v9(struct kernel_queue *kq);
static void submit_packet_v9(struct kernel_queue *kq);

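/* Wire up the v9 kernel queue callbacks. */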
void kernel_queue_init_v9(struct kernel_queue_ops *ops)
{
	ops->initialize = initialize_v9;
	ops->uninitialize = uninitialize_v9;
	ops->submit_packet = submit_packet_v9;
}

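/*
 * Allocate and zero a one-page end-of-pipe (EOP) buffer in GTT for the
 * kernel queue. The type and queue_size arguments are part of the common
 * kernel_queue_ops signature and are not needed here.
 */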
static bool initialize_v9(struct kernel_queue *kq, struct kfd_dev *dev,
			enum kfd_queue_type type, unsigned int queue_size)
{
	int retval;

	retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
	if (retval)
		return false;

	kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
	kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;

	memset(kq->eop_kernel_addr, 0, PAGE_SIZE);

	return true;
}

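/* Free the EOP buffer allocated in initialize_v9(). */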
static void uninitialize_v9(struct kernel_queue *kq)
{
	kfd_gtt_sa_free(kq->dev, kq->eop_mem);
}

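/*
 * Publish the pending write pointer and ring the doorbell. GFXv9 uses
 * 64-bit write pointers and doorbells, hence the *64 variants.
 */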
static void submit_packet_v9(struct kernel_queue *kq)
{
	*kq->wptr64_kernel = kq->pending_wptr64;
	write_kernel_doorbell64(kq->queue->properties.doorbell_ptr,
				kq->pending_wptr64);
}

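/*
 * Build a MAP_PROCESS packet describing a process to the hardware
 * scheduler: PASID, aperture setup (SH_MEM registers), GDS allocation,
 * page table base, and the trap handler code/memory addresses (TBA/TMA,
 * passed as 256-byte-aligned addresses, hence the >> 8).
 */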
static int pm_map_process_v9(struct packet_manager *pm,
		uint32_t *buffer, struct qcm_process_device *qpd)
{
	struct pm4_mes_map_process *packet;
	uint64_t vm_page_table_base_addr = qpd->page_table_base;

	packet = (struct pm4_mes_map_process *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_process));

	packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
					sizeof(struct pm4_mes_map_process));
	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
	packet->bitfields2.process_quantum = 1;
	packet->bitfields2.pasid = qpd->pqm->process->pasid;
	packet->bitfields14.gds_size = qpd->gds_size;
	packet->bitfields14.num_gws = qpd->num_gws;
	packet->bitfields14.num_oac = qpd->num_oac;
	packet->bitfields14.sdma_enable = 1;
	packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;

	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
	packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
	packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8);
	packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
	packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);

	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

	packet->vm_context_page_table_base_addr_lo32 =
			lower_32_bits(vm_page_table_base_addr);
	packet->vm_context_page_table_base_addr_hi32 =
			upper_32_bits(vm_page_table_base_addr);

	return 0;
}

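/*
 * Build a RUN_LIST packet pointing the scheduler at a runlist indirect
 * buffer (IB). ordinal2 carries the low 32 bits of the IB address.
 */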
static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
	struct pm4_mes_runlist *packet;

	int concurrent_proc_cnt = 0;
	struct kfd_dev *kfd = pm->dqm->dev;

	/* Determine the number of processes to map together to HW:
	 * it cannot exceed the number of VMIDs available to the
	 * scheduler, and is the smaller of the number of processes
	 * in the runlist and the kfd module parameter
	 * hws_max_conc_proc.
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has already been done in
	 * kgd2kfd_device_init().
	 */
	concurrent_proc_cnt = min(pm->dqm->processes_count,
			kfd->max_proc_per_quantum);

	packet = (struct pm4_mes_runlist *)buffer;

	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
	packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
					sizeof(struct pm4_mes_runlist));

	packet->bitfields4.ib_size = ib_size_in_dwords;
	packet->bitfields4.chain = chain ? 1 : 0;
	packet->bitfields4.offload_polling = 0;
	packet->bitfields4.valid = 1;
	packet->bitfields4.process_cnt = concurrent_proc_cnt;
	packet->ordinal2 = lower_32_bits(ib);
	packet->ib_base_hi = upper_32_bits(ib);

	return 0;
}

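/*
 * Build a MAP_QUEUES packet for a single queue. The engine and queue
 * type fields default to a normal compute queue and are overridden for
 * static compute, debug (DIQ) and SDMA queues.
 */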
static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
		struct queue *q, bool is_static)
{
	struct pm4_mes_map_queues *packet;
	bool use_static = is_static;

	packet = (struct pm4_mes_map_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));

	packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
					sizeof(struct pm4_mes_map_queues));
	packet->bitfields2.alloc_format =
		alloc_format__mes_map_queues__one_per_pipe_vi;
	packet->bitfields2.num_queues = 1;
	packet->bitfields2.queue_sel =
		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;

	packet->bitfields2.engine_sel =
		engine_sel__mes_map_queues__compute_vi;
	packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_compute_vi;

	switch (q->properties.type) {
	case KFD_QUEUE_TYPE_COMPUTE:
		if (use_static)
			packet->bitfields2.queue_type =
				queue_type__mes_map_queues__normal_latency_static_queue_vi;
		break;
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.queue_type =
			queue_type__mes_map_queues__debug_interface_queue_vi;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
				engine_sel__mes_map_queues__sdma0_vi;
		use_static = false; /* no static queues under SDMA */
		break;
	default:
		WARN(1, "queue type %d", q->properties.type);
		return -EINVAL;
	}
	packet->bitfields3.doorbell_offset =
			q->properties.doorbell_off;

	packet->mqd_addr_lo =
			lower_32_bits(q->gart_mqd_addr);

	packet->mqd_addr_hi =
			upper_32_bits(q->gart_mqd_addr);

	packet->wptr_addr_lo =
			lower_32_bits((uint64_t)q->properties.write_ptr);

	packet->wptr_addr_hi =
			upper_32_bits((uint64_t)q->properties.write_ptr);

	return 0;
}

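/*
 * Build an UNMAP_QUEUES packet. The filter selects which queues to act
 * on (a single doorbell, all queues of a PASID, all queues, or all
 * dynamic queues); reset requests a queue reset instead of preemption.
 */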
static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_queue_type type,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	struct pm4_mes_unmap_queues *packet;

	packet = (struct pm4_mes_unmap_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));

	packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
					sizeof(struct pm4_mes_unmap_queues));
	switch (type) {
	case KFD_QUEUE_TYPE_COMPUTE:
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__compute;
		break;
	case KFD_QUEUE_TYPE_SDMA:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
		break;
	default:
		WARN(1, "queue type %d", type);
		return -EINVAL;
	}

	if (reset)
		packet->bitfields2.action =
			action__mes_unmap_queues__reset_queues;
	else
		packet->bitfields2.action =
			action__mes_unmap_queues__preempt_queues;

	switch (filter) {
	case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
		packet->bitfields2.num_queues = 1;
		packet->bitfields3b.doorbell_offset0 = filter_param;
		break;
	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
		packet->bitfields3a.pasid = filter_param;
		break;
	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_queues;
		break;
	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
		/* in this case, we do not preempt static queues */
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
		break;
	default:
		WARN(1, "filter %d", filter);
		return -EINVAL;
	}

	return 0;
}

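/*
 * Build a QUERY_STATUS packet asking the scheduler to write fence_value
 * to fence_address (and raise an interrupt) once the preceding work has
 * completed. fence_value is 32-bit, so data_hi ends up as 0.
 */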
static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint32_t fence_value)
{
	struct pm4_mes_query_status *packet;

	packet = (struct pm4_mes_query_status *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_query_status));

	packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
					sizeof(struct pm4_mes_query_status));

	packet->bitfields2.context_id = 0;
	packet->bitfields2.interrupt_sel =
			interrupt_sel__mes_query_status__completion_status;
	packet->bitfields2.command =
			command__mes_query_status__fence_only_after_write_ack;

	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
	packet->data_hi = upper_32_bits((uint64_t)fence_value);
	packet->data_lo = lower_32_bits((uint64_t)fence_value);

	return 0;
}

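/*
 * Build a RELEASE_MEM packet: flush/invalidate the TC and TCL1 caches at
 * end of pipe, then write a 32-bit value (data_lo, zeroed here) to
 * gpu_addr and raise an interrupt after the write is confirmed.
 * address_lo_32b holds a dword-aligned address, hence the >> 2.
 */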
static int pm_release_mem_v9(uint64_t gpu_addr, uint32_t *buffer)
{
	struct pm4_mec_release_mem *packet;

	packet = (struct pm4_mec_release_mem *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mec_release_mem));

	packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
					sizeof(struct pm4_mec_release_mem));

	packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
	packet->bitfields2.event_index = event_index__mec_release_mem__end_of_pipe;
	packet->bitfields2.tcl1_action_ena = 1;
	packet->bitfields2.tc_action_ena = 1;
	packet->bitfields2.cache_policy = cache_policy__mec_release_mem__lru;

	packet->bitfields3.data_sel = data_sel__mec_release_mem__send_32_bit_low;
	packet->bitfields3.int_sel =
		int_sel__mec_release_mem__send_interrupt_after_write_confirm;

	packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
	packet->address_hi = upper_32_bits(gpu_addr);

	packet->data_lo = 0;

	return 0;
}

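/*
 * Packet manager hooks for GFXv9. set_resources reuses the VI builder,
 * presumably because the SET_RESOURCES packet format is unchanged on v9.
 */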
const struct packet_manager_funcs kfd_v9_pm_funcs = {
	.map_process = pm_map_process_v9,
	.runlist = pm_runlist_v9,
	.set_resources = pm_set_resources_vi,
	.map_queues = pm_map_queues_v9,
	.unmap_queues = pm_unmap_queues_v9,
	.query_status = pm_query_status_v9,
	.release_mem = pm_release_mem_v9,
	.map_process_size = sizeof(struct pm4_mes_map_process),
	.runlist_size = sizeof(struct pm4_mes_runlist),
	.set_resources_size = sizeof(struct pm4_mes_set_resources),
	.map_queues_size = sizeof(struct pm4_mes_map_queues),
	.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
	.query_status_size = sizeof(struct pm4_mes_query_status),
	.release_mem_size = sizeof(struct pm4_mec_release_mem)
};