// SPDX-License-Identifier: GPL-2.0
/*
 * ISM driver for s390.
 *
 * Copyright IBM Corp. 2018
 */
#define KMSG_COMPONENT "ism"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <linux/processor.h>

#include "ism.h"

MODULE_DESCRIPTION("ISM driver for s390");
MODULE_LICENSE("GPL");

#define PCI_DEVICE_ID_IBM_ISM 0x04ED
#define DRV_NAME "ism"

static const struct pci_device_id ism_device_table[] = {
	{ PCI_VDEVICE(IBM, PCI_DEVICE_ID_IBM_ISM), 0 },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, ism_device_table);

static debug_info_t *ism_debug_info;
static const struct smcd_ops ism_ops;

#define NO_CLIENT 0xff		/* must be >= MAX_CLIENTS */
static struct ism_client *clients[MAX_CLIENTS];	/* use an array rather than */
						/* a list for fast mapping  */
static u8 max_client;
static DEFINE_MUTEX(clients_lock);
struct ism_dev_list {
	struct list_head list;
	struct mutex mutex; /* protects ism device list */
};

static struct ism_dev_list ism_dev_list = {
	.list = LIST_HEAD_INIT(ism_dev_list.list),
	.mutex = __MUTEX_INITIALIZER(ism_dev_list.mutex),
};

static void ism_setup_forwarding(struct ism_client *client, struct ism_dev *ism)
{
	unsigned long flags;

	spin_lock_irqsave(&ism->lock, flags);
	ism->subs[client->id] = client;
	spin_unlock_irqrestore(&ism->lock, flags);
}

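/*
 * Register an ISM client. A free slot in the clients[] array becomes the
 * client id; the client's add() callback is then invoked for every ISM
 * device already known, and IRQ/event forwarding is enabled for it.
 */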
int ism_register_client(struct ism_client *client)
{
	struct ism_dev *ism;
	int i, rc = -ENOSPC;

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < MAX_CLIENTS; ++i) {
		if (!clients[i]) {
			clients[i] = client;
			client->id = i;
			if (i == max_client)
				max_client++;
			rc = 0;
			break;
		}
	}
	mutex_unlock(&clients_lock);

	if (i < MAX_CLIENTS) {
		/* initialize with all devices that we got so far */
		list_for_each_entry(ism, &ism_dev_list.list, list) {
			ism->priv[i] = NULL;
			client->add(ism);
			ism_setup_forwarding(client, ism);
		}
	}
	mutex_unlock(&ism_dev_list.mutex);

	return rc;
}
EXPORT_SYMBOL_GPL(ism_register_client);

int ism_unregister_client(struct ism_client *client)
{
	struct ism_dev *ism;
	unsigned long flags;
	int rc = 0;

	mutex_lock(&ism_dev_list.mutex);
	list_for_each_entry(ism, &ism_dev_list.list, list) {
		spin_lock_irqsave(&ism->lock, flags);
		/* Stop forwarding IRQs and events */
		ism->subs[client->id] = NULL;
		for (int i = 0; i < ISM_NR_DMBS; ++i) {
			if (ism->sba_client_arr[i] == client->id) {
				WARN(1, "%s: attempt to unregister '%s' with registered dmb(s)\n",
				     __func__, client->name);
				rc = -EBUSY;
				goto err_reg_dmb;
			}
		}
		spin_unlock_irqrestore(&ism->lock, flags);
	}
	mutex_unlock(&ism_dev_list.mutex);

	mutex_lock(&clients_lock);
	clients[client->id] = NULL;
	if (client->id + 1 == max_client)
		max_client--;
	mutex_unlock(&clients_lock);
	return rc;

err_reg_dmb:
	spin_unlock_irqrestore(&ism->lock, flags);
	mutex_unlock(&ism_dev_list.mutex);
	return rc;
}
EXPORT_SYMBOL_GPL(ism_unregister_client);

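/*
 * Issue a command to the device: write the request payload followed by the
 * request header, then read back the response header and, on success, the
 * remaining response data. Returns the response return code.
 */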
static int ism_cmd(struct ism_dev *ism, void *cmd)
{
	struct ism_req_hdr *req = cmd;
	struct ism_resp_hdr *resp = cmd;

	__ism_write_cmd(ism, req + 1, sizeof(*req), req->len - sizeof(*req));
	__ism_write_cmd(ism, req, 0, sizeof(*req));

	WRITE_ONCE(resp->ret, ISM_ERROR);

	__ism_read_cmd(ism, resp, 0, sizeof(*resp));
	if (resp->ret) {
		debug_text_event(ism_debug_info, 0, "cmd failure");
		debug_event(ism_debug_info, 0, resp, sizeof(*resp));
		goto out;
	}
	__ism_read_cmd(ism, resp + 1, sizeof(*resp), resp->len - sizeof(*resp));
out:
	return resp->ret;
}

static int ism_cmd_simple(struct ism_dev *ism, u32 cmd_code)
{
	union ism_cmd_simple cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = cmd_code;
	cmd.request.hdr.len = sizeof(cmd.request);

	return ism_cmd(ism, &cmd);
}

static int query_info(struct ism_dev *ism)
{
	union ism_qi cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_INFO;
	cmd.request.hdr.len = sizeof(cmd.request);

	if (ism_cmd(ism, &cmd))
		goto out;

	debug_text_event(ism_debug_info, 3, "query info");
	debug_event(ism_debug_info, 3, &cmd.response, sizeof(cmd.response));
out:
	return 0;
}

static int register_sba(struct ism_dev *ism)
{
	union ism_reg_sba cmd;
	dma_addr_t dma_handle;
	struct ism_sba *sba;

	sba = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_SBA;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.sba = dma_handle;

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, sba, dma_handle);
		return -EIO;
	}

	ism->sba = sba;
	ism->sba_dma_addr = dma_handle;

	return 0;
}

static int register_ieq(struct ism_dev *ism)
{
	union ism_reg_ieq cmd;
	dma_addr_t dma_handle;
	struct ism_eq *ieq;

	ieq = dma_alloc_coherent(&ism->pdev->dev, PAGE_SIZE, &dma_handle,
				 GFP_KERNEL);
	if (!ieq)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);
	cmd.request.ieq = dma_handle;
	cmd.request.len = sizeof(*ieq);

	if (ism_cmd(ism, &cmd)) {
		dma_free_coherent(&ism->pdev->dev, PAGE_SIZE, ieq, dma_handle);
		return -EIO;
	}

	ism->ieq = ieq;
	ism->ieq_idx = -1;
	ism->ieq_dma_addr = dma_handle;

	return 0;
}

static int unregister_sba(struct ism_dev *ism)
{
	int ret;

	if (!ism->sba)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_SBA);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->sba, ism->sba_dma_addr);

	ism->sba = NULL;
	ism->sba_dma_addr = 0;

	return 0;
}

static int unregister_ieq(struct ism_dev *ism)
{
	int ret;

	if (!ism->ieq)
		return 0;

	ret = ism_cmd_simple(ism, ISM_UNREG_IEQ);
	if (ret && ret != ISM_ERROR)
		return -EIO;

	dma_free_coherent(&ism->pdev->dev, PAGE_SIZE,
			  ism->ieq, ism->ieq_dma_addr);

	ism->ieq = NULL;
	ism->ieq_dma_addr = 0;

	return 0;
}

static int ism_read_local_gid(struct ism_dev *ism)
{
	union ism_read_gid cmd;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_READ_GID;
	cmd.request.hdr.len = sizeof(cmd.request);

	ret = ism_cmd(ism, &cmd);
	if (ret)
		goto out;

	ism->local_gid = cmd.response.gid;
out:
	return ret;
}

static int ism_query_rgid(struct ism_dev *ism, u64 rgid, u32 vid_valid,
			  u32 vid)
{
	union ism_query_rgid cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_QUERY_RGID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.vlan_valid = vid_valid;
	cmd.request.vlan_id = vid;

	return ism_cmd(ism, &cmd);
}

static void ism_free_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
	clear_bit(dmb->sba_idx, ism->sba_bitmap);
	dma_free_coherent(&ism->pdev->dev, dmb->dmb_len,
			  dmb->cpu_addr, dmb->dma_addr);
}

static int ism_alloc_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
	unsigned long bit;

	if (PAGE_ALIGN(dmb->dmb_len) > dma_get_max_seg_size(&ism->pdev->dev))
		return -EINVAL;

	if (!dmb->sba_idx) {
		bit = find_next_zero_bit(ism->sba_bitmap, ISM_NR_DMBS,
					 ISM_DMB_BIT_OFFSET);
		if (bit == ISM_NR_DMBS)
			return -ENOSPC;

		dmb->sba_idx = bit;
	}
	if (dmb->sba_idx < ISM_DMB_BIT_OFFSET ||
	    test_and_set_bit(dmb->sba_idx, ism->sba_bitmap))
		return -EINVAL;

	dmb->cpu_addr = dma_alloc_coherent(&ism->pdev->dev, dmb->dmb_len,
					   &dmb->dma_addr,
					   GFP_KERNEL | __GFP_NOWARN |
					   __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!dmb->cpu_addr)
		clear_bit(dmb->sba_idx, ism->sba_bitmap);

	return dmb->cpu_addr ? 0 : -ENOMEM;
}

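/*
 * Allocate a DMB buffer and register it with the device. On success the
 * returned DMB token is stored in the dmb and the owning client is
 * recorded for IRQ forwarding.
 */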
int ism_register_dmb(struct ism_dev *ism, struct ism_dmb *dmb,
		     struct ism_client *client)
{
	union ism_reg_dmb cmd;
	unsigned long flags;
	int ret;

	ret = ism_alloc_dmb(ism, dmb);
	if (ret)
		goto out;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_REG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb = dmb->dma_addr;
	cmd.request.dmb_len = dmb->dmb_len;
	cmd.request.sba_idx = dmb->sba_idx;
	cmd.request.vlan_valid = dmb->vlan_valid;
	cmd.request.vlan_id = dmb->vlan_id;
	cmd.request.rgid = dmb->rgid;

	ret = ism_cmd(ism, &cmd);
	if (ret) {
		ism_free_dmb(ism, dmb);
		goto out;
	}
	dmb->dmb_tok = cmd.response.dmb_tok;
	spin_lock_irqsave(&ism->lock, flags);
	ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = client->id;
	spin_unlock_irqrestore(&ism->lock, flags);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ism_register_dmb);

int ism_unregister_dmb(struct ism_dev *ism, struct ism_dmb *dmb)
{
	union ism_unreg_dmb cmd;
	unsigned long flags;
	int ret;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_UNREG_DMB;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.dmb_tok = dmb->dmb_tok;

	spin_lock_irqsave(&ism->lock, flags);
	ism->sba_client_arr[dmb->sba_idx - ISM_DMB_BIT_OFFSET] = NO_CLIENT;
	spin_unlock_irqrestore(&ism->lock, flags);

	ret = ism_cmd(ism, &cmd);
	if (ret && ret != ISM_ERROR)
		goto out;

	ism_free_dmb(ism, dmb);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(ism_unregister_dmb);

static int ism_add_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_ADD_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_del_vlan_id(struct ism_dev *ism, u64 vlan_id)
{
	union ism_set_vlan_id cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_DEL_VLAN_ID;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.vlan_id = vlan_id;

	return ism_cmd(ism, &cmd);
}

static int ism_signal_ieq(struct ism_dev *ism, u64 rgid, u32 trigger_irq,
			  u32 event_code, u64 info)
{
	union ism_sig_ieq cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.request.hdr.cmd = ISM_SIGNAL_IEQ;
	cmd.request.hdr.len = sizeof(cmd.request);

	cmd.request.rgid = rgid;
	cmd.request.trigger_irq = trigger_irq;
	cmd.request.event_code = event_code;
	cmd.request.info = info;

	return ism_cmd(ism, &cmd);
}

static unsigned int max_bytes(unsigned int start, unsigned int len,
			      unsigned int boundary)
{
	return min(boundary - (start & (boundary - 1)), len);
}

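/*
 * Copy data into a remote DMB. The transfer is split into chunks that do
 * not cross a page boundary; the signal flag (sf) is only applied to the
 * final chunk.
 */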
int ism_move(struct ism_dev *ism, u64 dmb_tok, unsigned int idx, bool sf,
	     unsigned int offset, void *data, unsigned int size)
{
	unsigned int bytes;
	u64 dmb_req;
	int ret;

	while (size) {
		bytes = max_bytes(offset, size, PAGE_SIZE);
		dmb_req = ISM_CREATE_REQ(dmb_tok, idx, size == bytes ? sf : 0,
					 offset);

		ret = __ism_move(ism, dmb_req, data, bytes);
		if (ret)
			return ret;

		size -= bytes;
		data += bytes;
		offset += bytes;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(ism_move);

static struct ism_systemeid SYSTEM_EID = {
	.seid_string = "IBM-SYSZ-ISMSEID00000000",
	.serial_number = "0000",
	.type = "0000",
};

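/* Derive the SEID serial_number and type fields from the CPU id. */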
static void ism_create_system_eid(void)
{
	struct cpuid id;
	u16 ident_tail;
	char tmp[5];

	get_cpu_id(&id);
	ident_tail = (u16)(id.ident & ISM_IDENT_MASK);
	snprintf(tmp, 5, "%04X", ident_tail);
	memcpy(&SYSTEM_EID.serial_number, tmp, 4);
	snprintf(tmp, 5, "%04X", id.machine);
	memcpy(&SYSTEM_EID.type, tmp, 4);
}

u8 *ism_get_seid(void)
{
	return SYSTEM_EID.seid_string;
}
EXPORT_SYMBOL_GPL(ism_get_seid);

static u16 ism_get_chid(struct ism_dev *ism)
{
	if (!ism || !ism->pdev)
		return 0;

	return to_zpci(ism->pdev)->pchid;
}

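/* Process new event queue entries and forward them to registered clients. */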
static void ism_handle_event(struct ism_dev *ism)
{
	struct ism_event *entry;
	struct ism_client *clt;
	int i;

	while ((ism->ieq_idx + 1) != READ_ONCE(ism->ieq->header.idx)) {
		if (++(ism->ieq_idx) == ARRAY_SIZE(ism->ieq->entry))
			ism->ieq_idx = 0;

		entry = &ism->ieq->entry[ism->ieq_idx];
		debug_event(ism_debug_info, 2, entry, sizeof(*entry));
		for (i = 0; i < max_client; ++i) {
			clt = ism->subs[i];
			if (clt)
				clt->handle_event(ism, entry);
		}
	}
}

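/*
 * Interrupt handler: scan and clear the DMB summary bits in the SBA,
 * forward each pending DMB notification to the owning client, and process
 * the event queue if the event bit is set.
 */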
static irqreturn_t ism_handle_irq(int irq, void *data)
{
	struct ism_dev *ism = data;
	unsigned long bit, end;
	unsigned long *bv;
	u16 dmbemask;
	u8 client_id;

	bv = (void *) &ism->sba->dmb_bits[ISM_DMB_WORD_OFFSET];
	end = sizeof(ism->sba->dmb_bits) * BITS_PER_BYTE - ISM_DMB_BIT_OFFSET;

	spin_lock(&ism->lock);
	ism->sba->s = 0;
	barrier();
	for (bit = 0;;) {
		bit = find_next_bit_inv(bv, end, bit);
		if (bit >= end)
			break;

		clear_bit_inv(bit, bv);
		dmbemask = ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET];
		ism->sba->dmbe_mask[bit + ISM_DMB_BIT_OFFSET] = 0;
		barrier();
		client_id = ism->sba_client_arr[bit];
		if (unlikely(client_id == NO_CLIENT || !ism->subs[client_id]))
			continue;
		ism->subs[client_id]->handle_irq(ism, bit + ISM_DMB_BIT_OFFSET, dmbemask);
	}

	if (ism->sba->e) {
		ism->sba->e = 0;
		barrier();
		ism_handle_event(ism);
	}
	spin_unlock(&ism->lock);
	return IRQ_HANDLED;
}

static u64 ism_get_local_gid(struct ism_dev *ism)
{
	return ism->local_gid;
}

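/*
 * Bring up an ISM device: allocate the MSI vector and client array, request
 * the IRQ, register SBA and IEQ, read the local GID, and announce the new
 * device to all registered clients.
 */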
static int ism_dev_init(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	int i, ret;

	ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI);
	if (ret <= 0)
		goto out;

	ism->sba_client_arr = kzalloc(ISM_NR_DMBS, GFP_KERNEL);
	if (!ism->sba_client_arr)
		goto free_vectors;
	memset(ism->sba_client_arr, NO_CLIENT, ISM_NR_DMBS);

	ret = request_irq(pci_irq_vector(pdev, 0), ism_handle_irq, 0,
			  pci_name(pdev), ism);
	if (ret)
		goto free_client_arr;

	ret = register_sba(ism);
	if (ret)
		goto free_irq;

	ret = register_ieq(ism);
	if (ret)
		goto unreg_sba;

	ret = ism_read_local_gid(ism);
	if (ret)
		goto unreg_ieq;

	if (!ism_add_vlan_id(ism, ISM_RESERVED_VLANID))
		/* hardware is V2 capable */
		ism_create_system_eid();

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < max_client; ++i) {
		if (clients[i]) {
			clients[i]->add(ism);
			ism_setup_forwarding(clients[i], ism);
		}
	}
	mutex_unlock(&clients_lock);

	list_add(&ism->list, &ism_dev_list.list);
	mutex_unlock(&ism_dev_list.mutex);

	query_info(ism);
	return 0;

unreg_ieq:
	unregister_ieq(ism);
unreg_sba:
	unregister_sba(ism);
free_irq:
	free_irq(pci_irq_vector(pdev, 0), ism);
free_client_arr:
	kfree(ism->sba_client_arr);
free_vectors:
	pci_free_irq_vectors(pdev);
out:
	return ret;
}

static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct ism_dev *ism;
	int ret;

	ism = kzalloc(sizeof(*ism), GFP_KERNEL);
	if (!ism)
		return -ENOMEM;

	spin_lock_init(&ism->lock);
	dev_set_drvdata(&pdev->dev, ism);
	ism->pdev = pdev;
	ism->dev.parent = &pdev->dev;
	device_initialize(&ism->dev);
	dev_set_name(&ism->dev, dev_name(&pdev->dev));
	ret = device_add(&ism->dev);
	if (ret)
		goto err_dev;

	ret = pci_enable_device_mem(pdev);
	if (ret)
		goto err;

	ret = pci_request_mem_regions(pdev, DRV_NAME);
	if (ret)
		goto err_disable;

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret)
		goto err_resource;

	dma_set_seg_boundary(&pdev->dev, SZ_1M - 1);
	dma_set_max_seg_size(&pdev->dev, SZ_1M);
	pci_set_master(pdev);

	ret = ism_dev_init(ism);
	if (ret)
		goto err_resource;

	return 0;

err_resource:
	pci_release_mem_regions(pdev);
err_disable:
	pci_disable_device(pdev);
err:
	device_del(&ism->dev);
err_dev:
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(ism);

	return ret;
}

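/*
 * Tear down an ISM device: stop IRQ/event forwarding, call each client's
 * remove() callback, unregister IEQ and SBA, release the IRQ, and remove
 * the device from the device list.
 */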
static void ism_dev_exit(struct ism_dev *ism)
{
	struct pci_dev *pdev = ism->pdev;
	unsigned long flags;
	int i;

	spin_lock_irqsave(&ism->lock, flags);
	for (i = 0; i < max_client; ++i)
		ism->subs[i] = NULL;
	spin_unlock_irqrestore(&ism->lock, flags);

	mutex_lock(&ism_dev_list.mutex);
	mutex_lock(&clients_lock);
	for (i = 0; i < max_client; ++i) {
		if (clients[i])
			clients[i]->remove(ism);
	}
	mutex_unlock(&clients_lock);

	if (SYSTEM_EID.serial_number[0] != '0' ||
	    SYSTEM_EID.type[0] != '0')
		ism_del_vlan_id(ism, ISM_RESERVED_VLANID);
	unregister_ieq(ism);
	unregister_sba(ism);
	free_irq(pci_irq_vector(pdev, 0), ism);
	kfree(ism->sba_client_arr);
	pci_free_irq_vectors(pdev);
	list_del_init(&ism->list);
	mutex_unlock(&ism_dev_list.mutex);
}

static void ism_remove(struct pci_dev *pdev)
{
	struct ism_dev *ism = dev_get_drvdata(&pdev->dev);

	ism_dev_exit(ism);

	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	device_del(&ism->dev);
	dev_set_drvdata(&pdev->dev, NULL);
	kfree(ism);
}

static struct pci_driver ism_driver = {
	.name = DRV_NAME,
	.id_table = ism_device_table,
	.probe = ism_probe,
	.remove = ism_remove,
};

static int __init ism_init(void)
{
	int ret;

	ism_debug_info = debug_register("ism", 2, 1, 16);
	if (!ism_debug_info)
		return -ENODEV;

	memset(clients, 0, sizeof(clients));
	max_client = 0;
	debug_register_view(ism_debug_info, &debug_hex_ascii_view);
	ret = pci_register_driver(&ism_driver);
	if (ret)
		debug_unregister(ism_debug_info);

	return ret;
}

static void __exit ism_exit(void)
{
	pci_unregister_driver(&ism_driver);
	debug_unregister(ism_debug_info);
}

module_init(ism_init);
module_exit(ism_exit);

/*************************** SMC-D Implementation *****************************/

#if IS_ENABLED(CONFIG_SMC)
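/*
 * Thin wrappers mapping the smcd_ops callbacks onto the ism_* helpers above;
 * smcd->priv holds the underlying ism_dev.
 */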
static int smcd_query_rgid(struct smcd_dev *smcd, u64 rgid, u32 vid_valid,
			   u32 vid)
{
	return ism_query_rgid(smcd->priv, rgid, vid_valid, vid);
}

static int smcd_register_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb,
			     struct ism_client *client)
{
	return ism_register_dmb(smcd->priv, (struct ism_dmb *)dmb, client);
}

static int smcd_unregister_dmb(struct smcd_dev *smcd, struct smcd_dmb *dmb)
{
	return ism_unregister_dmb(smcd->priv, (struct ism_dmb *)dmb);
}

static int smcd_add_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	return ism_add_vlan_id(smcd->priv, vlan_id);
}

static int smcd_del_vlan_id(struct smcd_dev *smcd, u64 vlan_id)
{
	return ism_del_vlan_id(smcd->priv, vlan_id);
}

static int smcd_set_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_SET_VLAN);
}

static int smcd_reset_vlan_required(struct smcd_dev *smcd)
{
	return ism_cmd_simple(smcd->priv, ISM_RESET_VLAN);
}

static int smcd_signal_ieq(struct smcd_dev *smcd, u64 rgid, u32 trigger_irq,
			   u32 event_code, u64 info)
{
	return ism_signal_ieq(smcd->priv, rgid, trigger_irq, event_code, info);
}

static int smcd_move(struct smcd_dev *smcd, u64 dmb_tok, unsigned int idx,
		     bool sf, unsigned int offset, void *data,
		     unsigned int size)
{
	return ism_move(smcd->priv, dmb_tok, idx, sf, offset, data, size);
}

static int smcd_supports_v2(void)
{
	return SYSTEM_EID.serial_number[0] != '0' ||
		SYSTEM_EID.type[0] != '0';
}

static u64 smcd_get_local_gid(struct smcd_dev *smcd)
{
	return ism_get_local_gid(smcd->priv);
}

static u16 smcd_get_chid(struct smcd_dev *smcd)
{
	return ism_get_chid(smcd->priv);
}

static inline struct device *smcd_get_dev(struct smcd_dev *dev)
{
	struct ism_dev *ism = dev->priv;

	return &ism->dev;
}

static const struct smcd_ops ism_ops = {
	.query_remote_gid = smcd_query_rgid,
	.register_dmb = smcd_register_dmb,
	.unregister_dmb = smcd_unregister_dmb,
	.add_vlan_id = smcd_add_vlan_id,
	.del_vlan_id = smcd_del_vlan_id,
	.set_vlan_required = smcd_set_vlan_required,
	.reset_vlan_required = smcd_reset_vlan_required,
	.signal_event = smcd_signal_ieq,
	.move_data = smcd_move,
	.supports_v2 = smcd_supports_v2,
	.get_system_eid = ism_get_seid,
	.get_local_gid = smcd_get_local_gid,
	.get_chid = smcd_get_chid,
	.get_dev = smcd_get_dev,
};

const struct smcd_ops *ism_get_smcd_ops(void)
{
	return &ism_ops;
}
EXPORT_SYMBOL_GPL(ism_get_smcd_ops);
#endif
873 | |