// SPDX-License-Identifier: GPL-2.0
/*
 * Serial Attached SCSI (SAS) Discover process
 *
 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include "sas_internal.h"

#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include <scsi/sas_ata.h>
#include "scsi_sas_internal.h"

/* ---------- Basic task processing for discovery purposes ---------- */

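/**
 * sas_init_dev - initialize the type-specific fields of a domain device
 * @dev: domain device to initialize
 *
 * End devices get their error handling list node initialized; expander
 * devices get their children list and command mutex initialized.  Other
 * device types need no extra setup.
 */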
void sas_init_dev(struct domain_device *dev)
{
	switch (dev->dev_type) {
	case SAS_END_DEVICE:
		INIT_LIST_HEAD(&dev->ssp_dev.eh_list_node);
		break;
	case SAS_EDGE_EXPANDER_DEVICE:
	case SAS_FANOUT_EXPANDER_DEVICE:
		INIT_LIST_HEAD(&dev->ex_dev.children);
		mutex_init(&dev->ex_dev.cmd_mutex);
		break;
	default:
		break;
	}
}

/* ---------- Domain device discovery ---------- */

/**
 * sas_get_port_device - Discover devices which caused port creation
 * @port: pointer to struct asd_sas_port of interest
 *
 * Devices directly attached to a HA port have no parent.  This is how
 * we know they are (domain) "root" devices.  All other devices do, and
 * should have their "parent" pointer set appropriately as soon as a
 * child device is discovered.
 */
static int sas_get_port_device(struct asd_sas_port *port)
{
	struct asd_sas_phy *phy;
	struct sas_rphy *rphy;
	struct domain_device *dev;
	int rc = -ENODEV;

	dev = sas_alloc_device();
	if (!dev)
		return -ENOMEM;

	spin_lock_irq(&port->phy_list_lock);
	if (list_empty(&port->phy_list)) {
		spin_unlock_irq(&port->phy_list_lock);
		sas_put_device(dev);
		return -ENODEV;
	}
	phy = container_of(port->phy_list.next, struct asd_sas_phy, port_phy_el);
	spin_lock(&phy->frame_rcvd_lock);
	memcpy(dev->frame_rcvd, phy->frame_rcvd, min(sizeof(dev->frame_rcvd),
					     (size_t)phy->frame_rcvd_size));
	spin_unlock(&phy->frame_rcvd_lock);
	spin_unlock_irq(&port->phy_list_lock);

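	/*
	 * Classify the attached device from the frame received at link-up:
	 * a Register - Device to Host FIS (type 0x34) in SATA OOB mode
	 * means a SATA device (the port multiplier signature in the FIS
	 * selects SAS_SATA_PM), while SAS OOB mode means the frame is an
	 * IDENTIFY address frame describing an end device or expander.
	 */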
	if (dev->frame_rcvd[0] == 0x34 && port->oob_mode == SATA_OOB_MODE) {
		struct dev_to_host_fis *fis =
			(struct dev_to_host_fis *) dev->frame_rcvd;
		if (fis->interrupt_reason == 1 && fis->lbal == 1 &&
		    fis->byte_count_low == 0x69 && fis->byte_count_high == 0x96 &&
		    (fis->device & ~0x10) == 0)
			dev->dev_type = SAS_SATA_PM;
		else
			dev->dev_type = SAS_SATA_DEV;
		dev->tproto = SAS_PROTOCOL_SATA;
	} else if (port->oob_mode == SAS_OOB_MODE) {
		struct sas_identify_frame *id =
			(struct sas_identify_frame *) dev->frame_rcvd;
		dev->dev_type = id->dev_type;
		dev->iproto = id->initiator_bits;
		dev->tproto = id->target_bits;
	} else {
		/* If the oob mode is OOB_NOT_CONNECTED, the port is
		 * disconnected due to a race with PHY down.  We cannot
		 * continue to discover this port.
		 */
		sas_put_device(dev);
		pr_warn("Port %016llx is disconnected when discovering\n",
			SAS_ADDR(port->attached_sas_addr));
		return -ENODEV;
	}

	sas_init_dev(dev);

	dev->port = port;
	switch (dev->dev_type) {
	case SAS_SATA_DEV:
		rc = sas_ata_init(dev);
		if (rc) {
			rphy = NULL;
			break;
		}
		fallthrough;
	case SAS_END_DEVICE:
		rphy = sas_end_device_alloc(port->port);
		break;
	case SAS_EDGE_EXPANDER_DEVICE:
		rphy = sas_expander_alloc(port->port,
					  SAS_EDGE_EXPANDER_DEVICE);
		break;
	case SAS_FANOUT_EXPANDER_DEVICE:
		rphy = sas_expander_alloc(port->port,
					  SAS_FANOUT_EXPANDER_DEVICE);
		break;
	default:
		pr_warn("ERROR: Unidentified device type %d\n", dev->dev_type);
		rphy = NULL;
		break;
	}

	if (!rphy) {
		sas_put_device(dev);
		return rc;
	}

	rphy->identify.phy_identifier = phy->phy->identify.phy_identifier;
	memcpy(dev->sas_addr, port->attached_sas_addr, SAS_ADDR_SIZE);
	sas_fill_in_rphy(dev, rphy);
	sas_hash_addr(dev->hashed_sas_addr, dev->sas_addr);
	port->port_dev = dev;
	dev->linkrate = port->linkrate;
	dev->min_linkrate = port->linkrate;
	dev->max_linkrate = port->linkrate;
	dev->pathways = port->num_phys;
	memset(port->disc.fanout_sas_addr, 0, SAS_ADDR_SIZE);
	memset(port->disc.eeds_a, 0, SAS_ADDR_SIZE);
	memset(port->disc.eeds_b, 0, SAS_ADDR_SIZE);
	port->disc.max_level = 0;
	sas_device_set_phy(dev, port->port);

	dev->rphy = rphy;
	get_device(&dev->rphy->dev);

	if (dev_is_sata(dev) || dev->dev_type == SAS_END_DEVICE)
		list_add_tail(&dev->disco_list_node, &port->disco_list);
	else {
		spin_lock_irq(&port->dev_list_lock);
		list_add_tail(&dev->dev_list_node, &port->dev_list);
		spin_unlock_irq(&port->dev_list_lock);
	}

	spin_lock_irq(&port->phy_list_lock);
	list_for_each_entry(phy, &port->phy_list, port_phy_el)
		sas_phy_set_target(phy, dev);
	spin_unlock_irq(&port->phy_list_lock);

	return 0;
}

/* ---------- Discover and Revalidate ---------- */

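/**
 * sas_notify_lldd_dev_found - tell the LLDD about a new domain device
 * @dev: newly found domain device
 *
 * Calls the LLDD's lldd_dev_found handler, if one is registered.  On
 * success the device is marked SAS_DEV_FOUND and an extra reference is
 * taken; both are dropped again in sas_notify_lldd_dev_gone().
 *
 * Return: 0 on success (or if the LLDD has no handler), otherwise the
 * error returned by the LLDD.
 */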
int sas_notify_lldd_dev_found(struct domain_device *dev)
{
	int res = 0;
	struct sas_ha_struct *sas_ha = dev->port->ha;
	struct Scsi_Host *shost = sas_ha->shost;
	struct sas_internal *i = to_sas_internal(shost->transportt);

	if (!i->dft->lldd_dev_found)
		return 0;

	res = i->dft->lldd_dev_found(dev);
	if (res) {
		pr_warn("driver on host %s cannot handle device %016llx, error:%d\n",
			dev_name(sas_ha->dev),
			SAS_ADDR(dev->sas_addr), res);
		return res;
	}
	set_bit(SAS_DEV_FOUND, &dev->state);
	kref_get(&dev->kref);
	return 0;
}

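/**
 * sas_notify_lldd_dev_gone - tell the LLDD that a domain device is gone
 * @dev: domain device being removed
 *
 * Calls the LLDD's lldd_dev_gone handler and drops the reference taken
 * in sas_notify_lldd_dev_found(), but only if that earlier call had
 * succeeded (i.e. SAS_DEV_FOUND is set).
 */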
void sas_notify_lldd_dev_gone(struct domain_device *dev)
{
	struct sas_ha_struct *sas_ha = dev->port->ha;
	struct Scsi_Host *shost = sas_ha->shost;
	struct sas_internal *i = to_sas_internal(shost->transportt);

	if (!i->dft->lldd_dev_gone)
		return;

	if (test_and_clear_bit(SAS_DEV_FOUND, &dev->state)) {
		i->dft->lldd_dev_gone(dev);
		sas_put_device(dev);
	}
}

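/*
 * Add the devices collected on the port's disco_list to the domain
 * device list, let libata probe any SATA links, and register the
 * remaining rphys with the SAS transport class.  Devices that fail to
 * probe are torn down via sas_fail_probe().
 */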
static void sas_probe_devices(struct asd_sas_port *port)
{
	struct domain_device *dev, *n;

	/* devices must be domain members before link recovery and probe */
	list_for_each_entry(dev, &port->disco_list, disco_list_node) {
		spin_lock_irq(&port->dev_list_lock);
		list_add_tail(&dev->dev_list_node, &port->dev_list);
		spin_unlock_irq(&port->dev_list_lock);
	}

	sas_probe_sata(port);

	list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
		int err;

		err = sas_rphy_add(dev->rphy);
		if (err)
			sas_fail_probe(dev, __func__, err);
		else
			list_del_init(&dev->disco_list_node);
	}
}

static void sas_suspend_devices(struct work_struct *work)
{
	struct asd_sas_phy *phy;
	struct domain_device *dev;
	struct sas_discovery_event *ev = to_sas_discovery_event(work);
	struct asd_sas_port *port = ev->port;
	struct Scsi_Host *shost = port->ha->shost;
	struct sas_internal *si = to_sas_internal(shost->transportt);

	clear_bit(DISCE_SUSPEND, &port->disc.pending);

	sas_suspend_sata(port);

	/* lldd is free to forget the domain_device across the
	 * suspension; we force the issue here to keep the reference
	 * counts aligned
	 */
	list_for_each_entry(dev, &port->dev_list, dev_list_node)
		sas_notify_lldd_dev_gone(dev);

	/* we are suspending, so we know events are disabled and
	 * phy_list is not being mutated
	 */
	list_for_each_entry(phy, &port->phy_list, port_phy_el) {
		if (si->dft->lldd_port_deformed)
			si->dft->lldd_port_deformed(phy);
		phy->suspended = 1;
		port->suspended = 1;
	}
}

static void sas_resume_devices(struct work_struct *work)
{
	struct sas_discovery_event *ev = to_sas_discovery_event(work);
	struct asd_sas_port *port = ev->port;

	clear_bit(DISCE_RESUME, &port->disc.pending);

	sas_resume_sata(port);
}

/**
 * sas_discover_end_dev - discover an end device (SSP, etc)
 * @dev: pointer to domain device of interest
 *
 * See comment in sas_discover_sata().
 */
static int sas_discover_end_dev(struct domain_device *dev)
{
	return sas_notify_lldd_dev_found(dev);
}

/* ---------- Device registration and unregistration ---------- */

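/**
 * sas_free_device - release callback for a domain device's kref
 * @kref: kref embedded in the domain device
 *
 * Runs when the last reference to the device is dropped via
 * sas_put_device().  Releases the rphy, parent and phy references and
 * frees any expander or SATA specific resources before freeing the
 * device itself.
 */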
void sas_free_device(struct kref *kref)
{
	struct domain_device *dev = container_of(kref, typeof(*dev), kref);

	put_device(&dev->rphy->dev);
	dev->rphy = NULL;

	if (dev->parent)
		sas_put_device(dev->parent);

	sas_port_put_phy(dev->phy);
	dev->phy = NULL;

	/* remove the phys and ports, everything else should be gone */
	if (dev_is_expander(dev->dev_type))
		kfree(dev->ex_dev.ex_phy);

	if (dev_is_sata(dev) && dev->sata_dev.ap) {
		ata_sas_tport_delete(dev->sata_dev.ap);
		kfree(dev->sata_dev.ap);
		ata_host_put(dev->sata_dev.ata_host);
		dev->sata_dev.ata_host = NULL;
		dev->sata_dev.ap = NULL;
	}

	kfree(dev);
}

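/*
 * Teardown common to all device types: notify the LLDD, unlink the
 * device from its parent and from the port's dev_list, drop it from the
 * ha's error handling accounting if needed, and put the list reference.
 */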
static void sas_unregister_common_dev(struct asd_sas_port *port, struct domain_device *dev)
{
	struct sas_ha_struct *ha = port->ha;

	sas_notify_lldd_dev_gone(dev);
	if (!dev->parent)
		dev->port->port_dev = NULL;
	else
		list_del_init(&dev->siblings);

	spin_lock_irq(&port->dev_list_lock);
	list_del_init(&dev->dev_list_node);
	if (dev_is_sata(dev))
		sas_ata_end_eh(dev->sata_dev.ap);
	spin_unlock_irq(&port->dev_list_lock);

	spin_lock_irq(&ha->lock);
	if (dev->dev_type == SAS_END_DEVICE &&
	    !list_empty(&dev->ssp_dev.eh_list_node)) {
		list_del_init(&dev->ssp_dev.eh_list_node);
		ha->eh_active--;
	}
	spin_unlock_irq(&ha->lock);

	sas_put_device(dev);
}

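/**
 * sas_destruct_devices - destroy devices queued for removal on a port
 * @port: port whose destroy_list is processed
 *
 * For each device on the port's destroy_list, remove its SCSI children,
 * delete its rphy from the SAS transport class and perform the common
 * unregistration.
 */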
void sas_destruct_devices(struct asd_sas_port *port)
{
	struct domain_device *dev, *n;

	list_for_each_entry_safe(dev, n, &port->destroy_list, disco_list_node) {
		list_del_init(&dev->disco_list_node);

		sas_remove_children(&dev->rphy->dev);
		sas_rphy_delete(dev->rphy);
		sas_unregister_common_dev(port, dev);
	}
}

static void sas_destruct_ports(struct asd_sas_port *port)
{
	struct sas_port *sas_port, *p;

	list_for_each_entry_safe(sas_port, p, &port->sas_port_del_list, del_list) {
		list_del_init(&sas_port->del_list);
		sas_port_delete(sas_port);
	}
}

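/*
 * blk_mq_tagset_busy_iter() callback: abort every in-flight command that
 * is queued to the domain device passed in @data.
 */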
static bool sas_abort_cmd(struct request *req, void *data)
{
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
	struct domain_device *dev = data;

	if (dev == cmd_to_domain_dev(cmd))
		blk_abort_request(req);
	return true;
}

static void sas_abort_device_scsi_cmds(struct domain_device *dev)
{
	struct sas_ha_struct *sas_ha = dev->port->ha;
	struct Scsi_Host *shost = sas_ha->shost;

	if (dev_is_expander(dev->dev_type))
		return;

	/*
	 * For a removed device with active I/Os, user space applications
	 * would otherwise wait a very long time for the commands to time
	 * out, which is unnecessary because a removed device will never
	 * complete them.  Abort the in-flight I/Os here so that EH can be
	 * kicked in quickly.
	 */
	blk_mq_tagset_busy_iter(&shost->tag_set, sas_abort_cmd, dev);
}

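/**
 * sas_unregister_dev - unregister a domain device
 * @port: port the device is attached to
 * @dev: domain device to unregister
 *
 * A device that never completed sas_rphy_add() (it is still on the
 * disco_list) is freed immediately.  Otherwise it is marked
 * SAS_DEV_DESTROY, its in-flight commands are aborted if the device is
 * already gone, its rphy is unlinked and it is queued on the port's
 * destroy_list for sas_destruct_devices().
 */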
void sas_unregister_dev(struct asd_sas_port *port, struct domain_device *dev)
{
	if (!test_bit(SAS_DEV_DESTROY, &dev->state) &&
	    !list_empty(&dev->disco_list_node)) {
		/* this rphy never saw sas_rphy_add */
		list_del_init(&dev->disco_list_node);
		sas_rphy_free(dev->rphy);
		sas_unregister_common_dev(port, dev);
		return;
	}

	if (!test_and_set_bit(SAS_DEV_DESTROY, &dev->state)) {
		if (test_bit(SAS_DEV_GONE, &dev->state))
			sas_abort_device_scsi_cmds(dev);
		sas_rphy_unlink(dev->rphy);
		list_move_tail(&dev->disco_list_node, &port->destroy_list);
	}
}

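/**
 * sas_unregister_domain_devices - unregister every device in a port's domain
 * @port: port whose domain is being torn down
 * @gone: if true, mark the devices SAS_DEV_GONE before unregistering
 *
 * Walks the registered devices in reverse discovery order and then the
 * devices still pending on the disco_list, unregistering each one, and
 * finally detaches the root rphy from the port.
 */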
void sas_unregister_domain_devices(struct asd_sas_port *port, int gone)
{
	struct domain_device *dev, *n;

	list_for_each_entry_safe_reverse(dev, n, &port->dev_list, dev_list_node) {
		if (gone)
			set_bit(SAS_DEV_GONE, &dev->state);
		sas_unregister_dev(port, dev);
	}

	list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node)
		sas_unregister_dev(port, dev);

	port->port->rphy = NULL;
}

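/**
 * sas_device_set_phy - record the phy a domain device was last seen on
 * @dev: domain device of interest
 * @port: SAS transport port the device is attached through
 *
 * Takes a reference on the port's phy and stores it in the device,
 * releasing any previously recorded phy.
 */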
void sas_device_set_phy(struct domain_device *dev, struct sas_port *port)
{
	struct sas_ha_struct *ha;
	struct sas_phy *new_phy;

	if (!dev)
		return;

	ha = dev->port->ha;
	new_phy = sas_port_get_phy(port);

	/* pin and record last seen phy */
	spin_lock_irq(&ha->phy_port_lock);
	if (new_phy) {
		sas_port_put_phy(dev->phy);
		dev->phy = new_phy;
	}
	spin_unlock_irq(&ha->phy_port_lock);
}

/* ---------- Discovery and Revalidation ---------- */

/**
 * sas_discover_domain - discover the domain
 * @work: work structure embedded in port domain device.
 *
 * NOTE: this process _must_ quit (return) as soon as any connection
 * errors are encountered.  Connection recovery is done elsewhere.
 * Discover process only interrogates devices in order to discover the
 * domain.
 */
static void sas_discover_domain(struct work_struct *work)
{
	struct domain_device *dev;
	int error = 0;
	struct sas_discovery_event *ev = to_sas_discovery_event(work);
	struct asd_sas_port *port = ev->port;

	clear_bit(DISCE_DISCOVER_DOMAIN, &port->disc.pending);

	if (port->port_dev)
		return;

	error = sas_get_port_device(port);
	if (error)
		return;
	dev = port->port_dev;

	pr_debug("DOING DISCOVERY on port %d, pid:%d\n", port->id,
		 task_pid_nr(current));

	switch (dev->dev_type) {
	case SAS_END_DEVICE:
		error = sas_discover_end_dev(dev);
		break;
	case SAS_EDGE_EXPANDER_DEVICE:
	case SAS_FANOUT_EXPANDER_DEVICE:
		error = sas_discover_root_expander(dev);
		break;
	case SAS_SATA_DEV:
	case SAS_SATA_PM:
		error = sas_discover_sata(dev);
		break;
	default:
		error = -ENXIO;
		pr_err("unhandled device %d\n", dev->dev_type);
		break;
	}

	if (error) {
		sas_rphy_free(dev->rphy);
		list_del_init(&dev->disco_list_node);
		spin_lock_irq(&port->dev_list_lock);
		list_del_init(&dev->dev_list_node);
		spin_unlock_irq(&port->dev_list_lock);

		sas_put_device(dev);
		port->port_dev = NULL;
	}

	sas_probe_devices(port);

	pr_debug("DONE DISCOVERY on port %d, pid:%d, result:%d\n", port->id,
		 task_pid_nr(current), error);
}

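/*
 * Revalidate the domain, typically in response to a broadcast (change)
 * event: skip the pass while ATA error handling is active, otherwise
 * re-interrogate the root expander, then destroy departed devices and
 * ports and probe newly found ones.
 */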
static void sas_revalidate_domain(struct work_struct *work)
{
	int res = 0;
	struct sas_discovery_event *ev = to_sas_discovery_event(work);
	struct asd_sas_port *port = ev->port;
	struct sas_ha_struct *ha = port->ha;
	struct domain_device *ddev = port->port_dev;

	/* prevent revalidation from finding sata links in recovery */
	mutex_lock(&ha->disco_mutex);
	if (test_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state)) {
		pr_debug("REVALIDATION DEFERRED on port %d, pid:%d\n",
			 port->id, task_pid_nr(current));
		goto out;
	}

	clear_bit(DISCE_REVALIDATE_DOMAIN, &port->disc.pending);

	pr_debug("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id,
		 task_pid_nr(current));

	if (ddev && dev_is_expander(ddev->dev_type))
		res = sas_ex_revalidate_domain(ddev);

	pr_debug("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n",
		 port->id, task_pid_nr(current), res);
 out:
	mutex_unlock(&ha->disco_mutex);

	sas_destruct_devices(port);
	sas_destruct_ports(port);
	sas_probe_devices(port);
}

/* ---------- Events ---------- */

static void sas_chain_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
	/* chained work is not subject to SAS_HA_DRAINING or
	 * SAS_HA_REGISTERED, because it is either submitted in the
	 * workqueue, or known to be submitted from a context that is
	 * not racing against draining
	 */
	queue_work(ha->disco_q, &sw->work);
}

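/* Queue the chained work for @event unless that event is already pending */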
static void sas_chain_event(int event, unsigned long *pending,
			    struct sas_work *sw,
			    struct sas_ha_struct *ha)
{
	if (!test_and_set_bit(event, pending)) {
		unsigned long flags;

		spin_lock_irqsave(&ha->lock, flags);
		sas_chain_work(ha, sw);
		spin_unlock_irqrestore(&ha->lock, flags);
	}
}

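/**
 * sas_discover_event - queue a discovery event for a port
 * @port: port the event applies to
 * @ev: discovery event to queue
 *
 * Marks the event pending and queues the corresponding discovery work
 * on the ha's discovery workqueue.
 */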
void sas_discover_event(struct asd_sas_port *port, enum discover_event ev)
{
	struct sas_discovery *disc;

	if (!port)
		return;
	disc = &port->disc;

	BUG_ON(ev >= DISC_NUM_EVENTS);

	sas_chain_event(ev, &disc->pending, &disc->disc_work[ev].work, port->ha);
}

/**
 * sas_init_disc - initialize the discovery struct in the port
 * @disc: port discovery structure
 * @port: pointer to struct asd_sas_port of interest
 *
 * Called when the ports are being initialized.
 */
void sas_init_disc(struct sas_discovery *disc, struct asd_sas_port *port)
{
	int i;

	static const work_func_t sas_event_fns[DISC_NUM_EVENTS] = {
		[DISCE_DISCOVER_DOMAIN] = sas_discover_domain,
		[DISCE_REVALIDATE_DOMAIN] = sas_revalidate_domain,
		[DISCE_SUSPEND] = sas_suspend_devices,
		[DISCE_RESUME] = sas_resume_devices,
	};

	disc->pending = 0;
	for (i = 0; i < DISC_NUM_EVENTS; i++) {
		INIT_SAS_WORK(&disc->disc_work[i].work, sas_event_fns[i]);
		disc->disc_work[i].port = port;
	}
}
