// SPDX-License-Identifier: GPL-2.0
/*
 * Serial Attached SCSI (SAS) Event processing
 *
 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 */

#include <linux/export.h>
#include <scsi/scsi_host.h>
#include "sas_internal.h"

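/**
 * sas_queue_work - queue a libsas work item on the event workqueue
 * @ha: SAS host adapter
 * @sw: work item to queue
 *
 * Called with &sas_ha_struct.lock held so that the SAS_HA_DRAINING
 * test and the defer_q insertion are atomic with respect to
 * __sas_drain_work().
 *
 * Return: 0 if @ha is unregistered or the work was already pending;
 * 1 if the work was queued now, or parked on the defer_q while a
 * drain is in progress.
 */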
int sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
{
	/* the work is parked on the defer_q while draining, so report success */
	int rc = 1;

	if (!test_bit(SAS_HA_REGISTERED, &ha->state))
		return 0;

	if (test_bit(SAS_HA_DRAINING, &ha->state)) {
		/* add it to the defer list, if not already pending */
		if (list_empty(&sw->drain_node))
			list_add_tail(&sw->drain_node, &ha->defer_q);
	} else {
		rc = queue_work(ha->event_q, &sw->work);
	}

	return rc;
}

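/*
 * Wrapper that takes ha->lock around sas_queue_work().  The @event
 * argument is currently unused; the event number already lives in the
 * asd_sas_event that embeds @work.
 */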
static int sas_queue_event(int event, struct sas_work *work,
			   struct sas_ha_struct *ha)
{
	unsigned long flags;
	int rc;

	spin_lock_irqsave(&ha->lock, flags);
	rc = sas_queue_work(ha, work);
	spin_unlock_irqrestore(&ha->lock, flags);

	return rc;
}

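/**
 * __sas_drain_work - flush and drain pending libsas events
 * @ha: SAS host adapter
 *
 * Sets SAS_HA_DRAINING, then takes and releases ha->lock once.  The
 * empty lock/unlock pair is deliberate: any submitter that raced with
 * the set_bit() is still inside sas_queue_event() holding ha->lock, so
 * once the lock can be acquired here every in-flight submission has
 * either reached the workqueue (and is caught by drain_workqueue())
 * or will observe SAS_HA_DRAINING and park itself on the defer_q.
 * Deferred work is requeued once draining completes; anything that
 * can no longer be queued is freed.
 *
 * Caller must hold &sas_ha_struct.drain_mutex.
 */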
void __sas_drain_work(struct sas_ha_struct *ha)
{
	struct sas_work *sw, *_sw;
	int ret;

	set_bit(SAS_HA_DRAINING, &ha->state);
	/* flush submitters */
	spin_lock_irq(&ha->lock);
	spin_unlock_irq(&ha->lock);

	drain_workqueue(ha->event_q);
	drain_workqueue(ha->disco_q);

	spin_lock_irq(&ha->lock);
	clear_bit(SAS_HA_DRAINING, &ha->state);
	list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
		list_del_init(&sw->drain_node);
		ret = sas_queue_work(ha, sw);
		if (ret != 1)
			sas_free_event(to_asd_sas_event(&sw->work));
	}
	spin_unlock_irq(&ha->lock);
}

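/**
 * sas_drain_work - interruptibly drain pending libsas events
 * @ha: SAS host adapter
 *
 * Takes &sas_ha_struct.drain_mutex to serialize drains and skips the
 * drain entirely if @ha has already been unregistered.
 *
 * Return: 0 on success, or the error from mutex_lock_interruptible()
 * if interrupted by a signal.
 */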
int sas_drain_work(struct sas_ha_struct *ha)
{
	int err;

	err = mutex_lock_interruptible(&ha->drain_mutex);
	if (err)
		return err;
	if (test_bit(SAS_HA_REGISTERED, &ha->state))
		__sas_drain_work(ha);
	mutex_unlock(&ha->drain_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(sas_drain_work);

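/*
 * Domain revalidation is gated on SAS_HA_ATA_EH_ACTIVE: while libata
 * error handling owns the domain, sas_disable_revalidation() holds
 * off REVALIDATE_DOMAIN processing.  sas_enable_revalidation() lifts
 * the gate and replays any revalidation requested in the interim by
 * re-sending a broadcast event through the first phy of each port.
 */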
void sas_disable_revalidation(struct sas_ha_struct *ha)
{
	mutex_lock(&ha->disco_mutex);
	set_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
	mutex_unlock(&ha->disco_mutex);
}

void sas_enable_revalidation(struct sas_ha_struct *ha)
{
	int i;

	mutex_lock(&ha->disco_mutex);
	clear_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
	for (i = 0; i < ha->num_phys; i++) {
		struct asd_sas_port *port = ha->sas_port[i];
		const int ev = DISCE_REVALIDATE_DOMAIN;
		struct sas_discovery *d = &port->disc;
		struct asd_sas_phy *sas_phy;

		if (!test_and_clear_bit(ev, &d->pending))
			continue;

		if (list_empty(&port->phy_list))
			continue;

		sas_phy = container_of(port->phy_list.next, struct asd_sas_phy,
				       port_phy_el);
		ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
	}
	mutex_unlock(&ha->disco_mutex);
}

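/*
 * Event workers: each queued asd_sas_event is dispatched to its
 * handler through the sas_port_event_fns/sas_phy_event_fns tables and
 * then freed.  The event owns its sas_alloc_event() memory only until
 * this point, so handlers must not keep references past return.
 */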
static void sas_port_event_worker(struct work_struct *work)
{
	struct asd_sas_event *ev = to_asd_sas_event(work);

	sas_port_event_fns[ev->event](work);
	sas_free_event(ev);
}

static void sas_phy_event_worker(struct work_struct *work)
{
	struct asd_sas_event *ev = to_asd_sas_event(work);

	sas_phy_event_fns[ev->event](work);
	sas_free_event(ev);
}

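/**
 * sas_notify_port_event - queue a port event on behalf of the LLDD
 * @phy: phy on which the event occurred
 * @event: which &enum port_event happened
 *
 * Allocates an asd_sas_event, binds it to the port event worker and
 * queues it; the event is freed here if it cannot be queued.
 *
 * Return: 1 if the event was queued (or deferred during a drain),
 * 0 if it was dropped, -ENOMEM if allocation failed.
 */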
static int sas_notify_port_event(struct asd_sas_phy *phy, enum port_event event)
{
	struct asd_sas_event *ev;
	struct sas_ha_struct *ha = phy->ha;
	int ret;

	BUG_ON(event >= PORT_NUM_EVENTS);

	ev = sas_alloc_event(phy);
	if (!ev)
		return -ENOMEM;

	INIT_SAS_EVENT(ev, sas_port_event_worker, phy, event);

	ret = sas_queue_event(event, &ev->work, ha);
	if (ret != 1)
		sas_free_event(ev);

	return ret;
}

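/**
 * sas_notify_phy_event - queue a phy event on behalf of the LLDD
 * @phy: phy on which the event occurred
 * @event: which &enum phy_event happened
 *
 * Same contract as sas_notify_port_event(), but dispatches through
 * sas_phy_event_fns.  LLDDs reach it via the notify_phy_event pointer
 * installed by sas_init_events(); e.g. a hypothetical driver's
 * OOB-completion path might look like:
 *
 *	static void example_lldd_oob_done(struct asd_sas_phy *phy)
 *	{
 *		phy->ha->notify_phy_event(phy, PHYE_OOB_DONE);
 *	}
 *
 * Return: 1 if queued, 0 if dropped, -ENOMEM on allocation failure.
 */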
int sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
{
	struct asd_sas_event *ev;
	struct sas_ha_struct *ha = phy->ha;
	int ret;

	BUG_ON(event >= PHY_NUM_EVENTS);

	ev = sas_alloc_event(phy);
	if (!ev)
		return -ENOMEM;

	INIT_SAS_EVENT(ev, sas_phy_event_worker, phy, event);

	ret = sas_queue_event(event, &ev->work, ha);
	if (ret != 1)
		sas_free_event(ev);

	return ret;
}

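/*
 * Install the notifier entry points on the sas_ha.  LLDDs go through
 * these pointers rather than calling the notifiers directly, which
 * keeps the event plumbing private to libsas.
 */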
int sas_init_events(struct sas_ha_struct *sas_ha)
{
	sas_ha->notify_port_event = sas_notify_port_event;
	sas_ha->notify_phy_event = sas_notify_phy_event;

	return 0;
}