/*
 * Serial Attached SCSI (SAS) event processing (libsas).
 *
 * NOTE(review): the original 24-line license/copyright header was lost in
 * extraction (only bare line numbers remained). Restore the GPL header from
 * the upstream kernel tree before shipping.
 */
#include <linux/export.h>
#include <scsi/scsi_host.h>
#include "sas_internal.h"
#include "sas_dump.h"
29
30void sas_queue_work(struct sas_ha_struct *ha, struct sas_work *sw)
31{
32 if (!test_bit(SAS_HA_REGISTERED, &ha->state))
33 return;
34
35 if (test_bit(SAS_HA_DRAINING, &ha->state)) {
36
37 if (list_empty(&sw->drain_node))
38 list_add(&sw->drain_node, &ha->defer_q);
39 } else
40 scsi_queue_work(ha->core.shost, &sw->work);
41}
42
43static void sas_queue_event(int event, unsigned long *pending,
44 struct sas_work *work,
45 struct sas_ha_struct *ha)
46{
47 if (!test_and_set_bit(event, pending)) {
48 unsigned long flags;
49
50 spin_lock_irqsave(&ha->lock, flags);
51 sas_queue_work(ha, work);
52 spin_unlock_irqrestore(&ha->lock, flags);
53 }
54}
55
56
/*
 * Drain all libsas event work queued on the HA's workqueue, then requeue
 * anything that arrived (and was deferred) while the drain was running.
 *
 * Caller must hold ha->drain_mutex; sas_drain_work() is the locked entry
 * point.
 */
void __sas_drain_work(struct sas_ha_struct *ha)
{
	struct workqueue_struct *wq = ha->core.shost->work_q;
	struct sas_work *sw, *_sw;

	set_bit(SAS_HA_DRAINING, &ha->state);

	/*
	 * Empty lock/unlock pair: flushes any sas_queue_event() caller that
	 * saw SAS_HA_DRAINING clear but has not yet released ha->lock, so
	 * after this point all submitters observe the draining state.
	 */
	spin_lock_irq(&ha->lock);
	spin_unlock_irq(&ha->lock);

	drain_workqueue(wq);

	spin_lock_irq(&ha->lock);
	clear_bit(SAS_HA_DRAINING, &ha->state);
	/* Requeue work that was parked on defer_q during the drain. */
	list_for_each_entry_safe(sw, _sw, &ha->defer_q, drain_node) {
		list_del_init(&sw->drain_node);
		sas_queue_work(ha, sw);
	}
	spin_unlock_irq(&ha->lock);
}
77
78int sas_drain_work(struct sas_ha_struct *ha)
79{
80 int err;
81
82 err = mutex_lock_interruptible(&ha->drain_mutex);
83 if (err)
84 return err;
85 if (test_bit(SAS_HA_REGISTERED, &ha->state))
86 __sas_drain_work(ha);
87 mutex_unlock(&ha->drain_mutex);
88
89 return 0;
90}
91EXPORT_SYMBOL_GPL(sas_drain_work);
92
/*
 * Suspend domain revalidation: with SAS_HA_ATA_EH_ACTIVE set, revalidate
 * events remain pending until sas_enable_revalidation() requeues them.
 * disco_mutex serializes against the enable path.
 */
void sas_disable_revalidation(struct sas_ha_struct *ha)
{
	mutex_lock(&ha->disco_mutex);
	set_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
	mutex_unlock(&ha->disco_mutex);
}
99
100void sas_enable_revalidation(struct sas_ha_struct *ha)
101{
102 int i;
103
104 mutex_lock(&ha->disco_mutex);
105 clear_bit(SAS_HA_ATA_EH_ACTIVE, &ha->state);
106 for (i = 0; i < ha->num_phys; i++) {
107 struct asd_sas_port *port = ha->sas_port[i];
108 const int ev = DISCE_REVALIDATE_DOMAIN;
109 struct sas_discovery *d = &port->disc;
110
111 if (!test_and_clear_bit(ev, &d->pending))
112 continue;
113
114 sas_queue_event(ev, &d->pending, &d->disc_work[ev].work, ha);
115 }
116 mutex_unlock(&ha->disco_mutex);
117}
118
/*
 * LLDD callback: queue an HA-level event (installed on the HA by
 * sas_init_events() as ->notify_ha_event).
 */
static void notify_ha_event(struct sas_ha_struct *sas_ha, enum ha_event event)
{
	BUG_ON(event >= HA_NUM_EVENTS);

	sas_queue_event(event, &sas_ha->pending,
			&sas_ha->ha_events[event].work, sas_ha);
}
126
127static void notify_port_event(struct asd_sas_phy *phy, enum port_event event)
128{
129 struct sas_ha_struct *ha = phy->ha;
130
131 BUG_ON(event >= PORT_NUM_EVENTS);
132
133 sas_queue_event(event, &phy->port_events_pending,
134 &phy->port_events[event].work, ha);
135}
136
137void sas_notify_phy_event(struct asd_sas_phy *phy, enum phy_event event)
138{
139 struct sas_ha_struct *ha = phy->ha;
140
141 BUG_ON(event >= PHY_NUM_EVENTS);
142
143 sas_queue_event(event, &phy->phy_events_pending,
144 &phy->phy_events[event].work, ha);
145}
146
147int sas_init_events(struct sas_ha_struct *sas_ha)
148{
149 static const work_func_t sas_ha_event_fns[HA_NUM_EVENTS] = {
150 [HAE_RESET] = sas_hae_reset,
151 };
152
153 int i;
154
155 for (i = 0; i < HA_NUM_EVENTS; i++) {
156 INIT_SAS_WORK(&sas_ha->ha_events[i].work, sas_ha_event_fns[i]);
157 sas_ha->ha_events[i].ha = sas_ha;
158 }
159
160 sas_ha->notify_ha_event = notify_ha_event;
161 sas_ha->notify_port_event = notify_port_event;
162 sas_ha->notify_phy_event = sas_notify_phy_event;
163
164 return 0;
165}
166