// SPDX-License-Identifier: GPL-2.0
/* Shared Memory Communications Direct over ISM devices (SMC-D)
 *
 * Functions for ISM device.
 *
 * Copyright IBM Corp. 2018
 */

#include <linux/spinlock.h>
#include <linux/slab.h>
#include <asm/page.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_ism.h"
#include "smc_pnet.h"

struct smcd_dev_list smcd_dev_list = {
        .list = LIST_HEAD_INIT(smcd_dev_list.list),
        .lock = __SPIN_LOCK_UNLOCKED(smcd_dev_list.lock)
};

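/* Test if an ISM communication is possible with the given peer GID */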
int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *smcd)
{
        return smcd->ops->query_remote_gid(smcd, peer_gid, vlan_id ? 1 : 0,
                                           vlan_id);
}

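/* Move data into the DMB identified by pos->token at the given offset */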
int smc_ism_write(struct smcd_dev *smcd, const struct smc_ism_position *pos,
                  void *data, size_t len)
{
        int rc;

        rc = smcd->ops->move_data(smcd, pos->token, pos->index, pos->signal,
                                  pos->offset, data, len);

        return rc < 0 ? rc : 0;
}

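/* Set the connection that uses this DMB element (DMBE) */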
void smc_ism_set_conn(struct smc_connection *conn)
{
        unsigned long flags;

        spin_lock_irqsave(&conn->lgr->smcd->lock, flags);
        conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = conn;
        spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);
}

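/* Unset the connection that uses this DMB element (DMBE) */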
void smc_ism_unset_conn(struct smc_connection *conn)
{
        unsigned long flags;

        if (!conn->rmb_desc)
                return;

        spin_lock_irqsave(&conn->lgr->smcd->lock, flags);
        conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = NULL;
        spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);
}

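/* Register a VLAN identifier with the ISM device. The identifiers are
 * reference counted; the VLAN id is only added to the device for the
 * first user.
 */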
int smc_ism_get_vlan(struct smcd_dev *smcd, unsigned short vlanid)
{
        struct smc_ism_vlanid *new_vlan, *vlan;
        unsigned long flags;
        int rc = 0;

        if (!vlanid)                    /* No valid vlan id */
                return -EINVAL;

        /* create a new vlan entry, in case we need it */
        new_vlan = kzalloc(sizeof(*new_vlan), GFP_KERNEL);
        if (!new_vlan)
                return -ENOMEM;
        new_vlan->vlanid = vlanid;
        refcount_set(&new_vlan->refcnt, 1);

        /* if there is an existing entry, increase its count and return */
        spin_lock_irqsave(&smcd->lock, flags);
        list_for_each_entry(vlan, &smcd->vlan, list) {
                if (vlan->vlanid == vlanid) {
                        refcount_inc(&vlan->refcnt);
                        kfree(new_vlan);
                        goto out;
                }
        }

        /* no existing entry found;
         * add the new entry to the device, which might fail
         */
        if (smcd->ops->add_vlan_id(smcd, vlanid)) {
                kfree(new_vlan);
                rc = -EIO;
                goto out;
        }
        list_add_tail(&new_vlan->list, &smcd->vlan);
out:
        spin_unlock_irqrestore(&smcd->lock, flags);
        return rc;
}

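/* Unregister a VLAN identifier with the ISM device. The identifiers are
 * reference counted; the VLAN id is only removed from the device when the
 * last user is gone.
 */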
int smc_ism_put_vlan(struct smcd_dev *smcd, unsigned short vlanid)
{
        struct smc_ism_vlanid *vlan;
        unsigned long flags;
        bool found = false;
        int rc = 0;

        if (!vlanid)                    /* No valid vlan id */
                return -EINVAL;

        spin_lock_irqsave(&smcd->lock, flags);
        list_for_each_entry(vlan, &smcd->vlan, list) {
                if (vlan->vlanid == vlanid) {
                        if (!refcount_dec_and_test(&vlan->refcnt))
                                goto out;
                        found = true;
                        break;
                }
        }
        if (!found) {
                rc = -ENOENT;
                goto out;               /* VLAN id not in table */
        }

        /* Found and the last reference just gone */
        if (smcd->ops->del_vlan_id(smcd, vlanid))
                rc = -EIO;
        list_del(&vlan->list);
        kfree(vlan);
out:
        spin_unlock_irqrestore(&smcd->lock, flags);
        return rc;
}

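/* Unregister a DMB with the ISM device and clear the buffer descriptor */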
int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc)
{
        struct smcd_dmb dmb;
        int rc = 0;

        if (!dmb_desc->dma_addr)
                return rc;

        memset(&dmb, 0, sizeof(dmb));
        dmb.dmb_tok = dmb_desc->token;
        dmb.sba_idx = dmb_desc->sba_idx;
        dmb.cpu_addr = dmb_desc->cpu_addr;
        dmb.dma_addr = dmb_desc->dma_addr;
        dmb.dmb_len = dmb_desc->len;
        rc = smcd->ops->unregister_dmb(smcd, &dmb);
        if (!rc || rc == ISM_ERROR) {
                dmb_desc->cpu_addr = NULL;
                dmb_desc->dma_addr = 0;
        }

        return rc;
}

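/* Register a DMB with the ISM device and fill in the buffer descriptor */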
int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
                         struct smc_buf_desc *dmb_desc)
{
        struct smcd_dmb dmb;
        int rc;

        memset(&dmb, 0, sizeof(dmb));
        dmb.dmb_len = dmb_len;
        dmb.sba_idx = dmb_desc->sba_idx;
        dmb.vlan_id = lgr->vlan_id;
        dmb.rgid = lgr->peer_gid;
        rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb);
        if (!rc) {
                dmb_desc->sba_idx = dmb.sba_idx;
                dmb_desc->token = dmb.dmb_tok;
                dmb_desc->cpu_addr = dmb.cpu_addr;
                dmb_desc->dma_addr = dmb.dma_addr;
                dmb_desc->len = dmb.dmb_len;
        }
        return rc;
}

struct smc_ism_event_work {
        struct work_struct work;
        struct smcd_dev *smcd;
        struct smcd_event event;
};

#define ISM_EVENT_REQUEST       0x0001
#define ISM_EVENT_RESPONSE      0x0002
#define ISM_EVENT_REQUEST_IR    0x00000001
#define ISM_EVENT_CODE_SHUTDOWN 0x80
#define ISM_EVENT_CODE_TESTLINK 0x83

union smcd_sw_event_info {
        u64 info;
        struct {
                u8 uid[SMC_LGR_ID_SIZE];
                unsigned short vlan_id;
                u16 code;
        };
};

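/* Handle a software-defined event signalled by the peer */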
static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
{
        union smcd_sw_event_info ev_info;

        ev_info.info = wrk->event.info;
        switch (wrk->event.code) {
        case ISM_EVENT_CODE_SHUTDOWN:   /* peer signalled shutdown */
                smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id);
                break;
        case ISM_EVENT_CODE_TESTLINK:   /* answer test link requests */
                if (ev_info.code == ISM_EVENT_REQUEST) {
                        ev_info.code = ISM_EVENT_RESPONSE;
                        wrk->smcd->ops->signal_event(wrk->smcd,
                                                     wrk->event.tok,
                                                     ISM_EVENT_REQUEST_IR,
                                                     ISM_EVENT_CODE_TESTLINK,
                                                     ev_info.info);
                }
                break;
        }
}

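/* Signal a shutdown of this link group to the peer */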
int smc_ism_signal_shutdown(struct smc_link_group *lgr)
{
        int rc;
        union smcd_sw_event_info ev_info;

        if (lgr->peer_shutdown)
                return 0;

        memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
        ev_info.vlan_id = lgr->vlan_id;
        ev_info.code = ISM_EVENT_REQUEST;
        rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid,
                                          ISM_EVENT_REQUEST_IR,
                                          ISM_EVENT_CODE_SHUTDOWN,
                                          ev_info.info);
        return rc;
}

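/* worker to process queued SMC-D device events */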
static void smc_ism_event_work(struct work_struct *work)
{
        struct smc_ism_event_work *wrk =
                container_of(work, struct smc_ism_event_work, work);

        switch (wrk->event.type) {
        case ISM_EVENT_GID:     /* GID event, token is the peer GID */
                smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK);
                break;
        case ISM_EVENT_DMB:
                break;
        case ISM_EVENT_SWR:     /* software-defined event */
                smcd_handle_sw_event(wrk);
                break;
        }
        kfree(wrk);
}

static void smcd_release(struct device *dev)
{
        struct smcd_dev *smcd = container_of(dev, struct smcd_dev, dev);

        kfree(smcd->conn);
        kfree(smcd);
}

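/* Allocate an SMC-D device with room for max_dmbs connections and an
 * ordered event workqueue; the caller registers it with smcd_register_dev().
 */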
struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
                                const struct smcd_ops *ops, int max_dmbs)
{
        struct smcd_dev *smcd;

        smcd = kzalloc(sizeof(*smcd), GFP_KERNEL);
        if (!smcd)
                return NULL;
        smcd->conn = kcalloc(max_dmbs, sizeof(struct smc_connection *),
                             GFP_KERNEL);
        if (!smcd->conn) {
                kfree(smcd);
                return NULL;
        }

        smcd->dev.parent = parent;
        smcd->dev.release = smcd_release;
        device_initialize(&smcd->dev);
        dev_set_name(&smcd->dev, name);
        smcd->ops = ops;
        smc_pnetid_by_dev_port(parent, 0, smcd->pnetid);

        spin_lock_init(&smcd->lock);
        spin_lock_init(&smcd->lgr_lock);
        INIT_LIST_HEAD(&smcd->vlan);
        INIT_LIST_HEAD(&smcd->lgr_list);
        init_waitqueue_head(&smcd->lgrs_deleted);
        smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s",
                                                 WQ_MEM_RECLAIM, name);
        if (!smcd->event_wq) {
                kfree(smcd->conn);
                kfree(smcd);
                return NULL;
        }
        return smcd;
}
EXPORT_SYMBOL_GPL(smcd_alloc_dev);

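/* Add the device to the SMC-D device list and register it with the driver core */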
int smcd_register_dev(struct smcd_dev *smcd)
{
        spin_lock(&smcd_dev_list.lock);
        list_add_tail(&smcd->list, &smcd_dev_list.list);
        spin_unlock(&smcd_dev_list.lock);

        return device_add(&smcd->dev);
}
EXPORT_SYMBOL_GPL(smcd_register_dev);

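/* Remove the device from the SMC-D device list and terminate all its link groups */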
void smcd_unregister_dev(struct smcd_dev *smcd)
{
        spin_lock(&smcd_dev_list.lock);
        list_del_init(&smcd->list);
        spin_unlock(&smcd_dev_list.lock);
        smcd->going_away = 1;
        smc_smcd_terminate_all(smcd);
        flush_workqueue(smcd->event_wq);
        destroy_workqueue(smcd->event_wq);

        device_del(&smcd->dev);
}
EXPORT_SYMBOL_GPL(smcd_unregister_dev);

void smcd_free_dev(struct smcd_dev *smcd)
{
        put_device(&smcd->dev);
}
EXPORT_SYMBOL_GPL(smcd_free_dev);

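/* SMC-D device event handler. Called by the ISM device driver when an event
 * is signalled for this device. May run in IRQ context, so the event is
 * copied and queued to the device's event workqueue for processing.
 */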
void smcd_handle_event(struct smcd_dev *smcd, struct smcd_event *event)
{
        struct smc_ism_event_work *wrk;

        if (smcd->going_away)
                return;

        /* copy the event to the event work queue and handle it there */
        wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC);
        if (!wrk)
                return;
        INIT_WORK(&wrk->work, smc_ism_event_work);
        wrk->smcd = smcd;
        wrk->event = *event;
        queue_work(smcd->event_wq, &wrk->work);
}
EXPORT_SYMBOL_GPL(smcd_handle_event);

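/* SMC-D device interrupt handler. Called by the ISM device driver with the
 * number of the DMB that received data. Looks up the connection owning this
 * DMB and schedules its RX tasklet.
 */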
void smcd_handle_irq(struct smcd_dev *smcd, unsigned int dmbno)
{
        struct smc_connection *conn = NULL;
        unsigned long flags;

        spin_lock_irqsave(&smcd->lock, flags);
        conn = smcd->conn[dmbno];
        if (conn && !conn->killed)
                tasklet_schedule(&conn->rx_tsklet);
        spin_unlock_irqrestore(&smcd->lock, flags);
}
EXPORT_SYMBOL_GPL(smcd_handle_irq);