// SPDX-License-Identifier: GPL-2.0-only
/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2014 Intel Corporation.
 *
 * Intel SCIF driver.
 */
#include <linux/module.h>
#include <linux/idr.h>

#include <linux/mic_common.h>
#include "../common/mic_dev.h"
#include "../bus/scif_bus.h"
#include "scif_peer_bus.h"
#include "scif_main.h"
#include "scif_map.h"

struct scif_info scif_info = {
	.mdev = {
		.minor = MISC_DYNAMIC_MINOR,
		.name = "scif",
		.fops = &scif_fops,
	}
};

struct scif_dev *scif_dev;
struct kmem_cache *unaligned_cache;
static atomic_t g_loopb_cnt;

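/* Interrupt bottom half for a SCIF node; runs in the context of intr_wq */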
static void scif_intr_bh_handler(struct work_struct *work)
{
	struct scif_dev *scifdev =
			container_of(work, struct scif_dev, intr_bh);

	if (scifdev_self(scifdev))
		scif_loopb_msg_handler(scifdev, scifdev->qpairs);
	else
		scif_nodeqp_intrhandler(scifdev, scifdev->qpairs);
}

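/*
 * Allocate the per-node ordered workqueue which runs the interrupt
 * bottom half; safe to call more than once for the same node.
 */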
int scif_setup_intr_wq(struct scif_dev *scifdev)
{
	if (!scifdev->intr_wq) {
		snprintf(scifdev->intr_wqname, sizeof(scifdev->intr_wqname),
			 "SCIF INTR %d", scifdev->node);
		scifdev->intr_wq =
			alloc_ordered_workqueue(scifdev->intr_wqname, 0);
		if (!scifdev->intr_wq)
			return -ENOMEM;
		INIT_WORK(&scifdev->intr_bh, scif_intr_bh_handler);
	}
	return 0;
}

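/* Flush and destroy the workqueue created by scif_setup_intr_wq() */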
void scif_destroy_intr_wq(struct scif_dev *scifdev)
{
	if (scifdev->intr_wq) {
		destroy_workqueue(scifdev->intr_wq);
		scifdev->intr_wq = NULL;
	}
}

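/*
 * Doorbell interrupt handler: acknowledge the interrupt and defer the
 * real work to the bottom half running on intr_wq.
 */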
irqreturn_t scif_intr_handler(int irq, void *data)
{
	struct scif_dev *scifdev = data;
	struct scif_hw_dev *sdev = scifdev->sdev;

	sdev->hw_ops->ack_interrupt(sdev, scifdev->db);
	queue_work(scifdev->intr_wq, &scifdev->intr_bh);
	return IRQ_HANDLED;
}

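/*
 * Delayed work which reads the peer's queue pair DMA address and doorbell
 * from the boot parameters and completes queue pair setup. Reschedules
 * itself every second until the peer has published its DMA address.
 */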
static void scif_qp_setup_handler(struct work_struct *work)
{
	struct scif_dev *scifdev = container_of(work, struct scif_dev,
						qp_dwork.work);
	struct scif_hw_dev *sdev = scifdev->sdev;
	dma_addr_t da = 0;
	int err;

	if (scif_is_mgmt_node()) {
		struct mic_bootparam *bp = sdev->dp;

		da = bp->scif_card_dma_addr;
		scifdev->rdb = bp->h2c_scif_db;
	} else {
		struct mic_bootparam __iomem *bp = sdev->rdp;

		da = readq(&bp->scif_host_dma_addr);
		scifdev->rdb = ioread8(&bp->c2h_scif_db);
	}
	if (da) {
		err = scif_qp_response(da, scifdev);
		if (err)
			dev_err(&scifdev->sdev->dev,
				"scif_qp_response err %d\n", err);
	} else {
		schedule_delayed_work(&scifdev->qp_dwork,
				      msecs_to_jiffies(1000));
	}
}

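/*
 * Allocate and initialize the scif_dev array covering every possible
 * SCIF node, including the local/loopback node.
 */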
static int scif_setup_scifdev(void)
{
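	/* A maximum of 129 SCIF nodes are supported, including the mgmt node */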
#define MAX_SCIF_NODES 129
	int i;
	u8 num_nodes = MAX_SCIF_NODES;

	scif_dev = kcalloc(num_nodes, sizeof(*scif_dev), GFP_KERNEL);
	if (!scif_dev)
		return -ENOMEM;
	for (i = 0; i < num_nodes; i++) {
		struct scif_dev *scifdev = &scif_dev[i];

		scifdev->node = i;
		scifdev->exit = OP_IDLE;
		init_waitqueue_head(&scifdev->disconn_wq);
		mutex_init(&scifdev->lock);
		INIT_WORK(&scifdev->peer_add_work, scif_add_peer_device);
		INIT_DELAYED_WORK(&scifdev->p2p_dwork,
				  scif_poll_qp_state);
		INIT_DELAYED_WORK(&scifdev->qp_dwork,
				  scif_qp_setup_handler);
		INIT_LIST_HEAD(&scifdev->p2p);
		RCU_INIT_POINTER(scifdev->spdev, NULL);
	}
	return 0;
}

static void scif_destroy_scifdev(void)
{
	kfree(scif_dev);
}

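/*
 * Probe a SCIF hardware device: set up the loopback queue pair on the
 * first probe, allocate the node queue pair, request the doorbell
 * interrupt and publish the local queue pair DMA address to the peer
 * via the boot parameters.
 */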
static int scif_probe(struct scif_hw_dev *sdev)
{
	struct scif_dev *scifdev = &scif_dev[sdev->dnode];
	int rc;

	dev_set_drvdata(&sdev->dev, sdev);
	scifdev->sdev = sdev;

	if (atomic_add_return(1, &g_loopb_cnt) == 1) {
		struct scif_dev *loopb_dev = &scif_dev[sdev->snode];

		loopb_dev->sdev = sdev;
		rc = scif_setup_loopback_qp(loopb_dev);
		if (rc)
			goto exit;
	}

	rc = scif_setup_intr_wq(scifdev);
	if (rc)
		goto destroy_loopb;
	rc = scif_setup_qp(scifdev);
	if (rc)
		goto destroy_intr;
	scifdev->db = sdev->hw_ops->next_db(sdev);
	scifdev->cookie = sdev->hw_ops->request_irq(sdev, scif_intr_handler,
						    "SCIF_INTR", scifdev,
						    scifdev->db);
	if (IS_ERR(scifdev->cookie)) {
		rc = PTR_ERR(scifdev->cookie);
		goto free_qp;
	}
	if (scif_is_mgmt_node()) {
		struct mic_bootparam *bp = sdev->dp;

		bp->c2h_scif_db = scifdev->db;
		bp->scif_host_dma_addr = scifdev->qp_dma_addr;
	} else {
		struct mic_bootparam __iomem *bp = sdev->rdp;

		iowrite8(scifdev->db, &bp->h2c_scif_db);
		writeq(scifdev->qp_dma_addr, &bp->scif_card_dma_addr);
	}
	schedule_delayed_work(&scifdev->qp_dwork,
			      msecs_to_jiffies(1000));
	return rc;
free_qp:
	scif_free_qp(scifdev);
destroy_intr:
	scif_destroy_intr_wq(scifdev);
destroy_loopb:
	if (atomic_dec_and_test(&g_loopb_cnt))
		scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
exit:
	return rc;
}

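/* Tear down connections to all remote nodes; the self/loopback node is skipped */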
void scif_stop(struct scif_dev *scifdev)
{
	struct scif_dev *dev;
	int i;

	for (i = scif_info.maxid; i >= 0; i--) {
		dev = &scif_dev[i];
		if (scifdev_self(dev))
			continue;
		scif_handle_remove_node(i);
	}
}

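/*
 * Undo scif_probe(): clear the boot parameters, disconnect or stop the
 * node, drop the loopback queue pair on the last removal and release the
 * interrupt, workqueue and queue pair resources.
 */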
static void scif_remove(struct scif_hw_dev *sdev)
{
	struct scif_dev *scifdev = &scif_dev[sdev->dnode];

	if (scif_is_mgmt_node()) {
		struct mic_bootparam *bp = sdev->dp;

		bp->c2h_scif_db = -1;
		bp->scif_host_dma_addr = 0x0;
	} else {
		struct mic_bootparam __iomem *bp = sdev->rdp;

		iowrite8(-1, &bp->h2c_scif_db);
		writeq(0x0, &bp->scif_card_dma_addr);
	}
	if (scif_is_mgmt_node()) {
		scif_disconnect_node(scifdev->node, true);
	} else {
		scif_info.card_initiated_exit = true;
		scif_stop(scifdev);
	}
	if (atomic_dec_and_test(&g_loopb_cnt))
		scif_destroy_loopback_qp(&scif_dev[sdev->snode]);
	if (scifdev->cookie) {
		sdev->hw_ops->free_irq(sdev, scifdev->cookie, scifdev);
		scifdev->cookie = NULL;
	}
	scif_destroy_intr_wq(scifdev);
	cancel_delayed_work(&scifdev->qp_dwork);
	scif_free_qp(scifdev);
	scifdev->rdb = -1;
	scifdev->sdev = NULL;
}

static struct scif_hw_dev_id id_table[] = {
	{ MIC_SCIF_DEV, SCIF_DEV_ANY_ID },
	{ 0 },
};

static struct scif_driver scif_driver = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = scif_probe,
	.remove = scif_remove,
};

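/*
 * One-time initialization of global SCIF state: locks, endpoint and RMA
 * lists, the per-node device array, the cache for unaligned DMA buffers
 * and the port ID allocator.
 */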
static int _scif_init(void)
{
	int rc;

	mutex_init(&scif_info.eplock);
	spin_lock_init(&scif_info.rmalock);
	spin_lock_init(&scif_info.nb_connect_lock);
	spin_lock_init(&scif_info.port_lock);
	mutex_init(&scif_info.conflock);
	mutex_init(&scif_info.connlock);
	mutex_init(&scif_info.fencelock);
	INIT_LIST_HEAD(&scif_info.uaccept);
	INIT_LIST_HEAD(&scif_info.listen);
	INIT_LIST_HEAD(&scif_info.zombie);
	INIT_LIST_HEAD(&scif_info.connected);
	INIT_LIST_HEAD(&scif_info.disconnected);
	INIT_LIST_HEAD(&scif_info.rma);
	INIT_LIST_HEAD(&scif_info.rma_tc);
	INIT_LIST_HEAD(&scif_info.mmu_notif_cleanup);
	INIT_LIST_HEAD(&scif_info.fence);
	INIT_LIST_HEAD(&scif_info.nb_connect_list);
	init_waitqueue_head(&scif_info.exitwq);
	scif_info.rma_tc_limit = SCIF_RMA_TEMP_CACHE_LIMIT;
	scif_info.en_msg_log = 0;
	scif_info.p2p_enable = 1;
	rc = scif_setup_scifdev();
	if (rc)
		goto error;
	unaligned_cache = kmem_cache_create("Unaligned_DMA",
					    SCIF_KMEM_UNALIGNED_BUF_SIZE,
					    0, SLAB_HWCACHE_ALIGN, NULL);
	if (!unaligned_cache) {
		rc = -ENOMEM;
		goto free_sdev;
	}
	INIT_WORK(&scif_info.misc_work, scif_misc_handler);
	INIT_WORK(&scif_info.mmu_notif_work, scif_mmu_notif_handler);
	INIT_WORK(&scif_info.conn_work, scif_conn_handler);
	idr_init(&scif_ports);
	return 0;
free_sdev:
	scif_destroy_scifdev();
error:
	return rc;
}

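/* Release the global state set up in _scif_init() */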
static void _scif_exit(void)
{
	idr_destroy(&scif_ports);
	kmem_cache_destroy(unaligned_cache);
	scif_destroy_scifdev();
}

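/*
 * Module entry point: initialize global state, register the SCIF peer bus
 * and driver, and expose the "scif" misc character device to user space.
 */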
static int __init scif_init(void)
{
	struct miscdevice *mdev = &scif_info.mdev;
	int rc;

	rc = _scif_init();
	if (rc)
		return rc;
	iova_cache_get();
	rc = scif_peer_bus_init();
	if (rc)
		goto exit;
	rc = scif_register_driver(&scif_driver);
	if (rc)
		goto peer_bus_exit;
	rc = misc_register(mdev);
	if (rc)
		goto unreg_scif;
	scif_init_debugfs();
	return 0;
unreg_scif:
	scif_unregister_driver(&scif_driver);
peer_bus_exit:
	scif_peer_bus_exit();
exit:
	_scif_exit();
	return rc;
}

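/* Module exit point: tear down in the reverse order of scif_init() */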
static void __exit scif_exit(void)
{
	scif_exit_debugfs();
	misc_deregister(&scif_info.mdev);
	scif_unregister_driver(&scif_driver);
	scif_peer_bus_exit();
	iova_cache_put();
	_scif_exit();
}

module_init(scif_init);
module_exit(scif_exit);

MODULE_DEVICE_TABLE(scif, id_table);
MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) SCIF driver");
MODULE_LICENSE("GPL v2");