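/*
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Interrupt and timer handling.
 */
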
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"
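/**
 * mei_irq_compl_handler - dispatch complete handlers
 *	for the completed callbacks
 *
 * @dev: mei device
 * @cmpl_list: list of completed cbs
 */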
void mei_irq_compl_handler(struct mei_device *dev, struct list_head *cmpl_list)
{
	struct mei_cl_cb *cb, *next;
	struct mei_cl *cl;

	list_for_each_entry_safe(cb, next, cmpl_list, list) {
		cl = cb->cl;
		list_del_init(&cb->list);

		dev_dbg(dev->dev, "completing call back.\n");
		mei_cl_complete(cl, cb);
	}
}
EXPORT_SYMBOL_GPL(mei_irq_compl_handler);
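/**
 * mei_cl_hbm_equal - check if the message header is addressed to the client
 *
 * @cl: host client
 * @mei_hdr: header of mei client message
 *
 * Return: nonzero if the header addresses @cl, 0 otherwise
 */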
static inline int mei_cl_hbm_equal(struct mei_cl *cl,
				   struct mei_msg_hdr *mei_hdr)
{
	return mei_cl_host_addr(cl) == mei_hdr->host_addr &&
		mei_cl_me_id(cl) == mei_hdr->me_addr;
}
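/**
 * mei_irq_discard_msg - discard received message
 *
 * @dev: mei device
 * @hdr: message header
 */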
static void mei_irq_discard_msg(struct mei_device *dev, struct mei_msg_hdr *hdr)
{
	/*
	 * no need to check for size as it is guaranteed
	 * that length fits into rd_msg_buf
	 */
	mei_read_slots(dev, dev->rd_msg_buf, hdr->length);
	dev_dbg(dev->dev, "discarding message " MEI_HDR_FMT "\n",
		MEI_HDR_PRM(hdr));
}
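/**
 * mei_cl_irq_read_msg - read a message chunk into the client's read buffer
 *
 * @cl: reading client
 * @mei_hdr: header of mei client message
 * @cmpl_list: completion list
 *
 * Return: always 0
 */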
static int mei_cl_irq_read_msg(struct mei_cl *cl,
			       struct mei_msg_hdr *mei_hdr,
			       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb;
	size_t buf_sz;

	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
	if (!cb) {
		if (!mei_cl_is_fixed_address(cl)) {
			cl_err(dev, cl, "pending read cb not found\n");
			goto discard;
		}
		cb = mei_cl_alloc_cb(cl, mei_cl_mtu(cl), MEI_FOP_READ, cl->fp);
		if (!cb)
			goto discard;
		list_add_tail(&cb->list, &cl->rd_pending);
	}

	if (!mei_cl_is_connected(cl)) {
		cl_dbg(dev, cl, "not connected\n");
		cb->status = -ENODEV;
		goto discard;
	}

	buf_sz = mei_hdr->length + cb->buf_idx;
	/* catch for integer overflow */
	if (buf_sz < cb->buf_idx) {
		cl_err(dev, cl, "message is too big len %d idx %zu\n",
		       mei_hdr->length, cb->buf_idx);
		cb->status = -EMSGSIZE;
		goto discard;
	}

	if (cb->buf.size < buf_sz) {
		cl_dbg(dev, cl, "message overflow. size %zu len %d idx %zu\n",
		       cb->buf.size, mei_hdr->length, cb->buf_idx);
		cb->status = -EMSGSIZE;
		goto discard;
	}

	mei_read_slots(dev, cb->buf.data + cb->buf_idx, mei_hdr->length);

	cb->buf_idx += mei_hdr->length;

	if (mei_hdr->msg_complete) {
		cl_dbg(dev, cl, "completed read length = %zu\n", cb->buf_idx);
		list_move_tail(&cb->list, cmpl_list);
	} else {
		pm_runtime_mark_last_busy(dev->dev);
		pm_request_autosuspend(dev->dev);
	}

	return 0;

discard:
	if (cb)
		list_move_tail(&cb->list, cmpl_list);
	mei_irq_discard_msg(dev, mei_hdr);
	return 0;
}
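/**
 * mei_cl_irq_disconnect_rsp - send disconnection response message
 *
 * @cl: client
 * @cb: callback block
 * @cmpl_list: complete list
 *
 * Return: 0, OK; otherwise, error
 */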
static int mei_cl_irq_disconnect_rsp(struct mei_cl *cl, struct mei_cl_cb *cb,
				     struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	slots = mei_hbuf_empty_slots(dev);
	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_response));

	if (slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_disconnect_rsp(dev, cl);
	list_move_tail(&cb->list, cmpl_list);

	return ret;
}
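/**
 * mei_cl_irq_read - process a client read request from the interrupt
 *	thread context: request flow control credits
 *
 * @cl: client
 * @cb: callback block
 * @cmpl_list: complete list
 *
 * Return: 0, OK; otherwise, error
 */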
static int mei_cl_irq_read(struct mei_cl *cl, struct mei_cl_cb *cb,
			   struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	if (!list_empty(&cl->rd_pending))
		return 0;

	msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));
	slots = mei_hbuf_empty_slots(dev);

	if (slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_hbm_cl_flow_control_req(dev, cl);
	if (ret) {
		cl->status = ret;
		cb->buf_idx = 0;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &cl->rd_pending);

	return 0;
}

static inline bool hdr_is_hbm(struct mei_msg_hdr *mei_hdr)
{
	return mei_hdr->host_addr == 0 && mei_hdr->me_addr == 0;
}

static inline bool hdr_is_fixed(struct mei_msg_hdr *mei_hdr)
{
	return mei_hdr->host_addr == 0 && mei_hdr->me_addr != 0;
}

static inline int hdr_is_valid(u32 msg_hdr)
{
	struct mei_msg_hdr *mei_hdr;

	mei_hdr = (struct mei_msg_hdr *)&msg_hdr;
	if (!msg_hdr || mei_hdr->reserved)
		return -EBADMSG;

	return 0;
}
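/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 *	handle the read processing
 *
 * @dev: the device structure
 * @cmpl_list: list of completed callbacks
 * @slots: available read slots (updated on return)
 *
 * Return: 0 on success, <0 on failure.
 */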
int mei_irq_read_handler(struct mei_device *dev,
			 struct list_head *cmpl_list, s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_cl *cl;
	int ret;

	if (!dev->rd_msg_hdr) {
		dev->rd_msg_hdr = mei_read_hdr(dev);
		(*slots)--;
		dev_dbg(dev->dev, "slots =%08x.\n", *slots);

		ret = hdr_is_valid(dev->rd_msg_hdr);
		if (ret) {
			dev_err(dev->dev, "corrupted message header 0x%08X\n",
				dev->rd_msg_hdr);
			goto end;
		}
	}

	mei_hdr = (struct mei_msg_hdr *)&dev->rd_msg_hdr;
	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	if (mei_slots2data(*slots) < mei_hdr->length) {
		dev_err(dev->dev, "less data available than length=%08x.\n",
			*slots);
		/* we can't read the message */
		ret = -ENODATA;
		goto end;
	}

	/* HBM message */
	if (hdr_is_hbm(mei_hdr)) {
		ret = mei_hbm_dispatch(dev, mei_hdr);
		if (ret) {
			dev_dbg(dev->dev, "mei_hbm_dispatch failed ret = %d\n",
				ret);
			goto end;
		}
		goto reset_slots;
	}

	/* find recipient cl */
	list_for_each_entry(cl, &dev->file_list, link) {
		if (mei_cl_hbm_equal(cl, mei_hdr)) {
			cl_dbg(dev, cl, "got a message\n");
			break;
		}
	}

	/* if no recipient cl was found we assume corrupted header */
	if (&cl->link == &dev->file_list) {
		/* A message for a not connected fixed address client
		 * should be silently discarded
		 */
		if (hdr_is_fixed(mei_hdr)) {
			mei_irq_discard_msg(dev, mei_hdr);
			ret = 0;
			goto reset_slots;
		}
		dev_err(dev->dev, "no destination client found 0x%08X\n",
			dev->rd_msg_hdr);
		ret = -EBADMSG;
		goto end;
	}

	ret = mei_cl_irq_read_msg(cl, mei_hdr, cmpl_list);

reset_slots:
	/* reset the number of slots and header */
	*slots = mei_count_full_read_slots(dev);
	dev->rd_msg_hdr = 0;

	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_err(dev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);
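/**
 * mei_irq_write_handler - dispatch write requests after
 *	an interrupt has been received
 *
 * @dev: the device structure
 * @cmpl_list: list of completed callbacks
 *
 * Return: 0 on success, <0 on failure.
 */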
int mei_irq_write_handler(struct mei_device *dev, struct list_head *cmpl_list)
{
	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	s32 slots;
	int ret;

	if (!mei_hbuf_acquire(dev))
		return 0;

	slots = mei_hbuf_empty_slots(dev);
	if (slots <= 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(dev->dev, "complete all waiting for write cb.\n");

	list_for_each_entry_safe(cb, next, &dev->write_waiting_list, list) {
		cl = cb->cl;

		cl->status = 0;
		cl_dbg(dev, cl, "MEI WRITE COMPLETE\n");
		cl->writing_state = MEI_WRITE_COMPLETE;
		list_move_tail(&cb->list, cmpl_list);
	}

	/* complete control write list CB */
	dev_dbg(dev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list, list) {
		cl = cb->cl;
		switch (cb->fop_type) {
		case MEI_FOP_DISCONNECT:
			/* send disconnect message */
			ret = mei_cl_irq_disconnect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = mei_cl_irq_read(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_CONNECT:
			/* connect message */
			ret = mei_cl_irq_connect(cl, cb, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_DISCONNECT_RSP:
			/* send disconnect response */
			ret = mei_cl_irq_disconnect_rsp(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;

		case MEI_FOP_NOTIFY_START:
		case MEI_FOP_NOTIFY_STOP:
			ret = mei_cl_irq_notify(cl, cb, cmpl_list);
			if (ret)
				return ret;
			break;
		default:
			BUG();
		}
	}

	/* complete write list CB */
	dev_dbg(dev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(cb, next, &dev->write_list, list) {
		cl = cb->cl;
		ret = mei_cl_irq_write(cl, cb, cmpl_list);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);
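/**
 * mei_connect_timeout - handle a connect/disconnect timeout for a client
 *
 * @cl: host client
 */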
static void mei_connect_timeout(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (cl->state == MEI_FILE_CONNECTING) {
		if (dev->hbm_f_dot_supported) {
			cl->state = MEI_FILE_DISCONNECT_REQUIRED;
			wake_up(&cl->wait);
			return;
		}
	}
	mei_reset(dev);
}

#define MEI_STALL_TIMER_FREQ (2 * HZ)
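/**
 * mei_schedule_stall_timer - re-arm the stall detection timer work
 *
 * @dev: the device structure
 */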
void mei_schedule_stall_timer(struct mei_device *dev)
{
	schedule_delayed_work(&dev->timer_work, MEI_STALL_TIMER_FREQ);
}
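/**
 * mei_timer - stall detection timer function
 *
 * @work: pointer to the work_struct structure
 */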
void mei_timer(struct work_struct *work)
{
	struct mei_cl *cl;
	struct mei_device *dev = container_of(work,
					struct mei_device, timer_work.work);
	bool reschedule_timer = false;

	mutex_lock(&dev->device_lock);

	/* Catch interrupt stalls during HBM init handshake */
	if (dev->dev_state == MEI_DEV_INIT_CLIENTS &&
	    dev->hbm_state != MEI_HBM_IDLE) {

		if (dev->init_clients_timer) {
			if (--dev->init_clients_timer == 0) {
				dev_err(dev->dev, "timer: init clients timeout hbm_state = %d.\n",
					dev->hbm_state);
				mei_reset(dev);
				goto out;
			}
			reschedule_timer = true;
		}
	}

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;

	/* client connecting timer */
	list_for_each_entry(cl, &dev->file_list, link) {
		if (cl->timer_count) {
			if (--cl->timer_count == 0) {
				dev_err(dev->dev, "timer: connect/disconnect timeout.\n");
				mei_connect_timeout(cl);
				goto out;
			}
			reschedule_timer = true;
		}
	}

out:
	if (dev->dev_state != MEI_DEV_DISABLED && reschedule_timer)
		mei_schedule_stall_timer(dev);

	mutex_unlock(&dev->device_lock);
}