#include <linux/init.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/netdevice.h>
#include <linux/errno.h>
#include "ozconfig.h"
#include "ozprotocol.h"
#include "ozeltbuf.h"
#include "ozpd.h"
#include "ozproto.h"
#include "oztrace.h"
#include "ozevent.h"
#include "ozcdev.h"
#include "ozusbsvc.h"
#include <asm/unaligned.h>
#include <linux/uaccess.h>
#include <net/psnap.h>

#define OZ_MAX_TX_POOL_SIZE	6

static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd);
static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f);
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static int oz_send_isoc_frame(struct oz_pd *pd);
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f);
static void oz_isoc_stream_free(struct oz_isoc_stream *st);
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data);
static void oz_isoc_destructor(struct sk_buff *skb);
static int oz_def_app_init(void);
static void oz_def_app_term(void);
static int oz_def_app_start(struct oz_pd *pd, int resume);
static void oz_def_app_stop(struct oz_pd *pd, int pause);
static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt);
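
/* Number of ISOC frames currently submitted to the network stack; the skb
 * destructor decrements this once a frame has been consumed.
 */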
static atomic_t g_submitted_isoc = ATOMIC_INIT(0);
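
/* Application interface table: one entry per application id, giving the
 * init/term, start/stop, rx, heartbeat and farewell handlers for that service.
 */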
static const struct oz_app_if g_app_if[OZ_APPID_MAX] = {
	{oz_usb_init,
	oz_usb_term,
	oz_usb_start,
	oz_usb_stop,
	oz_usb_rx,
	oz_usb_heartbeat,
	oz_usb_farewell,
	OZ_APPID_USB},

	{oz_def_app_init,
	oz_def_app_term,
	oz_def_app_start,
	oz_def_app_stop,
	oz_def_app_rx,
	NULL,
	NULL,
	OZ_APPID_UNUSED1},

	{oz_def_app_init,
	oz_def_app_term,
	oz_def_app_start,
	oz_def_app_stop,
	oz_def_app_rx,
	NULL,
	NULL,
	OZ_APPID_UNUSED2},

	{oz_cdev_init,
	oz_cdev_term,
	oz_cdev_start,
	oz_cdev_stop,
	oz_cdev_rx,
	NULL,
	NULL,
	OZ_APPID_SERIAL},
};
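
/* Default handlers for application ids that have no service bound to them;
 * they accept and ignore all calls.
 */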
static int oz_def_app_init(void)
{
	return 0;
}

static void oz_def_app_term(void)
{
}

static int oz_def_app_start(struct oz_pd *pd, int resume)
{
	return 0;
}

static void oz_def_app_stop(struct oz_pd *pd, int pause)
{
}

static void oz_def_app_rx(struct oz_pd *pd, struct oz_elt *elt)
{
}
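
/* Record the new state of the PD, log it and, when tracing is enabled,
 * print the state name.
 */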
void oz_pd_set_state(struct oz_pd *pd, unsigned state)
{
	pd->state = state;
	oz_event_log(OZ_EVT_PD_STATE, 0, 0, NULL, state);
#ifdef WANT_TRACE
	switch (state) {
	case OZ_PD_S_IDLE:
		oz_trace("PD State: OZ_PD_S_IDLE\n");
		break;
	case OZ_PD_S_CONNECTED:
		oz_trace("PD State: OZ_PD_S_CONNECTED\n");
		break;
	case OZ_PD_S_STOPPED:
		oz_trace("PD State: OZ_PD_S_STOPPED\n");
		break;
	case OZ_PD_S_SLEEP:
		oz_trace("PD State: OZ_PD_S_SLEEP\n");
		break;
	}
#endif
}
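
/* Reference counting for the PD: oz_pd_put() destroys the PD when the last
 * reference is dropped.
 */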
void oz_pd_get(struct oz_pd *pd)
{
	atomic_inc(&pd->ref_count);
}

void oz_pd_put(struct oz_pd *pd)
{
	if (atomic_dec_and_test(&pd->ref_count))
		oz_pd_destroy(pd);
}
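
/* Allocate and initialise a new PD for the device with the given MAC address.
 * The PD starts with a reference count of 2. Returns NULL on failure.
 */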
struct oz_pd *oz_pd_alloc(const u8 *mac_addr)
{
	struct oz_pd *pd = kzalloc(sizeof(struct oz_pd), GFP_ATOMIC);
	if (pd) {
		int i;
		atomic_set(&pd->ref_count, 2);
		for (i = 0; i < OZ_APPID_MAX; i++)
			spin_lock_init(&pd->app_lock[i]);
		pd->last_rx_pkt_num = 0xffffffff;
		oz_pd_set_state(pd, OZ_PD_S_IDLE);
		pd->max_tx_size = OZ_MAX_TX_SIZE;
		memcpy(pd->mac_addr, mac_addr, ETH_ALEN);
		if (0 != oz_elt_buf_init(&pd->elt_buff)) {
			/* Don't fall through and touch the freed PD. */
			kfree(pd);
			return NULL;
		}
		spin_lock_init(&pd->tx_frame_lock);
		INIT_LIST_HEAD(&pd->tx_queue);
		INIT_LIST_HEAD(&pd->farewell_list);
		pd->last_sent_frame = &pd->tx_queue;
		spin_lock_init(&pd->stream_lock);
		INIT_LIST_HEAD(&pd->stream_list);
	}
	return pd;
}
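
/* Free a PD and everything it owns: ISOC streams, queued tx frames, pending
 * farewells and the tx frame pool.
 */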
void oz_pd_destroy(struct oz_pd *pd)
{
	struct list_head *e;
	struct oz_tx_frame *f;
	struct oz_isoc_stream *st;
	struct oz_farewell *fwell;
	oz_trace("Destroying PD\n");
	/* Delete any streams. */
	e = pd->stream_list.next;
	while (e != &pd->stream_list) {
		st = container_of(e, struct oz_isoc_stream, link);
		e = e->next;
		oz_isoc_stream_free(st);
	}
	/* Free any frames in the tx queue. */
	e = pd->tx_queue.next;
	while (e != &pd->tx_queue) {
		f = container_of(e, struct oz_tx_frame, link);
		e = e->next;
		if (f->skb != NULL)
			kfree_skb(f->skb);
		oz_retire_frame(pd, f);
	}
	oz_elt_buf_term(&pd->elt_buff);
	/* Free any pending farewells. */
	e = pd->farewell_list.next;
	while (e != &pd->farewell_list) {
		fwell = container_of(e, struct oz_farewell, link);
		e = e->next;
		kfree(fwell);
	}
	/* Deallocate all frames in the tx pool. */
	while (pd->tx_pool) {
		e = pd->tx_pool;
		pd->tx_pool = e->next;
		kfree(container_of(e, struct oz_tx_frame, link));
	}
	if (pd->net_dev)
		dev_put(pd->net_dev);
	kfree(pd);
}
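
/* Start the application services selected by the apps bitmask, marking them
 * as active (and no longer paused when resuming).
 */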
int oz_services_start(struct oz_pd *pd, u16 apps, int resume)
{
	const struct oz_app_if *ai;
	int rc = 0;
	oz_trace("oz_services_start(0x%x) resume(%d)\n", apps, resume);
	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (apps & (1<<ai->app_id)) {
			if (ai->start(pd, resume)) {
				rc = -1;
				oz_trace("Unable to start service %d\n",
					ai->app_id);
				break;
			}
			oz_polling_lock_bh();
			pd->total_apps |= (1<<ai->app_id);
			if (resume)
				pd->paused_apps &= ~(1<<ai->app_id);
			oz_polling_unlock_bh();
		}
	}
	return rc;
}
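
/* Stop (or pause) the application services selected by the apps bitmask. */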
void oz_services_stop(struct oz_pd *pd, u16 apps, int pause)
{
	const struct oz_app_if *ai;
	oz_trace("oz_services_stop(0x%x) pause(%d)\n", apps, pause);
	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (apps & (1<<ai->app_id)) {
			oz_polling_lock_bh();
			if (pause) {
				pd->paused_apps |= (1<<ai->app_id);
			} else {
				pd->total_apps &= ~(1<<ai->app_id);
				pd->paused_apps &= ~(1<<ai->app_id);
			}
			oz_polling_unlock_bh();
			ai->stop(pd, pause);
		}
	}
}
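
/* Run the heartbeat handler of each selected service and, if the PD accepts
 * ISOC frames at any time, push out up to 8 pending ISOC frames.
 */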
void oz_pd_heartbeat(struct oz_pd *pd, u16 apps)
{
	const struct oz_app_if *ai;
	int more = 0;
	for (ai = g_app_if; ai < &g_app_if[OZ_APPID_MAX]; ai++) {
		if (ai->heartbeat && (apps & (1<<ai->app_id))) {
			if (ai->heartbeat(pd))
				more = 1;
		}
	}
	if (more)
		oz_pd_request_heartbeat(pd);
	if (pd->mode & OZ_F_ISOC_ANYTIME) {
		int count = 8;
		while (count-- && (oz_send_isoc_frame(pd) >= 0))
			;
	}
}
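
/* Stop all services, mark the PD stopped, remove it from the PD list and
 * drop a reference to it.
 */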
void oz_pd_stop(struct oz_pd *pd)
{
	u16 stop_apps = 0;
	oz_trace("oz_pd_stop() State = 0x%x\n", pd->state);
	oz_pd_indicate_farewells(pd);
	oz_polling_lock_bh();
	stop_apps = pd->total_apps;
	pd->total_apps = 0;
	pd->paused_apps = 0;
	oz_polling_unlock_bh();
	oz_services_stop(pd, stop_apps, 0);
	oz_polling_lock_bh();
	oz_pd_set_state(pd, OZ_PD_S_STOPPED);
	/* Remove from the PD list. */
	list_del(&pd->link);
	oz_polling_unlock_bh();
	oz_trace("pd ref count = %d\n", atomic_read(&pd->ref_count));
	oz_timer_delete(pd, 0);
	oz_pd_put(pd);
}
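
/* Put the PD to sleep if it has a keep-alive interval and a session;
 * otherwise stop it. Returns non-zero if the PD was stopped.
 */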
int oz_pd_sleep(struct oz_pd *pd)
{
	int do_stop = 0;
	u16 stop_apps = 0;
	oz_polling_lock_bh();
	if (pd->state & (OZ_PD_S_SLEEP | OZ_PD_S_STOPPED)) {
		oz_polling_unlock_bh();
		return 0;
	}
	if (pd->keep_alive_j && pd->session_id) {
		oz_pd_set_state(pd, OZ_PD_S_SLEEP);
		pd->pulse_time_j = jiffies + pd->keep_alive_j;
		oz_trace("Sleep Now %lu until %lu\n",
			jiffies, pd->pulse_time_j);
	} else {
		do_stop = 1;
	}
	stop_apps = pd->total_apps;
	oz_polling_unlock_bh();
	if (do_stop) {
		oz_pd_stop(pd);
	} else {
		oz_services_stop(pd, stop_apps, 1);
		oz_timer_add(pd, OZ_TIMER_STOP, jiffies + pd->keep_alive_j, 1);
	}
	return do_stop;
}
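
/* Get a tx frame from the PD's frame pool, or allocate a new one if the pool
 * is empty.
 */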
static struct oz_tx_frame *oz_tx_frame_alloc(struct oz_pd *pd)
{
	struct oz_tx_frame *f = NULL;
	spin_lock_bh(&pd->tx_frame_lock);
	if (pd->tx_pool) {
		f = container_of(pd->tx_pool, struct oz_tx_frame, link);
		pd->tx_pool = pd->tx_pool->next;
		pd->tx_pool_count--;
	}
	spin_unlock_bh(&pd->tx_frame_lock);
	if (f == NULL)
		f = kmalloc(sizeof(struct oz_tx_frame), GFP_ATOMIC);
	if (f) {
		f->total_size = sizeof(struct oz_hdr);
		INIT_LIST_HEAD(&f->link);
		INIT_LIST_HEAD(&f->elt_list);
	}
	return f;
}

static void oz_tx_isoc_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	pd->nb_queued_isoc_frames--;
	list_del_init(&f->link);
	if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
		f->link.next = pd->tx_pool;
		pd->tx_pool = &f->link;
		pd->tx_pool_count++;
	} else {
		kfree(f);
	}
	oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing ISOC Frame isoc_nb= %d\n",
		pd->nb_queued_isoc_frames);
}

static void oz_tx_frame_free(struct oz_pd *pd, struct oz_tx_frame *f)
{
	spin_lock_bh(&pd->tx_frame_lock);
	if (pd->tx_pool_count < OZ_MAX_TX_POOL_SIZE) {
		f->link.next = pd->tx_pool;
		pd->tx_pool = &f->link;
		pd->tx_pool_count++;
		f = NULL;
	}
	spin_unlock_bh(&pd->tx_frame_lock);
	kfree(f);
}

static void oz_set_more_bit(struct sk_buff *skb)
{
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
	oz_hdr->control |= OZ_F_MORE_DATA;
}

static void oz_set_last_pkt_nb(struct oz_pd *pd, struct sk_buff *skb)
{
	struct oz_hdr *oz_hdr = (struct oz_hdr *)skb_network_header(skb);
	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
}
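
/* Build a new tx frame from queued elements (or an empty one if requested)
 * and append it to the PD's tx queue. Returns 0 on success, -1 otherwise.
 */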
int oz_prepare_frame(struct oz_pd *pd, int empty)
{
	struct oz_tx_frame *f;
	if ((pd->mode & OZ_MODE_MASK) != OZ_MODE_TRIGGERED)
		return -1;
	if (pd->nb_queued_frames >= OZ_MAX_QUEUED_FRAMES)
		return -1;
	if (!empty && !oz_are_elts_available(&pd->elt_buff))
		return -1;
	f = oz_tx_frame_alloc(pd);
	if (f == NULL)
		return -1;
	f->skb = NULL;
	f->hdr.control =
		(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ACK_REQUESTED;
	++pd->last_tx_pkt_num;
	put_unaligned(cpu_to_le32(pd->last_tx_pkt_num), &f->hdr.pkt_num);
	if (empty == 0) {
		oz_select_elts_for_tx(&pd->elt_buff, 0, &f->total_size,
			pd->max_tx_size, &f->elt_list);
	}
	spin_lock(&pd->tx_frame_lock);
	list_add_tail(&f->link, &pd->tx_queue);
	pd->nb_queued_frames++;
	spin_unlock(&pd->tx_frame_lock);
	return 0;
}
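
/* Construct an skb for a tx frame: reserve link-layer headroom, add the
 * device header, then copy in the oz header and each queued element.
 */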
static struct sk_buff *oz_build_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct list_head *e;
	/* Allocate an skb with enough space for the lower layers as well as
	 * the frame itself.
	 */
	skb = alloc_skb(f->total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;
	/* Fill in the device header. */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
		dev->dev_addr, skb->len) < 0)
		goto fail;
	/* Fill in the oz header. */
	oz_hdr = (struct oz_hdr *)skb_put(skb, f->total_size);
	f->hdr.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	memcpy(oz_hdr, &f->hdr, sizeof(struct oz_hdr));
	/* Copy the elements into the frame body. */
	elt = (struct oz_elt *)(oz_hdr+1);
	for (e = f->elt_list.next; e != &f->elt_list; e = e->next) {
		struct oz_elt_info *ei;
		ei = container_of(e, struct oz_elt_info, link);
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	return skb;
fail:
	kfree_skb(skb);
	return NULL;
}
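
/* Return a frame's elements to the element buffer (running any completion
 * callbacks) and put the frame back in the tx pool.
 */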
static void oz_retire_frame(struct oz_pd *pd, struct oz_tx_frame *f)
{
	struct list_head *e;
	struct oz_elt_info *ei;
	e = f->elt_list.next;
	while (e != &f->elt_list) {
		ei = container_of(e, struct oz_elt_info, link);
		e = e->next;
		list_del_init(&ei->link);
		if (ei->callback)
			ei->callback(pd, ei->context);
		spin_lock_bh(&pd->elt_buff.lock);
		oz_elt_info_free(&pd->elt_buff, ei);
		spin_unlock_bh(&pd->elt_buff.lock);
	}
	oz_tx_frame_free(pd, f);
	if (pd->elt_buff.free_elts > pd->elt_buff.max_free_elts)
		oz_trim_elt_pool(&pd->elt_buff);
}
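
/* Transmit the next frame in the tx queue that has not yet been sent.
 * Queued ISOC frames (f->skb already built) are removed and sent at once;
 * ordinary frames are built, sent and kept queued until acknowledged.
 */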
static int oz_send_next_queued_frame(struct oz_pd *pd, int more_data)
{
	struct sk_buff *skb;
	struct oz_tx_frame *f;
	struct list_head *e;
	spin_lock(&pd->tx_frame_lock);
	e = pd->last_sent_frame->next;
	if (e == &pd->tx_queue) {
		spin_unlock(&pd->tx_frame_lock);
		return -1;
	}
	f = container_of(e, struct oz_tx_frame, link);

	if (f->skb != NULL) {
		skb = f->skb;
		oz_tx_isoc_free(pd, f);
		spin_unlock(&pd->tx_frame_lock);
		if (more_data)
			oz_set_more_bit(skb);
		oz_set_last_pkt_nb(pd, skb);
		if ((int)atomic_read(&g_submitted_isoc) <
			OZ_MAX_SUBMITTED_ISOC) {
			if (dev_queue_xmit(skb) < 0) {
				oz_trace2(OZ_TRACE_TX_FRAMES,
					"Dropping ISOC Frame\n");
				oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
				return -1;
			}
			atomic_inc(&g_submitted_isoc);
			oz_trace2(OZ_TRACE_TX_FRAMES,
				"Sending ISOC Frame, nb_isoc= %d\n",
				pd->nb_queued_isoc_frames);
			return 0;
		} else {
			kfree_skb(skb);
			oz_trace2(OZ_TRACE_TX_FRAMES, "Dropping ISOC Frame>\n");
			oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
			return -1;
		}
	}

	pd->last_sent_frame = e;
	skb = oz_build_frame(pd, f);
	spin_unlock(&pd->tx_frame_lock);
	oz_trace2(OZ_TRACE_TX_FRAMES, "TX frame PN=0x%x\n", f->hdr.pkt_num);
	if (skb) {
		/* Only touch the skb if oz_build_frame() succeeded. */
		if (more_data)
			oz_set_more_bit(skb);
		oz_event_log(OZ_EVT_TX_FRAME,
			0,
			(((u16)f->hdr.control)<<8)|f->hdr.last_pkt_num,
			NULL, f->hdr.pkt_num);
		if (dev_queue_xmit(skb) < 0)
			return -1;
	}
	return 0;
}
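
/* Prepare as many frames as possible from the queued elements and send up to
 * 'backlog' of them, taking account of how ISOC traffic is handled for this
 * PD's mode. If nothing is pending, send a single empty frame instead.
 */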
void oz_send_queued_frames(struct oz_pd *pd, int backlog)
{
	while (oz_prepare_frame(pd, 0) >= 0)
		backlog++;

	switch (pd->mode & (OZ_F_ISOC_NO_ELTS | OZ_F_ISOC_ANYTIME)) {

	case OZ_F_ISOC_NO_ELTS: {
		backlog += pd->nb_queued_isoc_frames;
		if (backlog <= 0)
			goto out;
		if (backlog > OZ_MAX_SUBMITTED_ISOC)
			backlog = OZ_MAX_SUBMITTED_ISOC;
		break;
	}
	case OZ_NO_ELTS_ANYTIME: {
		if ((backlog <= 0) && (pd->isoc_sent == 0))
			goto out;
		break;
	}
	default: {
		if (backlog <= 0)
			goto out;
		break;
	}
	}
	while (backlog--) {
		if (oz_send_next_queued_frame(pd, backlog) < 0)
			break;
	}
	return;

out:	oz_prepare_frame(pd, 1);
	oz_send_next_queued_frame(pd, 0);
}
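
/* Build and transmit a single ISOC frame from any ISOC elements currently
 * queued in the element buffer.
 */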
static int oz_send_isoc_frame(struct oz_pd *pd)
{
	struct sk_buff *skb;
	struct net_device *dev = pd->net_dev;
	struct oz_hdr *oz_hdr;
	struct oz_elt *elt;
	struct list_head *e;
	struct list_head list;
	int total_size = sizeof(struct oz_hdr);
	INIT_LIST_HEAD(&list);

	oz_select_elts_for_tx(&pd->elt_buff, 1, &total_size,
		pd->max_tx_size, &list);
	if (list.next == &list)
		return 0;
	skb = alloc_skb(total_size + OZ_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL) {
		oz_trace("Cannot alloc skb\n");
		oz_elt_info_free_chain(&pd->elt_buff, &list);
		return -1;
	}
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb_reset_network_header(skb);
	skb->dev = dev;
	skb->protocol = htons(OZ_ETHERTYPE);
	if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
		dev->dev_addr, skb->len) < 0) {
		kfree_skb(skb);
		return -1;
	}
	oz_hdr = (struct oz_hdr *)skb_put(skb, total_size);
	oz_hdr->control = (OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
	oz_hdr->last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
	elt = (struct oz_elt *)(oz_hdr+1);

	for (e = list.next; e != &list; e = e->next) {
		struct oz_elt_info *ei;
		ei = container_of(e, struct oz_elt_info, link);
		memcpy(elt, ei->data, ei->length);
		elt = oz_next_elt(elt);
	}
	oz_event_log(OZ_EVT_TX_ISOC, 0, 0, NULL, 0);
	dev_queue_xmit(skb);
	oz_elt_info_free_chain(&pd->elt_buff, &list);
	return 0;
}
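
/* Remove from the tx queue all frames whose packet number has been
 * acknowledged (lpn is the last packet number reported by the peer) and
 * retire them.
 */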
void oz_retire_tx_frames(struct oz_pd *pd, u8 lpn)
{
	struct list_head *e;
	struct oz_tx_frame *f;
	struct list_head *first = NULL;
	struct list_head *last = NULL;
	u8 diff;
	u32 pkt_num;

	spin_lock(&pd->tx_frame_lock);
	e = pd->tx_queue.next;
	while (e != &pd->tx_queue) {
		f = container_of(e, struct oz_tx_frame, link);
		pkt_num = le32_to_cpu(get_unaligned(&f->hdr.pkt_num));
		diff = (lpn - (pkt_num & OZ_LAST_PN_MASK)) & OZ_LAST_PN_MASK;
		if ((diff > OZ_LAST_PN_HALF_CYCLE) || (pkt_num == 0))
			break;
		oz_trace2(OZ_TRACE_TX_FRAMES, "Releasing pkt_num= %u, nb= %d\n",
			pkt_num, pd->nb_queued_frames);
		if (first == NULL)
			first = e;
		last = e;
		e = e->next;
		pd->nb_queued_frames--;
	}
	if (first) {
		last->next->prev = &pd->tx_queue;
		pd->tx_queue.next = last->next;
		last->next = NULL;
	}
	pd->last_sent_frame = &pd->tx_queue;
	spin_unlock(&pd->tx_frame_lock);
	while (first) {
		f = container_of(first, struct oz_tx_frame, link);
		first = first->next;
		oz_retire_frame(pd, f);
	}
}

static struct oz_isoc_stream *pd_stream_find(struct oz_pd *pd, u8 ep_num)
{
	struct list_head *e;
	struct oz_isoc_stream *st;
	list_for_each(e, &pd->stream_list) {
		st = container_of(e, struct oz_isoc_stream, link);
		if (st->ep_num == ep_num)
			return st;
	}
	return NULL;
}
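
/* Create an ISOC stream for an endpoint unless one already exists. */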
int oz_isoc_stream_create(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st =
		kzalloc(sizeof(struct oz_isoc_stream), GFP_ATOMIC);
	if (!st)
		return -ENOMEM;
	st->ep_num = ep_num;
	spin_lock_bh(&pd->stream_lock);
	if (!pd_stream_find(pd, ep_num)) {
		list_add(&st->link, &pd->stream_list);
		st = NULL;
	}
	spin_unlock_bh(&pd->stream_lock);
	kfree(st);
	return 0;
}

static void oz_isoc_stream_free(struct oz_isoc_stream *st)
{
	kfree_skb(st->skb);
	kfree(st);
}

int oz_isoc_stream_delete(struct oz_pd *pd, u8 ep_num)
{
	struct oz_isoc_stream *st;
	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st)
		list_del(&st->link);
	spin_unlock_bh(&pd->stream_lock);
	if (st)
		oz_isoc_stream_free(st);
	return 0;
}
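
/* skb destructor for ISOC frames: account for the frame leaving the stack. */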
static void oz_isoc_destructor(struct sk_buff *skb)
{
	atomic_dec(&g_submitted_isoc);
	oz_event_log(OZ_EVT_TX_ISOC_DONE, atomic_read(&g_submitted_isoc),
		0, skb, 0);
}
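
/* Append one ISOC data unit to the stream's pending skb. Once ms_per_isoc
 * units have been accumulated the frame is completed and either queued on the
 * PD's tx queue (when the PD cannot accept ISOC at any time) or transmitted
 * immediately.
 */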
int oz_send_isoc_unit(struct oz_pd *pd, u8 ep_num, const u8 *data, int len)
{
	struct net_device *dev = pd->net_dev;
	struct oz_isoc_stream *st;
	u8 nb_units = 0;
	struct sk_buff *skb = NULL;
	struct oz_hdr *oz_hdr = NULL;
	int size = 0;
	spin_lock_bh(&pd->stream_lock);
	st = pd_stream_find(pd, ep_num);
	if (st) {
		skb = st->skb;
		st->skb = NULL;
		nb_units = st->nb_units;
		st->nb_units = 0;
		oz_hdr = st->oz_hdr;
		size = st->size;
	}
	spin_unlock_bh(&pd->stream_lock);
	if (!st)
		return 0;
	if (!skb) {
		/* Allocate enough space for a maximum-size frame. */
		skb = alloc_skb(pd->max_tx_size + OZ_ALLOCATED_SPACE(dev),
			GFP_ATOMIC);
		if (skb == NULL)
			return 0;
		/* Reserve headroom for the lower layers. */
		skb_reserve(skb, LL_RESERVED_SPACE(dev));
		skb_reset_network_header(skb);
		skb->dev = dev;
		skb->protocol = htons(OZ_ETHERTYPE);
		/* Send ISOC data at high priority. */
		skb->priority = 0x7;
		size = sizeof(struct oz_hdr) + sizeof(struct oz_isoc_large);
		oz_hdr = (struct oz_hdr *)skb_put(skb, size);
	}
	memcpy(skb_put(skb, len), data, len);
	size += len;
	if (++nb_units < pd->ms_per_isoc) {
		/* Not enough units yet: stash the partial frame back in the
		 * stream.
		 */
		spin_lock_bh(&pd->stream_lock);
		st->skb = skb;
		st->nb_units = nb_units;
		st->oz_hdr = oz_hdr;
		st->size = size;
		spin_unlock_bh(&pd->stream_lock);
	} else {
		struct oz_hdr oz;
		struct oz_isoc_large iso;
		spin_lock_bh(&pd->stream_lock);
		iso.frame_number = st->frame_num;
		st->frame_num += nb_units;
		spin_unlock_bh(&pd->stream_lock);
		oz.control =
			(OZ_PROTOCOL_VERSION<<OZ_VERSION_SHIFT) | OZ_F_ISOC;
		oz.last_pkt_num = pd->trigger_pkt_num & OZ_LAST_PN_MASK;
		oz.pkt_num = 0;
		iso.endpoint = ep_num;
		iso.format = OZ_DATA_F_ISOC_LARGE;
		iso.ms_data = nb_units;
		memcpy(oz_hdr, &oz, sizeof(oz));
		memcpy(oz_hdr+1, &iso, sizeof(iso));
		if (dev_hard_header(skb, dev, OZ_ETHERTYPE, pd->mac_addr,
			dev->dev_addr, skb->len) < 0)
			goto out;

		skb->destructor = oz_isoc_destructor;

		if (!(pd->mode & OZ_F_ISOC_ANYTIME)) {
			struct oz_tx_frame *isoc_unit = NULL;
			int nb = pd->nb_queued_isoc_frames;
			if (nb >= pd->isoc_latency) {
				oz_trace2(OZ_TRACE_TX_FRAMES,
					"Dropping ISOC Unit nb= %d\n",
					nb);
				goto out;
			}
			isoc_unit = oz_tx_frame_alloc(pd);
			if (isoc_unit == NULL)
				goto out;
			isoc_unit->hdr = oz;
			isoc_unit->skb = skb;
			spin_lock_bh(&pd->tx_frame_lock);
			list_add_tail(&isoc_unit->link, &pd->tx_queue);
			pd->nb_queued_isoc_frames++;
			spin_unlock_bh(&pd->tx_frame_lock);
			oz_trace2(OZ_TRACE_TX_FRAMES,
				"Added ISOC Frame to Tx Queue isoc_nb= %d, nb= %d\n",
				pd->nb_queued_isoc_frames, pd->nb_queued_frames);
			oz_event_log(OZ_EVT_TX_ISOC, nb_units, iso.frame_number,
				skb, atomic_read(&g_submitted_isoc));
			return 0;
		}

		/* The PD accepts ISOC frames at any time: submit now if we
		 * are not already at the submission limit.
		 */
		if (atomic_read(&g_submitted_isoc) < OZ_MAX_SUBMITTED_ISOC) {
			atomic_inc(&g_submitted_isoc);
			oz_event_log(OZ_EVT_TX_ISOC, nb_units, iso.frame_number,
				skb, atomic_read(&g_submitted_isoc));
			if (dev_queue_xmit(skb) < 0) {
				oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
				return -1;
			} else
				return 0;
		}

out:		oz_event_log(OZ_EVT_TX_ISOC_DROP, 0, 0, NULL, 0);
		kfree_skb(skb);
		return -1;
	}
	return 0;
}

void oz_apps_init(void)
{
	int i;
	for (i = 0; i < OZ_APPID_MAX; i++)
		if (g_app_if[i].init)
			g_app_if[i].init();
}

void oz_apps_term(void)
{
	int i;

	for (i = 0; i < OZ_APPID_MAX; i++)
		if (g_app_if[i].term)
			g_app_if[i].term();
}
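
/* Pass a received application element to the rx handler of the service
 * identified by app_id.
 */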
void oz_handle_app_elt(struct oz_pd *pd, u8 app_id, struct oz_elt *elt)
{
	const struct oz_app_if *ai;
	if (app_id == 0 || app_id > OZ_APPID_MAX)
		return;
	ai = &g_app_if[app_id-1];
	ai->rx(pd, elt);
}
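
/* Deliver any queued farewell reports to the USB service and free them. */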
void oz_pd_indicate_farewells(struct oz_pd *pd)
{
	struct oz_farewell *f;
	const struct oz_app_if *ai = &g_app_if[OZ_APPID_USB-1];
	while (1) {
		oz_polling_lock_bh();
		if (list_empty(&pd->farewell_list)) {
			oz_polling_unlock_bh();
			break;
		}
		f = list_first_entry(&pd->farewell_list,
			struct oz_farewell, link);
		list_del(&f->link);
		oz_polling_unlock_bh();
		if (ai->farewell)
			ai->farewell(pd, f->ep_num, f->report, f->len);
		kfree(f);
	}
}