/*
 * FM Driver for TI's Connectivity chip - sub-module common to the FM RX
 * and TX paths: Channel-8 command queuing over Shared Transport, FM
 * interrupt handling, RDS buffering and firmware download.
 */
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include "fmdrv.h"
#include "fmdrv_v4l2.h"
#include "fmdrv_common.h"
#include <linux/ti_wilink_st.h>
#include "fmdrv_rx.h"
#include "fmdrv_tx.h"

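/* Band configurations currently supported */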
static struct region_info region_configs[] = {
	/* Europe/US */
	{
		.chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
		.bot_freq = 87500,	/* 87.5 MHz */
		.top_freq = 108000,	/* 108 MHz */
		.fm_band = 0,
	},
	/* Japan */
	{
		.chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
		.bot_freq = 76000,	/* 76 MHz */
		.top_freq = 90000,	/* 90 MHz */
		.fm_band = 1,
	},
};

/* Default FM band/region to use (index into region_configs above) */
static u8 default_radio_region;
module_param(default_radio_region, byte, 0);
MODULE_PARM_DESC(default_radio_region, "Region: 0=Europe/US, 1=Japan");

/* RDS ring buffer size, in blocks */
static u32 default_rds_buf = 300;
module_param(default_rds_buf, uint, 0444);
MODULE_PARM_DESC(default_rds_buf, "RDS buffer entries");

/* Radio device number requested from V4L2 */
static u32 radio_nr = -1;
module_param(radio_nr, int, 0444);
MODULE_PARM_DESC(radio_nr, "Radio Nr");

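/* Forward declarations of the FM interrupt stage handlers */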
static void fm_irq_send_flag_getcmd(struct fmdev *);
static void fm_irq_handle_flag_getcmd_resp(struct fmdev *);
static void fm_irq_handle_hw_malfunction(struct fmdev *);
static void fm_irq_handle_rds_start(struct fmdev *);
static void fm_irq_send_rdsdata_getcmd(struct fmdev *);
static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *);
static void fm_irq_handle_rds_finish(struct fmdev *);
static void fm_irq_handle_tune_op_ended(struct fmdev *);
static void fm_irq_handle_power_enb(struct fmdev *);
static void fm_irq_handle_low_rssi_start(struct fmdev *);
static void fm_irq_afjump_set_pi(struct fmdev *);
static void fm_irq_handle_set_pi_resp(struct fmdev *);
static void fm_irq_afjump_set_pimask(struct fmdev *);
static void fm_irq_handle_set_pimask_resp(struct fmdev *);
static void fm_irq_afjump_setfreq(struct fmdev *);
static void fm_irq_handle_setfreq_resp(struct fmdev *);
static void fm_irq_afjump_enableint(struct fmdev *);
static void fm_irq_afjump_enableint_resp(struct fmdev *);
static void fm_irq_start_afjump(struct fmdev *);
static void fm_irq_handle_start_afjump_resp(struct fmdev *);
static void fm_irq_afjump_rd_freq(struct fmdev *);
static void fm_irq_afjump_rd_freq_resp(struct fmdev *);
static void fm_irq_handle_low_rssi_finish(struct fmdev *);
static void fm_irq_send_intmsk_cmd(struct fmdev *);
static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *);

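/* Interrupt stage indexes; must match the order of int_handler_table below */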
enum fmc_irq_handler_index {
	FM_SEND_FLAG_GETCMD_IDX,
	FM_HANDLE_FLAG_GETCMD_RESP_IDX,

	/* HW malfunction handler */
	FM_HW_MAL_FUNC_IDX,

	/* RDS threshold reached handlers */
	FM_RDS_START_IDX,
	FM_RDS_SEND_RDS_GETCMD_IDX,
	FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX,
	FM_RDS_FINISH_IDX,

	/* Tune operation ended handler */
	FM_HW_TUNE_OP_ENDED_IDX,

	/* Power enable/disable handler */
	FM_HW_POWER_ENB_IDX,

	/* Low RSSI / AF jump handlers */
	FM_LOW_RSSI_START_IDX,
	FM_AF_JUMP_SETPI_IDX,
	FM_AF_JUMP_HANDLE_SETPI_RESP_IDX,
	FM_AF_JUMP_SETPI_MASK_IDX,
	FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX,
	FM_AF_JUMP_SET_AF_FREQ_IDX,
	FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX,
	FM_AF_JUMP_ENABLE_INT_IDX,
	FM_AF_JUMP_ENABLE_INT_RESP_IDX,
	FM_AF_JUMP_START_AFJUMP_IDX,
	FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX,
	FM_AF_JUMP_RD_FREQ_IDX,
	FM_AF_JUMP_RD_FREQ_RESP_IDX,
	FM_LOW_RSSI_FINISH_IDX,

	/* Interrupt mask command handlers */
	FM_SEND_INTMSK_CMD_IDX,
	FM_HANDLE_INTMSK_CMD_RESP_IDX,
};

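/* FM interrupt handler chain, indexed by enum fmc_irq_handler_index */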
static int_handler_prototype int_handler_table[] = {
	fm_irq_send_flag_getcmd,
	fm_irq_handle_flag_getcmd_resp,
	fm_irq_handle_hw_malfunction,
	fm_irq_handle_rds_start,
	fm_irq_send_rdsdata_getcmd,
	fm_irq_handle_rdsdata_getcmd_resp,
	fm_irq_handle_rds_finish,
	fm_irq_handle_tune_op_ended,
	fm_irq_handle_power_enb,
	fm_irq_handle_low_rssi_start,
	fm_irq_afjump_set_pi,
	fm_irq_handle_set_pi_resp,
	fm_irq_afjump_set_pimask,
	fm_irq_handle_set_pimask_resp,
	fm_irq_afjump_setfreq,
	fm_irq_handle_setfreq_resp,
	fm_irq_afjump_enableint,
	fm_irq_afjump_enableint_resp,
	fm_irq_start_afjump,
	fm_irq_handle_start_afjump_resp,
	fm_irq_afjump_rd_freq,
	fm_irq_afjump_rd_freq_resp,
	fm_irq_handle_low_rssi_finish,
	fm_irq_send_intmsk_cmd,
	fm_irq_handle_intmsk_cmd_resp
};

static long (*g_st_write) (struct sk_buff *skb);
static struct completion wait_for_fmdrv_reg_comp;

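/* Run the handler registered for the current interrupt stage */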
static inline void fm_irq_call(struct fmdev *fmdev)
{
	fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
}

static inline void fm_irq_call_stage(struct fmdev *fmdev, u8 stage)
{
	fmdev->irq_info.stage = stage;
	fm_irq_call(fmdev);
}

static inline void fm_irq_timeout_stage(struct fmdev *fmdev, u8 stage)
{
	fmdev->irq_info.stage = stage;
	mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
}

#ifdef FM_DUMP_TXRX_PKT

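/* Dumps a Channel-8 command packet; compiled in only when FM_DUMP_TXRX_PKT is defined */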
inline void dump_tx_skb_data(struct sk_buff *skb)
{
	int len, len_org;
	u8 index;
	struct fm_cmd_msg_hdr *cmd_hdr;

	cmd_hdr = (struct fm_cmd_msg_hdr *)skb->data;
	printk(KERN_INFO "<<%shdr:%02x len:%02x opcode:%02x type:%s dlen:%02x",
	       fm_cb(skb)->completion ? " " : "*", cmd_hdr->hdr,
	       cmd_hdr->len, cmd_hdr->op,
	       cmd_hdr->rd_wr ? "RD" : "WR", cmd_hdr->dlen);

	len_org = skb->len - FM_CMD_MSG_HDR_SIZE;
	if (len_org > 0) {
		printk(KERN_CONT "\n data(%d): ", cmd_hdr->dlen);
		len = min(len_org, 14);
		for (index = 0; index < len; index++)
			printk(KERN_CONT "%x ",
			       skb->data[FM_CMD_MSG_HDR_SIZE + index]);
		printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
	}
	printk(KERN_CONT "\n");
}

inline void dump_rx_skb_data(struct sk_buff *skb)
{
	int len, len_org;
	u8 index;
	struct fm_event_msg_hdr *evt_hdr;

	evt_hdr = (struct fm_event_msg_hdr *)skb->data;
	printk(KERN_INFO ">> hdr:%02x len:%02x sts:%02x numhci:%02x opcode:%02x type:%s dlen:%02x",
	       evt_hdr->hdr, evt_hdr->len,
	       evt_hdr->status, evt_hdr->num_fm_hci_cmds, evt_hdr->op,
	       (evt_hdr->rd_wr) ? "RD" : "WR", evt_hdr->dlen);

	len_org = skb->len - FM_EVT_MSG_HDR_SIZE;
	if (len_org > 0) {
		printk(KERN_CONT "\n data(%d): ", evt_hdr->dlen);
		len = min(len_org, 14);
		for (index = 0; index < len; index++)
			printk(KERN_CONT "%x ",
			       skb->data[FM_EVT_MSG_HDR_SIZE + index]);
		printk(KERN_CONT "%s", (len_org > 14) ? ".." : "");
	}
	printk(KERN_CONT "\n");
}
#endif

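/* Set the band/region configuration used by the RX path */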
void fmc_update_region_info(struct fmdev *fmdev, u8 region_to_set)
{
	fmdev->rx.region = region_configs[region_to_set];
}

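/*
 * RX tasklet: dispatches each packet received from the chip either to the
 * FM interrupt handler chain or to the task waiting for a command response.
 */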
static void recv_tasklet(unsigned long arg)
{
	struct fmdev *fmdev;
	struct fm_irq *irq_info;
	struct fm_event_msg_hdr *evt_hdr;
	struct sk_buff *skb;
	u8 num_fm_hci_cmds;
	unsigned long flags;

	fmdev = (struct fmdev *)arg;
	irq_info = &fmdev->irq_info;

	/* Process all packets in the RX queue */
	while ((skb = skb_dequeue(&fmdev->rx_q))) {
		if (skb->len < sizeof(struct fm_event_msg_hdr)) {
			fmerr("skb(%p) has only %d bytes, at least need %zu bytes to decode\n",
			      skb, skb->len, sizeof(struct fm_event_msg_hdr));
			kfree_skb(skb);
			continue;
		}

		evt_hdr = (void *)skb->data;
		num_fm_hci_cmds = evt_hdr->num_fm_hci_cmds;

		/* FM interrupt packet from the chip */
		if (evt_hdr->op == FM_INTERRUPT) {
			/* Start the handler chain unless it is already running */
			if (!test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
				set_bit(FM_INTTASK_RUNNING, &fmdev->flag);
				if (irq_info->stage != 0) {
					fmerr("Invalid stage, resetting to zero\n");
					irq_info->stage = 0;
				}

				irq_info->handlers[irq_info->stage](fmdev);
			} else {
				set_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag);
			}
			kfree_skb(skb);
		}
		/* Response for a command sent with a completion handler */
		else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp != NULL) {
			spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
			fmdev->resp_skb = skb;
			spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
			complete(fmdev->resp_comp);

			fmdev->resp_comp = NULL;
			atomic_set(&fmdev->tx_cnt, 1);
		}
		/* Response claimed by the interrupt handler chain (no completion) */
		else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp == NULL) {
			if (fmdev->resp_skb != NULL)
				fmerr("Response SKB ptr not NULL\n");

			spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
			fmdev->resp_skb = skb;
			spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

			irq_info->handlers[irq_info->stage](fmdev);

			kfree_skb(skb);
			atomic_set(&fmdev->tx_cnt, 1);
		} else {
			fmerr("Nobody claimed SKB(%p), purging\n", skb);
		}

		/*
		 * Check flow control: if the chip can accept another command
		 * and the TX queue is not empty, schedule the TX tasklet.
		 */
		if (num_fm_hci_cmds && atomic_read(&fmdev->tx_cnt))
			if (!skb_queue_empty(&fmdev->tx_q))
				tasklet_schedule(&fmdev->tx_task);
	}
}

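/*
 * TX tasklet: sends the command at the head of the TX queue to the chip
 * over Shared Transport, honouring the one-command-in-flight flow control.
 */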
static void send_tasklet(unsigned long arg)
{
	struct fmdev *fmdev;
	struct sk_buff *skb;
	int len;

	fmdev = (struct fmdev *)arg;

	if (!atomic_read(&fmdev->tx_cnt))
		return;

	/* Reset TX flow control if the previously sent command timed out */
	if ((jiffies - fmdev->last_tx_jiffies) > FM_DRV_TX_TIMEOUT) {
		fmerr("TX timeout occurred\n");
		atomic_set(&fmdev->tx_cnt, 1);
	}

	/* Send the next queued FM command packet */
	skb = skb_dequeue(&fmdev->tx_q);
	if (!skb)
		return;

	atomic_dec(&fmdev->tx_cnt);
	fmdev->pre_op = fm_cb(skb)->fm_op;

	if (fmdev->resp_comp != NULL)
		fmerr("Response completion handler is not NULL\n");

	fmdev->resp_comp = fm_cb(skb)->completion;

	/* Hand the packet to the ST driver */
	len = g_st_write(skb);
	if (len < 0) {
		kfree_skb(skb);
		fmdev->resp_comp = NULL;
		fmerr("TX tasklet failed to send skb(%p)\n", skb);
		atomic_set(&fmdev->tx_cnt, 1);
	} else {
		fmdev->last_tx_jiffies = jiffies;
	}
}

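/*
 * Queue an FM Channel-8 command on the TX queue and schedule the TX
 * tasklet. During firmware download the payload is sent as-is, without
 * prepending a command header.
 */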
static int fm_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
		       int payload_len, struct completion *wait_completion)
{
	struct sk_buff *skb;
	struct fm_cmd_msg_hdr *hdr;
	int size;

	if (fm_op >= FM_INTERRUPT) {
		fmerr("Invalid fm opcode - %d\n", fm_op);
		return -EINVAL;
	}
	if (test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) && payload == NULL) {
		fmerr("Payload data is NULL during fw download\n");
		return -EINVAL;
	}
	if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag))
		size =
		    FM_CMD_MSG_HDR_SIZE + ((payload == NULL) ? 0 : payload_len);
	else
		size = payload_len;

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb) {
		fmerr("No memory to create new SKB\n");
		return -ENOMEM;
	}
	/*
	 * Don't fill the FM header for the commands which come from the FM
	 * firmware file; they already carry a complete command packet.
	 */
	if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) ||
	    test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
		/* Fill command header info */
		hdr = skb_put(skb, FM_CMD_MSG_HDR_SIZE);
		hdr->hdr = FM_PKT_LOGICAL_CHAN_NUMBER;	/* FM Channel-8 packet */

		/* 3 bytes (fm_op, rd_wr, dlen) + payload length */
		hdr->len = ((payload == NULL) ? 0 : payload_len) + 3;

		/* FM opcode */
		hdr->op = fm_op;

		/* Read/write type */
		hdr->rd_wr = type;
		hdr->dlen = payload_len;
		fm_cb(skb)->fm_op = fm_op;

		/* The chip expects the 16-bit payload in big-endian byte order */
		if (payload != NULL)
			*(__be16 *)payload = cpu_to_be16(*(u16 *)payload);

	} else if (payload != NULL) {
		/* Firmware command: byte 2 of the payload holds the opcode */
		fm_cb(skb)->fm_op = *((u8 *)payload + 2);
	}
	if (payload != NULL)
		skb_put_data(skb, payload, payload_len);

	fm_cb(skb)->completion = wait_completion;
	skb_queue_tail(&fmdev->tx_q, skb);
	tasklet_schedule(&fmdev->tx_task);

	return 0;
}

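/*
 * Send a command to the chip and wait for its response; optionally copy
 * the response payload back to the caller.
 */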
int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
		 unsigned int payload_len, void *response, int *response_len)
{
	struct sk_buff *skb;
	struct fm_event_msg_hdr *evt_hdr;
	unsigned long flags;
	int ret;

	init_completion(&fmdev->maintask_comp);
	ret = fm_send_cmd(fmdev, fm_op, type, payload, payload_len,
			  &fmdev->maintask_comp);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(&fmdev->maintask_comp,
					 FM_DRV_TX_TIMEOUT)) {
		fmerr("Timeout(%d sec), didn't get completion signal from RX tasklet\n",
		      jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
		return -ETIMEDOUT;
	}
	if (!fmdev->resp_skb) {
		fmerr("Response SKB is missing\n");
		return -EFAULT;
	}
	spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
	skb = fmdev->resp_skb;
	fmdev->resp_skb = NULL;
	spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

	evt_hdr = (void *)skb->data;
	if (evt_hdr->status != 0) {
		fmerr("Received event pkt status(%d) is not zero\n",
		      evt_hdr->status);
		kfree_skb(skb);
		return -EIO;
	}

	/* Copy the response data back to the caller */
	if (response != NULL && response_len != NULL && evt_hdr->dlen) {
		/* Skip header info and copy only response data */
		skb_pull(skb, sizeof(struct fm_event_msg_hdr));
		memcpy(response, skb->data, evt_hdr->dlen);
		*response_len = evt_hdr->dlen;
	} else if (response_len != NULL && evt_hdr->dlen == 0) {
		*response_len = 0;
	}
	kfree_skb(skb);

	return 0;
}

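/* Check the status byte of the response received for the previous command */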
static inline int check_cmdresp_status(struct fmdev *fmdev,
				       struct sk_buff **skb)
{
	struct fm_event_msg_hdr *fm_evt_hdr;
	unsigned long flags;

	del_timer(&fmdev->irq_info.timer);

	spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
	*skb = fmdev->resp_skb;
	fmdev->resp_skb = NULL;
	spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

	fm_evt_hdr = (void *)(*skb)->data;
	if (fm_evt_hdr->status != 0) {
		fmerr("irq: opcode %x response status is not zero, initiating irq recovery process\n",
		      fm_evt_hdr->op);

		mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
		return -1;
	}

	return 0;
}

static inline void fm_irq_common_cmd_resp_helper(struct fmdev *fmdev, u8 stage)
{
	struct sk_buff *skb;

	if (!check_cmdresp_status(fmdev, &skb))
		fm_irq_call_stage(fmdev, stage);
}

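/*
 * Interrupt-processing timeout handler: tries to re-enable FM interrupts
 * by restarting the handler chain at the interrupt-mask stage, giving up
 * after FM_IRQ_TIMEOUT_RETRY_MAX attempts.
 */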
static void int_timeout_handler(struct timer_list *t)
{
	struct fmdev *fmdev;
	struct fm_irq *fmirq;

	fmdbg("irq: timeout, trying to re-enable fm interrupts\n");
	fmdev = from_timer(fmdev, t, irq_info.timer);
	fmirq = &fmdev->irq_info;
	fmirq->retry++;

	if (fmirq->retry > FM_IRQ_TIMEOUT_RETRY_MAX) {
		/* Stop recovery; reset stage and retry counters */
		fmirq->stage = 0;
		fmirq->retry = 0;
		fmerr("Recovery action failed during irq processing, max retry reached\n");
		return;
	}
	fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
}

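/* Read the FLAG register to find out which interrupt(s) occurred */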
static void fm_irq_send_flag_getcmd(struct fmdev *fmdev)
{
	u16 flag;

	if (!fm_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, sizeof(flag), NULL))
		fm_irq_timeout_stage(fmdev, FM_HANDLE_FLAG_GETCMD_RESP_IDX);
}

static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	struct fm_event_msg_hdr *fm_evt_hdr;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	fm_evt_hdr = (void *)skb->data;

	/* Skip header info and copy only response data */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	memcpy(&fmdev->irq_info.flag, skb->data, fm_evt_hdr->dlen);

	fmdev->irq_info.flag = be16_to_cpu((__force __be16)fmdev->irq_info.flag);
	fmdbg("irq: flag register(0x%x)\n", fmdev->irq_info.flag);

	/* Continue with the next handler in the chain */
	fm_irq_call_stage(fmdev, FM_HW_MAL_FUNC_IDX);
}

static void fm_irq_handle_hw_malfunction(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_MAL_EVENT & fmdev->irq_info.mask)
		fmerr("irq: HW MAL int received - do nothing\n");

	/* Continue with the next handler in the chain */
	fm_irq_call_stage(fmdev, FM_RDS_START_IDX);
}

static void fm_irq_handle_rds_start(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_RDS_EVENT & fmdev->irq_info.mask) {
		fmdbg("irq: rds threshold reached\n");
		fmdev->irq_info.stage = FM_RDS_SEND_RDS_GETCMD_IDX;
	} else {
		/* No RDS data to read; move on to the tune-ended handler */
		fmdev->irq_info.stage = FM_HW_TUNE_OP_ENDED_IDX;
	}

	fm_irq_call(fmdev);
}

static void fm_irq_send_rdsdata_getcmd(struct fmdev *fmdev)
{
	/* Read the RDS blocks accumulated in the chip's FIFO */
	if (!fm_send_cmd(fmdev, RDS_DATA_GET, REG_RD, NULL,
			 (FM_RX_RDS_FIFO_THRESHOLD * 3), NULL))
		fm_irq_timeout_stage(fmdev, FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX);
}

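/*
 * Cache an Alternate Frequency (AF) code received in an RDS group 0A,
 * converting the AF code to a frequency in the current band.
 */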
static void fm_rx_update_af_cache(struct fmdev *fmdev, u8 af)
{
	struct tuned_station_info *stat_info = &fmdev->rx.stat_info;
	u8 reg_idx = fmdev->rx.region.fm_band;
	u8 index;
	u32 freq;

	/* The first AF code tells how many AFs follow; reset the cache */
	if ((af >= FM_RDS_1_AF_FOLLOWS) && (af <= FM_RDS_25_AF_FOLLOWS)) {
		fmdev->rx.stat_info.af_list_max = (af - FM_RDS_1_AF_FOLLOWS + 1);
		fmdev->rx.stat_info.afcache_size = 0;
		fmdbg("No of expected AF : %d\n", fmdev->rx.stat_info.af_list_max);
		return;
	}

	if (af < FM_RDS_MIN_AF)
		return;
	if (reg_idx == FM_BAND_EUROPE_US && af > FM_RDS_MAX_AF)
		return;
	if (reg_idx == FM_BAND_JAPAN && af > FM_RDS_MAX_AF_JAPAN)
		return;

	freq = fmdev->rx.region.bot_freq + (af * 100);
	if (freq == fmdev->rx.freq) {
		fmdbg("Current freq(%d) is matching with received AF(%d)\n",
		      fmdev->rx.freq, freq);
		return;
	}

	/* Check whether the frequency is already in the AF cache */
	for (index = 0; index < stat_info->afcache_size; index++) {
		if (stat_info->af_cache[index] == freq)
			break;
	}

	/* Reached the limit of the cache? */
	if (index == stat_info->af_list_max) {
		fmdbg("AF cache is full\n");
		return;
	}

	/* Not in the cache yet; append it */
	if (index == stat_info->afcache_size) {
		fmdbg("Storing AF %d to cache index %d\n", freq, index);
		stat_info->af_cache[index] = freq;
		stat_info->afcache_size++;
	}
}

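/*
 * Byte-swap the RDS block data for chips whose firmware delivers it in the
 * opposite byte order (ASIC ID other than 0x6350).
 */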
static void fm_rdsparse_swapbytes(struct fmdev *fmdev,
				  struct fm_rdsdata_format *rds_format)
{
	u8 index = 0;
	u8 *rds_buff;

	if (fmdev->asci_id != 0x6350) {
		rds_buff = &rds_format->data.groupdatabuff.buff[0];
		while (index + 1 < FM_RX_RDS_INFO_FIELD_MAX) {
			swap(rds_buff[index], rds_buff[index + 1]);
			index += 2;
		}
	}
}

static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	struct fm_rdsdata_format rds_fmt;
	struct fm_rds *rds = &fmdev->rx.rds;
	unsigned long group_idx, flags;
	u8 *rds_data, meta_data, tmpbuf[FM_RDS_BLK_SIZE];
	u8 type, blk_idx;
	u16 cur_picode;
	u32 rds_len;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	/* Skip header info */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	rds_data = skb->data;
	rds_len = skb->len;

	/* Parse the RDS data */
	while (rds_len >= FM_RDS_BLK_SIZE) {
		meta_data = rds_data[2];
		/* Block type is carried in the low 3 bits of the status byte */
		type = (meta_data & 0x07);

		/* Map block C' onto the C index so blocks run 0..3 (A..D) */
		blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
		fmdbg("Block index:%d(%s)\n", blk_idx,
		      (meta_data & FM_RDS_STATUS_ERR_MASK) ? "Bad" : "Ok");

		if ((meta_data & FM_RDS_STATUS_ERR_MASK) != 0)
			break;

		if (blk_idx > FM_RDS_BLK_IDX_D) {
			fmdbg("Block sequence mismatch\n");
			rds->last_blk_idx = -1;
			break;
		}

		/* Skip the status byte and copy only the data bytes */
		memcpy(&rds_fmt.data.groupdatabuff.
		       buff[blk_idx * (FM_RDS_BLK_SIZE - 1)],
		       rds_data, (FM_RDS_BLK_SIZE - 1));

		rds->last_blk_idx = blk_idx;

		/* A complete group (blocks A to D) has been received; decode it */
		if (blk_idx == FM_RDS_BLK_IDX_D) {
			fmdbg("Good block received\n");
			fm_rdsparse_swapbytes(fmdev, &rds_fmt);

			/* Pick up the PI code of the tuned station */
			cur_picode = be16_to_cpu((__force __be16)rds_fmt.data.groupgeneral.pidata);
			if (fmdev->rx.stat_info.picode != cur_picode)
				fmdev->rx.stat_info.picode = cur_picode;

			fmdbg("picode:%d\n", cur_picode);

			group_idx = (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
			fmdbg("(fmdrv):Group:%ld%s\n", group_idx / 2,
			      (group_idx % 2) ? "B" : "A");

			group_idx = 1 << (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
			if (group_idx == FM_RDS_GROUP_TYPE_MASK_0A) {
				/* Group 0A carries the Alternate Frequency list */
				fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[0]);
				fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[1]);
			}
		}
		rds_len -= FM_RDS_BLK_SIZE;
		rds_data += FM_RDS_BLK_SIZE;
	}

	/* Copy raw RDS blocks to the internal ring buffer for user space */
	rds_data = skb->data;
	rds_len = skb->len;

	spin_lock_irqsave(&fmdev->rds_buff_lock, flags);
	while (rds_len > 0) {
		/* Encode the block index into byte 2 (received and expected block fields) */
		type = (rds_data[2] & 0x07);
		blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
		tmpbuf[2] = blk_idx;
		tmpbuf[2] |= blk_idx << 3;

		/* Fill data bytes */
		tmpbuf[0] = rds_data[0];
		tmpbuf[1] = rds_data[1];

		memcpy(&rds->buff[rds->wr_idx], &tmpbuf, FM_RDS_BLK_SIZE);
		rds->wr_idx = (rds->wr_idx + FM_RDS_BLK_SIZE) % rds->buf_size;

		/* Check for overflow and reset the buffer if it happens */
		if (rds->wr_idx == rds->rd_idx) {
			fmdbg("RDS buffer overflow\n");
			rds->wr_idx = 0;
			rds->rd_idx = 0;
			break;
		}
		rds_len -= FM_RDS_BLK_SIZE;
		rds_data += FM_RDS_BLK_SIZE;
	}
	spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);

	/* Wake up readers blocked on the RDS ring buffer */
	if (rds->wr_idx != rds->rd_idx)
		wake_up_interruptible(&rds->read_queue);

	fm_irq_call_stage(fmdev, FM_RDS_FINISH_IDX);
}

static void fm_irq_handle_rds_finish(struct fmdev *fmdev)
{
	fm_irq_call_stage(fmdev, FM_HW_TUNE_OP_ENDED_IDX);
}

static void fm_irq_handle_tune_op_ended(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & (FM_FR_EVENT | FM_BL_EVENT) &
	    fmdev->irq_info.mask) {
		fmdbg("irq: tune ended/bandlimit reached\n");
		if (test_and_clear_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag)) {
			fmdev->irq_info.stage = FM_AF_JUMP_RD_FREQ_IDX;
		} else {
			complete(&fmdev->maintask_comp);
			fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;
		}
	} else
		fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;

	fm_irq_call(fmdev);
}

static void fm_irq_handle_power_enb(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_POW_ENB_EVENT) {
		fmdbg("irq: Power Enabled/Disabled\n");
		complete(&fmdev->maintask_comp);
	}

	fm_irq_call_stage(fmdev, FM_LOW_RSSI_START_IDX);
}

static void fm_irq_handle_low_rssi_start(struct fmdev *fmdev)
{
	if ((fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON) &&
	    (fmdev->irq_info.flag & FM_LEV_EVENT & fmdev->irq_info.mask) &&
	    (fmdev->rx.freq != FM_UNDEFINED_FREQ) &&
	    (fmdev->rx.stat_info.afcache_size != 0)) {
		fmdbg("irq: rssi level has fallen below threshold level\n");

		/* Disable further low-RSSI interrupts until the AF switch is done */
		fmdev->irq_info.mask &= ~FM_LEV_EVENT;

		fmdev->rx.afjump_idx = 0;
		fmdev->rx.freq_before_jump = fmdev->rx.freq;
		fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
	} else {
		/* Nothing to do; proceed to the interrupt-mask update */
		fmdev->irq_info.stage = FM_SEND_INTMSK_CMD_IDX;
	}

	fm_irq_call(fmdev);
}

static void fm_irq_afjump_set_pi(struct fmdev *fmdev)
{
	u16 payload;

	/* Write the current station's PI code to the chip */
	payload = fmdev->rx.stat_info.picode;
	if (!fm_send_cmd(fmdev, RDS_PI_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_RESP_IDX);
}

static void fm_irq_handle_set_pi_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SETPI_MASK_IDX);
}

static void fm_irq_afjump_set_pimask(struct fmdev *fmdev)
{
	u16 payload;

	/* Clear the RDS PI mask before starting the AF jump */
	payload = 0x0000;
	if (!fm_send_cmd(fmdev, RDS_PI_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX);
}

static void fm_irq_handle_set_pimask_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SET_AF_FREQ_IDX);
}

static void fm_irq_afjump_setfreq(struct fmdev *fmdev)
{
	u16 frq_index;
	u16 payload;

	fmdbg("Switch to %d kHz\n", fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx]);
	frq_index = (fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx] -
		     fmdev->rx.region.bot_freq) / FM_FREQ_MUL;

	payload = frq_index;
	if (!fm_send_cmd(fmdev, AF_FREQ_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX);
}

static void fm_irq_handle_setfreq_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_ENABLE_INT_IDX);
}

static void fm_irq_afjump_enableint(struct fmdev *fmdev)
{
	u16 payload;

	/* Enable only the tune-ended (FR) interrupt for the jump */
	payload = FM_FR_EVENT;
	if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_ENABLE_INT_RESP_IDX);
}

static void fm_irq_afjump_enableint_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_START_AFJUMP_IDX);
}

static void fm_irq_start_afjump(struct fmdev *fmdev)
{
	u16 payload;

	payload = FM_TUNER_AF_JUMP_MODE;
	if (!fm_send_cmd(fmdev, TUNER_MODE_SET, REG_WR, &payload,
			 sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX);
}

static void fm_irq_handle_start_afjump_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;
	set_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag);
	clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
}

static void fm_irq_afjump_rd_freq(struct fmdev *fmdev)
{
	u16 payload;

	if (!fm_send_cmd(fmdev, FREQ_SET, REG_RD, NULL, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_RD_FREQ_RESP_IDX);
}

static void fm_irq_afjump_rd_freq_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	u16 read_freq;
	u32 curr_freq, jumped_freq;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	/* Skip header info and copy only response data */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	memcpy(&read_freq, skb->data, sizeof(read_freq));
	read_freq = be16_to_cpu((__force __be16)read_freq);
	curr_freq = fmdev->rx.region.bot_freq + ((u32)read_freq * FM_FREQ_MUL);

	jumped_freq = fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx];

	/* If the jump succeeded, adopt the new frequency and re-enable low-RSSI interrupts */
	if ((curr_freq != fmdev->rx.freq_before_jump) && (curr_freq == jumped_freq)) {
		fmdbg("Successfully switched to alternate freq %d\n", curr_freq);
		fmdev->rx.freq = curr_freq;
		fm_rx_reset_rds_cache(fmdev);

		if (fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON)
			fmdev->irq_info.mask |= FM_LEV_EVENT;

		fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
	} else {
		/* AF jump failed; try the next frequency in the AF cache */
		fmdev->rx.afjump_idx++;

		/* If all AF frequencies have been tried, give up */
		if (fmdev->rx.afjump_idx >= fmdev->rx.stat_info.afcache_size) {
			fmdbg("AF switch processing failed\n");
			fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
		} else {
			fmdbg("Trying next freq in AF cache\n");
			fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
		}
	}
	fm_irq_call(fmdev);
}

static void fm_irq_handle_low_rssi_finish(struct fmdev *fmdev)
{
	fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
}

static void fm_irq_send_intmsk_cmd(struct fmdev *fmdev)
{
	u16 payload;

	/* Re-enable FM interrupts by restoring the interrupt mask */
	payload = fmdev->irq_info.mask;

	if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
			 sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_HANDLE_INTMSK_CMD_RESP_IDX);
}

static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	/*
	 * This is the last stage of the interrupt handler chain; reset the
	 * stage index so the next interrupt starts from the beginning.
	 */
	fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;

	/* Start over immediately if another interrupt arrived meanwhile */
	if (test_and_clear_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag))
		fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
	else
		clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
}

/* Returns availability of RDS data in the internal buffer */
int fmc_is_rds_data_available(struct fmdev *fmdev, struct file *file,
			      struct poll_table_struct *pts)
{
	poll_wait(file, &fmdev->rx.rds.read_queue, pts);
	if (fmdev->rx.rds.rd_idx != fmdev->rx.rds.wr_idx)
		return 0;

	return -EAGAIN;
}

/* Copies RDS data from the driver's internal ring buffer to user space */
int fmc_transfer_rds_from_internal_buff(struct fmdev *fmdev, struct file *file,
					u8 __user *buf, size_t count)
{
	u32 block_count;
	u8 tmpbuf[FM_RDS_BLK_SIZE];
	unsigned long flags;
	int ret;

	if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx) {
		if (file->f_flags & O_NONBLOCK)
			return -EWOULDBLOCK;

		ret = wait_event_interruptible(fmdev->rx.rds.read_queue,
					       (fmdev->rx.rds.wr_idx != fmdev->rx.rds.rd_idx));
		if (ret)
			return -EINTR;
	}

	/* Copy RDS blocks from the internal buffer to the user buffer */
	count /= FM_RDS_BLK_SIZE;
	block_count = 0;
	ret = 0;

	while (block_count < count) {
		spin_lock_irqsave(&fmdev->rds_buff_lock, flags);

		if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx) {
			spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);
			break;
		}
		memcpy(tmpbuf, &fmdev->rx.rds.buff[fmdev->rx.rds.rd_idx],
		       FM_RDS_BLK_SIZE);
		fmdev->rx.rds.rd_idx += FM_RDS_BLK_SIZE;
		if (fmdev->rx.rds.rd_idx >= fmdev->rx.rds.buf_size)
			fmdev->rx.rds.rd_idx = 0;

		spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);

		if (copy_to_user(buf, tmpbuf, FM_RDS_BLK_SIZE))
			break;

		block_count++;
		buf += FM_RDS_BLK_SIZE;
		ret += FM_RDS_BLK_SIZE;
	}
	return ret;
}

int fmc_set_freq(struct fmdev *fmdev, u32 freq_to_set)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_freq(fmdev, freq_to_set);

	case FM_MODE_TX:
		return fm_tx_set_freq(fmdev, freq_to_set);

	default:
		return -EINVAL;
	}
}

int fmc_get_freq(struct fmdev *fmdev, u32 *cur_tuned_frq)
{
	if (fmdev->rx.freq == FM_UNDEFINED_FREQ) {
		fmerr("RX frequency is not set\n");
		return -EPERM;
	}
	if (cur_tuned_frq == NULL) {
		fmerr("Invalid memory\n");
		return -ENOMEM;
	}

	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		*cur_tuned_frq = fmdev->rx.freq;
		return 0;

	case FM_MODE_TX:
		*cur_tuned_frq = 0;
		return 0;

	default:
		return -EINVAL;
	}
}

int fmc_set_region(struct fmdev *fmdev, u8 region_to_set)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_region(fmdev, region_to_set);

	case FM_MODE_TX:
		return fm_tx_set_region(fmdev, region_to_set);

	default:
		return -EINVAL;
	}
}

int fmc_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_mute_mode(fmdev, mute_mode_toset);

	case FM_MODE_TX:
		return fm_tx_set_mute_mode(fmdev, mute_mode_toset);

	default:
		return -EINVAL;
	}
}

int fmc_set_stereo_mono(struct fmdev *fmdev, u16 mode)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_stereo_mono(fmdev, mode);

	case FM_MODE_TX:
		return fm_tx_set_stereo_mono(fmdev, mode);

	default:
		return -EINVAL;
	}
}

int fmc_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_rds_mode(fmdev, rds_en_dis);

	case FM_MODE_TX:
		return fm_tx_set_rds_mode(fmdev, rds_en_dis);

	default:
		return -EINVAL;
	}
}

/* Power down the FM chip and release the ST channel */
static int fm_power_down(struct fmdev *fmdev)
{
	u16 payload;
	int ret;

	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmerr("FM core is not ready\n");
		return -EPERM;
	}
	if (fmdev->curr_fmmode == FM_MODE_OFF) {
		fmdbg("FM chip is already in OFF state\n");
		return 0;
	}

	payload = 0x0;
	ret = fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
			   sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	return fmc_release(fmdev);
}

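/*
 * Read the firmware (.bts) file and download its commands to the chip,
 * honouring any delay actions embedded in the file.
 */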
static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
{
	const struct firmware *fw_entry;
	struct bts_header *fw_header;
	struct bts_action *action;
	struct bts_action_delay *delay;
	u8 *fw_data;
	int ret, fw_len, cmd_cnt;

	cmd_cnt = 0;
	set_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);

	ret = request_firmware(&fw_entry, fw_name,
			       &fmdev->radio_dev->dev);
	if (ret < 0) {
		fmerr("Unable to read firmware(%s) content\n", fw_name);
		return ret;
	}
	fmdbg("Firmware(%s) length : %zu bytes\n", fw_name, fw_entry->size);

	fw_data = (void *)fw_entry->data;
	fw_len = fw_entry->size;

	fw_header = (struct bts_header *)fw_data;
	if (fw_header->magic != FM_FW_FILE_HEADER_MAGIC) {
		fmerr("%s not a legal TI firmware file\n", fw_name);
		ret = -EINVAL;
		goto rel_fw;
	}
	fmdbg("FW(%s) magic number : 0x%x\n", fw_name, fw_header->magic);

	/* Skip the file header; only the actions that follow are needed */
	fw_data += sizeof(struct bts_header);
	fw_len -= sizeof(struct bts_header);

	while (fw_data && fw_len > 0) {
		action = (struct bts_action *)fw_data;

		switch (action->type) {
		case ACTION_SEND_COMMAND:	/* Send a command to the chip */
			ret = fmc_send_cmd(fmdev, 0, 0, action->data,
					   action->size, NULL, NULL);
			if (ret)
				goto rel_fw;

			cmd_cnt++;
			break;

		case ACTION_DELAY:	/* Wait for the specified time */
			delay = (struct bts_action_delay *)action->data;
			mdelay(delay->msec);
			break;
		}

		fw_data += (sizeof(struct bts_action) + (action->size));
		fw_len -= (sizeof(struct bts_action) + (action->size));
	}
	fmdbg("Firmware commands(%d) loaded to chip\n", cmd_cnt);
rel_fw:
	release_firmware(fw_entry);
	clear_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);

	return ret;
}

/* Loads the default RX configuration (volume and RSSI threshold) */
static int load_default_rx_configuration(struct fmdev *fmdev)
{
	int ret;

	ret = fm_rx_set_volume(fmdev, FM_DEFAULT_RX_VOLUME);
	if (ret < 0)
		return ret;

	return fm_rx_set_rssi_threshold(fmdev, FM_DEFAULT_RSSI_THRESHOLD);
}

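/*
 * Enable the FM core, query the ASIC ID/version and download the matching
 * common and mode-specific (RX or TX) firmware files.
 */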
static int fm_power_up(struct fmdev *fmdev, u8 mode)
{
	u16 payload;
	__be16 asic_id, asic_ver;
	int resp_len, ret;
	u8 fw_name[50];

	if (mode >= FM_MODE_ENTRY_MAX) {
		fmerr("Invalid firmware download option\n");
		return -EINVAL;
	}

	/* Register with ST and initialize driver state before talking to the chip */
	ret = fmc_prepare(fmdev);
	if (ret < 0) {
		fmerr("Unable to prepare FM Common\n");
		return ret;
	}

	payload = FM_ENABLE;
	if (fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
			 sizeof(payload), NULL, NULL))
		goto rel;

	/* Give the chip time to power up before querying it */
	msleep(20);

	if (fmc_send_cmd(fmdev, ASIC_ID_GET, REG_RD, NULL,
			 sizeof(asic_id), &asic_id, &resp_len))
		goto rel;

	if (fmc_send_cmd(fmdev, ASIC_VER_GET, REG_RD, NULL,
			 sizeof(asic_ver), &asic_ver, &resp_len))
		goto rel;

	fmdbg("ASIC ID: 0x%x , ASIC Version: %d\n",
	      be16_to_cpu(asic_id), be16_to_cpu(asic_ver));

	sprintf(fw_name, "%s_%x.%d.bts", FM_FMC_FW_FILE_START,
		be16_to_cpu(asic_id), be16_to_cpu(asic_ver));

	ret = fm_download_firmware(fmdev, fw_name);
	if (ret < 0) {
		fmdbg("Failed to download firmware file %s\n", fw_name);
		goto rel;
	}
	sprintf(fw_name, "%s_%x.%d.bts", (mode == FM_MODE_RX) ?
		FM_RX_FW_FILE_START : FM_TX_FW_FILE_START,
		be16_to_cpu(asic_id), be16_to_cpu(asic_ver));

	ret = fm_download_firmware(fmdev, fw_name);
	if (ret < 0) {
		fmdbg("Failed to download firmware file %s\n", fw_name);
		goto rel;
	} else
		return ret;
rel:
	return fmc_release(fmdev);
}

/* Set FM mode (TX, RX, OFF) */
int fmc_set_mode(struct fmdev *fmdev, u8 fm_mode)
{
	int ret = 0;

	if (fm_mode >= FM_MODE_ENTRY_MAX) {
		fmerr("Invalid FM mode\n");
		return -EINVAL;
	}
	if (fmdev->curr_fmmode == fm_mode) {
		fmdbg("FM is already in mode(%d)\n", fm_mode);
		return ret;
	}

	switch (fm_mode) {
	case FM_MODE_OFF:
		ret = fm_power_down(fmdev);
		if (ret < 0) {
			fmerr("Failed to set OFF mode\n");
			return ret;
		}
		break;

	case FM_MODE_TX:
	case FM_MODE_RX:
		/* Power down before switching between RX and TX */
		if (fmdev->curr_fmmode != FM_MODE_OFF) {
			ret = fm_power_down(fmdev);
			if (ret < 0) {
				fmerr("Failed to set OFF mode\n");
				return ret;
			}
			msleep(30);
		}
		ret = fm_power_up(fmdev, fm_mode);
		if (ret < 0) {
			fmerr("Failed to load firmware\n");
			return ret;
		}
	}
	fmdev->curr_fmmode = fm_mode;

	/* Set default configuration */
	if (fmdev->curr_fmmode == FM_MODE_RX) {
		fmdbg("Loading default rx configuration..\n");
		ret = load_default_rx_configuration(fmdev);
		if (ret < 0)
			fmerr("Failed to load default values\n");
	}

	return ret;
}

/* Returns the current FM mode (TX, RX, OFF) */
int fmc_get_mode(struct fmdev *fmdev, u8 *fmmode)
{
	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmerr("FM core is not ready\n");
		return -EPERM;
	}
	if (fmmode == NULL) {
		fmerr("Invalid memory\n");
		return -ENOMEM;
	}

	*fmmode = fmdev->curr_fmmode;
	return 0;
}

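/* Called by the ST layer when an FM (Channel-8) packet is available */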
static long fm_st_receive(void *arg, struct sk_buff *skb)
{
	struct fmdev *fmdev;

	fmdev = (struct fmdev *)arg;

	if (skb == NULL) {
		fmerr("Invalid SKB received from ST\n");
		return -EFAULT;
	}

	if (skb->cb[0] != FM_PKT_LOGICAL_CHAN_NUMBER) {
		fmerr("Received SKB (%p) is not FM Channel 8 pkt\n", skb);
		return -EINVAL;
	}

	memcpy(skb_push(skb, 1), &skb->cb[0], 1);
	skb_queue_tail(&fmdev->rx_q, skb);
	tasklet_schedule(&fmdev->rx_task);

	return 0;
}

/*
 * Called by the ST layer to report the registration completion status;
 * wakes up fmc_prepare(), which waits on wait_for_fmdrv_reg_comp.
 */
static void fm_st_reg_comp_cb(void *arg, int data)
{
	struct fmdev *fmdev;

	fmdev = (struct fmdev *)arg;
	fmdev->streg_cbdata = data;
	complete(&wait_for_fmdrv_reg_comp);
}

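/*
 * Register with the ST driver and initialize driver state (queues,
 * tasklets, IRQ timer and RX defaults).
 */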
int fmc_prepare(struct fmdev *fmdev)
{
	static struct st_proto_s fm_st_proto;
	int ret;

	if (test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmdbg("FM Core is already up\n");
		return 0;
	}

	memset(&fm_st_proto, 0, sizeof(fm_st_proto));
	fm_st_proto.recv = fm_st_receive;
	fm_st_proto.match_packet = NULL;
	fm_st_proto.reg_complete_cb = fm_st_reg_comp_cb;
	fm_st_proto.write = NULL; /* ST driver fills in the write pointer */
	fm_st_proto.priv_data = fmdev;
	fm_st_proto.chnl_id = 0x08;
	fm_st_proto.max_frame_size = 0xff;
	fm_st_proto.hdr_len = 1;
	fm_st_proto.offset_len_in_hdr = 0;
	fm_st_proto.len_size = 1;
	fm_st_proto.reserve = 1;

	ret = st_register(&fm_st_proto);
	if (ret == -EINPROGRESS) {
		init_completion(&wait_for_fmdrv_reg_comp);
		fmdev->streg_cbdata = -EINPROGRESS;
		fmdbg("%s waiting for ST reg completion signal\n", __func__);

		if (!wait_for_completion_timeout(&wait_for_fmdrv_reg_comp,
						 FM_ST_REG_TIMEOUT)) {
			fmerr("Timeout(%d sec), didn't get reg completion signal from ST\n",
			      jiffies_to_msecs(FM_ST_REG_TIMEOUT) / 1000);
			return -ETIMEDOUT;
		}
		if (fmdev->streg_cbdata != 0) {
			fmerr("ST reg comp CB called with error status %d\n",
			      fmdev->streg_cbdata);
			return -EAGAIN;
		}

		ret = 0;
	} else if (ret == -1) {
		fmerr("st_register failed %d\n", ret);
		return -EAGAIN;
	}

	if (fm_st_proto.write != NULL) {
		g_st_write = fm_st_proto.write;
	} else {
		fmerr("Failed to get ST write func pointer\n");
		ret = st_unregister(&fm_st_proto);
		if (ret < 0)
			fmerr("st_unregister failed %d\n", ret);
		return -EAGAIN;
	}

	spin_lock_init(&fmdev->rds_buff_lock);
	spin_lock_init(&fmdev->resp_skb_lock);

	/* Initialize the TX queue and TX tasklet */
	skb_queue_head_init(&fmdev->tx_q);
	tasklet_init(&fmdev->tx_task, send_tasklet, (unsigned long)fmdev);

	/* Initialize the RX queue and RX tasklet */
	skb_queue_head_init(&fmdev->rx_q);
	tasklet_init(&fmdev->rx_task, recv_tasklet, (unsigned long)fmdev);

	fmdev->irq_info.stage = 0;
	atomic_set(&fmdev->tx_cnt, 1);
	fmdev->resp_comp = NULL;

	timer_setup(&fmdev->irq_info.timer, int_timeout_handler, 0);
	/* Only the HW-malfunction interrupt is enabled by default */
	fmdev->irq_info.mask = FM_MAL_EVENT;

	/* Region info */
	fmdev->rx.region = region_configs[default_radio_region];

	fmdev->rx.mute_mode = FM_MUTE_OFF;
	fmdev->rx.rf_depend_mute = FM_RX_RF_DEPENDENT_MUTE_OFF;
	fmdev->rx.rds.flag = FM_RDS_DISABLE;
	fmdev->rx.freq = FM_UNDEFINED_FREQ;
	fmdev->rx.rds_mode = FM_RDS_SYSTEM_RDS;
	fmdev->rx.af_mode = FM_RX_RDS_AF_SWITCH_MODE_OFF;
	fmdev->irq_info.retry = 0;

	fm_rx_reset_rds_cache(fmdev);
	init_waitqueue_head(&fmdev->rx.rds.read_queue);

	fm_rx_reset_station_info(fmdev);
	set_bit(FM_CORE_READY, &fmdev->flag);

	return ret;
}

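/* Unregister from the ST driver and flush the driver queues */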
int fmc_release(struct fmdev *fmdev)
{
	static struct st_proto_s fm_st_proto;
	int ret;

	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmdbg("FM Core is already down\n");
		return 0;
	}

	/* Wake up any readers blocked on the RDS buffer */
	wake_up_interruptible(&fmdev->rx.rds.read_queue);

	tasklet_kill(&fmdev->tx_task);
	tasklet_kill(&fmdev->rx_task);

	skb_queue_purge(&fmdev->tx_q);
	skb_queue_purge(&fmdev->rx_q);

	fmdev->resp_comp = NULL;
	fmdev->rx.freq = 0;

	memset(&fm_st_proto, 0, sizeof(fm_st_proto));
	fm_st_proto.chnl_id = 0x08;

	ret = st_unregister(&fm_st_proto);

	if (ret < 0)
		fmerr("Failed to de-register FM from ST %d\n", ret);
	else
		fmdbg("Successfully unregistered from ST\n");

	clear_bit(FM_CORE_READY, &fmdev->flag);
	return ret;
}

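/*
 * Module init: allocate the fmdev structure and the RDS ring buffer and
 * register the V4L2 radio device.
 */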
static int __init fm_drv_init(void)
{
	struct fmdev *fmdev = NULL;
	int ret = -ENOMEM;

	fmdbg("FM driver version %s\n", FM_DRV_VERSION);

	fmdev = kzalloc(sizeof(struct fmdev), GFP_KERNEL);
	if (NULL == fmdev) {
		fmerr("Can't allocate operation structure memory\n");
		return ret;
	}
	fmdev->rx.rds.buf_size = default_rds_buf * FM_RDS_BLK_SIZE;
	fmdev->rx.rds.buff = kzalloc(fmdev->rx.rds.buf_size, GFP_KERNEL);
	if (NULL == fmdev->rx.rds.buff) {
		fmerr("Can't allocate rds ring buffer\n");
		goto rel_dev;
	}

	ret = fm_v4l2_init_video_device(fmdev, radio_nr);
	if (ret < 0)
		goto rel_rdsbuf;

	fmdev->irq_info.handlers = int_handler_table;
	fmdev->curr_fmmode = FM_MODE_OFF;
	fmdev->tx_data.pwr_lvl = FM_PWR_LVL_DEF;
	fmdev->tx_data.preemph = FM_TX_PREEMPH_50US;
	return ret;

rel_rdsbuf:
	kfree(fmdev->rx.rds.buff);
rel_dev:
	kfree(fmdev);

	return ret;
}

/* Module exit: unregister the V4L2 device and free driver memory */
static void __exit fm_drv_exit(void)
{
	struct fmdev *fmdev = NULL;

	fmdev = fm_v4l2_deinit_video_device();
	if (fmdev != NULL) {
		kfree(fmdev->rx.rds.buff);
		kfree(fmdev);
	}
}

module_init(fm_drv_init);
module_exit(fm_drv_exit);

MODULE_AUTHOR("Manjunatha Halli <manjunatha_halli@ti.com>");
MODULE_DESCRIPTION("FM Driver for TI's Connectivity chip. " FM_DRV_VERSION);
MODULE_VERSION(FM_DRV_VERSION);
MODULE_LICENSE("GPL");