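/*
 *  FM Driver for Connectivity chip of Texas Instruments.
 *
 *  This sub-module of the FM driver implements functionality common to
 *  FM RX and TX: the command send/receive paths to the shared transport
 *  (ST) driver, FM interrupt handling, RDS buffering and firmware download.
 */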
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include "fmdrv.h"
#include "fmdrv_v4l2.h"
#include "fmdrv_common.h"
#include <linux/ti_wilink_st.h>
#include "fmdrv_rx.h"
#include "fmdrv_tx.h"

/* Region info: channel spacing and band limits (frequencies in kHz) */
static struct region_info region_configs[] = {
	/* Europe/US */
	{
		.chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
		.bot_freq = 87500,	/* 87.5 MHz */
		.top_freq = 108000,	/* 108 MHz */
		.fm_band = 0,
	},
	/* Japan */
	{
		.chanl_space = FM_CHANNEL_SPACING_200KHZ * FM_FREQ_MUL,
		.bot_freq = 76000,	/* 76 MHz */
		.top_freq = 90000,	/* 90 MHz */
		.fm_band = 1,
	},
};

static u8 default_radio_region;		/* Europe/US */
module_param(default_radio_region, byte, 0);
MODULE_PARM_DESC(default_radio_region, "Region: 0=Europe/US, 1=Japan");

/* RDS buffer blocks */
static u32 default_rds_buf = 300;
module_param(default_rds_buf, uint, 0444);
MODULE_PARM_DESC(default_rds_buf, "RDS buffer entries");

/* Radio Nr */
static int radio_nr = -1;
module_param(radio_nr, int, 0444);
MODULE_PARM_DESC(radio_nr, "Radio Nr");

static void fm_irq_send_flag_getcmd(struct fmdev *);
static void fm_irq_handle_flag_getcmd_resp(struct fmdev *);
static void fm_irq_handle_hw_malfunction(struct fmdev *);
static void fm_irq_handle_rds_start(struct fmdev *);
static void fm_irq_send_rdsdata_getcmd(struct fmdev *);
static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *);
static void fm_irq_handle_rds_finish(struct fmdev *);
static void fm_irq_handle_tune_op_ended(struct fmdev *);
static void fm_irq_handle_power_enb(struct fmdev *);
static void fm_irq_handle_low_rssi_start(struct fmdev *);
static void fm_irq_afjump_set_pi(struct fmdev *);
static void fm_irq_handle_set_pi_resp(struct fmdev *);
static void fm_irq_afjump_set_pimask(struct fmdev *);
static void fm_irq_handle_set_pimask_resp(struct fmdev *);
static void fm_irq_afjump_setfreq(struct fmdev *);
static void fm_irq_handle_setfreq_resp(struct fmdev *);
static void fm_irq_afjump_enableint(struct fmdev *);
static void fm_irq_afjump_enableint_resp(struct fmdev *);
static void fm_irq_start_afjump(struct fmdev *);
static void fm_irq_handle_start_afjump_resp(struct fmdev *);
static void fm_irq_afjump_rd_freq(struct fmdev *);
static void fm_irq_afjump_rd_freq_resp(struct fmdev *);
static void fm_irq_handle_low_rssi_finish(struct fmdev *);
static void fm_irq_send_intmsk_cmd(struct fmdev *);
static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *);

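/*
 * Index of each FM interrupt stage handler in int_handler_table below.
 * The handlers form a small state machine: fmdev->irq_info.stage holds the
 * index of the next handler to run, and each handler either advances the
 * stage directly or arms the IRQ timeout timer and waits for the command
 * response before moving on.
 */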
enum fmc_irq_handler_index {
	FM_SEND_FLAG_GETCMD_IDX,
	FM_HANDLE_FLAG_GETCMD_RESP_IDX,

	/* HW malfunction handler */
	FM_HW_MAL_FUNC_IDX,

	/* RDS threshold handlers */
	FM_RDS_START_IDX,
	FM_RDS_SEND_RDS_GETCMD_IDX,
	FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX,
	FM_RDS_FINISH_IDX,

	/* Tune operation ended handler */
	FM_HW_TUNE_OP_ENDED_IDX,

	/* Power enable/disable handler */
	FM_HW_POWER_ENB_IDX,

	/* Low RSSI / AF jump handlers */
	FM_LOW_RSSI_START_IDX,
	FM_AF_JUMP_SETPI_IDX,
	FM_AF_JUMP_HANDLE_SETPI_RESP_IDX,
	FM_AF_JUMP_SETPI_MASK_IDX,
	FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX,
	FM_AF_JUMP_SET_AF_FREQ_IDX,
	FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX,
	FM_AF_JUMP_ENABLE_INT_IDX,
	FM_AF_JUMP_ENABLE_INT_RESP_IDX,
	FM_AF_JUMP_START_AFJUMP_IDX,
	FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX,
	FM_AF_JUMP_RD_FREQ_IDX,
	FM_AF_JUMP_RD_FREQ_RESP_IDX,
	FM_LOW_RSSI_FINISH_IDX,

	/* Interrupt mask command handlers */
	FM_SEND_INTMSK_CMD_IDX,
	FM_HANDLE_INTMSK_CMD_RESP_IDX,
};

static int_handler_prototype int_handler_table[] = {
	fm_irq_send_flag_getcmd,
	fm_irq_handle_flag_getcmd_resp,
	fm_irq_handle_hw_malfunction,
	fm_irq_handle_rds_start,
	fm_irq_send_rdsdata_getcmd,
	fm_irq_handle_rdsdata_getcmd_resp,
	fm_irq_handle_rds_finish,
	fm_irq_handle_tune_op_ended,
	fm_irq_handle_power_enb,
	fm_irq_handle_low_rssi_start,
	fm_irq_afjump_set_pi,
	fm_irq_handle_set_pi_resp,
	fm_irq_afjump_set_pimask,
	fm_irq_handle_set_pimask_resp,
	fm_irq_afjump_setfreq,
	fm_irq_handle_setfreq_resp,
	fm_irq_afjump_enableint,
	fm_irq_afjump_enableint_resp,
	fm_irq_start_afjump,
	fm_irq_handle_start_afjump_resp,
	fm_irq_afjump_rd_freq,
	fm_irq_afjump_rd_freq_resp,
	fm_irq_handle_low_rssi_finish,
	fm_irq_send_intmsk_cmd,
	fm_irq_handle_intmsk_cmd_resp
};

long (*g_st_write) (struct sk_buff *skb);
static struct completion wait_for_fmdrv_reg_comp;

static inline void fm_irq_call(struct fmdev *fmdev)
{
	fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
}

static inline void fm_irq_call_stage(struct fmdev *fmdev, u8 stage)
{
	fmdev->irq_info.stage = stage;
	fm_irq_call(fmdev);
}

static inline void fm_irq_timeout_stage(struct fmdev *fmdev, u8 stage)
{
	fmdev->irq_info.stage = stage;
	mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
}

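/*
 * Debug helpers: when FM_DUMP_TXRX_PKT is defined, every FM command and
 * event packet is dumped to the kernel log (header fields plus up to the
 * first 14 payload bytes).
 */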
#ifdef FM_DUMP_TXRX_PKT
/* Dump an outgoing FM command packet */
inline void dump_tx_skb_data(struct sk_buff *skb)
{
	int len, len_org;
	u8 index;
	struct fm_cmd_msg_hdr *cmd_hdr;

	cmd_hdr = (struct fm_cmd_msg_hdr *)skb->data;
	printk(KERN_INFO "<<%shdr:%02x len:%02x opcode:%02x type:%s dlen:%02x",
	       fm_cb(skb)->completion ? " " : "*", cmd_hdr->hdr,
	       cmd_hdr->len, cmd_hdr->op,
	       cmd_hdr->rd_wr ? "RD" : "WR", cmd_hdr->dlen);

	len_org = skb->len - FM_CMD_MSG_HDR_SIZE;
	if (len_org > 0) {
		printk("\n data(%d): ", cmd_hdr->dlen);
		len = min(len_org, 14);
		for (index = 0; index < len; index++)
			printk("%x ",
			       skb->data[FM_CMD_MSG_HDR_SIZE + index]);
		printk("%s", (len_org > 14) ? ".." : "");
	}
	printk("\n");
}

/* Dump an incoming FM event packet */
inline void dump_rx_skb_data(struct sk_buff *skb)
{
	int len, len_org;
	u8 index;
	struct fm_event_msg_hdr *evt_hdr;

	evt_hdr = (struct fm_event_msg_hdr *)skb->data;
	printk(KERN_INFO ">> hdr:%02x len:%02x sts:%02x numhci:%02x opcode:%02x type:%s dlen:%02x",
	       evt_hdr->hdr, evt_hdr->len,
	       evt_hdr->status, evt_hdr->num_fm_hci_cmds, evt_hdr->op,
	       (evt_hdr->rd_wr) ? "RD" : "WR", evt_hdr->dlen);

	len_org = skb->len - FM_EVT_MSG_HDR_SIZE;
	if (len_org > 0) {
		printk("\n data(%d): ", evt_hdr->dlen);
		len = min(len_org, 14);
		for (index = 0; index < len; index++)
			printk("%x ",
			       skb->data[FM_EVT_MSG_HDR_SIZE + index]);
		printk("%s", (len_org > 14) ? ".." : "");
	}
	printk("\n");
}
#endif

void fmc_update_region_info(struct fmdev *fmdev, u8 region_to_set)
{
	fmdev->rx.region = region_configs[region_to_set];
}

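/*
 * RX tasklet: drains fmdev->rx_q and dispatches each packet coming up from
 * the ST driver. FM_INTERRUPT events kick (or re-arm) the IRQ stage handler
 * chain; other events are matched against the opcode of the last sent
 * command and either complete a waiting caller or feed the current IRQ
 * stage handler.
 */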
static void recv_tasklet(unsigned long arg)
{
	struct fmdev *fmdev;
	struct fm_irq *irq_info;
	struct fm_event_msg_hdr *evt_hdr;
	struct sk_buff *skb;
	u8 num_fm_hci_cmds;
	unsigned long flags;

	fmdev = (struct fmdev *)arg;
	irq_info = &fmdev->irq_info;

	/* Process all packets in the RX queue */
	while ((skb = skb_dequeue(&fmdev->rx_q))) {
		if (skb->len < sizeof(struct fm_event_msg_hdr)) {
			fmerr("skb(%p) has only %d bytes, at least need %zu bytes to decode\n",
			      skb, skb->len, sizeof(struct fm_event_msg_hdr));
			kfree_skb(skb);
			continue;
		}

		evt_hdr = (void *)skb->data;
		num_fm_hci_cmds = evt_hdr->num_fm_hci_cmds;

		/* FM interrupt packet from the chip */
		if (evt_hdr->op == FM_INTERRUPT) {

			/* Start the IRQ stage chain unless it is already running */
			if (!test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
				set_bit(FM_INTTASK_RUNNING, &fmdev->flag);
				if (irq_info->stage != 0) {
					fmerr("Inval stage resetting to zero\n");
					irq_info->stage = 0;
				}

				irq_info->handlers[irq_info->stage](fmdev);
			} else {
				set_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag);
			}
			kfree_skb(skb);
		}
		/* Response for a command sent via fmc_send_cmd(): wake the waiter */
		else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp != NULL) {

			spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
			fmdev->resp_skb = skb;
			spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);
			complete(fmdev->resp_comp);

			fmdev->resp_comp = NULL;
			atomic_set(&fmdev->tx_cnt, 1);
		}
		/* Response for a command sent by an IRQ stage handler */
		else if (evt_hdr->op == fmdev->pre_op && fmdev->resp_comp == NULL) {
			if (fmdev->resp_skb != NULL)
				fmerr("Response SKB ptr not NULL\n");

			spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
			fmdev->resp_skb = skb;
			spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

			/* Let the current IRQ stage handler consume the response */
			irq_info->handlers[irq_info->stage](fmdev);

			kfree_skb(skb);
			atomic_set(&fmdev->tx_cnt, 1);
		} else {
			fmerr("Nobody claimed SKB(%p), purging\n", skb);
		}

		/*
		 * The chip reports how many more FM commands it can accept;
		 * kick the TX tasklet if there is room and queued work.
		 */
		if (num_fm_hci_cmds && atomic_read(&fmdev->tx_cnt))
			if (!skb_queue_empty(&fmdev->tx_q))
				tasklet_schedule(&fmdev->tx_task);
	}
}

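/*
 * TX tasklet: sends at most one queued FM packet to the ST driver per run.
 * tx_cnt acts as a single-slot credit that is returned when the response
 * for the previous command arrives (or when the TX timeout fires).
 */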
static void send_tasklet(unsigned long arg)
{
	struct fmdev *fmdev;
	struct sk_buff *skb;
	int len;

	fmdev = (struct fmdev *)arg;

	if (!atomic_read(&fmdev->tx_cnt))
		return;

	/* Check whether the last transmitted packet timed out */
	if ((jiffies - fmdev->last_tx_jiffies) > FM_DRV_TX_TIMEOUT) {
		fmerr("TX timeout occurred\n");
		atomic_set(&fmdev->tx_cnt, 1);
	}

	/* Send the next queued FM TX packet */
	skb = skb_dequeue(&fmdev->tx_q);
	if (!skb)
		return;

	atomic_dec(&fmdev->tx_cnt);
	fmdev->pre_op = fm_cb(skb)->fm_op;

	if (fmdev->resp_comp != NULL)
		fmerr("Response completion handler is not NULL\n");

	fmdev->resp_comp = fm_cb(skb)->completion;

	/* Write FM packet to the ST driver */
	len = g_st_write(skb);
	if (len < 0) {
		kfree_skb(skb);
		fmdev->resp_comp = NULL;
		fmerr("TX tasklet failed to send skb(%p)\n", skb);
		atomic_set(&fmdev->tx_cnt, 1);
	} else {
		fmdev->last_tx_jiffies = jiffies;
	}
}

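/*
 * Queues an FM channel-8 command packet on the TX queue and schedules the
 * TX tasklet. During firmware download the payload already carries a
 * complete packet, so no command header is prepended.
 */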
static int fm_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
		       int payload_len, struct completion *wait_completion)
{
	struct sk_buff *skb;
	struct fm_cmd_msg_hdr *hdr;
	int size;

	if (fm_op >= FM_INTERRUPT) {
		fmerr("Invalid fm opcode - %d\n", fm_op);
		return -EINVAL;
	}
	if (test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) && payload == NULL) {
		fmerr("Payload data is NULL during fw download\n");
		return -EINVAL;
	}
	if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag))
		size = FM_CMD_MSG_HDR_SIZE + ((payload == NULL) ? 0 : payload_len);
	else
		size = payload_len;

	skb = alloc_skb(size, GFP_ATOMIC);
	if (!skb) {
		fmerr("No memory to create new SKB\n");
		return -ENOMEM;
	}
	/*
	 * Don't fill the FM header for commands that come straight from the
	 * FM firmware file; they already carry a complete packet.
	 */
	if (!test_bit(FM_FW_DW_INPROGRESS, &fmdev->flag) ||
	    test_bit(FM_INTTASK_RUNNING, &fmdev->flag)) {
		/* Fill command header info */
		hdr = (struct fm_cmd_msg_hdr *)skb_put(skb, FM_CMD_MSG_HDR_SIZE);
		hdr->hdr = FM_PKT_LOGICAL_CHAN_NUMBER;	/* FM channel (0x08) */

		/* 3 (fm_opcode, rd_wr, dlen) + payload length */
		hdr->len = ((payload == NULL) ? 0 : payload_len) + 3;

		/* FM opcode */
		hdr->op = fm_op;

		/* Read/write type */
		hdr->rd_wr = type;
		hdr->dlen = payload_len;
		fm_cb(skb)->fm_op = fm_op;

		/*
		 * Regular command payloads are 16-bit register values which
		 * the chip expects in big-endian byte order.
		 */
		if (payload != NULL)
			*(u16 *)payload = cpu_to_be16(*(u16 *)payload);

	} else if (payload != NULL) {
		fm_cb(skb)->fm_op = *((u8 *)payload + 2);
	}
	if (payload != NULL)
		memcpy(skb_put(skb, payload_len), payload, payload_len);

	fm_cb(skb)->completion = wait_completion;
	skb_queue_tail(&fmdev->tx_q, skb);
	tasklet_schedule(&fmdev->tx_task);

	return 0;
}

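/*
 * Sends an FM command and waits (with timeout) for the chip's response,
 * optionally copying the response payload back to the caller.
 */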
int fmc_send_cmd(struct fmdev *fmdev, u8 fm_op, u16 type, void *payload,
		 unsigned int payload_len, void *response, int *response_len)
{
	struct sk_buff *skb;
	struct fm_event_msg_hdr *evt_hdr;
	unsigned long flags;
	int ret;

	init_completion(&fmdev->maintask_comp);
	ret = fm_send_cmd(fmdev, fm_op, type, payload, payload_len,
			  &fmdev->maintask_comp);
	if (ret)
		return ret;

	if (!wait_for_completion_timeout(&fmdev->maintask_comp,
					 FM_DRV_TX_TIMEOUT)) {
		fmerr("Timeout(%d sec), didn't get completion signal from RX tasklet\n",
		      jiffies_to_msecs(FM_DRV_TX_TIMEOUT) / 1000);
		return -ETIMEDOUT;
	}
	if (!fmdev->resp_skb) {
		fmerr("Response SKB is missing\n");
		return -EFAULT;
	}
	spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
	skb = fmdev->resp_skb;
	fmdev->resp_skb = NULL;
	spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

	evt_hdr = (void *)skb->data;
	if (evt_hdr->status != 0) {
		fmerr("Received event pkt status(%d) is not zero\n",
		      evt_hdr->status);
		kfree_skb(skb);
		return -EIO;
	}
	/* Copy response data back to the caller, if requested */
	if (response != NULL && response_len != NULL && evt_hdr->dlen) {
		/* Skip header info and copy only response data */
		skb_pull(skb, sizeof(struct fm_event_msg_hdr));
		memcpy(response, skb->data, evt_hdr->dlen);
		*response_len = evt_hdr->dlen;
	} else if (response_len != NULL && evt_hdr->dlen == 0) {
		*response_len = 0;
	}
	kfree_skb(skb);

	return 0;
}

/*
 * Checks the response status of the last command sent by an IRQ stage
 * handler and picks up the response skb saved by the RX tasklet.
 */
static inline int check_cmdresp_status(struct fmdev *fmdev,
				       struct sk_buff **skb)
{
	struct fm_event_msg_hdr *fm_evt_hdr;
	unsigned long flags;

	del_timer(&fmdev->irq_info.timer);

	spin_lock_irqsave(&fmdev->resp_skb_lock, flags);
	*skb = fmdev->resp_skb;
	fmdev->resp_skb = NULL;
	spin_unlock_irqrestore(&fmdev->resp_skb_lock, flags);

	fm_evt_hdr = (void *)(*skb)->data;
	if (fm_evt_hdr->status != 0) {
		fmerr("irq: opcode %x response status is not zero, initiating irq recovery process\n",
		      fm_evt_hdr->op);

		mod_timer(&fmdev->irq_info.timer, jiffies + FM_DRV_TX_TIMEOUT);
		return -1;
	}

	return 0;
}

static inline void fm_irq_common_cmd_resp_helper(struct fmdev *fmdev, u8 stage)
{
	struct sk_buff *skb;

	if (!check_cmdresp_status(fmdev, &skb))
		fm_irq_call_stage(fmdev, stage);
}

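/*
 * Interrupt-process timeout handler. Runs when a stage handler's command
 * did not get a response in time; after FM_IRQ_TIMEOUT_RETRY_MAX retries
 * the IRQ stage machine is reset.
 */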
static void int_timeout_handler(unsigned long data)
{
	struct fmdev *fmdev;
	struct fm_irq *fmirq;

	fmdbg("irq: timeout, trying to re-enable fm interrupts\n");
	fmdev = (struct fmdev *)data;
	fmirq = &fmdev->irq_info;
	fmirq->retry++;

	if (fmirq->retry > FM_IRQ_TIMEOUT_RETRY_MAX) {
		/* Stop recovery; reset the stage machine */
		fmirq->stage = 0;
		fmirq->retry = 0;
		fmerr("Recovery action failed during irq processing, max retry reached\n");
		return;
	}
	fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
}

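/*
 * IRQ stage handlers. The chain starts by reading the FLAG register to
 * find out which interrupt(s) fired, then walks through the RDS, tune,
 * power and low-RSSI/AF-jump handlers before re-enabling the interrupt
 * mask at the end.
 */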
static void fm_irq_send_flag_getcmd(struct fmdev *fmdev)
{
	u16 flag;

	/* Send FLAG_GET command to find out which interrupt occurred */
	if (!fm_send_cmd(fmdev, FLAG_GET, REG_RD, NULL, sizeof(flag), NULL))
		fm_irq_timeout_stage(fmdev, FM_HANDLE_FLAG_GETCMD_RESP_IDX);
}

static void fm_irq_handle_flag_getcmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	struct fm_event_msg_hdr *fm_evt_hdr;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	fm_evt_hdr = (void *)skb->data;

	/* Skip header info and copy only response data */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	memcpy(&fmdev->irq_info.flag, skb->data, fm_evt_hdr->dlen);

	fmdev->irq_info.flag = be16_to_cpu(fmdev->irq_info.flag);
	fmdbg("irq: flag register(0x%x)\n", fmdev->irq_info.flag);

	/* Continue with the next IRQ handling stage */
	fm_irq_call_stage(fmdev, FM_HW_MAL_FUNC_IDX);
}

static void fm_irq_handle_hw_malfunction(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_MAL_EVENT & fmdev->irq_info.mask)
		fmerr("irq: HW MAL int received - do nothing\n");

	/* Continue with the next IRQ handling stage */
	fm_irq_call_stage(fmdev, FM_RDS_START_IDX);
}

static void fm_irq_handle_rds_start(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_RDS_EVENT & fmdev->irq_info.mask) {
		fmdbg("irq: rds threshold reached\n");
		fmdev->irq_info.stage = FM_RDS_SEND_RDS_GETCMD_IDX;
	} else {
		/* No RDS data; skip ahead to the tune handler */
		fmdev->irq_info.stage = FM_HW_TUNE_OP_ENDED_IDX;
	}

	fm_irq_call(fmdev);
}

static void fm_irq_send_rdsdata_getcmd(struct fmdev *fmdev)
{
	/* Read RDS data from the chip's RDS FIFO */
	if (!fm_send_cmd(fmdev, RDS_DATA_GET, REG_RD, NULL,
			 (FM_RX_RDS_FIFO_THRESHOLD * 3), NULL))
		fm_irq_timeout_stage(fmdev, FM_RDS_HANDLE_RDS_GETCMD_RESP_IDX);
}

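/*
 * Maintains the cache of Alternate Frequencies (AF) announced by the
 * currently tuned station; used for AF jump when RSSI drops.
 */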
static void fm_rx_update_af_cache(struct fmdev *fmdev, u8 af)
{
	struct tuned_station_info *stat_info = &fmdev->rx.stat_info;
	u8 reg_idx = fmdev->rx.region.fm_band;
	u8 index;
	u32 freq;

	/* The first AF code indicates how many AFs will follow */
	if ((af >= FM_RDS_1_AF_FOLLOWS) && (af <= FM_RDS_25_AF_FOLLOWS)) {
		fmdev->rx.stat_info.af_list_max = (af - FM_RDS_1_AF_FOLLOWS + 1);
		fmdev->rx.stat_info.afcache_size = 0;
		fmdbg("No of expected AF : %d\n", fmdev->rx.stat_info.af_list_max);
		return;
	}

	if (af < FM_RDS_MIN_AF)
		return;
	if (reg_idx == FM_BAND_EUROPE_US && af > FM_RDS_MAX_AF)
		return;
	if (reg_idx == FM_BAND_JAPAN && af > FM_RDS_MAX_AF_JAPAN)
		return;

	freq = fmdev->rx.region.bot_freq + (af * 100);
	if (freq == fmdev->rx.freq) {
		fmdbg("Current freq(%d) matches received AF(%d)\n",
		      fmdev->rx.freq, freq);
		return;
	}
	/* Check whether this AF is already cached */
	for (index = 0; index < stat_info->afcache_size; index++) {
		if (stat_info->af_cache[index] == freq)
			break;
	}

	if (index == stat_info->af_list_max) {
		fmdbg("AF cache is full\n");
		return;
	}
	/*
	 * The AF is not yet in the cache (the loop above ran to
	 * afcache_size without a match), so add it.
	 */
	if (index == stat_info->afcache_size) {
		fmdbg("Storing AF %d to cache index %d\n", freq, index);
		stat_info->af_cache[index] = freq;
		stat_info->afcache_size++;
	}
}

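/*
 * Converts an RDS group buffer from big-endian to little-endian byte
 * order. Chips whose ASIC id is 0x6350 already deliver the data in the
 * order the driver expects, so they are left untouched.
 */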
static void fm_rdsparse_swapbytes(struct fmdev *fmdev,
				  struct fm_rdsdata_format *rds_format)
{
	u8 byte1;
	u8 index = 0;
	u8 *rds_buff;

	if (fmdev->asci_id != 0x6350) {
		rds_buff = &rds_format->data.groupdatabuff.buff[0];
		while (index + 1 < FM_RX_RDS_INFO_FIELD_MAX) {
			byte1 = rds_buff[index];
			rds_buff[index] = rds_buff[index + 1];
			rds_buff[index + 1] = byte1;
			index += 2;
		}
	}
}

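/*
 * Parses the RDS data returned by RDS_DATA_GET: each 3-byte block carries
 * two data bytes plus a meta-data byte (block index and error status).
 * Complete groups (blocks A..D) are decoded for the PI code and the AF
 * list, and the raw blocks are then copied, three bytes at a time, into
 * the ring buffer that fmc_transfer_rds_from_internal_buff() hands to
 * user space.
 */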
static void fm_irq_handle_rdsdata_getcmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	struct fm_rdsdata_format rds_fmt;
	struct fm_rds *rds = &fmdev->rx.rds;
	unsigned long group_idx, flags;
	u8 *rds_data, meta_data, tmpbuf[3];
	u8 type, blk_idx;
	u16 cur_picode;
	u32 rds_len;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	/* Skip header info */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	rds_data = skb->data;
	rds_len = skb->len;

	/* Parse the RDS data */
	while (rds_len >= FM_RDS_BLK_SIZE) {
		meta_data = rds_data[2];
		/* Lower 3 bits of the meta-data byte give the block type */
		type = (meta_data & 0x07);

		/* Map the block type to an index (C' shares C's index) */
		blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
		fmdbg("Block index:%d(%s)\n", blk_idx,
		      (meta_data & FM_RDS_STATUS_ERR_MASK) ? "Bad" : "Ok");

		if ((meta_data & FM_RDS_STATUS_ERR_MASK) != 0)
			break;

		if (blk_idx < FM_RDS_BLK_IDX_A || blk_idx > FM_RDS_BLK_IDX_D) {
			fmdbg("Block sequence mismatch\n");
			rds->last_blk_idx = -1;
			break;
		}

		/* Store the two data bytes, skipping the meta-data byte */
		memcpy(&rds_fmt.data.groupdatabuff.buff[blk_idx * (FM_RDS_BLK_SIZE - 1)],
		       rds_data, FM_RDS_BLK_SIZE - 1);

		rds->last_blk_idx = blk_idx;

		/* Block "D" completes the RDS group */
		if (blk_idx == FM_RDS_BLK_IDX_D) {
			fmdbg("Good block received\n");
			fm_rdsparse_swapbytes(fmdev, &rds_fmt);

			/* Extract the PI code of the current station */
			cur_picode = be16_to_cpu(rds_fmt.data.groupgeneral.pidata);
			if (fmdev->rx.stat_info.picode != cur_picode)
				fmdev->rx.stat_info.picode = cur_picode;

			fmdbg("picode:%d\n", cur_picode);

			group_idx = (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
			fmdbg("(fmdrv):Group:%ld%s\n", group_idx / 2,
			      (group_idx % 2) ? "B" : "A");

			group_idx = 1 << (rds_fmt.data.groupgeneral.blk_b[0] >> 3);
			if (group_idx == FM_RDS_GROUP_TYPE_MASK_0A) {
				/* Group 0A carries the Alternate Frequency list */
				fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[0]);
				fm_rx_update_af_cache(fmdev, rds_fmt.data.group0A.af[1]);
			}
		}
		rds_len -= FM_RDS_BLK_SIZE;
		rds_data += FM_RDS_BLK_SIZE;
	}

	/* Copy the raw RDS blocks to the internal ring buffer */
	rds_data = skb->data;
	rds_len = skb->len;

	spin_lock_irqsave(&fmdev->rds_buff_lock, flags);
	while (rds_len > 0) {
		/*
		 * Copy the two data bytes and put the block index in the
		 * last byte of the block.
		 */
		type = (rds_data[2] & 0x07);
		blk_idx = (type <= FM_RDS_BLOCK_C ? type : (type - 1));
		tmpbuf[2] = blk_idx;
		tmpbuf[2] |= blk_idx << 3;

		tmpbuf[0] = rds_data[0];
		tmpbuf[1] = rds_data[1];

		memcpy(&rds->buff[rds->wr_idx], &tmpbuf, FM_RDS_BLK_SIZE);
		rds->wr_idx = (rds->wr_idx + FM_RDS_BLK_SIZE) % rds->buf_size;

		/* Check for overflow and reset the write/read indices */
		if (rds->wr_idx == rds->rd_idx) {
			fmdbg("RDS buffer overflow\n");
			rds->wr_idx = 0;
			rds->rd_idx = 0;
			break;
		}
		rds_len -= FM_RDS_BLK_SIZE;
		rds_data += FM_RDS_BLK_SIZE;
	}
	spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);

	/* Wake up readers waiting for RDS data */
	if (rds->wr_idx != rds->rd_idx)
		wake_up_interruptible(&rds->read_queue);

	fm_irq_call_stage(fmdev, FM_RDS_FINISH_IDX);
}

static void fm_irq_handle_rds_finish(struct fmdev *fmdev)
{
	fm_irq_call_stage(fmdev, FM_HW_TUNE_OP_ENDED_IDX);
}

static void fm_irq_handle_tune_op_ended(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & (FM_FR_EVENT | FM_BL_EVENT) &
	    fmdev->irq_info.mask) {
		fmdbg("irq: tune ended/bandlimit reached\n");
		if (test_and_clear_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag)) {
			fmdev->irq_info.stage = FM_AF_JUMP_RD_FREQ_IDX;
		} else {
			complete(&fmdev->maintask_comp);
			fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;
		}
	} else
		fmdev->irq_info.stage = FM_HW_POWER_ENB_IDX;

	fm_irq_call(fmdev);
}

static void fm_irq_handle_power_enb(struct fmdev *fmdev)
{
	if (fmdev->irq_info.flag & FM_POW_ENB_EVENT) {
		fmdbg("irq: Power Enabled/Disabled\n");
		complete(&fmdev->maintask_comp);
	}

	fm_irq_call_stage(fmdev, FM_LOW_RSSI_START_IDX);
}

static void fm_irq_handle_low_rssi_start(struct fmdev *fmdev)
{
	if ((fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON) &&
	    (fmdev->irq_info.flag & FM_LEV_EVENT & fmdev->irq_info.mask) &&
	    (fmdev->rx.freq != FM_UNDEFINED_FREQ) &&
	    (fmdev->rx.stat_info.afcache_size != 0)) {
		fmdbg("irq: rssi level has fallen below threshold level\n");

		/* Disable further low-RSSI interrupts until the AF jump completes */
		fmdev->irq_info.mask &= ~FM_LEV_EVENT;

		fmdev->rx.afjump_idx = 0;
		fmdev->rx.freq_before_jump = fmdev->rx.freq;
		fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
	} else {
		/* Continue with the next IRQ handling stage */
		fmdev->irq_info.stage = FM_SEND_INTMSK_CMD_IDX;
	}

	fm_irq_call(fmdev);
}

static void fm_irq_afjump_set_pi(struct fmdev *fmdev)
{
	u16 payload;

	/* Set the PI code of the currently tuned station */
	payload = fmdev->rx.stat_info.picode;
	if (!fm_send_cmd(fmdev, RDS_PI_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_RESP_IDX);
}

static void fm_irq_handle_set_pi_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SETPI_MASK_IDX);
}

/* Set the PI mask */
static void fm_irq_afjump_set_pimask(struct fmdev *fmdev)
{
	u16 payload;

	payload = 0x0000;
	if (!fm_send_cmd(fmdev, RDS_PI_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SETPI_MASK_RESP_IDX);
}

static void fm_irq_handle_set_pimask_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_SET_AF_FREQ_IDX);
}

static void fm_irq_afjump_setfreq(struct fmdev *fmdev)
{
	u16 frq_index;
	u16 payload;

	fmdbg("Switch to %d kHz\n", fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx]);
	frq_index = (fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx] -
		     fmdev->rx.region.bot_freq) / FM_FREQ_MUL;

	payload = frq_index;
	if (!fm_send_cmd(fmdev, AF_FREQ_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_SET_AFFREQ_RESP_IDX);
}

static void fm_irq_handle_setfreq_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_ENABLE_INT_IDX);
}

static void fm_irq_afjump_enableint(struct fmdev *fmdev)
{
	u16 payload;

	/* Enable the FR (tune operation ended) event interrupt */
	payload = FM_FR_EVENT;
	if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_ENABLE_INT_RESP_IDX);
}

static void fm_irq_afjump_enableint_resp(struct fmdev *fmdev)
{
	fm_irq_common_cmd_resp_helper(fmdev, FM_AF_JUMP_START_AFJUMP_IDX);
}

static void fm_irq_start_afjump(struct fmdev *fmdev)
{
	u16 payload;

	payload = FM_TUNER_AF_JUMP_MODE;
	if (!fm_send_cmd(fmdev, TUNER_MODE_SET, REG_WR, &payload,
			 sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_HANDLE_START_AFJUMP_RESP_IDX);
}

static void fm_irq_handle_start_afjump_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;
	set_bit(FM_AF_SWITCH_INPROGRESS, &fmdev->flag);
	clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
}

static void fm_irq_afjump_rd_freq(struct fmdev *fmdev)
{
	u16 payload;

	if (!fm_send_cmd(fmdev, FREQ_SET, REG_RD, NULL, sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_AF_JUMP_RD_FREQ_RESP_IDX);
}

static void fm_irq_afjump_rd_freq_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;
	u16 read_freq;
	u32 curr_freq, jumped_freq;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	/* Skip header info and copy only response data */
	skb_pull(skb, sizeof(struct fm_event_msg_hdr));
	memcpy(&read_freq, skb->data, sizeof(read_freq));
	read_freq = be16_to_cpu(read_freq);
	curr_freq = fmdev->rx.region.bot_freq + ((u32)read_freq * FM_FREQ_MUL);

	jumped_freq = fmdev->rx.stat_info.af_cache[fmdev->rx.afjump_idx];

	/* If the jump succeeded, start using the new frequency */
	if ((curr_freq != fmdev->rx.freq_before_jump) && (curr_freq == jumped_freq)) {
		fmdbg("Successfully switched to alternate freq %d\n", curr_freq);
		fmdev->rx.freq = curr_freq;
		fm_rx_reset_rds_cache(fmdev);

		/* AF feature is ON, enable the low-RSSI interrupt again */
		if (fmdev->rx.af_mode == FM_RX_RDS_AF_SWITCH_MODE_ON)
			fmdev->irq_info.mask |= FM_LEV_EVENT;

		fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
	} else {
		/* Try the next frequency in the AF cache */
		fmdev->rx.afjump_idx++;

		/* Stop once every frequency in the AF cache has been tried */
		if (fmdev->rx.afjump_idx >= fmdev->rx.stat_info.afcache_size) {
			fmdbg("AF switch processing failed\n");
			fmdev->irq_info.stage = FM_LOW_RSSI_FINISH_IDX;
		} else {
			fmdbg("Trying next freq in AF cache\n");
			fmdev->irq_info.stage = FM_AF_JUMP_SETPI_IDX;
		}
	}
	fm_irq_call(fmdev);
}

static void fm_irq_handle_low_rssi_finish(struct fmdev *fmdev)
{
	fm_irq_call_stage(fmdev, FM_SEND_INTMSK_CMD_IDX);
}

static void fm_irq_send_intmsk_cmd(struct fmdev *fmdev)
{
	u16 payload;

	/* Re-enable the configured FM interrupts */
	payload = fmdev->irq_info.mask;

	if (!fm_send_cmd(fmdev, INT_MASK_SET, REG_WR, &payload,
			 sizeof(payload), NULL))
		fm_irq_timeout_stage(fmdev, FM_HANDLE_INTMSK_CMD_RESP_IDX);
}

static void fm_irq_handle_intmsk_cmd_resp(struct fmdev *fmdev)
{
	struct sk_buff *skb;

	if (check_cmdresp_status(fmdev, &skb))
		return;

	/*
	 * This is the last stage of the interrupt process; reset the stage
	 * index so that the next FM interrupt starts from the beginning.
	 */
	fmdev->irq_info.stage = FM_SEND_FLAG_GETCMD_IDX;

	/* Start over immediately if another interrupt arrived meanwhile */
	if (test_and_clear_bit(FM_INTTASK_SCHEDULE_PENDING, &fmdev->flag))
		fmdev->irq_info.handlers[fmdev->irq_info.stage](fmdev);
	else
		clear_bit(FM_INTTASK_RUNNING, &fmdev->flag);
}

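/*
 * RDS read path exposed to the V4L2 layer: poll() support plus a blocking
 * copy of whole 3-byte RDS blocks from the internal ring buffer to user
 * space.
 */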
int fmc_is_rds_data_available(struct fmdev *fmdev, struct file *file,
			      struct poll_table_struct *pts)
{
	poll_wait(file, &fmdev->rx.rds.read_queue, pts);
	if (fmdev->rx.rds.rd_idx != fmdev->rx.rds.wr_idx)
		return 0;

	return -EAGAIN;
}

int fmc_transfer_rds_from_internal_buff(struct fmdev *fmdev, struct file *file,
					u8 __user *buf, size_t count)
{
	u32 block_count;
	unsigned long flags;
	int ret;

	if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx) {
		if (file->f_flags & O_NONBLOCK)
			return -EWOULDBLOCK;

		ret = wait_event_interruptible(fmdev->rx.rds.read_queue,
					       (fmdev->rx.rds.wr_idx != fmdev->rx.rds.rd_idx));
		if (ret)
			return -EINTR;
	}

	/* Convert the byte count to a whole number of 3-byte RDS blocks */
	count /= 3;
	block_count = 0;
	ret = 0;

	spin_lock_irqsave(&fmdev->rds_buff_lock, flags);

	while (block_count < count) {
		if (fmdev->rx.rds.wr_idx == fmdev->rx.rds.rd_idx)
			break;

		if (copy_to_user(buf, &fmdev->rx.rds.buff[fmdev->rx.rds.rd_idx],
				 FM_RDS_BLK_SIZE))
			break;

		fmdev->rx.rds.rd_idx += FM_RDS_BLK_SIZE;
		if (fmdev->rx.rds.rd_idx >= fmdev->rx.rds.buf_size)
			fmdev->rx.rds.rd_idx = 0;

		block_count++;
		buf += FM_RDS_BLK_SIZE;
		ret += FM_RDS_BLK_SIZE;
	}
	spin_unlock_irqrestore(&fmdev->rds_buff_lock, flags);
	return ret;
}

int fmc_set_freq(struct fmdev *fmdev, u32 freq_to_set)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_freq(fmdev, freq_to_set);

	case FM_MODE_TX:
		return fm_tx_set_freq(fmdev, freq_to_set);

	default:
		return -EINVAL;
	}
}

int fmc_get_freq(struct fmdev *fmdev, u32 *cur_tuned_frq)
{
	if (fmdev->rx.freq == FM_UNDEFINED_FREQ) {
		fmerr("RX frequency is not set\n");
		return -EPERM;
	}
	if (cur_tuned_frq == NULL) {
		fmerr("Invalid memory\n");
		return -ENOMEM;
	}

	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		*cur_tuned_frq = fmdev->rx.freq;
		return 0;

	case FM_MODE_TX:
		*cur_tuned_frq = 0;
		return 0;

	default:
		return -EINVAL;
	}
}

int fmc_set_region(struct fmdev *fmdev, u8 region_to_set)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_region(fmdev, region_to_set);

	case FM_MODE_TX:
		return fm_tx_set_region(fmdev, region_to_set);

	default:
		return -EINVAL;
	}
}

int fmc_set_mute_mode(struct fmdev *fmdev, u8 mute_mode_toset)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_mute_mode(fmdev, mute_mode_toset);

	case FM_MODE_TX:
		return fm_tx_set_mute_mode(fmdev, mute_mode_toset);

	default:
		return -EINVAL;
	}
}

int fmc_set_stereo_mono(struct fmdev *fmdev, u16 mode)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_stereo_mono(fmdev, mode);

	case FM_MODE_TX:
		return fm_tx_set_stereo_mono(fmdev, mode);

	default:
		return -EINVAL;
	}
}

int fmc_set_rds_mode(struct fmdev *fmdev, u8 rds_en_dis)
{
	switch (fmdev->curr_fmmode) {
	case FM_MODE_RX:
		return fm_rx_set_rds_mode(fmdev, rds_en_dis);

	case FM_MODE_TX:
		return fm_tx_set_rds_mode(fmdev, rds_en_dis);

	default:
		return -EINVAL;
	}
}

/* Powers down the FM core and releases driver resources */
static int fm_power_down(struct fmdev *fmdev)
{
	u16 payload;
	int ret;

	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmerr("FM core is not ready\n");
		return -EPERM;
	}
	if (fmdev->curr_fmmode == FM_MODE_OFF) {
		fmdbg("FM chip is already in OFF state\n");
		return 0;
	}

	payload = 0x0;
	ret = fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
			   sizeof(payload), NULL, NULL);
	if (ret < 0)
		return ret;

	return fmc_release(fmdev);
}

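/*
 * Downloads a .bts firmware file to the chip: validates the file header
 * magic, then walks the action records, sending ACTION_SEND_COMMAND
 * payloads as raw FM packets and honouring ACTION_DELAY records.
 */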
static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
{
	const struct firmware *fw_entry;
	struct bts_header *fw_header;
	struct bts_action *action;
	struct bts_action_delay *delay;
	u8 *fw_data;
	int ret, fw_len, cmd_cnt;

	cmd_cnt = 0;
	set_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);

	ret = request_firmware(&fw_entry, fw_name,
			       &fmdev->radio_dev->dev);
	if (ret < 0) {
		fmerr("Unable to read firmware(%s) content\n", fw_name);
		clear_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);
		return ret;
	}
	fmdbg("Firmware(%s) length : %zu bytes\n", fw_name, fw_entry->size);

	fw_data = (void *)fw_entry->data;
	fw_len = fw_entry->size;

	fw_header = (struct bts_header *)fw_data;
	if (fw_header->magic != FM_FW_FILE_HEADER_MAGIC) {
		fmerr("%s not a valid TI firmware file\n", fw_name);
		ret = -EINVAL;
		goto rel_fw;
	}
	fmdbg("FW(%s) magic number : 0x%x\n", fw_name, fw_header->magic);

	/* Skip the file header; process only the action records */
	fw_data += sizeof(struct bts_header);
	fw_len -= sizeof(struct bts_header);

	while (fw_data && fw_len > 0) {
		action = (struct bts_action *)fw_data;

		switch (action->type) {
		case ACTION_SEND_COMMAND:	/* Send a command to the chip */
			ret = fmc_send_cmd(fmdev, 0, 0, action->data,
					   action->size, NULL, NULL);
			if (ret)
				goto rel_fw;

			cmd_cnt++;
			break;

		case ACTION_DELAY:	/* Delay before the next command */
			delay = (struct bts_action_delay *)action->data;
			mdelay(delay->msec);
			break;
		}

		fw_data += (sizeof(struct bts_action) + (action->size));
		fw_len -= (sizeof(struct bts_action) + (action->size));
	}
	fmdbg("Firmware commands(%d) loaded to chip\n", cmd_cnt);
rel_fw:
	release_firmware(fw_entry);
	clear_bit(FM_FW_DW_INPROGRESS, &fmdev->flag);

	return ret;
}

/* Loads the default RX configuration into the chip */
static int load_default_rx_configuration(struct fmdev *fmdev)
{
	int ret;

	ret = fm_rx_set_volume(fmdev, FM_DEFAULT_RX_VOLUME);
	if (ret < 0)
		return ret;

	return fm_rx_set_rssi_threshold(fmdev, FM_DEFAULT_RSSI_THRESHOLD);
}

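/*
 * Powers up the FM core: prepares the common layer, enables the chip,
 * reads the ASIC id/version and downloads the matching init and
 * RX/TX firmware files.
 */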
static int fm_power_up(struct fmdev *fmdev, u8 mode)
{
	u16 payload, asic_id, asic_ver;
	int resp_len, ret;
	u8 fw_name[50];

	if (mode >= FM_MODE_ENTRY_MAX) {
		fmerr("Invalid firmware download option\n");
		return -EINVAL;
	}

	/* Register with ST and set up TX/RX queues before talking to the chip */
	ret = fmc_prepare(fmdev);
	if (ret < 0) {
		fmerr("Unable to prepare FM Common\n");
		return ret;
	}

	payload = FM_ENABLE;
	if (fmc_send_cmd(fmdev, FM_POWER_MODE, REG_WR, &payload,
			 sizeof(payload), NULL, NULL))
		goto rel;

	/* Allow the chip some time to power up */
	msleep(20);

	if (fmc_send_cmd(fmdev, ASIC_ID_GET, REG_RD, NULL,
			 sizeof(asic_id), &asic_id, &resp_len))
		goto rel;

	if (fmc_send_cmd(fmdev, ASIC_VER_GET, REG_RD, NULL,
			 sizeof(asic_ver), &asic_ver, &resp_len))
		goto rel;

	fmdbg("ASIC ID: 0x%x, ASIC Version: %d\n",
	      be16_to_cpu(asic_id), be16_to_cpu(asic_ver));

	sprintf(fw_name, "%s_%x.%d.bts", FM_FMC_FW_FILE_START,
		be16_to_cpu(asic_id), be16_to_cpu(asic_ver));

	ret = fm_download_firmware(fmdev, fw_name);
	if (ret < 0) {
		fmdbg("Failed to download firmware file %s\n", fw_name);
		goto rel;
	}
	sprintf(fw_name, "%s_%x.%d.bts", (mode == FM_MODE_RX) ?
		FM_RX_FW_FILE_START : FM_TX_FW_FILE_START,
		be16_to_cpu(asic_id), be16_to_cpu(asic_ver));

	ret = fm_download_firmware(fmdev, fw_name);
	if (ret < 0) {
		fmdbg("Failed to download firmware file %s\n", fw_name);
		goto rel;
	} else
		return ret;
rel:
	return fmc_release(fmdev);
}

int fmc_set_mode(struct fmdev *fmdev, u8 fm_mode)
{
	int ret = 0;

	if (fm_mode >= FM_MODE_ENTRY_MAX) {
		fmerr("Invalid FM mode\n");
		return -EINVAL;
	}
	if (fmdev->curr_fmmode == fm_mode) {
		fmdbg("FM is already in mode(%d)\n", fm_mode);
		return ret;
	}

	switch (fm_mode) {
	case FM_MODE_OFF:
		ret = fm_power_down(fmdev);
		if (ret < 0) {
			fmerr("Failed to set OFF mode\n");
			return ret;
		}
		break;

	case FM_MODE_TX:
	case FM_MODE_RX:
		/* Power down the chip first if it is currently on */
		if (fmdev->curr_fmmode != FM_MODE_OFF) {
			ret = fm_power_down(fmdev);
			if (ret < 0) {
				fmerr("Failed to set OFF mode\n");
				return ret;
			}
			msleep(30);
		}
		ret = fm_power_up(fmdev, fm_mode);
		if (ret < 0) {
			fmerr("Failed to load firmware\n");
			return ret;
		}
	}
	fmdev->curr_fmmode = fm_mode;

	/* Set the default configuration for RX mode */
	if (fmdev->curr_fmmode == FM_MODE_RX) {
		fmdbg("Loading default rx configuration..\n");
		ret = load_default_rx_configuration(fmdev);
		if (ret < 0)
			fmerr("Failed to load default values\n");
	}

	return ret;
}

/* Returns the current FM mode (TX, RX or OFF) */
int fmc_get_mode(struct fmdev *fmdev, u8 *fmmode)
{
	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmerr("FM core is not ready\n");
		return -EPERM;
	}
	if (fmmode == NULL) {
		fmerr("Invalid memory\n");
		return -ENOMEM;
	}

	*fmmode = fmdev->curr_fmmode;
	return 0;
}

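/*
 * Called by the ST driver whenever a complete FM channel-8 packet has
 * been received; the packet is queued for the RX tasklet.
 */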
static long fm_st_receive(void *arg, struct sk_buff *skb)
{
	struct fmdev *fmdev;

	fmdev = (struct fmdev *)arg;

	if (skb == NULL) {
		fmerr("Invalid SKB received from ST\n");
		return -EFAULT;
	}

	if (skb->cb[0] != FM_PKT_LOGICAL_CHAN_NUMBER) {
		fmerr("Received SKB (%p) is not FM Channel 8 pkt\n", skb);
		return -EINVAL;
	}

	memcpy(skb_push(skb, 1), &skb->cb[0], 1);
	skb_queue_tail(&fmdev->rx_q, skb);
	tasklet_schedule(&fmdev->rx_task);

	return 0;
}

/*
 * Called by the ST driver when the registration requested in fmc_prepare()
 * completes, with the registration status in "data".
 */
static void fm_st_reg_comp_cb(void *arg, char data)
{
	struct fmdev *fmdev;

	fmdev = (struct fmdev *)arg;
	fmdev->streg_cbdata = data;
	complete(&wait_for_fmdrv_reg_comp);
}

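/*
 * Registers the FM protocol with the shared transport (ST) driver and
 * initializes TX/RX queues, tasklets, the IRQ timeout timer and the
 * default RX state. Called before the chip is powered up.
 */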
int fmc_prepare(struct fmdev *fmdev)
{
	static struct st_proto_s fm_st_proto;
	int ret;

	if (test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmdbg("FM Core is already up\n");
		return 0;
	}

	memset(&fm_st_proto, 0, sizeof(fm_st_proto));
	fm_st_proto.recv = fm_st_receive;
	fm_st_proto.match_packet = NULL;
	fm_st_proto.reg_complete_cb = fm_st_reg_comp_cb;
	fm_st_proto.write = NULL; /* filled in by the ST driver on registration */
	fm_st_proto.priv_data = fmdev;
	fm_st_proto.chnl_id = 0x08;
	fm_st_proto.max_frame_size = 0xff;
	fm_st_proto.hdr_len = 1;
	fm_st_proto.offset_len_in_hdr = 0;
	fm_st_proto.len_size = 1;
	fm_st_proto.reserve = 1;

	ret = st_register(&fm_st_proto);
	if (ret == -EINPROGRESS) {
		init_completion(&wait_for_fmdrv_reg_comp);
		fmdev->streg_cbdata = -EINPROGRESS;
		fmdbg("%s waiting for ST reg completion signal\n", __func__);

		if (!wait_for_completion_timeout(&wait_for_fmdrv_reg_comp,
						 FM_ST_REG_TIMEOUT)) {
			fmerr("Timeout(%d sec), didn't get reg completion signal from ST\n",
			      jiffies_to_msecs(FM_ST_REG_TIMEOUT) / 1000);
			return -ETIMEDOUT;
		}
		if (fmdev->streg_cbdata != 0) {
			fmerr("ST reg comp CB called with error status %d\n",
			      fmdev->streg_cbdata);
			return -EAGAIN;
		}

		ret = 0;
	} else if (ret == -1) {
		fmerr("st_register failed %d\n", ret);
		return -EAGAIN;
	}

	if (fm_st_proto.write != NULL) {
		g_st_write = fm_st_proto.write;
	} else {
		fmerr("Failed to get ST write func pointer\n");
		ret = st_unregister(&fm_st_proto);
		if (ret < 0)
			fmerr("st_unregister failed %d\n", ret);
		return -EAGAIN;
	}

	spin_lock_init(&fmdev->rds_buff_lock);
	spin_lock_init(&fmdev->resp_skb_lock);

	/* Initialize the TX queue and TX tasklet */
	skb_queue_head_init(&fmdev->tx_q);
	tasklet_init(&fmdev->tx_task, send_tasklet, (unsigned long)fmdev);

	/* Initialize the RX queue and RX tasklet */
	skb_queue_head_init(&fmdev->rx_q);
	tasklet_init(&fmdev->rx_task, recv_tasklet, (unsigned long)fmdev);

	fmdev->irq_info.stage = 0;
	atomic_set(&fmdev->tx_cnt, 1);
	fmdev->resp_comp = NULL;

	init_timer(&fmdev->irq_info.timer);
	fmdev->irq_info.timer.function = &int_timeout_handler;
	fmdev->irq_info.timer.data = (unsigned long)fmdev;
	/* Enable only the HW malfunction interrupt by default */
	fmdev->irq_info.mask = FM_MAL_EVENT;

	/* Region info */
	memcpy(&fmdev->rx.region, &region_configs[default_radio_region],
	       sizeof(struct region_info));

	fmdev->rx.mute_mode = FM_MUTE_OFF;
	fmdev->rx.rf_depend_mute = FM_RX_RF_DEPENDENT_MUTE_OFF;
	fmdev->rx.rds.flag = FM_RDS_DISABLE;
	fmdev->rx.freq = FM_UNDEFINED_FREQ;
	fmdev->rx.rds_mode = FM_RDS_SYSTEM_RDS;
	fmdev->rx.af_mode = FM_RX_RDS_AF_SWITCH_MODE_OFF;
	fmdev->irq_info.retry = 0;

	fm_rx_reset_rds_cache(fmdev);
	init_waitqueue_head(&fmdev->rx.rds.read_queue);

	fm_rx_reset_station_info(fmdev);
	set_bit(FM_CORE_READY, &fmdev->flag);

	return ret;
}

/*
 * Unregisters the FM protocol from the ST driver and releases driver
 * resources. Called from fm_power_down() when the chip is switched off.
 */
int fmc_release(struct fmdev *fmdev)
{
	static struct st_proto_s fm_st_proto;
	int ret;

	if (!test_bit(FM_CORE_READY, &fmdev->flag)) {
		fmdbg("FM Core is already down\n");
		return 0;
	}
	/* Wake up any RDS readers blocked on the read queue */
	wake_up_interruptible(&fmdev->rx.rds.read_queue);

	tasklet_kill(&fmdev->tx_task);
	tasklet_kill(&fmdev->rx_task);

	skb_queue_purge(&fmdev->tx_q);
	skb_queue_purge(&fmdev->rx_q);

	fmdev->resp_comp = NULL;
	fmdev->rx.freq = 0;

	memset(&fm_st_proto, 0, sizeof(fm_st_proto));
	fm_st_proto.chnl_id = 0x08;

	ret = st_unregister(&fm_st_proto);

	if (ret < 0)
		fmerr("Failed to de-register FM from ST %d\n", ret);
	else
		fmdbg("Successfully unregistered from ST\n");

	clear_bit(FM_CORE_READY, &fmdev->flag);
	return ret;
}

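/*
 * Module init/exit: allocate the driver context and the RDS ring buffer,
 * then register the V4L2 radio device (and undo all of it on exit).
 */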
static int __init fm_drv_init(void)
{
	struct fmdev *fmdev = NULL;
	int ret = -ENOMEM;

	fmdbg("FM driver version %s\n", FM_DRV_VERSION);

	fmdev = kzalloc(sizeof(struct fmdev), GFP_KERNEL);
	if (NULL == fmdev) {
		fmerr("Can't allocate operation structure memory\n");
		return ret;
	}
	fmdev->rx.rds.buf_size = default_rds_buf * FM_RDS_BLK_SIZE;
	fmdev->rx.rds.buff = kzalloc(fmdev->rx.rds.buf_size, GFP_KERNEL);
	if (NULL == fmdev->rx.rds.buff) {
		fmerr("Can't allocate rds ring buffer\n");
		goto rel_dev;
	}

	ret = fm_v4l2_init_video_device(fmdev, radio_nr);
	if (ret < 0)
		goto rel_rdsbuf;

	fmdev->irq_info.handlers = int_handler_table;
	fmdev->curr_fmmode = FM_MODE_OFF;
	fmdev->tx_data.pwr_lvl = FM_PWR_LVL_DEF;
	fmdev->tx_data.preemph = FM_TX_PREEMPH_50US;
	return ret;

rel_rdsbuf:
	kfree(fmdev->rx.rds.buff);
rel_dev:
	kfree(fmdev);

	return ret;
}

static void __exit fm_drv_exit(void)
{
	struct fmdev *fmdev = NULL;

	fmdev = fm_v4l2_deinit_video_device();
	if (fmdev != NULL) {
		kfree(fmdev->rx.rds.buff);
		kfree(fmdev);
	}
}

module_init(fm_drv_init);
module_exit(fm_drv_exit);

MODULE_AUTHOR("Manjunatha Halli <manjunatha_halli@ti.com>");
MODULE_DESCRIPTION("FM Driver for TI's Connectivity chip. " FM_DRV_VERSION);
MODULE_VERSION(FM_DRV_VERSION);
MODULE_LICENSE("GPL");