1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#define _RTL8188E_XMIT_C_
16#include <osdep_service.h>
17#include <drv_types.h>
18#include <mon.h>
19#include <wifi.h>
20#include <osdep_intf.h>
21#include <usb_ops_linux.h>
22#include <rtl8188e_hal.h>
23
24s32 rtw_hal_init_xmit_priv(struct adapter *adapt)
25{
26 struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
27
28 tasklet_init(&pxmitpriv->xmit_tasklet,
29 (void(*)(unsigned long))rtl8188eu_xmit_tasklet,
30 (unsigned long)adapt);
31 return _SUCCESS;
32}
33
34static u8 urb_zero_packet_chk(struct adapter *adapt, int sz)
35{
36 return !((sz + TXDESC_SIZE) % adapt->HalData->UsbBulkOutSize);
37}
38
39static void rtl8188eu_cal_txdesc_chksum(struct tx_desc *ptxdesc)
40{
41 u16 *usptr = (u16 *)ptxdesc;
42 u32 count = 16;
43 u32 index;
44 u16 checksum = 0;
45
46
47 ptxdesc->txdw7 &= cpu_to_le32(0xffff0000);
48
49 for (index = 0; index < count; index++)
50 checksum = checksum ^ le16_to_cpu(*(__le16 *)(usptr + index));
51 ptxdesc->txdw7 |= cpu_to_le32(0x0000ffff & checksum);
52}
53
54
55
56
/*
 * Build a minimal TX descriptor for a firmware-issued frame (null data,
 * PS-Poll or BT QoS null) of @BufferLen bytes located right after the
 * descriptor at @desc.
 */
void rtl8188e_fill_fake_txdesc(struct adapter *adapt, u8 *desc, u32 BufferLen, u8 ispspoll, u8 is_btqosnull)
{
	struct tx_desc *ptxdesc;

	/* Clear all fields before setting individual bits. */
	ptxdesc = (struct tx_desc *)desc;
	memset(desc, 0, TXDESC_SIZE);

	/* offset 0: hardware owns the descriptor; single-segment frame */
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);

	/* Header offset: descriptor size plus reserved packet offset. */
	ptxdesc->txdw0 |= cpu_to_le32(((TXDESC_SIZE+OFFSET_SZ)<<OFFSET_SHT)&0x00ff0000);

	/* Frame length in the low 16 bits of dword 0. */
	ptxdesc->txdw0 |= cpu_to_le32(BufferLen&0x0000ffff);

	/* offset 4: send through the management queue */
	ptxdesc->txdw1 |= cpu_to_le32((QSLT_MGNT<<QSEL_SHT)&0x00001f00);

	if (ispspoll) {
		/* PS-Poll carries its duration in the header NAV field. */
		ptxdesc->txdw1 |= cpu_to_le32(NAVUSEHDR);
	} else {
		/* NOTE(review): txdw4 BIT(7) / txdw3 bits 28+ presumably
		 * enable hw-managed sequence numbering for non-PS-Poll
		 * fake frames -- confirm against the 8188E datasheet. */
		ptxdesc->txdw4 |= cpu_to_le32(BIT(7));
		ptxdesc->txdw3 |= cpu_to_le32((8 << 28));
	}

	/* NOTE(review): BIT(23) presumably marks a BT QoS null frame. */
	if (is_btqosnull)
		ptxdesc->txdw2 |= cpu_to_le32(BIT(23));

	/* NOTE(review): txdw4 BIT(8) presumably selects driver-specified
	 * rate (same位 as USERATE in update_txdesc) -- confirm. */
	ptxdesc->txdw4 |= cpu_to_le32(BIT(8));

	/* The USB interface drops descriptors whose checksum is wrong. */
	rtl8188eu_cal_txdesc_chksum(ptxdesc);
}
93
94static void fill_txdesc_sectype(struct pkt_attrib *pattrib, struct tx_desc *ptxdesc)
95{
96 if ((pattrib->encrypt > 0) && !pattrib->bswenc) {
97 switch (pattrib->encrypt) {
98
99 case _WEP40_:
100 case _WEP104_:
101 ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000);
102 ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
103 break;
104 case _TKIP_:
105 case _TKIP_WTMIC_:
106 ptxdesc->txdw1 |= cpu_to_le32((0x01<<SEC_TYPE_SHT)&0x00c00000);
107 ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
108 break;
109 case _AES_:
110 ptxdesc->txdw1 |= cpu_to_le32((0x03<<SEC_TYPE_SHT)&0x00c00000);
111 ptxdesc->txdw2 |= cpu_to_le32(0x7 << AMPDU_DENSITY_SHT);
112 break;
113 case _NO_PRIVACY_:
114 default:
115 break;
116 }
117 }
118}
119
120static void fill_txdesc_vcs(struct pkt_attrib *pattrib, __le32 *pdw)
121{
122 switch (pattrib->vcs_mode) {
123 case RTS_CTS:
124 *pdw |= cpu_to_le32(RTS_EN);
125 break;
126 case CTS_TO_SELF:
127 *pdw |= cpu_to_le32(CTS_2_SELF);
128 break;
129 case NONE_VCS:
130 default:
131 break;
132 }
133 if (pattrib->vcs_mode) {
134 *pdw |= cpu_to_le32(HW_RTS_EN);
135
136 if (pattrib->ht_en) {
137 *pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(27)) : 0;
138
139 if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
140 *pdw |= cpu_to_le32((0x01 << 28) & 0x30000000);
141 else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
142 *pdw |= cpu_to_le32((0x02 << 28) & 0x30000000);
143 else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
144 *pdw |= 0;
145 else
146 *pdw |= cpu_to_le32((0x03 << 28) & 0x30000000);
147 }
148 }
149}
150
151static void fill_txdesc_phy(struct pkt_attrib *pattrib, __le32 *pdw)
152{
153 if (pattrib->ht_en) {
154 *pdw |= (pattrib->bwmode&HT_CHANNEL_WIDTH_40) ? cpu_to_le32(BIT(25)) : 0;
155
156 if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
157 *pdw |= cpu_to_le32((0x01 << DATA_SC_SHT) & 0x003f0000);
158 else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_UPPER)
159 *pdw |= cpu_to_le32((0x02 << DATA_SC_SHT) & 0x003f0000);
160 else if (pattrib->ch_offset == HAL_PRIME_CHNL_OFFSET_DONT_CARE)
161 *pdw |= 0;
162 else
163 *pdw |= cpu_to_le32((0x03 << DATA_SC_SHT) & 0x003f0000);
164 }
165}
166
/*
 * Fill in the TX descriptor in front of the frame at @pmem (payload length
 * @sz, descriptor not included).  Returns 1 ("pull") when the descriptor
 * was shifted forward by PACKET_OFFSET_SZ, 0 otherwise; the caller must
 * adjust its buffer pointers accordingly.
 */
static s32 update_txdesc(struct xmit_frame *pxmitframe, u8 *pmem, s32 sz, u8 bagg_pkt)
{
	int pull = 0;
	uint qsel;
	u8 data_rate, pwr_status, offset;
	struct adapter *adapt = pxmitframe->padapter;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct odm_dm_struct *odmpriv = &adapt->HalData->odmpriv;
	struct tx_desc *ptxdesc = (struct tx_desc *)pmem;
	struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	int bmcst = IS_MCAST(pattrib->ra);

	/* Outside MP mode, non-aggregated frames normally reclaim the 8-byte
	 * reserved packet offset by "pulling" the descriptor forward; the
	 * padding is kept only when (sz + TXDESC_SIZE) would land exactly on
	 * a bulk-out boundary, which would force a zero-length packet. */
	if (adapt->registrypriv.mp_mode == 0) {
		if ((!bagg_pkt) && (urb_zero_packet_chk(adapt, sz) == 0)) {
			ptxdesc = (struct tx_desc *)(pmem+PACKET_OFFSET_SZ);
			pull = 1;
		}
	}

	memset(ptxdesc, 0, sizeof(struct tx_desc));

	/* offset 0: hardware owns the descriptor; single-segment frame */
	ptxdesc->txdw0 |= cpu_to_le32(OWN | FSG | LSG);
	ptxdesc->txdw0 |= cpu_to_le32(sz & 0x0000ffff);

	/* Header offset: descriptor size plus reserved packet offset. */
	offset = TXDESC_SIZE + OFFSET_SZ;

	ptxdesc->txdw0 |= cpu_to_le32(((offset) << OFFSET_SHT) & 0x00ff0000);

	if (bmcst)
		ptxdesc->txdw0 |= cpu_to_le32(BMC);

	/* A pulled frame no longer carries the reserved offset, so account
	 * for that in pkt_offset before it is written into the descriptor. */
	if (adapt->registrypriv.mp_mode == 0) {
		if (!bagg_pkt) {
			if ((pull) && (pxmitframe->pkt_offset > 0))
				pxmitframe->pkt_offset = pxmitframe->pkt_offset - 1;
		}
	}

	/* pkt_offset (bits 26-30 of dword 1), in PACKET_OFFSET_SZ units */
	if (pxmitframe->pkt_offset > 0)
		ptxdesc->txdw1 |= cpu_to_le32((pxmitframe->pkt_offset << 26) & 0x7c000000);

	/* Driver (not firmware) chooses the data rate. */
	ptxdesc->txdw4 |= cpu_to_le32(USERATE);

	if ((pxmitframe->frame_tag & 0x0f) == DATA_FRAMETAG) {
		/* offset 4: mac_id, queue select, rate-adaptation group */
		ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3F);

		qsel = (uint)(pattrib->qsel & 0x0000001f);
		ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);

		ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000F0000);

		fill_txdesc_sectype(pattrib, ptxdesc);

		if (pattrib->ampdu_en) {
			ptxdesc->txdw2 |= cpu_to_le32(AGG_EN);
			/* NOTE(review): 0x6666f800 looks like A-MPDU/rate
			 * fallback tuning -- confirm against the datasheet. */
			ptxdesc->txdw6 = cpu_to_le32(0x6666f800);
		} else {
			ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);
		}

		/* offset 12: sequence number */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum << SEQ_SHT) & 0x0FFF0000);

		/* offset 16: QoS frame flag */
		if (pattrib->qos_en)
			ptxdesc->txdw4 |= cpu_to_le32(QOS);

		/* offset 20: number of frames in this USB TX aggregate */
		if (pxmitframe->agg_num > 1)
			ptxdesc->txdw5 |= cpu_to_le32((pxmitframe->agg_num << USB_TXAGG_NUM_SHT) & 0xFF000000);

		if ((pattrib->ether_type != 0x888e) &&
		    (pattrib->ether_type != 0x0806) &&
		    (pattrib->ether_type != 0x88b4) &&
		    (pattrib->dhcp_pkt != 1)) {
			/* Ordinary data: enable protection/PHY settings and
			 * let the rate-adaptation module pick rate and SGI. */
			fill_txdesc_vcs(pattrib, &ptxdesc->txdw4);
			fill_txdesc_phy(pattrib, &ptxdesc->txdw4);

			/* NOTE(review): the two magic constants below look
			 * like rate-fallback enables/limits -- confirm. */
			ptxdesc->txdw4 |= cpu_to_le32(0x00000008);
			ptxdesc->txdw5 |= cpu_to_le32(0x0001ff00);

			if (pattrib->ht_en) {
				if (ODM_RA_GetShortGI_8188E(odmpriv, pattrib->mac_id))
					ptxdesc->txdw5 |= cpu_to_le32(SGI);
			}
			data_rate = ODM_RA_GetDecisionRate_8188E(odmpriv, pattrib->mac_id);
			ptxdesc->txdw5 |= cpu_to_le32(data_rate & 0x3F);
			pwr_status = ODM_RA_GetHwPwrStatus_8188E(odmpriv, pattrib->mac_id);
			ptxdesc->txdw4 |= cpu_to_le32((pwr_status & 0x7) << PWR_STATUS_SHT);
		} else {
			/* EAPOL / ARP / WAPI / DHCP: send at the current
			 * management rate, no aggregation, so these critical
			 * frames get through reliably. */
			ptxdesc->txdw2 |= cpu_to_le32(AGG_BK);
			/* short preamble (guarded by PREAMBLE_SHORT) */
			if (pmlmeinfo->preamble_mode == PREAMBLE_SHORT)
				ptxdesc->txdw4 |= cpu_to_le32(BIT(24));
			ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
		}
	} else if ((pxmitframe->frame_tag&0x0f) == MGNT_FRAMETAG) {
		/* offset 4: mac_id, queue select, rate-adaptation group */
		ptxdesc->txdw1 |= cpu_to_le32(pattrib->mac_id & 0x3f);

		qsel = (uint)(pattrib->qsel&0x0000001f);
		ptxdesc->txdw1 |= cpu_to_le32((qsel << QSEL_SHT) & 0x00001f00);

		ptxdesc->txdw1 |= cpu_to_le32((pattrib->raid << RATE_ID_SHT) & 0x000f0000);

		/* offset 8: NOTE(review): BIT(19) presumably requests a TX
		 * report for frames expecting an ack report -- confirm. */
		if (pxmitframe->ack_report)
			ptxdesc->txdw2 |= cpu_to_le32(BIT(19));

		/* offset 12: sequence number */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0FFF0000);

		/* offset 20: retry limit -- smaller limit when retry_ctrl is set */
		ptxdesc->txdw5 |= cpu_to_le32(RTY_LMT_EN);
		if (pattrib->retry_ctrl)
			ptxdesc->txdw5 |= cpu_to_le32(0x00180000);
		else
			ptxdesc->txdw5 |= cpu_to_le32(0x00300000);

		ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
	} else if ((pxmitframe->frame_tag&0x0f) == TXAGG_FRAMETAG) {
		DBG_88E("pxmitframe->frame_tag == TXAGG_FRAMETAG\n");
	} else {
		DBG_88E("pxmitframe->frame_tag = %d\n", pxmitframe->frame_tag);

		/* Unknown tag: fall back to mac_id 4, rate id 6, basic rate. */
		ptxdesc->txdw1 |= cpu_to_le32((4) & 0x3f);

		ptxdesc->txdw1 |= cpu_to_le32((6 << RATE_ID_SHT) & 0x000f0000);

		/* offset 12: sequence number */
		ptxdesc->txdw3 |= cpu_to_le32((pattrib->seqnum<<SEQ_SHT)&0x0fff0000);

		/* offset 20: current management rate */
		ptxdesc->txdw5 |= cpu_to_le32(MRateToHwRate(pmlmeext->tx_rate));
	}

	/* Non-QoS frames let the hardware maintain the sequence number. */
	if (!pattrib->qos_en) {
		ptxdesc->txdw3 |= cpu_to_le32(EN_HWSEQ);
		ptxdesc->txdw4 |= cpu_to_le32(HW_SSN);
	}

	/* Antenna diversity: stamp the chosen TX antenna into the descriptor. */
	rtl88eu_dm_set_tx_ant_by_tx_info(odmpriv, pmem, pattrib->mac_id);

	/* The USB interface drops descriptors whose checksum is wrong. */
	rtl8188eu_cal_txdesc_chksum(ptxdesc);
	_dbg_dump_tx_info(adapt, pxmitframe->frame_tag, ptxdesc);
	return pull;
}
337
338
339static s32 rtw_dump_xframe(struct adapter *adapt, struct xmit_frame *pxmitframe)
340{
341 s32 ret = _SUCCESS;
342 s32 inner_ret = _SUCCESS;
343 int t, sz, w_sz, pull = 0;
344 u8 *mem_addr;
345 u32 ff_hwaddr;
346 struct xmit_buf *pxmitbuf = pxmitframe->pxmitbuf;
347 struct pkt_attrib *pattrib = &pxmitframe->attrib;
348 struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
349 struct security_priv *psecuritypriv = &adapt->securitypriv;
350 if ((pxmitframe->frame_tag == DATA_FRAMETAG) &&
351 (pxmitframe->attrib.ether_type != 0x0806) &&
352 (pxmitframe->attrib.ether_type != 0x888e) &&
353 (pxmitframe->attrib.ether_type != 0x88b4) &&
354 (pxmitframe->attrib.dhcp_pkt != 1))
355 rtw_issue_addbareq_cmd(adapt, pxmitframe);
356 mem_addr = pxmitframe->buf_addr;
357
358 RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_dump_xframe()\n"));
359
360 for (t = 0; t < pattrib->nr_frags; t++) {
361 if (inner_ret != _SUCCESS && ret == _SUCCESS)
362 ret = _FAIL;
363
364 if (t != (pattrib->nr_frags - 1)) {
365 RT_TRACE(_module_rtl871x_xmit_c_, _drv_err_, ("pattrib->nr_frags=%d\n", pattrib->nr_frags));
366
367 sz = pxmitpriv->frag_len;
368 sz = sz - 4 - (psecuritypriv->sw_encrypt ? 0 : pattrib->icv_len);
369 } else {
370
371 sz = pattrib->last_txcmdsz;
372 }
373
374 pull = update_txdesc(pxmitframe, mem_addr, sz, false);
375
376 if (pull) {
377 mem_addr += PACKET_OFFSET_SZ;
378 pxmitframe->buf_addr = mem_addr;
379 w_sz = sz + TXDESC_SIZE;
380 } else {
381 w_sz = sz + TXDESC_SIZE + PACKET_OFFSET_SZ;
382 }
383 ff_hwaddr = rtw_get_ff_hwaddr(pxmitframe);
384
385 inner_ret = usb_write_port(adapt, ff_hwaddr, w_sz, pxmitbuf);
386
387 rtw_count_tx_stats(adapt, pxmitframe, sz);
388
389 RT_TRACE(_module_rtl871x_xmit_c_, _drv_info_, ("rtw_write_port, w_sz=%d\n", w_sz));
390
391 mem_addr += w_sz;
392
393 mem_addr = (u8 *)round_up((size_t)mem_addr, 4);
394 }
395
396 rtw_free_xmitframe(pxmitpriv, pxmitframe);
397
398 if (ret != _SUCCESS)
399 rtw_sctx_done_err(&pxmitbuf->sctx, RTW_SCTX_DONE_UNKNOWN);
400
401 return ret;
402}
403
404static u32 xmitframe_need_length(struct xmit_frame *pxmitframe)
405{
406 struct pkt_attrib *pattrib = &pxmitframe->attrib;
407
408 u32 len;
409
410
411 len = pattrib->hdrlen + pattrib->iv_len +
412 SNAP_SIZE + sizeof(u16) +
413 pattrib->pktlen +
414 ((pattrib->bswenc) ? pattrib->icv_len : 0);
415
416 if (pattrib->encrypt == _TKIP_)
417 len += 8;
418
419 return len;
420}
421
/*
 * Dequeue pending xmit frames and pack as many as fit into one USB bulk
 * transfer (USB TX aggregation).  The first frame reserves a packet
 * offset; further frames from the same station/TID queue are appended at
 * 8-byte-aligned offsets until the buffer, the per-bulk descriptor budget
 * (UsbTxAggDescNum) or MAX_TX_AGG_PACKET_NUMBER is exhausted.  Returns
 * true when a transfer was submitted, false when there was nothing to
 * send or no xmit buffer was available.
 */
s32 rtl8188eu_xmitframe_complete(struct adapter *adapt, struct xmit_priv *pxmitpriv)
{
	struct xmit_frame *pxmitframe = NULL;
	struct xmit_frame *pfirstframe = NULL;
	struct xmit_buf *pxmitbuf;

	/* aggregation bookkeeping */
	struct hw_xmit *phwxmit;
	struct sta_info *psta = NULL;
	struct tx_servq *ptxservq = NULL;

	struct list_head *xmitframe_plist = NULL, *xmitframe_phead = NULL;

	u32 pbuf;	/* offset of the next frame within the xmit buffer */
	u32 pbuf_tail;	/* end of the data written so far */
	u32 len;	/* bytes needed by the current frame incl. descriptor */

	u32 bulksize = adapt->HalData->UsbBulkOutSize;
	u8 desc_cnt;	/* descriptors within the current bulk packet */
	u32 bulkptr;	/* next bulk-packet boundary */

	u32 ff_hwaddr;

	RT_TRACE(_module_rtl8192c_xmit_c_, _drv_info_, ("+xmitframe_complete\n"));

	pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
	if (pxmitbuf == NULL)
		return false;

	/* NOTE(review): pxmitframe is still NULL here, so this call looks
	 * like a no-op leftover -- confirm before removing. */
	rtw_free_xmitframe(pxmitpriv, pxmitframe);

	pxmitframe = rtw_dequeue_xframe(pxmitpriv, pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
	if (pxmitframe == NULL) {
		/* Nothing queued: return the buffer we reserved. */
		rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
		return false;
	}

	pxmitframe->pxmitbuf = pxmitbuf;
	pxmitframe->buf_addr = pxmitbuf->pbuf;
	pxmitbuf->priv_data = pxmitframe;

	/* First frame of the aggregate: counts itself, reserves offset. */
	pxmitframe->agg_num = 1;
	pxmitframe->pkt_offset = 1;

	rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);

	/* Release the OS packet now that it has been coalesced. */
	rtw_os_xmit_complete(adapt, pxmitframe);

	/* Step 1: place the first frame. */
	pfirstframe = pxmitframe;
	len = xmitframe_need_length(pfirstframe) + TXDESC_SIZE + (pfirstframe->pkt_offset*PACKET_OFFSET_SZ);
	pbuf_tail = len;
	pbuf = round_up(pbuf_tail, 8);	/* next frame starts 8-byte aligned */

	/* Track descriptors per bulk packet against the hardware budget. */
	desc_cnt = 0;
	bulkptr = bulksize;
	if (pbuf < bulkptr) {
		desc_cnt++;
	} else {
		desc_cnt = 0;
		bulkptr = ((pbuf / bulksize) + 1) * bulksize;	/* next boundary */
	}

	/* Step 2: append more frames from the same station's queue for this
	 * access category.
	 * NOTE(review): psta is dereferenced without a NULL check -- callers
	 * presumably guarantee attrib.psta is set for queued data frames;
	 * confirm. */
	psta = pfirstframe->attrib.psta;
	switch (pfirstframe->attrib.priority) {
	case 1:
	case 2:
		ptxservq = &(psta->sta_xmitpriv.bk_q);
		phwxmit = pxmitpriv->hwxmits + 3;
		break;
	case 4:
	case 5:
		ptxservq = &(psta->sta_xmitpriv.vi_q);
		phwxmit = pxmitpriv->hwxmits + 1;
		break;
	case 6:
	case 7:
		ptxservq = &(psta->sta_xmitpriv.vo_q);
		phwxmit = pxmitpriv->hwxmits;
		break;
	case 0:
	case 3:
	default:
		ptxservq = &(psta->sta_xmitpriv.be_q);
		phwxmit = pxmitpriv->hwxmits + 2;
		break;
	}
	spin_lock_bh(&pxmitpriv->lock);

	xmitframe_phead = get_list_head(&ptxservq->sta_pending);
	xmitframe_plist = xmitframe_phead->next;

	while (xmitframe_phead != xmitframe_plist) {
		pxmitframe = container_of(xmitframe_plist, struct xmit_frame, list);
		xmitframe_plist = xmitframe_plist->next;

		/* Not the first frame: no packet offset reservation. */
		pxmitframe->agg_num = 0;
		pxmitframe->pkt_offset = 0;

		len = xmitframe_need_length(pxmitframe) + TXDESC_SIZE + (pxmitframe->pkt_offset*PACKET_OFFSET_SZ);

		/* Would overflow the xmit buffer: restore the frame's
		 * first-frame settings and leave it queued for the next URB. */
		if (round_up(pbuf + len, 8) > MAX_XMITBUF_SZ) {
			pxmitframe->agg_num = 1;
			pxmitframe->pkt_offset = 1;
			break;
		}
		list_del_init(&pxmitframe->list);
		ptxservq->qcnt--;
		phwxmit->accnt--;

		pxmitframe->buf_addr = pxmitbuf->pbuf + pbuf;

		rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);

		/* Release the OS packet now that it has been coalesced. */
		rtw_os_xmit_complete(adapt, pxmitframe);

		/* Write this frame's descriptor in place. */
		update_txdesc(pxmitframe, pxmitframe->buf_addr, pxmitframe->attrib.last_txcmdsz, true);

		/* The aggregated frame itself is no longer needed. */
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		/* Advance and test the stop conditions. */
		pbuf_tail = pbuf + len;
		pbuf = round_up(pbuf_tail, 8);

		pfirstframe->agg_num++;
		if (MAX_TX_AGG_PACKET_NUMBER == pfirstframe->agg_num)
			break;

		if (pbuf < bulkptr) {
			desc_cnt++;
			if (desc_cnt == adapt->HalData->UsbTxAggDescNum)
				break;
		} else {
			/* Crossed a bulk-packet boundary: reset the budget. */
			desc_cnt = 0;
			bulkptr = ((pbuf / bulksize) + 1) * bulksize;
		}
	}

	if (list_empty(&ptxservq->sta_pending.queue))
		list_del_init(&ptxservq->tx_pending);

	spin_unlock_bh(&pxmitpriv->lock);
	/* Kick off ADDBA for ordinary data (not EAPOL/ARP/WAPI/DHCP). */
	if ((pfirstframe->attrib.ether_type != 0x0806) &&
	    (pfirstframe->attrib.ether_type != 0x888e) &&
	    (pfirstframe->attrib.ether_type != 0x88b4) &&
	    (pfirstframe->attrib.dhcp_pkt != 1))
		rtw_issue_addbareq_cmd(adapt, pfirstframe);

	/* Step 3: finalize the first frame's descriptor.  If the transfer
	 * would end exactly on a bulk boundary, drop the reserved packet
	 * offset to avoid a zero-length USB packet. */
	if ((pbuf_tail % bulksize) == 0) {
		pbuf_tail -= PACKET_OFFSET_SZ;
		pfirstframe->buf_addr += PACKET_OFFSET_SZ;
		pfirstframe->pkt_offset--;
	}

	update_txdesc(pfirstframe, pfirstframe->buf_addr, pfirstframe->attrib.last_txcmdsz, true);

	/* Step 4: hand the whole aggregate to the USB layer. */
	ff_hwaddr = rtw_get_ff_hwaddr(pfirstframe);
	usb_write_port(adapt, ff_hwaddr, pbuf_tail, pxmitbuf);

	/* Step 5: statistics count payload bytes only, so strip the
	 * descriptors and packet offsets from the total. */
	pbuf_tail -= (pfirstframe->agg_num * TXDESC_SIZE);
	pbuf_tail -= (pfirstframe->pkt_offset * PACKET_OFFSET_SZ);

	rtw_count_tx_stats(adapt, pfirstframe, pbuf_tail);

	rtw_free_xmitframe(pxmitpriv, pfirstframe);

	return true;
}
601
602
603
604
605
606
/*
 * Hardware xmit entry point for data frames.  If this station/AC already
 * has frames pending, or the device is scanning/associating, or no xmit
 * buffer is free, the frame is enqueued for the TX tasklet; otherwise it
 * is coalesced and dumped to USB immediately.
 *
 * Returns true when the caller no longer owns the frame (sent, or freed
 * after a failure); returns false when the frame was queued successfully.
 */
s32 rtw_hal_xmit(struct adapter *adapt, struct xmit_frame *pxmitframe)
{
	s32 res;
	struct xmit_buf *pxmitbuf = NULL;
	struct xmit_priv *pxmitpriv = &adapt->xmitpriv;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct mlme_priv *pmlmepriv = &adapt->mlmepriv;

	spin_lock_bh(&pxmitpriv->lock);

	/* Preserve per-AC ordering: if frames are already queued for this
	 * station/AC, this one must queue behind them. */
	if (rtw_txframes_sta_ac_pending(adapt, pattrib) > 0)
		goto enqueue;

	/* Don't transmit directly while scanning or associating. */
	if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY|_FW_UNDER_LINKING) == true)
		goto enqueue;

	pxmitbuf = rtw_alloc_xmitbuf(pxmitpriv);
	if (!pxmitbuf)
		goto enqueue;

	/* Direct-send path: a buffer is ours, the lock can be dropped. */
	spin_unlock_bh(&pxmitpriv->lock);

	pxmitframe->pxmitbuf = pxmitbuf;
	pxmitframe->buf_addr = pxmitbuf->pbuf;
	pxmitbuf->priv_data = pxmitframe;

	res = rtw_xmitframe_coalesce(adapt, pxmitframe->pkt, pxmitframe);

	if (res == _SUCCESS) {
		/* rtw_dump_xframe frees the frame in all cases. */
		rtw_dump_xframe(adapt, pxmitframe);
	} else {
		DBG_88E("==> %s xmitframe_coalsece failed\n", __func__);
		rtw_free_xmitbuf(pxmitpriv, pxmitbuf);
		rtw_free_xmitframe(pxmitpriv, pxmitframe);
	}

	return true;

enqueue:
	/* Queue under the lock, then release it. */
	res = rtw_xmitframe_enqueue(adapt, pxmitframe);
	spin_unlock_bh(&pxmitpriv->lock);

	if (res != _SUCCESS) {
		RT_TRACE(_module_xmit_osdep_c_, _drv_err_, ("pre_xmitframe: enqueue xmitframe fail\n"));
		rtw_free_xmitframe(pxmitpriv, pxmitframe);

		/* NOTE(review): presumably compensates for a tx_pkts
		 * increment made elsewhere before this call -- confirm. */
		pxmitpriv->tx_pkts--;
		pxmitpriv->tx_drop++;
		return true;
	}

	return false;
}
661
662s32 rtw_hal_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
663{
664 struct xmit_priv *xmitpriv = &adapt->xmitpriv;
665
666 rtl88eu_mon_xmit_hook(adapt->pmondev, pmgntframe, xmitpriv->frag_len);
667 return rtw_dump_xframe(adapt, pmgntframe);
668}
669