drivers/net/wireless/mediatek/mt76/tx.c


DEFINITIONS

This source file includes the following definitions:
  1. mt76_alloc_txwi
  2. __mt76_get_txwi
  3. mt76_get_txwi
  4. mt76_put_txwi
  5. mt76_tx_free
  6. mt76_txq_get_qid
  7. mt76_check_agg_ssn
  8. mt76_tx_status_lock
  9. mt76_tx_status_unlock
  10. __mt76_tx_status_skb_done
  11. mt76_tx_status_skb_done
  12. mt76_tx_status_skb_add
  13. mt76_tx_status_skb_get
  14. mt76_tx_status_check
  15. mt76_tx_complete_skb
  16. mt76_tx
  17. mt76_txq_dequeue
  18. mt76_queue_ps_skb
  19. mt76_release_buffered_frames
  20. mt76_txq_send_burst
  21. mt76_txq_schedule_list
  22. mt76_txq_schedule
  23. mt76_txq_schedule_all
  24. mt76_tx_tasklet
  25. mt76_stop_tx_queues
  26. mt76_wake_tx_queue
  27. mt76_txq_remove
  28. mt76_txq_init
  29. mt76_ac_to_hwq

// SPDX-License-Identifier: ISC
/*
 * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
 */

#include "mt76.h"

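/*
 * Allocate a txwi (TX descriptor) buffer together with its cache entry and
 * map it for device DMA. The mt76_txwi_cache struct is placed directly
 * behind the hardware descriptor within the same allocation.
 */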
static struct mt76_txwi_cache *
mt76_alloc_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;
        dma_addr_t addr;
        u8 *txwi;
        int size;

        size = L1_CACHE_ALIGN(dev->drv->txwi_size + sizeof(*t));
        txwi = devm_kzalloc(dev->dev, size, GFP_ATOMIC);
        if (!txwi)
                return NULL;

        addr = dma_map_single(dev->dev, txwi, dev->drv->txwi_size,
                              DMA_TO_DEVICE);
        t = (struct mt76_txwi_cache *)(txwi + dev->drv->txwi_size);
        t->dma_addr = addr;

        return t;
}

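/* Pop a previously allocated txwi buffer from the cache, if one is available */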
static struct mt76_txwi_cache *
__mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = NULL;

        spin_lock_bh(&dev->lock);
        if (!list_empty(&dev->txwi_cache)) {
                t = list_first_entry(&dev->txwi_cache, struct mt76_txwi_cache,
                                     list);
                list_del(&t->list);
        }
        spin_unlock_bh(&dev->lock);

        return t;
}

struct mt76_txwi_cache *
mt76_get_txwi(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t = __mt76_get_txwi(dev);

        if (t)
                return t;

        return mt76_alloc_txwi(dev);
}

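/* Return a txwi buffer to the cache for later reuse */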
void
mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t)
{
        if (!t)
                return;

        spin_lock_bh(&dev->lock);
        list_add(&t->list, &dev->txwi_cache);
        spin_unlock_bh(&dev->lock);
}
EXPORT_SYMBOL_GPL(mt76_put_txwi);

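/*
 * Unmap all cached txwi buffers on teardown. The buffers themselves are
 * devm-allocated and are freed together with the device.
 */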
void mt76_tx_free(struct mt76_dev *dev)
{
        struct mt76_txwi_cache *t;

        while ((t = __mt76_get_txwi(dev)) != NULL)
                dma_unmap_single(dev->dev, t->dma_addr, dev->drv->txwi_size,
                                 DMA_TO_DEVICE);
}

static int
mt76_txq_get_qid(struct ieee80211_txq *txq)
{
        if (!txq->sta)
                return MT_TXQ_BE;

        return txq->ac;
}

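/*
 * Track the next expected sequence number of an aggregation session. The
 * sequence number occupies the upper 12 bits of seq_ctrl (the low 4 bits
 * are the fragment number), so adding 0x10 advances it by one frame.
 */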
static void
mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb)
{
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;

        if (!ieee80211_is_data_qos(hdr->frame_control) ||
            !ieee80211_is_data_present(hdr->frame_control))
                return;

        mtxq->agg_ssn = le16_to_cpu(hdr->seq_ctrl) + 0x10;
}

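/*
 * mt76_tx_status_lock/unlock guard dev->status_list. Completed frames are
 * collected on a caller-provided list under the lock and only reported to
 * mac80211 after the lock has been dropped.
 */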
void
mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
                   __acquires(&dev->status_list.lock)
{
        __skb_queue_head_init(list);
        spin_lock_bh(&dev->status_list.lock);
        __acquire(&dev->status_list.lock);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_lock);

void
mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
                      __releases(&dev->status_list.lock)
{
        struct sk_buff *skb;

        spin_unlock_bh(&dev->status_list.lock);
        __release(&dev->status_list.lock);

        while ((skb = __skb_dequeue(list)) != NULL)
                ieee80211_tx_status(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_unlock);

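/*
 * Complete a tracked frame once both the DMA completion and the TX status
 * have been seen for it. Frames that only saw one of the two events stay
 * on the status list.
 */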
static void
__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags,
                          struct sk_buff_head *list)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
        u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE;

        flags |= cb->flags;
        cb->flags = flags;

        if ((flags & done) != done)
                return;

        __skb_unlink(skb, &dev->status_list);

        /* Tx status can be unreliable. If it fails, mark the frame as ACKed */
        if (flags & MT_TX_CB_TXS_FAILED) {
                ieee80211_tx_info_clear_status(info);
                info->status.rates[0].idx = -1;
                info->flags |= IEEE80211_TX_STAT_ACK;
        }

        __skb_queue_tail(list, skb);
}

void
mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
                        struct sk_buff_head *list)
{
        __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done);

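/*
 * Register a frame for TX status tracking and assign it a packet ID from
 * the per-WCID counter. Returns MT_PACKET_ID_NO_ACK or MT_PACKET_ID_NO_SKB
 * when the frame does not need a full status report.
 */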
int
mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
                       struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);
        int pid;

        if (!wcid)
                return MT_PACKET_ID_NO_ACK;

        if (info->flags & IEEE80211_TX_CTL_NO_ACK)
                return MT_PACKET_ID_NO_ACK;

        if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS |
                             IEEE80211_TX_CTL_RATE_CTRL_PROBE)))
                return MT_PACKET_ID_NO_SKB;

        spin_lock_bh(&dev->status_list.lock);

        memset(cb, 0, sizeof(*cb));
        wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
        if (wcid->packet_id == MT_PACKET_ID_NO_ACK ||
            wcid->packet_id == MT_PACKET_ID_NO_SKB)
                wcid->packet_id = MT_PACKET_ID_FIRST;

        pid = wcid->packet_id;
        cb->wcid = wcid->idx;
        cb->pktid = pid;
        cb->jiffies = jiffies;

        __skb_queue_tail(&dev->status_list, skb);
        spin_unlock_bh(&dev->status_list.lock);

        return pid;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add);

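/*
 * Look up the tracked frame matching @pktid for @wcid. Entries that have
 * timed out (or all entries, when @pktid is negative) are completed with
 * MT_TX_CB_TXS_FAILED along the way. Must be called with the status list
 * lock held, see mt76_tx_status_lock().
 */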
struct sk_buff *
mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid,
                       struct sk_buff_head *list)
{
        struct sk_buff *skb, *tmp;

        skb_queue_walk_safe(&dev->status_list, skb, tmp) {
                struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb);

                if (wcid && cb->wcid != wcid->idx)
                        continue;

                if (cb->pktid == pktid)
                        return skb;

                if (pktid >= 0 && !time_after(jiffies, cb->jiffies +
                                              MT_TX_STATUS_SKB_TIMEOUT))
                        continue;

                __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED |
                                                    MT_TX_CB_TXS_DONE, list);
        }

        return NULL;
}
EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get);

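/* Expire timed-out status entries for @wcid (all stations if NULL); flush
 * every entry when @flush is set.
 */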
void
mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush)
{
        struct sk_buff_head list;

        mt76_tx_status_lock(dev, &list);
        mt76_tx_status_skb_get(dev, wcid, flush ? -1 : 0, &list);
        mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_status_check);

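/*
 * DMA completion handler. Frames that were never linked into the status
 * list are freed right away; tracked frames are only reported to mac80211
 * once their TX status has arrived as well.
 */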
void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb)
{
        struct sk_buff_head list;

        if (!skb->prev) {
                ieee80211_free_txskb(dev->hw, skb);
                return;
        }

        mt76_tx_status_lock(dev, &list);
        __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list);
        mt76_tx_status_unlock(dev, &list);
}
EXPORT_SYMBOL_GPL(mt76_tx_complete_skb);

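/*
 * Main transmit path: fill in rate info unless the WCID overrides it,
 * update the aggregation SSN for QoS data frames, hand the frame to the
 * queue backend and stop the mac80211 queue when the hardware queue is
 * almost full (fewer than 8 free descriptors).
 */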
void
mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
        struct mt76_wcid *wcid, struct sk_buff *skb)
{
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
        struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
        struct mt76_queue *q;
        int qid = skb_get_queue_mapping(skb);

        if (WARN_ON(qid >= MT_TXQ_PSD)) {
                qid = MT_TXQ_BE;
                skb_set_queue_mapping(skb, qid);
        }

        if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
                ieee80211_get_tx_rates(info->control.vif, sta, skb,
                                       info->control.rates, 1);

        if (sta && ieee80211_is_data_qos(hdr->frame_control)) {
                struct ieee80211_txq *txq;
                struct mt76_txq *mtxq;
                u8 tid;

                tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
                txq = sta->txq[tid];
                mtxq = (struct mt76_txq *)txq->drv_priv;

                if (mtxq->aggr)
                        mt76_check_agg_ssn(mtxq, skb);
        }

        q = dev->q_tx[qid].q;

        spin_lock_bh(&q->lock);
        dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, sta);
        dev->queue_ops->kick(dev, q);

        if (q->queued > q->ndesc - 8 && !q->stopped) {
                ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb));
                q->stopped = true;
        }

        spin_unlock_bh(&q->lock);
}
EXPORT_SYMBOL_GPL(mt76_tx);

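/*
 * Dequeue the next frame for a txq, preferring the driver-local retry
 * queue over mac80211's queue. When servicing a powersave station, clear
 * the buffered-frames indication once the retry queue drains.
 */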
static struct sk_buff *
mt76_txq_dequeue(struct mt76_dev *dev, struct mt76_txq *mtxq, bool ps)
{
        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
        struct sk_buff *skb;

        skb = skb_dequeue(&mtxq->retry_q);
        if (skb) {
                u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;

                if (ps && skb_queue_empty(&mtxq->retry_q))
                        ieee80211_sta_set_buffered(txq->sta, tid, false);

                return skb;
        }

        return ieee80211_tx_dequeue(dev->hw, txq);
}

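/*
 * Queue one powersave response frame on the PSD queue, setting the
 * more-data bit on all but the last frame and requesting TX status with
 * EOSP on the last one.
 */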
static void
mt76_queue_ps_skb(struct mt76_dev *dev, struct ieee80211_sta *sta,
                  struct sk_buff *skb, bool last)
{
        struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv;
        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

        info->control.flags |= IEEE80211_TX_CTRL_PS_RESPONSE;
        if (last)
                info->flags |= IEEE80211_TX_STATUS_EOSP |
                               IEEE80211_TX_CTL_REQ_TX_STATUS;

        mt76_skb_set_moredata(skb, !last);
        dev->queue_ops->tx_queue_skb(dev, MT_TXQ_PSD, skb, wcid, sta);
}

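/*
 * mac80211 callback for releasing frames buffered for a station in
 * powersave. Frames are pulled from the requested TIDs and sent on the
 * PSD queue; if nothing was buffered, end the service period instead.
 */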
void
mt76_release_buffered_frames(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
                             u16 tids, int nframes,
                             enum ieee80211_frame_release_type reason,
                             bool more_data)
{
        struct mt76_dev *dev = hw->priv;
        struct sk_buff *last_skb = NULL;
        struct mt76_queue *hwq = dev->q_tx[MT_TXQ_PSD].q;
        int i;

        spin_lock_bh(&hwq->lock);
        for (i = 0; tids && nframes; i++, tids >>= 1) {
                struct ieee80211_txq *txq = sta->txq[i];
                struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;
                struct sk_buff *skb;

                if (!(tids & 1))
                        continue;

                do {
                        skb = mt76_txq_dequeue(dev, mtxq, true);
                        if (!skb)
                                break;

                        if (mtxq->aggr)
                                mt76_check_agg_ssn(mtxq, skb);

                        nframes--;
                        if (last_skb)
                                mt76_queue_ps_skb(dev, sta, last_skb, false);

                        last_skb = skb;
                } while (nframes);
        }

        if (last_skb) {
                mt76_queue_ps_skb(dev, sta, last_skb, true);
                dev->queue_ops->kick(dev, hwq);
        } else {
                ieee80211_sta_eosp(sta);
        }

        spin_unlock_bh(&hwq->lock);
}
EXPORT_SYMBOL_GPL(mt76_release_buffered_frames);

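/*
 * Send a burst of frames from one txq: up to 16 frames when aggregation
 * is active, 3 otherwise. The burst ends early on rate-probing frames or
 * when the A-MPDU state of a dequeued frame changes. Returns the number
 * of frames queued, or a negative error code.
 */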
static int
mt76_txq_send_burst(struct mt76_dev *dev, struct mt76_sw_queue *sq,
                    struct mt76_txq *mtxq, bool *empty)
{
        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
        enum mt76_txq_id qid = mt76_txq_get_qid(txq);
        struct mt76_wcid *wcid = mtxq->wcid;
        struct mt76_queue *hwq = sq->q;
        struct ieee80211_tx_info *info;
        struct sk_buff *skb;
        int n_frames = 1, limit;
        struct ieee80211_tx_rate tx_rate;
        bool ampdu;
        bool probe;
        int idx;

        if (test_bit(MT_WCID_FLAG_PS, &wcid->flags)) {
                *empty = true;
                return 0;
        }

        skb = mt76_txq_dequeue(dev, mtxq, false);
        if (!skb) {
                *empty = true;
                return 0;
        }

        info = IEEE80211_SKB_CB(skb);
        if (!(wcid->tx_info & MT_WCID_TX_INFO_SET))
                ieee80211_get_tx_rates(txq->vif, txq->sta, skb,
                                       info->control.rates, 1);
        tx_rate = info->control.rates[0];

        probe = (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
        ampdu = IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_AMPDU;
        limit = ampdu ? 16 : 3;

        if (ampdu)
                mt76_check_agg_ssn(mtxq, skb);

        idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid, txq->sta);

        if (idx < 0)
                return idx;

        do {
                bool cur_ampdu;

                if (probe)
                        break;

                if (test_bit(MT76_RESET, &dev->state))
                        return -EBUSY;

                skb = mt76_txq_dequeue(dev, mtxq, false);
                if (!skb) {
                        *empty = true;
                        break;
                }

                info = IEEE80211_SKB_CB(skb);
                cur_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;

                if (ampdu != cur_ampdu ||
                    (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) {
                        skb_queue_tail(&mtxq->retry_q, skb);
                        break;
                }

                info->control.rates[0] = tx_rate;

                if (cur_ampdu)
                        mt76_check_agg_ssn(mtxq, skb);

                idx = dev->queue_ops->tx_queue_skb(dev, qid, skb, wcid,
                                                   txq->sta);
                if (idx < 0)
                        return idx;

                n_frames++;
        } while (n_frames < limit);

        if (!probe) {
                hwq->entry[idx].qid = sq - dev->q_tx;
                hwq->entry[idx].schedule = true;
                sq->swq_queued++;
        }

        dev->queue_ops->kick(dev, hwq);

        return n_frames;
}

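/*
 * Walk mac80211's txq schedule for one queue, sending a pending BAR for
 * each txq if needed and then a burst of frames, until the software queue
 * limit is reached or no txq has pending frames.
 */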
static int
mt76_txq_schedule_list(struct mt76_dev *dev, enum mt76_txq_id qid)
{
        struct mt76_sw_queue *sq = &dev->q_tx[qid];
        struct mt76_queue *hwq = sq->q;
        struct ieee80211_txq *txq;
        struct mt76_txq *mtxq;
        struct mt76_wcid *wcid;
        int ret = 0;

        spin_lock_bh(&hwq->lock);
        while (1) {
                bool empty = false;

                if (sq->swq_queued >= 4)
                        break;

                if (test_bit(MT76_RESET, &dev->state)) {
                        ret = -EBUSY;
                        break;
                }

                txq = ieee80211_next_txq(dev->hw, qid);
                if (!txq)
                        break;

                mtxq = (struct mt76_txq *)txq->drv_priv;
                wcid = mtxq->wcid;
                if (wcid && test_bit(MT_WCID_FLAG_PS, &wcid->flags))
                        continue;

                if (mtxq->send_bar && mtxq->aggr) {
                        struct ieee80211_txq *txq = mtxq_to_txq(mtxq);
                        struct ieee80211_sta *sta = txq->sta;
                        struct ieee80211_vif *vif = txq->vif;
                        u16 agg_ssn = mtxq->agg_ssn;
                        u8 tid = txq->tid;

                        mtxq->send_bar = false;
                        spin_unlock_bh(&hwq->lock);
                        ieee80211_send_bar(vif, sta->addr, tid, agg_ssn);
                        spin_lock_bh(&hwq->lock);
                }

                ret += mt76_txq_send_burst(dev, sq, mtxq, &empty);
                if (skb_queue_empty(&mtxq->retry_q))
                        empty = true;
                ieee80211_return_txq(dev->hw, txq, !empty);
        }
        spin_unlock_bh(&hwq->lock);

        return ret;
}

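/* Schedule pending frames for one of the four AC queues */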
void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid)
{
        struct mt76_sw_queue *sq = &dev->q_tx[qid];
        int len;

        if (qid >= 4)
                return;

        if (sq->swq_queued >= 4)
                return;

        rcu_read_lock();

        do {
                ieee80211_txq_schedule_start(dev->hw, qid);
                len = mt76_txq_schedule_list(dev, qid);
                ieee80211_txq_schedule_end(dev->hw, qid);
        } while (len > 0);

        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule);

void mt76_txq_schedule_all(struct mt76_dev *dev)
{
        int i;

        for (i = 0; i <= MT_TXQ_BK; i++)
                mt76_txq_schedule(dev, i);
}
EXPORT_SYMBOL_GPL(mt76_txq_schedule_all);

void mt76_tx_tasklet(unsigned long data)
{
        struct mt76_dev *dev = (struct mt76_dev *)data;

        mt76_txq_schedule_all(dev);
}

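/*
 * Flag each of a station's aggregation-enabled txqs so that a BAR is
 * sent to resync the receiver's window once transmission resumes.
 */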
void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
                         bool send_bar)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
                struct ieee80211_txq *txq = sta->txq[i];
                struct mt76_queue *hwq;
                struct mt76_txq *mtxq;

                if (!txq)
                        continue;

                mtxq = (struct mt76_txq *)txq->drv_priv;
                hwq = mtxq->swq->q;

                spin_lock_bh(&hwq->lock);
                mtxq->send_bar = mtxq->aggr && send_bar;
                spin_unlock_bh(&hwq->lock);
        }
}
EXPORT_SYMBOL_GPL(mt76_stop_tx_queues);

void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
        struct mt76_dev *dev = hw->priv;

        if (!test_bit(MT76_STATE_RUNNING, &dev->state))
                return;

        tasklet_schedule(&dev->tx_tasklet);
}
EXPORT_SYMBOL_GPL(mt76_wake_tx_queue);

void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
        struct mt76_txq *mtxq;
        struct sk_buff *skb;

        if (!txq)
                return;

        mtxq = (struct mt76_txq *)txq->drv_priv;

        while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL)
                ieee80211_free_txskb(dev->hw, skb);
}
EXPORT_SYMBOL_GPL(mt76_txq_remove);

void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq)
{
        struct mt76_txq *mtxq = (struct mt76_txq *)txq->drv_priv;

        skb_queue_head_init(&mtxq->retry_q);

        mtxq->swq = &dev->q_tx[mt76_txq_get_qid(txq)];
}
EXPORT_SYMBOL_GPL(mt76_txq_init);

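/* Map a mac80211 access category to the hardware queue index */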
u8 mt76_ac_to_hwq(u8 ac)
{
        static const u8 wmm_queue_map[] = {
                [IEEE80211_AC_BE] = 0,
                [IEEE80211_AC_BK] = 1,
                [IEEE80211_AC_VI] = 2,
                [IEEE80211_AC_VO] = 3,
        };

        if (WARN_ON(ac >= IEEE80211_NUM_ACS))
                return 0;

        return wmm_queue_map[ac];
}
EXPORT_SYMBOL_GPL(mt76_ac_to_hwq);
