root/drivers/net/wireless/mediatek/mt76/mt76.h

DEFINITIONS

This source file includes the following definitions.
  1. mt76_chip
  2. mt76_rev
  3. mt76_channel_state
  4. mt76_get_txwi_ptr
  5. mt76_incr
  6. mt76_decr
  7. mtxq_to_txq
  8. wcid_to_sta
  9. mt76_tx_skb_cb
  10. mt76_insert_hdr_pad
  11. mt76_is_skb_pktid
  12. mt76_tx_status_lock
  13. q2ep
  14. mt76u_bulk_msg

   1 /* SPDX-License-Identifier: ISC */
   2 /*
   3  * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name>
   4  */
   5 
   6 #ifndef __MT76_H
   7 #define __MT76_H
   8 
   9 #include <linux/kernel.h>
  10 #include <linux/io.h>
  11 #include <linux/spinlock.h>
  12 #include <linux/skbuff.h>
  13 #include <linux/leds.h>
  14 #include <linux/usb.h>
  15 #include <linux/average.h>
  16 #include <net/mac80211.h>
  17 #include "util.h"
  18 
  19 #define MT_TX_RING_SIZE     256
  20 #define MT_MCU_RING_SIZE    32
  21 #define MT_RX_BUF_SIZE      2048
  22 #define MT_SKB_HEAD_LEN     128
  23 
  24 struct mt76_dev;
  25 struct mt76_wcid;
  26 
  27 struct mt76_reg_pair {
  28         u32 reg;
  29         u32 value;
  30 };
  31 
  32 enum mt76_bus_type {
  33         MT76_BUS_MMIO,
  34         MT76_BUS_USB,
  35 };
  36 
  37 struct mt76_bus_ops {
  38         u32 (*rr)(struct mt76_dev *dev, u32 offset);
  39         void (*wr)(struct mt76_dev *dev, u32 offset, u32 val);
  40         u32 (*rmw)(struct mt76_dev *dev, u32 offset, u32 mask, u32 val);
  41         void (*write_copy)(struct mt76_dev *dev, u32 offset, const void *data,
  42                            int len);
  43         void (*read_copy)(struct mt76_dev *dev, u32 offset, void *data,
  44                           int len);
  45         int (*wr_rp)(struct mt76_dev *dev, u32 base,
  46                      const struct mt76_reg_pair *rp, int len);
  47         int (*rd_rp)(struct mt76_dev *dev, u32 base,
  48                      struct mt76_reg_pair *rp, int len);
  49         enum mt76_bus_type type;
  50 };
  51 
  52 #define mt76_is_usb(dev) ((dev)->mt76.bus->type == MT76_BUS_USB)
  53 #define mt76_is_mmio(dev) ((dev)->mt76.bus->type == MT76_BUS_MMIO)
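
/*
 * Editorial note: the "(dev)->mt76" form in the two macros above (and in the
 * mt76_rr()/mt76_wr() register helpers further down) expects "dev" to be a
 * driver-private structure that embeds struct mt76_dev in a member named
 * "mt76"; the __mt76_*() variants operate on a plain struct mt76_dev pointer
 * instead.
 */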
  54 
  55 enum mt76_txq_id {
  56         MT_TXQ_VO = IEEE80211_AC_VO,
  57         MT_TXQ_VI = IEEE80211_AC_VI,
  58         MT_TXQ_BE = IEEE80211_AC_BE,
  59         MT_TXQ_BK = IEEE80211_AC_BK,
  60         MT_TXQ_PSD,
  61         MT_TXQ_MCU,
  62         MT_TXQ_BEACON,
  63         MT_TXQ_CAB,
  64         MT_TXQ_FWDL,
  65         __MT_TXQ_MAX
  66 };
  67 
  68 enum mt76_rxq_id {
  69         MT_RXQ_MAIN,
  70         MT_RXQ_MCU,
  71         __MT_RXQ_MAX
  72 };
  73 
  74 struct mt76_queue_buf {
  75         dma_addr_t addr;
  76         int len;
  77 };
  78 
  79 struct mt76_tx_info {
  80         struct mt76_queue_buf buf[32];
  81         struct sk_buff *skb;
  82         int nbuf;
  83         u32 info;
  84 };
  85 
  86 struct mt76_queue_entry {
  87         union {
  88                 void *buf;
  89                 struct sk_buff *skb;
  90         };
  91         union {
  92                 struct mt76_txwi_cache *txwi;
  93                 struct urb *urb;
  94         };
  95         enum mt76_txq_id qid;
  96         bool skip_buf0:1;
  97         bool schedule:1;
  98         bool done:1;
  99 };
 100 
 101 struct mt76_queue_regs {
 102         u32 desc_base;
 103         u32 ring_size;
 104         u32 cpu_idx;
 105         u32 dma_idx;
 106 } __packed __aligned(4);
 107 
 108 struct mt76_queue {
 109         struct mt76_queue_regs __iomem *regs;
 110 
 111         spinlock_t lock;
 112         struct mt76_queue_entry *entry;
 113         struct mt76_desc *desc;
 114 
 115         u16 first;
 116         u16 head;
 117         u16 tail;
 118         int ndesc;
 119         int queued;
 120         int buf_size;
 121         bool stopped;
 122 
 123         u8 buf_offset;
 124         u8 hw_idx;
 125 
 126         dma_addr_t desc_dma;
 127         struct sk_buff *rx_head;
 128         struct page_frag_cache rx_page;
 129 };
 130 
 131 struct mt76_sw_queue {
 132         struct mt76_queue *q;
 133 
 134         struct list_head swq;
 135         int swq_queued;
 136 };
 137 
 138 struct mt76_mcu_ops {
 139         int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data,
 140                             int len, bool wait_resp);
 141         int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base,
 142                          const struct mt76_reg_pair *rp, int len);
 143         int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base,
 144                          struct mt76_reg_pair *rp, int len);
 145         int (*mcu_restart)(struct mt76_dev *dev);
 146 };
 147 
 148 struct mt76_queue_ops {
 149         int (*init)(struct mt76_dev *dev);
 150 
 151         int (*alloc)(struct mt76_dev *dev, struct mt76_queue *q,
 152                      int idx, int n_desc, int bufsize,
 153                      u32 ring_base);
 154 
 155         int (*add_buf)(struct mt76_dev *dev, struct mt76_queue *q,
 156                        struct mt76_queue_buf *buf, int nbufs, u32 info,
 157                        struct sk_buff *skb, void *txwi);
 158 
 159         int (*tx_queue_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
 160                             struct sk_buff *skb, struct mt76_wcid *wcid,
 161                             struct ieee80211_sta *sta);
 162 
 163         int (*tx_queue_skb_raw)(struct mt76_dev *dev, enum mt76_txq_id qid,
 164                                 struct sk_buff *skb, u32 tx_info);
 165 
 166         void *(*dequeue)(struct mt76_dev *dev, struct mt76_queue *q, bool flush,
 167                          int *len, u32 *info, bool *more);
 168 
 169         void (*rx_reset)(struct mt76_dev *dev, enum mt76_rxq_id qid);
 170 
 171         void (*tx_cleanup)(struct mt76_dev *dev, enum mt76_txq_id qid,
 172                            bool flush);
 173 
 174         void (*kick)(struct mt76_dev *dev, struct mt76_queue *q);
 175 };
 176 
 177 enum mt76_wcid_flags {
 178         MT_WCID_FLAG_CHECK_PS,
 179         MT_WCID_FLAG_PS,
 180 };
 181 
 182 #define MT76_N_WCIDS 128
 183 
 184 DECLARE_EWMA(signal, 10, 8);
 185 
 186 #define MT_WCID_TX_INFO_RATE            GENMASK(15, 0)
 187 #define MT_WCID_TX_INFO_NSS             GENMASK(17, 16)
 188 #define MT_WCID_TX_INFO_TXPWR_ADJ       GENMASK(25, 18)
 189 #define MT_WCID_TX_INFO_SET             BIT(31)
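
/*
 * Editorial sketch (not part of the original header): how the fixed-rate
 * tx_info word described by the masks above could be composed.  The helper
 * name and the raw rate/nss parameters are made up for illustration;
 * FIELD_PREP() is assumed to be available via util.h, as in the mainline
 * tree.
 */
static inline u32 mt76_foo_make_tx_info(u16 rate, u8 nss)
{
        return FIELD_PREP(MT_WCID_TX_INFO_RATE, rate) |
               FIELD_PREP(MT_WCID_TX_INFO_NSS, nss) |
               MT_WCID_TX_INFO_SET;
}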
 190 
 191 struct mt76_wcid {
 192         struct mt76_rx_tid __rcu *aggr[IEEE80211_NUM_TIDS];
 193 
 194         struct work_struct aggr_work;
 195 
 196         unsigned long flags;
 197 
 198         struct ewma_signal rssi;
 199         int inactive_count;
 200 
 201         u8 idx;
 202         u8 hw_key_idx;
 203 
 204         u8 sta:1;
 205 
 206         u8 rx_check_pn;
 207         u8 rx_key_pn[IEEE80211_NUM_TIDS][6];
 208         u16 cipher;
 209 
 210         u32 tx_info;
 211         bool sw_iv;
 212 
 213         u8 packet_id;
 214 };
 215 
 216 struct mt76_txq {
 217         struct mt76_sw_queue *swq;
 218         struct mt76_wcid *wcid;
 219 
 220         struct sk_buff_head retry_q;
 221 
 222         u16 agg_ssn;
 223         bool send_bar;
 224         bool aggr;
 225 };
 226 
 227 struct mt76_txwi_cache {
 228         struct list_head list;
 229         dma_addr_t dma_addr;
 230 
 231         struct sk_buff *skb;
 232 };
 233 
 234 struct mt76_rx_tid {
 235         struct rcu_head rcu_head;
 236 
 237         struct mt76_dev *dev;
 238 
 239         spinlock_t lock;
 240         struct delayed_work reorder_work;
 241 
 242         u16 head;
 243         u8 size;
 244         u8 nframes;
 245 
 246         u8 started:1, stopped:1, timer_pending:1;
 247 
 248         struct sk_buff *reorder_buf[];
 249 };
 250 
 251 #define MT_TX_CB_DMA_DONE               BIT(0)
 252 #define MT_TX_CB_TXS_DONE               BIT(1)
 253 #define MT_TX_CB_TXS_FAILED             BIT(2)
 254 
 255 #define MT_PACKET_ID_MASK               GENMASK(6, 0)
 256 #define MT_PACKET_ID_NO_ACK             0
 257 #define MT_PACKET_ID_NO_SKB             1
 258 #define MT_PACKET_ID_FIRST              2
 259 #define MT_PACKET_ID_HAS_RATE           BIT(7)
 260 
 261 #define MT_TX_STATUS_SKB_TIMEOUT        HZ
 262 
 263 struct mt76_tx_cb {
 264         unsigned long jiffies;
 265         u8 wcid;
 266         u8 pktid;
 267         u8 flags;
 268 };
 269 
 270 enum {
 271         MT76_STATE_INITIALIZED,
 272         MT76_STATE_RUNNING,
 273         MT76_STATE_MCU_RUNNING,
 274         MT76_SCANNING,
 275         MT76_RESET,
 276         MT76_REMOVED,
 277         MT76_READING_STATS,
 278 };
 279 
 280 struct mt76_hw_cap {
 281         bool has_2ghz;
 282         bool has_5ghz;
 283 };
 284 
 285 #define MT_TXWI_NO_FREE                 BIT(0)
 286 
 287 struct mt76_driver_ops {
 288         bool tx_aligned4_skbs;
 289         u32 txwi_flags;
 290         u16 txwi_size;
 291 
 292         void (*update_survey)(struct mt76_dev *dev);
 293 
 294         int (*tx_prepare_skb)(struct mt76_dev *dev, void *txwi_ptr,
 295                               enum mt76_txq_id qid, struct mt76_wcid *wcid,
 296                               struct ieee80211_sta *sta,
 297                               struct mt76_tx_info *tx_info);
 298 
 299         void (*tx_complete_skb)(struct mt76_dev *dev, enum mt76_txq_id qid,
 300                                 struct mt76_queue_entry *e);
 301 
 302         bool (*tx_status_data)(struct mt76_dev *dev, u8 *update);
 303 
 304         void (*rx_skb)(struct mt76_dev *dev, enum mt76_rxq_id q,
 305                        struct sk_buff *skb);
 306 
 307         void (*rx_poll_complete)(struct mt76_dev *dev, enum mt76_rxq_id q);
 308 
 309         void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta,
 310                        bool ps);
 311 
 312         int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif,
 313                        struct ieee80211_sta *sta);
 314 
 315         void (*sta_assoc)(struct mt76_dev *dev, struct ieee80211_vif *vif,
 316                           struct ieee80211_sta *sta);
 317 
 318         void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif,
 319                            struct ieee80211_sta *sta);
 320 };
 321 
 322 struct mt76_channel_state {
 323         u64 cc_active;
 324         u64 cc_busy;
 325 };
 326 
 327 struct mt76_sband {
 328         struct ieee80211_supported_band sband;
 329         struct mt76_channel_state *chan;
 330 };
 331 
 332 struct mt76_rate_power {
 333         union {
 334                 struct {
 335                         s8 cck[4];
 336                         s8 ofdm[8];
 337                         s8 stbc[10];
 338                         s8 ht[16];
 339                         s8 vht[10];
 340                 };
 341                 s8 all[48];
 342         };
 343 };
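
/*
 * Editorial sketch: the union above lets per-rate power limits be handled
 * uniformly through the .all[] view (4 + 8 + 10 + 16 + 10 == 48 entries).
 * mt76_foo_cap_rate_power() is a made-up helper that clamps every rate to a
 * common limit, similar to how the per-chip txpower code walks this table.
 */
static inline void
mt76_foo_cap_rate_power(struct mt76_rate_power *rp, s8 limit)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(rp->all); i++)
                rp->all[i] = min(rp->all[i], limit);
}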
 344 
 345 /* addr req mask */
 346 #define MT_VEND_TYPE_EEPROM     BIT(31)
 347 #define MT_VEND_TYPE_CFG        BIT(30)
 348 #define MT_VEND_TYPE_MASK       (MT_VEND_TYPE_EEPROM | MT_VEND_TYPE_CFG)
 349 
 350 #define MT_VEND_ADDR(type, n)   (MT_VEND_TYPE_##type | (n))
 351 enum mt_vendor_req {
 352         MT_VEND_DEV_MODE =      0x1,
 353         MT_VEND_WRITE =         0x2,
 354         MT_VEND_MULTI_WRITE =   0x6,
 355         MT_VEND_MULTI_READ =    0x7,
 356         MT_VEND_READ_EEPROM =   0x9,
 357         MT_VEND_WRITE_FCE =     0x42,
 358         MT_VEND_WRITE_CFG =     0x46,
 359         MT_VEND_READ_CFG =      0x47,
 360 };
 361 
 362 enum mt76u_in_ep {
 363         MT_EP_IN_PKT_RX,
 364         MT_EP_IN_CMD_RESP,
 365         __MT_EP_IN_MAX,
 366 };
 367 
 368 enum mt76u_out_ep {
 369         MT_EP_OUT_INBAND_CMD,
 370         MT_EP_OUT_AC_BE,
 371         MT_EP_OUT_AC_BK,
 372         MT_EP_OUT_AC_VI,
 373         MT_EP_OUT_AC_VO,
 374         MT_EP_OUT_HCCA,
 375         __MT_EP_OUT_MAX,
 376 };
 377 
 378 #define MT_TX_SG_MAX_SIZE       8
 379 #define MT_RX_SG_MAX_SIZE       1
 380 #define MT_NUM_TX_ENTRIES       256
 381 #define MT_NUM_RX_ENTRIES       128
 382 #define MCU_RESP_URB_SIZE       1024
 383 struct mt76_usb {
 384         struct mutex usb_ctrl_mtx;
 385         union {
 386                 u8 data[32];
 387                 __le32 reg_val;
 388         };
 389 
 390         struct tasklet_struct rx_tasklet;
 391         struct delayed_work stat_work;
 392 
 393         u8 out_ep[__MT_EP_OUT_MAX];
 394         u8 in_ep[__MT_EP_IN_MAX];
 395         bool sg_en;
 396 
 397         struct mt76u_mcu {
 398                 struct mutex mutex;
 399                 u8 *data;
 400                 u32 msg_seq;
 401 
 402                 /* multiple reads */
 403                 struct mt76_reg_pair *rp;
 404                 int rp_len;
 405                 u32 base;
 406                 bool burst;
 407         } mcu;
 408 };
 409 
 410 struct mt76_mmio {
 411         struct mt76e_mcu {
 412                 struct mutex mutex;
 413 
 414                 wait_queue_head_t wait;
 415                 struct sk_buff_head res_q;
 416 
 417                 u32 msg_seq;
 418         } mcu;
 419         void __iomem *regs;
 420         spinlock_t irq_lock;
 421         u32 irqmask;
 422 };
 423 
 424 struct mt76_dev {
 425         struct ieee80211_hw *hw;
 426         struct cfg80211_chan_def chandef;
 427         struct ieee80211_channel *main_chan;
 428 
 429         spinlock_t lock;
 430         spinlock_t cc_lock;
 431 
 432         struct mutex mutex;
 433 
 434         const struct mt76_bus_ops *bus;
 435         const struct mt76_driver_ops *drv;
 436         const struct mt76_mcu_ops *mcu_ops;
 437         struct device *dev;
 438 
 439         struct net_device napi_dev;
 440         spinlock_t rx_lock;
 441         struct napi_struct napi[__MT_RXQ_MAX];
 442         struct sk_buff_head rx_skb[__MT_RXQ_MAX];
 443 
 444         struct list_head txwi_cache;
 445         struct mt76_sw_queue q_tx[__MT_TXQ_MAX];
 446         struct mt76_queue q_rx[__MT_RXQ_MAX];
 447         const struct mt76_queue_ops *queue_ops;
 448         int tx_dma_idx[4];
 449 
 450         struct tasklet_struct tx_tasklet;
 451         struct napi_struct tx_napi;
 452         struct delayed_work mac_work;
 453 
 454         wait_queue_head_t tx_wait;
 455         struct sk_buff_head status_list;
 456 
 457         unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG];
 458 
 459         struct mt76_wcid global_wcid;
 460         struct mt76_wcid __rcu *wcid[MT76_N_WCIDS];
 461 
 462         u8 macaddr[ETH_ALEN];
 463         u32 rev;
 464         unsigned long state;
 465 
 466         u8 antenna_mask;
 467         u16 chainmask;
 468 
 469         struct tasklet_struct pre_tbtt_tasklet;
 470         int beacon_int;
 471         u8 beacon_mask;
 472 
 473         struct mt76_sband sband_2g;
 474         struct mt76_sband sband_5g;
 475         struct debugfs_blob_wrapper eeprom;
 476         struct debugfs_blob_wrapper otp;
 477         struct mt76_hw_cap cap;
 478 
 479         struct mt76_rate_power rate_power;
 480         int txpower_conf;
 481         int txpower_cur;
 482 
 483         enum nl80211_dfs_regions region;
 484 
 485         u32 debugfs_reg;
 486 
 487         struct led_classdev led_cdev;
 488         char led_name[32];
 489         bool led_al;
 490         u8 led_pin;
 491 
 492         u8 csa_complete;
 493 
 494         ktime_t survey_time;
 495 
 496         u32 rxfilter;
 497 
 498         union {
 499                 struct mt76_mmio mmio;
 500                 struct mt76_usb usb;
 501         };
 502 };
 503 
 504 enum mt76_phy_type {
 505         MT_PHY_TYPE_CCK,
 506         MT_PHY_TYPE_OFDM,
 507         MT_PHY_TYPE_HT,
 508         MT_PHY_TYPE_HT_GF,
 509         MT_PHY_TYPE_VHT,
 510 };
 511 
 512 struct mt76_rx_status {
 513         struct mt76_wcid *wcid;
 514 
 515         unsigned long reorder_time;
 516 
 517         u8 iv[6];
 518 
 519         u8 aggr:1;
 520         u8 tid;
 521         u16 seqno;
 522 
 523         u16 freq;
 524         u32 flag;
 525         u8 enc_flags;
 526         u8 encoding:2, bw:3;
 527         u8 rate_idx;
 528         u8 nss;
 529         u8 band;
 530         s8 signal;
 531         u8 chains;
 532         s8 chain_signal[IEEE80211_MAX_CHAINS];
 533 };
 534 
 535 #define __mt76_rr(dev, ...)     (dev)->bus->rr((dev), __VA_ARGS__)
 536 #define __mt76_wr(dev, ...)     (dev)->bus->wr((dev), __VA_ARGS__)
 537 #define __mt76_rmw(dev, ...)    (dev)->bus->rmw((dev), __VA_ARGS__)
 538 #define __mt76_wr_copy(dev, ...)        (dev)->bus->write_copy((dev), __VA_ARGS__)
 539 #define __mt76_rr_copy(dev, ...)        (dev)->bus->read_copy((dev), __VA_ARGS__)
 540 
 541 #define __mt76_set(dev, offset, val)    __mt76_rmw(dev, offset, 0, val)
 542 #define __mt76_clear(dev, offset, val)  __mt76_rmw(dev, offset, val, 0)
 543 
 544 #define mt76_rr(dev, ...)       (dev)->mt76.bus->rr(&((dev)->mt76), __VA_ARGS__)
 545 #define mt76_wr(dev, ...)       (dev)->mt76.bus->wr(&((dev)->mt76), __VA_ARGS__)
 546 #define mt76_rmw(dev, ...)      (dev)->mt76.bus->rmw(&((dev)->mt76), __VA_ARGS__)
 547 #define mt76_wr_copy(dev, ...)  (dev)->mt76.bus->write_copy(&((dev)->mt76), __VA_ARGS__)
 548 #define mt76_rr_copy(dev, ...)  (dev)->mt76.bus->read_copy(&((dev)->mt76), __VA_ARGS__)
 549 #define mt76_wr_rp(dev, ...)    (dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__)
 550 #define mt76_rd_rp(dev, ...)    (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__)
 551 
 552 #define mt76_mcu_send_msg(dev, ...)     (dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__)
 553 #define __mt76_mcu_send_msg(dev, ...)   (dev)->mcu_ops->mcu_send_msg((dev), __VA_ARGS__)
 554 #define mt76_mcu_restart(dev, ...)      (dev)->mt76.mcu_ops->mcu_restart(&((dev)->mt76))
 555 #define __mt76_mcu_restart(dev, ...)    (dev)->mcu_ops->mcu_restart((dev))
 556 
 557 #define mt76_set(dev, offset, val)      mt76_rmw(dev, offset, 0, val)
 558 #define mt76_clear(dev, offset, val)    mt76_rmw(dev, offset, val, 0)
 559 
 560 #define mt76_get_field(_dev, _reg, _field)              \
  561         FIELD_GET(_field, mt76_rr(_dev, _reg))
 562 
 563 #define mt76_rmw_field(_dev, _reg, _field, _val)        \
 564         mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
 565 
 566 #define __mt76_rmw_field(_dev, _reg, _field, _val)      \
 567         __mt76_rmw(_dev, _reg, _field, FIELD_PREP(_field, _val))
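
/*
 * Editorial sketch: typical use of the field helpers above.  MT_FOO_CFG and
 * MT_FOO_CFG_TX_CHAINS are invented register/field names; real definitions
 * live in the per-chip regs.h headers.
 */
#define MT_FOO_CFG                      0x1234
#define MT_FOO_CFG_TX_CHAINS            GENMASK(3, 0)

static inline void
mt76_foo_set_tx_chains(struct mt76_dev *dev, u8 chains)
{
        /* read-modify-write MT_FOO_CFG, replacing only the TX_CHAINS field */
        __mt76_rmw_field(dev, MT_FOO_CFG, MT_FOO_CFG_TX_CHAINS, chains);
}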
 568 
 569 #define mt76_hw(dev) (dev)->mt76.hw
 570 
 571 bool __mt76_poll(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
 572                  int timeout);
 573 
 574 #define mt76_poll(dev, ...) __mt76_poll(&((dev)->mt76), __VA_ARGS__)
 575 
 576 bool __mt76_poll_msec(struct mt76_dev *dev, u32 offset, u32 mask, u32 val,
 577                       int timeout);
 578 
 579 #define mt76_poll_msec(dev, ...) __mt76_poll_msec(&((dev)->mt76), __VA_ARGS__)
 580 
 581 void mt76_mmio_init(struct mt76_dev *dev, void __iomem *regs);
 582 void mt76_pci_disable_aspm(struct pci_dev *pdev);
 583 
 584 static inline u16 mt76_chip(struct mt76_dev *dev)
 585 {
 586         return dev->rev >> 16;
 587 }
 588 
 589 static inline u16 mt76_rev(struct mt76_dev *dev)
 590 {
 591         return dev->rev & 0xffff;
 592 }
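
/*
 * Editorial sketch: dev->rev packs the chip id in the upper 16 bits and the
 * hardware revision in the lower 16, so chip checks look like this.
 * mt76_foo_is_mt7612() is a made-up name; drivers normally go through the
 * mt76xx_chip()/mt76xx_rev() wrappers below.
 */
static inline bool mt76_foo_is_mt7612(struct mt76_dev *dev)
{
        return mt76_chip(dev) == 0x7612;
}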
 593 
 594 #define mt76xx_chip(dev) mt76_chip(&((dev)->mt76))
 595 #define mt76xx_rev(dev) mt76_rev(&((dev)->mt76))
 596 
 597 #define mt76_init_queues(dev)           (dev)->mt76.queue_ops->init(&((dev)->mt76))
 598 #define mt76_queue_alloc(dev, ...)      (dev)->mt76.queue_ops->alloc(&((dev)->mt76), __VA_ARGS__)
 599 #define mt76_tx_queue_skb_raw(dev, ...) (dev)->mt76.queue_ops->tx_queue_skb_raw(&((dev)->mt76), __VA_ARGS__)
 600 #define mt76_tx_queue_skb(dev, ...)     (dev)->mt76.queue_ops->tx_queue_skb(&((dev)->mt76), __VA_ARGS__)
 601 #define mt76_queue_rx_reset(dev, ...)   (dev)->mt76.queue_ops->rx_reset(&((dev)->mt76), __VA_ARGS__)
 602 #define mt76_queue_tx_cleanup(dev, ...) (dev)->mt76.queue_ops->tx_cleanup(&((dev)->mt76), __VA_ARGS__)
 603 #define mt76_queue_kick(dev, ...)       (dev)->mt76.queue_ops->kick(&((dev)->mt76), __VA_ARGS__)
 604 
 605 static inline struct mt76_channel_state *
 606 mt76_channel_state(struct mt76_dev *dev, struct ieee80211_channel *c)
 607 {
 608         struct mt76_sband *msband;
 609         int idx;
 610 
 611         if (c->band == NL80211_BAND_2GHZ)
 612                 msband = &dev->sband_2g;
 613         else
 614                 msband = &dev->sband_5g;
 615 
 616         idx = c - &msband->sband.channels[0];
 617         return &msband->chan[idx];
 618 }
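
/*
 * Editorial sketch: cc_active/cc_busy are cumulative airtime counters that
 * the survey code maintains per channel.  A made-up helper could use them
 * like this to check whether a channel spent more than half of its active
 * time busy.
 */
static inline bool
mt76_foo_channel_mostly_busy(struct mt76_dev *dev, struct ieee80211_channel *c)
{
        struct mt76_channel_state *state = mt76_channel_state(dev, c);

        return state->cc_busy * 2 > state->cc_active;
}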
 619 
 620 struct mt76_dev *mt76_alloc_device(struct device *pdev, unsigned int size,
 621                                    const struct ieee80211_ops *ops,
 622                                    const struct mt76_driver_ops *drv_ops);
 623 int mt76_register_device(struct mt76_dev *dev, bool vht,
 624                          struct ieee80211_rate *rates, int n_rates);
 625 void mt76_unregister_device(struct mt76_dev *dev);
 626 void mt76_free_device(struct mt76_dev *dev);
 627 
 628 struct dentry *mt76_register_debugfs(struct mt76_dev *dev);
 629 void mt76_seq_puts_array(struct seq_file *file, const char *str,
 630                          s8 *val, int len);
 631 
 632 int mt76_eeprom_init(struct mt76_dev *dev, int len);
 633 void mt76_eeprom_override(struct mt76_dev *dev);
 634 
 635 static inline u8 *
 636 mt76_get_txwi_ptr(struct mt76_dev *dev, struct mt76_txwi_cache *t)
 637 {
 638         return (u8 *)t - dev->drv->txwi_size;
 639 }
 640 
 641 /* increment with wrap-around */
 642 static inline int mt76_incr(int val, int size)
 643 {
 644         return (val + 1) & (size - 1);
 645 }
 646 
 647 /* decrement with wrap-around */
 648 static inline int mt76_decr(int val, int size)
 649 {
 650         return (val - 1) & (size - 1);
 651 }
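
/*
 * Editorial sketch: the wrap-around helpers above rely on the ring size being
 * a power of two (MT_TX_RING_SIZE and friends satisfy this).  A made-up
 * producer-side helper might advance a DMA ring like this:
 */
static inline void mt76_foo_ring_advance(struct mt76_queue *q)
{
        q->head = mt76_incr(q->head, q->ndesc);
        q->queued++;
}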
 652 
 653 u8 mt76_ac_to_hwq(u8 ac);
 654 
 655 static inline struct ieee80211_txq *
 656 mtxq_to_txq(struct mt76_txq *mtxq)
 657 {
 658         void *ptr = mtxq;
 659 
 660         return container_of(ptr, struct ieee80211_txq, drv_priv);
 661 }
 662 
 663 static inline struct ieee80211_sta *
 664 wcid_to_sta(struct mt76_wcid *wcid)
 665 {
 666         void *ptr = wcid;
 667 
 668         if (!wcid || !wcid->sta)
 669                 return NULL;
 670 
 671         return container_of(ptr, struct ieee80211_sta, drv_priv);
 672 }
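
/*
 * Editorial note: the container_of() above is only valid because the chip
 * drivers keep their struct mt76_wcid at the very start of the per-station
 * drv_priv area (e.g. struct mt76x02_sta), so a wcid pointer doubles as a
 * drv_priv pointer.
 */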
 673 
 674 static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb)
 675 {
 676         BUILD_BUG_ON(sizeof(struct mt76_tx_cb) >
 677                      sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data));
 678         return ((void *)IEEE80211_SKB_CB(skb)->status.status_driver_data);
 679 }
 680 
 681 static inline void mt76_insert_hdr_pad(struct sk_buff *skb)
 682 {
 683         int len = ieee80211_get_hdrlen_from_skb(skb);
 684 
 685         if (len % 4 == 0)
 686                 return;
 687 
 688         skb_push(skb, 2);
 689         memmove(skb->data, skb->data + 2, len);
 690 
 691         skb->data[len] = 0;
 692         skb->data[len + 1] = 0;
 693 }
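
/*
 * Editorial note: for a header length that is not a multiple of four (e.g. a
 * 26-byte QoS data header), the helper above grows the skb by two bytes,
 * moves the 802.11 header to the new front and leaves two zero pad bytes
 * between header and payload, keeping the frame body 4-byte aligned:
 *
 *   before:  [ hdr (26) ][ body ... ]
 *   after:   [ hdr (26) ][ 0 0 ][ body ... ]
 */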
 694 
 695 static inline bool mt76_is_skb_pktid(u8 pktid)
 696 {
 697         if (pktid & MT_PACKET_ID_HAS_RATE)
 698                 return false;
 699 
 700         return pktid >= MT_PACKET_ID_FIRST;
 701 }
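
/*
 * Editorial sketch of how packet ids cycle through the space defined above:
 * ids 0 and 1 are reserved (NO_ACK / NO_SKB) and BIT(7) flags fixed-rate ids,
 * so ids that track an skb run from MT_PACKET_ID_FIRST up to
 * MT_PACKET_ID_MASK.  mt76_foo_next_pktid() is a made-up name mirroring the
 * allocation done by the tx status code.
 */
static inline u8 mt76_foo_next_pktid(struct mt76_wcid *wcid)
{
        u8 pid;

        pid = (wcid->packet_id + 1) & MT_PACKET_ID_MASK;
        if (pid < MT_PACKET_ID_FIRST)
                pid = MT_PACKET_ID_FIRST;
        wcid->packet_id = pid;

        return pid;
}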
 702 
 703 void mt76_rx(struct mt76_dev *dev, enum mt76_rxq_id q, struct sk_buff *skb);
 704 void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta,
 705              struct mt76_wcid *wcid, struct sk_buff *skb);
 706 void mt76_txq_init(struct mt76_dev *dev, struct ieee80211_txq *txq);
 707 void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq);
 708 void mt76_wake_tx_queue(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
 709 void mt76_stop_tx_queues(struct mt76_dev *dev, struct ieee80211_sta *sta,
 710                          bool send_bar);
 711 void mt76_txq_schedule(struct mt76_dev *dev, enum mt76_txq_id qid);
 712 void mt76_txq_schedule_all(struct mt76_dev *dev);
 713 void mt76_tx_tasklet(unsigned long data);
 714 void mt76_release_buffered_frames(struct ieee80211_hw *hw,
 715                                   struct ieee80211_sta *sta,
 716                                   u16 tids, int nframes,
 717                                   enum ieee80211_frame_release_type reason,
 718                                   bool more_data);
 719 bool mt76_has_tx_pending(struct mt76_dev *dev);
 720 void mt76_set_channel(struct mt76_dev *dev);
 721 int mt76_get_survey(struct ieee80211_hw *hw, int idx,
 722                     struct survey_info *survey);
 723 void mt76_set_stream_caps(struct mt76_dev *dev, bool vht);
 724 
 725 int mt76_rx_aggr_start(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid,
 726                        u16 ssn, u8 size);
 727 void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid);
 728 
 729 void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid,
 730                          struct ieee80211_key_conf *key);
 731 
 732 void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list)
 733                          __acquires(&dev->status_list.lock);
 734 void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list)
 735                            __releases(&dev->status_list.lock);
 736 
 737 int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid,
 738                            struct sk_buff *skb);
 739 struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev,
 740                                        struct mt76_wcid *wcid, int pktid,
 741                                        struct sk_buff_head *list);
 742 void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb,
 743                              struct sk_buff_head *list);
 744 void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb);
 745 void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid,
 746                           bool flush);
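
/*
 * Editorial sketch of the status-list locking pattern: pending tx status skbs
 * are collected under the lock and only reported to mac80211 once it is
 * released.  mt76_foo_flush_status() is a made-up wrapper that mirrors what
 * mt76_tx_status_check() does when asked to flush.
 */
static inline void mt76_foo_flush_status(struct mt76_dev *dev)
{
        struct sk_buff_head list;

        mt76_tx_status_lock(dev, &list);
        /* a negative pktid moves every pending status skb onto the list */
        mt76_tx_status_skb_get(dev, NULL, -1, &list);
        mt76_tx_status_unlock(dev, &list);
}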
 747 int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 748                    struct ieee80211_sta *sta,
 749                    enum ieee80211_sta_state old_state,
 750                    enum ieee80211_sta_state new_state);
 751 void __mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif,
 752                        struct ieee80211_sta *sta);
 753 
 754 int mt76_get_min_avg_rssi(struct mt76_dev *dev);
 755 
 756 int mt76_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 757                      int *dbm);
 758 
 759 void mt76_csa_check(struct mt76_dev *dev);
 760 void mt76_csa_finish(struct mt76_dev *dev);
 761 
 762 int mt76_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set);
 763 void mt76_insert_ccmp_hdr(struct sk_buff *skb, u8 key_id);
 764 int mt76_get_rate(struct mt76_dev *dev,
 765                   struct ieee80211_supported_band *sband,
 766                   int idx, bool cck);
 767 void mt76_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
 768                   const u8 *mac);
 769 void mt76_sw_scan_complete(struct ieee80211_hw *hw,
 770                            struct ieee80211_vif *vif);
 771 
 772 /* internal */
 773 void mt76_tx_free(struct mt76_dev *dev);
 774 struct mt76_txwi_cache *mt76_get_txwi(struct mt76_dev *dev);
 775 void mt76_put_txwi(struct mt76_dev *dev, struct mt76_txwi_cache *t);
 776 void mt76_rx_complete(struct mt76_dev *dev, struct sk_buff_head *frames,
 777                       struct napi_struct *napi);
 778 void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q,
 779                            struct napi_struct *napi);
 780 void mt76_rx_aggr_reorder(struct sk_buff *skb, struct sk_buff_head *frames);
 781 
 782 /* usb */
 783 static inline bool mt76u_urb_error(struct urb *urb)
 784 {
 785         return urb->status &&
 786                urb->status != -ECONNRESET &&
 787                urb->status != -ESHUTDOWN &&
 788                urb->status != -ENOENT;
 789 }
 790 
 791 /* Map hardware queues to usb endpoints */
 792 static inline u8 q2ep(u8 qid)
 793 {
 794         /* TODO: take management packets to queue 5 */
 795         return qid + 1;
 796 }
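
/*
 * Editorial note: the qid passed in here is the hardware queue index
 * (mt76_queue.hw_idx), so with the layout of enum mt76u_out_ep the data
 * queues land on bulk-out endpoints 1-4 while endpoint 0 stays reserved for
 * MT_EP_OUT_INBAND_CMD.
 */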
 797 
 798 static inline int
 799 mt76u_bulk_msg(struct mt76_dev *dev, void *data, int len, int *actual_len,
 800                int timeout)
 801 {
 802         struct usb_interface *uintf = to_usb_interface(dev->dev);
 803         struct usb_device *udev = interface_to_usbdev(uintf);
 804         struct mt76_usb *usb = &dev->usb;
 805         unsigned int pipe;
 806 
 807         if (actual_len)
 808                 pipe = usb_rcvbulkpipe(udev, usb->in_ep[MT_EP_IN_CMD_RESP]);
 809         else
 810                 pipe = usb_sndbulkpipe(udev, usb->out_ep[MT_EP_OUT_INBAND_CMD]);
 811 
 812         return usb_bulk_msg(udev, pipe, data, len, actual_len, timeout);
 813 }
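
/*
 * Editorial sketch: pushing an in-band command buffer through the helper
 * above.  The wrapper name and the 500 ms timeout are made up; passing a
 * NULL actual_len selects the bulk-out command endpoint, a non-NULL one the
 * bulk-in response endpoint.
 */
static inline int mt76_foo_mcu_send(struct mt76_dev *dev, void *data, int len)
{
        return mt76u_bulk_msg(dev, data, len, NULL, 500);
}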
 814 
 815 int mt76u_vendor_request(struct mt76_dev *dev, u8 req,
 816                          u8 req_type, u16 val, u16 offset,
 817                          void *buf, size_t len);
 818 void mt76u_single_wr(struct mt76_dev *dev, const u8 req,
 819                      const u16 offset, const u32 val);
 820 int mt76u_init(struct mt76_dev *dev, struct usb_interface *intf);
 821 int mt76u_alloc_queues(struct mt76_dev *dev);
 822 void mt76u_stop_tx(struct mt76_dev *dev);
 823 void mt76u_stop_rx(struct mt76_dev *dev);
 824 int mt76u_resume_rx(struct mt76_dev *dev);
 825 void mt76u_queues_deinit(struct mt76_dev *dev);
 826 
 827 struct sk_buff *
 828 mt76_mcu_msg_alloc(const void *data, int head_len,
 829                    int data_len, int tail_len);
 830 void mt76_mcu_rx_event(struct mt76_dev *dev, struct sk_buff *skb);
 831 struct sk_buff *mt76_mcu_get_response(struct mt76_dev *dev,
 832                                       unsigned long expires);
 833 
 834 void mt76_set_irq_mask(struct mt76_dev *dev, u32 addr, u32 clear, u32 set);
 835 
 836 #endif
