Lines matching refs: txq

2493 void netif_schedule_queue(struct netdev_queue *txq);
2524 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_start_all_queues() local
2525 netif_tx_start_queue(txq); in netif_tx_start_all_queues()
2548 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_wake_all_queues() local
2549 netif_tx_wake_queue(txq); in netif_tx_wake_all_queues()
2579 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_stop_all_queues() local
2580 netif_tx_stop_queue(txq); in netif_tx_stop_all_queues()
2790 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in netif_start_subqueue() local
2792 netif_tx_start_queue(txq); in netif_start_subqueue()
2804 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in netif_stop_subqueue() local
2805 netif_tx_stop_queue(txq); in netif_stop_subqueue()
2818 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in __netif_subqueue_stopped() local
2820 return netif_tx_queue_stopped(txq); in __netif_subqueue_stopped()
2864 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
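
netif_set_real_num_tx_queues() tells the stack how many TX queues the device actually uses at runtime; the value must lie between 1 and the num_tx_queues the netdev was allocated with. A probe-time sketch, assuming a hypothetical device whose real queue count is only known after talking to the hardware:

#include <linux/etherdevice.h>
#include <linux/netdevice.h>

#define FOO_MAX_TX_QUEUES	16	/* hypothetical hardware maximum */

struct foo_priv {
	int placeholder;		/* hypothetical private state */
};

static int foo_create_netdev(unsigned int hw_tx_queues)
{
	struct net_device *dev;
	int err;

	/* Allocate with room for the maximum number of TX queues ... */
	dev = alloc_etherdev_mq(sizeof(struct foo_priv), FOO_MAX_TX_QUEUES);
	if (!dev)
		return -ENOMEM;

	/* ... then report how many this particular device really has
	 * (hw_tx_queues must be >= 1 and <= FOO_MAX_TX_QUEUES). */
	err = netif_set_real_num_tx_queues(dev, hw_tx_queues);
	if (err)
		goto err_free;

	err = register_netdev(dev);
	if (err)
		goto err_free;

	return 0;

err_free:
	free_netdev(dev);
	return err;
}
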
2984 struct netdev_queue *txq, int *ret);
3167 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) in __netif_tx_lock() argument
3169 spin_lock(&txq->_xmit_lock); in __netif_tx_lock()
3170 txq->xmit_lock_owner = cpu; in __netif_tx_lock()
3173 static inline void __netif_tx_lock_bh(struct netdev_queue *txq) in __netif_tx_lock_bh() argument
3175 spin_lock_bh(&txq->_xmit_lock); in __netif_tx_lock_bh()
3176 txq->xmit_lock_owner = smp_processor_id(); in __netif_tx_lock_bh()
3179 static inline bool __netif_tx_trylock(struct netdev_queue *txq) in __netif_tx_trylock() argument
3181 bool ok = spin_trylock(&txq->_xmit_lock); in __netif_tx_trylock()
3183 txq->xmit_lock_owner = smp_processor_id(); in __netif_tx_trylock()
3187 static inline void __netif_tx_unlock(struct netdev_queue *txq) in __netif_tx_unlock() argument
3189 txq->xmit_lock_owner = -1; in __netif_tx_unlock()
3190 spin_unlock(&txq->_xmit_lock); in __netif_tx_unlock()
3193 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) in __netif_tx_unlock_bh() argument
3195 txq->xmit_lock_owner = -1; in __netif_tx_unlock_bh()
3196 spin_unlock_bh(&txq->_xmit_lock); in __netif_tx_unlock_bh()
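
Lines 3167-3196 are the per-queue xmit lock helpers: the lock functions take _xmit_lock and record the owning CPU in xmit_lock_owner, and the unlock functions reset the owner to -1 before releasing. A driver can take the same lock to serialize a slow-path operation against the transmit path on one queue, since for non-LLTX drivers the core holds this lock around ndo_start_xmit. A sketch with an invented foo_fixup_ring():

#include <linux/netdevice.h>

/* Quiesce TX queue i while reprogramming its hardware ring. */
static void foo_fixup_ring(struct net_device *dev, unsigned int i)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

	__netif_tx_lock_bh(txq);	/* excludes ndo_start_xmit on this queue */
	netif_tx_stop_queue(txq);

	/* ... reprogram the ring for this queue ... */

	netif_tx_wake_queue(txq);
	__netif_tx_unlock_bh(txq);
}
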
3199 static inline void txq_trans_update(struct netdev_queue *txq) in txq_trans_update() argument
3201 if (txq->xmit_lock_owner != -1) in txq_trans_update()
3202 txq->trans_start = jiffies; in txq_trans_update()
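
The xmit_lock_owner != -1 test means trans_start, the per-queue timestamp the TX watchdog compares against the device's watchdog timeout, is only refreshed when the caller actually holds that queue's xmit lock. Drivers that do their own TX locking (NETIF_F_LLTX) never set the owner field, so for them this helper is effectively a no-op and keeping trans_start current is their own responsibility if they rely on the watchdog.
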
3219 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_lock() local
3227 __netif_tx_lock(txq, cpu); in netif_tx_lock()
3228 set_bit(__QUEUE_STATE_FROZEN, &txq->state); in netif_tx_lock()
3229 __netif_tx_unlock(txq); in netif_tx_lock()
3244 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_unlock() local
3250 clear_bit(__QUEUE_STATE_FROZEN, &txq->state); in netif_tx_unlock()
3251 netif_schedule_queue(txq); in netif_tx_unlock()
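
netif_tx_lock() (lines 3219-3229) takes the device-global TX lock and then briefly grabs each queue's xmit lock on the current CPU, just so __QUEUE_STATE_FROZEN can be set without racing an in-flight ndo_start_xmit; netif_tx_unlock() clears the frozen bit on every queue and reschedules it via netif_schedule_queue(). Together they freeze all transmission on the device around a global operation; a minimal sketch (foo_rewrite_filters is invented):

#include <linux/netdevice.h>

static void foo_rewrite_filters(struct net_device *dev)
{
	/* The _bh variants are appropriate from process context, since the
	 * transmit path may run from softirq. */
	netif_tx_lock_bh(dev);

	/* All queues are frozen here: for non-LLTX drivers no ndo_start_xmit
	 * is running and none will start until the unlock below. */
	/* ... update state that the xmit path also reads ... */

	netif_tx_unlock_bh(dev);
}
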
3262 #define HARD_TX_LOCK(dev, txq, cpu) { \ argument
3264 __netif_tx_lock(txq, cpu); \
3268 #define HARD_TX_TRYLOCK(dev, txq) \ argument
3270 __netif_tx_trylock(txq) : \
3273 #define HARD_TX_UNLOCK(dev, txq) { \ argument
3275 __netif_tx_unlock(txq); \
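
HARD_TX_LOCK()/HARD_TX_TRYLOCK()/HARD_TX_UNLOCK() are the conditional wrappers used by the core transmit path: the surrounding lines (not matched here because they do not mention txq) test the device's NETIF_F_LLTX feature, so the per-queue xmit lock is only taken for drivers that have not declared lock-less TX. The way the core hands a packet to the driver looks roughly like the following simplified sketch (not the actual dev_queue_xmit() code):

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/smp.h>

/* Hand one skb to the device on a chosen queue. The caller is assumed to
 * have BH disabled, as the real transmit path does. */
static netdev_tx_t foo_xmit_one(struct sk_buff *skb, struct net_device *dev,
				struct netdev_queue *txq)
{
	netdev_tx_t rc = NETDEV_TX_BUSY;
	int cpu = smp_processor_id();

	HARD_TX_LOCK(dev, txq, cpu);	/* no-op for NETIF_F_LLTX drivers */

	if (!netif_xmit_frozen_or_stopped(txq))
		rc = netdev_start_xmit(skb, dev, txq, false);

	HARD_TX_UNLOCK(dev, txq);

	return rc;
}
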
3287 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_disable() local
3289 __netif_tx_lock(txq, cpu); in netif_tx_disable()
3290 netif_tx_stop_queue(txq); in netif_tx_disable()
3291 __netif_tx_unlock(txq); in netif_tx_disable()
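
netif_tx_disable() (lines 3287-3291) stops every queue while holding its xmit lock, so unlike netif_tx_stop_all_queues() it also guarantees that, for non-LLTX drivers, no ndo_start_xmit call is still running on any queue once it returns. That makes it the usual choice right before tearing the TX rings down; a minimal sketch with an invented foo_close():

#include <linux/netdevice.h>

static int foo_close(struct net_device *dev)
{
	/* No queue can be inside ndo_start_xmit after this returns. */
	netif_tx_disable(dev);

	/* ... disable TX interrupts, reclaim and free the rings ... */

	return 0;
}
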
3614 struct netdev_queue *txq, bool more) in netdev_start_xmit() argument
3621 txq_trans_update(txq); in netdev_start_xmit()
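
netdev_start_xmit() is the wrapper through which the core invokes the driver's ndo_start_xmit (the bool more argument tells the driver whether further frames will follow immediately); when the driver reports NETDEV_TX_OK it calls txq_trans_update() so the queue's trans_start stays fresh for the TX watchdog.
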