Lines matching refs: txq (references to the txq identifier, apparently in include/linux/netdevice.h)
2588 void netif_schedule_queue(struct netdev_queue *txq);
2619 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_start_all_queues() local
2620 netif_tx_start_queue(txq); in netif_tx_start_all_queues()
2643 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_wake_all_queues() local
2644 netif_tx_wake_queue(txq); in netif_tx_wake_all_queues()
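The two "all queues" helpers above (start at 2619-2620, wake at 2643-2644) are usually paired with device bring-up and reset paths. A minimal sketch, assuming a hypothetical my_reset_hw(); netif_tx_stop_all_queues() is a related helper that does not appear in this listing:

#include <linux/netdevice.h>

void my_reset_hw(struct net_device *dev);	/* hypothetical driver routine */

static void my_reset_task(struct net_device *dev)
{
	/* Quiesce transmission, reprogram the hardware, then let the
	 * stack resume and reschedule any queues with pending work.
	 */
	netif_tx_stop_all_queues(dev);
	my_reset_hw(dev);
	netif_tx_wake_all_queues(dev);
}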
2873 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in netif_start_subqueue() local
2875 netif_tx_start_queue(txq); in netif_start_subqueue()
2887 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in netif_stop_subqueue() local
2888 netif_tx_stop_queue(txq); in netif_stop_subqueue()
2901 struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index); in __netif_subqueue_stopped() local
2903 return netif_tx_queue_stopped(txq); in __netif_subqueue_stopped()
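The per-subqueue helpers (2873-2903) let a multiqueue driver flow-control individual TX rings. A minimal sketch, assuming hypothetical my_ring_full()/my_ring_space() and using netif_wake_subqueue(), which is related but not in this listing:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

bool my_ring_full(struct net_device *dev, u16 q);	/* hypothetical */
bool my_ring_space(struct net_device *dev, u16 q);	/* hypothetical */

static netdev_tx_t my_ndo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u16 q = skb_get_queue_mapping(skb);

	/* Stop just this subqueue when its ring fills up. */
	if (my_ring_full(dev, q)) {
		netif_stop_subqueue(dev, q);
		return NETDEV_TX_BUSY;
	}

	/* ... post skb to hardware ring q here ... */
	return NETDEV_TX_OK;
}

static void my_tx_completion(struct net_device *dev, u16 q)
{
	/* Reopen the subqueue once descriptors have been reclaimed. */
	if (__netif_subqueue_stopped(dev, q) && my_ring_space(dev, q))
		netif_wake_subqueue(dev, q);
}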
2950 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
3067 struct netdev_queue *txq, int *ret);
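At 2950, txq is a queue count rather than a queue pointer, and 3067 is the tail of a multi-line prototype (apparently dev_hard_start_xmit()). A sketch of the usual netif_set_real_num_tx_queues() call site, assuming a hypothetical my_count_hw_tx_rings():

#include <linux/netdevice.h>

unsigned int my_count_hw_tx_rings(struct net_device *dev);	/* hypothetical */

static int my_ndo_open(struct net_device *dev)
{
	unsigned int txq = my_count_hw_tx_rings(dev);
	int err;

	/* Tell the stack how many TX queues are actually usable. */
	err = netif_set_real_num_tx_queues(dev, txq);
	if (err)
		return err;

	netif_tx_start_all_queues(dev);
	return 0;
}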
3250 static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) in __netif_tx_lock() argument
3252 spin_lock(&txq->_xmit_lock); in __netif_tx_lock()
3253 txq->xmit_lock_owner = cpu; in __netif_tx_lock()
3256 static inline void __netif_tx_lock_bh(struct netdev_queue *txq) in __netif_tx_lock_bh() argument
3258 spin_lock_bh(&txq->_xmit_lock); in __netif_tx_lock_bh()
3259 txq->xmit_lock_owner = smp_processor_id(); in __netif_tx_lock_bh()
3262 static inline bool __netif_tx_trylock(struct netdev_queue *txq) in __netif_tx_trylock() argument
3264 bool ok = spin_trylock(&txq->_xmit_lock); in __netif_tx_trylock()
3266 txq->xmit_lock_owner = smp_processor_id(); in __netif_tx_trylock()
3270 static inline void __netif_tx_unlock(struct netdev_queue *txq) in __netif_tx_unlock() argument
3272 txq->xmit_lock_owner = -1; in __netif_tx_unlock()
3273 spin_unlock(&txq->_xmit_lock); in __netif_tx_unlock()
3276 static inline void __netif_tx_unlock_bh(struct netdev_queue *txq) in __netif_tx_unlock_bh() argument
3278 txq->xmit_lock_owner = -1; in __netif_tx_unlock_bh()
3279 spin_unlock_bh(&txq->_xmit_lock); in __netif_tx_unlock_bh()
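The __netif_tx_lock*() helpers (3250-3279) take one queue's _xmit_lock and record the owning CPU, so recursion can be detected and txq_trans_update() knows whether the lock is held. A minimal sketch of restarting a single queue under the BH variants; the restart condition is illustrative only:

#include <linux/netdevice.h>

static void my_restart_one_queue(struct net_device *dev, unsigned int i)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

	__netif_tx_lock_bh(txq);
	if (netif_tx_queue_stopped(txq))
		netif_tx_wake_queue(txq);	/* reschedules the qdisc for this queue */
	__netif_tx_unlock_bh(txq);
}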
3282 static inline void txq_trans_update(struct netdev_queue *txq) in txq_trans_update() argument
3284 if (txq->xmit_lock_owner != -1) in txq_trans_update()
3285 txq->trans_start = jiffies; in txq_trans_update()
3302 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_lock() local
3310 __netif_tx_lock(txq, cpu); in netif_tx_lock()
3311 set_bit(__QUEUE_STATE_FROZEN, &txq->state); in netif_tx_lock()
3312 __netif_tx_unlock(txq); in netif_tx_lock()
3327 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_unlock() local
3333 clear_bit(__QUEUE_STATE_FROZEN, &txq->state); in netif_tx_unlock()
3334 netif_schedule_queue(txq); in netif_tx_unlock()
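netif_tx_lock()/netif_tx_unlock() (3302-3334) freeze every queue by taking each _xmit_lock and setting __QUEUE_STATE_FROZEN, then reschedule the queues on unlock via netif_schedule_queue(). A sketch of the usual pattern around a hardware reconfiguration, with a hypothetical my_reprogram_rings():

#include <linux/netdevice.h>

void my_reprogram_rings(struct net_device *dev);	/* hypothetical */

static void my_change_ring_layout(struct net_device *dev)
{
	netif_tx_lock(dev);		/* freezes all TX queues */
	my_reprogram_rings(dev);
	netif_tx_unlock(dev);		/* unfreezes and reschedules them */
}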
3345 #define HARD_TX_LOCK(dev, txq, cpu) { \ argument
3347 __netif_tx_lock(txq, cpu); \
3351 #define HARD_TX_TRYLOCK(dev, txq) \ argument
3353 __netif_tx_trylock(txq) : \
3356 #define HARD_TX_UNLOCK(dev, txq) { \ argument
3358 __netif_tx_unlock(txq); \
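The HARD_TX_* macros (3345-3358) wrap the __netif_tx_lock*() helpers and skip locking entirely for LLTX devices. A simplified sketch of how a transmit path might use them, roughly following what dev_queue_xmit() does; it assumes the caller already runs with bottom halves disabled, as that path does, and omits requeue/error handling:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static netdev_tx_t my_direct_xmit(struct sk_buff *skb, struct net_device *dev,
				  struct netdev_queue *txq)
{
	netdev_tx_t rc = NETDEV_TX_BUSY;
	int cpu = smp_processor_id();	/* safe: BH assumed disabled */

	HARD_TX_LOCK(dev, txq, cpu);	/* no-op for NETIF_F_LLTX devices */
	if (!netif_tx_queue_stopped(txq))
		rc = netdev_start_xmit(skb, dev, txq, false);
	HARD_TX_UNLOCK(dev, txq);

	return rc;
}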
3370 struct netdev_queue *txq = netdev_get_tx_queue(dev, i); in netif_tx_disable() local
3372 __netif_tx_lock(txq, cpu); in netif_tx_disable()
3373 netif_tx_stop_queue(txq); in netif_tx_disable()
3374 __netif_tx_unlock(txq); in netif_tx_disable()
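netif_tx_disable() (3370-3374) stops every queue while holding its xmit lock, so no new packets can enter ndo_start_xmit() afterwards. Typical use in a driver's ndo_stop, with a hypothetical my_free_tx_rings():

#include <linux/netdevice.h>

void my_free_tx_rings(struct net_device *dev);	/* hypothetical */

static int my_ndo_stop(struct net_device *dev)
{
	netif_tx_disable(dev);		/* no further ndo_start_xmit() calls */
	my_free_tx_rings(dev);		/* safe: transmit path is quiesced */
	return 0;
}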
3697 struct netdev_queue *txq, bool more) in netdev_start_xmit() argument
3704 txq_trans_update(txq); in netdev_start_xmit()
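The fragment at 3697/3704 is part of netdev_start_xmit(), which hands the skb to the driver's ndo_start_xmit() and, on success, refreshes the queue's trans_start (used by the TX watchdog) via txq_trans_update(). A hedged reconstruction from the listed lines; anything beyond them is inferred from context:

static inline netdev_tx_t netdev_start_xmit(struct sk_buff *skb, struct net_device *dev,
					    struct netdev_queue *txq, bool more)
{
	const struct net_device_ops *ops = dev->netdev_ops;
	netdev_tx_t rc;

	rc = __netdev_start_xmit(ops, skb, dev, more);	/* invokes ops->ndo_start_xmit() */
	if (rc == NETDEV_TX_OK)
		txq_trans_update(txq);	/* updates trans_start only while the xmit lock is held */

	return rc;
}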