/linux-4.1.27/net/ieee802154/6lowpan/ |
H A D | Makefile | 3 ieee802154_6lowpan-y := core.o rx.o reassembly.o tx.o
|
/linux-4.1.27/drivers/net/wireless/ti/wl18xx/ |
H A D | Makefile | 1 wl18xx-objs = main.o acx.o tx.o io.o debugfs.o scan.o cmd.o event.o
|
H A D | debugfs.c | 52 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_prepared_descs, "%u"); 53 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cmplt, "%u"); 54 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_prepared, "%u"); 55 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_prepared, "%u"); 56 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_programmed, "%u"); 57 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_programmed, "%u"); 58 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_burst_programmed, "%u"); 59 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_starts, "%u"); 60 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_imm_resp, "%u"); 61 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_templates, "%u"); 62 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_int_templates, "%u"); 63 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_fw_gen, "%u"); 64 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_data, "%u"); 65 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_null_frame, "%u"); 66 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch, "%u"); 67 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_template, "%u"); 68 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_data, "%u"); 69 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_pending, "%u"); 70 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_expiry, "%u"); 71 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_template, "%u"); 72 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_data, "%u"); 73 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_int_template, "%u"); 74 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_frame_checksum, "%u"); 75 WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_checksum_result, "%u"); 76 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_called, "%u"); 77 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_mpdu_alloc_failed, "%u"); 78 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_init_called, "%u"); 79 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_in_process_called, "%u"); 80 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_tkip_called, "%u"); 81 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_key_not_found, "%u"); 82 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_need_fragmentation, "%u"); 83 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_bad_mblk_num, "%u"); 84 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_failed, "%u"); 85 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_cache_hit, "%u"); 86 WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_cache_miss, "%u"); 317 DEBUGFS_FWSTATS_ADD(tx, tx_prepared_descs); wl18xx_debugfs_add_files() 318 DEBUGFS_FWSTATS_ADD(tx, tx_cmplt); wl18xx_debugfs_add_files() 319 DEBUGFS_FWSTATS_ADD(tx, tx_template_prepared); wl18xx_debugfs_add_files() 320 DEBUGFS_FWSTATS_ADD(tx, tx_data_prepared); wl18xx_debugfs_add_files() 321 DEBUGFS_FWSTATS_ADD(tx, tx_template_programmed); wl18xx_debugfs_add_files() 322 DEBUGFS_FWSTATS_ADD(tx, tx_data_programmed); wl18xx_debugfs_add_files() 323 DEBUGFS_FWSTATS_ADD(tx, tx_burst_programmed); wl18xx_debugfs_add_files() 324 DEBUGFS_FWSTATS_ADD(tx, tx_starts); wl18xx_debugfs_add_files() 325 DEBUGFS_FWSTATS_ADD(tx, tx_imm_resp); wl18xx_debugfs_add_files() 326 DEBUGFS_FWSTATS_ADD(tx, tx_start_templates); wl18xx_debugfs_add_files() 327 DEBUGFS_FWSTATS_ADD(tx, tx_start_int_templates); wl18xx_debugfs_add_files() 328 DEBUGFS_FWSTATS_ADD(tx, tx_start_fw_gen); wl18xx_debugfs_add_files() 329 DEBUGFS_FWSTATS_ADD(tx, tx_start_data); wl18xx_debugfs_add_files() 330 DEBUGFS_FWSTATS_ADD(tx, tx_start_null_frame); wl18xx_debugfs_add_files() 331 DEBUGFS_FWSTATS_ADD(tx, tx_exch); wl18xx_debugfs_add_files() 332 DEBUGFS_FWSTATS_ADD(tx, tx_retry_template); wl18xx_debugfs_add_files() 333 DEBUGFS_FWSTATS_ADD(tx, tx_retry_data); wl18xx_debugfs_add_files() 334 DEBUGFS_FWSTATS_ADD(tx, tx_exch_pending); wl18xx_debugfs_add_files() 335 DEBUGFS_FWSTATS_ADD(tx, tx_exch_expiry); wl18xx_debugfs_add_files() 336 DEBUGFS_FWSTATS_ADD(tx, 
tx_done_template); wl18xx_debugfs_add_files() 337 DEBUGFS_FWSTATS_ADD(tx, tx_done_data); wl18xx_debugfs_add_files() 338 DEBUGFS_FWSTATS_ADD(tx, tx_done_int_template); wl18xx_debugfs_add_files() 339 DEBUGFS_FWSTATS_ADD(tx, tx_frame_checksum); wl18xx_debugfs_add_files() 340 DEBUGFS_FWSTATS_ADD(tx, tx_checksum_result); wl18xx_debugfs_add_files() 341 DEBUGFS_FWSTATS_ADD(tx, frag_called); wl18xx_debugfs_add_files() 342 DEBUGFS_FWSTATS_ADD(tx, frag_mpdu_alloc_failed); wl18xx_debugfs_add_files() 343 DEBUGFS_FWSTATS_ADD(tx, frag_init_called); wl18xx_debugfs_add_files() 344 DEBUGFS_FWSTATS_ADD(tx, frag_in_process_called); wl18xx_debugfs_add_files() 345 DEBUGFS_FWSTATS_ADD(tx, frag_tkip_called); wl18xx_debugfs_add_files() 346 DEBUGFS_FWSTATS_ADD(tx, frag_key_not_found); wl18xx_debugfs_add_files() 347 DEBUGFS_FWSTATS_ADD(tx, frag_need_fragmentation); wl18xx_debugfs_add_files() 348 DEBUGFS_FWSTATS_ADD(tx, frag_bad_mblk_num); wl18xx_debugfs_add_files() 349 DEBUGFS_FWSTATS_ADD(tx, frag_failed); wl18xx_debugfs_add_files() 350 DEBUGFS_FWSTATS_ADD(tx, frag_cache_hit); wl18xx_debugfs_add_files() 351 DEBUGFS_FWSTATS_ADD(tx, frag_cache_miss); wl18xx_debugfs_add_files()
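
The WL18XX_DEBUGFS_FWSTATS_FILE / DEBUGFS_FWSTATS_ADD pairs above generate one debugfs file per firmware TX counter. As a rough, self-contained illustration of the same idea (not the wlcore macro expansion, which reads the stats block from firmware on demand), a driver can expose plain u32 counters under debugfs like this; the struct, directory and counter names here are invented for the sketch:

    /* Hedged sketch: one read-only debugfs file per TX counter.
     * Not the wl18xx implementation; names and layout are made up. */
    #include <linux/module.h>
    #include <linux/debugfs.h>

    struct tx_fw_stats_sketch {          /* hypothetical counter block */
            u32 tx_prepared_descs;
            u32 tx_cmplt;
    };

    static struct tx_fw_stats_sketch stats;
    static struct dentry *stats_dir;

    static int __init txstats_init(void)
    {
            stats_dir = debugfs_create_dir("tx_fw_stats", NULL);
            /* one file per counter, mirroring what DEBUGFS_FWSTATS_ADD() does */
            debugfs_create_u32("tx_prepared_descs", 0444, stats_dir,
                               &stats.tx_prepared_descs);
            debugfs_create_u32("tx_cmplt", 0444, stats_dir, &stats.tx_cmplt);
            return 0;
    }

    static void __exit txstats_exit(void)
    {
            debugfs_remove_recursive(stats_dir);
    }

    module_init(txstats_init);
    module_exit(txstats_exit);
    MODULE_LICENSE("GPL");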
|
H A D | tx.c | 26 #include "../wlcore/tx.h" 29 #include "tx.h" 85 wl1271_warning("illegal id in tx completion: %d", id); wl18xx_tx_complete_packet() 133 wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p success %d", wl18xx_tx_complete_packet()
|
/linux-4.1.27/net/mac802154/ |
H A D | Makefile | 2 mac802154-objs := main.o rx.o tx.o mac_cmd.o mib.o \
|
/linux-4.1.27/tools/testing/selftests/timers/ |
H A D | valid-adjtimex.c | 53 struct timex tx; clear_time_state() local 56 tx.modes = ADJ_STATUS; clear_time_state() 57 tx.status = 0; clear_time_state() 58 ret = adjtimex(&tx); clear_time_state() 117 struct timex tx; validate_freq() local 123 memset(&tx, 0, sizeof(struct timex)); validate_freq() 128 tx.modes = ADJ_FREQUENCY; validate_freq() 129 tx.freq = valid_freq[i]; validate_freq() 131 ret = adjtimex(&tx); validate_freq() 139 tx.modes = 0; validate_freq() 140 ret = adjtimex(&tx); validate_freq() 141 if (tx.freq != valid_freq[i]) { validate_freq() 143 tx.freq, valid_freq[i]); validate_freq() 147 tx.modes = ADJ_FREQUENCY; validate_freq() 148 tx.freq = outofrange_freq[i]; validate_freq() 150 ret = adjtimex(&tx); validate_freq() 158 tx.modes = 0; validate_freq() 159 ret = adjtimex(&tx); validate_freq() 160 if (tx.freq == outofrange_freq[i]) { validate_freq() 163 tx.freq); validate_freq() 172 tx.modes = ADJ_FREQUENCY; validate_freq() 173 tx.freq = invalid_freq[i]; validate_freq() 174 ret = adjtimex(&tx); validate_freq() 188 tx.modes = ADJ_FREQUENCY; validate_freq() 189 tx.freq = 0; validate_freq() 190 ret = adjtimex(&tx); validate_freq()
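
The valid-adjtimex.c excerpt above exercises adjtimex(2) with ADJ_FREQUENCY and then reads the value back with modes == 0. A minimal userspace sketch of that set-then-verify pattern (value chosen arbitrarily; changing clock state needs CAP_SYS_TIME):

    /* Minimal sketch of the ADJ_FREQUENCY set/read-back check from
     * valid-adjtimex.c; run as root to actually change kernel state. */
    #include <sys/timex.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            struct timex tx;
            long want = 100 << 16;          /* 100 ppm in 16.16 fixed point */

            memset(&tx, 0, sizeof(tx));
            tx.modes = ADJ_FREQUENCY;       /* set frequency offset */
            tx.freq = want;
            if (adjtimex(&tx) < 0) {
                    perror("adjtimex(set)");
                    return 1;
            }

            memset(&tx, 0, sizeof(tx));
            tx.modes = 0;                   /* read-only query */
            adjtimex(&tx);
            printf("freq now %ld (wanted %ld)\n", tx.freq, want);
            return tx.freq == want ? 0 : 1;
    }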
|
H A D | set-tai.c | 41 struct timex tx; set_tai() local 43 memset(&tx, 0, sizeof(tx)); set_tai() 45 tx.modes = ADJ_TAI; set_tai() 46 tx.constant = offset; set_tai() 48 return adjtimex(&tx); set_tai() 53 struct timex tx; get_tai() local 55 memset(&tx, 0, sizeof(tx)); get_tai() 57 adjtimex(&tx); get_tai() 58 return tx.tai; get_tai()
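
For readability, the two helpers fragmented in the set-tai.c excerpt above, reassembled (same ADJ_TAI set and modes == 0 query shown in the hit list):

    /* The set_tai()/get_tai() pair from set-tai.c, reassembled as a sketch. */
    #include <sys/timex.h>
    #include <string.h>

    static int set_tai(int offset)
    {
            struct timex tx;

            memset(&tx, 0, sizeof(tx));
            tx.modes = ADJ_TAI;             /* set the TAI-UTC offset */
            tx.constant = offset;
            return adjtimex(&tx);
    }

    static int get_tai(void)
    {
            struct timex tx;

            memset(&tx, 0, sizeof(tx));
            adjtimex(&tx);                  /* modes == 0: query only */
            return tx.tai;
    }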
|
H A D | leapcrash.c | 43 struct timex tx; clear_time_state() local 52 tx.modes = ADJ_STATUS; clear_time_state() 53 tx.status = STA_PLL; clear_time_state() 54 ret = adjtimex(&tx); clear_time_state() 56 tx.modes = ADJ_STATUS; clear_time_state() 57 tx.status = 0; clear_time_state() 58 ret = adjtimex(&tx); clear_time_state() 73 struct timex tx; main() local 105 tx.modes = 0; main() 106 adjtimex(&tx); main() 109 while (tx.time.tv_sec < next_leap + 1) { main() 111 tx.modes = ADJ_STATUS; main() 112 tx.status = STA_INS; main() 113 adjtimex(&tx); main()
|
H A D | leap-a-day.c | 104 struct timex tx; clear_time_state() local 113 tx.modes = ADJ_STATUS; clear_time_state() 114 tx.status = STA_PLL; clear_time_state() 115 ret = adjtimex(&tx); clear_time_state() 118 tx.modes = ADJ_MAXERROR; clear_time_state() 119 tx.maxerror = 0; clear_time_state() 120 ret = adjtimex(&tx); clear_time_state() 123 tx.modes = ADJ_STATUS; clear_time_state() 124 tx.status = 0; clear_time_state() 125 ret = adjtimex(&tx); clear_time_state() 203 struct timex tx; main() local 226 tx.modes = ADJ_STATUS; main() 228 tx.status = STA_INS; main() 230 tx.status = STA_DEL; main() 231 ret = adjtimex(&tx); main() 239 tx.modes = 0; main() 240 ret = adjtimex(&tx); main() 241 if (tx.status != STA_INS && tx.status != STA_DEL) { main() 262 tx.modes = 0; main() 263 ret = adjtimex(&tx); main() 264 if (tx.status != STA_INS && tx.status != STA_DEL) { main() 266 tx.modes = ADJ_STATUS; main() 268 tx.status = STA_INS; main() 270 tx.status = STA_DEL; main() 271 ret = adjtimex(&tx); main() 275 now = tx.time.tv_sec; main() 280 tx.modes = 0; main() 281 ret = adjtimex(&tx); main() 290 ctime_r(&tx.time.tv_sec, buf); main() 295 tx.time.tv_usec, main() 296 tx.tai, main() 299 now = tx.time.tv_sec; main()
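
leap-a-day.c arms a leap second by setting STA_INS (or STA_DEL) through ADJ_STATUS, then polls the status word. A short sketch of just the arming step (root required; the kernel applies the leap at the next UTC day boundary):

    /* Arm an insert-leap-second, mirroring the ADJ_STATUS/STA_INS sequence
     * in leap-a-day.c.  Use STA_DEL instead to delete a second. */
    #include <sys/timex.h>
    #include <string.h>

    static int arm_leap_insert(void)
    {
            struct timex tx;

            memset(&tx, 0, sizeof(tx));
            tx.modes = ADJ_STATUS;
            tx.status = STA_INS;
            return adjtimex(&tx);
    }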
|
H A D | change_skew.c | 49 struct timex tx; change_skew_test() local 52 tx.modes = ADJ_FREQUENCY; change_skew_test() 53 tx.freq = ppm << 16; change_skew_test() 55 ret = adjtimex(&tx); change_skew_test() 71 struct timex tx; main() local 80 tx.modes = ADJ_OFFSET; main() 81 tx.offset = 0; main() 82 ret = adjtimex(&tx); main() 97 tx.modes = ADJ_FREQUENCY; main() 98 tx.offset = 0; main() 99 adjtimex(&tx); main()
|
H A D | skew_consistency.c | 55 struct timex tx; main() local 71 tx.modes = ADJ_FREQUENCY; main() 72 tx.freq = ppm << 16; main() 73 adjtimex(&tx); main() 78 tx.modes = ADJ_FREQUENCY; main() 79 tx.offset = 0; main() 80 adjtimex(&tx); main()
|
/linux-4.1.27/drivers/staging/iio/meter/ |
H A D | ade7854-i2c.c | 27 st->tx[0] = (reg_address >> 8) & 0xFF; ade7854_i2c_write_reg_8() 28 st->tx[1] = reg_address & 0xFF; ade7854_i2c_write_reg_8() 29 st->tx[2] = value; ade7854_i2c_write_reg_8() 31 ret = i2c_master_send(st->i2c, st->tx, 3); ade7854_i2c_write_reg_8() 46 st->tx[0] = (reg_address >> 8) & 0xFF; ade7854_i2c_write_reg_16() 47 st->tx[1] = reg_address & 0xFF; ade7854_i2c_write_reg_16() 48 st->tx[2] = (value >> 8) & 0xFF; ade7854_i2c_write_reg_16() 49 st->tx[3] = value & 0xFF; ade7854_i2c_write_reg_16() 51 ret = i2c_master_send(st->i2c, st->tx, 4); ade7854_i2c_write_reg_16() 66 st->tx[0] = (reg_address >> 8) & 0xFF; ade7854_i2c_write_reg_24() 67 st->tx[1] = reg_address & 0xFF; ade7854_i2c_write_reg_24() 68 st->tx[2] = (value >> 16) & 0xFF; ade7854_i2c_write_reg_24() 69 st->tx[3] = (value >> 8) & 0xFF; ade7854_i2c_write_reg_24() 70 st->tx[4] = value & 0xFF; ade7854_i2c_write_reg_24() 72 ret = i2c_master_send(st->i2c, st->tx, 5); ade7854_i2c_write_reg_24() 87 st->tx[0] = (reg_address >> 8) & 0xFF; ade7854_i2c_write_reg_32() 88 st->tx[1] = reg_address & 0xFF; ade7854_i2c_write_reg_32() 89 st->tx[2] = (value >> 24) & 0xFF; ade7854_i2c_write_reg_32() 90 st->tx[3] = (value >> 16) & 0xFF; ade7854_i2c_write_reg_32() 91 st->tx[4] = (value >> 8) & 0xFF; ade7854_i2c_write_reg_32() 92 st->tx[5] = value & 0xFF; ade7854_i2c_write_reg_32() 94 ret = i2c_master_send(st->i2c, st->tx, 6); ade7854_i2c_write_reg_32() 109 st->tx[0] = (reg_address >> 8) & 0xFF; ade7854_i2c_read_reg_8() 110 st->tx[1] = reg_address & 0xFF; ade7854_i2c_read_reg_8() 112 ret = i2c_master_send(st->i2c, st->tx, 2); ade7854_i2c_read_reg_8() 135 st->tx[0] = (reg_address >> 8) & 0xFF; ade7854_i2c_read_reg_16() 136 st->tx[1] = reg_address & 0xFF; ade7854_i2c_read_reg_16() 138 ret = i2c_master_send(st->i2c, st->tx, 2); ade7854_i2c_read_reg_16() 161 st->tx[0] = (reg_address >> 8) & 0xFF; ade7854_i2c_read_reg_24() 162 st->tx[1] = reg_address & 0xFF; ade7854_i2c_read_reg_24() 164 ret = i2c_master_send(st->i2c, st->tx, 2); ade7854_i2c_read_reg_24() 187 st->tx[0] = (reg_address >> 8) & 0xFF; ade7854_i2c_read_reg_32() 188 st->tx[1] = reg_address & 0xFF; ade7854_i2c_read_reg_32() 190 ret = i2c_master_send(st->i2c, st->tx, 2); ade7854_i2c_read_reg_32()
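
All of the ade7854 I2C write helpers above follow one framing: the 16-bit register address goes into st->tx[0..1] MSB first, the value follows MSB first, and the buffer is handed to i2c_master_send(). A standalone sketch of the 32-bit variant; the state struct here is a simplified stand-in for the driver's ade7854_state:

    /* Sketch of the ade7854 I2C write framing (32-bit case).  The struct
     * layout is trimmed to what the pattern needs. */
    #include <linux/i2c.h>

    struct ade7854_state_sketch {
            struct i2c_client *i2c;
            u8 tx[8];
    };

    static int ade7854_i2c_write32_sketch(struct ade7854_state_sketch *st,
                                          u16 reg_address, u32 value)
    {
            int ret;

            st->tx[0] = (reg_address >> 8) & 0xFF;   /* register address, MSB */
            st->tx[1] = reg_address & 0xFF;          /* register address, LSB */
            st->tx[2] = (value >> 24) & 0xFF;        /* value, big-endian */
            st->tx[3] = (value >> 16) & 0xFF;
            st->tx[4] = (value >> 8) & 0xFF;
            st->tx[5] = value & 0xFF;

            ret = i2c_master_send(st->i2c, st->tx, 6);
            return ret < 0 ? ret : 0;
    }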
|
H A D | ade7854-spi.c | 26 .tx_buf = st->tx, ade7854_spi_write_reg_8() 32 st->tx[0] = ADE7854_WRITE_REG; ade7854_spi_write_reg_8() 33 st->tx[1] = (reg_address >> 8) & 0xFF; ade7854_spi_write_reg_8() 34 st->tx[2] = reg_address & 0xFF; ade7854_spi_write_reg_8() 35 st->tx[3] = value & 0xFF; ade7854_spi_write_reg_8() 51 .tx_buf = st->tx, ade7854_spi_write_reg_16() 57 st->tx[0] = ADE7854_WRITE_REG; ade7854_spi_write_reg_16() 58 st->tx[1] = (reg_address >> 8) & 0xFF; ade7854_spi_write_reg_16() 59 st->tx[2] = reg_address & 0xFF; ade7854_spi_write_reg_16() 60 st->tx[3] = (value >> 8) & 0xFF; ade7854_spi_write_reg_16() 61 st->tx[4] = value & 0xFF; ade7854_spi_write_reg_16() 77 .tx_buf = st->tx, ade7854_spi_write_reg_24() 83 st->tx[0] = ADE7854_WRITE_REG; ade7854_spi_write_reg_24() 84 st->tx[1] = (reg_address >> 8) & 0xFF; ade7854_spi_write_reg_24() 85 st->tx[2] = reg_address & 0xFF; ade7854_spi_write_reg_24() 86 st->tx[3] = (value >> 16) & 0xFF; ade7854_spi_write_reg_24() 87 st->tx[4] = (value >> 8) & 0xFF; ade7854_spi_write_reg_24() 88 st->tx[5] = value & 0xFF; ade7854_spi_write_reg_24() 104 .tx_buf = st->tx, ade7854_spi_write_reg_32() 110 st->tx[0] = ADE7854_WRITE_REG; ade7854_spi_write_reg_32() 111 st->tx[1] = (reg_address >> 8) & 0xFF; ade7854_spi_write_reg_32() 112 st->tx[2] = reg_address & 0xFF; ade7854_spi_write_reg_32() 113 st->tx[3] = (value >> 24) & 0xFF; ade7854_spi_write_reg_32() 114 st->tx[4] = (value >> 16) & 0xFF; ade7854_spi_write_reg_32() 115 st->tx[5] = (value >> 8) & 0xFF; ade7854_spi_write_reg_32() 116 st->tx[6] = value & 0xFF; ade7854_spi_write_reg_32() 133 .tx_buf = st->tx, ade7854_spi_read_reg_8() 145 st->tx[0] = ADE7854_READ_REG; ade7854_spi_read_reg_8() 146 st->tx[1] = (reg_address >> 8) & 0xFF; ade7854_spi_read_reg_8() 147 st->tx[2] = reg_address & 0xFF; ade7854_spi_read_reg_8() 171 .tx_buf = st->tx, ade7854_spi_read_reg_16() 182 st->tx[0] = ADE7854_READ_REG; ade7854_spi_read_reg_16() 183 st->tx[1] = (reg_address >> 8) & 0xFF; ade7854_spi_read_reg_16() 184 st->tx[2] = reg_address & 0xFF; ade7854_spi_read_reg_16() 208 .tx_buf = st->tx, ade7854_spi_read_reg_24() 220 st->tx[0] = ADE7854_READ_REG; ade7854_spi_read_reg_24() 221 st->tx[1] = (reg_address >> 8) & 0xFF; ade7854_spi_read_reg_24() 222 st->tx[2] = reg_address & 0xFF; ade7854_spi_read_reg_24() 246 .tx_buf = st->tx, ade7854_spi_read_reg_32() 258 st->tx[0] = ADE7854_READ_REG; ade7854_spi_read_reg_32() 259 st->tx[1] = (reg_address >> 8) & 0xFF; ade7854_spi_read_reg_32() 260 st->tx[2] = reg_address & 0xFF; ade7854_spi_read_reg_32()
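
The SPI variants use the same big-endian framing but prepend a write/read command byte and send the buffer as a struct spi_transfer. A sketch of a 16-bit write follows; the command value is a stand-in for ADE7854_WRITE_REG, and spi_sync_transfer() is used here for brevity where the driver builds an spi_message by hand:

    /* Sketch of the ade7854 SPI write framing: command byte, 16-bit register
     * address, then the value MSB first, in a single transfer. */
    #include <linux/spi/spi.h>

    #define WRITE_CMD_SKETCH 0x00           /* stand-in for ADE7854_WRITE_REG */

    static int ade7854_spi_write16_sketch(struct spi_device *spi, u8 *tx,
                                          u16 reg_address, u16 value)
    {
            struct spi_transfer xfer = {
                    .tx_buf = tx,
                    .len = 5,
            };

            tx[0] = WRITE_CMD_SKETCH;
            tx[1] = (reg_address >> 8) & 0xFF;
            tx[2] = reg_address & 0xFF;
            tx[3] = (value >> 8) & 0xFF;
            tx[4] = value & 0xFF;

            return spi_sync_transfer(spi, &xfer, 1);
    }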
|
H A D | ade7759.h | 42 * @buf_lock: mutex to protect tx and rx 43 * @tx: transmit buffer 49 u8 tx[ADE7759_MAX_TX] ____cacheline_aligned;
|
H A D | ade7758_core.c | 36 st->tx[0] = ADE7758_WRITE_REG(reg_address); ade7758_spi_write_reg_8() 37 st->tx[1] = val; ade7758_spi_write_reg_8() 39 ret = spi_write(st->us, st->tx, 2); ade7758_spi_write_reg_8() 54 .tx_buf = st->tx, ade7758_spi_write_reg_16() 61 st->tx[0] = ADE7758_WRITE_REG(reg_address); ade7758_spi_write_reg_16() 62 st->tx[1] = (value >> 8) & 0xFF; ade7758_spi_write_reg_16() 63 st->tx[2] = value & 0xFF; ade7758_spi_write_reg_16() 80 .tx_buf = st->tx, ade7758_spi_write_reg_24() 87 st->tx[0] = ADE7758_WRITE_REG(reg_address); ade7758_spi_write_reg_24() 88 st->tx[1] = (value >> 16) & 0xFF; ade7758_spi_write_reg_24() 89 st->tx[2] = (value >> 8) & 0xFF; ade7758_spi_write_reg_24() 90 st->tx[3] = value & 0xFF; ade7758_spi_write_reg_24() 107 .tx_buf = st->tx, ade7758_spi_read_reg_8() 113 .tx_buf = &st->tx[1], ade7758_spi_read_reg_8() 121 st->tx[0] = ADE7758_READ_REG(reg_address); ade7758_spi_read_reg_8() 122 st->tx[1] = 0; ade7758_spi_read_reg_8() 146 .tx_buf = st->tx, ade7758_spi_read_reg_16() 152 .tx_buf = &st->tx[1], ade7758_spi_read_reg_16() 161 st->tx[0] = ADE7758_READ_REG(reg_address); ade7758_spi_read_reg_16() 162 st->tx[1] = 0; ade7758_spi_read_reg_16() 163 st->tx[2] = 0; ade7758_spi_read_reg_16() 188 .tx_buf = st->tx, ade7758_spi_read_reg_24() 194 .tx_buf = &st->tx[1], ade7758_spi_read_reg_24() 202 st->tx[0] = ADE7758_READ_REG(reg_address); ade7758_spi_read_reg_24() 203 st->tx[1] = 0; ade7758_spi_read_reg_24() 204 st->tx[2] = 0; ade7758_spi_read_reg_24() 205 st->tx[3] = 0; ade7758_spi_read_reg_24() 835 st->tx = kcalloc(ADE7758_MAX_TX, sizeof(*st->tx), GFP_KERNEL); ade7758_probe() 836 if (!st->tx) { ade7758_probe() 877 kfree(st->tx); ade7758_probe() 892 kfree(st->tx); ade7758_remove()
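
The ade7758 read helpers above chain two transfers: the first clocks out the READ command from st->tx[0], the second clocks out a dummy byte from st->tx[1] while the reply is captured in the rx buffer. A sketch of the 8-bit read using spi_message, with details not shown in the excerpt (delays, bits_per_word) left out:

    /* Sketch of the two-transfer SPI read used by ade7758_spi_read_reg_8(). */
    #include <linux/spi/spi.h>

    static int ade7758_spi_read8_sketch(struct spi_device *spi,
                                        u8 *tx, u8 *rx, u8 read_cmd)
    {
            struct spi_message msg;
            struct spi_transfer xfers[] = {
                    {
                            .tx_buf = tx,       /* READ command byte */
                            .len = 1,
                    }, {
                            .tx_buf = &tx[1],   /* dummy byte clocks data in */
                            .rx_buf = rx,
                            .len = 1,
                    },
            };

            tx[0] = read_cmd;                   /* e.g. ADE7758_READ_REG(addr) */
            tx[1] = 0;

            spi_message_init(&msg);
            spi_message_add_tail(&xfers[0], &msg);
            spi_message_add_tail(&xfers[1], &msg);
            return spi_sync(spi, &msg);
    }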
|
H A D | ade7753.h | 61 * @tx: transmit buffer 63 * @buf_lock: mutex to protect tx and rx 68 u8 tx[ADE7753_MAX_TX] ____cacheline_aligned;
|
H A D | ade7754.h | 79 * @buf_lock: mutex to protect tx and rx 80 * @tx: transmit buffer 86 u8 tx[ADE7754_MAX_TX] ____cacheline_aligned;
|
H A D | ade7754.c | 35 st->tx[0] = ADE7754_WRITE_REG(reg_address); ade7754_spi_write_reg_8() 36 st->tx[1] = val; ade7754_spi_write_reg_8() 38 ret = spi_write(st->us, st->tx, 2); ade7754_spi_write_reg_8() 53 st->tx[0] = ADE7754_WRITE_REG(reg_address); ade7754_spi_write_reg_16() 54 st->tx[1] = (value >> 8) & 0xFF; ade7754_spi_write_reg_16() 55 st->tx[2] = value & 0xFF; ade7754_spi_write_reg_16() 56 ret = spi_write(st->us, st->tx, 3); ade7754_spi_write_reg_16() 110 .tx_buf = st->tx, ade7754_spi_read_reg_24() 118 st->tx[0] = ADE7754_READ_REG(reg_address); ade7754_spi_read_reg_24() 119 st->tx[1] = 0; ade7754_spi_read_reg_24() 120 st->tx[2] = 0; ade7754_spi_read_reg_24() 121 st->tx[3] = 0; ade7754_spi_read_reg_24()
|
H A D | ade7753.c | 35 st->tx[0] = ADE7753_WRITE_REG(reg_address); ade7753_spi_write_reg_8() 36 st->tx[1] = val; ade7753_spi_write_reg_8() 38 ret = spi_write(st->us, st->tx, 2); ade7753_spi_write_reg_8() 53 st->tx[0] = ADE7753_WRITE_REG(reg_address); ade7753_spi_write_reg_16() 54 st->tx[1] = (value >> 8) & 0xFF; ade7753_spi_write_reg_16() 55 st->tx[2] = value & 0xFF; ade7753_spi_write_reg_16() 56 ret = spi_write(st->us, st->tx, 3); ade7753_spi_write_reg_16() 110 .tx_buf = st->tx, ade7753_spi_read_reg_24() 114 .rx_buf = st->tx, ade7753_spi_read_reg_24() 121 st->tx[0] = ADE7753_READ_REG(reg_address); ade7753_spi_read_reg_24()
|
H A D | ade7759.c | 35 st->tx[0] = ADE7759_WRITE_REG(reg_address); ade7759_spi_write_reg_8() 36 st->tx[1] = val; ade7759_spi_write_reg_8() 38 ret = spi_write(st->us, st->tx, 2); ade7759_spi_write_reg_8() 53 st->tx[0] = ADE7759_WRITE_REG(reg_address); ade7759_spi_write_reg_16() 54 st->tx[1] = (value >> 8) & 0xFF; ade7759_spi_write_reg_16() 55 st->tx[2] = value & 0xFF; ade7759_spi_write_reg_16() 56 ret = spi_write(st->us, st->tx, 3); ade7759_spi_write_reg_16() 110 .tx_buf = st->tx, ade7759_spi_read_reg_40() 118 st->tx[0] = ADE7759_READ_REG(reg_address); ade7759_spi_read_reg_40() 119 memset(&st->tx[1], 0, 5); ade7759_spi_read_reg_40()
|
/linux-4.1.27/drivers/net/wimax/i2400m/ |
H A D | Makefile | 12 tx.o \ 20 usb-tx.o \
|
/linux-4.1.27/net/mac80211/ |
H A D | wpa.h | 17 ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx); 22 ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx); 27 ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx, 34 ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx); 36 ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx); 42 ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx); 46 ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx); 51 ieee80211_crypto_gcmp_encrypt(struct ieee80211_tx_data *tx);
|
H A D | tx.c | 40 static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, ieee80211_duration() argument 46 struct ieee80211_local *local = tx->local; ieee80211_duration() 54 chanctx_conf = rcu_dereference(tx->sdata->vif.chanctx_conf); ieee80211_duration() 62 if (tx->rate.flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS)) ieee80211_duration() 66 if (WARN_ON_ONCE(tx->rate.idx < 0)) ieee80211_duration() 70 txrate = &sband->bitrates[tx->rate.idx]; ieee80211_duration() 139 if (tx->sdata->vif.bss_conf.basic_rates & BIT(i)) ieee80211_duration() 145 if (tx->sdata->flags & IEEE80211_SDATA_OPERATING_GMODE) ieee80211_duration() 179 tx->sdata->vif.bss_conf.use_short_preamble, ieee80211_duration() 189 tx->sdata->vif.bss_conf.use_short_preamble, ieee80211_duration() 196 /* tx handlers */ 198 ieee80211_tx_h_dynamic_ps(struct ieee80211_tx_data *tx) ieee80211_tx_h_dynamic_ps() argument 200 struct ieee80211_local *local = tx->local; ieee80211_tx_h_dynamic_ps() 227 if (tx->sdata->vif.type != NL80211_IFTYPE_STATION) ieee80211_tx_h_dynamic_ps() 230 ifmgd = &tx->sdata->u.mgd; ieee80211_tx_h_dynamic_ps() 247 skb_get_queue_mapping(tx->skb) == IEEE80211_AC_VO) ieee80211_tx_h_dynamic_ps() 271 ieee80211_tx_h_check_assoc(struct ieee80211_tx_data *tx) ieee80211_tx_h_check_assoc() argument 274 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; ieee80211_tx_h_check_assoc() 275 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); ieee80211_tx_h_check_assoc() 281 if (unlikely(test_bit(SCAN_SW_SCANNING, &tx->local->scanning)) && ieee80211_tx_h_check_assoc() 282 test_bit(SDATA_STATE_OFFCHANNEL, &tx->sdata->state) && ieee80211_tx_h_check_assoc() 298 if (tx->sdata->vif.type == NL80211_IFTYPE_OCB) ieee80211_tx_h_check_assoc() 301 if (tx->sdata->vif.type == NL80211_IFTYPE_WDS) ieee80211_tx_h_check_assoc() 304 if (tx->flags & IEEE80211_TX_PS_BUFFERED) ieee80211_tx_h_check_assoc() 307 if (tx->sta) ieee80211_tx_h_check_assoc() 308 assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC); ieee80211_tx_h_check_assoc() 310 if (likely(tx->flags & IEEE80211_TX_UNICAST)) { ieee80211_tx_h_check_assoc() 314 sdata_info(tx->sdata, ieee80211_tx_h_check_assoc() 318 I802_DEBUG_INC(tx->local->tx_handlers_drop_not_assoc); ieee80211_tx_h_check_assoc() 321 } else if (unlikely(tx->sdata->vif.type == NL80211_IFTYPE_AP && ieee80211_tx_h_check_assoc() 323 !atomic_read(&tx->sdata->u.ap.num_mcast_sta))) { ieee80211_tx_h_check_assoc() 386 ieee80211_tx_h_multicast_ps_buf(struct ieee80211_tx_data *tx) ieee80211_tx_h_multicast_ps_buf() argument 388 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); ieee80211_tx_h_multicast_ps_buf() 389 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; ieee80211_tx_h_multicast_ps_buf() 401 if (tx->sdata->vif.type == NL80211_IFTYPE_AP || ieee80211_tx_h_multicast_ps_buf() 402 tx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) { ieee80211_tx_h_multicast_ps_buf() 403 if (!tx->sdata->bss) ieee80211_tx_h_multicast_ps_buf() 406 ps = &tx->sdata->bss->ps; ieee80211_tx_h_multicast_ps_buf() 407 } else if (ieee80211_vif_is_mesh(&tx->sdata->vif)) { ieee80211_tx_h_multicast_ps_buf() 408 ps = &tx->sdata->u.mesh.ps; ieee80211_tx_h_multicast_ps_buf() 421 if (tx->local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) ieee80211_tx_h_multicast_ps_buf() 422 info->hw_queue = tx->sdata->vif.cab_queue; ieee80211_tx_h_multicast_ps_buf() 431 if (!(tx->local->hw.flags & IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING)) ieee80211_tx_h_multicast_ps_buf() 435 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) 
ieee80211_tx_h_multicast_ps_buf() 436 purge_old_ps_buffers(tx->local); ieee80211_tx_h_multicast_ps_buf() 439 ps_dbg(tx->sdata, ieee80211_tx_h_multicast_ps_buf() 443 tx->local->total_ps_buffered++; ieee80211_tx_h_multicast_ps_buf() 445 skb_queue_tail(&ps->bc_buf, tx->skb); ieee80211_tx_h_multicast_ps_buf() 466 ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx) ieee80211_tx_h_unicast_ps_buf() argument 468 struct sta_info *sta = tx->sta; ieee80211_tx_h_unicast_ps_buf() 469 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); ieee80211_tx_h_unicast_ps_buf() 470 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; ieee80211_tx_h_unicast_ps_buf() 471 struct ieee80211_local *local = tx->local; ieee80211_tx_h_unicast_ps_buf() 480 int ac = skb_get_queue_mapping(tx->skb); ieee80211_tx_h_unicast_ps_buf() 490 if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER) ieee80211_tx_h_unicast_ps_buf() 491 purge_old_ps_buffers(tx->local); ieee80211_tx_h_unicast_ps_buf() 509 ps_dbg(tx->sdata, ieee80211_tx_h_unicast_ps_buf() 514 tx->local->total_ps_buffered++; ieee80211_tx_h_unicast_ps_buf() 517 info->control.vif = &tx->sdata->vif; ieee80211_tx_h_unicast_ps_buf() 520 skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb); ieee80211_tx_h_unicast_ps_buf() 536 ps_dbg(tx->sdata, ieee80211_tx_h_unicast_ps_buf() 545 ieee80211_tx_h_ps_buf(struct ieee80211_tx_data *tx) ieee80211_tx_h_ps_buf() argument 547 if (unlikely(tx->flags & IEEE80211_TX_PS_BUFFERED)) ieee80211_tx_h_ps_buf() 550 if (tx->flags & IEEE80211_TX_UNICAST) ieee80211_tx_h_ps_buf() 551 return ieee80211_tx_h_unicast_ps_buf(tx); ieee80211_tx_h_ps_buf() 553 return ieee80211_tx_h_multicast_ps_buf(tx); ieee80211_tx_h_ps_buf() 557 ieee80211_tx_h_check_control_port_protocol(struct ieee80211_tx_data *tx) ieee80211_tx_h_check_control_port_protocol() argument 559 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); ieee80211_tx_h_check_control_port_protocol() 561 if (unlikely(tx->sdata->control_port_protocol == tx->skb->protocol)) { ieee80211_tx_h_check_control_port_protocol() 562 if (tx->sdata->control_port_no_encrypt) ieee80211_tx_h_check_control_port_protocol() 572 ieee80211_tx_h_select_key(struct ieee80211_tx_data *tx) ieee80211_tx_h_select_key() argument 575 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); ieee80211_tx_h_select_key() 576 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; ieee80211_tx_h_select_key() 579 tx->key = NULL; ieee80211_tx_h_select_key() 580 else if (tx->sta && ieee80211_tx_h_select_key() 581 (key = rcu_dereference(tx->sta->ptk[tx->sta->ptk_idx]))) ieee80211_tx_h_select_key() 582 tx->key = key; ieee80211_tx_h_select_key() 585 ieee80211_is_robust_mgmt_frame(tx->skb) && ieee80211_tx_h_select_key() 586 (key = rcu_dereference(tx->sdata->default_mgmt_key))) ieee80211_tx_h_select_key() 587 tx->key = key; ieee80211_tx_h_select_key() 589 (key = rcu_dereference(tx->sdata->default_multicast_key))) ieee80211_tx_h_select_key() 590 tx->key = key; ieee80211_tx_h_select_key() 592 (key = rcu_dereference(tx->sdata->default_unicast_key))) ieee80211_tx_h_select_key() 593 tx->key = key; ieee80211_tx_h_select_key() 595 tx->key = NULL; ieee80211_tx_h_select_key() 597 if (tx->key) { ieee80211_tx_h_select_key() 600 tx->key->tx_rx_count++; ieee80211_tx_h_select_key() 603 switch (tx->key->conf.cipher) { ieee80211_tx_h_select_key() 608 tx->key = NULL; ieee80211_tx_h_select_key() 615 !ieee80211_use_mfp(hdr->frame_control, tx->sta, ieee80211_tx_h_select_key() 616 tx->skb)) ieee80211_tx_h_select_key() 617 
tx->key = NULL; ieee80211_tx_h_select_key() 619 skip_hw = (tx->key->conf.flags & ieee80211_tx_h_select_key() 628 tx->key = NULL; ieee80211_tx_h_select_key() 632 if (unlikely(tx->key && tx->key->flags & KEY_FLAG_TAINTED && ieee80211_tx_h_select_key() 636 if (!skip_hw && tx->key && ieee80211_tx_h_select_key() 637 tx->key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) ieee80211_tx_h_select_key() 638 info->control.hw_key = &tx->key->conf; ieee80211_tx_h_select_key() 645 ieee80211_tx_h_rate_ctrl(struct ieee80211_tx_data *tx) ieee80211_tx_h_rate_ctrl() argument 647 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); ieee80211_tx_h_rate_ctrl() 648 struct ieee80211_hdr *hdr = (void *)tx->skb->data; ieee80211_tx_h_rate_ctrl() 657 sband = tx->local->hw.wiphy->bands[info->band]; ieee80211_tx_h_rate_ctrl() 659 len = min_t(u32, tx->skb->len + FCS_LEN, ieee80211_tx_h_rate_ctrl() 660 tx->local->hw.wiphy->frag_threshold); ieee80211_tx_h_rate_ctrl() 662 /* set up the tx rate control struct we give the RC algo */ ieee80211_tx_h_rate_ctrl() 663 txrc.hw = &tx->local->hw; ieee80211_tx_h_rate_ctrl() 665 txrc.bss_conf = &tx->sdata->vif.bss_conf; ieee80211_tx_h_rate_ctrl() 666 txrc.skb = tx->skb; ieee80211_tx_h_rate_ctrl() 668 txrc.rate_idx_mask = tx->sdata->rc_rateidx_mask[info->band]; ieee80211_tx_h_rate_ctrl() 674 if (tx->sdata->rc_has_mcs_mask[info->band]) ieee80211_tx_h_rate_ctrl() 676 tx->sdata->rc_rateidx_mcs_mask[info->band]; ieee80211_tx_h_rate_ctrl() 678 txrc.bss = (tx->sdata->vif.type == NL80211_IFTYPE_AP || ieee80211_tx_h_rate_ctrl() 679 tx->sdata->vif.type == NL80211_IFTYPE_MESH_POINT || ieee80211_tx_h_rate_ctrl() 680 tx->sdata->vif.type == NL80211_IFTYPE_ADHOC); ieee80211_tx_h_rate_ctrl() 683 if (len > tx->local->hw.wiphy->rts_threshold) { ieee80211_tx_h_rate_ctrl() 688 info->control.use_cts_prot = tx->sdata->vif.bss_conf.use_cts_prot; ieee80211_tx_h_rate_ctrl() 696 if (tx->sdata->vif.bss_conf.use_short_preamble && ieee80211_tx_h_rate_ctrl() 698 (tx->sta && test_sta_flag(tx->sta, WLAN_STA_SHORT_PREAMBLE)))) ieee80211_tx_h_rate_ctrl() 703 if (tx->sta) ieee80211_tx_h_rate_ctrl() 704 assoc = test_sta_flag(tx->sta, WLAN_STA_ASSOC); ieee80211_tx_h_rate_ctrl() 710 if (WARN(test_bit(SCAN_SW_SCANNING, &tx->local->scanning) && assoc && ieee80211_tx_h_rate_ctrl() 711 !rate_usable_index_exists(sband, &tx->sta->sta), ieee80211_tx_h_rate_ctrl() 715 tx->sdata->name, hdr->addr1, ieee80211_tx_h_rate_ctrl() 723 rate_control_get_rate(tx->sdata, tx->sta, &txrc); ieee80211_tx_h_rate_ctrl() 725 if (tx->sta && !info->control.skip_table) ieee80211_tx_h_rate_ctrl() 726 ratetbl = rcu_dereference(tx->sta->sta.rates); ieee80211_tx_h_rate_ctrl() 739 tx->rate = rate; ieee80211_tx_h_rate_ctrl() 744 tx->rate = info->control.rates[0]; ieee80211_tx_h_rate_ctrl() 748 txrc.reported_rate = tx->rate; ieee80211_tx_h_rate_ctrl() 749 if (tx->sta && ieee80211_is_data(hdr->frame_control)) ieee80211_tx_h_rate_ctrl() 750 tx->sta->last_tx_rate = txrc.reported_rate; ieee80211_tx_h_rate_ctrl() 751 } else if (tx->sta) ieee80211_tx_h_rate_ctrl() 752 tx->sta->last_tx_rate = txrc.reported_rate; ieee80211_tx_h_rate_ctrl() 779 ieee80211_tx_h_sequence(struct ieee80211_tx_data *tx) ieee80211_tx_h_sequence() argument 781 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); ieee80211_tx_h_sequence() 782 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)tx->skb->data; ieee80211_tx_h_sequence() 814 hdr->seq_ctrl = cpu_to_le16(tx->sdata->sequence_number); ieee80211_tx_h_sequence() 815 tx->sdata->sequence_number += 0x10; ieee80211_tx_h_sequence() 
816 if (tx->sta) ieee80211_tx_h_sequence() 817 tx->sta->tx_msdu[IEEE80211_NUM_TIDS]++; ieee80211_tx_h_sequence() 826 if (!tx->sta) ieee80211_tx_h_sequence() 833 tx->sta->tx_msdu[tid]++; ieee80211_tx_h_sequence() 835 if (!tx->sta->sta.txq[0]) ieee80211_tx_h_sequence() 836 hdr->seq_ctrl = ieee80211_tx_next_seq(tx->sta, tid); ieee80211_tx_h_sequence() 841 static int ieee80211_fragment(struct ieee80211_tx_data *tx, ieee80211_fragment() argument 845 struct ieee80211_local *local = tx->local; ieee80211_fragment() 865 tx->sdata->encrypt_headroom + ieee80211_fragment() 870 __skb_queue_tail(&tx->skbs, tmp); ieee80211_fragment() 873 local->tx_headroom + tx->sdata->encrypt_headroom); ieee80211_fragment() 902 ieee80211_tx_h_fragment(struct ieee80211_tx_data *tx) ieee80211_tx_h_fragment() argument 904 struct sk_buff *skb = tx->skb; ieee80211_tx_h_fragment() 907 int frag_threshold = tx->local->hw.wiphy->frag_threshold; ieee80211_tx_h_fragment() 911 /* no matter what happens, tx->skb moves to tx->skbs */ ieee80211_tx_h_fragment() 912 __skb_queue_tail(&tx->skbs, skb); ieee80211_tx_h_fragment() 913 tx->skb = NULL; ieee80211_tx_h_fragment() 918 if (tx->local->ops->set_frag_threshold) ieee80211_tx_h_fragment() 943 if (ieee80211_fragment(tx, skb, hdrlen, frag_threshold)) ieee80211_tx_h_fragment() 949 skb_queue_walk(&tx->skbs, skb) { ieee80211_tx_h_fragment() 955 if (!skb_queue_is_last(&tx->skbs, skb)) { ieee80211_tx_h_fragment() 977 ieee80211_tx_h_stats(struct ieee80211_tx_data *tx) ieee80211_tx_h_stats() argument 982 if (!tx->sta) ieee80211_tx_h_stats() 985 skb_queue_walk(&tx->skbs, skb) { ieee80211_tx_h_stats() 987 tx->sta->tx_fragments++; ieee80211_tx_h_stats() 988 tx->sta->tx_bytes[ac] += skb->len; ieee80211_tx_h_stats() 991 tx->sta->tx_packets[ac]++; ieee80211_tx_h_stats() 997 ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx) ieee80211_tx_h_encrypt() argument 999 if (!tx->key) ieee80211_tx_h_encrypt() 1002 switch (tx->key->conf.cipher) { ieee80211_tx_h_encrypt() 1005 return ieee80211_crypto_wep_encrypt(tx); ieee80211_tx_h_encrypt() 1007 return ieee80211_crypto_tkip_encrypt(tx); ieee80211_tx_h_encrypt() 1010 tx, IEEE80211_CCMP_MIC_LEN); ieee80211_tx_h_encrypt() 1013 tx, IEEE80211_CCMP_256_MIC_LEN); ieee80211_tx_h_encrypt() 1015 return ieee80211_crypto_aes_cmac_encrypt(tx); ieee80211_tx_h_encrypt() 1017 return ieee80211_crypto_aes_cmac_256_encrypt(tx); ieee80211_tx_h_encrypt() 1020 return ieee80211_crypto_aes_gmac_encrypt(tx); ieee80211_tx_h_encrypt() 1023 return ieee80211_crypto_gcmp_encrypt(tx); ieee80211_tx_h_encrypt() 1025 return ieee80211_crypto_hw_encrypt(tx); ieee80211_tx_h_encrypt() 1032 ieee80211_tx_h_calculate_duration(struct ieee80211_tx_data *tx) ieee80211_tx_h_calculate_duration() argument 1039 skb_queue_walk(&tx->skbs, skb) { ieee80211_tx_h_calculate_duration() 1043 if (!skb_queue_is_last(&tx->skbs, skb)) { ieee80211_tx_h_calculate_duration() 1044 struct sk_buff *next = skb_queue_next(&tx->skbs, skb); ieee80211_tx_h_calculate_duration() 1051 ieee80211_duration(tx, skb, group_addr, next_len); ieee80211_tx_h_calculate_duration() 1059 static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx, ieee80211_tx_prep_agg() argument 1077 } else if (!tx->sta->sta.txq[tid]) { ieee80211_tx_prep_agg() 1078 spin_lock(&tx->sta->lock); ieee80211_tx_prep_agg() 1097 tid_tx = rcu_dereference_protected_tid_tx(tx->sta, tid); ieee80211_tx_prep_agg() 1106 info->control.vif = &tx->sdata->vif; ieee80211_tx_prep_agg() 1113 spin_unlock(&tx->sta->lock); ieee80211_tx_prep_agg() 1116 
ieee80211_free_txskb(&tx->local->hw, purge_skb); ieee80211_tx_prep_agg() 1127 * initialises @tx 1133 struct ieee80211_tx_data *tx, ieee80211_tx_prepare() 1142 memset(tx, 0, sizeof(*tx)); ieee80211_tx_prepare() 1143 tx->skb = skb; ieee80211_tx_prepare() 1144 tx->local = local; ieee80211_tx_prepare() 1145 tx->sdata = sdata; ieee80211_tx_prepare() 1146 __skb_queue_head_init(&tx->skbs); ieee80211_tx_prepare() 1159 tx->sta = sta; ieee80211_tx_prepare() 1162 tx->sta = rcu_dereference(sdata->u.vlan.sta); ieee80211_tx_prepare() 1163 if (!tx->sta && sdata->wdev.use_4addr) ieee80211_tx_prepare() 1167 tx->sdata->control_port_protocol == tx->skb->protocol) { ieee80211_tx_prepare() 1168 tx->sta = sta_info_get_bss(sdata, hdr->addr1); ieee80211_tx_prepare() 1170 if (!tx->sta && !is_multicast_ether_addr(hdr->addr1)) ieee80211_tx_prepare() 1171 tx->sta = sta_info_get(sdata, hdr->addr1); ieee80211_tx_prepare() 1174 if (tx->sta && ieee80211_is_data_qos(hdr->frame_control) && ieee80211_tx_prepare() 1183 tid_tx = rcu_dereference(tx->sta->ampdu_mlme.tid_tx[tid]); ieee80211_tx_prepare() 1187 queued = ieee80211_tx_prep_agg(tx, skb, info, ieee80211_tx_prepare() 1196 tx->flags &= ~IEEE80211_TX_UNICAST; ieee80211_tx_prepare() 1199 tx->flags |= IEEE80211_TX_UNICAST; ieee80211_tx_prepare() 1202 if (!(tx->flags & IEEE80211_TX_UNICAST) || ieee80211_tx_prepare() 1208 if (!tx->sta) ieee80211_tx_prepare() 1210 else if (test_and_clear_sta_flag(tx->sta, WLAN_STA_CLEAR_PS_FILT)) ieee80211_tx_prepare() 1353 * later transmission from the tx-pending skb_queue_walk_safe() 1449 static int invoke_tx_handlers(struct ieee80211_tx_data *tx) invoke_tx_handlers() argument 1451 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb); invoke_tx_handlers() 1456 res = txh(tx); \ invoke_tx_handlers() 1466 if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)) invoke_tx_handlers() 1470 __skb_queue_tail(&tx->skbs, tx->skb); invoke_tx_handlers() 1471 tx->skb = NULL; invoke_tx_handlers() 1478 /* handlers after fragment must be aware of tx info fragmentation! 
*/ invoke_tx_handlers() 1481 if (!(tx->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)) invoke_tx_handlers() 1487 I802_DEBUG_INC(tx->local->tx_handlers_drop); invoke_tx_handlers() 1488 if (tx->skb) invoke_tx_handlers() 1489 ieee80211_free_txskb(&tx->local->hw, tx->skb); invoke_tx_handlers() 1491 ieee80211_purge_tx_queue(&tx->local->hw, &tx->skbs); invoke_tx_handlers() 1494 I802_DEBUG_INC(tx->local->tx_handlers_queued); invoke_tx_handlers() 1507 struct ieee80211_tx_data tx; ieee80211_tx_prepare_skb() local 1510 if (ieee80211_tx_prepare(sdata, &tx, NULL, skb) == TX_DROP) ieee80211_tx_prepare_skb() 1517 if (invoke_tx_handlers(&tx)) ieee80211_tx_prepare_skb() 1521 if (tx.sta) ieee80211_tx_prepare_skb() 1522 *sta = &tx.sta->sta; ieee80211_tx_prepare_skb() 1528 skb2 = __skb_dequeue(&tx.skbs); ieee80211_tx_prepare_skb() 1529 if (WARN_ON(skb2 != skb || !skb_queue_empty(&tx.skbs))) { ieee80211_tx_prepare_skb() 1531 ieee80211_purge_tx_queue(hw, &tx.skbs); ieee80211_tx_prepare_skb() 1547 struct ieee80211_tx_data tx; ieee80211_tx() local 1558 /* initialises tx */ ieee80211_tx() 1560 res_prepare = ieee80211_tx_prepare(sdata, &tx, sta, skb); ieee80211_tx() 1575 if (!invoke_tx_handlers(&tx)) ieee80211_tx() 1576 result = __ieee80211_tx(local, &tx.skbs, led_len, ieee80211_tx() 1577 tx.sta, txpending); ieee80211_tx() 2438 struct ieee80211_tx_data tx = { ieee80211_build_data_template() local 2457 tx.sta = sta_info_get(sdata, hdr->addr1); ieee80211_build_data_template() 2458 tx.skb = skb; ieee80211_build_data_template() 2460 if (ieee80211_tx_h_select_key(&tx) != TX_CONTINUE) { ieee80211_build_data_template() 3199 struct ieee80211_tx_data tx; ieee80211_get_buffered_bc() local 3248 if (!ieee80211_tx_prepare(sdata, &tx, NULL, skb)) ieee80211_get_buffered_bc() 3255 tx.flags |= IEEE80211_TX_PS_BUFFERED; ieee80211_get_buffered_bc() 3258 if (invoke_tx_handlers(&tx)) ieee80211_get_buffered_bc() 3376 * requirements are that we do not come into tx with bhs on. __ieee80211_tx_skb_tid_band() 1132 ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata, struct ieee80211_tx_data *tx, struct sta_info *sta, struct sk_buff *skb) ieee80211_tx_prepare() argument
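
invoke_tx_handlers() in the tx.c excerpt above pushes each frame through a chain of ieee80211_tx_h_* handlers, where any handler can return TX_DROP or TX_QUEUED to stop the chain. mac80211 wires the chain up with a CALL_TXH macro; the following is only a stripped-down, self-contained illustration of the pattern with invented handlers:

    /* Illustration of the tx-handler-chain pattern (not mac80211 code). */
    #include <stdio.h>

    enum tx_result { TX_CONTINUE, TX_DROP, TX_QUEUED };

    struct tx_data { int has_key; int len; };

    static enum tx_result h_check_assoc(struct tx_data *tx)
    {
            return tx->len > 0 ? TX_CONTINUE : TX_DROP;
    }

    static enum tx_result h_select_key(struct tx_data *tx)
    {
            return tx->has_key ? TX_CONTINUE : TX_DROP;
    }

    static enum tx_result invoke_handlers(struct tx_data *tx)
    {
            enum tx_result (*handlers[])(struct tx_data *) = {
                    h_check_assoc,
                    h_select_key,
            };
            unsigned int i;
            enum tx_result res;

            for (i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
                    res = handlers[i](tx);
                    if (res != TX_CONTINUE)     /* drop or queue: stop here */
                            return res;
            }
            return TX_CONTINUE;
    }

    int main(void)
    {
            struct tx_data tx = { .has_key = 1, .len = 42 };

            printf("result = %d\n", invoke_handlers(&tx));
            return 0;
    }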
|
H A D | Makefile | 10 ht.o agg-tx.o agg-rx.o \ 25 tx.o \
|
H A D | wpa.c | 30 ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) ieee80211_tx_h_michael_mic_add() argument 36 struct sk_buff *skb = tx->skb; ieee80211_tx_h_michael_mic_add() 41 if (!tx->key || tx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP || ieee80211_tx_h_michael_mic_add() 59 tx->local->ops->set_frag_threshold) && ieee80211_tx_h_michael_mic_add() 60 !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) { ieee80211_tx_h_michael_mic_add() 76 key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]; ieee80211_tx_h_michael_mic_add() 184 static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) tkip_encrypt_skb() argument 187 struct ieee80211_key *key = tx->key; tkip_encrypt_skb() 223 key->u.tkip.tx.iv16++; tkip_encrypt_skb() 224 if (key->u.tkip.tx.iv16 == 0) tkip_encrypt_skb() 225 key->u.tkip.tx.iv32++; tkip_encrypt_skb() 236 return ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm, tkip_encrypt_skb() 242 ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) ieee80211_crypto_tkip_encrypt() argument 246 ieee80211_tx_set_protected(tx); ieee80211_crypto_tkip_encrypt() 248 skb_queue_walk(&tx->skbs, skb) { ieee80211_crypto_tkip_encrypt() 249 if (tkip_encrypt_skb(tx, skb) < 0) ieee80211_crypto_tkip_encrypt() 398 static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb, ccmp_encrypt_skb() argument 402 struct ieee80211_key *key = tx->key; ccmp_encrypt_skb() 472 ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx, ieee80211_crypto_ccmp_encrypt() argument 477 ieee80211_tx_set_protected(tx); ieee80211_crypto_ccmp_encrypt() 479 skb_queue_walk(&tx->skbs, skb) { ieee80211_crypto_ccmp_encrypt() 480 if (ccmp_encrypt_skb(tx, skb, mic_len) < 0) ieee80211_crypto_ccmp_encrypt() 624 static int gcmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) gcmp_encrypt_skb() argument 627 struct ieee80211_key *key = tx->key; gcmp_encrypt_skb() 697 ieee80211_crypto_gcmp_encrypt(struct ieee80211_tx_data *tx) ieee80211_crypto_gcmp_encrypt() argument 701 ieee80211_tx_set_protected(tx); ieee80211_crypto_gcmp_encrypt() 703 skb_queue_walk(&tx->skbs, skb) { ieee80211_crypto_gcmp_encrypt() 704 if (gcmp_encrypt_skb(tx, skb) < 0) ieee80211_crypto_gcmp_encrypt() 777 ieee80211_crypto_cs_encrypt(struct ieee80211_tx_data *tx, ieee80211_crypto_cs_encrypt() argument 781 struct ieee80211_key *key = tx->key; ieee80211_crypto_cs_encrypt() 915 ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx) ieee80211_crypto_aes_cmac_encrypt() argument 919 struct ieee80211_key *key = tx->key; ieee80211_crypto_aes_cmac_encrypt() 924 if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) ieee80211_crypto_aes_cmac_encrypt() 927 skb = skb_peek(&tx->skbs); ieee80211_crypto_aes_cmac_encrypt() 959 ieee80211_crypto_aes_cmac_256_encrypt(struct ieee80211_tx_data *tx) ieee80211_crypto_aes_cmac_256_encrypt() argument 963 struct ieee80211_key *key = tx->key; ieee80211_crypto_aes_cmac_256_encrypt() 968 if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) ieee80211_crypto_aes_cmac_256_encrypt() 971 skb = skb_peek(&tx->skbs); ieee80211_crypto_aes_cmac_256_encrypt() 1102 ieee80211_crypto_aes_gmac_encrypt(struct ieee80211_tx_data *tx) ieee80211_crypto_aes_gmac_encrypt() argument 1106 struct ieee80211_key *key = tx->key; ieee80211_crypto_aes_gmac_encrypt() 1113 if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) ieee80211_crypto_aes_gmac_encrypt() 1116 skb = skb_peek(&tx->skbs); ieee80211_crypto_aes_gmac_encrypt() 1206 ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx) 
ieee80211_crypto_hw_encrypt() argument 1212 skb_queue_walk(&tx->skbs, skb) { ieee80211_crypto_hw_encrypt() 1219 if (tx->key->flags & KEY_FLAG_CIPHER_SCHEME) { ieee80211_crypto_hw_encrypt() 1220 res = ieee80211_crypto_cs_encrypt(tx, skb); ieee80211_crypto_hw_encrypt() 1226 ieee80211_tx_set_protected(tx); ieee80211_crypto_hw_encrypt()
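
tkip_encrypt_skb() in the wpa.c excerpt bumps the TKIP sequence counter per frame: the 16-bit iv16 increments and carries into the 32-bit iv32 when it wraps. A tiny standalone illustration of that counter update (not the mac80211 key structures):

    /* The 48-bit TKIP TSC split into iv16/iv32, as in tkip_encrypt_skb(). */
    #include <stdint.h>
    #include <stdio.h>

    struct tkip_tx_ctr {
            uint16_t iv16;
            uint32_t iv32;
    };

    static void tkip_bump_tsc(struct tkip_tx_ctr *ctr)
    {
            ctr->iv16++;
            if (ctr->iv16 == 0)         /* wrapped: carry into the high part */
                    ctr->iv32++;
    }

    int main(void)
    {
            struct tkip_tx_ctr ctr = { .iv16 = 0xffff, .iv32 = 0 };

            tkip_bump_tsc(&ctr);
            printf("iv16=%u iv32=%u\n", ctr.iv16, ctr.iv32);   /* 0, 1 */
            return 0;
    }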
|
H A D | wep.c | 303 static int wep_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) wep_encrypt_skb() argument 309 if (ieee80211_wep_encrypt(tx->local, skb, tx->key->conf.key, wep_encrypt_skb() 310 tx->key->conf.keylen, wep_encrypt_skb() 311 tx->key->conf.keyidx)) wep_encrypt_skb() 315 if (!ieee80211_wep_add_iv(tx->local, skb, wep_encrypt_skb() 316 tx->key->conf.keylen, wep_encrypt_skb() 317 tx->key->conf.keyidx)) wep_encrypt_skb() 325 ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx) ieee80211_crypto_wep_encrypt() argument 329 ieee80211_tx_set_protected(tx); ieee80211_crypto_wep_encrypt() 331 skb_queue_walk(&tx->skbs, skb) { ieee80211_crypto_wep_encrypt() 332 if (wep_encrypt_skb(tx, skb) < 0) { ieee80211_crypto_wep_encrypt() 333 I802_DEBUG_INC(tx->local->tx_handlers_drop_wep); ieee80211_crypto_wep_encrypt()
|
H A D | wep.h | 32 ieee80211_crypto_wep_encrypt(struct ieee80211_tx_data *tx);
|
/linux-4.1.27/drivers/net/wireless/ath/carl9170/ |
H A D | Makefile | 1 carl9170-objs := main.o usb.o cmd.o mac.o phy.o led.o fw.o tx.o rx.o
|
/linux-4.1.27/drivers/staging/gdm72xx/ |
H A D | gdm_sdio.c | 41 static struct sdio_tx *alloc_tx_struct(struct tx_cxt *tx) alloc_tx_struct() argument 54 t->tx_cxt = tx; alloc_tx_struct() 83 static struct sdio_tx *get_tx_struct(struct tx_cxt *tx, int *no_spc) get_tx_struct() argument 87 if (list_empty(&tx->free_list)) get_tx_struct() 90 t = list_entry(tx->free_list.prev, struct sdio_tx, list); get_tx_struct() 93 *no_spc = list_empty(&tx->free_list) ? 1 : 0; get_tx_struct() 99 static void put_tx_struct(struct tx_cxt *tx, struct sdio_tx *t) put_tx_struct() argument 101 list_add_tail(&t->list, &tx->free_list); put_tx_struct() 126 struct tx_cxt *tx = &sdev->tx; release_sdio() local 131 kfree(tx->sdu_buf); release_sdio() 133 list_for_each_entry_safe(t, t_next, &tx->free_list, list) { release_sdio() 138 list_for_each_entry_safe(t, t_next, &tx->sdu_list, list) { release_sdio() 143 list_for_each_entry_safe(t, t_next, &tx->hci_list, list) { release_sdio() 164 struct tx_cxt *tx = &sdev->tx; init_sdio() local 169 INIT_LIST_HEAD(&tx->free_list); init_sdio() 170 INIT_LIST_HEAD(&tx->sdu_list); init_sdio() 171 INIT_LIST_HEAD(&tx->hci_list); init_sdio() 173 spin_lock_init(&tx->lock); init_sdio() 175 tx->sdu_buf = kmalloc(SDU_TX_BUF_SIZE, GFP_KERNEL); init_sdio() 176 if (tx->sdu_buf == NULL) init_sdio() 180 t = alloc_tx_struct(tx); init_sdio() 185 list_add(&t->list, &tx->free_list); init_sdio() 248 static void send_sdu(struct sdio_func *func, struct tx_cxt *tx) send_sdu() argument 257 spin_lock_irqsave(&tx->lock, flags); send_sdu() 260 list_for_each_entry(t, &tx->sdu_list, list) { send_sdu() 266 memcpy(tx->sdu_buf + pos, t->buf, t->len); send_sdu() 267 memset(tx->sdu_buf + pos + t->len, 0, estlen - t->len); send_sdu() 272 hci = (struct hci_s *)(tx->sdu_buf + TYPE_A_HEADER_SIZE); send_sdu() 277 spin_unlock_irqrestore(&tx->lock, flags); send_sdu() 280 tx->sdu_buf + TYPE_A_HEADER_SIZE); send_sdu() 285 buf = tx->sdu_buf + pos - TYPE_A_HEADER_SIZE; send_sdu() 294 spin_lock_irqsave(&tx->lock, flags); send_sdu() 296 for (l = tx->sdu_list.next, i = 0; i < aggr_num; i++, l = next) { send_sdu() 306 do_gettimeofday(&tx->sdu_stamp); send_sdu() 307 spin_unlock_irqrestore(&tx->lock, flags); send_sdu() 310 static void send_hci(struct sdio_func *func, struct tx_cxt *tx, send_hci() argument 320 spin_lock_irqsave(&tx->lock, flags); send_hci() 324 spin_unlock_irqrestore(&tx->lock, flags); send_hci() 331 struct tx_cxt *tx = &sdev->tx; do_tx() local 338 spin_lock_irqsave(&tx->lock, flags); do_tx() 339 if (!tx->can_send) { do_tx() 340 spin_unlock_irqrestore(&tx->lock, flags); do_tx() 344 if (!list_empty(&tx->hci_list)) { do_tx() 345 t = list_entry(tx->hci_list.next, struct sdio_tx, list); do_tx() 348 } else if (!tx->stop_sdu_tx && !list_empty(&tx->sdu_list)) { do_tx() 350 before = &tx->sdu_stamp; do_tx() 356 spin_unlock_irqrestore(&tx->lock, flags); do_tx() 363 spin_unlock_irqrestore(&tx->lock, flags); do_tx() 367 tx->can_send = 0; do_tx() 369 spin_unlock_irqrestore(&tx->lock, flags); do_tx() 372 send_sdu(func, tx); do_tx() 374 send_hci(func, tx, t); do_tx() 381 struct tx_cxt *tx = &sdev->tx; gdm_sdio_send() local 391 spin_lock_irqsave(&tx->lock, flags); gdm_sdio_send() 395 t = get_tx_struct(tx, &no_spc); gdm_sdio_send() 398 spin_unlock_irqrestore(&tx->lock, flags); gdm_sdio_send() 401 list_add_tail(&t->list, &tx->sdu_list); gdm_sdio_send() 409 t = alloc_tx_struct(tx); gdm_sdio_send() 411 spin_unlock_irqrestore(&tx->lock, flags); gdm_sdio_send() 414 list_add_tail(&t->list, &tx->hci_list); gdm_sdio_send() 427 if (tx->can_send) gdm_sdio_send() 430 
spin_unlock_irqrestore(&tx->lock, flags); gdm_sdio_send() 441 struct tx_cxt *tx = &sdev->tx; control_sdu_tx_flow() local 445 spin_lock_irqsave(&tx->lock, flags); control_sdu_tx_flow() 453 tx->stop_sdu_tx = 1; control_sdu_tx_flow() 456 tx->stop_sdu_tx = 0; control_sdu_tx_flow() 457 if (tx->can_send) control_sdu_tx_flow() 459 /* If free buffer for sdu tx doesn't exist, then tx queue control_sdu_tx_flow() 463 if (list_empty(&tx->free_list)) control_sdu_tx_flow() 468 spin_unlock_irqrestore(&tx->lock, flags); control_sdu_tx_flow() 476 struct tx_cxt *tx = &sdev->tx; gdm_sdio_irq() local 507 spin_lock_irqsave(&tx->lock, flags); gdm_sdio_irq() 508 tx->can_send = 1; gdm_sdio_irq() 510 if (!list_empty(&tx->sdu_list) || !list_empty(&tx->hci_list)) gdm_sdio_irq() 512 spin_unlock_irqrestore(&tx->lock, flags); gdm_sdio_irq()
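
get_tx_struct()/put_tx_struct() in the gdm_sdio.c excerpt implement a simple descriptor pool: free descriptors sit on tx->free_list, the tx->lock spinlock (taken by the caller, e.g. gdm_sdio_send()) guards the lists, and *no_spc reports when the pool has just run dry so the caller can throttle the queue. A compact sketch of that pattern with the structures trimmed to the fields the pattern needs:

    /* Sketch of the free-list tx-descriptor pool used by gdm_sdio.c. */
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct sdio_tx_sketch {
            struct list_head list;
    };

    struct tx_cxt_sketch {
            struct list_head free_list;
            spinlock_t lock;            /* taken by callers of get/put below */
    };

    /* caller holds tx->lock */
    static struct sdio_tx_sketch *get_tx_sketch(struct tx_cxt_sketch *tx,
                                                int *no_spc)
    {
            struct sdio_tx_sketch *t;

            if (list_empty(&tx->free_list))
                    return NULL;

            t = list_entry(tx->free_list.prev, struct sdio_tx_sketch, list);
            list_del(&t->list);
            *no_spc = list_empty(&tx->free_list) ? 1 : 0;   /* pool now empty? */
            return t;
    }

    /* caller holds tx->lock */
    static void put_tx_sketch(struct tx_cxt_sketch *tx,
                              struct sdio_tx_sketch *t)
    {
            list_add_tail(&t->list, &tx->free_list);
    }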
|
H A D | gdm_usb.c | 52 static struct usb_tx *alloc_tx_struct(struct tx_cxt *tx) alloc_tx_struct() argument 68 t->tx_cxt = tx; alloc_tx_struct() 112 static struct usb_tx *get_tx_struct(struct tx_cxt *tx, int *no_spc) get_tx_struct() argument 116 if (list_empty(&tx->free_list)) { get_tx_struct() 121 t = list_entry(tx->free_list.next, struct usb_tx, list); get_tx_struct() 124 *no_spc = list_empty(&tx->free_list) ? 1 : 0; get_tx_struct() 130 static void put_tx_struct(struct tx_cxt *tx, struct usb_tx *t) put_tx_struct() argument 132 list_add_tail(&t->list, &tx->free_list); put_tx_struct() 162 struct tx_cxt *tx = &udev->tx; release_usb() local 168 spin_lock_irqsave(&tx->lock, flags); release_usb() 170 list_for_each_entry_safe(t, t_next, &tx->sdu_list, list) { release_usb() 175 list_for_each_entry_safe(t, t_next, &tx->hci_list, list) { release_usb() 180 list_for_each_entry_safe(t, t_next, &tx->free_list, list) { release_usb() 185 spin_unlock_irqrestore(&tx->lock, flags); release_usb() 205 struct tx_cxt *tx = &udev->tx; init_usb() local 211 INIT_LIST_HEAD(&tx->free_list); init_usb() 212 INIT_LIST_HEAD(&tx->sdu_list); init_usb() 213 INIT_LIST_HEAD(&tx->hci_list); init_usb() 215 INIT_LIST_HEAD(&tx->pending_list); init_usb() 221 spin_lock_init(&tx->lock); init_usb() 224 spin_lock_irqsave(&tx->lock, flags); init_usb() 226 t = alloc_tx_struct(tx); init_usb() 228 spin_unlock_irqrestore(&tx->lock, flags); init_usb() 232 list_add(&t->list, &tx->free_list); init_usb() 234 spin_unlock_irqrestore(&tx->lock, flags); init_usb() 255 struct tx_cxt *tx = t->tx_cxt; __gdm_usb_send_complete() local 271 put_tx_struct(tx, t); __gdm_usb_send_complete() 279 struct tx_cxt *tx = t->tx_cxt; gdm_usb_send_complete() local 282 spin_lock_irqsave(&tx->lock, flags); gdm_usb_send_complete() 284 spin_unlock_irqrestore(&tx->lock, flags); gdm_usb_send_complete() 292 struct tx_cxt *tx = &udev->tx; gdm_usb_send() local 311 spin_lock_irqsave(&tx->lock, flags); gdm_usb_send() 315 t = get_tx_struct(tx, &no_spc); gdm_usb_send() 318 spin_unlock_irqrestore(&tx->lock, flags); gdm_usb_send() 321 list_add_tail(&t->list, &tx->sdu_list); gdm_usb_send() 323 t = alloc_tx_struct(tx); gdm_usb_send() 325 spin_unlock_irqrestore(&tx->lock, flags); gdm_usb_send() 328 list_add_tail(&t->list, &tx->hci_list); gdm_usb_send() 348 list_add_tail(&t->p_list, &tx->pending_list); gdm_usb_send() 356 list_add_tail(&t->p_list, &tx->pending_list); gdm_usb_send() 390 spin_unlock_irqrestore(&tx->lock, flags); gdm_usb_send() 400 spin_unlock_irqrestore(&tx->lock, flags); gdm_usb_send() 409 struct tx_cxt *tx = &udev->tx; gdm_usb_rcv_complete() local 419 spin_lock_irqsave(&tx->lock, flags); gdm_usb_rcv_complete() 430 list_for_each_entry(t, &tx->sdu_list, list) gdm_usb_rcv_complete() 434 list_for_each_entry(t, &tx->sdu_list, list) { gdm_usb_rcv_complete() 437 /* If free buffer for sdu tx doesn't gdm_usb_rcv_complete() 438 * exist, then tx queue should not be gdm_usb_rcv_complete() 442 if (list_empty(&tx->free_list)) gdm_usb_rcv_complete() 455 spin_unlock_irqrestore(&tx->lock, flags); gdm_usb_rcv_complete() 497 struct tx_cxt *tx = &udev->tx; do_pm_control() local 505 spin_lock_irqsave(&tx->lock, flags); do_pm_control() 507 (!list_empty(&tx->hci_list) || !list_empty(&tx->sdu_list))) { do_pm_control() 510 list_for_each_entry_safe(t, temp, &tx->pending_list, p_list) { do_pm_control() 520 spin_unlock_irqrestore(&tx->lock, flags); do_pm_control() 698 struct tx_cxt *tx; k_mode_thread() local 709 tx = &udev->tx; k_mode_thread() 726 spin_lock_irqsave(&tx->lock, flags); 
k_mode_thread() 728 list_for_each_entry_safe(t, temp, &tx->pending_list, k_mode_thread() 740 spin_unlock_irqrestore(&tx->lock, flags); k_mode_thread()
|
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/socklnd/ |
H A D | socklnd_proto.c | 55 ksock_tx_t *tx = conn->ksnc_tx_carrier; ksocknal_next_tx_carrier() local 59 LASSERT(tx != NULL); ksocknal_next_tx_carrier() 62 if (tx->tx_list.next == &conn->ksnc_tx_queue) { ksocknal_next_tx_carrier() 66 conn->ksnc_tx_carrier = list_entry(tx->tx_list.next, ksocknal_next_tx_carrier() 68 LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type); ksocknal_next_tx_carrier() 76 ksock_tx_t *tx = conn->ksnc_tx_carrier; ksocknal_queue_tx_zcack_v2() local 83 * . no tx can piggyback cookie of tx_ack (or cookie), just ksocknal_queue_tx_zcack_v2() 85 * . There is tx can piggyback cookie of tx_ack (or cookie), ksocknal_queue_tx_zcack_v2() 86 * piggyback the cookie and return the tx. ksocknal_queue_tx_zcack_v2() 88 if (tx == NULL) { ksocknal_queue_tx_zcack_v2() 97 if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) { ksocknal_queue_tx_zcack_v2() 98 /* tx is noop zc-ack, can't piggyback zc-ack cookie */ ksocknal_queue_tx_zcack_v2() 105 LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET); ksocknal_queue_tx_zcack_v2() 106 LASSERT(tx->tx_msg.ksm_zc_cookies[1] == 0); ksocknal_queue_tx_zcack_v2() 112 tx->tx_msg.ksm_zc_cookies[1] = cookie; ksocknal_queue_tx_zcack_v2() 122 ksock_tx_t *tx = conn->ksnc_tx_carrier; ksocknal_queue_tx_msg_v2() local 129 * and replace the NOOP tx, and return the NOOP tx. ksocknal_queue_tx_msg_v2() 131 if (tx == NULL) { /* nothing on queue */ ksocknal_queue_tx_msg_v2() 137 if (tx->tx_msg.ksm_type == KSOCK_MSG_LNET) { /* nothing to carry */ ksocknal_queue_tx_msg_v2() 142 LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP); ksocknal_queue_tx_msg_v2() 145 tx_msg->tx_msg.ksm_zc_cookies[1] = tx->tx_msg.ksm_zc_cookies[1]; ksocknal_queue_tx_msg_v2() 149 list_add(&tx_msg->tx_list, &tx->tx_list); ksocknal_queue_tx_msg_v2() 150 list_del(&tx->tx_list); ksocknal_queue_tx_msg_v2() 152 return tx; ksocknal_queue_tx_msg_v2() 159 ksock_tx_t *tx; ksocknal_queue_tx_zcack_v3() local 168 tx = conn->ksnc_tx_carrier; ksocknal_queue_tx_zcack_v3() 169 if (tx == NULL) { ksocknal_queue_tx_zcack_v3() 186 if (tx->tx_msg.ksm_zc_cookies[1] == SOCKNAL_KEEPALIVE_PING) { ksocknal_queue_tx_zcack_v3() 188 LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0); ksocknal_queue_tx_zcack_v3() 189 tx->tx_msg.ksm_zc_cookies[1] = cookie; ksocknal_queue_tx_zcack_v3() 193 if (cookie == tx->tx_msg.ksm_zc_cookies[0] || ksocknal_queue_tx_zcack_v3() 194 cookie == tx->tx_msg.ksm_zc_cookies[1]) { ksocknal_queue_tx_zcack_v3() 200 if (tx->tx_msg.ksm_zc_cookies[0] == 0) { ksocknal_queue_tx_zcack_v3() 201 /* NOOP tx has only one ZC-ACK cookie, can carry at least one more */ ksocknal_queue_tx_zcack_v3() 202 if (tx->tx_msg.ksm_zc_cookies[1] > cookie) { ksocknal_queue_tx_zcack_v3() 203 tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1]; ksocknal_queue_tx_zcack_v3() 204 tx->tx_msg.ksm_zc_cookies[1] = cookie; ksocknal_queue_tx_zcack_v3() 206 tx->tx_msg.ksm_zc_cookies[0] = cookie; ksocknal_queue_tx_zcack_v3() 209 if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) { ksocknal_queue_tx_zcack_v3() 219 if (tx->tx_msg.ksm_zc_cookies[0] > tx->tx_msg.ksm_zc_cookies[1]) { ksocknal_queue_tx_zcack_v3() 223 LASSERT(tx->tx_msg.ksm_zc_cookies[0] - ksocknal_queue_tx_zcack_v3() 224 tx->tx_msg.ksm_zc_cookies[1] <= 2); ksocknal_queue_tx_zcack_v3() 226 if (tx->tx_msg.ksm_zc_cookies[0] - ksocknal_queue_tx_zcack_v3() 227 tx->tx_msg.ksm_zc_cookies[1] == 2) { ksocknal_queue_tx_zcack_v3() 228 if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1) ksocknal_queue_tx_zcack_v3() 230 } else if (cookie == tx->tx_msg.ksm_zc_cookies[1] - 1) { 
ksocknal_queue_tx_zcack_v3() 231 tmp = tx->tx_msg.ksm_zc_cookies[1]; ksocknal_queue_tx_zcack_v3() 232 } else if (cookie == tx->tx_msg.ksm_zc_cookies[0] + 1) { ksocknal_queue_tx_zcack_v3() 233 tmp = tx->tx_msg.ksm_zc_cookies[0]; ksocknal_queue_tx_zcack_v3() 238 tx->tx_msg.ksm_zc_cookies[0] = tmp - 1; ksocknal_queue_tx_zcack_v3() 239 tx->tx_msg.ksm_zc_cookies[1] = tmp + 1; ksocknal_queue_tx_zcack_v3() 245 if (cookie >= tx->tx_msg.ksm_zc_cookies[0] && ksocknal_queue_tx_zcack_v3() 246 cookie <= tx->tx_msg.ksm_zc_cookies[1]) { ksocknal_queue_tx_zcack_v3() 252 if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1) { ksocknal_queue_tx_zcack_v3() 253 tx->tx_msg.ksm_zc_cookies[1] = cookie; ksocknal_queue_tx_zcack_v3() 257 if (cookie == tx->tx_msg.ksm_zc_cookies[0] - 1) { ksocknal_queue_tx_zcack_v3() 258 tx->tx_msg.ksm_zc_cookies[0] = cookie; ksocknal_queue_tx_zcack_v3() 266 /* the next tx can piggyback at least 1 ACK */ ksocknal_queue_tx_zcack_v3() 274 ksocknal_match_tx(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk) ksocknal_match_tx() argument 283 if (tx == NULL || tx->tx_lnetmsg == NULL) { ksocknal_match_tx() 287 nob = tx->tx_lnetmsg->msg_len + ksocknal_match_tx() 318 ksocknal_match_tx_v3(ksock_conn_t *conn, ksock_tx_t *tx, int nonblk) ksocknal_match_tx_v3() argument 322 if (tx == NULL || tx->tx_lnetmsg == NULL) ksocknal_match_tx_v3() 325 nob = tx->tx_lnetmsg->msg_len + sizeof(ksock_msg_t); ksocknal_match_tx_v3() 337 else if (tx == NULL || tx->tx_lnetmsg == NULL) ksocknal_match_tx_v3() 366 ksock_tx_t *tx; ksocknal_handle_zcreq() local 392 tx = ksocknal_alloc_tx_noop(cookie, !!remote); ksocknal_handle_zcreq() 393 if (tx == NULL) ksocknal_handle_zcreq() 396 rc = ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id); ksocknal_handle_zcreq() 400 ksocknal_free_tx(tx); ksocknal_handle_zcreq() 409 ksock_tx_t *tx; ksocknal_handle_zcack() local 427 list_for_each_entry_safe(tx, tmp, ksocknal_handle_zcack() 429 __u64 c = tx->tx_msg.ksm_zc_cookies[0]; ksocknal_handle_zcack() 432 tx->tx_msg.ksm_zc_cookies[0] = 0; ksocknal_handle_zcack() 433 list_del(&tx->tx_zc_list); ksocknal_handle_zcack() 434 list_add(&tx->tx_zc_list, &zlist); ksocknal_handle_zcack() 444 tx = list_entry(zlist.next, ksock_tx_t, tx_zc_list); ksocknal_handle_zcack() 445 list_del(&tx->tx_zc_list); ksocknal_handle_zcack() 446 ksocknal_tx_decref(tx); ksocknal_handle_zcack() 714 ksocknal_pack_msg_v1(ksock_tx_t *tx) ksocknal_pack_msg_v1() argument 717 LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); ksocknal_pack_msg_v1() 718 LASSERT(tx->tx_lnetmsg != NULL); ksocknal_pack_msg_v1() 720 tx->tx_iov[0].iov_base = &tx->tx_lnetmsg->msg_hdr; ksocknal_pack_msg_v1() 721 tx->tx_iov[0].iov_len = sizeof(lnet_hdr_t); ksocknal_pack_msg_v1() 723 tx->tx_resid = tx->tx_nob = tx->tx_lnetmsg->msg_len + sizeof(lnet_hdr_t); ksocknal_pack_msg_v1() 727 ksocknal_pack_msg_v2(ksock_tx_t *tx) ksocknal_pack_msg_v2() argument 729 tx->tx_iov[0].iov_base = &tx->tx_msg; ksocknal_pack_msg_v2() 731 if (tx->tx_lnetmsg != NULL) { ksocknal_pack_msg_v2() 732 LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); ksocknal_pack_msg_v2() 734 tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr; ksocknal_pack_msg_v2() 735 tx->tx_iov[0].iov_len = sizeof(ksock_msg_t); ksocknal_pack_msg_v2() 736 tx->tx_resid = tx->tx_nob = sizeof(ksock_msg_t) + tx->tx_lnetmsg->msg_len; ksocknal_pack_msg_v2() 738 LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP); ksocknal_pack_msg_v2() 740 tx->tx_iov[0].iov_len = offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr); ksocknal_pack_msg_v2() 741 tx->tx_resid = tx->tx_nob = 
offsetof(ksock_msg_t, ksm_u.lnetmsg.ksnm_hdr); ksocknal_pack_msg_v2()
|
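The v3 ZC-ACK queueing shown above folds a newly arrived zero-copy ACK cookie into a NOOP tx that is already waiting on the connection, either extending a contiguous cookie range or giving up so the caller queues a separate ACK. Below is a minimal, self-contained userspace sketch of just that range-merging idea; the names (noop_ack, piggyback_cookie) are invented, and the kernel's locking, keepalive case, and two-single-cookie encoding are deliberately left out.

#include <stdint.h>
#include <stdio.h>

struct noop_ack {
	uint64_t lo;	/* lowest cookie covered by the pending NOOP  */
	uint64_t hi;	/* highest cookie covered by the pending NOOP */
};

/* Fold 'cookie' into the pending range if it touches or falls inside
 * it; return 0 when the caller must queue a separate ACK instead. */
static int piggyback_cookie(struct noop_ack *ack, uint64_t cookie)
{
	if (cookie >= ack->lo && cookie <= ack->hi)
		return 1;			/* already covered       */
	if (cookie == ack->hi + 1) {
		ack->hi = cookie;		/* extend the range up   */
		return 1;
	}
	if (cookie == ack->lo - 1) {
		ack->lo = cookie;		/* extend the range down */
		return 1;
	}
	return 0;				/* not adjacent: new ACK */
}

int main(void)
{
	struct noop_ack ack = { .lo = 5, .hi = 5 };

	printf("%d\n", piggyback_cookie(&ack, 6));	/* 1: range [5,6]  */
	printf("%d\n", piggyback_cookie(&ack, 4));	/* 1: range [4,6]  */
	printf("%d\n", piggyback_cookie(&ack, 9));	/* 0: too far away */
	return 0;
}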
H A D | socklnd_cb.c | 32 ksock_tx_t *tx = NULL; ksocknal_alloc_tx() local 37 /* searching for a noop tx in free list */ ksocknal_alloc_tx() 41 tx = list_entry(ksocknal_data.ksnd_idle_noop_txs. \ ksocknal_alloc_tx() 43 LASSERT(tx->tx_desc_size == size); ksocknal_alloc_tx() 44 list_del(&tx->tx_list); ksocknal_alloc_tx() 50 if (tx == NULL) ksocknal_alloc_tx() 51 LIBCFS_ALLOC(tx, size); ksocknal_alloc_tx() 53 if (tx == NULL) ksocknal_alloc_tx() 56 atomic_set(&tx->tx_refcount, 1); ksocknal_alloc_tx() 57 tx->tx_zc_aborted = 0; ksocknal_alloc_tx() 58 tx->tx_zc_capable = 0; ksocknal_alloc_tx() 59 tx->tx_zc_checked = 0; ksocknal_alloc_tx() 60 tx->tx_desc_size = size; ksocknal_alloc_tx() 64 return tx; ksocknal_alloc_tx() 70 ksock_tx_t *tx; ksocknal_alloc_tx_noop() local 72 tx = ksocknal_alloc_tx(KSOCK_MSG_NOOP, KSOCK_NOOP_TX_SIZE); ksocknal_alloc_tx_noop() 73 if (tx == NULL) { ksocknal_alloc_tx_noop() 74 CERROR("Can't allocate noop tx desc\n"); ksocknal_alloc_tx_noop() 78 tx->tx_conn = NULL; ksocknal_alloc_tx_noop() 79 tx->tx_lnetmsg = NULL; ksocknal_alloc_tx_noop() 80 tx->tx_kiov = NULL; ksocknal_alloc_tx_noop() 81 tx->tx_nkiov = 0; ksocknal_alloc_tx_noop() 82 tx->tx_iov = tx->tx_frags.virt.iov; ksocknal_alloc_tx_noop() 83 tx->tx_niov = 1; ksocknal_alloc_tx_noop() 84 tx->tx_nonblk = nonblk; ksocknal_alloc_tx_noop() 86 socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP); ksocknal_alloc_tx_noop() 87 tx->tx_msg.ksm_zc_cookies[1] = cookie; ksocknal_alloc_tx_noop() 89 return tx; ksocknal_alloc_tx_noop() 94 ksocknal_free_tx (ksock_tx_t *tx) ksocknal_free_tx() argument 98 if (tx->tx_lnetmsg == NULL && tx->tx_desc_size == KSOCK_NOOP_TX_SIZE) { ksocknal_free_tx() 99 /* it's a noop tx */ ksocknal_free_tx() 102 list_add(&tx->tx_list, &ksocknal_data.ksnd_idle_noop_txs); ksocknal_free_tx() 106 LIBCFS_FREE(tx, tx->tx_desc_size); ksocknal_free_tx() 111 ksocknal_send_iov (ksock_conn_t *conn, ksock_tx_t *tx) ksocknal_send_iov() argument 113 struct kvec *iov = tx->tx_iov; ksocknal_send_iov() 117 LASSERT (tx->tx_niov > 0); ksocknal_send_iov() 119 /* Never touch tx->tx_iov inside ksocknal_lib_send_iov() */ ksocknal_send_iov() 120 rc = ksocknal_lib_send_iov(conn, tx); ksocknal_send_iov() 126 LASSERT (nob <= tx->tx_resid); ksocknal_send_iov() 127 tx->tx_resid -= nob; ksocknal_send_iov() 131 LASSERT (tx->tx_niov > 0); ksocknal_send_iov() 140 tx->tx_iov = ++iov; ksocknal_send_iov() 141 tx->tx_niov--; ksocknal_send_iov() 148 ksocknal_send_kiov (ksock_conn_t *conn, ksock_tx_t *tx) ksocknal_send_kiov() argument 150 lnet_kiov_t *kiov = tx->tx_kiov; ksocknal_send_kiov() 154 LASSERT (tx->tx_niov == 0); ksocknal_send_kiov() 155 LASSERT (tx->tx_nkiov > 0); ksocknal_send_kiov() 157 /* Never touch tx->tx_kiov inside ksocknal_lib_send_kiov() */ ksocknal_send_kiov() 158 rc = ksocknal_lib_send_kiov(conn, tx); ksocknal_send_kiov() 164 LASSERT (nob <= tx->tx_resid); ksocknal_send_kiov() 165 tx->tx_resid -= nob; ksocknal_send_kiov() 169 LASSERT(tx->tx_nkiov > 0); ksocknal_send_kiov() 178 tx->tx_kiov = ++kiov; ksocknal_send_kiov() 179 tx->tx_nkiov--; ksocknal_send_kiov() 186 ksocknal_transmit (ksock_conn_t *conn, ksock_tx_t *tx) ksocknal_transmit() argument 196 LASSERT (tx->tx_resid != 0); ksocknal_transmit() 209 } else if (tx->tx_niov != 0) { ksocknal_transmit() 210 rc = ksocknal_send_iov (conn, tx); ksocknal_transmit() 212 rc = ksocknal_send_kiov (conn, tx); ksocknal_transmit() 245 } while (tx->tx_resid != 0); ksocknal_transmit() 389 ksocknal_tx_done (lnet_ni_t *ni, ksock_tx_t *tx) ksocknal_tx_done() argument 391 lnet_msg_t *lnetmsg = 
tx->tx_lnetmsg; ksocknal_tx_done() 392 int rc = (tx->tx_resid == 0 && !tx->tx_zc_aborted) ? 0 : -EIO; ksocknal_tx_done() 394 LASSERT(ni != NULL || tx->tx_conn != NULL); ksocknal_tx_done() 396 if (tx->tx_conn != NULL) ksocknal_tx_done() 397 ksocknal_conn_decref(tx->tx_conn); ksocknal_tx_done() 399 if (ni == NULL && tx->tx_conn != NULL) ksocknal_tx_done() 400 ni = tx->tx_conn->ksnc_peer->ksnp_ni; ksocknal_tx_done() 402 ksocknal_free_tx (tx); ksocknal_tx_done() 410 ksock_tx_t *tx; ksocknal_txlist_done() local 413 tx = list_entry (txlist->next, ksock_tx_t, tx_list); ksocknal_txlist_done() 415 if (error && tx->tx_lnetmsg != NULL) { ksocknal_txlist_done() 417 le32_to_cpu (tx->tx_lnetmsg->msg_hdr.type), ksocknal_txlist_done() 418 le32_to_cpu (tx->tx_lnetmsg->msg_hdr.payload_length), ksocknal_txlist_done() 419 libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.src_nid)), ksocknal_txlist_done() 420 libcfs_nid2str(le64_to_cpu(tx->tx_lnetmsg->msg_hdr.dest_nid))); ksocknal_txlist_done() 425 list_del (&tx->tx_list); ksocknal_txlist_done() 427 LASSERT (atomic_read(&tx->tx_refcount) == 1); ksocknal_txlist_done() 428 ksocknal_tx_done (ni, tx); ksocknal_txlist_done() 433 ksocknal_check_zc_req(ksock_tx_t *tx) ksocknal_check_zc_req() argument 435 ksock_conn_t *conn = tx->tx_conn; ksocknal_check_zc_req() 438 /* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx ksocknal_check_zc_req() 442 * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on ksocknal_check_zc_req() 444 LASSERT (tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); ksocknal_check_zc_req() 445 LASSERT (tx->tx_zc_capable); ksocknal_check_zc_req() 447 tx->tx_zc_checked = 1; ksocknal_check_zc_req() 453 /* assign cookie and queue tx to pending list, it will be released when ksocknal_check_zc_req() 456 ksocknal_tx_addref(tx); ksocknal_check_zc_req() 461 tx->tx_deadline = ksocknal_check_zc_req() 464 LASSERT (tx->tx_msg.ksm_zc_cookies[0] == 0); ksocknal_check_zc_req() 466 tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++; ksocknal_check_zc_req() 471 list_add_tail(&tx->tx_zc_list, &peer->ksnp_zc_req_list); ksocknal_check_zc_req() 477 ksocknal_uncheck_zc_req(ksock_tx_t *tx) ksocknal_uncheck_zc_req() argument 479 ksock_peer_t *peer = tx->tx_conn->ksnc_peer; ksocknal_uncheck_zc_req() 481 LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); ksocknal_uncheck_zc_req() 482 LASSERT(tx->tx_zc_capable); ksocknal_uncheck_zc_req() 484 tx->tx_zc_checked = 0; ksocknal_uncheck_zc_req() 488 if (tx->tx_msg.ksm_zc_cookies[0] == 0) { ksocknal_uncheck_zc_req() 494 tx->tx_msg.ksm_zc_cookies[0] = 0; ksocknal_uncheck_zc_req() 495 list_del(&tx->tx_zc_list); ksocknal_uncheck_zc_req() 499 ksocknal_tx_decref(tx); ksocknal_uncheck_zc_req() 503 ksocknal_process_transmit (ksock_conn_t *conn, ksock_tx_t *tx) ksocknal_process_transmit() argument 507 if (tx->tx_zc_capable && !tx->tx_zc_checked) ksocknal_process_transmit() 508 ksocknal_check_zc_req(tx); ksocknal_process_transmit() 510 rc = ksocknal_transmit (conn, tx); ksocknal_process_transmit() 512 CDEBUG (D_NET, "send(%d) %d\n", tx->tx_resid, rc); ksocknal_process_transmit() 514 if (tx->tx_resid == 0) { ksocknal_process_transmit() 529 CWARN("%u ENOMEM tx %p (%u allocated)\n", ksocknal_process_transmit() 569 if (tx->tx_zc_checked) ksocknal_process_transmit() 570 ksocknal_uncheck_zc_req(tx); ksocknal_process_transmit() 618 ksocknal_find_conn_locked(ksock_peer_t *peer, ksock_tx_t *tx, int nonblk) ksocknal_find_conn_locked() argument 637 rc = c->ksnc_proto->pro_match_tx(c, tx, nonblk); ksocknal_find_conn_locked() 642 case 
SOCKNAL_MATCH_NO: /* protocol rejected the tx */ ksocknal_find_conn_locked() 675 ksocknal_tx_prep(ksock_conn_t *conn, ksock_tx_t *tx) ksocknal_tx_prep() argument 677 conn->ksnc_proto->pro_pack(tx); ksocknal_tx_prep() 679 atomic_add (tx->tx_nob, &conn->ksnc_tx_nob); ksocknal_tx_prep() 680 ksocknal_conn_addref(conn); /* +1 ref for tx */ ksocknal_tx_prep() 681 tx->tx_conn = conn; ksocknal_tx_prep() 685 ksocknal_queue_tx_locked (ksock_tx_t *tx, ksock_conn_t *conn) ksocknal_queue_tx_locked() argument 688 ksock_msg_t *msg = &tx->tx_msg; ksocknal_queue_tx_locked() 703 ksocknal_tx_prep(conn, tx); ksocknal_queue_tx_locked() 711 LASSERT (lnet_iov_nob (tx->tx_niov, tx->tx_iov) + ksocknal_queue_tx_locked() 712 lnet_kiov_nob(tx->tx_nkiov, tx->tx_kiov) == ksocknal_queue_tx_locked() 713 (unsigned int)tx->tx_nob); ksocknal_queue_tx_locked() 714 LASSERT (tx->tx_niov >= 1); ksocknal_queue_tx_locked() 715 LASSERT (tx->tx_resid == tx->tx_nob); ksocknal_queue_tx_locked() 718 tx, (tx->tx_lnetmsg != NULL) ? tx->tx_lnetmsg->msg_hdr.type: ksocknal_queue_tx_locked() 720 tx->tx_nob, tx->tx_niov, tx->tx_nkiov); ksocknal_queue_tx_locked() 745 if (conn->ksnc_proto->pro_queue_tx_zcack(conn, tx, 0)) ksocknal_queue_tx_locked() 746 ztx = tx; /* ZC ACK piggybacked on ztx release tx later */ ksocknal_queue_tx_locked() 754 ztx = conn->ksnc_proto->pro_queue_tx_msg(conn, tx); ksocknal_queue_tx_locked() 832 ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id) ksocknal_launch_packet() argument 840 LASSERT (tx->tx_conn == NULL); ksocknal_launch_packet() 849 conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk); ksocknal_launch_packet() 854 ksocknal_queue_tx_locked (tx, conn); ksocknal_launch_packet() 895 conn = ksocknal_find_conn_locked(peer, tx, tx->tx_nonblk); ksocknal_launch_packet() 898 ksocknal_queue_tx_locked (tx, conn); ksocknal_launch_packet() 906 tx->tx_deadline = ksocknal_launch_packet() 910 list_add_tail (&tx->tx_list, &peer->ksnp_tx_queue); ksocknal_launch_packet() 933 ksock_tx_t *tx; ksocknal_send() local 958 tx = ksocknal_alloc_tx(KSOCK_MSG_LNET, desc_size); ksocknal_send() 959 if (tx == NULL) { ksocknal_send() 960 CERROR("Can't allocate tx desc type %d size %d\n", ksocknal_send() 967 tx->tx_conn = NULL; /* set when assigned a conn */ ksocknal_send() 968 tx->tx_lnetmsg = lntmsg; ksocknal_send() 971 tx->tx_kiov = NULL; ksocknal_send() 972 tx->tx_nkiov = 0; ksocknal_send() 973 tx->tx_iov = tx->tx_frags.virt.iov; ksocknal_send() 974 tx->tx_niov = 1 + ksocknal_send() 975 lnet_extract_iov(payload_niov, &tx->tx_iov[1], ksocknal_send() 979 tx->tx_niov = 1; ksocknal_send() 980 tx->tx_iov = &tx->tx_frags.paged.iov; ksocknal_send() 981 tx->tx_kiov = tx->tx_frags.paged.kiov; ksocknal_send() 982 tx->tx_nkiov = lnet_extract_kiov(payload_niov, tx->tx_kiov, ksocknal_send() 987 tx->tx_zc_capable = 1; ksocknal_send() 990 socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_LNET); ksocknal_send() 993 rc = ksocknal_launch_packet(ni, tx, target); ksocknal_send() 1000 ksocknal_free_tx(tx); ksocknal_send() 1390 ksock_tx_t *tx; ksocknal_scheduler() local 1474 tx = list_entry(conn->ksnc_tx_queue.next, ksocknal_scheduler() 1477 if (conn->ksnc_tx_carrier == tx) ksocknal_scheduler() 1481 list_del(&tx->tx_list); ksocknal_scheduler() 1496 rc = ksocknal_process_transmit(conn, tx); ksocknal_scheduler() 1499 /* Incomplete send: replace tx on HEAD of tx_queue */ ksocknal_scheduler() 1501 list_add(&tx->tx_list, ksocknal_scheduler() 1504 /* Complete send; tx -ref */ ksocknal_scheduler() 1505 ksocknal_tx_decref(tx); ksocknal_scheduler() 
1517 /* reschedule for tx */ ksocknal_scheduler() 2316 ksock_tx_t *tx; ksocknal_flush_stale_txs() local 2322 tx = list_entry (peer->ksnp_tx_queue.next, ksocknal_flush_stale_txs() 2326 tx->tx_deadline)) ksocknal_flush_stale_txs() 2329 list_del (&tx->tx_list); ksocknal_flush_stale_txs() 2330 list_add_tail (&tx->tx_list, &stale_txs); ksocknal_flush_stale_txs() 2343 ksock_tx_t *tx; ksocknal_send_keepalive_locked() local 2381 tx = ksocknal_alloc_tx_noop(1, 1); ksocknal_send_keepalive_locked() 2382 if (tx == NULL) { ksocknal_send_keepalive_locked() 2387 if (ksocknal_launch_packet(peer->ksnp_ni, tx, peer->ksnp_id) == 0) { ksocknal_send_keepalive_locked() 2392 ksocknal_free_tx(tx); ksocknal_send_keepalive_locked() 2405 ksock_tx_t *tx; ksocknal_check_peer_timeouts() local 2440 ksock_tx_t *tx = list_for_each_entry() local 2445 tx->tx_deadline)) { list_for_each_entry() 2461 list_for_each_entry(tx, &peer->ksnp_zc_req_list, tx_zc_list) { list_for_each_entry() 2463 tx->tx_deadline)) list_for_each_entry() 2466 if (tx->tx_conn->ksnc_closing) list_for_each_entry() 2476 tx = list_entry(peer->ksnp_zc_req_list.next, list_for_each_entry() 2478 deadline = tx->tx_deadline; list_for_each_entry() 2479 resid = tx->tx_resid; list_for_each_entry() 2480 conn = tx->tx_conn; list_for_each_entry() 2487 n, libcfs_nid2str(peer->ksnp_id.nid), tx, list_for_each_entry()
|
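ksocknal_alloc_tx()/ksocknal_free_tx() above keep NOOP descriptors on an idle free list (ksnd_idle_noop_txs) so the common case avoids the allocator. A compilable userspace sketch of that reuse pattern, with invented names; the kernel additionally serialises the list with a spinlock, which is omitted here.

#include <stdlib.h>

/* Hypothetical NOOP descriptor; only the free-list link matters here. */
struct noop_tx {
	struct noop_tx *next;
	unsigned long long cookie;
};

static struct noop_tx *idle_noop_txs;	/* free list head (no locking) */

static struct noop_tx *alloc_noop_tx(void)
{
	struct noop_tx *tx = idle_noop_txs;

	if (tx)				/* reuse a cached descriptor */
		idle_noop_txs = tx->next;
	else				/* fall back to the allocator */
		tx = malloc(sizeof(*tx));
	return tx;
}

static void free_noop_tx(struct noop_tx *tx)
{
	tx->next = idle_noop_txs;	/* cache it instead of freeing */
	idle_noop_txs = tx;
}

int main(void)
{
	struct noop_tx *tx = alloc_noop_tx();

	if (!tx)
		return 1;
	free_noop_tx(tx);			/* back on the idle list  */
	return alloc_noop_tx() == tx ? 0 : 1;	/* reused, not realloc'd  */
}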
H A D | socklnd_lib-linux.c | 78 ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx) ksocknal_lib_send_iov() argument 86 tx->tx_nob == tx->tx_resid && /* frist sending */ ksocknal_lib_send_iov() 87 tx->tx_msg.ksm_csum == 0) /* not checksummed */ ksocknal_lib_send_iov() 88 ksocknal_lib_csum_tx(tx); ksocknal_lib_send_iov() 100 unsigned int niov = tx->tx_niov; ksocknal_lib_send_iov() 106 scratchiov[i] = tx->tx_iov[i]; ksocknal_lib_send_iov() 111 nob < tx->tx_resid) ksocknal_lib_send_iov() 120 ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx) ksocknal_lib_send_kiov() argument 123 lnet_kiov_t *kiov = tx->tx_kiov; ksocknal_lib_send_kiov() 128 LASSERT(tx->tx_lnetmsg != NULL); ksocknal_lib_send_kiov() 132 if (tx->tx_msg.ksm_zc_cookies[0] != 0) { ksocknal_lib_send_kiov() 144 fragsize < tx->tx_resid) ksocknal_lib_send_kiov() 164 unsigned int niov = tx->tx_nkiov; ksocknal_lib_send_kiov() 176 nob < tx->tx_resid) ksocknal_lib_send_kiov() 387 ksocknal_lib_csum_tx(ksock_tx_t *tx) ksocknal_lib_csum_tx() argument 393 LASSERT(tx->tx_iov[0].iov_base == &tx->tx_msg); ksocknal_lib_csum_tx() 394 LASSERT(tx->tx_conn != NULL); ksocknal_lib_csum_tx() 395 LASSERT(tx->tx_conn->ksnc_proto == &ksocknal_protocol_v2x); ksocknal_lib_csum_tx() 397 tx->tx_msg.ksm_csum = 0; ksocknal_lib_csum_tx() 399 csum = ksocknal_csum(~0, tx->tx_iov[0].iov_base, ksocknal_lib_csum_tx() 400 tx->tx_iov[0].iov_len); ksocknal_lib_csum_tx() 402 if (tx->tx_kiov != NULL) { ksocknal_lib_csum_tx() 403 for (i = 0; i < tx->tx_nkiov; i++) { ksocknal_lib_csum_tx() 404 base = kmap(tx->tx_kiov[i].kiov_page) + ksocknal_lib_csum_tx() 405 tx->tx_kiov[i].kiov_offset; ksocknal_lib_csum_tx() 407 csum = ksocknal_csum(csum, base, tx->tx_kiov[i].kiov_len); ksocknal_lib_csum_tx() 409 kunmap(tx->tx_kiov[i].kiov_page); ksocknal_lib_csum_tx() 412 for (i = 1; i < tx->tx_niov; i++) ksocknal_lib_csum_tx() 413 csum = ksocknal_csum(csum, tx->tx_iov[i].iov_base, ksocknal_lib_csum_tx() 414 tx->tx_iov[i].iov_len); ksocknal_lib_csum_tx() 422 tx->tx_msg.ksm_csum = csum; ksocknal_lib_csum_tx() 505 CERROR("Can't set buffer tx %d, rx %d buffers: %d\n", ksocknal_lib_setup_sock()
|
H A D | socklnd.h | 65 /* zombie noop tx list */ 108 int *ksnd_tx_buffer_size; /* socket tx buffer size */ 185 struct list_head ksnd_idle_noop_txs; /* list head for freed noop tx */ 212 atomic_t tx_refcount; /* tx reference count */ 225 unsigned long tx_deadline; /* when (in jiffies) tx times out */ 304 unsigned long ksnc_tx_deadline; /* when (in jiffies) tx times out */ 374 ksock_tx_t *(*pro_queue_tx_msg)(ksock_conn_t *, ksock_tx_t *); /* queue tx on the connection */ 463 ksocknal_tx_addref(ksock_tx_t *tx) ksocknal_tx_addref() argument 465 LASSERT(atomic_read(&tx->tx_refcount) > 0); ksocknal_tx_addref() 466 atomic_inc(&tx->tx_refcount); ksocknal_tx_addref() 469 extern void ksocknal_tx_prep(ksock_conn_t *, ksock_tx_t *tx); 470 extern void ksocknal_tx_done(lnet_ni_t *ni, ksock_tx_t *tx); 473 ksocknal_tx_decref(ksock_tx_t *tx) ksocknal_tx_decref() argument 475 LASSERT(atomic_read(&tx->tx_refcount) > 0); ksocknal_tx_decref() 476 if (atomic_dec_and_test(&tx->tx_refcount)) ksocknal_tx_decref() 477 ksocknal_tx_done(NULL, tx); ksocknal_tx_decref() 538 ksock_tx_t *tx, int nonblk); 540 extern int ksocknal_launch_packet(lnet_ni_t *ni, ksock_tx_t *tx, 543 extern void ksocknal_free_tx(ksock_tx_t *tx); 546 extern void ksocknal_queue_tx_locked(ksock_tx_t *tx, ksock_conn_t *conn); 575 extern int ksocknal_lib_send_iov(ksock_conn_t *conn, ksock_tx_t *tx); 576 extern int ksocknal_lib_send_kiov(ksock_conn_t *conn, ksock_tx_t *tx); 585 extern void ksocknal_lib_csum_tx(ksock_tx_t *tx);
|
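The addref/decref pair in socklnd.h is the usual "last reference reaps the descriptor" scheme: ksocknal_tx_decref() calls ksocknal_tx_done() only when the count drops to zero. A small C11 model of the same shape, assuming a plain heap-allocated descriptor and using <stdatomic.h> in place of the kernel's atomic_t.

#include <stdatomic.h>
#include <stdlib.h>

struct tx_desc {
	atomic_int refcount;
	/* payload fields would live here */
};

static void tx_done(struct tx_desc *tx)
{
	free(tx);			/* reap: return to pool/allocator */
}

static void tx_addref(struct tx_desc *tx)
{
	atomic_fetch_add(&tx->refcount, 1);
}

static void tx_decref(struct tx_desc *tx)
{
	/* previous value 1 means this was the last reference */
	if (atomic_fetch_sub(&tx->refcount, 1) == 1)
		tx_done(tx);
}

int main(void)
{
	struct tx_desc *tx = malloc(sizeof(*tx));

	if (!tx)
		return 1;
	atomic_init(&tx->refcount, 1);	/* creation reference            */
	tx_addref(tx);			/* e.g. pending ZC-ACK holds it  */
	tx_decref(tx);			/* send completion               */
	tx_decref(tx);			/* ACK arrives: last ref frees   */
	return 0;
}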
/linux-4.1.27/drivers/net/ethernet/pasemi/ |
H A D | pasemi_mac_ethtool.c | 48 { "tx-bytes" }, 49 { "tx-packets" }, 50 { "tx-broadcast-packets" }, 51 { "tx-multicast-packets" }, 52 { "tx-collisions" }, 53 { "tx-late-collisions" }, 54 { "tx-excessive-collisions" }, 55 { "tx-crc-errors" }, 56 { "tx-undersize-errors" }, 57 { "tx-oversize-errors" }, 58 { "tx-64-byte-packets" }, 59 { "tx-65-127-byte-packets" }, 60 { "tx-128-255-byte-packets" }, 61 { "tx-256-511-byte-packets" }, 62 { "tx-512-1023-byte-packets" }, 63 { "tx-1024-1518-byte-packets" }, 115 ering->tx_pending = RING_USED(mac->tx)/2; pasemi_mac_ethtool_get_ringparam()
|
H A D | pasemi_mac.h | 92 struct pasemi_mac_txring *tx; member in struct:pasemi_mac 95 char tx_irq_name[10]; /* "eth%d tx" */ 110 #define TX_DESC(tx, num) ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)]) 111 #define TX_DESC_INFO(tx, num) ((tx)->ring_info[(num) & (TX_RING_SIZE-1)])
|
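TX_DESC() and TX_DESC_INFO() above index the ring with (num) & (TX_RING_SIZE-1): the producer/consumer counters run freely and the mask supplies the wraparound, which only works because the ring size is a power of two. A tiny standalone illustration of that masking rule (RING_SIZE is an invented stand-in).

#include <assert.h>

#define RING_SIZE 64u			/* must be a power of two */
#define RING_IDX(n) ((n) & (RING_SIZE - 1))

int main(void)
{
	/* counters just keep incrementing; the mask does the wrap */
	assert(RING_IDX(0) == 0);
	assert(RING_IDX(RING_SIZE - 1) == RING_SIZE - 1);
	assert(RING_IDX(RING_SIZE) == 0);
	assert(RING_IDX(RING_SIZE + 5) == 5);
	return 0;
}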
H A D | pasemi_mac.c | 127 return mac->tx; tx_ring() 734 printk(KERN_ERR "pasemi_mac: tx error. mactx 0x%016llx, "\ pasemi_mac_tx_error() 735 "tx status 0x%016llx\n", mactx, *chan->status); pasemi_mac_tx_error() 1130 mac->tx = pasemi_mac_setup_tx_resources(dev); pasemi_mac_open() 1132 if (!mac->tx) pasemi_mac_open() 1155 write_iob_reg(PAS_IOB_DMA_TXCH_CFG(mac->tx->chan.chno), pasemi_mac_open() 1176 /* enable tx channel */ pasemi_mac_open() 1217 snprintf(mac->tx_irq_name, sizeof(mac->tx_irq_name), "%s tx", pasemi_mac_open() 1220 ret = request_irq(mac->tx->chan.irq, pasemi_mac_tx_intr, 0, pasemi_mac_open() 1221 mac->tx_irq_name, mac->tx); pasemi_mac_open() 1224 mac->tx->chan.irq, ret); pasemi_mac_open() 1242 setup_timer(&mac->tx->clean_timer, pasemi_mac_tx_timer, pasemi_mac_open() 1243 (unsigned long)mac->tx); pasemi_mac_open() 1244 mod_timer(&mac->tx->clean_timer, jiffies + HZ); pasemi_mac_open() 1249 free_irq(mac->tx->chan.irq, mac->tx); pasemi_mac_open() 1254 if (mac->tx) pasemi_mac_open() 1281 "Failed to stop tx channel, tcmdsta %08x\n", sta); pasemi_mac_pause_txchan() 1339 del_timer_sync(&mac->tx->clean_timer); pasemi_mac_close() 1371 free_irq(mac->tx->chan.irq, mac->tx); pasemi_mac_close() 1540 /* no room -- stop the queue and wait for tx intr */ pasemi_mac_start_tx() 1636 disable_irq(mac->tx->chan.irq); pasemi_mac_netpoll() 1637 pasemi_mac_tx_intr(mac->tx->chan.irq, mac->tx); pasemi_mac_netpoll() 1638 enable_irq(mac->tx->chan.irq); pasemi_mac_netpoll() 1864 pasemi_dma_free_chan(&mac->tx->chan); pasemi_mac_remove()
|
/linux-4.1.27/drivers/dma/ |
H A D | virt-dma.c | 17 static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx) to_virt_desc() argument 19 return container_of(tx, struct virt_dma_desc, tx); to_virt_desc() 22 dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx) vchan_tx_submit() argument 24 struct virt_dma_chan *vc = to_virt_chan(tx->chan); vchan_tx_submit() 25 struct virt_dma_desc *vd = to_virt_desc(tx); vchan_tx_submit() 30 cookie = dma_cookie_assign(tx); vchan_tx_submit() 48 if (vd->tx.cookie == cookie) vchan_find_desc() 72 cb = vd->tx.callback; vchan_complete() 73 cb_data = vd->tx.callback_param; vchan_complete() 82 cb = vd->tx.callback; vchan_complete() 83 cb_data = vd->tx.callback_param; vchan_complete()
|
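vchan_complete() above snapshots vd->tx.callback and vd->tx.callback_param while the channel lock is held and invokes the callback only after the lock is dropped, so a completion callback can safely submit new work on the same channel. A minimal pthread-based model of that "copy under the lock, call outside it" pattern; all names here are invented.

#include <pthread.h>
#include <stdio.h>

typedef void (*done_cb)(void *arg);

static pthread_mutex_t chan_lock = PTHREAD_MUTEX_INITIALIZER;
static done_cb pending_cb;
static void *pending_arg;

static void my_cb(void *arg)
{
	printf("completed: %s\n", (const char *)arg);
}

/* Copy the callback out while holding the lock, invoke it after
 * dropping the lock, so the callback may take the same lock again
 * (e.g. to submit the next descriptor). */
static void complete_one(void)
{
	done_cb cb;
	void *arg;

	pthread_mutex_lock(&chan_lock);
	cb = pending_cb;
	arg = pending_arg;
	pending_cb = NULL;
	pthread_mutex_unlock(&chan_lock);

	if (cb)
		cb(arg);
}

int main(void)
{
	pending_cb = my_cb;
	pending_arg = "tx 1";
	complete_one();
	return 0;
}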
H A D | dmaengine.h | 23 * @tx: descriptor needing cookie 28 static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx) dma_cookie_assign() argument 30 struct dma_chan *chan = tx->chan; dma_cookie_assign() 36 tx->cookie = chan->cookie = cookie; dma_cookie_assign() 43 * @tx: descriptor to complete 51 static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx) dma_cookie_complete() argument 53 BUG_ON(tx->cookie < DMA_MIN_COOKIE); dma_cookie_complete() 54 tx->chan->completed_cookie = tx->cookie; dma_cookie_complete() 55 tx->cookie = 0; dma_cookie_complete()
|
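dma_cookie_assign() above hands out cookies from a monotonically increasing per-channel counter and keeps the result above a minimum value, since the small non-positive cookies are reserved for error/status reporting. A standalone sketch of that counter logic; MIN_COOKIE and the field names are assumptions for illustration, and the wraparound guard relies on the counter wrapping (as the kernel's build flags allow) rather than demonstrating it.

#include <stdint.h>
#include <stdio.h>

#define MIN_COOKIE 1	/* values below this are assumed reserved */

struct chan {
	int32_t cookie;	/* last cookie handed out on this channel */
};

/* Hand out the next cookie; if the counter ever wraps to a
 * non-positive value, bump it back up past the reserved range. */
static int32_t cookie_assign(struct chan *c)
{
	int32_t cookie = c->cookie + 1;

	if (cookie < MIN_COOKIE)
		cookie = MIN_COOKIE;
	c->cookie = cookie;
	return cookie;
}

int main(void)
{
	struct chan c = { .cookie = 0 };

	printf("%d\n", cookie_assign(&c));	/* 1 */
	printf("%d\n", cookie_assign(&c));	/* 2 */
	return 0;
}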
H A D | virt-dma.h | 19 struct dma_async_tx_descriptor tx; member in struct:virt_dma_desc 59 dma_async_tx_descriptor_init(&vd->tx, &vc->chan); vchan_tx_prep() 60 vd->tx.flags = tx_flags; vchan_tx_prep() 61 vd->tx.tx_submit = vchan_tx_submit; vchan_tx_prep() 63 return &vd->tx; vchan_tx_prep() 86 struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); vchan_cookie_complete() 89 cookie = vd->tx.cookie; vchan_cookie_complete() 90 dma_cookie_complete(&vd->tx); vchan_cookie_complete() 104 struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); vchan_cyclic_callback()
|
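virt-dma embeds the generic struct dma_async_tx_descriptor inside the driver's own struct virt_dma_desc and recovers the outer descriptor with container_of(), as to_virt_desc() does in virt-dma.c above. A self-contained illustration of that embedding trick, using a simplified container_of (the kernel macro also type-checks the pointer) and invented stand-in structs.

#include <stddef.h>
#include <stdio.h>

/* simplified container_of; the kernel version also type-checks 'ptr' */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tx_descriptor {		/* stand-in for dma_async_tx_descriptor */
	int cookie;
};

struct my_desc {		/* stand-in for virt_dma_desc           */
	int hw_slot;
	struct tx_descriptor tx;
};

static struct my_desc *to_my_desc(struct tx_descriptor *tx)
{
	return container_of(tx, struct my_desc, tx);
}

int main(void)
{
	struct my_desc d = { .hw_slot = 7 };
	struct tx_descriptor *tx = &d.tx;	/* what the core API sees */

	printf("%d\n", to_my_desc(tx)->hw_slot);	/* prints 7 */
	return 0;
}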
H A D | mic_x100_dma.c | 90 struct dma_async_tx_descriptor *tx; mic_dma_cleanup() local 99 * updated cookie value from tx->cookie. mic_dma_cleanup() 103 tx = &ch->tx_array[last_tail]; mic_dma_cleanup() 104 if (tx->cookie) { mic_dma_cleanup() 105 dma_cookie_complete(tx); mic_dma_cleanup() 106 if (tx->callback) { mic_dma_cleanup() 107 tx->callback(tx->callback_param); mic_dma_cleanup() 108 tx->callback = NULL; mic_dma_cleanup() 242 static dma_cookie_t mic_dma_tx_submit_unlock(struct dma_async_tx_descriptor *tx) mic_dma_tx_submit_unlock() argument 244 struct mic_dma_chan *mic_ch = to_mic_dma_chan(tx->chan); mic_dma_tx_submit_unlock() 247 dma_cookie_assign(tx); mic_dma_tx_submit_unlock() 248 cookie = tx->cookie; mic_dma_tx_submit_unlock() 252 * assigned a cookie to this tx. mic_dma_tx_submit_unlock() 265 struct dma_async_tx_descriptor *tx = &ch->tx_array[idx]; allocate_tx() local 267 dma_async_tx_descriptor_init(tx, &ch->api_ch); allocate_tx() 268 tx->tx_submit = mic_dma_tx_submit_unlock; allocate_tx() 269 return tx; allocate_tx() 482 struct dma_async_tx_descriptor *tx; mic_dma_drain_chan() local 486 tx = mic_dma_prep_memcpy_lock(&ch->api_ch, 0, 0, 0, DMA_PREP_FENCE); mic_dma_drain_chan() 487 if (!tx) { mic_dma_drain_chan() 492 cookie = tx->tx_submit(tx); mic_dma_drain_chan()
|
H A D | iop-adma.c | 40 #define tx_to_iop_adma_slot(tx) \ 41 container_of(tx, struct iop_adma_desc_slot, async_tx) 64 struct dma_async_tx_descriptor *tx = &desc->async_tx; iop_adma_run_tx_complete_actions() local 66 BUG_ON(tx->cookie < 0); iop_adma_run_tx_complete_actions() 67 if (tx->cookie > 0) { iop_adma_run_tx_complete_actions() 68 cookie = tx->cookie; iop_adma_run_tx_complete_actions() 69 tx->cookie = 0; iop_adma_run_tx_complete_actions() 74 if (tx->callback) iop_adma_run_tx_complete_actions() 75 tx->callback(tx->callback_param); iop_adma_run_tx_complete_actions() 77 dma_descriptor_unmap(tx); iop_adma_run_tx_complete_actions() 83 dma_run_dependencies(tx); iop_adma_run_tx_complete_actions() 371 iop_adma_tx_submit(struct dma_async_tx_descriptor *tx) iop_adma_tx_submit() argument 373 struct iop_adma_desc_slot *sw_desc = tx_to_iop_adma_slot(tx); iop_adma_tx_submit() 374 struct iop_adma_chan *iop_chan = to_iop_adma_chan(tx->chan); iop_adma_tx_submit() 386 cookie = dma_cookie_assign(tx); iop_adma_tx_submit() 850 struct dma_async_tx_descriptor *tx; iop_adma_memcpy_self_test() local 882 tx = iop_adma_prep_dma_memcpy(dma_chan, dest_dma, src_dma, iop_adma_memcpy_self_test() 886 cookie = iop_adma_tx_submit(tx); iop_adma_memcpy_self_test() 926 struct dma_async_tx_descriptor *tx; iop_adma_xor_val_self_test() local 982 tx = iop_adma_prep_dma_xor(dma_chan, dest_dma, dma_srcs, iop_adma_xor_val_self_test() 986 cookie = iop_adma_tx_submit(tx); iop_adma_xor_val_self_test() 1028 tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs, iop_adma_xor_val_self_test() 1033 cookie = iop_adma_tx_submit(tx); iop_adma_xor_val_self_test() 1057 tx = iop_adma_prep_dma_xor_val(dma_chan, dma_srcs, iop_adma_xor_val_self_test() 1062 cookie = iop_adma_tx_submit(tx); iop_adma_xor_val_self_test() 1104 struct dma_async_tx_descriptor *tx; iop_adma_pq_zero_sum_self_test() local 1151 tx = iop_adma_prep_dma_pq(dma_chan, pq_dest, pq_src, iop_adma_pq_zero_sum_self_test() 1157 cookie = iop_adma_tx_submit(tx); iop_adma_pq_zero_sum_self_test() 1189 tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST], iop_adma_pq_zero_sum_self_test() 1194 cookie = iop_adma_tx_submit(tx); iop_adma_pq_zero_sum_self_test() 1221 tx = iop_adma_prep_dma_pq_val(dma_chan, &pq_src[IOP_ADMA_NUM_SRC_TEST], iop_adma_pq_zero_sum_self_test() 1226 cookie = iop_adma_tx_submit(tx); iop_adma_pq_zero_sum_self_test()
|
H A D | xgene-dma.c | 230 #define to_dma_desc_sw(tx) \ 231 container_of(tx, struct xgene_dma_desc_sw, tx) 283 struct dma_async_tx_descriptor tx; member in struct:xgene_dma_desc_sw 303 * are waiting for the ACK bit to be set by the async tx API. 554 static dma_cookie_t xgene_dma_tx_submit(struct dma_async_tx_descriptor *tx) xgene_dma_tx_submit() argument 560 if (unlikely(!tx)) xgene_dma_tx_submit() 563 chan = to_dma_chan(tx->chan); xgene_dma_tx_submit() 564 desc = to_dma_desc_sw(tx); xgene_dma_tx_submit() 568 cookie = dma_cookie_assign(tx); xgene_dma_tx_submit() 583 dma_pool_free(chan->desc_pool, desc, desc->tx.phys); xgene_dma_clean_descriptor() 601 desc->tx.phys = phys; xgene_dma_alloc_descriptor() 602 desc->tx.tx_submit = xgene_dma_tx_submit; xgene_dma_alloc_descriptor() 603 dma_async_tx_descriptor_init(&desc->tx, &chan->dma_chan); xgene_dma_alloc_descriptor() 623 if (async_tx_test_ack(&desc->tx)) xgene_dma_clean_completed_descriptor() 639 struct dma_async_tx_descriptor *tx = &desc->tx; xgene_dma_run_tx_complete_actions() local 648 if (tx->cookie == 0) xgene_dma_run_tx_complete_actions() 651 dma_cookie_complete(tx); xgene_dma_run_tx_complete_actions() 654 if (tx->callback) xgene_dma_run_tx_complete_actions() 655 tx->callback(tx->callback_param); xgene_dma_run_tx_complete_actions() 657 dma_descriptor_unmap(tx); xgene_dma_run_tx_complete_actions() 660 dma_run_dependencies(tx); xgene_dma_run_tx_complete_actions() 682 if (!async_tx_test_ack(&desc->tx)) { xgene_dma_clean_running_descriptor() 692 dma_pool_free(chan->desc_pool, desc, desc->tx.phys); xgene_dma_clean_running_descriptor() 704 /* Get hw descriptor from DMA tx ring */ xgene_chan_xfer_request() 966 new->tx.cookie = 0; xgene_dma_prep_memcpy() 967 async_tx_ack(&new->tx); xgene_dma_prep_memcpy() 978 new->tx.flags = flags; /* client is in control of this ack */ xgene_dma_prep_memcpy() 979 new->tx.cookie = -EBUSY; xgene_dma_prep_memcpy() 982 return &new->tx; xgene_dma_prep_memcpy() 1042 new->tx.cookie = 0; xgene_dma_prep_sg() 1043 async_tx_ack(&new->tx); xgene_dma_prep_sg() 1087 new->tx.flags = flags; /* client is in control of this ack */ xgene_dma_prep_sg() 1088 new->tx.cookie = -EBUSY; xgene_dma_prep_sg() 1091 return &new->tx; xgene_dma_prep_sg() 1127 new->tx.cookie = 0; xgene_dma_prep_xor() 1128 async_tx_ack(&new->tx); xgene_dma_prep_xor() 1134 new->tx.flags = flags; /* client is in control of this ack */ xgene_dma_prep_xor() 1135 new->tx.cookie = -EBUSY; xgene_dma_prep_xor() 1138 return &new->tx; xgene_dma_prep_xor() 1185 new->tx.cookie = 0; xgene_dma_prep_pq() 1186 async_tx_ack(&new->tx); xgene_dma_prep_pq() 1211 new->tx.flags = flags; /* client is in control of this ack */ xgene_dma_prep_pq() 1212 new->tx.cookie = -EBUSY; xgene_dma_prep_pq() 1215 return &new->tx; xgene_dma_prep_pq()
|
/linux-4.1.27/crypto/async_tx/ |
H A D | async_tx.c | 75 * @tx: the new operation 79 struct dma_async_tx_descriptor *tx) async_tx_channel_switch() 87 if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) { async_tx_channel_switch() 88 txd_chain(depend_tx, tx); async_tx_channel_switch() 113 txd_chain(intr_tx, tx); async_tx_channel_switch() 134 tx->tx_submit(tx); async_tx_channel_switch() 156 async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, async_tx_submit() argument 161 tx->callback = submit->cb_fn; async_tx_submit() 162 tx->callback_param = submit->cb_param; async_tx_submit() 174 txd_parent(tx)); async_tx_submit() 186 txd_chain(depend_tx, tx); async_tx_submit() 205 async_tx_channel_switch(depend_tx, tx); async_tx_submit() 208 txd_clear_parent(tx); async_tx_submit() 209 tx->tx_submit(tx); async_tx_submit() 213 txd_clear_parent(tx); async_tx_submit() 214 tx->tx_submit(tx); async_tx_submit() 218 async_tx_ack(tx); async_tx_submit() 238 struct dma_async_tx_descriptor *tx; async_trigger_callback() local 251 tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL; async_trigger_callback() 253 tx = NULL; async_trigger_callback() 255 if (tx) { async_trigger_callback() 258 async_tx_submit(chan, tx, submit); async_trigger_callback() 268 return tx; async_trigger_callback() 273 * async_tx_quiesce - ensure tx is complete and freeable upon return 274 * @tx - transaction to quiesce 276 void async_tx_quiesce(struct dma_async_tx_descriptor **tx) async_tx_quiesce() argument 278 if (*tx) { async_tx_quiesce() 282 BUG_ON(async_tx_test_ack(*tx)); async_tx_quiesce() 283 if (dma_wait_for_async_tx(*tx) != DMA_COMPLETE) async_tx_quiesce() 286 async_tx_ack(*tx); async_tx_quiesce() 287 *tx = NULL; async_tx_quiesce() 78 async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx, struct dma_async_tx_descriptor *tx) async_tx_channel_switch() argument
|
H A D | async_raid6_recov.c | 49 struct dma_async_tx_descriptor *tx; async_sum_product() local 64 tx = dma->device_prep_dma_pq(chan, pq, unmap->addr, 2, coef, async_sum_product() 66 if (tx) { async_sum_product() 67 dma_set_unmap(tx, unmap); async_sum_product() 68 async_tx_submit(chan, tx, submit); async_sum_product() 70 return tx; async_sum_product() 113 struct dma_async_tx_descriptor *tx; async_mult() local 129 tx = dma->device_prep_dma_pq(chan, dma_dest, unmap->addr, async_mult() 132 if (tx) { async_mult() 133 dma_set_unmap(tx, unmap); async_mult() 135 async_tx_submit(chan, tx, submit); async_mult() 136 return tx; async_mult() 163 struct dma_async_tx_descriptor *tx = NULL; __2data_recov_4() local 184 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); __2data_recov_4() 185 tx = async_sum_product(b, srcs, coef, bytes, submit); __2data_recov_4() 190 init_async_submit(submit, flags | ASYNC_TX_XOR_ZERO_DST, tx, cb_fn, __2data_recov_4() 192 tx = async_xor(a, srcs, 0, 2, bytes, submit); __2data_recov_4() 194 return tx; __2data_recov_4() 202 struct dma_async_tx_descriptor *tx = NULL; __2data_recov_5() local 235 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); __2data_recov_5() 236 tx = async_memcpy(dp, g, 0, 0, bytes, submit); __2data_recov_5() 237 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); __2data_recov_5() 238 tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); __2data_recov_5() 243 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, __2data_recov_5() 245 tx = async_xor(dp, srcs, 0, 2, bytes, submit); __2data_recov_5() 250 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, __2data_recov_5() 252 tx = async_xor(dq, srcs, 0, 2, bytes, submit); __2data_recov_5() 259 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); __2data_recov_5() 260 tx = async_sum_product(dq, srcs, coef, bytes, submit); __2data_recov_5() 265 init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, __2data_recov_5() 267 tx = async_xor(dp, srcs, 0, 2, bytes, submit); __2data_recov_5() 269 return tx; __2data_recov_5() 276 struct dma_async_tx_descriptor *tx = NULL; __2data_recov_n() local 299 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); __2data_recov_n() 300 tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); __2data_recov_n() 311 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, __2data_recov_n() 313 tx = async_xor(dp, srcs, 0, 2, bytes, submit); __2data_recov_n() 318 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, __2data_recov_n() 320 tx = async_xor(dq, srcs, 0, 2, bytes, submit); __2data_recov_n() 327 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); __2data_recov_n() 328 tx = async_sum_product(dq, srcs, coef, bytes, submit); __2data_recov_n() 333 init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, __2data_recov_n() 335 tx = async_xor(dp, srcs, 0, 2, bytes, submit); __2data_recov_n() 337 return tx; __2data_recov_n() 426 struct dma_async_tx_descriptor *tx = NULL; async_raid6_datap_recov() local 490 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, async_raid6_datap_recov() 492 tx = async_memcpy(p, g, 0, 0, bytes, submit); async_raid6_datap_recov() 494 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, async_raid6_datap_recov() 496 tx = async_mult(dq, g, raid6_gfexp[good], bytes, submit); async_raid6_datap_recov() 498 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, 
async_raid6_datap_recov() 500 tx = async_gen_syndrome(blocks, 0, disks, bytes, submit); async_raid6_datap_recov() 512 init_async_submit(submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, async_raid6_datap_recov() 514 tx = async_xor(dq, srcs, 0, 2, bytes, submit); async_raid6_datap_recov() 516 init_async_submit(submit, ASYNC_TX_FENCE, tx, NULL, NULL, scribble); async_raid6_datap_recov() 517 tx = async_mult(dq, dq, coef, bytes, submit); async_raid6_datap_recov() 521 init_async_submit(submit, flags | ASYNC_TX_XOR_DROP_DST, tx, cb_fn, async_raid6_datap_recov() 523 tx = async_xor(p, srcs, 0, 2, bytes, submit); async_raid6_datap_recov() 525 return tx; async_raid6_datap_recov()
|
H A D | async_xor.c | 40 struct dma_async_tx_descriptor *tx = NULL; do_async_xor() local 78 tx = dma->device_prep_dma_xor(chan, dma_dest, src_list, do_async_xor() 82 if (unlikely(!tx)) do_async_xor() 86 while (unlikely(!tx)) { do_async_xor() 88 tx = dma->device_prep_dma_xor(chan, dma_dest, do_async_xor() 95 dma_set_unmap(tx, unmap); do_async_xor() 96 async_tx_submit(chan, tx, submit); do_async_xor() 97 submit->depend_tx = tx; do_async_xor() 109 return tx; do_async_xor() 188 struct dma_async_tx_descriptor *tx; async_xor() local 208 tx = do_async_xor(chan, unmap, submit); async_xor() 210 return tx; async_xor() 275 struct dma_async_tx_descriptor *tx = NULL; async_xor_val() local 302 tx = device->device_prep_dma_xor_val(chan, unmap->addr, src_cnt, async_xor_val() 305 if (unlikely(!tx)) { async_xor_val() 308 while (!tx) { async_xor_val() 310 tx = device->device_prep_dma_xor_val(chan, async_xor_val() 315 dma_set_unmap(tx, unmap); async_xor_val() 316 async_tx_submit(chan, tx, submit); async_xor_val() 328 tx = async_xor(dest, src_list, offset, src_cnt, len, submit); async_xor_val() 330 async_tx_quiesce(&tx); async_xor_val() 339 return tx; async_xor_val()
|
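async_xor_val() above drops back to building the result with async_xor() followed by async_tx_quiesce() when no channel can do the validation in hardware; the async_tx helpers generally follow an "offload if a descriptor can be prepared, otherwise compute synchronously" shape. A loose userspace model of that shape only; hw_xor() is a stand-in for the offload attempt, not a real API.

#include <stdio.h>
#include <string.h>

/* Hypothetical offload hook: returns 0 on success, -1 if no engine
 * could take the work, mirroring the "prep may fail, fall back"
 * pattern in the listings above. */
static int hw_xor(unsigned char *dest, unsigned char **srcs,
		  int nsrcs, size_t len)
{
	(void)dest; (void)srcs; (void)nsrcs; (void)len;
	return -1;			/* pretend the engine is busy */
}

static void cpu_xor(unsigned char *dest, unsigned char **srcs,
		    int nsrcs, size_t len)
{
	memset(dest, 0, len);
	for (int i = 0; i < nsrcs; i++)
		for (size_t j = 0; j < len; j++)
			dest[j] ^= srcs[i][j];
}

static void xor_blocks(unsigned char *dest, unsigned char **srcs,
		       int nsrcs, size_t len)
{
	if (hw_xor(dest, srcs, nsrcs, len) != 0)
		cpu_xor(dest, srcs, nsrcs, len);   /* synchronous fallback */
}

int main(void)
{
	unsigned char a[4] = {1, 2, 3, 4}, b[4] = {4, 3, 2, 1}, d[4];
	unsigned char *srcs[] = { a, b };

	xor_blocks(d, srcs, 2, sizeof(d));
	printf("%d %d %d %d\n", d[0], d[1], d[2], d[3]);   /* 5 1 1 5 */
	return 0;
}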
H A D | async_memcpy.c | 52 struct dma_async_tx_descriptor *tx = NULL; async_memcpy() local 74 tx = device->device_prep_dma_memcpy(chan, unmap->addr[1], async_memcpy() 79 if (tx) { async_memcpy() 82 dma_set_unmap(tx, unmap); async_memcpy() 83 async_tx_submit(chan, tx, submit); async_memcpy() 104 return tx; async_memcpy()
|
H A D | raid6test.c | 72 struct dma_async_tx_descriptor *tx = NULL; raid6_dual_recov() local 82 tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit); raid6_dual_recov() 100 tx = async_xor(dest, blocks, 0, count, bytes, &submit); raid6_dual_recov() 102 init_async_submit(&submit, 0, tx, NULL, NULL, addr_conv); raid6_dual_recov() 103 tx = async_gen_syndrome(ptrs, 0, disks, bytes, &submit); raid6_dual_recov() 109 tx = async_raid6_datap_recov(disks, bytes, faila, ptrs, &submit); raid6_dual_recov() 113 tx = async_raid6_2data_recov(disks, bytes, faila, failb, ptrs, &submit); raid6_dual_recov() 117 init_async_submit(&submit, ASYNC_TX_ACK, tx, callback, &cmp, addr_conv); raid6_dual_recov() 118 tx = async_syndrome_val(ptrs, 0, disks, bytes, &result, spare, &submit); raid6_dual_recov() 119 async_tx_issue_pending(tx); raid6_dual_recov() 157 struct dma_async_tx_descriptor *tx; test() local 176 tx = async_gen_syndrome(dataptrs, 0, disks, PAGE_SIZE, &submit); test() 177 async_tx_issue_pending(tx); test()
|
H A D | async_pq.c | 55 struct dma_async_tx_descriptor *tx = NULL; do_async_gen_syndrome() local 93 tx = dma->device_prep_dma_pq(chan, dma_dest, do_async_gen_syndrome() 98 if (likely(tx)) do_async_gen_syndrome() 104 dma_set_unmap(tx, unmap); do_async_gen_syndrome() 105 async_tx_submit(chan, tx, submit); do_async_gen_syndrome() 106 submit->depend_tx = tx; do_async_gen_syndrome() 115 return tx; do_async_gen_syndrome() 198 struct dma_async_tx_descriptor *tx; async_gen_syndrome() local 243 tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit); async_gen_syndrome() 245 return tx; async_gen_syndrome() 302 struct dma_async_tx_descriptor *tx; async_syndrome_val() local 357 tx = device->device_prep_dma_pq_val(chan, pq, async_syndrome_val() 363 if (likely(tx)) async_syndrome_val() 369 dma_set_unmap(tx, unmap); async_syndrome_val() 370 async_tx_submit(chan, tx, submit); async_syndrome_val() 372 return tx; async_syndrome_val() 396 tx = NULL; async_syndrome_val() 401 tx = async_xor(spare, blocks, offset, disks-2, len, submit); async_syndrome_val() 402 async_tx_quiesce(&tx); async_syndrome_val() 412 tx = async_gen_syndrome(blocks, offset, disks, len, submit); async_syndrome_val() 413 async_tx_quiesce(&tx); async_syndrome_val()
|
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/o2iblnd/ |
H A D | o2iblnd_cb.c | 44 kiblnd_tx_done(lnet_ni_t *ni, kib_tx_t *tx) kiblnd_tx_done() argument 53 LASSERT(!tx->tx_queued); /* mustn't be queued for sending */ kiblnd_tx_done() 54 LASSERT(tx->tx_sending == 0); /* mustn't be awaiting sent callback */ kiblnd_tx_done() 55 LASSERT(!tx->tx_waiting); /* mustn't be awaiting peer response */ kiblnd_tx_done() 56 LASSERT(tx->tx_pool != NULL); kiblnd_tx_done() 58 kiblnd_unmap_tx(ni, tx); kiblnd_tx_done() 60 /* tx may have up to 2 lnet msgs to finalise */ kiblnd_tx_done() 61 lntmsg[0] = tx->tx_lntmsg[0]; tx->tx_lntmsg[0] = NULL; kiblnd_tx_done() 62 lntmsg[1] = tx->tx_lntmsg[1]; tx->tx_lntmsg[1] = NULL; kiblnd_tx_done() 63 rc = tx->tx_status; kiblnd_tx_done() 65 if (tx->tx_conn != NULL) { kiblnd_tx_done() 66 LASSERT(ni == tx->tx_conn->ibc_peer->ibp_ni); kiblnd_tx_done() 68 kiblnd_conn_decref(tx->tx_conn); kiblnd_tx_done() 69 tx->tx_conn = NULL; kiblnd_tx_done() 72 tx->tx_nwrq = 0; kiblnd_tx_done() 73 tx->tx_status = 0; kiblnd_tx_done() 75 kiblnd_pool_free_node(&tx->tx_pool->tpo_pool, &tx->tx_list); kiblnd_tx_done() 89 kib_tx_t *tx; kiblnd_txlist_done() local 92 tx = list_entry(txlist->next, kib_tx_t, tx_list); kiblnd_txlist_done() 94 list_del(&tx->tx_list); kiblnd_txlist_done() 96 tx->tx_waiting = 0; kiblnd_txlist_done() 97 tx->tx_status = status; kiblnd_txlist_done() 98 kiblnd_tx_done(ni, tx); kiblnd_txlist_done() 107 kib_tx_t *tx; kiblnd_get_idle_tx() local 114 tx = container_of(node, kib_tx_t, tx_list); kiblnd_get_idle_tx() 116 LASSERT(tx->tx_nwrq == 0); kiblnd_get_idle_tx() 117 LASSERT(!tx->tx_queued); kiblnd_get_idle_tx() 118 LASSERT(tx->tx_sending == 0); kiblnd_get_idle_tx() 119 LASSERT(!tx->tx_waiting); kiblnd_get_idle_tx() 120 LASSERT(tx->tx_status == 0); kiblnd_get_idle_tx() 121 LASSERT(tx->tx_conn == NULL); kiblnd_get_idle_tx() 122 LASSERT(tx->tx_lntmsg[0] == NULL); kiblnd_get_idle_tx() 123 LASSERT(tx->tx_lntmsg[1] == NULL); kiblnd_get_idle_tx() 124 LASSERT(tx->tx_u.pmr == NULL); kiblnd_get_idle_tx() 125 LASSERT(tx->tx_nfrags == 0); kiblnd_get_idle_tx() 127 return tx; kiblnd_get_idle_tx() 218 kib_tx_t *tx = list_entry(tmp, kib_tx_t, tx_list); kiblnd_find_waiting_tx_locked() local 220 LASSERT(!tx->tx_queued); kiblnd_find_waiting_tx_locked() 221 LASSERT(tx->tx_sending != 0 || tx->tx_waiting); kiblnd_find_waiting_tx_locked() 223 if (tx->tx_cookie != cookie) kiblnd_find_waiting_tx_locked() 226 if (tx->tx_waiting && kiblnd_find_waiting_tx_locked() 227 tx->tx_msg->ibm_type == txtype) kiblnd_find_waiting_tx_locked() 228 return tx; kiblnd_find_waiting_tx_locked() 231 tx->tx_waiting ? 
"" : "NOT ", kiblnd_find_waiting_tx_locked() 232 tx->tx_msg->ibm_type, txtype); kiblnd_find_waiting_tx_locked() 240 kib_tx_t *tx; kiblnd_handle_completion() local 246 tx = kiblnd_find_waiting_tx_locked(conn, txtype, cookie); kiblnd_handle_completion() 247 if (tx == NULL) { kiblnd_handle_completion() 256 if (tx->tx_status == 0) { /* success so far */ kiblnd_handle_completion() 258 tx->tx_status = status; kiblnd_handle_completion() 260 lnet_set_reply_msg_len(ni, tx->tx_lntmsg[1], status); kiblnd_handle_completion() 264 tx->tx_waiting = 0; kiblnd_handle_completion() 266 idle = !tx->tx_queued && (tx->tx_sending == 0); kiblnd_handle_completion() 268 list_del(&tx->tx_list); kiblnd_handle_completion() 273 kiblnd_tx_done(ni, tx); kiblnd_handle_completion() 280 kib_tx_t *tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); kiblnd_send_completion() local 282 if (tx == NULL) { kiblnd_send_completion() 283 CERROR("Can't get tx for completion %x for %s\n", kiblnd_send_completion() 288 tx->tx_msg->ibm_u.completion.ibcm_status = status; kiblnd_send_completion() 289 tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie; kiblnd_send_completion() 290 kiblnd_init_tx_msg(ni, tx, type, sizeof(kib_completion_msg_t)); kiblnd_send_completion() 292 kiblnd_queue_tx(tx, conn); kiblnd_send_completion() 302 kib_tx_t *tx; kiblnd_handle_rx() local 392 tx = kiblnd_find_waiting_tx_locked(conn, IBLND_MSG_PUT_REQ, kiblnd_handle_rx() 394 if (tx != NULL) kiblnd_handle_rx() 395 list_del(&tx->tx_list); kiblnd_handle_rx() 398 if (tx == NULL) { kiblnd_handle_rx() 405 LASSERT(tx->tx_waiting); kiblnd_handle_rx() 410 tx->tx_nwrq = 0; /* overwrite PUT_REQ */ kiblnd_handle_rx() 412 rc2 = kiblnd_init_rdma(conn, tx, IBLND_MSG_PUT_DONE, kiblnd_handle_rx() 421 tx->tx_waiting = 0; /* clear waiting and queue atomically */ kiblnd_handle_rx() 422 kiblnd_queue_tx_locked(tx, conn); kiblnd_handle_rx() 551 kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) kiblnd_fmr_map_tx() argument 554 __u64 *pages = tx->tx_pages; kiblnd_fmr_map_tx() 562 LASSERT(tx->tx_pool != NULL); kiblnd_fmr_map_tx() 563 LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL); kiblnd_fmr_map_tx() 565 hdev = tx->tx_pool->tpo_hdev; kiblnd_fmr_map_tx() 575 cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt; kiblnd_fmr_map_tx() 578 rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->tx_u.fmr); kiblnd_fmr_map_tx() 586 rd->rd_key = (rd != tx->tx_rd) ? tx->tx_u.fmr.fmr_pfmr->fmr->rkey : kiblnd_fmr_map_tx() 587 tx->tx_u.fmr.fmr_pfmr->fmr->lkey; kiblnd_fmr_map_tx() 596 kiblnd_pmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob) kiblnd_pmr_map_tx() argument 604 LASSERT(tx->tx_pool != NULL); kiblnd_pmr_map_tx() 605 LASSERT(tx->tx_pool->tpo_pool.po_owner != NULL); kiblnd_pmr_map_tx() 607 hdev = tx->tx_pool->tpo_hdev; kiblnd_pmr_map_tx() 611 cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt; kiblnd_pmr_map_tx() 614 rc = kiblnd_pmr_pool_map(pps, hdev, rd, &iova, &tx->tx_u.pmr); kiblnd_pmr_map_tx() 622 rd->rd_key = (rd != tx->tx_rd) ? 
tx->tx_u.pmr->pmr_mr->rkey : kiblnd_pmr_map_tx() 623 tx->tx_u.pmr->pmr_mr->lkey; kiblnd_pmr_map_tx() 632 kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx) kiblnd_unmap_tx() argument 638 if (net->ibn_fmr_ps != NULL && tx->tx_u.fmr.fmr_pfmr != NULL) { kiblnd_unmap_tx() 639 kiblnd_fmr_pool_unmap(&tx->tx_u.fmr, tx->tx_status); kiblnd_unmap_tx() 640 tx->tx_u.fmr.fmr_pfmr = NULL; kiblnd_unmap_tx() 642 } else if (net->ibn_pmr_ps != NULL && tx->tx_u.pmr != NULL) { kiblnd_unmap_tx() 643 kiblnd_pmr_pool_unmap(tx->tx_u.pmr); kiblnd_unmap_tx() 644 tx->tx_u.pmr = NULL; kiblnd_unmap_tx() 647 if (tx->tx_nfrags != 0) { kiblnd_unmap_tx() 648 kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev, kiblnd_unmap_tx() 649 tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir); kiblnd_unmap_tx() 650 tx->tx_nfrags = 0; kiblnd_unmap_tx() 655 kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kiblnd_map_tx() argument 658 kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; kiblnd_map_tx() 666 tx->tx_dmadir = (rd != tx->tx_rd) ? DMA_FROM_DEVICE : DMA_TO_DEVICE; kiblnd_map_tx() 667 tx->tx_nfrags = nfrags; kiblnd_map_tx() 671 tx->tx_frags, tx->tx_nfrags, tx->tx_dmadir); kiblnd_map_tx() 675 hdev->ibh_ibdev, &tx->tx_frags[i]); kiblnd_map_tx() 677 hdev->ibh_ibdev, &tx->tx_frags[i]); kiblnd_map_tx() 685 rd->rd_key = (rd != tx->tx_rd) ? mr->rkey : mr->lkey; kiblnd_map_tx() 690 return kiblnd_fmr_map_tx(net, tx, rd, nob); kiblnd_map_tx() 692 return kiblnd_pmr_map_tx(net, tx, rd, nob); kiblnd_map_tx() 699 kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, kiblnd_setup_rd_iov() argument 720 sg = tx->tx_frags; kiblnd_setup_rd_iov() 748 return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags); kiblnd_setup_rd_iov() 752 kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd, kiblnd_setup_rd_kiov() argument 772 sg = tx->tx_frags; kiblnd_setup_rd_kiov() 788 return kiblnd_map_tx(ni, tx, rd, sg - tx->tx_frags); kiblnd_setup_rd_kiov() 792 kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit) 796 kib_msg_t *msg = tx->tx_msg; 803 LASSERT(tx->tx_queued); 805 LASSERT(tx->tx_nwrq > 0); 806 LASSERT(tx->tx_nwrq <= 1 + IBLND_RDMA_FRAGS(ver)); 815 /* tx completions outstanding... */ 836 list_del(&tx->tx_list); 837 tx->tx_queued = 0; 847 kiblnd_tx_done(peer->ibp_ni, tx); 864 /* CAVEAT EMPTOR! 
This tx could be the PUT_DONE of an RDMA 870 tx->tx_sending++; 871 list_add(&tx->tx_list, &conn->ibc_active_txs); 876 } else if (tx->tx_pool->tpo_pool.po_failed || 877 conn->ibc_hdev != tx->tx_pool->tpo_hdev) { 882 tx->tx_wrq, &bad_wrq); 898 tx->tx_status = rc; 899 tx->tx_waiting = 0; 900 tx->tx_sending--; 902 done = (tx->tx_sending == 0); 904 list_del(&tx->tx_list); 918 kiblnd_tx_done(peer->ibp_ni, tx); 930 kib_tx_t *tx; kiblnd_check_sends() local 948 tx = list_entry(conn->ibc_tx_queue_rsrvd.next, kiblnd_check_sends() 950 list_del(&tx->tx_list); kiblnd_check_sends() 951 list_add_tail(&tx->tx_list, &conn->ibc_tx_queue); kiblnd_check_sends() 958 tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); kiblnd_check_sends() 959 if (tx != NULL) kiblnd_check_sends() 960 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_NOOP, 0); kiblnd_check_sends() 963 if (tx != NULL) kiblnd_check_sends() 964 kiblnd_queue_tx_locked(tx, conn); kiblnd_check_sends() 974 tx = list_entry(conn->ibc_tx_queue_nocred.next, kiblnd_check_sends() 979 tx = list_entry(conn->ibc_tx_noops.next, kiblnd_check_sends() 983 tx = list_entry(conn->ibc_tx_queue.next, kiblnd_check_sends() 988 if (kiblnd_post_tx_locked(conn, tx, credit) != 0) kiblnd_check_sends() 998 kiblnd_tx_complete(kib_tx_t *tx, int status) kiblnd_tx_complete() argument 1001 kib_conn_t *conn = tx->tx_conn; kiblnd_tx_complete() 1004 LASSERT(tx->tx_sending > 0); kiblnd_tx_complete() 1010 tx->tx_cookie, tx->tx_sending, tx->tx_waiting, kiblnd_tx_complete() 1020 /* I could be racing with rdma completion. Whoever makes 'tx' idle kiblnd_tx_complete() 1023 tx->tx_sending--; kiblnd_tx_complete() 1025 if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP) kiblnd_tx_complete() 1029 tx->tx_waiting = 0; /* don't wait for peer */ kiblnd_tx_complete() 1030 tx->tx_status = -EIO; kiblnd_tx_complete() 1033 idle = (tx->tx_sending == 0) && /* This is the final callback */ kiblnd_tx_complete() 1034 !tx->tx_waiting && /* Not waiting for peer */ kiblnd_tx_complete() 1035 !tx->tx_queued; /* Not re-queued (PUT_DONE) */ kiblnd_tx_complete() 1037 list_del(&tx->tx_list); kiblnd_tx_complete() 1044 kiblnd_tx_done(conn->ibc_peer->ibp_ni, tx); kiblnd_tx_complete() 1052 kiblnd_init_tx_msg(lnet_ni_t *ni, kib_tx_t *tx, int type, int body_nob) kiblnd_init_tx_msg() argument 1054 kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev; kiblnd_init_tx_msg() 1055 struct ib_sge *sge = &tx->tx_sge[tx->tx_nwrq]; kiblnd_init_tx_msg() 1056 struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq]; kiblnd_init_tx_msg() 1060 LASSERT(tx->tx_nwrq >= 0); kiblnd_init_tx_msg() 1061 LASSERT(tx->tx_nwrq < IBLND_MAX_RDMA_FRAGS + 1); kiblnd_init_tx_msg() 1064 kiblnd_init_msg(tx->tx_msg, type, body_nob); kiblnd_init_tx_msg() 1066 mr = kiblnd_find_dma_mr(hdev, tx->tx_msgaddr, nob); kiblnd_init_tx_msg() 1070 sge->addr = tx->tx_msgaddr; kiblnd_init_tx_msg() 1076 wrq->wr_id = kiblnd_ptr2wreqid(tx, IBLND_WID_TX); kiblnd_init_tx_msg() 1082 tx->tx_nwrq++; kiblnd_init_tx_msg() 1086 kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type, kiblnd_init_rdma() argument 1089 kib_msg_t *ibmsg = tx->tx_msg; kiblnd_init_rdma() 1090 kib_rdma_desc_t *srcrd = tx->tx_rd; kiblnd_init_rdma() 1091 struct ib_sge *sge = &tx->tx_sge[0]; kiblnd_init_rdma() 1092 struct ib_send_wr *wrq = &tx->tx_wrq[0]; kiblnd_init_rdma() 1099 LASSERT(tx->tx_nwrq == 0); kiblnd_init_rdma() 1118 if (tx->tx_nwrq == IBLND_RDMA_FRAGS(conn->ibc_version)) { kiblnd_init_rdma() 1132 sge = &tx->tx_sge[tx->tx_nwrq]; kiblnd_init_rdma() 1137 wrq = &tx->tx_wrq[tx->tx_nwrq]; kiblnd_init_rdma() 1140 wrq->wr_id = 
kiblnd_ptr2wreqid(tx, IBLND_WID_RDMA); kiblnd_init_rdma() 1154 tx->tx_nwrq++; kiblnd_init_rdma() 1160 tx->tx_nwrq = 0; kiblnd_init_rdma() 1164 kiblnd_init_tx_msg(conn->ibc_peer->ibp_ni, tx, kiblnd_init_rdma() 1171 kiblnd_queue_tx_locked(kib_tx_t *tx, kib_conn_t *conn) kiblnd_queue_tx_locked() argument 1175 LASSERT(tx->tx_nwrq > 0); /* work items set up */ kiblnd_queue_tx_locked() 1176 LASSERT(!tx->tx_queued); /* not queued for sending already */ kiblnd_queue_tx_locked() 1179 tx->tx_queued = 1; kiblnd_queue_tx_locked() 1180 tx->tx_deadline = jiffies + (*kiblnd_tunables.kib_timeout * HZ); kiblnd_queue_tx_locked() 1182 if (tx->tx_conn == NULL) { kiblnd_queue_tx_locked() 1184 tx->tx_conn = conn; kiblnd_queue_tx_locked() 1185 LASSERT(tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE); kiblnd_queue_tx_locked() 1188 LASSERT(tx->tx_conn == conn); kiblnd_queue_tx_locked() 1189 LASSERT(tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE); kiblnd_queue_tx_locked() 1192 switch (tx->tx_msg->ibm_type) { kiblnd_queue_tx_locked() 1220 list_add_tail(&tx->tx_list, q); kiblnd_queue_tx_locked() 1224 kiblnd_queue_tx(kib_tx_t *tx, kib_conn_t *conn) kiblnd_queue_tx() argument 1227 kiblnd_queue_tx_locked(tx, conn); kiblnd_queue_tx() 1336 kiblnd_launch_tx(lnet_ni_t *ni, kib_tx_t *tx, lnet_nid_t nid) kiblnd_launch_tx() argument 1345 /* If I get here, I've committed to send, so I complete the tx with kiblnd_launch_tx() 1348 LASSERT(tx == NULL || tx->tx_conn == NULL); /* only set when assigned a conn */ kiblnd_launch_tx() 1349 LASSERT(tx == NULL || tx->tx_nwrq > 0); /* work items have been set up */ kiblnd_launch_tx() 1363 if (tx != NULL) kiblnd_launch_tx() 1364 kiblnd_queue_tx(tx, conn); kiblnd_launch_tx() 1379 if (tx != NULL) kiblnd_launch_tx() 1380 list_add_tail(&tx->tx_list, kiblnd_launch_tx() 1389 if (tx != NULL) kiblnd_launch_tx() 1390 kiblnd_queue_tx(tx, conn); kiblnd_launch_tx() 1402 if (tx != NULL) { kiblnd_launch_tx() 1403 tx->tx_status = -EHOSTUNREACH; kiblnd_launch_tx() 1404 tx->tx_waiting = 0; kiblnd_launch_tx() 1405 kiblnd_tx_done(ni, tx); kiblnd_launch_tx() 1418 if (tx != NULL) kiblnd_launch_tx() 1419 list_add_tail(&tx->tx_list, kiblnd_launch_tx() 1428 if (tx != NULL) kiblnd_launch_tx() 1429 kiblnd_queue_tx(tx, conn); kiblnd_launch_tx() 1444 if (tx != NULL) kiblnd_launch_tx() 1445 list_add_tail(&tx->tx_list, &peer->ibp_tx_queue); kiblnd_launch_tx() 1470 kib_tx_t *tx; kiblnd_send() local 1505 tx = kiblnd_get_idle_tx(ni, target.nid); kiblnd_send() 1506 if (tx == NULL) { kiblnd_send() 1512 ibmsg = tx->tx_msg; kiblnd_send() 1515 rc = kiblnd_setup_rd_iov(ni, tx, kiblnd_send() 1521 rc = kiblnd_setup_rd_kiov(ni, tx, kiblnd_send() 1529 kiblnd_tx_done(ni, tx); kiblnd_send() 1533 nob = offsetof(kib_get_msg_t, ibgm_rd.rd_frags[tx->tx_nfrags]); kiblnd_send() 1534 ibmsg->ibm_u.get.ibgm_cookie = tx->tx_cookie; kiblnd_send() 1537 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_GET_REQ, nob); kiblnd_send() 1539 tx->tx_lntmsg[1] = lnet_create_reply_msg(ni, lntmsg); kiblnd_send() 1540 if (tx->tx_lntmsg[1] == NULL) { kiblnd_send() 1543 kiblnd_tx_done(ni, tx); kiblnd_send() 1547 tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg[0,1] on completion */ kiblnd_send() 1548 tx->tx_waiting = 1; /* waiting for GET_DONE */ kiblnd_send() 1549 kiblnd_launch_tx(ni, tx, target.nid); kiblnd_send() 1559 tx = kiblnd_get_idle_tx(ni, target.nid); kiblnd_send() 1560 if (tx == NULL) { kiblnd_send() 1568 rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd, kiblnd_send() 1572 rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd, kiblnd_send() 1578 kiblnd_tx_done(ni, tx); kiblnd_send() 1582 
ibmsg = tx->tx_msg; kiblnd_send() 1584 ibmsg->ibm_u.putreq.ibprm_cookie = tx->tx_cookie; kiblnd_send() 1585 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_REQ, sizeof(kib_putreq_msg_t)); kiblnd_send() 1587 tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */ kiblnd_send() 1588 tx->tx_waiting = 1; /* waiting for PUT_{ACK,NAK} */ kiblnd_send() 1589 kiblnd_launch_tx(ni, tx, target.nid); kiblnd_send() 1598 tx = kiblnd_get_idle_tx(ni, target.nid); kiblnd_send() 1599 if (tx == NULL) { kiblnd_send() 1600 CERROR("Can't send %d to %s: tx descs exhausted\n", kiblnd_send() 1605 ibmsg = tx->tx_msg; kiblnd_send() 1620 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_IMMEDIATE, nob); kiblnd_send() 1622 tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */ kiblnd_send() 1623 kiblnd_launch_tx(ni, tx, target.nid); kiblnd_send() 1636 kib_tx_t *tx; kiblnd_reply() local 1639 tx = kiblnd_get_idle_tx(ni, rx->rx_conn->ibc_peer->ibp_nid); kiblnd_reply() 1640 if (tx == NULL) { kiblnd_reply() 1641 CERROR("Can't get tx for REPLY to %s\n", kiblnd_reply() 1649 rc = kiblnd_setup_rd_iov(ni, tx, tx->tx_rd, kiblnd_reply() 1652 rc = kiblnd_setup_rd_kiov(ni, tx, tx->tx_rd, kiblnd_reply() 1661 rc = kiblnd_init_rdma(rx->rx_conn, tx, kiblnd_reply() 1677 tx->tx_lntmsg[0] = lntmsg; kiblnd_reply() 1680 kiblnd_queue_tx(tx, rx->rx_conn); kiblnd_reply() 1684 kiblnd_tx_done(ni, tx); kiblnd_reply() 1697 kib_tx_t *tx; kiblnd_recv() local 1743 tx = kiblnd_get_idle_tx(ni, conn->ibc_peer->ibp_nid); kiblnd_recv() 1744 if (tx == NULL) { kiblnd_recv() 1745 CERROR("Can't allocate tx for %s\n", kiblnd_recv() 1752 txmsg = tx->tx_msg; kiblnd_recv() 1754 rc = kiblnd_setup_rd_iov(ni, tx, kiblnd_recv() 1758 rc = kiblnd_setup_rd_kiov(ni, tx, kiblnd_recv() 1764 kiblnd_tx_done(ni, tx); kiblnd_recv() 1771 nob = offsetof(kib_putack_msg_t, ibpam_rd.rd_frags[tx->tx_nfrags]); kiblnd_recv() 1773 txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie; kiblnd_recv() 1775 kiblnd_init_tx_msg(ni, tx, IBLND_MSG_PUT_ACK, nob); kiblnd_recv() 1777 tx->tx_lntmsg[0] = lntmsg; /* finalise lntmsg on completion */ kiblnd_recv() 1778 tx->tx_waiting = 1; /* waiting for PUT_DONE */ kiblnd_recv() 1779 kiblnd_queue_tx(tx, conn); kiblnd_recv() 1962 kib_tx_t *tx; kiblnd_abort_txs() local 1967 tx = list_entry(tmp, kib_tx_t, tx_list); list_for_each_safe() 1970 LASSERT(!tx->tx_queued); list_for_each_safe() 1971 LASSERT(tx->tx_waiting || list_for_each_safe() 1972 tx->tx_sending != 0); list_for_each_safe() 1974 LASSERT(tx->tx_queued); list_for_each_safe() 1977 tx->tx_status = -ECONNABORTED; list_for_each_safe() 1978 tx->tx_waiting = 0; list_for_each_safe() 1980 if (tx->tx_sending == 0) { list_for_each_safe() 1981 tx->tx_queued = 0; list_for_each_safe() 1982 list_del(&tx->tx_list); list_for_each_safe() 1983 list_add(&tx->tx_list, &zombies); list_for_each_safe() 2005 /* Complete all tx descs not waiting for sends to complete. 
kiblnd_finalise_conn() 2075 kib_tx_t *tx; kiblnd_connreq_done() local 2153 list_for_each_entry_safe(tx, tmp, &txs, tx_list) { kiblnd_connreq_done() 2154 list_del(&tx->tx_list); kiblnd_connreq_done() 2156 kiblnd_queue_tx_locked(tx, conn); kiblnd_connreq_done() 2986 kib_tx_t *tx; kiblnd_check_txs_locked() local 2990 tx = list_entry(ttmp, kib_tx_t, tx_list); list_for_each() 2993 LASSERT(tx->tx_queued); list_for_each() 2995 LASSERT(!tx->tx_queued); list_for_each() 2996 LASSERT(tx->tx_waiting || tx->tx_sending != 0); list_for_each() 2999 if (cfs_time_aftereq(jiffies, tx->tx_deadline)) { list_for_each() 3000 CERROR("Timed out tx: %s, %lu seconds\n", list_for_each() 3002 cfs_duration_sec(jiffies - tx->tx_deadline)); list_for_each() 3090 * NOOP, but there were no non-blocking tx descs 3250 * failing RDMA because 'tx' might be back on the idle list or kiblnd_complete() 3253 CNETERR("RDMA (tx: %p) failed: %d\n", kiblnd_complete()
|
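The timeout handling above stamps each tx with a jiffies-based tx_deadline and later tests it with cfs_time_aftereq(), which has to stay correct when the tick counter wraps. A standalone sketch of a wrap-safe "deadline reached" test, assuming (as such checks require) that the two timestamps are always less than half the counter range apart.

#include <stdint.h>
#include <stdio.h>

/* True once 'now' has reached 'deadline', even across counter
 * wraparound, as long as the two are less than half the counter
 * range apart - the property time_after_eq()-style checks rely on. */
static int deadline_passed(uint32_t now, uint32_t deadline)
{
	return (uint32_t)(now - deadline) < 0x80000000u;
}

int main(void)
{
	uint32_t deadline = 0xfffffff0u + 0x20u;	/* wraps to 0x10 */

	printf("%d %d\n",
	       deadline_passed(0xfffffff5u, deadline),	/* 0: not yet    */
	       deadline_passed(0x00000011u, deadline));	/* 1: timed out  */
	return 0;
}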
H A D | o2iblnd.c | 1078 * and connection establishment with a NULL tx */ kiblnd_query() 1196 kib_tx_t *tx; kiblnd_unmap_tx_pool() local 1205 tx = &tpo->tpo_tx_descs[i]; kiblnd_unmap_tx_pool() 1207 KIBLND_UNMAP_ADDR(tx, tx_msgunmap, kiblnd_unmap_tx_pool() 1208 tx->tx_msgaddr), kiblnd_unmap_tx_pool() 1248 kib_tx_t *tx; kiblnd_map_tx_pool() local 1267 tx = &tpo->tpo_tx_descs[i]; kiblnd_map_tx_pool() 1269 tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) + kiblnd_map_tx_pool() 1272 tx->tx_msgaddr = kiblnd_dma_map_single( kiblnd_map_tx_pool() 1273 tpo->tpo_hdev->ibh_ibdev, tx->tx_msg, kiblnd_map_tx_pool() 1276 tx->tx_msgaddr)); kiblnd_map_tx_pool() 1277 KIBLND_UNMAP_ADDR_SET(tx, tx_msgunmap, tx->tx_msgaddr); kiblnd_map_tx_pool() 1279 list_add(&tx->tx_list, &pool->po_free_list); kiblnd_map_tx_pool() 1753 /* no available tx pool and ... */ kiblnd_pool_alloc_node() 1940 kib_tx_t *tx = &tpo->tpo_tx_descs[i]; kiblnd_destroy_tx_pool() local 1942 list_del(&tx->tx_list); kiblnd_destroy_tx_pool() 1943 if (tx->tx_pages != NULL) kiblnd_destroy_tx_pool() 1944 LIBCFS_FREE(tx->tx_pages, kiblnd_destroy_tx_pool() 1946 sizeof(*tx->tx_pages)); kiblnd_destroy_tx_pool() 1947 if (tx->tx_frags != NULL) kiblnd_destroy_tx_pool() 1948 LIBCFS_FREE(tx->tx_frags, kiblnd_destroy_tx_pool() 1950 sizeof(*tx->tx_frags)); kiblnd_destroy_tx_pool() 1951 if (tx->tx_wrq != NULL) kiblnd_destroy_tx_pool() 1952 LIBCFS_FREE(tx->tx_wrq, kiblnd_destroy_tx_pool() 1954 sizeof(*tx->tx_wrq)); kiblnd_destroy_tx_pool() 1955 if (tx->tx_sge != NULL) kiblnd_destroy_tx_pool() 1956 LIBCFS_FREE(tx->tx_sge, kiblnd_destroy_tx_pool() 1958 sizeof(*tx->tx_sge)); kiblnd_destroy_tx_pool() 1959 if (tx->tx_rd != NULL) kiblnd_destroy_tx_pool() 1960 LIBCFS_FREE(tx->tx_rd, kiblnd_destroy_tx_pool() 2000 CERROR("Can't allocate tx pages: %d\n", npg); kiblnd_create_tx_pool() 2008 CERROR("Can't allocate %d tx descriptors\n", size); kiblnd_create_tx_pool() 2016 kib_tx_t *tx = &tpo->tpo_tx_descs[i]; kiblnd_create_tx_pool() local 2018 tx->tx_pool = tpo; kiblnd_create_tx_pool() 2020 LIBCFS_CPT_ALLOC(tx->tx_pages, kiblnd_create_tx_pool() 2022 LNET_MAX_IOV * sizeof(*tx->tx_pages)); kiblnd_create_tx_pool() 2023 if (tx->tx_pages == NULL) kiblnd_create_tx_pool() 2027 LIBCFS_CPT_ALLOC(tx->tx_frags, lnet_cpt_table(), ps->ps_cpt, kiblnd_create_tx_pool() 2028 IBLND_MAX_RDMA_FRAGS * sizeof(*tx->tx_frags)); kiblnd_create_tx_pool() 2029 if (tx->tx_frags == NULL) kiblnd_create_tx_pool() 2032 sg_init_table(tx->tx_frags, IBLND_MAX_RDMA_FRAGS); kiblnd_create_tx_pool() 2034 LIBCFS_CPT_ALLOC(tx->tx_wrq, lnet_cpt_table(), ps->ps_cpt, kiblnd_create_tx_pool() 2036 sizeof(*tx->tx_wrq)); kiblnd_create_tx_pool() 2037 if (tx->tx_wrq == NULL) kiblnd_create_tx_pool() 2040 LIBCFS_CPT_ALLOC(tx->tx_sge, lnet_cpt_table(), ps->ps_cpt, kiblnd_create_tx_pool() 2042 sizeof(*tx->tx_sge)); kiblnd_create_tx_pool() 2043 if (tx->tx_sge == NULL) kiblnd_create_tx_pool() 2046 LIBCFS_CPT_ALLOC(tx->tx_rd, lnet_cpt_table(), ps->ps_cpt, kiblnd_create_tx_pool() 2049 if (tx->tx_rd == NULL) kiblnd_create_tx_pool() 2067 kib_tx_t *tx = list_entry(node, kib_tx_t, tx_list); kiblnd_tx_init() local 2069 tx->tx_cookie = tps->tps_next_tx_cookie++; kiblnd_tx_init() 2214 CERROR("Failed to allocate tx pool array\n"); kiblnd_net_init_pools()
|
/linux-4.1.27/drivers/staging/gdm724x/ |
H A D | gdm_usb.c | 158 static struct usb_tx_sdu *get_tx_sdu_struct(struct tx_cxt *tx, int *no_spc) get_tx_sdu_struct() argument 162 if (list_empty(&tx->free_list)) get_tx_sdu_struct() 165 t_sdu = list_entry(tx->free_list.next, struct usb_tx_sdu, list); get_tx_sdu_struct() 168 tx->avail_count--; get_tx_sdu_struct() 170 *no_spc = list_empty(&tx->free_list) ? 1 : 0; get_tx_sdu_struct() 175 static void put_tx_struct(struct tx_cxt *tx, struct usb_tx_sdu *t_sdu) put_tx_struct() argument 177 list_add_tail(&t_sdu->list, &tx->free_list); put_tx_struct() 178 tx->avail_count++; put_tx_struct() 260 struct tx_cxt *tx = &udev->tx; release_usb() local 266 spin_lock_irqsave(&tx->lock, flags); release_usb() 267 list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->sdu_list, list) { release_usb() 272 list_for_each_entry_safe(t, t_next, &tx->hci_list, list) { release_usb() 277 list_for_each_entry_safe(t_sdu, t_sdu_next, &tx->free_list, list) { release_usb() 281 spin_unlock_irqrestore(&tx->lock, flags); release_usb() 313 struct tx_cxt *tx = &udev->tx; init_usb() local 323 INIT_LIST_HEAD(&tx->sdu_list); init_usb() 324 INIT_LIST_HEAD(&tx->hci_list); init_usb() 325 INIT_LIST_HEAD(&tx->free_list); init_usb() 329 spin_lock_init(&tx->lock); init_usb() 334 tx->avail_count = 0; init_usb() 346 list_add(&t_sdu->list, &tx->free_list); init_usb() 347 tx->avail_count++; init_usb() 555 struct tx_cxt *tx = t->tx; gdm_usb_send_complete() local 556 struct lte_udev *udev = container_of(tx, struct lte_udev, tx); gdm_usb_send_complete() 569 spin_lock_irqsave(&tx->lock, flags); gdm_usb_send_complete() 572 spin_unlock_irqrestore(&tx->lock, flags); gdm_usb_send_complete() 603 struct tx_cxt *tx = &udev->tx; packet_aggregation() local 613 spin_lock_irqsave(&tx->lock, flags); packet_aggregation() 614 if (list_empty(&tx->sdu_list)) { packet_aggregation() 615 spin_unlock_irqrestore(&tx->lock, flags); packet_aggregation() 619 t_sdu = list_entry(tx->sdu_list.next, struct usb_tx_sdu, list); packet_aggregation() 621 spin_unlock_irqrestore(&tx->lock, flags); packet_aggregation() 626 spin_unlock_irqrestore(&tx->lock, flags); packet_aggregation() 633 if (tx->avail_count > 10) packet_aggregation() 636 spin_lock_irqsave(&tx->lock, flags); packet_aggregation() 637 put_tx_struct(tx, t_sdu); packet_aggregation() 638 spin_unlock_irqrestore(&tx->lock, flags); packet_aggregation() 652 struct tx_cxt *tx = &udev->tx; do_tx() local 664 spin_lock_irqsave(&tx->lock, flags); do_tx() 666 spin_unlock_irqrestore(&tx->lock, flags); do_tx() 671 if (!list_empty(&tx->hci_list)) { do_tx() 672 t = list_entry(tx->hci_list.next, struct usb_tx, list); do_tx() 677 } else if (!list_empty(&tx->sdu_list)) { do_tx() 680 spin_unlock_irqrestore(&tx->lock, flags); do_tx() 686 spin_unlock_irqrestore(&tx->lock, flags); do_tx() 690 t->tx = tx; do_tx() 697 spin_unlock_irqrestore(&tx->lock, flags); do_tx() 700 spin_unlock_irqrestore(&tx->lock, flags); do_tx() 719 struct tx_cxt *tx = &udev->tx; gdm_usb_sdu_send() local 731 spin_lock_irqsave(&tx->lock, flags); gdm_usb_sdu_send() 732 t_sdu = get_tx_sdu_struct(tx, &no_spc); gdm_usb_sdu_send() 733 spin_unlock_irqrestore(&tx->lock, flags); gdm_usb_sdu_send() 760 spin_lock_irqsave(&tx->lock, flags); gdm_usb_sdu_send() 761 list_add_tail(&t_sdu->list, &tx->sdu_list); gdm_usb_sdu_send() 763 spin_unlock_irqrestore(&tx->lock, flags); gdm_usb_sdu_send() 775 struct tx_cxt *tx = &udev->tx; gdm_usb_hci_send() local 794 t->tx = tx; gdm_usb_hci_send() 797 spin_lock_irqsave(&tx->lock, flags); gdm_usb_hci_send() 798 list_add_tail(&t->list, &tx->hci_list); 
gdm_usb_hci_send() 800 spin_unlock_irqrestore(&tx->lock, flags); gdm_usb_hci_send() 954 struct tx_cxt *tx; gdm_usb_resume() local 982 tx = &udev->tx; gdm_usb_resume() 983 spin_lock_irqsave(&tx->lock, flags); gdm_usb_resume() 985 spin_unlock_irqrestore(&tx->lock, flags); gdm_usb_resume()
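gdm_usb_send_complete() recovers the owning device from the embedded tx context with container_of(tx, struct lte_udev, tx). A small userspace sketch of that pointer arithmetic follows; the demo struct and field values are invented, only the container_of() mechanics match the driver.

#include <stddef.h>
#include <stdio.h>

/* userspace stand-in for the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct tx_cxt {
	int avail_count;
};

struct lte_udev_demo {
	int id;
	struct tx_cxt tx;   /* embedded, as in struct lte_udev */
};

int main(void)
{
	struct lte_udev_demo udev = { .id = 7, .tx = { .avail_count = 3 } };
	struct tx_cxt *tx = &udev.tx;

	/* recover the enclosing device from the embedded tx context,
	 * the same way gdm_usb_send_complete() does */
	struct lte_udev_demo *owner = container_of(tx, struct lte_udev_demo, tx);

	printf("owner id = %d, avail_count = %d\n", owner->id, tx->avail_count);
	return 0;
}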
|
H A D | gdm_usb.h | 52 struct tx_cxt *tx; member in struct:usb_tx 97 struct tx_cxt tx; member in struct:lte_udev
|
/linux-4.1.27/sound/soc/fsl/ |
H A D | fsl_sai.h | 38 #define FSL_SAI_xCSR(tx) (tx ? FSL_SAI_TCSR : FSL_SAI_RCSR) 39 #define FSL_SAI_xCR1(tx) (tx ? FSL_SAI_TCR1 : FSL_SAI_RCR1) 40 #define FSL_SAI_xCR2(tx) (tx ? FSL_SAI_TCR2 : FSL_SAI_RCR2) 41 #define FSL_SAI_xCR3(tx) (tx ? FSL_SAI_TCR3 : FSL_SAI_RCR3) 42 #define FSL_SAI_xCR4(tx) (tx ? FSL_SAI_TCR4 : FSL_SAI_RCR4) 43 #define FSL_SAI_xCR5(tx) (tx ? FSL_SAI_TCR5 : FSL_SAI_RCR5) 44 #define FSL_SAI_xDR(tx) (tx ? FSL_SAI_TDR : FSL_SAI_RDR) 45 #define FSL_SAI_xFR(tx) (tx ? FSL_SAI_TFR : FSL_SAI_RFR) 46 #define FSL_SAI_xMR(tx) (tx ? FSL_SAI_TMR : FSL_SAI_RMR)
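These FSL_SAI_x*(tx) macros pick the transmit or receive register purely from a boolean, so one code path can configure either direction. A minimal standalone sketch of that selection idiom is below; the offsets are made up for illustration and are not the SAI's real register map.

#include <stdio.h>

/* hypothetical offsets, only to illustrate the tx/rx selection idiom */
#define DEMO_TCSR 0x00
#define DEMO_RCSR 0x80
#define DEMO_xCSR(tx) ((tx) ? DEMO_TCSR : DEMO_RCSR)

int main(void)
{
	int tx;

	for (tx = 0; tx <= 1; tx++)
		printf("%s path uses control register at offset 0x%02x\n",
		       tx ? "tx" : "rx", DEMO_xCSR(tx));
	return 0;
}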
|
H A D | fsl_esai.c | 38 * @fifo_depth: depth of tx/rx FIFO 46 * @synchronous: if using tx/rx synchronous mode 120 * @tx: current setting is for playback or capture 122 static int fsl_esai_divisor_cal(struct snd_soc_dai *dai, bool tx, u32 ratio, fsl_esai_divisor_cal() argument 184 regmap_update_bits(esai_priv->regmap, REG_ESAI_xCCR(tx), fsl_esai_divisor_cal() 193 regmap_update_bits(esai_priv->regmap, REG_ESAI_xCCR(tx), fsl_esai_divisor_cal() 215 bool tx = clk_id <= ESAI_HCKT_EXTAL; fsl_esai_set_dai_sysclk() local 222 if (freq == esai_priv->hck_rate[tx] && dir == esai_priv->hck_dir[tx]) fsl_esai_set_dai_sysclk() 226 esai_priv->sck_div[tx] = true; fsl_esai_set_dai_sysclk() 229 regmap_update_bits(esai_priv->regmap, REG_ESAI_xCCR(tx), fsl_esai_set_dai_sysclk() 267 tx ? 'T' : 'R'); fsl_esai_set_dai_sysclk() 274 ecr |= tx ? ESAI_ECR_ETO : ESAI_ECR_ERO; fsl_esai_set_dai_sysclk() 279 tx ? 'T' : 'R'); fsl_esai_set_dai_sysclk() 283 ret = fsl_esai_divisor_cal(dai, tx, ratio, false, 0); fsl_esai_set_dai_sysclk() 287 esai_priv->sck_div[tx] = false; fsl_esai_set_dai_sysclk() 290 esai_priv->hck_dir[tx] = dir; fsl_esai_set_dai_sysclk() 291 esai_priv->hck_rate[tx] = freq; fsl_esai_set_dai_sysclk() 294 tx ? ESAI_ECR_ETI | ESAI_ECR_ETO : fsl_esai_set_dai_sysclk() 303 static int fsl_esai_set_bclk(struct snd_soc_dai *dai, bool tx, u32 freq) fsl_esai_set_bclk() argument 306 u32 hck_rate = esai_priv->hck_rate[tx]; fsl_esai_set_bclk() 311 if (esai_priv->slave_mode || esai_priv->sck_rate[tx] == freq) fsl_esai_set_bclk() 324 tx ? 'T' : 'R'); fsl_esai_set_bclk() 329 if (!esai_priv->sck_div[tx] && (ratio > 16 || ratio == 0)) { fsl_esai_set_bclk() 334 ret = fsl_esai_divisor_cal(dai, tx, ratio, true, fsl_esai_set_bclk() 335 esai_priv->sck_div[tx] ? 0 : ratio); fsl_esai_set_bclk() 340 esai_priv->sck_rate[tx] = freq; fsl_esai_set_bclk() 512 bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; fsl_esai_hw_params() local 526 ret = fsl_esai_set_bclk(dai, tx, bclk); fsl_esai_hw_params() 531 regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx), fsl_esai_hw_params() 535 regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx), fsl_esai_hw_params() 539 (tx ? ESAI_xFCR_TE_MASK | ESAI_xFCR_TIEN : ESAI_xFCR_RE_MASK); fsl_esai_hw_params() 541 (tx ? ESAI_xFCR_TE(pins) | ESAI_xFCR_TIEN : ESAI_xFCR_RE(pins)); fsl_esai_hw_params() 543 regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx), mask, val); fsl_esai_hw_params() 545 mask = ESAI_xCR_xSWS_MASK | (tx ? ESAI_xCR_PADC : 0); fsl_esai_hw_params() 546 val = ESAI_xCR_xSWS(slot_width, width) | (tx ? ESAI_xCR_PADC : 0); fsl_esai_hw_params() 548 regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx), mask, val); fsl_esai_hw_params() 574 bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; fsl_esai_trigger() local 582 regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx), fsl_esai_trigger() 586 for (i = 0; tx && i < channels; i++) fsl_esai_trigger() 589 regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx), fsl_esai_trigger() 590 tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, fsl_esai_trigger() 591 tx ? ESAI_xCR_TE(pins) : ESAI_xCR_RE(pins)); fsl_esai_trigger() 596 regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx), fsl_esai_trigger() 597 tx ? ESAI_xCR_TE_MASK : ESAI_xCR_RE_MASK, 0); fsl_esai_trigger() 600 regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx), fsl_esai_trigger() 602 regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx), fsl_esai_trigger()
|
H A D | fsl_sai.c | 122 bool tx = fsl_dir == FSL_FMT_TRANSMITTER; fsl_sai_set_dai_sysclk_tr() local 142 regmap_update_bits(sai->regmap, FSL_SAI_xCR2(tx), fsl_sai_set_dai_sysclk_tr() 159 dev_err(cpu_dai->dev, "Cannot set tx sysclk: %d\n", ret); fsl_sai_set_dai_sysclk() 175 bool tx = fsl_dir == FSL_FMT_TRANSMITTER; fsl_sai_set_dai_fmt_tr() local 265 regmap_update_bits(sai->regmap, FSL_SAI_xCR2(tx), fsl_sai_set_dai_fmt_tr() 267 regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx), fsl_sai_set_dai_fmt_tr() 280 dev_err(cpu_dai->dev, "Cannot set tx format: %d\n", ret); fsl_sai_set_dai_fmt() 296 bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; fsl_sai_hw_params() local 314 regmap_update_bits(sai->regmap, FSL_SAI_xCR4(tx), fsl_sai_hw_params() 317 regmap_update_bits(sai->regmap, FSL_SAI_xCR5(tx), fsl_sai_hw_params() 320 regmap_write(sai->regmap, FSL_SAI_xMR(tx), ~0UL - ((1 << channels) - 1)); fsl_sai_hw_params() 329 bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; fsl_sai_trigger() local 349 regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx), fsl_sai_trigger() 357 regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx), fsl_sai_trigger() 363 regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx), fsl_sai_trigger() 365 regmap_update_bits(sai->regmap, FSL_SAI_xCSR(tx), fsl_sai_trigger() 369 regmap_read(sai->regmap, FSL_SAI_xCSR(!tx), &xcsr); fsl_sai_trigger() 380 regmap_read(sai->regmap, FSL_SAI_xCSR(tx), &xcsr); fsl_sai_trigger() 400 bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; fsl_sai_startup() local 410 regmap_update_bits(sai->regmap, FSL_SAI_xCR3(tx), FSL_SAI_CR3_TRCE, fsl_sai_startup() 420 bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; fsl_sai_shutdown() local 422 regmap_update_bits(sai->regmap, FSL_SAI_xCR3(tx), FSL_SAI_CR3_TRCE, 0); fsl_sai_shutdown()
|
H A D | fsl_esai.h | 25 #define REG_ESAI_xFCR(tx) (tx ? REG_ESAI_TFCR : REG_ESAI_RFCR) 26 #define REG_ESAI_xFSR(tx) (tx ? REG_ESAI_TFSR : REG_ESAI_RFSR) 44 #define REG_ESAI_xCR(tx) (tx ? REG_ESAI_TCR : REG_ESAI_RCR) 45 #define REG_ESAI_xCCR(tx) (tx ? REG_ESAI_TCCR : REG_ESAI_RCCR) 50 #define REG_ESAI_xSMA(tx) (tx ? REG_ESAI_TSMA : REG_ESAI_RSMA) 51 #define REG_ESAI_xSMB(tx) (tx ? REG_ESAI_TSMB : REG_ESAI_RSMB)
|
/linux-4.1.27/drivers/spi/ |
H A D | spi-dln2.c | 110 } tx; dln2_spi_enable() local 111 unsigned len = sizeof(tx); dln2_spi_enable() 113 tx.port = dln2->port; dln2_spi_enable() 117 len -= sizeof(tx.wait_for_completion); dln2_spi_enable() 119 tx.wait_for_completion = DLN2_TRANSFERS_WAIT_COMPLETE; dln2_spi_enable() 123 return dln2_transfer_tx(dln2->pdev, cmd, &tx, len); dln2_spi_enable() 139 } tx; dln2_spi_cs_set() local 141 tx.port = dln2->port; dln2_spi_cs_set() 148 tx.cs = ~cs_mask; dln2_spi_cs_set() 150 return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_SS, &tx, sizeof(tx)); dln2_spi_cs_set() 169 } tx; dln2_spi_cs_enable() local 172 tx.port = dln2->port; dln2_spi_cs_enable() 173 tx.cs = cs_mask; dln2_spi_cs_enable() 176 return dln2_transfer_tx(dln2->pdev, cmd, &tx, sizeof(tx)); dln2_spi_cs_enable() 191 } tx; dln2_spi_get_cs_num() local 197 tx.port = dln2->port; dln2_spi_get_cs_num() 198 ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SS_COUNT, &tx, sizeof(tx), dln2_spi_get_cs_num() 217 } tx; dln2_spi_get_speed() local 223 tx.port = dln2->port; dln2_spi_get_speed() 225 ret = dln2_transfer(dln2->pdev, cmd, &tx, sizeof(tx), &rx, &rx_len); dln2_spi_get_speed() 267 } __packed tx; dln2_spi_set_speed() local 273 tx.port = dln2->port; dln2_spi_set_speed() 274 tx.speed = cpu_to_le32(speed); dln2_spi_set_speed() 276 ret = dln2_transfer(dln2->pdev, DLN2_SPI_SET_FREQUENCY, &tx, sizeof(tx), dln2_spi_set_speed() 294 } tx; dln2_spi_set_mode() local 296 tx.port = dln2->port; dln2_spi_set_mode() 297 tx.mode = mode; dln2_spi_set_mode() 299 return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_MODE, &tx, sizeof(tx)); dln2_spi_set_mode() 310 } tx; dln2_spi_set_bpw() local 312 tx.port = dln2->port; dln2_spi_set_bpw() 313 tx.bpw = bpw; dln2_spi_set_bpw() 316 &tx, sizeof(tx)); dln2_spi_set_bpw() 325 } tx; dln2_spi_get_supported_frame_sizes() local 333 tx.port = dln2->port; dln2_spi_get_supported_frame_sizes() 336 &tx, sizeof(tx), rx, &rx_len); dln2_spi_get_supported_frame_sizes() 429 } __packed *tx = dln2->buf; dln2_spi_write_one() local 432 BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE); dln2_spi_write_one() 437 tx->port = dln2->port; dln2_spi_write_one() 438 tx->size = cpu_to_le16(data_len); dln2_spi_write_one() 439 tx->attr = attr; dln2_spi_write_one() 441 dln2_spi_copy_to_buf(tx->buf, data, data_len, dln2->bpw); dln2_spi_write_one() 443 tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE; dln2_spi_write_one() 444 return dln2_transfer_tx(dln2->pdev, DLN2_SPI_WRITE, tx, tx_len); dln2_spi_write_one() 458 } __packed tx; dln2_spi_read_one() local 470 tx.port = dln2->port; dln2_spi_read_one() 471 tx.size = cpu_to_le16(data_len); dln2_spi_read_one() 472 tx.attr = attr; dln2_spi_read_one() 474 ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ, &tx, sizeof(tx), dln2_spi_read_one() 500 } __packed *tx; dln2_spi_read_write_one() local 507 BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE || dln2_spi_read_write_one() 515 * safe to use the same buffer for both tx and rx. When DLN2 sends the dln2_spi_read_write_one() 516 * response back, with the rx data, we don't need the tx buffer anymore. 
dln2_spi_read_write_one() 518 tx = dln2->buf; dln2_spi_read_write_one() 521 tx->port = dln2->port; dln2_spi_read_write_one() 522 tx->size = cpu_to_le16(data_len); dln2_spi_read_write_one() 523 tx->attr = attr; dln2_spi_read_write_one() 525 dln2_spi_copy_to_buf(tx->buf, tx_data, data_len, dln2->bpw); dln2_spi_read_write_one() 527 tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE; dln2_spi_read_write_one() 530 ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ_WRITE, tx, tx_len, dln2_spi_read_write_one()
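dln2_spi_write_one() packs a small header (port, size, attr) in front of the payload and transmits only header plus the bytes actually used, not the whole maximum-size buffer. A rough userspace sketch of that trimming follows; the field layout and sizes are invented, not the DLN2 wire format.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* invented request layout, just to show the "header + payload, trimmed
 * to the actual data length" packing used by dln2_spi_write_one() */
struct demo_req {
	uint8_t  port;
	uint16_t size;     /* little-endian on the wire; host assumed LE here */
	uint8_t  attr;
	uint8_t  buf[32];  /* maximum payload */
} __attribute__((packed));

int main(void)
{
	const uint8_t data[] = { 0xde, 0xad, 0xbe, 0xef };
	struct demo_req req;
	size_t wire_len;

	memset(&req, 0, sizeof(req));
	req.port = 0;
	req.size = (uint16_t)sizeof(data);
	req.attr = 0;
	memcpy(req.buf, data, sizeof(data));

	/* send only the header plus the actual payload */
	wire_len = offsetof(struct demo_req, buf) + sizeof(data);
	printf("would transmit %zu bytes\n", wire_len);
	return 0;
}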
|
H A D | spi-sirf.c | 119 /* 256 bytes rx/tx FIFO */ 129 * only if the rx/tx buffer and transfer size are 4-bytes aligned, we use dma 149 /* rx & tx bufs from the spi_transfer */ 150 const void *tx; member in struct:sirfsoc_spi 155 /* get word from tx buffer for sending */ 162 /* rx & tx DMA channels */ 171 * if tx size is not more than 4 and rx size is NULL, use 196 const u8 *tx = sspi->tx; spi_sirfsoc_tx_word_u8() local 198 if (tx) { spi_sirfsoc_tx_word_u8() 199 data = *tx++; spi_sirfsoc_tx_word_u8() 200 sspi->tx = tx; spi_sirfsoc_tx_word_u8() 225 const u16 *tx = sspi->tx; spi_sirfsoc_tx_word_u16() local 227 if (tx) { spi_sirfsoc_tx_word_u16() 228 data = *tx++; spi_sirfsoc_tx_word_u16() 229 sspi->tx = tx; spi_sirfsoc_tx_word_u16() 255 const u32 *tx = sspi->tx; spi_sirfsoc_tx_word_u32() local 257 if (tx) { spi_sirfsoc_tx_word_u32() 258 data = *tx++; spi_sirfsoc_tx_word_u32() 259 sspi->tx = tx; spi_sirfsoc_tx_word_u32() 318 memcpy(&cmd, sspi->tx, t->len); spi_sirfsoc_cmd_transfer() 374 sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len, spi_sirfsoc_dma_transfer() 395 * we only wait tx-done event if transferring by DMA. for PIO, spi_sirfsoc_dma_transfer() 396 * we get rx data by writing tx data, so if rx is done, tx has spi_sirfsoc_dma_transfer() 466 sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage; spi_sirfsoc_transfer() 701 sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx"); spi_sirfsoc_probe() 703 dev_err(&pdev->dev, "can not allocate tx dma channel\n"); spi_sirfsoc_probe()
|
H A D | spi-adi-v3.c | 72 void *tx; member in struct:adi_spi_master 106 u32 tx_dummy_val; /* tx value for rx only transfer */ 222 /* we always choose tx transfer initiate */ adi_spi_restore_state() 238 while (drv_data->tx < drv_data->tx_end) { adi_spi_u8_write() 239 iowrite32(*(u8 *)(drv_data->tx++), &drv_data->regs->tfifo); adi_spi_u8_write() 263 iowrite32(*(u8 *)(drv_data->tx++), &drv_data->regs->tfifo); adi_spi_u8_duplex() 279 while (drv_data->tx < drv_data->tx_end) { adi_spi_u16_write() 280 iowrite32(*(u16 *)drv_data->tx, &drv_data->regs->tfifo); adi_spi_u16_write() 281 drv_data->tx += 2; adi_spi_u16_write() 306 iowrite32(*(u16 *)drv_data->tx, &drv_data->regs->tfifo); adi_spi_u16_duplex() 307 drv_data->tx += 2; adi_spi_u16_duplex() 324 while (drv_data->tx < drv_data->tx_end) { adi_spi_u32_write() 325 iowrite32(*(u32 *)drv_data->tx, &drv_data->regs->tfifo); adi_spi_u32_write() 326 drv_data->tx += 4; adi_spi_u32_write() 351 iowrite32(*(u32 *)drv_data->tx, &drv_data->regs->tfifo); adi_spi_u32_duplex() 352 drv_data->tx += 4; adi_spi_u32_duplex() 398 drv->tx = (void *)t->tx_buf; adi_spi_setup_transfer() 399 drv->tx_end = drv->tx + t->len; adi_spi_setup_transfer() 401 drv->tx = NULL; adi_spi_setup_transfer() 467 tx_buf = drv_data->tx; adi_spi_dma_xfer() 473 } else if (!drv_data->tx) { adi_spi_dma_xfer() 482 tx_buf = drv_data->tx; adi_spi_dma_xfer() 537 if (drv_data->tx != drv_data->tx_end) adi_spi_pio_xfer() 539 } else if (!drv_data->tx) { adi_spi_pio_xfer() 547 if (drv_data->tx != drv_data->tx_end) adi_spi_pio_xfer() 758 "spi tx dma error: %d\n", dma_stat); adi_spi_tx_dma_isr() 759 if (drv_data->tx) adi_spi_tx_dma_isr() 777 /* we may fail on tx dma */ adi_spi_rx_dma_isr() 789 "dma interrupt missing: tx=%d,rx=%d\n", adi_spi_rx_dma_isr() 819 dev_err(dev, "can not get tx dma resource\n"); adi_spi_probe() 864 /* request tx and rx dma */ adi_spi_probe()
|
H A D | spi-omap2-mcspi.c | 403 struct dma_async_tx_descriptor *tx; omap2_mcspi_tx_dma() local 412 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1, omap2_mcspi_tx_dma() 414 if (tx) { omap2_mcspi_tx_dma() 415 tx->callback = omap2_mcspi_tx_callback; omap2_mcspi_tx_dma() 416 tx->callback_param = spi; omap2_mcspi_tx_dma() 417 dmaengine_submit(tx); omap2_mcspi_tx_dma() 458 struct dma_async_tx_descriptor *tx; omap2_mcspi_rx_dma() local 470 tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1, omap2_mcspi_rx_dma() 473 if (tx) { omap2_mcspi_rx_dma() 474 tx->callback = omap2_mcspi_rx_callback; omap2_mcspi_rx_dma() 475 tx->callback_param = spi; omap2_mcspi_rx_dma() 476 dmaengine_submit(tx); omap2_mcspi_rx_dma() 546 const u8 *tx; omap2_mcspi_txrx_dma() local 590 tx = xfer->tx_buf; omap2_mcspi_txrx_dma() 592 if (tx != NULL) omap2_mcspi_txrx_dma() 598 if (tx != NULL) { omap2_mcspi_txrx_dma() 668 const u8 *tx; omap2_mcspi_txrx_pio() local 671 tx = xfer->tx_buf; omap2_mcspi_txrx_pio() 675 if (tx != NULL) { omap2_mcspi_txrx_pio() 682 word_len, *tx); omap2_mcspi_txrx_pio() 683 writel_relaxed(*tx++, tx_reg); omap2_mcspi_txrx_pio() 692 if (c == 1 && tx == NULL && omap2_mcspi_txrx_pio() 705 } else if (c == 0 && tx == NULL) { omap2_mcspi_txrx_pio() 716 const u16 *tx; omap2_mcspi_txrx_pio() local 719 tx = xfer->tx_buf; omap2_mcspi_txrx_pio() 722 if (tx != NULL) { omap2_mcspi_txrx_pio() 729 word_len, *tx); omap2_mcspi_txrx_pio() 730 writel_relaxed(*tx++, tx_reg); omap2_mcspi_txrx_pio() 739 if (c == 2 && tx == NULL && omap2_mcspi_txrx_pio() 752 } else if (c == 0 && tx == NULL) { omap2_mcspi_txrx_pio() 763 const u32 *tx; omap2_mcspi_txrx_pio() local 766 tx = xfer->tx_buf; omap2_mcspi_txrx_pio() 769 if (tx != NULL) { omap2_mcspi_txrx_pio() 776 word_len, *tx); omap2_mcspi_txrx_pio() 777 writel_relaxed(*tx++, tx_reg); omap2_mcspi_txrx_pio() 786 if (c == 4 && tx == NULL && omap2_mcspi_txrx_pio() 799 } else if (c == 0 && tx == NULL) { omap2_mcspi_txrx_pio() 938 * for both rx and tx. Otherwise we'll do PIO for both rx and tx. 1230 tx_buf ? "tx" : "", omap2_mcspi_transfer_one_message() 1429 sprintf(dma_tx_ch_name, "tx%d", i); omap2_mcspi_probe()
|
H A D | spi-pxa2xx-pxadma.c | 59 if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx)) pxa2xx_spi_map_dma_buffers() 71 /* Modify setup if tx buffer is null */ pxa2xx_spi_map_dma_buffers() 72 if (drv_data->tx == NULL) { pxa2xx_spi_map_dma_buffers() 74 drv_data->tx = drv_data->null_dma_buf; pxa2xx_spi_map_dma_buffers() 79 /* Stream map the tx buffer. Always do DMA_TO_DEVICE first pxa2xx_spi_map_dma_buffers() 81 * the tx and rx buffers overlap. pxa2xx_spi_map_dma_buffers() 83 drv_data->tx_dma = dma_map_single(dev, drv_data->tx, pxa2xx_spi_map_dma_buffers() 217 "dma_handler: bad bus address on tx channel"); pxa2xx_spi_dma_handler() 307 /* Setup tx DMA Channel */ pxa2xx_spi_dma_prepare() 311 if (drv_data->tx == drv_data->null_dma_buf) pxa2xx_spi_dma_prepare() 342 /* Get two DMA channels (rx and tx) */ pxa2xx_spi_dma_setup() 357 dev_err(dev, "problem (%d) requesting tx channel\n", pxa2xx_spi_dma_setup() 408 * divided by (bytes/register); the tx threshold is the inverse of pxa2xx_spi_set_dma_burst_and_threshold() 411 * tx fifo to accept a burst (a tx burst will overwrite the fifo if pxa2xx_spi_set_dma_burst_and_threshold() 413 * space in the rx fifo for any data loaded to the tx fifo. pxa2xx_spi_set_dma_burst_and_threshold() 417 * to burst 16 to the tx fifo, the fifo would have to be empty; pxa2xx_spi_set_dma_burst_and_threshold() 418 * however, the minimum fifo trigger level is 1, and the tx will pxa2xx_spi_set_dma_burst_and_threshold()
|
H A D | spi-fsl-lib.c | 46 const type *tx = mpc8xxx_spi->tx; \ 47 if (!tx) \ 49 data = *tx++ << mpc8xxx_spi->tx_shift; \ 50 mpc8xxx_spi->tx = tx; \
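The macro body quoted above fetches the next word from the tx buffer, shifts it into the transmit position, and advances the pointer. A rough userspace rendering of what it generates for the u8 case is sketched below; struct and function names are invented.

#include <stdint.h>
#include <stdio.h>

struct demo_spi {
	const uint8_t *tx;      /* next byte to transmit, or NULL for rx-only */
	unsigned int tx_shift;
};

/* roughly what the generated u8 tx-buffer getter does */
static uint32_t demo_tx_buf_u8(struct demo_spi *s)
{
	uint32_t data;
	const uint8_t *tx = s->tx;

	if (!tx)
		return 0;
	data = (uint32_t)*tx++ << s->tx_shift;
	s->tx = tx;
	return data;
}

int main(void)
{
	static const uint8_t buf[] = { 0xa5, 0x5a };
	struct demo_spi s = { .tx = buf, .tx_shift = 24 };

	printf("word0 = 0x%08x\n", (unsigned)demo_tx_buf_u8(&s));
	printf("word1 = 0x%08x\n", (unsigned)demo_tx_buf_u8(&s));
	return 0;
}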
|
H A D | spi-dw.c | 154 /* Return the max entries we can fill into tx fifo */ tx_max() 159 tx_left = (dws->tx_end - dws->tx) / dws->n_bytes; tx_max() 163 * Another concern is about the tx/rx mismatch, we tx_max() 165 * one maximum value for tx, but it doesn't cover the tx_max() 166 * data which is out of tx/rx fifo and inside the tx_max() 170 rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx)) tx_max() 190 /* Set the tx word if the transfer's original "tx" is not null */ dw_writer() 193 txw = *(u8 *)(dws->tx); dw_writer() 195 txw = *(u16 *)(dws->tx); dw_writer() 198 dws->tx += dws->n_bytes; dw_writer() 301 dws->tx = (void *)transfer->tx_buf; dw_spi_transfer_one() 302 dws->tx_end = dws->tx + transfer->len; dw_spi_transfer_one() 346 if (dws->rx && dws->tx) dw_spi_transfer_one()
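tx_max() caps each PIO burst by three limits: words left in the tx buffer, free slots in the tx FIFO, and the rx/tx gap so words still in flight can never overflow the rx FIFO. A minimal sketch of that min-of-three computation, with invented parameter names (the real code also scales by n_bytes):

#include <stdio.h>

static unsigned int demo_tx_max(unsigned int fifo_len,
				unsigned int tx_left,   /* words still to send */
				unsigned int tx_room,   /* free slots in the tx fifo */
				unsigned int rxtx_gap)  /* words sent but not yet read back */
{
	unsigned int max = tx_left;

	if (tx_room < max)
		max = tx_room;
	/* never let outstanding words exceed what the rx fifo can absorb */
	if (fifo_len - rxtx_gap < max)
		max = fifo_len - rxtx_gap;
	return max;
}

int main(void)
{
	printf("can push %u words this round\n",
	       demo_tx_max(16, 100, 12, 6));
	return 0;
}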
|
H A D | spi-bfin5xx.c | 87 void *tx; member in struct:bfin_spi_master_data 232 while (drv_data->tx < drv_data->tx_end) { bfin_spi_u8_writer() 233 bfin_write(&drv_data->regs->tdbr, (*(u8 *) (drv_data->tx++))); bfin_spi_u8_writer() 264 bfin_write(&drv_data->regs->tdbr, (*(u8 *) (drv_data->tx++))); bfin_spi_u8_duplex() 282 while (drv_data->tx < drv_data->tx_end) { bfin_spi_u16_writer() 283 bfin_write(&drv_data->regs->tdbr, (*(u16 *) (drv_data->tx))); bfin_spi_u16_writer() 284 drv_data->tx += 2; bfin_spi_u16_writer() 316 bfin_write(&drv_data->regs->tdbr, (*(u16 *) (drv_data->tx))); bfin_spi_u16_duplex() 317 drv_data->tx += 2; bfin_spi_u16_duplex() 391 if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) || bfin_spi_pio_irq_handler() 421 if (drv_data->rx && drv_data->tx) { bfin_spi_pio_irq_handler() 426 u16 *buf2 = (u16 *)drv_data->tx; bfin_spi_pio_irq_handler() 433 u8 *buf2 = (u8 *)drv_data->tx; bfin_spi_pio_irq_handler() 455 } else if (drv_data->tx) { bfin_spi_pio_irq_handler() 459 u16 *buf = (u16 *)drv_data->tx; bfin_spi_pio_irq_handler() 465 u8 *buf = (u8 *)drv_data->tx; bfin_spi_pio_irq_handler() 473 if (drv_data->tx) bfin_spi_pio_irq_handler() 474 drv_data->tx += n_bytes; bfin_spi_pio_irq_handler() 511 if (drv_data->tx != NULL) { bfin_spi_dma_irq_handler() 618 drv_data->tx = (void *)transfer->tx_buf; bfin_spi_pump_transfers() 619 drv_data->tx_end = drv_data->tx + transfer->len; bfin_spi_pump_transfers() 623 drv_data->tx = NULL; bfin_spi_pump_transfers() 716 (unsigned long)drv_data->tx); bfin_spi_pump_transfers() 730 /* In dma mode, rx or tx must be NULL in one transfer */ bfin_spi_pump_transfers() 747 } else if (drv_data->tx != NULL) { bfin_spi_pump_transfers() 751 if (bfin_addr_dcacheable((unsigned long) drv_data->tx)) bfin_spi_pump_transfers() 752 flush_dcache_range((unsigned long) drv_data->tx, bfin_spi_pump_transfers() 753 (unsigned long) (drv_data->tx + bfin_spi_pump_transfers() 756 dma_start_addr = (unsigned long)drv_data->tx; bfin_spi_pump_transfers() 798 if (drv_data->tx == NULL) bfin_spi_pump_transfers() 803 u16 *buf = (u16 *)drv_data->tx; bfin_spi_pump_transfers() 809 u8 *buf = (u8 *)drv_data->tx; bfin_spi_pump_transfers() 814 drv_data->tx += drv_data->n_bytes; bfin_spi_pump_transfers() 827 BUG_ON((drv_data->tx_end - drv_data->tx) != bfin_spi_pump_transfers() 834 if (drv_data->tx != drv_data->tx_end) bfin_spi_pump_transfers() 836 } else if (drv_data->tx != NULL) { bfin_spi_pump_transfers() 843 if (drv_data->tx != drv_data->tx_end) bfin_spi_pump_transfers()
|
H A D | spi-rockchip.c | 186 const void *tx; member in struct:rockchip_spi 251 tx_left = (rs->tx_end - rs->tx) / rs->n_bytes; tx_max() 350 txw = *(u8 *)(rs->tx); rockchip_spi_pio_writer() 352 txw = *(u16 *)(rs->tx); rockchip_spi_pio_writer() 355 rs->tx += rs->n_bytes; rockchip_spi_pio_writer() 379 if (rs->tx) { rockchip_spi_pio_transfer() 380 remain = rs->tx_end - rs->tx; rockchip_spi_pio_transfer() 392 /* If tx, wait until the FIFO data completely. */ rockchip_spi_pio_transfer() 393 if (rs->tx) rockchip_spi_pio_transfer() 465 if (rs->tx) { rockchip_spi_prepare_dma() 481 /* rx must be started before tx due to spi instinct */ rockchip_spi_prepare_dma() 514 if (rs->tx) rockchip_spi_config() 582 rs->tx = xfer->tx_buf; rockchip_spi_transfer_one() 583 rs->tx_end = rs->tx + xfer->len; rockchip_spi_transfer_one() 591 if (rs->tx && rs->rx) rockchip_spi_transfer_one() 593 else if (rs->tx) rockchip_spi_transfer_one() 612 /* tx or tr: spi must be enabled first */ rockchip_spi_transfer_one() 720 rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx"); rockchip_spi_probe()
|
H A D | spi-omap-100k.c | 200 const u8 *tx; omap1_spi100k_txrx_pio() local 203 tx = xfer->tx_buf; omap1_spi100k_txrx_pio() 207 spi100k_write_data(spi->master, word_len, *tx++); omap1_spi100k_txrx_pio() 213 const u16 *tx; omap1_spi100k_txrx_pio() local 216 tx = xfer->tx_buf; omap1_spi100k_txrx_pio() 220 spi100k_write_data(spi->master, word_len, *tx++); omap1_spi100k_txrx_pio() 226 const u32 *tx; omap1_spi100k_txrx_pio() local 229 tx = xfer->tx_buf; omap1_spi100k_txrx_pio() 233 spi100k_write_data(spi->master, word_len, *tx); omap1_spi100k_txrx_pio()
|
H A D | spi-altera.c | 60 const unsigned char *tx; member in struct:altera_spi 107 if (hw->tx) { hw_txbyte() 110 return hw->tx[count]; hw_txbyte() 112 return (hw->tx[count * 2] hw_txbyte() 113 | (hw->tx[count * 2 + 1] << 8)); hw_txbyte() 123 hw->tx = t->tx_buf; altera_spi_txrx()
|
H A D | spi-dw-mid.c | 49 struct dw_dma_slave *tx = dws->dma_tx; mid_spi_dma_init() local 71 /* 2. Init tx channel */ mid_spi_dma_init() 72 tx->dma_dev = &dma_dev->dev; mid_spi_dma_init() 73 dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, tx); mid_spi_dma_init() 136 * dws->dma_chan_busy is set before the dma transfer starts, callback for tx 258 /* rx must be started before tx due to spi instinct */ mid_spi_dma_transfer()
|
H A D | spi-bitbang.c | 69 const u8 *tx = t->tx_buf; bitbang_txrx_8() local 75 if (tx) bitbang_txrx_8() 76 word = *tx++; bitbang_txrx_8() 95 const u16 *tx = t->tx_buf; bitbang_txrx_16() local 101 if (tx) bitbang_txrx_16() 102 word = *tx++; bitbang_txrx_16() 121 const u32 *tx = t->tx_buf; bitbang_txrx_32() local 127 if (tx) bitbang_txrx_32() 128 word = *tx++; bitbang_txrx_32()
|
/linux-4.1.27/drivers/net/wireless/ti/wl1251/ |
H A D | Makefile | 1 wl1251-objs = main.o event.o tx.o rx.o ps.o cmd.o \
|
/linux-4.1.27/drivers/net/wireless/ti/wlcore/ |
H A D | Makefile | 1 wlcore-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
|
/linux-4.1.27/drivers/iio/accel/ |
H A D | kxsd9.c | 48 * @buf_lock: protect the rx and tx buffers. 51 * @tx: single tx buffer storage 57 u8 tx[KXSD9_STATE_TX_SIZE]; member in struct:kxsd9_state 86 st->tx[0] = KXSD9_WRITE(KXSD9_REG_CTRL_C); kxsd9_write_scale() 87 st->tx[1] = (ret & ~KXSD9_FS_MASK) | i; kxsd9_write_scale() 89 ret = spi_write(st->us, st->tx, 2); kxsd9_write_scale() 104 .tx_buf = st->tx, kxsd9_read() 113 st->tx[0] = KXSD9_READ(address); kxsd9_read() 204 st->tx[0] = 0x0d; kxsd9_power_up() 205 st->tx[1] = 0x40; kxsd9_power_up() 206 ret = spi_write(st->us, st->tx, 2); kxsd9_power_up() 210 st->tx[0] = 0x0c; kxsd9_power_up() 211 st->tx[1] = 0x9b; kxsd9_power_up() 212 return spi_write(st->us, st->tx, 2); kxsd9_power_up()
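The kxsd9 power-up and scale writes stage a two-byte frame in st->tx (register selector, then value) and hand it to spi_write(). A standalone sketch of assembling such a frame; the write-bit encoding and register numbers here are placeholders, not the KXSD9 register map.

#include <stdint.h>
#include <stdio.h>

/* invented encoding: low bits carry the register address for a write */
#define DEMO_WRITE(reg)  ((uint8_t)((reg) & 0x7f))

int main(void)
{
	uint8_t tx[2];

	tx[0] = DEMO_WRITE(0x0d);   /* register address */
	tx[1] = 0x40;               /* value to store   */

	/* a real driver would now pass tx[] to spi_write(st->us, tx, 2) */
	printf("tx frame: %02x %02x\n", tx[0], tx[1]);
	return 0;
}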
|
/linux-4.1.27/drivers/net/ethernet/brocade/bna/ |
H A D | bna_tx_rx.c | 3014 #define call_tx_stop_cbfn(tx) \ 3016 if ((tx)->stop_cbfn) { \ 3019 cbfn = (tx)->stop_cbfn; \ 3020 cbarg = (tx)->stop_cbarg; \ 3021 (tx)->stop_cbfn = NULL; \ 3022 (tx)->stop_cbarg = NULL; \ 3023 cbfn(cbarg, (tx)); \ 3027 #define call_tx_prio_change_cbfn(tx) \ 3029 if ((tx)->prio_change_cbfn) { \ 3031 cbfn = (tx)->prio_change_cbfn; \ 3032 (tx)->prio_change_cbfn = NULL; \ 3033 cbfn((tx)->bna->bnad, (tx)); \ 3037 static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx); 3038 static void bna_bfi_tx_enet_start(struct bna_tx *tx); 3039 static void bna_tx_enet_stop(struct bna_tx *tx); 3067 bna_tx_sm_stopped_entry(struct bna_tx *tx) bna_tx_sm_stopped_entry() argument 3069 call_tx_stop_cbfn(tx); bna_tx_sm_stopped_entry() 3073 bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event) bna_tx_sm_stopped() argument 3077 bfa_fsm_set_state(tx, bna_tx_sm_start_wait); bna_tx_sm_stopped() 3081 call_tx_stop_cbfn(tx); bna_tx_sm_stopped() 3089 call_tx_prio_change_cbfn(tx); bna_tx_sm_stopped() 3102 bna_tx_sm_start_wait_entry(struct bna_tx *tx) bna_tx_sm_start_wait_entry() argument 3104 bna_bfi_tx_enet_start(tx); bna_tx_sm_start_wait_entry() 3108 bna_tx_sm_start_wait(struct bna_tx *tx, enum bna_tx_event event) bna_tx_sm_start_wait() argument 3112 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED); bna_tx_sm_start_wait() 3113 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); bna_tx_sm_start_wait() 3117 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED); bna_tx_sm_start_wait() 3118 bfa_fsm_set_state(tx, bna_tx_sm_stopped); bna_tx_sm_start_wait() 3122 if (tx->flags & (BNA_TX_F_PRIO_CHANGED | BNA_TX_F_BW_UPDATED)) { bna_tx_sm_start_wait() 3123 tx->flags &= ~(BNA_TX_F_PRIO_CHANGED | bna_tx_sm_start_wait() 3125 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); bna_tx_sm_start_wait() 3127 bfa_fsm_set_state(tx, bna_tx_sm_started); bna_tx_sm_start_wait() 3131 tx->flags |= BNA_TX_F_PRIO_CHANGED; bna_tx_sm_start_wait() 3135 tx->flags |= BNA_TX_F_BW_UPDATED; bna_tx_sm_start_wait() 3144 bna_tx_sm_started_entry(struct bna_tx *tx) bna_tx_sm_started_entry() argument 3148 int is_regular = (tx->type == BNA_TX_T_REGULAR); bna_tx_sm_started_entry() 3150 list_for_each(qe, &tx->txq_q) { bna_tx_sm_started_entry() 3154 bna_ib_start(tx->bna, &txq->ib, is_regular); bna_tx_sm_started_entry() 3156 tx->tx_resume_cbfn(tx->bna->bnad, tx); bna_tx_sm_started_entry() 3160 bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event) bna_tx_sm_started() argument 3164 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); bna_tx_sm_started() 3165 tx->tx_stall_cbfn(tx->bna->bnad, tx); bna_tx_sm_started() 3166 bna_tx_enet_stop(tx); bna_tx_sm_started() 3170 bfa_fsm_set_state(tx, bna_tx_sm_failed); bna_tx_sm_started() 3171 tx->tx_stall_cbfn(tx->bna->bnad, tx); bna_tx_sm_started() 3172 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); bna_tx_sm_started() 3177 bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait); bna_tx_sm_started() 3186 bna_tx_sm_stop_wait_entry(struct bna_tx *tx) bna_tx_sm_stop_wait_entry() argument 3191 bna_tx_sm_stop_wait(struct bna_tx *tx, enum bna_tx_event event) bna_tx_sm_stop_wait() argument 3196 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); bna_tx_sm_stop_wait() 3197 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); bna_tx_sm_stop_wait() 3205 bna_tx_enet_stop(tx); bna_tx_sm_stop_wait() 3219 bna_tx_sm_cleanup_wait_entry(struct bna_tx *tx) bna_tx_sm_cleanup_wait_entry() argument 3224 bna_tx_sm_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) bna_tx_sm_cleanup_wait() argument 3234 bfa_fsm_set_state(tx, 
bna_tx_sm_stopped); bna_tx_sm_cleanup_wait() 3243 bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx) bna_tx_sm_prio_stop_wait_entry() argument 3245 tx->tx_stall_cbfn(tx->bna->bnad, tx); bna_tx_sm_prio_stop_wait_entry() 3246 bna_tx_enet_stop(tx); bna_tx_sm_prio_stop_wait_entry() 3250 bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event) bna_tx_sm_prio_stop_wait() argument 3254 bfa_fsm_set_state(tx, bna_tx_sm_stop_wait); bna_tx_sm_prio_stop_wait() 3258 bfa_fsm_set_state(tx, bna_tx_sm_failed); bna_tx_sm_prio_stop_wait() 3259 call_tx_prio_change_cbfn(tx); bna_tx_sm_prio_stop_wait() 3260 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); bna_tx_sm_prio_stop_wait() 3264 bfa_fsm_set_state(tx, bna_tx_sm_prio_cleanup_wait); bna_tx_sm_prio_stop_wait() 3278 bna_tx_sm_prio_cleanup_wait_entry(struct bna_tx *tx) bna_tx_sm_prio_cleanup_wait_entry() argument 3280 call_tx_prio_change_cbfn(tx); bna_tx_sm_prio_cleanup_wait_entry() 3281 tx->tx_cleanup_cbfn(tx->bna->bnad, tx); bna_tx_sm_prio_cleanup_wait_entry() 3285 bna_tx_sm_prio_cleanup_wait(struct bna_tx *tx, enum bna_tx_event event) bna_tx_sm_prio_cleanup_wait() argument 3289 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); bna_tx_sm_prio_cleanup_wait() 3293 bfa_fsm_set_state(tx, bna_tx_sm_failed); bna_tx_sm_prio_cleanup_wait() 3302 bfa_fsm_set_state(tx, bna_tx_sm_start_wait); bna_tx_sm_prio_cleanup_wait() 3311 bna_tx_sm_failed_entry(struct bna_tx *tx) bna_tx_sm_failed_entry() argument 3316 bna_tx_sm_failed(struct bna_tx *tx, enum bna_tx_event event) bna_tx_sm_failed() argument 3320 bfa_fsm_set_state(tx, bna_tx_sm_quiesce_wait); bna_tx_sm_failed() 3324 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); bna_tx_sm_failed() 3332 bfa_fsm_set_state(tx, bna_tx_sm_stopped); bna_tx_sm_failed() 3341 bna_tx_sm_quiesce_wait_entry(struct bna_tx *tx) bna_tx_sm_quiesce_wait_entry() argument 3346 bna_tx_sm_quiesce_wait(struct bna_tx *tx, enum bna_tx_event event) bna_tx_sm_quiesce_wait() argument 3350 bfa_fsm_set_state(tx, bna_tx_sm_cleanup_wait); bna_tx_sm_quiesce_wait() 3354 bfa_fsm_set_state(tx, bna_tx_sm_failed); bna_tx_sm_quiesce_wait() 3358 bfa_fsm_set_state(tx, bna_tx_sm_start_wait); bna_tx_sm_quiesce_wait() 3371 bna_bfi_tx_enet_start(struct bna_tx *tx) bna_bfi_tx_enet_start() argument 3373 struct bfi_enet_tx_cfg_req *cfg_req = &tx->bfi_enet_cmd.cfg_req; bna_bfi_tx_enet_start() 3379 BFI_ENET_H2I_TX_CFG_SET_REQ, 0, tx->rid); bna_bfi_tx_enet_start() 3383 cfg_req->num_queues = tx->num_txq; bna_bfi_tx_enet_start() 3384 for (i = 0, qe = bfa_q_first(&tx->txq_q); bna_bfi_tx_enet_start() 3385 i < tx->num_txq; bna_bfi_tx_enet_start() 3413 cfg_req->tx_cfg.vlan_id = htons((u16)tx->txf_vlan_id); bna_bfi_tx_enet_start() 3417 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, bna_bfi_tx_enet_start() 3419 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd); bna_bfi_tx_enet_start() 3423 bna_bfi_tx_enet_stop(struct bna_tx *tx) bna_bfi_tx_enet_stop() argument 3425 struct bfi_enet_req *req = &tx->bfi_enet_cmd.req; bna_bfi_tx_enet_stop() 3428 BFI_ENET_H2I_TX_CFG_CLR_REQ, 0, tx->rid); bna_bfi_tx_enet_stop() 3431 bfa_msgq_cmd_set(&tx->msgq_cmd, NULL, NULL, sizeof(struct bfi_enet_req), bna_bfi_tx_enet_stop() 3433 bfa_msgq_cmd_post(&tx->bna->msgq, &tx->msgq_cmd); bna_bfi_tx_enet_stop() 3437 bna_tx_enet_stop(struct bna_tx *tx) bna_tx_enet_stop() argument 3443 list_for_each(qe, &tx->txq_q) { bna_tx_enet_stop() 3445 bna_ib_stop(tx->bna, &txq->ib); bna_tx_enet_stop() 3448 bna_bfi_tx_enet_stop(tx); bna_tx_enet_stop() 3491 struct bna_tx *tx = NULL; bna_tx_get() local 3500 tx = (struct bna_tx *)qe; 
bna_tx_get() 3501 bfa_q_qe_init(&tx->qe); bna_tx_get() 3502 tx->type = type; bna_tx_get() 3504 return tx; bna_tx_get() 3508 bna_tx_free(struct bna_tx *tx) bna_tx_free() argument 3510 struct bna_tx_mod *tx_mod = &tx->bna->tx_mod; bna_tx_free() 3515 while (!list_empty(&tx->txq_q)) { bna_tx_free() 3516 bfa_q_deq(&tx->txq_q, &txq); bna_tx_free() 3519 txq->tx = NULL; bna_tx_free() 3524 if (qe == &tx->qe) { bna_tx_free() 3525 list_del(&tx->qe); bna_tx_free() 3526 bfa_q_qe_init(&tx->qe); bna_tx_free() 3531 tx->bna = NULL; bna_tx_free() 3532 tx->priv = NULL; bna_tx_free() 3536 if (((struct bna_tx *)qe)->rid < tx->rid) bna_tx_free() 3545 bfa_q_enq_head(&tx_mod->tx_free_q, &tx->qe); bna_tx_free() 3548 list_add_tail(&tx->qe, &tx_mod->tx_free_q); bna_tx_free() 3551 bfa_q_next(&tx->qe) = bfa_q_next(prev_qe); bna_tx_free() 3552 bfa_q_prev(&tx->qe) = prev_qe; bna_tx_free() 3553 bfa_q_next(prev_qe) = &tx->qe; bna_tx_free() 3554 bfa_q_prev(bfa_q_next(&tx->qe)) = &tx->qe; bna_tx_free() 3559 bna_tx_start(struct bna_tx *tx) bna_tx_start() argument 3561 tx->flags |= BNA_TX_F_ENET_STARTED; bna_tx_start() 3562 if (tx->flags & BNA_TX_F_ENABLED) bna_tx_start() 3563 bfa_fsm_send_event(tx, TX_E_START); bna_tx_start() 3567 bna_tx_stop(struct bna_tx *tx) bna_tx_stop() argument 3569 tx->stop_cbfn = bna_tx_mod_cb_tx_stopped; bna_tx_stop() 3570 tx->stop_cbarg = &tx->bna->tx_mod; bna_tx_stop() 3572 tx->flags &= ~BNA_TX_F_ENET_STARTED; bna_tx_stop() 3573 bfa_fsm_send_event(tx, TX_E_STOP); bna_tx_stop() 3577 bna_tx_fail(struct bna_tx *tx) bna_tx_fail() argument 3579 tx->flags &= ~BNA_TX_F_ENET_STARTED; bna_tx_fail() 3580 bfa_fsm_send_event(tx, TX_E_FAIL); bna_tx_fail() 3584 bna_bfi_tx_enet_start_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr) bna_bfi_tx_enet_start_rsp() argument 3586 struct bfi_enet_tx_cfg_rsp *cfg_rsp = &tx->bfi_enet_cmd.cfg_rsp; bna_bfi_tx_enet_start_rsp() 3591 bfa_msgq_rsp_copy(&tx->bna->msgq, (u8 *)cfg_rsp, bna_bfi_tx_enet_start_rsp() 3594 tx->hw_id = cfg_rsp->hw_id; bna_bfi_tx_enet_start_rsp() 3596 for (i = 0, qe = bfa_q_first(&tx->txq_q); bna_bfi_tx_enet_start_rsp() 3597 i < tx->num_txq; i++, qe = bfa_q_next(qe)) { bna_bfi_tx_enet_start_rsp() 3602 tx->bna->pcidev.pci_bar_kva bna_bfi_tx_enet_start_rsp() 3605 tx->bna->pcidev.pci_bar_kva bna_bfi_tx_enet_start_rsp() 3614 bfa_fsm_send_event(tx, TX_E_STARTED); bna_bfi_tx_enet_start_rsp() 3618 bna_bfi_tx_enet_stop_rsp(struct bna_tx *tx, struct bfi_msgq_mhdr *msghdr) bna_bfi_tx_enet_stop_rsp() argument 3620 bfa_fsm_send_event(tx, TX_E_STOPPED); bna_bfi_tx_enet_stop_rsp() 3626 struct bna_tx *tx; bna_bfi_bw_update_aen() local 3630 tx = (struct bna_tx *)qe; bna_bfi_bw_update_aen() 3631 bfa_fsm_send_event(tx, TX_E_BW_UPDATE); bna_bfi_bw_update_aen() 3690 struct bna_tx *tx; bna_tx_create() local 3709 tx = bna_tx_get(tx_mod, tx_cfg->tx_type); bna_tx_create() 3710 if (!tx) bna_tx_create() 3712 tx->bna = bna; bna_tx_create() 3713 tx->priv = priv; bna_tx_create() 3717 INIT_LIST_HEAD(&tx->txq_q); bna_tx_create() 3724 list_add_tail(&txq->qe, &tx->txq_q); bna_tx_create() 3725 txq->tx = tx; bna_tx_create() 3734 tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn; bna_tx_create() 3735 tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn; bna_tx_create() 3737 tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn; bna_tx_create() 3738 tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn; bna_tx_create() 3739 tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn; bna_tx_create() 3741 list_add_tail(&tx->qe, &tx_mod->tx_active_q); bna_tx_create() 3743 tx->num_txq = tx_cfg->num_txq; bna_tx_create() 3745 
tx->flags = 0; bna_tx_create() 3746 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_STARTED) { bna_tx_create() 3747 switch (tx->type) { bna_tx_create() 3749 if (!(tx->bna->tx_mod.flags & bna_tx_create() 3751 tx->flags |= BNA_TX_F_ENET_STARTED; bna_tx_create() 3754 if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_ENET_LOOPBACK) bna_tx_create() 3755 tx->flags |= BNA_TX_F_ENET_STARTED; bna_tx_create() 3763 list_for_each(qe, &tx->txq_q) { bna_tx_create() 3809 if (tx->tcb_setup_cbfn) bna_tx_create() 3810 (tx->tcb_setup_cbfn)(bna->bnad, txq->tcb); bna_tx_create() 3820 tx->txf_vlan_id = 0; bna_tx_create() 3822 bfa_fsm_set_state(tx, bna_tx_sm_stopped); bna_tx_create() 3824 tx_mod->rid_mask |= (1 << tx->rid); bna_tx_create() 3826 return tx; bna_tx_create() 3829 bna_tx_free(tx); bna_tx_create() 3834 bna_tx_destroy(struct bna_tx *tx) bna_tx_destroy() argument 3839 list_for_each(qe, &tx->txq_q) { bna_tx_destroy() 3841 if (tx->tcb_destroy_cbfn) bna_tx_destroy() 3842 (tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb); bna_tx_destroy() 3845 tx->bna->tx_mod.rid_mask &= ~(1 << tx->rid); bna_tx_destroy() 3846 bna_tx_free(tx); bna_tx_destroy() 3850 bna_tx_enable(struct bna_tx *tx) bna_tx_enable() argument 3852 if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped) bna_tx_enable() 3855 tx->flags |= BNA_TX_F_ENABLED; bna_tx_enable() 3857 if (tx->flags & BNA_TX_F_ENET_STARTED) bna_tx_enable() 3858 bfa_fsm_send_event(tx, TX_E_START); bna_tx_enable() 3862 bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type, bna_tx_disable() argument 3866 (*cbfn)(tx->bna->bnad, tx); bna_tx_disable() 3870 tx->stop_cbfn = cbfn; bna_tx_disable() 3871 tx->stop_cbarg = tx->bna->bnad; bna_tx_disable() 3873 tx->flags &= ~BNA_TX_F_ENABLED; bna_tx_disable() 3875 bfa_fsm_send_event(tx, TX_E_STOP); bna_tx_disable() 3879 bna_tx_cleanup_complete(struct bna_tx *tx) bna_tx_cleanup_complete() argument 3881 bfa_fsm_send_event(tx, TX_E_CLEANUP_DONE); bna_tx_cleanup_complete() 3885 bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx) bna_tx_mod_cb_tx_stopped() argument 3911 tx_mod->tx = (struct bna_tx *) bna_tx_mod_init() 3922 tx_mod->tx[i].rid = i; bna_tx_mod_init() 3923 bfa_q_qe_init(&tx_mod->tx[i].qe); bna_tx_mod_init() 3924 list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q); bna_tx_mod_init() 3955 struct bna_tx *tx; bna_tx_mod_start() local 3963 tx = (struct bna_tx *)qe; bna_tx_mod_start() 3964 if (tx->type == type) bna_tx_mod_start() 3965 bna_tx_start(tx); bna_tx_mod_start() 3972 struct bna_tx *tx; bna_tx_mod_stop() local 3983 tx = (struct bna_tx *)qe; bna_tx_mod_stop() 3984 if (tx->type == type) { bna_tx_mod_stop() 3986 bna_tx_stop(tx); bna_tx_mod_stop() 3996 struct bna_tx *tx; bna_tx_mod_fail() local 4003 tx = (struct bna_tx *)qe; bna_tx_mod_fail() 4004 bna_tx_fail(tx); bna_tx_mod_fail() 4009 bna_tx_coalescing_timeo_set(struct bna_tx *tx, int coalescing_timeo) bna_tx_coalescing_timeo_set() argument 4014 list_for_each(qe, &tx->txq_q) { bna_tx_coalescing_timeo_set()
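call_tx_stop_cbfn() above copies the stored callback, clears it, and only then invokes it, so a repeated trigger cannot fire the callback twice. A small sketch of that clear-then-call idiom (single-argument callback here for brevity; the driver's callback also receives the tx pointer):

#include <stdio.h>

struct demo_tx {
	void (*stop_cbfn)(void *arg);
	void *stop_cbarg;
};

static void on_stopped(void *arg)
{
	printf("tx stopped, arg=%p\n", arg);
}

static void call_stop_cbfn(struct demo_tx *tx)
{
	if (tx->stop_cbfn) {
		void (*cbfn)(void *) = tx->stop_cbfn;
		void *cbarg = tx->stop_cbarg;

		/* clear first, then call: a second invocation is a no-op */
		tx->stop_cbfn = NULL;
		tx->stop_cbarg = NULL;
		cbfn(cbarg);
	}
}

int main(void)
{
	struct demo_tx tx = { .stop_cbfn = on_stopped, .stop_cbarg = &tx };

	call_stop_cbfn(&tx);   /* fires once */
	call_stop_cbfn(&tx);   /* silently does nothing */
	return 0;
}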
|
/linux-4.1.27/drivers/staging/media/lirc/ |
H A D | lirc_zilog.c | 115 struct IR_tx *tx; member in struct:IR 183 * ir->tx set to NULL and deallocated - happens before ir->tx->ir put() release_ir_device() 276 struct IR_tx *tx; get_ir_tx() local 279 tx = ir->tx; get_ir_tx() 280 if (tx != NULL) get_ir_tx() 281 kref_get(&tx->ref); get_ir_tx() 283 return tx; get_ir_tx() 288 struct IR_tx *tx = container_of(ref, struct IR_tx, ref); release_ir_tx() local 289 struct IR *ir = tx->ir; release_ir_tx() 292 /* Don't put_ir_device(tx->ir) here, so our lock doesn't get freed */ release_ir_tx() 293 ir->tx = NULL; release_ir_tx() 294 kfree(tx); release_ir_tx() 297 static int put_ir_tx(struct IR_tx *tx, bool ir_devices_lock_held) put_ir_tx() argument 300 struct IR *ir = tx->ir; put_ir_tx() 303 released = kref_put(&tx->ref, release_ir_tx); put_ir_tx() 305 /* Do a reference put() for the tx->ir reference, if we released tx */ put_ir_tx() 322 struct IR_tx *tx; add_to_buf() local 341 tx = get_ir_tx(ir); add_to_buf() 391 if (tx != NULL) add_to_buf() 392 tx->need_boot = 1; add_to_buf() 447 if (tx != NULL) add_to_buf() 448 put_ir_tx(tx, false); add_to_buf() 645 static int send_data_block(struct IR_tx *tx, unsigned char *data_block) send_data_block() argument 658 dev_dbg(tx->ir->l.dev, "%*ph", 5, buf); send_data_block() 659 ret = i2c_master_send(tx->c, buf, tosend + 1); send_data_block() 661 dev_err(tx->ir->l.dev, send_data_block() 671 static int send_boot_data(struct IR_tx *tx) send_boot_data() argument 677 ret = send_data_block(tx, tx_data->boot_data); send_boot_data() 684 ret = i2c_master_send(tx->c, buf, 2); send_boot_data() 686 dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret); send_boot_data() 693 * upon attempting to get firmware revision, and tx probe thus fails. send_boot_data() 696 ret = i2c_master_send(tx->c, buf, 1); send_boot_data() 703 dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret); send_boot_data() 708 ret = i2c_master_recv(tx->c, buf, 4); send_boot_data() 710 dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret); send_boot_data() 714 dev_err(tx->ir->l.dev, "unexpected IR TX init response: %02x\n", send_boot_data() 718 dev_notice(tx->ir->l.dev, send_boot_data() 748 static int fw_load(struct IR_tx *tx) fw_load() argument 763 ret = request_firmware(&fw_entry, "haup-ir-blaster.bin", tx->ir->l.dev); fw_load() 765 dev_err(tx->ir->l.dev, fw_load() 771 dev_dbg(tx->ir->l.dev, "firmware of size %zu loaded\n", fw_entry->size); fw_load() 799 dev_err(tx->ir->l.dev, fw_load() 816 dev_dbg(tx->ir->l.dev, "%u IR blaster codesets loaded\n", fw_load() 881 dev_err(tx->ir->l.dev, "firmware is corrupt\n"); fw_load() 979 static int send_code(struct IR_tx *tx, unsigned int code, unsigned int key) send_code() argument 989 dev_err(tx->ir->l.dev, send_code() 997 ret = send_data_block(tx, data_block); send_code() 1004 ret = i2c_master_send(tx->c, buf, 2); send_code() 1006 dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret); send_code() 1012 ret = i2c_master_send(tx->c, buf, 1); send_code() 1019 dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret); send_code() 1024 ret = i2c_master_recv(tx->c, buf, 1); send_code() 1026 dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret); send_code() 1030 dev_err(tx->ir->l.dev, "unexpected IR TX response #1: %02x\n", send_code() 1038 ret = i2c_master_send(tx->c, buf, 2); send_code() 1040 dev_err(tx->ir->l.dev, "i2c_master_send failed with %d\n", ret); send_code() 1049 if (!tx->post_tx_ready_poll) { send_code() 1050 dev_dbg(tx->ir->l.dev, "sent code %u, key %u\n", 
code, key); send_code() 1063 ret = i2c_master_send(tx->c, buf, 1); send_code() 1066 dev_dbg(tx->ir->l.dev, send_code() 1071 dev_err(tx->ir->l.dev, send_code() 1078 i = i2c_master_recv(tx->c, buf, 1); send_code() 1080 dev_err(tx->ir->l.dev, "i2c_master_recv failed with %d\n", ret); send_code() 1084 dev_err(tx->ir->l.dev, "unexpected IR TX response #2: %02x\n", send_code() 1090 dev_dbg(tx->ir->l.dev, "sent code %u, key %u\n", code, key); send_code() 1104 struct IR_tx *tx; write() local 1113 tx = get_ir_tx(ir); write() 1114 if (tx == NULL) write() 1117 /* Ensure our tx->c i2c_client remains valid for the duration */ write() 1118 mutex_lock(&tx->client_lock); write() 1119 if (tx->c == NULL) { write() 1120 mutex_unlock(&tx->client_lock); write() 1121 put_ir_tx(tx, false); write() 1135 mutex_unlock(&tx->client_lock); write() 1136 put_ir_tx(tx, false); write() 1141 if (tx->need_boot == 1) { write() 1143 ret = fw_load(tx); write() 1146 mutex_unlock(&tx->client_lock); write() 1147 put_ir_tx(tx, false); write() 1153 ret = send_boot_data(tx); write() 1155 tx->need_boot = 0; write() 1160 ret = send_code(tx, (unsigned)command >> 16, write() 1164 mutex_unlock(&tx->client_lock); write() 1165 put_ir_tx(tx, false); write() 1176 dev_err(tx->ir->l.dev, write() 1180 dev_err(tx->ir->l.dev, write() 1183 mutex_unlock(&tx->client_lock); write() 1184 put_ir_tx(tx, false); write() 1189 tx->need_boot = 1; write() 1198 mutex_unlock(&tx->client_lock); write() 1201 put_ir_tx(tx, false); write() 1408 struct IR_tx *tx = i2c_get_clientdata(client); ir_remove() local 1410 if (tx != NULL) { ir_remove() 1411 mutex_lock(&tx->client_lock); ir_remove() 1412 tx->c = NULL; ir_remove() 1413 mutex_unlock(&tx->client_lock); ir_remove() 1414 put_ir_tx(tx, false); ir_remove() 1450 struct IR_tx *tx; ir_probe() local 1517 tx = kzalloc(sizeof(struct IR_tx), GFP_KERNEL); ir_probe() 1518 if (tx == NULL) { ir_probe() 1522 kref_init(&tx->ref); ir_probe() 1523 ir->tx = tx; ir_probe() 1526 mutex_init(&tx->client_lock); ir_probe() 1527 tx->c = client; ir_probe() 1528 tx->need_boot = 1; ir_probe() 1529 tx->post_tx_ready_poll = ir_probe() 1533 tx->ir = get_ir_device(ir, true); ir_probe() 1535 /* A tx ref goes to the i2c_client */ ir_probe() 1547 fw_load(tx); ir_probe() 1551 dev_info(tx->ir->l.dev, ir_probe() 1558 tx = get_ir_tx(ir); ir_probe() 1591 dev_err(tx->ir->l.dev, ir_probe() 1604 if (tx == NULL) { ir_probe() 1615 dev_err(tx->ir->l.dev, ir_probe() 1628 if (tx != NULL) ir_probe() 1629 put_ir_tx(tx, true); ir_probe() 1640 if (tx != NULL) ir_probe() 1641 put_ir_tx(tx, true); ir_probe()
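get_ir_tx()/put_ir_tx() hand the tx object around under a kref, so whichever user drops the last reference frees it. A userspace analogue of that reference-count pattern (plain integer instead of struct kref, no locking):

#include <stdio.h>
#include <stdlib.h>

struct demo_tx {
	int refcount;
	int need_boot;
};

static struct demo_tx *get_tx(struct demo_tx *tx)
{
	if (tx)
		tx->refcount++;
	return tx;
}

/* returns 1 when the object was released, mirroring kref_put()'s return */
static int put_tx(struct demo_tx *tx)
{
	if (--tx->refcount == 0) {
		free(tx);
		return 1;
	}
	return 0;
}

int main(void)
{
	struct demo_tx *tx = calloc(1, sizeof(*tx));

	if (!tx)
		return 1;
	tx->refcount = 1;           /* like kref_init() */
	tx->need_boot = 1;

	get_tx(tx);                 /* a second user takes a reference */
	printf("released early? %d\n", put_tx(tx));
	printf("released now?   %d\n", put_tx(tx));
	return 0;
}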
|
H A D | lirc_sasem.c | 110 int status; /* status of tx completion */ 111 } tx; member in struct:sasem_context 332 init_completion(&context->tx.finished); send_packet() 333 atomic_set(&context->tx.busy, 1); send_packet() 337 atomic_set(&context->tx.busy, 0); send_packet() 343 wait_for_completion(&context->tx.finished); send_packet() 346 retval = context->tx.status; send_packet() 349 "packet tx failed (%d)\n", retval); send_packet() 396 memcpy(context->tx.data_buf, data_buf, n_bytes); vfd_write() 400 context->tx.data_buf[i] = ' '; vfd_write() 420 memcpy(context->usb_tx_buf, context->tx.data_buf, 8); vfd_write() 424 context->tx.data_buf + 8, 8); vfd_write() 434 context->tx.data_buf + 16, 8); vfd_write() 438 context->tx.data_buf + 24, 8); vfd_write() 469 context->tx.status = urb->status; usb_tx_callback() 472 atomic_set(&context->tx.busy, 0); usb_tx_callback() 473 complete(&context->tx.finished); usb_tx_callback() 901 if (atomic_read(&context->tx.busy)) { sasem_disconnect() 904 wait_for_completion(&context->tx.finished); sasem_disconnect()
|
/linux-4.1.27/drivers/iio/gyro/ |
H A D | adxrs450.c | 70 * @buf_lock: mutex to protect tx and rx 71 * @tx: transmit buffer 77 __be32 tx ____cacheline_aligned; 94 u32 tx; adxrs450_spi_read_reg_16() local 98 .tx_buf = &st->tx, adxrs450_spi_read_reg_16() 100 .len = sizeof(st->tx), adxrs450_spi_read_reg_16() 110 tx = ADXRS450_READ_DATA | (reg_address << 17); adxrs450_spi_read_reg_16() 112 if (!(hweight32(tx) & 1)) adxrs450_spi_read_reg_16() 113 tx |= ADXRS450_P; adxrs450_spi_read_reg_16() 115 st->tx = cpu_to_be32(tx); adxrs450_spi_read_reg_16() 142 u32 tx; adxrs450_spi_write_reg_16() local 146 tx = ADXRS450_WRITE_DATA | (reg_address << 17) | (val << 1); adxrs450_spi_write_reg_16() 148 if (!(hweight32(tx) & 1)) adxrs450_spi_write_reg_16() 149 tx |= ADXRS450_P; adxrs450_spi_write_reg_16() 151 st->tx = cpu_to_be32(tx); adxrs450_spi_write_reg_16() 152 ret = spi_write(st->us, &st->tx, sizeof(st->tx)); adxrs450_spi_write_reg_16() 172 .tx_buf = &st->tx, adxrs450_spi_sensor_data() 174 .len = sizeof(st->tx), adxrs450_spi_sensor_data() 184 st->tx = cpu_to_be32(ADXRS450_SENSOR_DATA); adxrs450_spi_sensor_data() 209 u32 tx; adxrs450_spi_initial() local 211 .tx_buf = &st->tx, adxrs450_spi_initial() 214 .len = sizeof(st->tx), adxrs450_spi_initial() 218 tx = ADXRS450_SENSOR_DATA; adxrs450_spi_initial() 220 tx |= (ADXRS450_CHK | ADXRS450_P); adxrs450_spi_initial() 221 st->tx = cpu_to_be32(tx); adxrs450_spi_initial()
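The adxrs450 command words carry an odd-parity bit: the driver counts set bits with hweight32() and sets the P bit whenever the count is even. A standalone sketch of that parity insertion; the bit position and command value below are placeholders, not the device's actual encoding.

#include <stdint.h>
#include <stdio.h>

#define DEMO_P  0x01u   /* stand-in for the ADXRS450_P parity bit */

static uint32_t add_odd_parity(uint32_t word)
{
	/* __builtin_popcount plays the role of the kernel's hweight32() */
	if (!(__builtin_popcount(word) & 1))
		word |= DEMO_P;
	return word;
}

int main(void)
{
	uint32_t cmd = 0x80000000u | (0x04u << 17);   /* invented read command */

	printf("command with parity: 0x%08x\n", (unsigned)add_odd_parity(cmd));
	return 0;
}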
|
/linux-4.1.27/drivers/i2c/busses/ |
H A D | i2c-dln2.c | 60 } tx; dln2_i2c_enable() local 62 tx.port = dln2->port; dln2_i2c_enable() 69 return dln2_transfer_tx(dln2->pdev, cmd, &tx, sizeof(tx)); dln2_i2c_enable() 83 } __packed *tx = dln2->buf; dln2_i2c_write() local 86 BUILD_BUG_ON(sizeof(*tx) > DLN2_I2C_BUF_SIZE); dln2_i2c_write() 88 tx->port = dln2->port; dln2_i2c_write() 89 tx->addr = addr; dln2_i2c_write() 90 tx->mem_addr_len = 0; dln2_i2c_write() 91 tx->mem_addr = 0; dln2_i2c_write() 92 tx->buf_len = cpu_to_le16(data_len); dln2_i2c_write() 93 memcpy(tx->buf, data, data_len); dln2_i2c_write() 95 len = sizeof(*tx) + data_len - DLN2_I2C_MAX_XFER_SIZE; dln2_i2c_write() 96 ret = dln2_transfer_tx(dln2->pdev, DLN2_I2C_WRITE, tx, len); dln2_i2c_write() 113 } __packed tx; dln2_i2c_read() local 122 tx.port = dln2->port; dln2_i2c_read() 123 tx.addr = addr; dln2_i2c_read() 124 tx.mem_addr_len = 0; dln2_i2c_read() 125 tx.mem_addr = 0; dln2_i2c_read() 126 tx.buf_len = cpu_to_le16(data_len); dln2_i2c_read() 128 ret = dln2_transfer(dln2->pdev, DLN2_I2C_READ, &tx, sizeof(tx), dln2_i2c_read()
|
H A D | i2c-designware-core.h | 38 * @cmd_complete: tx completion indicator 44 * @msg_write_idx: the element index of the current tx message in the msgs 46 * @tx_buf_len: the length of the current tx buffer 47 * @tx_buf: the current tx buffer 57 * @tx_fifo_depth: depth of the hardware tx fifo 59 * @rx_outstanding: current master-rx elements in tx fifo
|
/linux-4.1.27/drivers/net/ethernet/stmicro/stmmac/ |
H A D | norm_desc.c | 35 if (unlikely(p->des01.tx.error_summary)) { ndesc_get_tx_status() 36 if (unlikely(p->des01.tx.underflow_error)) { ndesc_get_tx_status() 40 if (unlikely(p->des01.tx.no_carrier)) { ndesc_get_tx_status() 44 if (unlikely(p->des01.tx.loss_carrier)) { ndesc_get_tx_status() 48 if (unlikely((p->des01.tx.excessive_deferral) || ndesc_get_tx_status() 49 (p->des01.tx.excessive_collisions) || ndesc_get_tx_status() 50 (p->des01.tx.late_collision))) ndesc_get_tx_status() 51 stats->collisions += p->des01.tx.collision_count; ndesc_get_tx_status() 58 if (unlikely(p->des01.tx.deferred)) ndesc_get_tx_status() 66 return p->des01.tx.buffer1_size; ndesc_get_tx_len() 150 return p->des01.tx.own; ndesc_get_tx_owner() 160 p->des01.tx.own = 1; ndesc_set_tx_owner() 170 return p->des01.tx.last_segment; ndesc_get_tx_ls() 175 int ter = p->des01.tx.end_ring; ndesc_release_tx_desc() 187 p->des01.tx.first_segment = is_fs; ndesc_prepare_tx_desc() 194 p->des01.tx.checksum_insertion = cic_full; ndesc_prepare_tx_desc() 199 p->des01.tx.interrupt = 0; ndesc_clear_tx_ic() 204 p->des01.tx.last_segment = 1; ndesc_close_tx_desc() 205 p->des01.tx.interrupt = 1; ndesc_close_tx_desc() 223 p->des01.tx.time_stamp_enable = 1; ndesc_enable_tx_timestamp() 228 return p->des01.tx.time_stamp_status; ndesc_get_tx_timestamp_status()
|
H A D | descs_com.h | 74 p->des01.tx.end_ring = 1; ndesc_tx_set_on_ring() 79 p->des01.tx.end_ring = ter; ndesc_end_tx_desc_on_ring() 88 p->des01.tx.buffer1_size = len; norm_set_tx_desc_len_on_ring() 122 p->des01.tx.second_address_chained = 1; ndesc_tx_set_on_chain() 127 p->des01.tx.second_address_chained = 1; ndesc_end_tx_desc_on_chain() 132 p->des01.tx.buffer1_size = len; norm_set_tx_desc_len_on_chain()
|
H A D | common.h | 308 /* Invoked by the xmit function to prepare the tx descriptor */ 314 /* Invoked by the xmit function to close the tx descriptor */ 316 /* Clean the tx descriptor as soon as the tx irq is received */ 318 /* Clear interrupt on tx frame completion. When this bit is 321 /* Last tx segment reports the transmit status */ 338 /* Set tx timestamp enable bit */ 340 /* get tx timestamp status */ 358 /* Set tx/rx threshold in the csr6 register
|
/linux-4.1.27/sound/arm/ |
H A D | aaci.h | 79 #define SR_TXU (1 << 9) /* tx underrun */ 81 #define SR_TXB (1 << 7) /* tx busy */ 83 #define SR_TXFF (1 << 5) /* tx fifo full */ 85 #define SR_TXHE (1 << 3) /* tx fifo half empty */ 87 #define SR_TXFE (1 << 1) /* tx fifo empty */ 94 #define ISR_URINTR (1 << 5) /* tx underflow */ 97 #define ISR_TXINTR (1 << 2) /* tx fifo intr */ 98 #define ISR_RXTOINTR (1 << 1) /* tx timeout */ 99 #define ISR_TXCINTR (1 << 0) /* tx complete */ 116 #define ISR_UR (1 << 5) /* tx fifo underrun */ 119 #define ISR_TX (1 << 2) /* tx interrupt status */ 121 #define ISR_TXC (1 << 0) /* tx complete */ 127 #define IE_UR (1 << 5) /* tx fifo underrun */ 130 #define IE_TX (1 << 2) /* tx interrupt status */ 132 #define IE_TXC (1 << 0) /* tx complete */ 139 #define SLFR_12TXE (1 << 11) /* slot 12 tx empty */ 141 #define SLFR_2TXE (1 << 9) /* slot 2 tx empty */ 143 #define SLFR_1TXE (1 << 7) /* slot 1 tx empty */ 145 #define SLFR_12TXB (1 << 5) /* slot 12 tx busy */ 147 #define SLFR_2TXB (1 << 3) /* slot 2 tx busy */ 149 #define SLFR_1TXB (1 << 1) /* slot 1 tx busy */
|
/linux-4.1.27/drivers/atm/ |
H A D | eni.c | 188 if (eni_dev->tx[i].send) dump() 190 eni_dev->tx[i].send,eni_dev->tx[i].words*4); dump() 1036 struct eni_tx *tx; do_tx() local 1050 tx = eni_vcc->tx; do_tx() 1051 NULLCHECK(tx); do_tx() 1090 if (!NEPMOK(tx->tx_pos,size+TX_GAP, do_tx() 1091 eni_in(MID_TX_RDPTR(tx->index)),tx->words)) { do_tx() 1108 DPRINTK("dma_wr is %d, tx_pos is %ld\n",dma_wr,tx->tx_pos); do_tx() 1120 eni_dev->dma[j++] = (((tx->tx_pos+TX_DESCR_SIZE) & (tx->words-1)) << do_tx() 1121 MID_DMA_COUNT_SHIFT) | (tx->index << MID_DMA_CHAN_SHIFT) | do_tx() 1125 if (aal5) put_dma(tx->index,eni_dev->dma,&j,paddr,skb->len); do_tx() 1126 else put_dma(tx->index,eni_dev->dma,&j,paddr+4,skb->len-4); do_tx() 1131 put_dma(tx->index,eni_dev->dma,&j,(unsigned long) do_tx() 1135 put_dma(tx->index,eni_dev->dma,&j,(unsigned long) do_tx() 1141 put_dma(tx->index, eni_dev->dma, &j, eni_dev->zero.dma, do_tx() 1145 eni_dev->dma[j++] = (((tx->tx_pos+size) & (tx->words-1)) << do_tx() 1146 MID_DMA_COUNT_SHIFT) | (tx->index << MID_DMA_CHAN_SHIFT) | do_tx() 1152 (aal5 ? MID_SEG_AAL5 : 0) | (tx->prescaler << MID_SEG_PR_SHIFT) | do_tx() 1153 (tx->resolution << MID_SEG_RATE_SHIFT) | do_tx() 1154 (size/(ATM_CELL_PAYLOAD/4)),tx->send+tx->tx_pos*4); do_tx() 1155 /*printk("dsc = 0x%08lx\n",(unsigned long) readl(tx->send+tx->tx_pos*4));*/ do_tx() 1159 tx->send+((tx->tx_pos+1) & (tx->words-1))*4); do_tx() 1162 writel(skb->len,tx->send+ do_tx() 1163 ((tx->tx_pos+size-AAL5_TRAILER) & (tx->words-1))*4); do_tx() 1170 ENI_PRV_POS(skb) = tx->tx_pos; do_tx() 1173 tx->tx_pos = (tx->tx_pos+size) & (tx->words-1); do_tx() 1174 DPRINTK("dma_wr set to %d, tx_pos is now %ld\n",dma_wr,tx->tx_pos); do_tx() 1184 struct eni_tx *tx; poll_tx() local 1191 tx = &ENI_DEV(dev)->tx[i]; poll_tx() 1192 if (tx->send) poll_tx() 1193 while ((skb = skb_dequeue(&tx->backlog))) { poll_tx() 1197 skb_queue_head(&tx->backlog,skb); poll_tx() 1211 struct eni_tx *tx; dequeue_tx() local 1219 tx = ENI_VCC(vcc)->tx; dequeue_tx() 1220 NULLCHECK(ENI_VCC(vcc)->tx); dequeue_tx() 1222 (unsigned) eni_in(MID_TX_DESCRSTART(tx->index))); dequeue_tx() 1223 if (ENI_VCC(vcc)->txing < tx->words && ENI_PRV_POS(skb) == dequeue_tx() 1224 eni_in(MID_TX_DESCRSTART(tx->index))) { dequeue_tx() 1233 atomic_inc(&vcc->stats->tx); dequeue_tx() 1245 if (!eni_dev->tx[i].send) return eni_dev->tx+i; alloc_tx() 1292 struct eni_tx *tx; reserve_or_set_tx() local 1311 eni_vcc->tx = eni_dev->ubr; reserve_or_set_tx() 1317 new_tx = !eni_vcc->tx; reserve_or_set_tx() 1319 if (!new_tx) tx = eni_vcc->tx; reserve_or_set_tx() 1323 tx = alloc_tx(eni_dev,unlimited); reserve_or_set_tx() 1324 if (!tx) { reserve_or_set_tx() 1328 DPRINTK("got chan %d\n",tx->index); reserve_or_set_tx() 1329 tx->reserved = tx->shaping = 0; reserve_or_set_tx() 1330 tx->send = mem; reserve_or_set_tx() 1331 tx->words = size >> 2; reserve_or_set_tx() 1332 skb_queue_head_init(&tx->backlog); reserve_or_set_tx() 1335 ((tx->send-eni_dev->ram) >> (MID_LOC_SKIP+2)), reserve_or_set_tx() 1336 MID_TX_PLACE(tx->index)); reserve_or_set_tx() 1337 tx->tx_pos = eni_in(MID_TX_DESCRSTART(tx->index)) & reserve_or_set_tx() 1340 error = comp_tx(eni_dev,&rate,tx->reserved,&pre,&res,unlimited); reserve_or_set_tx() 1344 if (!error && !ubr && rate > eni_dev->tx_bw+tx->reserved) reserve_or_set_tx() 1346 if (!error && set_rsv && !set_shp && rate < tx->shaping) reserve_or_set_tx() 1348 if (!error && !set_rsv && rate > tx->reserved && !ubr) reserve_or_set_tx() 1352 tx->send = NULL; reserve_or_set_tx() 1359 eni_dev->tx_bw += tx->reserved; reserve_or_set_tx() 1360 tx->reserved = 
rate; reserve_or_set_tx() 1364 if (unlimited && new_tx) eni_dev->ubr = tx; reserve_or_set_tx() 1365 tx->prescaler = pre; reserve_or_set_tx() 1366 tx->resolution = res; reserve_or_set_tx() 1367 tx->shaping = rate; reserve_or_set_tx() 1369 if (set_shp) eni_vcc->tx = tx; reserve_or_set_tx() 1370 DPRINTK("rsv %d shp %d\n",tx->reserved,tx->shaping); reserve_or_set_tx() 1377 ENI_VCC(vcc)->tx = NULL; open_tx_first() 1397 if (!eni_vcc->tx) return; close_tx() 1407 txing = skb_peek(&eni_vcc->tx->backlog) || eni_vcc->txing; close_tx() 1416 if (eni_vcc->tx != eni_dev->ubr) { close_tx() 1422 while (eni_in(MID_TX_RDPTR(eni_vcc->tx->index)) != close_tx() 1423 eni_in(MID_TX_DESCRSTART(eni_vcc->tx->index))) close_tx() 1425 eni_free_mem(eni_dev,eni_vcc->tx->send,eni_vcc->tx->words << 2); close_tx() 1426 eni_vcc->tx->send = NULL; close_tx() 1427 eni_dev->tx_bw += eni_vcc->tx->reserved; close_tx() 1429 eni_vcc->tx = NULL; close_tx() 1447 eni_dev->tx[i].send = NULL; start_tx() 1448 eni_dev->tx[i].index = i; start_tx() 1839 DPRINTK("vci 0x%lx,rx 0x%lx, tx 0x%lx,srv 0x%lx,buf 0x%lx\n", eni_start() 1930 eni_vcc->tx = NULL; /* for eni_close after open_rx */ eni_open() 1958 struct eni_tx *tx = ENI_VCC(vcc)->tx; eni_change_qos() local 1963 if (tx == eni_dev->ubr) return -EBADFD; eni_change_qos() 1967 if ((flgs & ATM_MF_DEC_RSV) && rate && rate < tx->reserved) rsv = 1; eni_change_qos() 1968 if ((flgs & ATM_MF_INC_RSV) && (!rate || rate > tx->reserved)) rsv = 1; eni_change_qos() 1969 if ((flgs & ATM_MF_DEC_SHP) && rate && rate < tx->shaping) shp = 1; eni_change_qos() 1970 if ((flgs & ATM_MF_INC_SHP) && (!rate || rate > tx->shaping)) shp = 1; eni_change_qos() 1984 dsc = tx->send+ENI_PRV_POS(skb)*4; eni_change_qos() 1986 (tx->prescaler << MID_SEG_PR_SHIFT) | eni_change_qos() 1987 (tx->resolution << MID_SEG_RATE_SHIFT), dsc); eni_change_qos() 2012 if ((mult.tx && mult.tx <= 100) || (mult.rx &&mult.rx <= 100) || eni_ioctl() 2013 mult.tx > 65536 || mult.rx > 65536) eni_ioctl() 2015 if (mult.tx) eni_dev->tx_mult = mult.tx; eni_ioctl() 2053 if (!ENI_VCC(vcc)->tx) { eni_send() 2077 skb_queue_tail(&ENI_VCC(vcc)->tx->backlog,skb); eni_send() 2154 return sprintf(page,"%4sBuffer multipliers: tx %d%%, rx %d%%\n", eni_proc_read() 2157 struct eni_tx *tx = eni_dev->tx+i; eni_proc_read() local 2159 if (!tx->send) continue; eni_proc_read() 2161 return sprintf(page, "tx[%d]: 0x%lx-0x%lx " eni_proc_read() 2163 (unsigned long) (tx->send - eni_dev->ram), eni_proc_read() 2164 tx->send-eni_dev->ram+tx->words*4-1,tx->words*4, eni_proc_read() 2165 tx->reserved,tx->shaping, eni_proc_read() 2166 tx == eni_dev->ubr ? " (UBR)" : ""); eni_proc_read() 2170 skb_queue_len(&tx->backlog)); eni_proc_read() 2192 if (eni_vcc->tx) length += sprintf(page+length,", "); sk_for_each() 2194 if (eni_vcc->tx) sk_for_each() 2195 length += sprintf(page+length,"tx[%d], txing %d bytes", sk_for_each() 2196 eni_vcc->tx->index,eni_vcc->txing); sk_for_each()
|
H A D | lanai.c | 32 * o AAL0 is stubbed in but the actual rx/tx path isn't written yet: 245 } tx; member in struct:lanai_vcc 284 DECLARE_BITMAP(backlog_vccs, NUM_VCI); /* VCCs with tx backlog */ 723 dma_addr_t dmaaddr = lvcc->tx.buf.dmaaddr; host_vcc_start_tx() 731 (lvcc->tx.atmvcc->qos.txtp.traffic_class == ATM_CBR) ? host_vcc_start_tx() 736 TXADDR1_SET_SIZE(lanai_buf_size_cardorder(&lvcc->tx.buf)), host_vcc_start_tx() 777 while ((skb = skb_dequeue(&lvcc->tx.backlog)) != NULL) lanai_shutdown_tx_vci() 778 lanai_free_skb(lvcc->tx.atmvcc, skb); lanai_shutdown_tx_vci() 788 (((lanai_buf_size(&lvcc->tx.buf) / 1024) * HZ) >> 7); lanai_shutdown_tx_vci() 793 (lvcc->tx.atmvcc->qos.txtp.traffic_class != ATM_CBR || lanai_shutdown_tx_vci() 804 lvcc->tx.atmvcc->dev->number, lvcc->vci); lanai_shutdown_tx_vci() 810 /* 15.2.2 - clear out all tx registers */ lanai_shutdown_tx_vci() 1137 /* space left in tx buffer in bytes */ vcc_tx_space() 1142 r -= ((unsigned long) lvcc->tx.buf.ptr) - vcc_tx_space() 1143 ((unsigned long) lvcc->tx.buf.start); vcc_tx_space() 1146 r += lanai_buf_size(&lvcc->tx.buf); vcc_tx_space() 1153 return !skb_queue_empty(&lvcc->tx.backlog); vcc_is_backlogged() 1167 APRINTK((((unsigned long) lvcc->tx.buf.ptr) & 15) == 0, vcc_tx_add_aal5_descriptor() 1168 "vcc_tx_add_aal5_descriptor: bad ptr=%p\n", lvcc->tx.buf.ptr); vcc_tx_add_aal5_descriptor() 1169 lvcc->tx.buf.ptr += 4; /* Hope the values REALLY don't matter */ vcc_tx_add_aal5_descriptor() 1170 pos = ((unsigned char *) lvcc->tx.buf.ptr) - vcc_tx_add_aal5_descriptor() 1171 (unsigned char *) lvcc->tx.buf.start; vcc_tx_add_aal5_descriptor() 1175 lvcc->tx.buf.start, lvcc->tx.buf.ptr, lvcc->tx.buf.end); vcc_tx_add_aal5_descriptor() 1176 pos = (pos + len) & (lanai_buf_size(&lvcc->tx.buf) - 1); vcc_tx_add_aal5_descriptor() 1180 lvcc->tx.buf.start, lvcc->tx.buf.ptr, lvcc->tx.buf.end); vcc_tx_add_aal5_descriptor() 1181 lvcc->tx.buf.ptr[-1] = vcc_tx_add_aal5_descriptor() 1183 ((lvcc->tx.atmvcc->atm_options & ATM_ATMOPT_CLP) ? 
vcc_tx_add_aal5_descriptor() 1185 if (lvcc->tx.buf.ptr >= lvcc->tx.buf.end) vcc_tx_add_aal5_descriptor() 1186 lvcc->tx.buf.ptr = lvcc->tx.buf.start; vcc_tx_add_aal5_descriptor() 1193 APRINTK((((unsigned long) lvcc->tx.buf.ptr) & 15) == 8, vcc_tx_add_aal5_trailer() 1194 "vcc_tx_add_aal5_trailer: bad ptr=%p\n", lvcc->tx.buf.ptr); vcc_tx_add_aal5_trailer() 1195 lvcc->tx.buf.ptr += 2; vcc_tx_add_aal5_trailer() 1196 lvcc->tx.buf.ptr[-2] = cpu_to_be32((uu << 24) | (cpi << 16) | len); vcc_tx_add_aal5_trailer() 1197 if (lvcc->tx.buf.ptr >= lvcc->tx.buf.end) vcc_tx_add_aal5_trailer() 1198 lvcc->tx.buf.ptr = lvcc->tx.buf.start; vcc_tx_add_aal5_trailer() 1206 e = ((unsigned char *) lvcc->tx.buf.ptr) + n; vcc_tx_memcpy() 1207 m = e - (unsigned char *) lvcc->tx.buf.end; vcc_tx_memcpy() 1210 memcpy(lvcc->tx.buf.ptr, src, n - m); vcc_tx_memcpy() 1212 memcpy(lvcc->tx.buf.start, src + n - m, m); vcc_tx_memcpy() 1213 e = ((unsigned char *) lvcc->tx.buf.start) + m; vcc_tx_memcpy() 1215 lvcc->tx.buf.ptr = (u32 *) e; vcc_tx_memcpy() 1224 e = ((unsigned char *) lvcc->tx.buf.ptr) + n; vcc_tx_memzero() 1225 m = e - (unsigned char *) lvcc->tx.buf.end; vcc_tx_memzero() 1228 memset(lvcc->tx.buf.ptr, 0, n - m); vcc_tx_memzero() 1230 memset(lvcc->tx.buf.start, 0, m); vcc_tx_memzero() 1231 e = ((unsigned char *) lvcc->tx.buf.start) + m; vcc_tx_memzero() 1233 lvcc->tx.buf.ptr = (u32 *) e; vcc_tx_memzero() 1240 int i, ptr = ((unsigned char *) lvcc->tx.buf.ptr) - lanai_endtx() 1241 (unsigned char *) lvcc->tx.buf.start; lanai_endtx() 1244 ptr, lvcc->vci, lvcc->tx.buf.start, lvcc->tx.buf.ptr, lanai_endtx() 1245 lvcc->tx.buf.end); lanai_endtx() 1297 lanai_free_skb(lvcc->tx.atmvcc, skb); lanai_send_one_aal5() 1298 atomic_inc(&lvcc->tx.atmvcc->stats->tx); lanai_send_one_aal5() 1312 skb = skb_dequeue(&lvcc->tx.backlog); vcc_tx_unqueue_aal5() 1318 skb_queue_head(&lvcc->tx.backlog, skb); vcc_tx_unqueue_aal5() 1344 skb_queue_tail(&lvcc->tx.backlog, skb); vcc_tx_aal5() 1361 /* Remember to increment lvcc->tx.atmvcc->stats->tx */ vcc_tx_aal0() 1362 lanai_free_skb(lvcc->tx.atmvcc, skb); vcc_tx_aal0() 1474 skb_queue_head_init(&lvcc->tx.backlog); new_lanai_vcc() 1516 lvcc->tx.unqueue = vcc_tx_unqueue_aal0; lanai_setup_tx_vci() 1520 lvcc->tx.unqueue = vcc_tx_unqueue_aal5; lanai_setup_tx_vci() 1524 return lanai_get_sized_buffer(lanai, &lvcc->tx.buf, max_sdu, lanai_setup_tx_vci() 1638 if (unlikely(lvcc->tx.atmvcc == NULL)) { handle_service() 1646 lvcc->tx.endptr = SERVICE_GET_END(s); handle_service() 1707 lvcc->tx.unqueue(lanai, lvcc, lvcc->tx.endptr); iter_transmit() 1754 if (lvcc == NULL || lvcc->tx.atmvcc == NULL || iter_dequeue() 1760 lvcc->tx.unqueue(lanai, lvcc, endptr); iter_dequeue() 2005 lvcc->tx.atmvcc != NULL && lvcc->tx.atmvcc != atmvcc) vci_is_ok() 2293 if (lvcc->tx.atmvcc == atmvcc) { lanai_close() 2300 lanai_buf_deallocate(&lvcc->tx.buf, lanai->pci); lanai_close() 2301 lvcc->tx.atmvcc = NULL; lanai_close() 2361 APRINTK(lvcc->tx.atmvcc == NULL, "tx.atmvcc!=NULL, vci=%d\n", lanai_open() 2366 lvcc->tx.atmvcc = atmvcc; lanai_open() 2381 if (atmvcc == lvcc->tx.atmvcc) { lanai_open() 2400 lvcc->tx.atmvcc != atmvcc)) lanai_send() 2524 if (lvcc->tx.atmvcc != NULL) lanai_proc_read() 2527 lvcc->tx.atmvcc->qos.aal == ATM_AAL5 ? 5 : 0, lanai_proc_read() 2528 lanai_buf_size(&lvcc->tx.buf), lanai_proc_read() 2529 lvcc->tx.atmvcc == lanai->cbrvcc ? 'C' : 'U', lanai_proc_read()
|
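The lanai.c excerpt above computes the free space in the per-VCC transmit buffer (vcc_tx_space()) and advances buffer positions with a power-of-two mask. Below is a minimal standalone sketch of that wrap-around arithmetic, with illustrative names and a fixed buffer size standing in for the driver's lanai_buf_size(); it is not the driver code itself.

#include <stdio.h>

#define BUF_SIZE 1024u		/* must be a power of two, as in the driver */

/* bytes that can still be written before the writer overtakes the reader;
 * when the two positions coincide this convention reports "all free" */
static unsigned int tx_space(unsigned int rd_pos, unsigned int wr_pos)
{
	int r = (int)rd_pos - (int)wr_pos;

	if (r <= 0)		/* writer is at or ahead of the reader */
		r += BUF_SIZE;	/* wrap: bytes to the end plus bytes at the start */
	return (unsigned int)r;
}

/* advance a position by len bytes, wrapping with the power-of-two mask */
static unsigned int tx_advance(unsigned int pos, unsigned int len)
{
	return (pos + len) & (BUF_SIZE - 1);
}

int main(void)
{
	unsigned int wr = 1000, rd = 16;

	printf("space: %u\n", tx_space(rd, wr));		/* 40 bytes left */
	wr = tx_advance(wr, 24);				/* wraps past the end to 0 */
	printf("wr=%u space=%u\n", wr, tx_space(rd, wr));	/* 16 bytes left */
	return 0;
}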
/linux-4.1.27/drivers/iio/imu/ |
H A D | adis.c | 38 .tx_buf = adis->tx, adis_write_reg() 44 .tx_buf = adis->tx + 2, adis_write_reg() 50 .tx_buf = adis->tx + 4, adis_write_reg() 56 .tx_buf = adis->tx + 6, adis_write_reg() 61 .tx_buf = adis->tx + 8, adis_write_reg() 73 adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID); adis_write_reg() 74 adis->tx[1] = page; adis_write_reg() 80 adis->tx[8] = ADIS_WRITE_REG(reg + 3); adis_write_reg() 81 adis->tx[9] = (value >> 24) & 0xff; adis_write_reg() 82 adis->tx[6] = ADIS_WRITE_REG(reg + 2); adis_write_reg() 83 adis->tx[7] = (value >> 16) & 0xff; adis_write_reg() 85 adis->tx[4] = ADIS_WRITE_REG(reg + 1); adis_write_reg() 86 adis->tx[5] = (value >> 8) & 0xff; adis_write_reg() 88 adis->tx[2] = ADIS_WRITE_REG(reg); adis_write_reg() 89 adis->tx[3] = value & 0xff; adis_write_reg() 130 .tx_buf = adis->tx, adis_read_reg() 136 .tx_buf = adis->tx + 2, adis_read_reg() 142 .tx_buf = adis->tx + 4, adis_read_reg() 160 adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID); adis_read_reg() 161 adis->tx[1] = page; adis_read_reg() 167 adis->tx[2] = ADIS_READ_REG(reg + 2); adis_read_reg() 168 adis->tx[3] = 0; adis_read_reg() 171 adis->tx[4] = ADIS_READ_REG(reg); adis_read_reg() 172 adis->tx[5] = 0; adis_read_reg()
|
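adis_write_reg() above splits a 32-bit register value into four two-byte SPI transfers, one value byte per register address, least significant byte at the base address, after first selecting the register page. A hedged standalone sketch of that packing follows; ADIS_WRITE_REG() is assumed to set the write bit (0x80) in the address byte, and the page/register numbers are made up for the example.

#include <stdint.h>
#include <stdio.h>

#define ADIS_REG_PAGE_ID	0x00
#define ADIS_WRITE_REG(reg)	(0x80 | (reg))	/* assumed command encoding */

static void pack_write32(uint8_t tx[10], uint8_t page, uint8_t reg, uint32_t value)
{
	tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID);	/* select register page first */
	tx[1] = page;
	tx[2] = ADIS_WRITE_REG(reg);			/* LSB at the base address */
	tx[3] = value & 0xff;
	tx[4] = ADIS_WRITE_REG(reg + 1);
	tx[5] = (value >> 8) & 0xff;
	tx[6] = ADIS_WRITE_REG(reg + 2);
	tx[7] = (value >> 16) & 0xff;
	tx[8] = ADIS_WRITE_REG(reg + 3);		/* MSB last */
	tx[9] = (value >> 24) & 0xff;
}

int main(void)
{
	uint8_t tx[10];
	int i;

	pack_write32(tx, 0, 0x12, 0xAABBCCDD);
	for (i = 0; i < 10; i += 2)
		printf("xfer %d: cmd=0x%02x data=0x%02x\n", i / 2, tx[i], tx[i + 1]);
	return 0;
}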
H A D | adis_buffer.c | 30 __be16 *tx, *rx; adis_update_scan_mode() local 46 tx = rx + scan_count; adis_update_scan_mode() 57 adis->xfer[j].tx_buf = &tx[j]; adis_update_scan_mode() 68 *tx++ = cpu_to_be16((chan->address + 2) << 8); adis_update_scan_mode() 69 *tx++ = cpu_to_be16(chan->address << 8); adis_update_scan_mode() 89 adis->tx[0] = ADIS_WRITE_REG(ADIS_REG_PAGE_ID); adis_trigger_handler() 90 adis->tx[1] = 0; adis_trigger_handler() 91 spi_write(adis->spi, adis->tx, 2); adis_trigger_handler()
|
H A D | adis16400_buffer.c | 22 u8 *tx; adis16400_update_scan_mode() local 43 tx = adis->buffer + burst_length; adis16400_update_scan_mode() 44 tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD); adis16400_update_scan_mode() 45 tx[1] = 0; adis16400_update_scan_mode() 47 adis->xfer[0].tx_buf = tx; adis16400_update_scan_mode()
|
/linux-4.1.27/drivers/net/wireless/ath/ath9k/ |
H A D | htc_drv_txrx.c | 58 spin_lock_bh(&priv->tx.tx_lock); ath9k_htc_check_stop_queues() 59 priv->tx.queued_cnt++; ath9k_htc_check_stop_queues() 60 if ((priv->tx.queued_cnt >= ATH9K_HTC_TX_THRESHOLD) && ath9k_htc_check_stop_queues() 61 !(priv->tx.flags & ATH9K_HTC_OP_TX_QUEUES_STOP)) { ath9k_htc_check_stop_queues() 62 priv->tx.flags |= ATH9K_HTC_OP_TX_QUEUES_STOP; ath9k_htc_check_stop_queues() 65 spin_unlock_bh(&priv->tx.tx_lock); ath9k_htc_check_stop_queues() 70 spin_lock_bh(&priv->tx.tx_lock); ath9k_htc_check_wake_queues() 71 if ((priv->tx.queued_cnt < ATH9K_HTC_TX_THRESHOLD) && ath9k_htc_check_wake_queues() 72 (priv->tx.flags & ATH9K_HTC_OP_TX_QUEUES_STOP)) { ath9k_htc_check_wake_queues() 73 priv->tx.flags &= ~ATH9K_HTC_OP_TX_QUEUES_STOP; ath9k_htc_check_wake_queues() 76 spin_unlock_bh(&priv->tx.tx_lock); ath9k_htc_check_wake_queues() 83 spin_lock_bh(&priv->tx.tx_lock); ath9k_htc_tx_get_slot() 84 slot = find_first_zero_bit(priv->tx.tx_slot, MAX_TX_BUF_NUM); ath9k_htc_tx_get_slot() 86 spin_unlock_bh(&priv->tx.tx_lock); ath9k_htc_tx_get_slot() 89 __set_bit(slot, priv->tx.tx_slot); ath9k_htc_tx_get_slot() 90 spin_unlock_bh(&priv->tx.tx_lock); ath9k_htc_tx_get_slot() 97 spin_lock_bh(&priv->tx.tx_lock); ath9k_htc_tx_clear_slot() 98 __clear_bit(slot, priv->tx.tx_slot); ath9k_htc_tx_clear_slot() 99 spin_unlock_bh(&priv->tx.tx_lock); ath9k_htc_tx_clear_slot() 137 epid_queue = &priv->tx.mgmt_ep_queue; get_htc_epid_queue() 139 epid_queue = &priv->tx.cab_ep_queue; get_htc_epid_queue() 141 epid_queue = &priv->tx.data_be_queue; get_htc_epid_queue() 143 epid_queue = &priv->tx.data_bk_queue; get_htc_epid_queue() 145 epid_queue = &priv->tx.data_vi_queue; get_htc_epid_queue() 147 epid_queue = &priv->tx.data_vo_queue; get_htc_epid_queue() 392 spin_lock_bh(&priv->tx.tx_lock); __ath9k_htc_check_tx_aggr() 395 spin_unlock_bh(&priv->tx.tx_lock); __ath9k_htc_check_tx_aggr() 430 spin_lock_bh(&priv->tx.tx_lock); ath9k_htc_check_tx_aggr() 432 spin_unlock_bh(&priv->tx.tx_lock); ath9k_htc_check_tx_aggr() 504 spin_lock_bh(&priv->tx.tx_lock); ath9k_htc_tx_process() 505 if (WARN_ON(--priv->tx.queued_cnt < 0)) ath9k_htc_tx_process() 506 priv->tx.queued_cnt = 0; ath9k_htc_tx_process() 507 spin_unlock_bh(&priv->tx.tx_lock); ath9k_htc_tx_process() 538 spin_lock_bh(&priv->tx.tx_lock); ath9k_htc_tx_drain() 539 priv->tx.flags |= ATH9K_HTC_OP_TX_DRAIN; ath9k_htc_tx_drain() 540 spin_unlock_bh(&priv->tx.tx_lock); ath9k_htc_tx_drain() 550 ath9k_htc_tx_drainq(priv, &priv->tx.mgmt_ep_queue); ath9k_htc_tx_drain() 551 ath9k_htc_tx_drainq(priv, &priv->tx.cab_ep_queue); ath9k_htc_tx_drain() 552 ath9k_htc_tx_drainq(priv, &priv->tx.data_be_queue); ath9k_htc_tx_drain() 553 ath9k_htc_tx_drainq(priv, &priv->tx.data_bk_queue); ath9k_htc_tx_drain() 554 ath9k_htc_tx_drainq(priv, &priv->tx.data_vi_queue); ath9k_htc_tx_drain() 555 ath9k_htc_tx_drainq(priv, &priv->tx.data_vo_queue); ath9k_htc_tx_drain() 556 ath9k_htc_tx_drainq(priv, &priv->tx.tx_failed); ath9k_htc_tx_drain() 568 spin_lock_bh(&priv->tx.tx_lock); ath9k_htc_tx_drain() 569 priv->tx.flags &= ~ATH9K_HTC_OP_TX_DRAIN; ath9k_htc_tx_drain() 570 spin_unlock_bh(&priv->tx.tx_lock); ath9k_htc_tx_drain() 577 spin_lock_bh(&priv->tx.tx_lock); ath9k_tx_failed_tasklet() 578 if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) { ath9k_tx_failed_tasklet() 579 spin_unlock_bh(&priv->tx.tx_lock); ath9k_tx_failed_tasklet() 582 spin_unlock_bh(&priv->tx.tx_lock); ath9k_tx_failed_tasklet() 584 ath9k_htc_tx_drainq(priv, &priv->tx.tx_failed); ath9k_tx_failed_tasklet() 696 skb_queue_tail(&priv->tx.tx_failed, skb); 
ath9k_htc_txep() 788 ath9k_htc_tx_cleanup_queue(priv, &priv->tx.mgmt_ep_queue); ath9k_htc_tx_cleanup_timer() 789 ath9k_htc_tx_cleanup_queue(priv, &priv->tx.cab_ep_queue); ath9k_htc_tx_cleanup_timer() 790 ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_be_queue); ath9k_htc_tx_cleanup_timer() 791 ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_bk_queue); ath9k_htc_tx_cleanup_timer() 792 ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_vi_queue); ath9k_htc_tx_cleanup_timer() 793 ath9k_htc_tx_cleanup_queue(priv, &priv->tx.data_vo_queue); ath9k_htc_tx_cleanup_timer() 798 mod_timer(&priv->tx.cleanup_timer, ath9k_htc_tx_cleanup_timer() 804 skb_queue_head_init(&priv->tx.mgmt_ep_queue); ath9k_tx_init() 805 skb_queue_head_init(&priv->tx.cab_ep_queue); ath9k_tx_init() 806 skb_queue_head_init(&priv->tx.data_be_queue); ath9k_tx_init() 807 skb_queue_head_init(&priv->tx.data_bk_queue); ath9k_tx_init() 808 skb_queue_head_init(&priv->tx.data_vi_queue); ath9k_tx_init() 809 skb_queue_head_init(&priv->tx.data_vo_queue); ath9k_tx_init() 810 skb_queue_head_init(&priv->tx.tx_failed); ath9k_tx_init()
|
H A D | hif_usb.c | 161 spin_lock(&hif_dev->tx.tx_lock); hif_usb_mgmt_cb() 162 if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) { hif_usb_mgmt_cb() 163 spin_unlock(&hif_dev->tx.tx_lock); hif_usb_mgmt_cb() 168 spin_unlock(&hif_dev->tx.tx_lock); hif_usb_mgmt_cb() 278 spin_lock(&hif_dev->tx.tx_lock); hif_usb_tx_cb() 279 if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) { hif_usb_tx_cb() 280 spin_unlock(&hif_dev->tx.tx_lock); hif_usb_tx_cb() 284 spin_unlock(&hif_dev->tx.tx_lock); hif_usb_tx_cb() 299 spin_lock(&hif_dev->tx.tx_lock); hif_usb_tx_cb() 300 list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf); hif_usb_tx_cb() 301 hif_dev->tx.tx_buf_cnt++; hif_usb_tx_cb() 302 if (!(hif_dev->tx.flags & HIF_USB_TX_STOP)) hif_usb_tx_cb() 305 spin_unlock(&hif_dev->tx.tx_lock); hif_usb_tx_cb() 318 if (hif_dev->tx.tx_skb_cnt == 0) __hif_usb_tx() 322 if (list_empty(&hif_dev->tx.tx_buf)) __hif_usb_tx() 325 tx_buf = list_first_entry(&hif_dev->tx.tx_buf, struct tx_buf, list); __hif_usb_tx() 326 list_move_tail(&tx_buf->list, &hif_dev->tx.tx_pending); __hif_usb_tx() 327 hif_dev->tx.tx_buf_cnt--; __hif_usb_tx() 329 tx_skb_cnt = min_t(u16, hif_dev->tx.tx_skb_cnt, MAX_TX_AGGR_NUM); __hif_usb_tx() 332 nskb = __skb_dequeue(&hif_dev->tx.tx_skb_queue); __hif_usb_tx() 337 hif_dev->tx.tx_skb_cnt--; __hif_usb_tx() 368 list_move_tail(&tx_buf->list, &hif_dev->tx.tx_buf); __hif_usb_tx() 369 hif_dev->tx.tx_buf_cnt++; __hif_usb_tx() 384 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); hif_usb_send_tx() 386 if (hif_dev->tx.flags & HIF_USB_TX_STOP) { hif_usb_send_tx() 387 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); hif_usb_send_tx() 392 if (hif_dev->tx.tx_skb_cnt > MAX_TX_BUF_NUM) { hif_usb_send_tx() 393 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); hif_usb_send_tx() 397 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); hif_usb_send_tx() 407 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); hif_usb_send_tx() 411 __skb_queue_tail(&hif_dev->tx.tx_skb_queue, skb); hif_usb_send_tx() 412 hif_dev->tx.tx_skb_cnt++; hif_usb_send_tx() 416 if ((hif_dev->tx.tx_buf_cnt == MAX_TX_URB_NUM) && hif_usb_send_tx() 417 (hif_dev->tx.tx_skb_cnt < 2)) { hif_usb_send_tx() 421 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); hif_usb_send_tx() 433 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); hif_usb_start() 434 hif_dev->tx.flags &= ~HIF_USB_TX_STOP; hif_usb_start() 435 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); hif_usb_start() 444 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); hif_usb_stop() 445 ath9k_skb_queue_complete(hif_dev, &hif_dev->tx.tx_skb_queue, false); hif_usb_stop() 446 hif_dev->tx.tx_skb_cnt = 0; hif_usb_stop() 447 hif_dev->tx.flags |= HIF_USB_TX_STOP; hif_usb_stop() 448 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); hif_usb_stop() 452 &hif_dev->tx.tx_pending, list) { hif_usb_stop() 500 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); hif_usb_sta_drain() 502 skb_queue_walk_safe(&hif_dev->tx.tx_skb_queue, skb, tmp) { hif_usb_sta_drain() 504 __skb_unlink(skb, &hif_dev->tx.tx_skb_queue); hif_usb_sta_drain() 507 hif_dev->tx.tx_skb_cnt--; hif_usb_sta_drain() 512 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); hif_usb_sta_drain() 756 &hif_dev->tx.tx_buf, list) { ath9k_hif_usb_dealloc_tx_urbs() 764 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); ath9k_hif_usb_dealloc_tx_urbs() 765 hif_dev->tx.flags |= HIF_USB_TX_FLUSH; ath9k_hif_usb_dealloc_tx_urbs() 766 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); ath9k_hif_usb_dealloc_tx_urbs() 769 &hif_dev->tx.tx_pending, list) { ath9k_hif_usb_dealloc_tx_urbs() 785 
INIT_LIST_HEAD(&hif_dev->tx.tx_buf); ath9k_hif_usb_alloc_tx_urbs() 786 INIT_LIST_HEAD(&hif_dev->tx.tx_pending); ath9k_hif_usb_alloc_tx_urbs() 787 spin_lock_init(&hif_dev->tx.tx_lock); ath9k_hif_usb_alloc_tx_urbs() 788 __skb_queue_head_init(&hif_dev->tx.tx_skb_queue); ath9k_hif_usb_alloc_tx_urbs() 807 list_add_tail(&tx_buf->list, &hif_dev->tx.tx_buf); ath9k_hif_usb_alloc_tx_urbs() 810 hif_dev->tx.tx_buf_cnt = MAX_TX_URB_NUM; ath9k_hif_usb_alloc_tx_urbs()
|
H A D | mac.h | 242 /* ATH9K_TXDESC_INTREQ forces a tx interrupt to be generated for 243 * the descriptor its marked on. We take a tx interrupt to reap 297 } tx; member in union:ar5416_desc::__anon7695 315 #define ds_ctl2 u.tx.ctl2 316 #define ds_ctl3 u.tx.ctl3 317 #define ds_ctl4 u.tx.ctl4 318 #define ds_ctl5 u.tx.ctl5 319 #define ds_ctl6 u.tx.ctl6 320 #define ds_ctl7 u.tx.ctl7 321 #define ds_ctl8 u.tx.ctl8 322 #define ds_ctl9 u.tx.ctl9 323 #define ds_ctl10 u.tx.ctl10 324 #define ds_ctl11 u.tx.ctl11 326 #define ds_txstatus0 u.tx.status0 327 #define ds_txstatus1 u.tx.status1 328 #define ds_txstatus2 u.tx.status2 329 #define ds_txstatus3 u.tx.status3 330 #define ds_txstatus4 u.tx.status4 331 #define ds_txstatus5 u.tx.status5 332 #define ds_txstatus6 u.tx.status6 333 #define ds_txstatus7 u.tx.status7 334 #define ds_txstatus8 u.tx.status8 335 #define ds_txstatus9 u.tx.status9
|
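mac.h above overlays the tx and rx views of the ar5416 descriptor in a union and hides the member path behind the ds_ctlN / ds_txstatusN macros. Here is a toy, compilable reproduction of that accessor pattern; the field names and widths are illustrative only, not the real descriptor layout.

#include <stdint.h>
#include <stdio.h>

struct demo_desc {
	uint32_t ctl0;
	uint32_t ctl1;
	union {
		struct {
			uint32_t ctl2;
			uint32_t status0;
		} tx;
		struct {
			uint32_t status0;
			uint32_t status1;
		} rx;
	} u;
};

#define ds_ctl2		u.tx.ctl2	/* same style as "#define ds_ctl2 u.tx.ctl2" above */
#define ds_txstatus0	u.tx.status0
#define ds_rxstatus0	u.rx.status0

int main(void)
{
	struct demo_desc d = { 0 };

	d.ds_ctl2 = 0x1234;		/* the macro expands this to d.u.tx.ctl2 */
	d.ds_txstatus0 = 0xdead;
	printf("ctl2=0x%x txstatus0=0x%x\n",
	       (unsigned)d.ds_ctl2, (unsigned)d.ds_txstatus0);
	return 0;
}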
H A D | htc_drv_debug.c | 296 spin_lock_bh(&priv->tx.tx_lock); read_file_slot() 300 MAX_TX_BUF_NUM, priv->tx.tx_slot, read_file_slot() 301 bitmap_weight(priv->tx.tx_slot, MAX_TX_BUF_NUM)); read_file_slot() 302 spin_unlock_bh(&priv->tx.tx_lock); read_file_slot() 321 "Mgmt endpoint", skb_queue_len(&priv->tx.mgmt_ep_queue)); read_file_queue() 324 "Cab endpoint", skb_queue_len(&priv->tx.cab_ep_queue)); read_file_queue() 327 "Data BE endpoint", skb_queue_len(&priv->tx.data_be_queue)); read_file_queue() 330 "Data BK endpoint", skb_queue_len(&priv->tx.data_bk_queue)); read_file_queue() 333 "Data VI endpoint", skb_queue_len(&priv->tx.data_vi_queue)); read_file_queue() 336 "Data VO endpoint", skb_queue_len(&priv->tx.data_vo_queue)); read_file_queue() 339 "Failed queue", skb_queue_len(&priv->tx.tx_failed)); read_file_queue() 341 spin_lock_bh(&priv->tx.tx_lock); read_file_queue() 343 "Queued count", priv->tx.queued_cnt); read_file_queue() 344 spin_unlock_bh(&priv->tx.tx_lock); read_file_queue()
|
H A D | xmit.c | 166 txq = sc->tx.txq_map[q]; ath_txq_skb_done() 171 txq->pending_frames < sc->tx.txq_max_pending[q]) { ath_txq_skb_done() 365 spin_lock_bh(&sc->tx.txbuflock); ath_tx_get_buffer() 367 if (unlikely(list_empty(&sc->tx.txbuf))) { ath_tx_get_buffer() 368 spin_unlock_bh(&sc->tx.txbuflock); ath_tx_get_buffer() 372 bf = list_first_entry(&sc->tx.txbuf, struct ath_buf, list); ath_tx_get_buffer() 375 spin_unlock_bh(&sc->tx.txbuflock); ath_tx_get_buffer() 382 spin_lock_bh(&sc->tx.txbuflock); ath_tx_return_buffer() 383 list_add_tail(&bf->list, &sc->tx.txbuf); ath_tx_return_buffer() 384 spin_unlock_bh(&sc->tx.txbuflock); ath_tx_return_buffer() 500 * The hardware occasionally sends a tx status for the wrong TID. ath_tx_complete_aggr() 610 * Update tx baw and complete the ath_tx_complete_aggr() 612 * run out of tx buf. ath_tx_complete_aggr() 769 frmlen = sc->tx.max_aggr_framelen[q][modeidx][rates[i].idx]; ath_lookup_rate() 1087 cur_ht20 = sc->tx.max_aggr_framelen[queue][MCS_HT20]; ath_update_max_aggr_framelen() 1088 cur_ht20_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT20_SGI]; ath_update_max_aggr_framelen() 1089 cur_ht40 = sc->tx.max_aggr_framelen[queue][MCS_HT40]; ath_update_max_aggr_framelen() 1090 cur_ht40_sgi = sc->tx.max_aggr_framelen[queue][MCS_HT40_SGI]; ath_update_max_aggr_framelen() 1342 txq == sc->tx.uapsdq) ath_tx_fill_desc() 1631 struct ath_txq *txq = sc->tx.uapsdq; ath9k_release_buffered_frames() 1650 bf = ath_tx_get_tid_subframe(sc, sc->tx.uapsdq, tid, &tid_q); ath9k_release_buffered_frames() 1713 * We mark tx descriptors to receive a DESC interrupt ath_txq_setup() 1714 * when a tx queue gets deep; otherwise waiting for the ath_txq_setup() 1719 * The only potential downside is if the tx queue backs ath_txq_setup() 1721 * due to a lack of tx descriptors. ath_txq_setup() 1739 * normally on parts with too few tx queues ath_txq_setup() 1744 struct ath_txq *txq = &sc->tx.txq[axq_qnum]; ath_txq_setup() 1755 sc->tx.txqsetup |= 1<<axq_qnum; ath_txq_setup() 1761 return &sc->tx.txq[axq_qnum]; ath_txq_setup() 1771 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum); ath_txq_update() 1879 if (!sc->tx.txq[i].axq_depth) ath_drain_all_txq() 1882 if (ath9k_hw_numtxpending(ah, sc->tx.txq[i].axq_qnum)) ath_drain_all_txq() 1898 txq = &sc->tx.txq[i]; ath_drain_all_txq() 1909 sc->tx.txqsetup &= ~(1<<txq->axq_qnum); ath_tx_cleanupq() 2000 txq = sc->tx.txq_map[i]; ath_txq_schedule_all() 2335 * At this point, the vif, hw_key and sta pointers in the tx control ath_tx_start() 2342 if (txq == sc->tx.txq_map[q]) { ath_tx_start() 2344 if (++txq->pending_frames > sc->tx.txq_max_pending[q] && ath_tx_start() 2373 txq = sc->tx.uapsdq; ath_tx_start() 2578 spin_lock_irqsave(&sc->tx.txbuflock, flags); ath_tx_complete_buf() 2579 list_splice_tail_init(bf_q, &sc->tx.txbuf); ath_tx_complete_buf() 2580 spin_unlock_irqrestore(&sc->tx.txbuflock, flags); ath_tx_complete_buf() 2648 ath_dbg(common, QUEUE, "tx queue %d (%x), link %p\n", ath_tx_processq() 2721 ath_tx_processq(sc, &sc->tx.txq[i]); ath_tx_tasklet() 2744 ath_dbg(common, XMIT, "Error processing tx status\n"); ath_tx_edma_tasklet() 2762 txq = &sc->tx.txq[ts.qid]; ath_tx_edma_tasklet() 2844 spin_lock_init(&sc->tx.txbuflock); ath_tx_init() 2846 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf, ath_tx_init() 2847 "tx", nbufs, 1, 1); ath_tx_init() 2850 "Failed to allocate tx descriptors: %d\n", error); ath_tx_init() 2896 ac->txq = sc->tx.txq_map[acno]; ath_tx_node_init()
|
H A D | wmi.c | 170 spin_lock_bh(&priv->tx.tx_lock); ath9k_wmi_event_tasklet() 171 if (priv->tx.flags & ATH9K_HTC_OP_TX_DRAIN) { ath9k_wmi_event_tasklet() 172 spin_unlock_bh(&priv->tx.tx_lock); ath9k_wmi_event_tasklet() 175 spin_unlock_bh(&priv->tx.tx_lock); ath9k_wmi_event_tasklet() 260 connect.ep_callbacks.tx = ath9k_wmi_ctrl_tx; ath9k_wmi_connect()
|
/linux-4.1.27/drivers/media/platform/vivid/ |
H A D | Makefile | 3 vivid-radio-rx.o vivid-radio-tx.o vivid-radio-common.o \
|
/linux-4.1.27/arch/mips/include/asm/mach-bcm63xx/ |
H A D | bcm63xx_iudma.h | 7 * rx/tx dma descriptor
|
/linux-4.1.27/include/uapi/linux/ |
H A D | atm_eni.h | 14 int tx,rx; /* values are in percent and must be > 100 */ member in struct:eni_multipliers
|
H A D | tipc_config.h | 74 #define TIPC_CMD_NOOP 0x0000 /* tx none, rx none */ 75 #define TIPC_CMD_GET_NODES 0x0001 /* tx net_addr, rx node_info(s) */ 76 #define TIPC_CMD_GET_MEDIA_NAMES 0x0002 /* tx none, rx media_name(s) */ 77 #define TIPC_CMD_GET_BEARER_NAMES 0x0003 /* tx none, rx bearer_name(s) */ 78 #define TIPC_CMD_GET_LINKS 0x0004 /* tx net_addr, rx link_info(s) */ 79 #define TIPC_CMD_SHOW_NAME_TABLE 0x0005 /* tx name_tbl_query, rx ultra_string */ 80 #define TIPC_CMD_SHOW_PORTS 0x0006 /* tx none, rx ultra_string */ 81 #define TIPC_CMD_SHOW_LINK_STATS 0x000B /* tx link_name, rx ultra_string */ 82 #define TIPC_CMD_SHOW_STATS 0x000F /* tx unsigned, rx ultra_string */ 91 #define TIPC_CMD_GET_REMOTE_MNG 0x4003 /* tx none, rx unsigned */ 92 #define TIPC_CMD_GET_MAX_PORTS 0x4004 /* tx none, rx unsigned */ 99 #define TIPC_CMD_GET_NETID 0x400B /* tx none, rx unsigned */ 101 #define TIPC_CMD_ENABLE_BEARER 0x4101 /* tx bearer_config, rx none */ 102 #define TIPC_CMD_DISABLE_BEARER 0x4102 /* tx bearer_name, rx none */ 103 #define TIPC_CMD_SET_LINK_TOL 0x4107 /* tx link_config, rx none */ 104 #define TIPC_CMD_SET_LINK_PRI 0x4108 /* tx link_config, rx none */ 105 #define TIPC_CMD_SET_LINK_WINDOW 0x4109 /* tx link_config, rx none */ 108 #define TIPC_CMD_RESET_LINK_STATS 0x410C /* tx link_name, rx none */ 116 #define TIPC_CMD_SET_NODE_ADDR 0x8001 /* tx net_addr, rx none */ 117 #define TIPC_CMD_SET_REMOTE_MNG 0x8003 /* tx unsigned, rx none */ 118 #define TIPC_CMD_SET_MAX_PORTS 0x8004 /* tx unsigned, rx none */ 125 #define TIPC_CMD_SET_NETID 0x800B /* tx unsigned, rx none */ 133 #define TIPC_CMD_NOT_NET_ADMIN 0xC001 /* tx none, rx none */
|
/linux-4.1.27/arch/arm/mach-omap1/ |
H A D | mcbsp.c | 110 .name = "tx", 120 .name = "tx", 137 .name = "tx", 147 .name = "tx", 187 .name = "tx", 197 .name = "tx", 214 .name = "tx", 224 .name = "tx", 241 .name = "tx", 251 .name = "tx", 294 .name = "tx", 304 .name = "tx", 321 .name = "tx", 331 .name = "tx", 348 .name = "tx", 358 .name = "tx",
|
/linux-4.1.27/drivers/rtc/ |
H A D | rtc-mcp795.c | 46 u8 tx[2]; mcp795_rtcc_read() local 48 tx[0] = MCP795_READ; mcp795_rtcc_read() 49 tx[1] = addr; mcp795_rtcc_read() 50 ret = spi_write_then_read(spi, tx, sizeof(tx), buf, count); mcp795_rtcc_read() 63 u8 tx[2 + count]; mcp795_rtcc_write() local 65 tx[0] = MCP795_WRITE; mcp795_rtcc_write() 66 tx[1] = addr; mcp795_rtcc_write() 67 memcpy(&tx[2], data, count); mcp795_rtcc_write() 69 ret = spi_write(spi, tx, 2 + count); mcp795_rtcc_write()
|
/linux-4.1.27/drivers/media/radio/wl128x/ |
H A D | fmdrv_tx.c | 224 fmdbg("tx: mute mode %d\n", mute_mode_toset); fm_tx_set_mute_mode() 238 struct fmtx_data *tx = &fmdev->tx_data; set_audio_io() local 243 payload = tx->audio_io; set_audio_io() 256 struct fmtx_data *tx = &fmdev->tx_data; enable_xmit() local 286 tx->xmit_state = new_xmit_state; enable_xmit() 295 struct fmtx_data *tx = &fmdev->tx_data; fm_tx_set_pwr_lvl() local 300 fmdbg("tx: pwr_level_to_set %ld\n", (long int)new_pwr_lvl); fm_tx_set_pwr_lvl() 304 tx->pwr_lvl = new_pwr_lvl; fm_tx_set_pwr_lvl() 322 tx->pwr_lvl = new_pwr_lvl; fm_tx_set_pwr_lvl() 333 struct fmtx_data *tx = &fmdev->tx_data; fm_tx_set_preemph_filter() local 357 tx->preemph = payload; fm_tx_set_preemph_filter() 385 struct fmtx_data *tx = &fmdev->tx_data; fm_tx_set_freq() local 401 tx->tx_frq = (unsigned long)freq_to_set; fm_tx_set_freq() 402 fmdbg("tx: freq_to_set %ld\n", (long int)tx->tx_frq); fm_tx_set_freq() 413 fm_tx_set_pwr_lvl(fmdev, tx->pwr_lvl); fm_tx_set_freq() 414 fm_tx_set_preemph_filter(fmdev, tx->preemph); fm_tx_set_freq() 416 tx->audio_io = 0x01; /* I2S */ fm_tx_set_freq() 421 tx->aud_mode = FM_STEREO_MODE; fm_tx_set_freq() 422 tx->rds.flag = FM_RDS_DISABLE; fm_tx_set_freq()
|
/linux-4.1.27/drivers/net/ethernet/freescale/ |
H A D | ucc_geth_ethtool.c | 41 "tx-64-frames", 42 "tx-65-127-frames", 43 "tx-128-255-frames", 47 "tx-bytes-ok", 48 "tx-pause-frames", 49 "tx-multicast-frames", 50 "tx-broadcast-frames", 62 "tx-single-collision", 63 "tx-multiple-collision", 64 "tx-late-collsion", 65 "tx-aborted-frames", 66 "tx-lost-frames", 67 "tx-carrier-sense-errors", 68 "tx-frames-ok", 69 "tx-excessive-differ-frames", 70 "tx-256-511-frames", 71 "tx-512-1023-frames", 72 "tx-1024-1518-frames", 73 "tx-jumbo-frames",
|
H A D | gianfar_ethtool.c | 73 "tx-babbling-errors", 74 "tx-underrun-errors", 76 "tx-timeout-errors", 77 "tx-rx-64-frames", 78 "tx-rx-65-127-frames", 79 "tx-rx-128-255-frames", 80 "tx-rx-256-511-frames", 81 "tx-rx-512-1023-frames", 82 "tx-rx-1024-1518-frames", 83 "tx-rx-1519-1522-good-vlan", 101 "tx-byte-counter", 102 "tx-packets", 103 "tx-multicast-packets", 104 "tx-broadcast-packets", 105 "tx-pause-control-frames", 106 "tx-deferral-packets", 107 "tx-excessive-deferral-packets", 108 "tx-single-collision-packets", 109 "tx-multiple-collision-packets", 110 "tx-late-collision-packets", 111 "tx-excessive-collision-packets", 112 "tx-total-collision", 114 "tx-dropped-frames", 115 "tx-jabber-frames", 116 "tx-fcs-errors", 117 "tx-control-frames", 118 "tx-oversize-frames", 119 "tx-undersize-frames", 120 "tx-fragmented-frames", 334 * normal {rx,tx}_* coalescing parameters are used. gfar_gcoalesce() 338 * is above pkt_rate_high, the {rx,tx}_*_high parameters are gfar_gcoalesce() 416 /* Set up tx coalescing */ gfar_scoalesce()
|
/linux-4.1.27/drivers/usb/musb/ |
H A D | tusb6010_omap.c | 39 u8 tx; member in struct:tusb_omap_dma_ch 81 if (chdat->tx) tusb_omap_use_shared_dmareq() 104 * See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in 131 chdat->epnum, chdat->tx ? "tx" : "rx", tusb_omap_dma_cb() 134 if (chdat->tx) tusb_omap_dma_cb() 144 chdat->tx ? "tx" : "rx", chdat->ch, tusb_omap_dma_cb() 160 if (chdat->tx) { tusb_omap_dma_cb() 184 if (!chdat->tx) tusb_omap_dma_cb() 185 musb_dma_completion(musb, chdat->epnum, chdat->tx); tusb_omap_dma_cb() 187 /* We must terminate short tx transfers manually by setting TXPKTRDY. tusb_omap_dma_cb() 195 if (chdat->tx) { tusb_omap_dma_cb() 196 dev_dbg(musb->controller, "terminating short tx packet\n"); tusb_omap_dma_cb() 244 if (chdat->tx) tusb_omap_dma_program() 252 chdat->tx ? "tx" : "rx", chdat->ch, tusb_omap_dma_program() 294 if (chdat->tx) tusb_omap_dma_program() 314 chdat->epnum, chdat->tx ? "tx" : "rx", tusb_omap_dma_program() 321 if (chdat->tx) { tusb_omap_dma_program() 358 chdat->epnum, chdat->tx ? "tx" : "rx", tusb_omap_dma_program() 371 if (chdat->tx) { tusb_omap_dma_program() 392 if (chdat->tx) { tusb_omap_dma_program() 458 if (chdat->tx) tusb_omap_dma_allocate_dmareq() 488 u8 tx) tusb_omap_dma_allocate() 504 if (tx) tusb_omap_dma_allocate() 512 dev_dbg(musb->controller, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx"); tusb_omap_dma_allocate() 529 if (tx) { tusb_omap_dma_allocate() 530 chdat->tx = 1; tusb_omap_dma_allocate() 533 chdat->tx = 0; tusb_omap_dma_allocate() 574 chdat->tx ? "tx" : "rx", tusb_omap_dma_allocate() 601 if (chdat->tx) tusb_omap_dma_release() 608 if (chdat->tx) tusb_omap_dma_release() 486 tusb_omap_dma_allocate(struct dma_controller *c, struct musb_hw_ep *hw_ep, u8 tx) tusb_omap_dma_allocate() argument
|
H A D | cppi_dma.c | 32 * 004001ff 00000001 .. 8feff860) Host was just getting NAKed on tx 95 /* zero out entire tx state RAM entry for the channel */ cppi_reset_tx() 96 static void cppi_reset_tx(struct cppi_tx_stateram __iomem *tx, u32 ptr) cppi_reset_tx() argument 98 musb_writel(&tx->tx_head, 0, 0); cppi_reset_tx() 99 musb_writel(&tx->tx_buf, 0, 0); cppi_reset_tx() 100 musb_writel(&tx->tx_current, 0, 0); cppi_reset_tx() 101 musb_writel(&tx->tx_buf_current, 0, 0); cppi_reset_tx() 102 musb_writel(&tx->tx_info, 0, 0); cppi_reset_tx() 103 musb_writel(&tx->tx_rem_len, 0, 0); cppi_reset_tx() 104 /* musb_writel(&tx->tx_dummy, 0, 0); */ cppi_reset_tx() 105 musb_writel(&tx->tx_complete, 0, ptr); cppi_reset_tx() 159 for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { cppi_controller_start() 160 controller->tx[i].transmit = true; cppi_controller_start() 161 controller->tx[i].index = i; cppi_controller_start() 169 for (i = 0; i < ARRAY_SIZE(controller->tx); i++) cppi_controller_start() 170 cppi_pool_init(controller, controller->tx + i); cppi_controller_start() 177 /* initialise tx/rx channel head pointers to zero */ cppi_controller_start() 178 for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { cppi_controller_start() 179 struct cppi_channel *tx_ch = controller->tx + i; cppi_controller_start() 180 struct cppi_tx_stateram __iomem *tx; cppi_controller_start() local 184 tx = tibase + DAVINCI_TXCPPI_STATERAM_OFFSET(i); cppi_controller_start() 185 tx_ch->state_ram = tx; cppi_controller_start() 186 cppi_reset_tx(tx, 0); cppi_controller_start() 205 /* enable tx/rx CPPI control */ cppi_controller_start() 236 for (i = 0; i < ARRAY_SIZE(controller->tx); i++) { cppi_controller_stop() 238 controller->tx[i].last_processed = NULL; cppi_controller_stop() 239 cppi_pool_free(controller->tx + i); cppi_controller_stop() 248 /*disable tx/rx cppi */ cppi_controller_stop() 299 if (index >= ARRAY_SIZE(controller->tx)) { cppi_channel_allocate() 303 cppi_ch = controller->tx + index; cppi_channel_allocate() 384 struct cppi_tx_stateram __iomem *tx = c->state_ram; cppi_dump_tx() local 396 musb_readl(&tx->tx_head, 0), cppi_dump_tx() 397 musb_readl(&tx->tx_buf, 0), cppi_dump_tx() 398 musb_readl(&tx->tx_current, 0), cppi_dump_tx() 399 musb_readl(&tx->tx_buf_current, 0), cppi_dump_tx() 401 musb_readl(&tx->tx_info, 0), cppi_dump_tx() 402 musb_readl(&tx->tx_rem_len, 0), cppi_dump_tx() 404 musb_readl(&tx->tx_complete, 0) cppi_dump_tx() 561 cppi_next_tx_segment(struct musb *musb, struct cppi_channel *tx) cppi_next_tx_segment() argument 563 unsigned maxpacket = tx->maxpacket; cppi_next_tx_segment() 564 dma_addr_t addr = tx->buf_dma + tx->offset; cppi_next_tx_segment() 565 size_t length = tx->buf_len - tx->offset; cppi_next_tx_segment() 569 struct cppi_tx_stateram __iomem *tx_ram = tx->state_ram; cppi_next_tx_segment() 594 tx->index, cppi_next_tx_segment() 600 cppi_rndis_update(tx, 0, musb->ctrl_base, rndis); cppi_next_tx_segment() 608 bd = tx->freelist; cppi_next_tx_segment() 609 tx->head = bd; cppi_next_tx_segment() 610 tx->last_processed = NULL; cppi_next_tx_segment() 626 bd->hw_bufp = tx->buf_dma + tx->offset; cppi_next_tx_segment() 631 if ((tx->offset + maxpacket) <= tx->buf_len) { cppi_next_tx_segment() 632 tx->offset += maxpacket; cppi_next_tx_segment() 640 partial_len = tx->buf_len - tx->offset; cppi_next_tx_segment() 641 tx->offset = tx->buf_len; cppi_next_tx_segment() 655 tx->tail = bd; cppi_next_tx_segment() 663 musb_writel(&tx_ram->tx_head, 0, (u32)tx->freelist->dma); cppi_next_tx_segment() 665 cppi_dump_tx(5, tx, "/S"); 
cppi_next_tx_segment() 1148 u32 rx, tx; cppi_interrupt() local 1158 tx = musb_readl(tibase, DAVINCI_TXCPPI_MASKED_REG); cppi_interrupt() 1161 if (!tx && !rx) { cppi_interrupt() 1167 dev_dbg(musb->controller, "CPPI IRQ Tx%x Rx%x\n", tx, rx); cppi_interrupt() 1170 for (index = 0; tx; tx = tx >> 1, index++) { cppi_interrupt() 1176 if (!(tx & 1)) cppi_interrupt() 1179 tx_ch = cppi->tx + index; cppi_interrupt() 1454 /* REVISIT tx side _should_ clean up the same way cppi_channel_abort()
|
/linux-4.1.27/drivers/net/wireless/zd1211rw/ |
H A D | zd_usb.c | 469 /* USB_INT_ID_RETRY_FAILED triggered by tx-urb submit can override int_urb_complete() 894 struct zd_usb_tx *tx = &usb->tx; zd_usb_disable_tx() local 897 atomic_set(&tx->enabled, 0); zd_usb_disable_tx() 899 /* kill all submitted tx-urbs */ zd_usb_disable_tx() 900 usb_kill_anchored_urbs(&tx->submitted); zd_usb_disable_tx() 902 spin_lock_irqsave(&tx->lock, flags); zd_usb_disable_tx() 903 WARN_ON(!skb_queue_empty(&tx->submitted_skbs)); zd_usb_disable_tx() 904 WARN_ON(tx->submitted_urbs != 0); zd_usb_disable_tx() 905 tx->submitted_urbs = 0; zd_usb_disable_tx() 906 spin_unlock_irqrestore(&tx->lock, flags); zd_usb_disable_tx() 923 struct zd_usb_tx *tx = &usb->tx; zd_usb_enable_tx() local 925 spin_lock_irqsave(&tx->lock, flags); zd_usb_enable_tx() 926 atomic_set(&tx->enabled, 1); zd_usb_enable_tx() 927 tx->submitted_urbs = 0; zd_usb_enable_tx() 929 tx->stopped = 0; zd_usb_enable_tx() 930 spin_unlock_irqrestore(&tx->lock, flags); zd_usb_enable_tx() 935 struct zd_usb_tx *tx = &usb->tx; tx_dec_submitted_urbs() local 938 spin_lock_irqsave(&tx->lock, flags); tx_dec_submitted_urbs() 939 --tx->submitted_urbs; tx_dec_submitted_urbs() 940 if (tx->stopped && tx->submitted_urbs <= ZD_USB_TX_LOW) { tx_dec_submitted_urbs() 942 tx->stopped = 0; tx_dec_submitted_urbs() 944 spin_unlock_irqrestore(&tx->lock, flags); tx_dec_submitted_urbs() 949 struct zd_usb_tx *tx = &usb->tx; tx_inc_submitted_urbs() local 952 spin_lock_irqsave(&tx->lock, flags); tx_inc_submitted_urbs() 953 ++tx->submitted_urbs; tx_inc_submitted_urbs() 954 if (!tx->stopped && tx->submitted_urbs > ZD_USB_TX_HIGH) { tx_inc_submitted_urbs() 956 tx->stopped = 1; tx_inc_submitted_urbs() 958 spin_unlock_irqrestore(&tx->lock, flags); tx_inc_submitted_urbs() 974 struct zd_usb_tx *tx; tx_urb_complete() local 983 tx = &usb->tx; tx_urb_complete() 1001 skb_unlink(skb, &usb->tx.submitted_skbs); tx_urb_complete() 1007 usb_anchor_urb(urb, &tx->submitted); tx_urb_complete() 1034 struct zd_usb_tx *tx = &usb->tx; zd_usb_tx() local 1036 if (!atomic_read(&tx->enabled)) { zd_usb_tx() 1051 skb_queue_tail(&tx->submitted_skbs, skb); zd_usb_tx() 1052 usb_anchor_urb(urb, &tx->submitted); zd_usb_tx() 1058 skb_unlink(skb, &tx->submitted_skbs); zd_usb_tx() 1071 struct zd_usb_tx *tx = &usb->tx; zd_tx_timeout() local 1072 struct sk_buff_head *q = &tx->submitted_skbs; zd_tx_timeout() 1096 container_of(work, struct zd_usb, tx.watchdog_work.work); zd_tx_watchdog_handler() 1097 struct zd_usb_tx *tx = &usb->tx; zd_tx_watchdog_handler() local 1099 if (!atomic_read(&tx->enabled) || !tx->watchdog_enabled) zd_tx_watchdog_handler() 1112 queue_delayed_work(zd_workqueue, &tx->watchdog_work, zd_tx_watchdog_handler() 1118 struct zd_usb_tx *tx = &usb->tx; zd_tx_watchdog_enable() local 1120 if (!tx->watchdog_enabled) { zd_tx_watchdog_enable() 1122 queue_delayed_work(zd_workqueue, &tx->watchdog_work, zd_tx_watchdog_enable() 1124 tx->watchdog_enabled = 1; zd_tx_watchdog_enable() 1130 struct zd_usb_tx *tx = &usb->tx; zd_tx_watchdog_disable() local 1132 if (tx->watchdog_enabled) { zd_tx_watchdog_disable() 1134 tx->watchdog_enabled = 0; zd_tx_watchdog_disable() 1135 cancel_delayed_work_sync(&tx->watchdog_work); zd_tx_watchdog_disable() 1198 struct zd_usb_tx *tx = &usb->tx; init_usb_tx() local 1200 spin_lock_init(&tx->lock); init_usb_tx() 1201 atomic_set(&tx->enabled, 0); init_usb_tx() 1202 tx->stopped = 0; init_usb_tx() 1203 skb_queue_head_init(&tx->submitted_skbs); init_usb_tx() 1204 init_usb_anchor(&tx->submitted); init_usb_tx() 1205 tx->submitted_urbs = 0; init_usb_tx() 
1206 tx->watchdog_enabled = 0; init_usb_tx() 1207 INIT_DELAYED_WORK(&tx->watchdog_work, zd_tx_watchdog_handler); init_usb_tx()
|
/linux-4.1.27/include/linux/dma/ |
H A D | ipu-dma.h | 150 unsigned int sg_len; /* tx-descriptor. */ 162 struct idmac_tx_desc *desc; /* allocated tx-descriptors */ 164 struct list_head free_list; /* free tx-descriptors */ 165 struct list_head queue; /* queued tx-descriptors */ 174 #define to_tx_desc(tx) container_of(tx, struct idmac_tx_desc, txd)
|
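to_tx_desc() above is the usual container_of() idiom: given a pointer to the embedded dma_async_tx_descriptor (the txd member), it recovers the enclosing idmac_tx_desc by subtracting the member offset. A self-contained illustration with stand-in struct names:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct txd {			/* stand-in for dma_async_tx_descriptor */
	int cookie;
};

struct tx_desc {		/* stand-in for idmac_tx_desc */
	int sg_len;
	struct txd txd;		/* embedded member, as in ipu-dma.h */
};

#define to_tx_desc(tx)	container_of(tx, struct tx_desc, txd)

int main(void)
{
	struct tx_desc desc = { .sg_len = 4, .txd = { .cookie = 7 } };
	struct txd *inner = &desc.txd;

	/* recovers the outer descriptor from the embedded member */
	printf("outer sg_len = %d\n", to_tx_desc(inner)->sg_len);
	return 0;
}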
/linux-4.1.27/sound/firewire/dice/ |
H A D | dice-proc.c | 61 "global", "tx", "rx", "ext_sync", "unused2" dice_proc_read() 99 } tx; dice_proc_read() member in union:__anon14355 173 quadlets = min_t(u32, tx_rx_header.size, sizeof(buf.tx) / 4); dice_proc_read() 175 if (dice_proc_read_mem(dice, &buf.tx, sections[2] + 2 + dice_proc_read() 179 snd_iprintf(buffer, "tx %u:\n", stream); dice_proc_read() 180 snd_iprintf(buffer, " iso channel: %d\n", (int)buf.tx.iso); dice_proc_read() 182 buf.tx.number_audio); dice_proc_read() 183 snd_iprintf(buffer, " midi ports: %u\n", buf.tx.number_midi); dice_proc_read() 184 snd_iprintf(buffer, " speed: S%u\n", 100u << buf.tx.speed); dice_proc_read() 186 dice_proc_fixup_string(buf.tx.names, TX_NAMES_SIZE); dice_proc_read() 187 snd_iprintf(buffer, " names: %s\n", buf.tx.names); dice_proc_read() 191 buf.tx.ac3_caps); dice_proc_read() 193 buf.tx.ac3_enable); dice_proc_read()
|
/linux-4.1.27/drivers/net/wireless/hostap/ |
H A D | hostap_80211_tx.c | 388 struct hostap_tx_data tx; hostap_master_start_xmit() local 397 tx.skb = skb; hostap_master_start_xmit() 398 tx.sta_ptr = NULL; hostap_master_start_xmit() 413 tx.crypt = local->crypt_info.crypt[local->crypt_info.tx_keyidx]; hostap_master_start_xmit() 414 tx.host_encrypt = 1; hostap_master_start_xmit() 416 tx.crypt = NULL; hostap_master_start_xmit() 417 tx.host_encrypt = 0; hostap_master_start_xmit() 432 tx_ret = hostap_handle_sta_tx(local, &tx); hostap_master_start_xmit() 433 skb = tx.skb; hostap_master_start_xmit() 482 tx.crypt = NULL; hostap_master_start_xmit() 485 if (local->ieee_802_1x && meta->ethertype == ETH_P_PAE && tx.crypt && hostap_master_start_xmit() 490 tx.crypt = NULL; /* no encryption for IEEE 802.1X frames */ hostap_master_start_xmit() 493 if (tx.crypt && (!tx.crypt->ops || !tx.crypt->ops->encrypt_mpdu)) hostap_master_start_xmit() 494 tx.crypt = NULL; hostap_master_start_xmit() 495 else if ((tx.crypt || hostap_master_start_xmit() 514 if (tx.crypt) { hostap_master_start_xmit() 515 skb = hostap_tx_encrypt(skb, tx.crypt); hostap_master_start_xmit() 534 if (local->func->tx == NULL || local->func->tx(skb, dev)) { hostap_master_start_xmit() 547 if (tx.sta_ptr) hostap_master_start_xmit() 548 hostap_handle_sta_release(tx.sta_ptr); hostap_master_start_xmit()
|
/linux-4.1.27/drivers/ipack/devices/ |
H A D | ipoctal.h | 26 * @tx: Number of transmitted bytes 34 unsigned long tx; member in struct:ipoctal_stats
|
/linux-4.1.27/arch/arm/mach-omap2/ |
H A D | omap_hwmod_2xxx_3xxx_ipblock_data.c | 52 { .name = "tx", .dma_req = 49, }, 58 { .name = "tx", .dma_req = 51, }, 64 { .name = "tx", .dma_req = 53, }, 69 { .name = "tx", .dma_req = 27 }, 75 { .name = "tx", .dma_req = 29 }, 102 { .name = "tx", .dma_req = 31 }, 108 { .name = "tx", .dma_req = 33 }, 114 { .name = "tx", .dma_req = 17 },
|
/linux-4.1.27/drivers/net/wireless/iwlwifi/ |
H A D | Makefile | 9 iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
|
/linux-4.1.27/drivers/staging/octeon/ |
H A D | Makefile | 21 octeon-ethernet-y += ethernet-tx.o
|
/linux-4.1.27/include/linux/ |
H A D | serial.h | 21 __u32 cts, dsr, rng, dcd, tx, rx; member in struct:async_icount
|
H A D | async_tx.h | 95 * @tx: descriptor handle to retrieve hardware context 101 static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx) async_tx_issue_pending() argument 103 if (likely(tx)) { async_tx_issue_pending() 104 struct dma_chan *chan = tx->chan; async_tx_issue_pending() 125 static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx) async_tx_issue_pending() argument 160 struct dma_async_tx_descriptor *tx, init_async_submit() 165 args->depend_tx = tx; init_async_submit() 171 void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx, 207 void async_tx_quiesce(struct dma_async_tx_descriptor **tx); 159 init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags, struct dma_async_tx_descriptor *tx, dma_async_tx_callback cb_fn, void *cb_param, addr_conv_t *scribble) init_async_submit() argument
|
H A D | scc.h | 73 struct sk_buff_head tx_queue; /* next tx buffer */ 78 struct timer_list tx_t; /* tx timer for this channel */ 79 struct timer_list tx_wdog; /* tx watchdogs */
|
H A D | dmaengine.h | 424 * this tx is sitting on a dependency list 443 dma_cookie_t (*tx_submit)(struct dma_async_tx_descriptor *tx); 455 static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, dma_set_unmap() argument 459 tx->unmap = unmap; dma_set_unmap() 466 static inline void dma_set_unmap(struct dma_async_tx_descriptor *tx, dma_set_unmap() argument 480 static inline void dma_descriptor_unmap(struct dma_async_tx_descriptor *tx) dma_descriptor_unmap() argument 482 if (tx->unmap) { dma_descriptor_unmap() 483 dmaengine_unmap_put(tx->unmap); dma_descriptor_unmap() 484 tx->unmap = NULL; dma_descriptor_unmap() 912 void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx, 915 static inline void async_tx_ack(struct dma_async_tx_descriptor *tx) async_tx_ack() argument 917 tx->flags |= DMA_CTRL_ACK; async_tx_ack() 920 static inline void async_tx_clear_ack(struct dma_async_tx_descriptor *tx) async_tx_clear_ack() argument 922 tx->flags &= ~DMA_CTRL_ACK; async_tx_clear_ack() 925 static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx) async_tx_test_ack() argument 927 return (tx->flags & DMA_CTRL_ACK) == DMA_CTRL_ACK; async_tx_test_ack() 930 #define dma_cap_set(tx, mask) __dma_cap_set((tx), &(mask)) 937 #define dma_cap_clear(tx, mask) __dma_cap_clear((tx), &(mask)) 950 #define dma_has_cap(tx, mask) __dma_has_cap((tx), &(mask)) 1032 enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx); 1050 static inline enum dma_status dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx) dma_wait_for_async_tx() argument 1086 void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
|
/linux-4.1.27/drivers/mfd/ |
H A D | ipaq-micro.c | 36 struct ipaq_micro_txdev *tx = µ->tx; ipaq_micro_trigger_tx() local 43 tx->buf[bp++] = CHAR_SOF; ipaq_micro_trigger_tx() 46 tx->buf[bp++] = checksum; ipaq_micro_trigger_tx() 49 tx->buf[bp++] = msg->tx_data[i]; ipaq_micro_trigger_tx() 53 tx->buf[bp++] = checksum; ipaq_micro_trigger_tx() 54 tx->len = bp; ipaq_micro_trigger_tx() 55 tx->index = 0; ipaq_micro_trigger_tx() 57 tx->buf, tx->len, true); ipaq_micro_trigger_tx() 291 struct ipaq_micro_txdev *tx = µ->tx; micro_tx_chars() local 294 while ((tx->index < tx->len) && micro_tx_chars() 296 writel(tx->buf[tx->index], micro->base + UTDR); micro_tx_chars() 297 tx->index++; micro_tx_chars() 343 struct ipaq_micro_txdev *tx = µ->tx; micro_serial_isr() local 365 } while (((tx->index < tx->len) && (status & UTSR0_TFS)) || micro_serial_isr()
|
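ipaq_micro_trigger_tx() above frames a message as a start-of-frame byte, a header byte, the payload, and a trailing checksum before handing the buffer to the UART. The exact header encoding and checksum seed are elided in this excerpt, so the sketch below uses a plain additive checksum purely as a placeholder, and the CHAR_SOF value is likewise an assumption.

#include <stdint.h>
#include <stdio.h>

#define CHAR_SOF	0x02	/* assumed start-of-frame marker */

static size_t build_frame(uint8_t *buf, uint8_t hdr,
			  const uint8_t *data, size_t len)
{
	size_t bp = 0, i;
	uint8_t checksum = hdr;	/* placeholder seed; the real driver may differ */

	buf[bp++] = CHAR_SOF;
	buf[bp++] = hdr;
	for (i = 0; i < len; i++) {
		buf[bp++] = data[i];
		checksum += data[i];
	}
	buf[bp++] = checksum;	/* trailing checksum, as in the excerpt */
	return bp;		/* becomes tx->len in the driver */
}

int main(void)
{
	uint8_t payload[] = { 0x10, 0x20, 0x30 };
	uint8_t frame[16];
	size_t i, n = build_frame(frame, 0x42, payload, sizeof(payload));

	for (i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");
	return 0;
}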
/linux-4.1.27/drivers/net/wireless/rt2x00/ |
H A D | rt2800mmio.c | 192 * For example, a tx queue rt2800mmio_txdone_find_entry() 194 * can result in tx status reports rt2800mmio_txdone_find_entry() 198 * To mitigate this effect, associate the tx status to the first frame rt2800mmio_txdone_find_entry() 199 * in the tx queue with a matching wcid. rt2800mmio_txdone_find_entry() 204 * Got a matching frame, associate the tx status with rt2800mmio_txdone_find_entry() 221 * Find the first frame without tx status and assign this status to it rt2800mmio_txdone_match_first() 226 * Got a matching frame, associate the tx status with rt2800mmio_txdone_match_first() 262 * this tx status. rt2800mmio_txdone() 273 * processing here and drop the tx status rt2800mmio_txdone() 283 * and drop the tx status. rt2800mmio_txdone() 291 * Let's associate this tx status with the first rt2800mmio_txdone() 298 * We cannot match the tx status to any frame, so just rt2800mmio_txdone() 311 * Release all frames with a valid tx status. rt2800mmio_txdone() 347 * No need to enable the tx status interrupt here as we always rt2800mmio_txstatus_tasklet() 348 * leave it enabled to minimize the possibility of a tx status rt2800mmio_txstatus_tasklet() 428 * Hence, read the TX_STA_FIFO register and copy all tx status rt2800mmio_txstatus_interrupt() 430 * tasklet. We use a tasklet to process the tx status reports rt2800mmio_txstatus_interrupt() 432 * interrupt fires again during tx status processing). rt2800mmio_txstatus_interrupt() 436 * can also be read while the tx status tasklet gets executed. rt2800mmio_txstatus_interrupt() 441 for (i = 0; i < rt2x00dev->tx->limit; i++) { rt2800mmio_txstatus_interrupt() 448 rt2x00_warn(rt2x00dev, "TX status FIFO overrun, drop tx status report\n"); rt2800mmio_txstatus_interrupt() 453 /* Schedule the tasklet for processing the tx status. */ rt2800mmio_txstatus_interrupt() 747 entry_priv = rt2x00dev->tx[0].entries[0].priv_data; rt2800mmio_init_queues() 751 rt2x00dev->tx[0].limit); rt2800mmio_init_queues() 755 entry_priv = rt2x00dev->tx[1].entries[0].priv_data; rt2800mmio_init_queues() 759 rt2x00dev->tx[1].limit); rt2800mmio_init_queues() 763 entry_priv = rt2x00dev->tx[2].entries[0].priv_data; rt2800mmio_init_queues() 767 rt2x00dev->tx[2].limit); rt2800mmio_init_queues() 771 entry_priv = rt2x00dev->tx[3].entries[0].priv_data; rt2800mmio_init_queues() 775 rt2x00dev->tx[3].limit); rt2800mmio_init_queues()
|
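The rt2800mmio_txdone_*() comments above describe how a tx status report gets attached to a queued frame: prefer the oldest pending frame whose WCID matches the report, and only if nothing matches fall back to the first frame still lacking a status. A small standalone model of that heuristic, with an array standing in for the tx queue:

#include <stdio.h>

struct pending_frame {
	int wcid;		/* wireless client id the frame was sent to */
	int has_status;		/* already matched to a tx status report? */
};

static int match_status(struct pending_frame *q, int n, int report_wcid)
{
	int i;

	/* pass 1: oldest frame without a status whose wcid matches the report */
	for (i = 0; i < n; i++)
		if (!q[i].has_status && q[i].wcid == report_wcid)
			return i;

	/* pass 2: no wcid match, take the first frame still missing a status */
	for (i = 0; i < n; i++)
		if (!q[i].has_status)
			return i;

	return -1;		/* nothing left to associate the report with */
}

int main(void)
{
	struct pending_frame q[] = {
		{ .wcid = 1, .has_status = 1 },
		{ .wcid = 2, .has_status = 0 },
		{ .wcid = 1, .has_status = 0 },
	};
	int idx = match_status(q, 3, 1);

	printf("report for wcid 1 -> entry %d\n", idx);	/* entry 2 */
	if (idx >= 0)
		q[idx].has_status = 1;
	return 0;
}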
H A D | rt2x00config.c | 142 if (config.tx == ANTENNA_SW_DIVERSITY) { rt2x00lib_config_antenna() 145 if (def->tx == ANTENNA_SW_DIVERSITY) rt2x00lib_config_antenna() 146 config.tx = ANTENNA_B; rt2x00lib_config_antenna() 148 config.tx = def->tx; rt2x00lib_config_antenna() 150 } else if (config.tx == ANTENNA_SW_DIVERSITY) rt2x00lib_config_antenna() 151 config.tx = active->tx; rt2x00lib_config_antenna()
|
/linux-4.1.27/drivers/misc/echo/ |
H A D | oslec.h | 78 * @tx: The transmitted audio sample. 83 int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx); 86 * oslec_hpf_tx: Process to high pass filter the tx signal. 88 * @tx: The transmitted audio sample. 92 int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx);
|
H A D | echo.c | 347 int16_t oslec_update(struct oslec_state *ec, int16_t tx, int16_t rx) oslec_update() argument 355 * Input scaling was found be required to prevent problems when tx oslec_update() 360 ec->tx = tx; oslec_update() 362 tx >>= 1; oslec_update() 418 new = (int)tx * (int)tx; oslec_update() 429 ec->ltxacc += abs(tx) - ec->ltx; oslec_update() 437 echo_value = fir16(&ec->fir_state, tx); oslec_update() 444 echo_value = fir16(&ec->fir_state_bg, tx); oslec_update() 618 as part of the tx process. See rx HP (DC blocking) filter above, it's 624 by removing any low frequency before it gets to the tx port of the 627 It can also help by removing and DC in the tx signal. DC is bad 639 int16_t oslec_hpf_tx(struct oslec_state *ec, int16_t tx) oslec_hpf_tx() argument 645 tmp = tx << 15; oslec_hpf_tx() 663 tx = tmp1; oslec_hpf_tx() 667 return tx; oslec_hpf_tx()
|
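The echo.c comments above explain that oslec_hpf_tx() high-pass filters the tx signal so that DC and very low frequencies never reach the tx port. The sketch below is the textbook first-order DC blocker, shown only to illustrate that idea; it is not the driver's fixed-point filter and its coefficient is arbitrary.

#include <stdint.h>
#include <stdio.h>

struct dc_blocker {
	float prev_x;
	float prev_y;
};

/* y[n] = x[n] - x[n-1] + a * y[n-1], a pole close to 1 gives a cutoff
 * of a few tens of Hz at an 8 kHz sample rate */
static int16_t dc_block(struct dc_blocker *s, int16_t x)
{
	const float a = 0.995f;		/* illustrative coefficient */
	float y = (float)x - s->prev_x + a * s->prev_y;

	s->prev_x = (float)x;
	s->prev_y = y;
	if (y > 32767.0f)
		y = 32767.0f;
	else if (y < -32768.0f)
		y = -32768.0f;
	return (int16_t)y;
}

int main(void)
{
	struct dc_blocker s = { 0 };
	int i;

	/* a constant +1000 offset decays towards zero sample by sample */
	for (i = 0; i < 5; i++)
		printf("%d ", dc_block(&s, 1000));
	printf("\n");
	return 0;
}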
/linux-4.1.27/drivers/net/wireless/cw1200/ |
H A D | bh.c | 242 int *tx) cw1200_bh_rx_helper() 331 *tx = 1; cw1200_bh_rx_helper() 401 pr_err("tx blew up, len %zu\n", tx_len); cw1200_bh_tx_helper() 426 int rx, tx, term, suspend; cw1200_bh() local 461 tx = atomic_xchg(&priv->bh_tx, 0); cw1200_bh() 465 (rx || tx || term || suspend || priv->bh_error); cw1200_bh() 468 pr_debug("[BH] - rx: %d, tx: %d, term: %d, bh_err: %d, suspend: %d, status: %ld\n", cw1200_bh() 469 rx, tx, term, suspend, priv->bh_error, status); cw1200_bh() 483 if (priv->hw_bufs_used && (!rx || !tx)) { cw1200_bh() 548 tx += pending_tx; cw1200_bh() 556 ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx); cw1200_bh() 561 ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx); cw1200_bh() 567 tx: cw1200_bh() 568 if (tx) { cw1200_bh() 569 tx = 0; cw1200_bh() 576 /* Buffers full. Ensure we process tx cw1200_bh() 579 pending_tx = tx; cw1200_bh() 586 tx = ret; cw1200_bh() 598 if (tx) cw1200_bh() 599 goto tx; cw1200_bh() 240 cw1200_bh_rx_helper(struct cw1200_common *priv, uint16_t *ctrl_reg, int *tx) cw1200_bh_rx_helper() argument
|
H A D | txrx.h | 51 * Device does not accept per-PDU tx retry sequence. 52 * It uses "tx retry policy id" instead, so driver code has to sync 53 * linux tx retry sequences with a retry policy table in the device.
|
H A D | debug.h | 17 int tx; member in struct:cw1200_debug_priv 38 ++priv->debug->tx; cw1200_debug_txed()
|
/linux-4.1.27/drivers/net/ethernet/toshiba/ |
H A D | ps3_gelic_net.h | 101 * a tx frame 128 GELIC_DESCR_DMA_COMPLETE = 0x00000000, /* used in tx */ 130 GELIC_DESCR_DMA_RESPONSE_ERROR = 0x10000000, /* used in rx, tx */ 131 GELIC_DESCR_DMA_PROTECTION_ERROR = 0x20000000, /* used in rx, tx */ 133 GELIC_DESCR_DMA_FORCE_END = 0x50000000, /* used in rx, tx */ 134 GELIC_DESCR_DMA_CARDOWNED = 0xa0000000, /* used in rx, tx */ 140 /* tx descriptor command and status */ 252 __be32 valid_size; /* all zeroes for tx */ 254 __be32 data_error; /* all zeroes for tx */ 270 u16 tx; member in struct:gelic_vlan_id 292 * tx_lock guards tx descriptor list and
|
H A D | spider_net.h | 200 * 1(0) enable r/tx dma 369 #define SPIDER_NET_DESCR_COMPLETE 0x00000000 /* used in rx and tx */ 370 #define SPIDER_NET_DESCR_RESPONSE_ERROR 0x10000000 /* used in rx and tx */ 371 #define SPIDER_NET_DESCR_PROTECTION_ERROR 0x20000000 /* used in rx and tx */ 373 #define SPIDER_NET_DESCR_FORCE_END 0x50000000 /* used in rx and tx */ 374 #define SPIDER_NET_DESCR_CARDOWNED 0xA0000000 /* used in rx and tx */ 392 u32 valid_size; /* all zeroes for tx */ 394 u32 data_error; /* all zeroes for tx */
|
H A D | ps3_gelic_net.c | 273 /* stop tx */ gelic_card_down() 500 * gelic_descr_release_tx - processes a used tx descriptor 504 * releases a used tx descriptor (unmapping, freeing of skb) 545 * gelic_card_release_tx_chain - processes sent tx descriptors 549 * releases the tx descriptors that gelic has finished with 569 "%s: forcing end of tx descriptor " \ gelic_card_release_tx_chain() 584 /* pending tx request */ gelic_card_release_tx_chain() 683 * gelic_card_get_next_tx_descr - returns the next available tx descriptor 704 * gelic_net_set_txdescr_cmdstat - sets the tx descriptor command field 789 card->vlan[type].tx); gelic_descr_prepare_tx() 1161 /* kick outstanding tx descriptor if any */ gelic_card_interrupt() 1401 * @work: work is context of tx timout task 1403 * called as task when tx hangs, resets interface (if interface is up) 1427 * gelic_net_tx_timeout - called when the tx timeout watchdog kicks in. 1430 * called, if tx hangs. Schedules a task that resets the interface 1593 int tx; gelic_card_get_vlan_info() member in struct:__anon7363 1597 .tx = GELIC_LV1_VLAN_TX_ETHERNET_0, gelic_card_get_vlan_info() 1601 .tx = GELIC_LV1_VLAN_TX_WIRELESS, gelic_card_get_vlan_info() 1607 /* tx tag */ gelic_card_get_vlan_info() 1610 vlan_id_ix[i].tx, gelic_card_get_vlan_info() 1615 "get vlan id for tx(%d) failed(%d)\n", gelic_card_get_vlan_info() 1616 vlan_id_ix[i].tx, status); gelic_card_get_vlan_info() 1617 card->vlan[i].tx = 0; gelic_card_get_vlan_info() 1621 card->vlan[i].tx = (u16)v1; gelic_card_get_vlan_info() 1633 card->vlan[i].tx = 0; gelic_card_get_vlan_info() 1639 dev_dbg(ctodev(card), "vlan_id[%d] tx=%02x rx=%02x\n", gelic_card_get_vlan_info() 1640 i, card->vlan[i].tx, card->vlan[i].rx); gelic_card_get_vlan_info() 1643 if (card->vlan[GELIC_PORT_ETHERNET_0].tx) { gelic_card_get_vlan_info() 1644 BUG_ON(!card->vlan[GELIC_PORT_WIRELESS].tx); gelic_card_get_vlan_info() 1651 card->vlan[GELIC_PORT_WIRELESS].tx = 0; gelic_card_get_vlan_info() 1755 dev_dbg(ctodev(card), "descr rx %p, tx %p, size %#lx, num %#x\n", ps3_gelic_driver_probe()
|
/linux-4.1.27/drivers/net/ethernet/broadcom/ |
H A D | bcm63xx_enet.h | 23 /* tx transmit threshold (4 bytes unit), fifo is 256 bytes, the value 29 * hardware maximum rx/tx packet size including FCS, max mtu is 207 /* hw view of rx & tx dma ring */ 211 /* allocated size (in bytes) for rx & tx dma ring */ 250 /* dma channel id for tx */ 253 /* number of dma desc in tx ring */ 262 /* number of available descriptor for tx */ 265 /* next tx descriptor available */ 268 /* next dirty tx descriptor to reclaim */ 271 /* list of skb given to hw for tx */ 274 /* lock used by tx reclaim and xmit */
|
H A D | bcmsysport.c | 236 STAT_MIB_TX("tx_64_octets", mib.tx.pkt_cnt.cnt_64), 237 STAT_MIB_TX("tx_65_127_oct", mib.tx.pkt_cnt.cnt_127), 238 STAT_MIB_TX("tx_128_255_oct", mib.tx.pkt_cnt.cnt_255), 239 STAT_MIB_TX("tx_256_511_oct", mib.tx.pkt_cnt.cnt_511), 240 STAT_MIB_TX("tx_512_1023_oct", mib.tx.pkt_cnt.cnt_1023), 241 STAT_MIB_TX("tx_1024_1518_oct", mib.tx.pkt_cnt.cnt_1518), 242 STAT_MIB_TX("tx_vlan_1519_1522_oct", mib.tx.pkt_cnt.cnt_mgv), 243 STAT_MIB_TX("tx_1522_2047_oct", mib.tx.pkt_cnt.cnt_2047), 244 STAT_MIB_TX("tx_2048_4095_oct", mib.tx.pkt_cnt.cnt_4095), 245 STAT_MIB_TX("tx_4096_9216_oct", mib.tx.pkt_cnt.cnt_9216), 246 STAT_MIB_TX("tx_pkts", mib.tx.pkts), 247 STAT_MIB_TX("tx_multicast", mib.tx.mca), 248 STAT_MIB_TX("tx_broadcast", mib.tx.bca), 249 STAT_MIB_TX("tx_pause", mib.tx.pf), 250 STAT_MIB_TX("tx_control", mib.tx.cf), 251 STAT_MIB_TX("tx_fcs_err", mib.tx.fcs), 252 STAT_MIB_TX("tx_oversize", mib.tx.ovr), 253 STAT_MIB_TX("tx_defer", mib.tx.drf), 254 STAT_MIB_TX("tx_excess_defer", mib.tx.edf), 255 STAT_MIB_TX("tx_single_col", mib.tx.scl), 256 STAT_MIB_TX("tx_multi_col", mib.tx.mcl), 257 STAT_MIB_TX("tx_late_col", mib.tx.lcl), 258 STAT_MIB_TX("tx_excess_col", mib.tx.ecl), 259 STAT_MIB_TX("tx_frags", mib.tx.frg), 260 STAT_MIB_TX("tx_total_col", mib.tx.ncl), 261 STAT_MIB_TX("tx_jabber", mib.tx.jbr), 262 STAT_MIB_TX("tx_bytes", mib.tx.bytes), 263 STAT_MIB_TX("tx_good_pkts", mib.tx.pok), 264 STAT_MIB_TX("tx_unicast", mib.tx.uc), 949 /* lock against tx reclaim in BH context and TX ring full interrupt */ bcm_sysport_xmit()
|
/linux-4.1.27/drivers/media/platform/omap/ |
H A D | omap_vout_vrfb.c | 222 struct vid_vrfb_dma *tx; omap_vout_prepare_vrfb() local 253 tx = &vout->vrfb_dma_tx; omap_vout_prepare_vrfb() 254 tx->tx_status = 0; omap_vout_prepare_vrfb() 255 omap_set_dma_transfer_params(tx->dma_ch, OMAP_DMA_DATA_TYPE_S32, omap_vout_prepare_vrfb() 257 tx->dev_id, 0x0); omap_vout_prepare_vrfb() 259 omap_set_dma_src_params(tx->dma_ch, 0, OMAP_DMA_AMODE_POST_INC, omap_vout_prepare_vrfb() 262 omap_set_dma_src_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16); omap_vout_prepare_vrfb() 266 omap_set_dma_dest_params(tx->dma_ch, 0, OMAP_DMA_AMODE_DOUBLE_IDX, omap_vout_prepare_vrfb() 270 omap_set_dma_dest_burst_mode(tx->dma_ch, OMAP_DMA_DATA_BURST_16); omap_vout_prepare_vrfb() 273 omap_start_dma(tx->dma_ch); omap_vout_prepare_vrfb() 274 wait_event_interruptible_timeout(tx->wait, tx->tx_status == 1, omap_vout_prepare_vrfb() 277 if (tx->tx_status == 0) { omap_vout_prepare_vrfb() 278 omap_stop_dma(tx->dma_ch); omap_vout_prepare_vrfb()
|
/linux-4.1.27/Documentation/ptp/ |
H A D | testptp.c | 53 static int clock_adjtime(clockid_t id, struct timex *tx) clock_adjtime() argument 55 return syscall(__NR_clock_adjtime, id, tx); clock_adjtime() 151 struct timex tx; main() local 290 memset(&tx, 0, sizeof(tx)); main() 291 tx.modes = ADJ_FREQUENCY; main() 292 tx.freq = ppb_to_scaled_ppm(adjfreq); main() 293 if (clock_adjtime(clkid, &tx)) { main() 301 memset(&tx, 0, sizeof(tx)); main() 302 tx.modes = ADJ_SETOFFSET; main() 303 tx.time.tv_sec = adjtime; main() 304 tx.time.tv_usec = 0; main() 305 if (clock_adjtime(clkid, &tx) < 0) { main()
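The testptp.c fragment above drives a PTP hardware clock through the dynamic posix-clock form of clock_adjtime(2). Below is a minimal userspace sketch of the same ADJ_FREQUENCY call; the /dev/ptp0 path and the -100 ppb correction are assumptions for illustration, and FD_TO_CLOCKID mirrors the encoding testptp.c itself uses.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/timex.h>
#include <unistd.h>

#define CLOCKFD 3
#define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD)

int main(void)
{
	struct timex tx;
	int fd = open("/dev/ptp0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&tx, 0, sizeof(tx));
	tx.modes = ADJ_FREQUENCY;
	tx.freq = (-100 * 65536) / 1000;	/* -100 ppb expressed in scaled ppm */

	if (syscall(__NR_clock_adjtime, FD_TO_CLOCKID(fd), &tx) < 0)
		perror("clock_adjtime");

	close(fd);
	return 0;
}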
|
/linux-4.1.27/drivers/staging/iio/accel/ |
H A D | lis3l02dq_core.c | 58 .tx_buf = st->tx, lis3l02dq_spi_read_reg_8() 65 st->tx[0] = LIS3L02DQ_READ_REG(reg_address); lis3l02dq_spi_read_reg_8() 66 st->tx[1] = 0; lis3l02dq_spi_read_reg_8() 89 st->tx[0] = LIS3L02DQ_WRITE_REG(reg_address); lis3l02dq_spi_write_reg_8() 90 st->tx[1] = val; lis3l02dq_spi_write_reg_8() 91 ret = spi_write(st->us, st->tx, 2); lis3l02dq_spi_write_reg_8() 111 .tx_buf = st->tx, lis3l02dq_spi_write_reg_s16() 116 .tx_buf = st->tx + 2, lis3l02dq_spi_write_reg_s16() 123 st->tx[0] = LIS3L02DQ_WRITE_REG(lower_reg_address); lis3l02dq_spi_write_reg_s16() 124 st->tx[1] = value & 0xFF; lis3l02dq_spi_write_reg_s16() 125 st->tx[2] = LIS3L02DQ_WRITE_REG(lower_reg_address + 1); lis3l02dq_spi_write_reg_s16() 126 st->tx[3] = (value >> 8) & 0xFF; lis3l02dq_spi_write_reg_s16() 142 .tx_buf = st->tx, lis3l02dq_read_reg_s16() 148 .tx_buf = st->tx + 2, lis3l02dq_read_reg_s16() 156 st->tx[0] = LIS3L02DQ_READ_REG(lower_reg_address); lis3l02dq_read_reg_s16() 157 st->tx[1] = 0; lis3l02dq_read_reg_s16() 158 st->tx[2] = LIS3L02DQ_READ_REG(lower_reg_address + 1); lis3l02dq_read_reg_s16() 159 st->tx[3] = 0; lis3l02dq_read_reg_s16()
|
H A D | adis16220.h | 128 * @tx: transmit buffer 130 * @buf_lock: mutex to protect tx and rx 136 u8 tx[ADIS16220_MAX_TX] ____cacheline_aligned;
|
H A D | lis3l02dq_ring.c | 75 xfers[j].tx_buf = st->tx + 2*j; lis3l02dq_read_all() 76 st->tx[2*j] = read_all_tx_array[i*4]; lis3l02dq_read_all() 77 st->tx[2*j + 1] = 0; lis3l02dq_read_all() 86 xfers[j].tx_buf = st->tx + 2*j; lis3l02dq_read_all() 87 st->tx[2*j] = read_all_tx_array[i*4 + 2]; lis3l02dq_read_all() 88 st->tx[2*j + 1] = 0; lis3l02dq_read_all()
|
/linux-4.1.27/drivers/infiniband/hw/qib/ |
H A D | qib_verbs.c | 931 struct qib_verbs_txreq *tx; __get_txreq() local 943 tx = list_entry(l, struct qib_verbs_txreq, txreq.list); __get_txreq() 954 tx = ERR_PTR(-EBUSY); __get_txreq() 956 return tx; __get_txreq() 962 struct qib_verbs_txreq *tx; get_txreq() local 972 tx = list_entry(l, struct qib_verbs_txreq, txreq.list); get_txreq() 976 tx = __get_txreq(dev, qp); get_txreq() 978 return tx; get_txreq() 981 void qib_put_txreq(struct qib_verbs_txreq *tx) qib_put_txreq() argument 987 qp = tx->qp; qib_put_txreq() 992 if (tx->mr) { qib_put_txreq() 993 qib_put_mr(tx->mr); qib_put_txreq() 994 tx->mr = NULL; qib_put_txreq() 996 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) { qib_put_txreq() 997 tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF; qib_put_txreq() 999 tx->txreq.addr, tx->hdr_dwords << 2, qib_put_txreq() 1001 kfree(tx->align_buf); qib_put_txreq() 1007 list_add(&tx->txreq.list, &dev->txreq_free); qib_put_txreq() 1080 struct qib_verbs_txreq *tx = sdma_complete() local 1082 struct qib_qp *qp = tx->qp; sdma_complete() 1085 if (tx->wqe) sdma_complete() 1086 qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS); sdma_complete() 1090 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) sdma_complete() 1091 hdr = &tx->align_buf->hdr; sdma_complete() 1095 hdr = &dev->pio_hdrs[tx->hdr_inx].hdr; sdma_complete() 1109 qib_put_txreq(tx); sdma_complete() 1143 struct qib_verbs_txreq *tx; qib_verbs_send_dma() local 1149 tx = qp->s_tx; qib_verbs_send_dma() 1150 if (tx) { qib_verbs_send_dma() 1153 ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx); qib_verbs_send_dma() 1157 tx = get_txreq(dev, qp); qib_verbs_send_dma() 1158 if (IS_ERR(tx)) qib_verbs_send_dma() 1163 tx->qp = qp; qib_verbs_send_dma() 1165 tx->wqe = qp->s_wqe; qib_verbs_send_dma() 1166 tx->mr = qp->s_rdma_mr; qib_verbs_send_dma() 1169 tx->txreq.callback = sdma_complete; qib_verbs_send_dma() 1171 tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST; qib_verbs_send_dma() 1173 tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ; qib_verbs_send_dma() 1175 tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF; qib_verbs_send_dma() 1188 phdr = &dev->pio_hdrs[tx->hdr_inx]; qib_verbs_send_dma() 1192 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC; qib_verbs_send_dma() 1193 tx->txreq.sg_count = ndesc; qib_verbs_send_dma() 1194 tx->txreq.addr = dev->pio_hdrs_phys + qib_verbs_send_dma() 1195 tx->hdr_inx * sizeof(struct qib_pio_header); qib_verbs_send_dma() 1196 tx->hdr_dwords = hdrwords + 2; /* add PBC length */ qib_verbs_send_dma() 1197 ret = qib_sdma_verbs_send(ppd, ss, dwords, tx); qib_verbs_send_dma() 1202 tx->hdr_dwords = plen + 1; qib_verbs_send_dma() 1203 phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC); qib_verbs_send_dma() 1211 tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr, qib_verbs_send_dma() 1212 tx->hdr_dwords << 2, DMA_TO_DEVICE); qib_verbs_send_dma() 1213 if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr)) qib_verbs_send_dma() 1215 tx->align_buf = phdr; qib_verbs_send_dma() 1216 tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF; qib_verbs_send_dma() 1217 tx->txreq.sg_count = 1; qib_verbs_send_dma() 1218 ret = qib_sdma_verbs_send(ppd, NULL, 0, tx); qib_verbs_send_dma() 1224 qib_put_txreq(tx); qib_verbs_send_dma() 1231 ret = PTR_ERR(tx); qib_verbs_send_dma() 2133 struct qib_verbs_txreq *tx; qib_register_ib_device() local 2135 tx = kzalloc(sizeof(*tx), GFP_KERNEL); qib_register_ib_device() 2136 if (!tx) { qib_register_ib_device() 2140 tx->hdr_inx = i; qib_register_ib_device() 2141 list_add(&tx->txreq.list, &dev->txreq_free); qib_register_ib_device() 2263 struct 
qib_verbs_txreq *tx; qib_register_ib_device() local 2266 tx = list_entry(l, struct qib_verbs_txreq, txreq.list); qib_register_ib_device() 2267 kfree(tx); qib_register_ib_device() 2317 struct qib_verbs_txreq *tx; qib_unregister_ib_device() local 2320 tx = list_entry(l, struct qib_verbs_txreq, txreq.list); qib_unregister_ib_device() 2321 kfree(tx); qib_unregister_ib_device()
|
H A D | qib_sdma.c | 514 struct qib_verbs_txreq *tx) complete_sdma_err_req() 516 atomic_inc(&tx->qp->s_dma_busy); complete_sdma_err_req() 518 tx->txreq.start_idx = 0; complete_sdma_err_req() 519 tx->txreq.next_descq_idx = 0; complete_sdma_err_req() 520 list_add_tail(&tx->txreq.list, &ppd->sdma_activelist); complete_sdma_err_req() 535 struct qib_verbs_txreq *tx) qib_sdma_verbs_send() 551 complete_sdma_err_req(ppd, tx); qib_sdma_verbs_send() 555 if (tx->txreq.sg_count > qib_sdma_descq_freecnt(ppd)) { qib_sdma_verbs_send() 564 dwoffset = tx->hdr_dwords; qib_sdma_verbs_send() 565 make_sdma_desc(ppd, sdmadesc, (u64) tx->txreq.addr, dwoffset, 0); qib_sdma_verbs_send() 568 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF) qib_sdma_verbs_send() 584 tx->txreq.start_idx = tail; qib_sdma_verbs_send() 605 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_USELARGEBUF) qib_sdma_verbs_send() 643 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_HEADTOHOST) qib_sdma_verbs_send() 645 if (tx->txreq.flags & QIB_SDMA_TXREQ_F_INTREQ) qib_sdma_verbs_send() 648 atomic_inc(&tx->qp->s_dma_busy); qib_sdma_verbs_send() 649 tx->txreq.next_descq_idx = tail; qib_sdma_verbs_send() 651 ppd->sdma_descq_added += tx->txreq.sg_count; qib_sdma_verbs_send() 652 list_add_tail(&tx->txreq.list, &ppd->sdma_activelist); qib_sdma_verbs_send() 665 qp = tx->qp; qib_sdma_verbs_send() 666 qib_put_txreq(tx); qib_sdma_verbs_send() 681 qp = tx->qp; qib_sdma_verbs_send() 691 tx->ss = ss; qib_sdma_verbs_send() 692 tx->dwords = dwords; qib_sdma_verbs_send() 693 qp->s_tx = tx; qib_sdma_verbs_send() 710 qib_put_txreq(tx); qib_sdma_verbs_send() 513 complete_sdma_err_req(struct qib_pportdata *ppd, struct qib_verbs_txreq *tx) complete_sdma_err_req() argument 533 qib_sdma_verbs_send(struct qib_pportdata *ppd, struct qib_sge_state *ss, u32 dwords, struct qib_verbs_txreq *tx) qib_sdma_verbs_send() argument
|
/linux-4.1.27/drivers/net/wireless/ath/ath6kl/ |
H A D | trace.h | 91 __field(unsigned int, tx) 105 __entry->tx = 1; 107 __entry->tx = 0; 112 __entry->tx ? "tx" : "rx", 126 __field(unsigned int, tx) 146 __entry->tx = 1; 148 __entry->tx = 0; 165 __entry->tx ? "tx" : "rx",
|
/linux-4.1.27/drivers/ptp/ |
H A D | ptp_clock.c | 127 static int ptp_clock_adjtime(struct posix_clock *pc, struct timex *tx) ptp_clock_adjtime() argument 135 if (tx->modes & ADJ_SETOFFSET) { ptp_clock_adjtime() 140 ts.tv_sec = tx->time.tv_sec; ptp_clock_adjtime() 141 ts.tv_nsec = tx->time.tv_usec; ptp_clock_adjtime() 143 if (!(tx->modes & ADJ_NANO)) ptp_clock_adjtime() 152 } else if (tx->modes & ADJ_FREQUENCY) { ptp_clock_adjtime() 153 s32 ppb = scaled_ppm_to_ppb(tx->freq); ptp_clock_adjtime() 157 ptp->dialed_frequency = tx->freq; ptp_clock_adjtime() 158 } else if (tx->modes == 0) { ptp_clock_adjtime() 159 tx->freq = ptp->dialed_frequency; ptp_clock_adjtime()
|
/linux-4.1.27/drivers/staging/rtl8723au/include/ |
H A D | ieee80211.h | 101 /* tx: cck only , rx: cck only, hw: cck */ 103 /* tx: ofdm only, rx: ofdm & cck, hw: cck & ofdm */ 105 /* tx: ofdm only, rx: ofdm only, hw: ofdm only */ 107 /* tx: MCS only, rx: MCS & cck, hw: MCS & cck */ 109 /* tx: MCS only, rx: MCS & ofdm, hw: ofdm only */ 115 /* tx: cck & ofdm, rx: cck & ofdm & MCS, hw: cck & ofdm */ 117 /* tx: ofdm & MCS, rx: ofdm & cck & MCS, hw: cck & ofdm */ 119 /* tx: ofdm & MCS, rx: ofdm & MCS, hw: ofdm only */ 121 /* tx: ofdm & cck & MCS, rx: ofdm & cck & MCS, hw: ofdm & cck */ 123 /* tx: ofdm & MCS, rx: ofdm & MCS, hw: ofdm only */
|
H A D | rtw_ht.h | 27 u32 tx_amdsu_maxlen; /* 1: 8k, 0:4k ; default:8k, for tx */
|
/linux-4.1.27/drivers/infiniband/ulp/ipoib/ |
H A D | ipoib_cm.c | 695 struct ipoib_cm_tx *tx, post_send() 707 return ib_post_send(tx->qp, &priv->tx_wr, &bad_wr); post_send() 710 void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx) ipoib_cm_send() argument 717 if (unlikely(skb->len > tx->mtu)) { ipoib_cm_send() 719 skb->len, tx->mtu); ipoib_cm_send() 722 ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN); ipoib_cm_send() 727 tx->tx_head, skb->len, tx->qp->qp_num); ipoib_cm_send() 736 tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)]; ipoib_cm_send() 750 rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), ipoib_cm_send() 759 ++tx->tx_head; ipoib_cm_send() 763 tx->qp->qp_num); ipoib_cm_send() 778 struct ipoib_cm_tx *tx = wc->qp->qp_context; ipoib_cm_handle_tx_wc() local 792 tx_req = &tx->tx_ring[wr_id]; ipoib_cm_handle_tx_wc() 804 ++tx->tx_tail; ipoib_cm_handle_tx_wc() 819 neigh = tx->neigh; ipoib_cm_handle_tx_wc() 825 tx->neigh = NULL; ipoib_cm_handle_tx_wc() 828 if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { ipoib_cm_handle_tx_wc() 829 list_move(&tx->list, &priv->cm.reap_list); ipoib_cm_handle_tx_wc() 833 clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags); ipoib_cm_handle_tx_wc() 1022 static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx) ipoib_cm_create_tx_qp() argument 1033 .qp_context = tx, ipoib_cm_create_tx_qp() 1105 ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret); ipoib_cm_modify_tx_init() 1120 ipoib_warn(priv, "failed to allocate tx ring\n"); ipoib_cm_tx_init() 1129 ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret); ipoib_cm_tx_init() 1136 ipoib_warn(priv, "failed to create tx cm id: %d\n", ret); ipoib_cm_tx_init() 1142 ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret); ipoib_cm_tx_init() 1222 struct ipoib_cm_tx *tx = cm_id->context; ipoib_cm_tx_handler() local 1223 struct ipoib_dev_priv *priv = netdev_priv(tx->dev); ipoib_cm_tx_handler() 1247 neigh = tx->neigh; ipoib_cm_tx_handler() 1253 tx->neigh = NULL; ipoib_cm_tx_handler() 1256 if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { ipoib_cm_tx_handler() 1257 list_move(&tx->list, &priv->cm.reap_list); ipoib_cm_tx_handler() 1275 struct ipoib_cm_tx *tx; ipoib_cm_create_tx() local 1277 tx = kzalloc(sizeof *tx, GFP_ATOMIC); ipoib_cm_create_tx() 1278 if (!tx) ipoib_cm_create_tx() 1281 neigh->cm = tx; ipoib_cm_create_tx() 1282 tx->neigh = neigh; ipoib_cm_create_tx() 1283 tx->path = path; ipoib_cm_create_tx() 1284 tx->dev = dev; ipoib_cm_create_tx() 1285 list_add(&tx->list, &priv->cm.start_list); ipoib_cm_create_tx() 1286 set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags); ipoib_cm_create_tx() 1288 return tx; ipoib_cm_create_tx() 1291 void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx) ipoib_cm_destroy_tx() argument 1293 struct ipoib_dev_priv *priv = netdev_priv(tx->dev); ipoib_cm_destroy_tx() 1295 if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) { ipoib_cm_destroy_tx() 1297 list_move(&tx->list, &priv->cm.reap_list); ipoib_cm_destroy_tx() 1300 tx->neigh->daddr + 4); ipoib_cm_destroy_tx() 1301 tx->neigh = NULL; ipoib_cm_destroy_tx() 694 post_send(struct ipoib_dev_priv *priv, struct ipoib_cm_tx *tx, unsigned int wr_id, u64 addr, int len) post_send() argument
|
/linux-4.1.27/drivers/net/can/softing/ |
H A D | softing_main.c | 61 /* trigger the tx queue-ing */ softing_netdev_start_xmit() 80 (card->tx.pending >= TXMAX) || softing_netdev_start_xmit() 81 (priv->tx.pending >= TX_ECHO_SKB_MAX)) softing_netdev_start_xmit() 115 card->tx.last_bus = priv->index; softing_netdev_start_xmit() 116 ++card->tx.pending; softing_netdev_start_xmit() 117 ++priv->tx.pending; softing_netdev_start_xmit() 118 can_put_echo_skb(skb, dev, priv->tx.echo_put); softing_netdev_start_xmit() 119 ++priv->tx.echo_put; softing_netdev_start_xmit() 120 if (priv->tx.echo_put >= TX_ECHO_SKB_MAX) softing_netdev_start_xmit() 121 priv->tx.echo_put = 0; softing_netdev_start_xmit() 126 if (card->tx.pending >= TXMAX) { softing_netdev_start_xmit() 293 /* acknowledge, was tx msg */ softing_handle_1() 295 skb = priv->can.echo_skb[priv->tx.echo_get]; softing_handle_1() 298 can_get_echo_skb(netdev, priv->tx.echo_get); softing_handle_1() 299 ++priv->tx.echo_get; softing_handle_1() 300 if (priv->tx.echo_get >= TX_ECHO_SKB_MAX) softing_handle_1() 301 priv->tx.echo_get = 0; softing_handle_1() 302 if (priv->tx.pending) softing_handle_1() 303 --priv->tx.pending; softing_handle_1() 304 if (card->tx.pending) softing_handle_1() 305 --card->tx.pending; softing_handle_1() 343 /* resume tx queue's */ softing_irq_thread() 344 offset = card->tx.last_bus; softing_irq_thread() 346 if (card->tx.pending >= TXMAX) softing_irq_thread() 355 if (priv->tx.pending >= TX_ECHO_SKB_MAX) softing_irq_thread()
|
/linux-4.1.27/drivers/tty/serial/ |
H A D | max3100.c | 196 static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx) max3100_sr() argument 207 etx = cpu_to_be16(tx); max3100_sr() 217 dev_dbg(&s->spi->dev, "%04x - %04x\n", tx, *rx); max3100_sr() 265 u16 tx, rx; max3100_work() local 293 tx = 0xffff; max3100_work() 295 tx = s->port.x_char; max3100_work() 296 s->port.icount.tx++; max3100_work() 300 tx = xmit->buf[xmit->tail]; max3100_work() 303 s->port.icount.tx++; max3100_work() 305 if (tx != 0xffff) { max3100_work() 306 max3100_calc_parity(s, &tx); max3100_work() 307 tx |= MAX3100_WD | (s->rts ? MAX3100_RTS : 0); max3100_work() 308 max3100_sr(s, tx, &rx); max3100_work() 578 u16 tx, rx; max3100_shutdown() local 580 tx = MAX3100_WC | MAX3100_SHDN; max3100_shutdown() 581 max3100_sr(s, tx, &rx); max3100_shutdown() 623 u16 tx, rx; max3100_startup() local 624 tx = 0x4001; max3100_startup() 625 max3100_sr(s, tx, &rx); max3100_startup() 748 u16 tx, rx; max3100_probe() local 813 tx = MAX3100_WC | MAX3100_SHDN; max3100_probe() 814 max3100_sr(max3100s[i], tx, &rx); max3100_probe() 869 u16 tx, rx; max3100_suspend() local 871 tx = MAX3100_WC | MAX3100_SHDN; max3100_suspend() 872 max3100_sr(s, tx, &rx); max3100_suspend()
|
H A D | bcm63xx_uart.c | 57 * tx interrupt mask / stat 60 * - tx fifo empty 61 * - tx fifo below threshold 93 * serial core request to check if uart tx fifo is empty 148 * serial core request to disable tx ASAP (used for flow control) 164 * serial core request to (re)enable tx 310 * fill tx fifo with chars to send, stop when fifo is about to be full 320 port->icount.tx++; bcm_uart_do_tx() 344 port->icount.tx++; bcm_uart_do_tx() 399 * enable rx & tx operation on uart 411 * disable rx & tx operation on uart 424 * clear all unread data in rx fifo and unsent data in tx fifo 430 /* empty rx and tx fifo */ bcm_uart_flush() 456 /* set rx/tx fifo thresh to fifo half size */ bcm_uart_startup()
|
/linux-4.1.27/drivers/net/wireless/ath/ath10k/ |
H A D | htt_tx.c | 68 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret); ath10k_htt_tx_alloc_msdu_id() 79 ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id); ath10k_htt_tx_free_msdu_id() 88 ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", ath10k_htt_tx_alloc() 94 htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev, ath10k_htt_tx_alloc() 445 /* Since HTT 3.0 there is no separate mgmt tx command. However in case ath10k_htt_tx() 446 * of mgmt tx using TX_FRM there is not tx fragment list. Instead of tx ath10k_htt_tx() 492 /* Normally all commands go through HTC which manages tx credits for ath10k_htt_tx() 493 * each endpoint and notifies when tx is completed. ath10k_htt_tx() 500 * received. That's why HTC tx completion handler itself is ignored by ath10k_htt_tx() 503 * There is simply no point in pushing HTT TX_FRM through HTC tx path ath10k_htt_tx() 527 /* Prevent firmware from sending up tx inspection requests. There's ath10k_htt_tx() 529 * it to simply rely a regular tx completion with discard status. ath10k_htt_tx() 544 "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n", ath10k_htt_tx() 547 ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", ath10k_htt_tx()
|
/linux-4.1.27/drivers/staging/comedi/drivers/ |
H A D | ni_usb6501.c | 180 u8 *tx = devpriv->usb_tx_buf; ni6501_port_command() local 192 memcpy(tx, READ_PORT_REQUEST, request_size); ni6501_port_command() 193 tx[14] = port[0]; ni6501_port_command() 198 memcpy(tx, WRITE_PORT_REQUEST, request_size); ni6501_port_command() 199 tx[14] = port[0]; ni6501_port_command() 200 tx[17] = bitmap[0]; ni6501_port_command() 205 memcpy(tx, SET_PORT_DIR_REQUEST, request_size); ni6501_port_command() 206 tx[14] = port[0]; ni6501_port_command() 207 tx[15] = port[1]; ni6501_port_command() 208 tx[16] = port[2]; ni6501_port_command() 262 u8 *tx = devpriv->usb_tx_buf; ni6501_counter_command() local 274 memcpy(tx, START_COUNTER_REQUEST, request_size); ni6501_counter_command() 279 memcpy(tx, STOP_COUNTER_REQUEST, request_size); ni6501_counter_command() 284 memcpy(tx, READ_COUNTER_REQUEST, request_size); ni6501_counter_command() 289 memcpy(tx, WRITE_COUNTER_REQUEST, request_size); ni6501_counter_command() 290 /* Setup tx packet: bytes 12,13,14,15 hold the */ ni6501_counter_command() 292 *((__be32 *)&tx[12]) = cpu_to_be32(*val); ni6501_counter_command()
|
/linux-4.1.27/drivers/media/rc/ |
H A D | nuvoton-cir.c | 367 /* clear out the hardware's cir tx fifo */ nvt_clear_tx_fifo() 404 /* clear hardware rx and tx fifos */ nvt_cir_regs_init() 555 spin_lock_irqsave(&nvt->tx.lock, flags); nvt_tx_ir() 558 nvt->tx.buf_count = (ret * sizeof(unsigned)); nvt_tx_ir() 560 memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count); nvt_tx_ir() 562 nvt->tx.cur_buf_num = 0; nvt_tx_ir() 570 nvt->tx.tx_state = ST_TX_REPLY; nvt_tx_ir() 579 spin_unlock_irqrestore(&nvt->tx.lock, flags); nvt_tx_ir() 581 wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST); nvt_tx_ir() 583 spin_lock_irqsave(&nvt->tx.lock, flags); nvt_tx_ir() 584 nvt->tx.tx_state = ST_TX_NONE; nvt_tx_ir() 585 spin_unlock_irqrestore(&nvt->tx.lock, flags); nvt_tx_ir() 743 spin_lock_irqsave(&nvt->tx.lock, flags); nvt_cir_tx_inactive() 744 tx_state = nvt->tx.tx_state; nvt_cir_tx_inactive() 745 spin_unlock_irqrestore(&nvt->tx.lock, flags); nvt_cir_tx_inactive() 801 /* We only do rx if not tx'ing */ nvt_cir_isr() 827 spin_lock_irqsave(&nvt->tx.lock, flags); nvt_cir_isr() 829 pos = nvt->tx.cur_buf_num; nvt_cir_isr() 830 count = nvt->tx.buf_count; nvt_cir_isr() 832 /* Write data into the hardware tx fifo while pos < count */ nvt_cir_isr() 834 nvt_cir_reg_write(nvt, nvt->tx.buf[pos], CIR_STXFIFO); nvt_cir_isr() 835 nvt->tx.cur_buf_num++; nvt_cir_isr() 842 spin_unlock_irqrestore(&nvt->tx.lock, flags); nvt_cir_isr() 847 spin_lock_irqsave(&nvt->tx.lock, flags); nvt_cir_isr() 848 if (nvt->tx.tx_state == ST_TX_REPLY) { nvt_cir_isr() 849 nvt->tx.tx_state = ST_TX_REQUEST; nvt_cir_isr() 850 wake_up(&nvt->tx.queue); nvt_cir_isr() 852 spin_unlock_irqrestore(&nvt->tx.lock, flags); nvt_cir_isr() 935 /* clear hardware rx and tx fifos */ nvt_disable_cir() 981 /* input device for IR remote (and tx) */ nvt_probe() 1022 spin_lock_init(&nvt->tx.lock); nvt_probe() 1027 init_waitqueue_head(&nvt->tx.queue); nvt_probe() 1066 /* tx bits */ nvt_probe() 1156 spin_lock_irqsave(&nvt->tx.lock, flags); nvt_suspend() 1157 nvt->tx.tx_state = ST_TX_NONE; nvt_suspend() 1158 spin_unlock_irqrestore(&nvt->tx.lock, flags); nvt_suspend()
|
H A D | rc-loopback.c | 57 dprintk("invalid tx mask: %u\n", mask); loop_set_tx_mask() 61 dprintk("setting tx mask: %u\n", mask); loop_set_tx_mask() 70 dprintk("setting tx carrier: %u\n", carrier); loop_set_tx_carrier() 113 dprintk("ignoring tx, carrier out of range\n"); loop_tx_ir() 123 dprintk("ignoring tx, rx mask mismatch\n"); loop_tx_ir()
|
/linux-4.1.27/drivers/net/ethernet/myricom/myri10ge/ |
H A D | myri10ge.c | 174 struct myri10ge_tx_buf tx; /* transmit ring */ member in struct:myri10ge_slice_state 355 MODULE_PARM_DESC(myri10ge_max_slices, "Max tx/rx queues"); 1151 ss->tx.req = 0; myri10ge_reset() 1152 ss->tx.done = 0; myri10ge_reset() 1153 ss->tx.pkt_start = 0; myri10ge_reset() 1154 ss->tx.pkt_done = 0; myri10ge_reset() 1159 ss->tx.wake_queue = 0; myri10ge_reset() 1160 ss->tx.stop_queue = 0; myri10ge_reset() 1521 struct myri10ge_tx_buf *tx = &ss->tx; myri10ge_tx_done() local 1526 while (tx->pkt_done != mcp_index) { myri10ge_tx_done() 1527 idx = tx->done & tx->mask; myri10ge_tx_done() 1528 skb = tx->info[idx].skb; myri10ge_tx_done() 1531 tx->info[idx].skb = NULL; myri10ge_tx_done() 1532 if (tx->info[idx].last) { myri10ge_tx_done() 1533 tx->pkt_done++; myri10ge_tx_done() 1534 tx->info[idx].last = 0; myri10ge_tx_done() 1536 tx->done++; myri10ge_tx_done() 1537 len = dma_unmap_len(&tx->info[idx], len); myri10ge_tx_done() 1538 dma_unmap_len_set(&tx->info[idx], len, 0); myri10ge_tx_done() 1545 dma_unmap_addr(&tx->info[idx], myri10ge_tx_done() 1551 dma_unmap_addr(&tx->info[idx], myri10ge_tx_done() 1560 * idle tx queue. If we can't get the lock we leave the queue myri10ge_tx_done() 1569 if (tx->req == tx->done) { myri10ge_tx_done() 1570 tx->queue_active = 0; myri10ge_tx_done() 1571 put_be32(htonl(1), tx->send_stop); myri10ge_tx_done() 1580 tx->req - tx->done < (tx->mask >> 1) && myri10ge_tx_done() 1582 tx->wake_queue++; myri10ge_tx_done() 1720 struct myri10ge_tx_buf *tx = &ss->tx; myri10ge_intr() local 1754 if (send_done_count != tx->pkt_done) myri10ge_intr() 1888 ring->tx_max_pending = mgp->ss[0].tx.mask + 1; myri10ge_get_ringparam() 2016 data[i++] = (unsigned int)ss->tx.pkt_start; myri10ge_get_ethtool_stats() 2017 data[i++] = (unsigned int)ss->tx.pkt_done; myri10ge_get_ethtool_stats() 2018 data[i++] = (unsigned int)ss->tx.req; myri10ge_get_ethtool_stats() 2019 data[i++] = (unsigned int)ss->tx.done; myri10ge_get_ethtool_stats() 2022 data[i++] = (unsigned int)ss->tx.wake_queue; myri10ge_get_ethtool_stats() 2023 data[i++] = (unsigned int)ss->tx.stop_queue; myri10ge_get_ethtool_stats() 2024 data[i++] = (unsigned int)ss->tx.linearized; myri10ge_get_ethtool_stats() 2140 ss->tx.mask = tx_ring_entries - 1; myri10ge_allocate_rings() 2148 * sizeof(*ss->tx.req_list); myri10ge_allocate_rings() 2149 ss->tx.req_bytes = kzalloc(bytes, GFP_KERNEL); myri10ge_allocate_rings() 2150 if (ss->tx.req_bytes == NULL) myri10ge_allocate_rings() 2154 ss->tx.req_list = (struct mcp_kreq_ether_send *) myri10ge_allocate_rings() 2155 ALIGN((unsigned long)ss->tx.req_bytes, 8); myri10ge_allocate_rings() 2156 ss->tx.queue_active = 0; myri10ge_allocate_rings() 2170 bytes = tx_ring_entries * sizeof(*ss->tx.info); myri10ge_allocate_rings() 2171 ss->tx.info = kzalloc(bytes, GFP_KERNEL); myri10ge_allocate_rings() 2172 if (ss->tx.info == NULL) myri10ge_allocate_rings() 2240 kfree(ss->tx.info); myri10ge_allocate_rings() 2249 kfree(ss->tx.req_bytes); myri10ge_allocate_rings() 2250 ss->tx.req_bytes = NULL; myri10ge_allocate_rings() 2251 ss->tx.req_list = NULL; myri10ge_allocate_rings() 2261 struct myri10ge_tx_buf *tx; myri10ge_free_rings() local 2265 if (ss->tx.req_list == NULL) myri10ge_free_rings() 2288 tx = &ss->tx; myri10ge_free_rings() 2289 while (tx->done != tx->req) { myri10ge_free_rings() 2290 idx = tx->done & tx->mask; myri10ge_free_rings() 2291 skb = tx->info[idx].skb; myri10ge_free_rings() 2294 tx->info[idx].skb = NULL; myri10ge_free_rings() 2295 tx->done++; myri10ge_free_rings() 2296 len = 
dma_unmap_len(&tx->info[idx], len); myri10ge_free_rings() 2297 dma_unmap_len_set(&tx->info[idx], len, 0); myri10ge_free_rings() 2303 dma_unmap_addr(&tx->info[idx], myri10ge_free_rings() 2309 dma_unmap_addr(&tx->info[idx], myri10ge_free_rings() 2318 kfree(ss->tx.info); myri10ge_free_rings() 2324 kfree(ss->tx.req_bytes); myri10ge_free_rings() 2325 ss->tx.req_bytes = NULL; myri10ge_free_rings() 2326 ss->tx.req_list = NULL; myri10ge_free_rings() 2424 ss->tx.lanai = (struct mcp_kreq_ether_send __iomem *) myri10ge_get_txrx() 2438 ss->tx.send_go = (__iomem __be32 *) myri10ge_get_txrx() 2440 ss->tx.send_stop = (__iomem __be32 *) myri10ge_get_txrx() 2583 * supports setting up the tx stats on non-zero myri10ge_open() 2666 if (mgp->ss[0].tx.req_bytes == NULL) myri10ge_close() 2713 myri10ge_submit_req_backwards(struct myri10ge_tx_buf *tx, myri10ge_submit_req_backwards() argument 2717 starting_slot = tx->req; myri10ge_submit_req_backwards() 2720 idx = (starting_slot + cnt) & tx->mask; myri10ge_submit_req_backwards() 2721 myri10ge_pio_copy(&tx->lanai[idx], &src[cnt], sizeof(*src)); myri10ge_submit_req_backwards() 2734 myri10ge_submit_req(struct myri10ge_tx_buf *tx, struct mcp_kreq_ether_send *src, myri10ge_submit_req() argument 2742 idx = tx->req & tx->mask; myri10ge_submit_req() 2747 dst = dstp = &tx->lanai[idx]; myri10ge_submit_req() 2750 if ((idx + cnt) < tx->mask) { myri10ge_submit_req() 2760 myri10ge_submit_req_backwards(tx, src, cnt); myri10ge_submit_req() 2772 tx->req += cnt; myri10ge_submit_req() 2777 struct myri10ge_tx_buf *tx, int idx) myri10ge_unmap_tx_dma() 2783 last_idx = (idx + 1) & tx->mask; myri10ge_unmap_tx_dma() 2784 idx = tx->req & tx->mask; myri10ge_unmap_tx_dma() 2786 len = dma_unmap_len(&tx->info[idx], len); myri10ge_unmap_tx_dma() 2788 if (tx->info[idx].skb != NULL) myri10ge_unmap_tx_dma() 2790 dma_unmap_addr(&tx->info[idx], myri10ge_unmap_tx_dma() 2795 dma_unmap_addr(&tx->info[idx], myri10ge_unmap_tx_dma() 2798 dma_unmap_len_set(&tx->info[idx], len, 0); myri10ge_unmap_tx_dma() 2799 tx->info[idx].skb = NULL; myri10ge_unmap_tx_dma() 2801 idx = (idx + 1) & tx->mask; myri10ge_unmap_tx_dma() 2821 struct myri10ge_tx_buf *tx; myri10ge_xmit() local 2836 tx = &ss->tx; myri10ge_xmit() 2839 req = tx->req_list; myri10ge_xmit() 2840 avail = tx->mask - 1 - (tx->req - tx->done); myri10ge_xmit() 2852 tx->stop_queue++; myri10ge_xmit() 2924 idx = tx->req & tx->mask; myri10ge_xmit() 2925 tx->info[idx].skb = skb; myri10ge_xmit() 2926 dma_unmap_addr_set(&tx->info[idx], bus, bus); myri10ge_xmit() 2927 dma_unmap_len_set(&tx->info[idx], len, len); myri10ge_xmit() 3030 myri10ge_unmap_tx_dma(mgp, tx, idx); myri10ge_xmit() 3033 idx = (count + tx->req) & tx->mask; myri10ge_xmit() 3034 dma_unmap_addr_set(&tx->info[idx], bus, bus); myri10ge_xmit() 3035 dma_unmap_len_set(&tx->info[idx], len, len); myri10ge_xmit() 3045 idx = ((count - 1) + tx->req) & tx->mask; myri10ge_xmit() 3046 tx->info[idx].last = 1; myri10ge_xmit() 3047 myri10ge_submit_req(tx, tx->req_list, count); myri10ge_xmit() 3048 /* if using multiple tx queues, make sure NIC polls the myri10ge_xmit() 3050 if ((mgp->dev->real_num_tx_queues > 1) && tx->queue_active == 0) { myri10ge_xmit() 3051 tx->queue_active = 1; myri10ge_xmit() 3052 put_be32(htonl(1), tx->send_go); myri10ge_xmit() 3056 tx->pkt_start++; myri10ge_xmit() 3058 tx->stop_queue++; myri10ge_xmit() 3064 myri10ge_unmap_tx_dma(mgp, tx, idx); myri10ge_xmit() 3074 tx->linearized++; myri10ge_xmit() 3573 if (ss->tx.req != ss->tx.done && myri10ge_check_slice() 3574 ss->tx.done == 
ss->watchdog_tx_done && myri10ge_check_slice() 3584 slice, ss->tx.queue_active, ss->tx.req, myri10ge_check_slice() 3585 ss->tx.done, ss->tx.pkt_start, myri10ge_check_slice() 3586 ss->tx.pkt_done, myri10ge_check_slice() 3593 if (ss->watchdog_tx_done != ss->tx.done || myri10ge_check_slice() 3597 ss->watchdog_tx_done = ss->tx.done; myri10ge_check_slice() 3598 ss->watchdog_tx_req = ss->tx.req; myri10ge_check_slice() 4136 dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, MTRR %s, WC Enabled\n", myri10ge_probe() 4140 dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, MTRR %s, WC Enabled\n", myri10ge_probe() 2776 myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp, struct myri10ge_tx_buf *tx, int idx) myri10ge_unmap_tx_dma() argument
|
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/ |
H A D | gk104.c | 27 gk104_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx) gk104_aux_stat() argument 31 for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) { gk104_aux_stat() 35 if ((stat & (8 << (i * 4)))) *tx |= 1 << i; gk104_aux_stat()
|
/linux-4.1.27/drivers/net/ethernet/samsung/sxgbe/ |
H A D | sxgbe_desc.h | 60 /* tx write back Desc 2,3 */ 164 /* Invoked by the xmit function to prepare the tx descriptor */ 182 /* Invoked by the xmit function to close the tx descriptor */ 185 /* Clean the tx descriptor as soon as the tx irq is received */ 188 /* Clear interrupt on tx frame completion. When this bit is 193 /* Last tx segment reports the transmit status */ 199 /* Set tx timestamp enable bit */ 202 /* get tx timestamp status */
|
H A D | sxgbe_desc.c | 72 /* Invoked by the xmit function to close the tx descriptor */ sxgbe_close_tx_desc() 79 /* Clean the tx descriptor as soon as the tx irq is received */ sxgbe_release_tx_desc() 85 /* Clear interrupt on tx frame completion. When this bit is 93 /* Last tx segment reports the transmit status */ sxgbe_get_tx_ls() 105 /* Set tx timestamp enable bit */ sxgbe_tx_enable_tstamp() 111 /* get tx timestamp status */ sxgbe_get_tx_timestamp_status()
|
/linux-4.1.27/arch/sh/kernel/cpu/sh3/ |
H A D | serial-sh7720.c | 24 /* Clear PTCR bit 5-2; enable only tx and rx */ sh7720_sci_init_pins()
|
/linux-4.1.27/Documentation/spi/ |
H A D | spidev_test.c | 103 static void transfer(int fd, uint8_t const *tx, uint8_t const *rx, size_t len) transfer() argument 108 .tx_buf = (unsigned long)tx, transfer() 136 hex_dump(tx, len, 32, "TX"); transfer() 153 " -v --verbose Verbose (show tx buffer)\n" print_usage() 256 uint8_t *tx; main() local 305 tx = malloc(size); main() 307 size = unescape((char *)tx, input_tx, size); main() 308 transfer(fd, tx, rx, size); main() 310 free(tx); main()
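As a companion to the transfer() helper above, here is a hedged, minimal full-duplex spidev transaction; the /dev/spidev0.0 node, the 500 kHz clock and the probe bytes are illustrative assumptions rather than anything mandated by the test program.

#include <fcntl.h>
#include <linux/spi/spidev.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	uint8_t tx[4] = { 0x9f, 0x00, 0x00, 0x00 };	/* example command + dummy clocks */
	uint8_t rx[4] = { 0 };
	struct spi_ioc_transfer tr;
	int fd = open("/dev/spidev0.0", O_RDWR);	/* assumed device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&tr, 0, sizeof(tr));
	tr.tx_buf = (unsigned long)tx;		/* tx and rx clock out together */
	tr.rx_buf = (unsigned long)rx;
	tr.len = sizeof(tx);
	tr.speed_hz = 500000;
	tr.bits_per_word = 8;

	if (ioctl(fd, SPI_IOC_MESSAGE(1), &tr) < 1)
		perror("SPI_IOC_MESSAGE");
	else
		printf("rx: %02x %02x %02x %02x\n", rx[0], rx[1], rx[2], rx[3]);

	close(fd);
	return 0;
}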
|
/linux-4.1.27/drivers/net/wireless/iwlegacy/ |
H A D | 3945-debug.c | 325 struct iwl39_stats_tx *tx, *accum_tx, *delta_tx, *max_tx; il3945_ucode_tx_stats_read() local 341 tx = &il->_3945.stats.tx; il3945_ucode_tx_stats_read() 342 accum_tx = &il->_3945.accum_stats.tx; il3945_ucode_tx_stats_read() 343 delta_tx = &il->_3945.delta_stats.tx; il3945_ucode_tx_stats_read() 344 max_tx = &il->_3945.max_delta.tx; il3945_ucode_tx_stats_read() 354 le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt, il3945_ucode_tx_stats_read() 359 le32_to_cpu(tx->rx_detected_cnt), il3945_ucode_tx_stats_read() 365 le32_to_cpu(tx->bt_prio_defer_cnt), il3945_ucode_tx_stats_read() 371 le32_to_cpu(tx->bt_prio_kill_cnt), il3945_ucode_tx_stats_read() 377 le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt, il3945_ucode_tx_stats_read() 382 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, il3945_ucode_tx_stats_read() 387 le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout, il3945_ucode_tx_stats_read() 392 le32_to_cpu(tx->expected_ack_cnt), il3945_ucode_tx_stats_read() 398 le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt, il3945_ucode_tx_stats_read()
|
H A D | 4965-debug.c | 479 struct stats_tx *tx, *accum_tx, *delta_tx, *max_tx; il4965_ucode_tx_stats_read() local 494 tx = &il->_4965.stats.tx; il4965_ucode_tx_stats_read() 495 accum_tx = &il->_4965.accum_stats.tx; il4965_ucode_tx_stats_read() 496 delta_tx = &il->_4965.delta_stats.tx; il4965_ucode_tx_stats_read() 497 max_tx = &il->_4965.max_delta.tx; il4965_ucode_tx_stats_read() 503 le32_to_cpu(tx->preamble_cnt), accum_tx->preamble_cnt, il4965_ucode_tx_stats_read() 507 le32_to_cpu(tx->rx_detected_cnt), il4965_ucode_tx_stats_read() 512 le32_to_cpu(tx->bt_prio_defer_cnt), il4965_ucode_tx_stats_read() 517 le32_to_cpu(tx->bt_prio_kill_cnt), il4965_ucode_tx_stats_read() 522 le32_to_cpu(tx->few_bytes_cnt), accum_tx->few_bytes_cnt, il4965_ucode_tx_stats_read() 526 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout, il4965_ucode_tx_stats_read() 530 le32_to_cpu(tx->ack_timeout), accum_tx->ack_timeout, il4965_ucode_tx_stats_read() 534 le32_to_cpu(tx->expected_ack_cnt), il4965_ucode_tx_stats_read() 539 le32_to_cpu(tx->actual_ack_cnt), accum_tx->actual_ack_cnt, il4965_ucode_tx_stats_read() 543 le32_to_cpu(tx->dump_msdu_cnt), accum_tx->dump_msdu_cnt, il4965_ucode_tx_stats_read() 548 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt), il4965_ucode_tx_stats_read() 555 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt), il4965_ucode_tx_stats_read() 562 le32_to_cpu(tx->cts_timeout_collision), il4965_ucode_tx_stats_read() 569 le32_to_cpu(tx->ack_or_ba_timeout_collision), il4965_ucode_tx_stats_read() 575 le32_to_cpu(tx->agg.ba_timeout), accum_tx->agg.ba_timeout, il4965_ucode_tx_stats_read() 580 le32_to_cpu(tx->agg.ba_reschedule_frames), il4965_ucode_tx_stats_read() 587 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt), il4965_ucode_tx_stats_read() 594 le32_to_cpu(tx->agg.scd_query_no_agg), il4965_ucode_tx_stats_read() 600 le32_to_cpu(tx->agg.scd_query_agg), il4965_ucode_tx_stats_read() 606 le32_to_cpu(tx->agg.scd_query_mismatch), il4965_ucode_tx_stats_read() 612 le32_to_cpu(tx->agg.frame_not_ready), il4965_ucode_tx_stats_read() 618 le32_to_cpu(tx->agg.underrun), accum_tx->agg.underrun, il4965_ucode_tx_stats_read() 622 le32_to_cpu(tx->agg.bt_prio_kill), il4965_ucode_tx_stats_read() 627 le32_to_cpu(tx->agg.rx_ba_rsp_cnt), il4965_ucode_tx_stats_read()
|
/linux-4.1.27/drivers/infiniband/hw/ipath/ |
H A D | ipath_verbs.c | 1004 struct ipath_verbs_txreq *tx = NULL; get_txreq() local 1012 tx = list_entry(l, struct ipath_verbs_txreq, txreq.list); get_txreq() 1015 return tx; get_txreq() 1019 struct ipath_verbs_txreq *tx) put_txreq() 1024 list_add(&tx->txreq.list, &dev->txreq_free); put_txreq() 1030 struct ipath_verbs_txreq *tx = cookie; sdma_complete() local 1031 struct ipath_qp *qp = tx->qp; sdma_complete() 1039 if (tx->wqe) sdma_complete() 1040 ipath_send_complete(qp, tx->wqe, ibs); sdma_complete() 1047 } else if (tx->wqe) { sdma_complete() 1049 ipath_send_complete(qp, tx->wqe, ibs); sdma_complete() 1053 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEBUF) sdma_complete() 1054 kfree(tx->txreq.map_addr); sdma_complete() 1055 put_txreq(dev, tx); sdma_complete() 1104 struct ipath_verbs_txreq *tx; ipath_verbs_send_dma() local 1110 tx = qp->s_tx; ipath_verbs_send_dma() 1111 if (tx) { ipath_verbs_send_dma() 1115 ret = ipath_sdma_verbs_send(dd, tx->ss, tx->len, tx); ipath_verbs_send_dma() 1117 qp->s_tx = tx; ipath_verbs_send_dma() 1123 tx = get_txreq(dev); ipath_verbs_send_dma() 1124 if (!tx) { ipath_verbs_send_dma() 1137 tx->qp = qp; ipath_verbs_send_dma() 1139 tx->wqe = qp->s_wqe; ipath_verbs_send_dma() 1140 tx->txreq.callback = sdma_complete; ipath_verbs_send_dma() 1141 tx->txreq.callback_cookie = tx; ipath_verbs_send_dma() 1142 tx->txreq.flags = IPATH_SDMA_TXREQ_F_HEADTOHOST | ipath_verbs_send_dma() 1145 tx->txreq.flags |= IPATH_SDMA_TXREQ_F_USELARGEBUF; ipath_verbs_send_dma() 1150 tx->txreq.flags |= IPATH_SDMA_TXREQ_F_VL15; ipath_verbs_send_dma() 1164 tx->hdr.pbc[0] = cpu_to_le32(plen); ipath_verbs_send_dma() 1165 tx->hdr.pbc[1] = cpu_to_le32(control); ipath_verbs_send_dma() 1166 memcpy(&tx->hdr.hdr, hdr, hdrwords << 2); ipath_verbs_send_dma() 1167 tx->txreq.sg_count = ndesc; ipath_verbs_send_dma() 1168 tx->map_len = (hdrwords + 2) << 2; ipath_verbs_send_dma() 1169 tx->txreq.map_addr = &tx->hdr; ipath_verbs_send_dma() 1171 ret = ipath_sdma_verbs_send(dd, ss, dwords, tx); ipath_verbs_send_dma() 1174 tx->ss = ss; ipath_verbs_send_dma() 1175 tx->len = dwords; ipath_verbs_send_dma() 1176 qp->s_tx = tx; ipath_verbs_send_dma() 1183 tx->map_len = (plen + 1) << 2; ipath_verbs_send_dma() 1184 piobuf = kmalloc(tx->map_len, GFP_ATOMIC); ipath_verbs_send_dma() 1189 tx->txreq.map_addr = piobuf; ipath_verbs_send_dma() 1190 tx->txreq.flags |= IPATH_SDMA_TXREQ_F_FREEBUF; ipath_verbs_send_dma() 1191 tx->txreq.sg_count = 1; ipath_verbs_send_dma() 1199 ret = ipath_sdma_verbs_send(dd, NULL, 0, tx); ipath_verbs_send_dma() 1206 tx->ss = NULL; ipath_verbs_send_dma() 1207 tx->len = 0; ipath_verbs_send_dma() 1208 qp->s_tx = tx; ipath_verbs_send_dma() 1217 put_txreq(dev, tx); ipath_verbs_send_dma() 1993 struct ipath_verbs_txreq *tx; ipath_register_ib_device() local 2006 tx = kmalloc(dd->ipath_sdma_descq_cnt * sizeof *tx, ipath_register_ib_device() 2008 if (tx == NULL) { ipath_register_ib_device() 2013 tx = NULL; ipath_register_ib_device() 2014 idev->txreq_bufs = tx; ipath_register_ib_device() 2088 for (i = 0; i < dd->ipath_sdma_descq_cnt; i++, tx++) ipath_register_ib_device() 2089 list_add(&tx->txreq.list, &idev->txreq_free); ipath_register_ib_device() 1018 put_txreq(struct ipath_ibdev *dev, struct ipath_verbs_txreq *tx) put_txreq() argument
|
H A D | ipath_sdma.c | 666 struct ipath_verbs_txreq *tx) ipath_sdma_verbs_send() 678 if ((tx->map_len + (dwords<<2)) > dd->ipath_ibmaxlen) { ipath_sdma_verbs_send() 680 tx->map_len + (dwords<<2), dd->ipath_ibmaxlen); ipath_sdma_verbs_send() 693 if (tx->txreq.sg_count > ipath_sdma_descq_freecnt(dd)) { ipath_sdma_verbs_send() 700 addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr, ipath_sdma_verbs_send() 701 tx->map_len, DMA_TO_DEVICE); ipath_sdma_verbs_send() 705 dwoffset = tx->map_len >> 2; ipath_sdma_verbs_send() 710 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF) ipath_sdma_verbs_send() 719 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_FREEDESC) ipath_sdma_verbs_send() 720 tx->txreq.start_idx = tail; ipath_sdma_verbs_send() 747 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_USELARGEBUF) ipath_sdma_verbs_send() 786 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_INTREQ) { ipath_sdma_verbs_send() 795 tx->txreq.next_descq_idx = tail; ipath_sdma_verbs_send() 796 tx->txreq.callback_status = IPATH_SDMA_TXREQ_S_OK; ipath_sdma_verbs_send() 798 dd->ipath_sdma_descq_added += tx->txreq.sg_count; ipath_sdma_verbs_send() 799 list_add_tail(&tx->txreq.list, &dd->ipath_sdma_activelist); ipath_sdma_verbs_send() 800 if (tx->txreq.flags & IPATH_SDMA_TXREQ_F_VL15) ipath_sdma_verbs_send() 664 ipath_sdma_verbs_send(struct ipath_devdata *dd, struct ipath_sge_state *ss, u32 dwords, struct ipath_verbs_txreq *tx) ipath_sdma_verbs_send() argument
|
/linux-4.1.27/drivers/gpio/ |
H A D | gpio-mcp23s08.c | 172 u8 tx[2], rx[1]; mcp23s08_read() local 175 tx[0] = mcp->addr | 0x01; mcp23s08_read() 176 tx[1] = reg; mcp23s08_read() 177 status = spi_write_then_read(mcp->data, tx, sizeof(tx), rx, sizeof(rx)); mcp23s08_read() 183 u8 tx[3]; mcp23s08_write() local 185 tx[0] = mcp->addr; mcp23s08_write() 186 tx[1] = reg; mcp23s08_write() 187 tx[2] = val; mcp23s08_write() 188 return spi_write_then_read(mcp->data, tx, sizeof(tx), NULL, 0); mcp23s08_write() 194 u8 tx[2], *tmp; mcp23s08_read_regs() local 199 tx[0] = mcp->addr | 0x01; mcp23s08_read_regs() 200 tx[1] = reg; mcp23s08_read_regs() 203 status = spi_write_then_read(mcp->data, tx, sizeof(tx), tmp, n); mcp23s08_read_regs() 213 u8 tx[2], rx[2]; mcp23s17_read() local 216 tx[0] = mcp->addr | 0x01; mcp23s17_read() 217 tx[1] = reg << 1; mcp23s17_read() 218 status = spi_write_then_read(mcp->data, tx, sizeof(tx), rx, sizeof(rx)); mcp23s17_read() 224 u8 tx[4]; mcp23s17_write() local 226 tx[0] = mcp->addr; mcp23s17_write() 227 tx[1] = reg << 1; mcp23s17_write() 228 tx[2] = val; mcp23s17_write() 229 tx[3] = val >> 8; mcp23s17_write() 230 return spi_write_then_read(mcp->data, tx, sizeof(tx), NULL, 0); mcp23s17_write() 236 u8 tx[2]; mcp23s17_read_regs() local 241 tx[0] = mcp->addr | 0x01; mcp23s17_read_regs() 242 tx[1] = reg << 1; mcp23s17_read_regs() 244 status = spi_write_then_read(mcp->data, tx, sizeof(tx), mcp23s17_read_regs()
|
/linux-4.1.27/drivers/net/ethernet/intel/ixgbe/ |
H A D | ixgbe_dcb_nl.c | 55 int tx = DCB_TX_CONFIG; ixgbe_copy_dcb_cfg() local 73 if (dst->path[tx].prio_type != src->path[tx].prio_type) { ixgbe_copy_dcb_cfg() 74 dst->path[tx].prio_type = src->path[tx].prio_type; ixgbe_copy_dcb_cfg() 78 if (dst->path[tx].bwg_id != src->path[tx].bwg_id) { ixgbe_copy_dcb_cfg() 79 dst->path[tx].bwg_id = src->path[tx].bwg_id; ixgbe_copy_dcb_cfg() 83 if (dst->path[tx].bwg_percent != src->path[tx].bwg_percent) { ixgbe_copy_dcb_cfg() 84 dst->path[tx].bwg_percent = src->path[tx].bwg_percent; ixgbe_copy_dcb_cfg() 88 if (dst->path[tx].up_to_tc_bitmap != ixgbe_copy_dcb_cfg() 89 src->path[tx].up_to_tc_bitmap) { ixgbe_copy_dcb_cfg() 90 dst->path[tx].up_to_tc_bitmap = ixgbe_copy_dcb_cfg() 91 src->path[tx].up_to_tc_bitmap; ixgbe_copy_dcb_cfg() 120 if (dcfg->bw_percentage[tx][j] != scfg->bw_percentage[tx][j]) { ixgbe_copy_dcb_cfg() 121 dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j]; ixgbe_copy_dcb_cfg()
|
/linux-4.1.27/drivers/dma/sh/ |
H A D | shdma.h | 61 #define tx_to_sh_desc(tx) container_of(tx, struct sh_desc, async_tx)
|
H A D | shdma-base.c | 73 static dma_cookie_t shdma_tx_submit(struct dma_async_tx_descriptor *tx) shdma_tx_submit() argument 76 container_of(tx, struct shdma_desc, async_tx); shdma_tx_submit() 77 struct shdma_chan *schan = to_shdma_chan(tx->chan); shdma_tx_submit() 78 dma_async_tx_callback callback = tx->callback; shdma_tx_submit() 86 cookie = dma_cookie_assign(tx); shdma_tx_submit() 102 chunk->async_tx.callback_param = tx->callback_param; shdma_tx_submit() 111 tx->cookie, &chunk->async_tx, schan->id); shdma_tx_submit() 339 struct dma_async_tx_descriptor *tx = &desc->async_tx; __ld_cleanup() local 341 BUG_ON(tx->cookie > 0 && tx->cookie != desc->cookie); __ld_cleanup() 355 if (tx->cookie > 0) __ld_cleanup() 356 cookie = tx->cookie; __ld_cleanup() 368 if (desc->mark == DESC_COMPLETED && tx->callback) { __ld_cleanup() 370 callback = tx->callback; __ld_cleanup() 371 param = tx->callback_param; __ld_cleanup() 373 tx->cookie, tx, schan->id); __ld_cleanup() 378 if (tx->cookie > 0 || tx->cookie == -EBUSY) { __ld_cleanup() 380 BUG_ON(tx->cookie < 0); __ld_cleanup() 383 head_acked = async_tx_test_ack(tx); __ld_cleanup() 396 tx, tx->cookie); __ld_cleanup() 583 * cookie is at first set to -EBUSY, at tx-submit to a positive shdma_prep_sg() 887 struct dma_async_tx_descriptor *tx = &sdesc->async_tx; shdma_for_each_chan() local 889 if (tx->callback) shdma_for_each_chan() 890 tx->callback(tx->callback_param); shdma_for_each_chan()
|
/linux-4.1.27/arch/mips/netlogic/xlp/ |
H A D | cop2-ex.c | 52 : "r"(r->tx), "r"(r->rx)); nlm_cop2_save() 82 : : "m"(*r), "r"(r->tx), "r"(r->rx)); nlm_cop2_restore()
|
/linux-4.1.27/drivers/dma/ioat/ |
H A D | dma_v3.c | 395 struct dma_async_tx_descriptor *tx; __cleanup() local 406 tx = &desc->txd; __cleanup() 407 if (tx->cookie) { __cleanup() 408 dma_cookie_complete(tx); __cleanup() 409 dma_descriptor_unmap(tx); __cleanup() 410 if (tx->callback) { __cleanup() 411 tx->callback(tx->callback_param); __cleanup() 412 tx->callback = NULL; __cleanup() 416 if (tx->phys == phys_complete) __cleanup() 497 struct dma_async_tx_descriptor *tx; ioat3_eh() local 544 tx = &desc->txd; ioat3_eh() 545 if (tx->cookie) { ioat3_eh() 546 dma_cookie_complete(tx); ioat3_eh() 547 dma_descriptor_unmap(tx); ioat3_eh() 548 if (tx->callback) { ioat3_eh() 549 tx->callback(tx->callback_param); ioat3_eh() 550 tx->callback = NULL; ioat3_eh() 1224 struct dma_async_tx_descriptor *tx; ioat_xor_val_self_test() local 1295 tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs, ioat_xor_val_self_test() 1299 if (!tx) { ioat_xor_val_self_test() 1305 async_tx_ack(tx); ioat_xor_val_self_test() 1307 tx->callback = ioat3_dma_test_callback; ioat_xor_val_self_test() 1308 tx->callback_param = &cmp; ioat_xor_val_self_test() 1309 cookie = tx->tx_submit(tx); ioat_xor_val_self_test() 1363 tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, ioat_xor_val_self_test() 1366 if (!tx) { ioat_xor_val_self_test() 1372 async_tx_ack(tx); ioat_xor_val_self_test() 1374 tx->callback = ioat3_dma_test_callback; ioat_xor_val_self_test() 1375 tx->callback_param = &cmp; ioat_xor_val_self_test() 1376 cookie = tx->tx_submit(tx); ioat_xor_val_self_test() 1416 tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs, ioat_xor_val_self_test() 1419 if (!tx) { ioat_xor_val_self_test() 1425 async_tx_ack(tx); ioat_xor_val_self_test() 1427 tx->callback = ioat3_dma_test_callback; ioat_xor_val_self_test() 1428 tx->callback_param = &cmp; ioat_xor_val_self_test() 1429 cookie = tx->tx_submit(tx); ioat_xor_val_self_test()
|
H A D | dma.c | 226 static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) ioat1_tx_submit() argument 228 struct dma_chan *c = tx->chan; ioat1_tx_submit() 230 struct ioat_desc_sw *desc = tx_to_ioat_desc(tx); ioat1_tx_submit() 238 cookie = dma_cookie_assign(tx); ioat1_tx_submit() 606 struct dma_async_tx_descriptor *tx; __cleanup() local 615 tx = &desc->txd; __cleanup() 622 if (tx->cookie) { __cleanup() 623 dma_cookie_complete(tx); __cleanup() 624 dma_descriptor_unmap(tx); __cleanup() 626 if (tx->callback) { __cleanup() 627 tx->callback(tx->callback_param); __cleanup() 628 tx->callback = NULL; __cleanup() 632 if (tx->phys != phys_complete) { __cleanup() 637 if (async_tx_test_ack(tx)) __cleanup() 823 struct dma_async_tx_descriptor *tx; ioat_dma_self_test() local 864 tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src, ioat_dma_self_test() 866 if (!tx) { ioat_dma_self_test() 872 async_tx_ack(tx); ioat_dma_self_test() 874 tx->callback = ioat_dma_test_callback; ioat_dma_self_test() 875 tx->callback_param = &cmp; ioat_dma_self_test() 876 cookie = tx->tx_submit(tx); ioat_dma_self_test()
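The ioat_dma_self_test() fragment above exercises the generic dmaengine client flow: prepare a descriptor, ack it, submit it for a cookie, then kick the channel. A condensed, hedged sketch of that flow follows; chan, dst and src are assumed to have been obtained and DMA-mapped by the caller, and the helper name is illustrative.

#include <linux/dmaengine.h>
#include <linux/errno.h>

/* Hedged sketch, not a driver helper: submit one memcpy and wait for it. */
static int example_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
			      dma_addr_t src, size_t len)
{
	struct dma_device *dma = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dma->device_prep_dma_memcpy(chan, dst, src, len,
					 DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	async_tx_ack(tx);			/* no dependent descriptors follow */
	cookie = tx->tx_submit(tx);		/* place it on the ring */
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);		/* kick the hardware */
	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
}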
|
H A D | dma_v2.c | 130 struct dma_async_tx_descriptor *tx; __cleanup() local 144 tx = &desc->txd; __cleanup() 146 if (tx->cookie) { __cleanup() 147 dma_descriptor_unmap(tx); __cleanup() 148 dma_cookie_complete(tx); __cleanup() 149 if (tx->callback) { __cleanup() 150 tx->callback(tx->callback_param); __cleanup() 151 tx->callback = NULL; __cleanup() 155 if (tx->phys == phys_complete) __cleanup() 411 static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) ioat2_tx_submit_unlock() argument 413 struct dma_chan *c = tx->chan; ioat2_tx_submit_unlock() 418 cookie = dma_cookie_assign(tx); ioat2_tx_submit_unlock()
|
/linux-4.1.27/drivers/isdn/sc/ |
H A D | message.h | 38 #define IS_CM_MESSAGE(mesg, tx, cx, dx) \ 39 ((mesg.type == cmRspType##tx) \ 46 #define IS_CE_MESSAGE(mesg, tx, cx, dx) \ 47 ((mesg.type == ceRspType##tx) \ 49 && (mesg.code == ceRsp##tx##dx))
|
/linux-4.1.27/drivers/net/ |
H A D | xen-netfront.c | 87 /* IRQ name is queue name with "-tx" or "-rx" appended */ 111 char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */ 115 struct xen_netif_tx_front_ring tx; member in struct:netfront_queue 119 * {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries 239 return (queue->tx.req_prod_pvt - queue->tx.rsp_cons) < netfront_tx_slot_available() 373 prod = queue->tx.sring->rsp_prod; xennet_tx_buf_gc() 376 for (cons = queue->tx.rsp_cons; cons != prod; cons++) { xennet_tx_buf_gc() 379 txrsp = RING_GET_RESPONSE(&queue->tx, cons); xennet_tx_buf_gc() 401 queue->tx.rsp_cons = prod; xennet_tx_buf_gc() 411 queue->tx.sring->rsp_event = xennet_tx_buf_gc() 412 prod + ((queue->tx.sring->req_prod - prod) >> 1) + 1; xennet_tx_buf_gc() 414 } while ((cons == prod) && (prod != queue->tx.sring->rsp_prod)); xennet_tx_buf_gc() 424 struct xen_netif_tx_request *tx; xennet_make_one_txreq() local 430 tx = RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); xennet_make_one_txreq() 441 tx->id = id; xennet_make_one_txreq() 442 tx->gref = ref; xennet_make_one_txreq() 443 tx->offset = offset; xennet_make_one_txreq() 444 tx->size = len; xennet_make_one_txreq() 445 tx->flags = 0; xennet_make_one_txreq() 447 return tx; xennet_make_one_txreq() 451 struct netfront_queue *queue, struct xen_netif_tx_request *tx, xennet_make_txreqs() 460 tx->flags |= XEN_NETTXF_more_data; xennet_make_txreqs() 461 tx = xennet_make_one_txreq(queue, skb_get(skb), xennet_make_txreqs() 465 len -= tx->size; xennet_make_txreqs() 468 return tx; xennet_make_txreqs() 518 struct xen_netif_tx_request *tx, *first_tx; xennet_start_xmit() local 569 first_tx = tx = xennet_make_one_txreq(queue, skb, xennet_start_xmit() 573 len -= tx->size; xennet_start_xmit() 577 tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated; xennet_start_xmit() 580 tx->flags |= XEN_NETTXF_data_validated; xennet_start_xmit() 587 RING_GET_REQUEST(&queue->tx, queue->tx.req_prod_pvt++); xennet_start_xmit() 589 tx->flags |= XEN_NETTXF_extra_info; xennet_start_xmit() 603 tx = xennet_make_txreqs(queue, tx, skb, page, offset, len); xennet_start_xmit() 608 tx = xennet_make_txreqs(queue, tx, skb, xennet_start_xmit() 616 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->tx, notify); xennet_start_xmit() 1365 xennet_end_access(queue->tx_ring_ref, queue->tx.sring); xennet_disconnect_backend() 1370 queue->tx.sring = NULL; xennet_disconnect_backend() 1450 "%s-tx", queue->name); setup_netfront_split() 1493 queue->tx.sring = NULL; setup_netfront() 1498 xenbus_dev_fatal(dev, err, "allocating tx ring page"); setup_netfront() 1502 FRONT_RING_INIT(&queue->tx, txs, PAGE_SIZE); setup_netfront() 1585 /* A grant for every tx ring slot */ xennet_init_queue() 1588 pr_alert("can't alloc tx grant refs\n"); xennet_init_queue() 1638 err = xenbus_printf(*xbt, path, "tx-ring-ref", "%u", write_queue_xenstore_keys() 1641 message = "writing tx-ring-ref"; write_queue_xenstore_keys() 1666 "event-channel-tx", "%u", queue->tx_evtchn); write_queue_xenstore_keys() 1668 message = "writing event-channel-tx"; write_queue_xenstore_keys() 450 xennet_make_txreqs( struct netfront_queue *queue, struct xen_netif_tx_request *tx, struct sk_buff *skb, struct page *page, unsigned int offset, unsigned int len) xennet_make_txreqs() argument
|
/linux-4.1.27/drivers/net/vmxnet3/ |
H A D | vmxnet3_ethtool.c | 41 { " TSO pkts tx", offsetof(struct UPT1_TxStats, TSOPktsTxOK) }, 42 { " TSO bytes tx", offsetof(struct UPT1_TxStats, TSOBytesTxOK) }, 43 { " ucast pkts tx", offsetof(struct UPT1_TxStats, ucastPktsTxOK) }, 44 { " ucast bytes tx", offsetof(struct UPT1_TxStats, ucastBytesTxOK) }, 45 { " mcast pkts tx", offsetof(struct UPT1_TxStats, mcastPktsTxOK) }, 46 { " mcast bytes tx", offsetof(struct UPT1_TxStats, mcastBytesTxOK) }, 47 { " bcast pkts tx", offsetof(struct UPT1_TxStats, bcastPktsTxOK) }, 48 { " bcast bytes tx", offsetof(struct UPT1_TxStats, bcastBytesTxOK) }, 49 { " pkts tx err", offsetof(struct UPT1_TxStats, pktsTxError) }, 50 { " pkts tx discard", offsetof(struct UPT1_TxStats, pktsTxDiscard) }, 57 {" drv dropped tx total", offsetof(struct vmxnet3_tq_driver_stats, 111 { "tx timeout count", offsetof(struct vmxnet3_adapter, 530 /* recreate the rx queue and the tx queue based on the vmxnet3_set_ringparam()
|
/linux-4.1.27/drivers/input/gameport/ |
H A D | gameport.c | 84 unsigned int i, t, tx; gameport_measure_speed() local 91 tx = ~0; gameport_measure_speed() 103 if (t < tx) gameport_measure_speed() 104 tx = t; gameport_measure_speed() 109 if (tx) gameport_measure_speed() 110 t /= tx; gameport_measure_speed() 118 unsigned int i, t, t1, t2, t3, tx; old_gameport_measure_speed() local 124 tx = 1 << 30; old_gameport_measure_speed() 134 if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t; old_gameport_measure_speed() 138 return 59659 / (tx < 1 ? 1 : tx); old_gameport_measure_speed() 143 unsigned long tx, t1, t2, flags; old_gameport_measure_speed() 148 tx = 1 << 30; old_gameport_measure_speed() 157 if (t2 - t1 < tx) tx = t2 - t1; old_gameport_measure_speed() 162 (unsigned long)HZ / (1000 / 50)) / (tx < 1 ? 1 : tx); old_gameport_measure_speed()
|
/linux-4.1.27/drivers/isdn/hysdn/ |
H A D | hysdn_sched.c | 67 /* room in the tx-buffer to the card and data may be sent if needed. */ 156 hysdn_addlog(card, "async tx-cfg chan=%d len=%d", chan, strlen(line) + 1); hysdn_tx_cfgline() 161 hysdn_addlog(card, "async tx-cfg delayed"); hysdn_tx_cfgline() 179 hysdn_addlog(card, "async tx-cfg data queued"); hysdn_tx_cfgline() 186 hysdn_addlog(card, "async tx-cfg waiting for tx-ready"); hysdn_tx_cfgline() 194 hysdn_addlog(card, "async tx-cfg data send"); hysdn_tx_cfgline()
|
/linux-4.1.27/drivers/rpmsg/ |
H A D | virtio_rpmsg_bus.c | 41 * @svq: tx virtqueue 43 * @sbufs: kernel address of tx buffers 44 * @num_bufs: total number of buffers for rx and tx 45 * @last_sbuf: index of last tx buffer used 52 * @sendq: wait queue of sending contexts waiting for a tx buffers 53 * @sleepers: number of senders that are waiting for a tx buffer 581 * either pick the next unused tx buffer get_a_tx_buf() 596 * rpmsg_upref_sleepers() - enable "tx-complete" interrupts, if needed 600 * a tx buffer to become available. 606 * virtio's tx callbacks, so we'd be immediately notified when a tx 607 * buffer is consumed (we rely on virtio's tx callback in order 608 * to wake up sleeping senders as soon as a tx buffer is used by the 616 /* are we the first sleeping context waiting for tx buffers ? */ rpmsg_upref_sleepers() 618 /* enable "tx-complete" interrupts before dozing off */ rpmsg_upref_sleepers() 625 * rpmsg_downref_sleepers() - disable "tx-complete" interrupts, if needed 628 * This function is called after a sender, that waited for a tx buffer 635 * virtio's tx callbacks, to avoid the overhead incurred with handling 643 /* are we the last sleeping context waiting for tx buffers ? */ rpmsg_downref_sleepers() 645 /* disable "tx-complete" interrupts */ rpmsg_downref_sleepers() 721 /* enable "tx-complete" interrupts, if not already enabled */ rpmsg_send_offchannel_raw() 734 /* disable "tx-complete" interrupts if we're the last sleeper */ rpmsg_send_offchannel_raw() 739 dev_err(dev, "timeout waiting for a tx buffer\n"); rpmsg_send_offchannel_raw() 879 * Normally, though, we suppress this "tx complete" interrupt in order to 888 /* wake up potential senders that are waiting for a tx buffer */ rpmsg_xmit_done() 967 /* We expect two virtqueues, rx and tx (and in this order) */ rpmsg_probe() 975 /* we expect symmetric tx/rx vrings */ rpmsg_probe() 1017 /* suppress "tx-complete" interrupts */ rpmsg_probe()
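The comments above explain that "tx-complete" interrupts are only kept enabled while at least one sender is blocked waiting for a buffer. Below is a hedged sketch of that refcounted enable/disable pattern; the field names mirror the driver's virtproc_info, but the struct shown here is trimmed to what the pattern needs and the function names are illustrative.

#include <linux/atomic.h>
#include <linux/mutex.h>
#include <linux/virtio.h>

struct example_vproc {
	struct virtqueue *svq;		/* tx virtqueue */
	struct mutex tx_lock;		/* protects svq and the sleeper count */
	atomic_t sleepers;		/* senders waiting for a tx buffer */
};

/* First blocked sender turns "tx done" callbacks on. */
static void example_upref_sleepers(struct example_vproc *vrp)
{
	mutex_lock(&vrp->tx_lock);
	if (atomic_inc_return(&vrp->sleepers) == 1)
		virtqueue_enable_cb(vrp->svq);
	mutex_unlock(&vrp->tx_lock);
}

/* Last waking sender turns them back off to avoid interrupt overhead. */
static void example_downref_sleepers(struct example_vproc *vrp)
{
	mutex_lock(&vrp->tx_lock);
	if (atomic_dec_and_test(&vrp->sleepers))
		virtqueue_disable_cb(vrp->svq);
	mutex_unlock(&vrp->tx_lock);
}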
|
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmfmac/ |
H A D | p2p.h | 57 * @BRCMF_P2P_STATUS_ACTION_TX_COMPLETED: action frame tx completed. 58 * @BRCMF_P2P_STATUS_ACTION_TX_NOACK: action frame tx not acked. 62 * @BRCMF_P2P_STATUS_WAITING_NEXT_AF_LISTEN: extra listen time for af tx. 93 * @tx_dst_addr: mac address where tx af should be sent to. 120 * @send_af_done: indication that action frame tx is complete. 123 * @af_tx_sent_jiffies: jiffies time when af tx was transmitted. 126 * @block_gon_req_tx: drop tx go negotiation request frames.
|
/linux-4.1.27/drivers/net/ethernet/amd/ |
H A D | au1000_eth.h | 31 #define NUM_TX_DMA 4 /* Au1x00 has 4 tx hardware descriptors */ 127 u32 vaddr; /* virtual address of rx/tx buffers */ 128 dma_addr_t dma_addr; /* dma address of rx/tx buffers */
|
H A D | hplance.h | 15 #define LE_JAB 0x02 /* loss of tx clock (???) */
|
/linux-4.1.27/drivers/staging/nvec/ |
H A D | nvec.c | 477 if (nvec->tx->pos != nvec->tx->size) { nvec_tx_completed() 479 nvec->tx->pos = 0; nvec_tx_completed() 541 * nvec_tx_set - Set the message to transfer (nvec->tx) 545 * tx member to it. If the tx_data list is empty, this uses the 552 dev_err(nvec->dev, "empty tx - sending no-op\n"); nvec_tx_set() 556 nvec->tx = &nvec->tx_scratch; nvec_tx_set() 557 list_add_tail(&nvec->tx->node, &nvec->tx_data); nvec_tx_set() 559 nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg, nvec_tx_set() 561 nvec->tx->pos = 0; nvec_tx_set() 566 (uint)nvec->tx->size, nvec->tx->data[1]); nvec_tx_set() 641 BUG_ON(nvec->tx->size < 1); nvec_interrupt() 642 to_send = nvec->tx->data[0]; nvec_interrupt() 643 nvec->tx->pos = 1; nvec_interrupt() 658 } else if (nvec->tx && nvec->tx->pos < nvec->tx->size) { nvec_interrupt() 659 to_send = nvec->tx->data[nvec->tx->pos++]; nvec_interrupt() 661 dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n", nvec_interrupt() 662 nvec->tx, nvec_interrupt() 663 (uint) (nvec->tx ? nvec->tx->pos : 0), nvec_interrupt() 664 (uint) (nvec->tx ? nvec->tx->size : 0)); nvec_interrupt()
|
/linux-4.1.27/drivers/net/irda/ |
H A D | donauboe.c | 81 /* If you get rx/tx fifo overflows at high bitrates, you can try increasing */ 119 /* for each ring->tx[] the transmitter can either */ 283 printk (" (%d,%02x)",self->ring->tx[i].len,self->ring->tx[i].control); toshoboe_dumpregs() 490 self->ring->tx[i].len = 0; toshoboe_initring() 491 self->ring->tx[i].control = 0x00; toshoboe_initring() 492 self->ring->tx[i].address = virt_to_bus (self->tx_bufs[i]); toshoboe_initring() 654 PROBE_DEBUG(" (%d,%02x)",self->ring->tx[i].len,self->ring->tx[i].control); toshoboe_dumptx() 712 if (self->ring->tx[txp].control & OBOE_CTL_TX_HW_OWNS) toshoboe_probeinterrupt() 840 self->ring->tx[self->txs].control = toshoboe_probe() 845 self->ring->tx[self->txs].len = toshoboe_probe() 850 self->ring->tx[self->txs].control = toshoboe_probe() 853 self->ring->tx[self->txs].len = toshoboe_probe() 858 self->ring->tx[self->txs].control = toshoboe_probe() 861 self->ring->tx[self->txs].len = toshoboe_probe() 866 self->ring->tx[self->txs].control = toshoboe_probe() 870 self->ring->tx[self->txs].len = toshoboe_probe() 913 self->ring->tx[self->txs].control = toshoboe_probe() 915 self->ring->tx[self->txs].len = 4; toshoboe_probe() 972 pr_debug("%s.tx:%x(%x)%x\n", toshoboe_hard_xmit() 1047 self->ring->tx[self->txs].len = mtt & 0xfff; toshoboe_hard_xmit() 1060 self->ring->tx[self->txs].control = ctl; toshoboe_hard_xmit() 1085 if (self->ring->tx[self->txs].control & OBOE_CTL_TX_HW_OWNS) toshoboe_hard_xmit() 1088 __func__, skb->len, self->ring->tx[self->txs].control, toshoboe_hard_xmit() 1104 self->ring->tx[self->txs].len = len & 0x0fff; toshoboe_hard_xmit() 1115 self->ring->tx[self->txs].control = ctl; toshoboe_hard_xmit() 1163 if (self->ring->tx[i].control & OBOE_CTL_TX_HW_OWNS) toshoboe_interrupt() 1171 if (self->ring->tx[txp].control & OBOE_CTL_TX_HW_OWNS) toshoboe_interrupt() 1175 while (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS) toshoboe_interrupt() 1181 if (self->ring->tx[txpc].control & OBOE_CTL_TX_HW_OWNS) toshoboe_interrupt() 1182 self->ring->tx[txp].control &= ~OBOE_CTL_TX_RTCENTX; toshoboe_interrupt() 1305 printk (KERN_WARNING DRIVER_NAME ": tx fifo underflow\n"); toshoboe_interrupt()
|
/linux-4.1.27/drivers/net/wireless/rtl818x/rtl8187/ |
H A D | leds.h | 50 /* If the LED is radio or tx/rx */
|
/linux-4.1.27/drivers/staging/rtl8723au/hal/ |
H A D | rtl8723a_sreset.c | 49 DBG_8723A("%s tx hang\n", __func__); rtl8723a_sreset_xmit_status_check()
|
/linux-4.1.27/include/linux/platform_data/ |
H A D | leds-lm355x.h | 53 * lm3556-tx pin
|
/linux-4.1.27/include/linux/spi/ |
H A D | spi_bitbang.h | 25 * already have one (transfer.{tx,rx}_dma is zero), or use PIO
|
/linux-4.1.27/include/linux/can/ |
H A D | led.h | 24 /* keep space for interface name + "-tx"/"-rx"/"-rxtx"
|
/linux-4.1.27/drivers/thunderbolt/ |
H A D | nhi_regs.h | 82 * three bitfields: tx, rx, rx overflow 91 * two bitfields: rx, tx
|
/linux-4.1.27/drivers/media/dvb-frontends/ |
H A D | dibx000_common.c | 256 static int dibx000_i2c_gate_ctrl(struct dibx000_i2c_master *mst, u8 tx[4], dibx000_i2c_gate_ctrl() argument 270 tx[0] = (((mst->base_reg + 1) >> 8) & 0xff); dibx000_i2c_gate_ctrl() 271 tx[1] = ((mst->base_reg + 1) & 0xff); dibx000_i2c_gate_ctrl() 272 tx[2] = val >> 8; dibx000_i2c_gate_ctrl() 273 tx[3] = val & 0xff; dibx000_i2c_gate_ctrl() 406 u8 tx[4]; dibx000_reset_i2c_master() local 407 struct i2c_msg m = {.addr = mst->i2c_addr,.buf = tx,.len = 4 }; dibx000_reset_i2c_master() 409 dibx000_i2c_gate_ctrl(mst, tx, 0, 0); dibx000_reset_i2c_master()
|
/linux-4.1.27/include/xen/interface/io/ |
H A D | netif.h | 48 * "event-channel-tx" and "event-channel-rx" respectively. If frontend 68 * number of tx and rx rings. 76 * event-channel (or event-channel-{tx,rx}) and {tx,rx}-ring-ref keys, 84 * /local/domain/1/device/vif/0/queue-0/tx-ring-ref = "<ring-ref-tx0>" 86 * /local/domain/1/device/vif/0/queue-0/event-channel-tx = "<evtchn-tx0>" 89 * /local/domain/1/device/vif/0/queue-1/tx-ring-ref = "<ring-ref-tx1>" 91 * /local/domain/1/device/vif/0/queue-1/event-channel-tx = "<evtchn-tx1>"
|
/linux-4.1.27/drivers/net/wireless/prism54/ |
H A D | islpci_dev.h | 127 * free/index_mgmt/data_rx/tx (called index, see below), 140 * For tx queues, [index, device_curr_frag) contains fragments 141 * where tx is done; they need to be freed (owned by driver). 144 * increments driver_curr_frag to indicate that more tx work 148 u32 index_mgmt_tx; /* read index mgmt tx queue */ 150 u32 free_data_tx; /* free pointer data tx queue */
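The prism54 comments above define the ring convention that everything in [index, device_curr_frag) has already been transmitted by the device and must be freed by the driver. A hedged, stand-alone sketch of draining such a ring, with assumed names and sizes (not the actual driver structures):

#include <stdio.h>

#define RING_SIZE 8

struct ring {
        unsigned int index;             /* driver's free/clean pointer       */
        unsigned int device_curr_frag;  /* device's progress pointer         */
        void *frag[RING_SIZE];
};

static void free_fragment(void *frag) { printf("freeing %p\n", frag); }

static void clean_tx_ring(struct ring *r)
{
        /* free every fragment the device has finished with */
        while (r->index != r->device_curr_frag) {
                free_fragment(r->frag[r->index % RING_SIZE]);
                r->index++;             /* indices only grow; wrap via modulo */
        }
}

int main(void)
{
        static int bufs[4];
        struct ring r = { .index = 2, .device_curr_frag = 5 };

        for (int i = 0; i < 4; i++)
                r.frag[(2 + i) % RING_SIZE] = &bufs[i];

        clean_tx_ring(&r);              /* frees slots 2, 3 and 4 */
        return 0;
}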
|
/linux-4.1.27/net/nfc/nci/ |
H A D | spi.c | 186 struct spi_transfer tx, rx; __nci_spi_read() local 192 memset(&tx, 0, sizeof(struct spi_transfer)); __nci_spi_read() 195 tx.tx_buf = req; __nci_spi_read() 196 tx.len = 2; __nci_spi_read() 197 tx.cs_change = 0; __nci_spi_read() 198 spi_message_add_tail(&tx, &m); __nci_spi_read()
|
/linux-4.1.27/drivers/net/wireless/iwlwifi/mvm/ |
H A D | fw-api-tx.h | 69 * @TX_CMD_FLG_WRITE_TX_POWER: update current tx power value in the mgmt frame 346 * occur if tx failed for this frame when it was a member of a previous 404 * This status relates to reasons the tx might have been blocked or aborted 441 * @wireless_media_time: for non-agg: RTS + CTS + frame tx attempts time + ACK. 442 * for agg: RTS + CTS + aggregation tx time + block-ack time. 444 * @pa_status: tx power info 445 * @pa_integ_res_a: tx power info 446 * @pa_integ_res_b: tx power info 447 * @pa_integ_res_c: tx power info 448 * @measurement_req_id: tx power info 497 * @scd_flow: the tx queue this BA relates to 520 * @tx: the tx commands associated with the beacon frame 528 struct iwl_tx_cmd tx; member in struct:iwl_mac_beacon_cmd 543 * @beacon_notify_hdr: tx response command associated with the beacon
|
/linux-4.1.27/fs/ncpfs/ |
H A D | sock.c | 120 /* We do not need any locking: we first set tx.creq, and then we do sendmsg, ncp_tcp_write_space() 123 if (server->tx.creq) ncp_tcp_write_space() 124 schedule_work(&server->tx.tq); ncp_tcp_write_space() 150 while (!list_empty(&server->tx.requests)) { __abort_ncp_connection() 151 req = list_entry(server->tx.requests.next, struct ncp_request_reply, req); __abort_ncp_connection() 163 req = server->tx.creq; __abort_ncp_connection() 165 server->tx.creq = NULL; __abort_ncp_connection() 222 rq = server->tx.creq; __ncptcp_try_send() 241 server->tx.creq = NULL; __ncptcp_try_send() 314 server->tx.creq = req; ncptcp_start_request() 340 if (server->tx.creq || server->rcv.creq) { ncp_add_request() 342 list_add_tail(&req->req, &server->tx.requests); ncp_add_request() 356 if (list_empty(&server->tx.requests)) { __ncp_next_request() 359 req = list_entry(server->tx.requests.next, struct ncp_request_reply, req); __ncp_next_request() 693 container_of(work, struct ncp_server, tx.tq); ncp_tcp_tx_proc()
|
/linux-4.1.27/drivers/staging/rtl8192u/ |
H A D | r819xU_cmdpkt.c | 10 * 2. RX : Receive tx feedback, beacon state, query configuration 54 RT_TRACE(COMP_FIRMWARE, "=== NULL packet ======> tx full!\n"); SendTxCommandPacket() 102 must be collected in tx feedback info. */ cmpk_count_txstatistic() 175 /* 2. Use tx feedback info to count TX statistics. */ cmpk_handle_tx_feedback() 194 DMESG("send beacon frame tx rate is 6Mbps\n"); cmdpkt_beacontimerinterrupt_819xusb() 197 DMESG("send beacon frame tx rate is 1Mbps\n"); cmdpkt_beacontimerinterrupt_819xusb() 319 * Overview: Count aggregated tx status from firmware of one type rx command 382 * Overview: Firmware adds a new tx feedback status to reduce rx command 401 /* 2. Use tx feedback info to count TX statistics. */ cmpk_handle_tx_status() 410 * Overview: Firmware adds a new tx rate history
|
/linux-4.1.27/drivers/gpu/drm/msm/hdmi/ |
H A D | hdmi.c | 334 { .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config }, 335 { .compatible = "qcom,hdmi-tx-8074", .data = &hdmi_tx_8074_config }, 336 { .compatible = "qcom,hdmi-tx-8960", .data = &hdmi_tx_8960_config }, 337 { .compatible = "qcom,hdmi-tx-8660", .data = &hdmi_tx_8660_config }, 379 hdmi_cfg->ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk"); hdmi_bind() 380 hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data"); hdmi_bind() 381 hdmi_cfg->hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd"); hdmi_bind() 382 hdmi_cfg->mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en"); hdmi_bind() 383 hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel"); hdmi_bind() 384 hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm"); hdmi_bind()
|
/linux-4.1.27/drivers/gpu/drm/ |
H A D | drm_mipi_dsi.c | 356 const u8 *tx = msg->tx_buf; mipi_dsi_create_packet() local 358 packet->header[1] = (msg->tx_len > 0) ? tx[0] : 0; mipi_dsi_create_packet() 359 packet->header[2] = (msg->tx_len > 1) ? tx[1] : 0; mipi_dsi_create_packet() 380 u8 tx[2] = { value & 0xff, value >> 8 }; mipi_dsi_set_maximum_return_packet_size() local 384 .tx_len = sizeof(tx), mipi_dsi_set_maximum_return_packet_size() 385 .tx_buf = tx, mipi_dsi_set_maximum_return_packet_size() 541 u8 *tx; mipi_dsi_dcs_write() local 546 tx = kmalloc(size, GFP_KERNEL); mipi_dsi_dcs_write() 547 if (!tx) mipi_dsi_dcs_write() 551 tx[0] = cmd; mipi_dsi_dcs_write() 552 memcpy(&tx[1], data, len); mipi_dsi_dcs_write() 554 tx = &cmd; mipi_dsi_dcs_write() 558 err = mipi_dsi_dcs_write_buffer(dsi, tx, size); mipi_dsi_dcs_write() 561 kfree(tx); mipi_dsi_dcs_write()
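The mipi_dsi_dcs_write() excerpt above shows the buffer-building pattern for DCS writes: when there is a payload, a temporary buffer carries the command byte followed by the data; with no payload the single command byte is sent directly. A stand-alone sketch of that pattern under hypothetical helper names (this is not the DRM API itself):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>

static ssize_t send_buffer(const unsigned char *buf, size_t len)
{
        printf("sending %zu byte(s), first byte 0x%02x\n", len, buf[0]);
        return (ssize_t)len;
}

static ssize_t dcs_write(unsigned char cmd, const void *data, size_t len)
{
        unsigned char *tx;
        size_t size;
        ssize_t err;

        if (len > 0) {
                size = 1 + len;
                tx = malloc(size);
                if (!tx)
                        return -1;
                tx[0] = cmd;                 /* command byte first ...  */
                memcpy(&tx[1], data, len);   /* ... then the payload    */
        } else {
                tx = &cmd;                   /* no payload: send cmd alone */
                size = 1;
        }

        err = send_buffer(tx, size);
        if (len > 0)
                free(tx);
        return err;
}

int main(void)
{
        unsigned char brightness = 0x80;
        dcs_write(0x51, &brightness, 1);     /* hypothetical "set brightness" */
        dcs_write(0x29, NULL, 0);            /* hypothetical "display on"     */
        return 0;
}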
|
/linux-4.1.27/drivers/net/wireless/b43legacy/ |
H A D | dma.c | 162 ring->tx ? "TX" : "RX", update_max_used_slots() 178 B43legacy_WARN_ON(!ring->tx); request_slot() 263 int tx) map_descbuffer() 267 if (tx) map_descbuffer() 283 int tx) unmap_descbuffer() 285 if (tx) unmap_descbuffer() 300 B43legacy_WARN_ON(ring->tx); sync_descbuffer_for_cpu() 311 B43legacy_WARN_ON(ring->tx); sync_descbuffer_for_device() 463 B43legacy_WARN_ON(ring->tx); setup_rx_descbuffer() 548 if (ring->tx) { dmacontroller_setup() 584 if (ring->tx) { dmacontroller_cleanup() 606 B43legacy_WARN_ON(!ring->tx); free_all_descbuffers() 609 if (ring->tx) free_all_descbuffers() 713 ring->tx = true; b43legacy_setup_dmaring() 759 (ring->tx) ? "TX" : "RX", ring->max_used_slots, b43legacy_destroy_dmaring() 1140 B43legacy_WARN_ON(!ring->tx); b43legacy_dma_tx() 1169 b43legacyerr(dev->wl, "DMA tx mapping failure\n"); b43legacy_dma_tx() 1198 B43legacy_WARN_ON(!ring->tx); b43legacy_dma_handle_txstatus() 1409 B43legacy_WARN_ON(ring->tx); b43legacy_dma_rx() 1425 B43legacy_WARN_ON(!ring->tx); b43legacy_dma_tx_suspend_ring() 1431 B43legacy_WARN_ON(!ring->tx); b43legacy_dma_tx_resume_ring() 260 map_descbuffer(struct b43legacy_dmaring *ring, unsigned char *buf, size_t len, int tx) map_descbuffer() argument 280 unmap_descbuffer(struct b43legacy_dmaring *ring, dma_addr_t addr, size_t len, int tx) unmap_descbuffer() argument
|
/linux-4.1.27/drivers/media/usb/dvb-usb/ |
H A D | dib0700_core.c | 59 static int dib0700_ctrl_wr(struct dvb_usb_device *d, u8 *tx, u8 txlen) dib0700_ctrl_wr() argument 64 debug_dump(tx, txlen, deb_data); dib0700_ctrl_wr() 67 tx[0], USB_TYPE_VENDOR | USB_DIR_OUT, 0, 0, tx, txlen, dib0700_ctrl_wr() 76 /* expecting tx buffer: request data[0] ... data[n] (n <= 4) */ dib0700_ctrl_rd() 77 int dib0700_ctrl_rd(struct dvb_usb_device *d, u8 *tx, u8 txlen, u8 *rx, u8 rxlen) dib0700_ctrl_rd() argument 83 err("tx buffer length is smaller than 2. Makes no sense."); dib0700_ctrl_rd() 87 err("tx buffer length is larger than 4. Not supported."); dib0700_ctrl_rd() 92 debug_dump(tx,txlen,deb_data); dib0700_ctrl_rd() 94 value = ((txlen - 2) << 8) | tx[1]; dib0700_ctrl_rd() 97 index |= (tx[2] << 8); dib0700_ctrl_rd() 99 index |= tx[3]; dib0700_ctrl_rd() 101 status = usb_control_msg(d->udev, usb_rcvctrlpipe(d->udev,0), tx[0], dib0700_ctrl_rd()
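dib0700_ctrl_rd() above packs the tx buffer into the fields of a USB control request: byte 0 is the request, byte 1 plus the payload length form "value", and bytes 2-3 form "index". A small sketch of just that packing step, assuming a hypothetical helper (not the dvb-usb interface):

#include <stdio.h>
#include <stdint.h>

static void pack_ctrl_rd(const uint8_t *tx, uint8_t txlen,
                         uint8_t *request, uint16_t *value, uint16_t *index)
{
        *request = tx[0];
        *value = (uint16_t)(((txlen - 2) << 8) | tx[1]);
        *index = 0;
        if (txlen > 2)
                *index |= (uint16_t)(tx[2] << 8);
        if (txlen > 3)
                *index |= tx[3];
}

int main(void)
{
        uint8_t tx[4] = { 0x01, 0x02, 0x03, 0x04 };
        uint8_t request;
        uint16_t value, index;

        pack_ctrl_rd(tx, sizeof(tx), &request, &value, &index);
        printf("request=0x%02x value=0x%04x index=0x%04x\n",
               request, value, index);       /* 0x01, 0x0202, 0x0304 */
        return 0;
}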
|
/linux-4.1.27/drivers/net/fddi/skfp/ |
H A D | drvfbi.c | 200 * buffer underrun: can only occur if a tx threshold is specified mac1_irq() 202 if (stl & (FM_STBURS | /* tx buffer underrun syn.q.*/ mac1_irq() 203 FM_STBURA0 | /* tx buffer underrun a.q.0 */ mac1_irq() 204 FM_STBURA1)) { /* tx buffer underrun a.q.2 */ mac1_irq() 209 FM_STXABRS | /* syn. tx abort */ mac1_irq() 210 FM_STXABRA0)) || /* asyn. tx abort */ mac1_irq() 213 formac_tx_restart(smc) ; /* init tx */ mac1_irq() 222 if (stu & (FM_STEFRMA0 | /* end of asyn tx */ mac1_irq() 223 FM_STEFRMS)) { /* end of sync tx */ mac1_irq()
|
/linux-4.1.27/drivers/net/wireless/ath/ath5k/ |
H A D | dma.c | 126 * NOTE: Must be called after setting up tx control descriptor for that 310 "tx dma didn't stop (q:%i, frm:%i) !\n", ath5k_hw_stop_tx_dma() 346 * the queue number and use tx queue type since we only have 2 queues. 389 * the queue number and we use tx queue type since we only have 2 queues 437 * ath5k_hw_update_tx_triglevel() - Update tx trigger level 441 * This function increases/decreases the tx trigger level for the tx fifo 444 * frames more quickly but can lead to tx underruns, raising it a lot can 446 * (64Bytes) and if we get tx underrun we increase it using the increase 449 * XXX: Link this with tx DMA size ? 620 * with tx interrupt flags not being updated ath5k_hw_get_isr() 655 * the same way (schedule the tx tasklet) ath5k_hw_get_isr() 675 * tx trigger level) but we might need it later on*/ ath5k_hw_get_isr() 850 * (driver handles tx/rx buffer setup and 862 * a DMA size of 512 causes rx overruns and tx errors ath5k_hw_dma_init() 868 * TODO: Check out tx trigger level, it's always 64 on dumps but I ath5k_hw_dma_init() 888 * Stop tx/rx DMA and interrupts. Returns 889 * -EBUSY if tx or rx dma failed to stop. 892 * stuck frames on tx queues, only a reset 910 * and disable tx dma */ ath5k_hw_dma_stop()
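The ath5k comments above describe raising the tx trigger level after an underrun (waiting for more data before the PCU starts transmitting) at the cost of latency, and lowering it again otherwise. A hedged sketch of that heuristic; the constants and names are assumptions, not the hardware's actual limits:

#include <stdio.h>
#include <stdbool.h>

#define TRIG_MIN   64    /* assumed lower bound, in bytes */
#define TRIG_MAX  512    /* assumed upper bound, in bytes */
#define TRIG_STEP  64

static int update_trig_level(int level, bool underrun)
{
        if (underrun)
                level += TRIG_STEP;       /* buffer more before starting tx   */
        else if (level > TRIG_MIN)
                level -= TRIG_STEP / 2;   /* slowly trade back toward latency */

        if (level < TRIG_MIN)
                level = TRIG_MIN;
        if (level > TRIG_MAX)
                level = TRIG_MAX;
        return level;
}

int main(void)
{
        int level = TRIG_MIN;

        level = update_trig_level(level, true);    /* tx underrun observed */
        level = update_trig_level(level, false);
        printf("trigger level now %d bytes\n", level);
        return 0;
}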
|
/linux-4.1.27/drivers/net/fddi/skfp/h/ |
H A D | fplustm.h | 55 __le32 txd_tbadr ; /* physical tx buffer address */ 58 __le32 txd_tbadr_hi ; /* physical tx buffer addr (high dword)*/ 75 __le32 rxd_rbadr_hi ; /* physical tx buffer addr (high dword)*/ 100 HW_PTR tx_bmu_ctl ; /* BMU addr for tx start */ 189 struct s_smt_tx_queue *tx[USED_QUEUES] ; member in struct:s_smt_fp
|
/linux-4.1.27/drivers/staging/rtl8712/ |
H A D | rtl871x_mlme.h | 52 #define WIFI_MP_CTX_BACKGROUND 0x00020000 /* in cont. tx background*/ 53 #define WIFI_MP_CTX_ST 0x00040000 /* in cont. tx with 55 #define WIFI_MP_CTX_BACKGROUND_PENDING 0x00080000 /* pending in cont, tx 57 #define WIFI_MP_CTX_CCK_HW 0x00100000 /* in continuous tx*/ 58 #define WIFI_MP_CTX_CCK_CS 0x00200000 /* in cont, tx with carrier
|
/linux-4.1.27/net/atm/ |
H A D | mpoa_proc.c | 244 * add 130.230.54.142 tx=max_pcr,max_sdu rx=max_pcr,max_sdu parse_qos() 259 if (sscanf(buff, "add %hhu.%hhu.%hhu.%hhu tx=%d,%d rx=tx", parse_qos() 263 } else if (sscanf(buff, "add %hhu.%hhu.%hhu.%hhu tx=%d,%d rx=%d,%d", parse_qos() 275 dprintk("parse_qos(): setting qos parameters to tx=%d,%d rx=%d,%d\n", parse_qos()
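parse_qos() above accepts two spellings of the same command: "rx=tx" reuses the tx parameters, otherwise rx gets its own pcr/sdu pair. A stand-alone sketch of that parsing logic using the same sscanf patterns (illustrative only, not the kernel parser):

#include <stdio.h>

int main(void)
{
        const char *line = "add 130.230.54.142 tx=80000,1500 rx=tx";
        unsigned char ip[4];
        int tx_pcr, tx_sdu, rx_pcr, rx_sdu;

        if (sscanf(line, "add %hhu.%hhu.%hhu.%hhu tx=%d,%d rx=tx",
                   &ip[0], &ip[1], &ip[2], &ip[3], &tx_pcr, &tx_sdu) == 6) {
                rx_pcr = tx_pcr;          /* rx mirrors tx */
                rx_sdu = tx_sdu;
        } else if (sscanf(line, "add %hhu.%hhu.%hhu.%hhu tx=%d,%d rx=%d,%d",
                          &ip[0], &ip[1], &ip[2], &ip[3],
                          &tx_pcr, &tx_sdu, &rx_pcr, &rx_sdu) != 8) {
                fprintf(stderr, "unrecognised qos line\n");
                return 1;
        }

        printf("tx=%d,%d rx=%d,%d\n", tx_pcr, tx_sdu, rx_pcr, rx_sdu);
        return 0;
}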
|