/*
 * Tty buffer allocation management
 */

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/ratelimit.h>


/* Minimum data size (bytes) of a flip buffer; buffers of exactly this size
 * are recycled on the lockless free list (see tty_buffer_free). */
#define MIN_TTYB_SIZE	256
/* Requested sizes are rounded up to a multiple of 256 bytes. */
#define TTYB_ALIGN_MASK	255

/*
 * Byte threshold to limit memory consumption for flip buffers.
 * The actual memory limit is > 2x this amount.
 */
#define TTYB_DEFAULT_MEM_LIMIT	65536

/*
 * We default to dicing tty buffer allocations to this many characters
 * in order to avoid multiple page allocations. We know the size of
 * tty_buffer itself but it must also be taken into account that the
 * buffer is 256 byte aligned. See tty_buffer_alloc for the allocation
 * logic this must match.
 */

#define TTY_BUFFER_PAGE	(((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)

/**
 * tty_buffer_lock_exclusive - gain exclusive access to buffer
 * tty_buffer_unlock_exclusive - release exclusive access
 *
 * @port - tty_port owning the flip buffer
 *
 * Guarantees safe use of the line discipline's receive_buf() method by
 * excluding the buffer work and any pending flush from using the flip
 * buffer. Data can continue to be added concurrently to the flip buffer
 * from the driver side.
 *
 * On release, the buffer work is restarted if there is data in the
 * flip buffer
 */

void tty_buffer_lock_exclusive(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;

	/* Raise ->priority before taking the lock: flush_to_ldisc() polls
	 * it and bails out of its consume loop, releasing the mutex. */
	atomic_inc(&buf->priority);
	mutex_lock(&buf->lock);
}
EXPORT_SYMBOL_GPL(tty_buffer_lock_exclusive);

void tty_buffer_unlock_exclusive(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;
	int restart;

	/* Sample, while still holding the lock, whether unconsumed data
	 * remains; if so the buffer work must be rescheduled below. */
	restart = buf->head->commit != buf->head->read;

	atomic_dec(&buf->priority);
	mutex_unlock(&buf->lock);
	if (restart)
		queue_work(system_unbound_wq, &buf->work);
}
EXPORT_SYMBOL_GPL(tty_buffer_unlock_exclusive);

/**
 * tty_buffer_space_avail - return unused buffer space
 * @port - tty_port owning the flip buffer
 *
 * Returns the # of bytes which can be written by the driver without
 * reaching the buffer limit.
 *
 * Note: this does not guarantee that memory is available to write
 * the returned # of bytes (use tty_prepare_flip_string_xxx() to
 * pre-allocate if memory guarantee is required).
 */

int tty_buffer_space_avail(struct tty_port *port)
{
	int space = port->buf.mem_limit - atomic_read(&port->buf.mem_used);
	/* Never report negative space (usage may transiently exceed the
	 * limit). */
	return max(space, 0);
}
EXPORT_SYMBOL_GPL(tty_buffer_space_avail);

/* Reset a buffer to empty with the given data size: unlink it and clear
 * the used/commit/read indices and flags so it can be (re)queued. */
static void tty_buffer_reset(struct tty_buffer *p, size_t size)
{
	p->used = 0;
	p->size = size;
	p->next = NULL;
	p->commit = 0;
	p->read = 0;
	p->flags = 0;
}

/**
 * tty_buffer_free_all - free buffers used by a tty
 * @tty: tty to free from
 *
 * Remove all the buffers pending on a tty whether queued with data
 * or in the free ring.
 * Must be called when the tty is no longer in use
 */

void tty_buffer_free_all(struct tty_port *port)
{
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *p, *next;
	struct llist_node *llist;

	/* Release the queued chain. The zero-sized sentinel is embedded in
	 * the bufhead itself and must never be kfree'd. */
	while ((p = buf->head) != NULL) {
		buf->head = p->next;
		if (p->size > 0)
			kfree(p);
	}
	/* Drain and free the lockless free list in one shot. */
	llist = llist_del_all(&buf->free);
	llist_for_each_entry_safe(p, next, llist, free)
		kfree(p);

	/* Restart with an empty queue consisting solely of the sentinel. */
	tty_buffer_reset(&buf->sentinel, 0);
	buf->head = &buf->sentinel;
	buf->tail = &buf->sentinel;

	atomic_set(&buf->mem_used, 0);
}

/**
 * tty_buffer_alloc - allocate a tty buffer
 * @tty: tty device
 * @size: desired size (characters)
 *
 * Allocate a new tty buffer to hold the desired number of characters.
 * We round our buffers off in 256 character chunks to get better
 * allocation behaviour.
 * Return NULL if out of memory or the allocation would exceed the
 * per device queue
 */

static struct tty_buffer *tty_buffer_alloc(struct tty_port *port, size_t size)
{
	struct llist_node *free;
	struct tty_buffer *p;

	/* Round the buffer size out */
	size = __ALIGN_MASK(size, TTYB_ALIGN_MASK);

	/* Small requests may be satisfied from the free list, which only
	 * ever holds buffers of size MIN_TTYB_SIZE (see tty_buffer_free).
	 * This path bypasses the memory-limit check below. */
	if (size <= MIN_TTYB_SIZE) {
		free = llist_del_first(&port->buf.free);
		if (free) {
			p = llist_entry(free, struct tty_buffer, free);
			goto found;
		}
	}

	/* Should possibly check if this fails for the largest buffer we
	   have queued and recycle that ?
	 */
	if (atomic_read(&port->buf.mem_used) > port->buf.mem_limit)
		return NULL;
	/* Header plus 2 * size: a data half and a flag half (see
	 * char_buf_ptr()/flag_buf_ptr()). GFP_ATOMIC since drivers call
	 * into here from interrupt context. */
	p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC);
	if (p == NULL)
		return NULL;

found:
	tty_buffer_reset(p, size);
	atomic_add(size, &port->buf.mem_used);
	return p;
}

/**
 * tty_buffer_free - free a tty buffer
 * @tty: tty owning the buffer
 * @b: the buffer to free
 *
 * Free a tty buffer, or add it to the free list according to our
 * internal strategy
 */

static void tty_buffer_free(struct tty_port *port, struct tty_buffer *b)
{
	struct tty_bufhead *buf = &port->buf;

	/* Dumb strategy for now - should keep some stats */
	WARN_ON(atomic_sub_return(b->size, &buf->mem_used) < 0);

	/* Large buffers go straight back to the allocator; MIN_TTYB_SIZE
	 * buffers are recycled on the lockless free list. The zero-sized
	 * sentinel (embedded in the bufhead) matches neither test and is
	 * therefore never freed. */
	if (b->size > MIN_TTYB_SIZE)
		kfree(b);
	else if (b->size > 0)
		llist_add(&b->free, &buf->free);
}

/**
 * tty_buffer_flush - flush full tty buffers
 * @tty: tty to flush
 * @ld: optional ldisc ptr (must be referenced)
 *
 * flush all the buffers containing receive data. If ld != NULL,
 * flush the ldisc input buffer.
 *
 * Locking: takes buffer lock to ensure single-threaded flip buffer
 *	    'consumer'
 */

void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
{
	struct tty_port *port = tty->port;
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *next;

	/* Bump priority so a concurrent flush_to_ldisc() backs off and
	 * releases the buffer lock promptly. */
	atomic_inc(&buf->priority);

	mutex_lock(&buf->lock);
	/* Free every buffer except the tail (the one with next == NULL),
	 * which the driver side may still be filling concurrently. */
	while ((next = buf->head->next) != NULL) {
		tty_buffer_free(port, buf->head);
		buf->head = next;
	}
	/* Discard data already committed but not yet consumed in the tail. */
	buf->head->read = buf->head->commit;

	if (ld && ld->ops->flush_buffer)
		ld->ops->flush_buffer(tty);

	atomic_dec(&buf->priority);
	mutex_unlock(&buf->lock);
}

/**
 * tty_buffer_request_room - grow tty buffer if needed
 * @tty: tty structure
 * @size: size desired
 * @flags: buffer flags if new buffer allocated (default = 0)
 *
 * Make at least size bytes of linear space available for the tty
 * buffer. If we fail return the size we managed to find.
 *
 * Will change over to a new buffer if the current buffer is encoded as
 * TTY_NORMAL (so has no flags buffer) and the new buffer requires
 * a flags buffer.
 */
static int __tty_buffer_request_room(struct tty_port *port, size_t size,
				     int flags)
{
	struct tty_bufhead *buf = &port->buf;
	struct tty_buffer *b, *n;
	int left, change;

	b = buf->tail;
	/* A TTYB_NORMAL buffer stores no flag bytes, so both halves of the
	 * 2 * size allocation are usable for data. */
	if (b->flags & TTYB_NORMAL)
		left = 2 * b->size - b->used;
	else
		left = b->size - b->used;

	/* Must switch buffers if the tail is flag-less (TTYB_NORMAL) but
	 * the caller needs per-character flags. */
	change = (b->flags & TTYB_NORMAL) && (~flags & TTYB_NORMAL);
	if (change || left < size) {
		/* This is the slow path - looking for new buffers to use */
		if ((n = tty_buffer_alloc(port, size)) != NULL) {
			n->flags = flags;
			buf->tail = n;
			b->commit = b->used;
			/* paired w/ barrier in flush_to_ldisc(); ensures the
			 * latest commit value can be read before the head is
			 * advanced to the next buffer
			 */
			smp_wmb();
			b->next = n;
		} else if (change)
			size = 0;
		else
			size = left;
	}
	return size;
}

int tty_buffer_request_room(struct tty_port *port, size_t size)
{
	return __tty_buffer_request_room(port, size, 0);
}
EXPORT_SYMBOL_GPL(tty_buffer_request_room);

/**
 * tty_insert_flip_string_fixed_flag - Add characters to the tty buffer
 * @port: tty port
 * @chars: characters
 * @flag: flag value for each character
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. All the characters
 * passed are marked with the supplied flag. Returns the number added.
 */

int tty_insert_flip_string_fixed_flag(struct tty_port *port,
		const unsigned char *chars, char flag, size_t size)
{
	int copied = 0;
	do {
		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
		int flags = (flag == TTY_NORMAL) ?
			    TTYB_NORMAL : 0;
		int space = __tty_buffer_request_room(port, goal, flags);
		struct tty_buffer *tb = port->buf.tail;
		if (unlikely(space == 0))
			break;
		memcpy(char_buf_ptr(tb, tb->used), chars, space);
		/* TTYB_NORMAL buffers carry no flag bytes to fill in. */
		if (~tb->flags & TTYB_NORMAL)
			memset(flag_buf_ptr(tb, tb->used), flag, space);
		tb->used += space;
		copied += space;
		chars += space;
		/* There is a small chance that we need to split the data over
		   several buffers. If this is the case we must loop */
	} while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_fixed_flag);

/**
 * tty_insert_flip_string_flags - Add characters to the tty buffer
 * @port: tty port
 * @chars: characters
 * @flags: flag bytes
 * @size: size
 *
 * Queue a series of bytes to the tty buffering. For each character
 * the flags array indicates the status of the character. Returns the
 * number added.
 */

int tty_insert_flip_string_flags(struct tty_port *port,
		const unsigned char *chars, const char *flags, size_t size)
{
	int copied = 0;
	do {
		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
		int space = tty_buffer_request_room(port, goal);
		struct tty_buffer *tb = port->buf.tail;
		if (unlikely(space == 0))
			break;
		memcpy(char_buf_ptr(tb, tb->used), chars, space);
		memcpy(flag_buf_ptr(tb, tb->used), flags, space);
		tb->used += space;
		copied += space;
		chars += space;
		flags += space;
		/* There is a small chance that we need to split the data over
		   several buffers. If this is the case we must loop */
	} while (unlikely(size > copied));
	return copied;
}
EXPORT_SYMBOL(tty_insert_flip_string_flags);

/**
 * tty_schedule_flip - push characters to ldisc
 * @port: tty port to push from
 *
 * Takes any pending buffers and transfers their ownership to the
It then schedules those characters for 364 * processing by the line discipline. 365 */ 366 367void tty_schedule_flip(struct tty_port *port) 368{ 369 struct tty_bufhead *buf = &port->buf; 370 371 buf->tail->commit = buf->tail->used; 372 schedule_work(&buf->work); 373} 374EXPORT_SYMBOL(tty_schedule_flip); 375 376/** 377 * tty_prepare_flip_string - make room for characters 378 * @port: tty port 379 * @chars: return pointer for character write area 380 * @size: desired size 381 * 382 * Prepare a block of space in the buffer for data. Returns the length 383 * available and buffer pointer to the space which is now allocated and 384 * accounted for as ready for normal characters. This is used for drivers 385 * that need their own block copy routines into the buffer. There is no 386 * guarantee the buffer is a DMA target! 387 */ 388 389int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars, 390 size_t size) 391{ 392 int space = __tty_buffer_request_room(port, size, TTYB_NORMAL); 393 if (likely(space)) { 394 struct tty_buffer *tb = port->buf.tail; 395 *chars = char_buf_ptr(tb, tb->used); 396 if (~tb->flags & TTYB_NORMAL) 397 memset(flag_buf_ptr(tb, tb->used), TTY_NORMAL, space); 398 tb->used += space; 399 } 400 return space; 401} 402EXPORT_SYMBOL_GPL(tty_prepare_flip_string); 403 404 405static int 406receive_buf(struct tty_struct *tty, struct tty_buffer *head, int count) 407{ 408 struct tty_ldisc *disc = tty->ldisc; 409 unsigned char *p = char_buf_ptr(head, head->read); 410 char *f = NULL; 411 412 if (~head->flags & TTYB_NORMAL) 413 f = flag_buf_ptr(head, head->read); 414 415 if (disc->ops->receive_buf2) 416 count = disc->ops->receive_buf2(tty, p, f, count); 417 else { 418 count = min_t(int, count, tty->receive_room); 419 if (count) 420 disc->ops->receive_buf(tty, p, f, count); 421 } 422 head->read += count; 423 return count; 424} 425 426/** 427 * flush_to_ldisc 428 * @work: tty structure passed from work queue. 
429 * 430 * This routine is called out of the software interrupt to flush data 431 * from the buffer chain to the line discipline. 432 * 433 * The receive_buf method is single threaded for each tty instance. 434 * 435 * Locking: takes buffer lock to ensure single-threaded flip buffer 436 * 'consumer' 437 */ 438 439static void flush_to_ldisc(struct work_struct *work) 440{ 441 struct tty_port *port = container_of(work, struct tty_port, buf.work); 442 struct tty_bufhead *buf = &port->buf; 443 struct tty_struct *tty; 444 struct tty_ldisc *disc; 445 446 tty = port->itty; 447 if (tty == NULL) 448 return; 449 450 disc = tty_ldisc_ref(tty); 451 if (disc == NULL) 452 return; 453 454 mutex_lock(&buf->lock); 455 456 while (1) { 457 struct tty_buffer *head = buf->head; 458 struct tty_buffer *next; 459 int count; 460 461 /* Ldisc or user is trying to gain exclusive access */ 462 if (atomic_read(&buf->priority)) 463 break; 464 465 next = head->next; 466 /* paired w/ barrier in __tty_buffer_request_room(); 467 * ensures commit value read is not stale if the head 468 * is advancing to the next buffer 469 */ 470 smp_rmb(); 471 count = head->commit - head->read; 472 if (!count) { 473 if (next == NULL) 474 break; 475 buf->head = next; 476 tty_buffer_free(port, head); 477 continue; 478 } 479 480 count = receive_buf(tty, head, count); 481 if (!count) 482 break; 483 } 484 485 mutex_unlock(&buf->lock); 486 487 tty_ldisc_deref(disc); 488} 489 490/** 491 * tty_flip_buffer_push - terminal 492 * @port: tty port to push 493 * 494 * Queue a push of the terminal flip buffers to the line discipline. 495 * Can be called from IRQ/atomic context. 496 * 497 * In the event of the queue being busy for flipping the work will be 498 * held off and retried later. 
499 */ 500 501void tty_flip_buffer_push(struct tty_port *port) 502{ 503 tty_schedule_flip(port); 504} 505EXPORT_SYMBOL(tty_flip_buffer_push); 506 507/** 508 * tty_buffer_init - prepare a tty buffer structure 509 * @tty: tty to initialise 510 * 511 * Set up the initial state of the buffer management for a tty device. 512 * Must be called before the other tty buffer functions are used. 513 */ 514 515void tty_buffer_init(struct tty_port *port) 516{ 517 struct tty_bufhead *buf = &port->buf; 518 519 mutex_init(&buf->lock); 520 tty_buffer_reset(&buf->sentinel, 0); 521 buf->head = &buf->sentinel; 522 buf->tail = &buf->sentinel; 523 init_llist_head(&buf->free); 524 atomic_set(&buf->mem_used, 0); 525 atomic_set(&buf->priority, 0); 526 INIT_WORK(&buf->work, flush_to_ldisc); 527 buf->mem_limit = TTYB_DEFAULT_MEM_LIMIT; 528} 529 530/** 531 * tty_buffer_set_limit - change the tty buffer memory limit 532 * @port: tty port to change 533 * 534 * Change the tty buffer memory limit. 535 * Must be called before the other tty buffer functions are used. 536 */ 537 538int tty_buffer_set_limit(struct tty_port *port, int limit) 539{ 540 if (limit < MIN_TTYB_SIZE) 541 return -EINVAL; 542 port->buf.mem_limit = limit; 543 return 0; 544} 545EXPORT_SYMBOL_GPL(tty_buffer_set_limit); 546 547/* slave ptys can claim nested buffer lock when handling BRK and INTR */ 548void tty_buffer_set_lock_subclass(struct tty_port *port) 549{ 550 lockdep_set_subclass(&port->buf.lock, TTY_LOCK_SLAVE); 551} 552 553void tty_buffer_flush_work(struct tty_port *port) 554{ 555 flush_work(&port->buf.work); 556} 557