vt_buf.c revision 256970
/*-
 * Copyright (c) 2009 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Ed Schouten under sponsorship from the
 * FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
28 */ 29 30#include <sys/cdefs.h> 31__FBSDID("$FreeBSD: user/ed/newcons/sys/dev/vt/vt_buf.c 256970 2013-10-23 14:15:46Z ray $"); 32 33#include <sys/param.h> 34#include <sys/kernel.h> 35#include <sys/lock.h> 36#include <sys/malloc.h> 37#include <sys/mutex.h> 38#include <sys/systm.h> 39 40#include <dev/vt/vt.h> 41 42static MALLOC_DEFINE(M_VTBUF, "vtbuf", "vt buffer"); 43 44#define VTBUF_LOCK(vb) mtx_lock_spin(&(vb)->vb_lock) 45#define VTBUF_UNLOCK(vb) mtx_unlock_spin(&(vb)->vb_lock) 46/* 47 * line4 48 * line5 <--- curroffset (terminal output to that line) 49 * line0 50 * line1 <--- roffset (history display from that point) 51 * line2 52 * line3 53 */ 54int 55vthistory_seek(struct vt_buf *vb, int offset, int whence) 56{ 57 int top, bottom, roffset; 58 59 /* No scrolling if not enabled. */ 60 if ((vb->vb_flags & VBF_SCROLL) == 0) { 61 if (vb->vb_roffset != vb->vb_curroffset) { 62 vb->vb_roffset = vb->vb_curroffset; 63 return (1); 64 } 65 return (0); /* No changes */ 66 } 67 top = (vb->vb_flags & VBF_HISTORY_FULL)? 68 (vb->vb_curroffset + vb->vb_scr_size.tp_row):vb->vb_history_size; 69 bottom = vb->vb_curroffset + vb->vb_history_size; 70 71 /* 72 * Operate on copy of offset value, since it temporary can be bigger 73 * than amount of rows in buffer. 74 */ 75 roffset = vb->vb_roffset + vb->vb_history_size; 76 switch (whence) { 77 case VHS_SET: 78 roffset = offset; 79 break; 80 case VHS_CUR: 81 roffset += offset; 82 break; 83 case VHS_END: 84 /* Go to current offset. */ 85 roffset = vb->vb_curroffset; 86 break; 87 } 88 89 roffset = (roffset < top)?top:roffset; 90 roffset = (roffset > bottom)?bottom:roffset; 91 92 roffset %= vb->vb_history_size; 93 94 if (vb->vb_roffset != roffset) { 95 vb->vb_roffset = roffset; 96 return (1); /* Offset changed, please update sceen. 
*/ 97 } 98 return (0); /* No changes */ 99} 100 101void 102vthistory_addlines(struct vt_buf *vb, int offset) 103{ 104 105 vb->vb_curroffset += offset; 106 if (vb->vb_curroffset < 0) 107 vb->vb_curroffset = 0; 108 vb->vb_curroffset %= vb->vb_history_size; 109 if ((vb->vb_flags & VBF_SCROLL) == 0) { 110 vb->vb_roffset = vb->vb_curroffset; 111 } 112} 113 114void 115vthistory_getpos(const struct vt_buf *vb, unsigned int *offset) 116{ 117 118 *offset = vb->vb_roffset; 119} 120 121static inline uint64_t 122vtbuf_dirty_axis(unsigned int begin, unsigned int end) 123{ 124 uint64_t left, right, mask; 125 126 /* 127 * Mark all bits between begin % 64 and end % 64 dirty. 128 * This code is functionally equivalent to: 129 * 130 * for (i = begin; i < end; i++) 131 * mask |= (uint64_t)1 << (i % 64); 132 */ 133 134 /* Obvious case. Mark everything dirty. */ 135 if (end - begin >= 64) 136 return (VBM_DIRTY); 137 138 /* 1....0; used bits on the left. */ 139 left = VBM_DIRTY << begin % 64; 140 /* 0....1; used bits on the right. */ 141 right = VBM_DIRTY >> -end % 64; 142 143 /* 144 * Only take the intersection. If the result of that is 0, it 145 * means that the selection crossed a 64 bit boundary along the 146 * way, which means we have to take the complement. 
147 */ 148 mask = left & right; 149 if (mask == 0) 150 mask = left | right; 151 return (mask); 152} 153 154static inline void 155vtbuf_dirty(struct vt_buf *vb, const term_rect_t *area) 156{ 157 158 VTBUF_LOCK(vb); 159 if (vb->vb_dirtyrect.tr_begin.tp_row > area->tr_begin.tp_row) 160 vb->vb_dirtyrect.tr_begin.tp_row = area->tr_begin.tp_row; 161 if (vb->vb_dirtyrect.tr_begin.tp_col > area->tr_begin.tp_col) 162 vb->vb_dirtyrect.tr_begin.tp_col = area->tr_begin.tp_col; 163 if (vb->vb_dirtyrect.tr_end.tp_row < area->tr_end.tp_row) 164 vb->vb_dirtyrect.tr_end.tp_row = area->tr_end.tp_row; 165 if (vb->vb_dirtyrect.tr_end.tp_col < area->tr_end.tp_col) 166 vb->vb_dirtyrect.tr_end.tp_col = area->tr_end.tp_col; 167 vb->vb_dirtymask.vbm_row |= 168 vtbuf_dirty_axis(area->tr_begin.tp_row, area->tr_end.tp_row); 169 vb->vb_dirtymask.vbm_col |= 170 vtbuf_dirty_axis(area->tr_begin.tp_col, area->tr_end.tp_col); 171 VTBUF_UNLOCK(vb); 172} 173 174static inline void 175vtbuf_dirty_cell(struct vt_buf *vb, const term_pos_t *p) 176{ 177 term_rect_t area; 178 179 area.tr_begin = *p; 180 area.tr_end.tp_row = p->tp_row + 1; 181 area.tr_end.tp_col = p->tp_col + 1; 182 vtbuf_dirty(vb, &area); 183} 184 185static void 186vtbuf_make_undirty(struct vt_buf *vb) 187{ 188 189 vb->vb_dirtyrect.tr_begin = vb->vb_scr_size; 190 vb->vb_dirtyrect.tr_end.tp_row = vb->vb_dirtyrect.tr_end.tp_col = 0; 191 vb->vb_dirtymask.vbm_row = vb->vb_dirtymask.vbm_col = 0; 192} 193 194void 195vtbuf_undirty(struct vt_buf *vb, term_rect_t *r, struct vt_bufmask *m) 196{ 197 198 VTBUF_LOCK(vb); 199 *r = vb->vb_dirtyrect; 200 *m = vb->vb_dirtymask; 201 vtbuf_make_undirty(vb); 202 VTBUF_UNLOCK(vb); 203} 204 205void 206vtbuf_copy(struct vt_buf *vb, const term_rect_t *r, const term_pos_t *p2) 207{ 208 const term_pos_t *p1 = &r->tr_begin; 209 term_rect_t area; 210 unsigned int rows, cols; 211 int pr, rdiff; 212 213 KASSERT(r->tr_begin.tp_row < vb->vb_scr_size.tp_row, 214 ("vtbuf_copy begin.tp_row %d must be less than screen width 
%d", 215 r->tr_begin.tp_row, vb->vb_scr_size.tp_row)); 216 KASSERT(r->tr_begin.tp_col < vb->vb_scr_size.tp_col, 217 ("vtbuf_copy begin.tp_col %d must be less than screen height %d", 218 r->tr_begin.tp_col, vb->vb_scr_size.tp_col)); 219 220 KASSERT(r->tr_end.tp_row <= vb->vb_scr_size.tp_row, 221 ("vtbuf_copy end.tp_row %d must be less than screen width %d", 222 r->tr_end.tp_row, vb->vb_scr_size.tp_row)); 223 KASSERT(r->tr_end.tp_col <= vb->vb_scr_size.tp_col, 224 ("vtbuf_copy end.tp_col %d must be less than screen height %d", 225 r->tr_end.tp_col, vb->vb_scr_size.tp_col)); 226 227 KASSERT(p2->tp_row < vb->vb_scr_size.tp_row, 228 ("vtbuf_copy tp_row %d must be less than screen width %d", 229 p2->tp_row, vb->vb_scr_size.tp_row)); 230 KASSERT(p2->tp_col < vb->vb_scr_size.tp_col, 231 ("vtbuf_copy tp_col %d must be less than screen height %d", 232 p2->tp_col, vb->vb_scr_size.tp_col)); 233 234 rows = r->tr_end.tp_row - r->tr_begin.tp_row; 235 rdiff = r->tr_begin.tp_row - p2->tp_row; 236 cols = r->tr_end.tp_col - r->tr_begin.tp_col; 237 if (r->tr_begin.tp_row > p2->tp_row && r->tr_begin.tp_col == 0 && 238 r->tr_end.tp_col == vb->vb_scr_size.tp_col && /* Full row. */ 239 (rows + rdiff) == vb->vb_scr_size.tp_row && /* Whole screen. */ 240 rdiff > 0) { /* Only forward dirrection. Do not eat history. */ 241 vthistory_addlines(vb, rdiff); 242 } else if (p2->tp_row < p1->tp_row) { 243 /* Handle overlapping copies of line segments. */ 244 /* Move data up. */ 245 for (pr = 0; pr < rows; pr++) 246 memmove( 247 &VTBUF_FIELD(vb, p2->tp_row + pr, p2->tp_col), 248 &VTBUF_FIELD(vb, p1->tp_row + pr, p1->tp_col), 249 cols * sizeof(term_char_t)); 250 } else { 251 /* Move data down. 
*/ 252 for (pr = rows - 1; pr >= 0; pr--) 253 memmove( 254 &VTBUF_FIELD(vb, p2->tp_row + pr, p2->tp_col), 255 &VTBUF_FIELD(vb, p1->tp_row + pr, p1->tp_col), 256 cols * sizeof(term_char_t)); 257 } 258 259 area.tr_begin = *p2; 260 area.tr_end.tp_row = MIN(p2->tp_row + rows, vb->vb_scr_size.tp_row); 261 area.tr_end.tp_col = MIN(p2->tp_col + cols, vb->vb_scr_size.tp_col); 262 vtbuf_dirty(vb, &area); 263} 264 265static void 266vtbuf_fill(struct vt_buf *vb, const term_rect_t *r, term_char_t c) 267{ 268 unsigned int pr, pc; 269 term_char_t *row; 270 271 for (pr = r->tr_begin.tp_row; pr < r->tr_end.tp_row; pr++) { 272 row = vb->vb_rows[(vb->vb_curroffset + pr) % 273 VTBUF_MAX_HEIGHT(vb)]; 274 for (pc = r->tr_begin.tp_col; pc < r->tr_end.tp_col; pc++) { 275 row[pc] = c; 276 } 277 } 278} 279 280void 281vtbuf_fill_locked(struct vt_buf *vb, const term_rect_t *r, term_char_t c) 282{ 283 KASSERT(r->tr_begin.tp_row < vb->vb_scr_size.tp_row, 284 ("vtbuf_fill_locked begin.tp_row %d must be < screen width %d", 285 r->tr_begin.tp_row, vb->vb_scr_size.tp_row)); 286 KASSERT(r->tr_begin.tp_col < vb->vb_scr_size.tp_col, 287 ("vtbuf_fill_locked begin.tp_col %d must be < screen height %d", 288 r->tr_begin.tp_col, vb->vb_scr_size.tp_col)); 289 290 KASSERT(r->tr_end.tp_row <= vb->vb_scr_size.tp_row, 291 ("vtbuf_fill_locked end.tp_row %d must be <= screen width %d", 292 r->tr_end.tp_row, vb->vb_scr_size.tp_row)); 293 KASSERT(r->tr_end.tp_col <= vb->vb_scr_size.tp_col, 294 ("vtbuf_fill_locked end.tp_col %d must be <= screen height %d", 295 r->tr_end.tp_col, vb->vb_scr_size.tp_col)); 296 297 VTBUF_LOCK(vb); 298 vtbuf_fill(vb, r, c); 299 VTBUF_UNLOCK(vb); 300 301 vtbuf_dirty(vb, r); 302} 303 304static void 305vtbuf_init_rows(struct vt_buf *vb) 306{ 307 int r; 308 309 vb->vb_history_size = MAX(vb->vb_history_size, vb->vb_scr_size.tp_row); 310 311 for (r = 0; r < vb->vb_history_size; r++) 312 vb->vb_rows[r] = &vb->vb_buffer[r * 313 vb->vb_scr_size.tp_col]; 314} 315 316void 
317vtbuf_init_early(struct vt_buf *vb) 318{ 319 320 vb->vb_flags |= VBF_CURSOR; 321 vb->vb_roffset = 0; 322 vb->vb_curroffset = 0; 323 324 vtbuf_init_rows(vb); 325 vtbuf_make_undirty(vb); 326 if ((vb->vb_flags & VBF_MTX_INIT) == 0) { 327 mtx_init(&vb->vb_lock, "vtbuf", NULL, MTX_SPIN); 328 vb->vb_flags |= VBF_MTX_INIT; 329 } 330} 331 332void 333vtbuf_init(struct vt_buf *vb, const term_pos_t *p) 334{ 335 int sz; 336 337 vb->vb_scr_size = *p; 338 vb->vb_history_size = VBF_DEFAULT_HISTORY_SIZE; 339 340 if ((vb->vb_flags & VBF_STATIC) == 0) { 341 sz = vb->vb_history_size * p->tp_col * sizeof(term_char_t); 342 vb->vb_buffer = malloc(sz, M_VTBUF, M_WAITOK | M_ZERO); 343 344 sz = vb->vb_history_size * sizeof(term_char_t *); 345 vb->vb_rows = malloc(sz, M_VTBUF, M_WAITOK | M_ZERO); 346 } 347 348 vtbuf_init_early(vb); 349} 350 351void 352vtbuf_sethistory_size(struct vt_buf *vb, int size) 353{ 354 term_pos_t p; 355 356 /* With same size */ 357 p.tp_row = vb->vb_scr_size.tp_row; 358 p.tp_col = vb->vb_scr_size.tp_col; 359 vtbuf_grow(vb, &p, size); 360} 361 362void 363vtbuf_grow(struct vt_buf *vb, const term_pos_t *p, int history_size) 364{ 365 term_char_t *old, *new, **rows, **oldrows, **copyrows, *row; 366 int bufsize, rowssize, w, h, c, r; 367 term_rect_t rect; 368 369 history_size = MAX(history_size, p->tp_row); 370 371 if (history_size > vb->vb_history_size || p->tp_col > 372 vb->vb_scr_size.tp_col) { 373 /* Allocate new buffer. */ 374 bufsize = history_size * p->tp_col * sizeof(term_char_t); 375 new = malloc(bufsize, M_VTBUF, M_WAITOK | M_ZERO); 376 rowssize = history_size * sizeof(term_pos_t *); 377 rows = malloc(rowssize, M_VTBUF, M_WAITOK | M_ZERO); 378 379 /* Toggle it. */ 380 VTBUF_LOCK(vb); 381 old = vb->vb_flags & VBF_STATIC ? NULL : vb->vb_buffer; 382 oldrows = vb->vb_flags & VBF_STATIC ? 
NULL : vb->vb_rows; 383 copyrows = vb->vb_rows; 384 w = vb->vb_scr_size.tp_col; 385 h = vb->vb_history_size; 386 387 vb->vb_history_size = history_size; 388 vb->vb_buffer = new; 389 vb->vb_rows = rows; 390 vb->vb_flags &= ~VBF_STATIC; 391 vb->vb_scr_size = *p; 392 vtbuf_init_rows(vb); 393 394 /* Copy history and fill extra space. */ 395 for (r = 0; r < history_size; r ++) { 396 row = rows[r]; 397 if (r < h) { /* Copy. */ 398 memmove(rows[r], copyrows[r], 399 MIN(p->tp_col, w) * sizeof(term_char_t)); 400 for (c = MIN(p->tp_col, w); c < p->tp_col; 401 c++) { 402 row[c] = VTBUF_SPACE_CHAR; 403 } 404 } else { /* Just fill. */ 405 rect.tr_begin.tp_col = 0; 406 rect.tr_begin.tp_row = r; 407 rect.tr_end.tp_col = p->tp_col; 408 rect.tr_end.tp_row = p->tp_row; 409 vtbuf_fill(vb, &rect, VTBUF_SPACE_CHAR); 410 break; 411 } 412 } 413 vtbuf_make_undirty(vb); 414 VTBUF_UNLOCK(vb); 415 /* Deallocate old buffer. */ 416 free(old, M_VTBUF); 417 free(oldrows, M_VTBUF); 418 } 419} 420 421void 422vtbuf_putchar(struct vt_buf *vb, const term_pos_t *p, term_char_t c) 423{ 424 term_char_t *row; 425 426 KASSERT(p->tp_row < vb->vb_scr_size.tp_row, 427 ("vtbuf_putchar tp_row %d must be less than screen width %d", 428 p->tp_row, vb->vb_scr_size.tp_row)); 429 KASSERT(p->tp_col < vb->vb_scr_size.tp_col, 430 ("vtbuf_putchar tp_col %d must be less than screen height %d", 431 p->tp_col, vb->vb_scr_size.tp_col)); 432 433 row = vb->vb_rows[(vb->vb_curroffset + p->tp_row) % 434 VTBUF_MAX_HEIGHT(vb)]; 435 if (row[p->tp_col] != c) { 436 VTBUF_LOCK(vb); 437 row[p->tp_col] = c; 438 VTBUF_UNLOCK(vb); 439 vtbuf_dirty_cell(vb, p); 440 } 441} 442 443void 444vtbuf_cursor_position(struct vt_buf *vb, const term_pos_t *p) 445{ 446 447 if (vb->vb_flags & VBF_CURSOR) { 448 vtbuf_dirty_cell(vb, &vb->vb_cursor); 449 vb->vb_cursor = *p; 450 vtbuf_dirty_cell(vb, &vb->vb_cursor); 451 } else { 452 vb->vb_cursor = *p; 453 } 454} 455 456void 457vtbuf_cursor_visibility(struct vt_buf *vb, int yes) 458{ 459 int oflags, 
nflags; 460 461 VTBUF_LOCK(vb); 462 oflags = vb->vb_flags; 463 if (yes) 464 vb->vb_flags |= VBF_CURSOR; 465 else 466 vb->vb_flags &= ~VBF_CURSOR; 467 nflags = vb->vb_flags; 468 VTBUF_UNLOCK(vb); 469 470 if (oflags != nflags) 471 vtbuf_dirty_cell(vb, &vb->vb_cursor); 472} 473