Deleted: vt_buf.c (271952)    Added: vt_buf.c (271973)
1/*-
2 * Copyright (c) 2009, 2013 The FreeBSD Foundation
3 * All rights reserved.
4 *
5 * This software was developed by Ed Schouten under sponsorship from the
6 * FreeBSD Foundation.
7 *
8 * Portions of this software were developed by Oleksandr Rybalko

--- 17 unchanged lines hidden ---

26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * SUCH DAMAGE.
31 */
32
33#include <sys/cdefs.h>
34__FBSDID("$FreeBSD: stable/10/sys/dev/vt/vt_buf.c 271952 2014-09-22 10:21:08Z ray $");
34__FBSDID("$FreeBSD: stable/10/sys/dev/vt/vt_buf.c 271973 2014-09-22 16:13:33Z dumbbell $");
35
36#include <sys/param.h>
37#include <sys/kernel.h>
38#include <sys/lock.h>
39#include <sys/malloc.h>
40#include <sys/mutex.h>
41#include <sys/reboot.h>
42#include <sys/systm.h>

--- 28 unchanged lines hidden ---

71 /* No scrolling if not enabled. */
72 if ((vb->vb_flags & VBF_SCROLL) == 0) {
73 if (vb->vb_roffset != vb->vb_curroffset) {
74 vb->vb_roffset = vb->vb_curroffset;
75 return (0xffff);
76 }
77 return (0); /* No changes */
78 }
79 top = (vb->vb_flags & VBF_HISTORY_FULL)?
80 (vb->vb_curroffset + vb->vb_scr_size.tp_row):vb->vb_history_size;
81 bottom = vb->vb_curroffset + vb->vb_history_size;
82
79
83 /*
 84	 * Operate on a copy of the offset value, since it can temporarily be
 85	 * bigger than the number of rows in the buffer.
86 */
87 roffset = vb->vb_roffset + vb->vb_history_size;
80 /* "top" may be a negative integer. */
81 bottom = vb->vb_curroffset;
82 top = (vb->vb_flags & VBF_HISTORY_FULL) ?
83 bottom + vb->vb_scr_size.tp_row - vb->vb_history_size :
84 0;
85
86 roffset = 0; /* Make gcc happy. */
88 switch (whence) {
89 case VHS_SET:
87 switch (whence) {
88 case VHS_SET:
90 roffset = offset + vb->vb_history_size;
89 if (offset < 0)
90 offset = 0;
91 roffset = top + offset;
91 break;
92 case VHS_CUR:
92 break;
93 case VHS_CUR:
94 /*
 95	 * Operate on a copy of the offset value, since it can temporarily
 96	 * be bigger than the number of rows in the buffer.
97 */
98 roffset = vb->vb_roffset;
99 if (roffset >= bottom + vb->vb_scr_size.tp_row)
100 roffset -= vb->vb_history_size;
101
93 roffset += offset;
102 roffset += offset;
103 roffset = MAX(roffset, top);
104 roffset = MIN(roffset, bottom);
105
106 if (roffset < 0)
107 roffset = vb->vb_history_size + roffset;
108
94 break;
95 case VHS_END:
96 /* Go to current offset. */
109 break;
110 case VHS_END:
111 /* Go to current offset. */
97 roffset = vb->vb_curroffset + vb->vb_history_size;
112 roffset = vb->vb_curroffset;
98 break;
99 }
100
113 break;
114 }
115
101 roffset = (roffset < top)?top:roffset;
102 roffset = (roffset > bottom)?bottom:roffset;
116 diff = vb->vb_roffset != roffset;
117 vb->vb_roffset = roffset;
103
118
104 roffset %= vb->vb_history_size;
105
106 if (vb->vb_roffset != roffset) {
107 diff = vb->vb_roffset - roffset;
108 vb->vb_roffset = roffset;
109 /*
110	 * Offset changed; update N lines on the screen:
111	 * +N - N lines at the top;
112	 * -N - N lines at the bottom.
113 */
114 return (diff);
115 }
116 return (0); /* No changes */
119 return (diff);
117}
118
119void
120vthistory_addlines(struct vt_buf *vb, int offset)
121{
122
123 vb->vb_curroffset += offset;
124 if (vb->vb_curroffset < 0)
125 vb->vb_curroffset = 0;
120}
121
122void
123vthistory_addlines(struct vt_buf *vb, int offset)
124{
125
126 vb->vb_curroffset += offset;
127 if (vb->vb_curroffset < 0)
128 vb->vb_curroffset = 0;
129 if (vb->vb_curroffset + vb->vb_scr_size.tp_row >= vb->vb_history_size)
130 vb->vb_flags |= VBF_HISTORY_FULL;
126 vb->vb_curroffset %= vb->vb_history_size;
127 if ((vb->vb_flags & VBF_SCROLL) == 0) {
128 vb->vb_roffset = vb->vb_curroffset;
129 }
130}
131
132void
133vthistory_getpos(const struct vt_buf *vb, unsigned int *offset)

--- 56 unchanged lines hidden (view full) ---

190
191 if ((POS_INDEX(sc, sr) <= POS_INDEX(col, row)) &&
192 (POS_INDEX(col, row) < POS_INDEX(ec, er)))
193 return (1);
194
195 return (0);
196}
197
131 vb->vb_curroffset %= vb->vb_history_size;
132 if ((vb->vb_flags & VBF_SCROLL) == 0) {
133 vb->vb_roffset = vb->vb_curroffset;
134 }
135}
136
137void
138vthistory_getpos(const struct vt_buf *vb, unsigned int *offset)

--- 56 unchanged lines hidden (view full) ---

195
196 if ((POS_INDEX(sc, sr) <= POS_INDEX(col, row)) &&
197 (POS_INDEX(col, row) < POS_INDEX(ec, er)))
198 return (1);
199
200 return (0);
201}
202
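
To make the rewritten history handling above easier to follow, here is a minimal userland sketch of the arithmetic in the new vthistory_seek() and vthistory_addlines(). It is an illustration only: struct hist, its fields and the hist_* names are simplified stand-ins invented for this note, not vt(4) types, and MAX/MIN replace the <sys/param.h> macros.

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

enum { VHS_SET, VHS_CUR, VHS_END };

/* Simplified stand-in for the vt(4) history state (hypothetical names). */
struct hist {
	int curroffset;		/* top row of the live screen */
	int roffset;		/* top row currently displayed */
	int size;		/* rows in the circular history buffer */
	int scr_rows;		/* visible rows on the screen */
	int full;		/* buffer has wrapped (VBF_HISTORY_FULL) */
	int scrolling;		/* user is in scrollback (VBF_SCROLL) */
};

/* Bookkeeping done by the new vthistory_addlines(). */
static void
hist_addlines(struct hist *h, int nlines)
{
	h->curroffset += nlines;
	if (h->curroffset < 0)
		h->curroffset = 0;
	/* Once the screen bottom reaches the last row, the buffer cycles. */
	if (h->curroffset + h->scr_rows >= h->size)
		h->full = 1;
	h->curroffset %= h->size;
	if (!h->scrolling)
		h->roffset = h->curroffset;
}

/* Clamping done by the new vthistory_seek(); returns whether it moved. */
static int
hist_seek(struct hist *h, int offset, int whence)
{
	int top, bottom, roffset;

	bottom = h->curroffset;
	/* "top" may be negative once the buffer has wrapped. */
	top = h->full ? bottom + h->scr_rows - h->size : 0;

	roffset = 0;
	switch (whence) {
	case VHS_SET:
		if (offset < 0)
			offset = 0;
		roffset = top + offset;
		break;
	case VHS_CUR:
		roffset = h->roffset;
		if (roffset >= bottom + h->scr_rows)
			roffset -= h->size;
		roffset += offset;
		roffset = MAX(roffset, top);
		roffset = MIN(roffset, bottom);
		if (roffset < 0)
			roffset += h->size;
		break;
	case VHS_END:
		roffset = h->curroffset;
		break;
	}

	if (h->roffset == roffset)
		return (0);
	h->roffset = roffset;
	return (1);
}

int
main(void)
{
	struct hist h = { .size = 100, .scr_rows = 24 };
	int i;

	/* Emit 120 lines: the buffer wraps and "full" latches on. */
	for (i = 0; i < 120; i++)
		hist_addlines(&h, 1);
	printf("full=%d curroffset=%d\n", h.full, h.curroffset);

	/* Scroll up 10 rows, then far past the oldest kept row. */
	h.scrolling = 1;
	printf("%d %d\n", hist_seek(&h, -10, VHS_CUR), h.roffset);
	printf("%d %d\n", hist_seek(&h, -500, VHS_CUR), h.roffset);
	return (0);
}

Run as written, this should print full=1 curroffset=20, then 1 10 and 1 44: the over-long scroll-up is clamped to row 44, i.e. curroffset + scr_rows, the oldest row in the cycled buffer, which is the possibly negative "top" bound of the new code wrapped back into the buffer.
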
198static inline uint64_t
199vtbuf_dirty_axis(unsigned int begin, unsigned int end)
200{
201 uint64_t left, right, mask;
202
203 /*
204 * Mark all bits between begin % 64 and end % 64 dirty.
205 * This code is functionally equivalent to:
206 *
207 * for (i = begin; i < end; i++)
208 * mask |= (uint64_t)1 << (i % 64);
209 */
210
211 /* Obvious case. Mark everything dirty. */
212 if (end - begin >= 64)
213 return (VBM_DIRTY);
214
215 /* 1....0; used bits on the left. */
216 left = VBM_DIRTY << begin % 64;
217 /* 0....1; used bits on the right. */
218 right = VBM_DIRTY >> -end % 64;
219
220 /*
221 * Only take the intersection. If the result of that is 0, it
222 * means that the selection crossed a 64 bit boundary along the
223 * way, which means we have to take the complement.
224 */
225 mask = left & right;
226 if (mask == 0)
227 mask = left | right;
228 return (mask);
229}
230
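
The left-hand revision above still contains vtbuf_dirty_axis(), which r271973 removes together with the rest of the vt_bufmask machinery. Its closed-form mask is worth a standalone look. The sketch below is plain C written for this note (hypothetical names, not kernel code); it pairs the shifted-mask computation with the reference loop from the comment and checks that they agree for a range inside one 64-bit word and for one that wraps across it.

#include <stdint.h>
#include <stdio.h>

#define VBM_DIRTY UINT64_MAX	/* all 64 positions dirty */

/* Closed-form variant, as in the removed vtbuf_dirty_axis(). */
static uint64_t
dirty_axis(unsigned int begin, unsigned int end)
{
	uint64_t left, right, mask;

	/* Spans of 64 rows/columns or more dirty every bit. */
	if (end - begin >= 64)
		return (VBM_DIRTY);

	left = VBM_DIRTY << begin % 64;	/* bits begin%64 .. 63 */
	right = VBM_DIRTY >> -end % 64;	/* low end%64 bits (all, if it is 0) */

	mask = left & right;
	if (mask == 0)			/* range wrapped past bit 63 */
		mask = left | right;
	return (mask);
}

/* Reference loop from the comment in the original code. */
static uint64_t
dirty_axis_ref(unsigned int begin, unsigned int end)
{
	uint64_t mask = 0;
	unsigned int i;

	for (i = begin; i < end; i++)
		mask |= (uint64_t)1 << (i % 64);
	return (mask);
}

int
main(void)
{
	/* A range inside one 64-bit word, and one wrapping across it. */
	printf("%d\n", dirty_axis(3, 10) == dirty_axis_ref(3, 10));
	printf("%d\n", dirty_axis(60, 70) == dirty_axis_ref(60, 70));
	return (0);
}

The trick is that left & right is empty exactly when the marked range crosses the 64-bit word boundary, and in that case the union of the two partial masks is the correct wrapped result.
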
231static inline void
232vtbuf_dirty_locked(struct vt_buf *vb, const term_rect_t *area)
233{
234
235 if (vb->vb_dirtyrect.tr_begin.tp_row > area->tr_begin.tp_row)
236 vb->vb_dirtyrect.tr_begin.tp_row = area->tr_begin.tp_row;
237 if (vb->vb_dirtyrect.tr_begin.tp_col > area->tr_begin.tp_col)
238 vb->vb_dirtyrect.tr_begin.tp_col = area->tr_begin.tp_col;
239 if (vb->vb_dirtyrect.tr_end.tp_row < area->tr_end.tp_row)
240 vb->vb_dirtyrect.tr_end.tp_row = area->tr_end.tp_row;
241 if (vb->vb_dirtyrect.tr_end.tp_col < area->tr_end.tp_col)
242 vb->vb_dirtyrect.tr_end.tp_col = area->tr_end.tp_col;
203static inline void
204vtbuf_dirty_locked(struct vt_buf *vb, const term_rect_t *area)
205{
206
207 if (vb->vb_dirtyrect.tr_begin.tp_row > area->tr_begin.tp_row)
208 vb->vb_dirtyrect.tr_begin.tp_row = area->tr_begin.tp_row;
209 if (vb->vb_dirtyrect.tr_begin.tp_col > area->tr_begin.tp_col)
210 vb->vb_dirtyrect.tr_begin.tp_col = area->tr_begin.tp_col;
211 if (vb->vb_dirtyrect.tr_end.tp_row < area->tr_end.tp_row)
212 vb->vb_dirtyrect.tr_end.tp_row = area->tr_end.tp_row;
213 if (vb->vb_dirtyrect.tr_end.tp_col < area->tr_end.tp_col)
214 vb->vb_dirtyrect.tr_end.tp_col = area->tr_end.tp_col;
243 vb->vb_dirtymask.vbm_row |=
244 vtbuf_dirty_axis(area->tr_begin.tp_row, area->tr_end.tp_row);
245 vb->vb_dirtymask.vbm_col |=
246 vtbuf_dirty_axis(area->tr_begin.tp_col, area->tr_end.tp_col);
247}
248
249void
250vtbuf_dirty(struct vt_buf *vb, const term_rect_t *area)
251{
252
253 VTBUF_LOCK(vb);
254 vtbuf_dirty_locked(vb, area);

--- 12 unchanged lines hidden ---

267}
268
269static void
270vtbuf_make_undirty(struct vt_buf *vb)
271{
272
273 vb->vb_dirtyrect.tr_begin = vb->vb_scr_size;
274 vb->vb_dirtyrect.tr_end.tp_row = vb->vb_dirtyrect.tr_end.tp_col = 0;
215}
216
217void
218vtbuf_dirty(struct vt_buf *vb, const term_rect_t *area)
219{
220
221 VTBUF_LOCK(vb);
222 vtbuf_dirty_locked(vb, area);

--- 12 unchanged lines hidden ---

235}
236
237static void
238vtbuf_make_undirty(struct vt_buf *vb)
239{
240
241 vb->vb_dirtyrect.tr_begin = vb->vb_scr_size;
242 vb->vb_dirtyrect.tr_end.tp_row = vb->vb_dirtyrect.tr_end.tp_col = 0;
275 vb->vb_dirtymask.vbm_row = vb->vb_dirtymask.vbm_col = 0;
276}
277
278void
243}
244
245void
279vtbuf_undirty(struct vt_buf *vb, term_rect_t *r, struct vt_bufmask *m)
246vtbuf_undirty(struct vt_buf *vb, term_rect_t *r)
280{
281
282 VTBUF_LOCK(vb);
283 *r = vb->vb_dirtyrect;
247{
248
249 VTBUF_LOCK(vb);
250 *r = vb->vb_dirtyrect;
284 *m = vb->vb_dirtymask;
285 vtbuf_make_undirty(vb);
286 VTBUF_UNLOCK(vb);
287}
288
289void
290vtbuf_copy(struct vt_buf *vb, const term_rect_t *r, const term_pos_t *p2)
291{
292 const term_pos_t *p1 = &r->tr_begin;

--- 155 unchanged lines hidden ---

448 p.tp_row = vb->vb_scr_size.tp_row;
449 p.tp_col = vb->vb_scr_size.tp_col;
450 vtbuf_grow(vb, &p, size);
451}
452
453void
454vtbuf_grow(struct vt_buf *vb, const term_pos_t *p, unsigned int history_size)
455{
251 vtbuf_make_undirty(vb);
252 VTBUF_UNLOCK(vb);
253}
254
255void
256vtbuf_copy(struct vt_buf *vb, const term_rect_t *r, const term_pos_t *p2)
257{
258 const term_pos_t *p1 = &r->tr_begin;

--- 155 unchanged lines hidden ---

414 p.tp_row = vb->vb_scr_size.tp_row;
415 p.tp_col = vb->vb_scr_size.tp_col;
416 vtbuf_grow(vb, &p, size);
417}
418
419void
420vtbuf_grow(struct vt_buf *vb, const term_pos_t *p, unsigned int history_size)
421{
456 term_char_t *old, *new, **rows, **oldrows, **copyrows, *row;
457 int bufsize, rowssize, w, h, c, r;
422 term_char_t *old, *new, **rows, **oldrows, **copyrows, *row, *oldrow;
423 int bufsize, rowssize, w, h, c, r, history_was_full;
424 unsigned int old_history_size;
458 term_rect_t rect;
459
460 history_size = MAX(history_size, p->tp_row);
461
425 term_rect_t rect;
426
427 history_size = MAX(history_size, p->tp_row);
428
462 /* If new screen/history size bigger or buffer is VBF_STATIC. */
463 if ((history_size > vb->vb_history_size) || (p->tp_col >
464 vb->vb_scr_size.tp_col) || (vb->vb_flags & VBF_STATIC)) {
465 /* Allocate new buffer. */
466 bufsize = history_size * p->tp_col * sizeof(term_char_t);
467 new = malloc(bufsize, M_VTBUF, M_WAITOK | M_ZERO);
468 rowssize = history_size * sizeof(term_pos_t *);
469 rows = malloc(rowssize, M_VTBUF, M_WAITOK | M_ZERO);
429 /* Allocate new buffer. */
430 bufsize = history_size * p->tp_col * sizeof(term_char_t);
431 new = malloc(bufsize, M_VTBUF, M_WAITOK | M_ZERO);
432 rowssize = history_size * sizeof(term_pos_t *);
433 rows = malloc(rowssize, M_VTBUF, M_WAITOK | M_ZERO);
470
434
471 /* Toggle it. */
472 VTBUF_LOCK(vb);
473 old = vb->vb_flags & VBF_STATIC ? NULL : vb->vb_buffer;
474 oldrows = vb->vb_flags & VBF_STATIC ? NULL : vb->vb_rows;
475 copyrows = vb->vb_rows;
476 w = vb->vb_scr_size.tp_col;
477 h = vb->vb_history_size;
435 /* Toggle it. */
436 VTBUF_LOCK(vb);
437 old = vb->vb_flags & VBF_STATIC ? NULL : vb->vb_buffer;
438 oldrows = vb->vb_flags & VBF_STATIC ? NULL : vb->vb_rows;
439 copyrows = vb->vb_rows;
478
440
479 vb->vb_history_size = history_size;
480 vb->vb_buffer = new;
481 vb->vb_rows = rows;
482 vb->vb_flags &= ~VBF_STATIC;
483 vb->vb_scr_size = *p;
484 vtbuf_init_rows(vb);
441 w = vb->vb_scr_size.tp_col;
442 h = vb->vb_scr_size.tp_row;
443 old_history_size = vb->vb_history_size;
444 history_was_full = vb->vb_flags & VBF_HISTORY_FULL;
485
445
486 /* Copy history and fill extra space. */
487 for (r = 0; r < history_size; r ++) {
446 vb->vb_history_size = history_size;
447 vb->vb_buffer = new;
448 vb->vb_rows = rows;
449 vb->vb_flags &= ~VBF_STATIC;
450 vb->vb_scr_size = *p;
451 vtbuf_init_rows(vb);
452
453 /* Copy history and fill extra space if needed. */
454 if (history_size > old_history_size) {
455 /*
456 * Copy rows to the new buffer. The first row in the history
456	 * is back at index 0, i.e. the new buffer doesn't cycle.
458 *
459 * The rest of the new buffer is initialized with blank
460 * content.
461 */
462 for (r = 0; r < old_history_size; r ++) {
463 row = rows[r];
464
465 /* Compute the corresponding row in the old buffer. */
466 if (history_was_full)
467 /*
468 * The buffer is full, the "top" row is
469 * the one just after the viewable area
470 * (curroffset + viewable height) in the
471 * cycling buffer. The corresponding row
472 * is computed from this top row.
473 */
474 oldrow = copyrows[
475 (vb->vb_curroffset + h + r) %
476 old_history_size];
477 else
478 /*
479 * The buffer is not full, therefore,
480	 * we have not cycled yet. The
481 * corresponding rows are the same in
482 * both buffers.
483 */
484 oldrow = copyrows[r];
485
486 memmove(row, oldrow,
487 MIN(p->tp_col, w) * sizeof(term_char_t));
488
488 /*
489 * XXX VTBUF_SPACE_CHAR(TERMINAL_NORM_ATTR) will
490	 * extend lines of kernel text using the wrong
491 * background color.
492 */
489 /*
490 * XXX VTBUF_SPACE_CHAR(TERMINAL_NORM_ATTR) will
491	 * extend lines of kernel text using the wrong
492 * background color.
493 */
493 row = rows[r];
494 if (r < h) { /* Copy. */
495 memmove(rows[r], copyrows[r],
496 MIN(p->tp_col, w) * sizeof(term_char_t));
497 for (c = MIN(p->tp_col, w); c < p->tp_col;
498 c++) {
499 row[c] = VTBUF_SPACE_CHAR(
500 TERMINAL_NORM_ATTR);
501 }
502 } else { /* Just fill. */
503 rect.tr_begin.tp_col = 0;
504 rect.tr_begin.tp_row = r;
505 rect.tr_end.tp_col = p->tp_col;
506 rect.tr_end.tp_row = p->tp_row;
507 vtbuf_fill(vb, &rect,
508 VTBUF_SPACE_CHAR(TERMINAL_NORM_ATTR));
509 break;
494 for (c = MIN(p->tp_col, w); c < p->tp_col; c++) {
495 row[c] = VTBUF_SPACE_CHAR(TERMINAL_NORM_ATTR);
510 }
511 }
496 }
497 }
512 vtbuf_make_undirty(vb);
513 VTBUF_UNLOCK(vb);
514 /* Deallocate old buffer. */
515 free(old, M_VTBUF);
516 free(oldrows, M_VTBUF);
498
499 /* Fill remaining rows. */
500 rect.tr_begin.tp_col = 0;
501 rect.tr_begin.tp_row = old_history_size;
502 rect.tr_end.tp_col = p->tp_col;
503 rect.tr_end.tp_row = p->tp_row;
504 vtbuf_fill(vb, &rect, VTBUF_SPACE_CHAR(TERMINAL_NORM_ATTR));
505
506 vb->vb_flags &= ~VBF_HISTORY_FULL;
517 } else {
507 } else {
518 /* Just update the size. */
519 vb->vb_scr_size = *p;
508 /*
509 * Copy rows to the new buffer. The first row in the history
510	 * is back at index 0, i.e. the new buffer doesn't cycle.
511 *
512 * (old_history_size - history_size) lines of history are
513 * dropped.
514 */
515 for (r = 0; r < history_size; r ++) {
516 row = rows[r];
517
518 /*
519 * Compute the corresponding row in the old buffer.
520 *
521 * See the equivalent if{} block above for an
522 * explanation.
523 */
524 if (history_was_full)
525 oldrow = copyrows[
526 (vb->vb_curroffset + h + r +
527 (old_history_size - history_size)) %
528 old_history_size];
529 else
530 oldrow = copyrows[
531 (r + (old_history_size - history_size)) %
532 old_history_size];
533
534 memmove(row, oldrow,
535 MIN(p->tp_col, w) * sizeof(term_char_t));
536
537 /*
538 * XXX VTBUF_SPACE_CHAR(TERMINAL_NORM_ATTR) will
539	 * extend lines of kernel text using the wrong
540 * background color.
541 */
542 for (c = MIN(p->tp_col, w); c < p->tp_col; c++) {
543 row[c] = VTBUF_SPACE_CHAR(TERMINAL_NORM_ATTR);
544 }
545 }
546
547 if (!history_was_full &&
548 (vb->vb_curroffset + h) >= history_size)
549 vb->vb_flags |= VBF_HISTORY_FULL;
520 }
550 }
551
552 /*
553 * If the screen is already filled (there are non-visible lines
554 * above the current viewable area), adjust curroffset to the
555 * new viewable area.
556 */
557 if (!history_was_full && vb->vb_curroffset > 0) {
558 vb->vb_curroffset = vb->vb_curroffset + h - p->tp_row;
559 if (vb->vb_curroffset < 0)
560 vb->vb_curroffset += vb->vb_history_size;
561 vb->vb_curroffset %= vb->vb_history_size;
562 vb->vb_roffset = vb->vb_curroffset;
563 }
564
565 vtbuf_make_undirty(vb);
566 VTBUF_UNLOCK(vb);
567
568 /* Deallocate old buffer. */
569 free(old, M_VTBUF);
570 free(oldrows, M_VTBUF);
521}
522
523void
524vtbuf_putchar(struct vt_buf *vb, const term_pos_t *p, term_char_t c)
525{
526 term_char_t *row;
527
528 KASSERT(p->tp_row < vb->vb_scr_size.tp_row,

--- 218 unchanged lines hidden ---
571}
572
573void
574vtbuf_putchar(struct vt_buf *vb, const term_pos_t *p, term_char_t c)
575{
576 term_char_t *row;
577
578 KASSERT(p->tp_row < vb->vb_scr_size.tp_row,

--- 218 unchanged lines hidden ---
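
The bulk of r271973 is the vtbuf_grow() rewrite shown above. Its two copy loops (history growing, history shrinking) differ only in which old row feeds each row of the new, non-cycling buffer. Below is a hedged sketch of that mapping; grow_src_row() is a hypothetical helper written for this note rather than kernel code, with the buffer state passed in as plain ints.

/*
 * grow_src_row(): for row "r" of the new linear history buffer, return the
 * index of the row to copy from the old circular buffer.  "r" ranges over
 * the rows that are kept (MIN(old_size, new_size) of them).
 */
int
grow_src_row(int r, int curroffset, int scr_rows, int old_size, int new_size,
    int was_full)
{
	int dropped;

	/* When the history shrinks, the oldest rows are dropped. */
	dropped = old_size > new_size ? old_size - new_size : 0;

	if (was_full)
		/*
		 * The old buffer wrapped: its oldest row sits just below
		 * the live screen, at curroffset + scr_rows.
		 */
		return ((curroffset + scr_rows + r + dropped) % old_size);

	/* No wrap yet: rows are already in order from index 0. */
	return ((r + dropped) % old_size);
}

With was_full set, the oldest surviving row sits just below the live screen (curroffset + scr_rows) in the cycling buffer, so the copy starts there; without a wrap the rows are already in order and only the dropped prefix is skipped when the history shrinks.
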