// SPDX-License-Identifier: MIT
/*
 * Copyright 2020 Noralf Trønnes
 */

#include <linux/lz4.h>
#include <linux/usb.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_format_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <drm/drm_gem.h>
#include <drm/drm_gem_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_print.h>
#include <drm/drm_rect.h>
#include <drm/drm_simple_kms_helper.h>
#include <drm/gud.h>

#include "gud_internal.h"

/*
 * Some userspace rendering loops drive all displays from the same loop,
 * so a fast display ends up waiting for a slow one. Such users might want
 * to enable this module parameter.
 */
static bool gud_async_flush;
module_param_named(async_flush, gud_async_flush, bool, 0644);
MODULE_PARM_DESC(async_flush, "Enable asynchronous flushing [default=0]");
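/*
 * The parameter is created with mode 0644, so it can also be changed at
 * runtime through sysfs (path assuming the module is loaded as gud):
 *
 *   echo 1 > /sys/module/gud/parameters/async_flush
 */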

/*
 * FIXME: The driver is probably broken on Big Endian machines.
 * See discussion:
 * https://lore.kernel.org/dri-devel/CAKb7UvihLX0hgBOP3VBG7O+atwZcUVCPVuBdfmDMpg0NjXe-cQ@mail.gmail.com/
 */

static bool gud_is_big_endian(void)
{
#if defined(__BIG_ENDIAN)
	return true;
#else
	return false;
#endif
}

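/*
 * Convert an XRGB8888 source rectangle to a packed 1/2/4 bits-per-pixel
 * greyscale buffer: the pixels are first converted to 8-bit greyscale in a
 * temporary buffer and then truncated to the target depth and packed MSB
 * first into each byte. Returns the length of the packed buffer, or zero on
 * allocation failure.
 */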
static size_t gud_xrgb8888_to_r124(u8 *dst, const struct drm_format_info *format,
				   void *src, struct drm_framebuffer *fb,
				   struct drm_rect *rect,
				   struct drm_format_conv_state *fmtcnv_state)
{
	unsigned int block_width = drm_format_info_block_width(format, 0);
	unsigned int bits_per_pixel = 8 / block_width;
	unsigned int x, y, width, height;
	u8 pix, *pix8, *block = dst; /* Assign to silence compiler warning */
	struct iosys_map dst_map, vmap;
	size_t len;
	void *buf;

	WARN_ON_ONCE(format->char_per_block[0] != 1);

	/* Start on a byte boundary */
	rect->x1 = ALIGN_DOWN(rect->x1, block_width);
	width = drm_rect_width(rect);
	height = drm_rect_height(rect);
	len = drm_format_info_min_pitch(format, 0, width) * height;

	buf = kmalloc(width * height, GFP_KERNEL);
	if (!buf)
		return 0;

	iosys_map_set_vaddr(&dst_map, buf);
	iosys_map_set_vaddr(&vmap, src);
	drm_fb_xrgb8888_to_gray8(&dst_map, NULL, &vmap, fb, rect, fmtcnv_state);
	pix8 = buf;

	for (y = 0; y < height; y++) {
		for (x = 0; x < width; x++) {
			unsigned int pixpos = x % block_width; /* within byte from the left */
			unsigned int pixshift = (block_width - pixpos - 1) * bits_per_pixel;

			if (!pixpos) {
				block = dst++;
				*block = 0;
			}

			pix = (*pix8++) >> (8 - bits_per_pixel);
			*block |= pix << pixshift;
		}
	}

	kfree(buf);

	return len;
}

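/*
 * Convert an XRGB8888 source rectangle directly to a packed colour format
 * (currently only GUD_DRM_FORMAT_XRGB1111) by keeping the most significant
 * bit of each colour component. Returns the length of the packed buffer.
 */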
static size_t gud_xrgb8888_to_color(u8 *dst, const struct drm_format_info *format,
				    void *src, struct drm_framebuffer *fb,
				    struct drm_rect *rect)
{
	unsigned int block_width = drm_format_info_block_width(format, 0);
	unsigned int bits_per_pixel = 8 / block_width;
	u8 r, g, b, pix, *block = dst; /* Assign to silence compiler warning */
	unsigned int x, y, width;
	__le32 *sbuf32;
	u32 pix32;
	size_t len;

	/* Start on a byte boundary */
	rect->x1 = ALIGN_DOWN(rect->x1, block_width);
	width = drm_rect_width(rect);
	len = drm_format_info_min_pitch(format, 0, width) * drm_rect_height(rect);

	for (y = rect->y1; y < rect->y2; y++) {
		sbuf32 = src + (y * fb->pitches[0]);
		sbuf32 += rect->x1;

		for (x = 0; x < width; x++) {
			unsigned int pixpos = x % block_width; /* within byte from the left */
			unsigned int pixshift = (block_width - pixpos - 1) * bits_per_pixel;

			if (!pixpos) {
				block = dst++;
				*block = 0;
			}

			pix32 = le32_to_cpu(*sbuf32++);
			r = pix32 >> 16;
			g = pix32 >> 8;
			b = pix32;

			switch (format->format) {
			case GUD_DRM_FORMAT_XRGB1111:
				pix = ((r >> 7) << 2) | ((g >> 7) << 1) | (b >> 7);
				break;
			default:
				WARN_ON_ONCE(1);
				return len;
			}

			*block |= pix << pixshift;
		}
	}

	return len;
}

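/*
 * Prepare a damage rectangle for transfer: convert to the target format if it
 * differs from the framebuffer format, swap bytes on big endian if necessary,
 * optionally LZ4 compress, and fill in the set_buffer request. If compression
 * fails (i.e. the output would not be smaller than the input) it retries with
 * compression disabled.
 */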
static int gud_prep_flush(struct gud_device *gdrm, struct drm_framebuffer *fb,
			  const struct iosys_map *src, bool cached_reads,
			  const struct drm_format_info *format, struct drm_rect *rect,
			  struct gud_set_buffer_req *req,
			  struct drm_format_conv_state *fmtcnv_state)
{
	u8 compression = gdrm->compression;
	struct iosys_map dst;
	void *vaddr, *buf;
	size_t pitch, len;

	pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(rect));
	len = pitch * drm_rect_height(rect);
	if (len > gdrm->bulk_len)
		return -E2BIG;

	vaddr = src[0].vaddr;
retry:
	if (compression)
		buf = gdrm->compress_buf;
	else
		buf = gdrm->bulk_buf;
	iosys_map_set_vaddr(&dst, buf);

	/*
	 * Imported buffers are assumed to be write-combined and thus uncached
	 * with slow reads (at least on ARM).
	 */
	if (format != fb->format) {
		if (format->format == GUD_DRM_FORMAT_R1) {
			len = gud_xrgb8888_to_r124(buf, format, vaddr, fb, rect, fmtcnv_state);
			if (!len)
				return -ENOMEM;
		} else if (format->format == DRM_FORMAT_R8) {
			drm_fb_xrgb8888_to_gray8(&dst, NULL, src, fb, rect, fmtcnv_state);
		} else if (format->format == DRM_FORMAT_RGB332) {
			drm_fb_xrgb8888_to_rgb332(&dst, NULL, src, fb, rect, fmtcnv_state);
		} else if (format->format == DRM_FORMAT_RGB565) {
			drm_fb_xrgb8888_to_rgb565(&dst, NULL, src, fb, rect, fmtcnv_state,
						  gud_is_big_endian());
		} else if (format->format == DRM_FORMAT_RGB888) {
			drm_fb_xrgb8888_to_rgb888(&dst, NULL, src, fb, rect, fmtcnv_state);
		} else {
			len = gud_xrgb8888_to_color(buf, format, vaddr, fb, rect);
		}
	} else if (gud_is_big_endian() && format->cpp[0] > 1) {
		drm_fb_swab(&dst, NULL, src, fb, rect, cached_reads, fmtcnv_state);
	} else if (compression && cached_reads && pitch == fb->pitches[0]) {
		/* can compress directly from the framebuffer */
		buf = vaddr + rect->y1 * pitch;
	} else {
		drm_fb_memcpy(&dst, NULL, src, fb, rect);
	}

	memset(req, 0, sizeof(*req));
	req->x = cpu_to_le32(rect->x1);
	req->y = cpu_to_le32(rect->y1);
	req->width = cpu_to_le32(drm_rect_width(rect));
	req->height = cpu_to_le32(drm_rect_height(rect));
	req->length = cpu_to_le32(len);

	if (compression & GUD_COMPRESSION_LZ4) {
		int complen;

		complen = LZ4_compress_default(buf, gdrm->bulk_buf, len, len, gdrm->lz4_comp_mem);
		if (complen <= 0) {
			compression = 0;
			goto retry;
		}

		req->compression = GUD_COMPRESSION_LZ4;
		req->compressed_length = cpu_to_le32(complen);
	}

	return 0;
}

struct gud_usb_bulk_context {
	struct timer_list timer;
	struct usb_sg_request sgr;
};

static void gud_usb_bulk_timeout(struct timer_list *t)
{
	struct gud_usb_bulk_context *ctx = from_timer(ctx, t, timer);

	usb_sg_cancel(&ctx->sgr);
}

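/*
 * Send @len bytes over the bulk endpoint as a scatter-gather request. The
 * on-stack timer cancels the request if the device hasn't completed it
 * within 3 seconds.
 */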
static int gud_usb_bulk(struct gud_device *gdrm, size_t len)
{
	struct gud_usb_bulk_context ctx;
	int ret;

	ret = usb_sg_init(&ctx.sgr, gud_to_usb_device(gdrm), gdrm->bulk_pipe, 0,
			  gdrm->bulk_sgt.sgl, gdrm->bulk_sgt.nents, len, GFP_KERNEL);
	if (ret)
		return ret;

	timer_setup_on_stack(&ctx.timer, gud_usb_bulk_timeout, 0);
	mod_timer(&ctx.timer, jiffies + msecs_to_jiffies(3000));

	usb_sg_wait(&ctx.sgr);

	if (!del_timer_sync(&ctx.timer))
		ret = -ETIMEDOUT;
	else if (ctx.sgr.status < 0)
		ret = ctx.sgr.status;
	else if (ctx.sgr.bytes != len)
		ret = -EIO;

	destroy_timer_on_stack(&ctx.timer);

	return ret;
}

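/*
 * Flush a single rectangle: prepare the transfer buffer, update the stats
 * counters, send the set_buffer request (skipped when the device always does
 * full updates and the previous flush succeeded) and finally the pixel data
 * over the bulk endpoint.
 */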
static int gud_flush_rect(struct gud_device *gdrm, struct drm_framebuffer *fb,
			  const struct iosys_map *src, bool cached_reads,
			  const struct drm_format_info *format, struct drm_rect *rect,
			  struct drm_format_conv_state *fmtcnv_state)
{
	struct gud_set_buffer_req req;
	size_t len, trlen;
	int ret;

	drm_dbg(&gdrm->drm, "Flushing [FB:%d] " DRM_RECT_FMT "\n", fb->base.id, DRM_RECT_ARG(rect));

	ret = gud_prep_flush(gdrm, fb, src, cached_reads, format, rect, &req, fmtcnv_state);
	if (ret)
		return ret;

	len = le32_to_cpu(req.length);

	if (req.compression)
		trlen = le32_to_cpu(req.compressed_length);
	else
		trlen = len;

	gdrm->stats_length += len;
	/* Did it wrap around? */
	if (gdrm->stats_length <= len && gdrm->stats_actual_length) {
		gdrm->stats_length = len;
		gdrm->stats_actual_length = 0;
	}
	gdrm->stats_actual_length += trlen;

	if (!(gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE) || gdrm->prev_flush_failed) {
		ret = gud_usb_set(gdrm, GUD_REQ_SET_BUFFER, 0, &req, sizeof(req));
		if (ret)
			return ret;
	}

	ret = gud_usb_bulk(gdrm, trlen);
	if (ret)
		gdrm->stats_num_errors++;

	return ret;
}

void gud_clear_damage(struct gud_device *gdrm)
{
	gdrm->damage.x1 = INT_MAX;
	gdrm->damage.y1 = INT_MAX;
	gdrm->damage.x2 = 0;
	gdrm->damage.y2 = 0;
}

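/*
 * Flush the damaged area, splitting it into horizontal slices when it
 * doesn't fit in the bulk buffer in one go. Errors other than the usual
 * disconnect errno values are logged ratelimited.
 */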
static void gud_flush_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
			     const struct iosys_map *src, bool cached_reads,
			     struct drm_rect *damage)
{
	struct drm_format_conv_state fmtcnv_state = DRM_FORMAT_CONV_STATE_INIT;
	const struct drm_format_info *format;
	unsigned int i, lines;
	size_t pitch;
	int ret;

	format = fb->format;
	if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
		format = gdrm->xrgb8888_emulation_format;

	/* Split update if it's too big */
	pitch = drm_format_info_min_pitch(format, 0, drm_rect_width(damage));
	lines = drm_rect_height(damage);

	if (gdrm->bulk_len < lines * pitch)
		lines = gdrm->bulk_len / pitch;

	for (i = 0; i < DIV_ROUND_UP(drm_rect_height(damage), lines); i++) {
		struct drm_rect rect = *damage;

		rect.y1 += i * lines;
		rect.y2 = min_t(u32, rect.y1 + lines, damage->y2);

		ret = gud_flush_rect(gdrm, fb, src, cached_reads, format, &rect, &fmtcnv_state);
		if (ret) {
			if (ret != -ENODEV && ret != -ECONNRESET &&
			    ret != -ESHUTDOWN && ret != -EPROTO)
				dev_err_ratelimited(fb->dev->dev,
						    "Failed to flush framebuffer: error=%d\n", ret);
			gdrm->prev_flush_failed = true;
			break;
		}
	}

	drm_format_conv_state_release(&fmtcnv_state);
}

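/*
 * Deferred flush worker used by async_flush: picks up the framebuffer
 * reference and the accumulated damage under the damage lock and flushes
 * from the shadow buffer, so new damage can be queued in the meantime.
 */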
void gud_flush_work(struct work_struct *work)
{
	struct gud_device *gdrm = container_of(work, struct gud_device, work);
	struct iosys_map shadow_map;
	struct drm_framebuffer *fb;
	struct drm_rect damage;
	int idx;

	if (!drm_dev_enter(&gdrm->drm, &idx))
		return;

	mutex_lock(&gdrm->damage_lock);
	fb = gdrm->fb;
	gdrm->fb = NULL;
	iosys_map_set_vaddr(&shadow_map, gdrm->shadow_buf);
	damage = gdrm->damage;
	gud_clear_damage(gdrm);
	mutex_unlock(&gdrm->damage_lock);

	if (!fb)
		goto out;

	gud_flush_damage(gdrm, fb, &shadow_map, true, &damage);

	drm_framebuffer_put(fb);
out:
	drm_dev_exit(idx);
}

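/*
 * Queue a flush for the worker: copy the damaged area into the shadow buffer
 * (allocated on first use), merge the damage rectangle with any pending
 * damage and schedule the work item. The shadow copy lets the atomic commit
 * return without waiting for the USB transfer.
 */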
static int gud_fb_queue_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
			       const struct iosys_map *src, struct drm_rect *damage)
{
	struct drm_framebuffer *old_fb = NULL;
	struct iosys_map shadow_map;

	mutex_lock(&gdrm->damage_lock);

	if (!gdrm->shadow_buf) {
		gdrm->shadow_buf = vcalloc(fb->pitches[0], fb->height);
		if (!gdrm->shadow_buf) {
			mutex_unlock(&gdrm->damage_lock);
			return -ENOMEM;
		}
	}

	iosys_map_set_vaddr(&shadow_map, gdrm->shadow_buf);
	iosys_map_incr(&shadow_map, drm_fb_clip_offset(fb->pitches[0], fb->format, damage));
	drm_fb_memcpy(&shadow_map, fb->pitches, src, fb, damage);

	if (fb != gdrm->fb) {
		old_fb = gdrm->fb;
		drm_framebuffer_get(fb);
		gdrm->fb = fb;
	}

	gdrm->damage.x1 = min(gdrm->damage.x1, damage->x1);
	gdrm->damage.y1 = min(gdrm->damage.y1, damage->y1);
	gdrm->damage.x2 = max(gdrm->damage.x2, damage->x2);
	gdrm->damage.y2 = max(gdrm->damage.y2, damage->y2);

	mutex_unlock(&gdrm->damage_lock);

	queue_work(system_long_wq, &gdrm->work);

	if (old_fb)
		drm_framebuffer_put(old_fb);

	return 0;
}

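/*
 * Flush damage either through the async worker or synchronously. Devices
 * that require full updates get the damage expanded to cover the whole
 * framebuffer. Falls back to a synchronous flush if the shadow buffer can't
 * be allocated.
 */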
static void gud_fb_handle_damage(struct gud_device *gdrm, struct drm_framebuffer *fb,
				 const struct iosys_map *src, struct drm_rect *damage)
{
	int ret;

	if (gdrm->flags & GUD_DISPLAY_FLAG_FULL_UPDATE)
		drm_rect_init(damage, 0, 0, fb->width, fb->height);

	if (gud_async_flush) {
		ret = gud_fb_queue_damage(gdrm, fb, src, damage);
		if (ret != -ENOMEM)
			return;
	}

	/* Imported buffers are assumed to be write-combined with uncached reads */
	gud_flush_damage(gdrm, fb, src, !fb->obj[0]->import_attach, damage);
}

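/*
 * The pipe's .check hook: on a mode or connector change, build a state
 * request containing the mode, transfer format, connector index and property
 * values, and ask the device whether it can handle that state.
 */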
int gud_pipe_check(struct drm_simple_display_pipe *pipe,
		   struct drm_plane_state *new_plane_state,
		   struct drm_crtc_state *new_crtc_state)
{
	struct gud_device *gdrm = to_gud_device(pipe->crtc.dev);
	struct drm_plane_state *old_plane_state = pipe->plane.state;
	const struct drm_display_mode *mode = &new_crtc_state->mode;
	struct drm_atomic_state *state = new_plane_state->state;
	struct drm_framebuffer *old_fb = old_plane_state->fb;
	struct drm_connector_state *connector_state = NULL;
	struct drm_framebuffer *fb = new_plane_state->fb;
	const struct drm_format_info *format;
	struct drm_connector *connector;
	unsigned int i, num_properties;
	struct gud_state_req *req;
	int idx, ret;
	size_t len;

	if (WARN_ON_ONCE(!fb))
		return -EINVAL;

	/* Only dereference fb after the NULL check above */
	format = fb->format;

	if (old_plane_state->rotation != new_plane_state->rotation)
		new_crtc_state->mode_changed = true;

	if (old_fb && old_fb->format != format)
		new_crtc_state->mode_changed = true;

	if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed)
		return 0;

	/* Only one connector is supported */
	if (hweight32(new_crtc_state->connector_mask) != 1)
		return -EINVAL;

	if (format->format == DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format)
		format = gdrm->xrgb8888_emulation_format;

	for_each_new_connector_in_state(state, connector, connector_state, i) {
		if (connector_state->crtc)
			break;
	}

	/*
	 * DRM_IOCTL_MODE_OBJ_SETPROPERTY on the rotation property will not have
	 * the connector included in the state.
	 */
	if (!connector_state) {
		struct drm_connector_list_iter conn_iter;

		drm_connector_list_iter_begin(pipe->crtc.dev, &conn_iter);
		drm_for_each_connector_iter(connector, &conn_iter) {
			if (connector->state->crtc) {
				connector_state = connector->state;
				break;
			}
		}
		drm_connector_list_iter_end(&conn_iter);
	}

	if (WARN_ON_ONCE(!connector_state))
		return -ENOENT;

	len = struct_size(req, properties,
			  size_add(GUD_PROPERTIES_MAX_NUM, GUD_CONNECTOR_PROPERTIES_MAX_NUM));
	req = kzalloc(len, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	gud_from_display_mode(&req->mode, mode);

	req->format = gud_from_fourcc(format->format);
	if (WARN_ON_ONCE(!req->format)) {
		ret = -EINVAL;
		goto out;
	}

	req->connector = drm_connector_index(connector_state->connector);

	ret = gud_connector_fill_properties(connector_state, req->properties);
	if (ret < 0)
		goto out;

	num_properties = ret;
	for (i = 0; i < gdrm->num_properties; i++) {
		u16 prop = gdrm->properties[i];
		u64 val;

		switch (prop) {
		case GUD_PROPERTY_ROTATION:
			/* DRM UAPI matches the protocol so use value directly */
			val = new_plane_state->rotation;
			break;
		default:
			WARN_ON_ONCE(1);
			ret = -EINVAL;
			goto out;
		}

		req->properties[num_properties + i].prop = cpu_to_le16(prop);
		req->properties[num_properties + i].val = cpu_to_le64(val);
		num_properties++;
	}

	if (drm_dev_enter(fb->dev, &idx)) {
		len = struct_size(req, properties, num_properties);
		ret = gud_usb_set(gdrm, GUD_REQ_SET_STATE_CHECK, 0, req, len);
		drm_dev_exit(idx);
	} else {
		ret = -ENODEV;
	}
out:
	kfree(req);

	return ret;
}

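/*
 * The pipe's .update hook: commits a previously checked state, toggles
 * controller/display enable as needed and flushes the merged plane damage.
 * A mode change or disable also drops any pending async flush state.
 */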
void gud_pipe_update(struct drm_simple_display_pipe *pipe,
		     struct drm_plane_state *old_state)
{
	struct drm_device *drm = pipe->crtc.dev;
	struct gud_device *gdrm = to_gud_device(drm);
	struct drm_plane_state *state = pipe->plane.state;
	struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state);
	struct drm_framebuffer *fb = state->fb;
	struct drm_crtc *crtc = &pipe->crtc;
	struct drm_rect damage;
	int ret, idx;

	if (crtc->state->mode_changed || !crtc->state->enable) {
		cancel_work_sync(&gdrm->work);
		mutex_lock(&gdrm->damage_lock);
		if (gdrm->fb) {
			drm_framebuffer_put(gdrm->fb);
			gdrm->fb = NULL;
		}
		gud_clear_damage(gdrm);
		vfree(gdrm->shadow_buf);
		gdrm->shadow_buf = NULL;
		mutex_unlock(&gdrm->damage_lock);
	}

	if (!drm_dev_enter(drm, &idx))
		return;

	if (!old_state->fb)
		gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 1);

	if (fb && (crtc->state->mode_changed || crtc->state->connectors_changed))
		gud_usb_set(gdrm, GUD_REQ_SET_STATE_COMMIT, 0, NULL, 0);

	if (crtc->state->active_changed)
		gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, crtc->state->active);

	if (!fb)
		goto ctrl_disable;

	ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
	if (ret)
		goto ctrl_disable;

	if (drm_atomic_helper_damage_merged(old_state, state, &damage))
		gud_fb_handle_damage(gdrm, fb, &shadow_plane_state->data[0], &damage);

	drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);

ctrl_disable:
	if (!crtc->state->enable)
		gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0);

	drm_dev_exit(idx);
}