// Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu.h"

#include <inttypes.h>
#include <string.h>
#include <sys/param.h>

#include <ddk/debug.h>
#include <fbl/auto_call.h>
#include <fbl/auto_lock.h>
#include <zircon/compiler.h>
#include <zircon/time.h>

#include "trace.h"
#include "virtio_gpu.h"

#define LOCAL_TRACE 0

namespace virtio {

namespace {

constexpr uint32_t kRefreshRateHz = 30;
constexpr uint64_t kDisplayId = 1;

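// Map a virtio response type onto a zx_status_t. The device can report
// several distinct error types, but everything other than OK_NODATA is
// collapsed into a single generic failure.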
zx_status_t to_zx_status(uint32_t type) {
    LTRACEF("response type %#x\n", type);
    if (type != VIRTIO_GPU_RESP_OK_NODATA) {
        return ZX_ERR_NO_MEMORY;
    }
    return ZX_OK;
}

} // namespace

// DDK level ops

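// Per-image import state: the id of the host-side 2D resource backing the
// image and the PMT that keeps the image's VMO pages pinned for device DMA.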
typedef struct imported_image {
    uint32_t resource_id;
    zx::pmt pmt;
} imported_image_t;

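// Register the display controller's callback and immediately report the
// device's single display, using the pmode discovered during start-up.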
void GpuDevice::virtio_gpu_set_display_controller_cb(void* ctx, void* cb_ctx,
                                                     display_controller_cb_t* cb) {
    GpuDevice* gd = static_cast<GpuDevice*>(ctx);
    {
        fbl::AutoLock al(&gd->flush_lock_);
        gd->dc_cb_ = cb;
        gd->dc_cb_ctx_ = cb_ctx;
    }

    added_display_args_t args = {};
    args.display_id = kDisplayId;
    args.edid_present = false;
    args.panel.params = {
        .width = gd->pmode_.r.width,
        .height = gd->pmode_.r.height,
        .refresh_rate_e2 = kRefreshRateHz * 100,
    };
    args.pixel_formats = &gd->supported_formats_;
    args.pixel_format_count = 1;
    cb->on_displays_changed(cb_ctx, &args, 1, nullptr, 0);
}

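// Import an image by pinning its VMO for device DMA, creating a host-side 2D
// resource of matching dimensions, and attaching the pinned pages as that
// resource's backing store.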
zx_status_t GpuDevice::virtio_gpu_import_vmo_image(void* ctx, image_t* image,
                                                   zx_handle_t vmo, size_t offset) {
    GpuDevice* gd = static_cast<GpuDevice*>(ctx);
    if (image->type != IMAGE_TYPE_SIMPLE) {
        return ZX_ERR_INVALID_ARGS;
    }

    fbl::AllocChecker ac;
    auto import_data = fbl::make_unique_checked<imported_image_t>(&ac);
    if (!ac.check()) {
        return ZX_ERR_NO_MEMORY;
    }

    unsigned pixel_size = ZX_PIXEL_FORMAT_BYTES(image->pixel_format);
    unsigned size = ROUNDUP(image->width * image->height * pixel_size, PAGE_SIZE);
    zx_paddr_t paddr;
    zx_status_t status = zx_bti_pin(gd->bti_.get(), ZX_BTI_PERM_READ | ZX_BTI_CONTIGUOUS,
                                    vmo, offset, size,
                                    &paddr, 1, import_data->pmt.reset_and_get_address());
    if (status != ZX_OK) {
        return status;
    }

    status = gd->allocate_2d_resource(&import_data->resource_id, image->width, image->height);
    if (status != ZX_OK) {
        zxlogf(ERROR, "%s: failed to allocate 2d resource\n", gd->tag());
        return status;
    }

    status = gd->attach_backing(import_data->resource_id, paddr, size);
    if (status != ZX_OK) {
        zxlogf(ERROR, "%s: failed to attach backing store\n", gd->tag());
        return status;
    }

    image->handle = import_data.release();

    return ZX_OK;
}

void GpuDevice::virtio_gpu_release_image(void* ctx, image_t* image) {
    delete reinterpret_cast<imported_image_t*>(image->handle);
}

void GpuDevice::virtio_gpu_check_configuration(void* ctx,
                                               const display_config_t** display_configs,
                                               uint32_t* display_cfg_result,
                                               uint32_t** layer_cfg_results,
                                               uint32_t display_count) {
    GpuDevice* gd = static_cast<GpuDevice*>(ctx);
    if (display_count != 1) {
        ZX_DEBUG_ASSERT(display_count == 0);
        return;
    }
    ZX_DEBUG_ASSERT(display_configs[0]->display_id == kDisplayId);
    bool success;
    if (display_configs[0]->layer_count != 1) {
        success = display_configs[0]->layer_count == 0;
    } else {
        primary_layer_t* layer = &display_configs[0]->layers[0]->cfg.primary;
        frame_t frame = {
            .x_pos = 0, .y_pos = 0, .width = gd->pmode_.r.width, .height = gd->pmode_.r.height,
        };
        success = display_configs[0]->layers[0]->type == LAYER_PRIMARY
                && layer->transform_mode == FRAME_TRANSFORM_IDENTITY
                && layer->image.width == gd->pmode_.r.width
                && layer->image.height == gd->pmode_.r.height
                && memcmp(&layer->dest_frame, &frame, sizeof(frame_t)) == 0
                && memcmp(&layer->src_frame, &frame, sizeof(frame_t)) == 0
                && display_configs[0]->cc_flags == 0
                && layer->alpha_mode == ALPHA_DISABLE;
    }
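    // Anything we can't scan out directly gets merged: ask the client to
    // composite all layers into a single base layer instead.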
    if (!success) {
        layer_cfg_results[0][0] = CLIENT_MERGE_BASE;
        for (unsigned i = 1; i < display_configs[0]->layer_count; i++) {
            layer_cfg_results[0][i] = CLIENT_MERGE_SRC;
        }
    }
}

void GpuDevice::virtio_gpu_apply_configuration(void* ctx, const display_config_t** display_configs,
                                               uint32_t display_count) {
    GpuDevice* gd = static_cast<GpuDevice*>(ctx);
    void* handle = display_count == 0 || display_configs[0]->layer_count == 0
            ? nullptr : display_configs[0]->layers[0]->cfg.primary.image.handle;

    {
        fbl::AutoLock al(&gd->flush_lock_);
        gd->current_fb_ = reinterpret_cast<imported_image_t*>(handle);
    }

    gd->Flush();
}

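// Rows are tightly packed, so the linear stride (in pixels) is just the width.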
uint32_t GpuDevice::virtio_gpu_compute_linear_stride(void* ctx, uint32_t width,
                                                     zx_pixel_format_t format) {
    return width;
}

zx_status_t GpuDevice::virtio_gpu_allocate_vmo(void* ctx, uint64_t size, zx_handle_t* vmo_out) {
    GpuDevice* gd = static_cast<GpuDevice*>(ctx);
    return zx_vmo_create_contiguous(gd->bti().get(), size, 0, vmo_out);
}

GpuDevice::GpuDevice(zx_device_t* bus_device, zx::bti bti, fbl::unique_ptr<Backend> backend)
    : Device(bus_device, fbl::move(bti), fbl::move(backend)) {
    sem_init(&request_sem_, 0, 1);
    sem_init(&response_sem_, 0, 0);
    cnd_init(&flush_cond_);

    memset(&gpu_req_, 0, sizeof(gpu_req_));
}

GpuDevice::~GpuDevice() {
    io_buffer_release(&gpu_req_);

    // TODO: clean up allocated physical memory
    sem_destroy(&request_sem_);
    sem_destroy(&response_sem_);
    cnd_destroy(&flush_cond_);
}

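// Send a command to the device and block until its response arrives. The
// request is copied into the shared gpu_req_ buffer and submitted as a
// two-descriptor chain: a device-readable descriptor for the command followed
// by a device-writable one for the response. *res points into gpu_req_, so it
// is only valid until the next command is sent.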
template <typename RequestType, typename ResponseType>
void GpuDevice::send_command_response(const RequestType* cmd, ResponseType** res) {
    size_t cmd_len = sizeof(RequestType);
    size_t res_len = sizeof(ResponseType);
    LTRACEF("dev %p, cmd %p, cmd_len %zu, res %p, res_len %zu\n", this, cmd, cmd_len, res, res_len);

    // Only one command/response transaction may be in flight at a time; hold
    // the request semaphore until this one completes.
    sem_wait(&request_sem_);
    auto cleanup = fbl::MakeAutoCall([this]() { sem_post(&request_sem_); });

    uint16_t i;
    struct vring_desc* desc = vring_.AllocDescChain(2, &i);
    ZX_ASSERT(desc);

    void* gpu_req_base = io_buffer_virt(&gpu_req_);
    zx_paddr_t gpu_req_pa = io_buffer_phys(&gpu_req_);

    memcpy(gpu_req_base, cmd, cmd_len);

    desc->addr = gpu_req_pa;
    desc->len = static_cast<uint32_t>(cmd_len);
    desc->flags = VRING_DESC_F_NEXT;

    // Set the second descriptor to the response with the write bit set
    desc = vring_.DescFromIndex(desc->next);
    ZX_ASSERT(desc);

    *res = reinterpret_cast<ResponseType*>(static_cast<uint8_t*>(gpu_req_base) + cmd_len);
    zx_paddr_t res_phys = gpu_req_pa + cmd_len;
    memset(*res, 0, res_len);

    desc->addr = res_phys;
    desc->len = static_cast<uint32_t>(res_len);
    desc->flags = VRING_DESC_F_WRITE;

    // Submit the transfer & wait for the response
    vring_.SubmitChain(i);
    vring_.Kick();
    sem_wait(&response_sem_);
}

zx_status_t GpuDevice::get_display_info() {
    LTRACEF("dev %p\n", this);

    // Construct the get display info message
    virtio_gpu_ctrl_hdr req;
    memset(&req, 0, sizeof(req));
    req.type = VIRTIO_GPU_CMD_GET_DISPLAY_INFO;

    // Send the message and get a response
    virtio_gpu_resp_display_info* info;
    send_command_response(&req, &info);
    if (info->hdr.type != VIRTIO_GPU_RESP_OK_DISPLAY_INFO) {
        return ZX_ERR_NOT_FOUND;
    }

    // We got a response
    LTRACEF("response:\n");
    for (int i = 0; i < VIRTIO_GPU_MAX_SCANOUTS; i++) {
        if (info->pmodes[i].enabled) {
            LTRACEF("%u: x %u y %u w %u h %u flags 0x%x\n", i,
                    info->pmodes[i].r.x, info->pmodes[i].r.y,
                    info->pmodes[i].r.width, info->pmodes[i].r.height,
                    info->pmodes[i].flags);
            if (pmode_id_ < 0) {
                // Save the first valid pmode we see
                memcpy(&pmode_, &info->pmodes[i], sizeof(pmode_));
                pmode_id_ = i;
            }
        }
    }

    return ZX_OK;
}

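// Create a host-side 2D resource in B8G8R8X8 format and return its newly
// assigned id through *resource_id.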
zx_status_t GpuDevice::allocate_2d_resource(uint32_t* resource_id, uint32_t width, uint32_t height) {
    LTRACEF("dev %p\n", this);

    ZX_ASSERT(resource_id);

    // Construct the request
    virtio_gpu_resource_create_2d req;
    memset(&req, 0, sizeof(req));

    req.hdr.type = VIRTIO_GPU_CMD_RESOURCE_CREATE_2D;
    req.resource_id = next_resource_id_++;
    *resource_id = req.resource_id;
    req.format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM;
    req.width = width;
    req.height = height;

    // Send the command and get a response
    struct virtio_gpu_ctrl_hdr* res;
    send_command_response(&req, &res);

    return to_zx_status(res->type);
}

zx_status_t GpuDevice::attach_backing(uint32_t resource_id, zx_paddr_t ptr, size_t buf_len) {
    LTRACEF("dev %p, resource_id %u, ptr %#" PRIxPTR ", buf_len %zu\n", this, resource_id, ptr, buf_len);

    ZX_ASSERT(ptr);

    // Construct the request
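    // The device expects nr_entries virtio_gpu_mem_entry records to
    // immediately follow the attach_backing header, so both are carried in
    // one contiguous struct.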
    struct {
        struct virtio_gpu_resource_attach_backing req;
        struct virtio_gpu_mem_entry mem;
    } req;
    memset(&req, 0, sizeof(req));

    req.req.hdr.type = VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING;
    req.req.resource_id = resource_id;
    req.req.nr_entries = 1;

    req.mem.addr = ptr;
    req.mem.length = static_cast<uint32_t>(buf_len);

    // Send the command and get a response
    struct virtio_gpu_ctrl_hdr* res;
    send_command_response(&req, &res);
    return to_zx_status(res->type);
}

zx_status_t GpuDevice::set_scanout(uint32_t scanout_id, uint32_t resource_id, uint32_t width, uint32_t height) {
    LTRACEF("dev %p, scanout_id %u, resource_id %u, width %u, height %u\n", this, scanout_id, resource_id, width, height);

    // Construct the request
    virtio_gpu_set_scanout req;
    memset(&req, 0, sizeof(req));

    req.hdr.type = VIRTIO_GPU_CMD_SET_SCANOUT;
    req.r.x = req.r.y = 0;
    req.r.width = width;
    req.r.height = height;
    req.scanout_id = scanout_id;
    req.resource_id = resource_id;

    // Send the command and get a response
    virtio_gpu_ctrl_hdr* res;
    send_command_response(&req, &res);
    return to_zx_status(res->type);
}

zx_status_t GpuDevice::flush_resource(uint32_t resource_id, uint32_t width, uint32_t height) {
    LTRACEF("dev %p, resource_id %u, width %u, height %u\n", this, resource_id, width, height);

    // Construct the request
    virtio_gpu_resource_flush req;
    memset(&req, 0, sizeof(req));

    req.hdr.type = VIRTIO_GPU_CMD_RESOURCE_FLUSH;
    req.r.x = req.r.y = 0;
    req.r.width = width;
    req.r.height = height;
    req.resource_id = resource_id;

    // Send the command and get a response
    virtio_gpu_ctrl_hdr* res;
    send_command_response(&req, &res);
    return to_zx_status(res->type);
}

zx_status_t GpuDevice::transfer_to_host_2d(uint32_t resource_id, uint32_t width, uint32_t height) {
    LTRACEF("dev %p, resource_id %u, width %u, height %u\n", this, resource_id, width, height);

    // Construct the request
    virtio_gpu_transfer_to_host_2d req;
    memset(&req, 0, sizeof(req));

    req.hdr.type = VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D;
    req.r.x = req.r.y = 0;
    req.r.width = width;
    req.r.height = height;
    req.offset = 0;
    req.resource_id = resource_id;

    // Send the command and get a response
    virtio_gpu_ctrl_hdr* res;
    send_command_response(&req, &res);
    return to_zx_status(res->type);
}

void GpuDevice::Flush() {
    fbl::AutoLock al(&flush_lock_);
    flush_pending_ = true;
    cnd_signal(&flush_cond_);
}

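// Flusher thread: once per refresh period, transfer the currently displayed
// framebuffer to the host and flush it, reprogram the scanout if the
// framebuffer changed, then deliver a vsync notification to the display
// controller.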
void GpuDevice::virtio_gpu_flusher() {
    LTRACE_ENTRY;
    zx_time_t next_deadline = zx_clock_get_monotonic();
    zx_duration_t period = ZX_SEC(1) / kRefreshRateHz;
    for (;;) {
        zx_nanosleep(next_deadline);

        bool fb_change;
        {
            fbl::AutoLock al(&flush_lock_);
            fb_change = displayed_fb_ != current_fb_;
            displayed_fb_ = current_fb_;
        }

        LTRACEF("flushing\n");

        if (displayed_fb_) {
            zx_status_t status = transfer_to_host_2d(
                    displayed_fb_->resource_id, pmode_.r.width, pmode_.r.height);
            if (status != ZX_OK) {
                LTRACEF("failed to transfer resource to host\n");
                continue;
            }

            status = flush_resource(displayed_fb_->resource_id, pmode_.r.width, pmode_.r.height);
            if (status != ZX_OK) {
                LTRACEF("failed to flush resource\n");
                continue;
            }
        }

        if (fb_change) {
            uint32_t res_id = displayed_fb_ ? displayed_fb_->resource_id : 0;
            zx_status_t status = set_scanout(pmode_id_, res_id, pmode_.r.width, pmode_.r.height);
            if (status != ZX_OK) {
                zxlogf(ERROR, "%s: failed to set scanout\n", tag());
                continue;
            }
        }

        {
            fbl::AutoLock al(&flush_lock_);
            if (dc_cb_) {
                void* handles[] = { static_cast<void*>(displayed_fb_) };
                dc_cb_->on_display_vsync(dc_cb_ctx_, kDisplayId,
                                         next_deadline, handles, displayed_fb_ != nullptr);
            }
        }
        next_deadline = zx_time_add_duration(next_deadline, period);
    }
}

zx_status_t GpuDevice::virtio_gpu_start() {
    LTRACEF("dev %p\n", this);

    // Get the display info and see if we find a valid pmode
    zx_status_t status = get_display_info();
    if (status != ZX_OK) {
        zxlogf(ERROR, "%s: failed to get display info\n", tag());
        return status;
    }

    if (pmode_id_ < 0) {
        zxlogf(ERROR, "%s: failed to find a pmode, exiting\n", tag());
        return ZX_ERR_NOT_FOUND;
    }

    printf("virtio-gpu: found display x %u y %u w %u h %u flags 0x%x\n",
           pmode_.r.x, pmode_.r.y, pmode_.r.width, pmode_.r.height,
           pmode_.flags);

    // Run a worker thread to shove in flush events
    auto virtio_gpu_flusher_entry = [](void* arg) {
        static_cast<GpuDevice*>(arg)->virtio_gpu_flusher();
        return 0;
    };
    thrd_create_with_name(&flush_thread_, virtio_gpu_flusher_entry, this, "virtio-gpu-flusher");
    thrd_detach(flush_thread_);

    LTRACEF("publishing device\n");

    display_proto_ops_.set_display_controller_cb = virtio_gpu_set_display_controller_cb;
    display_proto_ops_.import_vmo_image = virtio_gpu_import_vmo_image;
    display_proto_ops_.release_image = virtio_gpu_release_image;
    display_proto_ops_.check_configuration = virtio_gpu_check_configuration;
    display_proto_ops_.apply_configuration = virtio_gpu_apply_configuration;
    display_proto_ops_.compute_linear_stride = virtio_gpu_compute_linear_stride;
    display_proto_ops_.allocate_vmo = virtio_gpu_allocate_vmo;

    // Initialize the zx_device and publish us
    // Point the ctx of our DDK device at ourself
    device_add_args_t args = {};
    args.version = DEVICE_ADD_ARGS_VERSION;
    args.name = "virtio-gpu-display";
    args.ctx = this;
    args.ops = &device_ops_;
    args.proto_id = ZX_PROTOCOL_DISPLAY_CONTROLLER_IMPL;
    args.proto_ops = &display_proto_ops_;

    status = device_add(bus_device_, &args, &device_);
    if (status != ZX_OK) {
        device_ = nullptr;
        return status;
    }

    LTRACE_EXIT;
    return ZX_OK;
}

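// Standard virtio bring-up: reset the device, read its config space, ack
// driver status, set up the control vring and a page of request/response
// buffer, then hand the rest of initialization off to a worker thread.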
zx_status_t GpuDevice::Init() {
    LTRACE_ENTRY;

    DeviceReset();

    struct virtio_gpu_config config;
    CopyDeviceConfig(&config, sizeof(config));
    LTRACEF("events_read 0x%x\n", config.events_read);
    LTRACEF("events_clear 0x%x\n", config.events_clear);
    LTRACEF("num_scanouts 0x%x\n", config.num_scanouts);
    LTRACEF("reserved 0x%x\n", config.reserved);

    // Ack and set the driver status bit
    DriverStatusAck();

    // XXX check features bits and ack/nak them

    // Allocate the main vring
    zx_status_t status = vring_.Init(0, 16);
    if (status != ZX_OK) {
        zxlogf(ERROR, "%s: failed to allocate vring\n", tag());
        return status;
    }

    // Allocate a GPU request
    status = io_buffer_init(&gpu_req_, bti_.get(), PAGE_SIZE, IO_BUFFER_RW | IO_BUFFER_CONTIG);
    if (status != ZX_OK) {
        zxlogf(ERROR, "%s: cannot alloc gpu_req buffers %d\n", tag(), status);
        return status;
    }

    LTRACEF("allocated gpu request at %p, physical address %#" PRIxPTR "\n",
            io_buffer_virt(&gpu_req_), io_buffer_phys(&gpu_req_));

    StartIrqThread();
    DriverStatusOk();

    // Start a worker thread that runs through a sequence to finish initializing the GPU
    auto virtio_gpu_start_entry = [](void* arg) {
        return static_cast<GpuDevice*>(arg)->virtio_gpu_start();
    };
    thrd_create_with_name(&start_thread_, virtio_gpu_start_entry, this, "virtio-gpu-starter");
    thrd_detach(start_thread_);

    return ZX_OK;
}

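// Called from the IRQ thread when the device returns buffers on the used
// ring. Only one command is ever in flight, so each completed chain wakes the
// single waiter blocked in send_command_response().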
void GpuDevice::IrqRingUpdate() {
    LTRACE_ENTRY;

    // Parse our descriptor chain, add back to the free queue
    auto free_chain = [this](vring_used_elem* used_elem) {
        uint16_t i = static_cast<uint16_t>(used_elem->id);
        struct vring_desc* desc = vring_.DescFromIndex(i);
        for (;;) {
            int next;

            if (desc->flags & VRING_DESC_F_NEXT) {
                next = desc->next;
            } else {
                // End of chain
                next = -1;
            }

            vring_.FreeDesc(i);

            if (next < 0) {
                break;
            }
            i = static_cast<uint16_t>(next);
            desc = vring_.DescFromIndex(i);
        }
        // Notify the request thread
        sem_post(&response_sem_);
    };

    // Tell the ring to find free chains and hand it back to our lambda
    vring_.IrqRingUpdate(free_chain);
}

void GpuDevice::IrqConfigChange() {
    LTRACE_ENTRY;
}

} // namespace virtio
