// Copyright 2016 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <ddk/binding.h>
#include <ddk/debug.h>
#include <ddk/device.h>
#include <ddk/driver.h>
#include <ddk/protocol/block.h>

#include <assert.h>
#include <fcntl.h>
#include <inttypes.h>
#include <lib/cksum.h>
#include <lib/sync/completion.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/param.h>
#include <threads.h>
#include <zircon/device/block.h>
#include <zircon/syscalls.h>
#include <zircon/types.h>

#include <zircon/hw/gpt.h>

typedef gpt_header_t gpt_t;

#define TXN_SIZE 0x4000 // 128 partition entries
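// TXN_SIZE is the size of the VMO used to read GPT metadata: 128 entries at
// 128 bytes each is the standard table size. validate_header() rejects any
// entry table that does not fit in this buffer.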

typedef struct gptpart_device {
    zx_device_t* zxdev;
    zx_device_t* parent;

    block_protocol_t bp;

    gpt_entry_t gpt_entry;

    block_info_t info;
    size_t block_op_size;
} gptpart_device_t;

struct guid {
    uint32_t data1;
    uint16_t data2;
    uint16_t data3;
    uint8_t data4[8];
};
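// In-memory view of the mixed-endian GUID layout GPT uses on disk, so that
// uint8_to_guid_string() can print the fields directly.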

static zx_status_t gpt_flush(const gptpart_device_t* gpt);

static void uint8_to_guid_string(char* dst, uint8_t* src) {
    struct guid* guid = (struct guid*)src;
    sprintf(dst, "%08X-%04X-%04X-%02X%02X-%02X%02X%02X%02X%02X%02X", guid->data1, guid->data2,
            guid->data3, guid->data4[0], guid->data4[1], guid->data4[2], guid->data4[3],
            guid->data4[4], guid->data4[5], guid->data4[6], guid->data4[7]);
}

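// Converts a UTF-16LE partition name to ASCII by keeping only the low byte of
// each code unit. charcount is a byte count, so it is decremented by two per
// character. Does not NUL-terminate the output.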
static void utf16_to_cstring(char* dst, uint8_t* src, size_t charcount) {
    while (charcount > 0) {
        *dst++ = *src;
        src += 2; // FIXME: cheesy -- drops the high byte of each UTF-16 code unit
        charcount -= 2;
    }
}

static uint64_t get_lba_count(gptpart_device_t* dev) {
    // last LBA is inclusive
    return dev->gpt_entry.last - dev->gpt_entry.first + 1;
}

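// Sanity-checks a GPT header read from disk: size, magic, header CRC, the
// last usable block, the per-entry size, and that the whole entry table fits
// in our TXN_SIZE buffer.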
static bool validate_header(const gpt_t* header, const block_info_t* info) {
    if (header->size > sizeof(gpt_t)) {
        zxlogf(ERROR, "gpt: invalid header size\n");
        return false;
    }
    if (header->magic != GPT_MAGIC) {
        zxlogf(ERROR, "gpt: bad header magic\n");
        return false;
    }
    gpt_t copy;
    memcpy(&copy, header, sizeof(gpt_t));
    copy.crc32 = 0;
    uint32_t crc = crc32(0, (const unsigned char*)&copy, copy.size);
    if (crc != header->crc32) {
        zxlogf(ERROR, "gpt: header crc invalid\n");
        return false;
    }
    if (header->last >= info->block_count) {
        zxlogf(ERROR, "gpt: last block > block count\n");
        return false;
    }
    // the rest of the driver indexes the table by sizeof(gpt_entry_t)
    if (header->entries_size != sizeof(gpt_entry_t)) {
        zxlogf(ERROR, "gpt: invalid entry size\n");
        return false;
    }
    // compute in 64 bits so a corrupt header cannot overflow the check
    if ((uint64_t)header->entries_count * header->entries_size > TXN_SIZE) {
        zxlogf(ERROR, "gpt: entry table too big\n");
        return false;
    }
    return true;
}

// implement device protocol:

static zx_status_t gpt_ioctl(void* ctx, uint32_t op, const void* cmd, size_t cmdlen,
                             void* reply, size_t max, size_t* out_actual) {
    gptpart_device_t* device = ctx;
    switch (op) {
    case IOCTL_BLOCK_GET_INFO: {
        block_info_t* info = reply;
        if (max < sizeof(*info))
            return ZX_ERR_BUFFER_TOO_SMALL;
        memcpy(info, &device->info, sizeof(*info));
        *out_actual = sizeof(*info);
        return ZX_OK;
    }
    case IOCTL_BLOCK_GET_TYPE_GUID: {
        char* guid = reply;
        if (max < GPT_GUID_LEN) return ZX_ERR_BUFFER_TOO_SMALL;
        memcpy(guid, device->gpt_entry.type, GPT_GUID_LEN);
        *out_actual = GPT_GUID_LEN;
        return ZX_OK;
    }
    case IOCTL_BLOCK_GET_PARTITION_GUID: {
        char* guid = reply;
        if (max < GPT_GUID_LEN) return ZX_ERR_BUFFER_TOO_SMALL;
        memcpy(guid, device->gpt_entry.guid, GPT_GUID_LEN);
        *out_actual = GPT_GUID_LEN;
        return ZX_OK;
    }
    case IOCTL_BLOCK_GET_NAME: {
        char* name = reply;
        if (max < 1) return ZX_ERR_BUFFER_TOO_SMALL; // avoid (max - 1) underflow below
        memset(name, 0, max);
        // save room for the null terminator
        utf16_to_cstring(name, device->gpt_entry.name, MIN((max - 1) * 2, GPT_NAME_LEN));
        *out_actual = strnlen(name, GPT_NAME_LEN / 2);
        return ZX_OK;
    }
    case IOCTL_DEVICE_SYNC: {
        return gpt_flush(device);
    }
    default:
        return ZX_ERR_NOT_SUPPORTED;
    }
}

static void gpt_query(void* ctx, block_info_t* bi, size_t* bopsz) {
    gptpart_device_t* gpt = ctx;
    memcpy(bi, &gpt->info, sizeof(block_info_t));
    *bopsz = gpt->block_op_size;
}

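// Forwards block ops to the parent device after bounds-checking the request
// and translating partition-relative LBAs to absolute device LBAs.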
static void gpt_queue(void* ctx, block_op_t* bop) {
    gptpart_device_t* gpt = ctx;

    switch (bop->command & BLOCK_OP_MASK) {
    case BLOCK_OP_READ:
    case BLOCK_OP_WRITE: {
        size_t blocks = bop->rw.length;
        size_t max = get_lba_count(gpt);

        // Ensure that the request is in-bounds
        if ((bop->rw.offset_dev >= max) ||
            ((max - bop->rw.offset_dev) < blocks)) {
            bop->completion_cb(bop, ZX_ERR_OUT_OF_RANGE);
            return;
        }

        // Adjust for partition starting block
        bop->rw.offset_dev += gpt->gpt_entry.first;
        break;
    }
    case BLOCK_OP_FLUSH:
        break;
    default:
        bop->completion_cb(bop, ZX_ERR_NOT_SUPPORTED);
        return;
    }

    gpt->bp.ops->queue(gpt->bp.ctx, bop);
}

static void gpt_unbind(void* ctx) {
    gptpart_device_t* device = ctx;
    device_remove(device->zxdev);
}

static void gpt_release(void* ctx) {
    gptpart_device_t* device = ctx;
    free(device);
}

static zx_off_t gpt_get_size(void* ctx) {
    gptpart_device_t* dev = ctx;
    // TODO: use query() results, *but* fvm returns different query and getsize
    // results, and the latter are dynamic...
    return device_get_size(dev->parent);
}

static zx_protocol_device_t gpt_proto = {
    .version = DEVICE_OPS_VERSION,
    .ioctl = gpt_ioctl,
    .get_size = gpt_get_size,
    .unbind = gpt_unbind,
    .release = gpt_release,
};

static block_protocol_ops_t block_ops = {
    .query = gpt_query,
    .queue = gpt_queue,
};

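// Completion callback used to run async block ops synchronously: the caller
// queues an op, waits on the completion, then reads the status back out of
// the command field.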
static void gpt_sync_complete(block_op_t* bop, zx_status_t status) {
    // Pass the 32-bit status back to the caller via the 32-bit command field,
    // which avoids needing a custom completion struct.
    bop->command = status;
    sync_completion_signal((sync_completion_t*)bop->cookie);
}

static zx_status_t gpt_flush(const gptpart_device_t* gpt) {
    // TODO: Maybe reuse a single allocation rather than allocating each time we
    // need to flush.
    block_op_t* bop = calloc(1, gpt->block_op_size);
    if (bop == NULL) {
        return ZX_ERR_NO_MEMORY;
    }

    sync_completion_t completion = SYNC_COMPLETION_INIT;

    bop->command = BLOCK_OP_FLUSH;
    bop->completion_cb = gpt_sync_complete;
    bop->cookie = &completion;

    gpt->bp.ops->queue(gpt->bp.ctx, bop);
    sync_completion_wait(&completion, ZX_TIME_INFINITE);
    zx_status_t status = bop->command;
    free(bop);
    if (status != ZX_OK) {
        zxlogf(ERROR, "gpt: error %d flushing\n", status);
        return status;
    }
    return ZX_OK;
}

static zx_status_t vmo_read(zx_handle_t vmo, void* data, uint64_t off, size_t len) {
    return zx_vmo_read(vmo, data, off, len);
}

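// Reads and validates the GPT from the parent device, then publishes one
// child block device per valid partition entry. Runs on its own thread so
// bind() can return immediately; the pre-created (invisible) first_dev is
// reused for partition zero and made visible once a valid entry is found.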
static int gpt_bind_thread(void* arg) {
    gptpart_device_t* first_dev = (gptpart_device_t*)arg;
    zx_device_t* dev = first_dev->parent;

    // used to keep track of the number of partitions found
    unsigned partitions = 0;

    block_protocol_t bp;
    memcpy(&bp, &first_dev->bp, sizeof(bp));

    block_info_t block_info;
    size_t block_op_size;
    bp.ops->query(bp.ctx, &block_info, &block_op_size);

    zx_handle_t vmo = ZX_HANDLE_INVALID;
    block_op_t* bop = calloc(1, block_op_size);
    if (bop == NULL) {
        goto unbind;
    }

    if (zx_vmo_create(TXN_SIZE, 0, &vmo) != ZX_OK) {
        zxlogf(ERROR, "gpt: cannot allocate vmo\n");
        goto unbind;
    }

    // sanity check the default txn size against the block size
    if ((TXN_SIZE % block_info.block_size) || (TXN_SIZE < block_info.block_size)) {
        zxlogf(ERROR, "gpt: default txn size=%d is not aligned to blksize=%u!\n",
               TXN_SIZE, block_info.block_size);
        goto unbind;
    }

    sync_completion_t completion = SYNC_COMPLETION_INIT;

    // read the partition table header synchronously (LBA 1)
    bop->command = BLOCK_OP_READ;
    bop->rw.vmo = vmo;
    bop->rw.length = 1;
    bop->rw.offset_dev = 1;
    bop->rw.offset_vmo = 0;
    bop->rw.pages = NULL;
    bop->completion_cb = gpt_sync_complete;
    bop->cookie = &completion;

    bp.ops->queue(bp.ctx, bop);
    sync_completion_wait(&completion, ZX_TIME_INFINITE);
    if (bop->command != ZX_OK) {
        zxlogf(ERROR, "gpt: error %d reading partition header\n", (int)bop->command);
        goto unbind;
    }

    // copy the header out of the vmo and validate it
    gpt_t header;
    if (vmo_read(vmo, &header, 0, sizeof(gpt_t)) != ZX_OK) {
        goto unbind;
    }
    if (!validate_header(&header, &block_info)) {
        goto unbind;
    }

    zxlogf(SPEW, "gpt: found gpt header %u entries @ lba%" PRIu64 "\n",
           header.entries_count, header.entries);

    // read partition table entries
    size_t table_sz = header.entries_count * header.entries_size;
    if (table_sz > TXN_SIZE) {
        zxlogf(INFO, "gpt: partition table is larger than the buffer!\n");
        // FIXME read the whole partition table. ok for now because on pixel2, this is
        // enough to read the entries that actually contain valid data
        table_sz = TXN_SIZE;
    }

    bop->command = BLOCK_OP_READ;
    bop->rw.vmo = vmo;
    bop->rw.length = (table_sz + (block_info.block_size - 1)) / block_info.block_size;
    bop->rw.offset_dev = header.entries;
    bop->rw.offset_vmo = 0;
    bop->rw.pages = NULL;

    sync_completion_reset(&completion);
    bp.ops->queue(bp.ctx, bop);
    sync_completion_wait(&completion, ZX_TIME_INFINITE);
    if (bop->command != ZX_OK) {
        zxlogf(ERROR, "gpt: error %d reading partition table\n", (int)bop->command);
        goto unbind;
    }

    uint8_t entries[TXN_SIZE];
    if (vmo_read(vmo, entries, 0, TXN_SIZE) != ZX_OK) {
        goto unbind;
    }

    uint32_t crc = crc32(0, (const unsigned char*)entries, table_sz);
    if (crc != header.entries_crc) {
        zxlogf(ERROR, "gpt: entries crc invalid\n");
        goto unbind;
    }

    uint64_t dev_block_count = block_info.block_count;

    for (partitions = 0; partitions < header.entries_count; partitions++) {
        // stop if the next entry would run past the table we read
        if ((partitions + 1) * header.entries_size > table_sz) break;

        // skip over entries that look invalid
        gpt_entry_t* entry = (gpt_entry_t*)(entries + (partitions * sizeof(gpt_entry_t)));
        if (entry->first < header.first || entry->last > header.last) {
            continue;
        }
        if (entry->first == entry->last) {
            continue;
        }
        if ((entry->last - entry->first + 1) > dev_block_count) {
            zxlogf(ERROR, "gpt: entry %u too large, last = 0x%" PRIx64
                   " first = 0x%" PRIx64 " block_count = 0x%" PRIx64 "\n",
                   partitions, entry->last, entry->first, dev_block_count);
            continue;
        }

        gptpart_device_t* device;
        // use first_dev for the first partition
        if (first_dev) {
            device = first_dev;
        } else {
            device = calloc(1, sizeof(gptpart_device_t));
            if (!device) {
                zxlogf(ERROR, "gpt: out of memory!\n");
                goto unbind;
            }
            device->parent = dev;
            memcpy(&device->bp, &bp, sizeof(bp));
        }

        memcpy(&device->gpt_entry, entry, sizeof(gpt_entry_t));
        block_info.block_count = device->gpt_entry.last - device->gpt_entry.first + 1;
        memcpy(&device->info, &block_info, sizeof(block_info));
        device->block_op_size = block_op_size;

        char type_guid[GPT_GUID_STRLEN];
        uint8_to_guid_string(type_guid, device->gpt_entry.type);
        char partition_guid[GPT_GUID_STRLEN];
        uint8_to_guid_string(partition_guid, device->gpt_entry.guid);
        // +1 and explicit terminator: utf16_to_cstring does not NUL-terminate
        char pname[GPT_NAME_LEN / 2 + 1];
        utf16_to_cstring(pname, device->gpt_entry.name, GPT_NAME_LEN);
        pname[GPT_NAME_LEN / 2] = '\0';

        if (first_dev) {
            // make our initial device visible and use it for partition zero
            device_make_visible(first_dev->zxdev);
            first_dev = NULL;
        } else {
            char name[128];
            snprintf(name, sizeof(name), "part-%03u", partitions);

            zxlogf(SPEW, "gpt: partition %u (%s) type=%s guid=%s name=%s first=0x%"
                   PRIx64 " last=0x%" PRIx64 "\n",
                   partitions, name, type_guid, partition_guid, pname,
                   device->gpt_entry.first, device->gpt_entry.last);

            device_add_args_t args = {
                .version = DEVICE_ADD_ARGS_VERSION,
                .name = name,
                .ctx = device,
                .ops = &gpt_proto,
                .proto_id = ZX_PROTOCOL_BLOCK_IMPL,
                .proto_ops = &block_ops,
            };

            if (device_add(dev, &args, &device->zxdev) != ZX_OK) {
                free(device);
                continue;
            }
        }
    }

    free(bop);
    zx_handle_close(vmo);
    return 0;

unbind:
    free(bop);
    zx_handle_close(vmo);
    if (first_dev) {
        // handle case where no partitions were found
        device_remove(first_dev->zxdev);
    }
    return -1;
}

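// Creates an invisible placeholder device for the first partition and kicks
// off gpt_bind_thread to read the partition table; the placeholder is made
// visible (or removed) once scanning completes.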
static zx_status_t gpt_bind(void* ctx, zx_device_t* parent) {
    // create an invisible device, which will be used for the first partition
    gptpart_device_t* device = calloc(1, sizeof(gptpart_device_t));
    if (!device) {
        return ZX_ERR_NO_MEMORY;
    }
    device->parent = parent;

    if (device_get_protocol(parent, ZX_PROTOCOL_BLOCK, &device->bp) != ZX_OK) {
        zxlogf(ERROR, "gpt: block device '%s' does not support block protocol\n",
               device_get_name(parent));
        free(device);
        return ZX_ERR_NOT_SUPPORTED;
    }

    char name[128];
    snprintf(name, sizeof(name), "part-%03u", 0);

    device_add_args_t args = {
        .version = DEVICE_ADD_ARGS_VERSION,
        .name = name,
        .ctx = device,
        .ops = &gpt_proto,
        .proto_id = ZX_PROTOCOL_BLOCK_IMPL,
        .proto_ops = &block_ops,
        .flags = DEVICE_ADD_INVISIBLE,
    };

    zx_status_t status = device_add(parent, &args, &device->zxdev);
    if (status != ZX_OK) {
        free(device);
        return status;
    }

    // read the partition table asynchronously on a separate thread
    thrd_t t;
    // thrd_create_with_name returns a thrd_* code, not a zx_status_t, so map
    // failure to a generic error rather than returning it directly
    if (thrd_create_with_name(&t, gpt_bind_thread, device, "gpt-init") != thrd_success) {
        device_remove(device->zxdev);
        return ZX_ERR_NO_MEMORY;
    }
    return ZX_OK;
}

static zx_driver_ops_t gpt_driver_ops = {
    .version = DRIVER_OPS_VERSION,
    .bind = gpt_bind,
};

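// Autobind is disabled: this driver is only bound when explicitly requested,
// and only to devices exposing ZX_PROTOCOL_BLOCK.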
ZIRCON_DRIVER_BEGIN(gpt, gpt_driver_ops, "zircon", "0.1", 2)
    BI_ABORT_IF_AUTOBIND,
    BI_MATCH_IF(EQ, BIND_PROTOCOL, ZX_PROTOCOL_BLOCK),
ZIRCON_DRIVER_END(gpt)