1// Copyright 2018 The Fuchsia Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "ram-nand.h"
6
7#include <stdlib.h>
8#include <stdio.h>
9#include <string.h>
10
11#include <ddk/metadata.h>
12#include <ddk/metadata/bad-block.h>
13#include <ddk/metadata/nand.h>
14#include <fbl/algorithm.h>
15#include <fbl/alloc_checker.h>
16#include <fbl/auto_lock.h>
17#include <zircon/assert.h>
18#include <zircon/device/ram-nand.h>
19#include <zircon/driver/binding.h>
20#include <zircon/process.h>
21#include <zircon/syscalls.h>
22
namespace {

// Queued-operation wrapper: embeds the protocol op in an intrusive list
// node so it can live on txn_list_.  The |op| member must remain the FIRST
// member: AddToList/RemoveFromList convert between nand_op_t* and
// RamNandOp* with reinterpret_cast, which relies on both pointers sharing
// the same address.  (Query() advertises sizeof(RamNandOp) so clients
// allocate enough space for the node.)
struct RamNandOp {
    nand_op_t op;
    list_node_t node;
};

}  // namespace
31
// Constructs the device with fixed geometry |params|.  No resources are
// acquired here; the backing store and worker thread are set up by Init().
NandDevice::NandDevice(const NandParams& params, zx_device_t* parent)
        : DeviceType(parent), params_(params) {
}
35
// Tears down in a strict order: mark the device dead, wake the worker so it
// observes the flag and exits, join it, and only then drain the queue (no
// locking is needed for the drain because the worker is gone).
NandDevice::~NandDevice() {
    if (thread_created_) {
        Kill();
        sync_completion_signal(&wake_signal_);
        int result_code;
        thrd_join(worker_, &result_code);

        // Fail any operations that were still queued when the device died.
        for (;;) {
            RamNandOp* nand_op = list_remove_head_type(&txn_list_, RamNandOp, node);
            if (!nand_op) {
                break;
            }
            nand_op_t* operation = &nand_op->op;
            operation->completion_cb(operation, ZX_ERR_BAD_STATE);
        }
    }

    // Release the mapping of the backing VMO established by Init().
    if (mapped_addr_) {
        zx_vmar_unmap(zx_vmar_root_self(), mapped_addr_, DdkGetSize());
    }
    DdkRemove();
}
58
// Initializes the backing store, publishes the device (invisible at first so
// metadata can be attached before children bind), adds the optional metadata
// carried in |info|, and finally makes the device visible.  Returns the first
// error encountered.
zx_status_t NandDevice::Bind(const ram_nand_info_t& info) {
    char name[NAME_MAX];
    // The zx::vmo temporary takes ownership of the caller's handle, so it is
    // closed on any early return below.
    zx_status_t status = Init(name, zx::vmo(info.vmo));
    if (status != ZX_OK) {
        return status;
    }

    zx_device_prop_t props[] = {
        {BIND_PROTOCOL, 0, ZX_PROTOCOL_NAND},
        {BIND_NAND_CLASS, 0, params_.nand_class},
    };

    status = DdkAdd(name, DEVICE_ADD_INVISIBLE, props, fbl::count_of(props));
    if (status != ZX_OK) {
        return status;
    }

    // NOTE(review): from here on the device has already been added; on a
    // metadata failure we return without removing it (the destructor calls
    // DdkRemove) — confirm the caller destroys the device on Bind failure.
    if (info.export_nand_config) {
        // Translate caller-provided bad-block and extra-partition settings
        // into the private config blob consumed by nand clients.
        nand_config_t config = {
            .bad_block_config = {
                .type = kAmlogicUboot,
                .aml = {
                    .table_start_block = info.bad_block_config.table_start_block,
                    .table_end_block = info.bad_block_config.table_end_block,
                },
            },
            .extra_partition_config_count = info.extra_partition_config_count,
            .extra_partition_config = {},
        };
        for (size_t i = 0; i < info.extra_partition_config_count; i++) {
            memcpy(config.extra_partition_config[i].type_guid,
                   info.extra_partition_config[i].type_guid, ZBI_PARTITION_GUID_LEN);
            config.extra_partition_config[i].copy_count =
                    info.extra_partition_config[i].copy_count;
            config.extra_partition_config[i].copy_byte_offset =
                    info.extra_partition_config[i].copy_byte_offset;
        }
        status = DdkAddMetadata(DEVICE_METADATA_PRIVATE, &config, sizeof(config));
        if (status != ZX_OK) {
            return status;
        }
    }
    if (info.export_partition_map) {
        status = DdkAddMetadata(DEVICE_METADATA_PARTITION_MAP, &info.partition_map,
                                sizeof(info.partition_map));
        if (status != ZX_OK) {
            return status;
        }
    }

    DdkMakeVisible();
    return ZX_OK;
}
112
// Sets up the backing store and worker thread.  If |vmo| is valid it becomes
// the storage (and must be at least DdkGetSize() bytes); otherwise a fresh
// VMO is created and initialized to the erased state (0xff).  On success the
// store is mapped at mapped_addr_ and |name| holds a unique device name.
zx_status_t NandDevice::Init(char name[NAME_MAX], zx::vmo vmo) {
    ZX_DEBUG_ASSERT(!thread_created_);
    // Unique name per instance.  NOTE(review): plain static counter — assumes
    // device binds are serialized; confirm no concurrent Init calls.
    static uint64_t dev_count = 0;
    snprintf(name, NAME_MAX, "ram-nand-%" PRIu64, dev_count++);

    zx_status_t status;
    const bool use_vmo = vmo.is_valid();
    if (use_vmo) {
        vmo_ = fbl::move(vmo);

        uint64_t size;
        status = vmo_.get_size(&size);
        if (status != ZX_OK) {
            return status;
        }
        // The provided store must cover the whole advertised device size.
        if (size < DdkGetSize()) {
            return ZX_ERR_INVALID_ARGS;
        }
    } else {
        status = zx::vmo::create(DdkGetSize(), 0, &vmo_);
        if (status != ZX_OK) {
            return status;
        }
    }

    status = zx_vmar_map(zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                         0, vmo_.get(), 0, DdkGetSize(), &mapped_addr_);
    if (status != ZX_OK) {
        return status;
    }
    // A caller-provided VMO keeps its contents; a fresh one starts erased.
    if (!use_vmo) {
        memset(reinterpret_cast<char*>(mapped_addr_), 0xff, DdkGetSize());
    }

    list_initialize(&txn_list_);
    if (thrd_create(&worker_, WorkerThreadStub, this) != thrd_success) {
        return ZX_ERR_NO_RESOURCES;
    }
    thread_created_ = true;

    return ZX_OK;
}
155
// Driver unbind hook: marks the device dead, wakes the worker thread so it
// can observe the flag and exit, then schedules device removal.
void NandDevice::DdkUnbind() {
    Kill();
    sync_completion_signal(&wake_signal_);
    DdkRemove();
}
161
162zx_status_t NandDevice::DdkIoctl(uint32_t op, const void* in_buf, size_t in_len,
163                                 void* out_buf, size_t out_len, size_t* out_actual) {
164    {
165        fbl::AutoLock lock(&lock_);
166        if (dead_) {
167            return ZX_ERR_BAD_STATE;
168        }
169    }
170
171    switch (op) {
172    case IOCTL_RAM_NAND_UNLINK:
173        DdkUnbind();
174        return ZX_OK;
175
176    default:
177        return ZX_ERR_NOT_SUPPORTED;
178    }
179}
180
// Nand protocol: reports the device geometry and the per-operation buffer
// size clients must allocate (the protocol op plus our list node).
void NandDevice::Query(nand_info_t* info_out, size_t* nand_op_size_out) {
    *info_out = params_;
    *nand_op_size_out = sizeof(RamNandOp);
}
185
186void NandDevice::Queue(nand_op_t* operation) {
187    uint32_t max_pages = params_.NumPages();
188    switch (operation->command) {
189    case NAND_OP_READ:
190    case NAND_OP_WRITE: {
191        if (operation->rw.offset_nand >= max_pages || !operation->rw.length ||
192            (max_pages - operation->rw.offset_nand) < operation->rw.length) {
193            operation->completion_cb(operation, ZX_ERR_OUT_OF_RANGE);
194            return;
195        }
196        if (operation->rw.data_vmo == ZX_HANDLE_INVALID &&
197            operation->rw.oob_vmo == ZX_HANDLE_INVALID) {
198            operation->completion_cb(operation, ZX_ERR_BAD_HANDLE);
199            return;
200        }
201        break;
202    }
203    case NAND_OP_ERASE:
204        if (!operation->erase.num_blocks ||
205            operation->erase.first_block >= params_.num_blocks ||
206            params_.num_blocks - operation->erase.first_block < operation->erase.num_blocks) {
207            operation->completion_cb(operation, ZX_ERR_OUT_OF_RANGE);
208            return;
209        }
210        break;
211
212    default:
213        operation->completion_cb(operation, ZX_ERR_NOT_SUPPORTED);
214        return;
215    }
216
217    if (AddToList(operation)) {
218        sync_completion_signal(&wake_signal_);
219    } else {
220        operation->completion_cb(operation, ZX_ERR_BAD_STATE);
221    }
222}
223
// Nand protocol: a RAM-backed device has no factory bad blocks, so the
// reported list is always empty.
zx_status_t NandDevice::GetFactoryBadBlockList(uint32_t* bad_blocks, uint32_t bad_block_len,
                                               uint32_t* num_bad_blocks) {
    *num_bad_blocks = 0;
    return ZX_OK;
}
229
// Marks the device dead under the lock: no new operations will be queued
// and the worker thread will exit its loop on the next dequeue attempt.
void NandDevice::Kill() {
    fbl::AutoLock lock(&lock_);
    dead_ = true;
}
234
235bool NandDevice::AddToList(nand_op_t* operation) {
236    fbl::AutoLock lock(&lock_);
237    bool is_dead = dead_;
238    if (!dead_) {
239        RamNandOp* nand_op = reinterpret_cast<RamNandOp*>(operation);
240        list_add_tail(&txn_list_, &nand_op->node);
241    }
242    return !is_dead;
243}
244
245bool NandDevice::RemoveFromList(nand_op_t** operation) {
246    fbl::AutoLock lock(&lock_);
247    bool is_dead = dead_;
248    if (!dead_) {
249        RamNandOp* nand_op = list_remove_head_type(&txn_list_, RamNandOp, node);
250        *operation = reinterpret_cast<nand_op_t*>(nand_op);
251    }
252    return !is_dead;
253}
254
// Worker loop: blocks until an operation is queued (or the device dies),
// executes it against the mapped store, and fires its completion callback.
// Returns 0 when the device is torn down.
int NandDevice::WorkerThread() {
    for (;;) {
        nand_op_t* operation;
        for (;;) {
            if (!RemoveFromList(&operation)) {
                return 0;  // Device is dead; exit the thread.
            }
            if (operation) {
                // Clear the signal before processing; any signal raised
                // while we work remains set for the next wait.
                sync_completion_reset(&wake_signal_);
                break;
            } else {
                // Queue empty: sleep until Queue()/Kill()/~NandDevice wakes us.
                sync_completion_wait(&wake_signal_, ZX_TIME_INFINITE);
            }
        }

        zx_status_t status = ZX_OK;

        switch (operation->command) {
        case NAND_OP_READ:
        case NAND_OP_WRITE:
            // Main data first; OOB only if the data transfer succeeded.
            status = ReadWriteData(operation);
            if (status == ZX_OK) {
                status = ReadWriteOob(operation);
            }
            break;

        case NAND_OP_ERASE: {
            status = Erase(operation);
            break;
        }
        default:
            // Queue() filters commands, so anything else is a logic error.
            ZX_DEBUG_ASSERT(false);  // Unexpected.
        }

        operation->completion_cb(operation, status);
    }
}
292
293int NandDevice::WorkerThreadStub(void* arg) {
294    NandDevice* device = static_cast<NandDevice*>(arg);
295    return device->WorkerThread();
296}
297
// Transfers the main-data portion of a read/write op between the mapped
// NAND image and the client's data VMO.  A missing data VMO means the
// client only wants OOB, so this is a no-op.
zx_status_t NandDevice::ReadWriteData(nand_op_t* operation) {
    if (operation->rw.data_vmo == ZX_HANDLE_INVALID) {
        return ZX_OK;
    }

    // Page-unit offsets/lengths converted to byte offsets into the mapping.
    uint32_t nand_addr = operation->rw.offset_nand * params_.page_size;
    uint64_t vmo_addr = operation->rw.offset_data_vmo * params_.page_size;
    uint32_t length = operation->rw.length * params_.page_size;
    void* addr = reinterpret_cast<char*>(mapped_addr_) + nand_addr;

    if (operation->command == NAND_OP_READ) {
        operation->rw.corrected_bit_flips = 0;  // RAM never flips bits.
        // A NAND read copies from the store INTO the client VMO, hence
        // zx_vmo_write.
        return zx_vmo_write(operation->rw.data_vmo, addr, vmo_addr, length);
    }

    ZX_DEBUG_ASSERT(operation->command == NAND_OP_WRITE);

    // Likely something bad is going on if writing multiple blocks.
    ZX_DEBUG_ASSERT_MSG(operation->rw.length <= params_.pages_per_block,
                        "Writing multiple blocks");
    ZX_DEBUG_ASSERT_MSG(operation->rw.offset_nand / params_.pages_per_block ==
                        (operation->rw.offset_nand + operation->rw.length - 1)
                        / params_.pages_per_block,
                        "Writing multiple blocks");

    return zx_vmo_read(operation->rw.data_vmo, addr, vmo_addr, length);
}
325
// Transfers the OOB (spare) portion of a read/write op.  The OOB region is
// laid out after all main data in the mapping (hence the MainDataSize()
// base).  A missing OOB VMO means only main data was requested.
zx_status_t NandDevice::ReadWriteOob(nand_op_t* operation) {
    if (operation->rw.oob_vmo == ZX_HANDLE_INVALID) {
        return ZX_OK;
    }

    uint32_t nand_addr = MainDataSize() + operation->rw.offset_nand * params_.oob_size;
    // NOTE(review): offset_oob_vmo is scaled by page_size while the transfer
    // length is in oob_size units — presumably the VMO offset is expressed
    // in pages per the nand protocol; confirm against the protocol header.
    uint64_t vmo_addr = operation->rw.offset_oob_vmo * params_.page_size;
    uint32_t length = operation->rw.length * params_.oob_size;
    void* addr = reinterpret_cast<char*>(mapped_addr_) + nand_addr;

    if (operation->command == NAND_OP_READ) {
        operation->rw.corrected_bit_flips = 0;  // RAM never flips bits.
        // Read copies from the store INTO the client VMO.
        return zx_vmo_write(operation->rw.oob_vmo, addr, vmo_addr, length);
    }

    ZX_DEBUG_ASSERT(operation->command == NAND_OP_WRITE);
    return zx_vmo_read(operation->rw.oob_vmo, addr, vmo_addr, length);
}
344
345zx_status_t NandDevice::Erase(nand_op_t* operation) {
346    ZX_DEBUG_ASSERT(operation->command == NAND_OP_ERASE);
347
348    uint32_t block_size = params_.page_size * params_.pages_per_block;
349    uint32_t nand_addr = operation->erase.first_block * block_size;
350    uint32_t length = operation->erase.num_blocks * block_size;
351    void* addr = reinterpret_cast<char*>(mapped_addr_) + nand_addr;
352
353    memset(addr, 0xff, length);
354
355    // Clear the OOB area:
356    uint32_t oob_per_block = params_.oob_size * params_.pages_per_block;
357    length = operation->erase.num_blocks * oob_per_block;
358    nand_addr = MainDataSize() + operation->erase.first_block * oob_per_block;
359    addr = reinterpret_cast<char*>(mapped_addr_) + nand_addr;
360
361    memset(addr, 0xff, length);
362
363    return ZX_OK;
364}
365