Lines Matching defs:operation

48 nand_op_t* operation = &nand_op->op;
49 operation->completion_cb(operation, ZX_ERR_BAD_STATE);
186 void NandDevice::Queue(nand_op_t* operation) {
188 switch (operation->command) {
191 if (operation->rw.offset_nand >= max_pages || !operation->rw.length ||
192 (max_pages - operation->rw.offset_nand) < operation->rw.length) {
193 operation->completion_cb(operation, ZX_ERR_OUT_OF_RANGE);
196 if (operation->rw.data_vmo == ZX_HANDLE_INVALID &&
197 operation->rw.oob_vmo == ZX_HANDLE_INVALID) {
198 operation->completion_cb(operation, ZX_ERR_BAD_HANDLE);
204 if (!operation->erase.num_blocks ||
205 operation->erase.first_block >= params_.num_blocks ||
206 params_.num_blocks - operation->erase.first_block < operation->erase.num_blocks) {
207 operation->completion_cb(operation, ZX_ERR_OUT_OF_RANGE);
213 operation->completion_cb(operation, ZX_ERR_NOT_SUPPORTED);
217 if (AddToList(operation)) {
220 operation->completion_cb(operation, ZX_ERR_BAD_STATE);
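
The Queue() matches above (source lines 186-220) are the validation front end: a read or write must land entirely inside the device and name at least one of the data and OOB VMOs, an erase must fit inside params_.num_blocks, any other command is completed with ZX_ERR_NOT_SUPPORTED, and an operation that cannot be queued is completed with ZX_ERR_BAD_STATE. Below is a self-contained sketch of just the range check, written in the same overflow-safe shape; the Geometry struct and its field names are stand-ins for the driver's params_ and max_pages, not taken from the source.

    #include <cstdint>

    // Stand-in for the device geometry; the field names mirror the listing
    // but the struct itself is illustrative.
    struct Geometry {
        uint32_t num_pages;   // total pages exposed (max_pages in the listing)
        uint32_t num_blocks;  // total erase blocks (params_.num_blocks)
    };

    // True when [offset, offset + length) fits in total. Comparing the
    // remaining space (total - offset) instead of computing offset + length
    // avoids 32-bit wrap-around, matching source lines 191-192 and 204-206.
    bool RangeFits(uint32_t offset, uint32_t length, uint32_t total) {
        return length != 0 && offset < total && (total - offset) >= length;
    }

    bool ReadWriteInRange(const Geometry& g, uint32_t offset_nand, uint32_t length) {
        return RangeFits(offset_nand, length, g.num_pages);
    }

    bool EraseInRange(const Geometry& g, uint32_t first_block, uint32_t num_blocks) {
        return RangeFits(first_block, num_blocks, g.num_blocks);
    }
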
235 bool NandDevice::AddToList(nand_op_t* operation) {
239 RamNandOp* nand_op = reinterpret_cast<RamNandOp*>(operation);
245 bool NandDevice::RemoveFromList(nand_op_t** operation) {
250 *operation = reinterpret_cast<nand_op_t*>(nand_op);
257 nand_op_t* operation;
259 if (!RemoveFromList(&operation)) {
262 if (operation) {
272 switch (operation->command) {
275 status = ReadWriteData(operation);
277 status = ReadWriteOob(operation);
282 status = Erase(operation);
289 operation->completion_cb(operation, status);
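
The list plumbing (source lines 235-262) treats each incoming nand_op_t as the first member of a driver-side RamNandOp node: AddToList reinterpret_casts the operation to the node type (line 239), RemoveFromList casts back (line 250), and line 48's &nand_op->op walks the same relationship in the other direction. The worker loop (lines 257-289) then pops one operation at a time, dispatches on its command, and invokes completion_cb with the resulting status. Here is a sketch of the layout trick with stand-in definitions; the real nand_op_t and RamNandOp fields are not part of the matched lines.

    #include <cstdint>

    // Illustrative stand-ins for the protocol operation and the driver's
    // wrapper; only the layout relationship matters here.
    struct nand_op_t {
        uint32_t command;
        void (*completion_cb)(nand_op_t* op, int32_t status);
    };

    struct RamNandOp {
        nand_op_t op;               // must stay the first member
        RamNandOp* next = nullptr;  // assumed intrusive-list link
    };

    // Both directions rely on op being the first member of a
    // standard-layout RamNandOp, which makes the pointers interconvertible.
    RamNandOp* ToNode(nand_op_t* operation) {
        return reinterpret_cast<RamNandOp*>(operation);
    }

    nand_op_t* ToOperation(RamNandOp* node) {
        return reinterpret_cast<nand_op_t*>(node);
    }

The point of the cast is that queueing needs no extra allocation: the node storage travels with the operation itself.
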
298 zx_status_t NandDevice::ReadWriteData(nand_op_t* operation) {
299 if (operation->rw.data_vmo == ZX_HANDLE_INVALID) {
303 uint32_t nand_addr = operation->rw.offset_nand * params_.page_size;
304 uint64_t vmo_addr = operation->rw.offset_data_vmo * params_.page_size;
305 uint32_t length = operation->rw.length * params_.page_size;
308 if (operation->command == NAND_OP_READ) {
309 operation->rw.corrected_bit_flips = 0;
310 return zx_vmo_write(operation->rw.data_vmo, addr, vmo_addr, length);
313 ZX_DEBUG_ASSERT(operation->command == NAND_OP_WRITE);
316 ZX_DEBUG_ASSERT_MSG(operation->rw.length <= params_.pages_per_block,
318 ZX_DEBUG_ASSERT_MSG(operation->rw.offset_nand / params_.pages_per_block ==
319 (operation->rw.offset_nand + operation->rw.length - 1)
323 return zx_vmo_read(operation->rw.data_vmo, addr, vmo_addr, length);
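
ReadWriteData (source lines 298-323) is a flat copy between the mapped NAND image and the caller's data VMO, and reads also report zero corrected bit flips (line 309). Every field of the rw op is expressed in pages, so each one is scaled by page_size before the copy; a NAND_OP_READ then becomes zx_vmo_write into the caller's VMO and a NAND_OP_WRITE becomes zx_vmo_read from it, the names being from the VMO's point of view. A small self-contained sketch of the offset arithmetic alone; kPageSize is an assumed value standing in for params_.page_size.

    #include <cstdint>

    constexpr uint32_t kPageSize = 4096;  // assumed; params_.page_size in the driver

    // Byte offsets computed on source lines 303-305.
    struct DataCopy {
        uint32_t nand_byte_offset;  // into the mapped NAND image
        uint64_t vmo_byte_offset;   // into the caller's data VMO
        uint32_t byte_length;       // bytes to copy
    };

    DataCopy ComputeDataCopy(uint32_t offset_nand, uint64_t offset_data_vmo, uint32_t length) {
        return DataCopy{
            offset_nand * kPageSize,
            offset_data_vmo * kPageSize,
            length * kPageSize,
        };
    }
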
326 zx_status_t NandDevice::ReadWriteOob(nand_op_t* operation) {
327 if (operation->rw.oob_vmo == ZX_HANDLE_INVALID) {
331 uint32_t nand_addr = MainDataSize() + operation->rw.offset_nand * params_.oob_size;
332 uint64_t vmo_addr = operation->rw.offset_oob_vmo * params_.page_size;
333 uint32_t length = operation->rw.length * params_.oob_size;
336 if (operation->command == NAND_OP_READ) {
337 operation->rw.corrected_bit_flips = 0;
338 return zx_vmo_write(operation->rw.oob_vmo, addr, vmo_addr, length);
341 ZX_DEBUG_ASSERT(operation->command == NAND_OP_WRITE);
342 return zx_vmo_read(operation->rw.oob_vmo, addr, vmo_addr, length);
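
ReadWriteOob (source lines 326-342) is the same copy with different scaling: the NAND-side offset starts at MainDataSize(), because the spare bytes are stored after the whole main data area, and is scaled by oob_size, while the VMO-side offset is still expressed in page_size units (line 332). A matching sketch with assumed geometry values:

    #include <cstdint>

    constexpr uint32_t kPageSize = 4096;       // assumed; params_.page_size
    constexpr uint32_t kOobSize = 8;           // assumed; params_.oob_size
    constexpr uint32_t kMainDataSize = 256u * 64u * kPageSize;  // stand-in for MainDataSize()

    // Byte offsets computed on source lines 331-333.
    struct OobCopy {
        uint32_t nand_byte_offset;  // into the mapped image, past the main data area
        uint64_t vmo_byte_offset;   // into the caller's OOB VMO
        uint32_t byte_length;
    };

    OobCopy ComputeOobCopy(uint32_t offset_nand, uint64_t offset_oob_vmo, uint32_t length) {
        return OobCopy{
            kMainDataSize + offset_nand * kOobSize,
            offset_oob_vmo * kPageSize,
            length * kOobSize,
        };
    }
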
345 zx_status_t NandDevice::Erase(nand_op_t* operation) {
346 ZX_DEBUG_ASSERT(operation->command == NAND_OP_ERASE);
349 uint32_t nand_addr = operation->erase.first_block * block_size;
350 uint32_t length = operation->erase.num_blocks * block_size;
357 length = operation->erase.num_blocks * oob_per_block;
358 nand_addr = MainDataSize() + operation->erase.first_block * oob_per_block;
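
Erase (source lines 345-358) converts a block range into two byte ranges in the mapped image: one in the main data area and one in the OOB area that trails it. The block_size and oob_per_block values it uses are not among the matched lines; the sketch below assumes the obvious definitions (page_size * pages_per_block and oob_size * pages_per_block respectively), and whatever Erase() then writes into those ranges is also outside the matches.

    #include <cstdint>

    // Assumed geometry standing in for params_ and MainDataSize().
    constexpr uint32_t kPageSize = 4096;
    constexpr uint32_t kOobSize = 8;
    constexpr uint32_t kPagesPerBlock = 64;
    constexpr uint32_t kNumBlocks = 256;
    constexpr uint32_t kMainDataSize = kNumBlocks * kPagesPerBlock * kPageSize;

    struct ByteRange {
        uint32_t offset;
        uint32_t length;
    };

    // Main-area bytes covered by an erase (source lines 349-350).
    ByteRange EraseMainRange(uint32_t first_block, uint32_t num_blocks) {
        const uint32_t block_size = kPageSize * kPagesPerBlock;
        return ByteRange{first_block * block_size, num_blocks * block_size};
    }

    // Matching OOB bytes, stored after the main data area (source lines 357-358).
    ByteRange EraseOobRange(uint32_t first_block, uint32_t num_blocks) {
        const uint32_t oob_per_block = kOobSize * kPagesPerBlock;
        return ByteRange{kMainDataSize + first_block * oob_per_block, num_blocks * oob_per_block};
    }
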