/*
 * Copyright 2021 David Sebek, dasebek@gmail.com
 * Copyright 2004-2013 Haiku, Inc.
 * Copyright 2002-2003 Thomas Kurschel
 * All rights reserved. Distributed under the terms of the MIT License.
 */


//!	Handling of block devices


#include <string.h>

#include <AutoDeleter.h>

#include "scsi_periph_int.h"


// UNMAP command limits
#define UNMAP_MAX_LBA_VALUE				UINT64_MAX
#define UNMAP_MAX_BLOCK_COUNT_VALUE		UINT32_MAX
#define UNMAP_MAX_DESCRIPTORS			4095
	// Limit imposed by the UNMAP command structure
#define UNMAP_DEFAULT_DESCRIPTORS		255
	// Reasonable default (?) when not specified by the device

// WRITE SAME (16) command limits
#define WS16_MAX_LBA_VALUE				UINT64_MAX
#define WS16_MAX_BLOCK_COUNT_VALUE		UINT32_MAX

// WRITE SAME (10) command limits
#define WS10_MAX_LBA_VALUE				UINT32_MAX
#define WS10_MAX_BLOCK_COUNT_VALUE		UINT16_MAX


struct CapacityInfo {
	// Result of the READ CAPACITY command
	bool capacityFilled;
	uint64 lastLba;
	uint32 blockSize;
	uint32 physicalBlockSize;

	// Provisioning info from READ CAPACITY
	bool provisioningFilled;
	bool lbpme;
	bool lbprz;
};


struct UnmapSupport {
	// UNMAP commands supported by the device
	bool commandSupportFilled;
	bool unmapSupported;
	bool ws16Supported;
	bool ws10Supported;

	// Block limits for UNMAP commands
	bool blockLimitsFilled;
	uint32 maxUnmapLbaCount;
	uint32 maxUnmapDescriptorCount;
	uint64 maxWritesameLength;
};


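/*!	Decides whether READ CAPACITY (16) should be tried before
	READ CAPACITY (10): either protection information is enabled on the
	device or the device conforms to a standard newer than SPC-2.
*/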
static bool
prefer_read_capacity_16(scsi_periph_device_info* device)
{
	const scsi_res_inquiry* inquiryData = NULL;
	size_t inquiryDataLength;

	if (gDeviceManager->get_attr_raw(device->node, SCSI_DEVICE_INQUIRY_ITEM,
				(const void**)&inquiryData, &inquiryDataLength, true) != B_OK
		|| inquiryDataLength != sizeof(*inquiryData)) {
		return false;
	}

	if (inquiryData->protect)
		return true;

	if (inquiryData->ansi_version > 0x04 /* SPC-2 */)
		return true;

	return false;
}


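/*!	Checks whether the device can be expected to support Vital Product
	Data (VPD) pages, i.e. whether its INQUIRY data indicates
	conformance to SPC-2 or later.
*/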
static bool
vpd_pages_supported(scsi_periph_device_info* device)
{
	const scsi_res_inquiry* inquiryData = NULL;
	size_t inquiryDataLength;

	if (gDeviceManager->get_attr_raw(device->node, SCSI_DEVICE_INQUIRY_ITEM,
				(const void**)&inquiryData, &inquiryDataLength, true) != B_OK
		|| inquiryDataLength != sizeof(*inquiryData)) {
		return false;
	}

	if (inquiryData->ansi_version >= 0x04 /* SPC-2 */)
		return true;

	return false;
}


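/*!	Issues a READ CAPACITY (10) command and, on success, fills in the
	last LBA and the logical block size in \a capacityInfo. This command
	does not report a physical block size, so it is assumed to equal the
	logical block size.
*/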
static status_t
read_capacity_10(scsi_periph_device_info* device, scsi_ccb* request,
	CapacityInfo* capacityInfo)
{
	capacityInfo->capacityFilled = false;
	capacityInfo->provisioningFilled = false;

	scsi_res_read_capacity capacityResult;
	memset(&capacityResult, 0, sizeof(capacityResult));

	scsi_cmd_read_capacity* cmd = (scsi_cmd_read_capacity*)request->cdb;
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = SCSI_OP_READ_CAPACITY;
	// we don't set PMI (partial medium indicator) as we want the whole capacity;
	// in this case, all other parameters must be zero

	request->flags = SCSI_DIR_IN;
	request->cdb_length = sizeof(*cmd);
	request->sort = -1;
	request->timeout = device->std_timeout;

	request->data = (uint8*)&capacityResult;
	request->data_length = sizeof(capacityResult);
	request->sg_list = NULL;

	status_t res = periph_safe_exec(device, request);

	if (res == B_OK && request->data_resid == 0) {
		capacityInfo->capacityFilled = true;
		capacityInfo->lastLba
			= (uint32)B_BENDIAN_TO_HOST_INT32(capacityResult.lba);
		capacityInfo->blockSize
			= B_BENDIAN_TO_HOST_INT32(capacityResult.block_size);
		capacityInfo->physicalBlockSize = capacityInfo->blockSize;
	}

	return res;
}


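/*!	Issues a READ CAPACITY (16) command and fills in \a capacityInfo
	with as much of the returned data as was actually transferred: the
	last LBA, the logical and physical block sizes and, if available,
	the LBPME and LBPRZ provisioning bits.
*/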
static status_t
read_capacity_16(scsi_periph_device_info* device, scsi_ccb* request,
	CapacityInfo* capacityInfo)
{
	capacityInfo->capacityFilled = false;
	capacityInfo->provisioningFilled = false;

	scsi_res_read_capacity_long capacityLongResult;
	memset(&capacityLongResult, 0, sizeof(capacityLongResult));

	scsi_cmd_read_capacity_long* cmd
		= (scsi_cmd_read_capacity_long*)request->cdb;
	memset(cmd, 0, sizeof(*cmd));
	cmd->opcode = SCSI_OP_SERVICE_ACTION_IN;
	cmd->service_action = SCSI_SAI_READ_CAPACITY_16;
	cmd->alloc_length = B_HOST_TO_BENDIAN_INT32(sizeof(capacityLongResult));

	request->flags = SCSI_DIR_IN;
	request->cdb_length = sizeof(*cmd);
	request->sort = -1;
	request->timeout = device->std_timeout;

	request->data = (uint8*)&capacityLongResult;
	request->data_length = sizeof(capacityLongResult);
	request->sg_list = NULL;

	status_t res = periph_safe_exec(device, request);

	if (res == B_OK && request->data_resid
			<= (int32)sizeof(scsi_res_read_capacity_long) - 12) {
		// At least the last LBA and sector size have been transferred
		capacityInfo->capacityFilled = true;
		capacityInfo->lastLba
			= B_BENDIAN_TO_HOST_INT64(capacityLongResult.lba);
		capacityInfo->blockSize
			= B_BENDIAN_TO_HOST_INT32(capacityLongResult.block_size);
		capacityInfo->physicalBlockSize = capacityInfo->blockSize
			* (1 << capacityLongResult.logical_blocks_per_physical_block_exponent);
	}

	if (res == B_OK && request->data_resid
			<= (int32)sizeof(scsi_res_read_capacity_long) - 15) {
		// lbpme and lbprz bits were received too
		capacityInfo->provisioningFilled = true;
		capacityInfo->lbpme = capacityLongResult.lbpme;
		capacityInfo->lbprz = capacityLongResult.lbprz;
	}

	return res;
}


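/*!	Reads the Logical Block Provisioning VPD page to determine which
	unmap commands (UNMAP, WRITE SAME (16), WRITE SAME (10)) the device
	claims to support. A B_BAD_VALUE result from vpd_page_get() is
	translated to B_ERROR so that the caller can treat an unsupported
	page as "no support" instead of a failure.
*/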
static status_t
get_unmap_commands(scsi_periph_device_info* device, scsi_ccb* request,
	UnmapSupport* unmapSupport)
{
	unmapSupport->commandSupportFilled = false;

	scsi_page_lb_provisioning vpdProvisioning;
	memset(&vpdProvisioning, 0, sizeof(vpdProvisioning));
	status_t vpdStatus = vpd_page_get(device, request,
		SCSI_PAGE_LB_PROVISIONING, &vpdProvisioning, sizeof(vpdProvisioning));

	if (vpdStatus == B_OK
		&& request->data_resid <= (int32)sizeof(scsi_page_lb_provisioning) - 6
		&& vpdProvisioning.page_code == SCSI_PAGE_LB_PROVISIONING
		&& B_BENDIAN_TO_HOST_INT16(vpdProvisioning.page_length) >= 2) {
		unmapSupport->commandSupportFilled = true;
		unmapSupport->unmapSupported = vpdProvisioning.lbpu;
		unmapSupport->ws16Supported = vpdProvisioning.lbpws;
		unmapSupport->ws10Supported = vpdProvisioning.lbpws10;
	}

	if (vpdStatus == B_BAD_VALUE)
		return B_ERROR;

	return vpdStatus;
}


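/*!	Reads the Block Limits VPD page to obtain the UNMAP and WRITE SAME
	limits of the device. As in get_unmap_commands(), B_BAD_VALUE from
	vpd_page_get() is translated to B_ERROR.
*/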
static status_t
get_unmap_limits(scsi_periph_device_info* device, scsi_ccb* request,
	UnmapSupport* unmapSupport)
{
	unmapSupport->blockLimitsFilled = false;

	scsi_page_block_limits vpdBlockLimits;
	memset(&vpdBlockLimits, 0, sizeof(vpdBlockLimits));
	status_t vpdStatus = vpd_page_get(device, request,
		SCSI_PAGE_BLOCK_LIMITS, &vpdBlockLimits, sizeof(vpdBlockLimits));

	if (vpdStatus == B_OK
		&& request->data_resid <= (int32)sizeof(scsi_page_block_limits) - 44
		&& vpdBlockLimits.page_code == SCSI_PAGE_BLOCK_LIMITS
		&& B_BENDIAN_TO_HOST_INT16(vpdBlockLimits.page_length) == 0x3c) {
		unmapSupport->blockLimitsFilled = true;
		unmapSupport->maxUnmapLbaCount = B_BENDIAN_TO_HOST_INT32(
			vpdBlockLimits.max_unmap_lba_count);
		unmapSupport->maxUnmapDescriptorCount = B_BENDIAN_TO_HOST_INT32(
			vpdBlockLimits.max_unmap_blk_count);
		unmapSupport->maxWritesameLength = B_BENDIAN_TO_HOST_INT64(
			vpdBlockLimits.max_write_same_length);
	}

	if (vpdStatus == B_BAD_VALUE)
		return B_ERROR;

	return vpdStatus;
}


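/*!	Selects the trim command to use (UNMAP, WRITE SAME (16) or
	WRITE SAME (10), in that order of preference) together with the LBA
	count and block descriptor count limits, based on the values
	reported in the VPD pages.
*/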
static void
determine_unmap_support(const UnmapSupport* unmapSupport,
	enum trim_command* unmapCommand, uint32* maxLbaCount,
	uint32* maxDescriptorCount)
{
#ifdef DEBUG_TRIM
	if (unmapSupport->commandSupportFilled)
		dprintf("TRIM: device reports (LBP VPD): LBPU = %d, LBPWS = %d,"
			" LBPWS10 = %d\n", unmapSupport->unmapSupported,
			unmapSupport->ws16Supported, unmapSupport->ws10Supported);
	else
		dprintf("TRIM: could not get the LBP VPD of the device\n");
	if (unmapSupport->blockLimitsFilled)
		dprintf("TRIM: device reports (Block Limits VPD):"
			"\nTRIM: MAXIMUM UNMAP LBA COUNT = %" B_PRIu32
			"\nTRIM: MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT = %" B_PRIu32
			"\nTRIM: MAXIMUM WRITESAME LENGTH = %" B_PRIu64 "\n",
			unmapSupport->maxUnmapLbaCount,
			unmapSupport->maxUnmapDescriptorCount,
			unmapSupport->maxWritesameLength);
	else
		dprintf("TRIM: could not get Block Limits VPD of the device\n");
#endif

	*unmapCommand = TRIM_NONE;
	*maxLbaCount = 0;
	*maxDescriptorCount = 0;

	if (!unmapSupport->commandSupportFilled
		|| !unmapSupport->blockLimitsFilled)
		return;

	if (unmapSupport->unmapSupported
		&& unmapSupport->maxUnmapLbaCount > 0
		&& unmapSupport->maxUnmapDescriptorCount > 0) {
		*unmapCommand = TRIM_UNMAP;
		*maxLbaCount = unmapSupport->maxUnmapLbaCount;
		if (unmapSupport->maxUnmapDescriptorCount == UINT32_MAX
			|| unmapSupport->maxUnmapDescriptorCount > UNMAP_MAX_DESCRIPTORS) {
			// Choose a reasonable value instead
			*maxDescriptorCount = UNMAP_DEFAULT_DESCRIPTORS;
		} else {
			*maxDescriptorCount = unmapSupport->maxUnmapDescriptorCount;
		}
	}

	if (*unmapCommand == TRIM_NONE && unmapSupport->ws16Supported) {
		uint64 maxLength = unmapSupport->maxWritesameLength;
		if (maxLength == 0) {
			// WRITE SAME limit not reported, try UNMAP limit instead
			if (unmapSupport->maxUnmapLbaCount > 0)
				maxLength = unmapSupport->maxUnmapLbaCount;
			else
				maxLength = WS16_MAX_BLOCK_COUNT_VALUE;
		}
		*unmapCommand = TRIM_WRITESAME16;
		*maxLbaCount = min_c(maxLength, WS16_MAX_BLOCK_COUNT_VALUE);
		*maxDescriptorCount = 1;
	}

	if (*unmapCommand == TRIM_NONE && unmapSupport->ws10Supported) {
		uint64 maxLength = unmapSupport->maxWritesameLength;
		if (maxLength == 0) {
			// WRITE SAME limit not reported, try UNMAP limit instead
			if (unmapSupport->maxUnmapLbaCount > 0)
				maxLength = unmapSupport->maxUnmapLbaCount;
			else
				maxLength = WS10_MAX_BLOCK_COUNT_VALUE;
		}
		*unmapCommand = TRIM_WRITESAME10;
		*maxLbaCount = min_c(maxLength, WS10_MAX_BLOCK_COUNT_VALUE);
		*maxDescriptorCount = 1;
	}
}


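/*!	Determines the capacity and block sizes of the device via
	READ CAPACITY and passes them to the driver through the set_capacity
	callback. Also detects whether the device supports trimming and
	stores the chosen command and its limits in \a device.
*/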
status_t
periph_check_capacity(scsi_periph_device_info* device, scsi_ccb* request)
{
	CapacityInfo capacityInfo = {0};
	status_t res;

	SHOW_FLOW(3, "%p, %p", device, request);

	// The driver does not provide a set_capacity callback, so it is
	// apparently not a block device driver; there is nothing to do
	if (device->callbacks->set_capacity == NULL)
		return B_OK;

	if (prefer_read_capacity_16(device)) {
		SHOW_FLOW0(3, "READ CAPACITY 16 tried first");
		res = read_capacity_16(device, request, &capacityInfo);

		if (res == B_ERROR) {
			SHOW_FLOW0(3, "READ CAPACITY 16 failed, trying READ CAPACITY 10");
			res = read_capacity_10(device, request, &capacityInfo);
		}
	} else {
		SHOW_FLOW0(3, "READ CAPACITY 10 tried first");
		res = read_capacity_10(device, request, &capacityInfo);

		if (res == B_OK && capacityInfo.capacityFilled
			&& capacityInfo.lastLba == UINT32_MAX) {
			SHOW_FLOW0(3, "Device is too large, trying READ CAPACITY 16");
			res = read_capacity_16(device, request, &capacityInfo);
		}
	}

	uint64 capacity;
	uint32 blockSize, physicalBlockSize;

	if (capacityInfo.capacityFilled) {
		capacity = capacityInfo.lastLba + 1;
		blockSize = capacityInfo.blockSize;
		physicalBlockSize = capacityInfo.physicalBlockSize;
	} else {
		capacity = 0;
		blockSize = 0;
		physicalBlockSize = 0;
	}

	enum trim_command unmapCommand = TRIM_NONE;
	uint32 maxLbaCount = 0;
	uint32 maxDescriptorCount = 0;

	if (capacityInfo.provisioningFilled
		&& capacityInfo.lbpme
		&& vpd_pages_supported(device)) {
		UnmapSupport unmapSupport = {0};

		// Don't fail if the device doesn't support the command
		// but fail if some other error happens
		if (res == B_OK) {
			status_t vpdStatus = get_unmap_commands(device, request,
				&unmapSupport);
			if (vpdStatus != B_OK && vpdStatus != B_ERROR)
				res = vpdStatus;
		}

		if (res == B_OK) {
			status_t vpdStatus = get_unmap_limits(device, request,
				&unmapSupport);
			if (vpdStatus != B_OK && vpdStatus != B_ERROR)
				res = vpdStatus;
		}

		determine_unmap_support(&unmapSupport, &unmapCommand,
				&maxLbaCount, &maxDescriptorCount);

		if (maxLbaCount == 0 || maxDescriptorCount == 0)
			unmapCommand = TRIM_NONE;
	}

	if (res == B_DEV_MEDIA_CHANGED) {
		// in this case, the error handler has already called check_capacity
		// recursively, so we ignore our (invalid) result
		SHOW_FLOW0(3, "ignore result because medium change");
		return B_DEV_MEDIA_CHANGED;
	}

	if (res == B_OK && !capacityInfo.capacityFilled)
		// The capacity and block size are set to 0 in this case, but also
		// let the caller know that the device did not report these values
		res = B_ERROR;

	SHOW_FLOW(3, "capacity = %" B_PRIu64 ", block_size = %" B_PRIu32
		" (%sreported)", capacity, blockSize,
		capacityInfo.capacityFilled ? "" : "not ");
	SHOW_INFO(1, "TRIM: Setting trim support to %s",
		unmapCommand == TRIM_NONE ? "disabled"
			: unmapCommand == TRIM_UNMAP ? "UNMAP"
			: unmapCommand == TRIM_WRITESAME16 ? "WRITE SAME (16)"
			: unmapCommand == TRIM_WRITESAME10 ? "WRITE SAME (10)"
			: "unknown");
	SHOW_FLOW(3, "TRIM: Block limits: size = %" B_PRIu32
		", descriptors = %" B_PRIu32, maxLbaCount, maxDescriptorCount);

	mutex_lock(&device->mutex);
		// Was there a reason why this mutex
		// was previously locked much earlier?

	device->unmap_command = unmapCommand;
	device->max_unmap_lba_count = maxLbaCount;
	device->max_unmap_descriptor_count = maxDescriptorCount;

	device->block_size = blockSize;
	device->physical_block_size = physicalBlockSize;

	device->callbacks->set_capacity(device->periph_device,
		capacity, blockSize, physicalBlockSize);

/*	device->byte2blk_shift = log2( device->block_size );
	if( device->byte2blk_shift < 0 ) {
		// this may be too restrictive...
		device->capacity = -1;
		return ERR_DEV_GENERAL;
	}*/

	mutex_unlock(&device->mutex);

	SHOW_FLOW(3, "done (%s)", strerror(res));

	return res;
}


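/*!	Trims the given block ranges using the UNMAP command. Large ranges
	are split into multiple block descriptors and, when the device
	limits require it, into multiple UNMAP commands. The number of
	blocks submitted for unmapping is returned in \a trimmedBlocks.
*/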
static status_t
trim_unmap(scsi_periph_device_info* device, scsi_ccb* request,
	scsi_block_range* ranges, uint32 rangeCount, uint64* trimmedBlocks)
{
	uint64 maxLength = UNMAP_MAX_BLOCK_COUNT_VALUE;
	uint64 maxBlocksInRequest = device->max_unmap_lba_count;
	uint32 maxDescriptors = device->max_unmap_descriptor_count;

	*trimmedBlocks = 0;

	// Allocate a single buffer and re-use it between requests
	size_t expectedDescriptorCount = 0;
	for (uint32 i = 0; i < rangeCount; i++) {
		expectedDescriptorCount += ranges[i].size / maxLength;
		if (ranges[i].size % maxLength != 0)
			expectedDescriptorCount++;
	}
	expectedDescriptorCount = min_c(expectedDescriptorCount, maxDescriptors);

	size_t unmapListAllocatedSize = (expectedDescriptorCount - 1)
			* sizeof(scsi_unmap_block_descriptor)
		+ sizeof(scsi_unmap_parameter_list);

	scsi_unmap_parameter_list* unmapList
		= (scsi_unmap_parameter_list*)malloc(unmapListAllocatedSize);
	if (unmapList == NULL)
		return B_NO_MEMORY;

	MemoryDeleter deleter(unmapList);

	status_t status = B_OK;
	uint32 descriptorIndex = 0;
	uint64 trimmedBlocksInRequest = 0;
	memset(unmapList, 0, unmapListAllocatedSize);
	for (uint32 i = 0; i < rangeCount; i++) {
		uint64 lba = ranges[i].lba;
		uint64 length = ranges[i].size;

		if (length == 0)
			continue; // Length of 0 would be ignored by the device anyway

		if (lba > UNMAP_MAX_LBA_VALUE) {
			SHOW_ERROR0(1, "LBA value is too large!"
				" This unmap range will be skipped.");
			continue;
		}

		// Split large ranges if needed.
		// Range length is limited by:
		//   - the UNMAP_MAX_BLOCK_COUNT_VALUE constant
		//   - the total number of LBAs in one UNMAP command is limited by
		//     the MAX UNMAP LBA COUNT field in the Block Limits VPD page
		while (length > 0) {
			uint64 trimLength = min_c(length, maxLength);
			trimLength = min_c(trimLength,
					maxBlocksInRequest - trimmedBlocksInRequest);
			unmapList->blocks[descriptorIndex].lba
				= B_HOST_TO_BENDIAN_INT64(lba);
			unmapList->blocks[descriptorIndex].block_count
				= B_HOST_TO_BENDIAN_INT32(trimLength);
			descriptorIndex++;
			trimmedBlocksInRequest += trimLength;

			// Split into multiple requests if needed.
			// The number of UNMAP block descriptors is limited by:
			//   - the number of block descriptors cannot exceed the
			//     MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT value in the
			//     Block Limits VPD
			//   - the size of our buffer
			//   - what fits in one UNMAP command
			//   - the total number of LBAs in one UNMAP command is limited by
			//     the MAX UNMAP LBA COUNT field in the Block Limits VPD page
			if (descriptorIndex >= maxDescriptors
				|| descriptorIndex >= expectedDescriptorCount
				|| descriptorIndex >= UNMAP_MAX_DESCRIPTORS
				|| trimmedBlocksInRequest >= maxBlocksInRequest
				|| (i == rangeCount - 1 && length <= maxLength))
			{
				uint16 unmapListSize = (descriptorIndex - 1)
						* sizeof(scsi_unmap_block_descriptor)
					+ sizeof(scsi_unmap_parameter_list);
				unmapList->data_length = B_HOST_TO_BENDIAN_INT16(unmapListSize
					- offsetof(scsi_unmap_parameter_list, block_data_length));
				unmapList->block_data_length
					= B_HOST_TO_BENDIAN_INT16(unmapListSize
						- offsetof(scsi_unmap_parameter_list, blocks));

				scsi_cmd_unmap* cmd = (scsi_cmd_unmap*)request->cdb;
				memset(cmd, 0, sizeof(*cmd));
				cmd->opcode = SCSI_OP_UNMAP;
				cmd->length = B_HOST_TO_BENDIAN_INT16(unmapListSize);

				request->flags = SCSI_DIR_OUT;
				request->cdb_length = sizeof(*cmd);
				request->sort = B_BENDIAN_TO_HOST_INT64(
					unmapList->blocks[0].lba);
				request->timeout = device->std_timeout;

				request->data = (uint8*)unmapList;
				request->data_length = unmapListSize;
				request->sg_list = NULL;

				SHOW_FLOW(3, "UNMAP data used %" B_PRIu16
					" of %" B_PRIuSIZE " allocated bytes",
					unmapListSize, unmapListAllocatedSize);

#ifdef DEBUG_TRIM
				uint16 scsiRangeCount = (uint16)B_BENDIAN_TO_HOST_INT16(
					unmapList->block_data_length)
					/ sizeof(scsi_unmap_block_descriptor);
				uint64 count = 0;
				dprintf("TRIM: SCSI: sending an UNMAP command to"
					" the device (blocks):\n");
				for (uint16 i = 0; i < scsiRangeCount; i++) {
					dprintf("[%3" B_PRIu16 "] %" B_PRIu64 " : %" B_PRIu32 "\n",
						i, (uint64)B_BENDIAN_TO_HOST_INT64(
							unmapList->blocks[i].lba),
						(uint32)B_BENDIAN_TO_HOST_INT32(
							unmapList->blocks[i].block_count));
					count += (uint32)B_BENDIAN_TO_HOST_INT32(
							unmapList->blocks[i].block_count);
				}
				if (device->max_unmap_lba_count >= count)
					dprintf("TRIM: SCSI: Previous UNMAP command would fit %"
						B_PRIu64 " more LBAs\n",
						device->max_unmap_lba_count - count);
				else
					dprintf("TRIM: SCSI: Previous UNMAP ranges exceed the"
						" device limit!\n");
#endif /* DEBUG_TRIM */

				status = periph_safe_exec(device, request);

				// the peripheral layer only generates "read" errors
				if (status == B_DEV_READ_ERROR)
					return B_DEV_WRITE_ERROR;
				else if (status != B_OK)
					return status;

				*trimmedBlocks += trimmedBlocksInRequest;

				descriptorIndex = 0;
				trimmedBlocksInRequest = 0;
				memset(unmapList, 0, unmapListSize);
			}

			length -= trimLength;
			lba += trimLength;
		}
	}

	return status;
}


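/*!	Trims the given block ranges using WRITE SAME (16) with the UNMAP
	bit set, splitting a range into multiple commands if it exceeds the
	device or command limits. A single zero-filled block is passed as
	the write data.
*/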
static status_t
trim_writesame16(scsi_periph_device_info* device, scsi_ccb* request,
	scsi_block_range* ranges, uint32 rangeCount, uint64* trimmedBlocks)
{
	status_t status = B_OK;
	*trimmedBlocks = 0;

	for (uint32 i = 0; i < rangeCount; i++) {
		uint64 lba = ranges[i].lba;
		uint64 length = ranges[i].size;

		if (length == 0)
			continue; // length of 0 would mean the rest of the device!

		if (lba > WS16_MAX_LBA_VALUE) {
			SHOW_ERROR0(1, "LBA value is too large!"
				" This unmap range will be skipped.");
			continue;
		}

		// Split the range into multiple requests if needed
		uint64 maxLength = min_c(device->max_unmap_lba_count,
				WS16_MAX_BLOCK_COUNT_VALUE);
		while (length > 0) {
			uint64 trimLength = min_c(length, maxLength);
			if (trimLength == 0) {
				SHOW_ERROR0(1,
					"Error: Length of zero in WRITE SAME (16) detected");
				break;
			}

			void* block = malloc(device->block_size);
			if (block == NULL)
				return B_NO_MEMORY;
			MemoryDeleter deleter(block);
			memset(block, 0, device->block_size);

			scsi_cmd_wsame_16* cmd = (scsi_cmd_wsame_16*)request->cdb;
			memset(cmd, 0, sizeof(*cmd));
			cmd->opcode = SCSI_OP_WRITE_SAME_16;
			cmd->unmap = 1;
			cmd->lba = B_HOST_TO_BENDIAN_INT64(lba);
			cmd->length = B_HOST_TO_BENDIAN_INT32(trimLength);
			//cmd->ndob = 1; // no data is needed if this bit is enabled

			request->flags = SCSI_DIR_OUT;
			request->cdb_length = sizeof(*cmd);
			request->sort = lba;
			request->timeout = device->std_timeout;

			request->data = (uint8*)block;
			request->data_length = device->block_size;
			request->sg_list = NULL;

#ifdef DEBUG_TRIM
			dprintf("TRIM: SCSI: sending a WRITE SAME (16) command to"
				" the device (blocks):\n");
			dprintf("%" B_PRIu64 " : %" B_PRIu32 "\n",
				(uint64)B_BENDIAN_TO_HOST_INT64(cmd->lba),
				(uint32)B_BENDIAN_TO_HOST_INT32(cmd->length));
#endif

			status = periph_safe_exec(device, request);

			// the peripheral layer only generates "read" errors
			if (status == B_DEV_READ_ERROR)
				return B_DEV_WRITE_ERROR;
			else if (status != B_OK)
				return status;

			*trimmedBlocks += trimLength;
			length -= trimLength;
			lba += trimLength;
		}
	}

	return status;
}


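/*!	Same as trim_writesame16(), but uses the WRITE SAME (10) command,
	which limits the LBA to 32 bits and the block count per command to
	16 bits.
*/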
static status_t
trim_writesame10(scsi_periph_device_info* device, scsi_ccb* request,
	scsi_block_range* ranges, uint32 rangeCount, uint64* trimmedBlocks)
{
	status_t status = B_OK;
	*trimmedBlocks = 0;

	for (uint32 i = 0; i < rangeCount; i++) {
		uint64 lba = ranges[i].lba;
		uint64 length = ranges[i].size;

		if (length == 0)
			continue; // length of 0 would mean the rest of the device!

		if (lba > WS10_MAX_LBA_VALUE) {
			SHOW_ERROR0(1, "LBA value is too large!"
				" This unmap range will be skipped.");
			continue;
		}

		// Split the range into multiple requests if needed
		uint64 maxLength = min_c(device->max_unmap_lba_count,
				WS10_MAX_BLOCK_COUNT_VALUE);
		while (length > 0) {
			uint64 trimLength = min_c(length, maxLength);
			if (trimLength == 0) {
				SHOW_ERROR0(1,
					"Error: Length of zero in WRITE SAME (10) detected");
				break;
			}

			void* block = malloc(device->block_size);
			if (block == NULL)
				return B_NO_MEMORY;
			MemoryDeleter deleter(block);
			memset(block, 0, device->block_size);

			scsi_cmd_wsame_10* cmd = (scsi_cmd_wsame_10*)request->cdb;
			memset(cmd, 0, sizeof(*cmd));
			cmd->opcode = SCSI_OP_WRITE_SAME_10;
			cmd->unmap = 1;
			cmd->lba = B_HOST_TO_BENDIAN_INT32(lba);
			cmd->length = B_HOST_TO_BENDIAN_INT16(trimLength);

			request->flags = SCSI_DIR_OUT;
			request->cdb_length = sizeof(*cmd);
			request->sort = lba;
			request->timeout = device->std_timeout;

			request->data = (uint8*)block;
			request->data_length = device->block_size;
			request->sg_list = NULL;

#ifdef DEBUG_TRIM
			dprintf("TRIM: SCSI: sending a WRITE SAME (10) command to"
				" the device (blocks):\n");
			dprintf("%" B_PRIu32 " : %" B_PRIu16 "\n",
				(uint32)B_BENDIAN_TO_HOST_INT32(cmd->lba),
				(uint16)B_BENDIAN_TO_HOST_INT16(cmd->length));
#endif

			status = periph_safe_exec(device, request);

			// the peripheral layer only generates "read" errors
			if (status == B_DEV_READ_ERROR)
				return B_DEV_WRITE_ERROR;
			else if (status != B_OK)
				return status;

			*trimmedBlocks += trimLength;
			length -= trimLength;
			lba += trimLength;
		}
	}

	return status;
}


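/*!	Trims (unmaps) the given ranges of blocks on the device, using the
	command selected by periph_check_capacity(). Returns B_UNSUPPORTED
	if the device does not support any trim command.
*/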
status_t
periph_trim_device(scsi_periph_device_info* device, scsi_ccb* request,
	scsi_block_range* ranges, uint32 rangeCount, uint64* trimmedBlocks)
{
	*trimmedBlocks = 0;

	if (device->unmap_command == TRIM_NONE
		|| device->max_unmap_lba_count == 0
		|| device->max_unmap_descriptor_count == 0)
		return B_UNSUPPORTED;

	switch (device->unmap_command) {
		case TRIM_UNMAP:
			return trim_unmap(device, request, ranges, rangeCount,
				trimmedBlocks);
		case TRIM_WRITESAME16:
			return trim_writesame16(device, request, ranges, rangeCount,
				trimmedBlocks);
		case TRIM_WRITESAME10:
			return trim_writesame10(device, request, ranges, rangeCount,
				trimmedBlocks);
		default:
			return B_UNSUPPORTED;
	}
}