/*
 * Copyright 2004-2006, Axel Dörfler, axeld@pinc-software.de. All rights reserved.
 * Copyright 2002/03, Thomas Kurschel. All rights reserved.
 *
 * Distributed under the terms of the MIT License.
 */

/*
	VM helper functions.

	Important assumption: get_memory_map() must combine adjacent
	physical pages, so contiguous memory always leads to an S/G
	list of length one.
*/

#include "KernelExport_ext.h"
#include "wrapper.h"

#include <string.h>

#include <algorithm>


/**	Get the S/G list of an iovec array.
 *	TBD: this should be moved somewhere into the kernel
 */

status_t
get_iovec_memory_map(iovec *vec, size_t vec_count, size_t vec_offset, size_t len,
	physical_entry *map, size_t max_entries, size_t *num_entries, size_t *mapped_len)
{
	size_t cur_idx;
	size_t left_len;

	SHOW_FLOW(3, "vec_count=%lu, vec_offset=%lu, len=%lu, max_entries=%lu",
		vec_count, vec_offset, len, max_entries);

	// skip iovec blocks if needed
	while (vec_count > 0 && vec_offset > vec->iov_len) {
		vec_offset -= vec->iov_len;
		--vec_count;
		++vec;
	}

	for (left_len = len, cur_idx = 0; left_len > 0 && vec_count > 0 && cur_idx < max_entries;) {
		char *range_start;
		size_t range_len;
		status_t res;
		size_t cur_num_entries, cur_mapped_len;
		uint32 tmp_idx;

		SHOW_FLOW(3, "left_len=%d, vec_count=%d, cur_idx=%d",
			(int)left_len, (int)vec_count, (int)cur_idx);

		// map one iovec
		range_start = (char *)vec->iov_base + vec_offset;
		range_len = std::min(vec->iov_len - vec_offset, left_len);

		SHOW_FLOW(3, "range_start=%" B_PRIxADDR ", range_len=%" B_PRIxSIZE,
			(addr_t)range_start, range_len);

		vec_offset = 0;

		if ((res = get_memory_map(range_start, range_len, &map[cur_idx],
				max_entries - cur_idx)) != B_OK) {
			// according to the docs, no error is ever reported - argh!
			SHOW_ERROR(1, "invalid iovec passed (%s)", strerror(res));
			return res;
		}

		// stupid: get_memory_map() neither tells how many S/G blocks
		// were used nor whether there were enough S/G blocks at all;
		// -> determine that manually
		// TODO: Use get_memory_map_etc()!
		cur_mapped_len = 0;
		cur_num_entries = 0;

		for (tmp_idx = cur_idx; tmp_idx < max_entries; ++tmp_idx) {
			if (map[tmp_idx].size == 0)
				break;

			cur_mapped_len += map[tmp_idx].size;
			++cur_num_entries;
		}

		if (cur_mapped_len == 0) {
			panic("get_memory_map() returned empty list; left_len=%d, idx=%d/%d",
				(int)left_len, (int)cur_idx, (int)max_entries);
			SHOW_ERROR(2, "get_memory_map() returned empty list; left_len=%d, idx=%d/%d",
				(int)left_len, (int)cur_idx, (int)max_entries);
			return B_ERROR;
		}

		SHOW_FLOW(3, "cur_num_entries=%d, cur_mapped_len=%x",
			(int)cur_num_entries, (int)cur_mapped_len);

		// try to combine with previous S/G block
		if (cur_num_entries > 0 && cur_idx > 0
			&& map[cur_idx].address
				== map[cur_idx - 1].address + map[cur_idx - 1].size) {
			SHOW_FLOW0(3, "combine with previous chunk");
			map[cur_idx - 1].size += map[cur_idx].size;
			// the source and destination ranges overlap, so memmove() is
			// required here rather than memcpy()
			memmove(&map[cur_idx], &map[cur_idx + 1],
				(cur_num_entries - 1) * sizeof(map[0]));
			--cur_num_entries;
		}

		cur_idx += cur_num_entries;
		left_len -= cur_mapped_len;

		// advance iovec if current one is described completely
		if (cur_mapped_len == range_len) {
			++vec;
			--vec_count;
		}
	}

	*num_entries = cur_idx;
	*mapped_len = len - left_len;

	SHOW_FLOW(3, "num_entries=%d, mapped_len=%x",
		(int)*num_entries, (int)*mapped_len);

	return B_OK;
}


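/*
	Illustrative sketch, not compiled in: how a driver might use
	get_iovec_memory_map() to turn the iovecs of a request into a physical
	S/G list before programming a DMA engine. The caller name, the
	MAX_SG_ENTRIES limit and the "split the transfer" policy are
	hypothetical; only get_iovec_memory_map() itself is defined above.
*/
#if 0
#define MAX_SG_ENTRIES 32
	// hypothetical per-request S/G table size

static status_t
fill_dma_sg_list(iovec *vecs, size_t vecCount, size_t offset, size_t length)
{
	physical_entry sgList[MAX_SG_ENTRIES];
	size_t numEntries, mappedLength;

	status_t res = get_iovec_memory_map(vecs, vecCount, offset, length,
		sgList, MAX_SG_ENTRIES, &numEntries, &mappedLength);
	if (res != B_OK)
		return res;

	// a short mapping means the S/G table was too small for the whole
	// request; a real driver would program sgList, transfer mappedLength
	// bytes and continue at offset + mappedLength
	if (mappedLength < length)
		SHOW_FLOW(3, "short mapping: %lu of %lu bytes", mappedLength, length);

	// hand sgList[0 .. numEntries - 1] to the controller here
	return B_OK;
}
#endif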