1/*
2 * Copyright 2004-2006, Axel D��rfler, axeld@pinc-software.de. All rights reserved.
3 * Copyright 2002/03, Thomas Kurschel. All rights reserved.
4 *
5 * Distributed under the terms of the MIT License.
6 */
7
8/*
9	VM helper functions.
10
11	Important assumption: get_memory_map must combine adjacent
12	physical pages, so contignous memory always leads to a S/G
13	list of length one.
14*/
15
16#include "KernelExport_ext.h"
17#include "wrapper.h"
18
19#include <string.h>
20
21#include <algorithm>
22
23
24/**	get sg list of iovec
25 *	TBD: this should be moved to somewhere in kernel
26 */
27
28status_t
29get_iovec_memory_map(iovec *vec, size_t vec_count, size_t vec_offset, size_t len,
30	physical_entry *map, uint32 max_entries, uint32 *num_entries, size_t *mapped_len)
31{
32	uint32 cur_idx;
33	size_t left_len;
34
35	SHOW_FLOW(3, "vec_count=%" B_PRIuSIZE ", vec_offset=%" B_PRIuSIZE ", len=%"
36		B_PRIuSIZE ", max_entries=%" B_PRIu32, vec_count, vec_offset, len,
37		max_entries);
38
39	// skip iovec blocks if needed
40	while (vec_count > 0 && vec_offset > vec->iov_len) {
41		vec_offset -= vec->iov_len;
42		--vec_count;
43		++vec;
44	}
45
46	for (left_len = len, cur_idx = 0; left_len > 0 && vec_count > 0
47		&& cur_idx < max_entries;) {
48		char *range_start;
49		size_t range_len;
50		status_t res;
51		uint32 cur_num_entries, cur_mapped_len;
52		uint32 tmp_idx;
53
54		SHOW_FLOW( 3, "left_len=%d, vec_count=%d, cur_idx=%" B_PRIu32,
55			(int)left_len, (int)vec_count, cur_idx );
56
57		// map one iovec
58		range_start = (char *)vec->iov_base + vec_offset;
59		range_len = std::min(vec->iov_len - vec_offset, left_len);
60
61		SHOW_FLOW( 3, "range_start=%" B_PRIxADDR ", range_len=%" B_PRIxSIZE,
62			(addr_t)range_start, range_len );
63
64		vec_offset = 0;
65
66		cur_num_entries = max_entries - cur_idx;
67		if ((res = get_memory_map_etc(B_CURRENT_TEAM, range_start, range_len,
68				&map[cur_idx], &cur_num_entries)) != B_OK) {
69			// according to docu, no error is ever reported - argh!
70			SHOW_ERROR(1, "invalid io_vec passed (%s)", strerror(res));
71			return res;
72		}
73
74		cur_mapped_len = 0;
75
76		for (tmp_idx = cur_idx; tmp_idx < cur_idx + cur_num_entries; ++tmp_idx)
77			cur_mapped_len += map[tmp_idx].size;
78
79		if (cur_mapped_len == 0) {
80			panic("get_memory_map() returned empty list; left_len=%d, idx=%"
81				B_PRIu32 "/%" B_PRIu32, (int)left_len, cur_idx, max_entries);
82			SHOW_ERROR(2, "get_memory_map() returned empty list; left_len=%d, "
83				"idx=%" B_PRIu32 "/%" B_PRIu32, (int)left_len, cur_idx,
84				max_entries);
85			return B_ERROR;
86		}
87
88		SHOW_FLOW( 3, "cur_num_entries=%" B_PRIu32 ", cur_mapped_len=%x",
89			cur_num_entries, (int)cur_mapped_len );
90
91		// try to combine with previous sg block
92		if (cur_num_entries > 0 && cur_idx > 0
93			&& map[cur_idx].address
94				== map[cur_idx - 1].address + map[cur_idx - 1].size) {
95			SHOW_FLOW0( 3, "combine with previous chunk" );
96			map[cur_idx - 1].size += map[cur_idx].size;
97			memcpy(&map[cur_idx], &map[cur_idx + 1],
98				(cur_num_entries - 1) * sizeof(map[0]));
99			--cur_num_entries;
100		}
101
102		cur_idx += cur_num_entries;
103		left_len -= cur_mapped_len;
104
105		// advance iovec if current one is described completely
106		if (cur_mapped_len == range_len) {
107			++vec;
108			--vec_count;
109		}
110	}
111
112	*num_entries = cur_idx;
113	*mapped_len = len - left_len;
114
115	SHOW_FLOW( 3, "num_entries=%" B_PRIu32 ", mapped_len=%" B_PRIxSIZE,
116		*num_entries, *mapped_len);
117
118	return B_OK;
119}
120
121