/*	$Id: sender.c,v 1.17 2019/02/21 22:08:53 benno Exp $ */
/*
 * Copyright (c) 2019 Kristaps Dzonsons <kristaps@bsd.lv>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
17#include <sys/mman.h>
18#include <sys/queue.h>
19#include <sys/stat.h>
20
21#include <assert.h>
22#include <fcntl.h>
23#include <inttypes.h>
24#include <poll.h>
25#include <stdlib.h>
26#include <string.h>
27#include <unistd.h>
28
29#include <openssl/md4.h>
30
31#include "extern.h"
32
/*
 * A request from the receiver to download updated file data.
 * Queued (FIFO) as requests arrive on the wire; the phase-change
 * marker is represented by idx == -1 with blks == NULL.
 */
struct	send_dl {
	int32_t		     idx; /* index in our file list, or -1 at end of phase */
	struct blkset	    *blks; /* the sender's block information (NULL for dry runs and phase markers) */
	TAILQ_ENTRY(send_dl) entries; /* linkage on the pending-request queue */
};
41
/*
 * The current file being "updated": sent from sender to receiver.
 * If there is no file being uploaded, "cur" is NULL.
 * "stat" holds per-transfer state (fd, mmap, FSM position); it is
 * wiped by send_up_reset() between files.
 */
struct	send_up {
	struct send_dl	*cur; /* file being updated or NULL */
	struct blkstat	 stat; /* status of file being updated */
};

/* FIFO of pending receiver download requests. */
TAILQ_HEAD(send_dlq, send_dl);
52
53/*
54 * We have finished updating the receiver's file with sender data.
55 * Deallocate and wipe clean all resources required for that.
56 */
57static void
58send_up_reset(struct send_up *p)
59{
60
61	assert(p != NULL);
62
63	/* Free the download request, if applicable. */
64
65	if (p->cur != NULL) {
66		free(p->cur->blks);
67		free(p->cur);
68		p->cur = NULL;
69	}
70
71	/* If we mapped a file for scanning, unmap it and close. */
72
73	if (p->stat.map != MAP_FAILED)
74		munmap(p->stat.map, p->stat.mapsz);
75
76	p->stat.map = MAP_FAILED;
77	p->stat.mapsz = 0;
78
79	if (p->stat.fd != -1)
80		close(p->stat.fd);
81
82	p->stat.fd = -1;
83
84	/* Now clear the in-transfer information. */
85
86	p->stat.offs = 0;
87	p->stat.hint = 0;
88	p->stat.curst = BLKSTAT_NONE;
89}
90
/*
 * This is the bulk of the sender work.
 * Here we tend to an output buffer that responds to receiver requests
 * for data.
 * This does not act upon the output descriptor itself so as to avoid
 * blocking, which otherwise would deadlock the protocol.
 * On success, advances up->stat.curst to the next FSM state and/or
 * appends wire data to the write buffer (*wb, *wbsz, *wbmax).
 * Returns zero on failure, non-zero on success.
 */
static int
send_up_fsm(struct sess *sess, size_t *phase,
	struct send_up *up, void **wb, size_t *wbsz, size_t *wbmax,
	const struct flist *fl)
{
	/* "pos" tracks our write offset into the freshly-grown buffer. */
	size_t		 pos = 0, isz = sizeof(int32_t),
			 dsz = MD4_DIGEST_LENGTH;
	unsigned char	 fmd[MD4_DIGEST_LENGTH];
	off_t		 sz;
	char		 buf[20]; /* blk_recv_ack reply: fixed 20 bytes */

	switch (up->stat.curst) {
	case BLKSTAT_DATA:
		/*
		 * A data segment to be written: buffer both the length
		 * and the data.
		 * If we've finished the transfer, move on to the token;
		 * otherwise, keep sending data.
		 */

		/* Cap each segment at MAX_CHUNK bytes. */
		sz = MINIMUM(MAX_CHUNK,
			up->stat.curlen - up->stat.curpos);
		if (!io_lowbuffer_alloc(sess, wb, wbsz, wbmax, isz)) {
			ERRX1(sess, "io_lowbuffer_alloc");
			return 0;
		}
		io_lowbuffer_int(sess, *wb, &pos, *wbsz, sz);
		if (!io_lowbuffer_alloc(sess, wb, wbsz, wbmax, sz)) {
			ERRX1(sess, "io_lowbuffer_alloc");
			return 0;
		}
		io_lowbuffer_buf(sess, *wb, &pos, *wbsz,
			up->stat.map + up->stat.curpos, sz);

		up->stat.curpos += sz;
		if (up->stat.curpos == up->stat.curlen)
			up->stat.curst = BLKSTAT_TOK;
		return 1;
	case BLKSTAT_TOK:
		/*
		 * The data token following (maybe) a data segment.
		 * These can also come standalone if, say, the file's
		 * being fully written.
		 * It's followed by a hash or another data segment,
		 * depending on the token.
		 */

		if (!io_lowbuffer_alloc(sess, wb, wbsz, wbmax, isz)) {
			ERRX1(sess, "io_lowbuffer_alloc");
			return 0;
		}
		io_lowbuffer_int(sess, *wb,
			&pos, *wbsz, up->stat.curtok);
		/* A non-zero token means more blocks follow. */
		up->stat.curst = up->stat.curtok ?
			BLKSTAT_NEXT : BLKSTAT_HASH;
		return 1;
	case BLKSTAT_HASH:
		/*
		 * The hash following transmission of all file contents.
		 * This is always followed by the state that we're
		 * finished with the file.
		 */

		hash_file(up->stat.map, up->stat.mapsz, fmd, sess);
		if (!io_lowbuffer_alloc(sess, wb, wbsz, wbmax, dsz)) {
			ERRX1(sess, "io_lowbuffer_alloc");
			return 0;
		}
		io_lowbuffer_buf(sess, *wb, &pos, *wbsz, fmd, dsz);
		up->stat.curst = BLKSTAT_DONE;
		return 1;
	case BLKSTAT_DONE:
		/*
		 * The data has been written.
		 * Clear our current send file and allow the block below
		 * to find another.
		 * NOTE(review): the percentage divides by stat.total —
		 * presumably non-zero once a transfer completed; confirm
		 * for the zero-length-file case.
		 */

		if (!sess->opts->dry_run)
			LOG3(sess, "%s: flushed %jd KB total, %.2f%% "
				"uploaded", fl[up->cur->idx].path,
				(intmax_t)up->stat.total / 1024,
				100.0 * up->stat.dirty / up->stat.total);
		send_up_reset(up);
		return 1;
	case BLKSTAT_PHASE:
		/*
		 * This is where we actually stop the algorithm: we're
		 * already at the second phase.
		 */

		send_up_reset(up);
		(*phase)++;
		return 1;
	case BLKSTAT_NEXT:
		/*
		 * Our last case: we need to find the
		 * next block (and token) to transmit to
		 * the receiver.
		 * These will drive the finite state
		 * machine in the first few conditional
		 * blocks of this set.
		 */

		assert(up->stat.fd != -1);
		blk_match(sess, up->cur->blks,
			fl[up->cur->idx].path, &up->stat);
		return 1;
	case BLKSTAT_NONE:
		break;
	}

	assert(BLKSTAT_NONE == up->stat.curst);

	/*
	 * We've either hit the phase change following the last file (or
	 * start, or prior phase change), or we need to prime the next
	 * file for transmission.
	 * We special-case dry-run mode.
	 */

	if (up->cur->idx < 0) {
		/* Phase change: send -1 (twice for newer protocols). */
		if (!io_lowbuffer_alloc(sess, wb, wbsz, wbmax, isz)) {
			ERRX1(sess, "io_lowbuffer_alloc");
			return 0;
		}
		io_lowbuffer_int(sess, *wb, &pos, *wbsz, -1);

		if (sess->opts->server && sess->rver > 27) {
			if (!io_lowbuffer_alloc(sess,
			    wb, wbsz, wbmax, isz)) {
				ERRX1(sess, "io_lowbuffer_alloc");
				return 0;
			}
			io_lowbuffer_int(sess, *wb, &pos, *wbsz, -1);
		}
		up->stat.curst = BLKSTAT_PHASE;
	} else if (sess->opts->dry_run) {
		/* Dry run: echo the index back, no file contents. */
		if (!sess->opts->server)
			LOG1(sess, "%s", fl[up->cur->idx].wpath);

		if (!io_lowbuffer_alloc(sess, wb, wbsz, wbmax, isz)) {
			ERRX1(sess, "io_lowbuffer_alloc");
			return 0;
		}
		io_lowbuffer_int(sess, *wb, &pos, *wbsz, up->cur->idx);
		up->stat.curst = BLKSTAT_DONE;
	} else {
		assert(up->stat.fd != -1);

		/*
		 * FIXME: use the nice output of log_file() and so on in
		 * downloader.c, which means moving this into
		 * BLKSTAT_DONE instead of having it be here.
		 */

		if (!sess->opts->server)
			LOG1(sess, "%s", fl[up->cur->idx].wpath);

		if (!io_lowbuffer_alloc(sess, wb, wbsz, wbmax, 20)) {
			ERRX1(sess, "io_lowbuffer_alloc");
			return 0;
		}
		assert(sizeof(buf) == 20);
		blk_recv_ack(sess, buf, up->cur->blks, up->cur->idx);
		io_lowbuffer_buf(sess, *wb, &pos, *wbsz, buf, 20);

		LOG3(sess, "%s: primed for %jd B total",
			fl[up->cur->idx].path,
			(intmax_t)up->cur->blks->size);
		up->stat.curst = BLKSTAT_NEXT;
	}

	return 1;
}
274
275/*
276 * Enqueue a download request, getting it off the read channel as
277 * quickly a possible.
278 * This frees up the read channel for further incoming requests.
279 * We'll handle each element in turn, up to and including the last
280 * request (phase change), which is always a -1 idx.
281 * Returns zero on failure, non-zero on success.
282 */
283static int
284send_dl_enqueue(struct sess *sess, struct send_dlq *q,
285	int32_t idx, const struct flist *fl, size_t flsz, int fd)
286{
287	struct send_dl	*s;
288
289	/* End-of-phase marker. */
290
291	if (idx == -1) {
292		if ((s = calloc(1, sizeof(struct send_dl))) == NULL) {
293			ERR(sess, "calloc");
294			return 0;
295		}
296		s->idx = -1;
297		s->blks = NULL;
298		TAILQ_INSERT_TAIL(q, s, entries);
299		return 1;
300	}
301
302	/* Validate the index. */
303
304	if (idx < 0 || (uint32_t)idx >= flsz) {
305		ERRX(sess, "file index out of bounds: invalid %"
306			PRId32 " out of %zu", idx, flsz);
307		return 0;
308	} else if (S_ISDIR(fl[idx].st.mode)) {
309		ERRX(sess, "blocks requested for "
310			"directory: %s", fl[idx].path);
311		return 0;
312	} else if (S_ISLNK(fl[idx].st.mode)) {
313		ERRX(sess, "blocks requested for "
314			"symlink: %s", fl[idx].path);
315		return 0;
316	} else if (!S_ISREG(fl[idx].st.mode)) {
317		ERRX(sess, "blocks requested for "
318			"special: %s", fl[idx].path);
319		return 0;
320	}
321
322	if ((s = calloc(1, sizeof(struct send_dl))) == NULL) {
323		ERR(sess, "callloc");
324		return 0;
325	}
326	s->idx = idx;
327	s->blks = NULL;
328	TAILQ_INSERT_TAIL(q, s, entries);
329
330	/*
331	 * This blocks til the full blockset has been read.
332	 * That's ok, because the most important thing is getting data
333	 * off the wire.
334	 */
335
336	if (!sess->opts->dry_run) {
337		s->blks = blk_recv(sess, fd, fl[idx].path);
338		if (s->blks == NULL) {
339			ERRX1(sess, "blk_recv");
340			return 0;
341		}
342	}
343	return 1;
344}
345
/*
 * A client sender manages the read-only source files and sends data to
 * the receiver as requested.
 * First it sends its list of files, then it waits for the server to
 * request updates to individual files.
 * It queues requests for updates as soon as it receives them.
 * All socket writes happen in exactly one place in the poll loop to
 * keep the protocol deadlock-free.
 * Returns zero on failure, non-zero on success.
 *
 * Pledges: stdio, rpath, unveil.
 */
int
rsync_sender(struct sess *sess, int fdin,
	int fdout, size_t argc, char **argv)
{
	struct flist	   *fl = NULL;
	const struct flist *f;
	size_t		    i, flsz = 0, phase = 0, excl;
	int		    rc = 0, c;
	int32_t		    idx;
	struct pollfd	    pfd[3]; /* [0] wire in, [1] wire out, [2] local file */
	struct send_dlq	    sdlq; /* queued receiver requests */
	struct send_dl	   *dl;
	struct send_up	    up; /* the one in-flight transfer */
	struct stat	    st;
	void		   *wbuf = NULL; /* pending output buffer */
	size_t		    wbufpos = 0, wbufsz = 0, wbufmax = 0;
	ssize_t		    ssz;

	if (pledge("stdio getpw rpath unveil", NULL) == -1) {
		ERR(sess, "pledge");
		return 0;
	}

	memset(&up, 0, sizeof(struct send_up));
	TAILQ_INIT(&sdlq);
	up.stat.fd = -1;
	up.stat.map = MAP_FAILED;

	/*
	 * Generate the list of files we want to send from our
	 * command-line input.
	 * This will also remove all invalid files.
	 */

	if (!flist_gen(sess, argc, argv, &fl, &flsz)) {
		ERRX1(sess, "flist_gen");
		goto out;
	}

	/* Client sends zero-length exclusions if deleting. */

	if (!sess->opts->server && sess->opts->del &&
	     !io_write_int(sess, fdout, 0)) {
		ERRX1(sess, "io_write_int");
		goto out;
	}

	/*
	 * Then the file list in any mode.
	 * Finally, the IO error (always zero for us).
	 */

	if (!flist_send(sess, fdin, fdout, fl, flsz)) {
		ERRX1(sess, "flist_send");
		goto out;
	} else if (!io_write_int(sess, fdout, 0)) {
		ERRX1(sess, "io_write_int");
		goto out;
	}

	/* Exit if we're the server with zero files. */

	if (flsz == 0 && sess->opts->server) {
		WARNX(sess, "sender has empty file list: exiting");
		rc = 1;
		goto out;
	} else if (!sess->opts->server)
		LOG1(sess, "Transfer starting: %zu files", flsz);

	/*
	 * If we're the server, read our exclusion list.
	 * This is always 0 for now.
	 */

	if (sess->opts->server) {
		if (!io_read_size(sess, fdin, &excl)) {
			ERRX1(sess, "io_read_size");
			goto out;
		} else if (excl != 0) {
			ERRX1(sess, "exclusion list is non-empty");
			goto out;
		}
	}

	/*
	 * Set up our poll events.
	 * We start by polling only in receiver requests, enabling other
	 * poll events on demand.
	 * A -1 fd disables that slot until we have work for it.
	 */

	pfd[0].fd = fdin; /* from receiver */
	pfd[0].events = POLLIN;
	pfd[1].fd = -1; /* to receiver */
	pfd[1].events = POLLOUT;
	pfd[2].fd = -1; /* from local file */
	pfd[2].events = POLLIN;

	for (;;) {
		assert(pfd[0].fd != -1);
		if ((c = poll(pfd, 3, POLL_TIMEOUT)) == -1) {
			ERR(sess, "poll");
			goto out;
		} else if (c == 0) {
			ERRX(sess, "poll: timeout");
			goto out;
		}
		for (i = 0; i < 3; i++)
			if (pfd[i].revents & (POLLERR|POLLNVAL)) {
				ERRX(sess, "poll: bad fd");
				goto out;
			} else if (pfd[i].revents & POLLHUP) {
				ERRX(sess, "poll: hangup");
				goto out;
			}

		/*
		 * If we have a request coming down off the wire, pull
		 * it in as quickly as possible into our buffer.
		 * This unclogs the socket buffers so the data can flow.
		 * FIXME: if we're multiplexing, we might stall here if
		 * there's only a log message and no actual data.
		 * This can be fixed by doing a conditional test.
		 */

		if (pfd[0].revents & POLLIN)
			for (;;) {
				if (!io_read_int(sess, fdin, &idx)) {
					ERRX1(sess, "io_read_int");
					goto out;
				}
				if (!send_dl_enqueue(sess,
				    &sdlq, idx, fl, flsz, fdin)) {
					ERRX1(sess, "send_dl_enqueue");
					goto out;
				}
				/* Drain until no more data is pending. */
				c = io_read_check(sess, fdin);
				if (c < 0) {
					ERRX1(sess, "io_read_check");
					goto out;
				} else if (c == 0)
					break;
			}

		/*
		 * One of our local files has been opened in response
		 * to a receiver request and now we can map it.
		 * We'll respond to the event by looking at the map when
		 * the writer is available.
		 * Here we also enable the poll event for output.
		 */

		if (pfd[2].revents & POLLIN) {
			assert(up.cur != NULL);
			assert(up.stat.fd != -1);
			assert(up.stat.map == MAP_FAILED);
			assert(up.stat.mapsz == 0);
			f = &fl[up.cur->idx];

			if (fstat(up.stat.fd, &st) == -1) {
				ERR(sess, "%s: fstat", f->path);
				goto out;
			}

			/*
			 * If the file is zero-length, the map will
			 * fail, but either way we want to unset that
			 * we're waiting for the file to open and set
			 * that we're ready for the output channel.
			 */

			if ((up.stat.mapsz = st.st_size) > 0) {
				up.stat.map = mmap(NULL,
					up.stat.mapsz, PROT_READ,
					MAP_SHARED, up.stat.fd, 0);
				if (up.stat.map == MAP_FAILED) {
					ERR(sess, "%s: mmap", f->path);
					goto out;
				}
			}

			pfd[2].fd = -1;
			pfd[1].fd = fdout;
		}

		/*
		 * If we have buffers waiting to write, write them out
		 * as soon as we can in a non-blocking fashion.
		 * We must not be waiting for any local files.
		 * ALL WRITES MUST HAPPEN HERE.
		 * This keeps the sender deadlock-free.
		 */

		if ((pfd[1].revents & POLLOUT) && wbufsz > 0) {
			assert(pfd[2].fd == -1);
			assert(wbufsz - wbufpos);
			ssz = write(fdout,
				wbuf + wbufpos, wbufsz - wbufpos);
			if (ssz < 0) {
				ERR(sess, "write");
				goto out;
			}
			wbufpos += ssz;
			if (wbufpos == wbufsz)
				wbufpos = wbufsz = 0;
			/* Don't let the FSM below run on the same event. */
			pfd[1].revents &= ~POLLOUT;

			/* This is usually in io.c... */

			sess->total_write += ssz;
		}

		/*
		 * Engage the FSM for the current transfer.
		 * If our phase changes, stop processing.
		 * Only runs when the write buffer is fully drained.
		 */

		if (pfd[1].revents & POLLOUT && up.cur != NULL) {
			assert(pfd[2].fd == -1);
			assert(wbufpos == 0 && wbufsz == 0);
			if (!send_up_fsm(sess, &phase,
			    &up, &wbuf, &wbufsz, &wbufmax, fl)) {
				ERRX1(sess, "send_up_fsm");
				goto out;
			} else if (phase > 1)
				break;
		}

		/*
		 * Incoming queue management.
		 * If we have no queue component that we're waiting on,
		 * then pull off the receiver-request queue and start
		 * processing the request.
		 */

		if (up.cur == NULL) {
			assert(pfd[2].fd == -1);
			assert(up.stat.fd == -1);
			assert(up.stat.map == MAP_FAILED);
			assert(up.stat.mapsz == 0);
			assert(wbufsz == 0 && wbufpos == 0);
			pfd[1].fd = -1;

			/*
			 * If there's nothing in the queue, then keep
			 * the output channel disabled and wait for
			 * whatever comes next from the reader.
			 */

			if ((up.cur = TAILQ_FIRST(&sdlq)) == NULL)
				continue;

			TAILQ_REMOVE(&sdlq, up.cur, entries);

			/*
			 * End of phase: enable channel to receiver.
			 * We'll need our output buffer enabled in order
			 * to process this event.
			 */

			if (up.cur->idx == -1) {
				pfd[1].fd = fdout;
				continue;
			}

			/*
			 * Non-blocking open of file.
			 * This will be picked up in the state machine
			 * block of not being primed.
			 */

			up.stat.fd = open(fl[up.cur->idx].path,
				O_RDONLY|O_NONBLOCK, 0);
			if (up.stat.fd == -1) {
				ERR(sess, "%s: open", fl[up.cur->idx].path);
				goto out;
			}
			pfd[2].fd = up.stat.fd;
		}
	}

	if (!TAILQ_EMPTY(&sdlq)) {
		ERRX(sess, "phases complete with files still queued");
		goto out;
	}

	if (!sess_stats_send(sess, fdout)) {
		ERRX1(sess, "sess_stats_end");
		goto out;
	}

	/* Final "goodbye" message. */

	if (!io_read_int(sess, fdin, &idx)) {
		ERRX1(sess, "io_read_int");
		goto out;
	} else if (idx != -1) {
		ERRX(sess, "read incorrect update complete ack");
		goto out;
	}

	LOG2(sess, "sender finished updating");
	rc = 1;
out:
	/* Release transfer state, queued requests, list, and buffer. */
	send_up_reset(&up);
	while ((dl = TAILQ_FIRST(&sdlq)) != NULL) {
		TAILQ_REMOVE(&sdlq, dl, entries);
		free(dl->blks);
		free(dl);
	}
	flist_free(fl, flsz);
	free(wbuf);
	return rc;
}
669