1/* Handling of recursive HTTP retrieving. 2 Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 3 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 Free Software Foundation, 4 Inc. 5 6This file is part of GNU Wget. 7 8GNU Wget is free software; you can redistribute it and/or modify 9it under the terms of the GNU General Public License as published by 10the Free Software Foundation; either version 3 of the License, or 11 (at your option) any later version. 12 13GNU Wget is distributed in the hope that it will be useful, 14but WITHOUT ANY WARRANTY; without even the implied warranty of 15MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16GNU General Public License for more details. 17 18You should have received a copy of the GNU General Public License 19along with Wget. If not, see <http://www.gnu.org/licenses/>. 20 21Additional permission under GNU GPL version 3 section 7 22 23If you modify this program, or any covered work, by linking or 24combining it with the OpenSSL project's OpenSSL library (or a 25modified version of that library), containing parts covered by the 26terms of the OpenSSL or SSLeay licenses, the Free Software Foundation 27grants you additional permission to convey the resulting work. 28Corresponding Source for a non-source form of such a combination 29shall include the source code for the parts of OpenSSL used as well 30as that of the covered work. */ 31 32#include "wget.h" 33 34#include <stdio.h> 35#include <stdlib.h> 36#include <string.h> 37#include <unistd.h> 38#include <errno.h> 39#include <assert.h> 40 41#include "url.h" 42#include "recur.h" 43#include "utils.h" 44#include "retr.h" 45#include "ftp.h" 46#include "host.h" 47#include "hash.h" 48#include "res.h" 49#include "convert.h" 50#include "html-url.h" 51#include "css-url.h" 52#include "spider.h" 53 54/* Functions for maintaining the URL queue. */ 55 56struct queue_element { 57 const char *url; /* the URL to download */ 58 const char *referer; /* the referring document */ 59 int depth; /* the depth */ 60 bool html_allowed; /* whether the document is allowed to 61 be treated as HTML. */ 62 struct iri *iri; /* sXXXav */ 63 bool css_allowed; /* whether the document is allowed to 64 be treated as CSS. */ 65 struct queue_element *next; /* next element in queue */ 66}; 67 68struct url_queue { 69 struct queue_element *head; 70 struct queue_element *tail; 71 int count, maxcount; 72}; 73 74/* Create a URL queue. */ 75 76static struct url_queue * 77url_queue_new (void) 78{ 79 struct url_queue *queue = xnew0 (struct url_queue); 80 return queue; 81} 82 83/* Delete a URL queue. */ 84 85static void 86url_queue_delete (struct url_queue *queue) 87{ 88 xfree (queue); 89} 90 91/* Enqueue a URL in the queue. The queue is FIFO: the items will be 92 retrieved ("dequeued") from the queue in the order they were placed 93 into it. */ 94 95static void 96url_enqueue (struct url_queue *queue, struct iri *i, 97 const char *url, const char *referer, int depth, 98 bool html_allowed, bool css_allowed) 99{ 100 struct queue_element *qel = xnew (struct queue_element); 101 qel->iri = i; 102 qel->url = url; 103 qel->referer = referer; 104 qel->depth = depth; 105 qel->html_allowed = html_allowed; 106 qel->css_allowed = css_allowed; 107 qel->next = NULL; 108 109 ++queue->count; 110 if (queue->count > queue->maxcount) 111 queue->maxcount = queue->count; 112 113 DEBUGP (("Enqueuing %s at depth %d\n", 114 quotearg_n_style (0, escape_quoting_style, url), depth)); 115 DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount)); 116 117 if (i) 118 DEBUGP (("[IRI Enqueuing %s with %s\n", quote_n (0, url), 119 i->uri_encoding ? quote_n (1, i->uri_encoding) : "None")); 120 121 if (queue->tail) 122 queue->tail->next = qel; 123 queue->tail = qel; 124 125 if (!queue->head) 126 queue->head = queue->tail; 127} 128 129/* Take a URL out of the queue. Return true if this operation 130 succeeded, or false if the queue is empty. */ 131 132static bool 133url_dequeue (struct url_queue *queue, struct iri **i, 134 const char **url, const char **referer, int *depth, 135 bool *html_allowed, bool *css_allowed) 136{ 137 struct queue_element *qel = queue->head; 138 139 if (!qel) 140 return false; 141 142 queue->head = queue->head->next; 143 if (!queue->head) 144 queue->tail = NULL; 145 146 *i = qel->iri; 147 *url = qel->url; 148 *referer = qel->referer; 149 *depth = qel->depth; 150 *html_allowed = qel->html_allowed; 151 *css_allowed = qel->css_allowed; 152 153 --queue->count; 154 155 DEBUGP (("Dequeuing %s at depth %d\n", 156 quotearg_n_style (0, escape_quoting_style, qel->url), qel->depth)); 157 DEBUGP (("Queue count %d, maxcount %d.\n", queue->count, queue->maxcount)); 158 159 xfree (qel); 160 return true; 161} 162 163static bool download_child_p (const struct urlpos *, struct url *, int, 164 struct url *, struct hash_table *, struct iri *); 165static bool descend_redirect_p (const char *, struct url *, int, 166 struct url *, struct hash_table *, struct iri *); 167 168 169/* Retrieve a part of the web beginning with START_URL. This used to 170 be called "recursive retrieval", because the old function was 171 recursive and implemented depth-first search. retrieve_tree on the 172 other hand implements breadth-search traversal of the tree, which 173 results in much nicer ordering of downloads. 174 175 The algorithm this function uses is simple: 176 177 1. put START_URL in the queue. 178 2. while there are URLs in the queue: 179 180 3. get next URL from the queue. 181 4. download it. 182 5. if the URL is HTML and its depth does not exceed maximum depth, 183 get the list of URLs embedded therein. 184 6. for each of those URLs do the following: 185 186 7. if the URL is not one of those downloaded before, and if it 187 satisfies the criteria specified by the various command-line 188 options, add it to the queue. */ 189 190uerr_t 191retrieve_tree (struct url *start_url_parsed, struct iri *pi) 192{ 193 uerr_t status = RETROK; 194 195 /* The queue of URLs we need to load. */ 196 struct url_queue *queue; 197 198 /* The URLs we do not wish to enqueue, because they are already in 199 the queue, but haven't been downloaded yet. */ 200 struct hash_table *blacklist; 201 202 struct iri *i = iri_new (); 203 204#define COPYSTR(x) (x) ? xstrdup(x) : NULL; 205 /* Duplicate pi struct if not NULL */ 206 if (pi) 207 { 208 i->uri_encoding = COPYSTR (pi->uri_encoding); 209 i->content_encoding = COPYSTR (pi->content_encoding); 210 i->utf8_encode = pi->utf8_encode; 211 } 212 else 213 set_uri_encoding (i, opt.locale, true); 214#undef COPYSTR 215 216 queue = url_queue_new (); 217 blacklist = make_string_hash_table (0); 218 219 /* Enqueue the starting URL. Use start_url_parsed->url rather than 220 just URL so we enqueue the canonical form of the URL. */ 221 url_enqueue (queue, i, xstrdup (start_url_parsed->url), NULL, 0, true, 222 false); 223 string_set_add (blacklist, start_url_parsed->url); 224 225 while (1) 226 { 227 bool descend = false; 228 char *url, *referer, *file = NULL; 229 int depth; 230 bool html_allowed, css_allowed; 231 bool is_css = false; 232 bool dash_p_leaf_HTML = false; 233 234 if (opt.quota && total_downloaded_bytes > opt.quota) 235 break; 236 if (status == FWRITEERR) 237 break; 238 239 /* Get the next URL from the queue... */ 240 241 if (!url_dequeue (queue, (struct iri **) &i, 242 (const char **)&url, (const char **)&referer, 243 &depth, &html_allowed, &css_allowed)) 244 break; 245 246 /* ...and download it. Note that this download is in most cases 247 unconditional, as download_child_p already makes sure a file 248 doesn't get enqueued twice -- and yet this check is here, and 249 not in download_child_p. This is so that if you run `wget -r 250 URL1 URL2', and a random URL is encountered once under URL1 251 and again under URL2, but at a different (possibly smaller) 252 depth, we want the URL's children to be taken into account 253 the second time. */ 254 if (dl_url_file_map && hash_table_contains (dl_url_file_map, url)) 255 { 256 bool is_css_bool; 257 258 file = xstrdup (hash_table_get (dl_url_file_map, url)); 259 260 DEBUGP (("Already downloaded \"%s\", reusing it from \"%s\".\n", 261 url, file)); 262 263 if ((is_css_bool = (css_allowed 264 && downloaded_css_set 265 && string_set_contains (downloaded_css_set, file))) 266 || (html_allowed 267 && downloaded_html_set 268 && string_set_contains (downloaded_html_set, file))) 269 { 270 descend = true; 271 is_css = is_css_bool; 272 } 273 } 274 else 275 { 276 int dt = 0, url_err; 277 char *redirected = NULL; 278 struct url *url_parsed = url_parse (url, &url_err, i, true); 279 280 status = retrieve_url (url_parsed, url, &file, &redirected, referer, 281 &dt, false, i, true); 282 283 if (html_allowed && file && status == RETROK 284 && (dt & RETROKF) && (dt & TEXTHTML)) 285 { 286 descend = true; 287 is_css = false; 288 } 289 290 /* a little different, css_allowed can override content type 291 lots of web servers serve css with an incorrect content type 292 */ 293 if (file && status == RETROK 294 && (dt & RETROKF) && 295 ((dt & TEXTCSS) || css_allowed)) 296 { 297 descend = true; 298 is_css = true; 299 } 300 301 if (redirected) 302 { 303 /* We have been redirected, possibly to another host, or 304 different path, or wherever. Check whether we really 305 want to follow it. */ 306 if (descend) 307 { 308 if (!descend_redirect_p (redirected, url_parsed, depth, 309 start_url_parsed, blacklist, i)) 310 descend = false; 311 else 312 /* Make sure that the old pre-redirect form gets 313 blacklisted. */ 314 string_set_add (blacklist, url); 315 } 316 317 xfree (url); 318 url = redirected; 319 } 320 else 321 { 322 xfree (url); 323 url = xstrdup (url_parsed->url); 324 } 325 url_free(url_parsed); 326 } 327 328 if (opt.spider) 329 { 330 visited_url (url, referer); 331 } 332 333 if (descend 334 && depth >= opt.reclevel && opt.reclevel != INFINITE_RECURSION) 335 { 336 if (opt.page_requisites 337 && (depth == opt.reclevel || depth == opt.reclevel + 1)) 338 { 339 /* When -p is specified, we are allowed to exceed the 340 maximum depth, but only for the "inline" links, 341 i.e. those that are needed to display the page. 342 Originally this could exceed the depth at most by 343 one, but we allow one more level so that the leaf 344 pages that contain frames can be loaded 345 correctly. */ 346 dash_p_leaf_HTML = true; 347 } 348 else 349 { 350 /* Either -p wasn't specified or it was and we've 351 already spent the two extra (pseudo-)levels that it 352 affords us, so we need to bail out. */ 353 DEBUGP (("Not descending further; at depth %d, max. %d.\n", 354 depth, opt.reclevel)); 355 descend = false; 356 } 357 } 358 359 /* If the downloaded document was HTML or CSS, parse it and enqueue the 360 links it contains. */ 361 362 if (descend) 363 { 364 bool meta_disallow_follow = false; 365 struct urlpos *children 366 = is_css ? get_urls_css_file (file, url) : 367 get_urls_html (file, url, &meta_disallow_follow, i); 368 369 if (opt.use_robots && meta_disallow_follow) 370 { 371 free_urlpos (children); 372 children = NULL; 373 } 374 375 if (children) 376 { 377 struct urlpos *child = children; 378 struct url *url_parsed = url_parse (url, NULL, i, true); 379 struct iri *ci; 380 char *referer_url = url; 381 bool strip_auth = (url_parsed != NULL 382 && url_parsed->user != NULL); 383 assert (url_parsed != NULL); 384 385 /* Strip auth info if present */ 386 if (strip_auth) 387 referer_url = url_string (url_parsed, URL_AUTH_HIDE); 388 389 for (; child; child = child->next) 390 { 391 if (child->ignore_when_downloading) 392 continue; 393 if (dash_p_leaf_HTML && !child->link_inline_p) 394 continue; 395 if (download_child_p (child, url_parsed, depth, start_url_parsed, 396 blacklist, i)) 397 { 398 ci = iri_new (); 399 set_uri_encoding (ci, i->content_encoding, false); 400 url_enqueue (queue, ci, xstrdup (child->url->url), 401 xstrdup (referer_url), depth + 1, 402 child->link_expect_html, 403 child->link_expect_css); 404 /* We blacklist the URL we have enqueued, because we 405 don't want to enqueue (and hence download) the 406 same URL twice. */ 407 string_set_add (blacklist, child->url->url); 408 } 409 } 410 411 if (strip_auth) 412 xfree (referer_url); 413 url_free (url_parsed); 414 free_urlpos (children); 415 } 416 } 417 418 if (file 419 && (opt.delete_after 420 || opt.spider /* opt.recursive is implicitely true */ 421 || !acceptable (file))) 422 { 423 /* Either --delete-after was specified, or we loaded this 424 (otherwise unneeded because of --spider or rejected by -R) 425 HTML file just to harvest its hyperlinks -- in either case, 426 delete the local file. */ 427 DEBUGP (("Removing file due to %s in recursive_retrieve():\n", 428 opt.delete_after ? "--delete-after" : 429 (opt.spider ? "--spider" : 430 "recursive rejection criteria"))); 431 logprintf (LOG_VERBOSE, 432 (opt.delete_after || opt.spider 433 ? _("Removing %s.\n") 434 : _("Removing %s since it should be rejected.\n")), 435 file); 436 if (unlink (file)) 437 logprintf (LOG_NOTQUIET, "unlink: %s\n", strerror (errno)); 438 logputs (LOG_VERBOSE, "\n"); 439 register_delete_file (file); 440 } 441 442 xfree (url); 443 xfree_null (referer); 444 xfree_null (file); 445 iri_free (i); 446 } 447 448 /* If anything is left of the queue due to a premature exit, free it 449 now. */ 450 { 451 char *d1, *d2; 452 int d3; 453 bool d4, d5; 454 struct iri *d6; 455 while (url_dequeue (queue, (struct iri **)&d6, 456 (const char **)&d1, (const char **)&d2, &d3, &d4, &d5)) 457 { 458 iri_free (d6); 459 xfree (d1); 460 xfree_null (d2); 461 } 462 } 463 url_queue_delete (queue); 464 465 string_set_free (blacklist); 466 467 if (opt.quota && total_downloaded_bytes > opt.quota) 468 return QUOTEXC; 469 else if (status == FWRITEERR) 470 return FWRITEERR; 471 else 472 return RETROK; 473} 474 475/* Based on the context provided by retrieve_tree, decide whether a 476 URL is to be descended to. This is only ever called from 477 retrieve_tree, but is in a separate function for clarity. 478 479 The most expensive checks (such as those for robots) are memoized 480 by storing these URLs to BLACKLIST. This may or may not help. It 481 will help if those URLs are encountered many times. */ 482 483static bool 484download_child_p (const struct urlpos *upos, struct url *parent, int depth, 485 struct url *start_url_parsed, struct hash_table *blacklist, 486 struct iri *iri) 487{ 488 struct url *u = upos->url; 489 const char *url = u->url; 490 bool u_scheme_like_http; 491 492 DEBUGP (("Deciding whether to enqueue \"%s\".\n", url)); 493 494 if (string_set_contains (blacklist, url)) 495 { 496 if (opt.spider) 497 { 498 char *referrer = url_string (parent, URL_AUTH_HIDE_PASSWD); 499 DEBUGP (("download_child_p: parent->url is: %s\n", quote (parent->url))); 500 visited_url (url, referrer); 501 xfree (referrer); 502 } 503 DEBUGP (("Already on the black list.\n")); 504 goto out; 505 } 506 507 /* Several things to check for: 508 1. if scheme is not https and https_only requested 509 2. if scheme is not http, and we don't load it 510 3. check for relative links (if relative_only is set) 511 4. check for domain 512 5. check for no-parent 513 6. check for excludes && includes 514 7. check for suffix 515 8. check for same host (if spanhost is unset), with possible 516 gethostbyname baggage 517 9. check for robots.txt 518 519 Addendum: If the URL is FTP, and it is to be loaded, only the 520 domain and suffix settings are "stronger". 521 522 Note that .html files will get loaded regardless of suffix rules 523 (but that is remedied later with unlink) unless the depth equals 524 the maximum depth. 525 526 More time- and memory- consuming tests should be put later on 527 the list. */ 528 529#ifdef HAVE_SSL 530 if (opt.https_only && u->scheme != SCHEME_HTTPS) 531 { 532 DEBUGP (("Not following non-HTTPS links.\n")); 533 goto out; 534 } 535#endif 536 537 /* Determine whether URL under consideration has a HTTP-like scheme. */ 538 u_scheme_like_http = schemes_are_similar_p (u->scheme, SCHEME_HTTP); 539 540 /* 1. Schemes other than HTTP are normally not recursed into. */ 541 if (!u_scheme_like_http && !(u->scheme == SCHEME_FTP && opt.follow_ftp)) 542 { 543 DEBUGP (("Not following non-HTTP schemes.\n")); 544 goto out; 545 } 546 547 /* 2. If it is an absolute link and they are not followed, throw it 548 out. */ 549 if (u_scheme_like_http) 550 if (opt.relative_only && !upos->link_relative_p) 551 { 552 DEBUGP (("It doesn't really look like a relative link.\n")); 553 goto out; 554 } 555 556 /* 3. If its domain is not to be accepted/looked-up, chuck it 557 out. */ 558 if (!accept_domain (u)) 559 { 560 DEBUGP (("The domain was not accepted.\n")); 561 goto out; 562 } 563 564 /* 4. Check for parent directory. 565 566 If we descended to a different host or changed the scheme, ignore 567 opt.no_parent. Also ignore it for documents needed to display 568 the parent page when in -p mode. */ 569 if (opt.no_parent 570 && schemes_are_similar_p (u->scheme, start_url_parsed->scheme) 571 && 0 == strcasecmp (u->host, start_url_parsed->host) 572 && (u->scheme != start_url_parsed->scheme 573 || u->port == start_url_parsed->port) 574 && !(opt.page_requisites && upos->link_inline_p)) 575 { 576 if (!subdir_p (start_url_parsed->dir, u->dir)) 577 { 578 DEBUGP (("Going to \"%s\" would escape \"%s\" with no_parent on.\n", 579 u->dir, start_url_parsed->dir)); 580 goto out; 581 } 582 } 583 584 /* 5. If the file does not match the acceptance list, or is on the 585 rejection list, chuck it out. The same goes for the directory 586 exclusion and inclusion lists. */ 587 if (opt.includes || opt.excludes) 588 { 589 if (!accdir (u->dir)) 590 { 591 DEBUGP (("%s (%s) is excluded/not-included.\n", url, u->dir)); 592 goto out; 593 } 594 } 595 if (!accept_url (url)) 596 { 597 DEBUGP (("%s is excluded/not-included through regex.\n", url)); 598 goto out; 599 } 600 601 /* 6. Check for acceptance/rejection rules. We ignore these rules 602 for directories (no file name to match) and for non-leaf HTMLs, 603 which can lead to other files that do need to be downloaded. (-p 604 automatically implies non-leaf because with -p we can, if 605 necesary, overstep the maximum depth to get the page requisites.) */ 606 if (u->file[0] != '\0' 607 && !(has_html_suffix_p (u->file) 608 /* The exception only applies to non-leaf HTMLs (but -p 609 always implies non-leaf because we can overstep the 610 maximum depth to get the requisites): */ 611 && (/* non-leaf */ 612 opt.reclevel == INFINITE_RECURSION 613 /* also non-leaf */ 614 || depth < opt.reclevel - 1 615 /* -p, which implies non-leaf (see above) */ 616 || opt.page_requisites))) 617 { 618 if (!acceptable (u->file)) 619 { 620 DEBUGP (("%s (%s) does not match acc/rej rules.\n", 621 url, u->file)); 622 goto out; 623 } 624 } 625 626 /* 7. */ 627 if (schemes_are_similar_p (u->scheme, parent->scheme)) 628 if (!opt.spanhost && 0 != strcasecmp (parent->host, u->host)) 629 { 630 DEBUGP (("This is not the same hostname as the parent's (%s and %s).\n", 631 u->host, parent->host)); 632 goto out; 633 } 634 635 /* 8. */ 636 if (opt.use_robots && u_scheme_like_http) 637 { 638 struct robot_specs *specs = res_get_specs (u->host, u->port); 639 if (!specs) 640 { 641 char *rfile; 642 if (res_retrieve_file (url, &rfile, iri)) 643 { 644 specs = res_parse_from_file (rfile); 645 646 /* Delete the robots.txt file if we chose to either delete the 647 files after downloading or we're just running a spider. */ 648 if (opt.delete_after || opt.spider) 649 { 650 logprintf (LOG_VERBOSE, _("Removing %s.\n"), rfile); 651 if (unlink (rfile)) 652 logprintf (LOG_NOTQUIET, "unlink: %s\n", 653 strerror (errno)); 654 } 655 656 xfree (rfile); 657 } 658 else 659 { 660 /* If we cannot get real specs, at least produce 661 dummy ones so that we can register them and stop 662 trying to retrieve them. */ 663 specs = res_parse ("", 0); 664 } 665 res_register_specs (u->host, u->port, specs); 666 } 667 668 /* Now that we have (or don't have) robots.txt specs, we can 669 check what they say. */ 670 if (!res_match_path (specs, u->path)) 671 { 672 DEBUGP (("Not following %s because robots.txt forbids it.\n", url)); 673 string_set_add (blacklist, url); 674 goto out; 675 } 676 } 677 678 /* The URL has passed all the tests. It can be placed in the 679 download queue. */ 680 DEBUGP (("Decided to load it.\n")); 681 682 return true; 683 684 out: 685 DEBUGP (("Decided NOT to load it.\n")); 686 687 return false; 688} 689 690/* This function determines whether we will consider downloading the 691 children of a URL whose download resulted in a redirection, 692 possibly to another host, etc. It is needed very rarely, and thus 693 it is merely a simple-minded wrapper around download_child_p. */ 694 695static bool 696descend_redirect_p (const char *redirected, struct url *orig_parsed, int depth, 697 struct url *start_url_parsed, struct hash_table *blacklist, 698 struct iri *iri) 699{ 700 struct url *new_parsed; 701 struct urlpos *upos; 702 bool success; 703 704 assert (orig_parsed != NULL); 705 706 new_parsed = url_parse (redirected, NULL, NULL, false); 707 assert (new_parsed != NULL); 708 709 upos = xnew0 (struct urlpos); 710 upos->url = new_parsed; 711 712 success = download_child_p (upos, orig_parsed, depth, 713 start_url_parsed, blacklist, iri); 714 715 url_free (new_parsed); 716 xfree (upos); 717 718 if (!success) 719 DEBUGP (("Redirection \"%s\" failed the test.\n", redirected)); 720 721 return success; 722} 723 724/* vim:set sts=2 sw=2 cino+={s: */ 725