From ae1912cb0d494b48d514d937826c9fe83ec96c4d Mon Sep 17 00:00:00 2001
From: Daniel Stenberg
Date: Wed, 29 Dec 1999 14:20:26 +0000
Subject: Initial revision

---
 perl/README                |  33 +++++
 perl/checklinks.pl         | 336 +++++++++++++++++++++++++++++++++++++++++++++
 perl/checklinks.pl.in      | 336 +++++++++++++++++++++++++++++++++++++++++++++
 perl/formfind.pl           | 273 ++++++++++++++++++++++++++++++++++++
 perl/formfind.pl.in        | 273 ++++++++++++++++++++++++++++++++++++
 perl/getlinks.pl           | 261 +++++++++++++++++++++++++++++++++++
 perl/getlinks.pl.in        | 261 +++++++++++++++++++++++++++++++++++
 perl/recursiveftpget.pl    |  67 +++++++++
 perl/recursiveftpget.pl.in |  67 +++++++++
 9 files changed, 1907 insertions(+)
 create mode 100644 perl/README
 create mode 100644 perl/checklinks.pl
 create mode 100755 perl/checklinks.pl.in
 create mode 100644 perl/formfind.pl
 create mode 100755 perl/formfind.pl.in
 create mode 100644 perl/getlinks.pl
 create mode 100755 perl/getlinks.pl.in
 create mode 100644 perl/recursiveftpget.pl
 create mode 100755 perl/recursiveftpget.pl.in

diff --git a/perl/README b/perl/README
new file mode 100644
index 000000000..bb8757aea
--- /dev/null
+++ b/perl/README
@@ -0,0 +1,33 @@
+This is just a small collection of perl scripts that use curl to do
+their jobs.
+
+If you need a proxy configuration in order to get HTTP or FTP
+documents, edit your .curlrc file in your HOME dir to contain:
+-x <proxy host>:<port>
+
+These scripts are all written by Daniel Stenberg.
+
+checklinks.pl
+=============
+ This script fetches an HTML page, extracts all links and references to
+ other documents and then goes through them to check that they work.
+ It reports progress in a format intended for machine parsing.
+
+getlinks.pl
+===========
+ Ever wanted to download all the programs a certain HTML page links to?
+ This program extracts all links and references from a web page and
+ compares them against the regex you supply. All matches are downloaded
+ into the target directory of your choice.
+
+recursiveftpget.pl
+==================
+ This script recursively downloads all files from a directory on an FTP site
+ and all of its subdirectories. The recursion depth can optionally be limited.
+
+formfind.pl
+===========
+ Downloads an HTML page (or reads stdin) and produces a human-readable
+ report about the FORM(s) present: what method, what URL, which input or
+ select fields, what default values they have and what submit buttons there
+ are. Useful if you intend to use curl to properly fake a form submission.
diff --git a/perl/checklinks.pl b/perl/checklinks.pl
new file mode 100644
index 000000000..347416ac9
--- /dev/null
+++ b/perl/checklinks.pl
@@ -0,0 +1,336 @@
+#!/usr/local/bin/perl
+#
+# checklinks.pl
+#
+# This script extracts all links from an HTML page and checks their validity.
+# Written to use 'curl' for URL checking.
+#
+# Author: Daniel Stenberg
+# Version: 0.7 Sept 30, 1998
+#
+# HISTORY
+#
+# 0.5 - Cuts off the #-part from links before checking.
+#
+# 0.6 - Now deals with error codes 3XX better and follows the Location:
+#       properly.
+#     - Added the -x flag that only checks http:// -links
+#
+# 0.7 - Ok, http://www.viunga.se/main.html didn't realize this had no path
+#       but a document. Now it does.
+#
+#
+
+$in="";
+
+ argv:
+if($ARGV[0] eq "-v" ) {
+    $verbose = 1;
+    shift @ARGV;
+    goto argv;
+}
+elsif($ARGV[0] eq "-i" ) {
+    $usestdin = 1;
+    shift @ARGV;
+    goto argv;
+}
+elsif($ARGV[0] eq "-l" ) {
+    $linenumber = 1;
+    shift @ARGV;
+    goto argv;
+}
+elsif($ARGV[0] eq "-h" ) {
+    $help = 1;
+    shift @ARGV;
+    goto argv;
+}
+elsif($ARGV[0] eq "-x" ) {
+    $external = 1;
+    shift @ARGV;
+    goto argv;
+}
+
+$geturl = $ARGV[0];
+
+if(($geturl eq "") || $help) {
+    print "Usage: $0 [-hilvx] <full URL>\n",
+        " Use a trailing slash for directory URLs!\n",
+        " -h  This help text\n",
+        " -i  Read the initial page from stdin\n",
+        " -l  Line number report for BAD links\n",
+        " -v  Verbose mode\n",
+        " -x  Check non-local (external?) links only\n";
+    exit;
+}
+
+if($ARGV[1] eq "-") {
+    print "We use stdin!\n";
+    $usestdin = 1;
+}
+
+# This is necessary from where I tried this:
+#$proxy =" -x 194.237.142.41:80";
+
+# linkchecker, URL will be appended to the right of this command line
+# this is the one using HEAD:
+$linkcheck = "curl -s -m 20 -I$proxy";
+
+# as a second attempt, this will be used. This is not using HEAD but will
+# get the whole frigging document!
+$linkcheckfull = "curl -s -m 20 -i$proxy";
+
+# htmlget, URL will be appended to the right of this command line
+$htmlget = "curl -s$proxy";
+
+# Parse the input URL and split it into the relevant parts:
+
+sub SplitURL {
+    my $inurl = $_[0];
+
+    if($inurl=~ /^([^:]+):\/\/([^\/]*)\/(.*)\/(.*)/ ) {
+        $getprotocol = $1;
+        $getserver = $2;
+        $getpath = $3;
+        $getdocument = $4;
+    }
+    elsif ($inurl=~ /^([^:]+):\/\/([^\/]*)\/(.*)/ ) {
+        $getprotocol = $1;
+        $getserver = $2;
+        $getpath = $3;
+        $getdocument = "";
+
+        if($getpath !~ /\//) {
+            $getpath ="";
+            $getdocument = $3;
+        }
+
+    }
+    elsif ($inurl=~ /^([^:]+):\/\/(.*)/ ) {
+        $getprotocol = $1;
+        $getserver = $2;
+        $getpath = "";
+        $getdocument = "";
+    }
+    else {
+        print "Couldn't parse the specified URL, retry please!\n";
+        exit;
+    }
+}
+
+&SplitURL($geturl);
+
+#print "protocol = $getprotocol\n";
+#print "server = $getserver\n";
+#print "path = $getpath\n";
+#print "document = $getdocument\n";
+#exit;
+
+if(!$usestdin) {
+    open(HEADGET, "$linkcheck $geturl|") ||
+        die "Couldn't get web page for some reason";
+  headget:
+    while(<HEADGET>) {
+#        print $_;
+        if($_ =~ /HTTP\/.*3\d\d /) {
+            $pagemoved=1;
+        }
+        elsif($pagemoved &&
+              ($_ =~ /^Location: (.*)/)) {
+            $geturl = $1;
+
+            &SplitURL($geturl);
+
+            $pagemoved++;
+            last headget;
+        }
+    }
+    close(HEADGET);
+
+    if($pagemoved == 1) {
+        print "Page is moved but we don't know where. Did you forget the ",
+            "trailing slash?\n";
+        exit;
+    }
+
+    open(WEBGET, "$htmlget $geturl|") ||
+        die "Couldn't get web page for some reason";
+
+    while(<WEBGET>) {
+        $line = $_;
+        push @indoc, $line;
+        $line=~ s/\n//g;
+        $line=~ s/\r//g;
+#        print $line."\n";
+        $in=$in.$line;
+    }
+
+    close(WEBGET);
+}
+else {
+    while(<STDIN>) {
+        $line = $_;
+        push @indoc, $line;
+        $line=~ s/\n//g;
+        $line=~ s/\r//g;
+        $in=$in.$line;
+    }
+}
+
+#print length($in)."\n";
+
+sub LinkWorks {
+    my $check = $_[0];
+
+# URL encode:
+# $check =~s/([^a-zA-Z0-9_:\/.-])/uc sprintf("%%%02x",ord($1))/eg;
+
+    @doc = `$linkcheck \"$check\"`;
+
+    $head = 1;
+
+#    print "COMMAND: $linkcheck \"$check\"\n";
+#    print $doc[0]."\n";
+
+  boo:
+    if( $doc[0] =~ /^HTTP[^ ]+ (\d+)/ ) {
+        $error = $1;
+
+        if($error < 400 ) {
+            return "GOOD";
+        }
+        else {
+
+            if($head && ($error >= 500)) {
+                # This server doesn't like HEAD!
+                @doc = `$linkcheckfull \"$check\"`;
+                $head = 0;
+                goto boo;
+            }
+            return "BAD";
+        }
+    }
+    return "BAD";
+}
+
+
+sub GetLinks {
+    my $in = $_[0];
+    my @result;
+
+  getlinkloop:
+    while($in =~ /[^<]*(<[^>]+>)/g ) {
+        # we have a tag in $1
+        $tag = $1;
+
+        if($tag =~ /^
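
The link validation the README describes for checklinks.pl, and that LinkWorks above implements, boils down to: ask curl for the headers only, accept any status below 400, and retry with a full GET when a server answers HEAD with a 5xx code. Below is a minimal standalone sketch of that check. It relies only on the curl options already used in the patch (-s, -m 20, -I, -i); the function name, variable names and example URL are illustrative and not part of the original script.

#!/usr/bin/env perl
# Hedged sketch, not part of the original patch: single-URL link check
# using the same HEAD-first, GET-on-5xx strategy as LinkWorks.
use strict;
use warnings;

my $headcheck = 'curl -s -m 20 -I';   # headers only, 20 second timeout
my $fullcheck = 'curl -s -m 20 -i';   # full fetch with headers included

sub link_works {
    my ($url) = @_;

    for my $cmd ($headcheck, $fullcheck) {
        my @doc = `$cmd "$url"`;
        # The first output line should be the HTTP status line.
        if (@doc && $doc[0] =~ /^HTTP[^ ]+ (\d+)/) {
            my $code = $1;
            return 1 if $code < 400;    # 2xx/3xx: the link works
            next if $code >= 500;       # server may dislike HEAD: retry with GET
            return 0;                   # 4xx: genuinely broken
        }
    }
    return 0;                           # no usable answer at all
}

# Example run (hypothetical URL):
my $url = $ARGV[0] || "http://example.com/";
print "$url: ", (link_works($url) ? "GOOD" : "BAD"), "\n";

Keeping the two command strings separate mirrors the $linkcheck/$linkcheckfull pair in the patch and makes the fallback explicit.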
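SplitURL above splits a URL into protocol, server, path and document with a cascade of three regexes plus a special case for paths that contain no slash. The same cascade, repackaged as a self-contained function for experimenting outside the script; the function name and the test URL are made up for illustration.

#!/usr/bin/env perl
# Hedged sketch, not part of the original patch: the SplitURL regex
# cascade as a standalone function that returns the four parts.
use strict;
use warnings;

sub split_url {
    my ($inurl) = @_;

    # protocol://server/path/document
    if ($inurl =~ m{^([^:]+)://([^/]*)/(.*)/(.*)}) {
        return ($1, $2, $3, $4);
    }
    # protocol://server/rest -- "rest" is a document when it holds no slash
    if ($inurl =~ m{^([^:]+)://([^/]*)/(.*)}) {
        my ($proto, $server, $rest) = ($1, $2, $3);
        return ($proto, $server, $rest, "") if $rest =~ m{/};
        return ($proto, $server, "", $rest);
    }
    # protocol://server
    if ($inurl =~ m{^([^:]+)://(.*)}) {
        return ($1, $2, "", "");
    }
    die "Couldn't parse the specified URL: $inurl\n";
}

# Example (hypothetical URL):
my ($proto, $server, $path, $doc) =
    split_url("http://example.com/docs/index.html");
print "protocol=$proto server=$server path=$path document=$doc\n";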
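GetLinks is cut off above, but its opening loop already shows the approach: walk the page one <...> tag at a time and inspect each tag. The sketch below uses that same tag-scanning loop to collect href values from anchor tags. It is a rough illustration, not a reconstruction of the original function; the function name, the attribute handling and the sample HTML are assumptions.

#!/usr/bin/env perl
# Hedged sketch, not part of the original patch: tag-scanning link
# extraction in the spirit of GetLinks' opening loop.
use strict;
use warnings;

sub get_hrefs {
    my ($html) = @_;
    my @links;

    # Same scanner as in the patch: grab one <...> tag at a time.
    while ($html =~ /[^<]*(<[^>]+>)/g) {
        my $tag = $1;
        # Only anchor tags; accept double-quoted, single-quoted or bare values.
        if ($tag =~ /^<a[\s>]/i && $tag =~ /href\s*=\s*["']?([^"'\s>]+)/i) {
            push @links, $1;
        }
    }
    return @links;
}

# Example with a small inline document:
my $html = '<p>See <a href="http://example.com/">this</a> '
         . 'and <a href=other.html>that</a>.</p>';
print "$_\n" for get_hrefs($html);

Relative URLs come back as-is; resolving them against a base URL is outside the scope of this sketch.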