From 65b4a63f56fca1f844540ea44617aa5ee3d58aee Mon Sep 17 00:00:00 2001
From: Daniel Stenberg
Date: Fri, 16 Mar 2001 13:04:57 +0000
Subject: moved here from ../
---
 perl/contrib/formfind.pl.in | 273 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 273 insertions(+)
 create mode 100755 perl/contrib/formfind.pl.in
(limited to 'perl/contrib/formfind.pl.in')
diff --git a/perl/contrib/formfind.pl.in b/perl/contrib/formfind.pl.in
new file mode 100755
index 000000000..6428e991e
--- /dev/null
+++ b/perl/contrib/formfind.pl.in
@@ -0,0 +1,273 @@
#!@PERL@
#
# formfind.pl
#
# This script gets a HTML page from the specified URL and presents form
# information you may need in order to machine-make a respond to the form.
#
# Written to use 'curl' for URL fetching.
#
# Author: Daniel Stenberg
# Version: 0.1 Nov 12, 1998
#
# HISTORY
#
# 0.1 - Created now!
#
# TODO
# respect file:// URLs for local file fetches!

# Accumulates the whole document as a single newline-free string, so that
# the tag-matching regex later in the script can match across line breaks.
$in="";

# With no URL argument the HTML document is read from stdin instead of
# being fetched with curl.
$usestdin = 0;
if($ARGV[0] eq "" ) {
    $usestdin = 1;
}
else {
    $geturl = $ARGV[0];
}

if(($geturl eq "") && !$usestdin) {
    # NOTE(review): the <URL> placeholder was stripped by the HTML
    # rendering this file passed through; restored here.
    print "Usage: $0 <URL>\n",
        " Use a trailing slash for directory URLs!\n";
    exit;
}
# If you need a proxy for web access, edit your .curlrc file to feature
# -x <proxy host>

# linkchecker, URL will be appended to the right of this command line
# this is the one using HEAD:
$linkcheck = "curl -s -m 20 -I";

# as a second attempt, this will be used. This is not using HEAD but will
# get the whole frigging document!
$linkcheckfull = "curl -s -m 20 -i";

# htmlget, URL will be appended to the right of this command line
$htmlget = "curl -s";

# urlget, URL will be appended to the right of this command line
# this stores the file with the remote file name in the current dir
$urlget = "curl -O -s";

# Parse the input URL and split it into the relevant parts:
# sets the globals $getprotocol, $getserver, $getpath and $getdocument
# from the single URL argument, or exits if the URL cannot be parsed.
sub SplitURL {
    my $inurl = $_[0];

    # protocol://server/path/document
    if($inurl=~ /^([^:]+):\/\/([^\/]*)\/(.*)\/(.*)/ ) {
        $getprotocol = $1;
        $getserver = $2;
        $getpath = $3;
        $getdocument = $4;
    }
    # protocol://server/path-or-document
    elsif ($inurl=~ /^([^:]+):\/\/([^\/]*)\/(.*)/ ) {
        $getprotocol = $1;
        $getserver = $2;
        $getpath = $3;
        $getdocument = "";

        # if there is no slash in the part after the host it is really a
        # document name, not a path
        if($getpath !~ /\//) {
            $getpath ="";
            $getdocument = $3;
        }

    }
    # protocol://server (no path at all)
    elsif ($inurl=~ /^([^:]+):\/\/(.*)/ ) {
        $getprotocol = $1;
        $getserver = $2;
        $getpath = "";
        $getdocument = "";
    }
    else {
        print "Couldn't parse the specified URL, retry please!\n";
        exit;
    }
}


if(!$usestdin) {

    &SplitURL($geturl);
#print "protocol = $getprotocol\n";
#print "server = $getserver\n";
#print "path = $getpath\n";
#print "document = $getdocument\n";
#exit;

    # Issue a HEAD request first so that a 3xx redirect can be detected
    # and followed (once) before fetching the real document.
    # NOTE(review): the <HEADGET> diamond read below was stripped to a bare
    # while() by the HTML rendering this file passed through; restored.
    open(HEADGET, "$linkcheck $geturl|") ||
        die "Couldn't get web page for some reason";
  headget:
    while(<HEADGET>) {
#       print $_;
        if($_ =~ /HTTP\/.*3\d\d /) {
            $pagemoved=1;
        }
        elsif($pagemoved &&
              ($_ =~ /^Location: (.*)/)) {
            $geturl = $1;

            &SplitURL($geturl);

            $pagemoved++;
            last headget;
        }
    }
    close(HEADGET);

    # a 3xx status without any Location: header leaves $pagemoved == 1
    if($pagemoved == 1) {
        print "Page is moved but we don't know where. 
Did you forget the ",
            "trailing slash?\n";
        exit;
    }

    # Fetch the (possibly redirected) page; keep the raw lines in @indoc
    # and flatten everything into the single string $in.
    # NOTE(review): the <WEBGET> and <STDIN> diamond reads below were
    # stripped to bare while() by the HTML rendering; restored.
    open(WEBGET, "$htmlget $geturl|") ||
        die "Couldn't get web page for some reason";

    while(<WEBGET>) {
        $line = $_;
        push @indoc, $line;
        $line=~ s/\n//g;
        $line=~ s/\r//g;
#       print $line."\n";
        $in=$in.$line;
    }

    close(WEBGET);
}
else {
    # no URL given: read the HTML document from stdin instead
    while(<STDIN>) {
        $line = $_;
        push @indoc, $line;
        $line=~ s/\n//g;
        $line=~ s/\r//g;
        $in=$in.$line;
    }
}

 getlinkloop:
 while($in =~ /[^<]*(<[^>]+>)/g ) {
     # we have a tag in $1
     $tag = $1;

     if($tag =~ /^