#! /usr/bin/perl
# get_harvester_results retrieves files of search results from the
# Harvester at harvester.fzk.de, and is released under the terms of
# the GPL version 2, or any later version, at your option. See the
# files README and COPYING for more information.
# Copyright 2004 by Don Armstrong.
# $Id: ss,v 1.1 2004/06/29 05:26:35 don Exp $

use threads;
use warnings;
use strict;

use Getopt::Long;
use Pod::Usage;

=head1 NAME

get_harvester_results [options]

=head1 SYNOPSIS

 Options:
  --dir, -D directory to stick results into [default .]
  --name, -n result directory naming scheme [default ${search}_results_harvester]
  --terms, -t file of search terms [default -]
  --debug, -d debugging level [default 0]
  --help, -h display this help
  --man, -m display manual

=head1 OPTIONS

=over

=item B<--debug, -d>

Debug verbosity. (Default 0)

=item B<--help, -h>

Display brief usage information.

=item B<--man, -m>

Display this manual.

=back

=head1 EXAMPLES

 get_harvester_results -D ./harvester_results/ -n '${search}_name.html' < search_parameters

Reads search terms from F<search_parameters>, one per line, runs each
against the Harvester, and saves the result pages beneath
F<./harvester_results/> in a directory named for the search.

=cut

use vars qw($DEBUG $REVISION);

BEGIN{
    ($REVISION) = q$LastChangedRevision: 1$ =~ /LastChangedRevision:\s+([^\s]+)/;
    $DEBUG = 0 unless defined $DEBUG;
}

use IO::File;
use URI;
use WWW::Mechanize;
use Thread::Queue;

# XXX parse config file

my %options = (debug          => 0,
               help           => 0,
               man            => 0,
               format         => 'xml',
               database       => 'gene',
               dir            => '.',
               name           => '${search}_results_harvester',
               terms          => '-',
               orgn           => 'human',
               harvester_site => 'http://harvester.fzk.de',
              );

GetOptions(\%options,'format|f=s','database|b=s','name|n=s',
           'terms|t=s','dir|D=s','debug|d+','help|h|?','man|m');

pod2usage() if $options{help};
pod2usage({verbose=>2}) if $options{man};

$DEBUG = $options{debug};

if (not -d $options{dir}) {
    die "$options{dir} does not exist or is not a directory";
}

$options{harvester_search_url} = '/cgi-bin/'.$options{orgn}.'/search.cgi';

# open the search terms file, or read from STDIN if terms is '-'
my $terms;
if ($options{terms} eq '-') {
    $terms = \*STDIN;
}
else {
    $terms = IO::File->new($options{terms},'r') or
        die "Unable to open file $options{terms}: $!";
}

# for every search term, crawl the paginated results and hand the
# result page URLs off to a wget worker thread
my @threads;
while (<$terms>) {
    chomp;
    my $search = $_;
    my $uri = URI->new($options{harvester_site}.$options{harvester_search_url});
    $uri->query_form(zoom_query    => $search,
                     zoom_per_page => 100,
                     zoom_and      => 1,
                     zoom_sort     => 0,
                    );
    my $url = $uri->as_string;
    my $queue = Thread::Queue->new();
    # interpolate the current $search into the naming scheme
    my $dir_name = eval qq("$options{name}") or die $@;
    if (not -d "$options{dir}/$dir_name") {
        mkdir("$options{dir}/$dir_name") or
            die "Unable to make directory $options{dir}/$dir_name: $!";
    }
    my $wget_thread = threads->create(\&get_url,"$options{dir}/$dir_name",$queue);
    push @threads,$wget_thread;
    my $mech = WWW::Mechanize->new(agent => "DA_get_harvester_results/$REVISION");
    $mech->get($url);
    # queue every result page link on this page, then follow the Next
    # link until there are no more pages
    my $next_link;
    do {
        my @links = $mech->links;
        $next_link = undef;
        for my $link (@links) {
            if (defined $link->text and $link->text =~ /Next /) {
                $next_link = $link;
            }
            elsif ($link->url =~ m#\Q$options{harvester_site}\E/harvester/\Q$options{orgn}\E/[^/]+/[^.]+\.htm#) {
                $queue->enqueue($link->url());
            }
        }
        $mech->follow_link(url=>$next_link->url) if defined $next_link;
    } while ($next_link);
    # an undef sentinel tells the worker that no more URLs are coming
    $queue->enqueue(undef);
}

for my $thread (@threads) {
    $thread->join;
}

# dequeue URLs until the undef sentinel arrives, fetching them in
# batches of 30 per wget invocation
sub get_url{
    my ($dir,$queue) = @_;
    my @current_urls;
    while (my $url = $queue->dequeue) {
        push @current_urls,$url;
        if (@current_urls >= 30) {
            wget_urls($dir,@current_urls);
            @current_urls = ();
        }
    }
    wget_urls($dir,@current_urls) if @current_urls;
}
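
# wget_urls hands a batch of URLs to a single wget invocation; the
# flags keep the fetch flat and polite: -nd and -nH avoid recreating
# the remote directory tree, -w 2 with --random-wait spaces requests
# out, and -P drops everything into the per-search directory.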
sub wget_urls{
    my ($dir,@urls) = @_;
    return unless @urls;
    system(q(wget),'-nd','-nH','-w','2','--random-wait','-P',$dir,@urls) == 0
        or warn "wget failed with exit status $?: $!";
}

__END__
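
=head1 SEARCH TERMS

The file given to B<--terms> (or standard input, the default) holds
one search term per line; each term becomes the C<zoom_query> of a
Harvester search, and its result pages are saved under a directory
named by B<--name>. A minimal example (C<kinesin> here is only an
illustration):

 golgi
 kinesin

=cut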