Crawler.py

# Simple web crawling program.
# Note: msvcrt is Windows-only; the Esc-key stop check will not work elsewhere.
import urllib.request
from urllib.parse import urljoin
from bs4 import BeautifulSoup
import sys
import json
import msvcrt

def startList(links):
    # Crawl every URL collected so far; press Esc to stop.
    for i in links:
        crawl(i)
        if msvcrt.kbhit():
            if ord(msvcrt.getch()) == 27:  # Esc
                break

def crawl(url):
    print("Opening url " + url + "\n")
    html = urllib.request.urlopen(url)
    print("Parsing HTML\n")
    cleanHtml = BeautifulSoup(html, "html.parser")
    links = {}
    invalidLinks = {}
    # Resume from earlier runs if the JSON files exist and are readable.
    try:
        with open('list.txt', 'r') as f:
            links = json.load(f)
        with open('invalidList.txt', 'r') as f:
            invalidLinks = json.load(f)
    except (FileNotFoundError, ValueError):
        pass
    print("Starting Crawler\n")
    count = 0
    for link in cleanHtml.find_all('a'):
        a = link.get('href')  # may be None when the <a> tag has no href
        if a and a not in links and a not in invalidLinks:
            print("Link: " + a + "\n")
            # Resolve relative links and default to http:// when no scheme is given.
            if url not in a:
                a = urljoin(url, a)
            if not a.startswith(('http://', 'https://')):
                a = 'http://' + a
            print("New Link: " + a + "\n")
            try:
                print("Adding to list\n")
                newHtml = urllib.request.urlopen(a)
                links[a] = str(newHtml.read())
                with open('list.txt', 'w') as f:
                    json.dump(links, f)
                count += 1
            except Exception:
                invalidLinks[a] = 'NULL'
                with open('invalidList.txt', 'w') as f:
                    json.dump(invalidLinks, f)
                print("Invalid url..")
            print("===================")
        if msvcrt.kbhit():
            if ord(msvcrt.getch()) == 27:  # Esc
                break
    with open('list.txt', 'r') as f:
        links = json.load(f)
    print("Starting main loop...")
    startList(links)

def main():
    crawl(sys.argv[1])

if __name__ == '__main__':
    main()
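Run it as "python Crawler.py http://example.com"; it writes list.txt and invalidList.txt in the working directory. One portability note: msvcrt.kbhit()/msvcrt.getch() exist only on Windows, so importing this script fails with ImportError on other platforms. A minimal cross-platform sketch, assuming you are willing to stop with Ctrl+C instead of Esc (run() is a hypothetical wrapper name; crawl() is the function above):

# Cross-platform stop sketch: catch Ctrl+C (KeyboardInterrupt) instead of polling Esc.
# run() is a hypothetical wrapper; crawl() is defined in Crawler.py above.
import sys

def run(start_url):
    try:
        crawl(start_url)
    except KeyboardInterrupt:
        # list.txt and invalidList.txt are rewritten after every link,
        # so everything crawled so far is already on disk.
        print("\nStopped by user; progress saved in list.txt")

if __name__ == '__main__':
    run(sys.argv[1])

Because crawl() dumps both JSON files after every link, interrupting at any point loses at most the page currently being fetched.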
