# Subversion Repositories SmartDukaan
#
# Rev 12198 | Rev 12275 | Go to most recent revision | Blame | Compare with Previous | Last modification | View Log | RSS feed
# (SVN web-viewer header residue, commented out so the file parses as Python)

import urllib2
from BeautifulSoup import BeautifulSoup, NavigableString
import re
import sys

# Inline formatting tags whose markup strip_tags() removes (their text content is kept).
invalid_tags = ['b', 'i', 'u']
# NOTE(review): nothing in this file appends to bestSellers — looks like a leftover accumulator.
bestSellers = []

def strip_tags(html, invalid_tags):
    """Parse *html* and return a BeautifulSoup tree in which every tag whose
    name is in *invalid_tags* has been replaced by its flattened text content.

    Nested disallowed tags are handled by recursing on non-text children
    before flattening.  HTML entities are converted to unicode characters
    while parsing.
    """
    soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES)

    for tag in soup.findAll(True):
        if tag.name in invalid_tags:
            parts = []
            for child in tag.contents:
                if not isinstance(child, NavigableString):
                    # Child is itself markup: strip it recursively first.
                    child = strip_tags(unicode(child), invalid_tags)
                parts.append(unicode(child))
            # join() instead of repeated += avoids quadratic concatenation.
            tag.replaceWith(u"".join(parts))

    return soup

class AmazonScraper:
    """Scrapes an Amazon.in offer-listing page.

    Usage: read(url, findStore) to fetch the page, then createData() to parse
    it.  createData() returns (landed_price, store_name) for the FIRST offer
    on the page (unit cost + shipping), or None when no offers are found.
    """

    def __init__(self):
        # Counts failed HTTP fetches; retries stop once it reaches 3.
        # NOTE(review): never reset, so the budget is shared across calls.
        self.count_trials = 0

    def read(self, url, findStore):
        """Fetch *url* and cache the raw HTML on self.response_data.

        findStore -- when True, scrape() follows each seller link to resolve
                     the storefront name.
        Retries up to 3 times on urllib2.HTTPError.
        """
        request = urllib2.Request(url)
        # Spoof a desktop browser UA; Amazon rejects the default urllib2 agent.
        request.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.218 Safari/535.1')
        opener = urllib2.build_opener()
        response_data = ""
        self.findStore = findStore
        try:
            response_data = opener.open(request).read()
        except urllib2.HTTPError as e:
            print 'ERROR: ', e
            print 'Retrying'
            self.count_trials += 1
            if self.count_trials < 3:
                # BUG FIX: the retry previously called self.read(url) without
                # the mandatory findStore argument, raising TypeError instead
                # of retrying.
                return self.read(url, findStore)

        self.response_data = response_data

    def createData(self):
        """Parse the cached HTML and return scrape()'s result.

        Drops the raw HTML reference after parsing to release memory.
        """
        self.soup = strip_tags(self.response_data, invalid_tags)
        self.response_data = None
        return self.scrape(self.soup)

    def scrape(self, soup):
        """Extract (unit cost + shipping cost, store name) from the offer rows.

        Only the FIRST offer row is processed: the method returns from inside
        the loop on the first iteration.  Returns None when the page contains
        no offer rows.
        """
        sellerData = soup.findAll("div", {"class": "a-row a-spacing-mini olpOffer"})
        for data in sellerData:
            print "sellerData****"
            price = data.find('span', attrs={'class': re.compile('.*olpOfferPrice*')}).find('span').text
            print "Unit cost= ", float(price.replace("Rs.", "").replace(",", ""))
            unitCost = float(price.replace("Rs.", "").replace(",", ""))
            shippingCost = data.find('p', attrs={'class': re.compile('.*olpShippingInfo*')}).find('span').text
            if "FREE" in shippingCost:
                print "shippingCost=0"
                shippingCost = 0
            else:
                print "shippingCost= ", float(shippingCost.replace("+Rs.", "").replace("Delivery", ""))
                shippingCost = float(shippingCost.replace("+Rs.", "").replace("Delivery", ""))

            sellerColumn = data.find('p', attrs={'class': re.compile('.*olpSellerName*')})
            store = ""
            if self.findStore:
                print "Seller info ", sellerColumn
                storeUrl = sellerColumn.find('a')['href']
                print "&&&&"
                # Storefront page title looks like "<name> @ Amazon.in" or
                # "<label>: <name>"; fall back to "" when neither matches.
                # NOTE: findStoreFront can return None (page without <title>),
                # so the broad Exception catches below are load-bearing.
                store = self.findStoreFront(storeUrl)
                try:
                    ind = store.index("@ Amazon.in")
                    store = store[0:ind].strip()
                except Exception:
                    try:
                        ind = store.split(":")
                        store = ind[1].strip()
                    except Exception:
                        store = ""
            ratingColumn = data.find('p', attrs={'class': 'a-spacing-small'}).find('a').contents[0]
            print "Rating info ", ratingColumn
            print "***********************"
            # Deliberate early return: only the first offer matters here.
            return unitCost + shippingCost, store

    def findStoreFront(self, storeUrl):
        """Fetch the seller's storefront page and return its <title> string.

        Returns "" on HTTPError (no retry of the storefront URL itself);
        may return None when the fetched page has no usable <title>.
        """
        request = urllib2.Request(storeUrl)
        request.add_header('User-Agent', 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.218 Safari/535.1')
        opener = urllib2.build_opener()
        response_data = ""
        try:
            response_data = opener.open(request).read()
        except urllib2.HTTPError as e:
            print 'ERROR: ', e
            print 'Retrying'
            self.count_trials += 1
            if self.count_trials < 3:
                return ""
        soup = strip_tags(response_data, invalid_tags)
        response_data = None
        return soup.title.string

if __name__ == '__main__':
    # Smoke run: fetch the offer listing for one hard-coded ASIN (network
    # I/O), with storefront resolution enabled, and print the resulting
    # (landed price, store name) tuple.
    scraper = AmazonScraper()
    scraper.read('http://www.amazon.in/gp/offer-listing/B001D0ROGO/ref=olp_sort_ps',True)
    print scraper.createData()