Subversion Repositories SmartDukaan

Rev

Rev 12275 | Blame | Compare with Previous | Last modification | View Log | RSS feed

import urllib2
from BeautifulSoup import BeautifulSoup, NavigableString
from dtr.utils.utils import fetchResponseUsingProxy
import re
import sys

# Inline formatting tags that strip_tags() flattens into plain text so that
# price/seller text can be read without markup interruptions.
invalid_tags = ['b', 'i', 'u']
# NOTE(review): bestSellers is never read or written in this file -- presumably
# a leftover or used by code outside this view; confirm before removing.
bestSellers = []

def strip_tags(html, invalid_tags):
    """Parse `html` (converting HTML entities) and return a BeautifulSoup
    tree in which every tag named in `invalid_tags` has been replaced by
    its text content, flattening nested markup recursively."""
    soup = BeautifulSoup(html, convertEntities=BeautifulSoup.HTML_ENTITIES)

    for node in soup.findAll(True):
        if node.name not in invalid_tags:
            continue

        # Collect the tag's children as plain text, recursing into any
        # nested tags so their markup is flattened as well.
        pieces = []
        for child in node.contents:
            if isinstance(child, NavigableString):
                pieces.append(unicode(child))
            else:
                pieces.append(unicode(strip_tags(unicode(child), invalid_tags)))

        node.replaceWith(u"".join(pieces))

    return soup

class AmazonScraper:
    """Scrapes an Amazon.in offer-listing page and returns the first
    (i.e. cheapest, given Amazon's price sort) offer's landed price
    (unit cost + shipping) and, optionally, the seller's store name.

    Retries fetches up to 5 times on network errors or captcha pages.
    """

    # Maximum fetch attempts per URL (network failures and captchas combined).
    MAX_TRIALS = 5

    def __init__(self, livePricing=None):
        # Attempts made so far for the current read() call chain.
        self.count_trials = 0
        self.livePricing = livePricing

    def read(self, url, findStore):
        """Fetch `url` and scrape it.

        findStore -- when truthy, also resolve the seller's storefront name.
        Returns (landed_price, store_name) for the first offer, or None if
        the page yields no offers / all retries are exhausted.
        """
        response_data = ""
        self.findStore = findStore
        try:
            response_data = fetchResponseUsingProxy(url, livePricing=self.livePricing)
        except Exception as e:
            print 'ERROR: ', e
            print 'Retrying'
            self.count_trials += 1
            if self.count_trials < self.MAX_TRIALS:
                # BUG FIX: the original retried with self.read(url) only,
                # which raised TypeError because read() requires findStore.
                return self.read(url, findStore)
            return None  # retries exhausted

        self.response_data = response_data

        if "Server Busy" in self.response_data:
            print "Captcha page, lets try again."
            self.count_trials += 1
            # BUG FIX: the original retried unconditionally here (and without
            # findStore), so a persistently busy page recursed forever.
            # Bound captcha retries the same way as network-error retries.
            if self.count_trials < self.MAX_TRIALS:
                return self.read(url, findStore)
            return None
        return self.createData()

    def createData(self):
        """Parse the fetched HTML (stripping formatting tags) and scrape it."""
        self.soup = strip_tags(self.response_data, invalid_tags)
        # Drop the raw HTML early to keep memory down on large pages.
        self.response_data = None
        return self.scrape(self.soup)

    def scrape(self, soup):
        """Extract (unit_cost + shipping_cost, store) from the first offer
        row found; returns None when the page has no offer rows."""
        sellerData = soup.findAll("div", {"class": "a-row a-spacing-mini olpOffer"})
        for data in sellerData:
            print "sellerData****"
            price = data.find('span', attrs={'class': re.compile('.*olpOfferPrice*')}).find('span').text
            print "Unit cost= ", float(price.replace("Rs.", "").replace(",", ""))
            unitCost = float(price.replace("Rs.", "").replace(",", ""))

            shippingCost = data.find('p', attrs={'class': re.compile('.*olpShippingInfo*')}).find('span').text
            if "FREE" in shippingCost:
                print "shippingCost=0"
                shippingCost = 0
            else:
                print "shippingCost= ", float(shippingCost.replace("+Rs.", "").replace("Delivery", ""))
                shippingCost = float(shippingCost.replace("+Rs.", "").replace("Delivery", ""))

            sellerColumn = data.find('p', attrs={'class': re.compile('.*olpSellerName*')})
            store = ""
            if self.findStore:
                print "Seller info ", sellerColumn
                x = sellerColumn.find('a')['href']
                print x
                temp = sellerColumn.find('a')
                store = temp.text
                if len(store) == 0:
                    # Anchor has no text: follow the storefront link and pull
                    # the store name out of that page's <title>.
                    storeUrl = x
                    dom_in = storeUrl.find("www.amazon.in")
                    if dom_in == -1:
                        storeUrl = "http://amazon.in" + storeUrl
                    store = self.findStoreFront(storeUrl)
                    try:
                        # Typical title: "<Store> @ Amazon.in ..."
                        ind = store.index("@ Amazon.in")
                        store = store[0:ind].strip()
                    except Exception:
                        try:
                            # Fallback title shape: "<prefix>: <Store>"
                            parts = store.split(":")
                            store = parts[1].strip()
                        except Exception:
                            store = ""
            # NOTE(review): crashes if the rating <p> is absent -- original
            # behaved the same; confirm whether every offer row carries one.
            ratingColumn = data.find('p', attrs={'class': 'a-spacing-small'}).find('a').contents[0]
            print "Rating info ", ratingColumn
            print "***********************"
            # Deliberately return after the first row: offers are listed
            # cheapest-first, so the first landed price is the best one.
            return unitCost + shippingCost, store

    def findStoreFront(self, storeUrl):
        """Fetch the seller's storefront page and return its <title> text;
        best-effort -- returns "" on any fetch failure."""
        try:
            response_data = fetchResponseUsingProxy(storeUrl, livePricing=None)
        except Exception:
            return ""
        soup = strip_tags(response_data, invalid_tags)
        response_data = None
        return soup.title.string
            

if __name__ == '__main__':
    scraper = AmazonScraper()
    print scraper.read('http://www.amazon.in/gp/offer-listing/B006PB44NM/ref=olp_sort_ps',True)