├── .gitignore
├── buff.json
├── executeOrder.py
├── getCookies.py
├── readme.md
└── scrapeBuff.py

/.gitignore:
--------------------------------------------------------------------------------
# ignore cookies
cookies.pkl
# ignore incomplete files
gatherItems.py

--------------------------------------------------------------------------------
/buff.json:
--------------------------------------------------------------------------------
{
    "scraper1": [
        "https://buff.163.com/goods/900503",
        "https://buff.163.com/goods/900632",
        "https://buff.163.com/goods/900502",
        "https://buff.163.com/goods/900610",
        "https://buff.163.com/goods/900527",
        "https://buff.163.com/goods/900570",
        "https://buff.163.com/goods/900617",
        "https://buff.163.com/goods/900489",
        "https://buff.163.com/goods/900606",
        "https://buff.163.com/goods/900491"
    ],
    "scraper2": [
        "https://buff.163.com/goods/857543",
        "https://buff.163.com/goods/857579",
        "https://buff.163.com/goods/857583",
        "https://buff.163.com/goods/857578",
        "https://buff.163.com/goods/857588",
        "https://buff.163.com/goods/857570",
        "https://buff.163.com/goods/857522",
        "https://buff.163.com/goods/857572",
        "https://buff.163.com/goods/857579",
        "https://buff.163.com/goods/857571"
    ],
    "scraper3": [
        "https://buff.163.com/goods/36420",
        "https://buff.163.com/goods/759227",
        "https://buff.163.com/goods/39491",
        "https://buff.163.com/goods/759178",
        "https://buff.163.com/goods/759255",
        "https://buff.163.com/goods/759296",
        "https://buff.163.com/goods/759228",
        "https://buff.163.com/goods/759378",
        "https://buff.163.com/goods/886983",
        "https://buff.163.com/goods/886628"
    ],
    "scraper4": [
        "https://buff.163.com/goods/45283",
        "https://buff.163.com/goods/886976",
        "https://buff.163.com/goods/45367",
        "https://buff.163.com/goods/781594",
        "https://buff.163.com/goods/781572",
        "https://buff.163.com/goods/45267",
        "https://buff.163.com/goods/763240",
        "https://buff.163.com/goods/781583",
        "https://buff.163.com/goods/763386",
        "https://buff.163.com/goods/857565"
    ],
    "scraper5": [
        "https://buff.163.com/goods/781605",
        "https://buff.163.com/goods/781612",
        "https://buff.163.com/goods/39801",
        "https://buff.163.com/goods/921461",
        "https://buff.163.com/goods/921554",
        "https://buff.163.com/goods/921428",
        "https://buff.163.com/goods/921526",
        "https://buff.163.com/goods/921430",
        "https://buff.163.com/goods/921564",
        "https://buff.163.com/goods/921437"
    ],
    "scraper6": [
        "https://buff.163.com/goods/921510",
        "https://buff.163.com/goods/921434",
        "https://buff.163.com/goods/921512",
        "https://buff.163.com/goods/921496",
        "https://buff.163.com/goods/921451",
        "https://buff.163.com/goods/921448",
        "https://buff.163.com/goods/921556",
        "https://buff.163.com/goods/900513",
        "https://buff.163.com/goods/900566",
        "https://buff.163.com/goods/900551"
    ],
    "scraper7": [
        "https://buff.163.com/goods/900611",
        "https://buff.163.com/goods/900522",
        "https://buff.163.com/goods/900530",
        "https://buff.163.com/goods/900586",
        "https://buff.163.com/goods/900544",
        "https://buff.163.com/goods/900467",
        "https://buff.163.com/goods/900560",
        "https://buff.163.com/goods/900574",
        "https://buff.163.com/goods/900487",
        "https://buff.163.com/goods/900472"
    ],
    "scraper8": [
        "https://buff.163.com/goods/900573",
        "https://buff.163.com/goods/886681",
        "https://buff.163.com/goods/886678",
        "https://buff.163.com/goods/886754",
        "https://buff.163.com/goods/886674",
        "https://buff.163.com/goods/886651",
        "https://buff.163.com/goods/886667",
        "https://buff.163.com/goods/857571",
        "https://buff.163.com/goods/857543",
        "https://buff.163.com/goods/857583"
    ],
    "scraper9": [
        "https://buff.163.com/goods/857572",
        "https://buff.163.com/goods/857522",
        "https://buff.163.com/goods/857578",
        "https://buff.163.com/goods/857588",
        "https://buff.163.com/goods/857570",
        "https://buff.163.com/goods/857573",
        "https://buff.163.com/goods/857579",
        "https://buff.163.com/goods/857563",
        "https://buff.163.com/goods/857582"
    ]
}

--------------------------------------------------------------------------------
/executeOrder.py:
--------------------------------------------------------------------------------
import json
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
import time
import pickle
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException

def purchase(driver, listing):
    # click the purchase link on the selected listing row (listing is zero-based; rows start at tr[2])
    driver.find_element(By.XPATH, '/html/body/div[7]/div/div[7]/table/tbody/tr[{}]/td[6]/a'.format(listing+2)).click()
    try:
        # wait for the first purchase pop-up and click through it
        WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[24]/div[2]/div[4]/a')))
        driver.find_element(By.XPATH, '/html/body/div[24]/div[2]/div[4]/a').click()
        print('success')
    except TimeoutException:
        print("Loading took too much time!")

    try:
        # wait for the second pop-up and click through it
        WebDriverWait(driver, 10).until(EC.presence_of_element_located((By.XPATH, '/html/body/div[26]/div[2]/div/div[1]/a')))
        driver.find_element(By.XPATH, '/html/body/div[26]/div[2]/div/div[1]/a').click()
        print('success')
    except TimeoutException:
        print("Loading took too much time!")

    time.sleep(5)

--------------------------------------------------------------------------------
/getCookies.py:
--------------------------------------------------------------------------------
import json
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.common.exceptions import NoSuchElementException
import time
import pickle
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException

options = webdriver.ChromeOptions()
driver = webdriver.Chrome(options=options)
driver.get("https://buff.163.com/goods/857578")

# Here, you must log in to Steam from the automated browser.
# Wait for the browser to exit and the cookies will be saved in the specified location.

time.sleep(120)  # wait for the manual login; once the sleep ends the cookies are saved
pickle.dump(driver.get_cookies(), open("cookies.pkl", "wb"))  # write the session cookies to disk

driver.quit()

--------------------------------------------------------------------------------
/readme.md:
--------------------------------------------------------------------------------
# Buff163 Bot

A Python script that scrapes and monitors item pages on buff.163.com and lets users purchase matching items automatically.

## Installation

1. Clone the repository
```bash
git clone https://github.com/RamenMode/buff163scraper.git
```

2. Use the package manager [pip](https://pip.pypa.io/en/stable/)/pip3 to install selenium

```bash
pip3 install selenium
```

3. Install [ChromeDriver](https://chromedriver.chromium.org/downloads), making sure to follow the version requirements associated with your Chrome browser listed on the website.

## Usage

Make sure you are in the 'buffbot' directory.

1. Obtain cookies for your Buff163 session
```bash
python3 getCookies.py
```
After the automated session loads, log in to your Steam account on Buff and wait until the browser closes automatically. *Do not share your cookies with anyone.* A quick way to sanity-check the saved cookies is sketched after this list.

2. Specify the item pages to scrape in buff.json, modifying the links as you wish, e.g. https://buff.163.com/goods/36436.

3. Run the script with three arguments: the first two give the inclusive range of scrapers to use and the third gives the maximum acceptable float. The example below uses the links of scraper1, scraper2, scraper3, and scraper4 in buff.json with a maximum float of 0.10.
```bash
python3 scrapeBuff.py 1 4 0.10
```
* Note that if any items fit the criteria you have listed, they _WILL_ be purchased by the script.
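Before starting the scraper, you can optionally confirm that `cookies.pkl` was written and is non-empty. This is a minimal sketch (an editorial addition, not a script shipped with the repository), assuming the default `cookies.pkl` path used by `getCookies.py`:

```python
import pickle

# report how many cookies getCookies.py captured
with open("cookies.pkl", "rb") as f:
    cookies = pickle.load(f)
print("{} cookies saved".format(len(cookies)))
```

If the count is zero or the file is missing, rerun `getCookies.py` and complete the Steam login within its two-minute window.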
## Contributing

Pull requests are welcome. For major changes, please open an issue first
to discuss what you would like to change.

Please make sure to update tests as appropriate.

## License

[MIT](https://choosealicense.com/licenses/mit/)

--------------------------------------------------------------------------------
/scrapeBuff.py:
--------------------------------------------------------------------------------
# Note: Buff163 does not publish its rate limits, but in testing, a total of 3 scrapers at a time works best
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import Select
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
from executeOrder import purchase
import json
import time
import re
import os
import threading
import pickle
import sys

# The notifier function (macOS only: shells out to osascript for a desktop notification)
def notify(title, text):
    os.system("""
              osascript -e 'display notification "{}" with title "{}"'
              """.format(text, title))

def obtainItems(request, driver, maximumFloat):
    # Scrape the wear values and prices of the first 10 listings on the given goods page
    # and purchase any listing whose float is below maximumFloat.
    driver.get(request)  # load the goods page
    cookies = pickle.load(open("cookies.pkl", "rb"))  # attach the saved Buff session cookies
    for cookie in cookies:
        driver.add_cookie(cookie)

    isNamed = True
    try:
        print(driver.find_element(By.XPATH, '/html/body/div[7]/div/div[1]/div[2]/div[1]/h1').text, ':\n')
    except NoSuchElementException:
        print('pass')
        isNamed = False
    for i in range(10):
        try:
            wear = driver.find_element(By.XPATH, '/html/body/div[6]/div/div[7]/table/tbody/tr[{}]/td[3]/div/div[1]/div[1]'.format(i+2))
            price = driver.find_element(By.XPATH, '/html/body/div[6]/div/div[7]/table/tbody/tr[{}]/td[5]/div[1]/strong'.format(i+2))  # consistent html behavior across different item links for CS:GO
        except NoSuchElementException:
            print("could not locate item")
            continue
        weartext = float(''.join(c for c in wear.text if c.isdigit() or c == '.'))
        print("Listing {}".format(i+1))
        print(price.text)
        print("Float:", weartext, "\n")
        print(threading.active_count())
        if weartext < maximumFloat:
            notify("Buff Scraper", "An item has been found that matches your criteria: {} {} \n{}"
                   .format(driver.find_element(By.XPATH, '/html/body/div[7]/div/div[1]/div[2]/div[1]/h1').text if isNamed else 'Unknown Name', "Listing {}".format(i+1), "Float: {}".format(weartext)))
            purchase(driver, i)
    driver.quit()  # close this page's browser before the thread opens the next link

class ScrapeThread(threading.Thread):
    def __init__(self, scrapernumber, maximumFloat):
        threading.Thread.__init__(self)
        self.scrapernumber = scrapernumber
        self.maximumFloat = maximumFloat
    def run(self):
        scrapeCount = 'scraper' + str(self.scrapernumber)
        while True:
            for link in data[scrapeCount]:
                options = webdriver.ChromeOptions()
                options.add_argument('--headless')
                driver = webdriver.Chrome(options=options)
                obtainItems(link, driver, self.maximumFloat)

def scrape(firstScraper, lastScraper, maximumFloat):
    threads = []

    for scrapeNum in range(firstScraper, lastScraper+1):
        t = ScrapeThread(scrapeNum, maximumFloat)
        t.start()
        threads.append(t)

    for t in threads:
        t.join()

f = open('./buff.json')
data = json.load(f)

if len(sys.argv) != 4:
    print('Please enter the correct number of arguments')
else:
    scrape(int(sys.argv[1]), int(sys.argv[2]), float(sys.argv[3]))

# Note: if you want an example of a function run, uncomment this:
# scrape(1, 4, 0.10)

# add readme.md
--------------------------------------------------------------------------------
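As an editorial aside (not a file in the repository): the last block of scrapeBuff.py wires the command line to buff.json, so `python3 scrapeBuff.py 1 4 0.10` starts one thread per key from scraper1 through scraper4 and treats 0.10 as the float ceiling. A minimal sketch of that lookup, assuming the buff.json layout shown earlier:

```python
import json

# sketch only: how the first two command-line arguments select keys from buff.json
with open("buff.json") as f:
    data = json.load(f)

first, last, max_float = 1, 4, 0.10  # equivalent to `python3 scrapeBuff.py 1 4 0.10`
for n in range(first, last + 1):
    for url in data["scraper{}".format(n)]:
        print("scraper{} polls {} (buy if float < {})".format(n, url, max_float))
```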