# -*- coding: utf-8 -*-
import json
import logging
import os
import re

import scrapy
from scrapy.shell import inspect_response  # kept for the commented-out debugging hook in parse()

logger = logging.getLogger(__name__)

class ContractSpider(scrapy.Spider):
    """Scrape verified contract source code from etherscan.io address pages."""

    name = "contracts"
    addressUrl = "https://etherscan.io/address/{}"
    allowed_domains = ["etherscan.io"]

    # Build start_urls from a JSON-lines dump of contract addresses that
    # lives in the same directory as this spider module.
    current_path = os.path.abspath(__file__)
    father_path = os.path.dirname(current_path)
    p = os.path.join(father_path, "contractAddress-20180922-100358.json")
    start_urls = []
    with open(p) as f:
        for line in f:
            start_urls.append(addressUrl.format(json.loads(line)["address"]))
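    # Each line of that file is expected to be a standalone JSON object with
    # at least an "address" key -- a hypothetical example line:
    #   {"address": "0x06012c8cf97bead5deae237070f9587f8e7a266d"}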

    custom_settings = {
        "AUTOTHROTTLE_ENABLED": True,
        "LOG_LEVEL": "INFO",
        "COOKIES_ENABLED": False,
    }

    # Pulls the address component out of an address/ URL; parse() below
    # extracts it with a plain split() instead.
    addressR = re.compile(r"address/(\S+)")
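    # For example (illustrative URL):
    #   addressR.search("https://etherscan.io/address/0xabc").group(1) == "0xabc"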

    def parse(self, response):
        # Etherscan's rate limiter answers with a normal page whose body
        # contains "Sorry, You have reached your maximum request limit for
        # this resource" (presumably served with HTTP 200, which is why the
        # stock RetryMiddleware, keyed on status codes, would not catch it).
        # Re-queue such requests at a lower priority and count the retries.
        if "maximum request limit" in response.text:
            # inspect_response(response, self)  # uncomment to debug in a shell
            request = response.request
            retry_times_etherscan = request.meta.get("retry_times_etherscan", 0)
            retryreq = request.copy()
            retryreq.dont_filter = True
            retryreq.meta["retry_times_etherscan"] = retry_times_etherscan + 1
            retryreq.priority = request.priority - 1
            logger.info(
                "Retrying %(request)s (failed %(retries)d times)",
                {"request": request, "retries": retry_times_etherscan + 1},
            )
            # parse() is a generator (it yields below), so the retry request
            # must be yielded too; a bare ``return retryreq`` would be dropped.
            yield retryreq
            return

        # The contract address is the tail of the URL; the name and source
        # code come from the address page's verified-contract panel.
        address = response.url.split("address/")[-1]
        name = response.xpath('//a[@data-placement="bottom"]/text()').extract_first()
        code = response.css("#editor::text").extract_first()
        yield {"name": name, "address": address, "code": code}