deep_web_crawler.py
import requests
from bs4 import BeautifulSoup

# Note: the socks5h:// proxy scheme requires the requests[socks] extra (PySocks).


def crawl_deep_web(query):
    """Crawl a deep web search engine for results related to the query."""
    url = "http://deepweblink.onion/search"  # Example deep web search engine URL (placeholder)
    params = {'q': query}
    headers = {'User-Agent': 'Mozilla/5.0'}
    # Route the request through a local Tor SOCKS proxy; socks5h resolves the .onion host via Tor.
    proxies = {'http': 'socks5h://localhost:9050', 'https': 'socks5h://localhost:9050'}
    response = requests.get(url, params=params, headers=headers, proxies=proxies)
    response.raise_for_status()

    soup = BeautifulSoup(response.text, 'html.parser')
    results = []
    for item in soup.find_all('div', class_='result'):
        title = item.find('h2')
        link = item.find('a')
        description = item.find('p')
        if not (title and link and link.get('href')):
            continue  # Skip malformed result blocks
        results.append({
            'title': title.text.strip(),
            'link': link['href'],
            'description': description.text.strip() if description else '',
        })
    return results
# results = crawl_deep_web("missing person John Doe")
# for result in results:
#     print(result)
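
# A minimal runnable sketch of the usage above. Assumptions (not stated in the
# original): a Tor client is listening on localhost:9050, the requests[socks]
# extra is installed, and the placeholder .onion URL is replaced with a real one.
if __name__ == '__main__':
    try:
        for result in crawl_deep_web("missing person John Doe"):
            print(f"{result['title']} -> {result['link']}")
    except requests.RequestException as exc:
        print(f"Request failed (is the Tor proxy running on port 9050?): {exc}")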