Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

WIP: refactor #460

Merged
merged 43 commits into from
Jan 14, 2025
Merged
Changes from 1 commit
Commits
Show all changes
43 commits
Select commit Hold shift + click to select a range
d42e55b
step1
BennyThink Sep 21, 2024
57b159c
remove
BennyThink Nov 4, 2024
eedb8e3
add pdm
BennyThink Nov 25, 2024
bd47128
going
BennyThink Nov 25, 2024
5cb76e3
going
BennyThink Nov 25, 2024
48c3304
going
BennyThink Nov 26, 2024
4f02bc2
Improved `extract_code_from_instagram_url` function
SanujaNS Nov 27, 2024
e0f095d
Remove terabox download function
SanujaNS Nov 27, 2024
f10b07f
Remove unused `parse_cookie_file` function
SanujaNS Nov 27, 2024
cf0c2b8
define abstract class
BennyThink Nov 27, 2024
991a036
basic upload done?
BennyThink Nov 28, 2024
580c499
db operation
BennyThink Nov 30, 2024
072f1d1
db operation
BennyThink Nov 30, 2024
deb7683
db operation
BennyThink Nov 30, 2024
9ff9b20
db operation
BennyThink Nov 30, 2024
4c1c710
db operation
BennyThink Nov 30, 2024
0f8df41
fix name_pattern regex of `extract_url_and_name` function
SanujaNS Dec 1, 2024
83a8c73
pre-commit
BennyThink Dec 1, 2024
ef0beca
pre-push
BennyThink Dec 1, 2024
f7bd4fe
pre-push
BennyThink Dec 1, 2024
f8dec38
Refactor: Change single quotes to double quotes for name_pattern regex
SanujaNS Dec 1, 2024
e484098
add more methods
BennyThink Dec 3, 2024
6ba0cc2
rename
BennyThink Dec 3, 2024
5346fb6
use self._bot_msg
BennyThink Dec 3, 2024
38ed4c4
download done?
BennyThink Dec 3, 2024
3b1b9c7
update deps
BennyThink Dec 4, 2024
de04b72
add entrance
BennyThink Jan 11, 2025
f5beefd
runnable
BennyThink Jan 11, 2025
611d39a
fixes
BennyThink Jan 11, 2025
1da0edc
fix
BennyThink Jan 12, 2025
14e3368
record usage fix
BennyThink Jan 12, 2025
1546ed3
add cookies
BennyThink Jan 14, 2025
897211f
fix settings
BennyThink Jan 14, 2025
233193f
hint
BennyThink Jan 14, 2025
f4b49af
hint
BennyThink Jan 14, 2025
bd17efa
reset
BennyThink Jan 14, 2025
ae84522
rename
BennyThink Jan 14, 2025
86e896c
formats fix
BennyThink Jan 14, 2025
da8d45f
update README.md
BennyThink Jan 14, 2025
4f9df5a
update
BennyThink Jan 14, 2025
e909640
Merged master into your-branch-name, resolved conflicts in favor of y…
BennyThink Jan 14, 2025
7f2f16e
rename
BennyThink Jan 14, 2025
e30324b
rename
BennyThink Jan 14, 2025
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
88 changes: 0 additions & 88 deletions ytdlbot/engine/special.py
SanujaNS marked this conversation as resolved.
Show resolved Hide resolved
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,6 @@
import requests
from bs4 import BeautifulSoup

from utils import parse_cookie_file


def special_download_entrance(url: str, tempdir: str, bm, **kwargs) -> list:
"""Specific link downloader"""
Expand All @@ -28,22 +26,6 @@ def special_download_entrance(url: str, tempdir: str, bm, **kwargs) -> list:
return pixeldrain(url, tempdir, bm, **kwargs)
elif "krakenfiles.com" in domain:
return krakenfiles(url, tempdir, bm, **kwargs)
elif any(
x in domain
for x in [
"terabox.com",
"nephobox.com",
"4funbox.com",
"mirrobox.com",
"momerybox.com",
"teraboxapp.com",
"1024tera.com",
"terabox.app",
"gibibox.com",
"goaibox.com",
]
):
return terabox(url, tempdir, bm, **kwargs)
else:
raise ValueError(f"Invalid URL: No specific link function found for {url}")

Expand Down Expand Up @@ -79,73 +61,3 @@ def krakenfiles(url: str, tempdir: str, bm, **kwargs):
json_data = response.json()
url = json_data["url"]
return sp_ytdl_download(url, tempdir, bm, **kwargs)


def find_between(s, start, end):
    """Return the substring of *s* between the first *start* marker and the
    next *end* marker after it.

    Raises IndexError if *start* does not occur in *s* (same contract as the
    original split-based implementation).
    """
    after_start = s.split(start)[1]
    before_end = after_start.split(end)[0]
    return before_end


def terabox(url: str, tempdir: str, bm, **kwargs):
    """Resolve a Terabox share link to a direct download URL, then delegate
    the actual download to ``sp_ytdl_download``.

    Flow: load the share page with an authenticated session, scrape the
    anti-bot tokens (jsToken, dp-logid, bdstoken) out of the page source,
    query the ``share/list`` API for the first file in the share, and pick a
    direct link based on file size.

    :param url: Terabox (or mirror-domain) share URL.
    :param tempdir: directory the download should be written into.
    :param bm: bot message handle, forwarded to ``sp_ytdl_download``.
    :param kwargs: extra options forwarded to ``sp_ytdl_download``.
    :return: whatever ``sp_ytdl_download`` returns.
    """
    # Authentication: cookies are read from a "terabox.txt" file that must sit
    # next to this module; parse_cookie_file presumably parses a
    # Netscape-format cookie export — TODO confirm its expected format.
    cookies_file = os.path.join(os.path.dirname(os.path.realpath(__file__)), "terabox.txt")
    cookies = parse_cookie_file(cookies_file)

    # Browser-like headers; the Host header pins requests to www.terabox.app,
    # NOTE(review): this assumes the session is only used against that host.
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7",
        "Accept-Encoding": "gzip, deflate, br",
        "Accept-Language": "en-US,en;q=0.9,hi;q=0.8",
        "Connection": "keep-alive",
        "DNT": "1",
        "Host": "www.terabox.app",
        "Sec-Fetch-Dest": "document",
        "Sec-Fetch-Mode": "navigate",
        "Sec-Fetch-Site": "none",
        "Sec-Fetch-User": "?1",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36",
        "sec-ch-ua": "'Not A(Brand';v='99', 'Google Chrome';v='121', 'Chromium';v='121'",
        "sec-ch-ua-mobile": "?0",
        "sec-ch-ua-platform": "'Windows'",
    }

    session = requests.Session()
    session.headers.update(headers)
    session.cookies.update(cookies)
    # First GET follows the share link's redirects; the final URL carries the
    # short-URL id ("surl") as a query parameter.
    temp_req = session.get(url)
    request_url = urlparse(temp_req.url)
    surl = parse_qs(request_url.query).get("surl")
    # Second GET fetches the resolved page again to scrape its HTML/JS.
    # NOTE(review): this looks redundant with the request above — confirm
    # whether temp_req.text could be used directly.
    req = session.get(temp_req.url)
    respo = req.text
    # Tokens are embedded in the page source:
    #   jsToken sits URL-encoded inside fn%28%22...%22%29, i.e. fn("...").
    js_token = find_between(respo, "fn%28%22", "%22%29")
    logid = find_between(respo, "dp-logid=", "&")
    bdstoken = find_between(respo, 'bdstoken":"', '"')

    # Query parameters for the share/list API; magic values (app_id, channel,
    # clienttype) mimic the official web client. The trailing comma in
    # "root": "1," looks odd but is what the API is sent — TODO confirm
    # whether it is intentional.
    params = {
        "app_id": "250528",
        "web": "1",
        "channel": "dubox",
        "clienttype": "0",
        "jsToken": js_token,
        "dp-logid": logid,
        "page": "1",
        "num": "20",
        "by": "name",
        "order": "asc",
        "site_referer": temp_req.url,
        "shorturl": surl,
        "root": "1,",
    }

    # Only the first file of the share listing is downloaded.
    req2 = session.get("https://www.terabox.app/share/list", params=params)
    response_data2 = req2.json()
    file_name = response_data2["list"][0]["server_filename"]
    sizebytes = int(response_data2["list"][0]["size"])
    # 48 MiB threshold: large files get the dlink rewritten to the
    # d3.terabox.app host, small files follow the dlink's redirect via HEAD
    # and use the Location header — presumably to work around limits on the
    # default download host; TODO confirm the reason for the split.
    if sizebytes > 48 * 1024 * 1024:
        direct_link = response_data2["list"][0]["dlink"]
        url = direct_link.replace("d.terabox.app", "d3.terabox.app")
    else:
        direct_link_response = session.head(response_data2["list"][0]["dlink"])
        direct_link_response_headers = direct_link_response.headers
        direct_link = direct_link_response_headers["Location"]
        url = direct_link

    return sp_ytdl_download(url, tempdir, bm, filename=file_name, **kwargs)