[feat] add 360search engine for searxng

Co-authored-by: Bnyro <bnyro@tutanota.com>
This commit is contained in:
Zhijie He 2025-02-23 13:35:23 +08:00 committed by Bnyro
parent 80f5fad16e
commit 71d1504e57
5 changed files with 159 additions and 1 deletions

View file

@@ -0,0 +1,67 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=invalid-name
"""360Search search engine for searxng"""
from urllib.parse import urlencode
from lxml import html
from searx.utils import extract_text
# Metadata
about = {
    "website": "https://www.so.com/",
    "wikidata_id": "Q10846064",
    "use_official_api": False,
    "require_api_key": False,
    "results": "HTML",  # results are scraped from the HTML result page, not an API
}
# Engine Configuration
categories = ["general"]
paging = True  # page number is passed via the "pn" query parameter (see request())
time_range_support = True
# Maps searxng time-range names to so.com "adv_t" filter values.
time_range_dict = {'day': 'd', 'week': 'w', 'month': 'm', 'year': 'y'}
# Base URL
base_url = "https://www.so.com"


def request(query, params):
    """Build the so.com search URL for *query*.

    Adds the optional "adv_t" time filter when the selected time range is
    one the site supports; an unknown or unset range simply omits it.
    """
    query_params = {
        "pn": params["pageno"],
        "q": query,
    }

    # Single lookup instead of the original duplicated .get() call.
    adv_t = time_range_dict.get(params['time_range'])
    if adv_t:
        query_params["adv_t"] = adv_t

    params["url"] = f"{base_url}/s?{urlencode(query_params)}"
    return params
def response(resp):
    """Extract title/url/content results from the 360Search HTML page."""
    dom = html.fromstring(resp.text)
    results = []

    anchor_xpath = './/h3[contains(@class, "res-title")]/a'
    for item in dom.xpath('//li[contains(@class, "res-list")]'):
        title = extract_text(item.xpath(anchor_xpath))
        # Prefer the direct destination URL carried in data-mdurl; fall
        # back to the anchor's href when it is absent.
        url = extract_text(item.xpath(anchor_xpath + '/@data-mdurl')) or extract_text(
            item.xpath(anchor_xpath + '/@href')
        )
        content = extract_text(item.xpath('.//p[@class="res-desc"]')) or extract_text(
            item.xpath('.//span[@class="res-list-summary"]')
        )

        # Only items with both a title and a target URL are usable results.
        if title and url:
            results.append({"title": title, "url": url, "content": content})

    return results

View file

@@ -0,0 +1,64 @@
# SPDX-License-Identifier: AGPL-3.0-or-later
# pylint: disable=invalid-name
"""360Search-Videos: A search engine for retrieving videos from 360Search."""
from urllib.parse import urlencode
from datetime import datetime
from searx.exceptions import SearxEngineAPIException
from searx.utils import html_to_text
# Engine metadata.
about = {
    "website": "https://tv.360kan.com/",
    "use_official_api": False,  # scrapes the site's public JSON endpoint
    "require_api_key": False,
    "results": "JSON",
}
paging = True
# Page size; used for both the "count" parameter and the offset step.
results_per_page = 10
categories = ["videos"]
base_url = "https://tv.360kan.com"


def request(query, params):
    """Build the tv.360kan.com video-list API URL for *query*.

    searxng page numbers are 1-based while the API "start" parameter is a
    0-based result offset, so page 1 must request start=0 — the original
    ``pageno * 10`` skipped the first page of results.  Uses the declared
    ``results_per_page`` constant instead of a hard-coded 10.
    """
    offset = (params["pageno"] - 1) * results_per_page
    query_params = {"count": results_per_page, "q": query, "start": offset}

    params["url"] = f"{base_url}/v1/video/list?{urlencode(query_params)}"
    return params
def response(resp):
    """Parse the JSON video-list response into searxng video results.

    Raises:
        SearxEngineAPIException: when the body is not valid JSON or lacks
            the expected ``data.result`` structure.
    """
    try:
        data = resp.json()
    except Exception as e:
        raise SearxEngineAPIException(f"Invalid response: {e}") from e

    results = []

    if "data" not in data or "result" not in data["data"]:
        raise SearxEngineAPIException("Invalid response")

    for entry in data["data"]["result"]:
        # Entries without a title or playable URL cannot be rendered.
        if not entry.get("title") or not entry.get("play_url"):
            continue

        published_date = None
        if entry.get("publish_time"):
            try:
                # NOTE(review): assumes publish_time is epoch seconds; the
                # conversion yields a naive local-time datetime — confirm
                # against the API before relying on exact timestamps.
                published_date = datetime.fromtimestamp(int(entry["publish_time"]))
            except (ValueError, TypeError):
                published_date = None

        results.append(
            {
                'url': entry["play_url"],
                'title': html_to_text(entry["title"]),
                # .get(): description/cover_img are not guaranteed fields —
                # direct indexing raised KeyError on sparse entries.
                'content': html_to_text(entry.get("description") or ""),
                'template': 'videos.html',
                'publishedDate': published_date,
                'thumbnail': entry.get("cover_img", ""),
            }
        )

    return results