Merge pull request #526 from ukwt/anime

Add a few search engines
Adam Tauber 2016-04-14 10:59:31 +02:00
commit 85c0351dca
13 changed files with 936 additions and 7 deletions

searx/engines/fdroid.py  (new file, 53 lines)

@@ -0,0 +1,53 @@
"""
F-Droid (a repository of FOSS applications for Android)
@website https://f-droid.org/
@provide-api no
@using-api no
@results HTML
@stable no (HTML can change)
@parse url, title, content
"""
from cgi import escape
from urllib import urlencode
from searx.engines.xpath import extract_text
from lxml import html
# engine dependent config
categories = ['files']
paging = True
# search-url
base_url = 'https://f-droid.org/'
search_url = base_url + 'repository/browse/?{query}'
# do search-request
def request(query, params):
query = urlencode({'fdfilter': query,
'fdpage': params['pageno']})
params['url'] = search_url.format(query=query)
return params
# get response from search-request
def response(resp):
results = []
dom = html.fromstring(resp.text)
for app in dom.xpath('//div[@id="appheader"]'):
url = app.xpath('./ancestor::a/@href')[0]
title = app.xpath('./p/span/text()')[0]
img_src = app.xpath('.//img/@src')[0]
content = extract_text(app.xpath('./p')[0])
content = escape(content.replace(title, '', 1).strip())
results.append({'url': url,
'title': title,
'content': content,
'img_src': img_src})
return results
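
A quick smoke test for the request/response pair above. FakeResponse and the sample markup are invented stand-ins for a real HTTP response (not part of this commit), assuming f-droid.org still wraps each result's appheader div in a link:

from searx.engines import fdroid

params = fdroid.request('terminal', {'pageno': 1})
# params['url'] is now the f-droid browse URL with fdfilter/fdpage set


class FakeResponse(object):
    # minimal markup mirroring the xpath expressions used in response()
    text = '''<a href="/repository/browse/?fdid=org.example.app">
                  <div id="appheader">
                      <img src="/icons/app.png"/>
                      <p><span>Example App</span> Example App - a demo entry</p>
                  </div>
              </a>'''

for result in fdroid.response(FakeResponse()):
    print(result['title'])  # Example App
    print(result['url'])    # /repository/browse/?fdid=org.example.app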

searx/engines/google.py

@@ -46,11 +46,11 @@ country_to_hostname = {
     'NZ': 'www.google.co.nz',  # New Zealand
     'PH': 'www.google.com.ph',  # Philippines
     'SG': 'www.google.com.sg',  # Singapore
-    # 'US': 'www.google.us',  # United State, redirect to .com
+    # 'US': 'www.google.us',  # United States, redirect to .com
     'ZA': 'www.google.co.za',  # South Africa
     'AR': 'www.google.com.ar',  # Argentina
     'CL': 'www.google.cl',  # Chile
-    'ES': 'www.google.es',  # Span
+    'ES': 'www.google.es',  # Spain
     'MX': 'www.google.com.mx',  # Mexico
     'EE': 'www.google.ee',  # Estonia
     'FI': 'www.google.fi',  # Finland
@@ -61,7 +61,7 @@ country_to_hostname = {
     'HU': 'www.google.hu',  # Hungary
     'IT': 'www.google.it',  # Italy
     'JP': 'www.google.co.jp',  # Japan
-    'KR': 'www.google.co.kr',  # South Korean
+    'KR': 'www.google.co.kr',  # South Korea
     'LT': 'www.google.lt',  # Lithuania
     'LV': 'www.google.lv',  # Latvia
     'NO': 'www.google.no',  # Norway
@@ -76,9 +76,9 @@ country_to_hostname = {
     'SE': 'www.google.se',  # Sweden
     'TH': 'www.google.co.th',  # Thailand
     'TR': 'www.google.com.tr',  # Turkey
-    'UA': 'www.google.com.ua',  # Ikraine
-    # 'CN': 'www.google.cn',  # China, only from china ?
-    'HK': 'www.google.com.hk',  # Hong kong
+    'UA': 'www.google.com.ua',  # Ukraine
+    # 'CN': 'www.google.cn',  # China, only from China ?
+    'HK': 'www.google.com.hk',  # Hong Kong
     'TW': 'www.google.com.tw'  # Taiwan
 }

searx/engines/nyaa.py  (new file, 119 lines)

@@ -0,0 +1,119 @@
"""
Nyaa.se (Anime Bittorrent tracker)
@website http://www.nyaa.se/
@provide-api no
@using-api no
@results HTML
@stable no (HTML can change)
@parse url, title, content, seed, leech, torrentfile
"""
from cgi import escape
from urllib import urlencode
from lxml import html
from searx.engines.xpath import extract_text
# engine dependent config
categories = ['files', 'images', 'videos', 'music']
paging = True
# search-url
base_url = 'http://www.nyaa.se/'
search_url = base_url + '?page=search&{query}&offset={offset}'
# xpath queries
xpath_results = '//table[@class="tlist"]//tr[contains(@class, "tlistrow")]'
xpath_category = './/td[@class="tlisticon"]/a'
xpath_title = './/td[@class="tlistname"]/a'
xpath_torrent_file = './/td[@class="tlistdownload"]/a'
xpath_filesize = './/td[@class="tlistsize"]/text()'
xpath_seeds = './/td[@class="tlistsn"]/text()'
xpath_leeches = './/td[@class="tlistln"]/text()'
xpath_downloads = './/td[@class="tlistdn"]/text()'
# convert a variable to integer or return 0 if it's not a number
def int_or_zero(num):
if isinstance(num, list):
if len(num) < 1:
return 0
num = num[0]
if num.isdigit():
return int(num)
return 0
# get multiplier to convert torrent size to bytes
def get_filesize_mul(suffix):
return {
'KB': 1024,
'MB': 1024 ** 2,
'GB': 1024 ** 3,
'TB': 1024 ** 4,
'KIB': 1024,
'MIB': 1024 ** 2,
'GIB': 1024 ** 3,
'TIB': 1024 ** 4
}[str(suffix).upper()]
# do search-request
def request(query, params):
query = urlencode({'term': query})
params['url'] = search_url.format(query=query, offset=params['pageno'])
return params
# get response from search-request
def response(resp):
results = []
dom = html.fromstring(resp.text)
for result in dom.xpath(xpath_results):
# category in which our torrent belongs
category = result.xpath(xpath_category)[0].attrib.get('title')
# torrent title
page_a = result.xpath(xpath_title)[0]
title = escape(extract_text(page_a))
# link to the page
href = page_a.attrib.get('href')
# link to the torrent file
torrent_link = result.xpath(xpath_torrent_file)[0].attrib.get('href')
# torrent size
try:
file_size, suffix = result.xpath(xpath_filesize)[0].split(' ')
file_size = int(float(file_size) * get_filesize_mul(suffix))
except Exception as e:
file_size = None
# seed count
seed = int_or_zero(result.xpath(xpath_seeds))
# leech count
leech = int_or_zero(result.xpath(xpath_leeches))
# torrent downloads count
downloads = int_or_zero(result.xpath(xpath_downloads))
# content string contains all information not included into template
content = 'Category: "{category}". Downloaded {downloads} times.'
content = content.format(category=category, downloads=downloads)
content = escape(content)
results.append({'url': href,
'title': title,
'content': content,
'seed': seed,
'leech': leech,
'filesize': file_size,
'torrentfile': torrent_link,
'template': 'torrent.html'})
return results
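
Since int_or_zero and get_filesize_mul are imported by the other engines in this PR, here is a short illustration of their behaviour (the inputs are made-up examples):

from searx.engines.nyaa import int_or_zero, get_filesize_mul

int_or_zero('42')    # -> 42
int_or_zero(['7'])   # -> 7   (first element of an xpath text() result list)
int_or_zero([])      # -> 0   (empty xpath result)
int_or_zero('n/a')   # -> 0   (not a number)

get_filesize_mul('MB')   # -> 1048576
get_filesize_mul('GiB')  # -> 1073741824 (the suffix is upper-cased first)
# unknown suffixes raise KeyError, which is why callers wrap the
# size calculation in try/except:
int(float('1.2') * get_filesize_mul('GB'))  # -> 1288490188 bytes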

searx/engines/reddit.py  (new file, 77 lines)

@@ -0,0 +1,77 @@
"""
Reddit
@website https://www.reddit.com/
@provide-api yes (https://www.reddit.com/dev/api)
@using-api yes
@results JSON
@stable yes
@parse url, title, content, thumbnail, publishedDate
"""
import json
from cgi import escape
from urllib import urlencode
from urlparse import urlparse
from datetime import datetime
# engine dependent config
categories = ['general', 'images', 'news', 'social media']
page_size = 25
# search-url
search_url = 'https://www.reddit.com/search.json?{query}'
# do search-request
def request(query, params):
query = urlencode({'q': query,
'limit': page_size})
params['url'] = search_url.format(query=query)
return params
# get response from search-request
def response(resp):
img_results = []
text_results = []
search_results = json.loads(resp.text)
# return empty array if there are no results
if 'data' not in search_results:
return []
posts = search_results.get('data', {}).get('children', [])
# process results
for post in posts:
data = post['data']
# extract post information
params = {
'url': data['url'],
'title': data['title']
}
# if thumbnail field contains a valid URL, we need to change template
thumbnail = data['thumbnail']
url_info = urlparse(thumbnail)
# netloc & path
if url_info[1] != '' and url_info[2] != '':
params['thumbnail_src'] = thumbnail
params['template'] = 'images.html'
img_results.append(params)
else:
created = datetime.fromtimestamp(data['created_utc'])
content = escape(data['selftext'])
if len(content) > 500:
content = content[:500] + '...'
params['content'] = content
params['publishedDate'] = created
text_results.append(params)
# show images first and text results second
return img_results + text_results
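
The netloc/path check above is what separates a real thumbnail URL from reddit's placeholder values such as 'self' or 'default'; a sketch with made-up inputs:

from urlparse import urlparse

for thumbnail in ('https://b.thumbs.redditmedia.com/abc.jpg', 'self', ''):
    url_info = urlparse(thumbnail)
    # url_info[1] is the netloc, url_info[2] is the path
    if url_info[1] != '' and url_info[2] != '':
        print(thumbnail + ' -> rendered with images.html')
    else:
        print(repr(thumbnail) + ' -> rendered as a text result')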

searx/engines/tokyo_toshokan.py  (new file, 102 lines)

@@ -0,0 +1,102 @@
"""
Tokyo Toshokan (A BitTorrent Library for Japanese Media)
@website https://www.tokyotosho.info/
@provide-api no
@using-api no
@results HTML
@stable no (HTML can change)
@parse url, title, publishedDate, seed, leech,
filesize, magnetlink, content
"""
import re
from cgi import escape
from urllib import urlencode
from lxml import html
from searx.engines.xpath import extract_text
from datetime import datetime
from searx.engines.nyaa import int_or_zero, get_filesize_mul
# engine dependent config
categories = ['files', 'videos', 'music']
paging = True
# search-url
base_url = 'https://www.tokyotosho.info/'
search_url = base_url + 'search.php?{query}'
# do search-request
def request(query, params):
query = urlencode({'page': params['pageno'],
'terms': query})
params['url'] = search_url.format(query=query)
return params
# get response from search-request
def response(resp):
results = []
dom = html.fromstring(resp.text)
rows = dom.xpath('//table[@class="listing"]//tr[contains(@class, "category_0")]')
# check if there are no results or page layout was changed so we cannot parse it
# currently there are two rows for each result, so total count must be even
if len(rows) == 0 or len(rows) % 2 != 0:
return []
# regular expression for parsing torrent size strings
size_re = re.compile('Size:\s*([\d.]+)(TB|GB|MB|B)', re.IGNORECASE)
# processing the results, two rows at a time
for i in xrange(0, len(rows), 2):
# parse the first row
name_row = rows[i]
links = name_row.xpath('./td[@class="desc-top"]/a')
params = {
'template': 'torrent.html',
'url': links[-1].attrib.get('href'),
'title': extract_text(links[-1])
}
# I have not yet seen any torrents without magnet links, but
# it's better to be prepared to stumble upon one some day
if len(links) == 2:
magnet = links[0].attrib.get('href')
if magnet.startswith('magnet'):
# okay, we have a valid magnet link, let's add it to the result
params['magnetlink'] = magnet
# no more info in the first row, start parsing the second one
info_row = rows[i + 1]
desc = extract_text(info_row.xpath('./td[@class="desc-bot"]')[0])
for item in desc.split('|'):
item = item.strip()
if item.startswith('Size:'):
try:
# ('1.228', 'GB')
groups = size_re.match(item).groups()
multiplier = get_filesize_mul(groups[1])
params['filesize'] = int(multiplier * float(groups[0]))
except Exception as e:
pass
elif item.startswith('Date:'):
try:
# Date: 2016-02-21 21:44 UTC
date = datetime.strptime(item, 'Date: %Y-%m-%d %H:%M UTC')
params['publishedDate'] = date
except Exception as e:
pass
elif item.startswith('Comment:'):
params['content'] = item
stats = info_row.xpath('./td[@class="stats"]/span')
# has the layout not changed yet?
if len(stats) == 3:
params['seed'] = int_or_zero(extract_text(stats[0]))
params['leech'] = int_or_zero(extract_text(stats[1]))
results.append(params)
return results
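
The '|'-separated description row drives most of the parsing above; a standalone sketch of the size and date branches, using an invented desc string:

import re
from datetime import datetime

size_re = re.compile('Size:\s*([\d.]+)(TB|GB|MB|B)', re.IGNORECASE)
desc = 'Comment: English subs | Size: 1.228GB | Date: 2016-02-21 21:44 UTC'

for item in desc.split('|'):
    item = item.strip()
    if item.startswith('Size:'):
        value, suffix = size_re.match(item).groups()  # ('1.228', 'GB')
        print(int(float(value) * 1024 ** 3))          # 1318554959 bytes
    elif item.startswith('Date:'):
        print(datetime.strptime(item, 'Date: %Y-%m-%d %H:%M UTC'))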

searx/engines/torrentz.py  (new file, 93 lines)

@@ -0,0 +1,93 @@
"""
Torrentz.eu (BitTorrent meta-search engine)
@website https://torrentz.eu/
@provide-api no
@using-api no
@results HTML
@stable no (HTML can change, although unlikely,
see https://torrentz.eu/torrentz.btsearch)
@parse url, title, publishedDate, seed, leech, filesize, magnetlink
"""
import re
from cgi import escape
from urllib import urlencode
from lxml import html
from searx.engines.xpath import extract_text
from datetime import datetime
from searx.engines.nyaa import int_or_zero, get_filesize_mul
# engine dependent config
categories = ['files', 'videos', 'music']
paging = True
# search-url
# https://torrentz.eu/search?f=EXAMPLE&p=6
base_url = 'https://torrentz.eu/'
search_url = base_url + 'search?{query}'
# do search-request
def request(query, params):
page = params['pageno'] - 1
query = urlencode({'q': query, 'p': page})
params['url'] = search_url.format(query=query)
return params
# get response from search-request
def response(resp):
results = []
dom = html.fromstring(resp.text)
for result in dom.xpath('//div[@class="results"]/dl'):
name_cell = result.xpath('./dt')[0]
title = extract_text(name_cell)
# skip rows that do not contain a link to a torrent
links = name_cell.xpath('./a')
if len(links) != 1:
continue
# extract url and remove a slash in the beginning
link = links[0].attrib.get('href').lstrip('/')
seed = result.xpath('./dd/span[@class="u"]/text()')[0].replace(',', '')
leech = result.xpath('./dd/span[@class="d"]/text()')[0].replace(',', '')
params = {
'url': base_url + link,
'title': title,
'seed': int_or_zero(seed),
'leech': int_or_zero(leech),
'template': 'torrent.html'
}
# let's try to calculate the torrent size
try:
size_str = result.xpath('./dd/span[@class="s"]/text()')[0]
size, suffix = size_str.split()
params['filesize'] = int(size) * get_filesize_mul(suffix)
except Exception as e:
pass
# does our link contain a valid SHA1 sum?
if re.compile('[0-9a-fA-F]{40}').match(link):
# add a magnet link to the result
params['magnetlink'] = 'magnet:?xt=urn:btih:' + link
# extract and convert creation date
try:
date_str = result.xpath('./dd/span[@class="a"]/span')[0].attrib.get('title')
# Fri, 25 Mar 2016 16:29:01
date = datetime.strptime(date_str, '%a, %d %b %Y %H:%M:%S')
params['publishedDate'] = date
except Exception as e:
pass
results.append(params)
return results
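
On Torrentz the result path is the torrent's hex-encoded info hash, which is why a bare regex match is enough to build a magnet link; a sketch with a made-up hash:

import re

link = 'a94a8fe5ccb19ba61c4c0873d391e987982fbbd3'  # invented 40-char hex value

if re.compile('[0-9a-fA-F]{40}').match(link):
    print('magnet:?xt=urn:btih:' + link)
    # magnet:?xt=urn:btih:a94a8fe5ccb19ba61c4c0873d391e987982fbbd3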

searx/engines/xpath.py

@@ -11,6 +11,14 @@ title_xpath = None
 suggestion_xpath = ''
 results_xpath = ''

+# parameters for engines with paging support
+#
+# number of results on each page
+# (only needed if the site requires not a page number, but an offset)
+page_size = 1
+# number of the first page (usually 0 or 1)
+first_page_num = 1
+
 '''
 if xpath_results is list, extract the text from each result and concat the list
@@ -76,8 +84,14 @@ def normalize_url(url):
 def request(query, params):
     query = urlencode({'q': query})[2:]
-    params['url'] = search_url.format(query=query)
+
+    fp = {'query': query}
+    if paging and search_url.find('{pageno}') >= 0:
+        fp['pageno'] = (params['pageno'] + first_page_num - 1) * page_size
+
+    params['url'] = search_url.format(**fp)
     params['query'] = query
+
     return params
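
With the defaults (page_size = 1, first_page_num = 1) the added formula passes searx's page number straight through; an offset-based site would override both values in its settings.yml entry. An arithmetic sketch with illustrative values:

page_size = 20      # site returns 20 results per request (illustrative)
first_page_num = 0  # site counts pages from 0 (illustrative)

for pageno in (1, 2, 3):  # searx-internal page numbers start at 1
    print((pageno + first_page_num - 1) * page_size)  # 0, 20, 40

# with the defaults page_size=1, first_page_num=1 the offset is
# simply pageno: (pageno + 1 - 1) * 1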