import time

import requests
from bs4 import BeautifulSoup
from flask import Flask, request, jsonify, Response, stream_with_context
from flask_cors import CORS

app = Flask(__name__)
CORS(app)
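
# GridLock search API: a small Flask service exposing three endpoints:
#   /search               - web search with provider fallback
#                           (Startpage -> DuckDuckGo -> Qwant -> Wikipedia)
#   /weather              - IP-geolocated current US weather (ipinfo.io + api.weather.gov)
#   /v1/chat/completions  - OpenAI-compatible proxy to NVIDIA's hosted inference API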

def scrape_startpage(query, n=10):
    """Scrape Startpage web results; returns a list of {title, url, desc} dicts."""
    s = requests.Session()
    s.headers.update({
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7)',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate',
        'Connection': 'keep-alive'
    })
    try:
        time.sleep(1)  # crude rate limiting to stay polite
        r = s.get('https://www.startpage.com/sp/search',
                  params={'query': query, 'cat': 'web', 'pl': 'opensearch'},
                  timeout=10)
        r.raise_for_status()
        soup = BeautifulSoup(r.content, 'html.parser')
        results = []
        for c in soup.find_all('div', class_='result')[:n]:
            t = c.find('a', class_='result-title')
            if not t:
                continue
            d = c.find('p', class_='result-description') or c.find('span', class_='result-description')
            results.append({
                'title': t.get_text(strip=True),
                'url': t.get('href'),
                'desc': d.get_text(strip=True) if d else ''
            })
        return results
    except Exception:
        # Any failure (network, parsing, layout change) falls through to the next provider.
        return []

def scrape_duckduckgo(query, n=10):
    """Scrape the DuckDuckGo HTML endpoint; returns a list of {title, url, desc} dicts."""
    try:
        time.sleep(1)  # crude rate limiting
        resp = requests.post("https://html.duckduckgo.com/html/",
                             data={'q': query},
                             headers={'User-Agent': 'Mozilla/5.0'},
                             timeout=10)
        resp.raise_for_status()
        soup = BeautifulSoup(resp.text, 'html.parser')
        results = []
        for a in soup.select('.result__a')[:n]:
            # Guard against a missing parent container instead of letting an
            # AttributeError abort the whole result set.
            parent = a.find_parent('div', class_='result')
            snippet_elem = parent.select_one('.result__snippet') if parent else None
            results.append({
                'title': a.get_text(strip=True),
                'url': a.get('href'),
                'desc': snippet_elem.get_text(strip=True) if snippet_elem else ''
            })
        return results
    except Exception:
        return []

def scrape_qwant(query, n=10):
    """Query the Qwant JSON search API; returns a list of {title, url, desc} dicts."""
    try:
        time.sleep(1)
        r = requests.get("https://api.qwant.com/api/search/web",
                         params={"q": query, "count": n, "t": "web"},
                         timeout=10)
        r.raise_for_status()
        data = r.json()
        results = []
        for item in data.get("data", {}).get("result", {}).get("items", []):
            results.append({'title': item.get('title', ''),
                            'url': item.get('url', ''),
                            'desc': item.get('desc', '')})
        return results
    except Exception:
        return []

def scrape_wikipedia(query, n=10):
    """Use the MediaWiki search API as the last-resort provider."""
    try:
        r = requests.get("https://en.wikipedia.org/w/api.php",
                         params={"action": "query", "list": "search",
                                 "format": "json", "srsearch": query, "srlimit": n},
                         timeout=10)
        r.raise_for_status()
        data = r.json()
        results = []
        for item in data['query']['search']:
            url = f"https://en.wikipedia.org/wiki/{item['title'].replace(' ', '_')}"
            # The API returns snippets with HTML highlight markup; strip it to plain text.
            desc = BeautifulSoup(item['snippet'], 'html.parser').get_text()
            results.append({'title': item['title'], 'url': url, 'desc': desc})
        return results
    except Exception:
        return []

def get_weather_from_ip(ip_address):
    """Geolocate an IP via ipinfo.io, then fetch the current NWS forecast (US only)."""
    try:
        ip_data = requests.get(f'https://ipinfo.io/{ip_address}/json', timeout=10).json()
        loc = ip_data.get('loc')
        if not loc:
            return {'error': 'No location'}
        lat, lon = loc.split(',')
        # api.weather.gov only covers the United States.
        if ip_data.get('country') != 'US':
            return {'error': 'Weather only available in US'}
        points = requests.get(f'https://api.weather.gov/points/{lat},{lon}', timeout=10).json()
        forecast = requests.get(points['properties']['forecast'], timeout=10).json()
        current = forecast['properties']['periods'][0]
        return {
            'ip': ip_address,
            'location': {'city': ip_data.get('city', 'Unknown'),
                         'country': ip_data.get('country'),
                         'latitude': lat, 'longitude': lon},
            'weather': {'temperature': current['temperature'],
                        'description': current['detailedForecast']}
        }
    except Exception as e:
        return {'error': str(e)}

@app.route('/search')
def search():
    q = request.args.get('q')
    if not q:
        return jsonify({'error': 'Missing query'}), 400
    n = request.args.get('n', default=10, type=int)
    # Try each provider in turn until one returns results.
    results = (scrape_startpage(q, n)
               or scrape_duckduckgo(q, n)
               or scrape_qwant(q, n)
               or scrape_wikipedia(q, n))
    if not results:
        return jsonify({'error': 'All search providers failed'}), 500
    return jsonify({'query': q, 'results': results})
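
# Example (assumes the default host/port set at the bottom of this file):
#
#   curl 'http://localhost:7860/search?q=flask&n=5'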

@app.route('/weather')
def weather():
    ip = request.args.get('ip')
    if not ip:
        return jsonify({'error': 'Missing ip'}), 400
    data = get_weather_from_ip(ip)
    if 'error' in data:
        return jsonify(data), 400
    return jsonify(data)
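
# Example (assumes the default host/port; the IP must geolocate to the US):
#
#   curl 'http://localhost:7860/weather?ip=8.8.8.8'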

@app.route('/')
def health():
    return jsonify({'status': 'running', 'message': 'GridLock search API'})

def morsify_key():
    # Reassembles the NVIDIA API key; the key material is redacted in this listing.
    prefix = "nvapi-"
    return prefix + "REDACTED_KEY"
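
# A safer pattern (a sketch, not part of the original code; NVIDIA_API_KEY is
# an assumed variable name) would load the key from the environment rather
# than embedding it in source:
#
#   import os
#   def get_api_key():
#       return os.environ["NVIDIA_API_KEY"]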

@app.route('/v1/chat/completions', methods=['POST'])
def chat_completions():
    """OpenAI-compatible proxy to NVIDIA's hosted inference endpoint."""
    data = request.get_json(force=True)
    if not data or 'messages' not in data:
        return jsonify({'error': 'Missing messages'}), 400
    stream = data.get('stream', True)
    api_key = morsify_key()
    payload = {
        "model": data.get("model", "meta/llama-4-scout-17b-16e-instruct"),
        "messages": data["messages"],
        "max_tokens": data.get("max_tokens", 512),
        "temperature": data.get("temperature", 1.0),
        "top_p": data.get("top_p", 1.0),
        "frequency_penalty": data.get("frequency_penalty", 0.0),
        "presence_penalty": data.get("presence_penalty", 0.0),
        "stream": stream
    }
    headers = {
        "Authorization": f"Bearer {api_key}",
        "Accept": "text/event-stream" if stream else "application/json"
    }
    try:
        r = requests.post("https://integrate.api.nvidia.com/v1/chat/completions",
                          headers=headers, json=payload, stream=stream, timeout=120)
        r.raise_for_status()
        if stream:
            def generate():
                for line in r.iter_lines():
                    if line:
                        text = line.decode()
                        # Lines from OpenAI-compatible streams typically arrive
                        # already prefixed with "data: "; strip it so we don't
                        # emit "data: data: ...".
                        if text.startswith("data: "):
                            text = text[len("data: "):]
                        yield f"data: {text}\n\n"
            return Response(stream_with_context(generate()), content_type='text/event-stream')
        else:
            return jsonify(r.json())
    except Exception as e:
        return jsonify({'error': str(e)}), 500
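
# Example streaming request (a sketch; assumes the default host/port set below):
#
#   curl -N http://localhost:7860/v1/chat/completions \
#     -H 'Content-Type: application/json' \
#     -d '{"messages": [{"role": "user", "content": "Hello"}], "stream": true}'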

if __name__ == '__main__':
    # 7860 is the standard application port for Hugging Face Spaces.
    app.run(host='0.0.0.0', port=7860, debug=True)