Install
openclaw skills install yula-web-search

Yula's custom web search — NO API KEY REQUIRED. Uses multiple fallback search methods with public services that allow anonymous access, via direct curl requests.
Uses multiple public anonymous search services that don't require API keys. Works via direct curl requests from local network:
Just works, no configuration needed.
✅ USE this skill when:
❌ DON'T use when:
Method 1 (Bing): direct request to Chinese Bing; parse the HTML to extract result titles and URLs:
QUERY="your search query"
# URL-encode the query (fixed: the command substitution was missing its closing parenthesis).
QUERY_ENCODED=$(python3 -c "import urllib.parse; print(urllib.parse.quote('$QUERY'))")
curl -s -m 20 -L -A "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36" "https://cn.bing.com/search?q=$QUERY_ENCODED" | python3 -c "
import sys
from html.parser import HTMLParser

class BingParser(HTMLParser):
    # Collects (title, url) pairs from Bing result <h2><a href=...> markup.
    def __init__(self):
        super().__init__()
        self.results = []
        self.in_h2 = False
        self.current_url = None
        self.current_title = []

    def handle_starttag(self, tag, attrs):
        attrs_dict = dict(attrs)
        if tag == 'h2':
            self.in_h2 = True
            self.current_title = []
        if tag == 'a' and self.in_h2 and 'href' in attrs_dict:
            url = attrs_dict['href']
            # Keep only absolute external URLs; skip Bing-internal links.
            if 'bing.com' not in url and url.startswith('http'):
                self.current_url = url

    def handle_endtag(self, tag):
        if tag == 'h2':
            if self.current_url:
                title = ''.join(self.current_title).strip()
                self.results.append((title, self.current_url))
                self.current_url = None
            self.in_h2 = False

    def handle_data(self, data):
        if self.in_h2 and self.current_url:
            self.current_title.append(data)

parser = BingParser()
parser.feed(sys.stdin.read())
# Print the top 8 results, tab-separated: rank, title, URL.
# (Fixed: the f-string previously opened with a single quote and closed with a
# double quote, which both broke Python syntax and ended the shell string early.)
for i, (title, url) in enumerate(parser.results[:8]):
    print(f'{i+1}\\t{title}\\t{url}')
"
If Bing fails, try Google:
QUERY="your search query"
# URL-encode the query (fixed: the command substitution was missing its closing parenthesis).
QUERY_ENCODED=$(python3 -c "import urllib.parse; print(urllib.parse.quote('$QUERY'))")
curl -s -m 20 -L -A "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36" "https://www.google.com/search?q=$QUERY_ENCODED" | python3 -c "
import re, sys

# Fixed: the original referenced an undefined 'html' variable and its loop
# body was 'pass', so nothing was ever printed.
html = sys.stdin.read()
# Google's result markup changes frequently; a simplified extraction that
# matches result anchors and prints the first 8 distinct external URLs.
pattern = r'<a href=\"(https?://[^\"]+)\"'
seen = []
for url in re.findall(pattern, html):
    if 'google.com' not in url and url not in seen:
        seen.append(url)
for i, url in enumerate(seen[:8]):
    print(f'{i+1}\\t{url}')
"
After getting search results, extract text content from top relevant URLs:
def extract_url_content(url):
    # Use curl to fetch the HTML.
    # Use Python to extract the text content: strip scripts and styles, keep the main text.
    # Return the cleaned text, limited to ~2000 chars per URL.
**Example: full workflow**
# After getting search results, select top 2-3 relevant URLs
# After getting search results, select the top 2-3 relevant URLs.
for (title, url) in selected_urls:
curl -s -m 20 -L -A "USER_AGENT" "$url" | python3 -c "
import sys
from html.parser import HTMLParser

class TextExtractor(HTMLParser):
    # Extracts visible text, skipping script/style/noscript content.
    # Fixed: the original used two boolean flags where a close tag of ANY
    # skipped element cleared the flag, so e.g. a </noscript> nested inside a
    # still-open <script> re-enabled text capture. A depth counter handles
    # nesting correctly, and the redundant in_style flag is gone.
    SKIP_TAGS = ('script', 'style', 'noscript')

    def __init__(self):
        super().__init__()
        self.text = []
        self.skip_depth = 0  # > 0 while inside script/style/noscript

    def handle_starttag(self, tag, attrs):
        if tag in self.SKIP_TAGS:
            self.skip_depth += 1

    def handle_endtag(self, tag):
        if tag in self.SKIP_TAGS and self.skip_depth > 0:
            self.skip_depth -= 1

    def handle_data(self, data):
        if self.skip_depth == 0:
            words = data.strip()
            if words:
                self.text.append(words)

parser = TextExtractor()
parser.feed(sys.stdin.read())
# Collapse whitespace and cap at ~2000 chars per URL.
content = ' '.join(' '.join(parser.text).split())[:2000]
print(content)
"
Always use a modern browser User-Agent to avoid being blocked immediately:
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36"
"Search 2026腾势Z9GT介绍"
**Step 1: Search Bing → get the top 8 results**
# Output gives title + url:
1. 2026款腾势Z9GT-腾势官网
URL: https://www.tengshiauto.com/product-detail/26-z9gt.html
2. 【腾势Z9GT】腾势_腾势Z9GT报价_腾势Z9GT图片_汽车之家
URL: https://www.autohome.com.cn/7659
...
**Step 2: Select the most relevant results**
**Step 3: Extract content from each URL**
**Step 4: Combine and summarize**
1. Search Bing → get (title + url)
↓
2. Filter by relevance → pick top 2-3
↓
3. Extract content from each URL
↓
4. Combine all text
↓
5. Summarize into final answer
↓
6. Present to user with sources
Created by Yula
GitHub: https://github.com/wjzhb/yula-web-search
Copyright (c) 2026 Yula
Licensed under the MIT License
If you find this skill useful, please ⭐ star it on GitHub!