Install

```
openclaw skills install crawl4ai
```

AI-powered web scraping framework for extracting structured data from websites.

Overview

Crawl4ai is an AI-powered web scraping framework designed to extract structured data from websites efficiently. It combines traditional HTML parsing with AI to handle dynamic content, extract text intelligently, and clean and structure data from complex web pages.

Use when Codex needs to:

- Crawl, scrape, or extract data from web pages using AI-powered parsing
- Handle dynamic content that loads via JavaScript
- Work with complex HTML structures

Trigger phrases: "Scrape this site for article titles", "Scrape this dynamic website", "Extract product details from this page", "Clean this HTML".
Basic usage: fetch a page and return its markdown and cleaned HTML.

```python
from crawl4ai import AsyncWebCrawler, BrowserMode

async def scrape_page(url):
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            url=url,
            browser_mode=BrowserMode.LATEST,
            headless=True
        )
        return result.markdown, result.clean_html
```
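For example, to drive this coroutine from a plain script (the URL is only a placeholder):

```python
import asyncio

md, html = asyncio.run(scrape_page("https://example.com"))
print(md[:200])  # first 200 characters of the page as markdown
```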
Extracting structured records from a page:

```python
import json

from crawl4ai import AsyncWebCrawler

async def extract_products(url):
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            url=url,
            screenshot=True,
            javascript=True,
            bypass_cache=True
        )
        # extracted_content arrives as a JSON string (or None); parse it first
        items = json.loads(result.extracted_content) if result.extracted_content else []
        # Keep only product records
        products = []
        for item in items:
            if item.get('type') == 'product':
                products.append({
                    'name': item.get('name'),
                    'price': item.get('price'),
                    'url': item.get('url')
                })
        return products
```
Scenario: User wants to scrape a website for all article titles.
```python
import json

from crawl4ai import AsyncWebCrawler

async def scrape_articles(url):
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            url=url,
            javascript=True,
            verbose=True
        )
        # Extract article titles from the parsed content (a JSON string, or None)
        articles = json.loads(result.extracted_content) if result.extracted_content else []
        titles = [item.get('name', item.get('text', '')) for item in articles]
        return titles
```
Trigger: "Scrape this site for article titles" or "Get all titles from [URL]"
Scenario: Website loads data via JavaScript.
```python
from crawl4ai import AsyncWebCrawler

async def scrape_dynamic_site(url):
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            url=url,
            javascript=True,  # Wait for JS execution
            wait_for="body",  # Wait for a specific element
            delay=1.5,        # Wait time after load, in seconds
            headless=True
        )
        return result.markdown
```
Trigger: "Scrape this dynamic website" or "This page needs JavaScript to load data"
Scenario: Extract specific fields like prices, descriptions, etc.
```python
from crawl4ai import AsyncWebCrawler

async def extract_product_details(url):
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            url=url,
            screenshot=True,
            js_code="""
                const products = document.querySelectorAll('.product');
                return Array.from(products).map(p => ({
                    name: p.querySelector('.name')?.textContent,
                    price: p.querySelector('.price')?.textContent,
                    url: p.querySelector('a')?.href
                }));
            """
        )
        return result.extracted_content
```
Trigger: "Extract product details from this page" or "Get price and name from [URL]"
Scenario: Clean messy HTML and extract clean text.
```python
from crawl4ai import AsyncWebCrawler

async def clean_and_parse(url):
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            url=url,
            remove_tags=['script', 'style', 'nav', 'footer', 'header'],
            only_main_content=True
        )
        # Return the cleaned HTML with boilerplate tags stripped
        return result.clean_html
```
Trigger: "Clean this HTML" or "Extract main content from this page"
Scenario: Run arbitrary JavaScript against a page and return only its result.

```python
from crawl4ai import AsyncWebCrawler

async def custom_scrape(url, custom_js):
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(
            url=url,
            js_code=custom_js,
            js_only=True  # Only execute JS, don't download resources
        )
        return result.extracted_content
```
Scenario: Scrape a batch of pages, keeping a separate session per URL.

```python
from crawl4ai import AsyncWebCrawler

async def multi_page_scrape(urls):
    async with AsyncWebCrawler() as crawler:
        results = []
        for url in urls:
            result = await crawler.arun(
                url=url,
                session_id=f"session_{url}",
                bypass_cache=True
            )
            results.append({
                'url': url,
                'content': result.markdown,
                'status': result.success
            })
        return results
```
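The loop above fetches pages one at a time. As a sketch only, and assuming a single crawler instance tolerates concurrent arun() calls (the examples above do not guarantee this), the batch could be parallelized with asyncio.gather:

```python
import asyncio

from crawl4ai import AsyncWebCrawler

async def multi_page_scrape_concurrent(urls):
    async with AsyncWebCrawler() as crawler:
        # Assumption: one crawler instance can serve several arun() calls at once
        tasks = [crawler.arun(url=url, bypass_cache=True) for url in urls]
        pages = await asyncio.gather(*tasks, return_exceptions=True)
        # Failed tasks come back as exceptions, so read attributes defensively
        return [
            {
                'url': url,
                'content': getattr(page, 'markdown', None),
                'status': getattr(page, 'success', False),
            }
            for url, page in zip(urls, pages)
        ]
```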
Error handling:

```python
from crawl4ai import AsyncWebCrawler

async def robust_scrape(url):
    try:
        async with AsyncWebCrawler() as crawler:
            result = await crawler.arun(
                url=url,
                timeout=30000  # 30-second timeout, in milliseconds
            )
            if result.success:
                return result.markdown, result.extracted_content
            print(f"Scraping failed: {result.error_message}")
            return None, None
    except Exception as e:
        print(f"Scraping error: {e}")
        return None, None
```
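A minimal way to call this from a script; the URL is a placeholder:

```python
import asyncio

markdown, extracted = asyncio.run(robust_scrape("https://example.com"))
if markdown is None:
    print("Scrape failed; see the logged error above")
else:
    print(markdown[:200])
```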
Crawl4ai supports multiple output formats:
- Markdown (result.markdown)
- Cleaned HTML (result.clean_html)
- Structured data (result.extracted_content)
- Screenshot (result.screenshot)
- Extracted links (result.links)
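All of these are attributes of the result returned by arun(). A quick sketch, using a placeholder URL and default settings:

```python
import asyncio

from crawl4ai import AsyncWebCrawler

async def show_outputs(url):
    async with AsyncWebCrawler() as crawler:
        result = await crawler.arun(url=url, screenshot=True)
        print(result.markdown[:200])     # page rendered as markdown
        print(len(result.clean_html))    # size of the cleaned HTML
        print(result.links)              # links discovered on the page
        print(result.extracted_content)  # structured data, when extraction ran
        # result.screenshot holds the captured image data

asyncio.run(show_outputs("https://example.com"))
```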
Python scripts for common crawling operations:

- scrape_single_page.py - Basic scraping utility
- scrape_multiple_pages.py - Batch scraping with pagination
- extract_from_html.py - HTML parsing helper
- clean_html.py - HTML cleaning utility
Documentation and examples:

- api_reference.md - Complete API documentation
- examples.md - Common use cases and patterns
- error_handling.md - Troubleshooting guide