# robots.txt for SpidyUrl.com
# This file tells search engine crawlers which URLs they may crawl on this site

# Default rule: allow all crawlers to access the entire site, including key
# files such as /sitemap.xml and /ads.txt. Crawlers that match a dedicated
# group below (Googlebot, Bingbot) follow that group instead of this one.
User-agent: *
Allow: /

# Specific rules for different crawlers
User-agent: Googlebot
Allow: /
# Crawl-delay is omitted here: Googlebot ignores the directive, and Google's
# crawl rate is managed through Search Console instead

User-agent: Bingbot
Allow: /
# Bingbot honors Crawl-delay (seconds between successive requests)
Crawl-delay: 2
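
# To shut out a specific crawler entirely, give it its own group with a
# blanket Disallow. "ExampleBot" below is a hypothetical name; uncomment
# and substitute a real user-agent string to use it:
# User-agent: ExampleBot
# Disallow: /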

# Disallow crawling of certain directories, if they exist (uncomment to use;
# see the wildcard pattern examples after this block)
# Disallow: /admin/
# Disallow: /private/
# Disallow: /temp/
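#
# Major crawlers such as Googlebot and Bingbot also support "*" wildcards and
# "$" end-of-URL anchors in rule paths (a widely supported extension to the
# robots.txt standard). The patterns below are hypothetical examples only:
# Disallow: /*?sessionid=
# Disallow: /*.pdf$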

# Note: explicit Allow rules for individual files are unnecessary here.
# "Allow: /" already covers /sitemap.xml and /ads.txt, and robots.txt itself
# is always fetched regardless of rules. Allow/Disallow lines are only honored
# as part of a User-agent group, so rules placed here would not apply
# globally as intended.

# Sitemap location
Sitemap: https://spidyurl.com/sitemap.xml
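# Multiple Sitemap directives may be listed if additional sitemaps exist;
# the URL below is a hypothetical example:
# Sitemap: https://spidyurl.com/sitemap-images.xml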

# Notes:
# - This robots.txt allows all search engines to crawl the entire site
# - Crawl-delay throttles crawlers that honor it (such as Bingbot) to avoid
#   overloading the server; Googlebot ignores the directive
# - Update the Sitemap URL when deploying to your actual domain
# - Add any directories you want to block from search engines in the
#   Disallow section above