# 51Degrees Robots.txt Feature — String Resources
#
# Edit this file to customise labels, descriptions, and messages shown on the
# Robots.txt admin settings page and in bot-denied responses.
#
# Localisation: copy this file as robots-strings.{locale}.yaml
# (e.g. robots-strings.fr_FR.yaml) alongside this one and translate the values.
# The plugin loads the locale-specific file automatically when WordPress is set
# to that language, falling back to this file for any missing keys.
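#
# For example, a minimal override file might look like this (hypothetical
# French values for illustration; keys omitted from the locale file fall
# back to the values in this file):
#
#   # robots-strings.fr_FR.yaml
#   robots:
#     page:
#       title: 'Paramètres Robots.txt'
#     button:
#       save: 'Enregistrer et générer'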
#
# Value syntax:
#   Single-quoted  'value'  — no escape sequences; use '' for a literal apostrophe.
#   Double-quoted  "value"  — supports \n \t \" \\.
#   HTML tags are permitted in values that are marked "(HTML)"; these are output
#   through wp_kses_post() in the template. All other values use esc_html().
#   Use %s as a sprintf() placeholder for dynamic content inserted at runtime.
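#
# For example (illustrative keys and values only):
#
#   example_single: 'It''s a bot'          # '' renders as a literal apostrophe
#   example_double: "Line one\nLine two"   # \n renders as a line break
#   example_format: 'Error detail: %s'     # %s is filled in via sprintf() at runtime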

robots:

  page:
    title: 'Robots.txt Settings'
    description: 'Configure bot traffic management and robots.txt generation. The generated robots.txt is advisory only — real enforcement happens via server-side 302 redirects based on live 51Degrees crawler detection.'

  notice:
    # (HTML) Shown when the resource key lacks IsCrawler / CrawlerUsage properties.
    no_crawler: '<strong>Warning:</strong> Your Resource Key does not include crawler detection properties (<code>IsCrawler</code> or <code>CrawlerUsage</code>). Robots.txt enforcement will not work until you add these properties to your key at <a href="https://configure.51degrees.com/" target="_blank">configure.51degrees.com</a>.'
    # (HTML) Shown when the resource key has IsCrawler but not CrawlerUsage.
    no_crawler_usage: '<strong>Note:</strong> Your Resource Key includes <code>IsCrawler</code> but not <code>CrawlerUsage</code>. Without <code>CrawlerUsage</code>, category selections have no effect — enforcement falls back to path-based rules only. Upgrade your key at <a href="https://configure.51degrees.com/" target="_blank">configure.51degrees.com</a> for category-based enforcement.'
    # (HTML) Shown when the resource key lacks RobotsTxt properties.
    no_robots_txt: '<strong>Note:</strong> Your Resource Key does not include the RobotsTxt <code>PlainText</code> property. Robots.txt generation will use local rules only. Upgrade your key at <a href="https://configure.51degrees.com/" target="_blank">configure.51degrees.com</a>.'
    # Plain text. Shown after a successful Save and Generate action.
    generate_success: 'Robots.txt successfully generated from the 51Degrees Cloud API and cached.'
    # (HTML) Shown when the Cloud API call fails. %s is replaced by the error detail.
    cloud_api_error: '<strong>Cloud API Error:</strong> The robots.txt could not be fetched from the 51Degrees Cloud API. Error: %s. Previous cached content will be used until the issue is resolved.'
    # (HTML) Shown when a physical robots.txt file blocks the virtual one.
    physical_file: '<strong>Warning:</strong> A physical <code>robots.txt</code> file exists in your WordPress root directory. WordPress will serve that file instead of the virtual one generated by this plugin. Delete or rename the physical file to use the virtual robots.txt.'
    # (HTML) Shown when CrawlerUsage categories fail to fetch from the Cloud API.
    categories_fetch_failed: '<strong>Error:</strong> Failed to fetch crawler categories from the 51Degrees Cloud API. The category checkboxes are empty. The API may be temporarily unavailable. Try refreshing the page in a few moments.'
    # Plain text. Appended to common.cloud.rejected / common.cloud.unreachable when a previous robots.txt cache exists.
    cached_state_suffix: 'Cached settings shown below.'
    # Plain text. %s is the last successful refresh timestamp.
    last_refresh_success: 'Last refreshed from cloud: %s'
    # (HTML) First %s = error message, second %s = timestamp.
    last_refresh_error: '<strong>Last refresh failed:</strong> %s (at %s). The previously cached robots.txt is still being served.'
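    # Example rendered output (illustrative error detail and timestamp only):
    #   <strong>Last refresh failed:</strong> Connection timed out (at 2025-06-01 09:30).
    #   The previously cached robots.txt is still being served.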

  field:
    enable_label: 'Enable Robots.txt Hosting'
    enable_checkbox: 'Enable virtual robots.txt generation with allowed crawler categories'
    # (HTML) Description shown below the Enable checkbox.
    enable_description: 'Generates a virtual <code>/robots.txt</code> file. This is advisory only and does not consume cloud requests.'
    enforce_label: 'Enable Crawler Enforcement'
    enforce_checkbox: 'Enable real-time crawler detection and 302 redirect enforcement'
    # (HTML) Description shown below the Enforce checkbox.
    enforce_description: '<strong>Warning:</strong> Every enforcement check consumes a cloud request on each page load. Enable this only if you want active bot blocking. You can host the robots.txt without enforcement to save on cloud requests.'
    categories_label: 'Allowed Crawler Categories'
    # (HTML) Description shown above the category checkboxes.
    categories_description: 'Select the crawler categories that are allowed access. If your key does not support <code>CrawlerUsage</code>, category selections have no effect.'
    tdl_label: 'Terms Document Locators (TDL)'
    # (HTML) Description shown above the TDL checkboxes.
    tdl_description: 'TDL lines reference immutable legal documents that define the terms under which crawlers may access your site. Selected URLs are emitted as <code>TDL:</code> lines in robots.txt between <code>User-agent:</code> and <code>Allow:/Disallow:</code>.'
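    # For example, a generated block with one TDL selected might read as follows
    # (illustrative URL and path):
    #   User-agent: *
    #   TDL: https://example.com/terms/v1
    #   Disallow: /private/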
    tdl_mow_label: 'MOW Standard Terms'
    tdl_standard_section_label: 'Standard Terms Document Locators'
    # (HTML) Description shown above the standard TDL checkboxes.
    tdl_standard_section_description: 'Standard TDLs are maintained by external organisations. The plugin checks daily for updated versions and uses the latest available automatically.'
    # (HTML) Description shown above the custom TDL input.
    tdl_custom_description: '<strong>Custom TDL:</strong> Enter one URL per line. Each URL should point to an immutable legal document defining the terms under which crawlers may access your site.'
    tdl_custom_placeholder: "https://example.com/terms/v1\nhttps://example.com/terms/v2"
    redirect_url_label: 'Redirect URL'
    redirect_url_placeholder: 'https://example.com/bot-landing'
    redirect_url_description: 'URL to redirect detected bots to (302 redirect). Leave empty to deny access without redirect.'
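    # With a redirect URL set, a detected bot receives a response along these
    # lines (illustrative URL):
    #   HTTP/1.1 302 Found
    #   Location: https://example.com/bot-landing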
    custom_top_label: 'Custom Top Entries'
    custom_top_placeholder: 'User-agent: *'
    custom_top_description: 'Custom robots.txt entries placed before the 51Degrees section.'
    custom_bottom_label: 'Custom Bottom Entries'
    custom_bottom_placeholder: 'Sitemap: https://example.com/sitemap.xml'
    custom_bottom_description: 'Custom robots.txt entries placed after the 51Degrees section.'
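    # The generated file is therefore laid out as follows (sketch; actual
    # content depends on your settings):
    #   <custom top entries>
    #   <51Degrees-generated rules>
    #   <custom bottom entries>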

  button:
    save: 'Save and Generate'

  preview:
    title: 'Preview'
    description: 'Preview of the generated robots.txt content:'
    empty: '# No rules configured yet. Enable robots.txt management and configure settings above.'

  links:
    view: 'View robots.txt'

  bot_denied:
    # Plain text. Title and body of the HTML access-denied page served to blocked crawlers.
    title: 'Access Denied'
    message: 'Access denied for crawlers — please refer to the terms listed in robots.txt.'
